summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--contrib/llvm/include/llvm-c/Core.h39
-rw-r--r--contrib/llvm/include/llvm-c/Disassembler.h9
-rw-r--r--contrib/llvm/include/llvm-c/Target.h46
-rw-r--r--contrib/llvm/include/llvm-c/TargetMachine.h2
-rw-r--r--contrib/llvm/include/llvm-c/Transforms/Vectorize.h3
-rw-r--r--contrib/llvm/include/llvm/ADT/APFloat.h11
-rw-r--r--contrib/llvm/include/llvm/ADT/APInt.h29
-rw-r--r--contrib/llvm/include/llvm/ADT/ArrayRef.h13
-rw-r--r--contrib/llvm/include/llvm/ADT/BitVector.h86
-rw-r--r--contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h15
-rw-r--r--contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h14
-rw-r--r--contrib/llvm/include/llvm/ADT/DenseMap.h11
-rw-r--r--contrib/llvm/include/llvm/ADT/DenseMapInfo.h6
-rw-r--r--contrib/llvm/include/llvm/ADT/EquivalenceClasses.h2
-rw-r--r--contrib/llvm/include/llvm/ADT/FoldingSet.h9
-rw-r--r--contrib/llvm/include/llvm/ADT/Hashing.h3
-rw-r--r--contrib/llvm/include/llvm/ADT/ImmutableList.h5
-rw-r--r--contrib/llvm/include/llvm/ADT/ImmutableMap.h4
-rw-r--r--contrib/llvm/include/llvm/ADT/ImmutableSet.h81
-rw-r--r--contrib/llvm/include/llvm/ADT/MapVector.h90
-rw-r--r--contrib/llvm/include/llvm/ADT/Optional.h9
-rw-r--r--contrib/llvm/include/llvm/ADT/OwningPtr.h27
-rw-r--r--contrib/llvm/include/llvm/ADT/PackedVector.h27
-rw-r--r--contrib/llvm/include/llvm/ADT/PointerIntPair.h4
-rw-r--r--contrib/llvm/include/llvm/ADT/ScopedHashTable.h4
-rw-r--r--contrib/llvm/include/llvm/ADT/SetVector.h92
-rw-r--r--contrib/llvm/include/llvm/ADT/SmallBitVector.h30
-rw-r--r--contrib/llvm/include/llvm/ADT/SmallPtrSet.h7
-rw-r--r--contrib/llvm/include/llvm/ADT/SmallString.h100
-rw-r--r--contrib/llvm/include/llvm/ADT/SmallVector.h156
-rw-r--r--contrib/llvm/include/llvm/ADT/SparseBitVector.h18
-rw-r--r--contrib/llvm/include/llvm/ADT/SparseSet.h10
-rw-r--r--contrib/llvm/include/llvm/ADT/StringExtras.h23
-rw-r--r--contrib/llvm/include/llvm/ADT/StringRef.h163
-rw-r--r--contrib/llvm/include/llvm/ADT/StringSet.h9
-rw-r--r--contrib/llvm/include/llvm/ADT/Trie.h334
-rw-r--r--contrib/llvm/include/llvm/ADT/Triple.h34
-rw-r--r--contrib/llvm/include/llvm/ADT/Twine.h28
-rw-r--r--contrib/llvm/include/llvm/ADT/ValueMap.h4
-rw-r--r--contrib/llvm/include/llvm/ADT/ilist.h5
-rw-r--r--contrib/llvm/include/llvm/AddressingMode.h41
-rw-r--r--contrib/llvm/include/llvm/Analysis/AliasAnalysis.h26
-rw-r--r--contrib/llvm/include/llvm/Analysis/AliasSetTracker.h5
-rw-r--r--contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h38
-rw-r--r--contrib/llvm/include/llvm/Analysis/CallGraph.h6
-rw-r--r--contrib/llvm/include/llvm/Analysis/CaptureTracking.h2
-rw-r--r--contrib/llvm/include/llvm/Analysis/CodeMetrics.h8
-rw-r--r--contrib/llvm/include/llvm/Analysis/ConstantFolding.h16
-rw-r--r--contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h885
-rw-r--r--contrib/llvm/include/llvm/Analysis/Dominators.h2
-rw-r--r--contrib/llvm/include/llvm/Analysis/IVUsers.h4
-rw-r--r--contrib/llvm/include/llvm/Analysis/InlineCost.h11
-rw-r--r--contrib/llvm/include/llvm/Analysis/InstructionSimplify.h54
-rw-r--r--contrib/llvm/include/llvm/Analysis/IntervalPartition.h4
-rw-r--r--contrib/llvm/include/llvm/Analysis/LazyValueInfo.h8
-rw-r--r--contrib/llvm/include/llvm/Analysis/Loads.h4
-rw-r--r--contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h124
-rw-r--r--contrib/llvm/include/llvm/Analysis/LoopInfo.h15
-rw-r--r--contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h1
-rw-r--r--contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h70
-rw-r--r--contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h6
-rw-r--r--contrib/llvm/include/llvm/Analysis/PHITransAddr.h6
-rw-r--r--contrib/llvm/include/llvm/Analysis/Passes.h23
-rw-r--r--contrib/llvm/include/llvm/Analysis/ProfileDataLoader.h139
-rw-r--r--contrib/llvm/include/llvm/Analysis/ProfileDataTypes.h39
-rw-r--r--contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h10
-rw-r--r--contrib/llvm/include/llvm/Analysis/RegionInfo.h39
-rw-r--r--contrib/llvm/include/llvm/Analysis/ScalarEvolution.h10
-rw-r--r--contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h14
-rw-r--r--contrib/llvm/include/llvm/Analysis/SparsePropagation.h6
-rw-r--r--contrib/llvm/include/llvm/Analysis/ValueTracking.h26
-rw-r--r--contrib/llvm/include/llvm/Argument.h5
-rw-r--r--contrib/llvm/include/llvm/Attributes.h553
-rw-r--r--contrib/llvm/include/llvm/BasicBlock.h5
-rw-r--r--contrib/llvm/include/llvm/Bitcode/Archive.h14
-rw-r--r--contrib/llvm/include/llvm/Bitcode/BitstreamReader.h8
-rw-r--r--contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h4
-rw-r--r--contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h7
-rw-r--r--contrib/llvm/include/llvm/CallingConv.h24
-rw-r--r--contrib/llvm/include/llvm/CodeGen/AsmPrinter.h10
-rw-r--r--contrib/llvm/include/llvm/CodeGen/CallingConvLower.h2
-rw-r--r--contrib/llvm/include/llvm/CodeGen/CommandFlags.h228
-rw-r--r--contrib/llvm/include/llvm/CodeGen/FastISel.h4
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GCMetadata.h5
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h7
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h4
-rw-r--r--contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/LiveInterval.h35
-rw-r--r--contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h49
-rw-r--r--contrib/llvm/include/llvm/CodeGen/LiveVariables.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h26
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h9
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h27
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineFunction.h12
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineInstr.h83
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h17
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h40
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h4
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h9
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h4
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineOperand.h49
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachinePostDominators.h87
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h80
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineScheduler.h237
-rw-r--r--contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h21
-rw-r--r--contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h5
-rw-r--r--contrib/llvm/include/llvm/CodeGen/Passes.h4
-rw-r--r--contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h4
-rw-r--r--contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h4
-rw-r--r--contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h19
-rw-r--r--contrib/llvm/include/llvm/CodeGen/RegisterPressure.h3
-rw-r--r--contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h9
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h99
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ScheduleDAGILP.h86
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h114
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h5
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SelectionDAG.h16
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h65
-rw-r--r--contrib/llvm/include/llvm/CodeGen/TargetSchedule.h167
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ValueTypes.h128
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ValueTypes.td62
-rw-r--r--contrib/llvm/include/llvm/Constant.h9
-rw-r--r--contrib/llvm/include/llvm/Constants.h56
-rw-r--r--contrib/llvm/include/llvm/DIBuilder.h43
-rw-r--r--contrib/llvm/include/llvm/DataLayout.h (renamed from contrib/llvm/include/llvm/Target/TargetData.h)170
-rw-r--r--contrib/llvm/include/llvm/DebugInfo.h12
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DIContext.h36
-rw-r--r--contrib/llvm/include/llvm/DefaultPasses.h2
-rw-r--r--contrib/llvm/include/llvm/DerivedTypes.h35
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h19
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h15
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h31
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/ObjectBuffer.h80
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/ObjectImage.h61
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h47
-rw-r--r--contrib/llvm/include/llvm/Function.h85
-rw-r--r--contrib/llvm/include/llvm/GlobalAlias.h5
-rw-r--r--contrib/llvm/include/llvm/GlobalValue.h32
-rw-r--r--contrib/llvm/include/llvm/GlobalVariable.h7
-rw-r--r--contrib/llvm/include/llvm/IRBuilder.h69
-rw-r--r--contrib/llvm/include/llvm/InitializePasses.h13
-rw-r--r--contrib/llvm/include/llvm/InlineAsm.h30
-rw-r--r--contrib/llvm/include/llvm/InstrTypes.h29
-rw-r--r--contrib/llvm/include/llvm/Instruction.h5
-rw-r--r--contrib/llvm/include/llvm/Instructions.h437
-rw-r--r--contrib/llvm/include/llvm/IntrinsicInst.h58
-rw-r--r--contrib/llvm/include/llvm/Intrinsics.h4
-rw-r--r--contrib/llvm/include/llvm/Intrinsics.td12
-rw-r--r--contrib/llvm/include/llvm/IntrinsicsARM.td437
-rw-r--r--contrib/llvm/include/llvm/IntrinsicsMips.td125
-rw-r--r--contrib/llvm/include/llvm/IntrinsicsX86.td82
-rw-r--r--contrib/llvm/include/llvm/LLVMContext.h10
-rw-r--r--contrib/llvm/include/llvm/LinkAllPasses.h8
-rw-r--r--contrib/llvm/include/llvm/MC/MCAsmBackend.h24
-rw-r--r--contrib/llvm/include/llvm/MC/MCAsmInfo.h20
-rw-r--r--contrib/llvm/include/llvm/MC/MCAssembler.h38
-rw-r--r--contrib/llvm/include/llvm/MC/MCCodeEmitter.h10
-rw-r--r--contrib/llvm/include/llvm/MC/MCContext.h5
-rw-r--r--contrib/llvm/include/llvm/MC/MCDwarf.h11
-rw-r--r--contrib/llvm/include/llvm/MC/MCELFObjectWriter.h9
-rw-r--r--contrib/llvm/include/llvm/MC/MCExpr.h21
-rw-r--r--contrib/llvm/include/llvm/MC/MCInst.h2
-rw-r--r--contrib/llvm/include/llvm/MC/MCInstPrinter.h13
-rw-r--r--contrib/llvm/include/llvm/MC/MCInstrDesc.h2
-rw-r--r--contrib/llvm/include/llvm/MC/MCLabel.h8
-rw-r--r--contrib/llvm/include/llvm/MC/MCMachObjectWriter.h6
-rw-r--r--contrib/llvm/include/llvm/MC/MCObjectFileInfo.h3
-rw-r--r--contrib/llvm/include/llvm/MC/MCObjectStreamer.h10
-rw-r--r--contrib/llvm/include/llvm/MC/MCObjectWriter.h5
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h4
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h14
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h38
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h8
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h54
-rw-r--r--contrib/llvm/include/llvm/MC/MCRegisterInfo.h9
-rw-r--r--contrib/llvm/include/llvm/MC/MCSchedule.h141
-rw-r--r--contrib/llvm/include/llvm/MC/MCSection.h8
-rw-r--r--contrib/llvm/include/llvm/MC/MCSectionCOFF.h1
-rw-r--r--contrib/llvm/include/llvm/MC/MCSectionELF.h1
-rw-r--r--contrib/llvm/include/llvm/MC/MCSectionMachO.h1
-rw-r--r--contrib/llvm/include/llvm/MC/MCStreamer.h24
-rw-r--r--contrib/llvm/include/llvm/MC/MCSubtargetInfo.h65
-rw-r--r--contrib/llvm/include/llvm/MC/MCSymbol.h11
-rw-r--r--contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h10
-rw-r--r--contrib/llvm/include/llvm/MC/MCTargetAsmParser.h77
-rw-r--r--contrib/llvm/include/llvm/MC/MCValue.h2
-rw-r--r--contrib/llvm/include/llvm/MC/SubtargetFeature.h6
-rw-r--r--contrib/llvm/include/llvm/MDBuilder.h21
-rw-r--r--contrib/llvm/include/llvm/Metadata.h10
-rw-r--r--contrib/llvm/include/llvm/Object/Archive.h1
-rw-r--r--contrib/llvm/include/llvm/Object/Binary.h5
-rw-r--r--contrib/llvm/include/llvm/Object/COFF.h3
-rw-r--r--contrib/llvm/include/llvm/Object/ELF.h370
-rw-r--r--contrib/llvm/include/llvm/Object/MachO.h3
-rw-r--r--contrib/llvm/include/llvm/Object/MachOFormat.h10
-rw-r--r--contrib/llvm/include/llvm/Object/ObjectFile.h43
-rw-r--r--contrib/llvm/include/llvm/Object/RelocVisitor.h131
-rw-r--r--contrib/llvm/include/llvm/Operator.h50
-rw-r--r--contrib/llvm/include/llvm/Pass.h5
-rw-r--r--contrib/llvm/include/llvm/PassAnalysisSupport.h2
-rw-r--r--contrib/llvm/include/llvm/PassSupport.h4
-rw-r--r--contrib/llvm/include/llvm/Support/AlignOf.h65
-rw-r--r--contrib/llvm/include/llvm/Support/Allocator.h8
-rw-r--r--contrib/llvm/include/llvm/Support/CallSite.h36
-rw-r--r--contrib/llvm/include/llvm/Support/Casting.h16
-rw-r--r--contrib/llvm/include/llvm/Support/CommandLine.h17
-rw-r--r--contrib/llvm/include/llvm/Support/Compiler.h35
-rw-r--r--contrib/llvm/include/llvm/Support/DataExtractor.h5
-rw-r--r--contrib/llvm/include/llvm/Support/ELF.h60
-rw-r--r--contrib/llvm/include/llvm/Support/FileOutputBuffer.h7
-rw-r--r--contrib/llvm/include/llvm/Support/FileSystem.h78
-rw-r--r--contrib/llvm/include/llvm/Support/Format.h40
-rw-r--r--contrib/llvm/include/llvm/Support/FormattedStream.h11
-rw-r--r--contrib/llvm/include/llvm/Support/GCOV.h32
-rw-r--r--contrib/llvm/include/llvm/Support/InstVisitor.h6
-rw-r--r--contrib/llvm/include/llvm/Support/IntegersSubset.h8
-rw-r--r--contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h18
-rw-r--r--contrib/llvm/include/llvm/Support/LEB128.h2
-rw-r--r--contrib/llvm/include/llvm/Support/LockFileManager.h4
-rw-r--r--contrib/llvm/include/llvm/Support/MathExtras.h31
-rw-r--r--contrib/llvm/include/llvm/Support/Memory.h65
-rw-r--r--contrib/llvm/include/llvm/Support/MemoryBuffer.h5
-rw-r--r--contrib/llvm/include/llvm/Support/Mutex.h5
-rw-r--r--contrib/llvm/include/llvm/Support/MutexGuard.h4
-rw-r--r--contrib/llvm/include/llvm/Support/PathV1.h4
-rw-r--r--contrib/llvm/include/llvm/Support/PathV2.h113
-rw-r--r--contrib/llvm/include/llvm/Support/PrettyStackTrace.h10
-rw-r--r--contrib/llvm/include/llvm/Support/Program.h4
-rw-r--r--contrib/llvm/include/llvm/Support/RWMutex.h5
-rw-r--r--contrib/llvm/include/llvm/Support/Regex.h8
-rw-r--r--contrib/llvm/include/llvm/Support/Registry.h7
-rw-r--r--contrib/llvm/include/llvm/Support/SourceMgr.h8
-rw-r--r--contrib/llvm/include/llvm/Support/StreamableMemoryObject.h20
-rw-r--r--contrib/llvm/include/llvm/Support/TargetFolder.h15
-rw-r--r--contrib/llvm/include/llvm/Support/TargetRegistry.h31
-rw-r--r--contrib/llvm/include/llvm/Support/Threading.h4
-rw-r--r--contrib/llvm/include/llvm/Support/TimeValue.h7
-rw-r--r--contrib/llvm/include/llvm/Support/Timer.h7
-rw-r--r--contrib/llvm/include/llvm/Support/ValueHandle.h4
-rw-r--r--contrib/llvm/include/llvm/Support/YAMLParser.h13
-rw-r--r--contrib/llvm/include/llvm/Support/circular_raw_ostream.h4
-rw-r--r--contrib/llvm/include/llvm/Support/raw_os_ostream.h10
-rw-r--r--contrib/llvm/include/llvm/Support/raw_ostream.h64
-rw-r--r--contrib/llvm/include/llvm/Support/system_error.h8
-rw-r--r--contrib/llvm/include/llvm/Support/type_traits.h10
-rw-r--r--contrib/llvm/include/llvm/SymbolTableListTraits.h1
-rw-r--r--contrib/llvm/include/llvm/TableGen/Error.h19
-rw-r--r--contrib/llvm/include/llvm/TableGen/Main.h9
-rw-r--r--contrib/llvm/include/llvm/TableGen/Record.h486
-rw-r--r--contrib/llvm/include/llvm/TableGen/TableGenAction.h35
-rw-r--r--contrib/llvm/include/llvm/Target/Mangler.h9
-rw-r--r--contrib/llvm/include/llvm/Target/Target.td104
-rw-r--r--contrib/llvm/include/llvm/Target/TargetCallingConv.h27
-rw-r--r--contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h121
-rw-r--r--contrib/llvm/include/llvm/Target/TargetInstrInfo.h37
-rw-r--r--contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h5
-rw-r--r--contrib/llvm/include/llvm/Target/TargetLibraryInfo.h103
-rw-r--r--contrib/llvm/include/llvm/Target/TargetLowering.h131
-rw-r--r--contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h9
-rw-r--r--contrib/llvm/include/llvm/Target/TargetMachine.h20
-rw-r--r--contrib/llvm/include/llvm/Target/TargetOpcodes.h6
-rw-r--r--contrib/llvm/include/llvm/Target/TargetOptions.h4
-rw-r--r--contrib/llvm/include/llvm/Target/TargetRegisterInfo.h62
-rw-r--r--contrib/llvm/include/llvm/Target/TargetSchedule.td340
-rw-r--r--contrib/llvm/include/llvm/Target/TargetSelectionDAG.td4
-rw-r--r--contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h10
-rw-r--r--contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h23
-rw-r--r--contrib/llvm/include/llvm/Target/TargetTransformImpl.h98
-rw-r--r--contrib/llvm/include/llvm/TargetTransformInfo.h204
-rw-r--r--contrib/llvm/include/llvm/Transforms/IPO.h25
-rw-r--r--contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h2
-rw-r--r--contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h1
-rw-r--r--contrib/llvm/include/llvm/Transforms/Instrumentation.h2
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar.h6
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h3
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h28
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h32
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h33
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/Cloning.h15
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/IntegerDivision.h48
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/Local.h30
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h4
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h2
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h52
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h2
-rw-r--r--contrib/llvm/include/llvm/Transforms/Vectorize.h6
-rw-r--r--contrib/llvm/include/llvm/Type.h23
-rw-r--r--contrib/llvm/include/llvm/Use.h3
-rw-r--r--contrib/llvm/include/llvm/User.h46
-rw-r--r--contrib/llvm/include/llvm/Value.h12
-rw-r--r--contrib/llvm/lib/Analysis/AliasAnalysis.cpp10
-rw-r--r--contrib/llvm/lib/Analysis/AliasSetTracker.cpp6
-rw-r--r--contrib/llvm/lib/Analysis/Analysis.cpp4
-rw-r--r--contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp170
-rw-r--r--contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp134
-rw-r--r--contrib/llvm/lib/Analysis/CaptureTracking.cpp4
-rw-r--r--contrib/llvm/lib/Analysis/CodeMetrics.cpp10
-rw-r--r--contrib/llvm/lib/Analysis/ConstantFolding.cpp265
-rw-r--r--contrib/llvm/lib/Analysis/CostModel.cpp193
-rw-r--r--contrib/llvm/lib/Analysis/DependenceAnalysis.cpp3786
-rw-r--r--contrib/llvm/lib/Analysis/DominanceFrontier.cpp2
-rw-r--r--contrib/llvm/lib/Analysis/IPA/CallGraph.cpp13
-rw-r--r--contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp6
-rw-r--r--contrib/llvm/lib/Analysis/IVUsers.cpp6
-rw-r--r--contrib/llvm/lib/Analysis/InlineCost.cpp91
-rw-r--r--contrib/llvm/lib/Analysis/InstructionSimplify.cpp89
-rw-r--r--contrib/llvm/lib/Analysis/LazyValueInfo.cpp52
-rw-r--r--contrib/llvm/lib/Analysis/Lint.cpp56
-rw-r--r--contrib/llvm/lib/Analysis/Loads.cpp8
-rw-r--r--contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp362
-rw-r--r--contrib/llvm/lib/Analysis/LoopInfo.cpp11
-rw-r--r--contrib/llvm/lib/Analysis/MemoryBuiltins.cpp184
-rw-r--r--contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp30
-rw-r--r--contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp4
-rw-r--r--contrib/llvm/lib/Analysis/PHITransAddr.cpp2
-rw-r--r--contrib/llvm/lib/Analysis/ProfileDataLoader.cpp155
-rw-r--r--contrib/llvm/lib/Analysis/ProfileDataLoaderPass.cpp188
-rw-r--r--contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp2
-rw-r--r--contrib/llvm/lib/Analysis/ProfileInfo.cpp26
-rw-r--r--contrib/llvm/lib/Analysis/RegionInfo.cpp26
-rw-r--r--contrib/llvm/lib/Analysis/RegionPass.cpp5
-rw-r--r--contrib/llvm/lib/Analysis/ScalarEvolution.cpp116
-rw-r--r--contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp23
-rw-r--r--contrib/llvm/lib/Analysis/Trace.cpp2
-rw-r--r--contrib/llvm/lib/Analysis/ValueTracking.cpp61
-rw-r--r--contrib/llvm/lib/Archive/ArchiveInternals.h2
-rw-r--r--contrib/llvm/lib/Archive/ArchiveReader.cpp4
-rw-r--r--contrib/llvm/lib/AsmParser/LLLexer.cpp9
-rw-r--r--contrib/llvm/lib/AsmParser/LLParser.cpp257
-rw-r--r--contrib/llvm/lib/AsmParser/LLParser.h2
-rw-r--r--contrib/llvm/lib/AsmParser/LLToken.h11
-rw-r--r--contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp159
-rw-r--r--contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h67
-rw-r--r--contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp130
-rw-r--r--contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h6
-rw-r--r--contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/AllocationOrder.cpp5
-rw-r--r--contrib/llvm/lib/CodeGen/Analysis.cpp18
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp171
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp234
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp18
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h8
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h4
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp67
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h7
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp203
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h27
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h40
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/BranchFolding.cpp25
-rw-r--r--contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/CallingConvLower.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/CodeGen.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp8
-rw-r--r--contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp10
-rw-r--r--contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp11
-rw-r--r--contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/GCStrategy.cpp14
-rw-r--r--contrib/llvm/lib/CodeGen/IfConversion.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/InlineSpiller.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp8
-rw-r--r--contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/LiveInterval.cpp97
-rw-r--r--contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp831
-rw-r--r--contrib/llvm/lib/CodeGen/LiveIntervalUnion.h4
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp11
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRegMatrix.h2
-rw-r--r--contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp5
-rw-r--r--contrib/llvm/lib/CodeGen/LiveVariables.cpp42
-rw-r--r--contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp90
-rw-r--r--contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp20
-rw-r--r--contrib/llvm/lib/CodeGen/MachineCSE.cpp70
-rw-r--r--contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp13
-rw-r--r--contrib/llvm/lib/CodeGen/MachineFunction.cpp49
-rw-r--r--contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/MachineInstr.cpp337
-rw-r--r--contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp62
-rw-r--r--contrib/llvm/lib/CodeGen/MachineLICM.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/MachinePostDominators.cpp55
-rw-r--r--contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp18
-rw-r--r--contrib/llvm/lib/CodeGen/MachineScheduler.cpp1458
-rw-r--r--contrib/llvm/lib/CodeGen/MachineSink.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp74
-rw-r--r--contrib/llvm/lib/CodeGen/MachineTraceMetrics.h13
-rw-r--r--contrib/llvm/lib/CodeGen/MachineVerifier.cpp157
-rw-r--r--contrib/llvm/lib/CodeGen/Passes.cpp17
-rw-r--r--contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp5
-rw-r--r--contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp9
-rw-r--r--contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp8
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocBasic.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocFast.cpp73
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp8
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp17
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp10
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp1077
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterCoalescer.h7
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterPressure.cpp35
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterScavenging.cpp7
-rw-r--r--contrib/llvm/lib/CodeGen/ScheduleDAG.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp346
-rw-r--r--contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp1190
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp8
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp55
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp147
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp50
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp43
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h3
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp48
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp73
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp27
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeOrdering.h4
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp182
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp44
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp18
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h9
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp284
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp372
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h19
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp11
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp31
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp7
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp43
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp22
-rw-r--r--contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp86
-rw-r--r--contrib/llvm/lib/CodeGen/SlotIndexes.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/SplitKit.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/StackColoring.cpp783
-rw-r--r--contrib/llvm/lib/CodeGen/StackProtector.cpp69
-rw-r--r--contrib/llvm/lib/CodeGen/StackSlotColoring.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp11
-rw-r--r--contrib/llvm/lib/CodeGen/TailDuplication.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp120
-rw-r--r--contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp12
-rw-r--r--contrib/llvm/lib/CodeGen/TargetSchedule.cpp306
-rw-r--r--contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp499
-rw-r--r--contrib/llvm/lib/CodeGen/VirtRegMap.cpp16
-rw-r--r--contrib/llvm/lib/CodeGen/VirtRegMap.h4
-rw-r--r--contrib/llvm/lib/DebugInfo/DIContext.cpp7
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp48
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h16
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFContext.cpp220
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFContext.h40
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp1
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp184
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h54
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp27
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugLine.h8
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp67
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h78
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp76
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFFormValue.h2
-rw-r--r--contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp43
-rw-r--r--contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp2
-rw-r--r--contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp32
-rw-r--r--contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h (renamed from contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h)2
-rw-r--r--contrib/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h454
-rw-r--r--contrib/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h70
-rw-r--r--contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c481
-rw-r--r--contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h259
-rw-r--r--contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp8
-rw-r--r--contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp2
-rw-r--r--contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h4
-rw-r--r--contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp14
-rw-r--r--contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp4
-rw-r--r--contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h4
-rw-r--r--contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp36
-rw-r--r--contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp113
-rw-r--r--contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h26
-rw-r--r--contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp14
-rw-r--r--contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h50
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp8
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h6
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h (renamed from contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h)135
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp145
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp555
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h56
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h96
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp28
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h6
-rw-r--r--contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp9
-rw-r--r--contrib/llvm/lib/MC/ELFObjectWriter.cpp28
-rw-r--r--contrib/llvm/lib/MC/MCAsmBackend.cpp7
-rw-r--r--contrib/llvm/lib/MC/MCAsmInfo.cpp2
-rw-r--r--contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp4
-rw-r--r--contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp1
-rw-r--r--contrib/llvm/lib/MC/MCAsmStreamer.cpp27
-rw-r--r--contrib/llvm/lib/MC/MCAssembler.cpp15
-rw-r--r--contrib/llvm/lib/MC/MCContext.cpp6
-rw-r--r--contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp14
-rw-r--r--contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp3
-rw-r--r--contrib/llvm/lib/MC/MCDwarf.cpp2
-rw-r--r--contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp8
-rw-r--r--contrib/llvm/lib/MC/MCELFStreamer.cpp52
-rw-r--r--contrib/llvm/lib/MC/MCExpr.cpp11
-rw-r--r--contrib/llvm/lib/MC/MCInst.cpp4
-rw-r--r--contrib/llvm/lib/MC/MCInstPrinter.cpp14
-rw-r--r--contrib/llvm/lib/MC/MCLabel.cpp2
-rw-r--r--contrib/llvm/lib/MC/MCMachOStreamer.cpp59
-rw-r--r--contrib/llvm/lib/MC/MCObjectFileInfo.cpp33
-rw-r--r--contrib/llvm/lib/MC/MCObjectStreamer.cpp45
-rw-r--r--contrib/llvm/lib/MC/MCParser/AsmLexer.cpp13
-rw-r--r--contrib/llvm/lib/MC/MCParser/AsmParser.cpp578
-rw-r--r--contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp2
-rw-r--r--contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp3
-rw-r--r--contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp2
-rw-r--r--contrib/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp2
-rw-r--r--contrib/llvm/lib/MC/MCRegisterInfo.cpp3
-rw-r--r--contrib/llvm/lib/MC/MCStreamer.cpp4
-rw-r--r--contrib/llvm/lib/MC/MCSubtargetInfo.cpp57
-rw-r--r--contrib/llvm/lib/MC/MCSymbol.cpp4
-rw-r--r--contrib/llvm/lib/MC/MCValue.cpp2
-rw-r--r--contrib/llvm/lib/MC/MachObjectWriter.cpp53
-rw-r--r--contrib/llvm/lib/MC/SubtargetFeature.cpp35
-rw-r--r--contrib/llvm/lib/MC/WinCOFFStreamer.cpp42
-rw-r--r--contrib/llvm/lib/Object/COFFObjectFile.cpp14
-rw-r--r--contrib/llvm/lib/Object/MachOObjectFile.cpp21
-rw-r--r--contrib/llvm/lib/Support/APFloat.cpp230
-rw-r--r--contrib/llvm/lib/Support/Atomic.cpp14
-rw-r--r--contrib/llvm/lib/Support/CommandLine.cpp26
-rw-r--r--contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp10
-rw-r--r--contrib/llvm/lib/Support/DataExtractor.cpp6
-rw-r--r--contrib/llvm/lib/Support/DataStream.cpp2
-rw-r--r--contrib/llvm/lib/Support/DynamicLibrary.cpp2
-rw-r--r--contrib/llvm/lib/Support/Errno.cpp15
-rw-r--r--contrib/llvm/lib/Support/FoldingSet.cpp18
-rw-r--r--contrib/llvm/lib/Support/Host.cpp3
-rw-r--r--contrib/llvm/lib/Support/LockFileManager.cpp2
-rw-r--r--contrib/llvm/lib/Support/Memory.cpp56
-rw-r--r--contrib/llvm/lib/Support/MemoryBuffer.cpp58
-rw-r--r--contrib/llvm/lib/Support/SmallVector.cpp6
-rw-r--r--contrib/llvm/lib/Support/StreamableMemoryObject.cpp24
-rw-r--r--contrib/llvm/lib/Support/StringMap.cpp9
-rw-r--r--contrib/llvm/lib/Support/StringRef.cpp4
-rw-r--r--contrib/llvm/lib/Support/Triple.cpp59
-rw-r--r--contrib/llvm/lib/Support/Unix/Memory.inc206
-rw-r--r--contrib/llvm/lib/Support/Unix/Path.inc17
-rw-r--r--contrib/llvm/lib/Support/Unix/Signals.inc38
-rw-r--r--contrib/llvm/lib/Support/Windows/Memory.inc165
-rw-r--r--contrib/llvm/lib/Support/Windows/PathV2.inc2
-rw-r--r--contrib/llvm/lib/Support/YAMLParser.cpp7
-rw-r--r--contrib/llvm/lib/Support/raw_ostream.cpp12
-rw-r--r--contrib/llvm/lib/Support/regexec.c2
-rw-r--r--contrib/llvm/lib/Support/system_error.cpp10
-rw-r--r--contrib/llvm/lib/TableGen/Error.cpp35
-rw-r--r--contrib/llvm/lib/TableGen/Main.cpp138
-rw-r--r--contrib/llvm/lib/TableGen/Record.cpp615
-rw-r--r--contrib/llvm/lib/TableGen/TGParser.cpp94
-rw-r--r--contrib/llvm/lib/TableGen/TGParser.h23
-rw-r--r--contrib/llvm/lib/TableGen/TableGenAction.cpp15
-rw-r--r--contrib/llvm/lib/Target/ARM/ARM.h1
-rw-r--r--contrib/llvm/lib/Target/ARM/ARM.td31
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp98
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h40
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp943
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h16
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp173
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h13
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMCallingConv.td2
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp8
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp9
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h5
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp78
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h59
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp16
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMFastISel.cpp233
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp41
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp2
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp174
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp679
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMISelLowering.h19
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrFormats.td21
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp62
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrInfo.td212
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrNEON.td125
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrThumb.td6
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td138
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrVFP.td12
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp6
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h12
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td23
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMSchedule.td2
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMScheduleA9.td597
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td1085
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp9
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMSubtarget.h14
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp19
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMTargetMachine.h35
-rw-r--r--contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp280
-rw-r--r--contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp92
-rw-r--r--contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp584
-rw-r--r--contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h3
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp13
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp7
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp1
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp10
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h5
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp8
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp51
-rw-r--r--contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp74
-rw-r--r--contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp6
-rw-r--r--contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp43
-rw-r--r--contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h4
-rw-r--r--contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp5
-rw-r--r--contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h17
-rw-r--r--contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp35
-rw-r--r--contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h4
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td10
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp28
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td312
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp681
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h244
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp35
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp52
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h5
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td1
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td1
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp33
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp28
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h17
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp4
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp4
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h8
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp29
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp2
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp107
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h59
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp5
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp7
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h20
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp4
-rw-r--r--contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp4
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp16
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h1
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp4
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp4
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h2
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h2
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp6
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h16
-rw-r--r--contrib/llvm/lib/Target/Mangler.cpp9
-rw-r--r--contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp1289
-rw-r--r--contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp29
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp4
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h6
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp81
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h28
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp10
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp60
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips.td19
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp56
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h5
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp93
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h4
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td1225
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp122
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h5
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td95
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp2
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp42
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h8
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsCallingConv.td12
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp46
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td309
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsDSPInstrInfo.td1319
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp11
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp36
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsFrameLowering.h3
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp210
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp1204
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelLowering.h164
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrFPU.td41
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrFormats.td29
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp55
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.h12
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.td208
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp232
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp29
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMCInstLower.h3
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp13
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMachineFunction.h50
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp50
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h4
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td27
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp16
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp51
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h8
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp45
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h9
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp9
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSubtarget.h11
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp7
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetMachine.h18
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp10
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTX.td34
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h4
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp105
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp68
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h3
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp4
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h4
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp20
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h12
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp7
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h16
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp8
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp13
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp92
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp4
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp56
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPC.td10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp101
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td7
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp266
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h71
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp156
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp1111
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h60
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td115
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td32
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td41
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp24
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td200
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp36
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h5
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSchedule.td88
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td60
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td81
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td265
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td309
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td7
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td7
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td8
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp15
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h57
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp5
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h15
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td2
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp4
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h15
-rw-r--r--contrib/llvm/lib/Target/Target.cpp19
-rw-r--r--contrib/llvm/lib/Target/TargetELFWriterInfo.cpp25
-rw-r--r--contrib/llvm/lib/Target/TargetLibraryInfo.cpp75
-rw-r--r--contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp4
-rw-r--r--contrib/llvm/lib/Target/TargetMachineC.cpp12
-rw-r--r--contrib/llvm/lib/Target/TargetRegisterInfo.cpp6
-rw-r--r--contrib/llvm/lib/Target/TargetTransformImpl.cpp353
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp43
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp445
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp12
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h2
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c16
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h12
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h5
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp66
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h3
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp48
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h5
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp12
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h16
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp25
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp5
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp37
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp115
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h7
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp48
-rw-r--r--contrib/llvm/lib/Target/X86/X86.td13
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp113
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.h37
-rw-r--r--contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h2
-rw-r--r--contrib/llvm/lib/Target/X86/X86CallingConv.td59
-rw-r--r--contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp61
-rw-r--r--contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp147
-rw-r--r--contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h59
-rw-r--r--contrib/llvm/lib/Target/X86/X86FastISel.cpp56
-rw-r--r--contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp6
-rw-r--r--contrib/llvm/lib/Target/X86/X86FrameLowering.cpp13
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp609
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.cpp3024
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.h154
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrCompiler.td466
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrControl.td7
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFMA.td432
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFormats.td91
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td31
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.cpp536
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.h3
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.td83
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrMMX.td75
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSSE.td1488
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td78
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrTSX.td32
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrXOP.td17
-rw-r--r--contrib/llvm/lib/Target/X86/X86JITInfo.cpp17
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCInstLower.cpp54
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCInstLower.h52
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp66
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.h9
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.td496
-rw-r--r--contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.cpp25
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.h31
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetMachine.cpp19
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetMachine.h32
-rw-r--r--contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp12
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp4
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp13
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp16
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td4
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp4
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h15
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp64
-rw-r--r--contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp47
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp8
-rw-r--r--contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp107
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp63
-rw-r--r--contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp34
-rw-r--r--contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp93
-rw-r--r--contrib/llvm/lib/Transforms/IPO/IPO.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Inliner.cpp28
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Internalize.cpp38
-rw-r--r--contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp26
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp56
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PruneEH.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombine.h14
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp172
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp25
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp70
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp201
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp30
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h4
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp401
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp437
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp105
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/BlackList.h57
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp16
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp79
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h37
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp73
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h53
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp137
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp80
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp5
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DCE.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp203
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp120
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/GVN.cpp99
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp42
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp8
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LICM.cpp45
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp37
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp65
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp58
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp13
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp72
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp150
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp105
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SCCP.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SROA.cpp3697
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Scalar.cpp3
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp198
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp62
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp980
-rw-r--r--contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp45
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp154
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp262
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp14
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp420
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LCSSA.cpp15
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Local.cpp82
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp132
-rw-r--r--contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp18
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp1507
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp1149
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Utils.cpp1
-rw-r--r--contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp1012
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp1941
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp8
-rw-r--r--contrib/llvm/lib/VMCore/AsmWriter.cpp129
-rw-r--r--contrib/llvm/lib/VMCore/Attributes.cpp518
-rw-r--r--contrib/llvm/lib/VMCore/AttributesImpl.h71
-rw-r--r--contrib/llvm/lib/VMCore/AutoUpgrade.cpp3
-rw-r--r--contrib/llvm/lib/VMCore/ConstantFold.cpp28
-rw-r--r--contrib/llvm/lib/VMCore/Constants.cpp27
-rw-r--r--contrib/llvm/lib/VMCore/ConstantsContext.h35
-rw-r--r--contrib/llvm/lib/VMCore/Core.cpp62
-rw-r--r--contrib/llvm/lib/VMCore/DIBuilder.cpp40
-rw-r--r--contrib/llvm/lib/VMCore/DataLayout.cpp (renamed from contrib/llvm/lib/Target/TargetData.cpp)240
-rw-r--r--contrib/llvm/lib/VMCore/DebugInfo.cpp10
-rw-r--r--contrib/llvm/lib/VMCore/Dominators.cpp10
-rw-r--r--contrib/llvm/lib/VMCore/Function.cpp21
-rw-r--r--contrib/llvm/lib/VMCore/GCOV.cpp30
-rw-r--r--contrib/llvm/lib/VMCore/IRBuilder.cpp6
-rw-r--r--contrib/llvm/lib/VMCore/InlineAsm.cpp13
-rw-r--r--contrib/llvm/lib/VMCore/Instructions.cpp63
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContext.cpp5
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContextImpl.cpp18
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContextImpl.h9
-rw-r--r--contrib/llvm/lib/VMCore/PassManager.cpp2
-rw-r--r--contrib/llvm/lib/VMCore/TargetTransformInfo.cpp31
-rw-r--r--contrib/llvm/lib/VMCore/Type.cpp41
-rw-r--r--contrib/llvm/lib/VMCore/User.cpp9
-rw-r--r--contrib/llvm/lib/VMCore/Value.cpp2
-rw-r--r--contrib/llvm/lib/VMCore/ValueTypes.cpp30
-rw-r--r--contrib/llvm/lib/VMCore/Verifier.cpp181
-rw-r--r--contrib/llvm/tools/bugpoint/ExtractFunction.cpp2
-rw-r--r--contrib/llvm/tools/bugpoint/OptimizerDriver.cpp2
-rw-r--r--contrib/llvm/tools/clang/include/clang-c/Index.h170
-rw-r--r--contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTContext.h616
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Attr.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h17
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CharUnits.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Comment.h189
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h186
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentCommands.td156
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentHTMLTags.td54
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h116
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentParser.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentSema.h48
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Decl.h52
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclBase.h27
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h46
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h106
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h130
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Expr.h249
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h214
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h33
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/NSAPI.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Stmt.h377
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h92
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Type.h163
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h42
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h29
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h1445
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h621
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h65
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTTypeTraits.h209
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h30
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/CFG.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/ObjCNoReturn.h46
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Attr.td16
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Builtins.def65
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def63
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsNVPTX.def246
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h27
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td16
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td24
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td83
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticOptions.def93
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticOptions.h85
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td50
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td358
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td43
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileManager.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h33
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def22
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Module.h30
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h45
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Sanitizers.def69
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h21
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h59
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def41
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h25
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td8
-rw-r--r--contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Action.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Arg.h26
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ArgList.h72
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td43
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td419
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Compilation.h11
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Driver.h43
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Job.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/OptParser.td31
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/OptTable.h61
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Option.h324
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Options.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Options.td1647
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Tool.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h40
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Types.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h86
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h135
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def132
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h182
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h44
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h79
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h111
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h16
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h34
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def58
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h86
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h25
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderSearchOptions.h (renamed from contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h)11
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Lexer.h91
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h60
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h74
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h53
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PPMutationListener.h43
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h84
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h128
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessorOptions.h (renamed from contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h)13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Token.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h17
-rw-r--r--contrib/llvm/tools/clang/include/clang/Parse/Parser.h197
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Core/DeltaTree.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/DeltaTree.h)4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Core/HTMLRewrite.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/HTMLRewrite.h)0
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Core/RewriteRope.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h)26
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Core/Rewriter.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h)6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Core/TokenRewriter.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h)4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/ASTConsumers.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h)0
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/FixItRewriter.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h)2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/FrontendActions.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h)0
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/Rewriters.h (renamed from contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h)0
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h20
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h54
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h11
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Initialization.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/MultiplexExternalSemaSource.h367
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Overload.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Ownership.h216
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Scope.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h226
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Sema.h249
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Template.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h234
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h476
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h72
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/Module.h66
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Analyses.def (renamed from contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def)2
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h308
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h89
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h58
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h129
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h17
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h80
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h167
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h130
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h111
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h70
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h52
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h11
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h74
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h76
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h43
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h95
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h31
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h57
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h45
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h52
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/CommandLineClangTool.h80
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/CommonOptionsParser.h89
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h95
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabasePluginRegistry.h27
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/FileMatchTrie.h90
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/JSONCompilationDatabase.h107
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h48
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp75
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h2
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTContext.cpp325
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp277
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp478
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp32
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Comment.cpp93
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp51
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp141
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp48
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp62
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentParser.cpp72
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentSema.cpp314
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Decl.cpp88
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclBase.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp269
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp86
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DumpXML.cpp23
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Expr.cpp299
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp121
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp61
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp133
-rw-r--r--contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp242
-rw-r--r--contrib/llvm/tools/clang/lib/AST/NSAPI.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ParentMap.cpp69
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp69
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp84
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Stmt.cpp167
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp115
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp68
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp16
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp63
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Type.cpp55
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp55
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp411
-rw-r--r--contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp41
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/BodyFarm.cpp374
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/BodyFarm.h43
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CFG.cpp83
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp73
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ObjCNoReturn.cpp67
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp68
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp36
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp340
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp64
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c24
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp25
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/FileManager.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Module.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp133
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Targets.cpp406
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Version.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h38
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp109
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp138
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h26
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp430
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h12
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp223
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp31
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp257
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h3
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp53
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp77
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp568
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp32
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp71
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp41
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp482
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp86
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp156
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp798
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h8
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h4
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp26
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp128
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp60
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h133
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp227
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h36
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp57
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h15
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h6
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp58
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp65
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp752
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Arg.cpp34
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ArgList.cpp41
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Compilation.cpp100
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Driver.cpp248
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/OptTable.cpp158
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Option.cpp333
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.h106
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp61
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp481
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains.h43
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tools.cpp974
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tools.h5
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Types.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp306
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp139
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp1263
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp94
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp76
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp133
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp86
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp232
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp203
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp213
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/__wmmintrin_aes.h67
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/__wmmintrin_pclmul.h34
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/altivec.h26
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h19
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/cpuid.h2
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/f16cintrin.h58
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/immintrin.h4
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/module.map37
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/rtmintrin.h49
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/unwind.h2
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/wmmintrin.h41
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/x86intrin.h4
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/xmmintrin.h9
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp47
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Lexer.cpp75
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp263
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp90
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp524
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp56
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp333
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Pragma.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp56
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp88
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp224
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp190
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp179
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp203
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp67
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp248
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.h37
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp297
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp131
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/Parser.cpp272
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h10
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Core/CMakeLists.txt24
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Core/DeltaTree.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp)5
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Core/HTMLRewrite.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp)5
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Core/Makefile18
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Core/RewriteRope.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp)6
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Core/Rewriter.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp)2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Core/TokenRewriter.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp)2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/CMakeLists.txt28
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/FixItRewriter.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp)2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/FrontendActions.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp)8
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/HTMLPrint.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp)6
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/InclusionRewriter.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/InclusionRewriter.cpp)12
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/Makefile18
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteMacros.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp)4
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteModernObjC.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp)156
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteObjC.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp)38
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteTest.cpp (renamed from contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp)4
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp290
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp38
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp138
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp271
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp189
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Sema.cpp62
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp23
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp68
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp715
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp86
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp829
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp135
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp687
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp145
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp64
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp475
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp163
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp34
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp124
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp120
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp31
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp43
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp227
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp430
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp87
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp930
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp661
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp365
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp160
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp126
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp172
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaType.cpp176
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TreeTransform.h360
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp2323
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp151
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp35
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp781
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp58
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp28
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/Module.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp42
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp92
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp109
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp77
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp59
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td83
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp72
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp35
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp60
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp180
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp550
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp41
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp16
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp259
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp87
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp218
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp203
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp94
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp16
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp356
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp19
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp348
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp23
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp46
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp36
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp138
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp446
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp517
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp602
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp166
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp27
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp54
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp39
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp15
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp204
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp155
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp414
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp133
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp79
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp351
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp82
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp15
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp34
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp306
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp47
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp68
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp308
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp37
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp25
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp86
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp109
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp154
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp (renamed from contrib/llvm/tools/clang/lib/Tooling/CommandLineClangTool.cpp)61
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp297
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/CustomCompilationDatabase.h42
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp188
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp303
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp30
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1_main.cpp78
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp13
-rw-r--r--contrib/llvm/tools/clang/tools/driver/driver.cpp12
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp4
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp72
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp69
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp20
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp23
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp28
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp101
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp175
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h5
-rw-r--r--contrib/llvm/tools/llc/llc.cpp219
-rw-r--r--contrib/llvm/tools/lli/RecordingMemoryManager.cpp87
-rw-r--r--contrib/llvm/tools/lli/RecordingMemoryManager.h78
-rw-r--r--contrib/llvm/tools/lli/RemoteTarget.cpp61
-rw-r--r--contrib/llvm/tools/lli/RemoteTarget.h101
-rw-r--r--contrib/llvm/tools/lli/lli.cpp258
-rw-r--r--contrib/llvm/tools/llvm-ar/llvm-ar.cpp203
-rw-r--r--contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp30
-rw-r--r--contrib/llvm/tools/llvm-extract/llvm-extract.cpp51
-rw-r--r--contrib/llvm/tools/llvm-mc/llvm-mc.cpp11
-rw-r--r--contrib/llvm/tools/llvm-nm/llvm-nm.cpp9
-rw-r--r--contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp17
-rw-r--r--contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp59
-rw-r--r--contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp8
-rw-r--r--contrib/llvm/tools/llvm-stress/llvm-stress.cpp4
-rw-r--r--contrib/llvm/tools/opt/opt.cpp98
-rw-r--r--contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp558
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp73
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterInst.cpp23
-rw-r--r--contrib/llvm/utils/TableGen/CallingConvEmitter.cpp11
-rw-r--r--contrib/llvm/utils/TableGen/CodeEmitterGen.cpp13
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp666
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h63
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenInstruction.cpp148
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenInstruction.h23
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenMapTable.cpp606
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenRegisters.cpp112
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenRegisters.h14
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenSchedule.cpp1664
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenSchedule.h368
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.cpp59
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.h8
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcher.h2
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp11
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp29
-rw-r--r--contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp168
-rw-r--r--contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp6
-rw-r--r--contrib/llvm/utils/TableGen/EDEmitter.cpp44
-rw-r--r--contrib/llvm/utils/TableGen/FastISelEmitter.cpp13
-rw-r--r--contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp19
-rw-r--r--contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp17
-rw-r--r--contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp79
-rw-r--r--contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp34
-rw-r--r--contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp166
-rw-r--r--contrib/llvm/utils/TableGen/SequenceToOffsetTable.h6
-rw-r--r--contrib/llvm/utils/TableGen/SetTheory.cpp140
-rw-r--r--contrib/llvm/utils/TableGen/SetTheory.h10
-rw-r--r--contrib/llvm/utils/TableGen/SubtargetEmitter.cpp688
-rw-r--r--contrib/llvm/utils/TableGen/TGValueTypes.cpp26
-rw-r--r--contrib/llvm/utils/TableGen/TableGen.cpp159
-rw-r--r--contrib/llvm/utils/TableGen/TableGenBackends.h1
-rw-r--r--contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp21
-rw-r--r--contrib/llvm/utils/TableGen/X86ModRMFilters.h25
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp35
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.h23
-rw-r--r--lib/clang/Makefile3
-rw-r--r--lib/clang/clang.build.mk29
-rw-r--r--lib/clang/include/Makefile6
-rw-r--r--lib/clang/include/MipsGenAsmMatcher.inc2
-rw-r--r--lib/clang/include/MipsGenMCPseudoLowering.inc2
-rw-r--r--lib/clang/include/clang/AST/CommentCommandInfo.inc2
-rw-r--r--lib/clang/include/clang/AST/CommentHTMLTags.inc2
-rw-r--r--lib/clang/include/clang/AST/CommentHTMLTagsProperties.inc2
-rw-r--r--lib/clang/include/clang/Basic/Version.inc4
-rw-r--r--lib/clang/include/llvm/Config/config.h6
-rw-r--r--lib/clang/libclanganalysis/Makefile4
-rw-r--r--lib/clang/libclangast/Makefile3
-rw-r--r--lib/clang/libclangrewritecore/Makefile18
-rw-r--r--lib/clang/libclangrewritefrontend/Makefile (renamed from lib/clang/libclangrewrite/Makefile)14
-rw-r--r--lib/clang/libclangsema/Makefile3
-rw-r--r--lib/clang/libclangstaticanalyzercheckers/Makefile8
-rw-r--r--lib/clang/libclangstaticanalyzercore/Makefile7
-rw-r--r--lib/clang/libllvmanalysis/Makefile5
-rw-r--r--lib/clang/libllvmarmcodegen/Makefile1
-rw-r--r--lib/clang/libllvmcodegen/Makefile13
-rw-r--r--lib/clang/libllvmcore/Makefile3
-rw-r--r--lib/clang/libllvmdebuginfo/Makefile1
-rw-r--r--lib/clang/libllvminstrumentation/Makefile2
-rw-r--r--lib/clang/libllvmipo/Makefile1
-rw-r--r--lib/clang/libllvmmcjit/Makefile4
-rw-r--r--lib/clang/libllvmmipsasmparser/Makefile3
-rw-r--r--lib/clang/libllvmmipscodegen/Makefile1
-rw-r--r--lib/clang/libllvmmipsdesc/Makefile1
-rw-r--r--lib/clang/libllvmscalaropts/Makefile3
-rw-r--r--lib/clang/libllvmtablegen/Makefile1
-rw-r--r--lib/clang/libllvmtarget/Makefile6
-rw-r--r--lib/clang/libllvmtransformutils/Makefile7
-rw-r--r--lib/clang/libllvmvectorize/Makefile1
-rw-r--r--lib/clang/libllvmx86codegen/Makefile1
-rw-r--r--tools/build/mk/OptionalObsoleteFiles.inc4
-rw-r--r--usr.bin/clang/clang-tblgen/Makefile2
-rw-r--r--usr.bin/clang/clang/Makefile9
-rw-r--r--usr.bin/clang/llc/Makefile6
-rw-r--r--usr.bin/clang/lli/Makefile8
-rw-r--r--usr.bin/clang/llvm-mc/Makefile6
-rw-r--r--usr.bin/clang/llvm-objdump/Makefile6
-rw-r--r--usr.bin/clang/llvm-rtdyld/Makefile6
-rw-r--r--usr.bin/clang/opt/Makefile29
-rw-r--r--usr.bin/clang/tblgen/Makefile1
1658 files changed, 106116 insertions, 43299 deletions
diff --git a/contrib/llvm/include/llvm-c/Core.h b/contrib/llvm/include/llvm-c/Core.h
index 0bd5db3..620d088 100644
--- a/contrib/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm/include/llvm-c/Core.h
@@ -173,10 +173,11 @@ typedef enum {
LLVMUWTable = 1 << 30,
LLVMNonLazyBind = 1 << 31
- // FIXME: This attribute is currently not included in the C API as
- // a temporary measure until the API/ABI impact to the C API is understood
- // and the path forward agreed upon.
- //LLVMAddressSafety = 1ULL << 32
+ /* FIXME: This attribute is currently not included in the C API as
+ a temporary measure until the API/ABI impact to the C API is understood
+ and the path forward agreed upon.
+ LLVMAddressSafety = 1ULL << 32
+ */
} LLVMAttribute;
typedef enum {
@@ -282,6 +283,7 @@ typedef enum {
LLVMLinkOnceAnyLinkage, /**< Keep one copy of function when linking (inline)*/
LLVMLinkOnceODRLinkage, /**< Same, but only replaced by something
equivalent. */
+ LLVMLinkOnceODRAutoHideLinkage, /**< Like LinkOnceODR, but possibly hidden. */
LLVMWeakAnyLinkage, /**< Keep one copy of function when linking (weak) */
LLVMWeakODRLinkage, /**< Same, but only replaced by something
equivalent. */
@@ -295,9 +297,7 @@ typedef enum {
LLVMGhostLinkage, /**< Obsolete */
LLVMCommonLinkage, /**< Tentative definitions */
LLVMLinkerPrivateLinkage, /**< Like Private, but linker removes. */
- LLVMLinkerPrivateWeakLinkage, /**< Like LinkerPrivate, but is weak. */
- LLVMLinkerPrivateWeakDefAutoLinkage /**< Like LinkerPrivateWeak, but possibly
- hidden. */
+ LLVMLinkerPrivateWeakLinkage /**< Like LinkerPrivate, but is weak. */
} LLVMLinkage;
typedef enum {
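A minimal sketch (not part of the patch) of selecting the revised linkage through existing Core.h entry points; the function name "f" is purely illustrative:

    #include <llvm-c/Core.h>

    /* Keep one ODR copy of "f" at link time and allow the linker to
       hide the symbol, per LLVMLinkOnceODRAutoHideLinkage above. */
    static void markAutoHide(LLVMModuleRef M) {
      LLVMValueRef F = LLVMGetNamedFunction(M, "f");
      if (F)
        LLVMSetLinkage(F, LLVMLinkOnceODRAutoHideLinkage);
    }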
@@ -1803,7 +1803,7 @@ LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg);
* Set the alignment for a function parameter.
*
* @see llvm::Argument::addAttr()
- * @see llvm::Attribute::constructAlignmentFromInt()
+ * @see llvm::AttrBuilder::addAlignmentAttr()
*/
void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align);
@@ -1869,6 +1869,27 @@ LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count);
const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len);
/**
+ * Obtain the number of operands from an MDNode value.
+ *
+ * @param V MDNode to get number of operands from.
+ * @return Number of operands of the MDNode.
+ */
+unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V);
+
+/**
+ * Obtain the given MDNode's operands.
+ *
+ * The passed LLVMValueRef pointer should point to enough memory to hold all of
+ * the operands of the given MDNode (see LLVMGetMDNodeNumOperands) as
+ * LLVMValueRefs. This memory will be populated with the LLVMValueRefs of the
+ * MDNode's operands.
+ *
+ * @param V MDNode to get the operands from.
+ * @param Dest Destination array for operands.
+ */
+void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest);
+
+/**
* @}
*/
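A minimal usage sketch for the two new metadata accessors, following the calling convention in the doc comment above (the caller sizes and allocates the operand buffer); the malloc/free pairing and the loop body are illustrative only:

    #include <stdlib.h>
    #include <llvm-c/Core.h>

    static void visitMDOperands(LLVMValueRef MD) {
      unsigned N = LLVMGetMDNodeNumOperands(MD);
      LLVMValueRef *Ops = (LLVMValueRef *)malloc(N * sizeof(LLVMValueRef));
      if (!Ops)
        return;
      LLVMGetMDNodeOperands(MD, Ops);  /* fills all N operand slots */
      for (unsigned i = 0; i != N; ++i) {
        /* ... inspect Ops[i] ... */
      }
      free(Ops);
    }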
@@ -2688,7 +2709,7 @@ namespace llvm {
template<typename T>
inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
- #if DEBUG
+ #ifdef DEBUG
for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
cast<T>(*I);
#endif
diff --git a/contrib/llvm/include/llvm-c/Disassembler.h b/contrib/llvm/include/llvm-c/Disassembler.h
index 69fdc64..b8c4ad9 100644
--- a/contrib/llvm/include/llvm-c/Disassembler.h
+++ b/contrib/llvm/include/llvm-c/Disassembler.h
@@ -146,6 +146,15 @@ LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo,
LLVMSymbolLookupCallback SymbolLookUp);
/**
+ * Set the disassembler's options. Returns 1 if it can set the Options and 0
+ * otherwise.
+ */
+int LLVMSetDisasmOptions(LLVMDisasmContextRef DC, uint64_t Options);
+
+/* The option to produce marked up assembly. */
+#define LLVMDisassembler_Option_UseMarkup 1
+
+/**
* Dispose of a disassembler context.
*/
void LLVMDisasmDispose(LLVMDisasmContextRef DC);
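A minimal sketch of the new option hook, assuming an x86-64 FreeBSD triple and no symbol-lookup callbacks; LLVMCreateDisasm and LLVMDisasmDispose are the surrounding Disassembler.h entry points:

    #include <stdio.h>
    #include <llvm-c/Disassembler.h>

    static LLVMDisasmContextRef createMarkupDisasm(void) {
      LLVMDisasmContextRef DC =
          LLVMCreateDisasm("x86_64-unknown-freebsd", NULL, 0, NULL, NULL);
      /* LLVMSetDisasmOptions returns 1 on success, 0 otherwise. */
      if (DC && !LLVMSetDisasmOptions(DC, LLVMDisassembler_Option_UseMarkup))
        fprintf(stderr, "markup output not supported\n");
      return DC;
    }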
diff --git a/contrib/llvm/include/llvm-c/Target.h b/contrib/llvm/include/llvm-c/Target.h
index 8915040..57abfa0 100644
--- a/contrib/llvm/include/llvm-c/Target.h
+++ b/contrib/llvm/include/llvm-c/Target.h
@@ -145,7 +145,7 @@ static inline LLVMBool LLVMInitializeNativeTarget(void) {
/*===-- Target Data -------------------------------------------------------===*/
/** Creates target data from a target layout string.
- See the constructor llvm::TargetData::TargetData. */
+ See the constructor llvm::DataLayout::DataLayout. */
LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep);
/** Adds target data information to a pass manager. This does not take ownership
@@ -160,48 +160,58 @@ void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef, LLVMPassManagerRef);
/** Converts target data to a target layout string. The string must be disposed
with LLVMDisposeMessage.
- See the constructor llvm::TargetData::TargetData. */
+ See the constructor llvm::DataLayout::DataLayout. */
char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef);
/** Returns the byte order of a target, either LLVMBigEndian or
LLVMLittleEndian.
- See the method llvm::TargetData::isLittleEndian. */
+ See the method llvm::DataLayout::isLittleEndian. */
enum LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef);
/** Returns the pointer size in bytes for a target.
- See the method llvm::TargetData::getPointerSize. */
+ See the method llvm::DataLayout::getPointerSize. */
unsigned LLVMPointerSize(LLVMTargetDataRef);
+/** Returns the pointer size in bytes for a target for a specified
+ address space.
+ See the method llvm::DataLayout::getPointerSize. */
+unsigned LLVMPointerSizeForAS(LLVMTargetDataRef, unsigned AS);
+
/** Returns the integer type that is the same size as a pointer on a target.
- See the method llvm::TargetData::getIntPtrType. */
+ See the method llvm::DataLayout::getIntPtrType. */
LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef);
+/** Returns the integer type that is the same size as a pointer on a target.
+ This version allows the address space to be specified.
+ See the method llvm::DataLayout::getIntPtrType. */
+LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef, unsigned AS);
+
/** Computes the size of a type in bytes for a target.
- See the method llvm::TargetData::getTypeSizeInBits. */
+ See the method llvm::DataLayout::getTypeSizeInBits. */
unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef, LLVMTypeRef);
/** Computes the storage size of a type in bytes for a target.
- See the method llvm::TargetData::getTypeStoreSize. */
+ See the method llvm::DataLayout::getTypeStoreSize. */
unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef, LLVMTypeRef);
/** Computes the ABI size of a type in bytes for a target.
- See the method llvm::TargetData::getTypeAllocSize. */
+ See the method llvm::DataLayout::getTypeAllocSize. */
unsigned long long LLVMABISizeOfType(LLVMTargetDataRef, LLVMTypeRef);
/** Computes the ABI alignment of a type in bytes for a target.
- See the method llvm::TargetData::getTypeABISize. */
+ See the method llvm::DataLayout::getABITypeAlignment. */
unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
/** Computes the call frame alignment of a type in bytes for a target.
- See the method llvm::TargetData::getTypeABISize. */
+ See the method llvm::DataLayout::getCallFrameTypeAlignment. */
unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
/** Computes the preferred alignment of a type in bytes for a target.
- See the method llvm::TargetData::getTypeABISize. */
+ See the method llvm::DataLayout::getPrefTypeAlignment. */
unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
/** Computes the preferred alignment of a global variable in bytes for a target.
- See the method llvm::TargetData::getPreferredAlignment. */
+ See the method llvm::DataLayout::getPreferredAlignment. */
unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef,
LLVMValueRef GlobalVar);
@@ -216,7 +226,7 @@ unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef, LLVMTypeRef StructTy,
unsigned Element);
/** Deallocates a TargetData.
- See the destructor llvm::TargetData::~TargetData. */
+ See the destructor llvm::DataLayout::~DataLayout. */
void LLVMDisposeTargetData(LLVMTargetDataRef);
/**
@@ -227,15 +237,15 @@ void LLVMDisposeTargetData(LLVMTargetDataRef);
}
namespace llvm {
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
- inline TargetData *unwrap(LLVMTargetDataRef P) {
- return reinterpret_cast<TargetData*>(P);
+ inline DataLayout *unwrap(LLVMTargetDataRef P) {
+ return reinterpret_cast<DataLayout*>(P);
}
- inline LLVMTargetDataRef wrap(const TargetData *P) {
- return reinterpret_cast<LLVMTargetDataRef>(const_cast<TargetData*>(P));
+ inline LLVMTargetDataRef wrap(const DataLayout *P) {
+ return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
}
inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {
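
[editor's note] A sketch of the renamed wrapper plus the new address-space-aware queries; the data-layout string is a made-up example, not a real target description:

    #include "llvm-c/Target.h"

    static void queryLayout() {
      LLVMTargetDataRef TD = LLVMCreateTargetData("e-p:64:64:64");
      unsigned DefaultPtr = LLVMPointerSize(TD);      // address space 0
      unsigned AS1Ptr = LLVMPointerSizeForAS(TD, 1);  // explicit address space
      LLVMTypeRef IntPtr = LLVMIntPtrTypeForAS(TD, 1);
      (void)DefaultPtr; (void)AS1Ptr; (void)IntPtr;
      LLVMDisposeTargetData(TD);
    }
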
diff --git a/contrib/llvm/include/llvm-c/TargetMachine.h b/contrib/llvm/include/llvm-c/TargetMachine.h
index 0d35d73..29668de 100644
--- a/contrib/llvm/include/llvm-c/TargetMachine.h
+++ b/contrib/llvm/include/llvm-c/TargetMachine.h
@@ -104,7 +104,7 @@ char *LLVMGetTargetMachineCPU(LLVMTargetMachineRef T);
LLVMDisposeMessage. */
char *LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T);
-/** Returns the llvm::TargetData used for this llvm:TargetMachine. */
+/** Returns the llvm::DataLayout used for this llvm::TargetMachine. */
LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T);
/** Emits an asm or object file for the given module to the filename. This
diff --git a/contrib/llvm/include/llvm-c/Transforms/Vectorize.h b/contrib/llvm/include/llvm-c/Transforms/Vectorize.h
index 9e7c754..68a9bdd 100644
--- a/contrib/llvm/include/llvm-c/Transforms/Vectorize.h
+++ b/contrib/llvm/include/llvm-c/Transforms/Vectorize.h
@@ -36,6 +36,9 @@ extern "C" {
/** See llvm::createBBVectorizePass function. */
void LLVMAddBBVectorizePass(LLVMPassManagerRef PM);
+/** See llvm::createLoopVectorizePass function. */
+void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM);
+
/**
* @}
*/
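
[editor's note] A sketch of scheduling the new pass through the C API; the module M is assumed valid, and running both vectorizers together is illustrative rather than recommended practice:

    #include "llvm-c/Core.h"
    #include "llvm-c/Transforms/Vectorize.h"

    static void runVectorizers(LLVMModuleRef M) {
      LLVMPassManagerRef PM = LLVMCreatePassManager();
      LLVMAddBBVectorizePass(PM);    // existing basic-block vectorizer
      LLVMAddLoopVectorizePass(PM);  // new loop vectorizer
      LLVMRunPassManager(PM, M);
      LLVMDisposePassManager(PM);
    }
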
diff --git a/contrib/llvm/include/llvm/ADT/APFloat.h b/contrib/llvm/include/llvm/ADT/APFloat.h
index 5a625a4..31c6e6a 100644
--- a/contrib/llvm/include/llvm/ADT/APFloat.h
+++ b/contrib/llvm/include/llvm/ADT/APFloat.h
@@ -455,14 +455,11 @@ namespace llvm {
/* The sign bit of this number. */
unsigned int sign: 1;
-
- /* For PPCDoubleDouble, we have a second exponent and sign (the second
- significand is appended to the first one, although it would be wrong to
- regard these as a single number for arithmetic purposes). These fields
- are not meaningful for any other type. */
- exponent_t exponent2 : 11;
- unsigned int sign2: 1;
};
+
+ // See friend declaration above. This additional declaration is required in
+ // order to compile LLVM with the IBM xlC compiler.
+ hash_code hash_value(const APFloat &Arg);
} /* namespace llvm */
#endif /* LLVM_FLOAT_H */
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
index f30a6e3..c7c8016b 100644
--- a/contrib/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -251,7 +251,7 @@ public:
/// constructor.
APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);
- /// This constructor interprets the string \arg str in the given radix. The
+ /// This constructor interprets the string \p str in the given radix. The
/// interpretation stops when the first character that is not suitable for the
/// radix is encountered, or the end of the string. Acceptable radix values
/// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
@@ -760,7 +760,7 @@ public:
APInt shl(unsigned shiftAmt) const {
assert(shiftAmt <= BitWidth && "Invalid shift amount");
if (isSingleWord()) {
- if (shiftAmt == BitWidth)
+ if (shiftAmt >= BitWidth)
return APInt(BitWidth, 0); // avoid undefined shift results
return APInt(BitWidth, VAL << shiftAmt);
}
@@ -1231,15 +1231,15 @@ public:
}
/// This method determines how many bits are required to hold the APInt
- /// equivalent of the string given by \arg str.
+ /// equivalent of the string given by \p str.
/// @brief Get bits required for string value.
static unsigned getBitsNeeded(StringRef str, uint8_t radix);
/// countLeadingZeros - This function is an APInt version of the
/// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number
/// of zeros from the most significant bit to the first one bit.
- /// @returns BitWidth if the value is zero.
- /// @returns the number of zeros from the most significant bit to the first
+ /// @returns BitWidth if the value is zero, otherwise
+ /// returns the number of zeros from the most significant bit to the first
/// one bits.
unsigned countLeadingZeros() const {
if (isSingleWord()) {
@@ -1252,8 +1252,8 @@ public:
/// countLeadingOnes - This function is an APInt version of the
/// countLeadingOnes_{32,64} functions in MathExtras.h. It counts the number
/// of ones from the most significant bit to the first zero bit.
- /// @returns 0 if the high order bit is not set
- /// @returns the number of 1 bits from the most significant to the least
+ /// @returns 0 if the high order bit is not set, otherwise
+ /// returns the number of 1 bits from the most significant bit to the first
+ /// zero bit.
/// @brief Count the number of leading one bits.
unsigned countLeadingOnes() const;
@@ -1266,8 +1266,8 @@ public:
/// countTrailingZeros - This function is an APInt version of the
/// countTrailingZeros_{32,64} functions in MathExtras.h. It counts
/// the number of zeros from the least significant bit to the first set bit.
- /// @returns BitWidth if the value is zero.
- /// @returns the number of zeros from the least significant bit to the first
+ /// @returns BitWidth if the value is zero, otherwise
+ /// returns the number of zeros from the least significant bit to the first
/// one bit.
/// @brief Count the number of trailing zero bits.
unsigned countTrailingZeros() const;
@@ -1275,8 +1275,8 @@ public:
/// countTrailingOnes - This function is an APInt version of the
/// countTrailingOnes_{32,64} functions in MathExtras.h. It counts
/// the number of ones from the least significant bit to the first zero bit.
- /// @returns BitWidth if the value is all ones.
- /// @returns the number of ones from the least significant bit to the first
+ /// @returns BitWidth if the value is all ones, otherwise
+ /// returns the number of ones from the least significant bit to the first
/// zero bit.
/// @brief Count the number of trailing one bits.
unsigned countTrailingOnes() const {
@@ -1288,8 +1288,8 @@ public:
/// countPopulation - This function is an APInt version of the
/// countPopulation_{32,64} functions in MathExtras.h. It counts the number
/// of 1 bits in the APInt value.
- /// @returns 0 if the value is zero.
- /// @returns the number of set bits.
+ /// @returns 0 if the value is zero, otherwise returns the number of set
+ /// bits.
/// @brief Count the number of bits set.
unsigned countPopulation() const {
if (isSingleWord())
@@ -1780,6 +1780,9 @@ inline APInt Not(const APInt& APIVal) {
} // End of APIntOps namespace
+ // See friend declaration above. This additional declaration is required in
+ // order to compile LLVM with the IBM xlC compiler.
+ hash_code hash_value(const APInt &Arg);
} // End of llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/ADT/ArrayRef.h b/contrib/llvm/include/llvm/ADT/ArrayRef.h
index cf55aad..1e35d62 100644
--- a/contrib/llvm/include/llvm/ADT/ArrayRef.h
+++ b/contrib/llvm/include/llvm/ADT/ArrayRef.h
@@ -59,12 +59,17 @@ namespace llvm {
ArrayRef(const T *begin, const T *end)
: Data(begin), Length(end - begin) {}
- /// Construct an ArrayRef from a SmallVector.
- /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T> &Vec)
- : Data(Vec.data()), Length(Vec.size()) {}
+ /// Construct an ArrayRef from a SmallVector. This is templated in order to
+ /// avoid instantiating SmallVectorTemplateCommon<T> whenever we
+ /// copy-construct an ArrayRef.
+ template<typename U>
+ /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T, U> &Vec)
+ : Data(Vec.data()), Length(Vec.size()) {
+ }
/// Construct an ArrayRef from a std::vector.
- /*implicit*/ ArrayRef(const std::vector<T> &Vec)
+ template<typename A>
+ /*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
: Data(Vec.empty() ? (T*)0 : &Vec[0]), Length(Vec.size()) {}
/// Construct an ArrayRef from a C array.
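
[editor's note] Both constructors stay implicit, so call sites are unchanged; the new templates merely defer instantiation and also accept vectors with non-default allocators. A sketch:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include <vector>

    static int sum(llvm::ArrayRef<int> A) {
      int S = 0;
      for (unsigned i = 0, e = A.size(); i != e; ++i)
        S += A[i];
      return S;
    }

    static void callers() {
      llvm::SmallVector<int, 4> SV;
      SV.push_back(1); SV.push_back(2);
      std::vector<int> V(3, 7);
      sum(SV); // via the templated SmallVectorTemplateCommon constructor
      sum(V);  // via the templated std::vector constructor
    }
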
diff --git a/contrib/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm/include/llvm/ADT/BitVector.h
index 3e2e5f2..9d6388f 100644
--- a/contrib/llvm/include/llvm/ADT/BitVector.h
+++ b/contrib/llvm/include/llvm/ADT/BitVector.h
@@ -172,7 +172,7 @@ public:
unsigned BitPos = Prev % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
// Mask off previous bits.
- Copy &= ~0L << BitPos;
+ Copy &= ~0UL << BitPos;
if (Copy != 0) {
if (sizeof(BitWord) == 4)
@@ -237,6 +237,34 @@ public:
return *this;
}
+ /// set - Efficiently set a range of bits in [I, E)
+ BitVector &set(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to set backwards range!");
+ assert(E <= size() && "Attempted to set out-of-bounds range!");
+
+ if (I == E) return *this;
+
+ if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+ BitWord EMask = 1UL << (E % BITWORD_SIZE);
+ BitWord IMask = 1UL << (I % BITWORD_SIZE);
+ BitWord Mask = EMask - IMask;
+ Bits[I / BITWORD_SIZE] |= Mask;
+ return *this;
+ }
+
+ BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+ Bits[I / BITWORD_SIZE] |= PrefixMask;
+ I = RoundUpToAlignment(I, BITWORD_SIZE);
+
+ for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+ Bits[I / BITWORD_SIZE] = ~0UL;
+
+    // The postfix word is absent when E is word-aligned; in that case the
+    // loop above already set every word and Bits[I / BITWORD_SIZE] would be
+    // past the end of the allocation.
+    if (I < E) {
+      BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+      Bits[I / BITWORD_SIZE] |= PostfixMask;
+    }
+
+ return *this;
+ }
+
BitVector &reset() {
init_words(Bits, Capacity, false);
return *this;
@@ -247,6 +275,34 @@ public:
return *this;
}
+ /// reset - Efficiently reset a range of bits in [I, E)
+ BitVector &reset(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to reset backwards range!");
+ assert(E <= size() && "Attempted to reset out-of-bounds range!");
+
+ if (I == E) return *this;
+
+ if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+ BitWord EMask = 1UL << (E % BITWORD_SIZE);
+ BitWord IMask = 1UL << (I % BITWORD_SIZE);
+ BitWord Mask = EMask - IMask;
+ Bits[I / BITWORD_SIZE] &= ~Mask;
+ return *this;
+ }
+
+ BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+ Bits[I / BITWORD_SIZE] &= ~PrefixMask;
+ I = RoundUpToAlignment(I, BITWORD_SIZE);
+
+ for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+ Bits[I / BITWORD_SIZE] = 0UL;
+
+    // As in set(I, E): skip the postfix word when E is word-aligned, since
+    // indexing it would run off the end of the allocation.
+    if (I < E) {
+      BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+      Bits[I / BITWORD_SIZE] &= ~PostfixMask;
+    }
+
+ return *this;
+ }
+
BitVector &flip() {
for (unsigned i = 0; i < NumBitWords(size()); ++i)
Bits[i] = ~Bits[i];
@@ -311,7 +367,7 @@ public:
return !(*this == RHS);
}
- // Intersection, union, disjoint union.
+ /// Intersection, union, disjoint union.
BitVector &operator&=(const BitVector &RHS) {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
@@ -328,7 +384,7 @@ public:
return *this;
}
- // reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
+ /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
BitVector &reset(const BitVector &RHS) {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
@@ -338,6 +394,23 @@ public:
return *this;
}
+ /// test - Check if (This - RHS) is zero.
+ /// This is the same as reset(RHS) and any().
+ bool test(const BitVector &RHS) const {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ unsigned i;
+ for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ if ((Bits[i] & ~RHS.Bits[i]) != 0)
+ return true;
+
+ for (; i != ThisWords ; ++i)
+ if (Bits[i] != 0)
+ return true;
+
+ return false;
+ }
+
BitVector &operator|=(const BitVector &RHS) {
if (size() < RHS.size())
resize(RHS.size());
@@ -451,8 +524,11 @@ private:
// Then set any stray high bits of the last used word.
unsigned ExtraBits = Size % BITWORD_SIZE;
if (ExtraBits) {
- Bits[UsedWords-1] &= ~(~0L << ExtraBits);
- Bits[UsedWords-1] |= (0 - (BitWord)t) << ExtraBits;
+ BitWord ExtraBitMask = ~0UL << ExtraBits;
+ if (t)
+ Bits[UsedWords-1] |= ExtraBitMask;
+ else
+ Bits[UsedWords-1] &= ~ExtraBitMask;
}
}
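
[editor's note] A sketch of the new range and subset operations (SmallBitVector grows matching set/reset overloads later in this patch):

    #include "llvm/ADT/BitVector.h"

    static void bitRanges() {
      llvm::BitVector BV(128);
      BV.set(10, 70);    // partial first word, aligned middle, partial last
      BV.reset(20, 30);  // clear a subrange
      llvm::BitVector Mask(128);
      Mask.set(0, 32);
      bool Outside = BV.test(Mask); // true: BV has set bits that Mask lacks
      (void)Outside;
    }
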
diff --git a/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h b/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
index e502ac4..2dfed07 100644
--- a/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
+++ b/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -48,17 +48,18 @@ public:
public:
virtual ~DAGDeltaAlgorithm() {}
- /// Run - Minimize the DAG formed by the \arg Changes vertices and the \arg
- /// Dependencies edges by executing \see ExecuteOneTest() on subsets of
+ /// Run - Minimize the DAG formed by the \p Changes vertices and the
+ /// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
/// changes and returning the smallest set which still satisfies the test
- /// predicate and the input \arg Dependencies.
+ /// predicate and the input \p Dependencies.
///
/// \param Changes The list of changes.
///
/// \param Dependencies The list of dependencies amongst changes. For each
- /// (x,y) in \arg Dependencies, both x and y must be in \arg Changes. The
- /// minimization algorithm guarantees that for each tested changed set S, x
- /// \in S implies y \in S. It is an error to have cyclic dependencies.
+ /// (x,y) in \p Dependencies, both x and y must be in \p Changes. The
+ /// minimization algorithm guarantees that for each tested changed set S,
+ /// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic
+ /// dependencies.
changeset_ty Run(const changeset_ty &Changes,
const std::vector<edge_ty> &Dependencies);
@@ -67,7 +68,7 @@ public:
const changesetlist_ty &Sets,
const changeset_ty &Required) {}
- /// ExecuteOneTest - Execute a single test predicate on the change set \arg S.
+ /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
};
diff --git a/contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h b/contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h
index 45ba198..7bf7960 100644
--- a/contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h
+++ b/contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h
@@ -45,23 +45,23 @@ private:
/// since we always reduce following a success.
std::set<changeset_ty> FailedTestsCache;
- /// GetTestResult - Get the test result for the \arg Changes from the
+ /// GetTestResult - Get the test result for the \p Changes from the
/// cache, executing the test if necessary.
///
/// \param Changes - The change set to test.
/// \return - The test result.
bool GetTestResult(const changeset_ty &Changes);
- /// Split - Partition a set of changes \arg S into one or two subsets.
+ /// Split - Partition a set of changes \p S into one or two subsets.
void Split(const changeset_ty &S, changesetlist_ty &Res);
- /// Delta - Minimize a set of \arg Changes which has been partioned into
+ /// Delta - Minimize a set of \p Changes which has been partitioned into
/// smaller sets, by attempting to remove individual subsets.
changeset_ty Delta(const changeset_ty &Changes,
const changesetlist_ty &Sets);
- /// Search - Search for a subset (or subsets) in \arg Sets which can be
- /// removed from \arg Changes while still satisfying the predicate.
+ /// Search - Search for a subset (or subsets) in \p Sets which can be
+ /// removed from \p Changes while still satisfying the predicate.
///
/// \param Res - On success, a subset of Changes which satisfies the
/// predicate.
@@ -74,13 +74,13 @@ protected:
virtual void UpdatedSearchState(const changeset_ty &Changes,
const changesetlist_ty &Sets) {}
- /// ExecuteOneTest - Execute a single test predicate on the change set \arg S.
+ /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
public:
virtual ~DeltaAlgorithm();
- /// Run - Minimize the set \arg Changes by executing \see ExecuteOneTest() on
+ /// Run - Minimize the set \p Changes by executing \see ExecuteOneTest() on
/// subsets of changes and returning the smallest set which still satisfies
/// the test predicate.
changeset_ty Run(const changeset_ty &Changes);
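
[editor's note] A sketch of the intended subclassing pattern; the "test passes when change 3 is present" predicate is purely illustrative:

    #include "llvm/ADT/DeltaAlgorithm.h"

    class MyDelta : public llvm::DeltaAlgorithm {
    protected:
      // Report whether the candidate change set still satisfies the
      // property being minimized for.
      virtual bool ExecuteOneTest(const changeset_ty &S) {
        return S.count(3) != 0;
      }
    };

    static llvm::DeltaAlgorithm::changeset_ty minimize() {
      MyDelta D;
      llvm::DeltaAlgorithm::changeset_ty Changes;
      for (unsigned i = 0; i != 8; ++i)
        Changes.insert(i);
      return D.Run(Changes); // smallest subset still passing ExecuteOneTest
    }
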
diff --git a/contrib/llvm/include/llvm/ADT/DenseMap.h b/contrib/llvm/include/llvm/ADT/DenseMap.h
index f60d688..ac4bdbd 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMap.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMap.h
@@ -420,9 +420,10 @@ private:
NumBuckets = getNumBuckets();
}
if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
- this->grow(NumBuckets);
+ this->grow(NumBuckets * 2);
LookupBucketFor(Key, TheBucket);
}
+ assert(TheBucket);
// Only update the state after we've grown our bucket space appropriately
// so that when growing buckets we have self-consistent entry count.
@@ -599,7 +600,7 @@ public:
unsigned OldNumBuckets = NumBuckets;
BucketT *OldBuckets = Buckets;
- allocateBuckets(std::max<unsigned>(64, NextPowerOf2(AtLeast)));
+ allocateBuckets(std::max<unsigned>(64, NextPowerOf2(AtLeast-1)));
assert(Buckets);
if (!OldBuckets) {
this->BaseT::initEmpty();
@@ -825,11 +826,11 @@ public:
}
void grow(unsigned AtLeast) {
- if (AtLeast > InlineBuckets)
- AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast));
+ if (AtLeast >= InlineBuckets)
+ AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
if (Small) {
- if (AtLeast <= InlineBuckets)
+ if (AtLeast < InlineBuckets)
return; // Nothing to do.
// First move the inline buckets into a temporary storage.
diff --git a/contrib/llvm/include/llvm/ADT/DenseMapInfo.h b/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
index 1559a35..6f17a64 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
@@ -31,12 +31,12 @@ struct DenseMapInfo {
template<typename T>
struct DenseMapInfo<T*> {
static inline T* getEmptyKey() {
- intptr_t Val = -1;
+ uintptr_t Val = static_cast<uintptr_t>(-1);
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
static inline T* getTombstoneKey() {
- intptr_t Val = -2;
+ uintptr_t Val = static_cast<uintptr_t>(-2);
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
@@ -105,7 +105,7 @@ template<> struct DenseMapInfo<int> {
// Provide DenseMapInfo for longs.
template<> struct DenseMapInfo<long> {
static inline long getEmptyKey() {
- return (1UL << (sizeof(long) * 8 - 1)) - 1L;
+ return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
}
static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
static unsigned getHashValue(const long& Val) {
diff --git a/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h b/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
index 771476c..1d81772 100644
--- a/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
+++ b/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
@@ -33,6 +33,7 @@ namespace llvm {
///
/// Here is a simple example using integers:
///
+/// \code
/// EquivalenceClasses<int> EC;
/// EC.unionSets(1, 2); // insert 1, 2 into the same set
/// EC.insert(4); EC.insert(5); // insert 4, 5 into own sets
@@ -46,6 +47,7 @@ namespace llvm {
/// cerr << *MI << " "; // Print member.
/// cerr << "\n"; // Finish set.
/// }
+/// \endcode
///
/// This example prints:
/// 4
diff --git a/contrib/llvm/include/llvm/ADT/FoldingSet.h b/contrib/llvm/include/llvm/ADT/FoldingSet.h
index ba415ac..375d84a 100644
--- a/contrib/llvm/include/llvm/ADT/FoldingSet.h
+++ b/contrib/llvm/include/llvm/ADT/FoldingSet.h
@@ -278,6 +278,10 @@ public:
bool operator==(FoldingSetNodeIDRef) const;
+ /// Used to compare the "ordering" of two nodes as defined by the
+ /// profiled bits and their ordering defined by memcmp().
+ bool operator<(FoldingSetNodeIDRef) const;
+
const unsigned *getData() const { return Data; }
size_t getSize() const { return Size; }
};
@@ -327,6 +331,11 @@ public:
bool operator==(const FoldingSetNodeID &RHS) const;
bool operator==(const FoldingSetNodeIDRef RHS) const;
+ /// Used to compare the "ordering" of two nodes as defined by the
+ /// profiled bits and their ordering defined by memcmp().
+ bool operator<(const FoldingSetNodeID &RHS) const;
+ bool operator<(const FoldingSetNodeIDRef RHS) const;
+
/// Intern - Copy this node's data to a memory region allocated from the
/// given allocator and return a FoldingSetNodeIDRef describing the
/// interned data.
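
[editor's note] A sketch of the new ordering; it is only the memcmp() order of the profiled words, useful e.g. as a std::map key, not a semantic comparison:

    #include "llvm/ADT/FoldingSet.h"

    static bool orderedBefore() {
      llvm::FoldingSetNodeID A, B;
      A.AddInteger(1u);
      A.AddString("foo");
      B.AddInteger(2u);
      return A < B; // memcmp() over the profiled bits
    }
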
diff --git a/contrib/llvm/include/llvm/ADT/Hashing.h b/contrib/llvm/include/llvm/ADT/Hashing.h
index 6ab0725..cda31a2 100644
--- a/contrib/llvm/include/llvm/ADT/Hashing.h
+++ b/contrib/llvm/include/llvm/ADT/Hashing.h
@@ -409,7 +409,6 @@ bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
/// combining them, this (as an optimization) directly combines the integers.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
- typedef typename std::iterator_traits<InputIteratorT>::value_type ValueT;
const size_t seed = get_execution_seed();
char buffer[64], *buffer_ptr = buffer;
char *const buffer_end = buffer_ptr + array_lengthof(buffer);
@@ -711,7 +710,7 @@ hash_code hash_combine(const T1 &arg1) {
#endif
-// Implementation details for implementatinos of hash_value overloads provided
+// Implementation details for implementations of hash_value overloads provided
// here.
namespace hashing {
namespace detail {
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableList.h b/contrib/llvm/include/llvm/ADT/ImmutableList.h
index d7c0074..20bdd90 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableList.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableList.h
@@ -33,9 +33,8 @@ class ImmutableListImpl : public FoldingSetNode {
friend class ImmutableListFactory<T>;
- // Do not implement.
- void operator=(const ImmutableListImpl&);
- ImmutableListImpl(const ImmutableListImpl&);
+ void operator=(const ImmutableListImpl&) LLVM_DELETED_FUNCTION;
+ ImmutableListImpl(const ImmutableListImpl&) LLVM_DELETED_FUNCTION;
public:
const T& getHead() const { return Head; }
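
[editor's note] The mechanical change repeated across these headers swaps the old "declare but never define" idiom for LLVM_DELETED_FUNCTION from llvm/Support/Compiler.h, which expands to "= delete" on C++11 compilers and to nothing elsewhere (falling back to a link-time error). A sketch of the pattern:

    #include "llvm/Support/Compiler.h"

    class NoCopy {
      NoCopy(const NoCopy &) LLVM_DELETED_FUNCTION;
      void operator=(const NoCopy &) LLVM_DELETED_FUNCTION;
    public:
      NoCopy() {}
    };
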
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableMap.h b/contrib/llvm/include/llvm/ADT/ImmutableMap.h
index 8346ffa..4883c5b 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableMap.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableMap.h
@@ -122,8 +122,8 @@ public:
}
private:
- Factory(const Factory& RHS); // DO NOT IMPLEMENT
- void operator=(const Factory& RHS); // DO NOT IMPLEMENT
+ Factory(const Factory& RHS) LLVM_DELETED_FUNCTION;
+ void operator=(const Factory& RHS) LLVM_DELETED_FUNCTION;
};
bool contains(key_type_ref K) const {
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableSet.h b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
index 949dc44..3900f96 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableSet.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
@@ -22,7 +22,6 @@
#include <cassert>
#include <functional>
#include <vector>
-#include <stdio.h>
namespace llvm {
@@ -84,13 +83,13 @@ public:
}
return NULL;
}
-
+
/// getMaxElement - Find the subtree associated with the highest ranged
/// key value.
ImutAVLTree* getMaxElement() {
ImutAVLTree *T = this;
- ImutAVLTree *Right = T->getRight();
- while (Right) { T = right; right = T->getRight(); }
+ ImutAVLTree *Right = T->getRight();
+ while (Right) { T = Right; Right = T->getRight(); }
return T;
}
@@ -258,7 +257,7 @@ private:
/// method returns false for an instance of ImutAVLTree, all subtrees
/// will also have this method return false. The converse is not true.
bool isMutable() const { return IsMutable; }
-
+
/// hasCachedDigest - Returns true if the digest for this tree is cached.
/// This can only be true if the tree is immutable.
bool hasCachedDigest() const { return IsDigestCached; }
@@ -280,7 +279,7 @@ private:
assert(isMutable() && "Mutable flag already removed.");
IsMutable = false;
}
-
+
/// markedCachedDigest - Clears the NoCachedDigest flag for a tree.
void markedCachedDigest() {
assert(!hasCachedDigest() && "NoCachedDigest flag already removed.");
@@ -349,7 +348,7 @@ public:
else
factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
}
-
+
// We need to clear the mutability bit in case we are
// destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
IsMutable = false;
@@ -415,7 +414,7 @@ public:
TreeTy* getEmptyTree() const { return NULL; }
protected:
-
+
//===--------------------------------------------------===//
// A bunch of quick helper functions used for reasoning
// about the properties of trees and their children.
@@ -461,7 +460,7 @@ protected:
// returned to the caller.
//===--------------------------------------------------===//
- TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
+ TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
BumpPtrAllocator& A = getAllocator();
TreeTy* T;
if (!freeNodes.empty()) {
@@ -469,8 +468,7 @@ protected:
freeNodes.pop_back();
assert(T != L);
assert(T != R);
- }
- else {
+ } else {
T = (TreeTy*) A.Allocate<TreeTy>();
}
new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
@@ -513,7 +511,8 @@ protected:
return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
}
- else if (hr > hl + 2) {
+
+ if (hr > hl + 2) {
assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");
TreeTy *RL = getLeft(R);
@@ -529,8 +528,8 @@ protected:
return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
}
- else
- return createNode(L,V,R);
+
+ return createNode(L,V,R);
}
/// add_internal - Creates a new tree that includes the specified
@@ -604,7 +603,7 @@ protected:
markImmutable(getLeft(T));
markImmutable(getRight(T));
}
-
+
public:
TreeTy *getCanonicalTree(TreeTy *TNew) {
if (!TNew)
@@ -937,7 +936,7 @@ public:
private:
TreeTy *Root;
-
+
public:
/// Constructs a set from a pointer to a tree root. In general one
/// should use a Factory object to create sets instead of directly
@@ -1006,10 +1005,10 @@ public:
typename TreeTy::Factory *getTreeFactory() const {
return const_cast<typename TreeTy::Factory *>(&F);
}
-
+
private:
- Factory(const Factory& RHS); // DO NOT IMPLEMENT
- void operator=(const Factory& RHS); // DO NOT IMPLEMENT
+ Factory(const Factory& RHS) LLVM_DELETED_FUNCTION;
+ void operator=(const Factory& RHS) LLVM_DELETED_FUNCTION;
};
friend class Factory;
@@ -1027,11 +1026,11 @@ public:
return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
}
- TreeTy *getRoot() {
+ TreeTy *getRoot() {
if (Root) { Root->retain(); }
return Root;
}
-
+
TreeTy *getRootWithoutRetain() const {
return Root;
}
@@ -1092,7 +1091,7 @@ public:
void validateTree() const { if (Root) Root->validateTree(); }
};
-
+
// NOTE: This may some day replace the current ImmutableSet.
template <typename ValT, typename ValInfo = ImutContainerInfo<ValT> >
class ImmutableSetRef {
@@ -1101,11 +1100,11 @@ public:
typedef typename ValInfo::value_type_ref value_type_ref;
typedef ImutAVLTree<ValInfo> TreeTy;
typedef typename TreeTy::Factory FactoryTy;
-
+
private:
TreeTy *Root;
FactoryTy *Factory;
-
+
public:
/// Constructs a set from a pointer to a tree root. In general one
/// should use a Factory object to create sets instead of directly
@@ -1133,44 +1132,44 @@ public:
~ImmutableSetRef() {
if (Root) { Root->release(); }
}
-
+
static inline ImmutableSetRef getEmptySet(FactoryTy *F) {
return ImmutableSetRef(0, F);
}
-
+
ImmutableSetRef add(value_type_ref V) {
return ImmutableSetRef(Factory->add(Root, V), Factory);
}
-
+
ImmutableSetRef remove(value_type_ref V) {
return ImmutableSetRef(Factory->remove(Root, V), Factory);
}
-
+
/// Returns true if the set contains the specified value.
bool contains(value_type_ref V) const {
return Root ? Root->contains(V) : false;
}
-
+
ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const {
return ImmutableSet<ValT>(canonicalize ?
Factory->getCanonicalTree(Root) : Root);
}
-
+
TreeTy *getRootWithoutRetain() const {
return Root;
}
-
+
bool operator==(const ImmutableSetRef &RHS) const {
return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
}
-
+
bool operator!=(const ImmutableSetRef &RHS) const {
return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
}
/// isEmpty - Return true if the set contains no elements.
bool isEmpty() const { return !Root; }
-
+
/// isSingleton - Return true if the set contains exactly one element.
/// This method runs in constant time.
bool isSingleton() const { return getHeight() == 1; }
@@ -1178,7 +1177,7 @@ public:
//===--------------------------------------------------===//
// Iterators.
//===--------------------------------------------------===//
-
+
class iterator {
typename TreeTy::iterator itr;
iterator(TreeTy* t) : itr(t) {}
@@ -1194,28 +1193,28 @@ public:
inline bool operator!=(const iterator& RHS) const { return RHS.itr != itr; }
inline value_type *operator->() const { return &(operator*()); }
};
-
+
iterator begin() const { return iterator(Root); }
iterator end() const { return iterator(); }
-
+
//===--------------------------------------------------===//
// Utility methods.
//===--------------------------------------------------===//
-
+
unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
-
+
static inline void Profile(FoldingSetNodeID& ID, const ImmutableSetRef& S) {
ID.AddPointer(S.Root);
}
-
+
inline void Profile(FoldingSetNodeID& ID) const {
return Profile(ID,*this);
}
-
+
//===--------------------------------------------------===//
// For testing.
//===--------------------------------------------------===//
-
+
void validateTree() const { if (Root) Root->validateTree(); }
};
diff --git a/contrib/llvm/include/llvm/ADT/MapVector.h b/contrib/llvm/include/llvm/ADT/MapVector.h
new file mode 100644
index 0000000..6aacca5
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/MapVector.h
@@ -0,0 +1,90 @@
+//===- llvm/ADT/MapVector.h - Map with deterministic value order *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a map that provides insertion order iteration. The
+// interface is purposefully minimal. The key is assumed to be cheap to copy
+// and 2 copies are kept, one for indexing in a DenseMap, one for iteration in
+// a std::vector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_MAPVECTOR_H
+#define LLVM_ADT_MAPVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+
+namespace llvm {
+
+/// This class implements a map that also provides access to all stored values
+/// in a deterministic order. The values are kept in a std::vector and the
+/// mapping is done with DenseMap from Keys to indexes in that vector.
+template<typename KeyT, typename ValueT,
+ typename MapType = llvm::DenseMap<KeyT, unsigned>,
+ typename VectorType = std::vector<std::pair<KeyT, ValueT> > >
+class MapVector {
+ typedef typename VectorType::size_type SizeType;
+
+ MapType Map;
+ VectorType Vector;
+
+public:
+ typedef typename VectorType::iterator iterator;
+ typedef typename VectorType::const_iterator const_iterator;
+
+ SizeType size() const {
+ return Vector.size();
+ }
+
+ iterator begin() {
+ return Vector.begin();
+ }
+
+ const_iterator begin() const {
+ return Vector.begin();
+ }
+
+ iterator end() {
+ return Vector.end();
+ }
+
+ const_iterator end() const {
+ return Vector.end();
+ }
+
+ bool empty() const {
+ return Vector.empty();
+ }
+
+ void clear() {
+ Map.clear();
+ Vector.clear();
+ }
+
+ ValueT &operator[](const KeyT &Key) {
+ std::pair<KeyT, unsigned> Pair = std::make_pair(Key, 0);
+ std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+ unsigned &I = Result.first->second;
+ if (Result.second) {
+ Vector.push_back(std::make_pair(Key, ValueT()));
+ I = Vector.size() - 1;
+ }
+ return Vector[I].second;
+ }
+
+ unsigned count(const KeyT &Key) const {
+ typename MapType::const_iterator Pos = Map.find(Key);
+    return Pos == Map.end() ? 0 : 1;
+ }
+};
+
+}
+
+#endif
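
[editor's note] A usage sketch for the new container; keys must satisfy DenseMapInfo (int does by default), and iteration follows insertion order rather than hash order:

    #include "llvm/ADT/MapVector.h"
    #include <string>

    static void insertionOrder() {
      llvm::MapVector<int, std::string> MV;
      MV[42] = "first";
      MV[7]  = "second";
      MV[42] = "updated"; // existing key: value replaced, position kept
      for (llvm::MapVector<int, std::string>::iterator I = MV.begin(),
             E = MV.end(); I != E; ++I) {
        // Visits (42, "updated"), then (7, "second").
      }
    }
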
diff --git a/contrib/llvm/include/llvm/ADT/Optional.h b/contrib/llvm/include/llvm/ADT/Optional.h
index ee8b69f..f43aeb1 100644
--- a/contrib/llvm/include/llvm/ADT/Optional.h
+++ b/contrib/llvm/include/llvm/ADT/Optional.h
@@ -16,8 +16,13 @@
#ifndef LLVM_ADT_OPTIONAL
#define LLVM_ADT_OPTIONAL
+#include "llvm/Support/Compiler.h"
#include <cassert>
+#if LLVM_USE_RVALUE_REFERENCES
+#include <utility>
+#endif
+
namespace llvm {
template<typename T>
@@ -28,6 +33,10 @@ public:
explicit Optional() : x(), hasVal(false) {}
Optional(const T &y) : x(y), hasVal(true) {}
+#if LLVM_USE_RVALUE_REFERENCES
+ Optional(T &&y) : x(std::forward<T>(y)), hasVal(true) {}
+#endif
+
static inline Optional create(const T* y) {
return y ? Optional(*y) : Optional();
}
diff --git a/contrib/llvm/include/llvm/ADT/OwningPtr.h b/contrib/llvm/include/llvm/ADT/OwningPtr.h
index 6d9c305..05bcd40 100644
--- a/contrib/llvm/include/llvm/ADT/OwningPtr.h
+++ b/contrib/llvm/include/llvm/ADT/OwningPtr.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_OWNING_PTR_H
#define LLVM_ADT_OWNING_PTR_H
+#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstddef>
@@ -25,12 +26,21 @@ namespace llvm {
/// pointee object can be taken away from OwningPtr by using the take method.
template<class T>
class OwningPtr {
- OwningPtr(OwningPtr const &); // DO NOT IMPLEMENT
- OwningPtr &operator=(OwningPtr const &); // DO NOT IMPLEMENT
+ OwningPtr(OwningPtr const &) LLVM_DELETED_FUNCTION;
+ OwningPtr &operator=(OwningPtr const &) LLVM_DELETED_FUNCTION;
T *Ptr;
public:
explicit OwningPtr(T *P = 0) : Ptr(P) {}
+#if LLVM_USE_RVALUE_REFERENCES
+ OwningPtr(OwningPtr &&Other) : Ptr(Other.take()) {}
+
+ OwningPtr &operator=(OwningPtr &&Other) {
+ reset(Other.take());
+ return *this;
+ }
+#endif
+
~OwningPtr() {
delete Ptr;
}
@@ -79,12 +89,21 @@ inline void swap(OwningPtr<T> &a, OwningPtr<T> &b) {
/// functionality as OwningPtr, except that it works for array types.
template<class T>
class OwningArrayPtr {
- OwningArrayPtr(OwningArrayPtr const &); // DO NOT IMPLEMENT
- OwningArrayPtr &operator=(OwningArrayPtr const &); // DO NOT IMPLEMENT
+ OwningArrayPtr(OwningArrayPtr const &) LLVM_DELETED_FUNCTION;
+ OwningArrayPtr &operator=(OwningArrayPtr const &) LLVM_DELETED_FUNCTION;
T *Ptr;
public:
explicit OwningArrayPtr(T *P = 0) : Ptr(P) {}
+#if LLVM_USE_RVALUE_REFERENCES
+ OwningArrayPtr(OwningArrayPtr &&Other) : Ptr(Other.take()) {}
+
+ OwningArrayPtr &operator=(OwningArrayPtr &&Other) {
+ reset(Other.take());
+ return *this;
+ }
+#endif
+
~OwningArrayPtr() {
delete [] Ptr;
}
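
[editor's note] A sketch of the new move support; it only compiles when LLVM_USE_RVALUE_REFERENCES is set by Compiler.h (pulled in by OwningPtr.h):

    #include "llvm/ADT/OwningPtr.h"
    #if LLVM_USE_RVALUE_REFERENCES
    #include <utility>

    static void transfer() {
      llvm::OwningPtr<int> A(new int(1));
      llvm::OwningPtr<int> B(std::move(A)); // B owns the int, A is null
      A = std::move(B);                     // ownership moves back via take()
    }
    #endif
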
diff --git a/contrib/llvm/include/llvm/ADT/PackedVector.h b/contrib/llvm/include/llvm/ADT/PackedVector.h
index 2eaddc2..1ae2a77 100644
--- a/contrib/llvm/include/llvm/ADT/PackedVector.h
+++ b/contrib/llvm/include/llvm/ADT/PackedVector.h
@@ -19,32 +19,32 @@
namespace llvm {
-template <typename T, unsigned BitNum, bool isSigned>
+template <typename T, unsigned BitNum, typename BitVectorTy, bool isSigned>
class PackedVectorBase;
// This won't be necessary if we can specialize members without specializing
// the parent template.
-template <typename T, unsigned BitNum>
-class PackedVectorBase<T, BitNum, false> {
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, false> {
protected:
- static T getValue(const llvm::BitVector &Bits, unsigned Idx) {
+ static T getValue(const BitVectorTy &Bits, unsigned Idx) {
T val = T();
for (unsigned i = 0; i != BitNum; ++i)
val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
return val;
}
- static void setValue(llvm::BitVector &Bits, unsigned Idx, T val) {
+ static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
assert((val >> BitNum) == 0 && "value is too big");
for (unsigned i = 0; i != BitNum; ++i)
Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
}
};
-template <typename T, unsigned BitNum>
-class PackedVectorBase<T, BitNum, true> {
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, true> {
protected:
- static T getValue(const llvm::BitVector &Bits, unsigned Idx) {
+ static T getValue(const BitVectorTy &Bits, unsigned Idx) {
T val = T();
for (unsigned i = 0; i != BitNum-1; ++i)
val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
@@ -53,7 +53,7 @@ protected:
return val;
}
- static void setValue(llvm::BitVector &Bits, unsigned Idx, T val) {
+ static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
if (val < 0) {
val = ~val;
Bits.set((Idx << (BitNum-1)) + BitNum-1);
@@ -71,11 +71,12 @@ protected:
/// @endcode
/// will create a vector accepting values -2, -1, 0, 1. Any other value will hit
/// an assertion.
-template <typename T, unsigned BitNum>
-class PackedVector : public PackedVectorBase<T, BitNum,
+template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
+class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
std::numeric_limits<T>::is_signed> {
- llvm::BitVector Bits;
- typedef PackedVectorBase<T, BitNum, std::numeric_limits<T>::is_signed> base;
+ BitVectorTy Bits;
+ typedef PackedVectorBase<T, BitNum, BitVectorTy,
+ std::numeric_limits<T>::is_signed> base;
public:
class reference {
diff --git a/contrib/llvm/include/llvm/ADT/PointerIntPair.h b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
index fcc758b..71c379b 100644
--- a/contrib/llvm/include/llvm/ADT/PointerIntPair.h
+++ b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
@@ -135,12 +135,12 @@ template<typename PointerTy, unsigned IntBits, typename IntType>
struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType> > {
typedef PointerIntPair<PointerTy, IntBits, IntType> Ty;
static Ty getEmptyKey() {
- intptr_t Val = -1;
+ uintptr_t Val = static_cast<uintptr_t>(-1);
Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
return Ty(reinterpret_cast<PointerTy>(Val), IntType((1 << IntBits)-1));
}
static Ty getTombstoneKey() {
- intptr_t Val = -2;
+ uintptr_t Val = static_cast<uintptr_t>(-2);
Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
return Ty(reinterpret_cast<PointerTy>(Val), IntType(0));
}
diff --git a/contrib/llvm/include/llvm/ADT/ScopedHashTable.h b/contrib/llvm/include/llvm/ADT/ScopedHashTable.h
index a6803ee..efddd9f 100644
--- a/contrib/llvm/include/llvm/ADT/ScopedHashTable.h
+++ b/contrib/llvm/include/llvm/ADT/ScopedHashTable.h
@@ -90,8 +90,8 @@ class ScopedHashTableScope {
/// LastValInScope - This is the last value that was inserted for this scope
/// or null if none have been inserted yet.
ScopedHashTableVal<K, V> *LastValInScope;
- void operator=(ScopedHashTableScope&); // DO NOT IMPLEMENT
- ScopedHashTableScope(ScopedHashTableScope&); // DO NOT IMPLEMENT
+ void operator=(ScopedHashTableScope&) LLVM_DELETED_FUNCTION;
+ ScopedHashTableScope(ScopedHashTableScope&) LLVM_DELETED_FUNCTION;
public:
ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
~ScopedHashTableScope();
diff --git a/contrib/llvm/include/llvm/ADT/SetVector.h b/contrib/llvm/include/llvm/ADT/SetVector.h
index 965f0de..d2f7286 100644
--- a/contrib/llvm/include/llvm/ADT/SetVector.h
+++ b/contrib/llvm/include/llvm/ADT/SetVector.h
@@ -27,10 +27,11 @@
namespace llvm {
+/// \brief A vector that has set insertion semantics.
+///
/// This adapter class provides a way to keep a set of things that also has the
/// property of a deterministic iteration order. The order of iteration is the
/// order of insertion.
-/// @brief A vector that has set insertion semantics.
template <typename T, typename Vector = std::vector<T>,
typename Set = SmallSet<T, 16> >
class SetVector {
@@ -45,59 +46,59 @@ public:
typedef typename vector_type::const_iterator const_iterator;
typedef typename vector_type::size_type size_type;
- /// @brief Construct an empty SetVector
+ /// \brief Construct an empty SetVector
SetVector() {}
- /// @brief Initialize a SetVector with a range of elements
+ /// \brief Initialize a SetVector with a range of elements
template<typename It>
SetVector(It Start, It End) {
insert(Start, End);
}
- /// @brief Determine if the SetVector is empty or not.
+ /// \brief Determine if the SetVector is empty or not.
bool empty() const {
return vector_.empty();
}
- /// @brief Determine the number of elements in the SetVector.
+ /// \brief Determine the number of elements in the SetVector.
size_type size() const {
return vector_.size();
}
- /// @brief Get an iterator to the beginning of the SetVector.
+ /// \brief Get an iterator to the beginning of the SetVector.
iterator begin() {
return vector_.begin();
}
- /// @brief Get a const_iterator to the beginning of the SetVector.
+ /// \brief Get a const_iterator to the beginning of the SetVector.
const_iterator begin() const {
return vector_.begin();
}
- /// @brief Get an iterator to the end of the SetVector.
+ /// \brief Get an iterator to the end of the SetVector.
iterator end() {
return vector_.end();
}
- /// @brief Get a const_iterator to the end of the SetVector.
+ /// \brief Get a const_iterator to the end of the SetVector.
const_iterator end() const {
return vector_.end();
}
- /// @brief Return the last element of the SetVector.
+ /// \brief Return the last element of the SetVector.
const T &back() const {
assert(!empty() && "Cannot call back() on empty SetVector!");
return vector_.back();
}
- /// @brief Index into the SetVector.
+ /// \brief Index into the SetVector.
const_reference operator[](size_type n) const {
assert(n < vector_.size() && "SetVector access out of range!");
return vector_[n];
}
- /// @returns true iff the element was inserted into the SetVector.
- /// @brief Insert a new element into the SetVector.
+ /// \brief Insert a new element into the SetVector.
+ /// \returns true iff the element was inserted into the SetVector.
bool insert(const value_type &X) {
bool result = set_.insert(X);
if (result)
@@ -105,7 +106,7 @@ public:
return result;
}
- /// @brief Insert a range of elements into the SetVector.
+ /// \brief Insert a range of elements into the SetVector.
template<typename It>
void insert(It Start, It End) {
for (; Start != End; ++Start)
@@ -113,7 +114,7 @@ public:
vector_.push_back(*Start);
}
- /// @brief Remove an item from the set vector.
+ /// \brief Remove an item from the set vector.
bool remove(const value_type& X) {
if (set_.erase(X)) {
typename vector_type::iterator I =
@@ -125,20 +126,44 @@ public:
return false;
}
-
- /// @returns 0 if the element is not in the SetVector, 1 if it is.
- /// @brief Count the number of elements of a given key in the SetVector.
+ /// \brief Remove items from the set vector based on a predicate function.
+ ///
+ /// This is intended to be equivalent to the following code, if we could
+ /// write it:
+ ///
+ /// \code
+ /// V.erase(std::remove_if(V.begin(), V.end(), P), V.end());
+ /// \endcode
+ ///
+ /// However, SetVector doesn't expose non-const iterators, making any
+ /// algorithm like remove_if impossible to use.
+ ///
+ /// \returns true if any element is removed.
+ template <typename UnaryPredicate>
+ bool remove_if(UnaryPredicate P) {
+ typename vector_type::iterator I
+ = std::remove_if(vector_.begin(), vector_.end(),
+ TestAndEraseFromSet<UnaryPredicate>(P, set_));
+ if (I == vector_.end())
+ return false;
+ vector_.erase(I, vector_.end());
+ return true;
+ }
+
+
+ /// \brief Count the number of elements of a given key in the SetVector.
+ /// \returns 0 if the element is not in the SetVector, 1 if it is.
size_type count(const key_type &key) const {
return set_.count(key);
}
- /// @brief Completely clear the SetVector
+ /// \brief Completely clear the SetVector
void clear() {
set_.clear();
vector_.clear();
}
- /// @brief Remove the last element of the SetVector.
+ /// \brief Remove the last element of the SetVector.
void pop_back() {
assert(!empty() && "Cannot remove an element from an empty SetVector!");
set_.erase(back());
@@ -160,18 +185,41 @@ public:
}
private:
+ /// \brief A wrapper predicate designed for use with std::remove_if.
+ ///
+ /// This predicate wraps a predicate suitable for use with std::remove_if to
+ /// call set_.erase(x) on each element which is slated for removal.
+ template <typename UnaryPredicate>
+ class TestAndEraseFromSet {
+ UnaryPredicate P;
+ set_type &set_;
+
+ public:
+ typedef typename UnaryPredicate::argument_type argument_type;
+
+ TestAndEraseFromSet(UnaryPredicate P, set_type &set_) : P(P), set_(set_) {}
+
+ bool operator()(argument_type Arg) {
+ if (P(Arg)) {
+ set_.erase(Arg);
+ return true;
+ }
+ return false;
+ }
+ };
+
set_type set_; ///< The set.
vector_type vector_; ///< The vector.
};
-/// SmallSetVector - A SetVector that performs no allocations if smaller than
+/// \brief A SetVector that performs no allocations if smaller than
/// a certain size.
template <typename T, unsigned N>
class SmallSetVector : public SetVector<T, SmallVector<T, N>, SmallSet<T, N> > {
public:
SmallSetVector() {}
- /// @brief Initialize a SmallSetVector with a range of elements
+ /// \brief Initialize a SmallSetVector with a range of elements
template<typename It>
SmallSetVector(It Start, It End) {
this->insert(Start, End);
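
[editor's note] A sketch of remove_if; because TestAndEraseFromSet needs UnaryPredicate::argument_type, the predicate is written as a std::unary_function-style functor rather than a plain function pointer:

    #include "llvm/ADT/SetVector.h"
    #include <functional>

    struct IsOdd : public std::unary_function<int, bool> {
      bool operator()(int V) const { return (V % 2) != 0; }
    };

    static void filterOdds() {
      llvm::SetVector<int> SV;
      for (int i = 0; i != 10; ++i)
        SV.insert(i);
      bool Changed = SV.remove_if(IsOdd()); // erases from set and vector
      (void)Changed;                        // SV now holds 0 2 4 6 8 in order
    }
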
diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
index 7a645e0..a9cd54e 100644
--- a/contrib/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
@@ -300,6 +300,21 @@ public:
return *this;
}
+ /// set - Efficiently set a range of bits in [I, E)
+ SmallBitVector &set(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to set backwards range!");
+ assert(E <= size() && "Attempted to set out-of-bounds range!");
+ if (I == E) return *this;
+ if (isSmall()) {
+ uintptr_t EMask = ((uintptr_t)1) << E;
+ uintptr_t IMask = ((uintptr_t)1) << I;
+ uintptr_t Mask = EMask - IMask;
+ setSmallBits(getSmallBits() | Mask);
+ } else
+ getPointer()->set(I, E);
+ return *this;
+ }
+
SmallBitVector &reset() {
if (isSmall())
setSmallBits(0);
@@ -316,6 +331,21 @@ public:
return *this;
}
+ /// reset - Efficiently reset a range of bits in [I, E)
+ SmallBitVector &reset(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to reset backwards range!");
+ assert(E <= size() && "Attempted to reset out-of-bounds range!");
+ if (I == E) return *this;
+ if (isSmall()) {
+ uintptr_t EMask = ((uintptr_t)1) << E;
+ uintptr_t IMask = ((uintptr_t)1) << I;
+ uintptr_t Mask = EMask - IMask;
+ setSmallBits(getSmallBits() & ~Mask);
+ } else
+ getPointer()->reset(I, E);
+ return *this;
+ }
+
SmallBitVector &flip() {
if (isSmall())
setSmallBits(~getSmallBits());
diff --git a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
index 498a034..3bb8830 100644
--- a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -15,12 +15,13 @@
#ifndef LLVM_ADT_SMALLPTRSET_H
#define LLVM_ADT_SMALLPTRSET_H
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <cstddef>
#include <cstring>
#include <iterator>
-#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/PointerLikeTypeTraits.h"
namespace llvm {
@@ -132,7 +133,7 @@ private:
/// Grow - Allocate a larger backing store for the buckets and move it over.
void Grow(unsigned NewSize);
- void operator=(const SmallPtrSetImpl &RHS); // DO NOT IMPLEMENT.
+ void operator=(const SmallPtrSetImpl &RHS) LLVM_DELETED_FUNCTION;
protected:
/// swap - Swaps the elements of two sets.
/// Note: This method assumes that both sets have the same small size.
diff --git a/contrib/llvm/include/llvm/ADT/SmallString.h b/contrib/llvm/include/llvm/ADT/SmallString.h
index c6f0a5b..8da99d1 100644
--- a/contrib/llvm/include/llvm/ADT/SmallString.h
+++ b/contrib/llvm/include/llvm/ADT/SmallString.h
@@ -44,25 +44,25 @@ public:
/// @name String Assignment
/// @{
- /// Assign from a repeated element
+ /// Assign from a repeated element.
void assign(size_t NumElts, char Elt) {
this->SmallVectorImpl<char>::assign(NumElts, Elt);
}
- /// Assign from an iterator pair
+ /// Assign from an iterator pair.
template<typename in_iter>
void assign(in_iter S, in_iter E) {
this->clear();
SmallVectorImpl<char>::append(S, E);
}
- /// Assign from a StringRef
+ /// Assign from a StringRef.
void assign(StringRef RHS) {
this->clear();
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
- /// Assign from a SmallVector
+ /// Assign from a SmallVector.
void assign(const SmallVectorImpl<char> &RHS) {
this->clear();
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
@@ -72,7 +72,7 @@ public:
/// @name String Concatenation
/// @{
- /// Append from an iterator pair
+ /// Append from an iterator pair.
template<typename in_iter>
void append(in_iter S, in_iter E) {
SmallVectorImpl<char>::append(S, E);
@@ -83,12 +83,12 @@ public:
}
- /// Append from a StringRef
+ /// Append from a StringRef.
void append(StringRef RHS) {
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
- /// Append from a SmallVector
+ /// Append from a SmallVector.
void append(const SmallVectorImpl<char> &RHS) {
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
@@ -97,19 +97,19 @@ public:
/// @name String Comparison
/// @{
- /// equals - Check for string equality, this is more efficient than
- /// compare() when the relative ordering of inequal strings isn't needed.
+ /// Check for string equality. This is more efficient than compare() when
+ /// the relative ordering of unequal strings isn't needed.
bool equals(StringRef RHS) const {
return str().equals(RHS);
}
- /// equals_lower - Check for string equality, ignoring case.
+ /// Check for string equality, ignoring case.
bool equals_lower(StringRef RHS) const {
return str().equals_lower(RHS);
}
- /// compare - Compare two strings; the result is -1, 0, or 1 if this string
- /// is lexicographically less than, equal to, or greater than the \arg RHS.
+ /// Compare two strings; the result is -1, 0, or 1 if this string is
+ /// lexicographically less than, equal to, or greater than the \p RHS.
int compare(StringRef RHS) const {
return str().compare(RHS);
}
@@ -129,12 +129,12 @@ public:
/// @name String Predicates
/// @{
- /// startswith - Check if this string starts with the given \arg Prefix.
+ /// startswith - Check if this string starts with the given \p Prefix.
bool startswith(StringRef Prefix) const {
return str().startswith(Prefix);
}
- /// endswith - Check if this string ends with the given \arg Suffix.
+ /// endswith - Check if this string ends with the given \p Suffix.
bool endswith(StringRef Suffix) const {
return str().endswith(Suffix);
}
@@ -143,76 +143,76 @@ public:
/// @name String Searching
/// @{
- /// find - Search for the first character \arg C in the string.
+ /// find - Search for the first character \p C in the string.
///
- /// \return - The index of the first occurrence of \arg C, or npos if not
+ /// \return - The index of the first occurrence of \p C, or npos if not
/// found.
size_t find(char C, size_t From = 0) const {
return str().find(C, From);
}
- /// find - Search for the first string \arg Str in the string.
+ /// Search for the first string \p Str in the string.
///
- /// \return - The index of the first occurrence of \arg Str, or npos if not
+ /// \returns The index of the first occurrence of \p Str, or npos if not
/// found.
size_t find(StringRef Str, size_t From = 0) const {
return str().find(Str, From);
}
- /// rfind - Search for the last character \arg C in the string.
+ /// Search for the last character \p C in the string.
///
- /// \return - The index of the last occurrence of \arg C, or npos if not
+ /// \returns The index of the last occurrence of \p C, or npos if not
/// found.
size_t rfind(char C, size_t From = StringRef::npos) const {
return str().rfind(C, From);
}
- /// rfind - Search for the last string \arg Str in the string.
+ /// Search for the last string \p Str in the string.
///
- /// \return - The index of the last occurrence of \arg Str, or npos if not
+ /// \returns The index of the last occurrence of \p Str, or npos if not
/// found.
size_t rfind(StringRef Str) const {
return str().rfind(Str);
}
- /// find_first_of - Find the first character in the string that is \arg C,
- /// or npos if not found. Same as find.
+ /// Find the first character in the string that is \p C, or npos if not
+ /// found. Same as find.
size_t find_first_of(char C, size_t From = 0) const {
return str().find_first_of(C, From);
}
- /// find_first_of - Find the first character in the string that is in \arg
- /// Chars, or npos if not found.
+ /// Find the first character in the string that is in \p Chars, or npos if
+ /// not found.
///
- /// Note: O(size() + Chars.size())
+ /// Complexity: O(size() + Chars.size())
size_t find_first_of(StringRef Chars, size_t From = 0) const {
return str().find_first_of(Chars, From);
}
- /// find_first_not_of - Find the first character in the string that is not
- /// \arg C or npos if not found.
+ /// Find the first character in the string that is not \p C or npos if not
+ /// found.
size_t find_first_not_of(char C, size_t From = 0) const {
return str().find_first_not_of(C, From);
}
- /// find_first_not_of - Find the first character in the string that is not
- /// in the string \arg Chars, or npos if not found.
+ /// Find the first character in the string that is not in the string
+ /// \p Chars, or npos if not found.
///
- /// Note: O(size() + Chars.size())
+ /// Complexity: O(size() + Chars.size())
size_t find_first_not_of(StringRef Chars, size_t From = 0) const {
return str().find_first_not_of(Chars, From);
}
- /// find_last_of - Find the last character in the string that is \arg C, or
- /// npos if not found.
+ /// Find the last character in the string that is \p C, or npos if not
+ /// found.
size_t find_last_of(char C, size_t From = StringRef::npos) const {
return str().find_last_of(C, From);
}
- /// find_last_of - Find the last character in the string that is in \arg C,
- /// or npos if not found.
+ /// Find the last character in the string that is in \p C, or npos if not
+ /// found.
///
- /// Note: O(size() + Chars.size())
+ /// Complexity: O(size() + Chars.size())
size_t find_last_of(
StringRef Chars, size_t From = StringRef::npos) const {
return str().find_last_of(Chars, From);
@@ -222,13 +222,13 @@ public:
/// @name Helpful Algorithms
/// @{
- /// count - Return the number of occurrences of \arg C in the string.
+ /// Return the number of occurrences of \p C in the string.
size_t count(char C) const {
return str().count(C);
}
- /// count - Return the number of non-overlapped occurrences of \arg Str in
- /// the string.
+ /// Return the number of non-overlapped occurrences of \p Str in the
+ /// string.
size_t count(StringRef Str) const {
return str().count(Str);
}
@@ -237,36 +237,36 @@ public:
/// @name Substring Operations
/// @{
- /// substr - Return a reference to the substring from [Start, Start + N).
+ /// Return a reference to the substring from [Start, Start + N).
///
- /// \param Start - The index of the starting character in the substring; if
+ /// \param Start The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
- /// \param N - The number of characters to included in the substring. If N
+ /// \param N The number of characters to include in the substring. If \p N
/// exceeds the number of characters remaining in the string, the string
- /// suffix (starting with \arg Start) will be returned.
+ /// suffix (starting with \p Start) will be returned.
StringRef substr(size_t Start, size_t N = StringRef::npos) const {
return str().substr(Start, N);
}
- /// slice - Return a reference to the substring from [Start, End).
+ /// Return a reference to the substring from [Start, End).
///
- /// \param Start - The index of the starting character in the substring; if
+ /// \param Start The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
- /// \param End - The index following the last character to include in the
- /// substring. If this is npos, or less than \arg Start, or exceeds the
+ /// \param End The index following the last character to include in the
+ /// substring. If this is npos, or less than \p Start, or exceeds the
/// number of characters remaining in the string, the string suffix
- /// (starting with \arg Start) will be returned.
+ /// (starting with \p Start) will be returned.
StringRef slice(size_t Start, size_t End) const {
return str().slice(Start, End);
}
// Extra methods.
- /// Explicit conversion to StringRef
+ /// Explicit conversion to StringRef.
StringRef str() const { return StringRef(this->begin(), this->size()); }
// TODO: Make this const, if it's safe...
diff --git a/contrib/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm/include/llvm/ADT/SmallVector.h
index 9fbbbe4..6e0fd94 100644
--- a/contrib/llvm/include/llvm/ADT/SmallVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallVector.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
+#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
@@ -32,44 +33,20 @@ class SmallVectorBase {
protected:
void *BeginX, *EndX, *CapacityX;
- // Allocate raw space for N elements of type T. If T has a ctor or dtor, we
- // don't want it to be automatically run, so we need to represent the space as
- // something else. An array of char would work great, but might not be
- // aligned sufficiently. Instead we use some number of union instances for
- // the space, which guarantee maximal alignment.
- union U {
- double D;
- long double LD;
- long long L;
- void *P;
- } FirstEl;
- // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
-
protected:
- SmallVectorBase(size_t Size)
- : BeginX(&FirstEl), EndX(&FirstEl), CapacityX((char*)&FirstEl+Size) {}
-
- /// isSmall - Return true if this is a smallvector which has not had dynamic
- /// memory allocated for it.
- bool isSmall() const {
- return BeginX == static_cast<const void*>(&FirstEl);
- }
-
- /// resetToSmall - Put this vector in a state of being small.
- void resetToSmall() {
- BeginX = EndX = CapacityX = &FirstEl;
- }
+ SmallVectorBase(void *FirstEl, size_t Size)
+ : BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {}
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
- void grow_pod(size_t MinSizeInBytes, size_t TSize);
+ void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize);
public:
/// size_in_bytes - This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
-
+
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
@@ -78,11 +55,41 @@ public:
bool empty() const { return BeginX == EndX; }
};
+template <typename T, unsigned N> struct SmallVectorStorage;
-template <typename T>
+/// SmallVectorTemplateCommon - This is the part of SmallVectorTemplateBase
+/// which does not depend on whether the type T is a POD. The extra dummy
+/// template argument is used by ArrayRef to avoid unnecessarily requiring T
+/// to be complete.
+template <typename T, typename = void>
class SmallVectorTemplateCommon : public SmallVectorBase {
+private:
+ template <typename, unsigned> friend struct SmallVectorStorage;
+
+ // Allocate raw space for N elements of type T. If T has a ctor or dtor, we
+ // don't want it to be automatically run, so we need to represent the space as
+ // something else. Use an array of char of sufficient alignment.
+ typedef llvm::AlignedCharArrayUnion<T> U;
+ U FirstEl;
+ // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
+
protected:
- SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(Size) {}
+ SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {}
+
+ void grow_pod(size_t MinSizeInBytes, size_t TSize) {
+ SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize);
+ }
+
+ /// isSmall - Return true if this is a smallvector which has not had dynamic
+ /// memory allocated for it.
+ bool isSmall() const {
+ return BeginX == static_cast<const void*>(&FirstEl);
+ }
+
+ /// resetToSmall - Put this vector in a state of being small.
+ void resetToSmall() {
+ BeginX = EndX = CapacityX = &FirstEl;
+ }
void setEnd(T *P) { this->EndX = P; }
public:
@@ -677,8 +684,8 @@ public:
RHS.begin(), RHS.end());
}
- /// set_size - Set the array size to \arg N, which the current array must have
- /// enough capacity for.
+ /// Set the array size to \p N, which the current array must have enough
+ /// capacity for.
///
/// This does not construct or destroy any elements in the vector.
///
@@ -844,6 +851,17 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
}
#endif
+/// Storage for the SmallVector elements which aren't contained in
+/// SmallVectorTemplateCommon. There are 'N-1' elements here. The remaining '1'
+/// element is in the base class. This is specialized for the N=1 and N=0 cases
+/// to avoid allocating unnecessary storage.
+template <typename T, unsigned N>
+struct SmallVectorStorage {
+ typename SmallVectorTemplateCommon<T>::U InlineElts[N - 1];
+};
+template <typename T> struct SmallVectorStorage<T, 1> {};
+template <typename T> struct SmallVectorStorage<T, 0> {};
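The net effect of the AlignedCharArrayUnion refactoring above is that SmallVector<T, N> now reserves space for exactly N inline elements (one in the base class, N-1 in SmallVectorStorage) rather than a rounded count of union slots. A minimal sketch of the observable behaviour, using only the public API (the function name is ours):

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void inlineStorageSketch() {
  llvm::SmallVector<int, 4> V;  // space for 4 ints lives in the object itself
  for (int i = 0; i < 4; ++i)
    V.push_back(i);             // no heap allocation yet
  V.push_back(4);               // the fifth element spills to the heap
  assert(V.size() == 5 && V[4] == 4);
}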
+
/// SmallVector - This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
/// in-place, which allows it to avoid heap allocation when the actual number of
@@ -854,41 +872,23 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
///
template <typename T, unsigned N>
class SmallVector : public SmallVectorImpl<T> {
- /// InlineElts - These are 'N-1' elements that are stored inline in the body
- /// of the vector. The extra '1' element is stored in SmallVectorImpl.
- typedef typename SmallVectorImpl<T>::U U;
- enum {
- // MinUs - The number of U's require to cover N T's.
- MinUs = (static_cast<unsigned int>(sizeof(T))*N +
- static_cast<unsigned int>(sizeof(U)) - 1) /
- static_cast<unsigned int>(sizeof(U)),
-
- // NumInlineEltsElts - The number of elements actually in this array. There
- // is already one in the parent class, and we have to round up to avoid
- // having a zero-element array.
- NumInlineEltsElts = MinUs > 1 ? (MinUs - 1) : 1,
-
- // NumTsAvailable - The number of T's we actually have space for, which may
- // be more than N due to rounding.
- NumTsAvailable = (NumInlineEltsElts+1)*static_cast<unsigned int>(sizeof(U))/
- static_cast<unsigned int>(sizeof(T))
- };
- U InlineElts[NumInlineEltsElts];
+ /// Storage - Inline space for elements which aren't stored in the base class.
+ SmallVectorStorage<T, N> Storage;
public:
- SmallVector() : SmallVectorImpl<T>(NumTsAvailable) {
+ SmallVector() : SmallVectorImpl<T>(N) {
}
explicit SmallVector(unsigned Size, const T &Value = T())
- : SmallVectorImpl<T>(NumTsAvailable) {
+ : SmallVectorImpl<T>(N) {
this->assign(Size, Value);
}
template<typename ItTy>
- SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(NumTsAvailable) {
+ SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
this->append(S, E);
}
- SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(NumTsAvailable) {
+ SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(RHS);
}
@@ -899,7 +899,7 @@ public:
}
#if LLVM_USE_RVALUE_REFERENCES
- SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(NumTsAvailable) {
+ SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
@@ -912,48 +912,6 @@ public:
};
-/// Specialize SmallVector at N=0. This specialization guarantees
-/// that it can be instantiated at an incomplete T if none of its
-/// members are required.
-template <typename T>
-class SmallVector<T,0> : public SmallVectorImpl<T> {
-public:
- SmallVector() : SmallVectorImpl<T>(0) {
- }
-
- explicit SmallVector(unsigned Size, const T &Value = T())
- : SmallVectorImpl<T>(0) {
- this->assign(Size, Value);
- }
-
- template<typename ItTy>
- SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(0) {
- this->append(S, E);
- }
-
- SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(0) {
- if (!RHS.empty())
- SmallVectorImpl<T>::operator=(RHS);
- }
-
- const SmallVector &operator=(const SmallVector &RHS) {
- SmallVectorImpl<T>::operator=(RHS);
- return *this;
- }
-
-#if LLVM_USE_RVALUE_REFERENCES
- SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(0) {
- if (!RHS.empty())
- SmallVectorImpl<T>::operator=(::std::move(RHS));
- }
-
- const SmallVector &operator=(SmallVector &&RHS) {
- SmallVectorImpl<T>::operator=(::std::move(RHS));
- return *this;
- }
-#endif
-};
-
template<typename T, unsigned N>
static inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
return X.capacity_in_bytes();
diff --git a/contrib/llvm/include/llvm/ADT/SparseBitVector.h b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
index 89774c3..306e928 100644
--- a/contrib/llvm/include/llvm/ADT/SparseBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
@@ -158,7 +158,7 @@ public:
&& "Word Position outside of element");
// Mask off previous bits.
- Copy &= ~0L << BitPos;
+ Copy &= ~0UL << BitPos;
if (Copy != 0) {
if (sizeof(BitWord) == 4)
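The ~0L to ~0UL change above is a correctness fix rather than cosmetics: ~0L is the signed value -1, and left-shifting a negative signed integer is undefined behaviour in C++. A hedged illustration of the intended mask (the helper is ours):

unsigned long maskFromBit(unsigned BitPos) {
  return ~0UL << BitPos;  // e.g. BitPos == 4 yields ...11110000 portably
}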
@@ -262,6 +262,22 @@ public:
}
};
+template <unsigned ElementSize>
+struct ilist_traits<SparseBitVectorElement<ElementSize> >
+ : public ilist_default_traits<SparseBitVectorElement<ElementSize> > {
+ typedef SparseBitVectorElement<ElementSize> Element;
+
+ Element *createSentinel() const { return static_cast<Element *>(&Sentinel); }
+ static void destroySentinel(Element *) {}
+
+ Element *provideInitialHead() const { return createSentinel(); }
+ Element *ensureHead(Element *) const { return createSentinel(); }
+ static void noteHead(Element *, Element *) {}
+
+private:
+ mutable ilist_half_node<Element> Sentinel;
+};
+
template <unsigned ElementSize = 128>
class SparseBitVector {
typedef ilist<SparseBitVectorElement<ElementSize> > ElementList;
diff --git a/contrib/llvm/include/llvm/ADT/SparseSet.h b/contrib/llvm/include/llvm/ADT/SparseSet.h
index 55696333..063c675 100644
--- a/contrib/llvm/include/llvm/ADT/SparseSet.h
+++ b/contrib/llvm/include/llvm/ADT/SparseSet.h
@@ -110,9 +110,9 @@ struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
/// For sets that may grow to thousands of elements, SparseT should be set to
/// uint16_t or uint32_t.
///
-/// @param ValueT The type of objects in the set.
-/// @param KeyFunctorT A functor that computes an unsigned index from KeyT.
-/// @param SparseT An unsigned integer type. See above.
+/// @tparam ValueT The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT An unsigned integer type. See above.
///
template<typename ValueT,
typename KeyFunctorT = llvm::identity<unsigned>,
@@ -128,8 +128,8 @@ class SparseSet {
// Disable copy construction and assignment.
// This data structure is not meant to be used that way.
- SparseSet(const SparseSet&); // DO NOT IMPLEMENT.
- SparseSet &operator=(const SparseSet&); // DO NOT IMPLEMENT.
+ SparseSet(const SparseSet&) LLVM_DELETED_FUNCTION;
+ SparseSet &operator=(const SparseSet&) LLVM_DELETED_FUNCTION;
public:
typedef ValueT value_type;
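A brief usage sketch under the documented template parameters; setUniverse() and count() follow the surrounding header, and the choice of uint16_t is an illustrative assumption for a set that may grow past 256 elements:

#include "llvm/ADT/SparseSet.h"

void sparseSetSketch() {
  llvm::SparseSet<unsigned, llvm::identity<unsigned>, uint16_t> Set;
  Set.setUniverse(1000);          // keys must be < 1000; call before inserting
  Set.insert(42);
  bool Present = Set.count(42);   // constant-time membership test
  (void)Present;
}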
diff --git a/contrib/llvm/include/llvm/ADT/StringExtras.h b/contrib/llvm/include/llvm/ADT/StringExtras.h
index 655d884..bf27c43 100644
--- a/contrib/llvm/include/llvm/ADT/StringExtras.h
+++ b/contrib/llvm/include/llvm/ADT/StringExtras.h
@@ -21,7 +21,7 @@ namespace llvm {
template<typename T> class SmallVectorImpl;
/// hexdigit - Return the hexadecimal character for the
-/// given number \arg X (which should be less than 16).
+/// given number \p X (which should be less than 16).
static inline char hexdigit(unsigned X, bool LowerCase = false) {
const char HexChar = LowerCase ? 'a' : 'A';
return X < 10 ? '0' + X : HexChar + X - 10;
@@ -125,10 +125,29 @@ void SplitString(StringRef Source,
// X*33+c -> X*33^c
static inline unsigned HashString(StringRef Str, unsigned Result = 0) {
for (unsigned i = 0, e = Str.size(); i != e; ++i)
- Result = Result * 33 + Str[i];
+ Result = Result * 33 + (unsigned char)Str[i];
return Result;
}
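The (unsigned char) cast added above matters on platforms where plain char is signed: bytes at or above 0x80 would otherwise sign-extend to negative values and perturb the hash. A small illustration (the function name is ours):

unsigned hashByteSketch() {
  char C = '\xE9';                       // -23 where char is signed
  unsigned WithCast = (unsigned char)C;  // 0xE9 == 233
  unsigned WithoutCast = (unsigned)C;    // 0xFFFFFFE9 where char is signed
  return WithCast == WithoutCast;        // 0 on signed-char platforms
}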
+/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th).
+static inline StringRef getOrdinalSuffix(unsigned Val) {
+ // It is critically important that we do this perfectly for
+ // user-written sequences with over 100 elements.
+ switch (Val % 100) {
+ case 11:
+ case 12:
+ case 13:
+ return "th";
+ default:
+ switch (Val % 10) {
+ case 1: return "st";
+ case 2: return "nd";
+ case 3: return "rd";
+ default: return "th";
+ }
+ }
+}
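A quick check of the new helper's behaviour, including the 11/12/13 exceptions that the outer switch handles before the last-digit rules:

#include <cassert>

void ordinalSketch() {
  assert(llvm::getOrdinalSuffix(1) == "st");
  assert(llvm::getOrdinalSuffix(12) == "th");   // 11-13 override the digit rule
  assert(llvm::getOrdinalSuffix(22) == "nd");
  assert(llvm::getOrdinalSuffix(113) == "th");  // 113 % 100 == 13
  assert(llvm::getOrdinalSuffix(123) == "rd");  // 123 % 100 == 23, last digit 3
}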
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/ADT/StringRef.h b/contrib/llvm/include/llvm/ADT/StringRef.h
index cd84603..292bde0 100644
--- a/contrib/llvm/include/llvm/ADT/StringRef.h
+++ b/contrib/llvm/include/llvm/ADT/StringRef.h
@@ -138,7 +138,7 @@ namespace llvm {
}
/// compare - Compare two strings; the result is -1, 0, or 1 if this string
- /// is lexicographically less than, equal to, or greater than the \arg RHS.
+ /// is lexicographically less than, equal to, or greater than the \p RHS.
int compare(StringRef RHS) const {
// Check the prefix for a mismatch.
if (int Res = compareMemory(Data, RHS.Data, min(Length, RHS.Length)))
@@ -205,13 +205,13 @@ namespace llvm {
/// @name String Predicates
/// @{
- /// startswith - Check if this string starts with the given \arg Prefix.
+ /// Check if this string starts with the given \p Prefix.
bool startswith(StringRef Prefix) const {
return Length >= Prefix.Length &&
compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
}
- /// endswith - Check if this string ends with the given \arg Suffix.
+ /// Check if this string ends with the given \p Suffix.
bool endswith(StringRef Suffix) const {
return Length >= Suffix.Length &&
compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
@@ -221,9 +221,9 @@ namespace llvm {
/// @name String Searching
/// @{
- /// find - Search for the first character \arg C in the string.
+ /// Search for the first character \p C in the string.
///
- /// \return - The index of the first occurrence of \arg C, or npos if not
+ /// \returns The index of the first occurrence of \p C, or npos if not
/// found.
size_t find(char C, size_t From = 0) const {
for (size_t i = min(From, Length), e = Length; i != e; ++i)
@@ -232,15 +232,15 @@ namespace llvm {
return npos;
}
- /// find - Search for the first string \arg Str in the string.
+ /// Search for the first string \p Str in the string.
///
- /// \return - The index of the first occurrence of \arg Str, or npos if not
+ /// \returns The index of the first occurrence of \p Str, or npos if not
/// found.
size_t find(StringRef Str, size_t From = 0) const;
- /// rfind - Search for the last character \arg C in the string.
+ /// Search for the last character \p C in the string.
///
- /// \return - The index of the last occurrence of \arg C, or npos if not
+ /// \returns The index of the last occurrence of \p C, or npos if not
/// found.
size_t rfind(char C, size_t From = npos) const {
From = min(From, Length);
@@ -253,61 +253,61 @@ namespace llvm {
return npos;
}
- /// rfind - Search for the last string \arg Str in the string.
+ /// Search for the last string \p Str in the string.
///
- /// \return - The index of the last occurrence of \arg Str, or npos if not
+ /// \returns The index of the last occurrence of \p Str, or npos if not
/// found.
size_t rfind(StringRef Str) const;
- /// find_first_of - Find the first character in the string that is \arg C,
- /// or npos if not found. Same as find.
+ /// Find the first character in the string that is \p C, or npos if not
+ /// found. Same as find.
size_type find_first_of(char C, size_t From = 0) const {
return find(C, From);
}
- /// find_first_of - Find the first character in the string that is in \arg
- /// Chars, or npos if not found.
+ /// Find the first character in the string that is in \p Chars, or npos if
+ /// not found.
///
- /// Note: O(size() + Chars.size())
+ /// Complexity: O(size() + Chars.size())
size_type find_first_of(StringRef Chars, size_t From = 0) const;
- /// find_first_not_of - Find the first character in the string that is not
- /// \arg C or npos if not found.
+ /// Find the first character in the string that is not \p C or npos if not
+ /// found.
size_type find_first_not_of(char C, size_t From = 0) const;
- /// find_first_not_of - Find the first character in the string that is not
- /// in the string \arg Chars, or npos if not found.
+ /// Find the first character in the string that is not in the string
+ /// \p Chars, or npos if not found.
///
- /// Note: O(size() + Chars.size())
+ /// Complexity: O(size() + Chars.size())
size_type find_first_not_of(StringRef Chars, size_t From = 0) const;
- /// find_last_of - Find the last character in the string that is \arg C, or
- /// npos if not found.
+ /// Find the last character in the string that is \p C, or npos if not
+ /// found.
size_type find_last_of(char C, size_t From = npos) const {
return rfind(C, From);
}
- /// find_last_of - Find the last character in the string that is in \arg C,
- /// or npos if not found.
+ /// Find the last character in the string that is in \p C, or npos if not
+ /// found.
///
- /// Note: O(size() + Chars.size())
+ /// Complexity: O(size() + Chars.size())
size_type find_last_of(StringRef Chars, size_t From = npos) const;
- /// find_last_not_of - Find the last character in the string that is not
- /// \arg C, or npos if not found.
+ /// Find the last character in the string that is not \p C, or npos if not
+ /// found.
size_type find_last_not_of(char C, size_t From = npos) const;
- /// find_last_not_of - Find the last character in the string that is not in
- /// \arg Chars, or npos if not found.
+ /// Find the last character in the string that is not in \p Chars, or
+ /// npos if not found.
///
- /// Note: O(size() + Chars.size())
+ /// Complexity: O(size() + Chars.size())
size_type find_last_not_of(StringRef Chars, size_t From = npos) const;
/// @}
/// @name Helpful Algorithms
/// @{
- /// count - Return the number of occurrences of \arg C in the string.
+ /// Return the number of occurrences of \p C in the string.
size_t count(char C) const {
size_t Count = 0;
for (size_t i = 0, e = Length; i != e; ++i)
@@ -316,18 +316,17 @@ namespace llvm {
return Count;
}
- /// count - Return the number of non-overlapped occurrences of \arg Str in
+ /// Return the number of non-overlapped occurrences of \p Str in
/// the string.
size_t count(StringRef Str) const;
- /// getAsInteger - Parse the current string as an integer of the specified
- /// radix. If Radix is specified as zero, this does radix autosensing using
+ /// Parse the current string as an integer of the specified radix. If
+ /// \p Radix is specified as zero, this does radix autosensing using
/// extended C rules: 0 is octal, 0x is hex, 0b is binary.
///
/// If the string is invalid or if only a subset of the string is valid,
/// this returns true to signify the error. The string is considered
/// erroneous if empty or if it overflows T.
- ///
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_signed, bool>::type
getAsInteger(unsigned Radix, T &Result) const {
@@ -350,13 +349,12 @@ namespace llvm {
return false;
}
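A sketch of the documented radix autosensing with Radix == 0; note the return value signals failure, not success:

unsigned Val;
bool Err;
Err = llvm::StringRef("0x1F").getAsInteger(0, Val);  // Err == false, Val == 31 (hex)
Err = llvm::StringRef("017").getAsInteger(0, Val);   // Err == false, Val == 15 (octal)
Err = llvm::StringRef("0b101").getAsInteger(0, Val); // Err == false, Val == 5 (binary)
Err = llvm::StringRef("12ab").getAsInteger(10, Val); // Err == true: trailing garbage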
- /// getAsInteger - Parse the current string as an integer of the
- /// specified radix, or of an autosensed radix if the radix given
- /// is 0. The current value in Result is discarded, and the
- /// storage is changed to be wide enough to store the parsed
- /// integer.
+ /// Parse the current string as an integer of the specified \p Radix, or of
+ /// an autosensed radix if the \p Radix given is 0. The current value in
+ /// \p Result is discarded, and the storage is changed to be wide enough to
+ /// store the parsed integer.
///
- /// Returns true if the string does not solely consist of a valid
+ /// \returns true if the string does not solely consist of a valid
/// non-empty number in the appropriate base.
///
/// APInt::fromString is superficially similar but assumes the
@@ -367,70 +365,70 @@ namespace llvm {
/// @name String Operations
/// @{
- // lower - Convert the given ASCII string to lowercase.
+ /// Convert the given ASCII string to lowercase.
std::string lower() const;
- /// upper - Convert the given ASCII string to uppercase.
+ /// Convert the given ASCII string to uppercase.
std::string upper() const;
/// @}
/// @name Substring Operations
/// @{
- /// substr - Return a reference to the substring from [Start, Start + N).
+ /// Return a reference to the substring from [Start, Start + N).
///
- /// \param Start - The index of the starting character in the substring; if
+ /// \param Start The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
- /// \param N - The number of characters to included in the substring. If N
+ /// \param N The number of characters to include in the substring. If \p N
/// exceeds the number of characters remaining in the string, the string
- /// suffix (starting with \arg Start) will be returned.
+ /// suffix (starting with \p Start) will be returned.
StringRef substr(size_t Start, size_t N = npos) const {
Start = min(Start, Length);
return StringRef(Data + Start, min(N, Length - Start));
}
- /// drop_front - Return a StringRef equal to 'this' but with the first
- /// elements dropped.
+ /// Return a StringRef equal to 'this' but with the first \p N elements
+ /// dropped.
StringRef drop_front(unsigned N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return substr(N);
}
- /// drop_back - Return a StringRef equal to 'this' but with the last
- /// elements dropped.
+ /// Return a StringRef equal to 'this' but with the last \p N elements
+ /// dropped.
StringRef drop_back(unsigned N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return substr(0, size()-N);
}
- /// slice - Return a reference to the substring from [Start, End).
+ /// Return a reference to the substring from [Start, End).
///
- /// \param Start - The index of the starting character in the substring; if
+ /// \param Start The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
- /// \param End - The index following the last character to include in the
- /// substring. If this is npos, or less than \arg Start, or exceeds the
+ /// \param End The index following the last character to include in the
+ /// substring. If this is npos, or less than \p Start, or exceeds the
/// number of characters remaining in the string, the string suffix
- /// (starting with \arg Start) will be returned.
+ /// (starting with \p Start) will be returned.
StringRef slice(size_t Start, size_t End) const {
Start = min(Start, Length);
End = min(max(Start, End), Length);
return StringRef(Data + Start, End - Start);
}
- /// split - Split into two substrings around the first occurrence of a
- /// separator character.
+ /// Split into two substrings around the first occurrence of a separator
+ /// character.
///
- /// If \arg Separator is in the string, then the result is a pair (LHS, RHS)
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
- /// maximal. If \arg Separator is not in the string, then the result is a
+ /// maximal. If \p Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
- /// \param Separator - The character to split on.
- /// \return - The split substrings.
+ /// \param Separator The character to split on.
+ /// \returns The split substrings.
std::pair<StringRef, StringRef> split(char Separator) const {
size_t Idx = find(Separator);
if (Idx == npos)
@@ -438,12 +436,12 @@ namespace llvm {
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
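The (LHS, RHS) contract above can be read off directly: when the separator occurs, LHS + Separator + RHS reassembles the original string. A short sketch:

llvm::StringRef S("key=value");
std::pair<llvm::StringRef, llvm::StringRef> KV = S.split('=');
// KV.first == "key", KV.second == "value"
std::pair<llvm::StringRef, llvm::StringRef> Miss = S.split(';');
// Miss.first == "key=value", Miss.second == ""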
- /// split - Split into two substrings around the first occurrence of a
- /// separator string.
+ /// Split into two substrings around the first occurrence of a separator
+ /// string.
///
- /// If \arg Separator is in the string, then the result is a pair (LHS, RHS)
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
- /// maximal. If \arg Separator is not in the string, then the result is a
+ /// maximal. If \p Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator - The string to split on.
@@ -455,14 +453,13 @@ namespace llvm {
return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
}
- /// split - Split into substrings around the occurrences of a separator
- /// string.
+ /// Split into substrings around the occurrences of a separator string.
///
- /// Each substring is stored in \arg A. If \arg MaxSplit is >= 0, at most
- /// \arg MaxSplit splits are done and consequently <= \arg MaxSplit
+ /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+ /// \p MaxSplit splits are done and consequently <= \p MaxSplit
/// elements are added to A.
- /// If \arg KeepEmpty is false, empty strings are not added to \arg A. They
- /// still count when considering \arg MaxSplit
+ /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+ /// still count when considering \p MaxSplit.
/// A useful invariant is that
/// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true
///
@@ -474,12 +471,12 @@ namespace llvm {
StringRef Separator, int MaxSplit = -1,
bool KeepEmpty = true) const;
- /// rsplit - Split into two substrings around the last occurrence of a
- /// separator character.
+ /// Split into two substrings around the last occurrence of a separator
+ /// character.
///
- /// If \arg Separator is in the string, then the result is a pair (LHS, RHS)
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
- /// minimal. If \arg Separator is not in the string, then the result is a
+ /// minimal. If \p Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator - The character to split on.
@@ -491,20 +488,20 @@ namespace llvm {
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
- /// ltrim - Return string with consecutive characters in \arg Chars starting
- /// from the left removed.
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the left removed.
StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
return drop_front(std::min(Length, find_first_not_of(Chars)));
}
- /// rtrim - Return string with consecutive characters in \arg Chars starting
- /// from the right removed.
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the right removed.
StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
}
- /// trim - Return string with consecutive characters in \arg Chars starting
- /// from the left and right removed.
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the left and right removed.
StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
return ltrim(Chars).rtrim(Chars);
}
diff --git a/contrib/llvm/include/llvm/ADT/StringSet.h b/contrib/llvm/include/llvm/ADT/StringSet.h
index 9c55f6b..b69a964 100644
--- a/contrib/llvm/include/llvm/ADT/StringSet.h
+++ b/contrib/llvm/include/llvm/ADT/StringSet.h
@@ -29,8 +29,13 @@ namespace llvm {
assert(!InLang.empty());
const char *KeyStart = InLang.data();
const char *KeyEnd = KeyStart + InLang.size();
- return base::insert(llvm::StringMapEntry<char>::
- Create(KeyStart, KeyEnd, base::getAllocator(), '+'));
+ llvm::StringMapEntry<char> *Entry = llvm::StringMapEntry<char>::
+ Create(KeyStart, KeyEnd, base::getAllocator(), '+');
+ if (!base::insert(Entry)) {
+ Entry->Destroy(base::getAllocator());
+ return false;
+ }
+ return true;
}
};
}
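The change above closes a leak: previously a StringMapEntry was allocated even when the key was already present, and the failed insert dropped it on the floor. A behavioural sketch of the fixed method:

llvm::StringSet<> Set;
bool FirstTime = Set.insert("clang");  // true: entry created and owned by the set
bool Again = Set.insert("clang");      // false: duplicate, temporary entry destroyed
(void)FirstTime; (void)Again;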
diff --git a/contrib/llvm/include/llvm/ADT/Trie.h b/contrib/llvm/include/llvm/ADT/Trie.h
deleted file mode 100644
index 845af01..0000000
--- a/contrib/llvm/include/llvm/ADT/Trie.h
+++ /dev/null
@@ -1,334 +0,0 @@
-//===- llvm/ADT/Trie.h ---- Generic trie structure --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class defines a generic trie structure. The trie structure
-// is immutable after creation, but the payload contained within it is not.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ADT_TRIE_H
-#define LLVM_ADT_TRIE_H
-
-#include "llvm/ADT/GraphTraits.h"
-#include "llvm/Support/DOTGraphTraits.h"
-
-#include <cassert>
-#include <vector>
-
-namespace llvm {
-
-// FIXME:
-// - Labels are usually small, maybe it's better to use SmallString
-// - Should we use char* during construction?
-// - Should we templatize Empty with traits-like interface?
-
-template<class Payload>
-class Trie {
- friend class GraphTraits<Trie<Payload> >;
- friend class DOTGraphTraits<Trie<Payload> >;
-public:
- class Node {
- friend class Trie;
-
- public:
- typedef std::vector<Node*> NodeVectorType;
- typedef typename NodeVectorType::iterator iterator;
- typedef typename NodeVectorType::const_iterator const_iterator;
-
- private:
- enum QueryResult {
- Same = -3,
- StringIsPrefix = -2,
- LabelIsPrefix = -1,
- DontMatch = 0,
- HaveCommonPart
- };
-
- struct NodeCmp {
- bool operator() (Node* N1, Node* N2) {
- return (N1->Label[0] < N2->Label[0]);
- }
- bool operator() (Node* N, char Id) {
- return (N->Label[0] < Id);
- }
- };
-
- std::string Label;
- Payload Data;
- NodeVectorType Children;
-
- // Do not implement
- Node(const Node&);
- Node& operator=(const Node&);
-
- inline void addEdge(Node* N) {
- if (Children.empty())
- Children.push_back(N);
- else {
- iterator I = std::lower_bound(Children.begin(), Children.end(),
- N, NodeCmp());
- // FIXME: no dups are allowed
- Children.insert(I, N);
- }
- }
-
- inline void setEdge(Node* N) {
- char Id = N->Label[0];
- iterator I = std::lower_bound(Children.begin(), Children.end(),
- Id, NodeCmp());
- assert(I != Children.end() && "Node does not exists!");
- *I = N;
- }
-
- QueryResult query(const std::string& s) const {
- unsigned i, l;
- unsigned l1 = s.length();
- unsigned l2 = Label.length();
-
- // Find the length of common part
- l = std::min(l1, l2);
- i = 0;
- while ((i < l) && (s[i] == Label[i]))
- ++i;
-
- if (i == l) { // One is prefix of another, find who is who
- if (l1 == l2)
- return Same;
- else if (i == l1)
- return StringIsPrefix;
- else
- return LabelIsPrefix;
- } else // s and Label have common (possible empty) part, return its length
- return (QueryResult)i;
- }
-
- public:
- inline explicit Node(const Payload& data, const std::string& label = ""):
- Label(label), Data(data) { }
-
- inline const Payload& data() const { return Data; }
- inline void setData(const Payload& data) { Data = data; }
-
- inline const std::string& label() const { return Label; }
-
-#if 0
- inline void dump() {
- llvm::cerr << "Node: " << this << "\n"
- << "Label: " << Label << "\n"
- << "Children:\n";
-
- for (iterator I = Children.begin(), E = Children.end(); I != E; ++I)
- llvm::cerr << (*I)->Label << "\n";
- }
-#endif
-
- inline Node* getEdge(char Id) {
- Node* fNode = NULL;
- iterator I = std::lower_bound(Children.begin(), Children.end(),
- Id, NodeCmp());
- if (I != Children.end() && (*I)->Label[0] == Id)
- fNode = *I;
-
- return fNode;
- }
-
- inline iterator begin() { return Children.begin(); }
- inline const_iterator begin() const { return Children.begin(); }
- inline iterator end () { return Children.end(); }
- inline const_iterator end () const { return Children.end(); }
-
- inline size_t size () const { return Children.size(); }
- inline bool empty() const { return Children.empty(); }
- inline const Node* &front() const { return Children.front(); }
- inline Node* &front() { return Children.front(); }
- inline const Node* &back() const { return Children.back(); }
- inline Node* &back() { return Children.back(); }
-
- };
-
-private:
- std::vector<Node*> Nodes;
- Payload Empty;
-
- inline Node* addNode(const Payload& data, const std::string label = "") {
- Node* N = new Node(data, label);
- Nodes.push_back(N);
- return N;
- }
-
- inline Node* splitEdge(Node* N, char Id, size_t index) {
- Node* eNode = N->getEdge(Id);
- assert(eNode && "Node doesn't exist");
-
- const std::string &l = eNode->Label;
- assert(index > 0 && index < l.length() && "Trying to split too far!");
- std::string l1 = l.substr(0, index);
- std::string l2 = l.substr(index);
-
- Node* nNode = addNode(Empty, l1);
- N->setEdge(nNode);
-
- eNode->Label = l2;
- nNode->addEdge(eNode);
-
- return nNode;
- }
-
- // Do not implement
- Trie(const Trie&);
- Trie& operator=(const Trie&);
-
-public:
- inline explicit Trie(const Payload& empty):Empty(empty) {
- addNode(Empty);
- }
- inline ~Trie() {
- for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
- delete Nodes[i];
- }
-
- inline Node* getRoot() const { return Nodes[0]; }
-
- bool addString(const std::string& s, const Payload& data);
- const Payload& lookup(const std::string& s) const;
-
-};
-
-// Define this out-of-line to dissuade the C++ compiler from inlining it.
-template<class Payload>
-bool Trie<Payload>::addString(const std::string& s, const Payload& data) {
- Node* cNode = getRoot();
- Node* tNode = NULL;
- std::string s1(s);
-
- while (tNode == NULL) {
- char Id = s1[0];
- if (Node* nNode = cNode->getEdge(Id)) {
- typename Node::QueryResult r = nNode->query(s1);
-
- switch (r) {
- case Node::Same:
- case Node::StringIsPrefix:
- // Currently we don't allow to have two strings in the trie one
- // being a prefix of another. This should be fixed.
- assert(0 && "FIXME!");
- return false;
- case Node::DontMatch:
- llvm_unreachable("Impossible!");
- case Node::LabelIsPrefix:
- s1 = s1.substr(nNode->label().length());
- cNode = nNode;
- break;
- default:
- nNode = splitEdge(cNode, Id, r);
- tNode = addNode(data, s1.substr(r));
- nNode->addEdge(tNode);
- }
- } else {
- tNode = addNode(data, s1);
- cNode->addEdge(tNode);
- }
- }
-
- return true;
-}
-
-template<class Payload>
-const Payload& Trie<Payload>::lookup(const std::string& s) const {
- Node* cNode = getRoot();
- Node* tNode = NULL;
- std::string s1(s);
-
- while (tNode == NULL) {
- char Id = s1[0];
- if (Node* nNode = cNode->getEdge(Id)) {
- typename Node::QueryResult r = nNode->query(s1);
-
- switch (r) {
- case Node::Same:
- tNode = nNode;
- break;
- case Node::StringIsPrefix:
- return Empty;
- case Node::DontMatch:
- llvm_unreachable("Impossible!");
- case Node::LabelIsPrefix:
- s1 = s1.substr(nNode->label().length());
- cNode = nNode;
- break;
- default:
- return Empty;
- }
- } else
- return Empty;
- }
-
- return tNode->data();
-}
-
-template<class Payload>
-struct GraphTraits<Trie<Payload> > {
- typedef Trie<Payload> TrieType;
- typedef typename TrieType::Node NodeType;
- typedef typename NodeType::iterator ChildIteratorType;
-
- static inline NodeType *getEntryNode(const TrieType& T) {
- return T.getRoot();
- }
-
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) { return N->end(); }
-
- typedef typename std::vector<NodeType*>::const_iterator nodes_iterator;
-
- static inline nodes_iterator nodes_begin(const TrieType& G) {
- return G.Nodes.begin();
- }
- static inline nodes_iterator nodes_end(const TrieType& G) {
- return G.Nodes.end();
- }
-
-};
-
-template<class Payload>
-struct DOTGraphTraits<Trie<Payload> > : public DefaultDOTGraphTraits {
- typedef typename Trie<Payload>::Node NodeType;
- typedef typename GraphTraits<Trie<Payload> >::ChildIteratorType EdgeIter;
-
- static std::string getGraphName(const Trie<Payload>& T) {
- return "Trie";
- }
-
- static std::string getNodeLabel(NodeType* Node, const Trie<Payload>& T) {
- if (T.getRoot() == Node)
- return "<Root>";
- else
- return Node->label();
- }
-
- static std::string getEdgeSourceLabel(NodeType* Node, EdgeIter I) {
- NodeType* N = *I;
- return N->label().substr(0, 1);
- }
-
- static std::string getNodeAttributes(const NodeType* Node,
- const Trie<Payload>& T) {
- if (Node->data() != T.Empty)
- return "color=blue";
-
- return "";
- }
-
-};
-
-} // end of llvm namespace
-
-#endif // LLVM_ADT_TRIE_H
diff --git a/contrib/llvm/include/llvm/ADT/Triple.h b/contrib/llvm/include/llvm/ADT/Triple.h
index 7f7061a..408d70c 100644
--- a/contrib/llvm/include/llvm/ADT/Triple.h
+++ b/contrib/llvm/include/llvm/ADT/Triple.h
@@ -65,7 +65,9 @@ public:
nvptx, // NVPTX: 32-bit
nvptx64, // NVPTX: 64-bit
le32, // le32: generic little-endian 32-bit CPU (PNaCl / Emscripten)
- amdil // amdil: amd IL
+ amdil, // amdil: amd IL
+ spir, // SPIR: standard portable IR for OpenCL 32-bit version
+ spir64 // SPIR: standard portable IR for OpenCL 64-bit version
};
enum VendorType {
UnknownVendor,
@@ -74,7 +76,9 @@ public:
PC,
SCEI,
BGP,
- BGQ
+ BGQ,
+ Freescale,
+ IBM
};
enum OSType {
UnknownOS,
@@ -99,7 +103,8 @@ public:
RTEMS,
NativeClient,
CNK, // BG/P Compute-Node Kernel
- Bitrig
+ Bitrig,
+ AIX
};
enum EnvironmentType {
UnknownEnvironment,
@@ -109,7 +114,8 @@ public:
GNUEABIHF,
EABI,
MachO,
- ANDROIDEABI
+ Android,
+ ELF
};
private:
@@ -341,7 +347,7 @@ public:
/// to a known type.
void setEnvironment(EnvironmentType Kind);
- /// setTriple - Set all components to the new triple \arg Str.
+ /// setTriple - Set all components to the new triple \p Str.
void setTriple(const Twine &Str);
/// setArchName - Set the architecture (first) component of the
@@ -392,11 +398,10 @@ public:
/// @name Static helpers for IDs.
/// @{
- /// getArchTypeName - Get the canonical name for the \arg Kind
- /// architecture.
+ /// getArchTypeName - Get the canonical name for the \p Kind architecture.
static const char *getArchTypeName(ArchType Kind);
- /// getArchTypePrefix - Get the "prefix" canonical name for the \arg Kind
+ /// getArchTypePrefix - Get the "prefix" canonical name for the \p Kind
/// architecture. This is the prefix used by the architecture specific
/// builtins, and is suitable for passing to \see
/// Intrinsic::getIntrinsicForGCCBuiltin().
@@ -404,15 +409,13 @@ public:
/// \return - The architecture prefix, or 0 if none is defined.
static const char *getArchTypePrefix(ArchType Kind);
- /// getVendorTypeName - Get the canonical name for the \arg Kind
- /// vendor.
+ /// getVendorTypeName - Get the canonical name for the \p Kind vendor.
static const char *getVendorTypeName(VendorType Kind);
- /// getOSTypeName - Get the canonical name for the \arg Kind operating
- /// system.
+ /// getOSTypeName - Get the canonical name for the \p Kind operating system.
static const char *getOSTypeName(OSType Kind);
- /// getEnvironmentTypeName - Get the canonical name for the \arg Kind
+ /// getEnvironmentTypeName - Get the canonical name for the \p Kind
/// environment.
static const char *getEnvironmentTypeName(EnvironmentType Kind);
@@ -424,11 +427,6 @@ public:
/// architecture name (e.g., "x86").
static ArchType getArchTypeForLLVMName(StringRef Str);
- /// getArchTypeForDarwinArchName - Get the architecture type for a "Darwin"
- /// architecture name, for example as accepted by "gcc -arch" (see also
- /// arch(3)).
- static ArchType getArchTypeForDarwinArchName(StringRef Str);
-
/// @}
};
diff --git a/contrib/llvm/include/llvm/ADT/Twine.h b/contrib/llvm/include/llvm/ADT/Twine.h
index 9101df8..cc290d5 100644
--- a/contrib/llvm/include/llvm/ADT/Twine.h
+++ b/contrib/llvm/include/llvm/ADT/Twine.h
@@ -44,7 +44,7 @@ namespace llvm {
/// itself, and renders as an empty string. This can be returned from APIs to
/// effectively nullify any concatenations performed on the result.
///
- /// \b Implementation \n
+ /// \b Implementation
///
/// Given the nature of a Twine, it is not possible for the Twine's
/// concatenation method to construct interior nodes; the result must be
@@ -67,7 +67,7 @@ namespace llvm {
///
/// These invariants are checked by \see isValid().
///
- /// \b Efficiency Considerations \n
+ /// \b Efficiency Considerations
///
/// The Twine is designed to yield efficient and small code for common
/// situations. For this reason, the concat() method is inlined so that
@@ -303,37 +303,37 @@ namespace llvm {
LHS.character = static_cast<char>(Val);
}
- /// Construct a twine to print \arg Val as an unsigned decimal integer.
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
explicit Twine(unsigned Val)
: LHSKind(DecUIKind), RHSKind(EmptyKind) {
LHS.decUI = Val;
}
- /// Construct a twine to print \arg Val as a signed decimal integer.
+ /// Construct a twine to print \p Val as a signed decimal integer.
explicit Twine(int Val)
: LHSKind(DecIKind), RHSKind(EmptyKind) {
LHS.decI = Val;
}
- /// Construct a twine to print \arg Val as an unsigned decimal integer.
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
explicit Twine(const unsigned long &Val)
: LHSKind(DecULKind), RHSKind(EmptyKind) {
LHS.decUL = &Val;
}
- /// Construct a twine to print \arg Val as a signed decimal integer.
+ /// Construct a twine to print \p Val as a signed decimal integer.
explicit Twine(const long &Val)
: LHSKind(DecLKind), RHSKind(EmptyKind) {
LHS.decL = &Val;
}
- /// Construct a twine to print \arg Val as an unsigned decimal integer.
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
explicit Twine(const unsigned long long &Val)
: LHSKind(DecULLKind), RHSKind(EmptyKind) {
LHS.decULL = &Val;
}
- /// Construct a twine to print \arg Val as a signed decimal integer.
+ /// Construct a twine to print \p Val as a signed decimal integer.
explicit Twine(const long long &Val)
: LHSKind(DecLLKind), RHSKind(EmptyKind) {
LHS.decLL = &Val;
@@ -370,7 +370,7 @@ namespace llvm {
/// @name Numeric Conversions
/// @{
- // Construct a twine to print \arg Val as an unsigned hexadecimal integer.
+ /// Construct a twine to print \p Val as an unsigned hexadecimal integer.
static Twine utohexstr(const uint64_t &Val) {
Child LHS, RHS;
LHS.uHex = &Val;
@@ -447,17 +447,17 @@ namespace llvm {
/// The returned StringRef's size does not include the null terminator.
StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
- /// print - Write the concatenated string represented by this twine to the
- /// stream \arg OS.
+ /// Write the concatenated string represented by this twine to the
+ /// stream \p OS.
void print(raw_ostream &OS) const;
- /// dump - Dump the concatenated string represented by this twine to stderr.
+ /// Dump the concatenated string represented by this twine to stderr.
void dump() const;
- /// print - Write the representation of this twine to the stream \arg OS.
+ /// Write the representation of this twine to the stream \p OS.
void printRepr(raw_ostream &OS) const;
- /// dumpRepr - Dump the representation of this twine to stderr.
+ /// Dump the representation of this twine to stderr.
void dumpRepr() const;
/// @}
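A usage sketch for the decimal and hexadecimal constructors documented above; a Twine should be consumed within the full expression that builds it, since it may reference stack temporaries:

std::string Name = (llvm::Twine("tmp") + llvm::Twine(42)).str();  // "tmp42"
uint64_t Addr = 0xDEADBEEF;
std::string Hex = llvm::Twine::utohexstr(Addr).str();             // "deadbeef"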
diff --git a/contrib/llvm/include/llvm/ADT/ValueMap.h b/contrib/llvm/include/llvm/ADT/ValueMap.h
index f7e2551..d23fccf 100644
--- a/contrib/llvm/include/llvm/ADT/ValueMap.h
+++ b/contrib/llvm/include/llvm/ADT/ValueMap.h
@@ -80,8 +80,8 @@ class ValueMap {
typedef typename Config::ExtraData ExtraData;
MapT Map;
ExtraData Data;
- ValueMap(const ValueMap&); // DO NOT IMPLEMENT
- ValueMap& operator=(const ValueMap&); // DO NOT IMPLEMENT
+ ValueMap(const ValueMap&) LLVM_DELETED_FUNCTION;
+ ValueMap& operator=(const ValueMap&) LLVM_DELETED_FUNCTION;
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
diff --git a/contrib/llvm/include/llvm/ADT/ilist.h b/contrib/llvm/include/llvm/ADT/ilist.h
index ba9864a..7f5cd17 100644
--- a/contrib/llvm/include/llvm/ADT/ilist.h
+++ b/contrib/llvm/include/llvm/ADT/ilist.h
@@ -38,6 +38,7 @@
#ifndef LLVM_ADT_ILIST_H
#define LLVM_ADT_ILIST_H
+#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -331,8 +332,8 @@ class iplist : public Traits {
// No fundamental reason why iplist can't be copyable, but the default
// copy/copy-assign won't do.
- iplist(const iplist &); // do not implement
- void operator=(const iplist &); // do not implement
+ iplist(const iplist &) LLVM_DELETED_FUNCTION;
+ void operator=(const iplist &) LLVM_DELETED_FUNCTION;
public:
typedef NodeTy *pointer;
diff --git a/contrib/llvm/include/llvm/AddressingMode.h b/contrib/llvm/include/llvm/AddressingMode.h
new file mode 100644
index 0000000..70b3c05
--- /dev/null
+++ b/contrib/llvm/include/llvm/AddressingMode.h
@@ -0,0 +1,41 @@
+//===--------- llvm/AddressingMode.h - Addressing Mode -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file contains addressing mode data structures which are shared
+// between LSR and a number of places in the codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADDRESSING_MODE_H
+#define LLVM_ADDRESSING_MODE_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class GlobalValue;
+
+/// AddrMode - This represents an addressing mode of:
+/// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+/// If BaseGV is null, there is no BaseGV.
+/// If BaseOffs is zero, there is no base offset.
+/// If HasBaseReg is false, there is no base register.
+/// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
+/// no scale.
+///
+struct AddrMode {
+ GlobalValue *BaseGV;
+ int64_t BaseOffs;
+ bool HasBaseReg;
+ int64_t Scale;
+ AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
+};
+
+} // End llvm namespace
+
+#endif
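To make the field semantics concrete, a hedged sketch describing the address expression someGlobal + 8 + %base + 4*%index (SomeGV is a hypothetical GlobalValue*):

llvm::AddrMode AM;
AM.BaseGV = SomeGV;    // hypothetical GlobalValue* for 'someGlobal'
AM.BaseOffs = 8;       // constant displacement
AM.HasBaseReg = true;  // an (unscaled) base register participates
AM.Scale = 4;          // index register scaled by 4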
diff --git a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
index 674868a..be274af 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -45,7 +45,8 @@ namespace llvm {
class LoadInst;
class StoreInst;
class VAArgInst;
-class TargetData;
+class DataLayout;
+class TargetLibraryInfo;
class Pass;
class AnalysisUsage;
class MemTransferInst;
@@ -54,7 +55,8 @@ class DominatorTree;
class AliasAnalysis {
protected:
- const TargetData *TD;
+ const DataLayout *TD;
+ const TargetLibraryInfo *TLI;
private:
AliasAnalysis *AA; // Previous Alias Analysis to chain to.
@@ -73,7 +75,7 @@ protected:
public:
static char ID; // Class identification, replacement for typeinfo
- AliasAnalysis() : TD(0), AA(0) {}
+ AliasAnalysis() : TD(0), TLI(0), AA(0) {}
virtual ~AliasAnalysis(); // We want to be subclassed
/// UnknownSize - This is a special value which can be used with the
@@ -81,12 +83,17 @@ public:
/// know the sizes of the potential memory references.
static uint64_t const UnknownSize = ~UINT64_C(0);
- /// getTargetData - Return a pointer to the current TargetData object, or
- /// null if no TargetData object is available.
+ /// getDataLayout - Return a pointer to the current DataLayout object, or
+ /// null if no DataLayout object is available.
///
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
- /// getTypeStoreSize - Return the TargetData store size for the given type,
+ /// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo
+ /// object, or null if no TargetLibraryInfo object is available.
+ ///
+ const TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
+
+ /// getTypeStoreSize - Return the DataLayout store size for the given type,
/// if known, or a conservative value otherwise.
///
uint64_t getTypeStoreSize(Type *Ty);
@@ -187,6 +194,11 @@ public:
return isNoAlias(Location(V1, V1Size), Location(V2, V2Size));
}
+ /// isNoAlias - A convenience wrapper.
+ bool isNoAlias(const Value *V1, const Value *V2) {
+ return isNoAlias(Location(V1), Location(V2));
+ }
+
/// isMustAlias - A convenience wrapper.
bool isMustAlias(const Location &LocA, const Location &LocB) {
return alias(LocA, LocB) == MustAlias;
diff --git a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
index 95626d6..1e606c8 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
@@ -109,7 +109,6 @@ class AliasSet : public ilist_node<AliasSet> {
PointerRec *PtrList, **PtrListEnd; // Doubly linked list of nodes.
AliasSet *Forward; // Forwarding pointer.
- AliasSet *Next, *Prev; // Doubly linked list of AliasSets.
// All instructions without a specific address in this alias set.
std::vector<AssertingVH<Instruction> > UnknownInsts;
@@ -226,8 +225,8 @@ private:
AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) {
}
- AliasSet(const AliasSet &AS); // do not implement
- void operator=(const AliasSet &AS); // do not implement
+ AliasSet(const AliasSet &AS) LLVM_DELETED_FUNCTION;
+ void operator=(const AliasSet &AS) LLVM_DELETED_FUNCTION;
PointerRec *getSomePointer() const {
return PtrList;
diff --git a/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
index 006daa0..c0567da 100644
--- a/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -28,11 +28,14 @@ class raw_ostream;
///
/// This is a function analysis pass which provides information on the relative
/// probabilities of each "edge" in the function's CFG where such an edge is
-/// defined by a pair of basic blocks. The probability for a given block and
-/// a successor block are always relative to the probabilities of the other
-/// successor blocks. Another way of looking at it is that the probabilities
-/// for a given block B and each of its successors should sum to exactly
-/// one (100%).
+/// defined by a pair (PredBlock and an index in the successors). The
+/// probability of an edge from one block is always relative to the
+/// probabilities of other edges from the block. The probabilities of all edges
+/// from a block sum to exactly one (100%).
+/// We use a pair (PredBlock and an index in the successors) to uniquely
+/// identify an edge, since we can have multiple edges from Src to Dst.
+/// As an example, a switch may jump to Dst for both value 0 and
+/// value 10.
class BranchProbabilityInfo : public FunctionPass {
public:
static char ID;
@@ -52,6 +55,12 @@ public:
/// leaving the 'Src' block. The returned probability is never zero, and can
/// only be one if the source block has only one successor.
BranchProbability getEdgeProbability(const BasicBlock *Src,
+ unsigned IndexInSuccessors) const;
+
+ /// \brief Get the probability of going from Src to Dst.
+ ///
+ /// It returns the sum of all probabilities for edges from Src to Dst.
+ BranchProbability getEdgeProbability(const BasicBlock *Src,
const BasicBlock *Dst) const;
/// \brief Test if an edge is hot relative to other out-edges of the Src.
@@ -74,25 +83,34 @@ public:
raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
const BasicBlock *Dst) const;
- /// \brief Get the raw edge weight calculated for the block pair.
+ /// \brief Get the raw edge weight calculated for the edge.
///
/// This returns the raw edge weight. It is guaranteed to fall between 1 and
/// UINT32_MAX. Note that the raw edge weight is not meaningful in isolation.
/// This interface should be used very carefully, and primarily by routines that
/// are updating the analysis by later calling setEdgeWeight.
+ uint32_t getEdgeWeight(const BasicBlock *Src,
+ unsigned IndexInSuccessors) const;
+
+ /// \brief Get the raw edge weight calculated for the block pair.
+ ///
+ /// This returns the sum of all raw edge weights from Src to Dst.
+ /// It is guaranteed to fall between 1 and UINT32_MAX.
uint32_t getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const;
- /// \brief Set the raw edge weight for the block pair.
+ /// \brief Set the raw edge weight for a given edge.
///
- /// This allows a pass to explicitly set the edge weight for a block. It can
+ /// This allows a pass to explicitly set the edge weight for an edge. It can
/// be used when updating the CFG to update and preserve the branch
/// probability information. Read the implementation of how these edge
/// weights are calculated carefully before using!
- void setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst,
+ void setEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors,
uint32_t Weight);
private:
- typedef std::pair<const BasicBlock *, const BasicBlock *> Edge;
+ // Since we allow duplicate edges from one basic block to another, we use
+ // a pair (PredBlock and an index in the successors) to specify an edge.
+ typedef std::pair<const BasicBlock *, unsigned> Edge;
// Default weight value. Used when we don't have information about the edge.
// TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
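A usage sketch of the index-based edge API introduced above; succ_begin comes from llvm/Support/CFG.h, and the function name is ours:

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Support/CFG.h"

void edgeProbSketch(llvm::BranchProbabilityInfo &BPI,
                    const llvm::BasicBlock *BB) {
  // Probability of the first outgoing edge, well-defined even when several
  // switch cases branch to the same successor block.
  llvm::BranchProbability P0 = BPI.getEdgeProbability(BB, 0u);
  // Aggregate probability over every edge from BB to one particular block.
  const llvm::BasicBlock *Dst = *llvm::succ_begin(BB);
  llvm::BranchProbability Sum = BPI.getEdgeProbability(BB, Dst);
  (void)P0; (void)Sum;
}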
diff --git a/contrib/llvm/include/llvm/Analysis/CallGraph.h b/contrib/llvm/include/llvm/Analysis/CallGraph.h
index fb77da7..6a9ed31 100644
--- a/contrib/llvm/include/llvm/Analysis/CallGraph.h
+++ b/contrib/llvm/include/llvm/Analysis/CallGraph.h
@@ -185,9 +185,9 @@ private:
/// in the CalledFunctions array of this or other CallGraphNodes.
unsigned NumReferences;
- CallGraphNode(const CallGraphNode &); // DO NOT IMPLEMENT
- void operator=(const CallGraphNode &); // DO NOT IMPLEMENT
-
+ CallGraphNode(const CallGraphNode &) LLVM_DELETED_FUNCTION;
+ void operator=(const CallGraphNode &) LLVM_DELETED_FUNCTION;
+
void DropRef() { --NumReferences; }
void AddRef() { ++NumReferences; }
public:
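For reference, LLVM_DELETED_FUNCTION (from llvm/Support/Compiler.h) expands
approximately as below, so misuse of these members becomes a compile-time
error under C++11 instead of a link-time error:

    // Approximate definition; the real macro also checks GCC's C++0x mode.
    #if __has_feature(cxx_deleted_functions)
    #define LLVM_DELETED_FUNCTION = delete
    #else
    #define LLVM_DELETED_FUNCTION
    #endif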
diff --git a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
index 9b5e842..2889269 100644
--- a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
@@ -46,7 +46,7 @@ namespace llvm {
/// capture) return false. To search it, return true.
///
/// U->getUser() is always an Instruction.
- virtual bool shouldExplore(Use *U) = 0;
+ virtual bool shouldExplore(Use *U);
/// captured - Information about the pointer was captured by the user of
/// use U. Return true to stop the traversal or false to continue looking
diff --git a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
index 03c807c..4398faa 100644
--- a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
+++ b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -22,11 +22,11 @@ namespace llvm {
class BasicBlock;
class Function;
class Instruction;
- class TargetData;
+ class DataLayout;
class Value;
/// \brief Check whether an instruction is likely to be "free" when lowered.
- bool isInstructionFree(const Instruction *I, const TargetData *TD = 0);
+ bool isInstructionFree(const Instruction *I, const DataLayout *TD = 0);
/// \brief Check whether a call will lower to something small.
///
@@ -85,10 +85,10 @@ namespace llvm {
NumRets(0) {}
/// \brief Add information about a block to the current state.
- void analyzeBasicBlock(const BasicBlock *BB, const TargetData *TD = 0);
+ void analyzeBasicBlock(const BasicBlock *BB, const DataLayout *TD = 0);
/// \brief Add information about a function to the current state.
- void analyzeFunction(Function *F, const TargetData *TD = 0);
+ void analyzeFunction(Function *F, const DataLayout *TD = 0);
};
}
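A minimal sketch of a caller after the TargetData -> DataLayout rename; the
helper and its threshold are hypothetical, and NumInsts is assumed from the
parts of CodeMetrics not shown in this hunk:

    // Returns true if F looks "small" by raw instruction count.
    static bool isSmallFunction(Function *F, const DataLayout *TD) {
      CodeMetrics Metrics;
      Metrics.analyzeFunction(F, TD); // TD may be null, as the default allows
      return Metrics.NumInsts < 20;   // arbitrary illustrative threshold
    }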
diff --git a/contrib/llvm/include/llvm/Analysis/ConstantFolding.h b/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
index 2fdef5f..12e623e 100644
--- a/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -12,7 +12,7 @@
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file declares some additional folding routines that can make use of
-// TargetData information. These functions cannot go in VMCore due to library
+// DataLayout information. These functions cannot go in VMCore due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
@@ -24,7 +24,7 @@ namespace llvm {
class Constant;
class ConstantExpr;
class Instruction;
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
class Function;
class Type;
@@ -36,14 +36,14 @@ namespace llvm {
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
-Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0,
+Constant *ConstantFoldInstruction(Instruction *I, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
-/// using the specified TargetData. If successful, the constant result is
+/// using the specified DataLayout. If successful, the constant result is
/// result is returned, if not, null is returned.
Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -54,7 +54,7 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
///
Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
@@ -63,7 +63,7 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
///
Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *LHS, Constant *RHS,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
@@ -75,7 +75,7 @@ Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
-Constant *ConstantFoldLoadFromConstPtr(Constant *C, const TargetData *TD = 0);
+Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout *TD = 0);
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
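A sketch of the common client pattern for these entry points (hypothetical
helper; TD and TLI may be null, matching the default arguments above):

    // Fold whatever we can in BB; dead instructions are left for a later
    // DCE pass rather than erased while iterating.
    static void foldBlock(BasicBlock &BB, const DataLayout *TD,
                          const TargetLibraryInfo *TLI) {
      for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
        if (Constant *C = ConstantFoldInstruction(I, TD, TLI))
          I->replaceAllUsesWith(C);
    }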
diff --git a/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h
new file mode 100644
index 0000000..b4327ee
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h
@@ -0,0 +1,885 @@
+//===-- llvm/Analysis/DependenceAnalysis.h -------------------- -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DependenceAnalysis is an LLVM pass that analyses dependences between memory
+// accesses. Currently, it is an implementation of the approach described in
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+//
+// There's a single entry point that analyzes the dependence between a pair
+// of memory references in a function, returning either NULL, for no dependence,
+// or a more-or-less detailed description of the dependence between them.
+//
+// Please note that this is work in progress and the interface is subject to
+// change.
+//
+// Plausible changes:
+// Return a set of more precise dependences instead of just one dependence
+// summarizing all.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
+#define LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
+
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/SmallBitVector.h"
+
+namespace llvm {
+ class AliasAnalysis;
+ class Loop;
+ class LoopInfo;
+ class ScalarEvolution;
+ class SCEV;
+ class SCEVConstant;
+ class raw_ostream;
+
+ /// Dependence - This class represents a dependence between two memory
+ /// references in a function. It contains minimal information and
+ /// is used in the very common situation where the compiler is unable to
+ /// determine anything beyond the existence of a dependence; that is, it
+ /// represents a confused dependence (see also FullDependence). In most
+ /// cases (for output, flow, and anti dependences), the dependence implies
+ /// an ordering, where the source must precede the destination; in contrast,
+ /// input dependences are unordered.
+ class Dependence {
+ public:
+ Dependence(const Instruction *Source,
+ const Instruction *Destination) :
+ Src(Source), Dst(Destination) {}
+ virtual ~Dependence() {}
+
+ /// Dependence::DVEntry - Each level in the distance/direction vector
+ /// has a direction (or perhaps a union of several directions), and
+ /// perhaps a distance.
+ struct DVEntry {
+ enum { NONE = 0,
+ LT = 1,
+ EQ = 2,
+ LE = 3,
+ GT = 4,
+ NE = 5,
+ GE = 6,
+ ALL = 7 };
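+ // Note that the values form a bit set: LT|EQ == LE, LT|GT == NE,
+ // EQ|GT == GE, and LT|EQ|GT == ALL, so a direction is refined by
+ // masking bits out of Direction.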
+ unsigned char Direction : 3; // Init to ALL, then refine.
+ bool Scalar : 1; // Init to true.
+ bool PeelFirst : 1; // Peeling the first iteration will break the dependence.
+ bool PeelLast : 1; // Peeling the last iteration will break the dependence.
+ bool Splitable : 1; // Splitting the loop will break the dependence.
+ const SCEV *Distance; // NULL implies no distance available.
+ DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false),
+ PeelLast(false), Splitable(false), Distance(NULL) { }
+ };
+
+ /// getSrc - Returns the source instruction for this dependence.
+ ///
+ const Instruction *getSrc() const { return Src; }
+
+ /// getDst - Returns the destination instruction for this dependence.
+ ///
+ const Instruction *getDst() const { return Dst; }
+
+ /// isInput - Returns true if this is an input dependence.
+ ///
+ bool isInput() const;
+
+ /// isOutput - Returns true if this is an output dependence.
+ ///
+ bool isOutput() const;
+
+ /// isFlow - Returns true if this is a flow (aka true) dependence.
+ ///
+ bool isFlow() const;
+
+ /// isAnti - Returns true if this is an anti dependence.
+ ///
+ bool isAnti() const;
+
+ /// isOrdered - Returns true if dependence is Output, Flow, or Anti
+ ///
+ bool isOrdered() const { return isOutput() || isFlow() || isAnti(); }
+
+ /// isUnordered - Returns true if dependence is Input
+ ///
+ bool isUnordered() const { return isInput(); }
+
+ /// isLoopIndependent - Returns true if this is a loop-independent
+ /// dependence.
+ virtual bool isLoopIndependent() const { return true; }
+
+ /// isConfused - Returns true if this dependence is confused
+ /// (the compiler understands nothing and makes worst-case
+ /// assumptions).
+ virtual bool isConfused() const { return true; }
+
+ /// isConsistent - Returns true if this dependence is consistent
+ /// (occurs every time the source and destination are executed).
+ virtual bool isConsistent() const { return false; }
+
+ /// getLevels - Returns the number of common loops surrounding the
+ /// source and destination of the dependence.
+ virtual unsigned getLevels() const { return 0; }
+
+ /// getDirection - Returns the direction associated with a particular
+ /// level.
+ virtual unsigned getDirection(unsigned Level) const { return DVEntry::ALL; }
+
+ /// getDistance - Returns the distance (or NULL) associated with a
+ /// particular level.
+ virtual const SCEV *getDistance(unsigned Level) const { return NULL; }
+
+ /// isPeelFirst - Returns true if peeling the first iteration from
+ /// this loop will break this dependence.
+ virtual bool isPeelFirst(unsigned Level) const { return false; }
+
+ /// isPeelLast - Returns true if peeling the last iteration from
+ /// this loop will break this dependence.
+ virtual bool isPeelLast(unsigned Level) const { return false; }
+
+ /// isSplitable - Returns true if splitting this loop will break
+ /// the dependence.
+ virtual bool isSplitable(unsigned Level) const { return false; }
+
+ /// isScalar - Returns true if a particular level is scalar; that is,
+ /// if no subscript in the source or destination mentions the induction
+ /// variable associated with the loop at this level.
+ virtual bool isScalar(unsigned Level) const;
+
+ /// dump - For debugging purposes, dumps a dependence to OS.
+ ///
+ void dump(raw_ostream &OS) const;
+ private:
+ const Instruction *Src, *Dst;
+ friend class DependenceAnalysis;
+ };
+
+
+ /// FullDependence - This class represents a dependence between two memory
+ /// references in a function. It contains detailed information about the
+ /// dependence (direction vectors, etc) and is used when the compiler is
+ /// able to accurately analyze the interaction of the references; that is,
+ /// it is not a confused dependence (see Dependence). In most cases
+ /// (for output, flow, and anti dependences), the dependence implies an
+ /// ordering, where the source must precede the destination; in contrast,
+ /// input dependences are unordered.
+ class FullDependence : public Dependence {
+ public:
+ FullDependence(const Instruction *Src,
+ const Instruction *Dst,
+ bool LoopIndependent,
+ unsigned Levels);
+ ~FullDependence() {
+ delete [] DV; // DV holds one entry per level, allocated as an array
+ }
+
+ /// isLoopIndependent - Returns true if this is a loop-independent
+ /// dependence.
+ bool isLoopIndependent() const { return LoopIndependent; }
+
+ /// isConfused - Returns true if this dependence is confused
+ /// (the compiler understands nothing and makes worst-case
+ /// assumptions).
+ bool isConfused() const { return false; }
+
+ /// isConsistent - Returns true if this dependence is consistent
+ /// (occurs every time the source and destination are executed).
+ bool isConsistent() const { return Consistent; }
+
+ /// getLevels - Returns the number of common loops surrounding the
+ /// source and destination of the dependence.
+ unsigned getLevels() const { return Levels; }
+
+ /// getDirection - Returns the direction associated with a particular
+ /// level.
+ unsigned getDirection(unsigned Level) const;
+
+ /// getDistance - Returns the distance (or NULL) associated with a
+ /// particular level.
+ const SCEV *getDistance(unsigned Level) const;
+
+ /// isPeelFirst - Returns true if peeling the first iteration from
+ /// this loop will break this dependence.
+ bool isPeelFirst(unsigned Level) const;
+
+ /// isPeelLast - Returns true if peeling the last iteration from
+ /// this loop will break this dependence.
+ bool isPeelLast(unsigned Level) const;
+
+ /// isSplitable - Returns true if splitting the loop will break
+ /// the dependence.
+ bool isSplitable(unsigned Level) const;
+
+ /// isScalar - Returns true if a particular level is scalar; that is,
+ /// if no subscript in the source or destination mentions the induction
+ /// variable associated with the loop at this level.
+ bool isScalar(unsigned Level) const;
+ private:
+ unsigned short Levels;
+ bool LoopIndependent;
+ bool Consistent; // Init to true, then refine.
+ DVEntry *DV;
+ friend class DependenceAnalysis;
+ };
+
+
+ /// DependenceAnalysis - This class is the main dependence-analysis driver.
+ ///
+ class DependenceAnalysis : public FunctionPass {
+ void operator=(const DependenceAnalysis &) LLVM_DELETED_FUNCTION;
+ DependenceAnalysis(const DependenceAnalysis &) LLVM_DELETED_FUNCTION;
+ public:
+ /// depends - Tests for a dependence between the Src and Dst instructions.
+ /// Returns NULL if no dependence; otherwise, returns a Dependence (or a
+ /// FullDependence) with as much information as can be gleaned.
+ /// The flag PossiblyLoopIndependent should be set by the caller
+ /// if it appears that control flow can reach from Src to Dst
+ /// without traversing a loop back edge.
+ Dependence *depends(const Instruction *Src,
+ const Instruction *Dst,
+ bool PossiblyLoopIndependent);
+
+ /// getSplitIteration - Given a dependence that's splitable at some
+ /// particular level, return the iteration that should be used to split
+ /// the loop.
+ ///
+ /// Generally, the dependence analyzer will be used to build
+ /// a dependence graph for a function (basically a map from instructions
+ /// to dependences). Looking for cycles in the graph shows us loops
+ /// that cannot be trivially vectorized/parallelized.
+ ///
+ /// We can try to improve the situation by examining all the dependences
+ /// that make up the cycle, looking for ones we can break.
+ /// Sometimes, peeling the first or last iteration of a loop will break
+ /// dependences, and there are flags for those possibilities.
+ /// Sometimes, splitting a loop at some other iteration will do the trick,
+ /// and we've got a flag for that case. Rather than waste the space to
+ /// record the exact iteration (since we rarely know), we provide
+ /// a method that calculates the iteration. It's a drag that it must work
+ /// from scratch, but wonderful in that it's possible.
+ ///
+ /// Here's an example:
+ ///
+ /// for (i = 0; i < 10; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ ///
+ /// There's a loop-carried flow dependence from the store to the load,
+ /// found by the weak-crossing SIV test. The dependence will have a flag
+ /// indicating that the dependence can be broken by splitting the loop.
+ /// Calling getSplitIteration will return 5.
+ /// Splitting the loop, like so:
+ ///
+ /// for (i = 0; i <= 5; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ /// for (i = 6; i < 10; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ ///
+ /// breaks the dependence and allows us to vectorize/parallelize
+ /// both loops.
+ const SCEV *getSplitIteration(const Dependence *Dep, unsigned Level);
+
+ private:
+ AliasAnalysis *AA;
+ ScalarEvolution *SE;
+ LoopInfo *LI;
+ Function *F;
+
+ /// Subscript - This private struct represents a pair of subscripts from
+ /// a pair of potentially multi-dimensional array references. We use a
+ /// vector of them to guide subscript partitioning.
+ struct Subscript {
+ const SCEV *Src;
+ const SCEV *Dst;
+ enum ClassificationKind { ZIV, SIV, RDIV, MIV, NonLinear } Classification;
+ SmallBitVector Loops;
+ SmallBitVector GroupLoops;
+ SmallBitVector Group;
+ };
+
+ struct CoefficientInfo {
+ const SCEV *Coeff;
+ const SCEV *PosPart;
+ const SCEV *NegPart;
+ const SCEV *Iterations;
+ };
+
+ struct BoundInfo {
+ const SCEV *Iterations;
+ const SCEV *Upper[8];
+ const SCEV *Lower[8];
+ unsigned char Direction;
+ unsigned char DirSet;
+ };
+
+ /// Constraint - This private class represents a constraint, as defined
+ /// in the paper
+ ///
+ /// Practical Dependence Testing
+ /// Goff, Kennedy, Tseng
+ /// PLDI 1991
+ ///
+ /// There are 5 kinds of constraint, in a hierarchy.
+ /// 1) Any - indicates no constraint, any dependence is possible.
+ /// 2) Line - A line ax + by = c, where a, b, and c are parameters,
+ /// representing the dependence equation.
+ /// 3) Distance - The value d of the dependence distance.
+ /// 4) Point - A point <x, y> representing the dependence from
+ /// iteration x to iteration y.
+ /// 5) Empty - No dependence is possible.
+ class Constraint {
+ private:
+ enum ConstraintKind { Empty, Point, Distance, Line, Any } Kind;
+ ScalarEvolution *SE;
+ const SCEV *A;
+ const SCEV *B;
+ const SCEV *C;
+ const Loop *AssociatedLoop;
+ public:
+ /// isEmpty - Return true if the constraint is of kind Empty.
+ bool isEmpty() const { return Kind == Empty; }
+
+ /// isPoint - Return true if the constraint is of kind Point.
+ bool isPoint() const { return Kind == Point; }
+
+ /// isDistance - Return true if the constraint is of kind Distance.
+ bool isDistance() const { return Kind == Distance; }
+
+ /// isLine - Return true if the constraint is of kind Line.
+ /// Since Distances can also be represented as Lines, we also return
+ /// true if the constraint is of kind Distance.
+ bool isLine() const { return Kind == Line || Kind == Distance; }
+
+ /// isAny - Return true if the constraint is of kind Any.
+ bool isAny() const { return Kind == Any; }
+
+ /// getX - If constraint is a point <X, Y>, returns X.
+ /// Otherwise assert.
+ const SCEV *getX() const;
+
+ /// getY - If constraint is a point <X, Y>, returns Y.
+ /// Otherwise assert.
+ const SCEV *getY() const;
+
+ /// getA - If constraint is a line AX + BY = C, returns A.
+ /// Otherwise assert.
+ const SCEV *getA() const;
+
+ /// getB - If constraint is a line AX + BY = C, returns B.
+ /// Otherwise assert.
+ const SCEV *getB() const;
+
+ /// getC - If constraint is a line AX + BY = C, returns C.
+ /// Otherwise assert.
+ const SCEV *getC() const;
+
+ /// getD - If constraint is a distance, returns D.
+ /// Otherwise assert.
+ const SCEV *getD() const;
+
+ /// getAssociatedLoop - Returns the loop associated with this constraint.
+ const Loop *getAssociatedLoop() const;
+
+ /// setPoint - Change a constraint to Point.
+ void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop);
+
+ /// setLine - Change a constraint to Line.
+ void setLine(const SCEV *A, const SCEV *B,
+ const SCEV *C, const Loop *CurrentLoop);
+
+ /// setDistance - Change a constraint to Distance.
+ void setDistance(const SCEV *D, const Loop *CurrentLoop);
+
+ /// setEmpty - Change a constraint to Empty.
+ void setEmpty();
+
+ /// setAny - Change a constraint to Any.
+ void setAny(ScalarEvolution *SE);
+
+ /// dump - For debugging purposes. Dumps the constraint
+ /// out to OS.
+ void dump(raw_ostream &OS) const;
+ };
+
+
+ /// establishNestingLevels - Examines the loop nesting of the Src and Dst
+ /// instructions and establishes their shared loops. Sets the variables
+ /// CommonLevels, SrcLevels, and MaxLevels.
+ /// The source and destination instructions needn't be contained in the same
+ /// loop. The routine establishNestingLevels finds the level of the most deeply
+ /// nested loop that contains them both, CommonLevels. An instruction that's
+ /// not contained in a loop is at level = 0. MaxLevels is equal to the level
+ /// of the source plus the level of the destination, minus CommonLevels.
+ /// This lets us allocate vectors MaxLevels in length, with room for every
+ /// distinct loop referenced in both the source and destination subscripts.
+ /// The variable SrcLevels is the nesting depth of the source instruction.
+ /// It's used to help calculate distinct loops referenced by the destination.
+ /// Here's the map from loops to levels:
+ /// 0 - unused
+ /// 1 - outermost common loop
+ /// ... - other common loops
+ /// CommonLevels - innermost common loop
+ /// ... - loops containing Src but not Dst
+ /// SrcLevels - innermost loop containing Src but not Dst
+ /// ... - loops containing Dst but not Src
+ /// MaxLevels - innermost loop containing Dst but not Src
+ /// Consider the following code fragment:
+ /// for (a = ...) {
+ /// for (b = ...) {
+ /// for (c = ...) {
+ /// for (d = ...) {
+ /// A[] = ...;
+ /// }
+ /// }
+ /// for (e = ...) {
+ /// for (f = ...) {
+ /// for (g = ...) {
+ /// ... = A[];
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ /// If we're looking at the possibility of a dependence between the store
+ /// to A (the Src) and the load from A (the Dst), we'll note that they
+ /// have 2 loops in common, so CommonLevels will equal 2 and the direction
+ /// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
+ /// A map from loop names to level indices would look like
+ /// a - 1
+ /// b - 2 = CommonLevels
+ /// c - 3
+ /// d - 4 = SrcLevels
+ /// e - 5
+ /// f - 6
+ /// g - 7 = MaxLevels
+ void establishNestingLevels(const Instruction *Src,
+ const Instruction *Dst);
+
+ unsigned CommonLevels, SrcLevels, MaxLevels;
+
+ /// mapSrcLoop - Given one of the loops containing the source, return
+ /// its level index in our numbering scheme.
+ unsigned mapSrcLoop(const Loop *SrcLoop) const;
+
+ /// mapDstLoop - Given one of the loops containing the destination,
+ /// return its level index in our numbering scheme.
+ unsigned mapDstLoop(const Loop *DstLoop) const;
+
+ /// isLoopInvariant - Returns true if Expression is loop invariant
+ /// in LoopNest.
+ bool isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const;
+
+ /// removeMatchingExtensions - Examines a subscript pair.
+ /// If the source and destination are identically sign (or zero)
+ /// extended, it strips off the extension in an effort to
+ /// simplify the actual analysis.
+ void removeMatchingExtensions(Subscript *Pair);
+
+ /// collectCommonLoops - Finds the set of loops from the LoopNest that
+ /// have a level <= CommonLevels and are referred to by the SCEV Expression.
+ void collectCommonLoops(const SCEV *Expression,
+ const Loop *LoopNest,
+ SmallBitVector &Loops) const;
+
+ /// checkSrcSubscript - Examines the SCEV Src, returning true iff it's
+ /// linear. Collects the set of loops mentioned by Src.
+ bool checkSrcSubscript(const SCEV *Src,
+ const Loop *LoopNest,
+ SmallBitVector &Loops);
+
+ /// checkDstSubscript - Examines the SCEV Dst, returning true iff it's
+ /// linear. Collects the set of loops mentioned by Dst.
+ bool checkDstSubscript(const SCEV *Dst,
+ const Loop *LoopNest,
+ SmallBitVector &Loops);
+
+ /// isKnownPredicate - Compare X and Y using the predicate Pred.
+ /// Basically a wrapper for SCEV::isKnownPredicate,
+ /// but tries harder, especially in the presence of sign and zero
+ /// extensions and symbolics.
+ bool isKnownPredicate(ICmpInst::Predicate Pred,
+ const SCEV *X,
+ const SCEV *Y) const;
+
+ /// collectUpperBound - All subscripts are the same type (on my machine,
+ /// an i64). The loop bound may be a smaller type. collectUpperBound
+ /// finds the bound, if available, and zero extends it to the type T.
+ /// (I zero extend since the bound should always be >= 0.)
+ /// If no upper bound is available, return NULL.
+ const SCEV *collectUpperBound(const Loop *l, Type *T) const;
+
+ /// collectConstantUpperBound - Calls collectUpperBound(), then
+ /// attempts to cast it to SCEVConstant. If the cast fails,
+ /// returns NULL.
+ const SCEVConstant *collectConstantUpperBound(const Loop *l, Type *T) const;
+
+ /// classifyPair - Examines the subscript pair (the Src and Dst SCEVs)
+ /// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear.
+ /// Collects the associated loops in a set.
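+ /// Illustrative examples (terminology from Goff-Kennedy-Tseng): A[5] vs
+ /// A[6] is ZIV (no induction variable); A[i] vs A[i+1] is SIV; A[i] vs
+ /// A[j], with i and j from different loops, is RDIV; A[i+j] vs A[i] is MIV.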
+ Subscript::ClassificationKind classifyPair(const SCEV *Src,
+ const Loop *SrcLoopNest,
+ const SCEV *Dst,
+ const Loop *DstLoopNest,
+ SmallBitVector &Loops);
+
+ /// testZIV - Tests the ZIV subscript pair (Src and Dst) for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// If the dependence isn't proven to exist,
+ /// marks the Result as inconsistent.
+ bool testZIV(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const;
+
+ /// testSIV - Tests the SIV subscript pair (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*j], where
+ /// i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction vector entry and, when possible,
+ /// the distance vector entry.
+ /// If the dependence isn't proven to exist,
+ /// marks the Result as inconsistent.
+ bool testSIV(const SCEV *Src,
+ const SCEV *Dst,
+ unsigned &Level,
+ FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const;
+
+ /// testRDIV - Tests the RDIV subscript pair (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*j]
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// With minor algebra, this test can also be used for things like
+ /// [c1 + a1*i + a2*j][c2].
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Marks the Result as inconsistent.
+ bool testRDIV(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const;
+
+ /// testMIV - Tests the MIV subscript pair (Src and Dst) for dependence.
+ /// Returns true if dependence disproved.
+ /// Can sometimes refine direction vectors.
+ bool testMIV(const SCEV *Src,
+ const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const;
+
+ /// strongSIVtest - Tests the strong SIV subscript pair (Src and Dst)
+ /// for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction and distance.
+ bool strongSIVtest(const SCEV *Coeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// weakCrossingSIVtest - Tests the weak-crossing SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a*i] and [c2 - a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Sets consistent to false.
+ /// Marks the dependence as splitable.
+ bool weakCrossingSIVtest(const SCEV *SrcCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const;
+
+ /// exactSIVtest - Tests the SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Sets consistent to false.
+ bool exactSIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// weakZeroSrcSIVtest - Tests the weak-zero SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1] and [c2 + a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant. See also weakZeroDstSIVtest.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Sets consistent to false.
+ /// If loop peeling will break the dependence, mark appropriately.
+ bool weakZeroSrcSIVtest(const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// weakZeroDstSIVtest - Tests the weak-zero SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a*i] and [c2],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant. See also weakZeroSrcSIVtest.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Sets consistent to false.
+ /// If loop peeling will break the dependence, mark appropriately.
+ bool weakZeroDstSIVtest(const SCEV *SrcCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// exactRDIVtest - Tests the RDIV subscript pair for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + b*j],
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a and b are constants.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Works in some cases that symbolicRDIVtest doesn't,
+ /// and vice versa.
+ bool exactRDIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *SrcLoop,
+ const Loop *DstLoop,
+ FullDependence &Result) const;
+
+ /// symbolicRDIVtest - Tests the RDIV subscript pair for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + b*j],
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a and b are constants.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Works in some cases that exactRDIVtest doesn't,
+ /// and vice versa. Can also be used as a backup for
+ /// ordinary SIV tests.
+ bool symbolicRDIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *SrcLoop,
+ const Loop *DstLoop) const;
+
+ /// gcdMIVtest - Tests an MIV subscript pair for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Can sometimes disprove the equal direction for 1 or more loops.
+ /// Can handle some symbolics that even the SIV tests don't get,
+ /// so we use it as a backup for everything.
+ bool gcdMIVtest(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const;
+
+ /// banerjeeMIVtest - Tests an MIV subscript pair for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Computes directions.
+ bool banerjeeMIVtest(const SCEV *Src,
+ const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const;
+
+ /// collectCoeffInfo - Walks through the subscript,
+ /// collecting each coefficient and the associated loop bounds,
+ /// and recording the positive and negative parts for later use.
+ CoefficientInfo *collectCoeffInfo(const SCEV *Subscript,
+ bool SrcFlag,
+ const SCEV *&Constant) const;
+
+ /// getPositivePart - X^+ = max(X, 0).
+ ///
+ const SCEV *getPositivePart(const SCEV *X) const;
+
+ /// getNegativePart - X^- = min(X, 0).
+ ///
+ const SCEV *getNegativePart(const SCEV *X) const;
+
+ /// getLowerBound - Looks through all the bounds info and
+ /// computes the lower bound given the current direction settings
+ /// at each level.
+ const SCEV *getLowerBound(BoundInfo *Bound) const;
+
+ /// getUpperBound - Looks through all the bounds info and
+ /// computes the upper bound given the current direction settings
+ /// at each level.
+ const SCEV *getUpperBound(BoundInfo *Bound) const;
+
+ /// exploreDirections - Hierarchically expands the direction vector
+ /// search space, combining the directions of discovered dependences
+ /// in the DirSet field of Bound. Returns the number of distinct
+ /// dependences discovered. If the dependence is disproved,
+ /// it will return 0.
+ unsigned exploreDirections(unsigned Level,
+ CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ const SmallBitVector &Loops,
+ unsigned &DepthExpanded,
+ const SCEV *Delta) const;
+
+ /// testBounds - Returns true iff the current bounds are plausible.
+ ///
+ bool testBounds(unsigned char DirKind,
+ unsigned Level,
+ BoundInfo *Bound,
+ const SCEV *Delta) const;
+
+ /// findBoundsALL - Computes the upper and lower bounds for level K
+ /// using the * direction. Records them in Bound.
+ void findBoundsALL(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsLT - Computes the upper and lower bounds for level K
+ /// using the < direction. Records them in Bound.
+ void findBoundsLT(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsGT - Computes the upper and lower bounds for level K
+ /// using the > direction. Records them in Bound.
+ void findBoundsGT(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsEQ - Computes the upper and lower bounds for level K
+ /// using the = direction. Records them in Bound.
+ void findBoundsEQ(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const;
+
+ /// intersectConstraints - Updates X with the intersection
+ /// of the Constraints X and Y. Returns true if X has changed.
+ bool intersectConstraints(Constraint *X,
+ const Constraint *Y);
+
+ /// propagate - Review the constraints, looking for opportunities
+ /// to simplify a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagate(const SCEV *&Src,
+ const SCEV *&Dst,
+ SmallBitVector &Loops,
+ SmallVector<Constraint, 4> &Constraints,
+ bool &Consistent);
+
+ /// propagateDistance - Attempt to propagate a distance
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagateDistance(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint,
+ bool &Consistent);
+
+ /// propagatePoint - Attempt to propagate a point
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ bool propagatePoint(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint);
+
+ /// propagateLine - Attempt to propagate a line
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagateLine(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint,
+ bool &Consistent);
+
+ /// findCoefficient - Given a linear SCEV,
+ /// return the coefficient corresponding to specified loop.
+ /// If there isn't one, return the SCEV constant 0.
+ /// For example, given a*i + b*j + c*k, returning the coefficient
+ /// corresponding to the j loop would yield b.
+ const SCEV *findCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop) const;
+
+ /// zeroCoefficient - Given a linear SCEV,
+ /// return the SCEV given by zeroing out the coefficient
+ /// corresponding to the specified loop.
+ /// For example, given a*i + b*j + c*k, zeroing the coefficient
+ /// corresponding to the j loop would yield a*i + c*k.
+ const SCEV *zeroCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop) const;
+
+ /// addToCoefficient - Given a linear SCEV Expr,
+ /// return the SCEV given by adding some Value to the
+ /// coefficient corresponding to the specified TargetLoop.
+ /// For example, given a*i + b*j + c*k, adding 1 to the coefficient
+ /// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
+ const SCEV *addToCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop,
+ const SCEV *Value) const;
+
+ /// updateDirection - Update direction vector entry
+ /// based on the current constraint.
+ void updateDirection(Dependence::DVEntry &Level,
+ const Constraint &CurConstraint) const;
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ DependenceAnalysis() : FunctionPass(ID) {
+ initializeDependenceAnalysisPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F);
+ void releaseMemory();
+ void getAnalysisUsage(AnalysisUsage &) const;
+ void print(raw_ostream &, const Module * = 0) const;
+ }; // class DependenceAnalysis
+
+ /// createDependenceAnalysisPass - This creates an instance of the
+ /// DependenceAnalysis pass.
+ FunctionPass *createDependenceAnalysisPass();
+
+} // namespace llvm
+
+#endif
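A usage sketch for the new pass (illustrative; the helper is hypothetical and
ownership of the returned Dependence is assumed to rest with the caller):

    // Test a pair of memory accesses and inspect the result.
    static void checkPair(DependenceAnalysis &DA, Instruction *Src,
                          Instruction *Dst) {
      // PossiblyLoopIndependent = true: control flow may reach Dst from
      // Src without crossing a loop back edge.
      if (Dependence *D = DA.depends(Src, Dst, true)) {
        D->dump(errs());
        if (!D->isConfused())
          for (unsigned L = 1, E = D->getLevels(); L <= E; ++L)
            (void) D->getDirection(L); // per-level direction bits
        delete D;
      }
    }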
diff --git a/contrib/llvm/include/llvm/Analysis/Dominators.h b/contrib/llvm/include/llvm/Analysis/Dominators.h
index a1cc196..8940971 100644
--- a/contrib/llvm/include/llvm/Analysis/Dominators.h
+++ b/contrib/llvm/include/llvm/Analysis/Dominators.h
@@ -346,7 +346,7 @@ public:
DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }
- /// properlyDominates - Returns true iff this dominates N and this != N.
+ /// properlyDominates - Returns true iff A dominates B and A != B.
/// Note that this is not a constant time operation!
///
bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
diff --git a/contrib/llvm/include/llvm/Analysis/IVUsers.h b/contrib/llvm/include/llvm/Analysis/IVUsers.h
index 2bf79b9..9b98013 100644
--- a/contrib/llvm/include/llvm/Analysis/IVUsers.h
+++ b/contrib/llvm/include/llvm/Analysis/IVUsers.h
@@ -28,7 +28,7 @@ class IVUsers;
class ScalarEvolution;
class SCEV;
class IVUsers;
-class TargetData;
+class DataLayout;
/// IVStrideUse - Keep track of one use of a strided induction variable.
/// The Expr member keeps track of the expression, User is the actual user
@@ -123,7 +123,7 @@ class IVUsers : public LoopPass {
LoopInfo *LI;
DominatorTree *DT;
ScalarEvolution *SE;
- TargetData *TD;
+ DataLayout *TD;
SmallPtrSet<Instruction*,16> Processed;
/// IVUses - A list of all tracked IV uses of induction variable expressions
diff --git a/contrib/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm/include/llvm/Analysis/InlineCost.h
index 0cba135..a075db3 100644
--- a/contrib/llvm/include/llvm/Analysis/InlineCost.h
+++ b/contrib/llvm/include/llvm/Analysis/InlineCost.h
@@ -26,7 +26,7 @@
namespace llvm {
class CallSite;
- class TargetData;
+ class DataLayout;
namespace InlineConstants {
// Various magic constants used to adjust heuristics.
@@ -36,6 +36,9 @@ namespace llvm {
const int LastCallToStaticBonus = -15000;
const int ColdccPenalty = 2000;
const int NoreturnPenalty = 10000;
+ /// Do not inline functions which allocate this many bytes on the stack
+ /// when the caller is recursive.
+ const unsigned TotalAllocaSizeRecursiveCaller = 1024;
}
/// \brief Represents the cost of inlining a function.
@@ -101,13 +104,13 @@ namespace llvm {
/// InlineCostAnalyzer - Cost analyzer used by inliner.
class InlineCostAnalyzer {
- // TargetData if available, or null.
- const TargetData *TD;
+ // DataLayout if available, or null.
+ const DataLayout *TD;
public:
InlineCostAnalyzer(): TD(0) {}
- void setTargetData(const TargetData *TData) { TD = TData; }
+ void setDataLayout(const DataLayout *TData) { TD = TData; }
/// \brief Get an InlineCost object representing the cost of inlining this
/// callsite.
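A sketch of how an inliner might wire the module's DataLayout into the
analyzer after this rename (the pass below is hypothetical):

    struct MyInliner : public CallGraphSCCPass {
      static char ID;
      InlineCostAnalyzer CA;
      MyInliner() : CallGraphSCCPass(ID) {}
      virtual bool runOnSCC(CallGraphSCC &SCC) {
        // DataLayout is an immutable pass, so it may or may not be present.
        CA.setDataLayout(getAnalysisIfAvailable<DataLayout>());
        return false; // wiring only; no IR changes in this sketch
      }
    };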
diff --git a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
index 152e885..e561e37 100644
--- a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -24,7 +24,7 @@ namespace llvm {
class ArrayRef;
class DominatorTree;
class Instruction;
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
class Type;
class Value;
@@ -32,122 +32,122 @@ namespace llvm {
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyMulInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifySDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyUDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyFDivInst - Given operands for an FDiv, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyFDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyFDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifySRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyURemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyFRemInst - Given operands for an FRem, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyFRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyFRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyShlInst - Given operands for a Shl, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyLShrInst - Given operands for a LShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyAShrInst - Given operands for a AShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyAndInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyOrInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyXorInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD = 0,
+ Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
@@ -155,13 +155,13 @@ namespace llvm {
/// can fold the result. If not, this returns null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyTruncInst - Given operands for an TruncInst, see if we can fold
/// the result. If not, this returns null.
- Value *SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD = 0,
+ Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
@@ -171,20 +171,20 @@ namespace llvm {
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
- Value *SimplifyInstruction(Instruction *I, const TargetData *TD = 0,
+ Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
@@ -198,7 +198,7 @@ namespace llvm {
///
/// The function returns true if any simplifications were performed.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
@@ -209,7 +209,7 @@ namespace llvm {
/// of the users impacted. It returns true if any simplifications were
/// performed.
bool recursivelySimplifyInstruction(Instruction *I,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
} // end namespace llvm
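A sketch of the usual client loop for these entry points (hypothetical
helper; TD and TLI may be null, and dead instructions are left for DCE):

    static bool simplifyAll(Function &F, const DataLayout *TD,
                            const TargetLibraryInfo *TLI) {
      bool Changed = false;
      for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
        for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
          if (Value *V = SimplifyInstruction(I, TD, TLI)) {
            I->replaceAllUsesWith(V);
            Changed = true;
          }
      return Changed;
    }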
diff --git a/contrib/llvm/include/llvm/Analysis/IntervalPartition.h b/contrib/llvm/include/llvm/Analysis/IntervalPartition.h
index df7313f..bce84be 100644
--- a/contrib/llvm/include/llvm/Analysis/IntervalPartition.h
+++ b/contrib/llvm/include/llvm/Analysis/IntervalPartition.h
@@ -33,8 +33,8 @@ namespace llvm {
//
// IntervalPartition - This class builds and holds an "interval partition" for
// a function. This partition divides the control flow graph into a set of
-// maximal intervals, as defined with the properties above. Intuitively, a
-// BasicBlock is a (possibly nonexistent) loop with a "tail" of non looping
+// maximal intervals, as defined with the properties above. Intuitively, an
+// interval is a (possibly nonexistent) loop with a "tail" of non looping
// nodes following it.
//
class IntervalPartition : public FunctionPass {
diff --git a/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h b/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
index 065c230..197e94e 100644
--- a/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
@@ -19,18 +19,18 @@
namespace llvm {
class Constant;
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
class Value;
/// LazyValueInfo - This pass computes, caches, and vends lazy value constraint
/// information.
class LazyValueInfo : public FunctionPass {
- class TargetData *TD;
+ class DataLayout *TD;
class TargetLibraryInfo *TLI;
void *PImpl;
- LazyValueInfo(const LazyValueInfo&); // DO NOT IMPLEMENT.
- void operator=(const LazyValueInfo&); // DO NOT IMPLEMENT.
+ LazyValueInfo(const LazyValueInfo&) LLVM_DELETED_FUNCTION;
+ void operator=(const LazyValueInfo&) LLVM_DELETED_FUNCTION;
public:
static char ID;
LazyValueInfo() : FunctionPass(ID), PImpl(0) {
diff --git a/contrib/llvm/include/llvm/Analysis/Loads.h b/contrib/llvm/include/llvm/Analysis/Loads.h
index 5f0aefb..afc90c2 100644
--- a/contrib/llvm/include/llvm/Analysis/Loads.h
+++ b/contrib/llvm/include/llvm/Analysis/Loads.h
@@ -19,7 +19,7 @@
namespace llvm {
class AliasAnalysis;
-class TargetData;
+class DataLayout;
class MDNode;
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
@@ -27,7 +27,7 @@ class MDNode;
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD = 0);
+ unsigned Align, const DataLayout *TD = 0);
/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
/// the instruction before ScanFrom) checking to see if we have the value at
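A sketch of a guarded speculative load using the updated signature (the
wrapper is hypothetical; Align is in bytes and TD may be null):

    static bool canSpeculateLoad(Value *Ptr, Instruction *InsertPt,
                                 const DataLayout *TD) {
      return isSafeToLoadUnconditionally(Ptr, InsertPt, /*Align=*/1, TD);
    }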
diff --git a/contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h
deleted file mode 100644
index f195d27..0000000
--- a/contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h
+++ /dev/null
@@ -1,124 +0,0 @@
-//===- llvm/Analysis/LoopDependenceAnalysis.h --------------- -*- C++ -*---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// LoopDependenceAnalysis is an LLVM pass that analyses dependences in memory
-// accesses in loops.
-//
-// Please note that this is work in progress and the interface is subject to
-// change.
-//
-// TODO: adapt as interface progresses
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_LOOP_DEPENDENCE_ANALYSIS_H
-#define LLVM_ANALYSIS_LOOP_DEPENDENCE_ANALYSIS_H
-
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Support/Allocator.h"
-
-namespace llvm {
-
-class AliasAnalysis;
-class AnalysisUsage;
-class ScalarEvolution;
-class SCEV;
-class Value;
-class raw_ostream;
-
-class LoopDependenceAnalysis : public LoopPass {
- AliasAnalysis *AA;
- ScalarEvolution *SE;
-
- /// L - The loop we are currently analysing.
- Loop *L;
-
- /// TODO: doc
- enum DependenceResult { Independent = 0, Dependent = 1, Unknown = 2 };
-
- /// TODO: doc
- struct Subscript {
- /// TODO: Add distance, direction, breaking conditions, ...
- };
-
- /// DependencePair - Represents a data dependence relation between to memory
- /// reference instructions.
- struct DependencePair : public FastFoldingSetNode {
- Value *A;
- Value *B;
- DependenceResult Result;
- SmallVector<Subscript, 4> Subscripts;
-
- DependencePair(const FoldingSetNodeID &ID, Value *a, Value *b) :
- FastFoldingSetNode(ID), A(a), B(b), Result(Unknown), Subscripts() {}
- };
-
- /// findOrInsertDependencePair - Return true if a DependencePair for the
- /// given Values already exists, false if a new DependencePair had to be
- /// created. The third argument is set to the pair found or created.
- bool findOrInsertDependencePair(Value*, Value*, DependencePair*&);
-
- /// getLoops - Collect all loops of the loop nest L in which
- /// a given SCEV is variant.
- void getLoops(const SCEV*, DenseSet<const Loop*>*) const;
-
- /// isLoopInvariant - True if a given SCEV is invariant in all loops of the
- /// loop nest starting at the innermost loop L.
- bool isLoopInvariant(const SCEV*) const;
-
- /// isAffine - An SCEV is affine with respect to the loop nest starting at
- /// the innermost loop L if it is of the form A+B*X where A, B are invariant
- /// in the loop nest and X is a induction variable in the loop nest.
- bool isAffine(const SCEV*) const;
-
- /// TODO: doc
- bool isZIVPair(const SCEV*, const SCEV*) const;
- bool isSIVPair(const SCEV*, const SCEV*) const;
- DependenceResult analyseZIV(const SCEV*, const SCEV*, Subscript*) const;
- DependenceResult analyseSIV(const SCEV*, const SCEV*, Subscript*) const;
- DependenceResult analyseMIV(const SCEV*, const SCEV*, Subscript*) const;
- DependenceResult analyseSubscript(const SCEV*, const SCEV*, Subscript*) const;
- DependenceResult analysePair(DependencePair*) const;
-
-public:
- static char ID; // Class identification, replacement for typeinfo
- LoopDependenceAnalysis() : LoopPass(ID) {
- initializeLoopDependenceAnalysisPass(*PassRegistry::getPassRegistry());
- }
-
- /// isDependencePair - Check whether two values can possibly give rise to
- /// a data dependence: that is the case if both are instructions accessing
- /// memory and at least one of those accesses is a write.
- bool isDependencePair(const Value*, const Value*) const;
-
- /// depends - Return a boolean indicating if there is a data dependence
- /// between two instructions.
- bool depends(Value*, Value*);
-
- bool runOnLoop(Loop*, LPPassManager&);
- virtual void releaseMemory();
- virtual void getAnalysisUsage(AnalysisUsage&) const;
- void print(raw_ostream&, const Module* = 0) const;
-
-private:
- FoldingSet<DependencePair> Pairs;
- BumpPtrAllocator PairAllocator;
-}; // class LoopDependenceAnalysis
-
-// createLoopDependenceAnalysisPass - This creates an instance of the
-// LoopDependenceAnalysis pass.
-//
-LoopPass *createLoopDependenceAnalysisPass();
-
-} // namespace llvm
-
-#endif /* LLVM_ANALYSIS_LOOP_DEPENDENCE_ANALYSIS_H */
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
index eeb482d..c5d7b01 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
@@ -72,10 +72,9 @@ class LoopBase {
// Blocks - The list of blocks in this loop. First entry is the header node.
std::vector<BlockT*> Blocks;
- // DO NOT IMPLEMENT
- LoopBase(const LoopBase<BlockT, LoopT> &);
- // DO NOT IMPLEMENT
- const LoopBase<BlockT, LoopT>&operator=(const LoopBase<BlockT, LoopT> &);
+ LoopBase(const LoopBase<BlockT, LoopT> &) LLVM_DELETED_FUNCTION;
+ const LoopBase<BlockT, LoopT>&
+ operator=(const LoopBase<BlockT, LoopT> &) LLVM_DELETED_FUNCTION;
public:
/// Loop ctor - This creates an empty loop.
LoopBase() : ParentLoop(0) {}
@@ -416,8 +415,8 @@ class LoopInfoBase {
friend class LoopBase<BlockT, LoopT>;
friend class LoopInfo;
- void operator=(const LoopInfoBase &); // do not implement
- LoopInfoBase(const LoopInfo &); // do not implement
+ void operator=(const LoopInfoBase &) LLVM_DELETED_FUNCTION;
+ LoopInfoBase(const LoopInfo &) LLVM_DELETED_FUNCTION;
public:
LoopInfoBase() { }
~LoopInfoBase() { releaseMemory(); }
@@ -550,8 +549,8 @@ class LoopInfo : public FunctionPass {
LoopInfoBase<BasicBlock, Loop> LI;
friend class LoopBase<BasicBlock, Loop>;
- void operator=(const LoopInfo &); // do not implement
- LoopInfo(const LoopInfo &); // do not implement
+ void operator=(const LoopInfo &) LLVM_DELETED_FUNCTION;
+ LoopInfo(const LoopInfo &) LLVM_DELETED_FUNCTION;
public:
static char ID; // Pass identification, replacement for typeid
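
A note on the macro adopted in this and several of the following headers: LLVM_DELETED_FUNCTION, from llvm/Support/Compiler.h, expands to "= delete" under C++11 and to nothing otherwise, so older compilers fall back to the classic private-and-unimplemented idiom that the removed "DO NOT IMPLEMENT" comments documented by hand. A minimal sketch of the pattern, with a hypothetical class name:

  #include "llvm/Support/Compiler.h"

  class Uncopyable {
    // Copying is forbidden: a compile-time error under C++11 (= delete),
    // a link-time error on pre-C++11 compilers (declared, never defined).
    Uncopyable(const Uncopyable &) LLVM_DELETED_FUNCTION;
    void operator=(const Uncopyable &) LLVM_DELETED_FUNCTION;
  public:
    Uncopyable() {}
  };

The gain over the bare comment is that misuse is diagnosed by the compiler rather than by the linker, or not at all for in-class callers.
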
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
index c07fbf7..3bb96f9 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
@@ -145,7 +145,6 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
// Loop over the predecessors of the header node...
BlockT *Header = getHeader();
- typedef GraphTraits<BlockT*> BlockTraits;
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
index e674e74..a842898 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -27,7 +27,8 @@
namespace llvm {
class CallInst;
class PointerType;
-class TargetData;
+class DataLayout;
+class TargetLibraryInfo;
class Type;
class Value;
@@ -35,27 +36,33 @@ class Value;
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
-bool isAllocationFn(const Value *V, bool LookThroughBitCast = false);
+bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
-bool isNoAliasFn(const Value *V, bool LookThroughBitCast = false);
+bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
-bool isMallocLikeFn(const Value *V, bool LookThroughBitCast = false);
+bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
-bool isCallocLikeFn(const Value *V, bool LookThroughBitCast = false);
+bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
-bool isAllocLikeFn(const Value *V, bool LookThroughBitCast = false);
+bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// reallocates memory (such as realloc).
-bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false);
+bool isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
//===----------------------------------------------------------------------===//
@@ -65,36 +72,39 @@ bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false);
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
-const CallInst *extractMallocCall(const Value *I);
-static inline CallInst *extractMallocCall(Value *I) {
- return const_cast<CallInst*>(extractMallocCall((const Value*)I));
+const CallInst *extractMallocCall(const Value *I, const TargetLibraryInfo *TLI);
+static inline CallInst *extractMallocCall(Value *I,
+ const TargetLibraryInfo *TLI) {
+ return const_cast<CallInst*>(extractMallocCall((const Value*)I, TLI));
}
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL.
-const CallInst *isArrayMalloc(const Value *I, const TargetData *TD);
+const CallInst *isArrayMalloc(const Value *I, const DataLayout *TD,
+ const TargetLibraryInfo *TLI);
/// getMallocType - Returns the PointerType resulting from the malloc call.
/// The PointerType depends on the number of bitcast uses of the malloc call:
/// 0: PointerType is the malloc call's return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-PointerType *getMallocType(const CallInst *CI);
+PointerType *getMallocType(const CallInst *CI, const TargetLibraryInfo *TLI);
/// getMallocAllocatedType - Returns the Type allocated by malloc call.
/// The Type depends on the number of bitcast uses of the malloc call:
/// 0: PointerType is the malloc call's return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-Type *getMallocAllocatedType(const CallInst *CI);
+Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI);
/// getMallocArraySize - Returns the array size of a malloc call. If the
/// argument passed to malloc is a multiple of the size of the malloced type,
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
-Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
+Value *getMallocArraySize(CallInst *CI, const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
bool LookThroughSExt = false);
@@ -104,9 +114,10 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
-const CallInst *extractCallocCall(const Value *I);
-static inline CallInst *extractCallocCall(Value *I) {
- return const_cast<CallInst*>(extractCallocCall((const Value*)I));
+const CallInst *extractCallocCall(const Value *I, const TargetLibraryInfo *TLI);
+static inline CallInst *extractCallocCall(Value *I,
+ const TargetLibraryInfo *TLI) {
+ return const_cast<CallInst*>(extractCallocCall((const Value*)I, TLI));
}
@@ -115,10 +126,10 @@ static inline CallInst *extractCallocCall(Value *I) {
//
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
-const CallInst *isFreeCall(const Value *I);
+const CallInst *isFreeCall(const Value *I, const TargetLibraryInfo *TLI);
-static inline CallInst *isFreeCall(Value *I) {
- return const_cast<CallInst*>(isFreeCall((const Value*)I));
+static inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
+ return const_cast<CallInst*>(isFreeCall((const Value*)I, TLI));
}
@@ -130,8 +141,8 @@ static inline CallInst *isFreeCall(Value *I) {
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
/// byval arguments, and global variables.
-bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
- bool RoundToAlign = false);
+bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
+ const TargetLibraryInfo *TLI, bool RoundToAlign = false);
@@ -142,10 +153,12 @@ typedef std::pair<APInt, APInt> SizeOffsetType;
class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
- const TargetData *TD;
+ const DataLayout *TD;
+ const TargetLibraryInfo *TLI;
bool RoundToAlign;
unsigned IntTyBits;
APInt Zero;
+ SmallPtrSet<Instruction *, 8> SeenInsts;
APInt align(APInt Size, uint64_t Align);
@@ -154,8 +167,8 @@ class ObjectSizeOffsetVisitor
}
public:
- ObjectSizeOffsetVisitor(const TargetData *TD, LLVMContext &Context,
- bool RoundToAlign = false);
+ ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI,
+ LLVMContext &Context, bool RoundToAlign = false);
SizeOffsetType compute(Value *V);
@@ -200,10 +213,10 @@ class ObjectSizeOffsetEvaluator
typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
typedef SmallPtrSet<const Value*, 8> PtrSetTy;
- const TargetData *TD;
+ const DataLayout *TD;
+ const TargetLibraryInfo *TLI;
LLVMContext &Context;
BuilderTy Builder;
- ObjectSizeOffsetVisitor Visitor;
IntegerType *IntTy;
Value *Zero;
CacheMapTy CacheMap;
@@ -215,7 +228,8 @@ class ObjectSizeOffsetEvaluator
SizeOffsetEvalType compute_(Value *V);
public:
- ObjectSizeOffsetEvaluator(const TargetData *TD, LLVMContext &Context);
+ ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI,
+ LLVMContext &Context);
SizeOffsetEvalType compute(Value *V);
bool knownSize(SizeOffsetEvalType SizeOffset) {
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 7e049d6..a715eae 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -29,7 +29,7 @@ namespace llvm {
class Instruction;
class CallSite;
class AliasAnalysis;
- class TargetData;
+ class DataLayout;
class MemoryDependenceAnalysis;
class PredIteratorCache;
class DominatorTree;
@@ -323,7 +323,7 @@ namespace llvm {
/// Current AA implementation, just a cache.
AliasAnalysis *AA;
- TargetData *TD;
+ DataLayout *TD;
DominatorTree *DT;
OwningPtr<PredIteratorCache> PredCache;
public:
@@ -412,7 +412,7 @@ namespace llvm {
int64_t MemLocOffs,
unsigned MemLocSize,
const LoadInst *LI,
- const TargetData &TD);
+ const DataLayout &TD);
private:
MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
diff --git a/contrib/llvm/include/llvm/Analysis/PHITransAddr.h b/contrib/llvm/include/llvm/Analysis/PHITransAddr.h
index ff9a247..5a77fce 100644
--- a/contrib/llvm/include/llvm/Analysis/PHITransAddr.h
+++ b/contrib/llvm/include/llvm/Analysis/PHITransAddr.h
@@ -19,7 +19,7 @@
namespace llvm {
class DominatorTree;
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
/// PHITransAddr - An address value which tracks and handles phi translation.
@@ -37,7 +37,7 @@ class PHITransAddr {
Value *Addr;
/// TD - The target data we are playing with if known, otherwise null.
- const TargetData *TD;
+ const DataLayout *TD;
/// TLI - The target library info if known, otherwise null.
const TargetLibraryInfo *TLI;
@@ -45,7 +45,7 @@ class PHITransAddr {
/// InstInputs - The inputs for our symbolic address.
SmallVector<Instruction*, 4> InstInputs;
public:
- PHITransAddr(Value *addr, const TargetData *td) : Addr(addr), TD(td), TLI(0) {
+ PHITransAddr(Value *addr, const DataLayout *td) : Addr(addr), TD(td), TLI(0) {
// If the address is an instruction, the whole thing is considered an input.
if (Instruction *I = dyn_cast<Instruction>(Addr))
InstInputs.push_back(I);
diff --git a/contrib/llvm/include/llvm/Analysis/Passes.h b/contrib/llvm/include/llvm/Analysis/Passes.h
index a22bd12..27726f4 100644
--- a/contrib/llvm/include/llvm/Analysis/Passes.h
+++ b/contrib/llvm/include/llvm/Analysis/Passes.h
@@ -103,6 +103,14 @@ namespace llvm {
//===--------------------------------------------------------------------===//
//
+ // createProfileMetadataLoaderPass - This pass loads information from a
+ // profile dump file and sets branch weight metadata.
+ //
+ ModulePass *createProfileMetadataLoaderPass();
+ extern char &ProfileMetadataLoaderPassID;
+
+ //===--------------------------------------------------------------------===//
+ //
// createNoProfileInfoPass - This pass implements the default "no profile".
//
ImmutablePass *createNoProfileInfoPass();
@@ -172,11 +180,20 @@ namespace llvm {
//===--------------------------------------------------------------------===//
//
- // createLoopDependenceAnalysisPass - This creates an instance of the
- // LoopDependenceAnalysis pass.
+ // createDependenceAnalysisPass - This creates an instance of the
+ // DependenceAnalysis pass.
+ //
+ FunctionPass *createDependenceAnalysisPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createCostModelAnalysisPass - This creates an instance of the
+ // CostModelAnalysis pass.
//
- LoopPass *createLoopDependenceAnalysisPass();
+ FunctionPass *createCostModelAnalysisPass();
+ //===--------------------------------------------------------------------===//
+ //
// Minor pass prototypes, allowing us to expose them through bugpoint and
// analyze.
FunctionPass *createInstCountPass();
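
Like every other creator in this header, the new ones are meant to be handed straight to a pass manager. A minimal, hedged sketch using the pass manager of this era (the helper function is illustrative):

  #include "llvm/PassManager.h"
  #include "llvm/Analysis/Passes.h"
  #include "llvm/Module.h"

  void runNewAnalyses(llvm::Module &M) {            // illustrative helper
    llvm::PassManager PM;
    PM.add(llvm::createDependenceAnalysisPass());   // successor of the loop pass
    PM.add(llvm::createCostModelAnalysisPass());
    PM.run(M);                                      // schedules and runs both
  }
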
diff --git a/contrib/llvm/include/llvm/Analysis/ProfileDataLoader.h b/contrib/llvm/include/llvm/Analysis/ProfileDataLoader.h
new file mode 100644
index 0000000..9efbafc
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ProfileDataLoader.h
@@ -0,0 +1,139 @@
+//===- ProfileDataLoader.h - Load & convert profile info ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The ProfileDataLoader class is used to load profiling data from a dump file.
+// The ProfileDataT<FType, BType> class is used to store the mapping of this
+// data to control flow edges.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PROFILEDATALOADER_H
+#define LLVM_ANALYSIS_PROFILEDATALOADER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <string>
+
+namespace llvm {
+
+class ModulePass;
+class Function;
+class BasicBlock;
+
+// Helper for dumping edges to dbgs().
+raw_ostream& operator<<(raw_ostream &O, std::pair<const BasicBlock *,
+ const BasicBlock *> E);
+
+/// \brief The ProfileDataT<FType, BType> class is used to store the mapping of
+/// profiling data to control flow edges.
+///
+/// An edge is defined by its source and sink basic blocks.
+template<class FType, class BType>
+class ProfileDataT {
+public:
+ // The profiling information defines an Edge by its source and sink basic
+ // blocks.
+ typedef std::pair<const BType*, const BType*> Edge;
+
+private:
+ typedef DenseMap<Edge, unsigned> EdgeWeights;
+
+ /// \brief Count the number of times a transition between two blocks is
+ /// executed.
+ ///
+ /// As a special case, we also hold an edge from the null BasicBlock to the
+ /// entry block to indicate how many times the function was entered.
+ DenseMap<const FType*, EdgeWeights> EdgeInformation;
+
+public:
+ /// getFunction() - Returns the Function for an Edge.
+ static const FType *getFunction(Edge e) {
+ // e.first may be NULL
+ assert(((!e.first) || (e.first->getParent() == e.second->getParent()))
+ && "A ProfileData::Edge can not be between two functions");
+ assert(e.second && "A ProfileData::Edge must have a real sink");
+ return e.second->getParent();
+ }
+
+ /// getEdge() - Creates an Edge between two BasicBlocks.
+ static Edge getEdge(const BType *Src, const BType *Dest) {
+ return Edge(Src, Dest);
+ }
+
+ /// getEdgeWeight - Return the number of times that a given edge was
+ /// executed.
+ unsigned getEdgeWeight(Edge e) const {
+ const FType *f = getFunction(e);
+ assert((EdgeInformation.find(f) != EdgeInformation.end())
+ && "No profiling information for function");
+ EdgeWeights weights = EdgeInformation.find(f)->second;
+
+ assert((weights.find(e) != weights.end())
+ && "No profiling information for edge");
+ return weights.find(e)->second;
+ }
+
+ /// addEdgeWeight - Add 'weight' to the already stored execution count for
+ /// this edge.
+ void addEdgeWeight(Edge e, unsigned weight) {
+ EdgeInformation[getFunction(e)][e] += weight;
+ }
+};
+
+typedef ProfileDataT<Function, BasicBlock> ProfileData;
+//typedef ProfileDataT<MachineFunction, MachineBasicBlock> MachineProfileData;
+
+/// The ProfileDataLoader class is used to load raw profiling data from the
+/// dump file.
+class ProfileDataLoader {
+private:
+ /// The name of the file where the raw profiling data is stored.
+ const std::string &Filename;
+
+ /// A vector of the command line arguments used when the target program was
+ /// run to generate profiling data. One entry per program run.
+ SmallVector<std::string, 1> CommandLines;
+
+ /// The raw values for how many times each edge was traversed; values from
+ /// multiple program runs are accumulated.
+ SmallVector<unsigned, 32> EdgeCounts;
+
+public:
+ /// ProfileDataLoader ctor - Read the specified profiling data file, exiting
+ /// the program if the file is invalid or broken.
+ ProfileDataLoader(const char *ToolName, const std::string &Filename);
+
+ /// A special value used to represent the weight of an edge which has not
+ /// been counted yet.
+ static const unsigned Uncounted;
+
+ /// getNumExecutions - Return the number of times the target program was run
+ /// to generate this profiling data.
+ unsigned getNumExecutions() const { return CommandLines.size(); }
+
+ /// getExecution - Return the command line parameters used to generate the
+ /// i'th set of profiling data.
+ const std::string &getExecution(unsigned i) const { return CommandLines[i]; }
+
+ const std::string &getFileName() const { return Filename; }
+
+ /// getRawEdgeCounts - Return the raw profiling data; this is just a list of
+ /// numbers with no mappings to edges.
+ ArrayRef<unsigned> getRawEdgeCounts() const { return EdgeCounts; }
+};
+
+/// createProfileMetadataLoaderPass - This function returns a Pass that loads
+/// the profiling information for the module from the specified filename.
+ModulePass *createProfileMetadataLoaderPass(const std::string &Filename);
+
+} // End llvm namespace
+
+#endif
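
To make the Edge conventions above concrete, a small hedged sketch follows; Entry and Exit stand for two BasicBlocks of the same Function obtained elsewhere, and the helper name is illustrative:

  #include "llvm/Analysis/ProfileDataLoader.h"
  #include "llvm/BasicBlock.h"

  using namespace llvm;

  void recordOneRun(ProfileData &PD, const BasicBlock *Entry,
                    const BasicBlock *Exit) {
    // A null source marks the virtual edge into the entry block, i.e. one
    // invocation of the function.
    PD.addEdgeWeight(ProfileData::getEdge(0, Entry), 1);
    PD.addEdgeWeight(ProfileData::getEdge(Entry, Exit), 1);
    // getEdgeWeight asserts if an edge was never recorded, so only query
    // edges known to be present.
    unsigned W = PD.getEdgeWeight(ProfileData::getEdge(Entry, Exit)); // == 1
    (void)W;
  }

Note that getFunction() asserts that both blocks of a non-null-source edge share a parent, so an Edge can never straddle two functions.
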
diff --git a/contrib/llvm/include/llvm/Analysis/ProfileDataTypes.h b/contrib/llvm/include/llvm/Analysis/ProfileDataTypes.h
new file mode 100644
index 0000000..1be15e0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ProfileDataTypes.h
@@ -0,0 +1,39 @@
+/*===-- ProfileDataTypes.h - Profiling info shared constants --------------===*\
+|*
+|* The LLVM Compiler Infrastructure
+|*
+|* This file is distributed under the University of Illinois Open Source
+|* License. See LICENSE.TXT for details.
+|*
+|*===----------------------------------------------------------------------===*|
+|*
+|* This file defines constants shared by the various different profiling
+|* runtime libraries and the LLVM C++ profile metadata loader. It must be a
+|* C header because, at present, the profiling runtimes are written in C.
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_ANALYSIS_PROFILEDATATYPES_H
+#define LLVM_ANALYSIS_PROFILEDATATYPES_H
+
+/* Included by libprofile. */
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* TODO: Strip out unused entries once ProfileInfo etc has been removed. */
+enum ProfilingType {
+ ArgumentInfo = 1, /* The command line argument block */
+ FunctionInfo = 2, /* Function profiling information */
+ BlockInfo = 3, /* Block profiling information */
+ EdgeInfo = 4, /* Edge profiling information */
+ PathInfo = 5, /* Path profiling information */
+ BBTraceInfo = 6, /* Basic block trace information */
+ OptEdgeInfo = 7 /* Edge profiling information, optimal version */
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* LLVM_ANALYSIS_PROFILEDATATYPES_H */
diff --git a/contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h b/contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h
index 6b4ac85..45aab5b 100644
--- a/contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h
+++ b/contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h
@@ -27,15 +27,7 @@ enum ProfilingStorageType {
ProfilingHash = 2
};
-enum ProfilingType {
- ArgumentInfo = 1, /* The command line argument block */
- FunctionInfo = 2, /* Function profiling information */
- BlockInfo = 3, /* Block profiling information */
- EdgeInfo = 4, /* Edge profiling information */
- PathInfo = 5, /* Path profiling information */
- BBTraceInfo = 6, /* Basic block trace information */
- OptEdgeInfo = 7 /* Edge profiling information, optimal version */
-};
+#include "llvm/Analysis/ProfileDataTypes.h"
/*
* The header for tables that map path numbers to path counters.
diff --git a/contrib/llvm/include/llvm/Analysis/RegionInfo.h b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
index 188d11c..48d7ee6 100644
--- a/contrib/llvm/include/llvm/Analysis/RegionInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
@@ -54,10 +54,8 @@ class FlatIt {};
/// @brief A RegionNode represents a subregion or a BasicBlock that is part of a
/// Region.
class RegionNode {
- // DO NOT IMPLEMENT
- RegionNode(const RegionNode &);
- // DO NOT IMPLEMENT
- const RegionNode &operator=(const RegionNode &);
+ RegionNode(const RegionNode &) LLVM_DELETED_FUNCTION;
+ const RegionNode &operator=(const RegionNode &) LLVM_DELETED_FUNCTION;
protected:
/// This is the entry basic block that starts this region node. If this is a
@@ -203,10 +201,8 @@ inline Region* RegionNode::getNodeAs<Region>() const {
/// tree, the second one creates a graphical representation using graphviz.
class Region : public RegionNode {
friend class RegionInfo;
- // DO NOT IMPLEMENT
- Region(const Region &);
- // DO NOT IMPLEMENT
- const Region &operator=(const Region &);
+ Region(const Region &) LLVM_DELETED_FUNCTION;
+ const Region &operator=(const Region &) LLVM_DELETED_FUNCTION;
// Information necessary to manage this Region.
RegionInfo* RI;
@@ -473,27 +469,6 @@ public:
const_iterator end() const { return children.end(); }
//@}
- /// @name BasicBlock Node Iterators
- ///
- /// These iterators iterate over all BasicBlock RegionNodes that are
- /// contained in this Region. The iterator also iterates over BasicBlock
- /// RegionNodes that are elements of a subregion of this Region. It is
- /// therefore called a flat iterator.
- //@{
- typedef df_iterator<RegionNode*, SmallPtrSet<RegionNode*, 8>, false,
- GraphTraits<FlatIt<RegionNode*> > > block_node_iterator;
-
- typedef df_iterator<const RegionNode*, SmallPtrSet<const RegionNode*, 8>,
- false, GraphTraits<FlatIt<const RegionNode*> > >
- const_block_node_iterator;
-
- block_node_iterator block_node_begin();
- block_node_iterator block_node_end();
-
- const_block_node_iterator block_node_begin() const;
- const_block_node_iterator block_node_end() const;
- //@}
-
/// @name BasicBlock Iterators
///
/// These iterators iterate over all BasicBlocks that are contained in this
@@ -586,10 +561,8 @@ class RegionInfo : public FunctionPass {
typedef DenseMap<BasicBlock*, Region*> BBtoRegionMap;
typedef SmallPtrSet<Region*, 4> RegionSet;
- // DO NOT IMPLEMENT
- RegionInfo(const RegionInfo &);
- // DO NOT IMPLEMENT
- const RegionInfo &operator=(const RegionInfo &);
+ RegionInfo(const RegionInfo &) LLVM_DELETED_FUNCTION;
+ const RegionInfo &operator=(const RegionInfo &) LLVM_DELETED_FUNCTION;
DominatorTree *DT;
PostDominatorTree *PDT;
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
index c213ade..235adca0 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -40,7 +40,7 @@ namespace llvm {
class DominatorTree;
class Type;
class ScalarEvolution;
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
class LLVMContext;
class Loop;
@@ -70,8 +70,8 @@ namespace llvm {
unsigned short SubclassData;
private:
- SCEV(const SCEV &); // DO NOT IMPLEMENT
- void operator=(const SCEV &); // DO NOT IMPLEMENT
+ SCEV(const SCEV &) LLVM_DELETED_FUNCTION;
+ void operator=(const SCEV &) LLVM_DELETED_FUNCTION;
public:
/// NoWrapFlags are bitfield indices into SubclassData.
@@ -162,7 +162,6 @@ namespace llvm {
SCEVCouldNotCompute();
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVCouldNotCompute *S) { return true; }
static bool classof(const SCEV *S);
};
@@ -227,7 +226,7 @@ namespace llvm {
/// TD - The target data information for the target we are targeting.
///
- TargetData *TD;
+ DataLayout *TD;
/// TLI - The target library information for the target we are targeting.
///
@@ -874,6 +873,7 @@ namespace llvm {
virtual void releaseMemory();
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual void print(raw_ostream &OS, const Module* = 0) const;
+ virtual void verifyAnalysis() const;
private:
FoldingSet<SCEV> UniqueSCEVs;
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index ded1297..54db7d6 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -46,7 +46,6 @@ namespace llvm {
Type *getType() const { return V->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVConstant *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scConstant;
}
@@ -68,7 +67,6 @@ namespace llvm {
Type *getType() const { return Ty; }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVCastExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scTruncate ||
S->getSCEVType() == scZeroExtend ||
@@ -88,7 +86,6 @@ namespace llvm {
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVTruncateExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scTruncate;
}
@@ -106,7 +103,6 @@ namespace llvm {
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVZeroExtendExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scZeroExtend;
}
@@ -124,7 +120,6 @@ namespace llvm {
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVSignExtendExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scSignExtend;
}
@@ -166,7 +161,6 @@ namespace llvm {
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVNAryExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddExpr ||
S->getSCEVType() == scMulExpr ||
@@ -188,7 +182,6 @@ namespace llvm {
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVCommutativeExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddExpr ||
S->getSCEVType() == scMulExpr ||
@@ -223,7 +216,6 @@ namespace llvm {
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVAddExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddExpr;
}
@@ -242,7 +234,6 @@ namespace llvm {
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVMulExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scMulExpr;
}
@@ -274,7 +265,6 @@ namespace llvm {
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVUDivExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scUDivExpr;
}
@@ -358,7 +348,6 @@ namespace llvm {
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVAddRecExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddRecExpr;
}
@@ -380,7 +369,6 @@ namespace llvm {
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVSMaxExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scSMaxExpr;
}
@@ -402,7 +390,6 @@ namespace llvm {
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVUMaxExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scUMaxExpr;
}
@@ -449,7 +436,6 @@ namespace llvm {
Type *getType() const { return getValPtr()->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVUnknown *S) { return true; }
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scUnknown;
}
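
All of the removals in this file are the same change: the self-typed classof overload that always returned true is no longer required by the isa/dyn_cast machinery, so only the predicate on the base SCEV survives. A hedged sketch of a call site, which this cleanup leaves unchanged:

  #include "llvm/Analysis/ScalarEvolutionExpressions.h"

  // Illustrative helper: dyn_cast needs only classof(const SCEV *).
  static bool isAffineAddRec(const llvm::SCEV *S) {
    if (const llvm::SCEVAddRecExpr *AR =
            llvm::dyn_cast<llvm::SCEVAddRecExpr>(S))
      return AR->isAffine();   // {Start,+,Step} with exactly two operands
    return false;
  }
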
diff --git a/contrib/llvm/include/llvm/Analysis/SparsePropagation.h b/contrib/llvm/include/llvm/Analysis/SparsePropagation.h
index c3c2f4b..b758eca 100644
--- a/contrib/llvm/include/llvm/Analysis/SparsePropagation.h
+++ b/contrib/llvm/include/llvm/Analysis/SparsePropagation.h
@@ -130,9 +130,9 @@ class SparseSolver {
/// PHI nodes retriggered.
typedef std::pair<BasicBlock*,BasicBlock*> Edge;
std::set<Edge> KnownFeasibleEdges;
-
- SparseSolver(const SparseSolver&); // DO NOT IMPLEMENT
- void operator=(const SparseSolver&); // DO NOT IMPLEMENT
+
+ SparseSolver(const SparseSolver&) LLVM_DELETED_FUNCTION;
+ void operator=(const SparseSolver&) LLVM_DELETED_FUNCTION;
public:
explicit SparseSolver(AbstractLatticeFunction *Lattice)
: LatticeFunc(Lattice) {}
diff --git a/contrib/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
index e8d45f6..a857524 100644
--- a/contrib/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
@@ -22,7 +22,7 @@ namespace llvm {
class Value;
class Instruction;
class APInt;
- class TargetData;
+ class DataLayout;
class StringRef;
class MDNode;
@@ -37,27 +37,27 @@ namespace llvm {
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
- const TargetData *TD = 0, unsigned Depth = 0);
+ const DataLayout *TD = 0, unsigned Depth = 0);
void computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero);
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around ComputeMaskedBits.
void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
- const TargetData *TD = 0, unsigned Depth = 0);
+ const DataLayout *TD = 0, unsigned Depth = 0);
/// isPowerOfTwo - Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// type and vectors of integers. If 'OrZero' is set then returns true if the
/// given value is either a power of two or zero.
- bool isPowerOfTwo(Value *V, const TargetData *TD = 0, bool OrZero = false,
+ bool isPowerOfTwo(Value *V, const DataLayout *TD = 0, bool OrZero = false,
unsigned Depth = 0);
/// isKnownNonZero - Return true if the given value is known to be non-zero
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
- bool isKnownNonZero(Value *V, const TargetData *TD = 0, unsigned Depth = 0);
+ bool isKnownNonZero(Value *V, const DataLayout *TD = 0, unsigned Depth = 0);
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be
@@ -69,7 +69,7 @@ namespace llvm {
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(Value *V, const APInt &Mask,
- const TargetData *TD = 0, unsigned Depth = 0);
+ const DataLayout *TD = 0, unsigned Depth = 0);
/// ComputeNumSignBits - Return the number of times the sign bit of the
@@ -80,7 +80,7 @@ namespace llvm {
///
/// 'Op' must have a scalar integer type.
///
- unsigned ComputeNumSignBits(Value *Op, const TargetData *TD = 0,
+ unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = 0,
unsigned Depth = 0);
/// ComputeMultiple - This function computes the integer multiple of Base that
@@ -118,10 +118,10 @@ namespace llvm {
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const TargetData &TD);
+ const DataLayout &TD);
static inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
- const TargetData &TD) {
+ const DataLayout &TD) {
return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
}
@@ -143,10 +143,10 @@ namespace llvm {
/// being addressed. Note that the returned value has pointer type if the
/// specified value does. If the MaxLookup value is non-zero, it limits the
/// number of instructions to be stripped off.
- Value *GetUnderlyingObject(Value *V, const TargetData *TD = 0,
+ Value *GetUnderlyingObject(Value *V, const DataLayout *TD = 0,
unsigned MaxLookup = 6);
static inline const Value *
- GetUnderlyingObject(const Value *V, const TargetData *TD = 0,
+ GetUnderlyingObject(const Value *V, const DataLayout *TD = 0,
unsigned MaxLookup = 6) {
return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
}
@@ -156,7 +156,7 @@ namespace llvm {
/// multiple objects.
void GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
unsigned MaxLookup = 6);
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
@@ -182,7 +182,7 @@ namespace llvm {
/// However, this method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Value *V,
- const TargetData *TD = 0);
+ const DataLayout *TD = 0);
} // end namespace llvm
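
For reference, ComputeMaskedBits reports per-bit facts through two APInt masks of the value's bit width: a set bit in KnownZero (respectively KnownOne) is proven to be 0 (respectively 1). A hedged sketch of a call site; V is assumed to be an i32 Value, and passing a null DataLayout is legal, it merely weakens pointer reasoning:

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/ADT/APInt.h"

  // Illustrative helper: is the low bit of V provably zero (V is even)?
  bool lowBitKnownZero(llvm::Value *V) {
    llvm::APInt KnownZero(32, 0), KnownOne(32, 0);  // width must match V
    llvm::ComputeMaskedBits(V, KnownZero, KnownOne, /*TD=*/0);
    return KnownZero[0];
  }
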
diff --git a/contrib/llvm/include/llvm/Argument.h b/contrib/llvm/include/llvm/Argument.h
index e66075c..b1c2218 100644
--- a/contrib/llvm/include/llvm/Argument.h
+++ b/contrib/llvm/include/llvm/Argument.h
@@ -68,8 +68,8 @@ public:
/// attribute on it in its containing function.
bool hasNoCaptureAttr() const;
- /// hasSRetAttr - Return true if this argument has the sret attribute on it in
- /// its containing function.
+ /// hasStructRetAttr - Return true if this argument has the sret attribute on
+ /// it in its containing function.
bool hasStructRetAttr() const;
/// addAttr - Add a Attribute to an argument
@@ -81,7 +81,6 @@ public:
/// classof - Methods for support type inquiry through isa, cast, and
/// dyn_cast:
///
- static inline bool classof(const Argument *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == ArgumentVal;
}
diff --git a/contrib/llvm/include/llvm/Attributes.h b/contrib/llvm/include/llvm/Attributes.h
index 223aa00..a9c2d74 100644
--- a/contrib/llvm/include/llvm/Attributes.h
+++ b/contrib/llvm/include/llvm/Attributes.h
@@ -21,268 +21,280 @@
#include <string>
namespace llvm {
-class Type;
-
-namespace Attribute {
-/// We use this proxy POD type to allow constructing Attributes constants
-/// using initializer lists. Do not use this class directly.
-struct AttrConst {
- uint64_t v;
- AttrConst operator | (const AttrConst Attrs) const {
- AttrConst Res = {v | Attrs.v};
- return Res;
- }
- AttrConst operator ~ () const {
- AttrConst Res = {~v};
- return Res;
- }
-};
-} // namespace Attribute
+class AttrBuilder;
+class AttributesImpl;
+class LLVMContext;
+class Type;
/// Attributes - A bitset of attributes.
class Attributes {
- public:
- Attributes() : Bits(0) { }
- explicit Attributes(uint64_t Val) : Bits(Val) { }
- /*implicit*/ Attributes(Attribute::AttrConst Val) : Bits(Val.v) { }
- // This is a "safe bool() operator".
- operator const void *() const { return Bits ? this : 0; }
- bool isEmptyOrSingleton() const { return (Bits & (Bits - 1)) == 0; }
- bool operator == (const Attributes &Attrs) const {
- return Bits == Attrs.Bits;
+public:
+ /// Function parameters and results can have attributes to indicate how they
+ /// should be treated by optimizations and code generation. This enumeration
+ /// lists the attributes that can be associated with parameters, function
+ /// results or the function itself.
+ ///
+ /// Note that uwtable is about the ABI or the user mandating an entry in the
+ /// unwind table. The nounwind attribute is about an exception passing by the
+ /// function.
+ ///
+ /// In a theoretical system that uses tables for profiling and sjlj for
+ /// exceptions, they would be fully independent. In a normal system that uses
+ /// tables for both, the semantics are:
+ ///
+ /// nil = Needs an entry because an exception might pass by.
+ /// nounwind = No need for an entry
+ /// uwtable = Needs an entry because the ABI says so and because
+ /// an exception might pass by.
+ /// uwtable + nounwind = Needs an entry because the ABI says so.
+
+ enum AttrVal {
+ // IR-Level Attributes
+ None, ///< No attributes have been set
+ AddressSafety, ///< Address safety checking is on.
+ Alignment, ///< Alignment of parameter (5 bits)
+ ///< stored as log2 of alignment with +1 bias
+ ///< 0 means unaligned (different from align 1)
+ AlwaysInline, ///< inline=always
+ ByVal, ///< Pass structure by value
+ InlineHint, ///< Source said inlining was desirable
+ InReg, ///< Force argument to be passed in register
+ MinSize, ///< Function must be optimized for size first
+ Naked, ///< Naked function
+ Nest, ///< Nested function static chain
+ NoAlias, ///< Considered to not alias after call
+ NoCapture, ///< Function creates no aliases of pointer
+ NoImplicitFloat, ///< Disable implicit floating point insts
+ NoInline, ///< inline=never
+ NonLazyBind, ///< Function is called early and/or
+ ///< often, so lazy binding isn't worthwhile
+ NoRedZone, ///< Disable redzone
+ NoReturn, ///< Mark the function as not returning
+ NoUnwind, ///< Function doesn't unwind stack
+ OptimizeForSize, ///< opt_size
+ ReadNone, ///< Function does not access memory
+ ReadOnly, ///< Function only reads from memory
+ ReturnsTwice, ///< Function can return twice
+ SExt, ///< Sign extended before/after call
+ StackAlignment, ///< Alignment of stack for function (3 bits)
+ ///< stored as log2 of alignment with +1 bias 0
+ ///< means unaligned (different from
+ ///< alignstack=(1))
+ StackProtect, ///< Stack protection.
+ StackProtectReq, ///< Stack protection required.
+ StructRet, ///< Hidden pointer to structure to return
+ UWTable, ///< Function must be in an unwind table
+ ZExt ///< Zero extended before/after call
+ };
+private:
+ AttributesImpl *Attrs;
+ Attributes(AttributesImpl *A) : Attrs(A) {}
+public:
+ Attributes() : Attrs(0) {}
+ Attributes(const Attributes &A) : Attrs(A.Attrs) {}
+ Attributes &operator=(const Attributes &A) {
+ Attrs = A.Attrs;
+ return *this;
}
- bool operator != (const Attributes &Attrs) const {
- return Bits != Attrs.Bits;
+
+ /// get - Return a uniquified Attributes object. This takes the uniquified
+ /// value from the Builder and wraps it in the Attributes class.
+ static Attributes get(LLVMContext &Context, ArrayRef<AttrVal> Vals);
+ static Attributes get(LLVMContext &Context, AttrBuilder &B);
+
+ /// @brief Return true if the attribute is present.
+ bool hasAttribute(AttrVal Val) const;
+
+ /// @brief Return true if attributes exist
+ bool hasAttributes() const;
+
+ /// @brief Return true if the attributes are a non-null intersection.
+ bool hasAttributes(const Attributes &A) const;
+
+ /// @brief Returns the alignment field of an attribute as a byte alignment
+ /// value.
+ unsigned getAlignment() const;
+
+ /// @brief Returns the stack alignment field of an attribute as a byte
+ /// alignment value.
+ unsigned getStackAlignment() const;
+
+ /// @brief Parameter attributes that do not apply to vararg call arguments.
+ bool hasIncompatibleWithVarArgsAttrs() const {
+ return hasAttribute(Attributes::StructRet);
}
- Attributes operator | (const Attributes &Attrs) const {
- return Attributes(Bits | Attrs.Bits);
+
+ /// @brief Attributes that only apply to function parameters.
+ bool hasParameterOnlyAttrs() const {
+ return hasAttribute(Attributes::ByVal) ||
+ hasAttribute(Attributes::Nest) ||
+ hasAttribute(Attributes::StructRet) ||
+ hasAttribute(Attributes::NoCapture);
}
- Attributes operator & (const Attributes &Attrs) const {
- return Attributes(Bits & Attrs.Bits);
+
+ /// @brief Attributes that may be applied to the function itself. These cannot
+ /// be used on return values or function parameters.
+ bool hasFunctionOnlyAttrs() const {
+ return hasAttribute(Attributes::NoReturn) ||
+ hasAttribute(Attributes::NoUnwind) ||
+ hasAttribute(Attributes::ReadNone) ||
+ hasAttribute(Attributes::ReadOnly) ||
+ hasAttribute(Attributes::NoInline) ||
+ hasAttribute(Attributes::AlwaysInline) ||
+ hasAttribute(Attributes::OptimizeForSize) ||
+ hasAttribute(Attributes::StackProtect) ||
+ hasAttribute(Attributes::StackProtectReq) ||
+ hasAttribute(Attributes::NoRedZone) ||
+ hasAttribute(Attributes::NoImplicitFloat) ||
+ hasAttribute(Attributes::Naked) ||
+ hasAttribute(Attributes::InlineHint) ||
+ hasAttribute(Attributes::StackAlignment) ||
+ hasAttribute(Attributes::UWTable) ||
+ hasAttribute(Attributes::NonLazyBind) ||
+ hasAttribute(Attributes::ReturnsTwice) ||
+ hasAttribute(Attributes::AddressSafety) ||
+ hasAttribute(Attributes::MinSize);
}
- Attributes operator ^ (const Attributes &Attrs) const {
- return Attributes(Bits ^ Attrs.Bits);
+
+ bool operator==(const Attributes &A) const {
+ return Attrs == A.Attrs;
}
- Attributes &operator |= (const Attributes &Attrs) {
- Bits |= Attrs.Bits;
- return *this;
+ bool operator!=(const Attributes &A) const {
+ return Attrs != A.Attrs;
}
- Attributes &operator &= (const Attributes &Attrs) {
- Bits &= Attrs.Bits;
- return *this;
+
+ uint64_t Raw() const;
+
+ /// @brief Which attributes cannot be applied to a type.
+ static Attributes typeIncompatible(Type *Ty);
+
+ /// encodeLLVMAttributesForBitcode - This returns an integer containing an
+ /// encoding of all the LLVM attributes found in the given attribute bitset.
+ /// Any change to this encoding is a breaking change to bitcode compatibility.
+ static uint64_t encodeLLVMAttributesForBitcode(Attributes Attrs);
+
+ /// decodeLLVMAttributesForBitcode - This returns an attribute bitset
+ /// containing the LLVM attributes that have been decoded from the given
+ /// integer. This function must stay in sync with
+ /// 'encodeLLVMAttributesForBitcode'.
+ static Attributes decodeLLVMAttributesForBitcode(LLVMContext &C,
+ uint64_t EncodedAttrs);
+
+ /// getAsString - The set of Attributes set in Attributes is converted to a
+ /// string of equivalent mnemonics. This is, presumably, for writing out the
+ /// mnemonics for the assembly writer.
+ /// @brief Convert attribute bits to text
+ std::string getAsString() const;
+};
+
+//===----------------------------------------------------------------------===//
+/// AttrBuilder - This class is used in conjunction with the Attributes::get
+/// method to create an Attributes object. The object itself is uniquified. The
+/// Builder's value, however, is not. So this can be used as a quick way to test
+/// for equality, presence of attributes, etc.
+class AttrBuilder {
+ uint64_t Bits;
+public:
+ AttrBuilder() : Bits(0) {}
+ explicit AttrBuilder(uint64_t B) : Bits(B) {}
+ AttrBuilder(const Attributes &A) : Bits(A.Raw()) {}
+ AttrBuilder(const AttrBuilder &B) : Bits(B.Bits) {}
+
+ void clear() { Bits = 0; }
+
+ /// addAttribute - Add an attribute to the builder.
+ AttrBuilder &addAttribute(Attributes::AttrVal Val);
+
+ /// removeAttribute - Remove an attribute from the builder.
+ AttrBuilder &removeAttribute(Attributes::AttrVal Val);
+
+ /// addAttribute - Add the attributes from A to the builder.
+ AttrBuilder &addAttributes(const Attributes &A);
+
+ /// removeAttribute - Remove the attributes from A from the builder.
+ AttrBuilder &removeAttributes(const Attributes &A);
+
+ /// hasAttribute - Return true if the builder has the specified attribute.
+ bool hasAttribute(Attributes::AttrVal A) const;
+
+ /// hasAttributes - Return true if the builder has IR-level attributes.
+ bool hasAttributes() const;
+
+ /// hasAttributes - Return true if the builder has any attribute that is also
+ /// present in the specified attributes.
+ bool hasAttributes(const Attributes &A) const;
+
+ /// hasAlignmentAttr - Return true if the builder has an alignment attribute.
+ bool hasAlignmentAttr() const;
+
+ /// getAlignment - Retrieve the alignment attribute, if it exists.
+ uint64_t getAlignment() const;
+
+ /// getStackAlignment - Retrieve the stack alignment attribute, if it exists.
+ uint64_t getStackAlignment() const;
+
+ /// addAlignmentAttr - This turns an int alignment (which must be a power of
+ /// 2) into the form used internally in Attributes.
+ AttrBuilder &addAlignmentAttr(unsigned Align);
+
+ /// addStackAlignmentAttr - This turns an int stack alignment (which must be a
+ /// power of 2) into the form used internally in Attributes.
+ AttrBuilder &addStackAlignmentAttr(unsigned Align);
+
+ /// addRawValue - Add the raw value to the internal representation.
+ /// N.B. This should be used ONLY for decoding LLVM bitcode!
+ AttrBuilder &addRawValue(uint64_t Val);
+
+ /// @brief Remove attributes that are used on functions only.
+ void removeFunctionOnlyAttrs() {
+ removeAttribute(Attributes::NoReturn)
+ .removeAttribute(Attributes::NoUnwind)
+ .removeAttribute(Attributes::ReadNone)
+ .removeAttribute(Attributes::ReadOnly)
+ .removeAttribute(Attributes::NoInline)
+ .removeAttribute(Attributes::AlwaysInline)
+ .removeAttribute(Attributes::OptimizeForSize)
+ .removeAttribute(Attributes::StackProtect)
+ .removeAttribute(Attributes::StackProtectReq)
+ .removeAttribute(Attributes::NoRedZone)
+ .removeAttribute(Attributes::NoImplicitFloat)
+ .removeAttribute(Attributes::Naked)
+ .removeAttribute(Attributes::InlineHint)
+ .removeAttribute(Attributes::StackAlignment)
+ .removeAttribute(Attributes::UWTable)
+ .removeAttribute(Attributes::NonLazyBind)
+ .removeAttribute(Attributes::ReturnsTwice)
+ .removeAttribute(Attributes::AddressSafety)
+ .removeAttribute(Attributes::MinSize);
}
- Attributes operator ~ () const { return Attributes(~Bits); }
+
uint64_t Raw() const { return Bits; }
- private:
- // Currently, we need less than 64 bits.
- uint64_t Bits;
-};
-namespace Attribute {
-
-/// Function parameters and results can have attributes to indicate how they
-/// should be treated by optimizations and code generation. This enumeration
-/// lists the attributes that can be associated with parameters, function
-/// results or the function itself.
-/// @brief Function attributes.
-
-// We declare AttrConst objects that will be used throughout the code
-// and also raw uint64_t objects with _i suffix to be used below for other
-// constant declarations. This is done to avoid static CTORs and at the same
-// time to keep type-safety of Attributes.
-#define DECLARE_LLVM_ATTRIBUTE(name, value) \
- const uint64_t name##_i = value; \
- const AttrConst name = {value};
-
-DECLARE_LLVM_ATTRIBUTE(None,0) ///< No attributes have been set
-DECLARE_LLVM_ATTRIBUTE(ZExt,1<<0) ///< Zero extended before/after call
-DECLARE_LLVM_ATTRIBUTE(SExt,1<<1) ///< Sign extended before/after call
-DECLARE_LLVM_ATTRIBUTE(NoReturn,1<<2) ///< Mark the function as not returning
-DECLARE_LLVM_ATTRIBUTE(InReg,1<<3) ///< Force argument to be passed in register
-DECLARE_LLVM_ATTRIBUTE(StructRet,1<<4) ///< Hidden pointer to structure to return
-DECLARE_LLVM_ATTRIBUTE(NoUnwind,1<<5) ///< Function doesn't unwind stack
-DECLARE_LLVM_ATTRIBUTE(NoAlias,1<<6) ///< Considered to not alias after call
-DECLARE_LLVM_ATTRIBUTE(ByVal,1<<7) ///< Pass structure by value
-DECLARE_LLVM_ATTRIBUTE(Nest,1<<8) ///< Nested function static chain
-DECLARE_LLVM_ATTRIBUTE(ReadNone,1<<9) ///< Function does not access memory
-DECLARE_LLVM_ATTRIBUTE(ReadOnly,1<<10) ///< Function only reads from memory
-DECLARE_LLVM_ATTRIBUTE(NoInline,1<<11) ///< inline=never
-DECLARE_LLVM_ATTRIBUTE(AlwaysInline,1<<12) ///< inline=always
-DECLARE_LLVM_ATTRIBUTE(OptimizeForSize,1<<13) ///< opt_size
-DECLARE_LLVM_ATTRIBUTE(StackProtect,1<<14) ///< Stack protection.
-DECLARE_LLVM_ATTRIBUTE(StackProtectReq,1<<15) ///< Stack protection required.
-DECLARE_LLVM_ATTRIBUTE(Alignment,31<<16) ///< Alignment of parameter (5 bits)
- // stored as log2 of alignment with +1 bias
- // 0 means unaligned different from align 1
-DECLARE_LLVM_ATTRIBUTE(NoCapture,1<<21) ///< Function creates no aliases of pointer
-DECLARE_LLVM_ATTRIBUTE(NoRedZone,1<<22) /// disable redzone
-DECLARE_LLVM_ATTRIBUTE(NoImplicitFloat,1<<23) /// disable implicit floating point
- /// instructions.
-DECLARE_LLVM_ATTRIBUTE(Naked,1<<24) ///< Naked function
-DECLARE_LLVM_ATTRIBUTE(InlineHint,1<<25) ///< source said inlining was
- ///desirable
-DECLARE_LLVM_ATTRIBUTE(StackAlignment,7<<26) ///< Alignment of stack for
- ///function (3 bits) stored as log2
- ///of alignment with +1 bias
- ///0 means unaligned (different from
- ///alignstack=(1))
-DECLARE_LLVM_ATTRIBUTE(ReturnsTwice,1<<29) ///< Function can return twice
-DECLARE_LLVM_ATTRIBUTE(UWTable,1<<30) ///< Function must be in an unwind
- ///table
-DECLARE_LLVM_ATTRIBUTE(NonLazyBind,1U<<31) ///< Function is called early and/or
- /// often, so lazy binding isn't
- /// worthwhile.
-DECLARE_LLVM_ATTRIBUTE(AddressSafety,1ULL<<32) ///< Address safety checking is on.
-DECLARE_LLVM_ATTRIBUTE(IANSDialect,1ULL<<33) ///< Inline asm non-standard dialect.
- /// When not set, ATT dialect assumed.
- /// When set, the Intel dialect is implied.
-
-#undef DECLARE_LLVM_ATTRIBUTE
-
-/// Note that uwtable is about the ABI or the user mandating an entry in the
-/// unwind table. The nounwind attribute is about an exception passing by the
-/// function.
-/// In a theoretical system that uses tables for profiling and sjlj for
-/// exceptions, they would be fully independent. In a normal system that
-/// uses tables for both, the semantics are:
-/// nil = Needs an entry because an exception might pass by.
-/// nounwind = No need for an entry
-/// uwtable = Needs an entry because the ABI says so and because
-/// an exception might pass by.
-/// uwtable + nounwind = Needs an entry because the ABI says so.
-
-/// @brief Attributes that only apply to function parameters.
-const AttrConst ParameterOnly = {ByVal_i | Nest_i |
- StructRet_i | NoCapture_i};
-
-/// @brief Attributes that may be applied to the function itself. These cannot
-/// be used on return values or function parameters.
-const AttrConst FunctionOnly = {NoReturn_i | NoUnwind_i | ReadNone_i |
- ReadOnly_i | NoInline_i | AlwaysInline_i | OptimizeForSize_i |
- StackProtect_i | StackProtectReq_i | NoRedZone_i | NoImplicitFloat_i |
- Naked_i | InlineHint_i | StackAlignment_i |
- UWTable_i | NonLazyBind_i | ReturnsTwice_i | AddressSafety_i |
- IANSDialect_i};
-
-/// @brief Parameter attributes that do not apply to vararg call arguments.
-const AttrConst VarArgsIncompatible = {StructRet_i};
-
-/// @brief Attributes that are mutually incompatible.
-const AttrConst MutuallyIncompatible[5] = {
- {ByVal_i | Nest_i | StructRet_i},
- {ByVal_i | Nest_i | InReg_i },
- {ZExt_i | SExt_i},
- {ReadNone_i | ReadOnly_i},
- {NoInline_i | AlwaysInline_i}
+ bool operator==(const AttrBuilder &B) {
+ return Bits == B.Bits;
+ }
+ bool operator!=(const AttrBuilder &B) {
+ return Bits != B.Bits;
+ }
};
-/// @brief Which attributes cannot be applied to a type.
-Attributes typeIncompatible(Type *Ty);
-
-/// This turns an int alignment (a power of 2, normally) into the
-/// form used internally in Attributes.
-inline Attributes constructAlignmentFromInt(unsigned i) {
- // Default alignment, allow the target to define how to align it.
- if (i == 0)
- return None;
-
- assert(isPowerOf2_32(i) && "Alignment must be a power of two.");
- assert(i <= 0x40000000 && "Alignment too large.");
- return Attributes((Log2_32(i)+1) << 16);
-}
-
-/// This returns the alignment field of an attribute as a byte alignment value.
-inline unsigned getAlignmentFromAttrs(Attributes A) {
- Attributes Align = A & Attribute::Alignment;
- if (!Align)
- return 0;
-
- return 1U << ((Align.Raw() >> 16) - 1);
-}
-
-/// This turns an int stack alignment (which must be a power of 2) into
-/// the form used internally in Attributes.
-inline Attributes constructStackAlignmentFromInt(unsigned i) {
- // Default alignment, allow the target to define how to align it.
- if (i == 0)
- return None;
-
- assert(isPowerOf2_32(i) && "Alignment must be a power of two.");
- assert(i <= 0x100 && "Alignment too large.");
- return Attributes((Log2_32(i)+1) << 26);
-}
-
-/// This returns the stack alignment field of an attribute as a byte alignment
-/// value.
-inline unsigned getStackAlignmentFromAttrs(Attributes A) {
- Attributes StackAlign = A & Attribute::StackAlignment;
- if (!StackAlign)
- return 0;
-
- return 1U << ((StackAlign.Raw() >> 26) - 1);
-}
-
-/// This returns an integer containing an encoding of all the
-/// LLVM attributes found in the given attribute bitset. Any
-/// change to this encoding is a breaking change to bitcode
-/// compatibility.
-inline uint64_t encodeLLVMAttributesForBitcode(Attributes Attrs) {
- // FIXME: It doesn't make sense to store the alignment information as an
- // expanded out value, we should store it as a log2 value. However, we can't
- // just change that here without breaking bitcode compatibility. If this ever
- // becomes a problem in practice, we should introduce new tag numbers in the
- // bitcode file and have those tags use a more efficiently encoded alignment
- // field.
-
- // Store the alignment in the bitcode as a 16-bit raw value instead of a
- // 5-bit log2 encoded value. Shift the bits above the alignment up by
- // 11 bits.
-
- uint64_t EncodedAttrs = Attrs.Raw() & 0xffff;
- if (Attrs & Attribute::Alignment)
- EncodedAttrs |= (1ull << 16) <<
- (((Attrs & Attribute::Alignment).Raw()-1) >> 16);
- EncodedAttrs |= (Attrs.Raw() & (0xfffull << 21)) << 11;
-
- return EncodedAttrs;
-}
-
-/// This returns an attribute bitset containing the LLVM attributes
-/// that have been decoded from the given integer. This function
-/// must stay in sync with 'encodeLLVMAttributesForBitcode'.
-inline Attributes decodeLLVMAttributesForBitcode(uint64_t EncodedAttrs) {
- // The alignment is stored as a 16-bit raw value from bits 31--16.
- // We shift the bits above 31 down by 11 bits.
-
- unsigned Alignment = (EncodedAttrs & (0xffffull << 16)) >> 16;
- assert((!Alignment || isPowerOf2_32(Alignment)) &&
- "Alignment must be a power of two.");
-
- Attributes Attrs(EncodedAttrs & 0xffff);
- if (Alignment)
- Attrs |= Attribute::constructAlignmentFromInt(Alignment);
- Attrs |= Attributes((EncodedAttrs & (0xfffull << 32)) >> 11);
-
- return Attrs;
-}
-
-
-/// The set of Attributes set in Attributes is converted to a
-/// string of equivalent mnemonics. This is, presumably, for writing out
-/// the mnemonics for the assembly writer.
-/// @brief Convert attribute bits to text
-std::string getAsString(Attributes Attrs);
-} // end namespace Attribute
-
-/// This is just a pair of values to associate a set of attributes
-/// with an index.
-struct AttributeWithIndex {
- Attributes Attrs; ///< The attributes that are set, or'd together.
- unsigned Index; ///< Index of the parameter for which the attributes apply.
- ///< Index 0 is used for return value attributes.
- ///< Index ~0U is used for function attributes.
+//===----------------------------------------------------------------------===//
+// AttributeWithIndex
+//===----------------------------------------------------------------------===//
+/// AttributeWithIndex - This is just a pair of values to associate a set of
+/// attributes with an index.
+struct AttributeWithIndex {
+ Attributes Attrs; ///< The attributes that are set, or'd together.
+ unsigned Index; ///< Index of the parameter for which the attributes apply.
+ ///< Index 0 is used for return value attributes.
+ ///< Index ~0U is used for function attributes.
+
+ static AttributeWithIndex get(LLVMContext &C, unsigned Idx,
+ ArrayRef<Attributes::AttrVal> Attrs) {
+ return get(Idx, Attributes::get(C, Attrs));
+ }
static AttributeWithIndex get(unsigned Idx, Attributes Attrs) {
AttributeWithIndex P;
P.Index = Idx;
@@ -300,31 +312,42 @@ class AttributeListImpl;
/// AttrListPtr - This class manages the ref count for the opaque
/// AttributeListImpl object and provides accessors for it.
class AttrListPtr {
- /// AttrList - The attributes that we are managing. This can be null
- /// to represent the empty attributes list.
+public:
+ enum AttrIndex {
+ ReturnIndex = 0U,
+ FunctionIndex = ~0U
+ };
+private:
+ /// @brief The attributes that we are managing. This can be null to represent
+ /// the empty attributes list.
AttributeListImpl *AttrList;
+
+ /// @brief The attributes for the specified index are returned. Attributes
+ /// for the result are denoted with Idx = 0.
+ Attributes getAttributes(unsigned Idx) const;
+
+ explicit AttrListPtr(AttributeListImpl *LI) : AttrList(LI) {}
public:
AttrListPtr() : AttrList(0) {}
- AttrListPtr(const AttrListPtr &P);
+ AttrListPtr(const AttrListPtr &P) : AttrList(P.AttrList) {}
const AttrListPtr &operator=(const AttrListPtr &RHS);
- ~AttrListPtr();
//===--------------------------------------------------------------------===//
// Attribute List Construction and Mutation
//===--------------------------------------------------------------------===//
/// get - Return a Attributes list with the specified parameters in it.
- static AttrListPtr get(ArrayRef<AttributeWithIndex> Attrs);
+ static AttrListPtr get(LLVMContext &C, ArrayRef<AttributeWithIndex> Attrs);
/// addAttr - Add the specified attribute at the specified index to this
/// attribute list. Since attribute lists are immutable, this
/// returns the new list.
- AttrListPtr addAttr(unsigned Idx, Attributes Attrs) const;
+ AttrListPtr addAttr(LLVMContext &C, unsigned Idx, Attributes Attrs) const;
/// removeAttr - Remove the specified attribute at the specified index from
/// this attribute list. Since attribute lists are immutable, this
/// returns the new list.
- AttrListPtr removeAttr(unsigned Idx, Attributes Attrs) const;
+ AttrListPtr removeAttr(LLVMContext &C, unsigned Idx, Attributes Attrs) const;
//===--------------------------------------------------------------------===//
// Attribute List Accessors
@@ -332,36 +355,38 @@ public:
/// getParamAttributes - The attributes for the specified index are
/// returned.
Attributes getParamAttributes(unsigned Idx) const {
- assert (Idx && Idx != ~0U && "Invalid parameter index!");
return getAttributes(Idx);
}
/// getRetAttributes - The attributes for the ret value are
/// returned.
Attributes getRetAttributes() const {
- return getAttributes(0);
+ return getAttributes(ReturnIndex);
}
/// getFnAttributes - The function attributes are returned.
Attributes getFnAttributes() const {
- return getAttributes(~0U);
+ return getAttributes(FunctionIndex);
}
/// paramHasAttr - Return true if the specified parameter index has the
/// specified attribute set.
bool paramHasAttr(unsigned Idx, Attributes Attr) const {
- return getAttributes(Idx) & Attr;
+ return getAttributes(Idx).hasAttributes(Attr);
}
/// getParamAlignment - Return the alignment for the specified function
/// parameter.
unsigned getParamAlignment(unsigned Idx) const {
- return Attribute::getAlignmentFromAttrs(getAttributes(Idx));
+ return getAttributes(Idx).getAlignment();
}
/// hasAttrSomewhere - Return true if the specified attribute is set for at
/// least one parameter or for the return value.
- bool hasAttrSomewhere(Attributes Attr) const;
+ bool hasAttrSomewhere(Attributes::AttrVal Attr) const;
+
+ unsigned getNumAttrs() const;
+ Attributes &getAttributesAtIndex(unsigned i) const;
/// operator==/!= - Provide equality predicates.
bool operator==(const AttrListPtr &RHS) const
@@ -369,8 +394,6 @@ public:
bool operator!=(const AttrListPtr &RHS) const
{ return AttrList != RHS.AttrList; }
- void dump() const;
-
//===--------------------------------------------------------------------===//
// Attribute List Introspection
//===--------------------------------------------------------------------===//
@@ -400,13 +423,7 @@ public:
/// holds a index number plus a set of attributes.
const AttributeWithIndex &getSlot(unsigned Slot) const;
-private:
- explicit AttrListPtr(AttributeListImpl *L);
-
- /// getAttributes - The attributes for the specified index are
- /// returned. Attributes for the result are denoted with Idx = 0.
- Attributes getAttributes(unsigned Idx) const;
-
+ void dump() const;
};
} // End llvm namespace
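A usage sketch of the reworked construction path above (illustration only, not part of the patch; it assumes 3.2-era enumerators such as Attributes::NoUnwind): attribute values are now materialized through an LLVMContext and bundled per index into an immutable AttrListPtr.

    // Sketch: build a function-attribute list with the context-aware API.
    AttrListPtr buildFnAttrs(LLVMContext &C) {
      Attributes::AttrVal Vals[] = { Attributes::NoUnwind,   // assumed enumerators
                                     Attributes::ReadOnly };
      AttributeWithIndex AWI =
          AttributeWithIndex::get(C, AttrListPtr::FunctionIndex, Vals);
      return AttrListPtr::get(C, AWI); // ArrayRef binds to the single element
    }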
diff --git a/contrib/llvm/include/llvm/BasicBlock.h b/contrib/llvm/include/llvm/BasicBlock.h
index d2aa167..02c2a96 100644
--- a/contrib/llvm/include/llvm/BasicBlock.h
+++ b/contrib/llvm/include/llvm/BasicBlock.h
@@ -79,8 +79,8 @@ private:
void setParent(Function *parent);
friend class SymbolTableListTraits<BasicBlock, Function>;
- BasicBlock(const BasicBlock &); // Do not implement
- void operator=(const BasicBlock &); // Do not implement
+ BasicBlock(const BasicBlock &) LLVM_DELETED_FUNCTION;
+ void operator=(const BasicBlock &) LLVM_DELETED_FUNCTION;
/// BasicBlock ctor - If the function parameter is specified, the basic block
/// is automatically inserted at either the end of the function (if
@@ -213,7 +213,6 @@ public:
ValueSymbolTable *getValueSymbolTable();
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const BasicBlock *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::BasicBlockVal;
}
diff --git a/contrib/llvm/include/llvm/Bitcode/Archive.h b/contrib/llvm/include/llvm/Bitcode/Archive.h
index 3c75e58..4fd4b5d 100644
--- a/contrib/llvm/include/llvm/Bitcode/Archive.h
+++ b/contrib/llvm/include/llvm/Bitcode/Archive.h
@@ -415,8 +415,8 @@ class Archive {
/// name will be truncated at 15 characters. If \p Compress is specified,
/// all archive members will be compressed before being written. If
/// \p PrintSymTab is true, the symbol table will be printed to std::cout.
- /// @returns true if an error occurred, \p error set to error message
- /// @returns false if the writing succeeded.
+ /// @returns true if an error occurred, \p error set to error message;
+ /// returns false if the writing succeeded.
/// @brief Write (possibly modified) archive contents to disk
bool writeToDisk(
bool CreateSymbolTable=false, ///< Create Symbol table
@@ -480,8 +480,8 @@ class Archive {
/// Writes one ArchiveMember to an ofstream. If an error occurs, returns
/// false, otherwise true. If an error occurs and error is non-null then
/// it will be set to an error message.
- /// @returns false Writing member succeeded
- /// @returns true Writing member failed, \p error set to error message
+ /// @returns false if writing member succeeded,
+ /// returns true if writing member failed, \p error set to error message.
bool writeMember(
const ArchiveMember& member, ///< The member to be written
std::ofstream& ARFile, ///< The file to write member onto
@@ -527,9 +527,9 @@ class Archive {
/// @name Hidden
/// @{
private:
- Archive(); ///< Do not implement
- Archive(const Archive&); ///< Do not implement
- Archive& operator=(const Archive&); ///< Do not implement
+ Archive() LLVM_DELETED_FUNCTION;
+ Archive(const Archive&) LLVM_DELETED_FUNCTION;
+ Archive& operator=(const Archive&) LLVM_DELETED_FUNCTION;
/// @}
};
diff --git a/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h b/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h
index 6586829..840f57e 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h
@@ -47,9 +47,9 @@ private:
/// block/record name information in the BlockInfo block. Only llvm-bcanalyzer
/// uses this.
bool IgnoreBlockInfoNames;
-
- BitstreamReader(const BitstreamReader&); // DO NOT IMPLEMENT
- void operator=(const BitstreamReader&); // DO NOT IMPLEMENT
+
+ BitstreamReader(const BitstreamReader&) LLVM_DELETED_FUNCTION;
+ void operator=(const BitstreamReader&) LLVM_DELETED_FUNCTION;
public:
BitstreamReader() : IgnoreBlockInfoNames(true) {
}
@@ -409,7 +409,7 @@ public:
}
/// EnterSubBlock - Having read the ENTER_SUBBLOCK abbrevid, enter
- /// the block, and return true if the block is valid.
+ /// the block, and return true if the block has an error.
bool EnterSubBlock(unsigned BlockID, unsigned *NumWordsP = 0) {
// Save the current block's state on BlockScope.
BlockScope.push_back(Block(CurCodeSize));
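A sketch of caller code under the corrected contract (hypothetical reader loop; Stream and BlockID are assumed): a true return from EnterSubBlock now signals a malformed block, not a valid one.

    // Sketch: treat the return value as an error flag.
    if (Stream.EnterSubBlock(BlockID))
      return true; // propagate "malformed block" to the caller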
diff --git a/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h b/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h
index 475da13..dea118f 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h
@@ -155,6 +155,7 @@ public:
}
void EmitVBR(uint32_t Val, unsigned NumBits) {
+ assert(NumBits <= 32 && "Too many bits to emit!");
uint32_t Threshold = 1U << (NumBits-1);
// Emit the bits with VBR encoding, NumBits-1 bits at a time.
@@ -167,10 +168,11 @@ public:
}
void EmitVBR64(uint64_t Val, unsigned NumBits) {
+ assert(NumBits <= 32 && "Too many bits to emit!");
if ((uint32_t)Val == Val)
return EmitVBR((uint32_t)Val, NumBits);
- uint64_t Threshold = 1U << (NumBits-1);
+ uint32_t Threshold = 1U << (NumBits-1);
// Emit the bits with VBR encoding, NumBits-1 bits at a time.
while (Val >= Threshold) {
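For reference, a standalone sketch of the VBR chunking that the new asserts guard, mirroring the EmitVBR loop shown above: each chunk carries NumBits-1 payload bits plus a high continuation bit, which is why NumBits above 32 would shift out of range.

    // Sketch: illustration only, same layout as EmitVBR.
    void emitVBRChunks(uint32_t Val, unsigned NumBits,
                       void (*EmitChunk)(uint32_t, unsigned)) {
      uint32_t Threshold = 1U << (NumBits - 1);
      while (Val >= Threshold) {
        // Emit the low NumBits-1 payload bits with the continuation bit set.
        EmitChunk((Val & (Threshold - 1)) | Threshold, NumBits);
        Val >>= NumBits - 1;
      }
      EmitChunk(Val, NumBits); // final chunk, continuation bit clear
    }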
diff --git a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index a8c34cb..c1dc190 100644
--- a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -161,11 +161,14 @@ namespace bitc {
CST_CODE_CE_INSERTELT = 15, // CE_INSERTELT: [opval, opval, opval]
CST_CODE_CE_SHUFFLEVEC = 16, // CE_SHUFFLEVEC: [opval, opval, opval]
CST_CODE_CE_CMP = 17, // CE_CMP: [opty, opval, opval, pred]
- CST_CODE_INLINEASM = 18, // INLINEASM: [sideeffect,asmstr,conststr]
+ CST_CODE_INLINEASM_OLD = 18, // INLINEASM: [sideeffect|alignstack,
+ // asmstr,conststr]
CST_CODE_CE_SHUFVEC_EX = 19, // SHUFVEC_EX: [opty, opval, opval, opval]
CST_CODE_CE_INBOUNDS_GEP = 20,// INBOUNDS_GEP: [n x operands]
CST_CODE_BLOCKADDRESS = 21, // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
- CST_CODE_DATA = 22 // DATA: [n x elements]
+ CST_CODE_DATA = 22, // DATA: [n x elements]
+ CST_CODE_INLINEASM = 23 // INLINEASM: [sideeffect|alignstack|
+ // asmdialect,asmstr,conststr]
};
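A sketch of how a reader might unpack the first operand of the new record (bit layout inferred from the comment above; treat the exact positions as an assumption):

    // Sketch: decode the packed flags word of CST_CODE_INLINEASM.
    bool HasSideEffects = Flags & 1;
    bool IsAlignStack   = (Flags >> 1) & 1;
    unsigned AsmDialect = (Flags >> 2) & 1; // 0 = AT&T, 1 = Intel (assumed)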
/// CastOpcodes - These are values used in the bitcode files to encode which
diff --git a/contrib/llvm/include/llvm/CallingConv.h b/contrib/llvm/include/llvm/CallingConv.h
index 4c5ee62..053f4eb 100644
--- a/contrib/llvm/include/llvm/CallingConv.h
+++ b/contrib/llvm/include/llvm/CallingConv.h
@@ -94,7 +94,29 @@ namespace CallingConv {
 /// MBLAZE_SVOL - Calling convention used for MBlaze interrupt support
/// routines (i.e. GCC's save_volatiles attribute).
- MBLAZE_SVOL = 74
+ MBLAZE_SVOL = 74,
+
+ /// SPIR_FUNC - Calling convention for SPIR non-kernel device functions.
+ /// No lowering or expansion of arguments.
+ /// Structures are passed as a pointer to a struct with the byval attribute.
+ /// Functions can only call SPIR_FUNC and SPIR_KERNEL functions.
+ /// Functions can only have zero or one return value.
+ /// Variable arguments are not allowed, except for printf.
+ /// How arguments/return values are lowered is not specified.
+ /// Functions are only visible to the devices.
+ SPIR_FUNC = 75,
+
+ /// SPIR_KERNEL - Calling convention for SPIR kernel functions.
+ /// Inherits the restrictions of SPIR_FUNC, except
+ /// Cannot have non-void return values.
+ /// Cannot have variable arguments.
+ /// Can also be called by the host.
+ /// Is externally visible.
+ SPIR_KERNEL = 76,
+
+ /// Intel_OCL_BI - Calling conventions for Intel OpenCL built-ins
+ Intel_OCL_BI = 77
+
};
} // End CallingConv namespace
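A minimal sketch of adopting one of the new conventions from the C++ API (F is a hypothetical llvm::Function*):

    F->setCallingConv(CallingConv::SPIR_KERNEL); // sketch: mark a SPIR kernel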
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
index 170a528..a92b859 100644
--- a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -17,6 +17,7 @@
#define LLVM_CODEGEN_ASMPRINTER_H
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/InlineAsm.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
@@ -47,7 +48,7 @@ namespace llvm {
class DwarfException;
class Mangler;
class TargetLoweringObjectFile;
- class TargetData;
+ class DataLayout;
class TargetMachine;
/// AsmPrinter - This class is intended to be used as a driving class for all
@@ -130,8 +131,8 @@ namespace llvm {
/// getObjFileLowering - Return information about object file lowering.
const TargetLoweringObjectFile &getObjFileLowering() const;
- /// getTargetData - Return information about data layout.
- const TargetData &getTargetData() const;
+ /// getDataLayout - Return information about data layout.
+ const DataLayout &getDataLayout() const;
/// getCurrentSection() - Return the current section we are emitting to.
const MCSection *getCurrentSection() const;
@@ -460,7 +461,8 @@ namespace llvm {
mutable unsigned SetCounter;
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
- void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0) const;
+ void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0,
+ InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;
/// EmitInlineAsm - This method formats and emits the specified machine
/// instruction that is an inline asm.
diff --git a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
index 3afe309..436918b1 100644
--- a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/CallingConv.h"
@@ -288,6 +289,7 @@ public:
StackOffset = ((StackOffset + Align-1) & ~(Align-1));
unsigned Result = StackOffset;
StackOffset += Size;
+ MF.getFrameInfo()->ensureMaxAlignment(Align);
return Result;
}
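A worked illustration of the rounding performed just before the new ensureMaxAlignment call: with StackOffset == 20 and Align == 8, (20 + 7) & ~7 == 24, so the object lands at offset 24 and the frame's maximum alignment is raised to 8.

    // Sketch: the round-up idiom used by AllocateStack above.
    unsigned alignTo(unsigned Offset, unsigned Align) {
      return (Offset + Align - 1) & ~(Align - 1); // Align: power of two
    }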
diff --git a/contrib/llvm/include/llvm/CodeGen/CommandFlags.h b/contrib/llvm/include/llvm/CodeGen/CommandFlags.h
new file mode 100644
index 0000000..90ee234
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/CommandFlags.h
@@ -0,0 +1,228 @@
+//===-- CommandFlags.h - Command Flags Interface -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains codegen-specific flags that are shared between different
+// command line tools. The tools "llc" and "opt" both use this file to prevent
+// flag duplication.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_COMMAND_LINE_FLAGS_H
+#define LLVM_CODEGEN_COMMAND_LINE_FLAGS_H
+
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <string>
+using namespace llvm;
+
+cl::opt<std::string>
+MArch("march", cl::desc("Architecture to generate code for (see --version)"));
+
+cl::opt<std::string>
+MCPU("mcpu",
+ cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+ cl::value_desc("cpu-name"),
+ cl::init(""));
+
+cl::list<std::string>
+MAttrs("mattr",
+ cl::CommaSeparated,
+ cl::desc("Target specific attributes (-mattr=help for details)"),
+ cl::value_desc("a1,+a2,-a3,..."));
+
+cl::opt<Reloc::Model>
+RelocModel("relocation-model",
+ cl::desc("Choose relocation model"),
+ cl::init(Reloc::Default),
+ cl::values(
+ clEnumValN(Reloc::Default, "default",
+ "Target default relocation model"),
+ clEnumValN(Reloc::Static, "static",
+ "Non-relocatable code"),
+ clEnumValN(Reloc::PIC_, "pic",
+ "Fully relocatable, position independent code"),
+ clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+ "Relocatable external references, non-relocatable code"),
+ clEnumValEnd));
+
+cl::opt<llvm::CodeModel::Model>
+CMModel("code-model",
+ cl::desc("Choose code model"),
+ cl::init(CodeModel::Default),
+ cl::values(clEnumValN(CodeModel::Default, "default",
+ "Target default code model"),
+ clEnumValN(CodeModel::Small, "small",
+ "Small code model"),
+ clEnumValN(CodeModel::Kernel, "kernel",
+ "Kernel code model"),
+ clEnumValN(CodeModel::Medium, "medium",
+ "Medium code model"),
+ clEnumValN(CodeModel::Large, "large",
+ "Large code model"),
+ clEnumValEnd));
+
+cl::opt<bool>
+RelaxAll("mc-relax-all",
+ cl::desc("When used with filetype=obj, "
+ "relax all fixups in the emitted object file"));
+
+cl::opt<TargetMachine::CodeGenFileType>
+FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
+ cl::desc("Choose a file type (not all types are supported by all targets):"),
+ cl::values(
+ clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
+ "Emit an assembly ('.s') file"),
+ clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
+ "Emit a native object ('.o') file"),
+ clEnumValN(TargetMachine::CGFT_Null, "null",
+ "Emit nothing, for performance testing"),
+ clEnumValEnd));
+
+cl::opt<bool> DisableDotLoc("disable-dot-loc", cl::Hidden,
+ cl::desc("Do not use .loc entries"));
+
+cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
+ cl::desc("Do not use .cfi_* directives"));
+
+cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
+ cl::desc("Use .file directives with an explicit directory."));
+
+cl::opt<bool>
+DisableRedZone("disable-red-zone",
+ cl::desc("Do not emit code that uses the red zone."),
+ cl::init(false));
+
+cl::opt<bool>
+EnableFPMAD("enable-fp-mad",
+ cl::desc("Enable less precise MAD instructions to be generated"),
+ cl::init(false));
+
+cl::opt<bool>
+DisableFPElim("disable-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization"),
+ cl::init(false));
+
+cl::opt<bool>
+DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableUnsafeFPMath("enable-unsafe-fp-math",
+ cl::desc("Enable optimizations that may decrease FP precision"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableNoInfsFPMath("enable-no-infs-fp-math",
+ cl::desc("Enable FP math optimizations that assume no +-Infs"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableNoNaNsFPMath("enable-no-nans-fp-math",
+ cl::desc("Enable FP math optimizations that assume no NaNs"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
+ cl::Hidden,
+ cl::desc("Force codegen to assume rounding mode can change dynamically"),
+ cl::init(false));
+
+cl::opt<bool>
+GenerateSoftFloatCalls("soft-float",
+ cl::desc("Generate software floating point library calls"),
+ cl::init(false));
+
+cl::opt<llvm::FloatABI::ABIType>
+FloatABIForCalls("float-abi",
+ cl::desc("Choose float ABI type"),
+ cl::init(FloatABI::Default),
+ cl::values(
+ clEnumValN(FloatABI::Default, "default",
+ "Target default float ABI type"),
+ clEnumValN(FloatABI::Soft, "soft",
+ "Soft float ABI (implied by -soft-float)"),
+ clEnumValN(FloatABI::Hard, "hard",
+ "Hard float ABI (uses FP registers)"),
+ clEnumValEnd));
+
+cl::opt<llvm::FPOpFusion::FPOpFusionMode>
+FuseFPOps("fp-contract",
+ cl::desc("Enable aggressive formation of fused FP ops"),
+ cl::init(FPOpFusion::Standard),
+ cl::values(
+ clEnumValN(FPOpFusion::Fast, "fast",
+ "Fuse FP ops whenever profitable"),
+ clEnumValN(FPOpFusion::Standard, "on",
+ "Only fuse 'blessed' FP ops."),
+ clEnumValN(FPOpFusion::Strict, "off",
+ "Only fuse FP ops when the result won't be affected."),
+ clEnumValEnd));
+
+cl::opt<bool>
+DontPlaceZerosInBSS("nozero-initialized-in-bss",
+ cl::desc("Don't place zero-initialized symbols into bss section"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableGuaranteedTailCallOpt("tailcallopt",
+ cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
+ cl::init(false));
+
+cl::opt<bool>
+DisableTailCalls("disable-tail-calls",
+ cl::desc("Never emit tail calls"),
+ cl::init(false));
+
+cl::opt<unsigned>
+OverrideStackAlignment("stack-alignment",
+ cl::desc("Override default stack alignment"),
+ cl::init(0));
+
+cl::opt<bool>
+EnableRealignStack("realign-stack",
+ cl::desc("Realign stack if needed"),
+ cl::init(true));
+
+cl::opt<std::string>
+TrapFuncName("trap-func", cl::Hidden,
+ cl::desc("Emit a call to trap function rather than a trap instruction"),
+ cl::init(""));
+
+cl::opt<bool>
+EnablePIE("enable-pie",
+ cl::desc("Assume the creation of a position independent executable."),
+ cl::init(false));
+
+cl::opt<bool>
+SegmentedStacks("segmented-stacks",
+ cl::desc("Use segmented stacks if possible."),
+ cl::init(false));
+
+cl::opt<bool>
+UseInitArray("use-init-array",
+ cl::desc("Use .init_array instead of .ctors."),
+ cl::init(false));
+
+cl::opt<std::string> StopAfter("stop-after",
+ cl::desc("Stop compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+cl::opt<std::string> StartAfter("start-after",
+ cl::desc("Resume compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+
+cl::opt<unsigned>
+SSPBufferSize("stack-protector-buffer-size", cl::init(8),
+ cl::desc("Lower bound for a buffer to be considered for "
+ "stack protection"));
+#endif
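A sketch of the intended include-once usage (hypothetical driver code, not part of the patch): the cl:: globals above are populated by ParseCommandLineOptions and then consulted when constructing a TargetMachine.

    // Sketch: an llc-like driver picking up the shared flags.
    #include "llvm/CodeGen/CommandFlags.h"

    int main(int argc, char **argv) {
      cl::ParseCommandLineOptions(argc, argv, "my llc-like tool\n");
      // MCPU, MAttrs, RelocModel, etc. are now filled in for target setup.
      return 0;
    }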
diff --git a/contrib/llvm/include/llvm/CodeGen/FastISel.h b/contrib/llvm/include/llvm/CodeGen/FastISel.h
index 7cb9695..7c24e36 100644
--- a/contrib/llvm/include/llvm/CodeGen/FastISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/FastISel.h
@@ -32,7 +32,7 @@ class MachineFunction;
class MachineInstr;
class MachineFrameInfo;
class MachineRegisterInfo;
-class TargetData;
+class DataLayout;
class TargetInstrInfo;
class TargetLibraryInfo;
class TargetLowering;
@@ -54,7 +54,7 @@ protected:
MachineConstantPool &MCP;
DebugLoc DL;
const TargetMachine &TM;
- const TargetData &TD;
+ const DataLayout &TD;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
const TargetRegisterInfo &TRI;
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
index 20e33f7..076f6f3 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
@@ -122,6 +122,11 @@ namespace llvm {
Roots.push_back(GCRoot(Num, Metadata));
}
+ /// removeStackRoot - Removes a root.
+ roots_iterator removeStackRoot(roots_iterator position) {
+ return Roots.erase(position);
+ }
+
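A sketch of the erase-while-iterating idiom the new API enables (FI and isDeadRoot are hypothetical):

    // Sketch: drop roots that some analysis proved dead.
    for (GCFunctionInfo::roots_iterator RI = FI->roots_begin();
         RI != FI->roots_end();)
      if (isDeadRoot(*RI)) // hypothetical predicate
        RI = FI->removeStackRoot(RI);
      else
        ++RI;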
/// addSafePoint - Notes the existence of a safe point. Num is the ID of the
/// label just prior to the safe point (if the code generator is using
/// MachineModuleInfo).
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h b/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
index 17a2653..4a6b5ac 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -48,9 +48,10 @@ namespace llvm {
// May only be subclassed.
GCMetadataPrinter();
- // Do not implement.
- GCMetadataPrinter(const GCMetadataPrinter &);
- GCMetadataPrinter &operator=(const GCMetadataPrinter &);
+ private:
+ GCMetadataPrinter(const GCMetadataPrinter &) LLVM_DELETED_FUNCTION;
+ GCMetadataPrinter &
+ operator=(const GCMetadataPrinter &) LLVM_DELETED_FUNCTION;
public:
GCStrategy &getStrategy() { return *S; }
diff --git a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
index f387bd5..5d0a3b4 100644
--- a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -637,6 +637,10 @@ namespace ISD {
ATOMIC_LOAD_UMIN,
ATOMIC_LOAD_UMAX,
+ /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
+ /// is the chain and the second operand is the alloca pointer.
+ LIFETIME_START, LIFETIME_END,
+
/// BUILTIN_OP_END - This must be the last enum value in this list.
/// The target-specific pre-isel opcode values start here.
BUILTIN_OP_END
diff --git a/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h b/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h
index 767b666..5a3fb4b 100644
--- a/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h
+++ b/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h
@@ -21,15 +21,15 @@
namespace llvm {
class CallInst;
class Module;
- class TargetData;
+ class DataLayout;
class IntrinsicLowering {
- const TargetData& TD;
+ const DataLayout& TD;
bool Warned;
public:
- explicit IntrinsicLowering(const TargetData &td) :
+ explicit IntrinsicLowering(const DataLayout &td) :
TD(td), Warned(false) {}
/// AddPrototypes - This method, if called, causes all of the prototypes
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
index a3ce47c..185e414 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -29,6 +29,7 @@
#include <climits>
namespace llvm {
+ class CoalescerPair;
class LiveIntervals;
class MachineInstr;
class MachineRegisterInfo;
@@ -113,9 +114,6 @@ namespace llvm {
void dump() const;
void print(raw_ostream &os) const;
-
- private:
- LiveRange(); // DO NOT IMPLEMENT
};
template <> struct isPodLike<LiveRange> { static const bool value = true; };
@@ -275,11 +273,6 @@ namespace llvm {
void MergeValueInAsValue(const LiveInterval &RHS,
const VNInfo *RHSValNo, VNInfo *LHSValNo);
- /// Copy - Copy the specified live interval. This copies all the fields
- /// except for the register of the interval.
- void Copy(const LiveInterval &RHS, MachineRegisterInfo *MRI,
- VNInfo::Allocator &VNInfoAllocator);
-
bool empty() const { return ranges.empty(); }
/// beginIndex - Return the lowest numbered slot covered by interval.
@@ -312,12 +305,6 @@ namespace llvm {
return r != end() && r->end == index;
}
- /// killedInRange - Return true if the interval has kills in [Start,End).
- /// Note that the kill point is considered the end of a live range, so it is
- /// not contained in the live range. If a live range ends at End, it won't
- /// be counted as a kill by this method.
- bool killedInRange(SlotIndex Start, SlotIndex End) const;
-
/// getLiveRangeContaining - Return the live range that contains the
/// specified index, or null if there is none.
const LiveRange *getLiveRangeContaining(SlotIndex Idx) const {
@@ -366,6 +353,14 @@ namespace llvm {
return overlapsFrom(other, other.begin());
}
+ /// overlaps - Return true if the two intervals have overlapping segments
+ /// that are not coalescable according to CP.
+ ///
+ /// Overlapping segments where one interval is defined by a coalescable
+ /// copy are allowed.
+ bool overlaps(const LiveInterval &Other, const CoalescerPair &CP,
+ const SlotIndexes&) const;
+
/// overlaps - Return true if the live interval overlaps a range specified
/// by [Start, End).
bool overlaps(SlotIndex Start, SlotIndex End) const;
@@ -469,7 +464,7 @@ namespace llvm {
VNInfo *LHSValNo = 0,
const VNInfo *RHSValNo = 0);
- LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT
+ LiveInterval& operator=(const LiveInterval& rhs) LLVM_DELETED_FUNCTION;
};
@@ -501,7 +496,9 @@ namespace llvm {
if (I == E)
return;
// Is this an instruction live-in segment?
- if (SlotIndex::isEarlierInstr(I->start, Idx)) {
+ // If Idx is the start index of a basic block, include live-in segments
+ // that start at Idx.getBaseIndex().
+ if (I->start <= Idx.getBaseIndex()) {
EarlyVal = I->valno;
EndPoint = I->end;
// Move to the potentially live-out segment.
@@ -510,6 +507,12 @@ namespace llvm {
if (++I == E)
return;
}
+ // Special case: A PHIDef value can have its def in the middle of a
+ // segment if the value happens to be live out of the layout
+ // predecessor.
+ // Such a value is not live-in.
+ if (EarlyVal->def == Idx.getBaseIndex())
+ EarlyVal = 0;
}
// I now points to the segment that may be live-through, or defined by
// this instr. Ignore segments starting after the current instr.
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
index da521db..b421753 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -65,12 +65,6 @@ namespace llvm {
/// Live interval pointers for all the virtual registers.
IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
- /// AllocatableRegs - A bit vector of allocatable registers.
- BitVector AllocatableRegs;
-
- /// ReservedRegs - A bit vector of reserved registers.
- BitVector ReservedRegs;
-
/// RegMaskSlots - Sorted list of instructions with register mask operands.
/// Always use the 'r' slot, RegMasks are normal clobbers, not early
/// clobbers.
@@ -123,18 +117,6 @@ namespace llvm {
return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
}
- /// isAllocatable - is the physical register reg allocatable in the current
- /// function?
- bool isAllocatable(unsigned reg) const {
- return AllocatableRegs.test(reg);
- }
-
- /// isReserved - is the physical register reg reserved in the current
- /// function
- bool isReserved(unsigned reg) const {
- return ReservedRegs.test(reg);
- }
-
// Interval creation.
LiveInterval &getOrCreateInterval(unsigned Reg) {
if (!hasInterval(Reg)) {
@@ -165,6 +147,26 @@ namespace llvm {
bool shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead = 0);
+ /// extendToIndices - Extend the live range of LI to reach all points in
+ /// Indices. The points in the Indices array must be jointly dominated by
+ /// existing defs in LI. PHI-defs are added as needed to maintain SSA form.
+ ///
+ /// If a SlotIndex in Indices is the end index of a basic block, LI will be
+ /// extended to be live out of the basic block.
+ ///
+ /// See also LiveRangeCalc::extend().
+ void extendToIndices(LiveInterval *LI, ArrayRef<SlotIndex> Indices);
+
+ /// pruneValue - If an LI value is live at Kill, prune its live range by
+ /// removing any liveness reachable from Kill. Add live range end points to
+ /// EndPoints such that extendToIndices(LI, EndPoints) will reconstruct the
+ /// value's live range.
+ ///
+ /// Calling pruneValue() and extendToIndices() can be used to reconstruct
+ /// SSA form after adding defs to a virtual register.
+ void pruneValue(LiveInterval *LI, SlotIndex Kill,
+ SmallVectorImpl<SlotIndex> *EndPoints);
+
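The reconstruction recipe described in the comment above, as a sketch (LIS, LI, and Kill are assumed to be in scope):

    // Sketch: re-establish SSA liveness after inserting a new def at Kill.
    SmallVector<SlotIndex, 4> EndPoints;
    LIS.pruneValue(LI, Kill, &EndPoints); // clip liveness reachable from Kill
    // ... insert the new def for LI->reg at Kill ...
    LIS.extendToIndices(LI, EndPoints);   // re-extend, adding PHI-defs as needed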
SlotIndexes *getSlotIndexes() const {
return Indexes;
}
@@ -252,21 +254,26 @@ namespace llvm {
/// addKillFlags - Add kill flags to any instruction that kills a virtual
/// register.
- void addKillFlags();
+ void addKillFlags(const VirtRegMap*);
/// handleMove - call this method to notify LiveIntervals that
/// instruction 'mi' has been moved within a basic block. This will update
/// the live intervals for all operands of mi. Moves between basic blocks
/// are not supported.
- void handleMove(MachineInstr* MI);
+ ///
+ /// \param UpdateFlags Update live intervals for nonallocatable physregs.
+ void handleMove(MachineInstr* MI, bool UpdateFlags = false);
/// moveIntoBundle - Update intervals for operands of MI so that they
/// begin/end on the SlotIndex for BundleStart.
///
+ /// \param UpdateFlags Update live intervals for nonallocatable physregs.
+ ///
/// Requires MI and BundleStart to have SlotIndexes, and assumes
/// existing liveness is accurate. BundleStart should be the first
/// instruction in the Bundle.
- void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart);
+ void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart,
+ bool UpdateFlags = false);
// Register mask functions.
//
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveVariables.h b/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
index d4bb409..3bb134b 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
@@ -126,12 +126,6 @@ private:
/// building live intervals.
SparseBitVector<> PHIJoins;
- /// ReservedRegisters - This vector keeps track of which registers
- /// are reserved register which are not allocatable by the target machine.
- /// We can not track liveness for values that are in this set.
- ///
- BitVector ReservedRegisters;
-
private: // Intermediate data structures
MachineFunction *MF;
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index c917bd8..97c3945 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -351,6 +351,8 @@ public:
/// parameter is stored in Weights list and it may be used by
/// MachineBranchProbabilityInfo analysis to calculate branch probability.
///
+ /// Note that duplicate Machine CFG edges are not allowed.
+ ///
void addSuccessor(MachineBasicBlock *succ, uint32_t weight = 0);
/// removeSuccessor - Remove successor from the successors list of this
@@ -545,6 +547,28 @@ public:
return findDebugLoc(MBBI.getInstrIterator());
}
+ /// Possible outcome of a register liveness query to computeRegisterLiveness()
+ enum LivenessQueryResult {
+ LQR_Live, ///< Register is known to be live.
+ LQR_OverlappingLive, ///< Register itself is not live, but some overlapping
+ ///< register is.
+ LQR_Dead, ///< Register is known to be dead.
+ LQR_Unknown ///< Register liveness not decidable from local
+ ///< neighborhood.
+ };
+
+ /// computeRegisterLiveness - Return whether (physical) register \c Reg
+ /// has been <def>ined and not <kill>ed as of just before \c MI.
+ ///
+ /// Search is localized to a neighborhood of \c Neighborhood instructions
+ /// before (searching for defs or kills) and \c Neighborhood instructions
+ /// after (searching just for defs) \c MI.
+ ///
+ /// \c Reg must be a physical register.
+ LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
+ unsigned Reg, MachineInstr *MI,
+ unsigned Neighborhood=10);
+
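A sketch of a conservative client of the new query (typical peephole-style use; MBB, TRI, Reg, and MI are assumed):

    // Sketch: only a definite LQR_Dead answer justifies clobbering Reg.
    MachineBasicBlock::LivenessQueryResult LQR =
        MBB->computeRegisterLiveness(TRI, Reg, MI);
    if (LQR == MachineBasicBlock::LQR_Dead) {
      // Reg can be reused here without saving it first.
    }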
// Debugging methods.
void dump() const;
void print(raw_ostream &OS, SlotIndexes* = 0) const;
@@ -572,7 +596,7 @@ private:
/// getSuccWeight - Return weight of the edge from this block to MBB. This
/// method should NOT be called directly, but by using getEdgeWeight method
/// from MachineBranchProbabilityInfo class.
- uint32_t getSuccWeight(const MachineBasicBlock *succ) const;
+ uint32_t getSuccWeight(const_succ_iterator Succ) const;
// Methods used to maintain doubly linked list of blocks...
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
index af4db7d..12189ce 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
@@ -16,14 +16,12 @@
#define LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
#include "llvm/Pass.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/BranchProbability.h"
#include <climits>
namespace llvm {
-class raw_ostream;
-class MachineBasicBlock;
-
class MachineBranchProbabilityInfo : public ImmutablePass {
virtual void anchor();
@@ -52,6 +50,11 @@ public:
uint32_t getEdgeWeight(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const;
+ // Same thing, but using a const_succ_iterator from Src. This is faster when
+ // the iterator is already available.
+ uint32_t getEdgeWeight(const MachineBasicBlock *Src,
+ MachineBasicBlock::const_succ_iterator Dst) const;
+
// Get sum of the block successors' weights, potentially scaling them to fit
// within 32-bits. If scaling is required, sets Scale based on the necessary
// adjustment. Any edge weights used with the sum should be divided by Scale.
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h b/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h
index d6d65a2..8ed215d 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h
@@ -25,7 +25,7 @@ namespace llvm {
class Constant;
class FoldingSetNodeID;
-class TargetData;
+class DataLayout;
class TargetMachine;
class Type;
class MachineConstantPool;
@@ -132,14 +132,14 @@ public:
/// address of the function constant pool values.
/// @brief The machine constant pool.
class MachineConstantPool {
- const TargetData *TD; ///< The machine's TargetData.
+ const DataLayout *TD; ///< The machine's DataLayout.
unsigned PoolAlignment; ///< The alignment for the pool.
std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
/// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
public:
/// @brief The only constructor.
- explicit MachineConstantPool(const TargetData *td)
+ explicit MachineConstantPool(const DataLayout *td)
: TD(td), PoolAlignment(1) {}
~MachineConstantPool();
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index 8b958e4..0e4e132 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -21,13 +21,15 @@
namespace llvm {
class raw_ostream;
-class TargetData;
+class DataLayout;
class TargetRegisterClass;
class Type;
class MachineFunction;
class MachineBasicBlock;
class TargetFrameLowering;
class BitVector;
+class Value;
+class AllocaInst;
/// The CalleeSavedInfo class tracks the information need to locate where a
/// callee saved register is in the current frame.
@@ -103,14 +105,18 @@ class MachineFrameInfo {
// protector.
bool MayNeedSP;
+ /// Alloca - If this stack object originates from an Alloca instruction,
+ /// this value saves the original IR allocation. Can be NULL.
+ const AllocaInst *Alloca;
+
// PreAllocated - If true, the object was mapped into the local frame
// block and doesn't need additional handling for allocation beyond that.
bool PreAllocated;
StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
- bool isSS, bool NSP)
+ bool isSS, bool NSP, const AllocaInst *Val)
: SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
- isSpillSlot(isSS), MayNeedSP(NSP), PreAllocated(false) {}
+ isSpillSlot(isSS), MayNeedSP(NSP), Alloca(Val), PreAllocated(false) {}
};
/// Objects - The list of stack objects allocated...
@@ -362,6 +368,14 @@ public:
ensureMaxAlignment(Align);
}
+ /// getObjectAllocation - Return the underlying Alloca of the specified
+ /// stack object if it exists. Returns 0 if none exists.
+ const AllocaInst* getObjectAllocation(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Alloca;
+ }
+
 /// MayNeedStackProtector - Returns true if the object may need stack
/// protectors.
bool MayNeedStackProtector(int ObjectIdx) const {
@@ -482,9 +496,10 @@ public:
/// a nonnegative identifier to represent it.
///
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
- bool MayNeedSP = false) {
+ bool MayNeedSP = false, const AllocaInst *Alloca = 0) {
assert(Size != 0 && "Cannot allocate zero size stack objects!");
- Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP));
+ Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP,
+ Alloca));
int Index = (int)Objects.size() - NumFixedObjects - 1;
assert(Index >= 0 && "Bad frame index!");
ensureMaxAlignment(Alignment);
@@ -516,7 +531,7 @@ public:
///
int CreateVariableSizedObject(unsigned Alignment) {
HasVarSizedObjects = true;
- Objects.push_back(StackObject(0, Alignment, 0, false, false, true));
+ Objects.push_back(StackObject(0, Alignment, 0, false, false, true, 0));
ensureMaxAlignment(Alignment);
return (int)Objects.size()-NumFixedObjects-1;
}
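A sketch tying the two additions together (AI, Size, and Align are hypothetical): the alloca recorded at creation time can be recovered later through the frame index.

    // Sketch: record and recover the originating IR alloca.
    int FI = MFI->CreateStackObject(Size, Align, /*isSS=*/false,
                                    /*MayNeedSP=*/false, AI);
    const AllocaInst *Orig = MFI->getObjectAllocation(FI); // == AI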
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
index 062c750..025e18a 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -127,8 +127,8 @@ class MachineFunction {
/// about the control flow of such functions.
bool ExposesReturnsTwice;
- MachineFunction(const MachineFunction &); // DO NOT IMPLEMENT
- void operator=(const MachineFunction&); // DO NOT IMPLEMENT
+ MachineFunction(const MachineFunction &) LLVM_DELETED_FUNCTION;
+ void operator=(const MachineFunction&) LLVM_DELETED_FUNCTION;
public:
MachineFunction(const Function *Fn, const TargetMachine &TM,
unsigned FunctionNum, MachineModuleInfo &MMI,
@@ -138,15 +138,19 @@ public:
MachineModuleInfo &getMMI() const { return MMI; }
GCModuleInfo *getGMI() const { return GMI; }
MCContext &getContext() const { return Ctx; }
-
+
/// getFunction - Return the LLVM function that this machine code represents
///
const Function *getFunction() const { return Fn; }
+ /// getName - Return the name of the corresponding LLVM function.
+ ///
+ StringRef getName() const;
+
/// getFunctionNumber - Return a unique ID for the current function.
///
unsigned getFunctionNumber() const { return FunctionNumber; }
-
+
/// getTarget - Return the target machine this machine code is compiled with
///
const TargetMachine &getTarget() const { return Target; }
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
index 27756ab..7eb03a9 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/InlineAsm.h"
#include "llvm/Support/DebugLoc.h"
#include <vector>
@@ -81,8 +82,8 @@ private:
MachineBasicBlock *Parent; // Pointer to the owning basic block.
DebugLoc debugLoc; // Source line information.
- MachineInstr(const MachineInstr&); // DO NOT IMPLEMENT
- void operator=(const MachineInstr&); // DO NOT IMPLEMENT
+ MachineInstr(const MachineInstr&) LLVM_DELETED_FUNCTION;
+ void operator=(const MachineInstr&) LLVM_DELETED_FUNCTION;
// Intrusive list support
friend struct ilist_traits<MachineInstr>;
@@ -97,25 +98,10 @@ private:
/// MCID NULL and no operands.
MachineInstr();
- // The next two constructors have DebugLoc and non-DebugLoc versions;
- // over time, the non-DebugLoc versions should be phased out and eventually
- // removed.
-
- /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
- /// implicit operands. It reserves space for the number of operands specified
- /// by the MCInstrDesc. The version with a DebugLoc should be preferred.
- explicit MachineInstr(const MCInstrDesc &MCID, bool NoImp = false);
-
- /// MachineInstr ctor - Work exactly the same as the ctor above, except that
- /// the MachineInstr is created and added to the end of the specified basic
- /// block. The version with a DebugLoc should be preferred.
- MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &MCID);
-
 /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
 /// implicit operands. It reserves space for the number of operands specified
 /// by the MCInstrDesc. An explicit DebugLoc is supplied.
- explicit MachineInstr(const MCInstrDesc &MCID, const DebugLoc dl,
- bool NoImp = false);
+ MachineInstr(const MCInstrDesc &MCID, const DebugLoc dl, bool NoImp = false);
/// MachineInstr ctor - Work exactly the same as the ctor above, except that
/// the MachineInstr is created and added to the end of the specified basic
@@ -459,6 +445,11 @@ public:
/// Instructions with this flag set are not necessarily simple load
/// instructions, they may load a value and modify it, for example.
bool mayLoad(QueryType Type = AnyInBundle) const {
+ if (isInlineAsm()) {
+ unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_MayLoad)
+ return true;
+ }
return hasProperty(MCID::MayLoad, Type);
}
@@ -468,6 +459,11 @@ public:
/// instructions, they may store a modified value based on their operands, or
/// may not actually modify anything, for example.
bool mayStore(QueryType Type = AnyInBundle) const {
+ if (isInlineAsm()) {
+ unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_MayStore)
+ return true;
+ }
return hasProperty(MCID::MayStore, Type);
}
@@ -610,6 +606,7 @@ public:
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
bool isStackAligningInlineAsm() const;
+ InlineAsm::AsmDialect getInlineAsmDialect() const;
bool isInsertSubreg() const {
return getOpcode() == TargetOpcode::INSERT_SUBREG;
}
@@ -782,16 +779,43 @@ public:
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const;
+ /// tieOperands - Add a tie between the register operands at DefIdx and
+ /// UseIdx. The tie will cause the register allocator to ensure that the two
+ /// operands are assigned the same physical register.
+ ///
+ /// Tied operands are managed automatically for explicit operands in the
+ /// MCInstrDesc. This method is for exceptional cases like inline asm.
+ void tieOperands(unsigned DefIdx, unsigned UseIdx);
+
+ /// findTiedOperandIdx - Given the index of a tied register operand, find the
+ /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
+ /// index of the tied operand which must exist.
+ unsigned findTiedOperandIdx(unsigned OpIdx) const;
+
/// isRegTiedToUseOperand - Given the index of a register def operand,
/// check if the register def is tied to a source operand, due to either
/// two-address elimination or inline assembly constraints. Returns the
/// first tied use operand index by reference if UseOpIdx is not null.
- bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const;
+ bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const {
+ const MachineOperand &MO = getOperand(DefOpIdx);
+ if (!MO.isReg() || !MO.isDef() || !MO.isTied())
+ return false;
+ if (UseOpIdx)
+ *UseOpIdx = findTiedOperandIdx(DefOpIdx);
+ return true;
+ }
/// isRegTiedToDefOperand - Return true if the use operand of the specified
 /// index is tied to a def operand. It also returns the def operand index by
/// reference if DefOpIdx is not null.
- bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const;
+ bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const {
+ const MachineOperand &MO = getOperand(UseOpIdx);
+ if (!MO.isReg() || !MO.isUse() || !MO.isTied())
+ return false;
+ if (DefOpIdx)
+ *DefOpIdx = findTiedOperandIdx(UseOpIdx);
+ return true;
+ }
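A sketch of the round trip through the new tie bookkeeping (DefIdx and UseIdx are hypothetical inline-asm operand indices):

    // Sketch: tie a def to its use, then query the tie back.
    MI->tieOperands(DefIdx, UseIdx);
    unsigned U;
    if (MI->isRegTiedToUseOperand(DefIdx, &U))
      assert(U == UseIdx && "findTiedOperandIdx resolves the tie");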
/// clearKillInfo - Clears kill flags on all operands.
///
@@ -852,11 +876,11 @@ public:
bool isSafeToReMat(const TargetInstrInfo *TII, AliasAnalysis *AA,
unsigned DstReg) const;
- /// hasVolatileMemoryRef - Return true if this instruction may have a
- /// volatile memory reference, or if the information describing the
- /// memory reference is not available. Return false if it is known to
- /// have no volatile memory references.
- bool hasVolatileMemoryRef() const;
+ /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
+ /// or volatile memory reference, or if the information describing the memory
+ /// reference is not available. Return false if it is known to have no
+ /// ordered or volatile memory references.
+ bool hasOrderedMemoryRef() const;
/// isInvariantLoad - Return true if this instruction is loading from a
/// location whose value is invariant across the function. For example,
@@ -935,6 +959,15 @@ private:
/// return null.
MachineRegisterInfo *getRegInfo();
+ /// untieRegOperand - Break any tie involving OpIdx.
+ void untieRegOperand(unsigned OpIdx) {
+ MachineOperand &MO = getOperand(OpIdx);
+ if (MO.isReg() && MO.isTied()) {
+ getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
+ MO.TiedTo = 0;
+ }
+ }
+
/// addImplicitDefUseOperands - Add all implicit def and use operands to
/// this instruction.
void addImplicitDefUseOperands();
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index 654361f..7706853 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -176,15 +176,24 @@ public:
}
// Add a displacement from an existing MachineOperand with an added offset.
- const MachineInstrBuilder &addDisp(const MachineOperand &Disp,
- int64_t off) const {
+ const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
+ unsigned char TargetFlags = 0) const {
switch (Disp.getType()) {
default:
llvm_unreachable("Unhandled operand type in addDisp()");
case MachineOperand::MO_Immediate:
return addImm(Disp.getImm() + off);
- case MachineOperand::MO_GlobalAddress:
- return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off);
+ case MachineOperand::MO_GlobalAddress: {
+ // If the caller specifies new TargetFlags then use them, otherwise the
+ // default behavior is to copy the target flags from the existing
+ // MachineOperand. This means if the caller wants to clear the
+ // target flags it needs to do so explicitly.
+ if (TargetFlags)
+ return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
+ TargetFlags);
+ return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
+ Disp.getTargetFlags());
+ }
}
}
};
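A sketch of the flag-copying rule documented above (MIB and Disp assumed; the override value is purely illustrative):

    // Sketch: the default preserves Disp's flags; nonzero overrides them.
    MIB.addDisp(Disp, 8);                    // copies Disp.getTargetFlags()
    MIB.addDisp(Disp, 8, /*TargetFlags=*/1); // explicit, illustrative override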
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
index dc5f9a6..854ba06 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
@@ -130,9 +130,9 @@ public:
return OpI - InstrI->operands_begin();
}
- /// RegInfo - Information about a virtual register used by a set of operands.
+ /// VirtRegInfo - Information about a virtual register used by a set of
+ /// operands.
///
- struct RegInfo {
+ struct VirtRegInfo {
 /// Reads - One of the operands reads the virtual register. This does not
/// include <undef> or <internal> use operands, see MO::readsReg().
bool Reads;
@@ -146,6 +146,32 @@ public:
bool Tied;
};
+ /// PhysRegInfo - Information about a physical register used by a set of
+ /// operands.
+ struct PhysRegInfo {
+ /// Clobbers - Reg or an overlapping register is defined, or a regmask
+ /// clobbers Reg.
+ bool Clobbers;
+
+ /// Defines - Reg or a super-register is defined.
+ bool Defines;
+
+ /// DefinesOverlap - Reg or an overlapping register is defined.
+ bool DefinesOverlap;
+
+ /// Reads - Reg or a super-register is read.
+ bool Reads;
+
+ /// ReadsOverlap - Reg or an overlapping register is read.
+ bool ReadsOverlap;
+
+ /// DefinesDead - All defs of Reg or a super-register are dead.
+ bool DefinesDead;
+
+ /// There is a kill of Reg or a super-register.
+ bool Kills;
+ };
+
/// analyzeVirtReg - Analyze how the current instruction or bundle uses a
/// virtual register. This function should not be called after operator++(),
/// it expects a fresh iterator.
@@ -154,8 +180,16 @@ public:
/// @param Ops When set, this vector will receive an (MI, OpNum) entry for
/// each operand referring to Reg.
 /// @returns A filled-in VirtRegInfo struct.
- RegInfo analyzeVirtReg(unsigned Reg,
+ VirtRegInfo analyzeVirtReg(unsigned Reg,
SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = 0);
+
+ /// analyzePhysReg - Analyze how the current instruction or bundle uses a
+ /// physical register. This function should not be called after operator++(),
+ /// it expects a fresh iterator.
+ ///
+ /// @param Reg The physical register to analyze.
+ /// @returns A filled-in PhysRegInfo struct.
+ PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI);
};
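A sketch of querying the new physreg summary over a bundle (Reg and TRI assumed; field semantics follow the comments above):

    // Sketch: summarize how the current bundle touches Reg.
    MIBundleOperands::PhysRegInfo PRI =
        MIBundleOperands(MI).analyzePhysReg(Reg, TRI);
    if (PRI.Clobbers && !PRI.ReadsOverlap) {
      // Reg's old value cannot survive this bundle.
    }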
/// MIOperands - Iterate over operands of a single instruction.
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
index f7c4e86..928145d 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -26,7 +26,7 @@
namespace llvm {
class MachineBasicBlock;
-class TargetData;
+class DataLayout;
class raw_ostream;
/// MachineJumpTableEntry - One jump table in the jump table info.
@@ -84,9 +84,9 @@ public:
JTEntryKind getEntryKind() const { return EntryKind; }
/// getEntrySize - Return the size of each entry in the jump table.
- unsigned getEntrySize(const TargetData &TD) const;
+ unsigned getEntrySize(const DataLayout &TD) const;
/// getEntryAlignment - Return the alignment of each entry in the jump table.
- unsigned getEntryAlignment(const TargetData &TD) const;
+ unsigned getEntryAlignment(const DataLayout &TD) const;
/// createJumpTableIndex - Create a new jump table.
///
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
index 3e204be..d53f041 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
@@ -73,8 +73,8 @@ class MachineLoopInfo : public MachineFunctionPass {
LoopInfoBase<MachineBasicBlock, MachineLoop> LI;
friend class LoopBase<MachineBasicBlock, MachineLoop>;
- void operator=(const MachineLoopInfo &); // do not implement
- MachineLoopInfo(const MachineLoopInfo &); // do not implement
+ void operator=(const MachineLoopInfo &) LLVM_DELETED_FUNCTION;
+ MachineLoopInfo(const MachineLoopInfo &) LLVM_DELETED_FUNCTION;
public:
static char ID; // Pass identification, replacement for typeid
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
index 1ac9080..ddb1271 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -151,6 +151,15 @@ public:
bool isNonTemporal() const { return Flags & MONonTemporal; }
bool isInvariant() const { return Flags & MOInvariant; }
+ /// isUnordered - Returns true if this memory operation doesn't have any
+ /// ordering constraints other than normal aliasing. Volatile and atomic
+ /// memory operations can't be reordered.
+ ///
+ /// Currently, we don't model the difference between volatile and atomic
+ /// operations. They should retain their ordering relative to all memory
+ /// operations.
+ bool isUnordered() const { return !isVolatile(); }
+
/// refineAlignment - Update this MachineMemOperand to reflect the alignment
/// of MMO, if it has a greater alignment. This must only be used when the
/// new alignment applies to all users of this MachineMemOperand.
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
index 9401ffd1..7afc7eb 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
@@ -38,7 +38,7 @@ namespace llvm {
/// this GV is external.
DenseMap<MCSymbol*, StubValueTy> HiddenGVStubs;
- virtual void Anchor(); // Out of line virtual method.
+ virtual void anchor(); // Out of line virtual method.
public:
MachineModuleInfoMachO(const MachineModuleInfo &) {}
@@ -76,7 +76,7 @@ namespace llvm {
/// mode.
DenseMap<MCSymbol*, StubValueTy> GVStubs;
- virtual void Anchor(); // Out of line virtual method.
+ virtual void anchor(); // Out of line virtual method.
public:
MachineModuleInfoELF(const MachineModuleInfo &) {}
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
index 37d42b3..606833c 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -14,7 +14,6 @@
#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
#define LLVM_CODEGEN_MACHINEOPERAND_H
-#include "llvm/ADT/Hashing.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
@@ -30,6 +29,7 @@ class MachineRegisterInfo;
class MDNode;
class TargetMachine;
class TargetRegisterInfo;
+class hash_code;
class raw_ostream;
class MCSymbol;
@@ -60,12 +60,20 @@ private:
/// union.
unsigned char OpKind; // MachineOperandType
- /// SubReg - Subregister number, only valid for MO_Register. A value of 0
- /// indicates the MO_Register has no subReg.
- unsigned char SubReg;
+ // This union is discriminated by OpKind.
+ union {
+ /// SubReg - Subregister number, only valid for MO_Register. A value of 0
+ /// indicates the MO_Register has no subReg.
+ unsigned char SubReg;
+
+ /// TargetFlags - This is a set of target-specific operand flags.
+ unsigned char TargetFlags;
+ };
- /// TargetFlags - This is a set of target-specific operand flags.
- unsigned char TargetFlags;
+ /// TiedTo - Non-zero when this register operand is tied to another register
+ /// operand. The encoding of this field is described in the block comment
+ /// before MachineInstr::tieOperands().
+ unsigned char TiedTo : 4;
/// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register
/// operands.
@@ -176,9 +184,17 @@ public:
///
MachineOperandType getType() const { return (MachineOperandType)OpKind; }
- unsigned char getTargetFlags() const { return TargetFlags; }
- void setTargetFlags(unsigned char F) { TargetFlags = F; }
- void addTargetFlag(unsigned char F) { TargetFlags |= F; }
+ unsigned char getTargetFlags() const {
+ return isReg() ? 0 : TargetFlags;
+ }
+ void setTargetFlags(unsigned char F) {
+ assert(!isReg() && "Register operands can't have target flags");
+ TargetFlags = F;
+ }
+ void addTargetFlag(unsigned char F) {
+ assert(!isReg() && "Register operands can't have target flags");
+ TargetFlags |= F;
+ }
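Because the flag byte is now shared with SubReg, callers must guard on the operand kind before writing flags; a hedged illustration (MO and Flags are assumed):

  // Register operands keep their SubReg in the shared byte, so target
  // flags may only be set on non-register operands.
  if (!MO.isReg())
    MO.setTargetFlags(Flags);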
/// getParent - Return the instruction that this operand belongs to.
@@ -288,6 +304,11 @@ public:
return IsEarlyClobber;
}
+ bool isTied() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return TiedTo;
+ }
+
bool isDebug() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsDebug;
@@ -421,7 +442,7 @@ public:
int64_t getOffset() const {
assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() ||
isBlockAddress()) && "Wrong MachineOperand accessor");
- return (int64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
+ return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
SmallContents.OffsetLo;
}
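The added cast matters: left-shifting a negative signed value is undefined in C++, while the unsigned shift is well defined and preserves the high bits before the result is reinterpreted as int64_t. A worked example with assumed field values:

  // OffsetHi = -1 (0xFFFFFFFF), OffsetLo = 0x00000005
  // uint64_t(OffsetHi) << 32    ==  0xFFFFFFFF00000000   (well defined)
  // 0xFFFFFFFF00000000 | 0x5    ==  0xFFFFFFFF00000005
  // int64_t(0xFFFFFFFF00000005) ==  -4294967291          (sign preserved)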
@@ -548,6 +569,7 @@ public:
Op.IsUndef = isUndef;
Op.IsInternalRead = isInternalRead;
Op.IsEarlyClobber = isEarlyClobber;
+ Op.TiedTo = 0;
Op.IsDebug = isDebug;
Op.SmallContents.RegNo = Reg;
Op.Contents.Reg.Prev = 0;
@@ -606,11 +628,11 @@ public:
Op.setTargetFlags(TargetFlags);
return Op;
}
- static MachineOperand CreateBA(const BlockAddress *BA,
+ static MachineOperand CreateBA(const BlockAddress *BA, int64_t Offset,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_BlockAddress);
Op.Contents.OffsetedInfo.Val.BA = BA;
- Op.setOffset(0); // Offset is always 0.
+ Op.setOffset(Offset);
Op.setTargetFlags(TargetFlags);
return Op;
}
@@ -665,6 +687,9 @@ inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {
return OS;
}
+ // See friend declaration above. This additional declaration is required in
+ // order to compile LLVM with the IBM xlC compiler.
+ hash_code hash_value(const MachineOperand &MO);
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachinePostDominators.h b/contrib/llvm/include/llvm/CodeGen/MachinePostDominators.h
new file mode 100644
index 0000000..a9fc843
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachinePostDominators.h
@@ -0,0 +1,87 @@
+//=- llvm/CodeGen/MachinePostDominators.h ------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes interfaces to post dominance information for
+// target-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
+#define LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/DominatorInternals.h"
+
+namespace llvm {
+
+///
+/// MachinePostDominatorTree - Analysis pass that wraps a DominatorTreeBase and
+/// is used to compute a post-dominator tree for machine basic blocks.
+///
+struct MachinePostDominatorTree : public MachineFunctionPass {
+private:
+ DominatorTreeBase<MachineBasicBlock> *DT;
+
+public:
+ static char ID;
+
+ MachinePostDominatorTree();
+
+ ~MachinePostDominatorTree();
+
+ FunctionPass *createMachinePostDominatorTreePass();
+
+ const std::vector<MachineBasicBlock *> &getRoots() const {
+ return DT->getRoots();
+ }
+
+ MachineDomTreeNode *getRootNode() const {
+ return DT->getRootNode();
+ }
+
+ MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+ return DT->getNode(BB);
+ }
+
+ MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+ return DT->getNode(BB);
+ }
+
+ bool dominates(MachineDomTreeNode *A, MachineDomTreeNode *B) const {
+ return DT->dominates(A, B);
+ }
+
+ bool dominates(MachineBasicBlock *A, MachineBasicBlock *B) const {
+ return DT->dominates(A, B);
+ }
+
+ bool
+ properlyDominates(const MachineDomTreeNode *A, MachineDomTreeNode *B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ bool
+ properlyDominates(MachineBasicBlock *A, MachineBasicBlock *B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
+ MachineBasicBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual void print(llvm::raw_ostream &OS, const Module *M = 0) const;
+};
+} //end of namespace llvm
+
+#endif
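A short usage sketch from a client pass; MyPass and the block variables are assumed for illustration:

  void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool MyPass::runOnMachineFunction(MachineFunction &MF) {
    MachinePostDominatorTree &PDT = getAnalysis<MachinePostDominatorTree>();
    // In a post-dominator tree, dominates(A, B) means "A post-dominates B",
    // e.g. guard a code-sinking transform with PDT.dominates(A, B).
    return false;
  }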
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index 42a8aa4..4e86363 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -77,16 +77,20 @@ class MachineRegisterInfo {
return MO->Contents.Reg.Next;
}
- /// UsedPhysRegs - This is a bit vector that is computed and set by the
+ /// UsedRegUnits - This is a bit vector that is computed and set by the
/// register allocator, and must be kept up to date by passes that run after
/// register allocation (though most don't modify this). This is used
/// so that the code generator knows which callee save registers to save and
/// for other target specific uses.
- /// This vector only has bits set for registers explicitly used, not their
- /// aliases.
- BitVector UsedPhysRegs;
-
- /// UsedPhysRegMask - Additional used physregs, but including aliases.
+ /// This vector has bits set for register units that are modified in the
+ /// current function. It doesn't include registers clobbered by function
+ /// calls with register mask operands.
+ BitVector UsedRegUnits;
+
+ /// UsedPhysRegMask - Additional used physregs including aliases.
+ /// This bit vector represents all the registers clobbered by function calls.
+ /// It can model things that UsedRegUnits can't, such as function calls that
+ /// clobber ymm7 but preserve the low half in xmm7.
BitVector UsedPhysRegMask;
/// ReservedRegs - This is a bit vector of reserved registers. The target
@@ -95,9 +99,6 @@ class MachineRegisterInfo {
/// started.
BitVector ReservedRegs;
- /// AllocatableRegs - From TRI->getAllocatableSet.
- mutable BitVector AllocatableRegs;
-
/// LiveIns/LiveOuts - Keep track of the physical registers that are
/// livein/liveout of the function. Live in values are typically arguments in
/// registers, live out values are typically return values in registers.
@@ -106,8 +107,8 @@ class MachineRegisterInfo {
std::vector<std::pair<unsigned, unsigned> > LiveIns;
std::vector<unsigned> LiveOuts;
- MachineRegisterInfo(const MachineRegisterInfo&); // DO NOT IMPLEMENT
- void operator=(const MachineRegisterInfo&); // DO NOT IMPLEMENT
+ MachineRegisterInfo(const MachineRegisterInfo&) LLVM_DELETED_FUNCTION;
+ void operator=(const MachineRegisterInfo&) LLVM_DELETED_FUNCTION;
public:
explicit MachineRegisterInfo(const TargetRegisterInfo &TRI);
~MachineRegisterInfo();
@@ -360,29 +361,27 @@ public:
//===--------------------------------------------------------------------===//
/// isPhysRegUsed - Return true if the specified register is used in this
- /// function. This only works after register allocation.
+ /// function. Also check for clobbered aliases and registers clobbered by
+ /// function calls with register mask operands.
+ ///
+ /// This only works after register allocation. It is primarily used by
+ /// PrologEpilogInserter to determine which callee-saved registers need
+ /// spilling.
bool isPhysRegUsed(unsigned Reg) const {
- return UsedPhysRegs.test(Reg) || UsedPhysRegMask.test(Reg);
- }
-
- /// isPhysRegOrOverlapUsed - Return true if Reg or any overlapping register
- /// is used in this function.
- bool isPhysRegOrOverlapUsed(unsigned Reg) const {
if (UsedPhysRegMask.test(Reg))
return true;
- for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
- if (UsedPhysRegs.test(*AI))
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
+ if (UsedRegUnits.test(*Units))
return true;
return false;
}
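A sketch of the PrologEpilogInserter-style query mentioned above, assuming the usual MF/TRI/MRI variables of a machine function pass; callee-saved register lists are null-terminated:

  const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    if (MRI.isPhysRegUsed(CSRegs[i])) {
      // This callee-saved register (or one of its register units) is
      // modified in the function, so it needs a save/restore slot.
    }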
/// setPhysRegUsed - Mark the specified register used in this function.
/// This should only be called during and after register allocation.
- void setPhysRegUsed(unsigned Reg) { UsedPhysRegs.set(Reg); }
-
- /// addPhysRegsUsed - Mark the specified registers used in this function.
- /// This should only be called during and after register allocation.
- void addPhysRegsUsed(const BitVector &Regs) { UsedPhysRegs |= Regs; }
+ void setPhysRegUsed(unsigned Reg) {
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
+ UsedRegUnits.set(*Units);
+ }
/// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
/// This corresponds to the bit mask attached to register mask operands.
@@ -393,8 +392,9 @@ public:
/// setPhysRegUnused - Mark the specified register unused in this function.
/// This should only be called during and after register allocation.
void setPhysRegUnused(unsigned Reg) {
- UsedPhysRegs.reset(Reg);
UsedPhysRegMask.reset(Reg);
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
+ UsedRegUnits.reset(*Units);
}
@@ -427,6 +427,34 @@ public:
return !reservedRegsFrozen() || ReservedRegs.test(PhysReg);
}
+ /// getReservedRegs - Returns a reference to the frozen set of reserved
+ /// registers. This method should always be preferred to calling
+ /// TRI::getReservedRegs() when possible.
+ const BitVector &getReservedRegs() const {
+ assert(reservedRegsFrozen() &&
+ "Reserved registers haven't been frozen yet. "
+ "Use TRI::getReservedRegs().");
+ return ReservedRegs;
+ }
+
+ /// isReserved - Returns true when PhysReg is a reserved register.
+ ///
+ /// Reserved registers may belong to an allocatable register class, but the
+ /// target has explicitly requested that they are not used.
+ ///
+ bool isReserved(unsigned PhysReg) const {
+ return getReservedRegs().test(PhysReg);
+ }
+
+ /// isAllocatable - Returns true when PhysReg belongs to an allocatable
+ /// register class and it hasn't been reserved.
+ ///
+ /// Allocatable registers may show up in the allocation order of some virtual
+ /// register, so a register allocator needs to track their liveness and
+ /// availability.
+ bool isAllocatable(unsigned PhysReg) const {
+ return TRI->isInAllocatableClass(PhysReg) && !isReserved(PhysReg);
+ }
//===--------------------------------------------------------------------===//
// LiveIn/LiveOut Management
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h b/contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
index cbb45a7..edf93d1 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -14,6 +14,8 @@
#ifndef LLVM_CODEGEN_MACHINESSAUPDATER_H
#define LLVM_CODEGEN_MACHINESSAUPDATER_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class MachineBasicBlock;
class MachineFunction;
@@ -106,8 +108,8 @@ private:
void ReplaceRegWith(unsigned OldReg, unsigned NewReg);
unsigned GetValueAtEndOfBlockInternal(MachineBasicBlock *BB);
- void operator=(const MachineSSAUpdater&); // DO NOT IMPLEMENT
- MachineSSAUpdater(const MachineSSAUpdater&); // DO NOT IMPLEMENT
+ void operator=(const MachineSSAUpdater&) LLVM_DELETED_FUNCTION;
+ MachineSSAUpdater(const MachineSSAUpdater&) LLVM_DELETED_FUNCTION;
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
index 8da2045..31bd606 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -28,9 +28,15 @@
#define MACHINESCHEDULER_H
#include "llvm/CodeGen/MachinePassRegistry.h"
+#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/Target/TargetInstrInfo.h"
namespace llvm {
+extern cl::opt<bool> ForceTopDown;
+extern cl::opt<bool> ForceBottomUp;
+
class AliasAnalysis;
class LiveIntervals;
class MachineDominatorTree;
@@ -93,6 +99,237 @@ public:
}
};
+class ScheduleDAGMI;
+
+/// MachineSchedStrategy - Interface to the scheduling algorithm used by
+/// ScheduleDAGMI.
+class MachineSchedStrategy {
+public:
+ virtual ~MachineSchedStrategy() {}
+
+ /// Initialize the strategy after building the DAG for a new region.
+ virtual void initialize(ScheduleDAGMI *DAG) = 0;
+
+ /// Notify this strategy that all roots have been released (including those
+ /// that depend on EntrySU or ExitSU).
+ virtual void registerRoots() {}
+
+ /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
+ /// schedule the node at the top of the unscheduled region. Otherwise it will
+ /// be scheduled at the bottom.
+ virtual SUnit *pickNode(bool &IsTopNode) = 0;
+
+ /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
+ /// instruction and updated scheduled/remaining flags in the DAG nodes.
+ virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
+
+ /// When all predecessor dependencies have been resolved, free this node for
+ /// top-down scheduling.
+ virtual void releaseTopNode(SUnit *SU) = 0;
+ /// When all successor dependencies have been resolved, free this node for
+ /// bottom-up scheduling.
+ virtual void releaseBottomNode(SUnit *SU) = 0;
+};
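A minimal bottom-up strategy sketch against this interface; the class name is hypothetical and no heuristics are applied:

  class SourceOrderStrategy : public MachineSchedStrategy {
    std::vector<SUnit*> Ready; // nodes whose successors are all scheduled
  public:
    virtual void initialize(ScheduleDAGMI *DAG) { Ready.clear(); }
    virtual SUnit *pickNode(bool &IsTopNode) {
      if (Ready.empty()) return NULL;
      IsTopNode = false;          // place picked nodes at the region bottom
      SUnit *SU = Ready.back();
      Ready.pop_back();
      return SU;
    }
    virtual void schedNode(SUnit *SU, bool IsTopNode) {}
    virtual void releaseTopNode(SUnit *SU) {} // top-down is never used here
    virtual void releaseBottomNode(SUnit *SU) { Ready.push_back(SU); }
  };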
+
+/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
+/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
+/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
+///
+/// This is a convenience class that may be used by implementations of
+/// MachineSchedStrategy.
+class ReadyQueue {
+ unsigned ID;
+ std::string Name;
+ std::vector<SUnit*> Queue;
+
+public:
+ ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
+
+ unsigned getID() const { return ID; }
+
+ StringRef getName() const { return Name; }
+
+ // SU is in this queue if its NodeQueueId is a superset of this ID.
+ bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
+
+ bool empty() const { return Queue.empty(); }
+
+ void clear() { Queue.clear(); }
+
+ unsigned size() const { return Queue.size(); }
+
+ typedef std::vector<SUnit*>::iterator iterator;
+
+ iterator begin() { return Queue.begin(); }
+
+ iterator end() { return Queue.end(); }
+
+ iterator find(SUnit *SU) {
+ return std::find(Queue.begin(), Queue.end(), SU);
+ }
+
+ void push(SUnit *SU) {
+ Queue.push_back(SU);
+ SU->NodeQueueId |= ID;
+ }
+
+ iterator remove(iterator I) {
+ (*I)->NodeQueueId &= ~ID;
+ *I = Queue.back();
+ unsigned idx = I - Queue.begin();
+ Queue.pop_back();
+ return Queue.begin() + idx;
+ }
+
+#ifndef NDEBUG
+ void dump();
+#endif
+};
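remove() swaps the victim with the back of the vector, so a scan must continue from the returned iterator rather than pre-incrementing; a usage sketch, where Q is an assumed ReadyQueue:

  for (ReadyQueue::iterator I = Q.begin(); I != Q.end();) {
    if ((*I)->isScheduled)
      I = Q.remove(I);  // the back element now occupies *I; do not advance
    else
      ++I;
  }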
+
+/// Mutate the DAG as a postpass after normal DAG building.
+class ScheduleDAGMutation {
+public:
+ virtual ~ScheduleDAGMutation() {}
+
+ virtual void apply(ScheduleDAGMI *DAG) = 0;
+};
+
+/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
+/// machine instructions while updating LiveIntervals and tracking regpressure.
+class ScheduleDAGMI : public ScheduleDAGInstrs {
+protected:
+ AliasAnalysis *AA;
+ RegisterClassInfo *RegClassInfo;
+ MachineSchedStrategy *SchedImpl;
+
+ /// Ordered list of DAG postprocessing steps.
+ std::vector<ScheduleDAGMutation*> Mutations;
+
+ MachineBasicBlock::iterator LiveRegionEnd;
+
+ /// Register pressure in this region computed by buildSchedGraph.
+ IntervalPressure RegPressure;
+ RegPressureTracker RPTracker;
+
+ /// List of pressure sets that exceed the target's pressure limit before
+ /// scheduling, listed in increasing set ID order. Each pressure set is paired
+ /// with its max pressure in the currently scheduled regions.
+ std::vector<PressureElement> RegionCriticalPSets;
+
+ /// The top of the unscheduled zone.
+ MachineBasicBlock::iterator CurrentTop;
+ IntervalPressure TopPressure;
+ RegPressureTracker TopRPTracker;
+
+ /// The bottom of the unscheduled zone.
+ MachineBasicBlock::iterator CurrentBottom;
+ IntervalPressure BotPressure;
+ RegPressureTracker BotRPTracker;
+
+#ifndef NDEBUG
+ /// The number of instructions scheduled so far. Used to cut off the
+ /// scheduler at the point determined by misched-cutoff.
+ unsigned NumInstrsScheduled;
+#endif
+
+public:
+ ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
+ ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
+ AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
+ RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
+ CurrentBottom(), BotRPTracker(BotPressure) {
+#ifndef NDEBUG
+ NumInstrsScheduled = 0;
+#endif
+ }
+
+ virtual ~ScheduleDAGMI() {
+ delete SchedImpl;
+ }
+
+ /// Add a postprocessing step to the DAG builder.
+ /// Mutations are applied in the order that they are added after normal DAG
+ /// building and before MachineSchedStrategy initialization.
+ void addMutation(ScheduleDAGMutation *Mutation) {
+ Mutations.push_back(Mutation);
+ }
+
+ MachineBasicBlock::iterator top() const { return CurrentTop; }
+ MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
+
+ /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
+ /// region. This covers all instructions in a block, while schedule() may only
+ /// cover a subset.
+ void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned endcount);
+
+
+ /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
+ /// reorderable instructions.
+ virtual void schedule();
+
+ /// Get current register pressure for the top scheduled instructions.
+ const IntervalPressure &getTopPressure() const { return TopPressure; }
+ const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
+
+ /// Get current register pressure for the bottom scheduled instructions.
+ const IntervalPressure &getBotPressure() const { return BotPressure; }
+ const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
+
+ /// Get register pressure for the entire scheduling region before scheduling.
+ const IntervalPressure &getRegPressure() const { return RegPressure; }
+
+ const std::vector<PressureElement> &getRegionCriticalPSets() const {
+ return RegionCriticalPSets;
+ }
+
+protected:
+ // Top-Level entry points for the schedule() driver...
+
+ /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
+ /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
+ /// region, TopRPTracker and BotRPTracker will be initialized to the top and
+ /// bottom of the DAG region without covering any unscheduled instructions.
+ void buildDAGWithRegPressure();
+
+ /// Apply each ScheduleDAGMutation step in order. This allows different
+ /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
+ void postprocessDAG();
+
+ /// Identify DAG roots and setup scheduler queues.
+ void initQueues();
+
+ /// Move an instruction and update register pressure.
+ void scheduleMI(SUnit *SU, bool IsTopNode);
+
+ /// Update scheduler DAG and queues after scheduling an instruction.
+ void updateQueues(SUnit *SU, bool IsTopNode);
+
+ /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
+ void placeDebugValues();
+
+ /// \brief dump the scheduled Sequence.
+ void dumpSchedule() const;
+
+ // Lesser helpers...
+
+ void initRegPressure();
+
+ void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);
+
+ void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
+ bool checkSchedLimit();
+
+ void releaseRoots();
+
+ void releaseSucc(SUnit *SU, SDep *SuccEdge);
+ void releaseSuccessors(SUnit *SU);
+ void releasePred(SUnit *SU, SDep *PredEdge);
+ void releasePredecessors(SUnit *SU);
+};
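A sketch of plugging a postprocessing step into this driver; CopyConstrain is a hypothetical mutation, and SourceOrderStrategy is the sketch above:

  struct CopyConstrain : public ScheduleDAGMutation {
    virtual void apply(ScheduleDAGMI *DAG) {
      // e.g. add weak/artificial edges to keep copies near their uses
    }
  };

  // During scheduler construction (C is an assumed MachineSchedContext*):
  ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new SourceOrderStrategy());
  DAG->addMutation(new CopyConstrain()); // applied after DAG building,
                                         // before strategy initialization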
+
} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
index a5d8b0d..83c379b 100644
--- a/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
@@ -19,6 +19,7 @@
#include <list>
#include <map>
+#include <llvm/ADT/ilist.h>
namespace PBQP {
@@ -31,16 +32,16 @@ namespace PBQP {
class NodeEntry;
class EdgeEntry;
- typedef std::list<NodeEntry> NodeList;
- typedef std::list<EdgeEntry> EdgeList;
+ typedef llvm::ilist<NodeEntry> NodeList;
+ typedef llvm::ilist<EdgeEntry> EdgeList;
public:
- typedef NodeList::iterator NodeItr;
- typedef NodeList::const_iterator ConstNodeItr;
+ typedef NodeEntry* NodeItr;
+ typedef const NodeEntry* ConstNodeItr;
- typedef EdgeList::iterator EdgeItr;
- typedef EdgeList::const_iterator ConstEdgeItr;
+ typedef EdgeEntry* EdgeItr;
+ typedef const EdgeEntry* ConstEdgeItr;
private:
@@ -52,12 +53,14 @@ namespace PBQP {
private:
- class NodeEntry {
+ class NodeEntry : public llvm::ilist_node<NodeEntry> {
+ friend struct llvm::ilist_sentinel_traits<NodeEntry>;
private:
Vector costs;
AdjEdgeList adjEdges;
unsigned degree;
void *data;
+ NodeEntry() : costs(0, 0) {}
public:
NodeEntry(const Vector &costs) : costs(costs), degree(0) {}
Vector& getCosts() { return costs; }
@@ -77,12 +80,14 @@ namespace PBQP {
void* getData() { return data; }
};
- class EdgeEntry {
+ class EdgeEntry : public llvm::ilist_node<EdgeEntry> {
+ friend struct llvm::ilist_sentinel_traits<EdgeEntry>;
private:
NodeItr node1, node2;
Matrix costs;
AdjEdgeItr node1AEItr, node2AEItr;
void *data;
+ EdgeEntry() : costs(0, 0, 0) {}
public:
EdgeEntry(NodeItr node1, NodeItr node2, const Matrix &costs)
: node1(node1), node2(node2), costs(costs) {}
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h b/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h
index 3fee18c..0c1fcb7 100644
--- a/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h
@@ -113,7 +113,7 @@ namespace PBQP {
}
/// \brief Add the given node to the list of nodes to be optimally reduced.
- /// @return nItr Node iterator to be added.
+ /// @param nItr Node iterator to be added.
///
/// You probably don't want to override this, except perhaps to record
/// statistics before calling this implementation. HeuristicBase relies on
@@ -193,8 +193,9 @@ namespace PBQP {
/// reduce list.
/// @return True if a reduction takes place, false if the heuristic reduce
/// list is empty.
- void heuristicReduce() {
+ bool heuristicReduce() {
llvm_unreachable("Must be implemented in derived class.");
+ return false;
}
/// \brief Prepare a change in the costs on the given edge.
diff --git a/contrib/llvm/include/llvm/CodeGen/Passes.h b/contrib/llvm/include/llvm/CodeGen/Passes.h
index 07b3b45..7bd5764 100644
--- a/contrib/llvm/include/llvm/CodeGen/Passes.h
+++ b/contrib/llvm/include/llvm/CodeGen/Passes.h
@@ -404,6 +404,10 @@ namespace llvm {
/// inserting cmov instructions.
extern char &EarlyIfConverterID;
+ /// StackColoring - This pass performs stack coloring and merging.
+ /// It merges disjoint allocas to reduce the stack size.
+ extern char &StackColoringID;
+
/// IfConverter - This pass performs machine code if conversion.
extern char &IfConverterID;
diff --git a/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h b/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h
index 7dab4f9..8f52d3b 100644
--- a/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h
+++ b/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h
@@ -50,7 +50,6 @@ namespace llvm {
/// classof - Methods for support type inquiry through isa, cast, and
/// dyn_cast:
///
- static inline bool classof(const PseudoSourceValue *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == PseudoSourceValueVal ||
V->getValueID() == FixedStackPseudoSourceValueVal;
@@ -90,9 +89,6 @@ namespace llvm {
/// classof - Methods for support type inquiry through isa, cast, and
/// dyn_cast:
///
- static inline bool classof(const FixedStackPseudoSourceValue *) {
- return true;
- }
static inline bool classof(const Value *V) {
return V->getValueID() == FixedStackPseudoSourceValueVal;
}
diff --git a/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h b/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h
index bce3ec7..acfc07d 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h
@@ -109,8 +109,8 @@ namespace llvm {
/// class to support additional constraints for your architecture.
class PBQPBuilder {
private:
- PBQPBuilder(const PBQPBuilder&) {}
- void operator=(const PBQPBuilder&) {}
+ PBQPBuilder(const PBQPBuilder&) LLVM_DELETED_FUNCTION;
+ void operator=(const PBQPBuilder&) LLVM_DELETED_FUNCTION;
public:
typedef std::set<unsigned> RegSet;
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h b/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h
index 400e1f4..4467b62 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h
@@ -106,25 +106,6 @@ public:
return CalleeSaved[N-1];
return 0;
}
-
- /// isReserved - Returns true when PhysReg is a reserved register.
- ///
- /// Reserved registers may belong to an allocatable register class, but the
- /// target has explicitly requested that they are not used.
- ///
- bool isReserved(unsigned PhysReg) const {
- return Reserved.test(PhysReg);
- }
-
- /// isAllocatable - Returns true when PhysReg belongs to an allocatable
- /// register class and it hasn't been reserved.
- ///
- /// Allocatable registers may show up in the allocation order of some virtual
- /// register, so a register allocator needs to track its liveness and
- /// availability.
- bool isAllocatable(unsigned PhysReg) const {
- return TRI->isInAllocatableClass(PhysReg) && !isReserved(PhysReg);
- }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h b/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h
index 2043155..30326d0 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h
@@ -43,7 +43,7 @@ struct RegisterPressure {
/// class. This is only useful to account for spilling or rematerialization.
void decrease(const TargetRegisterClass *RC, const TargetRegisterInfo *TRI);
- void dump(const TargetRegisterInfo *TRI);
+ void dump(const TargetRegisterInfo *TRI) const;
};
/// RegisterPressure computed within a region of instructions delimited by
@@ -197,6 +197,7 @@ public:
/// This result is complete if either advance() or recede() has returned true,
/// or if closeRegion() was explicitly invoked.
RegisterPressure &getPressure() { return P; }
+ const RegisterPressure &getPressure() const { return P; }
/// Get the register set pressure at the current position, which may be less
/// than the pressure across the traversed region.
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h b/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
index 3986a8d..08d3169 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
@@ -18,6 +18,7 @@
#define LLVM_CODEGEN_REGISTER_SCAVENGING_H
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
namespace llvm {
@@ -59,10 +60,6 @@ class RegScavenger {
///
BitVector CalleeSavedRegs;
- /// ReservedRegs - A bitvector of reserved registers.
- ///
- BitVector ReservedRegs;
-
/// RegsAvailable - The current state of all the physical registers immediately
/// before MBBI. One bit per physical register. If bit is set that means it's
/// available, unset means the register is currently being used.
@@ -130,12 +127,12 @@ public:
void setUsed(unsigned Reg);
private:
/// isReserved - Returns true if a register is reserved. It is never "unused".
- bool isReserved(unsigned Reg) const { return ReservedRegs.test(Reg); }
+ bool isReserved(unsigned Reg) const { return MRI->isReserved(Reg); }
/// isUsed / isUnused - Test if a register is currently being used.
///
bool isUsed(unsigned Reg) const {
- return !RegsAvailable.test(Reg) || ReservedRegs.test(Reg);
+ return !RegsAvailable.test(Reg) || isReserved(Reg);
}
/// isAliasUsed - Is Reg or an alias currently in use?
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 85ab47b..7e0ca14 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -31,6 +31,7 @@ namespace llvm {
class MachineFunction;
class MachineRegisterInfo;
class MachineInstr;
+ struct MCSchedClassDesc;
class TargetRegisterInfo;
class ScheduleDAG;
class SDNode;
@@ -52,6 +53,13 @@ namespace llvm {
Order ///< Any other ordering dependency.
};
+ enum OrderKind {
+ Barrier, ///< An unknown scheduling barrier.
+ MayAliasMem, ///< Nonvolatile load/store instructions that may alias.
+ MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
+ Artificial ///< Arbitrary weak DAG edge (no actual dependence).
+ };
+
private:
/// Dep - A pointer to the depending/depended-on SUnit, and an enum
/// indicating the kind of the dependency.
@@ -65,26 +73,18 @@ namespace llvm {
unsigned Reg;
/// Order - Additional information about Order dependencies.
- struct {
- /// isNormalMemory - True if both sides of the dependence
- /// access memory in non-volatile and fully modeled ways.
- bool isNormalMemory : 1;
-
- /// isMustAlias - True if both sides of the dependence are known to
- /// access the same memory.
- bool isMustAlias : 1;
-
- /// isArtificial - True if this is an artificial dependency, meaning
- /// it is not necessary for program correctness, and may be safely
- /// deleted if necessary.
- bool isArtificial : 1;
- } Order;
+ unsigned OrdKind; // enum OrderKind
} Contents;
/// Latency - The time associated with this edge. Often this is just
/// the value of the Latency field of the predecessor, however advanced
/// models may provide additional information about specific edges.
unsigned Latency;
+ /// Record MinLatency separately from "expected" Latency.
+ ///
+ /// FIXME: this field is not packed on LP64. Convert to 16-bit DAG edge
+ /// latency after introducing saturating truncation.
+ unsigned MinLatency;
public:
/// SDep - Construct a null SDep. This is only for use by container
@@ -93,28 +93,28 @@ namespace llvm {
SDep() : Dep(0, Data) {}
/// SDep - Construct an SDep with the specified values.
- SDep(SUnit *S, Kind kind, unsigned latency = 1, unsigned Reg = 0,
- bool isNormalMemory = false, bool isMustAlias = false,
- bool isArtificial = false)
- : Dep(S, kind), Contents(), Latency(latency) {
+ SDep(SUnit *S, Kind kind, unsigned Reg)
+ : Dep(S, kind), Contents() {
switch (kind) {
+ default:
+ llvm_unreachable("Reg given for non-register dependence!");
case Anti:
case Output:
assert(Reg != 0 &&
"SDep::Anti and SDep::Output must use a non-zero Reg!");
- // fall through
- case Data:
- assert(!isMustAlias && "isMustAlias only applies with SDep::Order!");
- assert(!isArtificial && "isArtificial only applies with SDep::Order!");
Contents.Reg = Reg;
+ Latency = 0;
break;
- case Order:
- assert(Reg == 0 && "Reg given for non-register dependence!");
- Contents.Order.isNormalMemory = isNormalMemory;
- Contents.Order.isMustAlias = isMustAlias;
- Contents.Order.isArtificial = isArtificial;
+ case Data:
+ Contents.Reg = Reg;
+ Latency = 1;
break;
}
+ MinLatency = Latency;
+ }
+ SDep(SUnit *S, OrderKind kind)
+ : Dep(S, Order), Contents(), Latency(0), MinLatency(0) {
+ Contents.OrdKind = kind;
}
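The constructor split makes the edge kind explicit at the call site; illustrative uses, where SU and the register number are assumed:

  SDep AntiDep(SU, SDep::Anti, /*Reg=*/42);  // register dependence, latency 0
  SDep DataDep(SU, SDep::Data, /*Reg=*/42);  // register dependence, latency 1
  SDep OrderDep(SU, SDep::Barrier);          // order dependence, latency 0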
/// Return true if the specified SDep is equivalent except for latency.
@@ -126,16 +126,14 @@ namespace llvm {
case Output:
return Contents.Reg == Other.Contents.Reg;
case Order:
- return Contents.Order.isNormalMemory ==
- Other.Contents.Order.isNormalMemory &&
- Contents.Order.isMustAlias == Other.Contents.Order.isMustAlias &&
- Contents.Order.isArtificial == Other.Contents.Order.isArtificial;
+ return Contents.OrdKind == Other.Contents.OrdKind;
}
llvm_unreachable("Invalid dependency kind!");
}
bool operator==(const SDep &Other) const {
- return overlaps(Other) && Latency == Other.Latency;
+ return overlaps(Other)
+ && Latency == Other.Latency && MinLatency == Other.MinLatency;
}
bool operator!=(const SDep &Other) const {
@@ -155,6 +153,18 @@ namespace llvm {
Latency = Lat;
}
+ /// getMinLatency - Return the minimum latency for this edge. Minimum
+ /// latency is used for scheduling groups, while normal (expected) latency
+ /// is for instruction cost and critical path.
+ unsigned getMinLatency() const {
+ return MinLatency;
+ }
+
+ /// setMinLatency - Set the minimum latency for this edge.
+ void setMinLatency(unsigned Lat) {
+ MinLatency = Lat;
+ }
+
/// getSUnit - Return the SUnit to which this edge points.
SUnit *getSUnit() const {
return Dep.getPointer();
@@ -179,20 +189,21 @@ namespace llvm {
/// memory accesses where both sides of the dependence access memory
/// in non-volatile and fully modeled ways.
bool isNormalMemory() const {
- return getKind() == Order && Contents.Order.isNormalMemory;
+ return getKind() == Order && (Contents.OrdKind == MayAliasMem
+ || Contents.OrdKind == MustAliasMem);
}
/// isMustAlias - Test if this is an Order dependence that is marked
/// as "must alias", meaning that the SUnits at either end of the edge
/// have a memory dependence on a known memory location.
bool isMustAlias() const {
- return getKind() == Order && Contents.Order.isMustAlias;
+ return getKind() == Order && Contents.OrdKind == MustAliasMem;
}
/// isArtificial - Test if this is an Order dependence that is marked
/// as "artificial", meaning it isn't necessary for correctness.
bool isArtificial() const {
- return getKind() == Order && Contents.Order.isArtificial;
+ return getKind() == Order && Contents.OrdKind == Artificial;
}
/// isAssignedRegDep - Test if this is a Data dependence that is
@@ -239,6 +250,8 @@ namespace llvm {
// this node was cloned.
// (SD scheduling only)
+ const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.
+
// Preds/Succs - The SUnits before/after us in the graph.
SmallVector<SDep, 4> Preds; // All SUnit predecessors.
SmallVector<SDep, 4> Succs; // All SUnit successors.
@@ -286,7 +299,7 @@ namespace llvm {
/// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
/// an SDNode and any nodes flagged to it.
SUnit(SDNode *node, unsigned nodenum)
- : Node(node), Instr(0), OrigNode(0), NodeNum(nodenum),
+ : Node(node), Instr(0), OrigNode(0), SchedClass(0), NodeNum(nodenum),
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0),
isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
@@ -300,7 +313,7 @@ namespace llvm {
/// SUnit - Construct an SUnit for post-regalloc scheduling to represent
/// a MachineInstr.
SUnit(MachineInstr *instr, unsigned nodenum)
- : Node(0), Instr(instr), OrigNode(0), NodeNum(nodenum),
+ : Node(0), Instr(instr), OrigNode(0), SchedClass(0), NodeNum(nodenum),
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0),
isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
@@ -313,7 +326,7 @@ namespace llvm {
/// SUnit - Construct a placeholder SUnit.
SUnit()
- : Node(0), Instr(0), OrigNode(0), NodeNum(~0u),
+ : Node(0), Instr(0), OrigNode(0), SchedClass(0), NodeNum(~0u),
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0),
isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
@@ -555,16 +568,6 @@ namespace llvm {
unsigned VerifyScheduledDAG(bool isBottomUp);
#endif
- protected:
- /// ComputeLatency - Compute node latency.
- ///
- virtual void computeLatency(SUnit *SU) = 0;
-
- /// ForceUnitLatencies - Return true if all scheduling edges should be given
- /// a latency value of one. The default is to return false; schedulers may
- /// override this as needed.
- virtual bool forceUnitLatencies() const { return false; }
-
private:
// Return the MCInstrDesc of this SDNode or NULL.
const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGILP.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGILP.h
new file mode 100644
index 0000000..1aa4058
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGILP.h
@@ -0,0 +1,86 @@
+//===- ScheduleDAGILP.h - ILP metric for ScheduleDAGInstrs ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definition of an ILP metric for machine level instruction scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAGILP_H
+#define LLVM_CODEGEN_SCHEDULEDAGILP_H
+
+#include "llvm/Support/DataTypes.h"
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+class ScheduleDAGInstrs;
+class SUnit;
+
+/// \brief Represent the ILP of the subDAG rooted at a DAG node.
+struct ILPValue {
+ unsigned InstrCount;
+ unsigned Cycles;
+
+ ILPValue(): InstrCount(0), Cycles(0) {}
+
+ ILPValue(unsigned count, unsigned cycles):
+ InstrCount(count), Cycles(cycles) {}
+
+ bool isValid() const { return Cycles > 0; }
+
+ // Order by the ILP metric's value.
+ bool operator<(ILPValue RHS) const {
+ return (uint64_t)InstrCount * RHS.Cycles
+ < (uint64_t)Cycles * RHS.InstrCount;
+ }
+ bool operator>(ILPValue RHS) const {
+ return RHS < *this;
+ }
+ bool operator<=(ILPValue RHS) const {
+ return (uint64_t)InstrCount * RHS.Cycles
+ <= (uint64_t)Cycles * RHS.InstrCount;
+ }
+ bool operator>=(ILPValue RHS) const {
+ return RHS <= *this;
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void print(raw_ostream &OS) const;
+
+ void dump() const;
+#endif
+};
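The relational operators compare the fractions InstrCount/Cycles without dividing, by cross-multiplying in 64 bits (which also sidesteps overflow for 32-bit fields). A worked example with assumed values:

  // A = {InstrCount=6, Cycles=2}  ->  ILP 3.0
  // B = {InstrCount=5, Cycles=2}  ->  ILP 2.5
  // A < B  evaluates  6*2 < 2*5,  i.e.  12 < 10  ->  false
  // A > B  evaluates  B < A,      i.e.  10 < 12  ->  true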
+
+/// \brief Compute the values of each DAG node for an ILP metric.
+///
+/// This metric assumes that the DAG is a forest of trees with roots at the
+/// bottom of the schedule.
+class ScheduleDAGILP {
+ bool IsBottomUp;
+ std::vector<ILPValue> ILPValues;
+
+public:
+ ScheduleDAGILP(bool IsBU): IsBottomUp(IsBU) {}
+
+ /// \brief Initialize the result data with the size of the DAG.
+ void resize(unsigned NumSUnits);
+
+ /// \brief Compute the ILP metric for the subDAG at this root.
+ void computeILP(const SUnit *Root);
+
+ /// \brief Get the ILP value for a DAG node.
+ ILPValue getILP(const SUnit *SU);
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val);
+
+} // namespace llvm
+
+#endif
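A usage sketch from a scheduling heuristic; DAG, the bottom-up root set, and the candidate nodes are assumed:

  ScheduleDAGILP ILP(/*IsBU=*/true);
  ILP.resize(DAG->SUnits.size());
  for (unsigned i = 0, e = Roots.size(); i != e; ++i)
    ILP.computeILP(Roots[i]);            // fill per-node ILP bottom-up
  // Prefer the candidate whose subtree exposes more parallelism.
  if (ILP.getILP(CandA) > ILP.getILP(CandB)) {
    // pick CandA
  }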
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 1bde942..4bcd35a 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallSet.h"
@@ -30,72 +31,6 @@ namespace llvm {
class LiveIntervals;
class RegPressureTracker;
- /// LoopDependencies - This class analyzes loop-oriented register
- /// dependencies, which are used to guide scheduling decisions.
- /// For example, loop induction variable increments should be
- /// scheduled as soon as possible after the variable's last use.
- ///
- class LoopDependencies {
- const MachineDominatorTree &MDT;
-
- public:
- typedef std::map<unsigned, std::pair<const MachineOperand *, unsigned> >
- LoopDeps;
- LoopDeps Deps;
-
- LoopDependencies(const MachineDominatorTree &mdt) : MDT(mdt) {}
-
- /// VisitLoop - Clear out any previous state and analyze the given loop.
- ///
- void VisitLoop(const MachineLoop *Loop) {
- assert(Deps.empty() && "stale loop dependencies");
-
- MachineBasicBlock *Header = Loop->getHeader();
- SmallSet<unsigned, 8> LoopLiveIns;
- for (MachineBasicBlock::livein_iterator LI = Header->livein_begin(),
- LE = Header->livein_end(); LI != LE; ++LI)
- LoopLiveIns.insert(*LI);
-
- const MachineDomTreeNode *Node = MDT.getNode(Header);
- const MachineBasicBlock *MBB = Node->getBlock();
- assert(Loop->contains(MBB) &&
- "Loop does not contain header!");
- VisitRegion(Node, MBB, Loop, LoopLiveIns);
- }
-
- private:
- void VisitRegion(const MachineDomTreeNode *Node,
- const MachineBasicBlock *MBB,
- const MachineLoop *Loop,
- const SmallSet<unsigned, 8> &LoopLiveIns) {
- unsigned Count = 0;
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- const MachineInstr *MI = I;
- if (MI->isDebugValue())
- continue;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isUse())
- continue;
- unsigned MOReg = MO.getReg();
- if (LoopLiveIns.count(MOReg))
- Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
- }
- ++Count; // Not every iteration due to dbg_value above.
- }
-
- const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
- for (std::vector<MachineDomTreeNode*>::const_iterator I =
- Children.begin(), E = Children.end(); I != E; ++I) {
- const MachineDomTreeNode *ChildNode = *I;
- MachineBasicBlock *ChildBlock = ChildNode->getBlock();
- if (Loop->contains(ChildBlock))
- VisitRegion(ChildNode, ChildBlock, Loop, LoopLiveIns);
- }
- }
- };
-
/// An individual mapping from virtual register number to SUnit.
struct VReg2SUnit {
unsigned VirtReg;
@@ -108,6 +43,15 @@ namespace llvm {
}
};
+ /// Record a physical register access.
+ /// For non-data-dependent uses, OpIdx == -1.
+ struct PhysRegSUOper {
+ SUnit *SU;
+ int OpIdx;
+
+ PhysRegSUOper(SUnit *su, int op): SU(su), OpIdx(op) {}
+ };
+
/// Combine a SparseSet with a 1x1 vector to track physical registers.
/// The SparseSet allows iterating over the (few) live registers for quickly
/// comparing against a regmask or clearing the set.
@@ -116,7 +60,7 @@ namespace llvm {
/// cleared between scheduling regions without freeing unused entries.
class Reg2SUnitsMap {
SparseSet<unsigned> PhysRegSet;
- std::vector<std::vector<SUnit*> > SUnits;
+ std::vector<std::vector<PhysRegSUOper> > SUnits;
public:
typedef SparseSet<unsigned>::const_iterator const_iterator;
@@ -140,7 +84,7 @@ namespace llvm {
/// If this register is mapped, return its existing SUnits vector.
/// Otherwise map the register and return an empty SUnits vector.
- std::vector<SUnit *> &operator[](unsigned Reg) {
+ std::vector<PhysRegSUOper> &operator[](unsigned Reg) {
bool New = PhysRegSet.insert(Reg).second;
assert((!New || SUnits[Reg].empty()) && "stale SUnits vector");
(void)New;
@@ -167,11 +111,13 @@ namespace llvm {
const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
const MachineFrameInfo *MFI;
- const InstrItineraryData *InstrItins;
/// Live Intervals provides reaching defs in preRA scheduling.
LiveIntervals *LIS;
+ /// TargetSchedModel provides an interface to the machine model.
+ TargetSchedModel SchedModel;
+
/// isPostRA flag indicates vregs cannot be present.
bool IsPostRA;
@@ -223,10 +169,6 @@ namespace llvm {
/// to minimize construction/destruction.
std::vector<SUnit *> PendingLoads;
- /// LoopRegs - Track which registers are used for loop-carried dependencies.
- ///
- LoopDependencies LoopRegs;
-
/// DbgValues - Remember instruction that precedes DBG_VALUE.
/// These are generated by buildSchedGraph but persist so they can be
/// referenced when emitting the final schedule.
@@ -244,6 +186,16 @@ namespace llvm {
virtual ~ScheduleDAGInstrs() {}
+ /// \brief Get the machine model for instruction scheduling.
+ const TargetSchedModel *getSchedModel() const { return &SchedModel; }
+
+ /// \brief Resolve and cache a resolved scheduling class for an SUnit.
+ const MCSchedClassDesc *getSchedClass(SUnit *SU) const {
+ if (!SU->SchedClass)
+ SU->SchedClass = SchedModel.resolveSchedClass(SU->getInstr());
+ return SU->SchedClass;
+ }
+
/// begin - Return an iterator to the top of the current scheduling region.
MachineBasicBlock::iterator begin() const { return RegionBegin; }
@@ -284,20 +236,6 @@ namespace llvm {
/// used by instructions in the fallthrough block.
void addSchedBarrierDeps();
- /// computeLatency - Compute node latency.
- ///
- virtual void computeLatency(SUnit *SU);
-
- /// computeOperandLatency - Return dependence edge latency using
- /// operand use/def information
- ///
- /// FindMin may be set to get the minimum vs. expected latency. Minimum
- /// latency is used for scheduling groups, while expected latency is for
- /// instruction cost and critical path.
- virtual unsigned computeOperandLatency(SUnit *Def, SUnit *Use,
- const SDep& dep,
- bool FindMin = false) const;
-
/// schedule - Order nodes according to selected style, filling
/// in the Sequence member.
///
@@ -319,7 +257,7 @@ namespace llvm {
protected:
void initSUnits();
- void addPhysRegDataDeps(SUnit *SU, const MachineOperand &MO);
+ void addPhysRegDataDeps(SUnit *SU, unsigned OperIdx);
void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
diff --git a/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h b/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
index a582b0c..836b73a 100644
--- a/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
+++ b/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
@@ -102,6 +102,11 @@ ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS,
ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
+/// createDAGLinearizer - This creates a "no-scheduling" scheduler which
+/// linearizes the DAG using topological order.
+ScheduleDAGSDNodes *createDAGLinearizer(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
index 1ccfe54..619ee69 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -73,8 +73,8 @@ class SDDbgInfo {
SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
DenseMap<const SDNode*, SmallVector<SDDbgValue*, 2> > DbgValMap;
- void operator=(const SDDbgInfo&); // Do not implement.
- SDDbgInfo(const SDDbgInfo&); // Do not implement.
+ void operator=(const SDDbgInfo&) LLVM_DELETED_FUNCTION;
+ SDDbgInfo(const SDDbgInfo&) LLVM_DELETED_FUNCTION;
public:
SDDbgInfo() {}
@@ -222,8 +222,8 @@ private:
DenseSet<SDNode *> &visited,
int level, bool &printed);
- void operator=(const SelectionDAG&); // Do not implement.
- SelectionDAG(const SelectionDAG&); // Do not implement.
+ void operator=(const SelectionDAG&) LLVM_DELETED_FUNCTION;
+ SelectionDAG(const SelectionDAG&) LLVM_DELETED_FUNCTION;
public:
explicit SelectionDAG(const TargetMachine &TM, llvm::CodeGenOpt::Level);
@@ -437,7 +437,13 @@ public:
SDValue getRegisterMask(const uint32_t *RegMask);
SDValue getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label);
SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
- bool isTarget = false, unsigned char TargetFlags = 0);
+ int64_t Offset = 0, bool isTarget = false,
+ unsigned char TargetFlags = 0);
+ SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
+ int64_t Offset = 0,
+ unsigned char TargetFlags = 0) {
+ return getBlockAddress(BA, VT, Offset, true, TargetFlags);
+ }
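With the new parameter a target can fold a byte offset into the node instead of emitting a separate add; an illustrative call, where BA and PtrVT are assumed:

  SDValue Addr = DAG.getTargetBlockAddress(BA, PtrVT, /*Offset=*/8);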
SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N) {
return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index db361ee..362e9af 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -216,8 +216,8 @@ class SDUse {
/// this operand.
SDUse **Prev, *Next;
- SDUse(const SDUse &U); // Do not implement
- void operator=(const SDUse &U); // Do not implement
+ SDUse(const SDUse &U) LLVM_DELETED_FUNCTION;
+ void operator=(const SDUse &U) LLVM_DELETED_FUNCTION;
public:
SDUse() : Val(), User(NULL), Prev(NULL), Next(NULL) {}
@@ -662,9 +662,6 @@ public:
///
void dumprWithDepth(const SelectionDAG *G = 0, unsigned depth = 100) const;
-
- static bool classof(const SDNode *) { return true; }
-
/// Profile - Gather unique data for the node.
///
void Profile(FoldingSetNodeID &ID) const;
@@ -956,7 +953,12 @@ public:
const MachinePointerInfo &getPointerInfo() const {
return MMO->getPointerInfo();
}
-
+
+ /// getAddressSpace - Return the address space for the associated pointer
+ unsigned getAddressSpace() const {
+ return getPointerInfo().getAddrSpace();
+ }
+
/// refineAlignment - Update this MemSDNode's MachineMemOperand information
/// to reflect the alignment of NewMMO, if it has a greater alignment.
/// This must only be used when the new alignment applies to all users of
@@ -971,7 +973,6 @@ public:
}
// Methods to support isa and dyn_cast
- static bool classof(const MemSDNode *) { return true; }
static bool classof(const SDNode *N) {
// For some targets, we lower some target intrinsics to a MemIntrinsicNode
// with either an intrinsic or a target opcode.
@@ -1011,11 +1012,6 @@ class AtomicSDNode : public MemSDNode {
SubclassData |= SynchScope << 12;
assert(getOrdering() == Ordering && "Ordering encoding error!");
assert(getSynchScope() == SynchScope && "Synch-scope encoding error!");
-
- assert((readMem() || getOrdering() <= Monotonic) &&
- "Acquire/Release MachineMemOperand must be a load!");
- assert((writeMem() || getOrdering() <= Monotonic) &&
- "Acquire/Release MachineMemOperand must be a store!");
}
public:
@@ -1061,7 +1057,6 @@ public:
}
// Methods to support isa and dyn_cast
- static bool classof(const AtomicSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
N->getOpcode() == ISD::ATOMIC_SWAP ||
@@ -1093,7 +1088,6 @@ public:
}
// Methods to support isa and dyn_cast
- static bool classof(const MemIntrinsicSDNode *) { return true; }
static bool classof(const SDNode *N) {
// We lower some target intrinsics to their target opcode
// early a node with a target opcode can be of this class
@@ -1148,7 +1142,6 @@ public:
}
static bool isSplatMask(const int *Mask, EVT VT);
- static bool classof(const ShuffleVectorSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::VECTOR_SHUFFLE;
}
@@ -1172,7 +1165,6 @@ public:
bool isNullValue() const { return Value->isNullValue(); }
bool isAllOnesValue() const { return Value->isAllOnesValue(); }
- static bool classof(const ConstantSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::Constant ||
N->getOpcode() == ISD::TargetConstant;
@@ -1207,9 +1199,6 @@ public:
/// have to duplicate its logic everywhere it's called.
bool isExactlyValue(double V) const {
bool ignored;
- // convert is not supported on this type
- if (&Value->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble)
- return false;
APFloat Tmp(V);
Tmp.convert(Value->getValueAPF().getSemantics(),
APFloat::rmNearestTiesToEven, &ignored);
@@ -1219,7 +1208,6 @@ public:
static bool isValueValidForType(EVT VT, const APFloat& Val);
- static bool classof(const ConstantFPSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ConstantFP ||
N->getOpcode() == ISD::TargetConstantFP;
@@ -1241,7 +1229,6 @@ public:
// Return the address space this GlobalAddress belongs to.
unsigned getAddressSpace() const;
- static bool classof(const GlobalAddressSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::GlobalAddress ||
N->getOpcode() == ISD::TargetGlobalAddress ||
@@ -1261,7 +1248,6 @@ public:
int getIndex() const { return FI; }
- static bool classof(const FrameIndexSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::FrameIndex ||
N->getOpcode() == ISD::TargetFrameIndex;
@@ -1281,7 +1267,6 @@ public:
int getIndex() const { return JTI; }
unsigned char getTargetFlags() const { return TargetFlags; }
- static bool classof(const JumpTableSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::JumpTable ||
N->getOpcode() == ISD::TargetJumpTable;
@@ -1342,7 +1327,6 @@ public:
Type *getType() const;
- static bool classof(const ConstantPoolSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ConstantPool ||
N->getOpcode() == ISD::TargetConstantPool;
@@ -1366,7 +1350,6 @@ public:
int getIndex() const { return Index; }
int64_t getOffset() const { return Offset; }
- static bool classof(const TargetIndexSDNode*) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::TargetIndex;
}
@@ -1385,7 +1368,6 @@ public:
MachineBasicBlock *getBasicBlock() const { return MBB; }
- static bool classof(const BasicBlockSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::BasicBlock;
}
@@ -1395,7 +1377,7 @@ public:
/// BUILD_VECTORs.
class BuildVectorSDNode : public SDNode {
// These are constructed as SDNodes and then cast to BuildVectorSDNodes.
- explicit BuildVectorSDNode(); // Do not implement
+ explicit BuildVectorSDNode() LLVM_DELETED_FUNCTION;
public:
/// isConstantSplat - Check if this is a constant splat, and if so, find the
/// smallest element size that splats the vector. If MinSplatBits is
@@ -1410,7 +1392,6 @@ public:
unsigned &SplatBitSize, bool &HasAnyUndefs,
unsigned MinSplatBits = 0, bool isBigEndian = false);
- static inline bool classof(const BuildVectorSDNode *) { return true; }
static inline bool classof(const SDNode *N) {
return N->getOpcode() == ISD::BUILD_VECTOR;
}
@@ -1431,7 +1412,6 @@ public:
/// getValue - return the contained Value.
const Value *getValue() const { return V; }
- static bool classof(const SrcValueSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::SRCVALUE;
}
@@ -1446,7 +1426,6 @@ public:
const MDNode *getMD() const { return MD; }
- static bool classof(const MDNodeSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MDNODE_SDNODE;
}
@@ -1463,7 +1442,6 @@ public:
unsigned getReg() const { return Reg; }
- static bool classof(const RegisterSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::Register;
}
@@ -1480,7 +1458,6 @@ public:
const uint32_t *getRegMask() const { return RegMask; }
- static bool classof(const RegisterMaskSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::RegisterMask;
}
@@ -1488,18 +1465,19 @@ public:
class BlockAddressSDNode : public SDNode {
const BlockAddress *BA;
+ int64_t Offset;
unsigned char TargetFlags;
friend class SelectionDAG;
BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
- unsigned char Flags)
+ int64_t o, unsigned char Flags)
: SDNode(NodeTy, DebugLoc(), getSDVTList(VT)),
- BA(ba), TargetFlags(Flags) {
+ BA(ba), Offset(o), TargetFlags(Flags) {
}
public:
const BlockAddress *getBlockAddress() const { return BA; }
+ int64_t getOffset() const { return Offset; }
unsigned char getTargetFlags() const { return TargetFlags; }
- static bool classof(const BlockAddressSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::BlockAddress ||
N->getOpcode() == ISD::TargetBlockAddress;
@@ -1517,7 +1495,6 @@ class EHLabelSDNode : public SDNode {
public:
MCSymbol *getLabel() const { return Label; }
- static bool classof(const EHLabelSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::EH_LABEL;
}
@@ -1537,7 +1514,6 @@ public:
const char *getSymbol() const { return Symbol; }
unsigned char getTargetFlags() const { return TargetFlags; }
- static bool classof(const ExternalSymbolSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ExternalSymbol ||
N->getOpcode() == ISD::TargetExternalSymbol;
@@ -1555,7 +1531,6 @@ public:
ISD::CondCode get() const { return Condition; }
- static bool classof(const CondCodeSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::CONDCODE;
}
@@ -1575,7 +1550,6 @@ class CvtRndSatSDNode : public SDNode {
public:
ISD::CvtCode getCvtCode() const { return CvtCode; }
- static bool classof(const CvtRndSatSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::CONVERT_RNDSAT;
}
@@ -1594,7 +1568,6 @@ public:
EVT getVT() const { return ValueType; }
- static bool classof(const VTSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::VALUETYPE;
}
@@ -1638,7 +1611,6 @@ public:
/// isUnindexed - Return true if this is NOT a pre/post inc/dec load/store.
bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
- static bool classof(const LSBaseSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::LOAD ||
N->getOpcode() == ISD::STORE;
@@ -1670,7 +1642,6 @@ public:
const SDValue &getBasePtr() const { return getOperand(1); }
const SDValue &getOffset() const { return getOperand(2); }
- static bool classof(const LoadSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::LOAD;
}
@@ -1701,7 +1672,6 @@ public:
const SDValue &getBasePtr() const { return getOperand(2); }
const SDValue &getOffset() const { return getOperand(3); }
- static bool classof(const StoreSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::STORE;
}
@@ -1742,7 +1712,6 @@ public:
MemRefsEnd = NewMemRefsEnd;
}
- static bool classof(const MachineSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->isMachineOpcode();
}
@@ -1750,10 +1719,10 @@ public:
class SDNodeIterator : public std::iterator<std::forward_iterator_tag,
SDNode, ptrdiff_t> {
- SDNode *Node;
+ const SDNode *Node;
unsigned Operand;
- SDNodeIterator(SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
+ SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
public:
bool operator==(const SDNodeIterator& x) const {
return Operand == x.Operand;
@@ -1784,8 +1753,8 @@ public:
return Operand - Other.Operand;
}
- static SDNodeIterator begin(SDNode *N) { return SDNodeIterator(N, 0); }
- static SDNodeIterator end (SDNode *N) {
+ static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
+ static SDNodeIterator end (const SDNode *N) {
return SDNodeIterator(N, N->getNumOperands());
}
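With the const-qualification above, operand iteration now also works from a const traversal; a minimal sketch (the helper function is illustrative):

    void visitOperandNodes(const SDNode *N) {
      for (SDNodeIterator I = SDNodeIterator::begin(N),
                          E = SDNodeIterator::end(N); I != E; ++I) {
        SDNode *Op = *I; // node defining the current operand
        (void)Op;
      }
    }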
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetSchedule.h b/contrib/llvm/include/llvm/CodeGen/TargetSchedule.h
new file mode 100644
index 0000000..88e6105
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/TargetSchedule.h
@@ -0,0 +1,167 @@
+//===-- llvm/CodeGen/TargetSchedule.h - Sched Machine Model -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper around MCSchedModel that allows the interface to
+// benefit from information currently only available in TargetInstrInfo.
+// Ideally, the scheduling interface would be fully defined in the MC layer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETSCHEDMODEL_H
+#define LLVM_TARGET_TARGETSCHEDMODEL_H
+
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class TargetRegisterInfo;
+class TargetSubtargetInfo;
+class TargetInstrInfo;
+class MachineInstr;
+
+/// Provide an instruction scheduling machine model to CodeGen passes.
+class TargetSchedModel {
+ // For efficiency, hold a copy of the statically defined MCSchedModel for this
+ // processor.
+ MCSchedModel SchedModel;
+ InstrItineraryData InstrItins;
+ const TargetSubtargetInfo *STI;
+ const TargetInstrInfo *TII;
+
+ SmallVector<unsigned, 16> ResourceFactors;
+ unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
+ unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor.
+public:
+ TargetSchedModel(): STI(0), TII(0) {}
+
+ /// \brief Initialize the machine model for instruction scheduling.
+ ///
+ /// The machine model API keeps a copy of the top-level MCSchedModel table
+ /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
+ /// dynamic properties.
+ void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
+ const TargetInstrInfo *tii);
+
+ /// Return the MCSchedClassDesc for this instruction.
+ const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
+
+ /// \brief TargetInstrInfo getter.
+ const TargetInstrInfo *getInstrInfo() const { return TII; }
+
+ /// \brief Return true if this machine model includes an instruction-level
+ /// scheduling model.
+ ///
+ /// This is more detailed than the coarse grain IssueWidth and default
+ /// latency properties, but separate from the per-cycle itinerary data.
+ bool hasInstrSchedModel() const;
+
+ const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
+
+ /// \brief Return true if this machine model includes cycle-to-cycle itinerary
+ /// data.
+ ///
+ /// This models scheduling at each stage in the processor pipeline.
+ bool hasInstrItineraries() const;
+
+ const InstrItineraryData *getInstrItineraries() const {
+ if (hasInstrItineraries())
+ return &InstrItins;
+ return 0;
+ }
+
+ /// \brief Identify the processor corresponding to the current subtarget.
+ unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
+
+ /// \brief Maximum number of micro-ops that may be scheduled per cycle.
+ unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
+
+ /// \brief Return the number of issue slots required for this MI.
+ unsigned getNumMicroOps(const MachineInstr *MI,
+ const MCSchedClassDesc *SC = 0) const;
+
+ /// \brief Get the number of kinds of resources for this target.
+ unsigned getNumProcResourceKinds() const {
+ return SchedModel.getNumProcResourceKinds();
+ }
+
+ /// \brief Get a processor resource by ID for convenience.
+ const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
+ return SchedModel.getProcResource(PIdx);
+ }
+
+ typedef const MCWriteProcResEntry *ProcResIter;
+
+ // \brief Get an iterator into the processor resources consumed by this
+ // scheduling class.
+ ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
+ // The subtarget holds a single resource table for all processors.
+ return STI->getWriteProcResBegin(SC);
+ }
+ ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
+ return STI->getWriteProcResEnd(SC);
+ }
+
+ /// \brief Multiply the number of units consumed for a resource by this factor
+ /// to normalize it relative to other resources.
+ unsigned getResourceFactor(unsigned ResIdx) const {
+ return ResourceFactors[ResIdx];
+ }
+
+ /// \brief Multiply number of micro-ops by this factor to normalize it
+ /// relative to other resources.
+ unsigned getMicroOpFactor() const {
+ return MicroOpFactor;
+ }
+
+ /// \brief Multiply cycle count by this factor to normalize it relative to
+ /// other resources. This is the number of resource units per cycle.
+ unsigned getLatencyFactor() const {
+ return ResourceLCM;
+ }
+
+ /// \brief Compute operand latency based on the available machine model.
+ ///
+ /// Compute and return the latency of the given data-dependent def and use
+ /// when the operand indices are already known. UseMI may be NULL for an
+ /// unknown user.
+ ///
+ /// FindMin may be set to get the minimum vs. expected latency. Minimum
+ /// latency is used for scheduling groups, while expected latency is for
+ /// instruction cost and critical path.
+ unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
+ const MachineInstr *UseMI, unsigned UseOperIdx,
+ bool FindMin) const;
+
+ /// \brief Compute the instruction latency based on the available machine
+ /// model.
+ ///
+ /// Compute and return the expected latency of this instruction independent of
+ /// a particular use. computeOperandLatency is the preferred API, but this is
+ /// occasionally useful to help estimate instruction cost.
+ unsigned computeInstrLatency(const MachineInstr *MI) const;
+
+ /// \brief Output dependency latency of a pair of defs of the same register.
+ ///
+ /// This is typically one cycle.
+ unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *DepMI) const;
+
+private:
+ /// getDefLatency is a helper for computeOperandLatency. Return the
+ /// instruction's latency if operand lookup is not required.
+ /// Otherwise return -1.
+ int getDefLatency(const MachineInstr *DefMI, bool FindMin) const;
+};
+
+} // namespace llvm
+
+#endif
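A sketch of how a CodeGen pass might drive the new model; the operand indices and the DefMI/UseMI instructions are placeholders for whatever the calling pass has at hand:

    #include "llvm/CodeGen/TargetSchedule.h"
    using namespace llvm;

    // Expected (not minimum) latency of DefMI's result feeding UseMI.
    static unsigned expectedLatency(const MCSchedModel &SM,
                                    const TargetSubtargetInfo *STI,
                                    const TargetInstrInfo *TII,
                                    const MachineInstr *DefMI,
                                    const MachineInstr *UseMI) {
      TargetSchedModel Model;
      Model.init(SM, STI, TII); // copies the MCSchedModel internally
      return Model.computeOperandLatency(DefMI, /*DefOperIdx=*/0,
                                         UseMI, /*UseOperIdx=*/1,
                                         /*FindMin=*/false);
    }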
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
index eb38cd3..2401992 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -56,50 +56,56 @@ namespace llvm {
FIRST_FP_VALUETYPE = f16,
LAST_FP_VALUETYPE = ppcf128,
- v2i8 = 13, // 2 x i8
- v4i8 = 14, // 4 x i8
- v8i8 = 15, // 8 x i8
- v16i8 = 16, // 16 x i8
- v32i8 = 17, // 32 x i8
- v2i16 = 18, // 2 x i16
- v4i16 = 19, // 4 x i16
- v8i16 = 20, // 8 x i16
- v16i16 = 21, // 16 x i16
- v2i32 = 22, // 2 x i32
- v4i32 = 23, // 4 x i32
- v8i32 = 24, // 8 x i32
- v16i32 = 25, // 16 x i32
- v1i64 = 26, // 1 x i64
- v2i64 = 27, // 2 x i64
- v4i64 = 28, // 4 x i64
- v8i64 = 29, // 8 x i64
- v16i64 = 30, // 16 x i64
-
- v2f16 = 31, // 2 x f16
- v2f32 = 32, // 2 x f32
- v4f32 = 33, // 4 x f32
- v8f32 = 34, // 8 x f32
- v2f64 = 35, // 2 x f64
- v4f64 = 36, // 4 x f64
-
- FIRST_VECTOR_VALUETYPE = v2i8,
+ v2i1 = 13, // 2 x i1
+ v4i1 = 14, // 4 x i1
+ v8i1 = 15, // 8 x i1
+ v16i1 = 16, // 16 x i1
+ v2i8 = 17, // 2 x i8
+ v4i8 = 18, // 4 x i8
+ v8i8 = 19, // 8 x i8
+ v16i8 = 20, // 16 x i8
+ v32i8 = 21, // 32 x i8
+ v1i16 = 22, // 1 x i16
+ v2i16 = 23, // 2 x i16
+ v4i16 = 24, // 4 x i16
+ v8i16 = 25, // 8 x i16
+ v16i16 = 26, // 16 x i16
+ v1i32 = 27, // 1 x i32
+ v2i32 = 28, // 2 x i32
+ v4i32 = 29, // 4 x i32
+ v8i32 = 30, // 8 x i32
+ v16i32 = 31, // 16 x i32
+ v1i64 = 32, // 1 x i64
+ v2i64 = 33, // 2 x i64
+ v4i64 = 34, // 4 x i64
+ v8i64 = 35, // 8 x i64
+ v16i64 = 36, // 16 x i64
+
+ v2f16 = 37, // 2 x f16
+ v2f32 = 38, // 2 x f32
+ v4f32 = 39, // 4 x f32
+ v8f32 = 40, // 8 x f32
+ v2f64 = 41, // 2 x f64
+ v4f64 = 42, // 4 x f64
+
+ FIRST_VECTOR_VALUETYPE = v2i1,
LAST_VECTOR_VALUETYPE = v4f64,
- FIRST_INTEGER_VECTOR_VALUETYPE = v2i8,
+ FIRST_INTEGER_VECTOR_VALUETYPE = v2i1,
LAST_INTEGER_VECTOR_VALUETYPE = v16i64,
FIRST_FP_VECTOR_VALUETYPE = v2f16,
LAST_FP_VECTOR_VALUETYPE = v4f64,
- x86mmx = 37, // This is an X86 MMX value
+ x86mmx = 43, // This is an X86 MMX value
- Glue = 38, // This glues nodes together during pre-RA sched
+ Glue = 44, // This glues nodes together during pre-RA sched
- isVoid = 39, // This has no value
+ isVoid = 45, // This has no value
- Untyped = 40, // This value takes a register, but has
+ Untyped = 46, // This value takes a register, but has
// unspecified type. The register class
// will be determined by the opcode.
- LAST_VALUETYPE = 41, // This always remains at the end of the list.
+ LAST_VALUETYPE = 47, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
@@ -175,6 +181,18 @@ namespace llvm {
SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
}
+ /// is16BitVector - Return true if this is a 16-bit vector type.
+ bool is16BitVector() const {
+ return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 ||
+ SimpleTy == MVT::v16i1);
+ }
+
+ /// is32BitVector - Return true if this is a 32-bit vector type.
+ bool is32BitVector() const {
+ return (SimpleTy == MVT::v4i8 || SimpleTy == MVT::v2i16 ||
+ SimpleTy == MVT::v1i32);
+ }
+
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 ||
@@ -233,15 +251,21 @@ namespace llvm {
switch (SimpleTy) {
default:
llvm_unreachable("Not a vector MVT!");
+ case v2i1 :
+ case v4i1 :
+ case v8i1 :
+ case v16i1: return i1;
case v2i8 :
case v4i8 :
case v8i8 :
case v16i8:
case v32i8: return i8;
+ case v1i16:
case v2i16:
case v4i16:
case v8i16:
case v16i16: return i16;
+ case v1i32:
case v2i32:
case v4i32:
case v8i32:
@@ -265,21 +289,25 @@ namespace llvm {
default:
llvm_unreachable("Not a vector MVT!");
case v32i8: return 32;
+ case v16i1:
case v16i8:
case v16i16:
case v16i32:
case v16i64:return 16;
+ case v8i1:
case v8i8 :
case v8i16:
case v8i32:
case v8i64:
case v8f32: return 8;
+ case v4i1:
case v4i8:
case v4i16:
case v4i32:
case v4i64:
case v4f32:
case v4f64: return 4;
+ case v2i1:
case v2i8:
case v2i16:
case v2i32:
@@ -287,6 +315,8 @@ namespace llvm {
case v2f16:
case v2f32:
case v2f64: return 2;
+ case v1i16:
+ case v1i32:
case v1i64: return 1;
}
}
@@ -302,15 +332,21 @@ namespace llvm {
default:
llvm_unreachable("getSizeInBits called on extended MVT.");
case i1 : return 1;
- case i8 : return 8;
+ case v2i1: return 2;
+ case v4i1: return 4;
+ case i8 :
+ case v8i1: return 8;
case i16 :
case f16:
- case v2i8: return 16;
+ case v16i1:
+ case v2i8:
+ case v1i16: return 16;
case f32 :
case i32 :
case v4i8:
case v2i16:
- case v2f16: return 32;
+ case v2f16:
+ case v1i32: return 32;
case x86mmx:
case f64 :
case i64 :
@@ -393,6 +429,12 @@ namespace llvm {
switch (VT.SimpleTy) {
default:
break;
+ case MVT::i1:
+ if (NumElements == 2) return MVT::v2i1;
+ if (NumElements == 4) return MVT::v4i1;
+ if (NumElements == 8) return MVT::v8i1;
+ if (NumElements == 16) return MVT::v16i1;
+ break;
case MVT::i8:
if (NumElements == 2) return MVT::v2i8;
if (NumElements == 4) return MVT::v4i8;
@@ -401,12 +443,14 @@ namespace llvm {
if (NumElements == 32) return MVT::v32i8;
break;
case MVT::i16:
+ if (NumElements == 1) return MVT::v1i16;
if (NumElements == 2) return MVT::v2i16;
if (NumElements == 4) return MVT::v4i16;
if (NumElements == 8) return MVT::v8i16;
if (NumElements == 16) return MVT::v16i16;
break;
case MVT::i32:
+ if (NumElements == 1) return MVT::v1i32;
if (NumElements == 2) return MVT::v2i32;
if (NumElements == 4) return MVT::v4i32;
if (NumElements == 8) return MVT::v8i32;
@@ -529,6 +573,16 @@ namespace llvm {
return isSimple() ? V.isVector() : isExtendedVector();
}
+ /// is16BitVector - Return true if this is a 16-bit vector type.
+ bool is16BitVector() const {
+ return isSimple() ? V.is16BitVector() : isExtended16BitVector();
+ }
+
+ /// is32BitVector - Return true if this is a 32-bit vector type.
+ bool is32BitVector() const {
+ return isSimple() ? V.is32BitVector() : isExtended32BitVector();
+ }
+
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
return isSimple() ? V.is64BitVector() : isExtended64BitVector();
@@ -740,6 +794,8 @@ namespace llvm {
bool isExtendedFloatingPoint() const;
bool isExtendedInteger() const;
bool isExtendedVector() const;
+ bool isExtended16BitVector() const;
+ bool isExtended32BitVector() const;
bool isExtended64BitVector() const;
bool isExtended128BitVector() const;
bool isExtended256BitVector() const;
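A small sketch exercising the new i1 and single-element vector types; the expected values follow directly from the getVectorVT and getSizeInBits cases added above:

    #include "llvm/CodeGen/ValueTypes.h"
    #include <cassert>
    using namespace llvm;

    void checkNewVectorTypes() {
      MVT M = MVT::getVectorVT(MVT::i1, 8);
      assert(M.SimpleTy == MVT::v8i1);  // new i1 vector type
      assert(M.getSizeInBits() == 8);   // eight 1-bit lanes

      MVT S = MVT::getVectorVT(MVT::i16, 1);
      assert(S.SimpleTy == MVT::v1i16); // new single-element vector
      assert(S.getVectorElementType().SimpleTy == MVT::i16);
    }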
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
index f4b75bd..a707f88 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -33,36 +33,42 @@ def f80 : ValueType<80 , 10>; // 80-bit floating point value
def f128 : ValueType<128, 11>; // 128-bit floating point value
def ppcf128: ValueType<128, 12>; // PPC 128-bit floating point value
-def v2i8 : ValueType<16 , 13>; // 2 x i8 vector value
-def v4i8 : ValueType<32 , 14>; // 4 x i8 vector value
-def v8i8 : ValueType<64 , 15>; // 8 x i8 vector value
-def v16i8 : ValueType<128, 16>; // 16 x i8 vector value
-def v32i8 : ValueType<256, 17>; // 32 x i8 vector value
-def v2i16 : ValueType<32 , 18>; // 2 x i16 vector value
-def v4i16 : ValueType<64 , 19>; // 4 x i16 vector value
-def v8i16 : ValueType<128, 20>; // 8 x i16 vector value
-def v16i16 : ValueType<256, 21>; // 16 x i16 vector value
-def v2i32 : ValueType<64 , 22>; // 2 x i32 vector value
-def v4i32 : ValueType<128, 23>; // 4 x i32 vector value
-def v8i32 : ValueType<256, 24>; // 8 x i32 vector value
-def v16i32 : ValueType<512, 25>; // 16 x i32 vector value
-def v1i64 : ValueType<64 , 26>; // 1 x i64 vector value
-def v2i64 : ValueType<128, 27>; // 2 x i64 vector value
-def v4i64 : ValueType<256, 28>; // 4 x i64 vector value
-def v8i64 : ValueType<512, 29>; // 8 x i64 vector value
-def v16i64 : ValueType<1024,30>; // 16 x i64 vector value
+def v2i1 : ValueType<2 , 13>; // 2 x i1 vector value
+def v4i1 : ValueType<4 , 14>; // 4 x i1 vector value
+def v8i1 : ValueType<8 , 15>; // 8 x i1 vector value
+def v16i1 : ValueType<16, 16>; // 16 x i1 vector value
+def v2i8 : ValueType<16 , 17>; // 2 x i8 vector value
+def v4i8 : ValueType<32 , 18>; // 4 x i8 vector value
+def v8i8 : ValueType<64 , 19>; // 8 x i8 vector value
+def v16i8 : ValueType<128, 20>; // 16 x i8 vector value
+def v32i8 : ValueType<256, 21>; // 32 x i8 vector value
+def v1i16 : ValueType<16 , 22>; // 1 x i16 vector value
+def v2i16 : ValueType<32 , 23>; // 2 x i16 vector value
+def v4i16 : ValueType<64 , 24>; // 4 x i16 vector value
+def v8i16 : ValueType<128, 25>; // 8 x i16 vector value
+def v16i16 : ValueType<256, 26>; // 16 x i16 vector value
+def v1i32 : ValueType<32 , 27>; // 1 x i32 vector value
+def v2i32 : ValueType<64 , 28>; // 2 x i32 vector value
+def v4i32 : ValueType<128, 29>; // 4 x i32 vector value
+def v8i32 : ValueType<256, 30>; // 8 x i32 vector value
+def v16i32 : ValueType<512, 31>; // 16 x i32 vector value
+def v1i64 : ValueType<64 , 32>; // 1 x i64 vector value
+def v2i64 : ValueType<128, 33>; // 2 x i64 vector value
+def v4i64 : ValueType<256, 34>; // 4 x i64 vector value
+def v8i64 : ValueType<512, 35>; // 8 x i64 vector value
+def v16i64 : ValueType<1024,36>; // 16 x i64 vector value
-def v2f16 : ValueType<32 , 31>; // 2 x f16 vector value
-def v2f32 : ValueType<64 , 32>; // 2 x f32 vector value
-def v4f32 : ValueType<128, 33>; // 4 x f32 vector value
-def v8f32 : ValueType<256, 34>; // 8 x f32 vector value
-def v2f64 : ValueType<128, 35>; // 2 x f64 vector value
-def v4f64 : ValueType<256, 36>; // 4 x f64 vector value
+def v2f16 : ValueType<32 , 37>; // 2 x f16 vector value
+def v2f32 : ValueType<64 , 38>; // 2 x f32 vector value
+def v4f32 : ValueType<128, 39>; // 4 x f32 vector value
+def v8f32 : ValueType<256, 40>; // 8 x f32 vector value
+def v2f64 : ValueType<128, 41>; // 2 x f64 vector value
+def v4f64 : ValueType<256, 42>; // 4 x f64 vector value
-def x86mmx : ValueType<64 , 37>; // X86 MMX value
-def FlagVT : ValueType<0 , 38>; // Pre-RA sched glue
-def isVoid : ValueType<0 , 39>; // Produces no value
-def untyped: ValueType<8 , 40>; // Produces an untyped value
+def x86mmx : ValueType<64 , 43>; // X86 MMX value
+def FlagVT : ValueType<0 , 44>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 45>; // Produces no value
+def untyped: ValueType<8 , 46>; // Produces an untyped value
def MetadataVT: ValueType<0, 250>; // Metadata
diff --git a/contrib/llvm/include/llvm/Constant.h b/contrib/llvm/include/llvm/Constant.h
index e0e516d..0ddd1db 100644
--- a/contrib/llvm/include/llvm/Constant.h
+++ b/contrib/llvm/include/llvm/Constant.h
@@ -39,8 +39,8 @@ namespace llvm {
/// don't have to worry about the lifetime of the objects.
/// @brief LLVM Constant Representation
class Constant : public User {
- void operator=(const Constant &); // Do not implement
- Constant(const Constant &); // Do not implement
+ void operator=(const Constant &) LLVM_DELETED_FUNCTION;
+ Constant(const Constant &) LLVM_DELETED_FUNCTION;
virtual void anchor();
protected:
@@ -65,6 +65,9 @@ public:
/// true for things like constant expressions that could divide by zero.
bool canTrap() const;
+ /// isThreadDependent - Return true if the value can vary between threads.
+ bool isThreadDependent() const;
+
/// isConstantUsed - Return true if the constant has users other than constant
/// exprs and other dangling things.
bool isConstantUsed() const;
@@ -108,8 +111,6 @@ public:
virtual void destroyConstant() { llvm_unreachable("Not reached!"); }
//// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Constant *) { return true; }
- static inline bool classof(const GlobalValue *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() >= ConstantFirstVal &&
V->getValueID() <= ConstantLastVal;
diff --git a/contrib/llvm/include/llvm/Constants.h b/contrib/llvm/include/llvm/Constants.h
index fdd5382..7f94ef4 100644
--- a/contrib/llvm/include/llvm/Constants.h
+++ b/contrib/llvm/include/llvm/Constants.h
@@ -49,8 +49,8 @@ struct ConvertConstantType;
/// @brief Class for constant integers.
class ConstantInt : public Constant {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- ConstantInt(const ConstantInt &); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ ConstantInt(const ConstantInt &) LLVM_DELETED_FUNCTION;
ConstantInt(IntegerType *Ty, const APInt& V);
APInt Val;
protected:
@@ -221,7 +221,6 @@ public:
}
/// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const ConstantInt *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantIntVal;
}
@@ -234,8 +233,8 @@ public:
class ConstantFP : public Constant {
APFloat Val;
virtual void anchor();
- void *operator new(size_t, unsigned);// DO NOT IMPLEMENT
- ConstantFP(const ConstantFP &); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ ConstantFP(const ConstantFP &) LLVM_DELETED_FUNCTION;
friend class LLVMContextImpl;
protected:
ConstantFP(Type *Ty, const APFloat& V);
@@ -283,15 +282,11 @@ public:
bool isExactlyValue(double V) const {
bool ignored;
- // convert is not supported on this type
- if (&Val.getSemantics() == &APFloat::PPCDoubleDouble)
- return false;
APFloat FV(V);
FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
return isExactlyValue(FV);
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ConstantFP *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantFPVal;
}
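With the PPCDoubleDouble guard dropped (APFloat's convert presumably handles those semantics now), the helper is a plain convert-and-compare; a usage sketch, with V a placeholder for some Value*:

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
      if (CFP->isExactlyValue(1.0)) {
        // V is an FP constant exactly equal to 1.0; the double argument is
        // converted into the constant's own semantics before comparing, so
        // this works uniformly for f32, f64, and the other formats.
      }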
@@ -301,8 +296,8 @@ public:
/// ConstantAggregateZero - All zero aggregate value
///
class ConstantAggregateZero : public Constant {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- ConstantAggregateZero(const ConstantAggregateZero &); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ ConstantAggregateZero(const ConstantAggregateZero &) LLVM_DELETED_FUNCTION;
protected:
explicit ConstantAggregateZero(Type *ty)
: Constant(ty, ConstantAggregateZeroVal, 0, 0) {}
@@ -334,7 +329,6 @@ public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
///
- static bool classof(const ConstantAggregateZero *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantAggregateZeroVal;
}
@@ -346,7 +340,7 @@ public:
///
class ConstantArray : public Constant {
friend struct ConstantArrayCreator<ConstantArray, ArrayType>;
- ConstantArray(const ConstantArray &); // DO NOT IMPLEMENT
+ ConstantArray(const ConstantArray &) LLVM_DELETED_FUNCTION;
protected:
ConstantArray(ArrayType *T, ArrayRef<Constant *> Val);
public:
@@ -367,7 +361,6 @@ public:
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ConstantArray *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantArrayVal;
}
@@ -385,7 +378,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantArray, Constant)
//
class ConstantStruct : public Constant {
friend struct ConstantArrayCreator<ConstantStruct, StructType>;
- ConstantStruct(const ConstantStruct &); // DO NOT IMPLEMENT
+ ConstantStruct(const ConstantStruct &) LLVM_DELETED_FUNCTION;
protected:
ConstantStruct(StructType *T, ArrayRef<Constant *> Val);
public:
@@ -426,7 +419,6 @@ public:
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ConstantStruct *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantStructVal;
}
@@ -445,7 +437,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantStruct, Constant)
///
class ConstantVector : public Constant {
friend struct ConstantArrayCreator<ConstantVector, VectorType>;
- ConstantVector(const ConstantVector &); // DO NOT IMPLEMENT
+ ConstantVector(const ConstantVector &) LLVM_DELETED_FUNCTION;
protected:
ConstantVector(VectorType *T, ArrayRef<Constant *> Val);
public:
@@ -474,7 +466,6 @@ public:
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ConstantVector *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantVectorVal;
}
@@ -491,8 +482,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantVector, Constant)
/// ConstantPointerNull - a constant pointer value that points to null
///
class ConstantPointerNull : public Constant {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- ConstantPointerNull(const ConstantPointerNull &); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ ConstantPointerNull(const ConstantPointerNull &) LLVM_DELETED_FUNCTION;
protected:
explicit ConstantPointerNull(PointerType *T)
: Constant(reinterpret_cast<Type*>(T),
@@ -517,7 +508,6 @@ public:
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ConstantPointerNull *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantPointerNullVal;
}
@@ -543,8 +533,8 @@ class ConstantDataSequential : public Constant {
/// element array of i8, or a 1-element array of i32. They'll both end up in
/// the same StringMap bucket, linked up.
ConstantDataSequential *Next;
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- ConstantDataSequential(const ConstantDataSequential &); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ ConstantDataSequential(const ConstantDataSequential &) LLVM_DELETED_FUNCTION;
protected:
explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data)
: Constant(ty, VT, 0, 0), DataElements(Data), Next(0) {}
@@ -639,7 +629,6 @@ public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
///
- static bool classof(const ConstantDataSequential *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantDataArrayVal ||
V->getValueID() == ConstantDataVectorVal;
@@ -655,8 +644,8 @@ private:
/// operands because it stores all of the elements of the constant as densely
/// packed data, instead of as Value*'s.
class ConstantDataArray : public ConstantDataSequential {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- ConstantDataArray(const ConstantDataArray &); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ ConstantDataArray(const ConstantDataArray &) LLVM_DELETED_FUNCTION;
virtual void anchor();
friend class ConstantDataSequential;
explicit ConstantDataArray(Type *ty, const char *Data)
@@ -695,7 +684,6 @@ public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
///
- static bool classof(const ConstantDataArray *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantDataArrayVal;
}
@@ -708,8 +696,8 @@ public:
/// operands because it stores all of the elements of the constant as densely
/// packed data, instead of as Value*'s.
class ConstantDataVector : public ConstantDataSequential {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- ConstantDataVector(const ConstantDataVector &); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ ConstantDataVector(const ConstantDataVector &) LLVM_DELETED_FUNCTION;
virtual void anchor();
friend class ConstantDataSequential;
explicit ConstantDataVector(Type *ty, const char *Data)
@@ -749,7 +737,6 @@ public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
///
- static bool classof(const ConstantDataVector *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == ConstantDataVectorVal;
}
@@ -760,7 +747,7 @@ public:
/// BlockAddress - The address of a basic block.
///
class BlockAddress : public Constant {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void *operator new(size_t s) { return User::operator new(s, 2); }
BlockAddress(Function *F, BasicBlock *BB);
public:
@@ -781,7 +768,6 @@ public:
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const BlockAddress *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == BlockAddressVal;
}
@@ -1094,7 +1080,6 @@ public:
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ConstantExpr *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == ConstantExprVal;
}
@@ -1125,8 +1110,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant)
/// LangRef.html#undefvalues for details.
///
class UndefValue : public Constant {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- UndefValue(const UndefValue &); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ UndefValue(const UndefValue &) LLVM_DELETED_FUNCTION;
protected:
explicit UndefValue(Type *T) : Constant(T, UndefValueVal, 0, 0) {}
protected:
@@ -1159,7 +1144,6 @@ public:
virtual void destroyConstant();
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UndefValue *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == UndefValueVal;
}
diff --git a/contrib/llvm/include/llvm/DIBuilder.h b/contrib/llvm/include/llvm/DIBuilder.h
index 2ed48a9..2f07800 100644
--- a/contrib/llvm/include/llvm/DIBuilder.h
+++ b/contrib/llvm/include/llvm/DIBuilder.h
@@ -63,8 +63,8 @@ namespace llvm {
SmallVector<Value *, 4> AllSubprograms;
SmallVector<Value *, 4> AllGVs;
- DIBuilder(const DIBuilder &); // DO NOT IMPLEMENT
- void operator=(const DIBuilder &); // DO NOT IMPLEMENT
+ DIBuilder(const DIBuilder &) LLVM_DELETED_FUNCTION;
+ void operator=(const DIBuilder &) LLVM_DELETED_FUNCTION;
public:
explicit DIBuilder(Module &M);
@@ -179,8 +179,10 @@ namespace llvm {
/// @param Ty Parent type.
/// @param PropertyName Name of the Objective C property associated with
/// this ivar.
- /// @param GetterName Name of the Objective C property getter selector.
- /// @param SetterName Name of the Objective C property setter selector.
+ /// @param PropertyGetterName Name of the Objective C property getter
+ /// selector.
+ /// @param PropertySetterName Name of the Objective C property setter
+ /// selector.
/// @param PropertyAttributes Objective C property attributes.
DIType createObjCIVar(StringRef Name, DIFile File,
unsigned LineNo, uint64_t SizeInBits,
@@ -201,7 +203,7 @@ namespace llvm {
/// @param OffsetInBits Member offset.
/// @param Flags Flags to encode member attribute, e.g. private
/// @param Ty Parent type.
- /// @param Property Property associated with this ivar.
+ /// @param PropertyNode Property associated with this ivar.
DIType createObjCIVar(StringRef Name, DIFile File,
unsigned LineNo, uint64_t SizeInBits,
uint64_t AlignInBits, uint64_t OffsetInBits,
@@ -228,7 +230,7 @@ namespace llvm {
/// @param Scope Scope in which this class is defined.
/// @param Name class name.
/// @param File File where this member is defined.
- /// @param LineNo Line number.
+ /// @param LineNumber Line number.
/// @param SizeInBits Member size.
/// @param AlignInBits Member alignment.
/// @param OffsetInBits Member offset.
@@ -250,7 +252,7 @@ namespace llvm {
/// @param Scope Scope in which this struct is defined.
/// @param Name Struct name.
/// @param File File where this member is defined.
- /// @param LineNo Line number.
+ /// @param LineNumber Line number.
/// @param SizeInBits Member size.
/// @param AlignInBits Member alignment.
/// @param Flags Flags to encode member attribute, e.g. private
@@ -265,7 +267,7 @@ namespace llvm {
/// @param Scope Scope in which this union is defined.
/// @param Name Union name.
/// @param File File where this member is defined.
- /// @param LineNo Line number.
+ /// @param LineNumber Line number.
/// @param SizeInBits Member size.
/// @param AlignInBits Member alignment.
/// @param Flags Flags to encode member attribute, e.g. private
@@ -325,33 +327,36 @@ namespace llvm {
/// @param Scope Scope in which this enumeration is defined.
/// @param Name Union name.
/// @param File File where this member is defined.
- /// @param LineNo Line number.
+ /// @param LineNumber Line number.
/// @param SizeInBits Member size.
/// @param AlignInBits Member alignment.
/// @param Elements Enumeration elements.
- /// @param Flags Flags (e.g. forward decl)
DIType createEnumerationType(DIDescriptor Scope, StringRef Name,
DIFile File, unsigned LineNumber,
uint64_t SizeInBits, uint64_t AlignInBits,
- DIArray Elements, DIType ClassType,
- unsigned Flags);
+ DIArray Elements, DIType ClassType);
/// createSubroutineType - Create subroutine type.
- /// @param File File in which this subroutine is defined.
- /// @param ParamterTypes An array of subroutine parameter types. This
- /// includes return type at 0th index.
+ /// @param File File in which this subroutine is defined.
+ /// @param ParameterTypes An array of subroutine parameter types. This
+ /// includes return type at 0th index.
DIType createSubroutineType(DIFile File, DIArray ParameterTypes);
/// createArtificialType - Create a new DIType with "artificial" flag set.
DIType createArtificialType(DIType Ty);
+ /// createObjectPointerType - Create a new DIType with the "object pointer"
+ /// flag set.
+ DIType createObjectPointerType(DIType Ty);
+
/// createTemporaryType - Create a temporary forward-declared type.
DIType createTemporaryType();
DIType createTemporaryType(DIFile F);
/// createForwardDecl - Create a temporary forward-declared type.
DIType createForwardDecl(unsigned Tag, StringRef Name, DIDescriptor Scope,
- DIFile F, unsigned Line, unsigned RuntimeLang = 0);
+ DIFile F, unsigned Line, unsigned RuntimeLang = 0,
+ uint64_t SizeInBits = 0, uint64_t AlignInBits = 0);
/// retainType - Retain DIType in a module even if it is not referenced
/// through debug info anchors.
@@ -383,9 +388,9 @@ namespace llvm {
/// createStaticVariable - Create a new descriptor for the specified
/// variable.
- /// @param Conext Variable scope.
+ /// @param Context Variable scope.
/// @param Name Name of the variable.
- /// @param LinakgeName Mangled name of the variable.
+ /// @param LinkageName Mangled name of the variable.
/// @param File File where this variable is defined.
/// @param LineNo Line number.
/// @param Ty Variable Type.
@@ -426,7 +431,7 @@ namespace llvm {
/// DW_TAG_arg_variable.
/// @param Scope Variable scope.
/// @param Name Variable name.
- /// @param File File where this variable is defined.
+ /// @param F File where this variable is defined.
/// @param LineNo Line number.
/// @param Ty Variable Type
/// @param Addr An array of complex address operations.
diff --git a/contrib/llvm/include/llvm/Target/TargetData.h b/contrib/llvm/include/llvm/DataLayout.h
index 4f94ab7..24ad05f 100644
--- a/contrib/llvm/include/llvm/Target/TargetData.h
+++ b/contrib/llvm/include/llvm/DataLayout.h
@@ -1,4 +1,4 @@
-//===-- llvm/Target/TargetData.h - Data size & alignment info ---*- C++ -*-===//
+//===--------- llvm/DataLayout.h - Data size & alignment info ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines target properties related to datatype size/offset/alignment
+// This file defines layout properties related to datatype size/offset/alignment
// information. It uses lazy annotations to cache information about how
// structure types are laid out and used.
//
@@ -17,11 +17,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TARGET_TARGETDATA_H
-#define LLVM_TARGET_TARGETDATA_H
+#ifndef LLVM_DATALAYOUT_H
+#define LLVM_DATALAYOUT_H
#include "llvm/Pass.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -36,7 +37,7 @@ class LLVMContext;
template<typename T>
class ArrayRef;
-/// Enum used to categorize the alignment types stored by TargetAlignElem
+/// Enum used to categorize the alignment types stored by LayoutAlignElem
enum AlignTypeEnum {
INTEGER_ALIGN = 'i', ///< Integer type alignment
VECTOR_ALIGN = 'v', ///< Vector type alignment
@@ -45,38 +46,55 @@ enum AlignTypeEnum {
STACK_ALIGN = 's' ///< Stack objects alignment
};
-/// Target alignment element.
+/// Layout alignment element.
///
-/// Stores the alignment data associated with a given alignment type (pointer,
-/// integer, vector, float) and type bit width.
+/// Stores the alignment data associated with a given alignment type (integer,
+/// vector, float) and type bit width.
///
/// @note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
-struct TargetAlignElem {
- AlignTypeEnum AlignType : 8; ///< Alignment type (AlignTypeEnum)
+struct LayoutAlignElem {
+ unsigned AlignType : 8; ///< Alignment type (AlignTypeEnum)
+ unsigned TypeBitWidth : 24; ///< Type bit width
+ unsigned ABIAlign : 16; ///< ABI alignment for this type/bitw
+ unsigned PrefAlign : 16; ///< Pref. alignment for this type/bitw
+
+ /// Initializer
+ static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width);
+ /// Equality predicate
+ bool operator==(const LayoutAlignElem &rhs) const;
+};
+
+/// Layout pointer alignment element.
+///
+/// Stores the alignment data associated with a given pointer and address space.
+///
+/// @note The unusual order of elements in the structure attempts to reduce
+/// padding and make the structure slightly more cache friendly.
+struct PointerAlignElem {
unsigned ABIAlign; ///< ABI alignment for this type/bitw
unsigned PrefAlign; ///< Pref. alignment for this type/bitw
uint32_t TypeBitWidth; ///< Type bit width
+ uint32_t AddressSpace; ///< Address space for the pointer type
/// Initializer
- static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
+ static PointerAlignElem get(uint32_t addr_space, unsigned abi_align,
unsigned pref_align, uint32_t bit_width);
/// Equality predicate
- bool operator==(const TargetAlignElem &rhs) const;
+ bool operator==(const PointerAlignElem &rhs) const;
};
-/// TargetData - This class holds a parsed version of the target data layout
+
+/// DataLayout - This class holds a parsed version of the target data layout
/// string in a module and provides methods for querying it. The target data
/// layout string is specified *by the target* - a frontend generating LLVM IR
/// is required to generate the right target data for the target being codegen'd
/// to. If some measure of portability is desired, an empty string may be
/// specified in the module.
-class TargetData : public ImmutablePass {
+class DataLayout : public ImmutablePass {
private:
bool LittleEndian; ///< Defaults to false
- unsigned PointerMemSize; ///< Pointer size in bytes
- unsigned PointerABIAlign; ///< Pointer ABI alignment
- unsigned PointerPrefAlign; ///< Pointer preferred alignment
unsigned StackNaturalAlign; ///< Stack natural alignment
SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
@@ -85,13 +103,18 @@ private:
///
/// @sa init().
/// @note Could support multiple size pointer alignments, e.g., 32-bit
- /// pointers vs. 64-bit pointers by extending TargetAlignment, but for now,
+ /// pointers vs. 64-bit pointers by extending LayoutAlignElem, but for now,
/// we don't.
- SmallVector<TargetAlignElem, 16> Alignments;
+ SmallVector<LayoutAlignElem, 16> Alignments;
+ DenseMap<unsigned, PointerAlignElem> Pointers;
/// InvalidAlignmentElem - This member is a signal that a requested alignment
/// type and bit width were not found in the SmallVector.
- static const TargetAlignElem InvalidAlignmentElem;
+ static const LayoutAlignElem InvalidAlignmentElem;
+
+ /// InvalidPointerElem - This member is a signal that a requested pointer
+ /// type and bit width were not found in the DenseMap.
+ static const PointerAlignElem InvalidPointerElem;
// The StructType -> StructLayout map.
mutable void *LayoutMap;
@@ -101,18 +124,31 @@ private:
unsigned pref_align, uint32_t bit_width);
unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
bool ABIAlign, Type *Ty) const;
+
+ //! Set/initialize pointer alignments
+ void setPointerAlignment(uint32_t addr_space, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width);
+
//! Internal helper method that returns requested alignment for type.
unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
/// Valid alignment predicate.
///
- /// Predicate that tests a TargetAlignElem reference returned by get() against
+ /// Predicate that tests a LayoutAlignElem reference returned by get() against
/// InvalidAlignmentElem.
- bool validAlignment(const TargetAlignElem &align) const {
+ bool validAlignment(const LayoutAlignElem &align) const {
return &align != &InvalidAlignmentElem;
}
- /// Initialise a TargetData object with default values, ensure that the
+ /// Valid pointer predicate.
+ ///
+ /// Predicate that tests a PointerAlignElem reference returned by get() against
+ /// InvalidPointerElem.
+ bool validPointer(const PointerAlignElem &align) const {
+ return &align != &InvalidPointerElem;
+ }
+
+ /// Initialise a DataLayout object with default values, ensure that the
/// target data pass is registered.
void init();
@@ -121,43 +157,42 @@ public:
///
/// @note This has to exist, because this is a pass, but it should never be
/// used.
- TargetData();
+ DataLayout();
- /// Constructs a TargetData from a specification string. See init().
- explicit TargetData(StringRef TargetDescription)
+ /// Constructs a DataLayout from a specification string. See init().
+ explicit DataLayout(StringRef LayoutDescription)
: ImmutablePass(ID) {
- std::string errMsg = parseSpecifier(TargetDescription, this);
+ std::string errMsg = parseSpecifier(LayoutDescription, this);
assert(errMsg == "" && "Invalid target data layout string.");
(void)errMsg;
}
/// Parses a target data specification string. Returns an error message
/// if the string is malformed, or the empty string on success. Optionally
- /// initialises a TargetData object if passed a non-null pointer.
- static std::string parseSpecifier(StringRef TargetDescription, TargetData* td = 0);
+ /// initialises a DataLayout object if passed a non-null pointer.
+ static std::string parseSpecifier(StringRef LayoutDescription,
+ DataLayout* td = 0);
/// Initialize target data from properties stored in the module.
- explicit TargetData(const Module *M);
+ explicit DataLayout(const Module *M);
- TargetData(const TargetData &TD) :
+ DataLayout(const DataLayout &TD) :
ImmutablePass(ID),
LittleEndian(TD.isLittleEndian()),
- PointerMemSize(TD.PointerMemSize),
- PointerABIAlign(TD.PointerABIAlign),
- PointerPrefAlign(TD.PointerPrefAlign),
LegalIntWidths(TD.LegalIntWidths),
Alignments(TD.Alignments),
+ Pointers(TD.Pointers),
LayoutMap(0)
{ }
- ~TargetData(); // Not virtual, do not subclass this class
+ ~DataLayout(); // Not virtual, do not subclass this class
- /// Target endianness...
+ /// Layout endianness...
bool isLittleEndian() const { return LittleEndian; }
bool isBigEndian() const { return !LittleEndian; }
/// getStringRepresentation - Return the string representation of the
- /// TargetData. This representation is in the same format accepted by the
+ /// DataLayout. This representation is in the same format accepted by the
/// string constructor above.
std::string getStringRepresentation() const;
@@ -195,15 +230,42 @@ public:
return false;
}
- /// Target pointer alignment
- unsigned getPointerABIAlignment() const { return PointerABIAlign; }
+ /// Layout pointer alignment
+ /// FIXME: The defaults need to be removed once all of
+ /// the backends/clients are updated.
+ unsigned getPointerABIAlignment(unsigned AS = 0) const {
+ DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
+ if (val == Pointers.end()) {
+ val = Pointers.find(0);
+ }
+ return val->second.ABIAlign;
+ }
/// Return target's alignment for stack-based pointers
- unsigned getPointerPrefAlignment() const { return PointerPrefAlign; }
- /// Target pointer size
- unsigned getPointerSize() const { return PointerMemSize; }
- /// Target pointer size, in bits
- unsigned getPointerSizeInBits() const { return 8*PointerMemSize; }
-
+ /// FIXME: The defaults need to be removed once all of
+ /// the backends/clients are updated.
+ unsigned getPointerPrefAlignment(unsigned AS = 0) const {
+ DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
+ if (val == Pointers.end()) {
+ val = Pointers.find(0);
+ }
+ return val->second.PrefAlign;
+ }
+ /// Layout pointer size
+ /// FIXME: The defaults need to be removed once all of
+ /// the backends/clients are updated.
+ unsigned getPointerSize(unsigned AS = 0) const {
+ DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
+ if (val == Pointers.end()) {
+ val = Pointers.find(0);
+ }
+ return val->second.TypeBitWidth;
+ }
+ /// Layout pointer size, in bits
+ /// FIXME: The defaults need to be removed once all of
+ /// the backends/clients are updated.
+ unsigned getPointerSizeInBits(unsigned AS = 0) const {
+ return getPointerSize(AS) * 8;
+ }
/// Size examples:
///
/// Type SizeInBits StoreSizeInBits AllocSizeInBits[*]
@@ -279,10 +341,14 @@ public:
///
unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
- /// getIntPtrType - Return an unsigned integer type that is the same size or
- /// greater to the host pointer size.
- ///
- IntegerType *getIntPtrType(LLVMContext &C) const;
+ /// getIntPtrType - Return an integer type with size at least as big as that
+ /// of a pointer in the given address space.
+ IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
+
+ /// getIntPtrType - Return an integer (vector of integer) type with size at
+ /// least as big as that of a pointer of the given pointer (vector of pointer)
+ /// type.
+ Type *getIntPtrType(Type *) const;
/// getIndexedOffset - return the offset from the beginning of the type for
/// the specified indices. This is used to implement getelementptr.
@@ -318,7 +384,7 @@ public:
};
/// StructLayout - used to lazily calculate structure layout information for a
-/// target machine, based on the TargetData structure.
+/// target machine, based on the DataLayout structure.
///
class StructLayout {
uint64_t StructSize;
@@ -354,8 +420,8 @@ public:
}
private:
- friend class TargetData; // Only TargetData can create this class
- StructLayout(StructType *ST, const TargetData &TD);
+ friend class DataLayout; // Only DataLayout can create this class
+ StructLayout(StructType *ST, const DataLayout &TD);
};
} // End llvm namespace
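A sketch of the address-space-aware queries; the p1:32:32:32 specifier syntax for a non-zero address space is an assumption here, and unknown address spaces fall back to the address-space-0 entry per the lookup code above:

    #include "llvm/DataLayout.h"
    using namespace llvm;

    void queryLayout() {
      // 64-bit pointers in address space 0, 32-bit pointers in space 1.
      DataLayout DL("e-p:64:64:64-p1:32:32:32");
      unsigned A0 = DL.getPointerABIAlignment();  // address space 0
      unsigned A1 = DL.getPointerABIAlignment(1); // address space 1
      unsigned B7 = DL.getPointerSizeInBits(7);   // no entry: falls back to AS 0
      (void)A0; (void)A1; (void)B7;
    }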
diff --git a/contrib/llvm/include/llvm/DebugInfo.h b/contrib/llvm/include/llvm/DebugInfo.h
index 618220f..dae03ad 100644
--- a/contrib/llvm/include/llvm/DebugInfo.h
+++ b/contrib/llvm/include/llvm/DebugInfo.h
@@ -60,7 +60,8 @@ namespace llvm {
FlagArtificial = 1 << 6,
FlagExplicit = 1 << 7,
FlagPrototyped = 1 << 8,
- FlagObjcClassComplete = 1 << 9
+ FlagObjcClassComplete = 1 << 9,
+ FlagObjectPointer = 1 << 10
};
protected:
const MDNode *DbgNode;
@@ -80,6 +81,7 @@ namespace llvm {
GlobalVariable *getGlobalVariableField(unsigned Elt) const;
Constant *getConstantField(unsigned Elt) const;
Function *getFunctionField(unsigned Elt) const;
+ void replaceFunctionField(unsigned Elt, Function *F);
public:
explicit DIDescriptor() : DbgNode(0) {}
@@ -287,6 +289,9 @@ namespace llvm {
bool isArtificial() const {
return (getFlags() & FlagArtificial) != 0;
}
+ bool isObjectPointer() const {
+ return (getFlags() & FlagObjectPointer) != 0;
+ }
bool isObjcClassComplete() const {
return (getFlags() & FlagObjcClassComplete) != 0;
}
@@ -558,6 +563,7 @@ namespace llvm {
bool describes(const Function *F);
Function *getFunction() const { return getFunctionField(16); }
+ void replaceFunction(Function *F) { replaceFunctionField(16, F); }
DIArray getTemplateParams() const { return getFieldAs<DIArray>(17); }
DISubprogram getFunctionDeclaration() const {
return getFieldAs<DISubprogram>(18);
@@ -644,6 +650,10 @@ namespace llvm {
return (getUnsignedField(6) & FlagArtificial) != 0;
}
+ bool isObjectPointer() const {
+ return (getUnsignedField(6) & FlagObjectPointer) != 0;
+ }
+
/// getInlinedAt - If this variable is inlined then return inline location.
MDNode *getInlinedAt() const;
diff --git a/contrib/llvm/include/llvm/DebugInfo/DIContext.h b/contrib/llvm/include/llvm/DebugInfo/DIContext.h
index cfdeb46..26bd1f6 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DIContext.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DIContext.h
@@ -15,6 +15,8 @@
#ifndef LLVM_DEBUGINFO_DICONTEXT_H
#define LLVM_DEBUGINFO_DICONTEXT_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
@@ -54,6 +56,23 @@ public:
}
};
+/// DIInliningInfo - a format-neutral container for inlined code description.
+class DIInliningInfo {
+ SmallVector<DILineInfo, 4> Frames;
+ public:
+ DIInliningInfo() {}
+ DILineInfo getFrame(unsigned Index) const {
+ assert(Index < Frames.size());
+ return Frames[Index];
+ }
+ uint32_t getNumberOfFrames() const {
+ return Frames.size();
+ }
+ void addFrame(const DILineInfo &Frame) {
+ Frames.push_back(Frame);
+ }
+};
+
/// DILineInfoSpecifier - controls which fields of DILineInfo container
/// should be filled with data.
class DILineInfoSpecifier {
@@ -71,6 +90,13 @@ public:
}
};
+// Instead of applying the relocations to the data we've read from disk, we use
+// a separate mapping table off to the side and check it at the locations in
+// the DWARF where we expect relocated values. This adds a bit of complexity to
+// the DWARF parsing/extraction, with the benefit of not allocating memory for
+// the entire size of the debug info sections.
+typedef DenseMap<uint64_t, std::pair<uint8_t, int64_t> > RelocAddrMap;
+
class DIContext {
public:
virtual ~DIContext();
@@ -81,12 +107,16 @@ public:
StringRef abbrevSection,
StringRef aRangeSection = StringRef(),
StringRef lineSection = StringRef(),
- StringRef stringSection = StringRef());
+ StringRef stringSection = StringRef(),
+ StringRef rangeSection = StringRef(),
+ const RelocAddrMap &Map = RelocAddrMap());
virtual void dump(raw_ostream &OS) = 0;
- virtual DILineInfo getLineInfoForAddress(uint64_t address,
- DILineInfoSpecifier specifier = DILineInfoSpecifier()) = 0;
+ virtual DILineInfo getLineInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
+ virtual DIInliningInfo getInliningInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
};
}
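A sketch of the relocation side-table and the new inlining query; Ctx, the address, and the map contents are placeholders (the map would be handed to DIContext::getDWARFContext along with the section data):

    #include "llvm/DebugInfo/DIContext.h"
    #include <utility>
    using namespace llvm;

    void dumpInlining(DIContext *Ctx) {
      RelocAddrMap Relocs;
      // A relocated value at section offset 0x10: 8 bytes wide, addend 0x400000.
      Relocs[0x10] = std::make_pair(uint8_t(8), int64_t(0x400000));

      DIInliningInfo Info =
          Ctx->getInliningInfoForAddress(0x400123, DILineInfoSpecifier());
      for (uint32_t i = 0, e = Info.getNumberOfFrames(); i != e; ++i) {
        DILineInfo Frame = Info.getFrame(i); // one frame of the inline chain
        (void)Frame;
      }
    }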
diff --git a/contrib/llvm/include/llvm/DefaultPasses.h b/contrib/llvm/include/llvm/DefaultPasses.h
index 929569d..9f1ade8 100644
--- a/contrib/llvm/include/llvm/DefaultPasses.h
+++ b/contrib/llvm/include/llvm/DefaultPasses.h
@@ -14,7 +14,7 @@
#ifndef LLVM_DEFAULT_PASS_SUPPORT_H
#define LLVM_DEFAULT_PASS_SUPPORT_H
-#include <llvm/PassSupport.h>
+#include "llvm/PassSupport.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/DerivedTypes.h b/contrib/llvm/include/llvm/DerivedTypes.h
index da5ad27..c862c2c 100644
--- a/contrib/llvm/include/llvm/DerivedTypes.h
+++ b/contrib/llvm/include/llvm/DerivedTypes.h
@@ -20,6 +20,7 @@
#include "llvm/Type.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -84,7 +85,6 @@ public:
bool isPowerOf2ByteWidth() const;
// Methods for support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const IntegerType *) { return true; }
static inline bool classof(const Type *T) {
return T->getTypeID() == IntegerTyID;
}
@@ -94,8 +94,8 @@ public:
/// FunctionType - Class to represent function types
///
class FunctionType : public Type {
- FunctionType(const FunctionType &); // Do not implement
- const FunctionType &operator=(const FunctionType &); // Do not implement
+ FunctionType(const FunctionType &) LLVM_DELETED_FUNCTION;
+ const FunctionType &operator=(const FunctionType &) LLVM_DELETED_FUNCTION;
FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
public:
@@ -133,7 +133,6 @@ public:
unsigned getNumParams() const { return NumContainedTys - 1; }
// Methods for support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const FunctionType *) { return true; }
static inline bool classof(const Type *T) {
return T->getTypeID() == FunctionTyID;
}
@@ -156,7 +155,6 @@ public:
bool indexValid(unsigned Idx) const;
// Methods for support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const CompositeType *) { return true; }
static inline bool classof(const Type *T) {
return T->getTypeID() == ArrayTyID ||
T->getTypeID() == StructTyID ||
@@ -183,12 +181,12 @@ public:
/// Independent of what kind of struct you have, the body of a struct type is
/// laid out in memory consecutively with the elements directly one after the
/// other (if the struct is packed) or (if not packed) with padding between the
-/// elements as defined by TargetData (which is required to match what the code
+/// elements as defined by DataLayout (which is required to match what the code
/// generator for a target expects).
///
class StructType : public CompositeType {
- StructType(const StructType &); // Do not implement
- const StructType &operator=(const StructType &); // Do not implement
+ StructType(const StructType &) LLVM_DELETED_FUNCTION;
+ const StructType &operator=(const StructType &) LLVM_DELETED_FUNCTION;
StructType(LLVMContext &C)
: CompositeType(C, StructTyID), SymbolTableEntry(0) {}
enum {
@@ -292,7 +290,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const StructType *) { return true; }
static inline bool classof(const Type *T) {
return T->getTypeID() == StructTyID;
}
@@ -308,8 +305,8 @@ public:
///
class SequentialType : public CompositeType {
Type *ContainedType; ///< Storage for the single contained type.
- SequentialType(const SequentialType &); // Do not implement!
- const SequentialType &operator=(const SequentialType &); // Do not implement!
+ SequentialType(const SequentialType &) LLVM_DELETED_FUNCTION;
+ const SequentialType &operator=(const SequentialType &) LLVM_DELETED_FUNCTION;
protected:
SequentialType(TypeID TID, Type *ElType)
@@ -322,7 +319,6 @@ public:
Type *getElementType() const { return ContainedTys[0]; }
// Methods for support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const SequentialType *) { return true; }
static inline bool classof(const Type *T) {
return T->getTypeID() == ArrayTyID ||
T->getTypeID() == PointerTyID ||
@@ -336,8 +332,8 @@ public:
class ArrayType : public SequentialType {
uint64_t NumElements;
- ArrayType(const ArrayType &); // Do not implement
- const ArrayType &operator=(const ArrayType &); // Do not implement
+ ArrayType(const ArrayType &) LLVM_DELETED_FUNCTION;
+ const ArrayType &operator=(const ArrayType &) LLVM_DELETED_FUNCTION;
ArrayType(Type *ElType, uint64_t NumEl);
public:
/// ArrayType::get - This static method is the primary way to construct an
@@ -352,7 +348,6 @@ public:
uint64_t getNumElements() const { return NumElements; }
// Methods for support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const ArrayType *) { return true; }
static inline bool classof(const Type *T) {
return T->getTypeID() == ArrayTyID;
}
@@ -363,8 +358,8 @@ public:
class VectorType : public SequentialType {
unsigned NumElements;
- VectorType(const VectorType &); // Do not implement
- const VectorType &operator=(const VectorType &); // Do not implement
+ VectorType(const VectorType &) LLVM_DELETED_FUNCTION;
+ const VectorType &operator=(const VectorType &) LLVM_DELETED_FUNCTION;
VectorType(Type *ElType, unsigned NumEl);
public:
/// VectorType::get - This static method is the primary way to construct an
@@ -419,7 +414,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const VectorType *) { return true; }
static inline bool classof(const Type *T) {
return T->getTypeID() == VectorTyID;
}
@@ -429,8 +423,8 @@ public:
/// PointerType - Class to represent pointers.
///
class PointerType : public SequentialType {
- PointerType(const PointerType &); // Do not implement
- const PointerType &operator=(const PointerType &); // Do not implement
+ PointerType(const PointerType &) LLVM_DELETED_FUNCTION;
+ const PointerType &operator=(const PointerType &) LLVM_DELETED_FUNCTION;
explicit PointerType(Type *ElType, unsigned AddrSpace);
public:
/// PointerType::get - This constructs a pointer to an object of the specified
@@ -451,7 +445,6 @@ public:
inline unsigned getAddressSpace() const { return getSubclassData(); }
// Implement support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const PointerType *) { return true; }
static inline bool classof(const Type *T) {
return T->getTypeID() == PointerTyID;
}
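The dropped classof(const X *) overloads were always-true helpers for the old isa<> machinery; client code is unchanged, as in this sketch:

#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

bool isByteArray(Type *T) {
  // dyn_cast only needs classof(const Type *), which remains.
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return AT->getElementType()->isIntegerTy(8);
  return false;
}
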
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
index ae8b68d..8073d8f 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -42,7 +42,7 @@ class JITMemoryManager;
class MachineCodeInfo;
class Module;
class MutexGuard;
-class TargetData;
+class DataLayout;
class Triple;
class Type;
@@ -88,7 +88,7 @@ public:
/// \brief Erase an entry from the mapping table.
///
- /// \returns The address that \arg ToUnmap was happed to.
+ /// \returns The address that \p ToUnmap was mapped to.
void *RemoveMapping(const MutexGuard &, const GlobalValue *ToUnmap);
};
@@ -104,7 +104,7 @@ class ExecutionEngine {
ExecutionEngineState EEState;
/// The target data for the platform for which execution is being performed.
- const TargetData *TD;
+ const DataLayout *TD;
/// Whether lazy JIT compilation is enabled.
bool CompilingLazily;
@@ -123,7 +123,7 @@ protected:
/// optimize for the case where there is only one module.
SmallVector<Module*, 1> Modules;
- void setTargetData(const TargetData *td) { TD = td; }
+ void setDataLayout(const DataLayout *td) { TD = td; }
/// getMemoryforGV - Allocate memory for a global variable.
virtual char *getMemoryForGV(const GlobalVariable *GV);
@@ -213,7 +213,7 @@ public:
//===--------------------------------------------------------------------===//
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
/// removeModule - Remove a Module from the list of modules. Returns true if
/// M is found.
@@ -244,11 +244,18 @@ public:
/// Map the address of a JIT section as returned from the memory manager
/// to the address in the target process as the running code will see it.
/// This is the address which will be used for relocation resolution.
- virtual void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress) {
+ virtual void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress) {
llvm_unreachable("Re-mapping of section addresses not supported with this "
"EE!");
}
+ // finalizeObject - This method should be called after sections within an
+ // object have been relocated using mapSectionAddress. When this method is
+ // called, the MCJIT execution engine will reapply relocations for a loaded
+ // object. This method has no effect for the legacy JIT engine or the
+ // interpreter.
+ virtual void finalizeObject() {}
+
/// runStaticConstructorsDestructors - This method is used to execute all of
/// the static constructors or destructors for a program.
///
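A sketch of the remote-relocation flow these comments describe, using only the two entry points above; error handling is omitted:

#include "llvm/ExecutionEngine/ExecutionEngine.h"

using namespace llvm;

// After the memory manager has handed out local section memory, record where
// each section will live in the target, then reapply relocations once.
void relocateForTarget(ExecutionEngine &EE, const void *LocalAddr,
                       uint64_t TargetAddr) {
  EE.mapSectionAddress(LocalAddr, TargetAddr); // one call per loaded section
  EE.finalizeObject();                         // MCJIT re-resolves relocations
}
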
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
index eea603f..e6586e7 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
@@ -26,6 +26,7 @@ class Function;
class MachineFunction;
class OProfileWrapper;
class IntelJITEventsWrapper;
+class ObjectImage;
/// JITEvent_EmittedFunctionDetails - Helper struct for containing information
/// about a generated machine code function.
@@ -76,6 +77,20 @@ public:
/// matching NotifyFreeingMachineCode call.
virtual void NotifyFreeingMachineCode(void *) {}
+ /// NotifyObjectEmitted - Called after an object has been successfully
+ /// emitted to memory. NotifyFunctionEmitted will not be called for
+ /// individual functions in the object.
+ ///
+ /// ELF-specific information
+ /// The ObjectImage contains the generated object image
+ /// with section headers updated to reflect the address at which sections
+ /// were loaded and with relocations performed in-place on debug sections.
+ virtual void NotifyObjectEmitted(const ObjectImage &Obj) {}
+
+ /// NotifyFreeingObject - Called just before the memory associated with
+ /// a previously emitted object is released.
+ virtual void NotifyFreeingObject(const ObjectImage &Obj) {}
+
#if LLVM_USE_INTEL_JITEVENTS
// Construct an IntelJITEventListener
static JITEventListener *createIntelJITEventListener();
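A minimal listener built on the two new hooks; registering it with the engine via ExecutionEngine::RegisterJITEventListener is assumed to work as in earlier releases:

#include "llvm/ExecutionEngine/JITEventListener.h"

namespace {
class ObjectLogger : public llvm::JITEventListener {
public:
  virtual void NotifyObjectEmitted(const llvm::ObjectImage &Obj) {
    // Called once per emitted object; NotifyFunctionEmitted is skipped for
    // the functions it contains. A real listener would inspect sections here.
  }
  virtual void NotifyFreeingObject(const llvm::ObjectImage &Obj) {
    // Last chance to drop any references into the object's memory.
  }
};
} // anonymous namespace
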
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h b/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
index 4c75b6a..9089646 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
@@ -10,7 +10,9 @@
#ifndef LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H
#define LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/Support/DataTypes.h"
+
#include <string>
namespace llvm {
@@ -22,7 +24,7 @@ namespace llvm {
/// memory for the code generated by the JIT. This can be reimplemented by
/// clients that have a strong desire to control how the layout of JIT'd memory
/// works.
-class JITMemoryManager {
+class JITMemoryManager : public RTDyldMemoryManager {
protected:
bool HasGOT;
@@ -47,17 +49,6 @@ public:
/// debugging, and may be turned on by default in debug mode.
virtual void setPoisonMemory(bool poison) = 0;
- /// getPointerToNamedFunction - This method returns the address of the
- /// specified function. As such it is only useful for resolving library
- /// symbols, not code generated symbols.
- ///
- /// If AbortOnFailure is false and no function with the given name is
- /// found, this function silently returns a null pointer. Otherwise,
- /// it prints a message to stderr and aborts.
- ///
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true) = 0;
-
//===--------------------------------------------------------------------===//
// Global Offset Table Management
//===--------------------------------------------------------------------===//
@@ -112,22 +103,6 @@ public:
virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart,
uint8_t *FunctionEnd) = 0;
- /// allocateCodeSection - Allocate a memory block of (at least) the given
- /// size suitable for executable code. The SectionID is a unique identifier
- /// assigned by the JIT and passed through to the memory manager for
- /// the instance class to use if it needs to communicate to the JIT about
- /// a given section after the fact.
- virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID) = 0;
-
- /// allocateDataSection - Allocate a memory block of (at least) the given
- /// size suitable for data. The SectionID is a unique identifier
- /// assigned by the JIT and passed through to the memory manager for
- /// the instance class to use if it needs to communicate to the JIT about
- /// a given section after the fact.
- virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID) = 0;
-
/// allocateSpace - Allocate a memory block of the given size. This method
/// cannot be called between calls to startFunctionBody and endFunctionBody.
virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) = 0;
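Since the section allocators and symbol resolution now live on the base class, a dynamic-loading client can implement just RTDyldMemoryManager. A sketch with deliberately naive allocation (real code must return properly aligned, executable memory):

#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include <string>

class SimpleMemoryManager : public llvm::RTDyldMemoryManager {
public:
  virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
                                       unsigned SectionID) {
    return new uint8_t[Size]; // placeholder: not executable memory
  }
  virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
                                       unsigned SectionID) {
    return new uint8_t[Size];
  }
  virtual void *getPointerToNamedFunction(const std::string &Name,
                                          bool AbortOnFailure = true) {
    return 0; // would resolve library symbols (e.g. via dlsym) here
  }
};
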
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ObjectBuffer.h b/contrib/llvm/include/llvm/ExecutionEngine/ObjectBuffer.h
new file mode 100644
index 0000000..a0a77b8
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ObjectBuffer.h
@@ -0,0 +1,80 @@
+//===---- ObjectBuffer.h - Utility class to wrap object image memory -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a wrapper class to hold the memory into which an
+// object will be generated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OBJECTBUFFER_H
+#define LLVM_EXECUTIONENGINE_OBJECTBUFFER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+
+/// ObjectBuffer - This class acts as a container for the memory buffer used during
+/// generation and loading of executable objects using MCJIT and RuntimeDyld. The
+/// underlying memory for the object will be owned by the ObjectBuffer instance
+/// throughout its lifetime. The getMemBuffer() method provides a way to create a
+/// MemoryBuffer wrapper object instance to be owned by other classes (such as
+/// ObjectFile) as needed, but the MemoryBuffer instance returned does not own the
+/// actual memory it points to.
+class ObjectBuffer {
+public:
+ ObjectBuffer() {}
+ ObjectBuffer(MemoryBuffer* Buf) : Buffer(Buf) {}
+ virtual ~ObjectBuffer() {}
+
+ /// getMemBuffer - Like MemoryBuffer::getMemBuffer() this function
+ /// returns a pointer to an object that is owned by the caller. However,
+ /// the caller does not take ownership of the underlying memory.
+ MemoryBuffer *getMemBuffer() const {
+ return MemoryBuffer::getMemBuffer(Buffer->getBuffer(), "", false);
+ }
+
+ const char *getBufferStart() const { return Buffer->getBufferStart(); }
+ size_t getBufferSize() const { return Buffer->getBufferSize(); }
+
+protected:
+ // The memory contained in an ObjectBuffer
+ OwningPtr<MemoryBuffer> Buffer;
+};
+
+/// ObjectBufferStream - This class encapsulates the SmallVector and
+/// raw_svector_ostream needed to generate an object using MC code emission
+/// while providing a common ObjectBuffer interface for access to the
+/// memory once the object has been generated.
+class ObjectBufferStream : public ObjectBuffer {
+public:
+ ObjectBufferStream() : OS(SV) {}
+ virtual ~ObjectBufferStream() {}
+
+ raw_ostream &getOStream() { return OS; }
+ void flush()
+ {
+ OS.flush();
+
+ // Make the data accessible via the ObjectBuffer::Buffer
+ Buffer.reset(MemoryBuffer::getMemBuffer(StringRef(SV.data(), SV.size()),
+ "",
+ false));
+ }
+
+protected:
+ SmallVector<char, 4096> SV; // Working buffer into which we JIT.
+ raw_svector_ostream OS; // streaming wrapper
+};
+
+} // namespace llvm
+
+#endif
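A usage sketch for the two classes above; the bytes written stand in for real MC emission:

#include "llvm/ExecutionEngine/ObjectBuffer.h"

using namespace llvm;

void emitAndWrap() {
  ObjectBufferStream Buf;
  Buf.getOStream() << "\x7f" "ELF";  // stand-in for MC object emission
  Buf.flush();                       // publish the SmallVector into Buffer

  // The returned MemoryBuffer is owned by the caller but does not own the
  // underlying bytes, which remain with the ObjectBufferStream.
  OwningPtr<MemoryBuffer> View(Buf.getMemBuffer());
  (void)View->getBufferSize();
}
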
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ObjectImage.h b/contrib/llvm/include/llvm/ExecutionEngine/ObjectImage.h
new file mode 100644
index 0000000..82549ad
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ObjectImage.h
@@ -0,0 +1,61 @@
+//===---- ObjectImage.h - Format independent executable object image ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a file format independent ObjectImage class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OBJECTIMAGE_H
+#define LLVM_EXECUTIONENGINE_OBJECTIMAGE_H
+
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
+
+namespace llvm {
+
+
+/// ObjectImage - A container class that represents an ObjectFile that has been
+/// or is in the process of being loaded into memory for execution.
+class ObjectImage {
+ ObjectImage() LLVM_DELETED_FUNCTION;
+ ObjectImage(const ObjectImage &other) LLVM_DELETED_FUNCTION;
+
+protected:
+ OwningPtr<ObjectBuffer> Buffer;
+
+public:
+ ObjectImage(ObjectBuffer *Input) : Buffer(Input) {}
+ virtual ~ObjectImage() {}
+
+ virtual object::symbol_iterator begin_symbols() const = 0;
+ virtual object::symbol_iterator end_symbols() const = 0;
+
+ virtual object::section_iterator begin_sections() const = 0;
+ virtual object::section_iterator end_sections() const = 0;
+
+ virtual /* Triple::ArchType */ unsigned getArch() const = 0;
+
+ // Subclasses can override these methods to update the image with loaded
+ // addresses for sections and common symbols
+ virtual void updateSectionAddress(const object::SectionRef &Sec,
+ uint64_t Addr) = 0;
+ virtual void updateSymbolAddress(const object::SymbolRef &Sym,
+ uint64_t Addr) = 0;
+
+ virtual StringRef getData() const = 0;
+
+ // Subclasses can override these methods to provide JIT debugging support
+ virtual void registerWithDebugger() = 0;
+ virtual void deregisterWithDebugger() = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_OBJECTIMAGE_H
+
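A sketch of walking a loaded image's symbols, assuming the content-iterator protocol of this era (iterators advance via increment(error_code&)):

#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/Support/system_error.h"

using namespace llvm;

void listSymbols(const ObjectImage &Img) {
  error_code EC;
  for (object::symbol_iterator I = Img.begin_symbols(),
                               E = Img.end_symbols();
       I != E; I.increment(EC)) {
    StringRef Name;
    if (!I->getName(Name)) {
      // use Name, e.g. record it against its loaded address
    }
  }
}
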
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
index a5c9272..891f534 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -15,43 +15,55 @@
#define LLVM_RUNTIME_DYLD_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
#include "llvm/Support/Memory.h"
namespace llvm {
class RuntimeDyldImpl;
-class MemoryBuffer;
+class ObjectImage;
// RuntimeDyld clients often want to handle the memory management of
-// what gets placed where. For JIT clients, this is an abstraction layer
-// over the JITMemoryManager, which references objects by their source
-// representations in LLVM IR.
+// what gets placed where. For JIT clients, this is the subset of
+// JITMemoryManager required for dynamic loading of binaries.
+//
// FIXME: As the RuntimeDyld fills out, additional routines will be needed
// for the varying types of objects to be allocated.
class RTDyldMemoryManager {
- RTDyldMemoryManager(const RTDyldMemoryManager&); // DO NOT IMPLEMENT
- void operator=(const RTDyldMemoryManager&); // DO NOT IMPLEMENT
+ RTDyldMemoryManager(const RTDyldMemoryManager&) LLVM_DELETED_FUNCTION;
+ void operator=(const RTDyldMemoryManager&) LLVM_DELETED_FUNCTION;
public:
RTDyldMemoryManager() {}
virtual ~RTDyldMemoryManager();
/// allocateCodeSection - Allocate a memory block of (at least) the given
- /// size suitable for executable code.
+ /// size suitable for executable code. The SectionID is a unique identifier
+ /// assigned by the JIT engine, and optionally recorded by the memory manager
+ /// to access a loaded section.
virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID) = 0;
/// allocateDataSection - Allocate a memory block of (at least) the given
- /// size suitable for data.
+ /// size suitable for data. The SectionID is a unique identifier
+ /// assigned by the JIT engine, and optionally recorded by the memory manager
+ /// to access a loaded section.
virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID) = 0;
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function. As such it is only useful for resolving library
+ /// symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function returns a null pointer. Otherwise, it prints a
+ /// message to stderr and aborts.
virtual void *getPointerToNamedFunction(const std::string &Name,
bool AbortOnFailure = true) = 0;
};
class RuntimeDyld {
- RuntimeDyld(const RuntimeDyld &); // DO NOT IMPLEMENT
- void operator=(const RuntimeDyld &); // DO NOT IMPLEMENT
+ RuntimeDyld(const RuntimeDyld &) LLVM_DELETED_FUNCTION;
+ void operator=(const RuntimeDyld &) LLVM_DELETED_FUNCTION;
// RuntimeDyldImpl is the actual class. RuntimeDyld is just the public
// interface.
@@ -62,17 +74,24 @@ protected:
// Any relocations already associated with the symbol will be re-resolved.
void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
public:
- RuntimeDyld(RTDyldMemoryManager*);
+ RuntimeDyld(RTDyldMemoryManager *);
~RuntimeDyld();
- /// Load an in-memory object file into the dynamic linker.
- bool loadObject(MemoryBuffer *InputBuffer);
+ /// loadObject - Prepare the object contained in the input buffer for
+ /// execution. Ownership of the input buffer is transferred to the
+ /// ObjectImage instance returned from this function if successful.
+ /// In the case of load failure, the input buffer will be deleted.
+ ObjectImage *loadObject(ObjectBuffer *InputBuffer);
/// Get the address of our local copy of the symbol. This may or may not
/// be the address used for relocation (clients can copy the data around
/// and resolve relocations based on where they put it).
void *getSymbolAddress(StringRef Name);
+ /// Get the address of the target copy of the symbol. This is the address
+ /// used for relocation.
+ uint64_t getSymbolLoadAddress(StringRef Name);
+
/// Resolve the relocations for all symbols we currently know about.
void resolveRelocations();
@@ -80,7 +99,7 @@ public:
/// Map the address of a JIT section as returned from the memory manager
/// to the address in the target process as the running code will see it.
/// This is the address which will be used for relocation resolution.
- void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress);
+ void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
StringRef getErrorString();
};
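The new contract in one sketch: ownership of the buffer moves into the returned image, and a null result means the buffer was already deleted:

#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/ADT/OwningPtr.h"

using namespace llvm;

void *loadAndResolve(RuntimeDyld &Dyld, ObjectBuffer *Buf, StringRef Sym) {
  OwningPtr<ObjectImage> Image(Dyld.loadObject(Buf));
  if (!Image)
    return 0;                  // load failed; Buf was deleted by loadObject
  Dyld.resolveRelocations();   // fix up all symbols known so far
  return Dyld.getSymbolAddress(Sym);
}
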
diff --git a/contrib/llvm/include/llvm/Function.h b/contrib/llvm/include/llvm/Function.h
index fdd90d1..e211e9a 100644
--- a/contrib/llvm/include/llvm/Function.h
+++ b/contrib/llvm/include/llvm/Function.h
@@ -109,9 +109,9 @@ private:
BuildLazyArguments();
}
void BuildLazyArguments() const;
-
- Function(const Function&); // DO NOT IMPLEMENT
- void operator=(const Function&); // DO NOT IMPLEMENT
+
+ Function(const Function&) LLVM_DELETED_FUNCTION;
+ void operator=(const Function&) LLVM_DELETED_FUNCTION;
/// Function ctor - If the (optional) Module argument is specified, the
/// function is automatically inserted into the end of the function list for
@@ -168,17 +168,17 @@ public:
///
void setAttributes(const AttrListPtr &attrs) { AttributeList = attrs; }
- /// hasFnAttr - Return true if this function has the given attribute.
- bool hasFnAttr(Attributes N) const {
- // Function Attributes are stored at ~0 index
- return AttributeList.paramHasAttr(~0U, N);
+ /// getFnAttributes - Return the function attributes for querying.
+ ///
+ Attributes getFnAttributes() const {
+ return AttributeList.getFnAttributes();
}
/// addFnAttr - Add function attributes to this function.
///
- void addFnAttr(Attributes N) {
+ void addFnAttr(Attributes::AttrVal N) {
// Function Attributes are stored at ~0 index
- addAttribute(~0U, N);
+ addAttribute(AttrListPtr::FunctionIndex, Attributes::get(getContext(), N));
}
/// removeFnAttr - Remove function attributes from this function.
@@ -195,9 +195,15 @@ public:
void setGC(const char *Str);
void clearGC();
- /// @brief Determine whether the function has the given attribute.
- bool paramHasAttr(unsigned i, Attributes attr) const {
- return AttributeList.paramHasAttr(i, attr);
+
+ /// getRetAttributes - Return the return attributes for querying.
+ Attributes getRetAttributes() const {
+ return AttributeList.getRetAttributes();
+ }
+
+ /// getParamAttributes - Return the parameter attributes for querying.
+ Attributes getParamAttributes(unsigned Idx) const {
+ return AttributeList.getParamAttributes(Idx);
}
/// addAttribute - adds the attribute to the list of attributes.
@@ -213,50 +219,44 @@ public:
/// @brief Determine if the function does not access memory.
bool doesNotAccessMemory() const {
- return hasFnAttr(Attribute::ReadNone);
+ return getFnAttributes().hasAttribute(Attributes::ReadNone);
}
- void setDoesNotAccessMemory(bool DoesNotAccessMemory = true) {
- if (DoesNotAccessMemory) addFnAttr(Attribute::ReadNone);
- else removeFnAttr(Attribute::ReadNone);
+ void setDoesNotAccessMemory() {
+ addFnAttr(Attributes::ReadNone);
}
/// @brief Determine if the function does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ return doesNotAccessMemory() ||
+ getFnAttributes().hasAttribute(Attributes::ReadOnly);
}
- void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
- if (OnlyReadsMemory) addFnAttr(Attribute::ReadOnly);
- else removeFnAttr(Attribute::ReadOnly | Attribute::ReadNone);
+ void setOnlyReadsMemory() {
+ addFnAttr(Attributes::ReadOnly);
}
/// @brief Determine if the function cannot return.
bool doesNotReturn() const {
- return hasFnAttr(Attribute::NoReturn);
+ return getFnAttributes().hasAttribute(Attributes::NoReturn);
}
- void setDoesNotReturn(bool DoesNotReturn = true) {
- if (DoesNotReturn) addFnAttr(Attribute::NoReturn);
- else removeFnAttr(Attribute::NoReturn);
+ void setDoesNotReturn() {
+ addFnAttr(Attributes::NoReturn);
}
/// @brief Determine if the function cannot unwind.
bool doesNotThrow() const {
- return hasFnAttr(Attribute::NoUnwind);
+ return getFnAttributes().hasAttribute(Attributes::NoUnwind);
}
- void setDoesNotThrow(bool DoesNotThrow = true) {
- if (DoesNotThrow) addFnAttr(Attribute::NoUnwind);
- else removeFnAttr(Attribute::NoUnwind);
+ void setDoesNotThrow() {
+ addFnAttr(Attributes::NoUnwind);
}
/// @brief True if the ABI mandates (or the user requested) that this
/// function be in an unwind table.
bool hasUWTable() const {
- return hasFnAttr(Attribute::UWTable);
+ return getFnAttributes().hasAttribute(Attributes::UWTable);
}
- void setHasUWTable(bool HasUWTable = true) {
- if (HasUWTable)
- addFnAttr(Attribute::UWTable);
- else
- removeFnAttr(Attribute::UWTable);
+ void setHasUWTable() {
+ addFnAttr(Attributes::UWTable);
}
/// @brief True if this function needs an unwind table.
@@ -267,27 +267,25 @@ public:
/// @brief Determine if the function returns a structure through first
/// pointer argument.
bool hasStructRetAttr() const {
- return paramHasAttr(1, Attribute::StructRet);
+ return getParamAttributes(1).hasAttribute(Attributes::StructRet);
}
/// @brief Determine if the parameter does not alias other parameters.
/// @param n The parameter to check. 1 is the first parameter, 0 is the return value.
bool doesNotAlias(unsigned n) const {
- return paramHasAttr(n, Attribute::NoAlias);
+ return getParamAttributes(n).hasAttribute(Attributes::NoAlias);
}
- void setDoesNotAlias(unsigned n, bool DoesNotAlias = true) {
- if (DoesNotAlias) addAttribute(n, Attribute::NoAlias);
- else removeAttribute(n, Attribute::NoAlias);
+ void setDoesNotAlias(unsigned n) {
+ addAttribute(n, Attributes::get(getContext(), Attributes::NoAlias));
}
/// @brief Determine if the parameter can be captured.
/// @param n The parameter to check. 1 is the first parameter, 0 is the return value.
bool doesNotCapture(unsigned n) const {
- return paramHasAttr(n, Attribute::NoCapture);
+ return getParamAttributes(n).hasAttribute(Attributes::NoCapture);
}
- void setDoesNotCapture(unsigned n, bool DoesNotCapture = true) {
- if (DoesNotCapture) addAttribute(n, Attribute::NoCapture);
- else removeAttribute(n, Attribute::NoCapture);
+ void setDoesNotCapture(unsigned n) {
+ addAttribute(n, Attributes::get(getContext(), Attributes::NoCapture));
}
/// copyAttributesFrom - copy all additional attributes (those not needed to
@@ -400,7 +398,6 @@ public:
void viewCFGOnly() const;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Function *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal;
}
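A query written against the reworked interface; with the boolean toggles gone, the setters are now one-way (attributes are added, not cleared):

#include "llvm/Function.h"
#include "llvm/Attributes.h"

using namespace llvm;

bool readsNoMemoryOrNoAliasArg(const Function &F) {
  if (F.getFnAttributes().hasAttribute(Attributes::ReadNone))
    return true;
  // Parameter indices stay 1-based; index 0 refers to the return value.
  return F.getParamAttributes(1).hasAttribute(Attributes::NoAlias);
}
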
diff --git a/contrib/llvm/include/llvm/GlobalAlias.h b/contrib/llvm/include/llvm/GlobalAlias.h
index 164d976..d0f0147 100644
--- a/contrib/llvm/include/llvm/GlobalAlias.h
+++ b/contrib/llvm/include/llvm/GlobalAlias.h
@@ -28,8 +28,8 @@ template<typename ValueSubClass, typename ItemParentClass>
class GlobalAlias : public GlobalValue, public ilist_node<GlobalAlias> {
friend class SymbolTableListTraits<GlobalAlias, Module>;
- void operator=(const GlobalAlias &); // Do not implement
- GlobalAlias(const GlobalAlias &); // Do not implement
+ void operator=(const GlobalAlias &) LLVM_DELETED_FUNCTION;
+ GlobalAlias(const GlobalAlias &) LLVM_DELETED_FUNCTION;
void setParent(Module *parent);
@@ -76,7 +76,6 @@ public:
const GlobalValue *resolveAliasedGlobal(bool stopOnWeak = true) const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GlobalAlias *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::GlobalAliasVal;
}
diff --git a/contrib/llvm/include/llvm/GlobalValue.h b/contrib/llvm/include/llvm/GlobalValue.h
index 8b969f3..7f7f74b 100644
--- a/contrib/llvm/include/llvm/GlobalValue.h
+++ b/contrib/llvm/include/llvm/GlobalValue.h
@@ -26,7 +26,7 @@ class PointerType;
class Module;
class GlobalValue : public Constant {
- GlobalValue(const GlobalValue &); // do not implement
+ GlobalValue(const GlobalValue &) LLVM_DELETED_FUNCTION;
public:
/// @brief An enumeration for the kinds of linkage for global values.
enum LinkageTypes {
@@ -34,6 +34,7 @@ public:
AvailableExternallyLinkage, ///< Available for inspection, not emission.
LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline)
LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent.
+ LinkOnceODRAutoHideLinkage, ///< Like LinkOnceODRLinkage but addr not taken.
WeakAnyLinkage, ///< Keep one copy of named function when linking (weak)
WeakODRLinkage, ///< Same, but only replaced by something equivalent.
AppendingLinkage, ///< Special purpose, only applies to global arrays
@@ -41,8 +42,6 @@ public:
PrivateLinkage, ///< Like Internal, but omit from symbol table.
LinkerPrivateLinkage, ///< Like Private, but linker removes.
LinkerPrivateWeakLinkage, ///< Like LinkerPrivate, but weak.
- LinkerPrivateWeakDefAutoLinkage, ///< Like LinkerPrivateWeak, but possibly
- /// hidden.
DLLImportLinkage, ///< Function to be imported from DLL
DLLExportLinkage, ///< Function to be accessible from DLL.
ExternalWeakLinkage,///< ExternalWeak linkage description.
@@ -123,7 +122,12 @@ public:
return Linkage == AvailableExternallyLinkage;
}
static bool isLinkOnceLinkage(LinkageTypes Linkage) {
- return Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage;
+ return Linkage == LinkOnceAnyLinkage ||
+ Linkage == LinkOnceODRLinkage ||
+ Linkage == LinkOnceODRAutoHideLinkage;
+ }
+ static bool isLinkOnceODRAutoHideLinkage(LinkageTypes Linkage) {
+ return Linkage == LinkOnceODRAutoHideLinkage;
}
static bool isWeakLinkage(LinkageTypes Linkage) {
return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage;
@@ -143,13 +147,9 @@ public:
static bool isLinkerPrivateWeakLinkage(LinkageTypes Linkage) {
return Linkage == LinkerPrivateWeakLinkage;
}
- static bool isLinkerPrivateWeakDefAutoLinkage(LinkageTypes Linkage) {
- return Linkage == LinkerPrivateWeakDefAutoLinkage;
- }
static bool isLocalLinkage(LinkageTypes Linkage) {
return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage) ||
- isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage) ||
- isLinkerPrivateWeakDefAutoLinkage(Linkage);
+ isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage);
}
static bool isDLLImportLinkage(LinkageTypes Linkage) {
return Linkage == DLLImportLinkage;
@@ -178,8 +178,7 @@ public:
Linkage == LinkOnceAnyLinkage ||
Linkage == CommonLinkage ||
Linkage == ExternalWeakLinkage ||
- Linkage == LinkerPrivateWeakLinkage ||
- Linkage == LinkerPrivateWeakDefAutoLinkage;
+ Linkage == LinkerPrivateWeakLinkage;
}
/// isWeakForLinker - Whether the definition of this global may be replaced at
@@ -192,10 +191,10 @@ public:
Linkage == WeakODRLinkage ||
Linkage == LinkOnceAnyLinkage ||
Linkage == LinkOnceODRLinkage ||
+ Linkage == LinkOnceODRAutoHideLinkage ||
Linkage == CommonLinkage ||
Linkage == ExternalWeakLinkage ||
- Linkage == LinkerPrivateWeakLinkage ||
- Linkage == LinkerPrivateWeakDefAutoLinkage;
+ Linkage == LinkerPrivateWeakLinkage;
}
bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
@@ -205,6 +204,9 @@ public:
bool hasLinkOnceLinkage() const {
return isLinkOnceLinkage(Linkage);
}
+ bool hasLinkOnceODRAutoHideLinkage() const {
+ return isLinkOnceODRAutoHideLinkage(Linkage);
+ }
bool hasWeakLinkage() const {
return isWeakLinkage(Linkage);
}
@@ -215,9 +217,6 @@ public:
bool hasLinkerPrivateWeakLinkage() const {
return isLinkerPrivateWeakLinkage(Linkage);
}
- bool hasLinkerPrivateWeakDefAutoLinkage() const {
- return isLinkerPrivateWeakDefAutoLinkage(Linkage);
- }
bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
bool hasDLLImportLinkage() const { return isDLLImportLinkage(Linkage); }
bool hasDLLExportLinkage() const { return isDLLExportLinkage(Linkage); }
@@ -288,7 +287,6 @@ public:
inline const Module *getParent() const { return Parent; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GlobalValue *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal ||
V->getValueID() == Value::GlobalVariableVal ||
diff --git a/contrib/llvm/include/llvm/GlobalVariable.h b/contrib/llvm/include/llvm/GlobalVariable.h
index 99b7a73..b9d3f68 100644
--- a/contrib/llvm/include/llvm/GlobalVariable.h
+++ b/contrib/llvm/include/llvm/GlobalVariable.h
@@ -34,9 +34,9 @@ template<typename ValueSubClass, typename ItemParentClass>
class GlobalVariable : public GlobalValue, public ilist_node<GlobalVariable> {
friend class SymbolTableListTraits<GlobalVariable, Module>;
- void *operator new(size_t, unsigned); // Do not implement
- void operator=(const GlobalVariable &); // Do not implement
- GlobalVariable(const GlobalVariable &); // Do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ void operator=(const GlobalVariable &) LLVM_DELETED_FUNCTION;
+ GlobalVariable(const GlobalVariable &) LLVM_DELETED_FUNCTION;
void setParent(Module *parent);
@@ -174,7 +174,6 @@ public:
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GlobalVariable *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::GlobalVariableVal;
}
diff --git a/contrib/llvm/include/llvm/IRBuilder.h b/contrib/llvm/include/llvm/IRBuilder.h
index d5b6f47..f63a160 100644
--- a/contrib/llvm/include/llvm/IRBuilder.h
+++ b/contrib/llvm/include/llvm/IRBuilder.h
@@ -17,6 +17,7 @@
#include "llvm/Instructions.h"
#include "llvm/BasicBlock.h"
+#include "llvm/DataLayout.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
@@ -266,6 +267,10 @@ public:
return Type::getInt8PtrTy(Context, AddrSpace);
}
+ IntegerType* getIntPtrTy(DataLayout *DL, unsigned AddrSpace = 0) {
+ return DL->getIntPtrType(Context, AddrSpace);
+ }
+
//===--------------------------------------------------------------------===//
// Intrinsic creation methods
//===--------------------------------------------------------------------===//
@@ -285,12 +290,15 @@ public:
/// If the pointers aren't i8*, they will be converted. If a TBAA tag is
/// specified, it will be added to the instruction.
CallInst *CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0) {
- return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag);
+ bool isVolatile = false, MDNode *TBAATag = 0,
+ MDNode *TBAAStructTag = 0) {
+ return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag,
+ TBAAStructTag);
}
CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0);
+ bool isVolatile = false, MDNode *TBAATag = 0,
+ MDNode *TBAAStructTag = 0);
/// CreateMemMove - Create and insert a memmove between the specified
/// pointers. If the pointers aren't i8*, they will be converted. If a TBAA
@@ -810,6 +818,31 @@ public:
StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
return Insert(new StoreInst(Val, Ptr, isVolatile));
}
+ // Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")' correctly,
+ // instead of converting the string to 'bool' for the isVolatile parameter.
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
+ LoadInst *LI = CreateLoad(Ptr, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
+ const Twine &Name = "") {
+ LoadInst *LI = CreateLoad(Ptr, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
+ const Twine &Name = "") {
+ LoadInst *LI = CreateLoad(Ptr, isVolatile, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
+ bool isVolatile = false) {
+ StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
+ SI->setAlignment(Align);
+ return SI;
+ }
FenceInst *CreateFence(AtomicOrdering Ordering,
SynchronizationScope SynchScope = CrossThread) {
return Insert(new FenceInst(Context, Ordering, SynchScope));
@@ -970,6 +1003,30 @@ public:
Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
return CreateCast(Instruction::SExt, V, DestTy, Name);
}
+ /// CreateZExtOrTrunc - Create a ZExt or Trunc from the integer value V to
+ /// DestTy. Return the value untouched if the type of V is already DestTy.
+ Value *CreateZExtOrTrunc(Value *V, IntegerType *DestTy,
+ const Twine &Name = "") {
+ assert(isa<IntegerType>(V->getType()) && "Can only zero extend integers!");
+ IntegerType *IntTy = cast<IntegerType>(V->getType());
+ if (IntTy->getBitWidth() < DestTy->getBitWidth())
+ return CreateZExt(V, DestTy, Name);
+ if (IntTy->getBitWidth() > DestTy->getBitWidth())
+ return CreateTrunc(V, DestTy, Name);
+ return V;
+ }
+ /// CreateSExtOrTrunc - Create a SExt or Trunc from the integer value V to
+ /// DestTy. Return the value untouched if the type of V is already DestTy.
+ Value *CreateSExtOrTrunc(Value *V, IntegerType *DestTy,
+ const Twine &Name = "") {
+ assert(isa<IntegerType>(V->getType()) && "Can only sign extend integers!");
+ IntegerType *IntTy = cast<IntegerType>(V->getType());
+ if (IntTy->getBitWidth() < DestTy->getBitWidth())
+ return CreateSExt(V, DestTy, Name);
+ if (IntTy->getBitWidth() > DestTy->getBitWidth())
+ return CreateTrunc(V, DestTy, Name);
+ return V;
+ }
Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){
return CreateCast(Instruction::FPToUI, V, DestTy, Name);
}
@@ -1052,7 +1109,7 @@ public:
private:
// Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a compile time
// error, instead of converting the string to bool for the isSigned parameter.
- Value *CreateIntCast(Value *, Type *, const char *); // DO NOT IMPLEMENT
+ Value *CreateIntCast(Value *, Type *, const char *) LLVM_DELETED_FUNCTION;
public:
Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
if (V->getType() == DestTy)
@@ -1261,13 +1318,13 @@ public:
// Utility creation methods
//===--------------------------------------------------------------------===//
- /// CreateIsNull - Return an i1 value testing if \arg Arg is null.
+ /// CreateIsNull - Return an i1 value testing if \p Arg is null.
Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
Name);
}
- /// CreateIsNotNull - Return an i1 value testing if \arg Arg is not null.
+ /// CreateIsNotNull - Return an i1 value testing if \p Arg is not null.
Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
Name);
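A sketch exercising the additions above: the alignment-setting load/store wrappers and the width-adjusting integer casts:

#include "llvm/IRBuilder.h"

using namespace llvm;

Value *resizeAndSpill(IRBuilder<> &B, Value *IntVal, Value *Slot) {
  // Emits ZExt, Trunc, or nothing, depending on IntVal's width vs. i32.
  Value *V32 = B.CreateZExtOrTrunc(IntVal, B.getInt32Ty());
  B.CreateAlignedStore(V32, Slot, /*Align=*/4);
  // The string literal selects the const char* overload, not the bool one.
  return B.CreateAlignedLoad(Slot, /*Align=*/4, "reload");
}
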
diff --git a/contrib/llvm/include/llvm/InitializePasses.h b/contrib/llvm/include/llvm/InitializePasses.h
index de97957..8c164eb 100644
--- a/contrib/llvm/include/llvm/InitializePasses.h
+++ b/contrib/llvm/include/llvm/InitializePasses.h
@@ -66,6 +66,7 @@ void initializeAliasDebuggerPass(PassRegistry&);
void initializeAliasSetPrinterPass(PassRegistry&);
void initializeAlwaysInlinerPass(PassRegistry&);
void initializeArgPromotionPass(PassRegistry&);
+void initializeBarrierNoopPass(PassRegistry&);
void initializeBasicAliasAnalysisPass(PassRegistry&);
void initializeBasicCallGraphPass(PassRegistry&);
void initializeBlockExtractorPassPass(PassRegistry&);
@@ -87,6 +88,7 @@ void initializeCodePlacementOptPass(PassRegistry&);
void initializeConstantMergePass(PassRegistry&);
void initializeConstantPropagationPass(PassRegistry&);
void initializeMachineCopyPropagationPass(PassRegistry&);
+void initializeCostModelAnalysisPass(PassRegistry&);
void initializeCorrelatedValuePropagationPass(PassRegistry&);
void initializeDAEPass(PassRegistry&);
void initializeDAHPass(PassRegistry&);
@@ -94,6 +96,7 @@ void initializeDCEPass(PassRegistry&);
void initializeDSEPass(PassRegistry&);
void initializeDeadInstEliminationPass(PassRegistry&);
void initializeDeadMachineInstructionElimPass(PassRegistry&);
+void initializeDependenceAnalysisPass(PassRegistry&);
void initializeDomOnlyPrinterPass(PassRegistry&);
void initializeDomOnlyViewerPass(PassRegistry&);
void initializeDomPrinterPass(PassRegistry&);
@@ -141,10 +144,10 @@ void initializeLiveRegMatrixPass(PassRegistry&);
void initializeLiveStacksPass(PassRegistry&);
void initializeLiveVariablesPass(PassRegistry&);
void initializeLoaderPassPass(PassRegistry&);
+void initializeProfileMetadataLoaderPassPass(PassRegistry&);
void initializePathProfileLoaderPassPass(PassRegistry&);
void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLoopDeletionPass(PassRegistry&);
-void initializeLoopDependenceAnalysisPass(PassRegistry&);
void initializeLoopExtractorPass(PassRegistry&);
void initializeLoopInfoPass(PassRegistry&);
void initializeLoopInstSimplifyPass(PassRegistry&);
@@ -166,6 +169,7 @@ void initializeMachineBlockPlacementStatsPass(PassRegistry&);
void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
void initializeMachineCSEPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
+void initializeMachinePostDominatorTreePass(PassRegistry&);
void initializeMachineLICMPass(PassRegistry&);
void initializeMachineLoopInfoPass(PassRegistry&);
void initializeMachineLoopRangesPass(PassRegistry&);
@@ -177,6 +181,7 @@ void initializeMachineVerifierPassPass(PassRegistry&);
void initializeMemCpyOptPass(PassRegistry&);
void initializeMemDepPrinterPass(PassRegistry&);
void initializeMemoryDependenceAnalysisPass(PassRegistry&);
+void initializeMetaRenamerPass(PassRegistry&);
void initializeMergeFunctionsPass(PassRegistry&);
void initializeModuleDebugInfoPrinterPass(PassRegistry&);
void initializeNoAAPass(PassRegistry&);
@@ -219,6 +224,7 @@ void initializeRegionOnlyViewerPass(PassRegistry&);
void initializeRegionPrinterPass(PassRegistry&);
void initializeRegionViewerPass(PassRegistry&);
void initializeSCCPPass(PassRegistry&);
+void initializeSROAPass(PassRegistry&);
void initializeSROA_DTPass(PassRegistry&);
void initializeSROA_SSAUpPass(PassRegistry&);
void initializeScalarEvolutionAliasAnalysisPass(PassRegistry&);
@@ -231,6 +237,7 @@ void initializeSinkingPass(PassRegistry&);
void initializeSlotIndexesPass(PassRegistry&);
void initializeSpillPlacementPass(PassRegistry&);
void initializeStackProtectorPass(PassRegistry&);
+void initializeStackColoringPass(PassRegistry&);
void initializeStackSlotColoringPass(PassRegistry&);
void initializeStripDeadDebugInfoPass(PassRegistry&);
void initializeStripDeadPrototypesPassPass(PassRegistry&);
@@ -241,7 +248,8 @@ void initializeStrongPHIEliminationPass(PassRegistry&);
void initializeTailCallElimPass(PassRegistry&);
void initializeTailDuplicatePassPass(PassRegistry&);
void initializeTargetPassConfigPass(PassRegistry&);
-void initializeTargetDataPass(PassRegistry&);
+void initializeDataLayoutPass(PassRegistry&);
+void initializeTargetTransformInfoPass(PassRegistry&);
void initializeTargetLibraryInfoPass(PassRegistry&);
void initializeTwoAddressInstructionPassPass(PassRegistry&);
void initializeTypeBasedAliasAnalysisPass(PassRegistry&);
@@ -254,6 +262,7 @@ void initializeVirtRegRewriterPass(PassRegistry&);
void initializeInstSimplifierPass(PassRegistry&);
void initializeUnpackMachineBundlesPass(PassRegistry&);
void initializeFinalizeMachineBundlesPass(PassRegistry&);
+void initializeLoopVectorizePass(PassRegistry&);
void initializeBBVectorizePass(PassRegistry&);
void initializeMachineFunctionPrinterPassPass(PassRegistry&);
}
diff --git a/contrib/llvm/include/llvm/InlineAsm.h b/contrib/llvm/include/llvm/InlineAsm.h
index 37aa18b..b5e0fd4 100644
--- a/contrib/llvm/include/llvm/InlineAsm.h
+++ b/contrib/llvm/include/llvm/InlineAsm.h
@@ -33,20 +33,28 @@ template<class ConstantClass, class TypeClass, class ValType>
struct ConstantCreator;
class InlineAsm : public Value {
+public:
+ enum AsmDialect {
+ AD_ATT,
+ AD_Intel
+ };
+
+private:
friend struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType>;
friend class ConstantUniqueMap<InlineAsmKeyType, const InlineAsmKeyType&,
PointerType, InlineAsm, false>;
- InlineAsm(const InlineAsm &); // do not implement
- void operator=(const InlineAsm&); // do not implement
+ InlineAsm(const InlineAsm &) LLVM_DELETED_FUNCTION;
+ void operator=(const InlineAsm&) LLVM_DELETED_FUNCTION;
std::string AsmString, Constraints;
bool HasSideEffects;
bool IsAlignStack;
-
+ AsmDialect Dialect;
+
InlineAsm(PointerType *Ty, const std::string &AsmString,
const std::string &Constraints, bool hasSideEffects,
- bool isAlignStack);
+ bool isAlignStack, AsmDialect asmDialect);
virtual ~InlineAsm();
/// When the ConstantUniqueMap merges two types and makes two InlineAsms
@@ -58,11 +66,13 @@ public:
///
static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
StringRef Constraints, bool hasSideEffects,
- bool isAlignStack = false);
+ bool isAlignStack = false,
+ AsmDialect asmDialect = AD_ATT);
bool hasSideEffects() const { return HasSideEffects; }
bool isAlignStack() const { return IsAlignStack; }
-
+ AsmDialect getDialect() const { return Dialect; }
+
/// getType - InlineAsm's are always pointers.
///
PointerType *getType() const {
@@ -179,7 +189,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const InlineAsm *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::InlineAsmVal;
}
@@ -193,17 +202,20 @@ public:
Op_InputChain = 0,
Op_AsmString = 1,
Op_MDNode = 2,
- Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack
+ Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack, AsmDialect.
Op_FirstOperand = 4,
// Fixed operands on an INLINEASM MachineInstr.
MIOp_AsmString = 0,
- MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack
+ MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack, AsmDialect.
MIOp_FirstOperand = 2,
// Interpretation of the MIOp_ExtraInfo bit field.
Extra_HasSideEffects = 1,
Extra_IsAlignStack = 2,
+ Extra_AsmDialect = 4,
+ Extra_MayLoad = 8,
+ Extra_MayStore = 16,
// Inline asm operands map to multiple SDNode / MachineInstr operands.
// The first operand is an immediate describing the asm operand, the low
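Creating Intel-dialect asm through the extended factory; the FunctionType argument is whatever the call site requires:

#include "llvm/InlineAsm.h"
#include "llvm/DerivedTypes.h"

using namespace llvm;

InlineAsm *makeIntelNop(FunctionType *FnTy) {
  return InlineAsm::get(FnTy, "nop", /*Constraints=*/"",
                        /*hasSideEffects=*/true, /*isAlignStack=*/false,
                        InlineAsm::AD_Intel);
}
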
diff --git a/contrib/llvm/include/llvm/InstrTypes.h b/contrib/llvm/include/llvm/InstrTypes.h
index 2529f24..da17f3b 100644
--- a/contrib/llvm/include/llvm/InstrTypes.h
+++ b/contrib/llvm/include/llvm/InstrTypes.h
@@ -73,7 +73,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const TerminatorInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->isTerminator();
}
@@ -88,7 +87,7 @@ public:
//===----------------------------------------------------------------------===//
class UnaryInstruction : public Instruction {
- void *operator new(size_t, unsigned); // Do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
UnaryInstruction(Type *Ty, unsigned iType, Value *V,
@@ -113,7 +112,6 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UnaryInstruction *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Alloca ||
I->getOpcode() == Instruction::Load ||
@@ -138,14 +136,14 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
//===----------------------------------------------------------------------===//
class BinaryOperator : public Instruction {
- void *operator new(size_t, unsigned); // Do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
void init(BinaryOps iType);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, Instruction *InsertBefore);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, BasicBlock *InsertAtEnd);
- virtual BinaryOperator *clone_impl() const;
+ virtual BinaryOperator *clone_impl() const LLVM_OVERRIDE;
public:
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -361,7 +359,6 @@ public:
bool isExact() const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const BinaryOperator *) { return true; }
static inline bool classof(const Instruction *I) {
return I->isBinaryOp();
}
@@ -388,7 +385,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
/// if (isa<CastInst>(Instr)) { ... }
/// @brief Base class of casting instructions.
class CastInst : public UnaryInstruction {
- virtual void anchor();
+ virtual void anchor() LLVM_OVERRIDE;
protected:
/// @brief Constructor with insert-before-instruction semantics for subclasses
CastInst(Type *Ty, unsigned iType, Value *S,
@@ -563,7 +560,7 @@ public:
/// IntPtrTy argument is used to make accurate determinations for casts
/// involving Integer and Pointer types. They are no-op casts if the integer
/// is the same size as the pointer. However, pointer size varies with
- /// platform. Generally, the result of TargetData::getIntPtrType() should be
+ /// platform. Generally, the result of DataLayout::getIntPtrType() should be
/// passed in. If that's not available, use Type::Int64Ty, which will make
/// the isNoopCast call conservative.
/// @brief Determine if the described cast is a no-op cast.
@@ -581,8 +578,8 @@ public:
/// Determine how a pair of casts can be eliminated, if they can be at all.
/// This is a helper function for both CastInst and ConstantExpr.
- /// @returns 0 if the CastInst pair can't be eliminated
- /// @returns Instruction::CastOps value for a cast that can replace
+ /// @returns 0 if the CastInst pair can't be eliminated, otherwise
+ /// returns Instruction::CastOps value for a cast that can replace
/// the pair, casting SrcTy to DstTy.
/// @brief Determine if a cast pair is eliminable
static unsigned isEliminableCastPair(
@@ -591,7 +588,9 @@ public:
Type *SrcTy, ///< SrcTy of 1st cast
Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
Type *DstTy, ///< DstTy of 2nd cast
- Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
+ Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
+ Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
+ Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
);
/// @brief Return the opcode of this CastInst
@@ -611,7 +610,6 @@ public:
static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
/// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const CastInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->isCast();
}
@@ -627,8 +625,8 @@ public:
/// This class is the base class for the comparison instructions.
/// @brief Abstract base class of comparison instructions.
class CmpInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- CmpInst(); // do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ CmpInst() LLVM_DELETED_FUNCTION;
protected:
CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred,
Value *LHS, Value *RHS, const Twine &Name = "",
@@ -638,7 +636,7 @@ protected:
Value *LHS, Value *RHS, const Twine &Name,
BasicBlock *InsertAtEnd);
- virtual void Anchor() const; // Out of line virtual method.
+ virtual void anchor() LLVM_OVERRIDE; // Out of line virtual method.
public:
/// This enumeration lists the possible predicates for CmpInst subclasses.
/// Values in the range 0-31 are reserved for FCmpInst, while values in the
@@ -816,7 +814,6 @@ public:
static bool isFalseWhenEqual(unsigned short predicate);
/// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const CmpInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ICmp ||
I->getOpcode() == Instruction::FCmp;
diff --git a/contrib/llvm/include/llvm/Instruction.h b/contrib/llvm/include/llvm/Instruction.h
index 5512dcc..8aa8a56 100644
--- a/contrib/llvm/include/llvm/Instruction.h
+++ b/contrib/llvm/include/llvm/Instruction.h
@@ -28,8 +28,8 @@ template<typename ValueSubClass, typename ItemParentClass>
class SymbolTableListTraits;
class Instruction : public User, public ilist_node<Instruction> {
- void operator=(const Instruction &); // Do not implement
- Instruction(const Instruction &); // Do not implement
+ void operator=(const Instruction &) LLVM_DELETED_FUNCTION;
+ Instruction(const Instruction &) LLVM_DELETED_FUNCTION;
BasicBlock *Parent;
DebugLoc DbgLoc; // 'dbg' Metadata cache.
@@ -310,7 +310,6 @@ public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Instruction *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() >= Value::InstructionVal;
}
diff --git a/contrib/llvm/include/llvm/Instructions.h b/contrib/llvm/include/llvm/Instructions.h
index f5187e6..69593b4 100644
--- a/contrib/llvm/include/llvm/Instructions.h
+++ b/contrib/llvm/include/llvm/Instructions.h
@@ -112,7 +112,6 @@ public:
bool isStaticAlloca() const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const AllocaInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Alloca);
}
@@ -226,13 +225,13 @@ public:
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
+ /// \brief Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
+ return getPointerOperand()->getType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const LoadInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Load;
}
@@ -255,7 +254,7 @@ private:
/// StoreInst - an instruction for storing to memory
///
class StoreInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void AssertOK();
protected:
virtual StoreInst *clone_impl() const;
@@ -349,12 +348,12 @@ public:
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
+ /// \brief Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
+ return getPointerOperand()->getType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const StoreInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Store;
}
@@ -382,7 +381,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
/// FenceInst - an instruction for ordering other memory operations
///
class FenceInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope);
protected:
virtual FenceInst *clone_impl() const;
@@ -426,7 +425,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FenceInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Fence;
}
@@ -450,7 +448,7 @@ private:
/// there. Returns the value that was loaded.
///
class AtomicCmpXchgInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering Ordering, SynchronizationScope SynchScope);
protected:
@@ -521,12 +519,12 @@ public:
Value *getNewValOperand() { return getOperand(2); }
const Value *getNewValOperand() const { return getOperand(2); }
+ /// \brief Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
+ return getPointerOperand()->getType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const AtomicCmpXchgInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::AtomicCmpXchg;
}
@@ -557,7 +555,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
/// the old value.
///
class AtomicRMWInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
virtual AtomicRMWInst *clone_impl() const;
public:
@@ -665,12 +663,12 @@ public:
Value *getValOperand() { return getOperand(1); }
const Value *getValOperand() const { return getOperand(1); }
+ /// \brief Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
+ return getPointerOperand()->getType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const AtomicRMWInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::AtomicRMW;
}
@@ -768,6 +766,13 @@ public:
return reinterpret_cast<PointerType*>(Instruction::getType());
}
+ /// \brief Returns the address space of this instruction's pointer type.
+ unsigned getAddressSpace() const {
+ // Note that this is always the same as the pointer operand's address space
+ // and that is cheaper to compute, so cheat here.
+ return getPointerAddressSpace();
+ }
+
/// getIndexedType - Returns the type of the element that would be loaded with
/// a load instruction with the specified parameters.
///
@@ -778,10 +783,6 @@ public:
static Type *getIndexedType(Type *Ptr, ArrayRef<Constant *> IdxList);
static Type *getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList);
- /// getIndexedType - Returns the address space used by the GEP pointer.
- ///
- static unsigned getAddressSpace(Value *Ptr);
-
inline op_iterator idx_begin() { return op_begin()+1; }
inline const_op_iterator idx_begin() const { return op_begin()+1; }
inline op_iterator idx_end() { return op_end(); }
@@ -797,22 +798,23 @@ public:
return 0U; // get index for modifying correct operand.
}
- unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getType())->getAddressSpace();
- }
-
/// getPointerOperandType - Method to return the pointer operand as a
/// PointerType.
Type *getPointerOperandType() const {
return getPointerOperand()->getType();
}
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperandType()->getPointerAddressSpace();
+ }
+
/// GetGEPReturnType - Returns the pointer type returned by the GEP
/// instruction, which may be a vector of pointers.
static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
Type *PtrTy = PointerType::get(checkGEPType(
getIndexedType(Ptr->getType(), IdxList)),
- getAddressSpace(Ptr));
+ Ptr->getType()->getPointerAddressSpace());
// Vector GEP
if (Ptr->getType()->isVectorTy()) {
unsigned NumElem = cast<VectorType>(Ptr->getType())->getNumElements();
@@ -849,7 +851,6 @@ public:
bool isInBounds() const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GetElementPtrInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::GetElementPtr);
}
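
[editor's note] With the static GetElementPtrInst::getAddressSpace(Value*) helper gone, the address space is read directly off the pointer operand's type, and the new instance-level getAddressSpace() is documented to agree with it. A small consistency sketch with an illustrative helper name:

    #include <cassert>

    // For any GEP the result's address space equals the pointer operand's,
    // which is why getAddressSpace() simply forwards to
    // getPointerAddressSpace().
    static unsigned gepAddressSpace(const llvm::GetElementPtrInst *GEP) {
      assert(GEP->getAddressSpace() == GEP->getPointerAddressSpace());
      return GEP->getPointerAddressSpace();
    }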
@@ -897,13 +898,13 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
-/// @brief Represent an integer comparison operator.
+/// \brief Represent an integer comparison operator.
class ICmpInst: public CmpInst {
protected:
- /// @brief Clone an identical ICmpInst
+ /// \brief Clone an identical ICmpInst
virtual ICmpInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics.
+ /// \brief Constructor with insert-before-instruction semantics.
ICmpInst(
Instruction *InsertBefore, ///< Where to insert
Predicate pred, ///< The predicate to use for the comparison
@@ -924,7 +925,7 @@ public:
"Invalid operand types for ICmp instruction");
}
- /// @brief Constructor with insert-at-end semantics.
+ /// \brief Constructor with insert-at-end semantics.
ICmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
Predicate pred, ///< The predicate to use for the comparison
@@ -945,7 +946,7 @@ public:
"Invalid operand types for ICmp instruction");
}
- /// @brief Constructor with no-insertion semantics
+ /// \brief Constructor with no-insertion semantics
ICmpInst(
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
@@ -967,25 +968,25 @@ public:
/// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
/// @returns the predicate that would be the result if the operand were
/// regarded as signed.
- /// @brief Return the signed version of the predicate
+ /// \brief Return the signed version of the predicate
Predicate getSignedPredicate() const {
return getSignedPredicate(getPredicate());
}
/// This is a static version that you can use without an instruction.
- /// @brief Return the signed version of the predicate.
+ /// \brief Return the signed version of the predicate.
static Predicate getSignedPredicate(Predicate pred);
/// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
/// @returns the predicate that would be the result if the operand were
/// regarded as unsigned.
- /// @brief Return the unsigned version of the predicate
+ /// \brief Return the unsigned version of the predicate
Predicate getUnsignedPredicate() const {
return getUnsignedPredicate(getPredicate());
}
/// This is a static version that you can use without an instruction.
- /// @brief Return the unsigned version of the predicate.
+ /// \brief Return the unsigned version of the predicate.
static Predicate getUnsignedPredicate(Predicate pred);
/// isEquality - Return true if this predicate is either EQ or NE. This also
@@ -1001,7 +1002,7 @@ public:
}
/// @returns true if the predicate of this ICmpInst is commutative
- /// @brief Determine if this relation is commutative.
+ /// \brief Determine if this relation is commutative.
bool isCommutative() const { return isEquality(); }
/// isRelational - Return true if the predicate is relational (not EQ or NE).
@@ -1017,21 +1018,20 @@ public:
}
/// Initialize a set of values that all satisfy the predicate with C.
- /// @brief Make a ConstantRange for a relation with a constant value.
+ /// \brief Make a ConstantRange for a relation with a constant value.
static ConstantRange makeConstantRange(Predicate pred, const APInt &C);
/// Exchange the two operands to this instruction in such a way that it does
/// not modify the semantics of the instruction. The predicate value may be
/// changed to retain the same result if the predicate is order dependent
/// (e.g. ult).
- /// @brief Swap operands and adjust predicate.
+ /// \brief Swap operands and adjust predicate.
void swapOperands() {
setPredicate(getSwappedPredicate());
Op<0>().swap(Op<1>());
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ICmpInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ICmp;
}
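
[editor's note] Beyond the doxygen cleanup, the predicate helpers above deserve a usage note: swapOperands() preserves the comparison's meaning by swapping the predicate along with the operands. A minimal sketch:

    // 'a >u b' and 'b <u a' are the same relation, so canonicalizing to
    // ICMP_ULT only requires the combined swap that swapOperands() performs.
    static void canonicalizeToULT(llvm::ICmpInst *Cmp) {
      if (Cmp->getPredicate() == llvm::ICmpInst::ICMP_UGT)
        Cmp->swapOperands(); // predicate becomes ICMP_ULT, operands exchanged
    }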
@@ -1048,13 +1048,13 @@ public:
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
-/// @brief Represents a floating point comparison operator.
+/// \brief Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
protected:
- /// @brief Clone an identical FCmpInst
+ /// \brief Clone an identical FCmpInst
virtual FCmpInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics.
+ /// \brief Constructor with insert-before-instruction semantics.
FCmpInst(
Instruction *InsertBefore, ///< Where to insert
Predicate pred, ///< The predicate to use for the comparison
@@ -1073,7 +1073,7 @@ public:
"Invalid operand types for FCmp instruction");
}
- /// @brief Constructor with insert-at-end semantics.
+ /// \brief Constructor with insert-at-end semantics.
FCmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
Predicate pred, ///< The predicate to use for the comparison
@@ -1092,7 +1092,7 @@ public:
"Invalid operand types for FCmp instruction");
}
- /// @brief Constructor with no-insertion semantics
+ /// \brief Constructor with no-insertion semantics
FCmpInst(
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
@@ -1110,14 +1110,14 @@ public:
}
/// @returns true if the predicate of this instruction is EQ or NE.
- /// @brief Determine if this is an equality predicate.
+ /// \brief Determine if this is an equality predicate.
bool isEquality() const {
return getPredicate() == FCMP_OEQ || getPredicate() == FCMP_ONE ||
getPredicate() == FCMP_UEQ || getPredicate() == FCMP_UNE;
}
/// @returns true if the predicate of this instruction is commutative.
- /// @brief Determine if this is a commutative predicate.
+ /// \brief Determine if this is a commutative predicate.
bool isCommutative() const {
return isEquality() ||
getPredicate() == FCMP_FALSE ||
@@ -1127,21 +1127,20 @@ public:
}
/// @returns true if the predicate is relational (not EQ or NE).
- /// @brief Determine if this a relational predicate.
+ /// \brief Determine if this is a relational predicate.
bool isRelational() const { return !isEquality(); }
/// Exchange the two operands to this instruction in such a way that it does
/// not modify the semantics of the instruction. The predicate value may be
/// changed to retain the same result if the predicate is order dependent
/// (e.g. ult).
- /// @brief Swap operands and adjust predicate.
+ /// \brief Swap operands and adjust predicate.
void swapOperands() {
setPredicate(getSwappedPredicate());
Op<0>().swap(Op<1>());
}
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FCmpInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::FCmp;
}
@@ -1163,12 +1162,12 @@ class CallInst : public Instruction {
void init(Value *Func, const Twine &NameStr);
/// Construct a CallInst given a range of arguments.
- /// @brief Construct a CallInst from a range of arguments
+ /// \brief Construct a CallInst from a range of arguments
inline CallInst(Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr, Instruction *InsertBefore);
/// Construct a CallInst given a range of arguments.
- /// @brief Construct a CallInst from a range of arguments
+ /// \brief Construct a CallInst from a range of arguments
inline CallInst(Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd);
@@ -1267,77 +1266,78 @@ public:
/// removeAttribute - removes the attribute from the list of attributes.
void removeAttribute(unsigned i, Attributes attr);
- /// \brief Return true if this call has the given attribute.
- bool hasFnAttr(Attributes N) const {
- return paramHasAttr(~0, N);
- }
+ /// \brief Determine whether this call has the given attribute.
+ bool hasFnAttr(Attributes::AttrVal A) const;
- /// @brief Determine whether the call or the callee has the given attribute.
- bool paramHasAttr(unsigned i, Attributes attr) const;
+ /// \brief Determine whether the call or the callee has the given attributes.
+ bool paramHasAttr(unsigned i, Attributes::AttrVal A) const;
- /// @brief Extract the alignment for a call or parameter (0=unknown).
+ /// \brief Extract the alignment for a call or parameter (0=unknown).
unsigned getParamAlignment(unsigned i) const {
return AttributeList.getParamAlignment(i);
}
- /// @brief Return true if the call should not be inlined.
- bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
- void setIsNoInline(bool Value = true) {
- if (Value) addAttribute(~0, Attribute::NoInline);
- else removeAttribute(~0, Attribute::NoInline);
+ /// \brief Return true if the call should not be inlined.
+ bool isNoInline() const { return hasFnAttr(Attributes::NoInline); }
+ void setIsNoInline() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoInline));
}
- /// @brief Return true if the call can return twice
+ /// \brief Return true if the call can return twice
bool canReturnTwice() const {
- return hasFnAttr(Attribute::ReturnsTwice);
+ return hasFnAttr(Attributes::ReturnsTwice);
}
- void setCanReturnTwice(bool Value = true) {
- if (Value) addAttribute(~0, Attribute::ReturnsTwice);
- else removeAttribute(~0, Attribute::ReturnsTwice);
+ void setCanReturnTwice() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReturnsTwice));
}
- /// @brief Determine if the call does not access memory.
+ /// \brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
- return hasFnAttr(Attribute::ReadNone);
+ return hasFnAttr(Attributes::ReadNone);
}
- void setDoesNotAccessMemory(bool NotAccessMemory = true) {
- if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone);
- else removeAttribute(~0, Attribute::ReadNone);
+ void setDoesNotAccessMemory() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReadNone));
}
- /// @brief Determine if the call does not access or only reads memory.
+ /// \brief Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ return doesNotAccessMemory() || hasFnAttr(Attributes::ReadOnly);
}
- void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
- if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly);
- else removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone);
+ void setOnlyReadsMemory() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReadOnly));
}
- /// @brief Determine if the call cannot return.
- bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
- void setDoesNotReturn(bool DoesNotReturn = true) {
- if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
- else removeAttribute(~0, Attribute::NoReturn);
+ /// \brief Determine if the call cannot return.
+ bool doesNotReturn() const { return hasFnAttr(Attributes::NoReturn); }
+ void setDoesNotReturn() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoReturn));
}
- /// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
- void setDoesNotThrow(bool DoesNotThrow = true) {
- if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
- else removeAttribute(~0, Attribute::NoUnwind);
+ /// \brief Determine if the call cannot unwind.
+ bool doesNotThrow() const { return hasFnAttr(Attributes::NoUnwind); }
+ void setDoesNotThrow() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoUnwind));
}
- /// @brief Determine if the call returns a structure through first
+ /// \brief Determine if the call returns a structure through the first
/// pointer argument.
bool hasStructRetAttr() const {
// Be friendly and also check the callee.
- return paramHasAttr(1, Attribute::StructRet);
+ return paramHasAttr(1, Attributes::StructRet);
}
- /// @brief Determine if any call argument is an aggregate passed by value.
+ /// \brief Determine if any call argument is an aggregate passed by value.
bool hasByValArgument() const {
- return AttributeList.hasAttrSomewhere(Attribute::ByVal);
+ for (unsigned I = 0, E = AttributeList.getNumAttrs(); I != E; ++I)
+ if (AttributeList.getAttributesAtIndex(I).hasAttribute(Attributes::ByVal))
+ return true;
+ return false;
}
/// getCalledFunction - Return the function called, or null if this is an
@@ -1363,7 +1363,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const CallInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Call;
}
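
[editor's note] The CallInst attribute migration above is the core of this hunk: hasFnAttr/paramHasAttr now take an Attributes::AttrVal enumerator, the boolean toggle setters become add-only, and function-level attributes are addressed through AttrListPtr::FunctionIndex rather than the magic ~0 index. A usage sketch under the new scheme (helper name illustrative, API names as they appear in the patch):

    #include "llvm/Instructions.h"
    #include <cassert>
    using namespace llvm;

    static void markCall(CallInst *CI) {
      CI->setIsNoInline();   // adds Attributes::NoInline at
                             // AttrListPtr::FunctionIndex
      CI->setDoesNotThrow(); // adds Attributes::NoUnwind the same way
      assert(CI->isNoInline() && CI->doesNotThrow());
    }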
@@ -1469,7 +1468,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SelectInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Select;
}
@@ -1512,7 +1510,6 @@ public:
static unsigned getPointerOperandIndex() { return 0U; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const VAArgInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == VAArg;
}
@@ -1566,7 +1563,6 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ExtractElementInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ExtractElement;
}
@@ -1625,7 +1621,6 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const InsertElementInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::InsertElement;
}
@@ -1706,7 +1701,6 @@ public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ShuffleVectorInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ShuffleVector;
}
@@ -1802,7 +1796,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ExtractValueInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ExtractValue;
}
@@ -1839,7 +1832,7 @@ ExtractValueInst::ExtractValueInst(Value *Agg,
class InsertValueInst : public Instruction {
SmallVector<unsigned, 4> Indices;
- void *operator new(size_t, unsigned); // Do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
InsertValueInst(const InsertValueInst &IVI);
void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
const Twine &NameStr);
@@ -1924,7 +1917,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const InsertValueInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::InsertValue;
}
@@ -1970,7 +1962,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
// scientist's overactive imagination.
//
class PHINode : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
/// ReservedSpace - The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
@@ -2141,7 +2133,6 @@ public:
Value *hasConstantValue() const;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const PHINode *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::PHI;
}
@@ -2178,7 +2169,7 @@ class LandingPadInst : public Instruction {
public:
enum ClauseType { Catch, Filter };
private:
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
// Allocate space for exactly zero operands.
void *operator new(size_t s) {
return User::operator new(s, 0);
@@ -2249,7 +2240,6 @@ public:
void reserveClauses(unsigned Size) { growOperands(Size); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const LandingPadInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::LandingPad;
}
@@ -2318,7 +2308,6 @@ public:
unsigned getNumSuccessors() const { return 0; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ReturnInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Ret);
}
@@ -2418,7 +2407,6 @@ public:
void swapSuccessors();
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const BranchInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Br);
}
@@ -2445,7 +2433,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
/// SwitchInst - Multiway switch
///
class SwitchInst : public TerminatorInst {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
unsigned ReservedSpace;
// Operands format:
// Operand[0] = Value to switch on
@@ -2613,7 +2601,7 @@ public:
}
/// addCase - Add an entry to the switch instruction...
- /// @Deprecated
+ /// @deprecated
/// Note:
/// This action invalidates case_end(). Old case_end() iterator will
/// point to the added case.
@@ -2699,7 +2687,7 @@ public:
}
/// Resolves case value for current case.
- /// @Deprecated
+ /// @deprecated
ConstantIntTy *getCaseValue() {
assert(Index < SI->getNumCases() && "Index exceeds the number of cases.");
IntegersSubsetRef CaseRanges = *SubsetIt;
@@ -2803,7 +2791,7 @@ public:
CaseIt(const ParentTy& Src) : ParentTy(Src) {}
/// Sets the new value for current case.
- /// @Deprecated.
+ /// @deprecated.
void setValue(ConstantInt *V) {
assert(Index < SI->getNumCases() && "Index exceeds the number of cases.");
IntegersSubsetToBB Mapping;
@@ -2829,7 +2817,6 @@ public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SwitchInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Switch;
}
@@ -2857,7 +2844,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
/// IndirectBrInst - Indirect Branch Instruction.
///
class IndirectBrInst : public TerminatorInst {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
unsigned ReservedSpace;
// Operand[0] = Value to switch on
// Operand[1] = Default basic block destination
@@ -2928,7 +2915,6 @@ public:
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const IndirectBrInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::IndirectBr;
}
@@ -2963,14 +2949,14 @@ class InvokeInst : public TerminatorInst {
/// Construct an InvokeInst given a range of arguments.
///
- /// @brief Construct an InvokeInst from a range of arguments
+ /// \brief Construct an InvokeInst from a range of arguments
inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
ArrayRef<Value *> Args, unsigned Values,
const Twine &NameStr, Instruction *InsertBefore);
/// Construct an InvokeInst given a range of arguments.
///
- /// @brief Construct an InvokeInst from a range of arguments
+ /// \brief Construct an InvokeInst from a range of arguments
inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
ArrayRef<Value *> Args, unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
@@ -3029,68 +3015,69 @@ public:
/// removeAttribute - removes the attribute from the list of attributes.
void removeAttribute(unsigned i, Attributes attr);
- /// \brief Return true if this call has the given attribute.
- bool hasFnAttr(Attributes N) const {
- return paramHasAttr(~0, N);
- }
+ /// \brief Determine whether this call has the given attribute.
+ bool hasFnAttr(Attributes::AttrVal A) const;
- /// @brief Determine whether the call or the callee has the given attribute.
- bool paramHasAttr(unsigned i, Attributes attr) const;
+ /// \brief Determine whether the call or the callee has the given attributes.
+ bool paramHasAttr(unsigned i, Attributes::AttrVal A) const;
- /// @brief Extract the alignment for a call or parameter (0=unknown).
+ /// \brief Extract the alignment for a call or parameter (0=unknown).
unsigned getParamAlignment(unsigned i) const {
return AttributeList.getParamAlignment(i);
}
- /// @brief Return true if the call should not be inlined.
- bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
- void setIsNoInline(bool Value = true) {
- if (Value) addAttribute(~0, Attribute::NoInline);
- else removeAttribute(~0, Attribute::NoInline);
+ /// \brief Return true if the call should not be inlined.
+ bool isNoInline() const { return hasFnAttr(Attributes::NoInline); }
+ void setIsNoInline() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoInline));
}
- /// @brief Determine if the call does not access memory.
+ /// \brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
- return hasFnAttr(Attribute::ReadNone);
+ return hasFnAttr(Attributes::ReadNone);
}
- void setDoesNotAccessMemory(bool NotAccessMemory = true) {
- if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone);
- else removeAttribute(~0, Attribute::ReadNone);
+ void setDoesNotAccessMemory() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReadNone));
}
- /// @brief Determine if the call does not access or only reads memory.
+ /// \brief Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ return doesNotAccessMemory() || hasFnAttr(Attributes::ReadOnly);
}
- void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
- if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly);
- else removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone);
+ void setOnlyReadsMemory() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReadOnly));
}
- /// @brief Determine if the call cannot return.
- bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
- void setDoesNotReturn(bool DoesNotReturn = true) {
- if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
- else removeAttribute(~0, Attribute::NoReturn);
+ /// \brief Determine if the call cannot return.
+ bool doesNotReturn() const { return hasFnAttr(Attributes::NoReturn); }
+ void setDoesNotReturn() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoReturn));
}
- /// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
- void setDoesNotThrow(bool DoesNotThrow = true) {
- if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
- else removeAttribute(~0, Attribute::NoUnwind);
+ /// \brief Determine if the call cannot unwind.
+ bool doesNotThrow() const { return hasFnAttr(Attributes::NoUnwind); }
+ void setDoesNotThrow() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoUnwind));
}
- /// @brief Determine if the call returns a structure through first
+ /// \brief Determine if the call returns a structure through the first
/// pointer argument.
bool hasStructRetAttr() const {
// Be friendly and also check the callee.
- return paramHasAttr(1, Attribute::StructRet);
+ return paramHasAttr(1, Attributes::StructRet);
}
- /// @brief Determine if any call argument is an aggregate passed by value.
+ /// \brief Determine if any call argument is an aggregate passed by value.
bool hasByValArgument() const {
- return AttributeList.hasAttrSomewhere(Attribute::ByVal);
+ for (unsigned I = 0, E = AttributeList.getNumAttrs(); I != E; ++I)
+ if (AttributeList.getAttributesAtIndex(I).hasAttribute(Attributes::ByVal))
+ return true;
+ return false;
}
/// getCalledFunction - Return the function called, or null if this is an
@@ -3141,7 +3128,6 @@ public:
unsigned getNumSuccessors() const { return 2; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const InvokeInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Invoke);
}
@@ -3221,7 +3207,6 @@ public:
unsigned getNumSuccessors() const { return 0; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ResumeInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Resume;
}
@@ -3251,7 +3236,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
/// end of the block cannot be reached.
///
class UnreachableInst : public TerminatorInst {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
virtual UnreachableInst *clone_impl() const;
@@ -3266,7 +3251,6 @@ public:
unsigned getNumSuccessors() const { return 0; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UnreachableInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Unreachable;
}
@@ -3283,14 +3267,14 @@ private:
// TruncInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a truncation of integer types.
+/// \brief This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
- /// @brief Clone an identical TruncInst
+ /// \brief Clone an identical TruncInst
virtual TruncInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
TruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The (smaller) type to truncate to
@@ -3298,7 +3282,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
TruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The (smaller) type to truncate to
@@ -3306,8 +3290,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const TruncInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Trunc;
}
@@ -3320,14 +3303,14 @@ public:
// ZExtInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents zero extension of integer types.
+/// \brief This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
- /// @brief Clone an identical ZExtInst
+ /// \brief Clone an identical ZExtInst
virtual ZExtInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
ZExtInst(
Value *S, ///< The value to be zero extended
Type *Ty, ///< The type to zero extend to
@@ -3335,7 +3318,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end semantics.
+ /// \brief Constructor with insert-at-end semantics.
ZExtInst(
Value *S, ///< The value to be zero extended
Type *Ty, ///< The type to zero extend to
@@ -3343,8 +3326,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ZExtInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == ZExt;
}
@@ -3357,14 +3339,14 @@ public:
// SExtInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a sign extension of integer types.
+/// \brief This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
- /// @brief Clone an identical SExtInst
+ /// \brief Clone an identical SExtInst
virtual SExtInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
SExtInst(
Value *S, ///< The value to be sign extended
Type *Ty, ///< The type to sign extend to
@@ -3372,7 +3354,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
SExtInst(
Value *S, ///< The value to be sign extended
Type *Ty, ///< The type to sign extend to
@@ -3380,8 +3362,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SExtInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == SExt;
}
@@ -3394,14 +3375,14 @@ public:
// FPTruncInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a truncation of floating point types.
+/// \brief This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
- /// @brief Clone an identical FPTruncInst
+ /// \brief Clone an identical FPTruncInst
virtual FPTruncInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPTruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The type to truncate to
@@ -3409,7 +3390,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPTruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The type to truncate to
@@ -3417,8 +3398,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FPTruncInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPTrunc;
}
@@ -3431,14 +3411,14 @@ public:
// FPExtInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents an extension of floating point types.
+/// \brief This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
- /// @brief Clone an identical FPExtInst
+ /// \brief Clone an identical FPExtInst
virtual FPExtInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPExtInst(
Value *S, ///< The value to be extended
Type *Ty, ///< The type to extend to
@@ -3446,7 +3426,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
FPExtInst(
Value *S, ///< The value to be extended
Type *Ty, ///< The type to extend to
@@ -3454,8 +3434,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FPExtInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPExt;
}
@@ -3468,14 +3447,14 @@ public:
// UIToFPInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast unsigned integer to floating point.
+/// \brief This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
- /// @brief Clone an identical UIToFPInst
+ /// \brief Clone an identical UIToFPInst
virtual UIToFPInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
UIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3483,7 +3462,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
UIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3491,8 +3470,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UIToFPInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == UIToFP;
}
@@ -3505,14 +3483,14 @@ public:
// SIToFPInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from signed integer to floating point.
+/// \brief This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
- /// @brief Clone an identical SIToFPInst
+ /// \brief Clone an identical SIToFPInst
virtual SIToFPInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
SIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3520,7 +3498,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
SIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3528,8 +3506,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SIToFPInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == SIToFP;
}
@@ -3542,14 +3519,14 @@ public:
// FPToUIInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from floating point to unsigned integer
+/// \brief This class represents a cast from floating point to unsigned integer
class FPToUIInst : public CastInst {
protected:
- /// @brief Clone an identical FPToUIInst
+ /// \brief Clone an identical FPToUIInst
virtual FPToUIInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPToUIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3557,7 +3534,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
FPToUIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3565,8 +3542,7 @@ public:
BasicBlock *InsertAtEnd ///< Where to insert the new instruction
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FPToUIInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPToUI;
}
@@ -3579,14 +3555,14 @@ public:
// FPToSIInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from floating point to signed integer.
+/// \brief This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
- /// @brief Clone an identical FPToSIInst
+ /// \brief Clone an identical FPToSIInst
virtual FPToSIInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPToSIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3594,7 +3570,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
FPToSIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3602,8 +3578,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FPToSIInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPToSI;
}
@@ -3616,10 +3591,10 @@ public:
// IntToPtrInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from an integer to a pointer.
+/// \brief This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
IntToPtrInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3627,7 +3602,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
IntToPtrInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3635,11 +3610,15 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Clone an identical IntToPtrInst
+ /// \brief Clone an identical IntToPtrInst
virtual IntToPtrInst *clone_impl() const;
+ /// \brief Returns the address space of this instruction's pointer type.
+ unsigned getAddressSpace() const {
+ return getType()->getPointerAddressSpace();
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const IntToPtrInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == IntToPtr;
}
@@ -3652,14 +3631,14 @@ public:
// PtrToIntInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from a pointer to an integer
+/// \brief This class represents a cast from a pointer to an integer
class PtrToIntInst : public CastInst {
protected:
- /// @brief Clone an identical PtrToIntInst
+ /// \brief Clone an identical PtrToIntInst
virtual PtrToIntInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
PtrToIntInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3667,7 +3646,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
PtrToIntInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3675,8 +3654,19 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
+ /// \brief Gets the pointer operand.
+ Value *getPointerOperand() { return getOperand(0); }
+ /// \brief Gets the pointer operand.
+ const Value *getPointerOperand() const { return getOperand(0); }
+ /// \brief Gets the operand index of the pointer operand.
+ static unsigned getPointerOperandIndex() { return 0U; }
+
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const PtrToIntInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == PtrToInt;
}
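
[editor's note] PtrToIntInst now exposes its pointer operand and address space, mirroring the getAddressSpace() added to IntToPtrInst earlier in this file. A sketch pairing the two, assuming a cast/uncast pair produced by the same front end:

    // A ptrtoint/inttoptr round trip only preserves the pointer when both
    // sides agree on the address space.
    static bool roundTripSameAS(const llvm::PtrToIntInst *P2I,
                                const llvm::IntToPtrInst *I2P) {
      return P2I->getPointerAddressSpace() == I2P->getAddressSpace();
    }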
@@ -3689,14 +3679,14 @@ public:
// BitCastInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a no-op cast from one type to another.
+/// \brief This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
- /// @brief Clone an identical BitCastInst
+ /// \brief Clone an identical BitCastInst
virtual BitCastInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
BitCastInst(
Value *S, ///< The value to be cast
Type *Ty, ///< The type to cast to
@@ -3704,7 +3694,7 @@ public:
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
BitCastInst(
Value *S, ///< The value to be cast
Type *Ty, ///< The type to cast to
@@ -3713,7 +3703,6 @@ public:
);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const BitCastInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == BitCast;
}
diff --git a/contrib/llvm/include/llvm/IntrinsicInst.h b/contrib/llvm/include/llvm/IntrinsicInst.h
index 1cebdd2..9b2afd5 100644
--- a/contrib/llvm/include/llvm/IntrinsicInst.h
+++ b/contrib/llvm/include/llvm/IntrinsicInst.h
@@ -34,9 +34,9 @@ namespace llvm {
/// functions. This allows the standard isa/dyn_cast/cast functionality to
/// work with calls to intrinsic functions.
class IntrinsicInst : public CallInst {
- IntrinsicInst(); // DO NOT IMPLEMENT
- IntrinsicInst(const IntrinsicInst&); // DO NOT IMPLEMENT
- void operator=(const IntrinsicInst&); // DO NOT IMPLEMENT
+ IntrinsicInst() LLVM_DELETED_FUNCTION;
+ IntrinsicInst(const IntrinsicInst&) LLVM_DELETED_FUNCTION;
+ void operator=(const IntrinsicInst&) LLVM_DELETED_FUNCTION;
public:
/// getIntrinsicID - Return the intrinsic ID of this intrinsic.
///
@@ -45,7 +45,6 @@ namespace llvm {
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const IntrinsicInst *) { return true; }
static inline bool classof(const CallInst *I) {
if (const Function *CF = I->getCalledFunction())
return CF->getIntrinsicID() != 0;
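
[editor's note] IntrinsicInst keeps a classof chain rather than an opcode check: a Value is an IntrinsicInst if it is a CallInst whose callee is a Function with a nonzero intrinsic ID. That makes ordinary dyn_cast<> work on arbitrary values, as in this sketch (helper name illustrative):

    #include "llvm/IntrinsicInst.h"
    using namespace llvm;

    static bool isDebugInfoCall(const Value *V) {
      // dyn_cast descends Value -> CallInst -> IntrinsicInst via classof.
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(V))
        return II->getIntrinsicID() == Intrinsic::dbg_declare ||
               II->getIntrinsicID() == Intrinsic::dbg_value;
      return false;
    }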
@@ -62,7 +61,6 @@ namespace llvm {
public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const DbgInfoIntrinsic *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
case Intrinsic::dbg_declare:
@@ -86,7 +84,6 @@ namespace llvm {
MDNode *getVariable() const { return cast<MDNode>(getArgOperand(1)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const DbgDeclareInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::dbg_declare;
}
@@ -108,7 +105,6 @@ namespace llvm {
MDNode *getVariable() const { return cast<MDNode>(getArgOperand(2)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const DbgValueInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::dbg_value;
}
@@ -175,7 +171,6 @@ namespace llvm {
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemIntrinsic *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
case Intrinsic::memcpy:
@@ -205,7 +200,6 @@ namespace llvm {
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemSetInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memset;
}
@@ -238,7 +232,6 @@ namespace llvm {
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemTransferInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memcpy ||
I->getIntrinsicID() == Intrinsic::memmove;
@@ -254,7 +247,6 @@ namespace llvm {
class MemCpyInst : public MemTransferInst {
public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemCpyInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memcpy;
}
@@ -268,7 +260,6 @@ namespace llvm {
class MemMoveInst : public MemTransferInst {
public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemMoveInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memmove;
}
@@ -277,6 +268,49 @@ namespace llvm {
}
};
+ /// VAStartInst - This represents the llvm.va_start intrinsic.
+ ///
+ class VAStartInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vastart;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
+ };
+
+ /// VAEndInst - This represents the llvm.va_end intrinsic.
+ ///
+ class VAEndInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vaend;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
+ };
+
+ /// VACopyInst - This represents the llvm.va_copy intrinsic.
+ ///
+ class VACopyInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vacopy;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getDest() const { return const_cast<Value*>(getArgOperand(0)); }
+ Value *getSrc() const { return const_cast<Value*>(getArgOperand(1)); }
+ };
+
}
#endif
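
[editor's note] The three new wrapper classes give llvm.va_start/va_end/va_copy calls the same typed-accessor treatment the mem* intrinsics already enjoy. A usage sketch with an illustrative helper name:

    #include "llvm/IntrinsicInst.h"
    using namespace llvm;

    static void noteVarArgUse(Instruction *I) {
      if (VAStartInst *VAS = dyn_cast<VAStartInst>(I)) {
        Value *List = VAS->getArgList(); // operand 0: the va_list pointer
        (void)List;
      } else if (VACopyInst *VAC = dyn_cast<VACopyInst>(I)) {
        (void)VAC->getDest();            // operand 0
        (void)VAC->getSrc();             // operand 1
      }
    }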
diff --git a/contrib/llvm/include/llvm/Intrinsics.h b/contrib/llvm/include/llvm/Intrinsics.h
index c350388..3108a8e 100644
--- a/contrib/llvm/include/llvm/Intrinsics.h
+++ b/contrib/llvm/include/llvm/Intrinsics.h
@@ -50,7 +50,7 @@ namespace Intrinsic {
/// Intrinsic::getType(ID) - Return the function type for an intrinsic.
///
FunctionType *getType(LLVMContext &Context, ID id,
- ArrayRef<Type*> Tys = ArrayRef<Type*>());
+ ArrayRef<Type*> Tys = ArrayRef<Type*>());
/// Intrinsic::isOverloaded(ID) - Returns true if the intrinsic can be
/// overloaded.
@@ -58,7 +58,7 @@ namespace Intrinsic {
/// Intrinsic::getAttributes(ID) - Return the attributes for an intrinsic.
///
- AttrListPtr getAttributes(ID id);
+ AttrListPtr getAttributes(LLVMContext &C, ID id);
/// Intrinsic::getDeclaration(M, ID) - Create or insert an LLVM Function
/// declaration for an intrinsic, and return it.
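
[editor's note] Intrinsic::getAttributes now takes the LLVMContext explicitly, matching the context-based Attributes::get introduced elsewhere in this patch. A hypothetical call site:

    #include "llvm/Intrinsics.h"
    #include "llvm/Module.h"
    using namespace llvm;

    static AttrListPtr vaStartAttributes(Module &M) {
      // llvm.va_start is not overloaded, so no type list is needed here.
      return Intrinsic::getAttributes(M.getContext(), Intrinsic::vastart);
    }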
diff --git a/contrib/llvm/include/llvm/Intrinsics.td b/contrib/llvm/include/llvm/Intrinsics.td
index d1a0fee..2e1597f 100644
--- a/contrib/llvm/include/llvm/Intrinsics.td
+++ b/contrib/llvm/include/llvm/Intrinsics.td
@@ -121,15 +121,21 @@ def llvm_metadata_ty : LLVMType<MetadataVT>; // !{...}
def llvm_x86mmx_ty : LLVMType<x86mmx>;
def llvm_ptrx86mmx_ty : LLVMPointerType<llvm_x86mmx_ty>; // <1 x i64>*
+def llvm_v2i1_ty : LLVMType<v2i1>; // 2 x i1
+def llvm_v4i1_ty : LLVMType<v4i1>; // 4 x i1
+def llvm_v8i1_ty : LLVMType<v8i1>; // 8 x i1
+def llvm_v16i1_ty : LLVMType<v16i1>; // 16 x i1
def llvm_v2i8_ty : LLVMType<v2i8>; // 2 x i8
def llvm_v4i8_ty : LLVMType<v4i8>; // 4 x i8
def llvm_v8i8_ty : LLVMType<v8i8>; // 8 x i8
def llvm_v16i8_ty : LLVMType<v16i8>; // 16 x i8
def llvm_v32i8_ty : LLVMType<v32i8>; // 32 x i8
+def llvm_v1i16_ty : LLVMType<v1i16>; // 1 x i16
def llvm_v2i16_ty : LLVMType<v2i16>; // 2 x i16
def llvm_v4i16_ty : LLVMType<v4i16>; // 4 x i16
def llvm_v8i16_ty : LLVMType<v8i16>; // 8 x i16
def llvm_v16i16_ty : LLVMType<v16i16>; // 16 x i16
+def llvm_v1i32_ty : LLVMType<v1i32>; // 1 x i32
def llvm_v2i32_ty : LLVMType<v2i32>; // 2 x i32
def llvm_v4i32_ty : LLVMType<v4i32>; // 4 x i32
def llvm_v8i32_ty : LLVMType<v8i32>; // 8 x i32
@@ -279,9 +285,9 @@ let Properties = [IntrNoMem] in {
// NOTE: these are internal interfaces.
def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
-def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
+def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>;
-def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
+def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
// Internal interface for object size checking
def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i1_ty],
@@ -339,7 +345,7 @@ let Properties = [IntrNoMem] in {
}
def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>;
def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
-def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;
+def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty], [IntrNoReturn]>;
//===---------------- Generic Variable Attribute Intrinsics----------------===//
//
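
[editor's note] Tagging the longjmp-family intrinsics IntrNoReturn lets generic IR code treat them like any other noreturn call. A sketch of the observable effect, assuming IntrNoReturn surfaces as the NoReturn function attribute on the intrinsic declaration:

    // After this change a call to llvm.longjmp (or llvm.eh.sjlj.longjmp)
    // should answer true here, so code following it can be pruned as dead.
    static bool neverReturns(const llvm::CallInst *CI) {
      return CI->doesNotReturn();
    }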
diff --git a/contrib/llvm/include/llvm/IntrinsicsARM.td b/contrib/llvm/include/llvm/IntrinsicsARM.td
index fa8034e..93b1ae1 100644
--- a/contrib/llvm/include/llvm/IntrinsicsARM.td
+++ b/contrib/llvm/include/llvm/IntrinsicsARM.td
@@ -16,147 +16,136 @@
// TLS
let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
- Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
-}
+
+def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
+ Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Saturating Arithmetic
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, Commutative]>;
- def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-}
+def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Load and Store exclusive doubleword
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
- llvm_ptr_ty], [IntrReadWriteArgMem]>;
- def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty],
- [IntrReadArgMem]>;
-}
+def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_ptr_ty], [IntrReadWriteArgMem]>;
+def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty],
+ [IntrReadArgMem]>;
//===----------------------------------------------------------------------===//
// VFP
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">,
- Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
- def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">,
- Intrinsic<[], [llvm_i32_ty], []>;
- def int_arm_vcvtr : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
- [IntrNoMem]>;
- def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
- [IntrNoMem]>;
-}
+def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">,
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">,
+ Intrinsic<[], [llvm_i32_ty], []>;
+def int_arm_vcvtr : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+ [IntrNoMem]>;
+def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+ [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Coprocessor
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- // Move to coprocessor
- def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
- def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
-
- // Move from coprocessor
- def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], []>;
- def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], []>;
-
- // Coprocessor data processing
- def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
- def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
-
- // Move from two registers to coprocessor
- def int_arm_mcrr : GCCBuiltin<"__builtin_arm_mcrr">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], []>;
- def int_arm_mcrr2 : GCCBuiltin<"__builtin_arm_mcrr2">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], []>;
-}
+// Move to coprocessor
+def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+// Move from coprocessor
+def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+
+// Coprocessor data processing
+def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+// Move from two registers to coprocessor
+def int_arm_mcrr : GCCBuiltin<"__builtin_arm_mcrr">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mcrr2 : GCCBuiltin<"__builtin_arm_mcrr2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
-
- // The following classes do not correspond directly to GCC builtins.
- class Neon_1Arg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- class Neon_1Arg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedElementVectorType<0>], [IntrNoMem]>;
- class Neon_2Arg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
- class Neon_2Arg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedElementVectorType<0>,
- LLVMExtendedElementVectorType<0>],
- [IntrNoMem]>;
- class Neon_2Arg_Long_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMTruncatedElementVectorType<0>,
- LLVMTruncatedElementVectorType<0>],
- [IntrNoMem]>;
- class Neon_3Arg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
- class Neon_3Arg_Long_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMTruncatedElementVectorType<0>,
- LLVMTruncatedElementVectorType<0>],
- [IntrNoMem]>;
- class Neon_CvtFxToFP_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
- class Neon_CvtFPToFx_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
-
- // The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors.
- // Besides the table, VTBL has one other v8i8 argument and VTBX has two.
- // Overall, the classes range from 2 to 6 v8i8 arguments.
- class Neon_Tbl2Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
- class Neon_Tbl3Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
- class Neon_Tbl4Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty],
- [IntrNoMem]>;
- class Neon_Tbl5Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem]>;
- class Neon_Tbl6Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
- llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
-}
+// The following classes do not correspond directly to GCC builtins.
+class Neon_1Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+class Neon_1Arg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMExtendedElementVectorType<0>], [IntrNoMem]>;
+class Neon_2Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+class Neon_2Arg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMExtendedElementVectorType<0>,
+ LLVMExtendedElementVectorType<0>],
+ [IntrNoMem]>;
+class Neon_2Arg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedElementVectorType<0>,
+ LLVMTruncatedElementVectorType<0>],
+ [IntrNoMem]>;
+class Neon_3Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+class Neon_3Arg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMTruncatedElementVectorType<0>,
+ LLVMTruncatedElementVectorType<0>],
+ [IntrNoMem]>;
+class Neon_CvtFxToFP_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
+class Neon_CvtFPToFx_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors.
+// Besides the table, VTBL has one other v8i8 argument and VTBX has two.
+// Overall, the classes range from 2 to 6 v8i8 arguments.
+class Neon_Tbl2Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl3Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl4Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty],
+ [IntrNoMem]>;
+class Neon_Tbl5Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
+ llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl6Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
+ llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
// Arithmetic ops
@@ -209,20 +198,18 @@ def int_arm_neon_vsubhn : Neon_2Arg_Narrow_Intrinsic;
def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;
// Vector Absolute Compare.
-let TargetPrefix = "arm" in {
- def int_arm_neon_vacged : Intrinsic<[llvm_v2i32_ty],
- [llvm_v2f32_ty, llvm_v2f32_ty],
- [IntrNoMem]>;
- def int_arm_neon_vacgeq : Intrinsic<[llvm_v4i32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty],
- [IntrNoMem]>;
- def int_arm_neon_vacgtd : Intrinsic<[llvm_v2i32_ty],
- [llvm_v2f32_ty, llvm_v2f32_ty],
- [IntrNoMem]>;
- def int_arm_neon_vacgtq : Intrinsic<[llvm_v4i32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty],
- [IntrNoMem]>;
-}
+def int_arm_neon_vacged : Intrinsic<[llvm_v2i32_ty],
+ [llvm_v2f32_ty, llvm_v2f32_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vacgeq : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vacgtd : Intrinsic<[llvm_v2i32_ty],
+ [llvm_v2f32_ty, llvm_v2f32_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vacgtq : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
// Vector Absolute Differences.
def int_arm_neon_vabds : Neon_2Arg_Intrinsic;
@@ -235,24 +222,20 @@ def int_arm_neon_vpadd : Neon_2Arg_Intrinsic;
// Note: This is different than the other "long" NEON intrinsics because
// the result vector has half as many elements as the source vector.
// The source and destination vector types must be specified separately.
-let TargetPrefix = "arm" in {
- def int_arm_neon_vpaddls : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
- [IntrNoMem]>;
- def int_arm_neon_vpaddlu : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
- [IntrNoMem]>;
-}
+def int_arm_neon_vpaddls : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vpaddlu : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+ [IntrNoMem]>;
// Vector Pairwise Add and Accumulate Long.
// Note: This is similar to vpaddl but the destination vector also appears
// as the first argument.
-let TargetPrefix = "arm" in {
- def int_arm_neon_vpadals : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty],
- [IntrNoMem]>;
- def int_arm_neon_vpadalu : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty],
- [IntrNoMem]>;
-}
+def int_arm_neon_vpadals : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vpadalu : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
// Vector Pairwise Maximum and Minimum.
def int_arm_neon_vpmaxs : Neon_2Arg_Intrinsic;
@@ -364,79 +347,83 @@ def int_arm_neon_vtbx2 : Neon_Tbl4Arg_Intrinsic;
def int_arm_neon_vtbx3 : Neon_Tbl5Arg_Intrinsic;
def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic;
-let TargetPrefix = "arm" in {
-
- // De-interleaving vector loads from N-element structures.
- // Source operands are the address and alignment.
- def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
-
- // Vector load N-element structure to one lane.
- // Source operands are: the address, the N input vectors (since only one
- // lane is assigned), the lane number, and the alignment.
- def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
- [llvm_ptr_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty,
- llvm_i32_ty], [IntrReadArgMem]>;
- def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>],
- [llvm_ptr_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>],
- [llvm_ptr_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty,
- llvm_i32_ty], [IntrReadArgMem]>;
-
- // Interleaving vector stores from N-element structures.
- // Source operands are: the address, the N vectors, and the alignment.
- def int_arm_neon_vst1 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
- def int_arm_neon_vst2 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrReadWriteArgMem]>;
- def int_arm_neon_vst3 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
- def int_arm_neon_vst4 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrReadWriteArgMem]>;
-
- // Vector store N-element structure from one lane.
- // Source operands are: the address, the N vectors, the lane number, and
- // the alignment.
- def int_arm_neon_vst2lane : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, llvm_i32_ty,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
- def int_arm_neon_vst3lane : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty, llvm_i32_ty],
- [IntrReadWriteArgMem]>;
- def int_arm_neon_vst4lane : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
-}
+// De-interleaving vector loads from N-element structures.
+// Source operands are the address and alignment.
+def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+// Vector load N-element structure to one lane.
+// Source operands are: the address, the N input vectors (since only one
+// lane is assigned), the lane number, and the alignment.
+def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [llvm_ptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadArgMem]>;
+def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>],
+ [llvm_ptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [llvm_ptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadArgMem]>;
+
+// Interleaving vector stores from N-element structures.
+// Source operands are: the address, the N vectors, and the alignment.
+def int_arm_neon_vst1 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst2 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_arm_neon_vst3 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst4 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+// Vector store N-element structure from one lane.
+// Source operands are: the address, the N vectors, the lane number, and
+// the alignment.
+def int_arm_neon_vst2lane : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst3lane : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_arm_neon_vst4lane : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+
+// Vector bitwise select.
+def int_arm_neon_vbsl : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+} // end TargetPrefix
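Note: the ARM changes are mostly mechanical — everything now sits under a single "let TargetPrefix = \"arm\"" region instead of one block per group. The one functional addition is int_arm_neon_vbsl, a per-bit select. A scalar C++ model of the operation; taking the mask as the first operand is our reading of the NEON VBSL definition, not something the .td encodes:

    #include <cstdint>
    #include <cstdio>

    // Scalar model of NEON bitwise select (VBSL): for each bit, take the
    // bit from 'a' where 'mask' is 1 and from 'b' where it is 0. The
    // intrinsic applies this lane-by-lane to whole vectors.
    static uint32_t bsl(uint32_t mask, uint32_t a, uint32_t b) {
      return (a & mask) | (b & ~mask);
    }

    int main() {
      std::printf("%08x\n", bsl(0xff00ff00u, 0xaaaaaaaau, 0x55555555u)); // aa55aa55
      return 0;
    }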
diff --git a/contrib/llvm/include/llvm/IntrinsicsMips.td b/contrib/llvm/include/llvm/IntrinsicsMips.td
index 4375ac2..e40e162 100644
--- a/contrib/llvm/include/llvm/IntrinsicsMips.td
+++ b/contrib/llvm/include/llvm/IntrinsicsMips.td
@@ -14,11 +14,15 @@
//===----------------------------------------------------------------------===//
// MIPS DSP data types
def mips_v2q15_ty: LLVMType<v2i16>;
+def mips_v4q7_ty: LLVMType<v4i8>;
def mips_q31_ty: LLVMType<i32>;
let TargetPrefix = "mips" in { // All intrinsics start with "llvm.mips.".
//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 1
+
+//===----------------------------------------------------------------------===//
// Addition/subtraction
def int_mips_addu_qb : GCCBuiltin<"__builtin_mips_addu_qb">,
@@ -261,4 +265,125 @@ def int_mips_lhx: GCCBuiltin<"__builtin_mips_lhx">,
Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
def int_mips_lwx: GCCBuiltin<"__builtin_mips_lwx">,
Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 2
+
+def int_mips_absq_s_qb: GCCBuiltin<"__builtin_mips_absq_s_qb">,
+ Intrinsic<[mips_v4q7_ty], [mips_v4q7_ty], []>;
+
+def int_mips_addqh_ph: GCCBuiltin<"__builtin_mips_addqh_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_r_ph: GCCBuiltin<"__builtin_mips_addqh_r_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_w: GCCBuiltin<"__builtin_mips_addqh_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_r_w: GCCBuiltin<"__builtin_mips_addqh_r_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
+ [IntrNoMem, Commutative]>;
+
+def int_mips_addu_ph: GCCBuiltin<"__builtin_mips_addu_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+def int_mips_addu_s_ph: GCCBuiltin<"__builtin_mips_addu_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+
+def int_mips_adduh_qb: GCCBuiltin<"__builtin_mips_adduh_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_adduh_r_qb: GCCBuiltin<"__builtin_mips_adduh_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem, Commutative]>;
+
+def int_mips_append: GCCBuiltin<"__builtin_mips_append">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_balign: GCCBuiltin<"__builtin_mips_balign">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_cmpgdu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgdu_eq_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgdu_lt_qb: GCCBuiltin<"__builtin_mips_cmpgdu_lt_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgdu_le_qb: GCCBuiltin<"__builtin_mips_cmpgdu_le_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+
+def int_mips_dpa_w_ph: GCCBuiltin<"__builtin_mips_dpa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dps_w_ph: GCCBuiltin<"__builtin_mips_dps_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+
+def int_mips_dpaqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpaqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_sa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpax_w_ph: GCCBuiltin<"__builtin_mips_dpax_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpsx_w_ph: GCCBuiltin<"__builtin_mips_dpsx_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpsqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpsqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_sa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+
+def int_mips_mul_ph: GCCBuiltin<"__builtin_mips_mul_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+def int_mips_mul_s_ph: GCCBuiltin<"__builtin_mips_mul_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+
+def int_mips_mulq_rs_w: GCCBuiltin<"__builtin_mips_mulq_rs_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_mulq_s_ph: GCCBuiltin<"__builtin_mips_mulq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_mulq_s_w: GCCBuiltin<"__builtin_mips_mulq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_mulsa_w_ph: GCCBuiltin<"__builtin_mips_mulsa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+
+def int_mips_precr_qb_ph: GCCBuiltin<"__builtin_mips_precr_qb_ph">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+def int_mips_precr_sra_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_ph_w">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_precr_sra_r_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_r_ph_w">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_prepend: GCCBuiltin<"__builtin_mips_prepend">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_shra_qb: GCCBuiltin<"__builtin_mips_shra_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shra_r_qb: GCCBuiltin<"__builtin_mips_shra_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shrl_ph: GCCBuiltin<"__builtin_mips_shrl_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_subqh_ph: GCCBuiltin<"__builtin_mips_subqh_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subqh_r_ph: GCCBuiltin<"__builtin_mips_subqh_r_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subqh_w: GCCBuiltin<"__builtin_mips_subqh_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+def int_mips_subqh_r_w: GCCBuiltin<"__builtin_mips_subqh_r_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+
+def int_mips_subu_ph: GCCBuiltin<"__builtin_mips_subu_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+def int_mips_subu_s_ph: GCCBuiltin<"__builtin_mips_subu_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+
+def int_mips_subuh_qb: GCCBuiltin<"__builtin_mips_subuh_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_subuh_r_qb: GCCBuiltin<"__builtin_mips_subuh_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
}
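Note: many of the new DSP Rev 2 intrinsics operate on Q15 fixed-point lanes (mips_v2q15_ty is just v2i16 reinterpreted). The addqh family widens, adds, then halves; the _r variants round before the halving shift. A scalar model per our reading of the MIPS DSP ASE — illustration only, the intrinsics apply this per 16-bit lane:

    #include <cstdint>
    #include <cstdio>

    // "Add, halve" on one Q15 lane: widen to 32 bits, add, shift right
    // by one. The _r form adds a rounding bit first.
    static int16_t addqh(int16_t a, int16_t b) {
      return (int16_t)(((int32_t)a + (int32_t)b) >> 1);
    }
    static int16_t addqh_r(int16_t a, int16_t b) {
      return (int16_t)((((int32_t)a + (int32_t)b) + 1) >> 1);
    }

    int main() {
      std::printf("%d %d\n", addqh(3, 4), addqh_r(3, 4)); // 3 4
      return 0;
    }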
diff --git a/contrib/llvm/include/llvm/IntrinsicsX86.td b/contrib/llvm/include/llvm/IntrinsicsX86.td
index e8039f2..d2463c0 100644
--- a/contrib/llvm/include/llvm/IntrinsicsX86.td
+++ b/contrib/llvm/include/llvm/IntrinsicsX86.td
@@ -219,7 +219,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse_storeu_ps : GCCBuiltin<"__builtin_ia32_storeups">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v4f32_ty], []>;
+ llvm_v4f32_ty], [IntrReadWriteArgMem]>;
}
// Cacheability support ops
@@ -502,13 +502,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_storeu_pd : GCCBuiltin<"__builtin_ia32_storeupd">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v2f64_ty], []>;
+ llvm_v2f64_ty], [IntrReadWriteArgMem]>;
def int_x86_sse2_storeu_dq : GCCBuiltin<"__builtin_ia32_storedqu">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v16i8_ty], []>;
+ llvm_v16i8_ty], [IntrReadWriteArgMem]>;
def int_x86_sse2_storel_dq : GCCBuiltin<"__builtin_ia32_storelv4si">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v4i32_ty], []>;
+ llvm_v4i32_ty], [IntrReadWriteArgMem]>;
}
// Misc.
@@ -1270,19 +1270,19 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_vbroadcast_ss :
GCCBuiltin<"__builtin_ia32_vbroadcastss">,
- Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx_vbroadcast_sd_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastsd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx_vbroadcast_ss_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastss256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx_vbroadcastf128_pd_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastf128_pd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx_vbroadcastf128_ps_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastf128_ps256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
}
// SIMD load ops
@@ -1294,41 +1294,45 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// SIMD store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_storeu_pd_256 : GCCBuiltin<"__builtin_ia32_storeupd256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_storeu_ps_256 : GCCBuiltin<"__builtin_ia32_storeups256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_storeu_dq_256 : GCCBuiltin<"__builtin_ia32_storedqu256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], [IntrReadWriteArgMem]>;
}
// Conditional load ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">,
- Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty],
+ [IntrReadArgMem]>;
def int_x86_avx_maskload_ps : GCCBuiltin<"__builtin_ia32_maskloadps">,
- Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty],
+ [IntrReadArgMem]>;
def int_x86_avx_maskload_pd_256 : GCCBuiltin<"__builtin_ia32_maskloadpd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty],
+ [IntrReadArgMem]>;
def int_x86_avx_maskload_ps_256 : GCCBuiltin<"__builtin_ia32_maskloadps256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty],
+ [IntrReadArgMem]>;
}
// Conditional store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v2f64_ty, llvm_v2f64_ty], []>;
+ llvm_v2f64_ty, llvm_v2f64_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_maskstore_ps : GCCBuiltin<"__builtin_ia32_maskstoreps">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v4f32_ty, llvm_v4f32_ty], []>;
+ llvm_v4f32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_maskstore_pd_256 :
GCCBuiltin<"__builtin_ia32_maskstorepd256">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v4f64_ty, llvm_v4f64_ty], []>;
+ llvm_v4f64_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_maskstore_ps_256 :
GCCBuiltin<"__builtin_ia32_maskstoreps256">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v8f32_ty, llvm_v8f32_ty], []>;
+ llvm_v8f32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
}
//===----------------------------------------------------------------------===//
@@ -1632,7 +1636,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v8f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_avx2_vbroadcasti128 :
GCCBuiltin<"__builtin_ia32_vbroadcastsi256">,
- Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx2_pbroadcastb_128 :
GCCBuiltin<"__builtin_ia32_pbroadcastb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
@@ -1685,27 +1689,35 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Conditional load ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_maskload_d : GCCBuiltin<"__builtin_ia32_maskloadd">,
- Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
+ [IntrReadArgMem]>;
def int_x86_avx2_maskload_q : GCCBuiltin<"__builtin_ia32_maskloadq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
+ [IntrReadArgMem]>;
def int_x86_avx2_maskload_d_256 : GCCBuiltin<"__builtin_ia32_maskloadd256">,
- Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
+ [IntrReadArgMem]>;
def int_x86_avx2_maskload_q_256 : GCCBuiltin<"__builtin_ia32_maskloadq256">,
- Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
+ [IntrReadArgMem]>;
}
// Conditional store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_maskstore_d : GCCBuiltin<"__builtin_ia32_maskstored">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrReadWriteArgMem]>;
def int_x86_avx2_maskstore_q : GCCBuiltin<"__builtin_ia32_maskstoreq">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrReadWriteArgMem]>;
def int_x86_avx2_maskstore_d_256 :
GCCBuiltin<"__builtin_ia32_maskstored256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrReadWriteArgMem]>;
def int_x86_avx2_maskstore_q_256 :
GCCBuiltin<"__builtin_ia32_maskstoreq256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrReadWriteArgMem]>;
}
// Variable bit shift ops
@@ -2547,3 +2559,15 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_rdrand_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
def int_x86_rdrand_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
}
+
+//===----------------------------------------------------------------------===//
+// RTM intrinsics. Transactional Memory support.
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_xbegin : GCCBuiltin<"__builtin_ia32_xbegin">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+ def int_x86_xend : GCCBuiltin<"__builtin_ia32_xend">,
+ Intrinsic<[], [], []>;
+ def int_x86_xabort : GCCBuiltin<"__builtin_ia32_xabort">,
+ Intrinsic<[], [llvm_i8_ty], [IntrNoReturn]>;
+}
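Note: two distinct things happen in the X86 file. First, the unaligned store and masked load/store intrinsics move from no attributes (or the global IntrReadMem) to the argument-scoped IntrReadArgMem/IntrReadWriteArgMem, so alias analysis can prove them independent of memory not reachable from their pointer argument. Second, the new RTM intrinsics back Intel TSX transactional memory. A sketch of how the corresponding builtins are used from C; it assumes an RTM-capable CPU and -mrtm, and the fallback path is mandatory because a transaction may always abort:

    #include <immintrin.h>   // _xbegin/_xend map onto llvm.x86.xbegin/xend
    #include <cstdio>

    int counter = 0;

    int main() {
      unsigned status = _xbegin();
      if (status == _XBEGIN_STARTED) {
        ++counter;                       // transactional region
        _xend();                         // commit
      } else {
        ++counter;                       // fallback: transaction aborted
      }
      std::printf("%d\n", counter);
      return 0;
    }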
diff --git a/contrib/llvm/include/llvm/LLVMContext.h b/contrib/llvm/include/llvm/LLVMContext.h
index a8306a9..5903e2e 100644
--- a/contrib/llvm/include/llvm/LLVMContext.h
+++ b/contrib/llvm/include/llvm/LLVMContext.h
@@ -15,6 +15,8 @@
#ifndef LLVM_LLVMCONTEXT_H
#define LLVM_LLVMCONTEXT_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class LLVMContextImpl;
@@ -43,7 +45,8 @@ public:
MD_tbaa = 1, // "tbaa"
MD_prof = 2, // "prof"
MD_fpmath = 3, // "fpmath"
- MD_range = 4 // "range"
+ MD_range = 4, // "range"
+ MD_tbaa_struct = 5 // "tbaa.struct"
};
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
@@ -87,9 +90,8 @@ public:
void emitError(const Twine &ErrorStr);
private:
- // DO NOT IMPLEMENT
- LLVMContext(LLVMContext&);
- void operator=(LLVMContext&);
+ LLVMContext(LLVMContext&) LLVM_DELETED_FUNCTION;
+ void operator=(LLVMContext&) LLVM_DELETED_FUNCTION;
/// addModule - Register a module as being instantiated in this context. If
/// the context is deleted, the module will be deleted as well.
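Note: the change recurring throughout this patch replaces the old "declare but never implement" copy-constructor idiom with LLVM_DELETED_FUNCTION, which expands to C++11 "= delete" where available and to nothing otherwise — turning accidental copies into compile-time rather than link-time errors. The LLVMContext hunk also registers a fixed kind ID for the new !tbaa.struct metadata. A sketch of the macro pattern under that assumption (the real definition lives in llvm/Support/Compiler.h, hence the new #include):

    #include <iostream>

    #if __cplusplus >= 201103L
    #define DELETED_FUNCTION = delete
    #else
    #define DELETED_FUNCTION   // pre-C++11: declared but never defined
    #endif

    class Context {
      Context(const Context &) DELETED_FUNCTION;
      void operator=(const Context &) DELETED_FUNCTION;
    public:
      Context() {}
    };

    int main() {
      Context C;
      // Context D = C;  // with C++11: error, use of deleted function
      std::cout << "ok\n";
      return 0;
    }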
diff --git a/contrib/llvm/include/llvm/LinkAllPasses.h b/contrib/llvm/include/llvm/LinkAllPasses.h
index 697c94c..806e4b3 100644
--- a/contrib/llvm/include/llvm/LinkAllPasses.h
+++ b/contrib/llvm/include/llvm/LinkAllPasses.h
@@ -60,10 +60,12 @@ namespace {
(void) llvm::createCFGSimplificationPass();
(void) llvm::createConstantMergePass();
(void) llvm::createConstantPropagationPass();
+ (void) llvm::createCostModelAnalysisPass();
(void) llvm::createDeadArgEliminationPass();
(void) llvm::createDeadCodeEliminationPass();
(void) llvm::createDeadInstEliminationPass();
(void) llvm::createDeadStoreEliminationPass();
+ (void) llvm::createDependenceAnalysisPass();
(void) llvm::createDomOnlyPrinterPass();
(void) llvm::createDomPrinterPass();
(void) llvm::createDomOnlyViewerPass();
@@ -81,11 +83,10 @@ namespace {
(void) llvm::createIPSCCPPass();
(void) llvm::createIndVarSimplifyPass();
(void) llvm::createInstructionCombiningPass();
- (void) llvm::createInternalizePass(false);
+ (void) llvm::createInternalizePass();
(void) llvm::createLCSSAPass();
(void) llvm::createLICMPass();
(void) llvm::createLazyValueInfoPass();
- (void) llvm::createLoopDependenceAnalysisPass();
(void) llvm::createLoopExtractorPass();
(void) llvm::createLoopSimplifyPass();
(void) llvm::createLoopStrengthReducePass();
@@ -107,6 +108,7 @@ namespace {
(void) llvm::createProfileVerifierPass();
(void) llvm::createPathProfileVerifierPass();
(void) llvm::createProfileLoaderPass();
+ (void) llvm::createProfileMetadataLoaderPass();
(void) llvm::createPathProfileLoaderPass();
(void) llvm::createPromoteMemoryToRegisterPass();
(void) llvm::createDemoteRegisterToMemoryPass();
@@ -140,6 +142,7 @@ namespace {
(void) llvm::createLoopDeletionPass();
(void) llvm::createPostDomTree();
(void) llvm::createInstructionNamerPass();
+ (void) llvm::createMetaRenamerPass();
(void) llvm::createFunctionAttrsPass();
(void) llvm::createMergeFunctionsPass();
(void) llvm::createPrintModulePass(0);
@@ -153,6 +156,7 @@ namespace {
(void) llvm::createCorrelatedValuePropagationPass();
(void) llvm::createMemDepPrinter();
(void) llvm::createInstructionSimplifierPass();
+ (void) llvm::createLoopVectorizePass();
(void) llvm::createBBVectorizePass();
(void)new llvm::IntervalPartition();
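Note: LinkAllPasses.h exists only to pull pass object files into statically linked tools; referencing each create*Pass() from code that can never run defeats the linker's dead-stripping. The new entries track passes added in this import (cost model analysis, DependenceAnalysis, the loop vectorizer, the metadata-based profile loader, MetaRenamer), while the removed LoopDependenceAnalysis reference matches its deletion elsewhere in the patch. A sketch of the idiom with a hypothetical pass constructor standing in for the real ones:

    #include <cstdlib>

    void *createMyPass() { return 0; }   // stand-in for a real create*Pass()

    namespace {
    struct ForcePassLinking {
      ForcePassLinking() {
        if (std::getenv("bar") != (char *)-1)  // opaque, always true at run time
          return;
        (void)createMyPass();                  // referenced, never executed
      }
    } ForcePassLinkingObj;
    }

    int main() { return 0; }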
diff --git a/contrib/llvm/include/llvm/MC/MCAsmBackend.h b/contrib/llvm/include/llvm/MC/MCAsmBackend.h
index 05e6286..72ed1a3 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmBackend.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmBackend.h
@@ -30,12 +30,13 @@ class raw_ostream;
/// MCAsmBackend - Generic interface to target specific assembler backends.
class MCAsmBackend {
- MCAsmBackend(const MCAsmBackend &); // DO NOT IMPLEMENT
- void operator=(const MCAsmBackend &); // DO NOT IMPLEMENT
+ MCAsmBackend(const MCAsmBackend &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAsmBackend &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
MCAsmBackend();
unsigned HasReliableSymbolDifference : 1;
+ unsigned HasDataInCodeSupport : 1;
public:
virtual ~MCAsmBackend();
@@ -65,6 +66,12 @@ public:
return HasReliableSymbolDifference;
}
+ /// hasDataInCodeSupport - Check whether this target implements data-in-code
+ /// markers. If not, data region directives will be ignored.
+ bool hasDataInCodeSupport() const {
+ return HasDataInCodeSupport;
+ }
+
/// doesSectionRequireSymbols - Check whether the given section requires that
/// all symbols (even temporaries) have symbol table entries.
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
@@ -99,7 +106,7 @@ public:
/// @}
- /// applyFixup - Apply the \arg Value for given \arg Fixup into the provided
+ /// applyFixup - Apply the \p Value for given \p Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
@@ -126,13 +133,20 @@ public:
/// RelaxInstruction - Relax the instruction in the given fragment to the next
/// wider instruction.
///
- /// \param Inst - The instruction to relax, which may be the same as the
+ /// \param Inst The instruction to relax, which may be the same as the
/// output.
- /// \parm Res [output] - On return, the relaxed instruction.
+ /// \param [out] Res On return, the relaxed instruction.
virtual void relaxInstruction(const MCInst &Inst, MCInst &Res) const = 0;
/// @}
+ /// getMinimumNopSize - Returns the minimum size of a nop in bytes on this
+ /// target. The assembler will use this to emit excess padding in situations
+ /// where the padding required for simple alignment would be less than the
+ /// minimum nop size.
+ ///
+ virtual unsigned getMinimumNopSize() const { return 1; }
+
/// writeNopData - Write an (optimal) nop sequence of Count bytes to the given
/// output. If the target cannot generate such a sequence, it should return an
/// error.
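Note: a sketch of the policy the new getMinimumNopSize() hook's comment describes — when the natural padding is shorter than the target's smallest encodable nop, skip ahead to the next alignment boundary instead. This mirrors the comment, not the assembler's actual code:

    #include <cstdint>
    #include <cstdio>

    static uint64_t paddingFor(uint64_t offset, uint64_t align, unsigned minNop) {
      uint64_t pad = (align - offset % align) % align;
      if (pad != 0 && pad < minNop)
        pad += align;               // emit "excess padding"
      return pad;
    }

    int main() {
      // With 4-byte nops, 3 bytes of padding to a 16-byte boundary
      // becomes 3 + 16 = 19 bytes.
      std::printf("%llu\n", (unsigned long long)paddingFor(13, 16, 4));
      return 0;
    }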
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfo.h b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
index 9f5230b..97aad71 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
@@ -33,7 +33,7 @@ namespace llvm {
}
namespace LCOMM {
- enum LCOMMType { None, NoAlignment, ByteAlignment };
+ enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment };
}
/// MCAsmInfo - This class is intended to be used as a base class for asm
@@ -247,14 +247,14 @@ namespace llvm {
/// .long a - b
bool HasAggressiveSymbolFolding; // Defaults to true.
- /// LCOMMDirectiveType - Describes if the target supports the .lcomm
- /// directive and whether it has an alignment parameter.
- LCOMM::LCOMMType LCOMMDirectiveType; // Defaults to LCOMM::None.
-
- /// COMMDirectiveAlignmentIsInBytes - True is COMMDirective's optional
+ /// COMMDirectiveAlignmentIsInBytes - True if .comm's and .lcomm's optional
/// alignment is to be specified in bytes instead of log2(n).
bool COMMDirectiveAlignmentIsInBytes; // Defaults to true;
+ /// LCOMMDirectiveAlignment - Describes if the .lcomm directive for the
+ /// target supports an alignment argument and how it is interpreted.
+ LCOMM::LCOMMType LCOMMDirectiveAlignmentType; // Defaults to NoAlignment.
+
/// HasDotTypeDotSizeDirective - True if the target has .type and .size
/// directives, this is true for most ELF targets.
bool HasDotTypeDotSizeDirective; // Defaults to true.
@@ -496,13 +496,13 @@ namespace llvm {
bool hasAggressiveSymbolFolding() const {
return HasAggressiveSymbolFolding;
}
- LCOMM::LCOMMType getLCOMMDirectiveType() const {
- return LCOMMDirectiveType;
- }
- bool hasDotTypeDotSizeDirective() const {return HasDotTypeDotSizeDirective;}
bool getCOMMDirectiveAlignmentIsInBytes() const {
return COMMDirectiveAlignmentIsInBytes;
}
+ LCOMM::LCOMMType getLCOMMDirectiveAlignmentType() const {
+ return LCOMMDirectiveAlignmentType;
+ }
+ bool hasDotTypeDotSizeDirective() const {return HasDotTypeDotSizeDirective;}
bool hasSingleParameterDotFile() const { return HasSingleParameterDotFile; }
bool hasNoDeadStrip() const { return HasNoDeadStrip; }
bool hasSymbolResolver() const { return HasSymbolResolver; }
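Note: the LCOMM reshuffle drops the old None member and adds Log2Alignment, so a target can now declare that its .lcomm takes a log2(n) alignment argument rather than a byte count. An illustrative emitter over the enum as it appears in the patch (the printing code is ours, and 'bytes' is assumed to be a power of two):

    #include <cstdio>

    enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment }; // as in the patch

    static void emitLComm(const char *sym, unsigned size, unsigned bytes,
                          LCOMMType t) {
      unsigned log2 = 0;
      while ((1u << log2) < bytes) ++log2;
      switch (t) {
      case NoAlignment:   std::printf(".lcomm %s,%u\n", sym, size); break;
      case ByteAlignment: std::printf(".lcomm %s,%u,%u\n", sym, size, bytes); break;
      case Log2Alignment: std::printf(".lcomm %s,%u,%u\n", sym, size, log2); break;
      }
    }

    int main() {
      emitLComm("buf", 64, 16, Log2Alignment);  // .lcomm buf,64,4
      return 0;
    }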
diff --git a/contrib/llvm/include/llvm/MC/MCAssembler.h b/contrib/llvm/include/llvm/MC/MCAssembler.h
index b7b2d66..5771415 100644
--- a/contrib/llvm/include/llvm/MC/MCAssembler.h
+++ b/contrib/llvm/include/llvm/MC/MCAssembler.h
@@ -40,8 +40,8 @@ class MCAsmBackend;
class MCFragment : public ilist_node<MCFragment> {
friend class MCAsmLayout;
- MCFragment(const MCFragment&); // DO NOT IMPLEMENT
- void operator=(const MCFragment&); // DO NOT IMPLEMENT
+ MCFragment(const MCFragment&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCFragment&) LLVM_DELETED_FUNCTION;
public:
enum FragmentType {
@@ -99,8 +99,6 @@ public:
unsigned getLayoutOrder() const { return LayoutOrder; }
void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
- static bool classof(const MCFragment *O) { return true; }
-
void dump();
};
@@ -151,7 +149,6 @@ public:
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Data;
}
- static bool classof(const MCDataFragment *) { return true; }
};
// FIXME: This current incarnation of MCInstFragment doesn't make much sense, as
@@ -176,7 +173,7 @@ public:
typedef SmallVectorImpl<MCFixup>::iterator fixup_iterator;
public:
- MCInstFragment(MCInst _Inst, MCSectionData *SD = 0)
+ MCInstFragment(const MCInst &_Inst, MCSectionData *SD = 0)
: MCFragment(FT_Inst, SD), Inst(_Inst) {
}
@@ -191,7 +188,7 @@ public:
MCInst &getInst() { return Inst; }
const MCInst &getInst() const { return Inst; }
- void setInst(MCInst Value) { Inst = Value; }
+ void setInst(const MCInst& Value) { Inst = Value; }
/// @}
/// @name Fixup Access
@@ -213,7 +210,6 @@ public:
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Inst;
}
- static bool classof(const MCInstFragment *) { return true; }
};
class MCAlignFragment : public MCFragment {
@@ -225,7 +221,7 @@ class MCAlignFragment : public MCFragment {
/// Value - Value to use for filling padding bytes.
int64_t Value;
- /// ValueSize - The size of the integer (in bytes) of \arg Value.
+ /// ValueSize - The size of the integer (in bytes) of \p Value.
unsigned ValueSize;
/// MaxBytesToEmit - The maximum number of bytes to emit; if the alignment
@@ -263,7 +259,6 @@ public:
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Align;
}
- static bool classof(const MCAlignFragment *) { return true; }
};
class MCFillFragment : public MCFragment {
@@ -272,7 +267,7 @@ class MCFillFragment : public MCFragment {
/// Value - Value to use for filling bytes.
int64_t Value;
- /// ValueSize - The size (in bytes) of \arg Value to use when filling, or 0 if
+ /// ValueSize - The size (in bytes) of \p Value to use when filling, or 0 if
/// this is a virtual fill fragment.
unsigned ValueSize;
@@ -302,7 +297,6 @@ public:
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Fill;
}
- static bool classof(const MCFillFragment *) { return true; }
};
class MCOrgFragment : public MCFragment {
@@ -331,7 +325,6 @@ public:
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Org;
}
- static bool classof(const MCOrgFragment *) { return true; }
};
class MCLEBFragment : public MCFragment {
@@ -364,7 +357,6 @@ public:
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_LEB;
}
- static bool classof(const MCLEBFragment *) { return true; }
};
class MCDwarfLineAddrFragment : public MCFragment {
@@ -401,7 +393,6 @@ public:
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Dwarf;
}
- static bool classof(const MCDwarfLineAddrFragment *) { return true; }
};
class MCDwarfCallFrameFragment : public MCFragment {
@@ -431,7 +422,6 @@ public:
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_DwarfFrame;
}
- static bool classof(const MCDwarfCallFrameFragment *) { return true; }
};
// FIXME: Should this be a separate class, or just merged into MCSection? Since
@@ -440,8 +430,8 @@ public:
class MCSectionData : public ilist_node<MCSectionData> {
friend class MCAsmLayout;
- MCSectionData(const MCSectionData&); // DO NOT IMPLEMENT
- void operator=(const MCSectionData&); // DO NOT IMPLEMENT
+ MCSectionData(const MCSectionData&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCSectionData&) LLVM_DELETED_FUNCTION;
public:
typedef iplist<MCFragment> FragmentListType;
@@ -683,8 +673,8 @@ public:
typedef std::vector<DataRegionData>::iterator data_region_iterator;
private:
- MCAssembler(const MCAssembler&); // DO NOT IMPLEMENT
- void operator=(const MCAssembler&); // DO NOT IMPLEMENT
+ MCAssembler(const MCAssembler&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAssembler&) LLVM_DELETED_FUNCTION;
MCContext &Context;
@@ -738,7 +728,7 @@ private:
/// \param Value [out] On return, the value of the fixup as currently laid
/// out.
/// \return Whether the fixup value was fully resolved. This is true if the
- /// \arg Value result is fixed, otherwise the value may change due to
+ /// \p Value result is fixed, otherwise the value may change due to
/// relocation.
bool evaluateFixup(const MCAsmLayout &Layout,
const MCFixup &Fixup, const MCFragment *DF,
@@ -775,7 +765,7 @@ private:
public:
/// Compute the effective fragment size assuming it is laid out at the given
- /// \arg SectionAddress and \arg FragmentOffset.
+ /// \p SectionAddress and \p FragmentOffset.
uint64_t computeFragmentSize(const MCAsmLayout &Layout,
const MCFragment &F) const;
@@ -804,7 +794,7 @@ public:
public:
/// Construct a new assembler instance.
///
- /// \arg OS - The stream to output to.
+ /// \param OS The stream to output to.
//
// FIXME: How are we going to parameterize this? Two obvious options are stay
// concrete and require clients to pass in a target like object. The other
@@ -824,7 +814,7 @@ public:
MCObjectWriter &getWriter() const { return Writer; }
/// Finish - Do final processing and write the object to the output stream.
- /// \arg Writer is used for custom object writer (as the MCJIT does),
+ /// \p Writer is used for custom object writer (as the MCJIT does),
/// if not specified it is automatically created from backend.
void Finish();
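Note: the many deleted "classof(const MCFoo *) { return true; }" overloads follow an LLVM 3.2 cleanup of the isa<>/cast<> machinery, which no longer needs each class to accept its own exact type; only the kind check against the base class remains. A minimal sketch of the RTTI pattern as the MCFragment hierarchy uses it:

    #include <cassert>

    struct Fragment {
      enum Kind { DataKind, AlignKind };
      Kind K;
      explicit Fragment(Kind k) : K(k) {}
    };

    struct DataFragment : Fragment {
      DataFragment() : Fragment(DataKind) {}
      // One classof() per subclass is all the casting templates need.
      static bool classof(const Fragment *F) { return F->K == DataKind; }
    };

    template <class To, class From> static bool isa_like(const From *F) {
      return To::classof(F);        // roughly what llvm::isa<> boils down to
    }

    int main() {
      DataFragment D;
      Fragment *F = &D;
      assert(isa_like<DataFragment>(F));
      return 0;
    }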
diff --git a/contrib/llvm/include/llvm/MC/MCCodeEmitter.h b/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
index 934ef69..0574890 100644
--- a/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
+++ b/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
@@ -10,6 +10,8 @@
#ifndef LLVM_MC_MCCODEEMITTER_H
#define LLVM_MC_MCCODEEMITTER_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class MCFixup;
class MCInst;
@@ -19,16 +21,16 @@ template<typename T> class SmallVectorImpl;
/// MCCodeEmitter - Generic instruction encoding interface.
class MCCodeEmitter {
private:
- MCCodeEmitter(const MCCodeEmitter &); // DO NOT IMPLEMENT
- void operator=(const MCCodeEmitter &); // DO NOT IMPLEMENT
+ MCCodeEmitter(const MCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCCodeEmitter &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
MCCodeEmitter();
public:
virtual ~MCCodeEmitter();
- /// EncodeInstruction - Encode the given \arg Inst to bytes on the output
- /// stream \arg OS.
+ /// EncodeInstruction - Encode the given \p Inst to bytes on the output
+ /// stream \p OS.
virtual void EncodeInstruction(const MCInst &Inst, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const = 0;
};
diff --git a/contrib/llvm/include/llvm/MC/MCContext.h b/contrib/llvm/include/llvm/MC/MCContext.h
index 59545d3..5a8830c 100644
--- a/contrib/llvm/include/llvm/MC/MCContext.h
+++ b/contrib/llvm/include/llvm/MC/MCContext.h
@@ -40,8 +40,8 @@ namespace llvm {
/// of the sections that it creates.
///
class MCContext {
- MCContext(const MCContext&); // DO NOT IMPLEMENT
- MCContext &operator=(const MCContext&); // DO NOT IMPLEMENT
+ MCContext(const MCContext&) LLVM_DELETED_FUNCTION;
+ MCContext &operator=(const MCContext&) LLVM_DELETED_FUNCTION;
public:
typedef StringMap<MCSymbol*, BumpPtrAllocator&> SymbolTable;
private:
@@ -183,6 +183,7 @@ namespace llvm {
/// LookupSymbol - Get the symbol for \p Name, or null.
MCSymbol *LookupSymbol(StringRef Name) const;
+ MCSymbol *LookupSymbol(const Twine &Name) const;
/// getSymbols - Get a reference for the symbol table for clients that
/// want to, for example, iterate over all symbols. 'const' because we
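Note: the new LookupSymbol(const Twine&) overload lets callers pass a name assembled on the fly without materializing a std::string; Twine defers the concatenation until a buffer is supplied. A sketch of the underlying Twine mechanics using this tree's ADT headers (the label text is made up):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/Twine.h"
    #include <cstdio>

    int main() {
      // A Twine records the pieces; toStringRef() renders them into a
      // caller-provided buffer, typically with no heap allocation. The
      // new overload lets MCContext do this internally.
      llvm::SmallString<32> Buf;
      llvm::StringRef Name =
          (llvm::Twine("$tmp") + llvm::Twine(42)).toStringRef(Buf);
      std::printf("%.*s\n", (int)Name.size(), Name.data());   // $tmp42
      return 0;
    }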
diff --git a/contrib/llvm/include/llvm/MC/MCDwarf.h b/contrib/llvm/include/llvm/MC/MCDwarf.h
index fdb7ab2..8fc437f 100644
--- a/contrib/llvm/include/llvm/MC/MCDwarf.h
+++ b/contrib/llvm/include/llvm/MC/MCDwarf.h
@@ -19,6 +19,7 @@
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/Compiler.h"
#include <vector>
namespace llvm {
@@ -48,8 +49,8 @@ namespace llvm {
MCDwarfFile(StringRef name, unsigned dirIndex)
: Name(name), DirIndex(dirIndex) {}
- MCDwarfFile(const MCDwarfFile&); // DO NOT IMPLEMENT
- void operator=(const MCDwarfFile&); // DO NOT IMPLEMENT
+ MCDwarfFile(const MCDwarfFile&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCDwarfFile&) LLVM_DELETED_FUNCTION;
public:
/// getName - Get the base name of this MCDwarfFile.
StringRef getName() const { return Name; }
@@ -58,7 +59,7 @@ namespace llvm {
unsigned getDirIndex() const { return DirIndex; }
- /// print - Print the value to the stream \arg OS.
+ /// print - Print the value to the stream \p OS.
void print(raw_ostream &OS) const;
/// dump - Print the value to stderr.
@@ -177,8 +178,8 @@ namespace llvm {
class MCLineSection {
private:
- MCLineSection(const MCLineSection&); // DO NOT IMPLEMENT
- void operator=(const MCLineSection&); // DO NOT IMPLEMENT
+ MCLineSection(const MCLineSection&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCLineSection&) LLVM_DELETED_FUNCTION;
public:
// Constructor to create an MCLineSection with an empty MCLineEntries
diff --git a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
index abbe188..38cdc72 100644
--- a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
@@ -85,6 +85,9 @@ public:
const MCFragment &F,
const MCFixup &Fixup,
bool IsPCRel) const;
+ virtual const MCSymbol *undefinedExplicitRelSym(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
virtual void adjustFixupOffset(const MCFixup &Fixup,
uint64_t &RelocOffset);
@@ -93,9 +96,9 @@ public:
/// @name Accessors
/// @{
- uint8_t getOSABI() { return OSABI; }
- uint16_t getEMachine() { return EMachine; }
- bool hasRelocationAddend() { return HasRelocationAddend; }
+ uint8_t getOSABI() const { return OSABI; }
+ uint16_t getEMachine() const { return EMachine; }
+ bool hasRelocationAddend() const { return HasRelocationAddend; }
bool is64Bit() const { return Is64Bit; }
bool isN64() const { return IsN64; }
/// @}
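Note: const-qualifying getOSABI() and friends is more than style — an unqualified getter cannot be called through a const reference, which is how the object writer is typically passed around. A tiny repro of the difference:

    struct Writer {
      unsigned OSABI;
      unsigned getOSABIBad() { return OSABI; }        // pre-patch shape
      unsigned getOSABI() const { return OSABI; }     // post-patch shape
    };

    unsigned use(const Writer &W) {
      // return W.getOSABIBad();  // error: discards const qualifier
      return W.getOSABI();        // fine
    }

    int main() {
      Writer W = {9};
      return use(W) == 9 ? 0 : 1;
    }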
diff --git a/contrib/llvm/include/llvm/MC/MCExpr.h b/contrib/llvm/include/llvm/MC/MCExpr.h
index aa62eb2..00eef27 100644
--- a/contrib/llvm/include/llvm/MC/MCExpr.h
+++ b/contrib/llvm/include/llvm/MC/MCExpr.h
@@ -41,8 +41,8 @@ public:
private:
ExprKind Kind;
- MCExpr(const MCExpr&); // DO NOT IMPLEMENT
- void operator=(const MCExpr&); // DO NOT IMPLEMENT
+ MCExpr(const MCExpr&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCExpr&) LLVM_DELETED_FUNCTION;
bool EvaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
const MCAsmLayout *Layout,
@@ -78,11 +78,11 @@ public:
/// values. If not given, then only non-symbolic expressions will be
/// evaluated.
/// @result - True on success.
+ bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
+ const SectionAddrMap &Addrs) const;
bool EvaluateAsAbsolute(int64_t &Res) const;
bool EvaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const;
bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;
- bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
- const SectionAddrMap &Addrs) const;
/// EvaluateAsRelocatable - Try to evaluate the expression to a relocatable
/// value, i.e. an expression of the fixed form (a - b + constant).
@@ -99,8 +99,6 @@ public:
const MCSection *FindAssociatedSection() const;
/// @}
-
- static bool classof(const MCExpr *) { return true; }
};
inline raw_ostream &operator<<(raw_ostream &OS, const MCExpr &E) {
@@ -132,7 +130,6 @@ public:
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Constant;
}
- static bool classof(const MCConstantExpr *) { return true; }
};
/// MCSymbolRefExpr - Represent a reference to a symbol from inside an
@@ -170,8 +167,10 @@ public:
VK_ARM_TPOFF,
VK_ARM_GOTTPOFF,
VK_ARM_TARGET1,
+ VK_ARM_TARGET2,
- VK_PPC_TOC,
+ VK_PPC_TOC, // TOC base
+ VK_PPC_TOC_ENTRY, // TOC entry
VK_PPC_DARWIN_HA16, // ha16(symbol)
VK_PPC_DARWIN_LO16, // lo16(symbol)
VK_PPC_GAS_HA16, // symbol@ha
@@ -247,7 +246,6 @@ public:
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::SymbolRef;
}
- static bool classof(const MCSymbolRefExpr *) { return true; }
};
/// MCUnaryExpr - Unary assembler expressions.
@@ -301,7 +299,6 @@ public:
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Unary;
}
- static bool classof(const MCUnaryExpr *) { return true; }
};
/// MCBinaryExpr - Binary assembler expressions.
@@ -436,7 +433,6 @@ public:
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Binary;
}
- static bool classof(const MCBinaryExpr *) { return true; }
};
/// MCTargetExpr - This is an extension point for target-specific MCExpr
@@ -445,7 +441,7 @@ public:
/// NOTE: All subclasses are required to have trivial destructors because
/// MCExprs are bump pointer allocated and not destructed.
class MCTargetExpr : public MCExpr {
- virtual void Anchor();
+ virtual void anchor();
protected:
MCTargetExpr() : MCExpr(Target) {}
virtual ~MCTargetExpr() {}
@@ -460,7 +456,6 @@ public:
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
}
- static bool classof(const MCTargetExpr *) { return true; }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCInst.h b/contrib/llvm/include/llvm/MC/MCInst.h
index 397a37d..e91c6a2 100644
--- a/contrib/llvm/include/llvm/MC/MCInst.h
+++ b/contrib/llvm/include/llvm/MC/MCInst.h
@@ -182,7 +182,7 @@ public:
void dump() const;
/// \brief Dump the MCInst as prettily as possible using the additional MC
- /// structures, if given. Operators are separated by the \arg Separator
+ /// structures, if given. Operators are separated by the \p Separator
/// string.
void dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI = 0,
const MCInstPrinter *Printer = 0,
diff --git a/contrib/llvm/include/llvm/MC/MCInstPrinter.h b/contrib/llvm/include/llvm/MC/MCInstPrinter.h
index 3c4f28b..3b9420a 100644
--- a/contrib/llvm/include/llvm/MC/MCInstPrinter.h
+++ b/contrib/llvm/include/llvm/MC/MCInstPrinter.h
@@ -33,12 +33,16 @@ protected:
/// The current set of available features.
unsigned AvailableFeatures;
+ /// True if we are printing marked up assembly.
+ bool UseMarkup;
+
/// Utility function for printing annotations.
void printAnnotation(raw_ostream &OS, StringRef Annot);
public:
MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii,
const MCRegisterInfo &mri)
- : CommentStream(0), MAI(mai), MII(mii), MRI(mri), AvailableFeatures(0) {}
+ : CommentStream(0), MAI(mai), MII(mii), MRI(mri), AvailableFeatures(0),
+ UseMarkup(0) {}
virtual ~MCInstPrinter();
@@ -59,6 +63,13 @@ public:
unsigned getAvailableFeatures() const { return AvailableFeatures; }
void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }
+
+ bool getUseMarkup() const { return UseMarkup; }
+ void setUseMarkup(bool Value) { UseMarkup = Value; }
+
+ /// Utility functions to make adding mark ups simpler.
+ StringRef markup(StringRef s) const;
+ StringRef markup(StringRef a, StringRef b) const;
};
} // namespace llvm
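A sketch of how a target's instruction printer might use the new markup helpers when printing an immediate (printImm is a hypothetical free function; the "<imm:" tag follows the assembly markup convention):

    #include "llvm/MC/MCInstPrinter.h"
    #include "llvm/Support/raw_ostream.h"

    // With setUseMarkup(true) this prints "<imm:#42>"; with markup disabled,
    // markup() returns an empty string and plain "#42" comes out.
    static void printImm(llvm::raw_ostream &OS, int64_t Imm,
                         const llvm::MCInstPrinter &P) {
      OS << P.markup("<imm:") << '#' << Imm << P.markup(">");
    }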
diff --git a/contrib/llvm/include/llvm/MC/MCInstrDesc.h b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
index dbf16d8..02383f8 100644
--- a/contrib/llvm/include/llvm/MC/MCInstrDesc.h
+++ b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
@@ -1,4 +1,4 @@
-//===-- llvm/Mc/McInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
+//===-- llvm/MC/MCInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/include/llvm/MC/MCLabel.h b/contrib/llvm/include/llvm/MC/MCLabel.h
index 727520d..f531de8 100644
--- a/contrib/llvm/include/llvm/MC/MCLabel.h
+++ b/contrib/llvm/include/llvm/MC/MCLabel.h
@@ -14,6 +14,8 @@
#ifndef LLVM_MC_MCLABEL_H
#define LLVM_MC_MCLABEL_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class MCContext;
class raw_ostream;
@@ -30,8 +32,8 @@ namespace llvm {
MCLabel(unsigned instance)
: Instance(instance) {}
- MCLabel(const MCLabel&); // DO NOT IMPLEMENT
- void operator=(const MCLabel&); // DO NOT IMPLEMENT
+ MCLabel(const MCLabel&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCLabel&) LLVM_DELETED_FUNCTION;
public:
/// getInstance - Get the current instance of this Directional Local Label.
unsigned getInstance() const { return Instance; }
@@ -40,7 +42,7 @@ namespace llvm {
/// Label.
unsigned incInstance() { return ++Instance; }
- /// print - Print the value to the stream \arg OS.
+ /// print - Print the value to the stream \p OS.
void print(raw_ostream &OS) const;
/// dump - Print the value to stderr.
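For context, a sketch of what the LLVM_DELETED_FUNCTION idiom buys over the old comment-only convention (MY_DELETED_FUNCTION is a hypothetical stand-in; the real macro lives in llvm/Support/Compiler.h and expands to "= delete" on C++11 compilers, or to nothing so the member stays declared-but-undefined):

    #if __cplusplus >= 201103L
    #define MY_DELETED_FUNCTION = delete  // compile-time error on any use
    #else
    #define MY_DELETED_FUNCTION           // link-time error on use, as before
    #endif

    class NonCopyable {
      NonCopyable(const NonCopyable &) MY_DELETED_FUNCTION;
      void operator=(const NonCopyable &) MY_DELETED_FUNCTION;
    public:
      NonCopyable() {}
    };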
diff --git a/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h b/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
index 949d907..efaabfb 100644
--- a/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
@@ -153,8 +153,8 @@ public:
/// WriteSegmentLoadCommand - Write a segment load command.
///
- /// \arg NumSections - The number of sections in this segment.
- /// \arg SectionDataSize - The total size of the sections.
+ /// \param NumSections The number of sections in this segment.
+ /// \param SectionDataSize The total size of the sections.
void WriteSegmentLoadCommand(unsigned NumSections,
uint64_t VMSize,
uint64_t SectionDataStartOffset,
@@ -233,6 +233,8 @@ public:
void computeSectionAddresses(const MCAssembler &Asm,
const MCAsmLayout &Layout);
+ void markAbsoluteVariableSymbols(MCAssembler &Asm,
+ const MCAsmLayout &Layout);
void ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout);
virtual bool IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
diff --git a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
index 74e2263..23e5513 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
@@ -84,7 +84,8 @@ protected:
/// this is the section to emit them into.
const MCSection *CompactUnwindSection;
- /// DwarfAccelNamesSection, DwarfAccelObjCSection
+ /// DwarfAccelNamesSection, DwarfAccelObjCSection,
+ /// DwarfAccelNamespaceSection, DwarfAccelTypesSection -
/// If we use the DWARF accelerated hash tables then we want to emit these
/// sections.
const MCSection *DwarfAccelNamesSection;
diff --git a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
index a69075d..08b00f1 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -72,6 +72,13 @@ public:
virtual void ChangeSection(const MCSection *Section);
virtual void EmitInstruction(const MCInst &Inst);
virtual void EmitInstToFragment(const MCInst &Inst);
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
+ virtual void EmitValueToAlignment(unsigned ByteAlignment,
+ int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0);
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0);
virtual bool EmitValueToOffset(const MCExpr *Offset, unsigned char Value);
virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
const MCSymbol *LastLabel,
@@ -80,6 +87,9 @@ public:
virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
const MCSymbol *Label);
virtual void EmitGPRel32Value(const MCExpr *Value);
+ virtual void EmitGPRel64Value(const MCExpr *Value);
+ virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
+ unsigned AddrSpace);
virtual void FinishImpl();
/// @}
diff --git a/contrib/llvm/include/llvm/MC/MCObjectWriter.h b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
index 9591a00..14fe75f 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
@@ -11,6 +11,7 @@
#define LLVM_MC_MCOBJECTWRITER_H
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
@@ -35,8 +36,8 @@ class MCValue;
/// The object writer also contains a number of helper methods for writing
/// binary data to the output stream.
class MCObjectWriter {
- MCObjectWriter(const MCObjectWriter &); // DO NOT IMPLEMENT
- void operator=(const MCObjectWriter &); // DO NOT IMPLEMENT
+ MCObjectWriter(const MCObjectWriter &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCObjectWriter &) LLVM_DELETED_FUNCTION;
protected:
raw_ostream &OS;
diff --git a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
index 9a8735f..e102dfb 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
@@ -31,8 +31,8 @@ class AsmLexer : public MCAsmLexer {
const MemoryBuffer *CurBuf;
bool isAtStartOfLine;
- void operator=(const AsmLexer&); // DO NOT IMPLEMENT
- AsmLexer(const AsmLexer&); // DO NOT IMPLEMENT
+ void operator=(const AsmLexer&) LLVM_DELETED_FUNCTION;
+ AsmLexer(const AsmLexer&) LLVM_DELETED_FUNCTION;
protected:
/// LexToken - Read the next token and return its code.
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
index 5e29ad4..0a961d6 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -11,6 +11,7 @@
#define LLVM_MC_MCASMLEXER_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/SMLoc.h"
@@ -39,6 +40,7 @@ public:
// No-value.
EndOfStatement,
Colon,
+ Space,
Plus, Minus, Tilde,
Slash, // '/'
BackSlash, // '\'
@@ -121,10 +123,11 @@ class MCAsmLexer {
SMLoc ErrLoc;
std::string Err;
- MCAsmLexer(const MCAsmLexer &); // DO NOT IMPLEMENT
- void operator=(const MCAsmLexer &); // DO NOT IMPLEMENT
+ MCAsmLexer(const MCAsmLexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAsmLexer &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
const char *TokStart;
+ bool SkipSpace;
MCAsmLexer();
@@ -169,11 +172,14 @@ public:
/// getKind - Get the kind of current token.
AsmToken::TokenKind getKind() const { return CurTok.getKind(); }
- /// is - Check if the current token has kind \arg K.
+ /// is - Check if the current token has kind \p K.
bool is(AsmToken::TokenKind K) const { return CurTok.is(K); }
- /// isNot - Check if the current token has kind \arg K.
+ /// isNot - Check if the current token has kind \p K.
bool isNot(AsmToken::TokenKind K) const { return CurTok.isNot(K); }
+
+ /// setSkipSpace - Set whether spaces should be ignored by the lexer
+ void setSkipSpace(bool val) { SkipSpace = val; }
};
} // End llvm namespace
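A minimal sketch of driving the new hook, assuming a concrete MCAsmLexer subclass instance named Lexer is already set up (hypothetical):

    // Ask the lexer to report AsmToken::Space instead of discarding it,
    // e.g. for a column- or spacing-sensitive directive.
    Lexer.setSkipSpace(false);
    for (llvm::AsmToken Tok = Lexer.Lex();
         Tok.isNot(llvm::AsmToken::EndOfStatement) &&
         Tok.isNot(llvm::AsmToken::Eof);
         Tok = Lexer.Lex()) {
      if (Tok.is(llvm::AsmToken::Space))
        ; // whitespace is now visible here
    }
    Lexer.setSkipSpace(true); // restore the default behavior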
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
index 793c709..a71d3c3 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
@@ -20,6 +20,9 @@ class MCAsmLexer;
class MCAsmParserExtension;
class MCContext;
class MCExpr;
+class MCInstPrinter;
+class MCInstrInfo;
+class MCParsedAsmOperand;
class MCStreamer;
class MCTargetAsmParser;
class SMLoc;
@@ -28,6 +31,16 @@ class SourceMgr;
class StringRef;
class Twine;
+/// MCAsmParserSemaCallback - Generic Sema callback for assembly parser.
+class MCAsmParserSemaCallback {
+public:
+ virtual ~MCAsmParserSemaCallback();
+ virtual void *LookupInlineAsmIdentifier(StringRef Name, void *Loc,
+ unsigned &Size) = 0;
+ virtual bool LookupInlineAsmField(StringRef Base, StringRef Member,
+ unsigned &Offset) = 0;
+};
+
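A sketch of the front-end side of this interface (the lookup bodies are placeholders; only the signatures come from the class above, and the true-return-means-failure convention is assumed from the parser's usual style):

    class ExampleSemaCallback : public llvm::MCAsmParserSemaCallback {
    public:
      virtual void *LookupInlineAsmIdentifier(llvm::StringRef Name, void *Loc,
                                              unsigned &Size) {
        // Resolve Name in the enclosing C/C++ scope and report its size in
        // bytes; return an opaque declaration pointer, or null if unknown.
        (void)Name; (void)Loc;
        Size = 0;
        return 0;
      }
      virtual bool LookupInlineAsmField(llvm::StringRef Base,
                                        llvm::StringRef Member,
                                        unsigned &Offset) {
        // Compute the byte offset of Base.Member within its struct type.
        (void)Base; (void)Member; (void)Offset;
        return true; // failure: field not found
      }
    };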
/// MCAsmParser - Generic assembler parser interface, for use by target specific
/// assembly parsers.
class MCAsmParser {
@@ -35,8 +48,8 @@ public:
typedef bool (*DirectiveHandler)(MCAsmParserExtension*, StringRef, SMLoc);
private:
- MCAsmParser(const MCAsmParser &); // DO NOT IMPLEMENT
- void operator=(const MCAsmParser &); // DO NOT IMPLEMENT
+ MCAsmParser(const MCAsmParser &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAsmParser &) LLVM_DELETED_FUNCTION;
MCTargetAsmParser *TargetParser;
@@ -73,15 +86,26 @@ public:
/// Run - Run the parser on the input source buffer.
virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false) = 0;
- /// Warning - Emit a warning at the location \arg L, with the message \arg
- /// Msg.
+ virtual void setParsingInlineAsm(bool V) = 0;
+ virtual bool isParsingInlineAsm() = 0;
+
+ /// ParseMSInlineAsm - Parse ms-style inline assembly.
+ virtual bool ParseMSInlineAsm(void *AsmLoc, std::string &AsmString,
+ unsigned &NumOutputs, unsigned &NumInputs,
+ SmallVectorImpl<std::pair<void *, bool> > &OpDecls,
+ SmallVectorImpl<std::string> &Constraints,
+ SmallVectorImpl<std::string> &Clobbers,
+ const MCInstrInfo *MII,
+ const MCInstPrinter *IP,
+ MCAsmParserSemaCallback &SI) = 0;
+
+ /// Warning - Emit a warning at the location \p L, with the message \p Msg.
///
/// \return The return value is true, if warnings are fatal.
virtual bool Warning(SMLoc L, const Twine &Msg,
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) = 0;
- /// Error - Emit an error at the location \arg L, with the message \arg
- /// Msg.
+ /// Error - Emit an error at the location \p L, with the message \p Msg.
///
/// \return The return value is always true, as an idiomatic convenience to
/// clients.
@@ -100,7 +124,7 @@ public:
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>());
/// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
- /// and set \arg Res to the identifier contents.
+ /// and set \p Res to the identifier contents.
virtual bool ParseIdentifier(StringRef &Res) = 0;
/// \brief Parse up to the end of statement and return the contents from the
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
index 4e2aee9..0918c93 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
@@ -21,8 +21,8 @@ class Twine;
/// which is implemented by target and object file assembly parser
/// implementations.
class MCAsmParserExtension {
- MCAsmParserExtension(const MCAsmParserExtension &); // DO NOT IMPLEMENT
- void operator=(const MCAsmParserExtension &); // DO NOT IMPLEMENT
+ MCAsmParserExtension(const MCAsmParserExtension &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAsmParserExtension &) LLVM_DELETED_FUNCTION;
MCAsmParser *Parser;
@@ -43,8 +43,8 @@ protected:
public:
virtual ~MCAsmParserExtension();
- /// \brief Initialize the extension for parsing using the given \arg
- /// Parser. The extension should use the AsmParser interfaces to register its
+ /// \brief Initialize the extension for parsing using the given \p Parser.
+ /// The extension should use the AsmParser interfaces to register its
/// parsing routines.
virtual void Initialize(MCAsmParser &Parser);
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
index 2556e5f..60e7887 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -19,15 +19,69 @@ class raw_ostream;
/// base class is used by target-independent clients and is the interface
/// between parsing an asm instruction and recognizing it.
class MCParsedAsmOperand {
+ /// MCOperandNum - The corresponding MCInst operand number. Only valid when
+ /// parsing MS-style inline assembly.
+ unsigned MCOperandNum;
+
+ /// Constraint - The constraint on this operand. Only valid when parsing
+ /// MS-style inline assembly.
+ std::string Constraint;
+
public:
MCParsedAsmOperand() {}
virtual ~MCParsedAsmOperand() {}
+ void setConstraint(StringRef C) { Constraint = C.str(); }
+ StringRef getConstraint() { return Constraint; }
+
+ void setMCOperandNum(unsigned OpNum) { MCOperandNum = OpNum; }
+ unsigned getMCOperandNum() { return MCOperandNum; }
+
+ unsigned getNameLen() {
+ assert (getStartLoc().isValid() && "Invalid StartLoc!");
+ assert (getEndLoc().isValid() && "Invalid EndLoc!");
+ return getEndLoc().getPointer() - getStartLoc().getPointer();
+ }
+
+ StringRef getName() {
+ return StringRef(getStartLoc().getPointer(), getNameLen());
+ }
+
+ /// isToken - Is this a token operand?
+ virtual bool isToken() const = 0;
+ /// isImm - Is this an immediate operand?
+ virtual bool isImm() const = 0;
+ /// isReg - Is this a register operand?
+ virtual bool isReg() const = 0;
+ virtual unsigned getReg() const = 0;
+
+ /// isMem - Is this a memory operand?
+ virtual bool isMem() const = 0;
+ virtual unsigned getMemSize() const { return 0; }
+
/// getStartLoc - Get the location of the first token of this operand.
virtual SMLoc getStartLoc() const = 0;
/// getEndLoc - Get the location of the last token of this operand.
virtual SMLoc getEndLoc() const = 0;
+ /// needAsmRewrite - AsmRewrites happen in both the target-independent and
+ /// target-dependent parsers. The target-independent parser calls this
+ /// function to determine if the target-dependent parser has already taken
+ /// care of the rewrites. Only valid when parsing MS-style inline assembly.
+ virtual bool needAsmRewrite() const { return true; }
+
+ /// isOffsetOf - Do we need to emit code to get the offset of the variable,
+ /// rather than the value of the variable? Only valid when parsing MS-style
+ /// inline assembly.
+ virtual bool isOffsetOf() const { return false; }
+
+ /// getOffsetOfLoc - Get the location of the offset operator.
+ virtual SMLoc getOffsetOfLoc() const { return SMLoc(); }
+
+ /// needSizeDirective - Do we need to emit a sizing directive for this
+ /// operand? Only valid when parsing MS-style inline assembly.
+ virtual bool needSizeDirective() const { return false; }
+
/// print - Print a debug representation of the operand to the given stream.
virtual void print(raw_ostream &OS) const = 0;
/// dump - Print to the debug stream.
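A minimal sketch of a target operand type on top of the extended interface (ExampleOperand and its token-only behavior are hypothetical; a real target would add register, immediate, and memory flavors):

    class ExampleOperand : public llvm::MCParsedAsmOperand {
      llvm::StringRef Tok;
      llvm::SMLoc Start, End;
    public:
      ExampleOperand(llvm::StringRef T, llvm::SMLoc S, llvm::SMLoc E)
        : Tok(T), Start(S), End(E) {}
      // The pure-virtual query interface added above.
      virtual bool isToken() const { return true; }
      virtual bool isImm() const { return false; }
      virtual bool isReg() const { return false; }
      virtual unsigned getReg() const { return 0; }
      virtual bool isMem() const { return false; }
      virtual llvm::SMLoc getStartLoc() const { return Start; }
      virtual llvm::SMLoc getEndLoc() const { return End; }
      virtual void print(llvm::raw_ostream &OS) const { OS << Tok; }
    };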
diff --git a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
index 46a9d71..f05baea 100644
--- a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
@@ -333,6 +333,13 @@ public:
return NumRegs;
}
+ /// getNumSubRegIndices - Return the number of sub-register indices
+ /// understood by the target. Index 0 is reserved for the no-op sub-register,
+ /// while 1 to getNumSubRegIndices() - 1 represent real sub-registers.
+ unsigned getNumSubRegIndices() const {
+ return NumSubRegIndices;
+ }
+
/// getNumRegUnits - Return the number of (native) register units in the
/// target. Register units are numbered from 0 to getNumRegUnits() - 1. They
/// can be accessed through MCRegUnitIterator defined below.
@@ -363,7 +370,7 @@ public:
/// getRegClass - Returns the register class associated with the enumeration
/// value. See class MCOperandInfo.
- const MCRegisterClass getRegClass(unsigned i) const {
+ const MCRegisterClass& getRegClass(unsigned i) const {
assert(i < getNumRegClasses() && "Register Class ID out of range");
return Classes[i];
}
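Returning a const reference avoids copying the MCRegisterClass table entry on every query. A usage sketch, with MRI assumed to be an initialized MCRegisterInfo:

    for (unsigned i = 0, e = MRI.getNumRegClasses(); i != e; ++i) {
      // Binds directly to the static table entry; no temporary is made
      // now that getRegClass returns a reference.
      const llvm::MCRegisterClass &RC = MRI.getRegClass(i);
      (void)RC.getNumRegs();
    }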
diff --git a/contrib/llvm/include/llvm/MC/MCSchedule.h b/contrib/llvm/include/llvm/MC/MCSchedule.h
index 3b1cdf1..0c71ee5 100644
--- a/contrib/llvm/include/llvm/MC/MCSchedule.h
+++ b/contrib/llvm/include/llvm/MC/MCSchedule.h
@@ -16,17 +16,111 @@
#define LLVM_MC_MCSCHEDMODEL_H
#include "llvm/Support/DataTypes.h"
+#include <cassert>
namespace llvm {
struct InstrItinerary;
+/// Define a kind of processor resource that will be modeled by the scheduler.
+struct MCProcResourceDesc {
+#ifndef NDEBUG
+ const char *Name;
+#endif
+ unsigned NumUnits; // Number of resources of this kind
+ unsigned SuperIdx; // Index of the resource kind that contains this kind.
+
+ // Buffered resources may be consumed at some indeterminate cycle after
+ // dispatch (e.g. for instructions that may issue out-of-order). Unbuffered
+ // resources always consume their resource some fixed number of cycles after
+ // dispatch (e.g. for instruction interlocking that may stall the pipeline).
+ bool IsBuffered;
+
+ bool operator==(const MCProcResourceDesc &Other) const {
+ return NumUnits == Other.NumUnits && SuperIdx == Other.SuperIdx
+ && IsBuffered == Other.IsBuffered;
+ }
+};
+
+/// Identify one of the processor resource kinds consumed by a particular
+/// scheduling class for the specified number of cycles.
+struct MCWriteProcResEntry {
+ unsigned ProcResourceIdx;
+ unsigned Cycles;
+
+ bool operator==(const MCWriteProcResEntry &Other) const {
+ return ProcResourceIdx == Other.ProcResourceIdx && Cycles == Other.Cycles;
+ }
+};
+
+/// Specify the latency in cpu cycles for a particular scheduling class and def
+/// index. -1 indicates an invalid latency. Heuristics would typically consider
+/// an instruction with invalid latency to have infinite latency. Also identify
+/// the WriteResources of this def. When the operand expands to a sequence of
+/// writes, this ID is the last write in the sequence.
+struct MCWriteLatencyEntry {
+ int Cycles;
+ unsigned WriteResourceID;
+
+ bool operator==(const MCWriteLatencyEntry &Other) const {
+ return Cycles == Other.Cycles && WriteResourceID == Other.WriteResourceID;
+ }
+};
+
+/// Specify the number of cycles allowed after instruction issue before a
+/// particular use operand reads its registers. This effectively reduces the
+/// write's latency. Here we allow negative cycles for corner cases where
+/// latency increases. This rule only applies when the entry's WriteResource
+/// matches the write's WriteResource.
+///
+/// MCReadAdvanceEntries are sorted first by operand index (UseIdx), then by
+/// WriteResourceIdx.
+struct MCReadAdvanceEntry {
+ unsigned UseIdx;
+ unsigned WriteResourceID;
+ int Cycles;
+
+ bool operator==(const MCReadAdvanceEntry &Other) const {
+ return UseIdx == Other.UseIdx && WriteResourceID == Other.WriteResourceID
+ && Cycles == Other.Cycles;
+ }
+};
+
+/// Summarize the scheduling resources required for an instruction of a
+/// particular scheduling class.
+///
+/// Defined as an aggregate struct for creating tables with initializer lists.
+struct MCSchedClassDesc {
+ static const unsigned short InvalidNumMicroOps = UINT16_MAX;
+ static const unsigned short VariantNumMicroOps = UINT16_MAX - 1;
+
+#ifndef NDEBUG
+ const char* Name;
+#endif
+ unsigned short NumMicroOps;
+ bool BeginGroup;
+ bool EndGroup;
+ unsigned WriteProcResIdx; // First index into WriteProcResTable.
+ unsigned NumWriteProcResEntries;
+ unsigned WriteLatencyIdx; // First index into WriteLatencyTable.
+ unsigned NumWriteLatencyEntries;
+ unsigned ReadAdvanceIdx; // First index into ReadAdvanceTable.
+ unsigned NumReadAdvanceEntries;
+
+ bool isValid() const {
+ return NumMicroOps != InvalidNumMicroOps;
+ }
+ bool isVariant() const {
+ return NumMicroOps == VariantNumMicroOps;
+ }
+};
+
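These entries are plain aggregates precisely so TableGen can emit them as static initializer tables; a hand-written sketch of what such rows might look like (all names and numbers here are illustrative, including the invalid zeroth entries, which the generated tables reserve by convention):

    static const llvm::MCWriteProcResEntry ExampleWriteProcResTable[] = {
      {0, 0}, // invalid zeroth entry
      {1, 2}, // resource kind #1 is busy for 2 cycles
    };
    static const llvm::MCWriteLatencyEntry ExampleWriteLatencyTable[] = {
      {0, 0}, // invalid zeroth entry
      {3, 1}, // def latency of 3 cycles, produced by write resource #1
    };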
/// Machine model for scheduling, bundling, and heuristics.
///
/// The machine model directly provides basic information about the
/// microarchitecture to the scheduler in the form of properties. It also
-/// optionally refers to scheduler resources tables and itinerary
-/// tables. Scheduler resources tables model the latency and cost for each
+/// optionally refers to scheduler resource tables and itinerary
+/// tables. Scheduler resource tables model the latency and cost for each
/// instruction type. Itinerary tables are an independent mechanism that
/// provides a detailed reservation table describing each cycle of instruction
/// execution. Subtargets may define any or all of the above categories of data
@@ -84,8 +178,11 @@ public:
static const unsigned DefaultMispredictPenalty = 10;
private:
- // TODO: Add a reference to proc resource types and sched resource tables.
-
+ unsigned ProcID;
+ const MCProcResourceDesc *ProcResourceTable;
+ const MCSchedClassDesc *SchedClassTable;
+ unsigned NumProcResourceKinds;
+ unsigned NumSchedClasses;
// Instruction itinerary tables used by InstrItineraryData.
friend class InstrItineraryData;
const InstrItinerary *InstrItineraries;
@@ -100,13 +197,45 @@ public:
LoadLatency(DefaultLoadLatency),
HighLatency(DefaultHighLatency),
MispredictPenalty(DefaultMispredictPenalty),
- InstrItineraries(0) {}
+ ProcID(0), ProcResourceTable(0), SchedClassTable(0),
+ NumProcResourceKinds(0), NumSchedClasses(0),
+ InstrItineraries(0) {
+ (void)NumProcResourceKinds;
+ (void)NumSchedClasses;
+ }
// Table-gen driven ctor.
MCSchedModel(unsigned iw, int ml, unsigned ll, unsigned hl, unsigned mp,
+ unsigned pi, const MCProcResourceDesc *pr,
+ const MCSchedClassDesc *sc, unsigned npr, unsigned nsc,
const InstrItinerary *ii):
IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl),
- MispredictPenalty(mp), InstrItineraries(ii){}
+ MispredictPenalty(mp), ProcID(pi), ProcResourceTable(pr),
+ SchedClassTable(sc), NumProcResourceKinds(npr), NumSchedClasses(nsc),
+ InstrItineraries(ii) {}
+
+ unsigned getProcessorID() const { return ProcID; }
+
+ /// Does this machine model include instruction-level scheduling?
+ bool hasInstrSchedModel() const { return SchedClassTable; }
+
+ unsigned getNumProcResourceKinds() const {
+ return NumProcResourceKinds;
+ }
+
+ const MCProcResourceDesc *getProcResource(unsigned ProcResourceIdx) const {
+ assert(hasInstrSchedModel() && "No scheduling machine model");
+
+ assert(ProcResourceIdx < NumProcResourceKinds && "bad proc resource idx");
+ return &ProcResourceTable[ProcResourceIdx];
+ }
+
+ const MCSchedClassDesc *getSchedClassDesc(unsigned SchedClassIdx) const {
+ assert(hasInstrSchedModel() && "No scheduling machine model");
+
+ assert(SchedClassIdx < NumSchedClasses && "bad scheduling class idx");
+ return &SchedClassTable[SchedClassIdx];
+ }
};
} // End llvm namespace
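A sketch of querying the extended model (SM is assumed to be an MCSchedModel with TableGen-populated tables; the index values are purely illustrative):

    if (SM.hasInstrSchedModel()) {
      // Kind 0 is assumed reserved as invalid, as in the generated tables.
      for (unsigned i = 1, e = SM.getNumProcResourceKinds(); i != e; ++i) {
        const llvm::MCProcResourceDesc *PRD = SM.getProcResource(i);
        (void)PRD->NumUnits; // parallel units of this resource kind
      }
      const llvm::MCSchedClassDesc *SC = SM.getSchedClassDesc(0);
      if (SC->isValid() && !SC->isVariant())
        (void)SC->NumMicroOps; // meaningful only for valid, non-variant classes
    }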
diff --git a/contrib/llvm/include/llvm/MC/MCSection.h b/contrib/llvm/include/llvm/MC/MCSection.h
index 7da6534..21fdb6b 100644
--- a/contrib/llvm/include/llvm/MC/MCSection.h
+++ b/contrib/llvm/include/llvm/MC/MCSection.h
@@ -15,7 +15,7 @@
#define LLVM_MC_MCSECTION_H
#include "llvm/MC/SectionKind.h"
-#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
class MCAsmInfo;
@@ -33,8 +33,8 @@ namespace llvm {
};
private:
- MCSection(const MCSection&); // DO NOT IMPLEMENT
- void operator=(const MCSection&); // DO NOT IMPLEMENT
+ MCSection(const MCSection&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCSection&) LLVM_DELETED_FUNCTION;
protected:
MCSection(SectionVariant V, SectionKind K) : Variant(V), Kind(K) {}
SectionVariant Variant;
@@ -64,8 +64,6 @@ namespace llvm {
/// isVirtualSection - Check whether this section is "virtual", that is
/// has no actual object file contents.
virtual bool isVirtualSection() const = 0;
-
- static bool classof(const MCSection *) { return true; }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
index 7eacde5..b050c0f 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
@@ -61,7 +61,6 @@ namespace llvm {
static bool classof(const MCSection *S) {
return S->getVariant() == SV_COFF;
}
- static bool classof(const MCSectionCOFF *) { return true; }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCSectionELF.h b/contrib/llvm/include/llvm/MC/MCSectionELF.h
index 7321ca8..4d54465 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionELF.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionELF.h
@@ -76,7 +76,6 @@ public:
static bool classof(const MCSection *S) {
return S->getVariant() == SV_ELF;
}
- static bool classof(const MCSectionELF *) { return true; }
// Return the entry size for sections with fixed-width data.
static unsigned DetermineEntrySize(SectionKind Kind);
diff --git a/contrib/llvm/include/llvm/MC/MCSectionMachO.h b/contrib/llvm/include/llvm/MC/MCSectionMachO.h
index 15eb4f4..71ea8f3 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionMachO.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionMachO.h
@@ -174,7 +174,6 @@ public:
static bool classof(const MCSection *S) {
return S->getVariant() == SV_MachO;
}
- static bool classof(const MCSectionMachO *) { return true; }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm/include/llvm/MC/MCStreamer.h
index e8c3e59..230d27e 100644
--- a/contrib/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCStreamer.h
@@ -47,8 +47,8 @@ namespace llvm {
class MCStreamer {
MCContext &Context;
- MCStreamer(const MCStreamer&); // DO NOT IMPLEMENT
- MCStreamer &operator=(const MCStreamer&); // DO NOT IMPLEMENT
+ MCStreamer(const MCStreamer&) LLVM_DELETED_FUNCTION;
+ MCStreamer &operator=(const MCStreamer&) LLVM_DELETED_FUNCTION;
bool EmitEHFrame;
bool EmitDebugFrame;
@@ -342,7 +342,7 @@ namespace llvm {
/// @name Generating Data
/// @{
- /// EmitBytes - Emit the bytes in \arg Data into the output.
+ /// EmitBytes - Emit the bytes in \p Data into the output.
///
/// This is used to implement assembler directives such as .byte, .ascii,
/// etc.
@@ -554,6 +554,11 @@ namespace llvm {
virtual void EmitRegSave(const SmallVectorImpl<unsigned> &RegList,
bool isVector);
+ /// PPC-related methods.
+ /// FIXME: Eventually replace it with some "target MC streamer" and move
+ /// these methods there.
+ virtual void EmitTCEntry(const MCSymbol &S);
+
/// FinishImpl - Streamer specific finalization.
virtual void FinishImpl() = 0;
/// Finish - Finish emission of machine code.
@@ -573,17 +578,14 @@ namespace llvm {
/// InstPrint.
///
/// \param CE - If given, a code emitter to use to show the instruction
- /// encoding inline with the assembly. This method takes ownership of \arg CE.
+ /// encoding inline with the assembly. This method takes ownership of \p CE.
///
/// \param TAB - If given, a target asm backend to use to show the fixup
/// information in conjunction with encoding information. This method takes
- /// ownership of \arg TAB.
+ /// ownership of \p TAB.
///
/// \param ShowInst - Whether to show the MCInst representation inline with
/// the assembly.
- ///
- /// \param DecodeLSDA - If true, emit comments that translates the LSDA into a
- /// human readable format. Only usable with CFI.
MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
bool isVerboseAsm,
bool useLoc,
@@ -597,7 +599,7 @@ namespace llvm {
/// createMachOStreamer - Create a machine code streamer which will generate
/// Mach-O format object files.
///
- /// Takes ownership of \arg TAB and \arg CE.
+ /// Takes ownership of \p TAB and \p CE.
MCStreamer *createMachOStreamer(MCContext &Ctx, MCAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *CE,
bool RelaxAll = false);
@@ -605,7 +607,7 @@ namespace llvm {
/// createWinCOFFStreamer - Create a machine code streamer which will
/// generate Microsoft COFF format object files.
///
- /// Takes ownership of \arg TAB and \arg CE.
+ /// Takes ownership of \p TAB and \p CE.
MCStreamer *createWinCOFFStreamer(MCContext &Ctx,
MCAsmBackend &TAB,
MCCodeEmitter &CE, raw_ostream &OS,
@@ -620,7 +622,7 @@ namespace llvm {
/// createPureStreamer - Create a machine code streamer which will generate
/// "pure" MC object files, for use with MC-JIT and testing tools.
///
- /// Takes ownership of \arg TAB and \arg CE.
+ /// Takes ownership of \p TAB and \p CE.
MCStreamer *createPureStreamer(MCContext &Ctx, MCAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *CE);
diff --git a/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h b/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
index 31d632d..69213cd 100644
--- a/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
@@ -30,7 +30,14 @@ class MCSubtargetInfo {
std::string TargetTriple; // Target triple
const SubtargetFeatureKV *ProcFeatures; // Processor feature list
const SubtargetFeatureKV *ProcDesc; // Processor descriptions
- const SubtargetInfoKV *ProcSchedModel; // Scheduler machine model
+
+ // Scheduler machine model
+ const SubtargetInfoKV *ProcSchedModels;
+ const MCWriteProcResEntry *WriteProcResTable;
+ const MCWriteLatencyEntry *WriteLatencyTable;
+ const MCReadAdvanceEntry *ReadAdvanceTable;
+ const MCSchedModel *CPUSchedModel;
+
const InstrStage *Stages; // Instruction itinerary stages
const unsigned *OperandCycles; // Itinerary operand cycles
const unsigned *ForwardingPaths; // Forwarding paths
@@ -43,6 +50,9 @@ public:
const SubtargetFeatureKV *PF,
const SubtargetFeatureKV *PD,
const SubtargetInfoKV *ProcSched,
+ const MCWriteProcResEntry *WPR,
+ const MCWriteLatencyEntry *WL,
+ const MCReadAdvanceEntry *RA,
const InstrStage *IS,
const unsigned *OC, const unsigned *FP,
unsigned NF, unsigned NP);
@@ -58,9 +68,9 @@ public:
return FeatureBits;
}
- /// ReInitMCSubtargetInfo - Change CPU (and optionally supplemented with
- /// feature string), recompute and return feature bits.
- uint64_t ReInitMCSubtargetInfo(StringRef CPU, StringRef FS);
+ /// InitMCProcessorInfo - Set or change the CPU (optionally supplemented with
+ /// feature string). Recompute feature bits and scheduling model.
+ void InitMCProcessorInfo(StringRef CPU, StringRef FS);
/// ToggleFeature - Toggle a feature and returns the re-computed feature
/// bits. This version does not change the implied bits.
@@ -72,11 +82,56 @@ public:
/// getSchedModelForCPU - Get the machine model of a CPU.
///
- MCSchedModel *getSchedModelForCPU(StringRef CPU) const;
+ const MCSchedModel *getSchedModelForCPU(StringRef CPU) const;
+
+ /// getSchedModel - Get the machine model for this subtarget's CPU.
+ ///
+ const MCSchedModel *getSchedModel() const { return CPUSchedModel; }
+
+ /// Return an iterator at the first process resource consumed by the given
+ /// scheduling class.
+ const MCWriteProcResEntry *getWriteProcResBegin(
+ const MCSchedClassDesc *SC) const {
+ return &WriteProcResTable[SC->WriteProcResIdx];
+ }
+ const MCWriteProcResEntry *getWriteProcResEnd(
+ const MCSchedClassDesc *SC) const {
+ return getWriteProcResBegin(SC) + SC->NumWriteProcResEntries;
+ }
+
+ const MCWriteLatencyEntry *getWriteLatencyEntry(const MCSchedClassDesc *SC,
+ unsigned DefIdx) const {
+ assert(DefIdx < SC->NumWriteLatencyEntries &&
+ "MachineModel does not specify a WriteResource for DefIdx");
+
+ return &WriteLatencyTable[SC->WriteLatencyIdx + DefIdx];
+ }
+
+ int getReadAdvanceCycles(const MCSchedClassDesc *SC, unsigned UseIdx,
+ unsigned WriteResID) const {
+ // TODO: The number of read advance entries in a class can be significant
+ // (~50). Consider compressing the WriteID into a dense ID of those that are
+ // used by ReadAdvance and representing them as a bitset.
+ for (const MCReadAdvanceEntry *I = &ReadAdvanceTable[SC->ReadAdvanceIdx],
+ *E = I + SC->NumReadAdvanceEntries; I != E; ++I) {
+ if (I->UseIdx < UseIdx)
+ continue;
+ if (I->UseIdx > UseIdx)
+ break;
+ // Find the first WriteResIdx match, which has the highest cycle count.
+ if (!I->WriteResourceID || I->WriteResourceID == WriteResID) {
+ return I->Cycles;
+ }
+ }
+ return 0;
+ }
/// getInstrItineraryForCPU - Get scheduling itinerary of a CPU.
///
InstrItineraryData getInstrItineraryForCPU(StringRef CPU) const;
+
+ /// Initialize an InstrItineraryData instance.
+ void initInstrItins(InstrItineraryData &InstrItins) const;
};
} // End llvm namespace
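A sketch of how a scheduler might combine the two tables into an effective operand latency (STI is an initialized MCSubtargetInfo; SC, DefIdx and UseIdx are assumed to describe a matched def/use pair):

    const llvm::MCWriteLatencyEntry *WLE = STI.getWriteLatencyEntry(SC, DefIdx);
    int Latency = WLE->Cycles; // -1 encodes invalid/unknown latency
    if (Latency >= 0) {
      // A positive ReadAdvance means the use reads its register some cycles
      // after issue, effectively shortening the def-to-use latency.
      Latency -= STI.getReadAdvanceCycles(SC, UseIdx, WLE->WriteResourceID);
      if (Latency < 0)
        Latency = 0; // clamp when the advance exceeds the write latency
    }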
diff --git a/contrib/llvm/include/llvm/MC/MCSymbol.h b/contrib/llvm/include/llvm/MC/MCSymbol.h
index 0583ce5..fe92755 100644
--- a/contrib/llvm/include/llvm/MC/MCSymbol.h
+++ b/contrib/llvm/include/llvm/MC/MCSymbol.h
@@ -15,6 +15,7 @@
#define LLVM_MC_MCSYMBOL_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
class MCExpr;
@@ -62,8 +63,8 @@ namespace llvm {
: Name(name), Section(0), Value(0),
IsTemporary(isTemporary), IsUsed(false) {}
- MCSymbol(const MCSymbol&); // DO NOT IMPLEMENT
- void operator=(const MCSymbol&); // DO NOT IMPLEMENT
+ MCSymbol(const MCSymbol&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCSymbol&) LLVM_DELETED_FUNCTION;
public:
/// getName - Get the symbol name.
StringRef getName() const { return Name; }
@@ -112,7 +113,7 @@ namespace llvm {
return *Section;
}
- /// setSection - Mark the symbol as defined in the section \arg S.
+ /// setSection - Mark the symbol as defined in the section \p S.
void setSection(const MCSection &S) { Section = &S; }
/// setUndefined - Mark the symbol as undefined.
@@ -132,7 +133,7 @@ namespace llvm {
return Value != 0;
}
- /// getValue() - Get the value for variable symbols.
+ /// getVariableValue() - Get the value for variable symbols.
const MCExpr *getVariableValue() const {
assert(isVariable() && "Invalid accessor!");
IsUsed = true;
@@ -148,7 +149,7 @@ namespace llvm {
/// @}
- /// print - Print the value to the stream \arg OS.
+ /// print - Print the value to the stream \p OS.
void print(raw_ostream &OS) const;
/// dump - Print the value to stderr.
diff --git a/contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h b/contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h
index f5c8c09..b1cc546 100644
--- a/contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h
@@ -24,8 +24,8 @@ class MCTargetAsmLexer {
SMLoc ErrLoc;
std::string Err;
- MCTargetAsmLexer(const MCTargetAsmLexer &); // DO NOT IMPLEMENT
- void operator=(const MCTargetAsmLexer &); // DO NOT IMPLEMENT
+ MCTargetAsmLexer(const MCTargetAsmLexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCTargetAsmLexer &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
MCTargetAsmLexer(const Target &);
@@ -45,7 +45,7 @@ public:
const Target &getTarget() const { return TheTarget; }
- /// InstallLexer - Set the lexer to get tokens from lower-level lexer \arg L.
+ /// InstallLexer - Set the lexer to get tokens from lower-level lexer \p L.
void InstallLexer(MCAsmLexer &L) {
Lexer = &L;
}
@@ -77,10 +77,10 @@ public:
/// getKind - Get the kind of current token.
AsmToken::TokenKind getKind() const { return CurTok.getKind(); }
- /// is - Check if the current token has kind \arg K.
+ /// is - Check if the current token has kind \p K.
bool is(AsmToken::TokenKind K) const { return CurTok.is(K); }
- /// isNot - Check if the current token has kind \arg K.
+ /// isNot - Check if the current token has kind \p K.
bool isNot(AsmToken::TokenKind K) const { return CurTok.isNot(K); }
};
diff --git a/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h b/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h
index 929a204..483a80b 100644
--- a/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h
@@ -21,11 +21,43 @@ class MCParsedAsmOperand;
class MCInst;
template <typename T> class SmallVectorImpl;
+enum AsmRewriteKind {
+ AOK_DotOperator, // Rewrite a dot operator expression as an immediate.
+ // E.g., [eax].foo.bar -> [eax].8
+ AOK_Emit, // Rewrite _emit as .byte.
+ AOK_Imm, // Rewrite as $$N.
+ AOK_ImmPrefix, // Add $$ before a parsed Imm.
+ AOK_Input, // Rewrite in terms of $N.
+ AOK_Output, // Rewrite in terms of $N.
+ AOK_SizeDirective, // Add a sizing directive (e.g., dword ptr).
+ AOK_Skip // Skip emission (e.g., offset/type operators).
+};
+
+struct AsmRewrite {
+ AsmRewriteKind Kind;
+ SMLoc Loc;
+ unsigned Len;
+ unsigned Val;
+public:
+ AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len = 0, unsigned val = 0)
+ : Kind(kind), Loc(loc), Len(len), Val(val) {}
+};
+
+struct ParseInstructionInfo {
+
+ SmallVectorImpl<AsmRewrite> *AsmRewrites;
+
+ ParseInstructionInfo() : AsmRewrites(0) {}
+ ParseInstructionInfo(SmallVectorImpl<AsmRewrite> *rewrites)
+ : AsmRewrites(rewrites) {}
+
+ ~ParseInstructionInfo() {}
+};
+
/// MCTargetAsmParser - Generic interface to target specific assembly parsers.
class MCTargetAsmParser : public MCAsmParserExtension {
public:
enum MatchResultTy {
- Match_ConversionFail,
Match_InvalidOperand,
Match_MissingFeature,
Match_MnemonicFail,
@@ -34,20 +66,34 @@ public:
};
private:
- MCTargetAsmParser(const MCTargetAsmParser &); // DO NOT IMPLEMENT
- void operator=(const MCTargetAsmParser &); // DO NOT IMPLEMENT
+ MCTargetAsmParser(const MCTargetAsmParser &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCTargetAsmParser &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
MCTargetAsmParser();
/// AvailableFeatures - The current set of available features.
unsigned AvailableFeatures;
+ /// ParsingInlineAsm - Are we parsing ms-style inline assembly?
+ bool ParsingInlineAsm;
+
+ /// SemaCallback - The Sema callback implementation. Must be set when parsing
+ /// ms-style inline assembly.
+ MCAsmParserSemaCallback *SemaCallback;
+
public:
virtual ~MCTargetAsmParser();
unsigned getAvailableFeatures() const { return AvailableFeatures; }
void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }
+ bool isParsingInlineAsm() { return ParsingInlineAsm; }
+ void setParsingInlineAsm(bool Value) { ParsingInlineAsm = Value; }
+
+ void setSemaCallback(MCAsmParserSemaCallback *Callback) {
+ SemaCallback = Callback;
+ }
+
virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc) = 0;
@@ -64,7 +110,8 @@ public:
/// \param Operands [out] - The list of parsed operands, this returns
/// ownership of them to the caller.
/// \return True on failure.
- virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
+ virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) = 0;
/// ParseDirective - Parse a target specific assembler directive
@@ -79,18 +126,9 @@ public:
/// \param DirectiveID - the identifier token of the directive.
virtual bool ParseDirective(AsmToken DirectiveID) = 0;
- /// MatchInstruction - Recognize a series of operands of a parsed instruction
- /// as an actual MCInst. This returns false on success and returns true on
- /// failure to match.
- ///
- /// On failure, the target parser is responsible for emitting a diagnostic
- /// explaining the match failure.
- virtual bool
- MatchInstruction(SMLoc IDLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- SmallVectorImpl<MCInst> &MCInsts) {
- return true;
- }
+ /// mnemonicIsValid - This returns true if this is a valid mnemonic and false
+ /// otherwise.
+ virtual bool mnemonicIsValid(StringRef Mnemonic) = 0;
/// MatchAndEmitInstruction - Recognize a series of operands of a parsed
/// instruction as an actual MCInst and emit it to the specified MCStreamer.
@@ -99,9 +137,10 @@ public:
/// On failure, the target parser is responsible for emitting a diagnostic
/// explaining the match failure.
virtual bool
- MatchAndEmitInstruction(SMLoc IDLoc,
+ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out) = 0;
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) = 0;
/// checkTargetMatchPredicate - Validate the instruction match against
/// any complex target predicates not expressible via match classes.
@@ -109,6 +148,8 @@ public:
return Match_Success;
}
+ virtual void convertToMapAndConstraints(unsigned Kind,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands) = 0;
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/MC/MCValue.h b/contrib/llvm/include/llvm/MC/MCValue.h
index 8352ed1..f9af8bc 100644
--- a/contrib/llvm/include/llvm/MC/MCValue.h
+++ b/contrib/llvm/include/llvm/MC/MCValue.h
@@ -46,7 +46,7 @@ public:
/// isAbsolute - Is this an absolute (as opposed to relocatable) value.
bool isAbsolute() const { return !SymA && !SymB; }
- /// print - Print the value to the stream \arg OS.
+ /// print - Print the value to the stream \p OS.
void print(raw_ostream &OS, const MCAsmInfo *MAI) const;
/// dump - Print the value to stderr.
diff --git a/contrib/llvm/include/llvm/MC/SubtargetFeature.h b/contrib/llvm/include/llvm/MC/SubtargetFeature.h
index 507d882..57f0518 100644
--- a/contrib/llvm/include/llvm/MC/SubtargetFeature.h
+++ b/contrib/llvm/include/llvm/MC/SubtargetFeature.h
@@ -50,7 +50,7 @@ struct SubtargetFeatureKV {
//
struct SubtargetInfoKV {
const char *Key; // K-V key string
- void *Value; // K-V pointer value
+ const void *Value; // K-V pointer value
// Compare routine for std binary search
bool operator<(const SubtargetInfoKV &S) const {
@@ -95,10 +95,6 @@ public:
const SubtargetFeatureKV *FeatureTable,
size_t FeatureTableSize);
- /// Get scheduling itinerary of a CPU.
- void *getItinerary(const StringRef CPU,
- const SubtargetInfoKV *Table, size_t TableSize);
-
/// Print feature string.
void print(raw_ostream &OS) const;
diff --git a/contrib/llvm/include/llvm/MDBuilder.h b/contrib/llvm/include/llvm/MDBuilder.h
index 2aa48b0..1867a63 100644
--- a/contrib/llvm/include/llvm/MDBuilder.h
+++ b/contrib/llvm/include/llvm/MDBuilder.h
@@ -134,6 +134,27 @@ namespace llvm {
}
}
+ struct TBAAStructField {
+ uint64_t Offset;
+ uint64_t Size;
+ MDNode *TBAA;
+ TBAAStructField(uint64_t Offset, uint64_t Size, MDNode *TBAA) :
+ Offset(Offset), Size(Size), TBAA(TBAA) {}
+ };
+
+ /// \brief Return metadata for a tbaa.struct node with the given
+ /// struct field descriptions.
+ MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields) {
+ SmallVector<Value *, 4> Vals(Fields.size() * 3);
+ Type *Int64 = IntegerType::get(Context, 64);
+ for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
+ Vals[i * 3 + 0] = ConstantInt::get(Int64, Fields[i].Offset);
+ Vals[i * 3 + 1] = ConstantInt::get(Int64, Fields[i].Size);
+ Vals[i * 3 + 2] = Fields[i].TBAA;
+ }
+ return MDNode::get(Context, Vals);
+ }
+
};
} // end namespace llvm
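A usage sketch for a two-field struct copy (MDB is an MDBuilder, ScalarTBAA0/ScalarTBAA1 are previously built scalar TBAA nodes; offsets and sizes are illustrative):

    llvm::SmallVector<llvm::MDBuilder::TBAAStructField, 2> Fields;
    Fields.push_back(llvm::MDBuilder::TBAAStructField(0, 4, ScalarTBAA0));
    Fields.push_back(llvm::MDBuilder::TBAAStructField(8, 8, ScalarTBAA1));
    // The resulting !tbaa.struct node can be attached to a memcpy-style
    // transfer so the optimizer sees per-field aliasing through the copy.
    llvm::MDNode *TBAAStruct = MDB.createTBAAStructNode(Fields);
    (void)TBAAStruct;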
diff --git a/contrib/llvm/include/llvm/Metadata.h b/contrib/llvm/include/llvm/Metadata.h
index b40549b..0fbbb95 100644
--- a/contrib/llvm/include/llvm/Metadata.h
+++ b/contrib/llvm/include/llvm/Metadata.h
@@ -37,7 +37,7 @@ template<typename ValueSubClass, typename ItemParentClass>
/// MDString is always unnamed.
class MDString : public Value {
virtual void anchor();
- MDString(const MDString &); // DO NOT IMPLEMENT
+ MDString(const MDString &) LLVM_DELETED_FUNCTION;
explicit MDString(LLVMContext &C);
public:
@@ -59,7 +59,6 @@ public:
iterator end() const { return getName().end(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MDString *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == MDStringVal;
}
@@ -71,8 +70,8 @@ class MDNodeOperand;
//===----------------------------------------------------------------------===//
/// MDNode - a tuple of other values.
class MDNode : public Value, public FoldingSetNode {
- MDNode(const MDNode &); // DO NOT IMPLEMENT
- void operator=(const MDNode &); // DO NOT IMPLEMENT
+ MDNode(const MDNode &) LLVM_DELETED_FUNCTION;
+ void operator=(const MDNode &) LLVM_DELETED_FUNCTION;
friend class MDNodeOperand;
friend class LLVMContextImpl;
friend struct FoldingSetTrait<MDNode>;
@@ -161,7 +160,6 @@ public:
void Profile(FoldingSetNodeID &ID) const;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MDNode *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == MDNodeVal;
}
@@ -195,7 +193,7 @@ class NamedMDNode : public ilist_node<NamedMDNode> {
friend struct ilist_traits<NamedMDNode>;
friend class LLVMContextImpl;
friend class Module;
- NamedMDNode(const NamedMDNode &); // DO NOT IMPLEMENT
+ NamedMDNode(const NamedMDNode &) LLVM_DELETED_FUNCTION;
std::string Name;
Module *Parent;
diff --git a/contrib/llvm/include/llvm/Object/Archive.h b/contrib/llvm/include/llvm/Object/Archive.h
index 358b27a..f3d8249 100644
--- a/contrib/llvm/include/llvm/Object/Archive.h
+++ b/contrib/llvm/include/llvm/Object/Archive.h
@@ -129,7 +129,6 @@ public:
symbol_iterator end_symbols() const;
// Cast methods.
- static inline bool classof(Archive const *v) { return true; }
static inline bool classof(Binary const *v) {
return v->isArchive();
}
diff --git a/contrib/llvm/include/llvm/Object/Binary.h b/contrib/llvm/include/llvm/Object/Binary.h
index befe812..d555de3 100644
--- a/contrib/llvm/include/llvm/Object/Binary.h
+++ b/contrib/llvm/include/llvm/Object/Binary.h
@@ -26,8 +26,8 @@ namespace object {
class Binary {
private:
- Binary(); // = delete
- Binary(const Binary &other); // = delete
+ Binary() LLVM_DELETED_FUNCTION;
+ Binary(const Binary &other) LLVM_DELETED_FUNCTION;
unsigned int TypeID;
@@ -64,7 +64,6 @@ public:
// Cast methods.
unsigned int getType() const { return TypeID; }
- static inline bool classof(const Binary *v) { return true; }
// Convenience methods
bool isObject() const {
diff --git a/contrib/llvm/include/llvm/Object/COFF.h b/contrib/llvm/include/llvm/Object/COFF.h
index 967420e..6f42d76 100644
--- a/contrib/llvm/include/llvm/Object/COFF.h
+++ b/contrib/llvm/include/llvm/Object/COFF.h
@@ -116,6 +116,7 @@ protected:
virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
virtual error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const;
+ virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const;
virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
@@ -128,6 +129,7 @@ protected:
virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionRequiredForExecution(DataRefImpl Sec,
bool &Res) const;
virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
@@ -197,7 +199,6 @@ public:
static inline bool classof(const Binary *v) {
return v->isCOFF();
}
- static inline bool classof(const COFFObjectFile *v) { return true; }
};
}
diff --git a/contrib/llvm/include/llvm/Object/ELF.h b/contrib/llvm/include/llvm/Object/ELF.h
index 7698441..466de93 100644
--- a/contrib/llvm/include/llvm/Object/ELF.h
+++ b/contrib/llvm/include/llvm/Object/ELF.h
@@ -387,11 +387,65 @@ struct Elf_Rel_Impl<target_endianness, false, isRela>
}
};
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Ehdr_Impl {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
+ Elf_Half e_type; // Type of file (see ET_*)
+ Elf_Half e_machine; // Required architecture for this file (see EM_*)
+ Elf_Word e_version; // Must be equal to 1
+ Elf_Addr e_entry; // Address to jump to in order to start program
+ Elf_Off e_phoff; // Program header table's file offset, in bytes
+ Elf_Off e_shoff; // Section header table's file offset, in bytes
+ Elf_Word e_flags; // Processor-specific flags
+ Elf_Half e_ehsize; // Size of ELF header, in bytes
+ Elf_Half e_phentsize;// Size of an entry in the program header table
+ Elf_Half e_phnum; // Number of entries in the program header table
+ Elf_Half e_shentsize;// Size of an entry in the section header table
+ Elf_Half e_shnum; // Number of entries in the section header table
+ Elf_Half e_shstrndx; // Section header table index of section name
+ // string table
+ bool checkMagic() const {
+ return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
+};
+
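With the header struct hoisted out of ELFObjectFile, a buffer can be validated before committing to a template instantiation; a sketch assuming Buf points at at least sizeof(Ehdr32LE) bytes of file data (the in-place reinterpret_cast mirrors how ELFObjectFile itself reads the header):

    typedef llvm::object::Elf_Ehdr_Impl<llvm::support::little, false> Ehdr32LE;
    const Ehdr32LE *Hdr = reinterpret_cast<const Ehdr32LE *>(Buf);
    if (!Hdr->checkMagic())
      return; // not an ELF file at all
    // EI_CLASS and EI_DATA select which of the four instantiations of
    // ELFObjectFile actually matches this file.
    unsigned char Class = Hdr->getFileClass();        // ELFCLASS32/ELFCLASS64
    unsigned char Encoding = Hdr->getDataEncoding();  // ELFDATA2LSB/ELFDATA2MSB
    (void)Class; (void)Encoding;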
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Phdr;
+
+template<support::endianness target_endianness>
+struct Elf_Phdr<target_endianness, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Word p_type; // Type of segment
+ Elf_Off p_offset; // FileOffset where segment is located, in bytes
+ Elf_Addr p_vaddr; // Virtual Address of beginning of segment
+ Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf_Word p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf_Word p_flags; // Segment flags
+ Elf_Word p_align; // Segment alignment constraint
+};
+
+template<support::endianness target_endianness>
+struct Elf_Phdr<target_endianness, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Word p_type; // Type of segment
+ Elf_Word p_flags; // Segment flags
+ Elf_Off p_offset; // FileOffset where segment is located, in bytes
+ Elf_Addr p_vaddr; // Virtual Address of beginning of segment
+ Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf_Word p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf_Word p_align; // Segment alignment constraint
+};
template<support::endianness target_endianness, bool is64Bits>
class ELFObjectFile : public ObjectFile {
LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ typedef Elf_Ehdr_Impl<target_endianness, is64Bits> Elf_Ehdr;
typedef Elf_Shdr_Impl<target_endianness, is64Bits> Elf_Shdr;
typedef Elf_Sym_Impl<target_endianness, is64Bits> Elf_Sym;
typedef Elf_Dyn_Impl<target_endianness, is64Bits> Elf_Dyn;
@@ -406,28 +460,6 @@ class ELFObjectFile : public ObjectFile {
typedef content_iterator<DynRef> dyn_iterator;
protected:
- struct Elf_Ehdr {
- unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
- Elf_Half e_type; // Type of file (see ET_*)
- Elf_Half e_machine; // Required architecture for this file (see EM_*)
- Elf_Word e_version; // Must be equal to 1
- Elf_Addr e_entry; // Address to jump to in order to start program
- Elf_Off e_phoff; // Program header table's file offset, in bytes
- Elf_Off e_shoff; // Section header table's file offset, in bytes
- Elf_Word e_flags; // Processor-specific flags
- Elf_Half e_ehsize; // Size of ELF header, in bytes
- Elf_Half e_phentsize;// Size of an entry in the program header table
- Elf_Half e_phnum; // Number of entries in the program header table
- Elf_Half e_shentsize;// Size of an entry in the section header table
- Elf_Half e_shnum; // Number of entries in the section header table
- Elf_Half e_shstrndx; // Section header table index of section name
- // string table
- bool checkMagic() const {
- return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
- }
- unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
- unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
- };
// This flag is used for classof, to distinguish ELFObjectFile from
// its subclass. If more subclasses will be created, this flag will
// have to become an enum.
@@ -459,6 +491,59 @@ private:
// This is set the first time getLoadName is called.
mutable const char *dt_soname;
+public:
+ /// \brief Iterate over relocations in a .rel or .rela section.
+ template<class RelocT>
+ class ELFRelocationIterator {
+ public:
+ typedef void difference_type;
+ typedef const RelocT value_type;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef value_type &reference;
+ typedef value_type *pointer;
+
+ /// \brief Default construct iterator.
+ ELFRelocationIterator() : Section(0), Current(0) {}
+ ELFRelocationIterator(const Elf_Shdr *Sec, const char *Start)
+ : Section(Sec)
+ , Current(Start) {}
+
+ reference operator *() {
+ assert(Current && "Attempted to dereference an invalid iterator!");
+ return *reinterpret_cast<const RelocT*>(Current);
+ }
+
+ pointer operator ->() {
+ assert(Current && "Attempted to dereference an invalid iterator!");
+ return reinterpret_cast<const RelocT*>(Current);
+ }
+
+ bool operator ==(const ELFRelocationIterator &Other) {
+ return Section == Other.Section && Current == Other.Current;
+ }
+
+ bool operator !=(const ELFRelocationIterator &Other) {
+ return !(*this == Other);
+ }
+
+ // Pre-increment: advance by the section's fixed entry size.
+ ELFRelocationIterator &operator ++() {
+ assert(Current && "Attempted to increment an invalid iterator!");
+ Current += Section->sh_entsize;
+ return *this;
+ }
+
+ // Post-increment: return the iterator's previous position.
+ ELFRelocationIterator operator ++(int) {
+ ELFRelocationIterator Tmp = *this;
+ ++*this;
+ return Tmp;
+ }
+
+ private:
+ const Elf_Shdr *Section;
+ const char *Current;
+ };
+
+private:
// Records for each version index the corresponding Verdef or Vernaux entry.
// This is filled the first time LoadVersionMap() is called.
class VersionMapEntry : public PointerIntPair<const void*, 1> {
@@ -535,6 +620,7 @@ protected:
virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
virtual error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const;
+ virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const;
friend class DynRefImpl<target_endianness, is64Bits>;
virtual error_code getDynNext(DataRefImpl DynData, DynRef &Result) const;
@@ -555,6 +641,7 @@ protected:
bool &Res) const;
virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const;
virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
bool &Result) const;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const;
@@ -594,6 +681,27 @@ public:
virtual dyn_iterator begin_dynamic_table() const;
virtual dyn_iterator end_dynamic_table() const;
+ typedef ELFRelocationIterator<Elf_Rela> Elf_Rela_Iter;
+ typedef ELFRelocationIterator<Elf_Rel> Elf_Rel_Iter;
+
+ virtual Elf_Rela_Iter beginELFRela(const Elf_Shdr *sec) const {
+ return Elf_Rela_Iter(sec, (const char *)(base() + sec->sh_offset));
+ }
+
+ virtual Elf_Rela_Iter endELFRela(const Elf_Shdr *sec) const {
+ return Elf_Rela_Iter(sec, (const char *)
+ (base() + sec->sh_offset + sec->sh_size));
+ }
+
+ virtual Elf_Rel_Iter beginELFRel(const Elf_Shdr *sec) const {
+ return Elf_Rel_Iter(sec, (const char *)(base() + sec->sh_offset));
+ }
+
+ virtual Elf_Rel_Iter endELFRel(const Elf_Shdr *sec) const {
+ return Elf_Rel_Iter(sec, (const char *)
+ (base() + sec->sh_offset + sec->sh_size));
+ }
+
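A sketch of walking a SHT_RELA section with the new typed iterators (Obj is assumed to be a const reference to the 32-bit little-endian instantiation below, and Shdr a pointer to one of its Elf_Shdr_Impl section headers):

    typedef llvm::object::ELFObjectFile<llvm::support::little, false> ELF32LE;
    // Obj : const ELF32LE &, Shdr : pointer to a SHT_RELA section header.
    for (ELF32LE::Elf_Rela_Iter RI = Obj.beginELFRela(Shdr),
                                RE = Obj.endELFRela(Shdr);
         RI != RE; ++RI) {
      // Each entry is a raw, endian-aware Elf_Rela record; sh_entsize
      // drives the stride, so padded entry layouts also work.
      (void)RI->r_offset;
    }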
virtual uint8_t getBytesInAddress() const;
virtual StringRef getFileFormatName() const;
virtual StringRef getObjectType() const { return "ELF"; }
@@ -608,6 +716,7 @@ public:
const Elf_Shdr *getSection(const Elf_Sym *symb) const;
const Elf_Shdr *getElfSection(section_iterator &It) const;
const Elf_Sym *getElfSymbol(symbol_iterator &It) const;
+ const Elf_Sym *getElfSymbol(uint32_t index) const;
// Methods for type inquiry through isa, cast, and dyn_cast
bool isDyldType() const { return isDyldELFObject; }
@@ -615,7 +724,6 @@ public:
return v->getType() == getELFType(target_endianness == support::little,
is64Bits);
}
- static inline bool classof(const ELFObjectFile *v) { return true; }
};
// Iterate through the version definitions, and place each Elf_Verdef
@@ -804,6 +912,16 @@ ELFObjectFile<target_endianness, is64Bits>
}
template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Sym *
+ELFObjectFile<target_endianness, is64Bits>
+ ::getElfSymbol(uint32_t index) const {
+ DataRefImpl SymbolData;
+ SymbolData.d.a = index;
+ SymbolData.d.b = 1;
+ return getSymbol(SymbolData);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
::getSymbolFileOffset(DataRefImpl Symb,
uint64_t &Result) const {
@@ -863,7 +981,18 @@ error_code ELFObjectFile<target_endianness, is64Bits>
case ELF::STT_FUNC:
case ELF::STT_OBJECT:
case ELF::STT_NOTYPE:
- Result = symb->st_value + (Section ? Section->sh_addr : 0);
+ bool IsRelocatable;
+ switch (Header->e_type) {
+ case ELF::ET_EXEC:
+ case ELF::ET_DYN:
+ IsRelocatable = false;
+ break;
+ default:
+ IsRelocatable = true;
+ }
+ Result = symb->st_value;
+ if (IsRelocatable && Section != 0)
+ Result += Section->sh_addr;
return object_error::success;
default:
Result = UnknownAddressOrSize;
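The hunk above changes the address rule: in linked images (ET_EXEC, ET_DYN) st_value is already a virtual address, while in relocatable objects it is an offset into the defining section and must be rebased by sh_addr. The same rule as a free-standing sketch (parameter names follow the ELF spec, not this header's exact typedefs):

    #include "llvm/Support/ELF.h"

    // Sketch of the rule applied by getSymbolAddress above.
    uint64_t symbolVirtualAddress(unsigned EType, uint64_t StValue,
                                  uint64_t SectionAddr) {
      if (EType == llvm::ELF::ET_EXEC || EType == llvm::ELF::ET_DYN)
        return StValue;             // already virtual in linked images
      return StValue + SectionAddr; // section-relative in relocatable objects
    }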
@@ -1034,6 +1163,16 @@ error_code ELFObjectFile<target_endianness, is64Bits>
template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolValue(DataRefImpl Symb,
+ uint64_t &Val) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ Val = symb->st_value;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
::getSectionNext(DataRefImpl Sec, SectionRef &Result) const {
const uint8_t *sec = reinterpret_cast<const uint8_t *>(Sec.p);
sec += Header->e_shentsize;
@@ -1160,7 +1299,8 @@ error_code ELFObjectFile<target_endianness, is64Bits>
}
template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>::isSectionZeroInit(DataRefImpl Sec,
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionZeroInit(DataRefImpl Sec,
bool &Result) const {
const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
// For ELF, all zero-init sections are virtual (that is, they occupy no space
@@ -1174,6 +1314,18 @@ error_code ELFObjectFile<target_endianness, is64Bits>::isSectionZeroInit(DataRef
template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionReadOnlyData(DataRefImpl Sec,
+ bool &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ Result = !(sec->sh_flags & (ELF::SHF_WRITE | ELF::SHF_EXECINSTR));
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
::sectionContainsSymbol(DataRefImpl Sec,
DataRefImpl Symb,
bool &Result) const {
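The read-only test is just a flag mask: a section counts as read-only data when it is neither writable nor executable. The predicate over a raw sh_flags word, with its truth table for common sections (a sketch, not part of the patch):

    #include "llvm/Support/ELF.h"

    bool isReadOnlyDataFlags(uint64_t Flags) {
      return !(Flags & (llvm::ELF::SHF_WRITE | llvm::ELF::SHF_EXECINSTR));
    }
    // .rodata (SHF_ALLOC)               -> true
    // .data   (SHF_ALLOC|SHF_WRITE)     -> false
    // .text   (SHF_ALLOC|SHF_EXECINSTR) -> false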
@@ -1444,6 +1596,143 @@ error_code ELFObjectFile<target_endianness, is64Bits>
res = "Unknown";
}
break;
+ case ELF::EM_ARM:
+ switch (type) {
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_NONE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PC24);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_REL32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_ABS5);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_SBREL32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_PC8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BREL_ADJ);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DESC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_SWI8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_XPC25);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_XPC22);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DTPMOD32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DTPOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_TPOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_COPY);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GLOB_DAT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_JUMP_SLOT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_RELATIVE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BASE_PREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PLT32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_JUMP24);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP24);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BASE_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_7_0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_15_8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_23_15);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SBREL_11_0_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SBREL_19_12_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SBREL_27_20_CK);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TARGET1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_SBREL31);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_V4BX);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TARGET2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PREL31);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_ABS_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_PREL_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_PREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_ABS_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_PREL_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_PREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP19);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP6);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_ALU_PREL_11_0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_PC12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS32_NOI);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_REL32_NOI);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G0_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G1_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G0_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G1_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_BREL_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_BREL_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_GOTDESC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DESCSEQ);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PLT32_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_PREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_BREL12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTOFF12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTRELAX);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GNU_VTENTRY);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GNU_VTINHERIT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP11);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_GD32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDM32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDO32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_IE32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LE32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDO12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LE12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_IE12GP);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_3);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_4);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_5);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_6);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_7);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_9);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_10);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_11);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_13);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_14);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_15);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ME_TOO);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_DESCSEQ16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_DESCSEQ32);
+ default:
+ res = "Unknown";
+ }
+ break;
case ELF::EM_HEXAGON:
switch (type) {
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_NONE);
@@ -1574,15 +1863,15 @@ error_code ELFObjectFile<target_endianness, is64Bits>
int64_t addend = 0;
uint16_t symbol_index = 0;
switch (sec->sh_type) {
- default :
+ default:
return object_error::parse_failed;
- case ELF::SHT_REL : {
+ case ELF::SHT_REL: {
type = getRel(Rel)->getType();
symbol_index = getRel(Rel)->getSymbol();
// TODO: Read implicit addend from section data.
break;
}
- case ELF::SHT_RELA : {
+ case ELF::SHT_RELA: {
type = getRela(Rel)->getType();
symbol_index = getRela(Rel)->getSymbol();
addend = getRela(Rel)->r_addend;
@@ -1596,9 +1885,8 @@ error_code ELFObjectFile<target_endianness, is64Bits>
switch (Header->e_machine) {
case ELF::EM_X86_64:
switch (type) {
- case ELF::R_X86_64_32S:
- res = symname;
- break;
+ case ELF::R_X86_64_PC8:
+ case ELF::R_X86_64_PC16:
case ELF::R_X86_64_PC32: {
std::string fmtbuf;
raw_string_ostream fmt(fmtbuf);
@@ -1607,10 +1895,23 @@ error_code ELFObjectFile<target_endianness, is64Bits>
Result.append(fmtbuf.begin(), fmtbuf.end());
}
break;
+ case ELF::R_X86_64_8:
+ case ELF::R_X86_64_16:
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S:
+ case ELF::R_X86_64_64: {
+ std::string fmtbuf;
+ raw_string_ostream fmt(fmtbuf);
+ fmt << symname << (addend < 0 ? "" : "+") << addend;
+ fmt.flush();
+ Result.append(fmtbuf.begin(), fmtbuf.end());
+ }
+ break;
default:
res = "Unknown";
}
break;
+ case ELF::EM_ARM:
case ELF::EM_HEXAGON:
res = symname;
break;
@@ -2024,6 +2325,8 @@ StringRef ELFObjectFile<target_endianness, is64Bits>
return "ELF64-i386";
case ELF::EM_X86_64:
return "ELF64-x86-64";
+ case ELF::EM_PPC64:
+ return "ELF64-ppc64";
default:
return "ELF64-unknown";
}
@@ -2044,6 +2347,11 @@ unsigned ELFObjectFile<target_endianness, is64Bits>::getArch() const {
return Triple::arm;
case ELF::EM_HEXAGON:
return Triple::hexagon;
+ case ELF::EM_MIPS:
+ return (target_endianness == support::little) ?
+ Triple::mipsel : Triple::mips;
+ case ELF::EM_PPC64:
+ return Triple::ppc64;
default:
return Triple::UnknownArch;
}
diff --git a/contrib/llvm/include/llvm/Object/MachO.h b/contrib/llvm/include/llvm/Object/MachO.h
index 0b73f94..4e03daa 100644
--- a/contrib/llvm/include/llvm/Object/MachO.h
+++ b/contrib/llvm/include/llvm/Object/MachO.h
@@ -49,7 +49,6 @@ public:
static inline bool classof(const Binary *v) {
return v->isMachO();
}
- static inline bool classof(const MachOObjectFile *v) { return true; }
protected:
virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const;
@@ -62,6 +61,7 @@ protected:
virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
virtual error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const;
+ virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const;
virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
@@ -76,6 +76,7 @@ protected:
bool &Res) const;
virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const;
virtual error_code sectionContainsSymbol(DataRefImpl DRI, DataRefImpl S,
bool &Result) const;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const;
diff --git a/contrib/llvm/include/llvm/Object/MachOFormat.h b/contrib/llvm/include/llvm/Object/MachOFormat.h
index f30d431..c0f700d 100644
--- a/contrib/llvm/include/llvm/Object/MachOFormat.h
+++ b/contrib/llvm/include/llvm/Object/MachOFormat.h
@@ -61,7 +61,10 @@ namespace mach {
CSARM_V6 = 6,
CSARM_V5TEJ = 7,
CSARM_XSCALE = 8,
- CSARM_V7 = 9
+ CSARM_V7 = 9,
+ CSARM_V7F = 10,
+ CSARM_V7S = 11,
+ CSARM_V7K = 12
};
/// \brief PowerPC Machine Subtypes.
@@ -273,6 +276,10 @@ namespace macho {
uint16_t Flags;
uint32_t Value;
};
+ // Despite containing a uint64_t, this structure is only 4-byte aligned within
+ // a MachO file.
+#pragma pack(push)
+#pragma pack(4)
struct Symbol64TableEntry {
uint32_t StringIndex;
uint8_t Type;
@@ -280,6 +287,7 @@ namespace macho {
uint16_t Flags;
uint64_t Value;
};
+#pragma pack(pop)
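The pragmas exist so the struct can be overlaid directly onto symbol-table bytes sitting at arbitrary 4-byte-aligned file offsets. Compile-time checks of that intent, as a C++03-style sketch using the negative-array-size trick (the typedef names are illustrative):

    #include "llvm/Support/AlignOf.h"

    // 16 bytes on disk, and no stricter than 4-byte alignment demanded.
    typedef char SizeCheck[
        sizeof(llvm::object::macho::Symbol64TableEntry) == 16 ? 1 : -1];
    typedef char AlignCheck[
        llvm::AlignOf<llvm::object::macho::Symbol64TableEntry>::Alignment <= 4
            ? 1 : -1];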
/// @}
/// @name Data-in-code Table Entry
diff --git a/contrib/llvm/include/llvm/Object/ObjectFile.h b/contrib/llvm/include/llvm/Object/ObjectFile.h
index 2ec656b..1a3120a 100644
--- a/contrib/llvm/include/llvm/Object/ObjectFile.h
+++ b/contrib/llvm/include/llvm/Object/ObjectFile.h
@@ -76,13 +76,13 @@ public:
}
};
-inline bool operator ==(const DataRefImpl &a, const DataRefImpl &b) {
+inline bool operator==(const DataRefImpl &a, const DataRefImpl &b) {
// Check bitwise identical. This is the only legal way to compare a union w/o
// knowing which member is in use.
return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
}
-inline bool operator <(const DataRefImpl &a, const DataRefImpl &b) {
+inline bool operator<(const DataRefImpl &a, const DataRefImpl &b) {
// Check bitwise identical. This is the only legal way to compare a union w/o
// knowing which member is in use.
return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0;
@@ -144,7 +144,7 @@ public:
SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
bool operator==(const SectionRef &Other) const;
- bool operator <(const SectionRef &Other) const;
+ bool operator<(const SectionRef &Other) const;
error_code getNext(SectionRef &Result) const;
@@ -163,6 +163,7 @@ public:
error_code isRequiredForExecution(bool &Result) const;
error_code isVirtual(bool &Result) const;
error_code isZeroInit(bool &Result) const;
+ error_code isReadOnlyData(bool &Result) const;
error_code containsSymbol(SymbolRef S, bool &Result) const;
@@ -207,11 +208,13 @@ public:
SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
bool operator==(const SymbolRef &Other) const;
- bool operator <(const SymbolRef &Other) const;
+ bool operator<(const SymbolRef &Other) const;
error_code getNext(SymbolRef &Result) const;
error_code getName(StringRef &Result) const;
+ /// Returns the symbol's virtual address (i.e., the address at which it
+ /// will be mapped).
error_code getAddress(uint64_t &Result) const;
error_code getFileOffset(uint64_t &Result) const;
error_code getSize(uint64_t &Result) const;
@@ -231,6 +234,9 @@ public:
/// end_sections() if it is undefined or is an absolute symbol.
error_code getSection(section_iterator &Result) const;
+ /// @brief Get value of the symbol in the symbol table.
+ error_code getValue(uint64_t &Val) const;
+
DataRefImpl getRawDataRefImpl() const;
};
typedef content_iterator<SymbolRef> symbol_iterator;
@@ -248,7 +254,7 @@ public:
LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner);
bool operator==(const LibraryRef &Other) const;
- bool operator <(const LibraryRef &Other) const;
+ bool operator<(const LibraryRef &Other) const;
error_code getNext(LibraryRef &Result) const;
@@ -263,11 +269,11 @@ const uint64_t UnknownAddressOrSize = ~0ULL;
/// ObjectFile - This class is the base class for all object file types.
/// Concrete instances of this object are created by createObjectFile, which
-/// figure out which type to create.
+/// figures out which type to create.
class ObjectFile : public Binary {
virtual void anchor();
- ObjectFile(); // = delete
- ObjectFile(const ObjectFile &other); // = delete
+ ObjectFile() LLVM_DELETED_FUNCTION;
+ ObjectFile(const ObjectFile &other) LLVM_DELETED_FUNCTION;
protected:
ObjectFile(unsigned int Type, MemoryBuffer *source, error_code &ec);
@@ -287,8 +293,8 @@ protected:
friend class SymbolRef;
virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const = 0;
virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const = 0;
- virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const =0;
- virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res) const =0;
+ virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const = 0;
+ virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res)const=0;
virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const = 0;
virtual error_code getSymbolType(DataRefImpl Symb,
SymbolRef::Type &Res) const = 0;
@@ -297,6 +303,7 @@ protected:
uint32_t &Res) const = 0;
virtual error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const = 0;
+ virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const = 0;
// Same as above for SectionRef.
friend class SectionRef;
@@ -314,6 +321,7 @@ protected:
// A section is 'virtual' if its contents aren't present in the object image.
virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const = 0;
virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const = 0;
+ virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const =0;
virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
bool &Result) const = 0;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const = 0;
@@ -384,7 +392,6 @@ public:
static inline bool classof(const Binary *v) {
return v->isObject();
}
- static inline bool classof(const ObjectFile *v) { return true; }
public:
static ObjectFile *createCOFFObjectFile(MemoryBuffer *Object);
@@ -401,7 +408,7 @@ inline bool SymbolRef::operator==(const SymbolRef &Other) const {
return SymbolPimpl == Other.SymbolPimpl;
}
-inline bool SymbolRef::operator <(const SymbolRef &Other) const {
+inline bool SymbolRef::operator<(const SymbolRef &Other) const {
return SymbolPimpl < Other.SymbolPimpl;
}
@@ -441,6 +448,10 @@ inline error_code SymbolRef::getType(SymbolRef::Type &Result) const {
return OwningObject->getSymbolType(SymbolPimpl, Result);
}
+inline error_code SymbolRef::getValue(uint64_t &Val) const {
+ return OwningObject->getSymbolValue(SymbolPimpl, Val);
+}
+
inline DataRefImpl SymbolRef::getRawDataRefImpl() const {
return SymbolPimpl;
}
@@ -456,7 +467,7 @@ inline bool SectionRef::operator==(const SectionRef &Other) const {
return SectionPimpl == Other.SectionPimpl;
}
-inline bool SectionRef::operator <(const SectionRef &Other) const {
+inline bool SectionRef::operator<(const SectionRef &Other) const {
return SectionPimpl < Other.SectionPimpl;
}
@@ -508,6 +519,10 @@ inline error_code SectionRef::isZeroInit(bool &Result) const {
return OwningObject->isSectionZeroInit(SectionPimpl, Result);
}
+inline error_code SectionRef::isReadOnlyData(bool &Result) const {
+ return OwningObject->isSectionReadOnlyData(SectionPimpl, Result);
+}
+
inline error_code SectionRef::containsSymbol(SymbolRef S, bool &Result) const {
return OwningObject->sectionContainsSymbol(SectionPimpl, S.SymbolPimpl,
Result);
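From the client side the new predicate composes with section iteration like every other accessor: out parameter plus error_code. A sketch over an already-created ObjectFile (O and listReadOnly are assumed names; the increment(ec) idiom follows this era's content_iterator):

    #include "llvm/Object/ObjectFile.h"
    #include "llvm/Support/raw_ostream.h"

    // Sketch: print the names of read-only data sections of *O.
    void listReadOnly(llvm::object::ObjectFile *O) {
      llvm::error_code ec;
      for (llvm::object::section_iterator I = O->begin_sections(),
                                          E = O->end_sections();
           I != E && !ec; I.increment(ec)) {
        bool RO = false;
        if (I->isReadOnlyData(RO) || !RO)
          continue; // accessor failed, or section is writable/executable
        llvm::StringRef Name;
        I->getName(Name);
        llvm::outs() << Name << "\n";
      }
    }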
@@ -586,7 +601,7 @@ inline bool LibraryRef::operator==(const LibraryRef &Other) const {
return LibraryPimpl == Other.LibraryPimpl;
}
-inline bool LibraryRef::operator <(const LibraryRef &Other) const {
+inline bool LibraryRef::operator<(const LibraryRef &Other) const {
return LibraryPimpl < Other.LibraryPimpl;
}
diff --git a/contrib/llvm/include/llvm/Object/RelocVisitor.h b/contrib/llvm/include/llvm/Object/RelocVisitor.h
new file mode 100644
index 0000000..7668bde
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/RelocVisitor.h
@@ -0,0 +1,131 @@
+//===-- RelocVisitor.h - Visitor for object file relocations -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a wrapper around all the different types of relocations
+// in different file formats, such that a client can handle them in a unified
+// manner by only implementing a minimal number of functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_RELOCVISITOR_H
+#define LLVM_OBJECT_RELOCVISITOR_H
+
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+namespace object {
+
+struct RelocToApply {
+ // The computed value after applying the relevant relocations.
+ int64_t Value;
+
+ // The width of the value; how many bytes to touch when applying the
+ // relocation.
+ char Width;
+ RelocToApply(const RelocToApply &In) : Value(In.Value), Width(In.Width) {}
+ RelocToApply(int64_t Value, char Width) : Value(Value), Width(Width) {}
+ RelocToApply() : Value(0), Width(0) {}
+};
+
+/// @brief Base class for object file relocation visitors.
+class RelocVisitor {
+public:
+ explicit RelocVisitor(llvm::StringRef FileFormat)
+ : FileFormat(FileFormat), HasError(false) {}
+
+ // TODO: Should handle multiple applied relocations, either by passing in
+ // the previously computed value or by counting paired relocations as a
+ // single visit.
+ RelocToApply visit(uint32_t RelocType, RelocationRef R, uint64_t SecAddr = 0,
+ uint64_t Value = 0) {
+ if (FileFormat == "ELF64-x86-64") {
+ switch (RelocType) {
+ case llvm::ELF::R_X86_64_NONE:
+ return visitELF_X86_64_NONE(R);
+ case llvm::ELF::R_X86_64_64:
+ return visitELF_X86_64_64(R, Value);
+ case llvm::ELF::R_X86_64_PC32:
+ return visitELF_X86_64_PC32(R, Value, SecAddr);
+ case llvm::ELF::R_X86_64_32:
+ return visitELF_X86_64_32(R, Value);
+ case llvm::ELF::R_X86_64_32S:
+ return visitELF_X86_64_32S(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ }
+ return RelocToApply();
+ }
+
+ bool error() { return HasError; }
+
+private:
+ llvm::StringRef FileFormat;
+ bool HasError;
+
+ /// Operations
+
+ // Width is the width in bytes to extend to. Only meaningful for Width < 8;
+ // a 64-bit shift by 64 would be undefined.
+ RelocToApply zeroExtend(RelocToApply r, char Width) {
+ if (Width == r.Width)
+ return r;
+ r.Value &= (uint64_t(1) << (Width * 8)) - 1;
+ return r;
+ }
+ RelocToApply signExtend(RelocToApply r, char Width) {
+ if (Width == r.Width)
+ return r;
+ bool SignBit = r.Value & (uint64_t(1) << (Width * 8 - 1));
+ if (SignBit) {
+ r.Value |= ~((uint64_t(1) << (Width * 8)) - 1);
+ } else {
+ r.Value &= (uint64_t(1) << (Width * 8)) - 1;
+ }
+ return r;
+ }
+
+ /// X86-64 ELF
+ RelocToApply visitELF_X86_64_NONE(RelocationRef R) {
+ return RelocToApply(0, 0);
+ }
+ RelocToApply visitELF_X86_64_64(RelocationRef R, uint64_t Value) {
+ int64_t Addend;
+ R.getAdditionalInfo(Addend);
+ return RelocToApply(Value + Addend, 8);
+ }
+ RelocToApply visitELF_X86_64_PC32(RelocationRef R, uint64_t Value,
+ uint64_t SecAddr) {
+ int64_t Addend;
+ R.getAdditionalInfo(Addend);
+ uint64_t Address;
+ R.getAddress(Address);
+ return RelocToApply(Value + Addend - Address, 4);
+ }
+ RelocToApply visitELF_X86_64_32(RelocationRef R, uint64_t Value) {
+ int64_t Addend;
+ R.getAdditionalInfo(Addend);
+ uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+ RelocToApply visitELF_X86_64_32S(RelocationRef R, uint64_t Value) {
+ int64_t Addend;
+ R.getAdditionalInfo(Addend);
+ int32_t Res = (Value + Addend) & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+};
+
+} // end namespace object
+} // end namespace llvm
+#endif // LLVM_OBJECT_RELOCVISITOR_H
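Putting the visitor to work: a client looks up the relocation type, dispatches through visit(), and writes back RA.Width bytes of RA.Value. A hedged sketch (Rel and SymVal are assumed inputs; getType filling a uint64_t matches this era's RelocationRef API):

    #include "llvm/Object/RelocVisitor.h"

    // Sketch: resolve one relocation against an already-computed symbol value.
    uint64_t applyOne(const llvm::object::ObjectFile &Obj,
                      llvm::object::RelocationRef Rel, uint64_t SymVal) {
      uint64_t Type = 0;
      Rel.getType(Type);
      llvm::object::RelocVisitor V(Obj.getFileFormatName());
      llvm::object::RelocToApply RA =
          V.visit((uint32_t)Type, Rel, /*SecAddr=*/0, SymVal);
      return V.error() ? 0 : (uint64_t)RA.Value; // RA.Width bytes significant
    }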
diff --git a/contrib/llvm/include/llvm/Operator.h b/contrib/llvm/include/llvm/Operator.h
index 1e86980..b326c11 100644
--- a/contrib/llvm/include/llvm/Operator.h
+++ b/contrib/llvm/include/llvm/Operator.h
@@ -16,6 +16,7 @@
#define LLVM_OPERATOR_H
#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Instruction.h"
#include "llvm/Type.h"
@@ -32,9 +33,14 @@ class Operator : public User {
private:
// Do not implement any of these. The Operator class is intended to be used
// as a utility, and is never itself instantiated.
- void *operator new(size_t, unsigned);
- void *operator new(size_t s);
- Operator();
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ void *operator new(size_t s) LLVM_DELETED_FUNCTION;
+ Operator() LLVM_DELETED_FUNCTION;
+
+protected:
+ // NOTE: Cannot use LLVM_DELETED_FUNCTION because it's not legal to delete
+ // an overridden method that's not deleted in the base class. Cannot leave
+ // this unimplemented because that leads to an ODR-violation.
~Operator();
public:
@@ -57,7 +63,6 @@ public:
return Instruction::UserOp1;
}
- static inline bool classof(const Operator *) { return true; }
static inline bool classof(const Instruction *) { return true; }
static inline bool classof(const ConstantExpr *) { return true; }
static inline bool classof(const Value *V) {
@@ -77,8 +82,6 @@ public:
};
private:
- ~OverflowingBinaryOperator(); // do not implement
-
friend class BinaryOperator;
friend class ConstantExpr;
void setHasNoUnsignedWrap(bool B) {
@@ -103,7 +106,6 @@ public:
return (SubclassOptionalData & NoSignedWrap) != 0;
}
- static inline bool classof(const OverflowingBinaryOperator *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Add ||
I->getOpcode() == Instruction::Sub ||
@@ -131,8 +133,6 @@ public:
};
private:
- ~PossiblyExactOperator(); // do not implement
-
friend class BinaryOperator;
friend class ConstantExpr;
void setIsExact(bool B) {
@@ -167,9 +167,6 @@ public:
/// FPMathOperator - Utility class for floating point operations which can have
/// information about relaxed accuracy requirements attached to them.
class FPMathOperator : public Operator {
-private:
- ~FPMathOperator(); // do not implement
-
public:
/// \brief Get the maximum error permitted by this operation in ULPs. An
@@ -177,7 +174,6 @@ public:
/// default precision.
float getFPAccuracy() const;
- static inline bool classof(const FPMathOperator *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getType()->isFPOrFPVectorTy();
}
@@ -191,11 +187,7 @@ public:
/// opcodes.
template<typename SuperClass, unsigned Opc>
class ConcreteOperator : public SuperClass {
- ~ConcreteOperator(); // DO NOT IMPLEMENT
public:
- static inline bool classof(const ConcreteOperator<SuperClass, Opc> *) {
- return true;
- }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Opc;
}
@@ -210,45 +202,35 @@ public:
class AddOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> {
- ~AddOperator(); // DO NOT IMPLEMENT
};
class SubOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> {
- ~SubOperator(); // DO NOT IMPLEMENT
};
class MulOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> {
- ~MulOperator(); // DO NOT IMPLEMENT
};
class ShlOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {
- ~ShlOperator(); // DO NOT IMPLEMENT
};
-
+
class SDivOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {
- ~SDivOperator(); // DO NOT IMPLEMENT
};
class UDivOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> {
- ~UDivOperator(); // DO NOT IMPLEMENT
};
class AShrOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> {
- ~AShrOperator(); // DO NOT IMPLEMENT
};
class LShrOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {
- ~LShrOperator(); // DO NOT IMPLEMENT
};
-
-
-
+
+
+
class GEPOperator
: public ConcreteOperator<Operator, Instruction::GetElementPtr> {
- ~GEPOperator(); // DO NOT IMPLEMENT
-
enum {
IsInBounds = (1 << 0)
};
@@ -288,6 +270,12 @@ public:
return getPointerOperand()->getType();
}
+ /// getPointerAddressSpace - Method to return the address space of the
+ /// pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return cast<PointerType>(getPointerOperandType())->getAddressSpace();
+ }
+
unsigned getNumIndices() const { // Note: always non-negative
return getNumOperands() - 1;
}
diff --git a/contrib/llvm/include/llvm/Pass.h b/contrib/llvm/include/llvm/Pass.h
index 888537d..cd651db 100644
--- a/contrib/llvm/include/llvm/Pass.h
+++ b/contrib/llvm/include/llvm/Pass.h
@@ -29,6 +29,7 @@
#ifndef LLVM_PASS_H
#define LLVM_PASS_H
+#include "llvm/Support/Compiler.h"
#include <string>
namespace llvm {
@@ -82,8 +83,8 @@ class Pass {
AnalysisResolver *Resolver; // Used to resolve analysis
const void *PassID;
PassKind Kind;
- void operator=(const Pass&); // DO NOT IMPLEMENT
- Pass(const Pass &); // DO NOT IMPLEMENT
+ void operator=(const Pass&) LLVM_DELETED_FUNCTION;
+ Pass(const Pass &) LLVM_DELETED_FUNCTION;
public:
explicit Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { }
diff --git a/contrib/llvm/include/llvm/PassAnalysisSupport.h b/contrib/llvm/include/llvm/PassAnalysisSupport.h
index 5c6a2d7..d14d73b 100644
--- a/contrib/llvm/include/llvm/PassAnalysisSupport.h
+++ b/contrib/llvm/include/llvm/PassAnalysisSupport.h
@@ -120,7 +120,7 @@ public:
class PMDataManager;
class AnalysisResolver {
private:
- AnalysisResolver(); // DO NOT IMPLEMENT
+ AnalysisResolver() LLVM_DELETED_FUNCTION;
public:
explicit AnalysisResolver(PMDataManager &P) : PM(P) { }
diff --git a/contrib/llvm/include/llvm/PassSupport.h b/contrib/llvm/include/llvm/PassSupport.h
index c50c2cc..c6ad44f 100644
--- a/contrib/llvm/include/llvm/PassSupport.h
+++ b/contrib/llvm/include/llvm/PassSupport.h
@@ -126,8 +126,8 @@ public:
}
private:
- void operator=(const PassInfo &); // do not implement
- PassInfo(const PassInfo &); // do not implement
+ void operator=(const PassInfo &) LLVM_DELETED_FUNCTION;
+ PassInfo(const PassInfo &) LLVM_DELETED_FUNCTION;
};
#define CALL_ONCE_INITIALIZATION(function) \
diff --git a/contrib/llvm/include/llvm/Support/AlignOf.h b/contrib/llvm/include/llvm/Support/AlignOf.h
index cf71251..d6b0ab8 100644
--- a/contrib/llvm/include/llvm/Support/AlignOf.h
+++ b/contrib/llvm/include/llvm/Support/AlignOf.h
@@ -68,24 +68,20 @@ inline unsigned alignOf() { return AlignOf<T>::Alignment; }
/// integer literal can be used to specify an alignment constraint. Once built
/// up here, we can then begin to indirect between these using normal C++
/// template parameters.
-template <size_t Alignment> struct AlignedCharArrayImpl {};
-template <> struct AlignedCharArrayImpl<0> {
- typedef char type;
-};
+template <size_t Alignment> struct AlignedCharArrayImpl;
+
+// MSVC requires special handling here.
+#ifndef _MSC_VER
+
#if __has_feature(cxx_alignas)
#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
template <> struct AlignedCharArrayImpl<x> { \
- typedef char alignas(x) type; \
+ char alignas(x) aligned; \
}
-#elif defined(__clang__) || defined(__GNUC__)
+#elif defined(__GNUC__) || defined(__IBM_ATTRIBUTES)
#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
template <> struct AlignedCharArrayImpl<x> { \
- typedef char type __attribute__((aligned(x))); \
- }
-#elif defined(_MSC_VER)
-#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
- template <> struct AlignedCharArrayImpl<x> { \
- typedef __declspec(align(x)) char type; \
+ char aligned __attribute__((aligned(x))); \
}
#else
# error No supported align as directive.
@@ -104,9 +100,38 @@ LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024);
LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048);
LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096);
LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192);
+
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#else // _MSC_VER
+
+// We provide special variations of this template for the most common
+// alignments because __declspec(align(...)) doesn't actually work when it is
+// a member of a by-value function argument in MSVC, even if the alignment
+// request is something reasonable like 8-byte or 16-byte.
+template <> struct AlignedCharArrayImpl<1> { char aligned; };
+template <> struct AlignedCharArrayImpl<2> { short aligned; };
+template <> struct AlignedCharArrayImpl<4> { int aligned; };
+template <> struct AlignedCharArrayImpl<8> { double aligned; };
+
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template <> struct AlignedCharArrayImpl<x> { \
+ __declspec(align(x)) char aligned; \
+ }
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(512);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192);
// Any larger and MSVC complains.
#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+#endif // _MSC_VER
+
/// \brief This union template exposes a suitably aligned and sized character
/// array member which can hold elements of any of up to four types.
///
@@ -134,17 +159,11 @@ public:
/// constrain the layout of this character array.
char buffer[sizeof(SizerImpl)];
- // Sadly, Clang and GCC both fail to align a character array properly even
- // with an explicit alignment attribute. To work around this, we union
- // the character array that will actually be used with a struct that contains
- // a single aligned character member. Tests seem to indicate that both Clang
- // and GCC will properly register the alignment of a struct containing an
- // aligned member, and this alignment should carry over to the character
- // array in the union.
- struct {
- typename llvm::AlignedCharArrayImpl<AlignOf<AlignerImpl>::Alignment>::type
- nonce_inner_member;
- } nonce_member;
+private:
+ // Tests seem to indicate that both Clang and GCC will properly register the
+ // alignment of a struct containing an aligned member, and this alignment
+ // should carry over to the character array in the union.
+ llvm::AlignedCharArrayImpl<AlignOf<AlignerImpl>::Alignment> nonce_member;
};
} // end namespace llvm
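The simplification above leans on a single idea: placing an AlignedCharArrayImpl member in a union drags the whole union, including its char buffer, up to that alignment. The bare technique as a sketch (Aligned16Buffer is an illustrative name):

    #include "llvm/Support/AlignOf.h"

    union Aligned16Buffer {
      char buffer[64];                        // storage a client actually uses
      llvm::AlignedCharArrayImpl<16> aligner; // forces 16-byte alignment
    };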
diff --git a/contrib/llvm/include/llvm/Support/Allocator.h b/contrib/llvm/include/llvm/Support/Allocator.h
index a2ad24f..a644b13 100644
--- a/contrib/llvm/include/llvm/Support/Allocator.h
+++ b/contrib/llvm/include/llvm/Support/Allocator.h
@@ -79,8 +79,8 @@ class MallocSlabAllocator : public SlabAllocator {
public:
MallocSlabAllocator() : Allocator() { }
virtual ~MallocSlabAllocator();
- virtual MemSlab *Allocate(size_t Size);
- virtual void Deallocate(MemSlab *Slab);
+ virtual MemSlab *Allocate(size_t Size) LLVM_OVERRIDE;
+ virtual void Deallocate(MemSlab *Slab) LLVM_OVERRIDE;
};
/// BumpPtrAllocator - This allocator is useful for containers that need
@@ -88,8 +88,8 @@ public:
/// allocating memory, and never deletes it until the entire block is dead. This
/// makes allocation speedy, but must only be used when the trade-off is ok.
class BumpPtrAllocator {
- BumpPtrAllocator(const BumpPtrAllocator &); // do not implement
- void operator=(const BumpPtrAllocator &); // do not implement
+ BumpPtrAllocator(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
+ void operator=(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
/// SlabSize - Allocate data into slabs of this size unless we get an
/// allocation above SizeThreshold.
diff --git a/contrib/llvm/include/llvm/Support/CallSite.h b/contrib/llvm/include/llvm/Support/CallSite.h
index c23bb6a..ad8d6d4 100644
--- a/contrib/llvm/include/llvm/Support/CallSite.h
+++ b/contrib/llvm/include/llvm/Support/CallSite.h
@@ -81,7 +81,7 @@ public:
InstrTy *operator->() const { return I.getPointer(); }
operator bool() const { return I.getPointer(); }
- /// getCalledValue - Return the pointer to function that is being called...
+ /// getCalledValue - Return the pointer to function that is being called.
///
ValTy *getCalledValue() const {
assert(getInstruction() && "Not a call or invoke instruction!");
@@ -95,7 +95,7 @@ public:
return dyn_cast<FunTy>(getCalledValue());
}
- /// setCalledFunction - Set the callee to the specified value...
+ /// setCalledFunction - Set the callee to the specified value.
///
void setCalledFunction(Value *V) {
assert(getInstruction() && "Not a call or invoke instruction!");
@@ -130,7 +130,7 @@ public:
}
/// arg_iterator - The type of iterator to use when looping over actual
- /// arguments at this call site...
+ /// arguments at this call site.
typedef IterTy arg_iterator;
/// arg_begin/arg_end - Return iterators corresponding to the actual argument
@@ -185,13 +185,13 @@ public:
}
/// \brief Return true if this function has the given attribute.
- bool hasFnAttr(Attributes N) const {
- CALLSITE_DELEGATE_GETTER(hasFnAttr(N));
+ bool hasFnAttr(Attributes::AttrVal A) const {
+ CALLSITE_DELEGATE_GETTER(hasFnAttr(A));
}
- /// paramHasAttr - whether the call or the callee has the given attribute.
- bool paramHasAttr(uint16_t i, Attributes attr) const {
- CALLSITE_DELEGATE_GETTER(paramHasAttr(i, attr));
+ /// \brief Return true if the call or the callee has the given attribute.
+ bool paramHasAttr(unsigned i, Attributes::AttrVal A) const {
+ CALLSITE_DELEGATE_GETTER(paramHasAttr(i, A));
}
/// @brief Extract the alignment for a call or parameter (0=unknown).
@@ -211,32 +211,32 @@ public:
bool doesNotAccessMemory() const {
CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
}
- void setDoesNotAccessMemory(bool doesNotAccessMemory = true) {
- CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory(doesNotAccessMemory));
+ void setDoesNotAccessMemory() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory());
}
/// @brief Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
}
- void setOnlyReadsMemory(bool onlyReadsMemory = true) {
- CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory(onlyReadsMemory));
+ void setOnlyReadsMemory() {
+ CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory());
}
/// @brief Determine if the call cannot return.
bool doesNotReturn() const {
CALLSITE_DELEGATE_GETTER(doesNotReturn());
}
- void setDoesNotReturn(bool doesNotReturn = true) {
- CALLSITE_DELEGATE_SETTER(setDoesNotReturn(doesNotReturn));
+ void setDoesNotReturn() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotReturn());
}
/// @brief Determine if the call cannot unwind.
bool doesNotThrow() const {
CALLSITE_DELEGATE_GETTER(doesNotThrow());
}
- void setDoesNotThrow(bool doesNotThrow = true) {
- CALLSITE_DELEGATE_SETTER(setDoesNotThrow(doesNotThrow));
+ void setDoesNotThrow() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotThrow());
}
#undef CALLSITE_DELEGATE_GETTER
@@ -244,12 +244,12 @@ public:
/// @brief Determine whether this argument is not captured.
bool doesNotCapture(unsigned ArgNo) const {
- return paramHasAttr(ArgNo + 1, Attribute::NoCapture);
+ return paramHasAttr(ArgNo + 1, Attributes::NoCapture);
}
/// @brief Determine whether this argument is passed by value.
bool isByValArgument(unsigned ArgNo) const {
- return paramHasAttr(ArgNo + 1, Attribute::ByVal);
+ return paramHasAttr(ArgNo + 1, Attributes::ByVal);
}
/// hasArgument - Returns true if this CallSite passes the given Value* as an
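A caller-side view of the reworked attribute API, sketched with an assumed valid CallSite (argIsNoCaptureByVal is an illustrative name):

    #include "llvm/Support/CallSite.h"

    bool argIsNoCaptureByVal(llvm::CallSite CS, unsigned ArgNo) {
      // Both helpers take a 0-based argument number; the 1-based attribute
      // index offset (index 0 being the return value) is added internally.
      return CS.doesNotCapture(ArgNo) && CS.isByValArgument(ArgNo);
    }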
diff --git a/contrib/llvm/include/llvm/Support/Casting.h b/contrib/llvm/include/llvm/Support/Casting.h
index 3aab436..0c71882 100644
--- a/contrib/llvm/include/llvm/Support/Casting.h
+++ b/contrib/llvm/include/llvm/Support/Casting.h
@@ -15,6 +15,7 @@
#ifndef LLVM_SUPPORT_CASTING_H
#define LLVM_SUPPORT_CASTING_H
+#include "llvm/Support/type_traits.h"
#include <cassert>
namespace llvm {
@@ -44,13 +45,23 @@ template<typename From> struct simplify_type<const From> {
// The core of the implementation of isa<X> is here; To and From should be
// the names of classes. This template can be specialized to customize the
// implementation of isa<> without rewriting it from scratch.
-template <typename To, typename From>
+template <typename To, typename From, typename Enabler = void>
struct isa_impl {
static inline bool doit(const From &Val) {
return To::classof(&Val);
}
};
+/// \brief Always allow upcasts, and perform no dynamic check for them.
+template <typename To, typename From>
+struct isa_impl<To, From,
+ typename llvm::enable_if_c<
+ llvm::is_base_of<To, From>::value
+ >::type
+ > {
+ static inline bool doit(const From &) { return true; }
+};
+
template <typename To, typename From> struct isa_impl_cl {
static inline bool doit(const From &Val) {
return isa_impl<To, From>::doit(Val);
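The effect of the new specialization: when To is a base of From, isa<> is answered statically and the base class needs no classof at all. An illustrative hierarchy (hypothetical types, classof written per LLVM convention):

    #include "llvm/Support/Casting.h"

    struct Shape {
      enum Kind { K_Circle } TheKind;
    };
    struct Circle : Shape {
      static bool classof(const Shape *S) { return S->TheKind == K_Circle; }
    };

    bool demo(Circle &C) {
      return llvm::isa<Shape>(C); // upcast: constant true, no classof call
    }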
@@ -65,18 +76,21 @@ template <typename To, typename From> struct isa_impl_cl<To, const From> {
template <typename To, typename From> struct isa_impl_cl<To, From*> {
static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From> struct isa_impl_cl<To, const From*> {
static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
diff --git a/contrib/llvm/include/llvm/Support/CommandLine.h b/contrib/llvm/include/llvm/Support/CommandLine.h
index ae1570d..872c579 100644
--- a/contrib/llvm/include/llvm/Support/CommandLine.h
+++ b/contrib/llvm/include/llvm/Support/CommandLine.h
@@ -41,16 +41,14 @@ namespace cl {
// ParseCommandLineOptions - Command line option processing entry point.
//
void ParseCommandLineOptions(int argc, const char * const *argv,
- const char *Overview = 0,
- bool ReadResponseFiles = false);
+ const char *Overview = 0);
//===----------------------------------------------------------------------===//
// ParseEnvironmentOptions - Environment variable option processing alternate
// entry point.
//
void ParseEnvironmentOptions(const char *progName, const char *envvar,
- const char *Overview = 0,
- bool ReadResponseFiles = false);
+ const char *Overview = 0);
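Both entry points lose the ReadResponseFiles parameter, so existing callers simply drop the trailing argument. Minimal updated caller (sketch):

    #include "llvm/Support/CommandLine.h"

    int main(int argc, char **argv) {
      llvm::cl::ParseCommandLineOptions(argc, argv, "example tool\n");
      return 0;
    }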
///===---------------------------------------------------------------------===//
/// SetVersionPrinter - Override the default (LLVM specific) version printer
@@ -1509,7 +1507,7 @@ class bits : public Option, public bits_storage<DataType, Storage> {
typename ParserClass::parser_data_type();
if (Parser.parse(*this, ArgName, Arg, Val))
return true; // Parse Error!
- addValue(Val);
+ this->addValue(Val);
setPosition(pos);
Positions.push_back(pos);
return false;
@@ -1608,15 +1606,16 @@ public:
class alias : public Option {
Option *AliasFor;
virtual bool handleOccurrence(unsigned pos, StringRef /*ArgName*/,
- StringRef Arg) {
+ StringRef Arg) LLVM_OVERRIDE {
return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg);
}
// Handle printing stuff...
- virtual size_t getOptionWidth() const;
- virtual void printOptionInfo(size_t GlobalWidth) const;
+ virtual size_t getOptionWidth() const LLVM_OVERRIDE;
+ virtual void printOptionInfo(size_t GlobalWidth) const LLVM_OVERRIDE;
// Aliases do not need to print their values.
- virtual void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const {}
+ virtual void printOptionValue(size_t /*GlobalWidth*/,
+ bool /*Force*/) const LLVM_OVERRIDE {}
void done() {
if (!hasArgStr())
diff --git a/contrib/llvm/include/llvm/Support/Compiler.h b/contrib/llvm/include/llvm/Support/Compiler.h
index 4469ae3..7ceeb32 100644
--- a/contrib/llvm/include/llvm/Support/Compiler.h
+++ b/contrib/llvm/include/llvm/Support/Compiler.h
@@ -24,7 +24,7 @@
/// does not imply the existence of any other C++ library features.
#if (__has_feature(cxx_rvalue_references) \
|| defined(__GXX_EXPERIMENTAL_CXX0X__) \
- || _MSC_VER >= 1600)
+ || (defined(_MSC_VER) && _MSC_VER >= 1600))
#define LLVM_USE_RVALUE_REFERENCES 1
#else
#define LLVM_USE_RVALUE_REFERENCES 0
@@ -40,7 +40,7 @@
/// LLVM_DELETED_FUNCTION - Expands to = delete if the compiler supports it.
/// Use to mark functions as uncallable. Member functions with this should
-/// be declared private so that some behaivor is kept in C++03 mode.
+/// be declared private so that some behavior is kept in C++03 mode.
///
/// class DontCopy {
/// private:
@@ -57,6 +57,22 @@
#define LLVM_DELETED_FUNCTION
#endif
+/// LLVM_FINAL - Expands to 'final' if the compiler supports it.
+/// Use to mark classes or virtual methods as final.
+#if (__has_feature(cxx_override_control))
+#define LLVM_FINAL final
+#else
+#define LLVM_FINAL
+#endif
+
+/// LLVM_OVERRIDE - Expands to 'override' if the compiler supports it.
+/// Use to mark virtual methods as overriding a base class method.
+#if (__has_feature(cxx_override_control))
+#define LLVM_OVERRIDE override
+#else
+#define LLVM_OVERRIDE
+#endif
+
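Annotating with these macros is free under C++03 and turns into real keyword checking under C++11. Illustrative use with hypothetical classes:

    #include "llvm/Support/Compiler.h"

    struct Base {
      virtual void run();
      virtual ~Base();
    };
    struct Impl LLVM_FINAL : Base {
      virtual void run() LLVM_OVERRIDE; // C++11 error if Base::run() changes
    };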
/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
/// into a shared library, then the class should be private to the library and
/// not accessible from outside it. Can also be used to mark variables and
@@ -106,9 +122,11 @@
#endif
#if (__GNUC__ >= 4)
-#define BUILTIN_EXPECT(EXPR, VALUE) __builtin_expect((EXPR), (VALUE))
+#define LLVM_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
+#define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
#else
-#define BUILTIN_EXPECT(EXPR, VALUE) (EXPR)
+#define LLVM_LIKELY(EXPR) (EXPR)
+#define LLVM_UNLIKELY(EXPR) (EXPR)
#endif
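The renamed macros take the whole condition and cast it to bool, rather than the old expression/value pair. A typical cold-branch annotation (sketch; grow() is a hypothetical slow path):

    void grow(); // hypothetical slow path, declared elsewhere

    void push(int *Data, unsigned &Size, unsigned Capacity, int V) {
      if (LLVM_UNLIKELY(Size == Capacity))
        grow(); // rare: kept off the hot path on GCC >= 4
      Data[Size++] = V;
    }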
@@ -187,4 +205,13 @@
# define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable()
#endif
+// LLVM_BUILTIN_TRAP - On compilers which support it, expands to an expression
+// which causes the program to exit abnormally.
+#if defined(__clang__) || (__GNUC__ > 4) \
+ || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
+# define LLVM_BUILTIN_TRAP __builtin_trap()
+#else
+# define LLVM_BUILTIN_TRAP *(volatile int*)0x11 = 0
+#endif
+
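Unlike an assert, this fires in release builds too; the fallback store to address 0x11 deliberately faults where __builtin_trap is unavailable. Typical guard (sketch):

    // Sketch: hard-stop on a corrupted invariant regardless of NDEBUG.
    void checkNonNull(const void *P) {
      if (P == 0)
        LLVM_BUILTIN_TRAP;
    }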
#endif
diff --git a/contrib/llvm/include/llvm/Support/DataExtractor.h b/contrib/llvm/include/llvm/Support/DataExtractor.h
index 506ec96..a3ae782 100644
--- a/contrib/llvm/include/llvm/Support/DataExtractor.h
+++ b/contrib/llvm/include/llvm/Support/DataExtractor.h
@@ -10,6 +10,7 @@
#ifndef LLVM_SUPPORT_DATAEXTRACTOR_H
#define LLVM_SUPPORT_DATAEXTRACTOR_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
@@ -99,8 +100,8 @@ public:
/// enough bytes to extract this value, the offset will be left
/// unmodified.
///
- /// @param[in] byte_size
- /// The size in byte of the integer to extract.
+ /// @param[in] size
+ /// The size in bytes of the integer to extract.
///
/// @return
/// The sign extended signed integer value that was extracted,
diff --git a/contrib/llvm/include/llvm/Support/ELF.h b/contrib/llvm/include/llvm/Support/ELF.h
index f7ae60f..2cd2671 100644
--- a/contrib/llvm/include/llvm/Support/ELF.h
+++ b/contrib/llvm/include/llvm/Support/ELF.h
@@ -441,6 +441,7 @@ enum {
R_MICROBLAZE_COPY = 21
};
+// ELF Relocation types for PPC32
enum {
R_PPC_NONE = 0, /* No relocation. */
R_PPC_ADDR32 = 1,
@@ -456,7 +457,23 @@ enum {
R_PPC_REL14 = 11,
R_PPC_REL14_BRTAKEN = 12,
R_PPC_REL14_BRNTAKEN = 13,
- R_PPC_REL32 = 26
+ R_PPC_REL32 = 26,
+ R_PPC_TPREL16_LO = 70,
+ R_PPC_TPREL16_HA = 72
+};
+
+// ELF Relocation types for PPC64
+enum {
+ R_PPC64_ADDR16_LO = 4,
+ R_PPC64_ADDR16_HI = 5,
+ R_PPC64_ADDR14 = 7,
+ R_PPC64_REL24 = 10,
+ R_PPC64_ADDR64 = 38,
+ R_PPC64_ADDR16_HIGHER = 39,
+ R_PPC64_ADDR16_HIGHEST = 41,
+ R_PPC64_TOC16 = 47,
+ R_PPC64_TOC = 51,
+ R_PPC64_TOC16_DS = 63
};
// ARM Specific e_flags
@@ -674,8 +691,36 @@ enum {
R_MIPS_NUM = 218
};
+// Hexagon Specific e_flags
+// Release 5 ABI
+enum {
+ // Object processor version flags, bits[3:0]
+ EF_HEXAGON_MACH_V2 = 0x00000001, // Hexagon V2
+ EF_HEXAGON_MACH_V3 = 0x00000002, // Hexagon V3
+ EF_HEXAGON_MACH_V4 = 0x00000003, // Hexagon V4
+ EF_HEXAGON_MACH_V5 = 0x00000004, // Hexagon V5
+
+ // Highest ISA version flags
+ EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[3:0]
+ // of e_flags
+ EF_HEXAGON_ISA_V2 = 0x00000010, // Hexagon V2 ISA
+ EF_HEXAGON_ISA_V3 = 0x00000020, // Hexagon V3 ISA
+ EF_HEXAGON_ISA_V4 = 0x00000030, // Hexagon V4 ISA
+ EF_HEXAGON_ISA_V5 = 0x00000040 // Hexagon V5 ISA
+};
+
+// Hexagon specific Section indexes for common small data
+// Release 5 ABI
+enum {
+ SHN_HEXAGON_SCOMMON = 0xff00, // Other access sizes
+ SHN_HEXAGON_SCOMMON_1 = 0xff01, // Byte-sized access
+ SHN_HEXAGON_SCOMMON_2 = 0xff02, // Half-word-sized access
+ SHN_HEXAGON_SCOMMON_4 = 0xff03, // Word-sized access
+ SHN_HEXAGON_SCOMMON_8 = 0xff04 // Double-word-size access
+};
+
// ELF Relocation types for Hexagon
-// Release 5 ABI - Document: 80-V9418-3 Rev. J
+// Release 5 ABI
enum {
R_HEX_NONE = 0,
R_HEX_B22_PCREL = 1,
@@ -1103,6 +1148,9 @@ enum {
PT_PHDR = 6, // The program header table itself.
PT_TLS = 7, // The thread-local storage template.
PT_LOOS = 0x60000000, // Lowest operating system-specific pt entry type.
+ PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type.
+ PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
+ PT_HIPROC = 0x7fffffff, // Highest processor-specific program hdr entry type.
// x86-64 program header types.
// These all contain stack unwind tables.
@@ -1113,9 +1161,11 @@ enum {
PT_GNU_STACK = 0x6474e551, // Indicates stack executability.
PT_GNU_RELRO = 0x6474e552, // Read-only after relocation.
- PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type.
- PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
- PT_HIPROC = 0x7fffffff // Highest processor-specific program hdr entry type.
+ // ARM program header types.
+ PT_ARM_ARCHEXT = 0x70000000, // Platform architecture compatibility information
+ // These all contain stack unwind tables.
+ PT_ARM_EXIDX = 0x70000001,
+ PT_ARM_UNWIND = 0x70000001
};
// Segment flag bits.
diff --git a/contrib/llvm/include/llvm/Support/FileOutputBuffer.h b/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
index 0f07164..bcd35e3 100644
--- a/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
+++ b/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
@@ -78,10 +78,11 @@ public:
~FileOutputBuffer();
+private:
+ FileOutputBuffer(const FileOutputBuffer &) LLVM_DELETED_FUNCTION;
+ FileOutputBuffer &operator=(const FileOutputBuffer &) LLVM_DELETED_FUNCTION;
protected:
- FileOutputBuffer(const FileOutputBuffer &); // DO NOT IMPLEMENT
- FileOutputBuffer &operator=(const FileOutputBuffer &); // DO NOT IMPLEMENT
- FileOutputBuffer(uint8_t *Start, uint8_t *End,
+ FileOutputBuffer(uint8_t *Start, uint8_t *End,
StringRef Path, StringRef TempPath);
uint8_t *BufferStart;
diff --git a/contrib/llvm/include/llvm/Support/FileSystem.h b/contrib/llvm/include/llvm/Support/FileSystem.h
index f4a9aa0..b455b28 100644
--- a/contrib/llvm/include/llvm/Support/FileSystem.h
+++ b/contrib/llvm/include/llvm/Support/FileSystem.h
@@ -40,7 +40,7 @@
#include <string>
#include <vector>
-#if HAVE_SYS_STAT_H
+#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
@@ -280,7 +280,7 @@ error_code create_symlink(const Twine &to, const Twine &from);
/// @brief Get the current path.
///
/// @param result Holds the current path on return.
-/// @results errc::success if the current path has been stored in result,
+/// @returns errc::success if the current path has been stored in result,
/// otherwise a platform specific error_code.
error_code current_path(SmallVectorImpl<char> &result);
@@ -289,7 +289,7 @@ error_code current_path(SmallVectorImpl<char> &result);
/// @param path Input path.
/// @param existed Set to true if \a path existed, false if it did not.
/// undefined otherwise.
-/// @results errc::success if path has been removed and existed has been
+/// @returns errc::success if path has been removed and existed has been
/// successfully set, otherwise a platform specific error_code.
error_code remove(const Twine &path, bool &existed);
@@ -298,7 +298,7 @@ error_code remove(const Twine &path, bool &existed);
///
/// @param path Input path.
/// @param num_removed Number of files removed.
-/// @results errc::success if path has been removed and num_removed has been
+/// @returns errc::success if path has been removed and num_removed has been
/// successfully set, otherwise a platform specific error_code.
error_code remove_all(const Twine &path, uint32_t &num_removed);
@@ -323,7 +323,7 @@ error_code resize_file(const Twine &path, uint64_t size);
/// @brief Does file exist?
///
/// @param status A file_status previously returned from stat.
-/// @results True if the file represented by status exists, false if it does
+/// @returns True if the file represented by status exists, false if it does
/// not.
bool exists(file_status status);
@@ -332,7 +332,7 @@ bool exists(file_status status);
/// @param path Input path.
/// @param result Set to true if the file represented by status exists, false if
/// it does not. Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code exists(const Twine &path, bool &result);
@@ -350,7 +350,7 @@ inline bool exists(const Twine &path) {
///
/// assert(status_known(A) || status_known(B));
///
-/// @results True if A and B both represent the same file system entity, false
+/// @returns True if A and B both represent the same file system entity, false
/// otherwise.
bool equivalent(file_status A, file_status B);
@@ -362,7 +362,7 @@ bool equivalent(file_status A, file_status B);
/// @param B Input path B.
/// @param result Set to true if stat(A) and stat(B) have the same device and
/// inode (or equivalent).
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code equivalent(const Twine &A, const Twine &B, bool &result);
@@ -384,7 +384,7 @@ error_code file_size(const Twine &path, uint64_t &result);
/// @brief Does status represent a directory?
///
/// @param status A file_status previously returned from status.
-/// @results status.type() == file_type::directory_file.
+/// @returns status.type() == file_type::directory_file.
bool is_directory(file_status status);
/// @brief Is path a directory?
@@ -392,14 +392,14 @@ bool is_directory(file_status status);
/// @param path Input path.
/// @param result Set to true if \a path is a directory, false if it is not.
/// Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code is_directory(const Twine &path, bool &result);
/// @brief Does status represent a regular file?
///
/// @param status A file_status previously returned from status.
-/// @results status_known(status) && status.type() == file_type::regular_file.
+/// @returns status_known(status) && status.type() == file_type::regular_file.
bool is_regular_file(file_status status);
/// @brief Is path a regular file?
@@ -407,7 +407,7 @@ bool is_regular_file(file_status status);
/// @param path Input path.
/// @param result Set to true if \a path is a regular file, false if it is not.
/// Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code is_regular_file(const Twine &path, bool &result);
@@ -415,7 +415,7 @@ error_code is_regular_file(const Twine &path, bool &result);
/// directory, regular file, or symlink?
///
/// @param status A file_status previously returned from status.
-/// @results exists(s) && !is_regular_file(s) && !is_directory(s) &&
+/// @returns exists(s) && !is_regular_file(s) && !is_directory(s) &&
/// !is_symlink(s)
bool is_other(file_status status);
@@ -425,14 +425,14 @@ bool is_other(file_status status);
/// @param path Input path.
/// @param result Set to true if \a path exists, but is not a directory, regular
/// file, or a symlink, false if it does not. Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code is_other(const Twine &path, bool &result);
/// @brief Does status represent a symlink?
///
/// @param status A file_status previously returned from stat.
-/// @param result status.type() == symlink_file.
+/// @returns status.type() == symlink_file.
bool is_symlink(file_status status);
/// @brief Is path a symlink?
@@ -440,7 +440,7 @@ bool is_symlink(file_status status);
/// @param path Input path.
/// @param result Set to true if \a path is a symlink, false if it is not.
/// Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code is_symlink(const Twine &path, bool &result);
@@ -448,28 +448,28 @@ error_code is_symlink(const Twine &path, bool &result);
///
/// @param path Input path.
/// @param result Set to the file status.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code status(const Twine &path, file_status &result);
/// @brief Modifies permission bits on a file
///
/// @param path Input path.
-/// @results errc::success if permissions have been changed, otherwise a
+/// @returns errc::success if permissions have been changed, otherwise a
/// platform specific error_code.
error_code permissions(const Twine &path, perms prms);
/// @brief Is status available?
///
-/// @param path Input path.
-/// @results True if status() != status_error.
+/// @param s Input file status.
+/// @returns True if status() != status_error.
bool status_known(file_status s);
/// @brief Is status available?
///
/// @param path Input path.
/// @param result Set to true if status() != status_error.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code status_known(const Twine &path, bool &result);
@@ -486,11 +486,11 @@ error_code status_known(const Twine &path, bool &result);
/// clang-%%-%%-%%-%%-%%.s => /tmp/clang-a0-b1-c2-d3-e4.s
///
/// @param model Name to base unique path off of.
-/// @param result_fs Set to the opened file's file descriptor.
+/// @param result_fd Set to the opened file's file descriptor.
/// @param result_path Set to the opened file's absolute path.
-/// @param makeAbsolute If true and @model is not an absolute path, a temp
+/// @param makeAbsolute If true and \a model is not an absolute path, a temp
/// directory will be prepended.
-/// @results errc::success if result_{fd,path} have been successfully set,
+/// @returns errc::success if result_{fd,path} have been successfully set,
/// otherwise a platform specific error_code.
error_code unique_file(const Twine &model, int &result_fd,
SmallVectorImpl<char> &result_path,
@@ -503,7 +503,7 @@ error_code unique_file(const Twine &model, int &result_fd,
///
/// @param path Input path.
/// @param result Set to the canonicalized version of \a path.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code canonicalize(const Twine &path, SmallVectorImpl<char> &result);
@@ -511,7 +511,7 @@ error_code canonicalize(const Twine &path, SmallVectorImpl<char> &result);
///
/// @param path Input path.
/// @param magic Byte sequence to compare \a path's first len(magic) bytes to.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code has_magic(const Twine &path, const Twine &magic, bool &result);
@@ -522,7 +522,7 @@ error_code has_magic(const Twine &path, const Twine &magic, bool &result);
/// @param result Set to the first \a len bytes in the file pointed to by
/// \a path. Or the entire file if file_size(path) < len, in which
/// case result.size() returns the size of the file.
-/// @results errc::success if result has been successfully set,
+/// @returns errc::success if result has been successfully set,
/// errc::value_too_large if len is larger than the file pointed to by
/// \a path, otherwise a platform specific error_code.
error_code get_magic(const Twine &path, uint32_t len,
@@ -535,14 +535,14 @@ file_magic identify_magic(StringRef magic);
///
/// @param path Input path.
/// @param result Set to the type of file, or LLVMFileType::Unknown_FileType.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code identify_magic(const Twine &path, file_magic &result);
/// @brief Get library paths the system linker uses.
///
/// @param result Set to the list of system library paths.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code GetSystemLibraryPaths(SmallVectorImpl<std::string> &result);
@@ -550,7 +550,7 @@ error_code GetSystemLibraryPaths(SmallVectorImpl<std::string> &result);
/// + LLVM_LIB_SEARCH_PATH + LLVM_LIBDIR.
///
/// @param result Set to the list of bitcode library paths.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code GetBitcodeLibraryPaths(SmallVectorImpl<std::string> &result);
@@ -563,7 +563,7 @@ error_code GetBitcodeLibraryPaths(SmallVectorImpl<std::string> &result);
///
/// @param short_name Library name one would give to the system linker.
/// @param result Set to the absolute path \a short_name represents.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code FindLibrary(const Twine &short_name, SmallVectorImpl<char> &result);
@@ -572,7 +572,7 @@ error_code FindLibrary(const Twine &short_name, SmallVectorImpl<char> &result);
/// @param argv0 The program name as it was spelled on the command line.
/// @param MainAddr Address of some symbol in the executable (not in a library).
/// @param result Set to the absolute path of the current executable.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code GetMainExecutable(const char *argv0, void *MainAddr,
SmallVectorImpl<char> &result);
@@ -586,9 +586,9 @@ class mapped_file_region {
public:
enum mapmode {
- readonly, //< May only access map via const_data as read only.
- readwrite, //< May access map via data and modify it. Written to path.
- priv //< May modify via data, but changes are lost on destruction.
+ readonly, ///< May only access map via const_data as read only.
+ readwrite, ///< May access map via data and modify it. Written to path.
+ priv ///< May modify via data, but changes are lost on destruction.
};
private:
@@ -596,7 +596,7 @@ private:
mapmode Mode;
uint64_t Size;
void *Mapping;
-#if LLVM_ON_WIN32
+#ifdef LLVM_ON_WIN32
int FileDescriptor;
void *FileHandle;
void *FileMappingHandle;
@@ -658,13 +658,13 @@ public:
///
/// @param path Path to file to map.
/// @param file_offset Byte offset in file where mapping should begin.
-/// @param size_t Byte length of range of the file to map.
+/// @param size Byte length of range of the file to map.
/// @param map_writable If true, the file will be mapped in r/w such
/// that changes to the mapped buffer will be flushed back
/// to the file. If false, the file will be mapped read-only
/// and the buffer will be read-only.
/// @param result Set to the start address of the mapped buffer.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code map_file_pages(const Twine &path, off_t file_offset, size_t size,
bool map_writable, void *&result);
@@ -674,7 +674,7 @@ error_code map_file_pages(const Twine &path, off_t file_offset, size_t size,
///
/// @param base Pointer to the start of the buffer.
/// @param size Byte length of the range to unmmap.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code unmap_file_pages(void *base, size_t size);
diff --git a/contrib/llvm/include/llvm/Support/Format.h b/contrib/llvm/include/llvm/Support/Format.h
index 59812d9..aaa54e1 100644
--- a/contrib/llvm/include/llvm/Support/Format.h
+++ b/contrib/llvm/include/llvm/Support/Format.h
@@ -170,31 +170,47 @@ public:
}
};
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T>
inline format_object1<T> format(const char *Fmt, const T &Val) {
return format_object1<T>(Fmt, Val);
}
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T1, typename T2>
inline format_object2<T1, T2> format(const char *Fmt, const T1 &Val1,
const T2 &Val2) {
return format_object2<T1, T2>(Fmt, Val1, Val2);
}
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T1, typename T2, typename T3>
inline format_object3<T1, T2, T3> format(const char *Fmt, const T1 &Val1,
const T2 &Val2, const T3 &Val3) {
return format_object3<T1, T2, T3>(Fmt, Val1, Val2, Val3);
}
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T1, typename T2, typename T3, typename T4>
inline format_object4<T1, T2, T3, T4> format(const char *Fmt, const T1 &Val1,
const T2 &Val2, const T3 &Val3,
@@ -202,8 +218,12 @@ inline format_object4<T1, T2, T3, T4> format(const char *Fmt, const T1 &Val1,
return format_object4<T1, T2, T3, T4>(Fmt, Val1, Val2, Val3, Val4);
}
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T1, typename T2, typename T3, typename T4, typename T5>
inline format_object5<T1, T2, T3, T4, T5> format(const char *Fmt,const T1 &Val1,
const T2 &Val2, const T3 &Val3,
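
A minimal sketch of the documented usage, assuming an LLVM tree at this
revision (the float variable and values are illustrative):

    #include "llvm/Support/Format.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      double myfloat = 3.14159;
      // format() captures a printf-style format string plus up to five
      // operands and defers the formatting until it is streamed out.
      llvm::outs() << llvm::format("%0.4f", myfloat) << '\n'; // 3.1416
      llvm::outs() << llvm::format("%d + %d = %d", 1, 2, 3) << '\n';
      return 0;
    }
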
diff --git a/contrib/llvm/include/llvm/Support/FormattedStream.h b/contrib/llvm/include/llvm/Support/FormattedStream.h
index 58a1885..21635dc 100644
--- a/contrib/llvm/include/llvm/Support/FormattedStream.h
+++ b/contrib/llvm/include/llvm/Support/FormattedStream.h
@@ -55,14 +55,15 @@ namespace llvm
///
const char *Scanned;
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream,
/// not counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const {
- // This has the same effect as calling TheStream.current_pos(),
- // but that interface is private.
- return TheStream->tell() - TheStream->GetNumBytesInBuffer();
+ virtual uint64_t current_pos() const LLVM_OVERRIDE {
+ // Our current position in the stream is the total number of bytes that
+ // have been written to the underlying stream (*not* the underlying
+ // stream's own current position).
+ return TheStream->tell();
}
/// ComputeColumn - Examine the given output buffer and figure out which
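
LLVM_OVERRIDE comes from llvm/Support/Compiler.h and expands to the C++11
override keyword when available, and to nothing otherwise. A minimal sketch
of the idiom (the macro definition below is a simplified stand-in for the
real one in Compiler.h):

    #if __cplusplus >= 201103L
    #define LLVM_OVERRIDE override
    #else
    #define LLVM_OVERRIDE
    #endif

    struct Stream {
      virtual ~Stream() {}
      virtual unsigned long long current_pos() const { return 0; }
    };

    struct Formatted : Stream {
      // Under C++11 the compiler now verifies that this really overrides
      // a base-class virtual; a signature mismatch becomes a hard error.
      virtual unsigned long long current_pos() const LLVM_OVERRIDE {
        return 42;
      }
    };
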
diff --git a/contrib/llvm/include/llvm/Support/GCOV.h b/contrib/llvm/include/llvm/Support/GCOV.h
index 19e1ce8..e552315 100644
--- a/contrib/llvm/include/llvm/Support/GCOV.h
+++ b/contrib/llvm/include/llvm/Support/GCOV.h
@@ -27,13 +27,15 @@ class GCOVBlock;
class GCOVLines;
class FileInfo;
-enum GCOVFormat {
- InvalidGCOV,
- GCNO_402,
- GCNO_404,
- GCDA_402,
- GCDA_404
-};
+namespace GCOV {
+ enum GCOVFormat {
+ InvalidGCOV,
+ GCNO_402,
+ GCNO_404,
+ GCDA_402,
+ GCDA_404
+ };
+} // end GCOV namespace
/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific
/// read operations.
@@ -42,20 +44,20 @@ public:
GCOVBuffer(MemoryBuffer *B) : Buffer(B), Cursor(0) {}
/// readGCOVFormat - Read GCOV signature at the beginning of buffer.
- enum GCOVFormat readGCOVFormat() {
+ GCOV::GCOVFormat readGCOVFormat() {
StringRef Magic = Buffer->getBuffer().slice(0, 12);
Cursor = 12;
if (Magic == "oncg*404MVLL")
- return GCNO_404;
+ return GCOV::GCNO_404;
else if (Magic == "oncg*204MVLL")
- return GCNO_402;
+ return GCOV::GCNO_402;
else if (Magic == "adcg*404MVLL")
- return GCDA_404;
+ return GCOV::GCDA_404;
else if (Magic == "adcg*204MVLL")
- return GCDA_402;
+ return GCOV::GCDA_402;
Cursor = 0;
- return InvalidGCOV;
+ return GCOV::InvalidGCOV;
}
/// readFunctionTag - If cursor points to a function tag then increment the
@@ -128,7 +130,7 @@ public:
StringRef Str = Buffer->getBuffer().slice(Cursor, Cursor+4);
assert (Str.empty() == false && "Unexpected memory buffer end!");
Cursor += 4;
- Result = *(uint32_t *)(Str.data());
+ Result = *(const uint32_t *)(Str.data());
return Result;
}
@@ -170,7 +172,7 @@ class GCOVFunction {
public:
GCOVFunction() : Ident(0), LineNumber(0) {}
~GCOVFunction();
- bool read(GCOVBuffer &Buffer, GCOVFormat Format);
+ bool read(GCOVBuffer &Buffer, GCOV::GCOVFormat Format);
void dump();
void collectLineCounts(FileInfo &FI);
private:
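
With the format enum scoped inside the new GCOV namespace, call sites now
qualify the enumerators; a small sketch against the header at this revision:

    #include "llvm/Support/GCOV.h"
    using namespace llvm;

    // Returns true if the buffer holds a .gcno ("notes") file of either
    // supported version, false for .gcda files or unrecognized data.
    static bool isNotesFile(GCOVBuffer &GB) {
      GCOV::GCOVFormat Format = GB.readGCOVFormat();
      return Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404;
    }
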
diff --git a/contrib/llvm/include/llvm/Support/InstVisitor.h b/contrib/llvm/include/llvm/Support/InstVisitor.h
index 109b3cf..6dfb4de 100644
--- a/contrib/llvm/include/llvm/Support/InstVisitor.h
+++ b/contrib/llvm/include/llvm/Support/InstVisitor.h
@@ -209,6 +209,9 @@ public:
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
// Call and Invoke are slightly different as they delegate first through
@@ -262,6 +265,9 @@ private:
case Intrinsic::memcpy: DELEGATE(MemCpyInst);
case Intrinsic::memmove: DELEGATE(MemMoveInst);
case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::vastart: DELEGATE(VAStartInst);
+ case Intrinsic::vaend: DELEGATE(VAEndInst);
+ case Intrinsic::vacopy: DELEGATE(VACopyInst);
case Intrinsic::not_intrinsic: break;
}
}
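
The new hooks let a visitor intercept the va_arg intrinsic family directly
instead of catching it in visitIntrinsicInst; a minimal sketch (the counter
class is illustrative):

    #include "llvm/Function.h"
    #include "llvm/Support/InstVisitor.h"
    using namespace llvm;

    // Counts va_start calls in a function; all other instructions fall
    // through the default delegation chain and are ignored.
    struct VAStartCounter : InstVisitor<VAStartCounter> {
      unsigned NumVAStarts;
      VAStartCounter() : NumVAStarts(0) {}
      void visitVAStartInst(VAStartInst &I) { ++NumVAStarts; }
    };

    static unsigned countVAStarts(Function &F) {
      VAStartCounter C;
      C.visit(F);
      return C.NumVAStarts;
    }
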
diff --git a/contrib/llvm/include/llvm/Support/IntegersSubset.h b/contrib/llvm/include/llvm/Support/IntegersSubset.h
index bb9e769..03039fd 100644
--- a/contrib/llvm/include/llvm/Support/IntegersSubset.h
+++ b/contrib/llvm/include/llvm/Support/IntegersSubset.h
@@ -411,8 +411,8 @@ public:
unsigned getSize() const {
APInt sz(((const APInt&)getItem(0).getLow()).getBitWidth(), 0);
for (unsigned i = 0, e = getNumItems(); i != e; ++i) {
- const APInt &Low = getItem(i).getLow();
- const APInt &High = getItem(i).getHigh();
+ const APInt Low = getItem(i).getLow();
+ const APInt High = getItem(i).getHigh();
APInt S = High - Low + 1;
sz += S;
}
@@ -426,8 +426,8 @@ public:
APInt getSingleValue(unsigned idx) const {
APInt sz(((const APInt&)getItem(0).getLow()).getBitWidth(), 0);
for (unsigned i = 0, e = getNumItems(); i != e; ++i) {
- const APInt &Low = getItem(i).getLow();
- const APInt &High = getItem(i).getHigh();
+ const APInt Low = getItem(i).getLow();
+ const APInt High = getItem(i).getHigh();
APInt S = High - Low + 1;
APInt oldSz = sz;
sz += S;
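
The switch from reference bindings to by-value copies above guards against a
dangling reference: getItem() returns a temporary, and reaching an APInt out
of it through a conversion that yields a reference leaves that reference
pointing into a dead object once the full expression ends. A standalone
sketch of the hazard class, with illustrative names:

    #include <cstdio>

    struct IntWrapper {
      int Val;
      // The conversion returns a reference into *this; if *this is a
      // temporary, the reference dies with it at the end of the full
      // expression (lifetime extension does not apply here).
      operator const int &() const { return Val; }
    };

    static IntWrapper getLow() { IntWrapper W = {42}; return W; }

    int main() {
      const int &Dangling = getLow(); // dangles: temporary already gone
      int Copy = getLow();            // safe: the value is copied out
      std::printf("%d\n", Copy);
      (void)Dangling;
      return 0;
    }
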
diff --git a/contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h b/contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h
index cab18dc..7635d5e 100644
--- a/contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h
+++ b/contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h
@@ -42,6 +42,7 @@ public:
struct RangeEx : public RangeTy {
RangeEx() : Weight(1) {}
RangeEx(const RangeTy &R) : RangeTy(R), Weight(1) {}
+ RangeEx(const RangeTy &R, unsigned W) : RangeTy(R), Weight(W) {}
RangeEx(const IntTy &C) : RangeTy(C), Weight(1) {}
RangeEx(const IntTy &L, const IntTy &H) : RangeTy(L, H), Weight(1) {}
RangeEx(const IntTy &L, const IntTy &H, unsigned W) :
@@ -316,13 +317,13 @@ public:
Items.clear();
const IntTy *Low = &OldItems.begin()->first.getLow();
const IntTy *High = &OldItems.begin()->first.getHigh();
- unsigned Weight = 1;
+ unsigned Weight = OldItems.begin()->first.Weight;
SuccessorClass *Successor = OldItems.begin()->second;
for (CaseItemIt j = OldItems.begin(), i = j++, e = OldItems.end();
j != e; i = j++) {
if (isJoinable(i, j)) {
const IntTy *CurHigh = &j->first.getHigh();
- ++Weight;
+ Weight += j->first.Weight;
if (*CurHigh > *High)
High = CurHigh;
} else {
@@ -330,7 +331,7 @@ public:
add(R, Successor);
Low = &j->first.getLow();
High = &j->first.getHigh();
- Weight = 1;
+ Weight = j->first.Weight;
Successor = j->second;
}
}
@@ -362,10 +363,17 @@ public:
/// Adds all ranges and values from given ranges set to the current
/// mapping.
- void add(const IntegersSubsetTy &CRS, SuccessorClass *S = 0) {
+ void add(const IntegersSubsetTy &CRS, SuccessorClass *S = 0,
+ unsigned Weight = 0) {
+ unsigned ItemWeight = 1;
+ if (Weight)
+ // Weight is associated with CRS as a whole; for now we divide it
+ // evenly to get the weight for each item.
+ ItemWeight = Weight / CRS.getNumItems();
for (unsigned i = 0, e = CRS.getNumItems(); i < e; ++i) {
RangeTy R = CRS.getItem(i);
- add(R, S);
+ RangeEx REx(R, ItemWeight);
+ add(REx, S);
}
}
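
As a worked instance of the new distribution (hypothetical numbers): adding
a subset with three items and Weight = 90 gives each item ItemWeight =
90 / 3 = 30, so the joining loop above later accumulates 60 for a pair of
merged items, where the old code would have counted them as weight 2.
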
diff --git a/contrib/llvm/include/llvm/Support/LEB128.h b/contrib/llvm/include/llvm/Support/LEB128.h
index 410edd4..b52e5bc 100644
--- a/contrib/llvm/include/llvm/Support/LEB128.h
+++ b/contrib/llvm/include/llvm/Support/LEB128.h
@@ -15,7 +15,7 @@
#ifndef LLVM_SYSTEM_LEB128_H
#define LLVM_SYSTEM_LEB128_H
-#include <llvm/Support/raw_ostream.h>
+#include "llvm/Support/raw_ostream.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/Support/LockFileManager.h b/contrib/llvm/include/llvm/Support/LockFileManager.h
index e2fa8eb..8c4a760 100644
--- a/contrib/llvm/include/llvm/Support/LockFileManager.h
+++ b/contrib/llvm/include/llvm/Support/LockFileManager.h
@@ -47,8 +47,8 @@ private:
Optional<std::pair<std::string, int> > Owner;
Optional<error_code> Error;
- LockFileManager(const LockFileManager &);
- LockFileManager &operator=(const LockFileManager &);
+ LockFileManager(const LockFileManager &) LLVM_DELETED_FUNCTION;
+ LockFileManager &operator=(const LockFileManager &) LLVM_DELETED_FUNCTION;
static Optional<std::pair<std::string, int> >
readLockFile(StringRef LockFileName);
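
LLVM_DELETED_FUNCTION, also from llvm/Support/Compiler.h, expands to
"= delete" on C++11 compilers and to nothing otherwise, so an accidental
copy becomes a compile-time error rather than a link-time one. A minimal
sketch of the idiom (the macro below is a simplified stand-in):

    #if __cplusplus >= 201103L
    #define LLVM_DELETED_FUNCTION = delete
    #else
    #define LLVM_DELETED_FUNCTION
    #endif

    class Guard {
      Guard(const Guard &) LLVM_DELETED_FUNCTION;            // no copy
      Guard &operator=(const Guard &) LLVM_DELETED_FUNCTION; // no assign
    public:
      Guard() {}
    };

    // Guard a; Guard b(a);   // hard compile error under C++11
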
diff --git a/contrib/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm/include/llvm/Support/MathExtras.h
index 4005161..11f9e63 100644
--- a/contrib/llvm/include/llvm/Support/MathExtras.h
+++ b/contrib/llvm/include/llvm/Support/MathExtras.h
@@ -431,21 +431,22 @@ inline uint64_t NextPowerOf2(uint64_t A) {
return A + 1;
}
-/// RoundUpToAlignment - Returns the next integer (mod 2**64) that is
-/// greater than or equal to \arg Value and is a multiple of \arg
-/// Align. Align must be non-zero.
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// Examples:
-/// RoundUpToAlignment(5, 8) = 8
-/// RoundUpToAlignment(17, 8) = 24
-/// RoundUpToAlignment(~0LL, 8) = 0
+/// \code
+/// RoundUpToAlignment(5, 8) = 8
+/// RoundUpToAlignment(17, 8) = 24
+/// RoundUpToAlignment(~0LL, 8) = 0
+/// \endcode
inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
return ((Value + Align - 1) / Align) * Align;
}
-/// OffsetToAlignment - Return the offset to the next integer (mod 2**64) that
-/// is greater than or equal to \arg Value and is a multiple of \arg
-/// Align. Align must be non-zero.
+/// Returns the offset to the next integer (mod 2**64) that is greater than
+/// or equal to \p Value and is a multiple of \p Align. \p Align must be
+/// non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
return RoundUpToAlignment(Value, Align) - Value;
}
@@ -463,12 +464,24 @@ template <unsigned B> inline int32_t SignExtend32(uint32_t x) {
return int32_t(x << (32 - B)) >> (32 - B);
}
+/// \brief Sign extend number in the bottom B bits of X to a 32-bit int.
+/// Requires 0 < B <= 32.
+inline int32_t SignExtend32(uint32_t X, unsigned B) {
+ return int32_t(X << (32 - B)) >> (32 - B);
+}
+
/// SignExtend64 - Sign extend B-bit number x to 64-bit int.
/// Usage int64_t r = SignExtend64<5>(x);
template <unsigned B> inline int64_t SignExtend64(uint64_t x) {
return int64_t(x << (64 - B)) >> (64 - B);
}
+/// \brief Sign extend number in the bottom B bits of X to a 64-bit int.
+/// Requires 0 < B <= 64.
+inline int64_t SignExtend64(uint64_t X, unsigned B) {
+ return int64_t(X << (64 - B)) >> (64 - B);
+}
+
} // End llvm namespace
#endif
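
The new non-template overloads take the bit width as a runtime value; both
forms compute the same thing, worked out here by hand:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>
    using namespace llvm;

    int main() {
      // Compile-time width: bit 4 is the sign bit of a 5-bit field,
      // so 0x1F (all five bits set) extends to -1.
      assert(SignExtend32<5>(0x1F) == -1);

      // Runtime width: the same computation with B chosen at run time.
      unsigned B = 5;
      assert(SignExtend32(0x1F, B) == -1);
      assert(SignExtend64(0x0F, B) == 15); // sign bit clear: unchanged
      return 0;
    }
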
diff --git a/contrib/llvm/include/llvm/Support/Memory.h b/contrib/llvm/include/llvm/Support/Memory.h
index 37890e7..025eee7 100644
--- a/contrib/llvm/include/llvm/Support/Memory.h
+++ b/contrib/llvm/include/llvm/Support/Memory.h
@@ -15,6 +15,7 @@
#define LLVM_SYSTEM_MEMORY_H
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/system_error.h"
#include <string>
namespace llvm {
@@ -43,6 +44,70 @@ namespace sys {
/// @brief An abstraction for memory operations.
class Memory {
public:
+ enum ProtectionFlags {
+ MF_READ = 0x1000000,
+ MF_WRITE = 0x2000000,
+ MF_EXEC = 0x4000000
+ };
+
+ /// This method allocates a block of memory that is suitable for loading
+ /// dynamically generated code (e.g. JIT). An attempt to allocate
+ /// \p NumBytes bytes of virtual memory is made.
+ /// \p NearBlock may point to an existing allocation in which case
+ /// an attempt is made to allocate more memory near the existing block.
+ /// The actual allocated address is not guaranteed to be near the requested
+ /// address.
+ /// \p Flags is used to set the initial protection flags for the block
+ /// of the memory.
+ /// \p EC [out] returns an object describing any error that occurs.
+ ///
+ /// This method may allocate more than the number of bytes requested. The
+ /// actual number of bytes allocated is indicated in the returned
+ /// MemoryBlock.
+ ///
+ /// The start of the allocated block must be aligned with the
+ /// system allocation granularity (64K on Windows, page size on Linux).
+ /// If the address following \p NearBlock is not so aligned, it will be
+ /// rounded up to the next allocation granularity boundary.
+ ///
+ /// \returns a non-null MemoryBlock if the function was successful,
+ /// otherwise a null MemoryBlock, with \p EC describing the error.
+ ///
+ /// @brief Allocate mapped memory.
+ static MemoryBlock allocateMappedMemory(size_t NumBytes,
+ const MemoryBlock *const NearBlock,
+ unsigned Flags,
+ error_code &EC);
+
+ /// This method releases a block of memory that was allocated with the
+ /// allocateMappedMemory method. It should not be used to release any
+ /// memory block allocated any other way.
+ /// \p Block describes the memory to be released.
+ ///
+ /// \returns errc::success if the function was successful, or an error_code
+ /// describing the failure if an error occurred.
+ ///
+ /// @brief Release mapped memory.
+ static error_code releaseMappedMemory(MemoryBlock &Block);
+
+ /// This method sets the protection flags for a block of memory to the
+ /// state specified by \p Flags. The behavior is not specified if the
+ /// memory was not allocated using the allocateMappedMemory method.
+ /// \p Block describes the memory block to be protected.
+ /// \p Flags specifies the new protection state to be assigned to the block.
+ /// \p ErrMsg [out] returns a string describing any error that occurred.
+ ///
+ /// If \p Flags is MF_WRITE, the actual behavior varies
+ /// with the operating system (e.g. MF_READ | MF_WRITE on Windows) and the
+ /// target architecture (e.g. MF_WRITE -> MF_READ | MF_WRITE on i386).
+ ///
+ /// \returns errc::success if the function was successful, or an error_code
+ /// describing the failure if an error occurred.
+ ///
+ /// @brief Set memory protection state.
+ static error_code protectMappedMemory(const MemoryBlock &Block,
+ unsigned Flags);
+
/// This method allocates a block of Read/Write/Execute memory that is
/// suitable for executing dynamically generated code (e.g. JIT). An
/// attempt to allocate \p NumBytes bytes of virtual memory is made.
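
A sketch of the allocate/protect/release sequence these additions enable for
JIT-style code emission, assuming the signatures above (error handling is
reduced to the essentials):

    #include "llvm/Support/Memory.h"
    #include <cstring>
    using namespace llvm;

    static bool emitAndRun(const uint8_t *Code, size_t Size) {
      error_code EC;
      // Map writable memory first and copy the generated code in...
      sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
          Size, 0, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
      if (EC)
        return false;
      std::memcpy(MB.base(), Code, Size);
      // ...then flip the block to read+execute before running it.
      if (sys::Memory::protectMappedMemory(
              MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
        return false;
      // Release the mapping once the code is no longer needed.
      return !sys::Memory::releaseMappedMemory(MB);
    }
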
diff --git a/contrib/llvm/include/llvm/Support/MemoryBuffer.h b/contrib/llvm/include/llvm/Support/MemoryBuffer.h
index 06816de..1f02907 100644
--- a/contrib/llvm/include/llvm/Support/MemoryBuffer.h
+++ b/contrib/llvm/include/llvm/Support/MemoryBuffer.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_MEMORYBUFFER_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -36,8 +37,8 @@ class MemoryBuffer {
const char *BufferStart; // Start of the buffer.
const char *BufferEnd; // End of the buffer.
- MemoryBuffer(const MemoryBuffer &); // DO NOT IMPLEMENT
- MemoryBuffer &operator=(const MemoryBuffer &); // DO NOT IMPLEMENT
+ MemoryBuffer(const MemoryBuffer &) LLVM_DELETED_FUNCTION;
+ MemoryBuffer &operator=(const MemoryBuffer &) LLVM_DELETED_FUNCTION;
protected:
MemoryBuffer() {}
void init(const char *BufStart, const char *BufEnd,
diff --git a/contrib/llvm/include/llvm/Support/Mutex.h b/contrib/llvm/include/llvm/Support/Mutex.h
index 42ea630..6abc533 100644
--- a/contrib/llvm/include/llvm/Support/Mutex.h
+++ b/contrib/llvm/include/llvm/Support/Mutex.h
@@ -14,6 +14,7 @@
#ifndef LLVM_SYSTEM_MUTEX_H
#define LLVM_SYSTEM_MUTEX_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Threading.h"
#include <cassert>
@@ -75,8 +76,8 @@ namespace llvm
/// @name Do Not Implement
/// @{
private:
- MutexImpl(const MutexImpl & original);
- void operator=(const MutexImpl &);
+ MutexImpl(const MutexImpl &) LLVM_DELETED_FUNCTION;
+ void operator=(const MutexImpl &) LLVM_DELETED_FUNCTION;
/// @}
};
diff --git a/contrib/llvm/include/llvm/Support/MutexGuard.h b/contrib/llvm/include/llvm/Support/MutexGuard.h
index cd13bfe..6bb1622 100644
--- a/contrib/llvm/include/llvm/Support/MutexGuard.h
+++ b/contrib/llvm/include/llvm/Support/MutexGuard.h
@@ -26,8 +26,8 @@ namespace llvm {
/// @brief Guard a section of code with a Mutex.
class MutexGuard {
sys::Mutex &M;
- MutexGuard(const MutexGuard &); // DO NOT IMPLEMENT
- void operator=(const MutexGuard &); // DO NOT IMPLEMENT
+ MutexGuard(const MutexGuard &) LLVM_DELETED_FUNCTION;
+ void operator=(const MutexGuard &) LLVM_DELETED_FUNCTION;
public:
MutexGuard(sys::Mutex &m) : M(m) { M.acquire(); }
~MutexGuard() { M.release(); }
diff --git a/contrib/llvm/include/llvm/Support/PathV1.h b/contrib/llvm/include/llvm/Support/PathV1.h
index f4bedf9..643ee8c 100644
--- a/contrib/llvm/include/llvm/Support/PathV1.h
+++ b/contrib/llvm/include/llvm/Support/PathV1.h
@@ -683,8 +683,8 @@ namespace sys {
/// This function returns status information about the file. The type of
/// path (file or directory) is updated to reflect the actual contents
/// of the file system.
- /// @returns 0 on failure, with Error explaining why (if non-zero)
- /// @returns a pointer to a FileStatus structure on success.
+ /// @returns 0 on failure, with Error explaining why (if non-zero),
+ /// otherwise returns a pointer to a FileStatus structure on success.
/// @brief Get file status.
const FileStatus *getFileStatus(
bool forceUpdate = false, ///< Force an update from the file system
diff --git a/contrib/llvm/include/llvm/Support/PathV2.h b/contrib/llvm/include/llvm/Support/PathV2.h
index 8d79709..ae1a21c 100644
--- a/contrib/llvm/include/llvm/Support/PathV2.h
+++ b/contrib/llvm/include/llvm/Support/PathV2.h
@@ -39,13 +39,14 @@ namespace path {
/// The backwards traversal order is the reverse of forward traversal.
///
/// Iteration examples. Each component is separated by ',':
-/// / => /
-/// /foo => /,foo
-/// foo/ => foo,.
-/// /foo/bar => /,foo,bar
-/// ../ => ..,.
-/// C:\foo\bar => C:,/,foo,bar
-///
+/// @code
+/// / => /
+/// /foo => /,foo
+/// foo/ => foo,.
+/// /foo/bar => /,foo,bar
+/// ../ => ..,.
+/// C:\foo\bar => C:,/,foo,bar
+/// @endcode
class const_iterator {
StringRef Path; ///< The entire path.
StringRef Component; ///< The current component. Not necessarily in Path.
@@ -107,18 +108,22 @@ inline reverse_iterator rend(StringRef path) {
/// @brief Remove the last component from \a path unless it is the root dir.
///
-/// directory/filename.cpp => directory/
-/// directory/ => directory
-/// / => /
+/// @code
+/// directory/filename.cpp => directory/
+/// directory/ => directory
+/// / => /
+/// @endcode
///
/// @param path A path that is modified to not have a file component.
void remove_filename(SmallVectorImpl<char> &path);
/// @brief Replace the file extension of \a path with \a extension.
///
-/// ./filename.cpp => ./filename.extension
-/// ./filename => ./filename.extension
-/// ./ => ./.extension
+/// @code
+/// ./filename.cpp => ./filename.extension
+/// ./filename => ./filename.extension
+/// ./ => ./.extension
+/// @endcode
///
/// @param path A path that has its extension replaced with \a extension.
/// @param extension The extension to be added. It may be empty. It may also
@@ -128,12 +133,14 @@ void replace_extension(SmallVectorImpl<char> &path, const Twine &extension);
/// @brief Append to path.
///
-/// /foo + bar/f => /foo/bar/f
-/// /foo/ + bar/f => /foo/bar/f
-/// foo + bar/f => foo/bar/f
+/// @code
+/// /foo + bar/f => /foo/bar/f
+/// /foo/ + bar/f => /foo/bar/f
+/// foo + bar/f => foo/bar/f
+/// @endcode
///
/// @param path Set to \a path + \a component.
-/// @param component The component to be appended to \a path.
+/// @param a The component to be appended to \a path.
void append(SmallVectorImpl<char> &path, const Twine &a,
const Twine &b = "",
const Twine &c = "",
@@ -141,9 +148,11 @@ void append(SmallVectorImpl<char> &path, const Twine &a,
/// @brief Append to path.
///
-/// /foo + [bar,f] => /foo/bar/f
-/// /foo/ + [bar,f] => /foo/bar/f
-/// foo + [bar,f] => foo/bar/f
+/// @code
+/// /foo + [bar,f] => /foo/bar/f
+/// /foo/ + [bar,f] => /foo/bar/f
+/// foo + [bar,f] => foo/bar/f
+/// @endcode
///
/// @param path Set to \a path + [\a begin, \a end).
/// @param begin Start of components to append.
@@ -169,9 +178,11 @@ void native(const Twine &path, SmallVectorImpl<char> &result);
/// @brief Get root name.
///
-/// //net/hello => //net
-/// c:/hello => c: (on Windows, on other platforms nothing)
-/// /hello => <empty>
+/// @code
+/// //net/hello => //net
+/// c:/hello => c: (on Windows, on other platforms nothing)
+/// /hello => <empty>
+/// @endcode
///
/// @param path Input path.
/// @result The root name of \a path if it has one, otherwise "".
@@ -179,9 +190,11 @@ const StringRef root_name(StringRef path);
/// @brief Get root directory.
///
-/// /goo/hello => /
-/// c:/hello => /
-/// d/file.txt => <empty>
+/// @code
+/// /goo/hello => /
+/// c:/hello => /
+/// d/file.txt => <empty>
+/// @endcode
///
/// @param path Input path.
/// @result The root directory of \a path if it has one, otherwise
@@ -198,9 +211,11 @@ const StringRef root_path(StringRef path);
/// @brief Get relative path.
///
-/// C:\hello\world => hello\world
-/// foo/bar => foo/bar
-/// /foo/bar => foo/bar
+/// @code
+/// C:\hello\world => hello\world
+/// foo/bar => foo/bar
+/// /foo/bar => foo/bar
+/// @endcode
///
/// @param path Input path.
/// @result The path starting after root_path if one exists, otherwise "".
@@ -208,9 +223,11 @@ const StringRef relative_path(StringRef path);
/// @brief Get parent path.
///
-/// / => <empty>
-/// /foo => /
-/// foo/../bar => foo/..
+/// @code
+/// / => <empty>
+/// /foo => /
+/// foo/../bar => foo/..
+/// @endcode
///
/// @param path Input path.
/// @result The parent path of \a path if one exists, otherwise "".
@@ -218,10 +235,12 @@ const StringRef parent_path(StringRef path);
/// @brief Get filename.
///
-/// /foo.txt => foo.txt
-/// . => .
-/// .. => ..
-/// / => /
+/// @code
+/// /foo.txt => foo.txt
+/// . => .
+/// .. => ..
+/// / => /
+/// @endcode
///
/// @param path Input path.
/// @result The filename part of \a path. This is defined as the last component
@@ -234,11 +253,13 @@ const StringRef filename(StringRef path);
/// substring of filename ending at (but not including) the last dot. Otherwise
/// it is filename.
///
-/// /foo/bar.txt => bar
-/// /foo/bar => bar
-/// /foo/.txt => <empty>
-/// /foo/. => .
-/// /foo/.. => ..
+/// @code
+/// /foo/bar.txt => bar
+/// /foo/bar => bar
+/// /foo/.txt => <empty>
+/// /foo/. => .
+/// /foo/.. => ..
+/// @endcode
///
/// @param path Input path.
/// @result The stem of \a path.
@@ -250,9 +271,11 @@ const StringRef stem(StringRef path);
/// substring of filename starting at (and including) the last dot, and ending
/// at the end of \a path. Otherwise "".
///
-/// /foo/bar.txt => .txt
-/// /foo/bar => <empty>
-/// /foo/.txt => .txt
+/// @code
+/// /foo/bar.txt => .txt
+/// /foo/bar => <empty>
+/// /foo/.txt => .txt
+/// @endcode
///
/// @param path Input path.
/// @result The extension of \a path.
@@ -272,7 +295,7 @@ bool is_separator(char value);
/// ignored if the user or system has set the typical environment variable
/// (e.g., TEMP on Windows, TMPDIR on *nix) to specify a temporary directory.
///
-/// @param Result Holds the resulting path name.
+/// @param result Holds the resulting path name.
void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result);
/// @brief Has root name?
diff --git a/contrib/llvm/include/llvm/Support/PrettyStackTrace.h b/contrib/llvm/include/llvm/Support/PrettyStackTrace.h
index 9b3ecda..2122e06 100644
--- a/contrib/llvm/include/llvm/Support/PrettyStackTrace.h
+++ b/contrib/llvm/include/llvm/Support/PrettyStackTrace.h
@@ -16,6 +16,8 @@
#ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H
#define LLVM_SUPPORT_PRETTYSTACKTRACE_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class raw_ostream;
@@ -32,8 +34,8 @@ namespace llvm {
/// virtual stack trace. This gets dumped out if the program crashes.
class PrettyStackTraceEntry {
const PrettyStackTraceEntry *NextEntry;
- PrettyStackTraceEntry(const PrettyStackTraceEntry &); // DO NOT IMPLEMENT
- void operator=(const PrettyStackTraceEntry&); // DO NOT IMPLEMENT
+ PrettyStackTraceEntry(const PrettyStackTraceEntry &) LLVM_DELETED_FUNCTION;
+ void operator=(const PrettyStackTraceEntry&) LLVM_DELETED_FUNCTION;
public:
PrettyStackTraceEntry();
virtual ~PrettyStackTraceEntry();
@@ -52,7 +54,7 @@ namespace llvm {
const char *Str;
public:
PrettyStackTraceString(const char *str) : Str(str) {}
- virtual void print(raw_ostream &OS) const;
+ virtual void print(raw_ostream &OS) const LLVM_OVERRIDE;
};
/// PrettyStackTraceProgram - This object prints a specified program arguments
@@ -63,7 +65,7 @@ namespace llvm {
public:
PrettyStackTraceProgram(int argc, const char * const*argv)
: ArgC(argc), ArgV(argv) {}
- virtual void print(raw_ostream &OS) const;
+ virtual void print(raw_ostream &OS) const LLVM_OVERRIDE;
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Support/Program.h b/contrib/llvm/include/llvm/Support/Program.h
index a85f235..7c9a951 100644
--- a/contrib/llvm/include/llvm/Support/Program.h
+++ b/contrib/llvm/include/llvm/Support/Program.h
@@ -34,8 +34,8 @@ namespace sys {
void *Data_;
// Noncopyable.
- Program(const Program& other);
- Program& operator=(const Program& other);
+ Program(const Program& other) LLVM_DELETED_FUNCTION;
+ Program& operator=(const Program& other) LLVM_DELETED_FUNCTION;
/// @name Methods
/// @{
diff --git a/contrib/llvm/include/llvm/Support/RWMutex.h b/contrib/llvm/include/llvm/Support/RWMutex.h
index 0d4cb81..935b307 100644
--- a/contrib/llvm/include/llvm/Support/RWMutex.h
+++ b/contrib/llvm/include/llvm/Support/RWMutex.h
@@ -14,6 +14,7 @@
#ifndef LLVM_SYSTEM_RWMUTEX_H
#define LLVM_SYSTEM_RWMUTEX_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Threading.h"
#include <cassert>
@@ -75,8 +76,8 @@ namespace llvm
/// @name Do Not Implement
/// @{
private:
- RWMutexImpl(const RWMutexImpl & original);
- void operator=(const RWMutexImpl &);
+ RWMutexImpl(const RWMutexImpl & original) LLVM_DELETED_FUNCTION;
+ void operator=(const RWMutexImpl &) LLVM_DELETED_FUNCTION;
/// @}
};
diff --git a/contrib/llvm/include/llvm/Support/Regex.h b/contrib/llvm/include/llvm/Support/Regex.h
index 7648e77..ffe09b1 100644
--- a/contrib/llvm/include/llvm/Support/Regex.h
+++ b/contrib/llvm/include/llvm/Support/Regex.h
@@ -36,7 +36,7 @@ namespace llvm {
Newline=2
};
- /// Compiles the given POSIX Extended Regular Expression \arg Regex.
+ /// Compiles the given POSIX Extended Regular Expression \p Regex.
/// This implementation supports regexes and matching strings with embedded
/// NUL characters.
Regex(StringRef Regex, unsigned Flags = NoFlags);
@@ -51,17 +51,17 @@ namespace llvm {
/// many entries plus one for the whole regex (as element 0).
unsigned getNumMatches() const;
- /// matches - Match the regex against a given \arg String.
+ /// matches - Match the regex against a given \p String.
///
/// \param Matches - If given, on a successful match this will be filled in
- /// with references to the matched group expressions (inside \arg String),
+ /// with references to the matched group expressions (inside \p String),
/// the first group is always the entire pattern.
///
/// This returns true on a successful match.
bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = 0);
/// sub - Return the result of replacing the first match of the regex in
- /// \arg String with the \arg Repl string. Backreferences like "\0" in the
+ /// \p String with the \p Repl string. Backreferences like "\0" in the
/// replacement string are replaced with the appropriate match substring.
///
/// Note that the replacement string has backslash escaping performed on
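
A minimal usage sketch of the documented match interface (pattern and input
are illustrative):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/Regex.h"
    using namespace llvm;

    static bool demo() {
      Regex R("([0-9]+)-([0-9]+)");
      SmallVector<StringRef, 3> Matches;
      // Matches[0] is the whole match; [1] and [2] are the groups.
      if (!R.match("range 10-20", &Matches))
        return false;
      return Matches[1] == "10" && Matches[2] == "20";
    }
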
diff --git a/contrib/llvm/include/llvm/Support/Registry.h b/contrib/llvm/include/llvm/Support/Registry.h
index d0375be..29eafb6 100644
--- a/contrib/llvm/include/llvm/Support/Registry.h
+++ b/contrib/llvm/include/llvm/Support/Registry.h
@@ -37,7 +37,7 @@ namespace llvm {
/// is necessary to define an alternate traits class.
template <typename T>
class RegistryTraits {
- RegistryTraits(); // Do not implement.
+ RegistryTraits() LLVM_DELETED_FUNCTION;
public:
typedef SimpleRegistryEntry<T> entry;
@@ -63,7 +63,7 @@ namespace llvm {
class iterator;
private:
- Registry(); // Do not implement.
+ Registry() LLVM_DELETED_FUNCTION;
static void Announce(const entry &E) {
for (listener *Cur = ListenerHead; Cur; Cur = Cur->Next)
@@ -120,6 +120,7 @@ namespace llvm {
/// Abstract base class for registry listeners, which are informed when new
/// entries are added to the registry. Simply subclass and instantiate:
///
+ /// \code
/// class CollectorPrinter : public Registry<Collector>::listener {
/// protected:
/// void registered(const Registry<Collector>::entry &e) {
@@ -131,7 +132,7 @@ namespace llvm {
/// };
///
/// CollectorPrinter Printer;
- ///
+ /// \endcode
class listener {
listener *Prev, *Next;
diff --git a/contrib/llvm/include/llvm/Support/SourceMgr.h b/contrib/llvm/include/llvm/Support/SourceMgr.h
index 8949a3a..bcf95f2 100644
--- a/contrib/llvm/include/llvm/Support/SourceMgr.h
+++ b/contrib/llvm/include/llvm/Support/SourceMgr.h
@@ -64,9 +64,9 @@ private:
DiagHandlerTy DiagHandler;
void *DiagContext;
-
- SourceMgr(const SourceMgr&); // DO NOT IMPLEMENT
- void operator=(const SourceMgr&); // DO NOT IMPLEMENT
+
+ SourceMgr(const SourceMgr&) LLVM_DELETED_FUNCTION;
+ void operator=(const SourceMgr&) LLVM_DELETED_FUNCTION;
public:
SourceMgr() : LineNoCache(0), DiagHandler(0), DiagContext(0) {}
~SourceMgr();
@@ -145,7 +145,7 @@ public:
/// GetMessage - Return an SMDiagnostic at the specified location with the
/// specified string.
///
- /// @param Type - If non-null, the kind of message (e.g., "error") which is
+ /// @param Msg If non-null, the kind of message (e.g., "error") which is
/// prefixed to the message.
SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) const;
diff --git a/contrib/llvm/include/llvm/Support/StreamableMemoryObject.h b/contrib/llvm/include/llvm/Support/StreamableMemoryObject.h
index 531dbb2..a2b4bcb 100644
--- a/contrib/llvm/include/llvm/Support/StreamableMemoryObject.h
+++ b/contrib/llvm/include/llvm/Support/StreamableMemoryObject.h
@@ -12,6 +12,7 @@
#define STREAMABLEMEMORYOBJECT_H_
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/DataStream.h"
#include <vector>
@@ -107,14 +108,15 @@ class StreamableMemoryObject : public MemoryObject {
class StreamingMemoryObject : public StreamableMemoryObject {
public:
StreamingMemoryObject(DataStreamer *streamer);
- virtual uint64_t getBase() const { return 0; }
- virtual uint64_t getExtent() const;
- virtual int readByte(uint64_t address, uint8_t* ptr) const;
+ virtual uint64_t getBase() const LLVM_OVERRIDE { return 0; }
+ virtual uint64_t getExtent() const LLVM_OVERRIDE;
+ virtual int readByte(uint64_t address, uint8_t* ptr) const LLVM_OVERRIDE;
virtual int readBytes(uint64_t address,
uint64_t size,
uint8_t* buf,
- uint64_t* copied) const ;
- virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const {
+ uint64_t* copied) const LLVM_OVERRIDE;
+ virtual const uint8_t *getPointer(uint64_t address,
+ uint64_t size) const LLVM_OVERRIDE {
// This could be fixed by ensuring the bytes are fetched and making a copy,
// requiring that the bitcode size be known, or otherwise ensuring that
// the memory doesn't go away/get reallocated, but it's
@@ -122,8 +124,8 @@ public:
assert(0 && "getPointer in streaming memory objects not allowed");
return NULL;
}
- virtual bool isValidAddress(uint64_t address) const;
- virtual bool isObjectEnd(uint64_t address) const;
+ virtual bool isValidAddress(uint64_t address) const LLVM_OVERRIDE;
+ virtual bool isObjectEnd(uint64_t address) const LLVM_OVERRIDE;
/// Drop s bytes from the front of the stream, pushing the positions of the
/// remaining bytes down by s. This is used to skip past the bitcode header,
@@ -170,8 +172,8 @@ private:
return true;
}
- StreamingMemoryObject(const StreamingMemoryObject&); // DO NOT IMPLEMENT
- void operator=(const StreamingMemoryObject&); // DO NOT IMPLEMENT
+ StreamingMemoryObject(const StreamingMemoryObject&) LLVM_DELETED_FUNCTION;
+ void operator=(const StreamingMemoryObject&) LLVM_DELETED_FUNCTION;
};
StreamableMemoryObject *getNonStreamedMemoryObject(
diff --git a/contrib/llvm/include/llvm/Support/TargetFolder.h b/contrib/llvm/include/llvm/Support/TargetFolder.h
index c65faa6..45f7816 100644
--- a/contrib/llvm/include/llvm/Support/TargetFolder.h
+++ b/contrib/llvm/include/llvm/Support/TargetFolder.h
@@ -26,11 +26,11 @@
namespace llvm {
-class TargetData;
+class DataLayout;
/// TargetFolder - Create constants with target dependent folding.
class TargetFolder {
- const TargetData *TD;
+ const DataLayout *TD;
/// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const {
@@ -41,7 +41,7 @@ class TargetFolder {
}
public:
- explicit TargetFolder(const TargetData *TheTD) : TD(TheTD) {}
+ explicit TargetFolder(const DataLayout *TheTD) : TD(TheTD) {}
//===--------------------------------------------------------------------===//
// Binary Operators
@@ -177,7 +177,14 @@ public:
return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned));
}
Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
- return ConstantExpr::getPointerCast(C, DestTy);
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getPointerCast(C, DestTy));
+ }
+ Constant *CreateFPCast(Constant *C, Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getFPCast(C, DestTy));
}
Constant *CreateBitCast(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::BitCast, C, DestTy);
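
The identity-type checks added above mean a no-op pointer or floating-point
cast now hands back the original Constant immediately, instead of building a
ConstantExpr cast and running it through Fold only to recover the same
value.
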
diff --git a/contrib/llvm/include/llvm/Support/TargetRegistry.h b/contrib/llvm/include/llvm/Support/TargetRegistry.h
index 8253c4c..ca58bfb 100644
--- a/contrib/llvm/include/llvm/Support/TargetRegistry.h
+++ b/contrib/llvm/include/llvm/Support/TargetRegistry.h
@@ -273,7 +273,7 @@ namespace llvm {
/// createMCAsmInfo - Create a MCAsmInfo implementation for the specified
/// target triple.
///
- /// \arg Triple - This argument is used to determine the target machine
+ /// \param Triple This argument is used to determine the target machine
/// feature set; it should always be provided. Generally this should be
/// either the target triple from the module, or the target triple of the
/// host if that does not exist.
@@ -319,12 +319,12 @@ namespace llvm {
/// createMCSubtargetInfo - Create a MCSubtargetInfo implementation.
///
- /// \arg Triple - This argument is used to determine the target machine
+ /// \param Triple This argument is used to determine the target machine
/// feature set; it should always be provided. Generally this should be
/// either the target triple from the module, or the target triple of the
/// host if that does not exist.
- /// \arg CPU - This specifies the name of the target CPU.
- /// \arg Features - This specifies the string representation of the
+ /// \param CPU This specifies the name of the target CPU.
+ /// \param Features This specifies the string representation of the
/// additional target features.
MCSubtargetInfo *createMCSubtargetInfo(StringRef Triple, StringRef CPU,
StringRef Features) const {
@@ -334,9 +334,9 @@ namespace llvm {
}
/// createTargetMachine - Create a target specific machine implementation
- /// for the specified \arg Triple.
+ /// for the specified \p Triple.
///
- /// \arg Triple - This argument is used to determine the target machine
+ /// \param Triple This argument is used to determine the target machine
/// feature set; it should always be provided. Generally this should be
/// either the target triple from the module, or the target triple of the
/// host if that does not exist.
@@ -353,8 +353,7 @@ namespace llvm {
/// createMCAsmBackend - Create a target specific assembly parser.
///
- /// \arg Triple - The target triple string.
- /// \arg Backend - The target independent assembler object.
+ /// \param Triple The target triple string.
MCAsmBackend *createMCAsmBackend(StringRef Triple, StringRef CPU) const {
if (!MCAsmBackendCtorFn)
return 0;
@@ -372,7 +371,7 @@ namespace llvm {
/// createMCAsmParser - Create a target specific assembly parser.
///
- /// \arg Parser - The target independent parser implementation to use for
+ /// \param Parser The target independent parser implementation to use for
/// parsing and lexing.
MCTargetAsmParser *createMCAsmParser(MCSubtargetInfo &STI,
MCAsmParser &Parser) const {
@@ -418,13 +417,13 @@ namespace llvm {
/// createMCObjectStreamer - Create a target specific MCStreamer.
///
- /// \arg TT - The target triple.
- /// \arg Ctx - The target context.
- /// \arg TAB - The target assembler backend object. Takes ownership.
- /// \arg _OS - The stream object.
- /// \arg _Emitter - The target independent assembler object.Takes ownership.
- /// \arg RelaxAll - Relax all fixups?
- /// \arg NoExecStack - Mark file as not needing a executable stack.
+ /// \param TT The target triple.
+ /// \param Ctx The target context.
+ /// \param TAB The target assembler backend object. Takes ownership.
+ /// \param _OS The stream object.
+ /// \param _Emitter The target independent assembler object. Takes ownership.
+ /// \param RelaxAll Relax all fixups?
+ /// \param NoExecStack Mark file as not needing an executable stack.
MCStreamer *createMCObjectStreamer(StringRef TT, MCContext &Ctx,
MCAsmBackend &TAB,
raw_ostream &_OS,
diff --git a/contrib/llvm/include/llvm/Support/Threading.h b/contrib/llvm/include/llvm/Support/Threading.h
index c0e842c..9017afb 100644
--- a/contrib/llvm/include/llvm/Support/Threading.h
+++ b/contrib/llvm/include/llvm/Support/Threading.h
@@ -41,8 +41,8 @@ namespace llvm {
/// before llvm_start_multithreaded().
void llvm_release_global_lock();
- /// llvm_execute_on_thread - Execute the given \arg UserFn on a separate
- /// thread, passing it the provided \arg UserData.
+ /// llvm_execute_on_thread - Execute the given \p UserFn on a separate
+ /// thread, passing it the provided \p UserData.
///
/// This function does not guarantee that the code will actually be executed
/// on a separate thread or honoring the requested stack size, but tries to do
diff --git a/contrib/llvm/include/llvm/Support/TimeValue.h b/contrib/llvm/include/llvm/Support/TimeValue.h
index 94f132a..e780b50 100644
--- a/contrib/llvm/include/llvm/Support/TimeValue.h
+++ b/contrib/llvm/include/llvm/Support/TimeValue.h
@@ -153,7 +153,6 @@ namespace sys {
/// Determine if \p this is greater than or equal to \p that.
/// @returns True iff *this >= that.
- /// @brief True if this >= that.
int operator >= (const TimeValue &that) const {
if ( this->seconds_ > that.seconds_ ) {
return 1;
@@ -164,8 +163,7 @@ namespace sys {
}
/// Determines if two TimeValue objects represent the same moment in time.
- /// @brief True iff *this == that.
- /// @brief True if this == that.
+ /// @returns True iff *this == that.
int operator == (const TimeValue &that) const {
return (this->seconds_ == that.seconds_) &&
(this->nanos_ == that.nanos_);
@@ -173,8 +171,7 @@ namespace sys {
/// Determines if two TimeValue objects represent times that are not the
/// same.
- /// @return True iff *this != that.
- /// @brief True if this != that.
+ /// @returns True iff *this != that.
int operator != (const TimeValue &that) const { return !(*this == that); }
/// Adds two TimeValue objects together.
diff --git a/contrib/llvm/include/llvm/Support/Timer.h b/contrib/llvm/include/llvm/Support/Timer.h
index 404cb6d..a741882 100644
--- a/contrib/llvm/include/llvm/Support/Timer.h
+++ b/contrib/llvm/include/llvm/Support/Timer.h
@@ -15,6 +15,7 @@
#ifndef LLVM_SUPPORT_TIMER_H
#define LLVM_SUPPORT_TIMER_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
@@ -130,7 +131,7 @@ private:
///
class TimeRegion {
Timer *T;
- TimeRegion(const TimeRegion &); // DO NOT IMPLEMENT
+ TimeRegion(const TimeRegion &) LLVM_DELETED_FUNCTION;
public:
explicit TimeRegion(Timer &t) : T(&t) {
T->startTimer();
@@ -168,8 +169,8 @@ class TimerGroup {
std::vector<std::pair<TimeRecord, std::string> > TimersToPrint;
TimerGroup **Prev, *Next; // Doubly linked list of TimerGroup's.
- TimerGroup(const TimerGroup &TG); // DO NOT IMPLEMENT
- void operator=(const TimerGroup &TG); // DO NOT IMPLEMENT
+ TimerGroup(const TimerGroup &TG) LLVM_DELETED_FUNCTION;
+ void operator=(const TimerGroup &TG) LLVM_DELETED_FUNCTION;
public:
explicit TimerGroup(StringRef name);
~TimerGroup();
diff --git a/contrib/llvm/include/llvm/Support/ValueHandle.h b/contrib/llvm/include/llvm/Support/ValueHandle.h
index 61e21b8..dbcf0fd 100644
--- a/contrib/llvm/include/llvm/Support/ValueHandle.h
+++ b/contrib/llvm/include/llvm/Support/ValueHandle.h
@@ -59,8 +59,8 @@ private:
// pair. The 'setValPtrInt' and 'getValPtrInt' methods below give them this
// access.
PointerIntPair<Value*, 2> VP;
-
- explicit ValueHandleBase(const ValueHandleBase&); // DO NOT IMPLEMENT.
+
+ ValueHandleBase(const ValueHandleBase&) LLVM_DELETED_FUNCTION;
public:
explicit ValueHandleBase(HandleBaseKind Kind)
: PrevPair(0, Kind), Next(0), VP(0, 0) {}
diff --git a/contrib/llvm/include/llvm/Support/YAMLParser.h b/contrib/llvm/include/llvm/Support/YAMLParser.h
index 98910eb..12958fa 100644
--- a/contrib/llvm/include/llvm/Support/YAMLParser.h
+++ b/contrib/llvm/include/llvm/Support/YAMLParser.h
@@ -133,7 +133,6 @@ public:
virtual void skip() {}
unsigned int getType() const { return TypeID; }
- static inline bool classof(const Node *) { return true; }
void *operator new ( size_t Size
, BumpPtrAllocator &Alloc
@@ -166,7 +165,6 @@ class NullNode : public Node {
public:
NullNode(OwningPtr<Document> &D) : Node(NK_Null, D, StringRef()) {}
- static inline bool classof(const NullNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Null;
}
@@ -199,7 +197,6 @@ public:
/// This happens with escaped characters and multi-line literals.
StringRef getValue(SmallVectorImpl<char> &Storage) const;
- static inline bool classof(const ScalarNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Scalar;
}
@@ -241,12 +238,11 @@ public:
/// @returns The value, or nullptr if failed() == true.
Node *getValue();
- virtual void skip() {
+ virtual void skip() LLVM_OVERRIDE {
getKey()->skip();
getValue()->skip();
}
- static inline bool classof(const KeyValueNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_KeyValue;
}
@@ -358,11 +354,10 @@ public:
iterator end() { return iterator(); }
- virtual void skip() {
+ virtual void skip() LLVM_OVERRIDE {
yaml::skip(*this);
}
- static inline bool classof(const MappingNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Mapping;
}
@@ -421,11 +416,10 @@ public:
iterator end() { return iterator(); }
- virtual void skip() {
+ virtual void skip() LLVM_OVERRIDE {
yaml::skip(*this);
}
- static inline bool classof(const SequenceNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Sequence;
}
@@ -450,7 +444,6 @@ public:
StringRef getName() const { return Name; }
Node *getTarget();
- static inline bool classof(const ScalarNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Alias;
}
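
Removing the always-true same-type classof overloads matches the updated
isa<>/dyn_cast<> machinery, which no longer consults them; the usual query
pattern is unchanged. A small sketch (the helper is illustrative):

    #include "llvm/Support/Casting.h"
    #include "llvm/Support/YAMLParser.h"
    using namespace llvm;

    // dyn_cast still keys off classof(const Node *); only the redundant
    // overload taking the derived type itself has been removed.
    static yaml::ScalarNode *asScalar(yaml::Node *N) {
      // Returns N downcast to ScalarNode, or null for any other node kind.
      return dyn_cast<yaml::ScalarNode>(N);
    }
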
diff --git a/contrib/llvm/include/llvm/Support/circular_raw_ostream.h b/contrib/llvm/include/llvm/Support/circular_raw_ostream.h
index 2b3c329..2823af3 100644
--- a/contrib/llvm/include/llvm/Support/circular_raw_ostream.h
+++ b/contrib/llvm/include/llvm/Support/circular_raw_ostream.h
@@ -81,12 +81,12 @@ namespace llvm
Filled = false;
}
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream,
/// not counting the bytes currently in the buffer.
///
- virtual uint64_t current_pos() const {
+ virtual uint64_t current_pos() const LLVM_OVERRIDE {
// This has the same effect as calling TheStream.current_pos(),
// but that interface is private.
return TheStream->tell() - TheStream->GetNumBytesInBuffer();
diff --git a/contrib/llvm/include/llvm/Support/raw_os_ostream.h b/contrib/llvm/include/llvm/Support/raw_os_ostream.h
index 4f5d361..4385721 100644
--- a/contrib/llvm/include/llvm/Support/raw_os_ostream.h
+++ b/contrib/llvm/include/llvm/Support/raw_os_ostream.h
@@ -24,14 +24,14 @@ namespace llvm {
/// use the underlying stream to detect errors.
class raw_os_ostream : public raw_ostream {
std::ostream &OS;
-
+
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t Size);
-
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
+
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const;
-
+ virtual uint64_t current_pos() const LLVM_OVERRIDE;
+
public:
raw_os_ostream(std::ostream &O) : OS(O) {}
~raw_os_ostream();
diff --git a/contrib/llvm/include/llvm/Support/raw_ostream.h b/contrib/llvm/include/llvm/Support/raw_ostream.h
index 5de749a..eab0f2d 100644
--- a/contrib/llvm/include/llvm/Support/raw_ostream.h
+++ b/contrib/llvm/include/llvm/Support/raw_ostream.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_RAW_OSTREAM_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -29,8 +30,8 @@ namespace llvm {
class raw_ostream {
private:
// Do not implement. raw_ostream is noncopyable.
- void operator=(const raw_ostream &);
- raw_ostream(const raw_ostream &);
+ void operator=(const raw_ostream &) LLVM_DELETED_FUNCTION;
+ raw_ostream(const raw_ostream &) LLVM_DELETED_FUNCTION;
/// The buffer is handled in such a way that the buffer is
/// uninitialized, unbuffered, or out of space when OutBufCur >=
@@ -191,10 +192,10 @@ public:
raw_ostream &operator<<(double N);
- /// write_hex - Output \arg N in hexadecimal, without any prefix or padding.
+ /// write_hex - Output \p N in hexadecimal, without any prefix or padding.
raw_ostream &write_hex(unsigned long long N);
- /// write_escaped - Output \arg Str, turning '\\', '\t', '\n', '"', and
+ /// write_escaped - Output \p Str, turning '\\', '\t', '\n', '"', and
/// anything that doesn't satisfy std::isprint into an escape sequence.
raw_ostream &write_escaped(StringRef Str, bool UseHexEscapes = false);
@@ -210,13 +211,19 @@ public:
/// Changes the foreground color of text that will be output from this point
/// forward.
- /// @param colors ANSI color to use, the special SAVEDCOLOR can be used to
+ /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
/// change only the bold attribute, and keep colors untouched
- /// @param bold bold/brighter text, default false
- /// @param bg if true change the background, default: change foreground
+ /// @param Bold bold/brighter text, default false
+ /// @param BG if true change the background, default: change foreground
/// @returns itself so it can be used within << invocations
- virtual raw_ostream &changeColor(enum Colors, bool = false, bool = false) {
- return *this; }
+ virtual raw_ostream &changeColor(enum Colors Color,
+ bool Bold = false,
+ bool BG = false) {
+ (void)Color;
+ (void)Bold;
+ (void)BG;
+ return *this;
+ }
/// Resets the colors to terminal defaults. Call this when you are done
/// outputting colored text, or before program exit.
@@ -239,15 +246,16 @@ public:
private:
/// write_impl - This is the piece of the class that is implemented
- /// by subclasses. This writes the \args Size bytes starting at
- /// \arg Ptr to the underlying stream.
+ /// by subclasses. This writes the \p Size bytes starting at
+ /// \p Ptr to the underlying stream.
///
/// This function is guaranteed to only be called at a point at which it is
/// safe for the subclass to install a new buffer via SetBuffer.
///
- /// \arg Ptr - The start of the data to be written. For buffered streams this
+ /// \param Ptr The start of the data to be written. For buffered streams this
/// is guaranteed to be the start of the buffer.
- /// \arg Size - The number of bytes to be written.
+ ///
+ /// \param Size The number of bytes to be written.
///
/// \invariant { Size > 0 }
virtual void write_impl(const char *Ptr, size_t Size) = 0;
@@ -314,14 +322,14 @@ class raw_fd_ostream : public raw_ostream {
uint64_t pos;
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const { return pos; }
+ virtual uint64_t current_pos() const LLVM_OVERRIDE { return pos; }
/// preferred_buffer_size - Determine an efficient buffer size.
- virtual size_t preferred_buffer_size() const;
+ virtual size_t preferred_buffer_size() const LLVM_OVERRIDE;
/// error_detected - Set the flag indicating that an output error has
/// been encountered.
@@ -382,14 +390,14 @@ public:
}
virtual raw_ostream &changeColor(enum Colors colors, bool bold=false,
- bool bg=false);
- virtual raw_ostream &resetColor();
+ bool bg=false) LLVM_OVERRIDE;
+ virtual raw_ostream &resetColor() LLVM_OVERRIDE;
- virtual raw_ostream &reverseColor();
+ virtual raw_ostream &reverseColor() LLVM_OVERRIDE;
- virtual bool is_displayed() const;
+ virtual bool is_displayed() const LLVM_OVERRIDE;
- virtual bool has_colors() const;
+ virtual bool has_colors() const LLVM_OVERRIDE;
/// has_error - Return the value of the flag in this raw_fd_ostream indicating
/// whether an output error has been encountered.
@@ -435,11 +443,11 @@ class raw_string_ostream : public raw_ostream {
std::string &OS;
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const { return OS.size(); }
+ virtual uint64_t current_pos() const LLVM_OVERRIDE { return OS.size(); }
public:
explicit raw_string_ostream(std::string &O) : OS(O) {}
~raw_string_ostream();
@@ -459,15 +467,15 @@ class raw_svector_ostream : public raw_ostream {
SmallVectorImpl<char> &OS;
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const;
+ virtual uint64_t current_pos() const LLVM_OVERRIDE;
public:
/// Construct a new raw_svector_ostream.
///
- /// \arg O - The vector to write to; this should generally have at least 128
+ /// \param O The vector to write to; this should generally have at least 128
/// bytes free to avoid any extraneous memory overhead.
explicit raw_svector_ostream(SmallVectorImpl<char> &O);
~raw_svector_ostream();
@@ -485,11 +493,11 @@ public:
/// raw_null_ostream - A raw_ostream that discards all output.
class raw_null_ostream : public raw_ostream {
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t size);
+ virtual void write_impl(const char *Ptr, size_t size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const;
+ virtual uint64_t current_pos() const LLVM_OVERRIDE;
public:
explicit raw_null_ostream() {}
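
Both annotations used throughout these raw_ostream.h hunks come from the newly included llvm/Support/Compiler.h. Roughly, and simplifying the real feature checks, they reduce to the following; on pre-C++11 compilers they expand to nothing, so the markup costs nothing there:

    // Simplified sketch of the Compiler.h portability macros (the real
    // header keys these off __has_feature and GCC version checks).
    #if __cplusplus >= 201103L
    #define LLVM_OVERRIDE override          // diagnose a signature mismatch
    #define LLVM_DELETED_FUNCTION = delete  // compile-time error, not link-time
    #else
    #define LLVM_OVERRIDE
    #define LLVM_DELETED_FUNCTION
    #endif
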
diff --git a/contrib/llvm/include/llvm/Support/system_error.h b/contrib/llvm/include/llvm/Support/system_error.h
index af81206..0d164f6 100644
--- a/contrib/llvm/include/llvm/Support/system_error.h
+++ b/contrib/llvm/include/llvm/Support/system_error.h
@@ -17,6 +17,8 @@
#ifndef LLVM_SYSTEM_SYSTEM_ERROR_H
#define LLVM_SYSTEM_SYSTEM_ERROR_H
+#include "llvm/Support/Compiler.h"
+
/*
system_error synopsis
@@ -629,8 +631,8 @@ public:
private:
error_category();
- error_category(const error_category&);// = delete;
- error_category& operator=(const error_category&);// = delete;
+ error_category(const error_category&) LLVM_DELETED_FUNCTION;
+ error_category& operator=(const error_category&) LLVM_DELETED_FUNCTION;
public:
virtual const char* name() const = 0;
@@ -651,7 +653,7 @@ public:
class _do_message : public error_category
{
public:
- virtual std::string message(int ev) const;
+ virtual std::string message(int ev) const LLVM_OVERRIDE;
};
const error_category& generic_category();
diff --git a/contrib/llvm/include/llvm/Support/type_traits.h b/contrib/llvm/include/llvm/Support/type_traits.h
index 7b97547..f930639 100644
--- a/contrib/llvm/include/llvm/Support/type_traits.h
+++ b/contrib/llvm/include/llvm/Support/type_traits.h
@@ -54,8 +54,9 @@ struct is_class
// is_class<> metafunction due to Paul Mensonides (leavings@attbi.com). For
// more details:
// http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1
- public:
- enum { value = sizeof(char) == sizeof(dont_use::is_class_helper<T>(0)) };
+public:
+ static const bool value =
+ sizeof(char) == sizeof(dont_use::is_class_helper<T>(0));
};
@@ -162,12 +163,11 @@ template <typename T> class is_integral_or_enum {
static UnderlyingT &nonce_instance;
public:
- enum {
+ static const bool
value = (!is_class<UnderlyingT>::value && !is_pointer<UnderlyingT>::value &&
!is_same<UnderlyingT, float>::value &&
!is_same<UnderlyingT, double>::value &&
- sizeof(char) != sizeof(check_int_convertible(nonce_instance)))
- };
+ sizeof(char) != sizeof(check_int_convertible(nonce_instance)));
};
// enable_if_c - Enable/disable a template based on a metafunction
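
Switching value from an anonymous enum to static const bool gives these metafunctions a member of genuine type bool, which composes more predictably with other traits. A small usage sketch, with a hypothetical enum, feeding the trait into the enable_if_c mentioned in the trailing comment:

    #include "llvm/Support/type_traits.h"

    enum Color { Red, Green }; // hypothetical enum for illustration

    // Only instantiable for integral or enum types; ::value is now a real
    // compile-time bool, usable directly as the template argument.
    template <typename T>
    typename llvm::enable_if_c<llvm::is_integral_or_enum<T>::value, int>::type
    asInt(T V) { return static_cast<int>(V); }

    int test() { return asInt(Green) + asInt(42); } // both instantiate fine
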
diff --git a/contrib/llvm/include/llvm/SymbolTableListTraits.h b/contrib/llvm/include/llvm/SymbolTableListTraits.h
index 91a4eb9..ec5c88f 100644
--- a/contrib/llvm/include/llvm/SymbolTableListTraits.h
+++ b/contrib/llvm/include/llvm/SymbolTableListTraits.h
@@ -46,7 +46,6 @@ public:
/// getListOwner - Return the object that owns this list. If this is a list
/// of instructions, it returns the BasicBlock that owns them.
ItemParentClass *getListOwner() {
- typedef iplist<ValueSubClass> ItemParentClass::*Sublist;
size_t Offset(size_t(&((ItemParentClass*)0->*ItemParentClass::
getSublistAccess(static_cast<ValueSubClass*>(0)))));
iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this));
diff --git a/contrib/llvm/include/llvm/TableGen/Error.h b/contrib/llvm/include/llvm/TableGen/Error.h
index fd5f805..2f6b7e6 100644
--- a/contrib/llvm/include/llvm/TableGen/Error.h
+++ b/contrib/llvm/include/llvm/TableGen/Error.h
@@ -19,26 +19,17 @@
namespace llvm {
-class TGError {
- SMLoc Loc;
- std::string Message;
-public:
- TGError(SMLoc loc, const std::string &message) : Loc(loc), Message(message) {}
-
- SMLoc getLoc() const { return Loc; }
- const std::string &getMessage() const { return Message; }
-};
-
-void PrintWarning(SMLoc WarningLoc, const Twine &Msg);
+void PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg);
void PrintWarning(const char *Loc, const Twine &Msg);
void PrintWarning(const Twine &Msg);
-void PrintWarning(const TGError &Warning);
-void PrintError(SMLoc ErrorLoc, const Twine &Msg);
+void PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg);
void PrintError(const char *Loc, const Twine &Msg);
void PrintError(const Twine &Msg);
-void PrintError(const TGError &Error);
+LLVM_ATTRIBUTE_NORETURN void PrintFatalError(const std::string &Msg);
+LLVM_ATTRIBUTE_NORETURN void PrintFatalError(ArrayRef<SMLoc> ErrorLoc,
+ const std::string &Msg);
extern SourceMgr SrcMgr;
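
The TGError class and its throw/catch flow are replaced by PrintFatalError, which is marked LLVM_ATTRIBUTE_NORETURN, so callers need neither a try block nor a dummy return value. A hedged sketch of the new idiom in a backend (the Size field name is hypothetical):

    #include "llvm/TableGen/Error.h"
    #include "llvm/TableGen/Record.h"

    using namespace llvm;

    static int64_t getSize(Record *Rec) {
      if (!Rec->getValue("Size"))
        // getLoc() now returns every location (instantiation site plus
        // multiclass prototypes); PrintFatalError reports them all and exits.
        PrintFatalError(Rec->getLoc(), "record has no Size field");
      return Rec->getValueAsInt("Size");
    }
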
diff --git a/contrib/llvm/include/llvm/TableGen/Main.h b/contrib/llvm/include/llvm/TableGen/Main.h
index deaef4a..6b51e20 100644
--- a/contrib/llvm/include/llvm/TableGen/Main.h
+++ b/contrib/llvm/include/llvm/TableGen/Main.h
@@ -16,10 +16,13 @@
namespace llvm {
-class TableGenAction;
+class RecordKeeper;
+class raw_ostream;
+/// \brief Perform the action using Records, and write output to OS.
+/// \returns true on error, false otherwise
+typedef bool TableGenMainFn(raw_ostream &OS, RecordKeeper &Records);
-/// Run the table generator, performing the specified Action on parsed records.
-int TableGenMain(char *argv0, TableGenAction &Action);
+int TableGenMain(char *argv0, TableGenMainFn *MainFn);
}
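
With TableGenAction deleted (see the file removal further down), a tblgen tool is now just a function matching the TableGenMainFn typedef. A minimal driver sketch, with command-line parsing elided:

    #include "llvm/Support/raw_ostream.h"
    #include "llvm/TableGen/Main.h"
    #include "llvm/TableGen/Record.h"

    using namespace llvm;

    // Placeholder backend; returns false on success, per the typedef's contract.
    static bool MyBackend(raw_ostream &OS, RecordKeeper &Records) {
      OS << "// parsed " << Records.getDefs().size() << " defs\n";
      return false;
    }

    int main(int argc, char **argv) {
      // Real tools call cl::ParseCommandLineOptions(argc, argv) first.
      return TableGenMain(argv[0], &MyBackend);
    }
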
diff --git a/contrib/llvm/include/llvm/TableGen/Record.h b/contrib/llvm/include/llvm/TableGen/Record.h
index a8256b7..319298c 100644
--- a/contrib/llvm/include/llvm/TableGen/Record.h
+++ b/contrib/llvm/include/llvm/TableGen/Record.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
@@ -66,10 +67,27 @@ class RecordKeeper;
//===----------------------------------------------------------------------===//
class RecTy {
+public:
+ /// \brief Subclass discriminator (for dyn_cast<> et al.)
+ enum RecTyKind {
+ BitRecTyKind,
+ BitsRecTyKind,
+ IntRecTyKind,
+ StringRecTyKind,
+ ListRecTyKind,
+ DagRecTyKind,
+ RecordRecTyKind
+ };
+
+private:
+ RecTyKind Kind;
ListRecTy *ListTy;
virtual void anchor();
+
public:
- RecTy() : ListTy(0) {}
+ RecTyKind getRecTyKind() const { return Kind; }
+
+ RecTy(RecTyKind K) : Kind(K), ListTy(0) {}
virtual ~RecTy() {}
virtual std::string getAsString() const = 0;
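
RecTy now carries an explicit kind discriminator, and each subclass below gains the matching classof, so backends can use isa<> and dyn_cast<> (via the newly included Support/Casting.h) instead of dynamic_cast. A small sketch of the resulting dispatch:

    #include "llvm/TableGen/Record.h"

    using namespace llvm;

    // Width in bits for fixed-width types, -1 otherwise; illustrates the
    // new LLVM-style RTTI on the RecTy hierarchy.
    static int fixedWidth(RecTy *T) {
      if (isa<BitRecTy>(T))
        return 1;
      if (BitsRecTy *BT = dyn_cast<BitsRecTy>(T))
        return int(BT->getNumBits());
      return -1;
    }
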
@@ -132,8 +150,12 @@ inline raw_ostream &operator<<(raw_ostream &OS, const RecTy &Ty) {
///
class BitRecTy : public RecTy {
static BitRecTy Shared;
- BitRecTy() {}
+ BitRecTy() : RecTy(BitRecTyKind) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == BitRecTyKind;
+ }
+
static BitRecTy *get() { return &Shared; }
virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
@@ -152,9 +174,9 @@ public:
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const { return "bit"; }
+ virtual std::string getAsString() const { return "bit"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
virtual bool baseClassOf(const BitRecTy *RHS) const { return true; }
@@ -173,8 +195,12 @@ public:
///
class BitsRecTy : public RecTy {
unsigned Size;
- explicit BitsRecTy(unsigned Sz) : Size(Sz) {}
+ explicit BitsRecTy(unsigned Sz) : RecTy(BitsRecTyKind), Size(Sz) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == BitsRecTyKind;
+ }
+
static BitsRecTy *get(unsigned Sz);
unsigned getNumBits() const { return Size; }
@@ -195,9 +221,9 @@ public:
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const;
+ virtual std::string getAsString() const;
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
virtual bool baseClassOf(const BitRecTy *RHS) const { return Size == 1; }
@@ -217,8 +243,12 @@ public:
///
class IntRecTy : public RecTy {
static IntRecTy Shared;
- IntRecTy() {}
+ IntRecTy() : RecTy(IntRecTyKind) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == IntRecTyKind;
+ }
+
static IntRecTy *get() { return &Shared; }
virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
@@ -237,9 +267,9 @@ public:
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const { return "int"; }
+ virtual std::string getAsString() const { return "int"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
@@ -257,8 +287,12 @@ public:
///
class StringRecTy : public RecTy {
static StringRecTy Shared;
- StringRecTy() {}
+ StringRecTy() : RecTy(StringRecTyKind) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == StringRecTyKind;
+ }
+
static StringRecTy *get() { return &Shared; }
virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
@@ -278,9 +312,9 @@ public:
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const { return "string"; }
+ virtual std::string getAsString() const { return "string"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
@@ -300,9 +334,13 @@ public:
///
class ListRecTy : public RecTy {
RecTy *Ty;
- explicit ListRecTy(RecTy *T) : Ty(T) {}
+ explicit ListRecTy(RecTy *T) : RecTy(ListRecTyKind), Ty(T) {}
friend ListRecTy *RecTy::getListTy();
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == ListRecTyKind;
+ }
+
static ListRecTy *get(RecTy *T) { return T->getListTy(); }
RecTy *getElementType() const { return Ty; }
@@ -322,9 +360,9 @@ public:
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const;
+ virtual std::string getAsString() const;
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
@@ -343,8 +381,12 @@ public:
///
class DagRecTy : public RecTy {
static DagRecTy Shared;
- DagRecTy() {}
+ DagRecTy() : RecTy(DagRecTyKind) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == DagRecTyKind;
+ }
+
static DagRecTy *get() { return &Shared; }
virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
@@ -363,9 +405,9 @@ public:
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const { return "dag"; }
+ virtual std::string getAsString() const { return "dag"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
@@ -384,9 +426,13 @@ public:
///
class RecordRecTy : public RecTy {
Record *Rec;
- explicit RecordRecTy(Record *R) : Rec(R) {}
+ explicit RecordRecTy(Record *R) : RecTy(RecordRecTyKind), Rec(R) {}
friend class Record;
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == RecordRecTyKind;
+ }
+
static RecordRecTy *get(Record *R);
Record *getRecord() const { return Rec; }
@@ -407,9 +453,9 @@ public:
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const;
+ virtual std::string getAsString() const;
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
virtual bool baseClassOf(const BitRecTy *RHS) const { return false; }
@@ -431,12 +477,53 @@ RecTy *resolveTypes(RecTy *T1, RecTy *T2);
//===----------------------------------------------------------------------===//
class Init {
- Init(const Init &); // Do not define.
- Init &operator=(const Init &); // Do not define.
+protected:
+ /// \brief Discriminator enum (for isa<>, dyn_cast<>, et al.)
+ ///
+ /// This enum is laid out by a preorder traversal of the inheritance
+ /// hierarchy, and does not contain an entry for abstract classes, as per
+ /// the recommendation in docs/HowToSetUpLLVMStyleRTTI.rst.
+ ///
+ /// We also explicitly include "first" and "last" values for each
+ /// interior node of the inheritance tree, to make it easier to read the
+ /// corresponding classof().
+ ///
+ /// We could pack these a bit tighter by not having the IK_FirstXXXInit
+ /// and IK_LastXXXInit be their own values, but that would degrade
+ /// readability for really no benefit.
+ enum InitKind {
+ IK_BitInit,
+ IK_BitsInit,
+ IK_FirstTypedInit,
+ IK_DagInit,
+ IK_DefInit,
+ IK_FieldInit,
+ IK_IntInit,
+ IK_ListInit,
+ IK_FirstOpInit,
+ IK_BinOpInit,
+ IK_TernOpInit,
+ IK_UnOpInit,
+ IK_LastOpInit,
+ IK_StringInit,
+ IK_VarInit,
+ IK_VarListElementInit,
+ IK_LastTypedInit,
+ IK_UnsetInit,
+ IK_VarBitInit
+ };
+
+private:
+ const InitKind Kind;
+ Init(const Init &) LLVM_DELETED_FUNCTION;
+ Init &operator=(const Init &) LLVM_DELETED_FUNCTION;
virtual void anchor();
+public:
+ InitKind getKind() const { return Kind; }
+
protected:
- Init(void) {}
+ explicit Init(InitKind K) : Kind(K) {}
public:
virtual ~Init() {}
@@ -509,6 +596,18 @@ public:
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const {
return const_cast<Init *>(this);
}
+
+ /// getBit - This method is used to return the initializer for the specified
+ /// bit.
+ virtual Init *getBit(unsigned Bit) const = 0;
+
+ /// getBitVar - This method is used to retrieve the initializer for a bit
+ /// reference. For a non-VarBitInit, it simply returns itself.
+ virtual Init *getBitVar() const { return const_cast<Init*>(this); }
+
+ /// getBitNum - This method is used to retrieve the bit number of a bit
+ /// reference. For a non-VarBitInit, it simply returns 0.
+ virtual unsigned getBitNum() const { return 0; }
};
inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
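
getBit replaces the old per-class resolveBitReference plumbing: every Init can now hand back a single bit, with IntInit materializing a BitInit on demand and VarBitInit returning itself. A sketch under the assumption that the initializer actually has bit semantics (strings, lists, and dags still assert):

    #include <string>
    #include "llvm/TableGen/Record.h"

    using namespace llvm;

    // Render the low Width bits of an initializer, MSB first. Each element
    // prints as "1", "0", or a symbolic reference.
    static std::string bitString(Init *I, unsigned Width) {
      std::string S;
      for (unsigned i = Width; i != 0; --i)
        S += I->getBit(i - 1)->getAsString();
      return S;
    }
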
@@ -521,13 +620,17 @@ inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
class TypedInit : public Init {
RecTy *Ty;
- TypedInit(const TypedInit &Other); // Do not define.
- TypedInit &operator=(const TypedInit &Other); // Do not define.
+ TypedInit(const TypedInit &Other) LLVM_DELETED_FUNCTION;
+ TypedInit &operator=(const TypedInit &Other) LLVM_DELETED_FUNCTION;
protected:
- explicit TypedInit(RecTy *T) : Ty(T) {}
+ explicit TypedInit(InitKind K, RecTy *T) : Init(K), Ty(T) {}
public:
+ static bool classof(const Init *I) {
+ return I->getKind() >= IK_FirstTypedInit &&
+ I->getKind() <= IK_LastTypedInit;
+ }
RecTy *getType() const { return Ty; }
virtual Init *
@@ -541,13 +644,6 @@ public:
///
virtual RecTy *getFieldType(const std::string &FieldName) const;
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const = 0;
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
@@ -559,18 +655,25 @@ public:
/// UnsetInit - ? - Represents an uninitialized value
///
class UnsetInit : public Init {
- UnsetInit() : Init() {}
- UnsetInit(const UnsetInit &); // Do not define.
- UnsetInit &operator=(const UnsetInit &Other); // Do not define.
+ UnsetInit() : Init(IK_UnsetInit) {}
+ UnsetInit(const UnsetInit &) LLVM_DELETED_FUNCTION;
+ UnsetInit &operator=(const UnsetInit &Other) LLVM_DELETED_FUNCTION;
virtual void anchor();
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_UnsetInit;
+ }
static UnsetInit *get();
virtual Init *convertInitializerTo(RecTy *Ty) const {
return Ty->convertValue(const_cast<UnsetInit *>(this));
}
+ virtual Init *getBit(unsigned Bit) const {
+ return const_cast<UnsetInit*>(this);
+ }
+
virtual bool isComplete() const { return false; }
virtual std::string getAsString() const { return "?"; }
};
@@ -581,12 +684,15 @@ public:
class BitInit : public Init {
bool Value;
- explicit BitInit(bool V) : Value(V) {}
- BitInit(const BitInit &Other); // Do not define.
- BitInit &operator=(BitInit &Other); // Do not define.
+ explicit BitInit(bool V) : Init(IK_BitInit), Value(V) {}
+ BitInit(const BitInit &Other) LLVM_DELETED_FUNCTION;
+ BitInit &operator=(BitInit &Other) LLVM_DELETED_FUNCTION;
virtual void anchor();
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BitInit;
+ }
static BitInit *get(bool V);
bool getValue() const { return Value; }
@@ -595,6 +701,11 @@ public:
return Ty->convertValue(const_cast<BitInit *>(this));
}
+ virtual Init *getBit(unsigned Bit) const {
+ assert(Bit < 1 && "Bit index out of range!");
+ return const_cast<BitInit*>(this);
+ }
+
virtual std::string getAsString() const { return Value ? "1" : "0"; }
};
@@ -604,23 +715,22 @@ public:
class BitsInit : public Init, public FoldingSetNode {
std::vector<Init*> Bits;
- BitsInit(ArrayRef<Init *> Range) : Bits(Range.begin(), Range.end()) {}
+ BitsInit(ArrayRef<Init *> Range)
+ : Init(IK_BitsInit), Bits(Range.begin(), Range.end()) {}
- BitsInit(const BitsInit &Other); // Do not define.
- BitsInit &operator=(const BitsInit &Other); // Do not define.
+ BitsInit(const BitsInit &Other) LLVM_DELETED_FUNCTION;
+ BitsInit &operator=(const BitsInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BitsInit;
+ }
static BitsInit *get(ArrayRef<Init *> Range);
void Profile(FoldingSetNodeID &ID) const;
unsigned getNumBits() const { return Bits.size(); }
- Init *getBit(unsigned Bit) const {
- assert(Bit < Bits.size() && "Bit index out of range!");
- return Bits[Bit];
- }
-
virtual Init *convertInitializerTo(RecTy *Ty) const {
return Ty->convertValue(const_cast<BitsInit *>(this));
}
@@ -640,6 +750,11 @@ public:
virtual std::string getAsString() const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
+
+ virtual Init *getBit(unsigned Bit) const {
+ assert(Bit < Bits.size() && "Bit index out of range!");
+ return Bits[Bit];
+ }
};
@@ -648,12 +763,16 @@ public:
class IntInit : public TypedInit {
int64_t Value;
- explicit IntInit(int64_t V) : TypedInit(IntRecTy::get()), Value(V) {}
+ explicit IntInit(int64_t V)
+ : TypedInit(IK_IntInit, IntRecTy::get()), Value(V) {}
- IntInit(const IntInit &Other); // Do not define.
- IntInit &operator=(const IntInit &Other); // Do note define.
+ IntInit(const IntInit &Other) LLVM_DELETED_FUNCTION;
+ IntInit &operator=(const IntInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_IntInit;
+ }
static IntInit *get(int64_t V);
int64_t getValue() const { return Value; }
@@ -666,15 +785,6 @@ public:
virtual std::string getAsString() const;
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
- llvm_unreachable("Illegal bit reference off int");
- }
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
@@ -682,6 +792,10 @@ public:
unsigned Elt) const {
llvm_unreachable("Illegal element reference off int");
}
+
+ virtual Init *getBit(unsigned Bit) const {
+ return BitInit::get((Value & (1ULL << Bit)) != 0);
+ }
};
@@ -691,13 +805,16 @@ class StringInit : public TypedInit {
std::string Value;
explicit StringInit(const std::string &V)
- : TypedInit(StringRecTy::get()), Value(V) {}
+ : TypedInit(IK_StringInit, StringRecTy::get()), Value(V) {}
- StringInit(const StringInit &Other); // Do not define.
- StringInit &operator=(const StringInit &Other); // Do not define.
+ StringInit(const StringInit &Other) LLVM_DELETED_FUNCTION;
+ StringInit &operator=(const StringInit &Other) LLVM_DELETED_FUNCTION;
virtual void anchor();
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_StringInit;
+ }
static StringInit *get(StringRef);
const std::string &getValue() const { return Value; }
@@ -709,15 +826,6 @@ public:
virtual std::string getAsString() const { return "\"" + Value + "\""; }
virtual std::string getAsUnquotedString() const { return Value; }
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
- llvm_unreachable("Illegal bit reference off string");
- }
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
@@ -725,6 +833,10 @@ public:
unsigned Elt) const {
llvm_unreachable("Illegal element reference off string");
}
+
+ virtual Init *getBit(unsigned Bit) const {
+ llvm_unreachable("Illegal bit reference off string");
+ }
};
/// ListInit - [AL, AH, CL] - Represent a list of defs
@@ -736,12 +848,16 @@ public:
private:
explicit ListInit(ArrayRef<Init *> Range, RecTy *EltTy)
- : TypedInit(ListRecTy::get(EltTy)), Values(Range.begin(), Range.end()) {}
+ : TypedInit(IK_ListInit, ListRecTy::get(EltTy)),
+ Values(Range.begin(), Range.end()) {}
- ListInit(const ListInit &Other); // Do not define.
- ListInit &operator=(const ListInit &Other); // Do not define.
+ ListInit(const ListInit &Other) LLVM_DELETED_FUNCTION;
+ ListInit &operator=(const ListInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_ListInit;
+ }
static ListInit *get(ArrayRef<Init *> Range, RecTy *EltTy);
void Profile(FoldingSetNodeID &ID) const;
@@ -754,7 +870,8 @@ public:
Record *getElementAsRecord(unsigned i) const;
- Init *convertInitListSlice(const std::vector<unsigned> &Elements) const;
+ virtual Init *
+ convertInitListSlice(const std::vector<unsigned> &Elements) const;
virtual Init *convertInitializerTo(RecTy *Ty) const {
return Ty->convertValue(const_cast<ListInit *>(this));
@@ -777,33 +894,32 @@ public:
inline size_t size () const { return Values.size(); }
inline bool empty() const { return Values.empty(); }
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
- llvm_unreachable("Illegal bit reference off list");
- }
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const;
+
+ virtual Init *getBit(unsigned Bit) const {
+ llvm_unreachable("Illegal bit reference off list");
+ }
};
/// OpInit - Base class for operators
///
class OpInit : public TypedInit {
- OpInit(const OpInit &Other); // Do not define.
- OpInit &operator=(OpInit &Other); // Do not define.
+ OpInit(const OpInit &Other) LLVM_DELETED_FUNCTION;
+ OpInit &operator=(OpInit &Other) LLVM_DELETED_FUNCTION;
protected:
- explicit OpInit(RecTy *Type) : TypedInit(Type) {}
+ explicit OpInit(InitKind K, RecTy *Type) : TypedInit(K, Type) {}
public:
+ static bool classof(const Init *I) {
+ return I->getKind() >= IK_FirstOpInit &&
+ I->getKind() <= IK_LastOpInit;
+ }
// Clone - Clone this operator, replacing arguments with the new list
virtual OpInit *clone(std::vector<Init *> &Operands) const = 0;
@@ -818,10 +934,10 @@ public:
return Ty->convertValue(const_cast<OpInit *>(this));
}
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const;
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const;
+
+ virtual Init *getBit(unsigned Bit) const;
};
@@ -835,12 +951,15 @@ private:
Init *LHS;
UnOpInit(UnaryOp opc, Init *lhs, RecTy *Type)
- : OpInit(Type), Opc(opc), LHS(lhs) {}
+ : OpInit(IK_UnOpInit, Type), Opc(opc), LHS(lhs) {}
- UnOpInit(const UnOpInit &Other); // Do not define.
- UnOpInit &operator=(const UnOpInit &Other); // Do not define.
+ UnOpInit(const UnOpInit &Other) LLVM_DELETED_FUNCTION;
+ UnOpInit &operator=(const UnOpInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_UnOpInit;
+ }
static UnOpInit *get(UnaryOp opc, Init *lhs, RecTy *Type);
// Clone - Clone this operator, replacing arguments with the new list
@@ -850,8 +969,8 @@ public:
return UnOpInit::get(getOpcode(), *Operands.begin(), getType());
}
- int getNumOperands() const { return 1; }
- Init *getOperand(int i) const {
+ virtual int getNumOperands() const { return 1; }
+ virtual Init *getOperand(int i) const {
assert(i == 0 && "Invalid operand id for unary operator");
return getOperand();
}
@@ -861,7 +980,7 @@ public:
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
+ virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
@@ -878,12 +997,15 @@ private:
Init *LHS, *RHS;
BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type) :
- OpInit(Type), Opc(opc), LHS(lhs), RHS(rhs) {}
+ OpInit(IK_BinOpInit, Type), Opc(opc), LHS(lhs), RHS(rhs) {}
- BinOpInit(const BinOpInit &Other); // Do not define.
- BinOpInit &operator=(const BinOpInit &Other); // Do not define.
+ BinOpInit(const BinOpInit &Other) LLVM_DELETED_FUNCTION;
+ BinOpInit &operator=(const BinOpInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BinOpInit;
+ }
static BinOpInit *get(BinaryOp opc, Init *lhs, Init *rhs,
RecTy *Type);
@@ -894,8 +1016,8 @@ public:
return BinOpInit::get(getOpcode(), Operands[0], Operands[1], getType());
}
- int getNumOperands() const { return 2; }
- Init *getOperand(int i) const {
+ virtual int getNumOperands() const { return 2; }
+ virtual Init *getOperand(int i) const {
assert((i == 0 || i == 1) && "Invalid operand id for binary operator");
if (i == 0) {
return getLHS();
@@ -910,7 +1032,7 @@ public:
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
+ virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
@@ -928,12 +1050,15 @@ private:
TernOpInit(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs,
RecTy *Type) :
- OpInit(Type), Opc(opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
+ OpInit(IK_TernOpInit, Type), Opc(opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
- TernOpInit(const TernOpInit &Other); // Do not define.
- TernOpInit &operator=(const TernOpInit &Other); // Do not define.
+ TernOpInit(const TernOpInit &Other) LLVM_DELETED_FUNCTION;
+ TernOpInit &operator=(const TernOpInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_TernOpInit;
+ }
static TernOpInit *get(TernaryOp opc, Init *lhs,
Init *mhs, Init *rhs,
RecTy *Type);
@@ -946,8 +1071,8 @@ public:
getType());
}
- int getNumOperands() const { return 3; }
- Init *getOperand(int i) const {
+ virtual int getNumOperands() const { return 3; }
+ virtual Init *getOperand(int i) const {
assert((i == 0 || i == 1 || i == 2) &&
"Invalid operand id for ternary operator");
if (i == 0) {
@@ -966,7 +1091,7 @@ public:
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
+ virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
virtual bool isComplete() const { return false; }
@@ -982,14 +1107,17 @@ class VarInit : public TypedInit {
Init *VarName;
explicit VarInit(const std::string &VN, RecTy *T)
- : TypedInit(T), VarName(StringInit::get(VN)) {}
+ : TypedInit(IK_VarInit, T), VarName(StringInit::get(VN)) {}
explicit VarInit(Init *VN, RecTy *T)
- : TypedInit(T), VarName(VN) {}
+ : TypedInit(IK_VarInit, T), VarName(VN) {}
- VarInit(const VarInit &Other); // Do not define.
- VarInit &operator=(const VarInit &Other); // Do not define.
+ VarInit(const VarInit &Other) LLVM_DELETED_FUNCTION;
+ VarInit &operator=(const VarInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarInit;
+ }
static VarInit *get(const std::string &VN, RecTy *T);
static VarInit *get(Init *VN, RecTy *T);
@@ -1003,8 +1131,6 @@ public:
return getNameInit()->getAsUnquotedString();
}
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const;
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const;
@@ -1019,6 +1145,8 @@ public:
///
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
+ virtual Init *getBit(unsigned Bit) const;
+
virtual std::string getAsString() const { return getName(); }
};
@@ -1029,27 +1157,37 @@ class VarBitInit : public Init {
TypedInit *TI;
unsigned Bit;
- VarBitInit(TypedInit *T, unsigned B) : TI(T), Bit(B) {
- assert(T->getType() && dynamic_cast<BitsRecTy*>(T->getType()) &&
- ((BitsRecTy*)T->getType())->getNumBits() > B &&
+ VarBitInit(TypedInit *T, unsigned B) : Init(IK_VarBitInit), TI(T), Bit(B) {
+ assert(T->getType() &&
+ (isa<IntRecTy>(T->getType()) ||
+ (isa<BitsRecTy>(T->getType()) &&
+ cast<BitsRecTy>(T->getType())->getNumBits() > B)) &&
"Illegal VarBitInit expression!");
}
- VarBitInit(const VarBitInit &Other); // Do not define.
- VarBitInit &operator=(const VarBitInit &Other); // Do not define.
+ VarBitInit(const VarBitInit &Other) LLVM_DELETED_FUNCTION;
+ VarBitInit &operator=(const VarBitInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarBitInit;
+ }
static VarBitInit *get(TypedInit *T, unsigned B);
virtual Init *convertInitializerTo(RecTy *Ty) const {
return Ty->convertValue(const_cast<VarBitInit *>(this));
}
- TypedInit *getVariable() const { return TI; }
- unsigned getBitNum() const { return Bit; }
+ virtual Init *getBitVar() const { return TI; }
+ virtual unsigned getBitNum() const { return Bit; }
virtual std::string getAsString() const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
+
+ virtual Init *getBit(unsigned B) const {
+ assert(B < 1 && "Bit index out of range!");
+ return const_cast<VarBitInit*>(this);
+ }
};
/// VarListElementInit - List[4] - Represent access to one element of a var or
@@ -1059,18 +1197,20 @@ class VarListElementInit : public TypedInit {
unsigned Element;
VarListElementInit(TypedInit *T, unsigned E)
- : TypedInit(dynamic_cast<ListRecTy*>(T->getType())->getElementType()),
- TI(T), Element(E) {
- assert(T->getType() && dynamic_cast<ListRecTy*>(T->getType()) &&
+ : TypedInit(IK_VarListElementInit,
+ cast<ListRecTy>(T->getType())->getElementType()),
+ TI(T), Element(E) {
+ assert(T->getType() && isa<ListRecTy>(T->getType()) &&
"Illegal VarBitInit expression!");
}
- VarListElementInit(const VarListElementInit &Other); // Do not define.
- VarListElementInit &operator=(const VarListElementInit &Other); // Do
- // not
- // define.
+ VarListElementInit(const VarListElementInit &Other) LLVM_DELETED_FUNCTION;
+ void operator=(const VarListElementInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarListElementInit;
+ }
static VarListElementInit *get(TypedInit *T, unsigned E);
virtual Init *convertInitializerTo(RecTy *Ty) const {
@@ -1080,9 +1220,6 @@ public:
TypedInit *getVariable() const { return TI; }
unsigned getElementNum() const { return Element; }
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const;
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
@@ -1092,6 +1229,8 @@ public:
virtual std::string getAsString() const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
+
+ virtual Init *getBit(unsigned Bit) const;
};
/// DefInit - AL - Represent a reference to a 'def' in the description
@@ -1099,13 +1238,16 @@ public:
class DefInit : public TypedInit {
Record *Def;
- DefInit(Record *D, RecordRecTy *T) : TypedInit(T), Def(D) {}
+ DefInit(Record *D, RecordRecTy *T) : TypedInit(IK_DefInit, T), Def(D) {}
friend class Record;
- DefInit(const DefInit &Other); // Do not define.
- DefInit &operator=(const DefInit &Other); // Do not define.
+ DefInit(const DefInit &Other) LLVM_DELETED_FUNCTION;
+ DefInit &operator=(const DefInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_DefInit;
+ }
static DefInit *get(Record*);
virtual Init *convertInitializerTo(RecTy *Ty) const {
@@ -1122,12 +1264,7 @@ public:
virtual std::string getAsString() const;
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
+ virtual Init *getBit(unsigned Bit) const {
llvm_unreachable("Illegal bit reference off def");
}
@@ -1148,14 +1285,17 @@ class FieldInit : public TypedInit {
std::string FieldName; // Field we are accessing
FieldInit(Init *R, const std::string &FN)
- : TypedInit(R->getFieldType(FN)), Rec(R), FieldName(FN) {
+ : TypedInit(IK_FieldInit, R->getFieldType(FN)), Rec(R), FieldName(FN) {
assert(getType() && "FieldInit with non-record type!");
}
- FieldInit(const FieldInit &Other); // Do not define.
- FieldInit &operator=(const FieldInit &Other); // Do not define.
+ FieldInit(const FieldInit &Other) LLVM_DELETED_FUNCTION;
+ FieldInit &operator=(const FieldInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_FieldInit;
+ }
static FieldInit *get(Init *R, const std::string &FN);
static FieldInit *get(Init *R, const Init *FN);
@@ -1163,8 +1303,8 @@ public:
return Ty->convertValue(const_cast<FieldInit *>(this));
}
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const;
+ virtual Init *getBit(unsigned Bit) const;
+
virtual Init *resolveListElementReference(Record &R,
const RecordVal *RV,
unsigned Elt) const;
@@ -1189,14 +1329,17 @@ class DagInit : public TypedInit, public FoldingSetNode {
DagInit(Init *V, const std::string &VN,
ArrayRef<Init *> ArgRange,
ArrayRef<std::string> NameRange)
- : TypedInit(DagRecTy::get()), Val(V), ValName(VN),
+ : TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN),
Args(ArgRange.begin(), ArgRange.end()),
ArgNames(NameRange.begin(), NameRange.end()) {}
- DagInit(const DagInit &Other); // Do not define.
- DagInit &operator=(const DagInit &Other); // Do not define.
+ DagInit(const DagInit &Other) LLVM_DELETED_FUNCTION;
+ DagInit &operator=(const DagInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_DagInit;
+ }
static DagInit *get(Init *V, const std::string &VN,
ArrayRef<Init *> ArgRange,
ArrayRef<std::string> NameRange);
@@ -1243,8 +1386,7 @@ public:
inline size_t name_size () const { return ArgNames.size(); }
inline bool name_empty() const { return ArgNames.empty(); }
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
+ virtual Init *getBit(unsigned Bit) const {
llvm_unreachable("Illegal bit reference off dag");
}
@@ -1301,7 +1443,9 @@ class Record {
// Unique record ID.
unsigned ID;
Init *Name;
- SMLoc Loc;
+ // Location where the record was instantiated, followed by the locations of
+ // the multiclass prototypes used.
+ SmallVector<SMLoc, 4> Locs;
std::vector<Init *> TemplateArgs;
std::vector<RecordVal> Values;
std::vector<Record*> SuperClasses;
@@ -1317,15 +1461,25 @@ class Record {
public:
// Constructs a record.
- explicit Record(const std::string &N, SMLoc loc, RecordKeeper &records) :
- ID(LastID++), Name(StringInit::get(N)), Loc(loc), TrackedRecords(records),
- TheInit(0) {
+ explicit Record(const std::string &N, ArrayRef<SMLoc> locs,
+ RecordKeeper &records) :
+ ID(LastID++), Name(StringInit::get(N)), Locs(locs.begin(), locs.end()),
+ TrackedRecords(records), TheInit(0) {
init();
}
- explicit Record(Init *N, SMLoc loc, RecordKeeper &records) :
- ID(LastID++), Name(N), Loc(loc), TrackedRecords(records), TheInit(0) {
+ explicit Record(Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records) :
+ ID(LastID++), Name(N), Locs(locs.begin(), locs.end()),
+ TrackedRecords(records), TheInit(0) {
init();
}
+
+ // When copy-constructing a Record, we must still guarantee a globally unique
+ // ID number. All other fields can be copied normally.
+ Record(const Record &O) :
+ ID(LastID++), Name(O.Name), Locs(O.Locs), TemplateArgs(O.TemplateArgs),
+ Values(O.Values), SuperClasses(O.SuperClasses),
+ TrackedRecords(O.TrackedRecords), TheInit(O.TheInit) { }
+
~Record() {}
@@ -1345,7 +1499,7 @@ public:
void setName(Init *Name); // Also updates RecordKeeper.
void setName(const std::string &Name); // Also updates RecordKeeper.
- SMLoc getLoc() const { return Loc; }
+ ArrayRef<SMLoc> getLoc() const { return Locs; }
/// get the corresponding DefInit.
DefInit *getDefInit();
@@ -1507,6 +1661,12 @@ public:
///
bool getValueAsBit(StringRef FieldName) const;
+ /// getValueAsBitOrUnset - This method looks up the specified field and
+ /// returns its value as a bit. If the field is unset, sets Unset to true and
+ /// returns false.
+ ///
+ bool getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const;
+
/// getValueAsInt - This method looks up the specified field and returns its
/// value as an int64_t, throwing an exception if the field does not exist or
/// if the value is not the right type.
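
getValueAsBitOrUnset exists because bits may now legitimately stay '?' (see the mayLoad/mayStore/hasSideEffects change in Target.td below), and backends must tell unset apart from an explicit 0. A usage sketch, treating the field name as illustrative:

    #include "llvm/TableGen/Record.h"

    using namespace llvm;

    static void readMayLoad(const Record &R) {
      bool Unset = false;
      bool V = R.getValueAsBitOrUnset("mayLoad", Unset);
      if (Unset) {
        // Field was '?': infer from the pattern or report an error.
      } else if (V) {
        // Explicitly set to 1.
      }
    }
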
@@ -1601,6 +1761,16 @@ struct LessRecord {
}
};
+/// LessRecordByID - Sorting predicate to sort record pointers by their
+/// unique ID. If you just need a deterministic order, use this, since it
+/// just compares two `unsigned`; the other sorting predicates require
+/// string manipulation.
+struct LessRecordByID {
+ bool operator()(const Record *LHS, const Record *RHS) const {
+ return LHS->getID() < RHS->getID();
+ }
+};
+
/// LessRecordFieldName - Sorting predicate to sort record pointers by their
/// name field.
///
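
LessRecordByID gives a deterministic order at a fraction of the cost of the name-based predicates, since record IDs are plain unsigneds assigned at creation. Sketch:

    #include <algorithm>
    #include <vector>
    #include "llvm/TableGen/Record.h"

    using namespace llvm;

    // Deterministic, string-free ordering for a backend worklist.
    static void sortById(std::vector<Record *> &Defs) {
      std::sort(Defs.begin(), Defs.end(), LessRecordByID());
    }
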
diff --git a/contrib/llvm/include/llvm/TableGen/TableGenAction.h b/contrib/llvm/include/llvm/TableGen/TableGenAction.h
deleted file mode 100644
index 733ae62..0000000
--- a/contrib/llvm/include/llvm/TableGen/TableGenAction.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//===- llvm/TableGen/TableGenAction.h - defines TableGenAction --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the TableGenAction base class to be derived from by
-// tblgen tools.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TABLEGEN_TABLEGENACTION_H
-#define LLVM_TABLEGEN_TABLEGENACTION_H
-
-namespace llvm {
-
-class raw_ostream;
-class RecordKeeper;
-
-class TableGenAction {
- virtual void anchor();
-public:
- virtual ~TableGenAction() {}
-
- /// Perform the action using Records, and write output to OS.
- /// @returns true on error, false otherwise
- virtual bool operator()(raw_ostream &OS, RecordKeeper &Records) = 0;
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/include/llvm/Target/Mangler.h b/contrib/llvm/include/llvm/Target/Mangler.h
index d5e165e..a50f54a 100644
--- a/contrib/llvm/include/llvm/Target/Mangler.h
+++ b/contrib/llvm/include/llvm/Target/Mangler.h
@@ -22,7 +22,7 @@ class GlobalValue;
template <typename T> class SmallVectorImpl;
class MCContext;
class MCSymbol;
-class TargetData;
+class DataLayout;
class Mangler {
public:
@@ -34,7 +34,7 @@ public:
private:
MCContext &Context;
- const TargetData &TD;
+ const DataLayout &TD;
/// AnonGlobalIDs - We need to give global values the same name every time
/// they are mangled. This keeps track of the number we give to anonymous
@@ -47,20 +47,19 @@ private:
unsigned NextAnonGlobalID;
public:
- Mangler(MCContext &context, const TargetData &td)
+ Mangler(MCContext &context, const DataLayout &td)
: Context(context), TD(td), NextAnonGlobalID(1) {}
/// getSymbol - Return the MCSymbol for the specified global value. This
/// symbol is the main label that is the address of the global.
MCSymbol *getSymbol(const GlobalValue *GV);
-
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified global variable's name. If the global variable doesn't
/// have a name, this fills in a unique name for the global.
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
bool isImplicitlyPrivate);
-
+
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified name as the global variable name. GVName must not be
/// empty.
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
index 1816445..12f5c0e 100644
--- a/contrib/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -343,8 +343,8 @@ class Instruction {
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
- bit mayLoad = 0; // Is it possible for this inst to read memory?
- bit mayStore = 0; // Is it possible for this inst to write memory?
+ bit mayLoad = ?; // Is it possible for this inst to read memory?
+ bit mayStore = ?; // Is it possible for this inst to write memory?
bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote?
bit isCommutable = 0; // Is this 3 operand instruction commutable?
bit isTerminator = 0; // Is this part of the terminator for a basic block?
@@ -369,7 +369,7 @@ class Instruction {
//
// neverHasSideEffects - Set on an instruction with no pattern if it has no
// side effects.
- bit hasSideEffects = 0;
+ bit hasSideEffects = ?;
bit neverHasSideEffects = 0;
// Is this instruction a "real" instruction (with a distinct machine
@@ -495,7 +495,8 @@ def ptr_rc : PointerLikeRegClass<0>;
/// unknown definition - Mark this operand as being of unknown type, causing
/// it to be resolved by inference in the context it is used.
-def unknown;
+class unknown_class;
+def unknown : unknown_class;
/// AsmOperandClass - Representation for the kinds of operands which the target
/// specific parser can create and the assembly matcher may need to distinguish.
@@ -602,23 +603,31 @@ def f64imm : Operand<f64>;
///
def zero_reg;
+/// OperandWithDefaultOps - This Operand class can be used as the parent class
+/// for an Operand that needs to be initialized with a default value if
+/// no value is supplied in a pattern. This class can be used to simplify the
+/// pattern definitions for instructions that have target specific flags
+/// encoded as immediate operands.
+class OperandWithDefaultOps<ValueType ty, dag defaultops>
+ : Operand<ty> {
+ dag DefaultOps = defaultops;
+}
+
/// PredicateOperand - This can be used to define a predicate operand for an
/// instruction. OpTypes specifies the MIOperandInfo for the operand, and
/// AlwaysVal specifies the value of this predicate when set to "always
/// execute".
class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal>
- : Operand<ty> {
+ : OperandWithDefaultOps<ty, AlwaysVal> {
let MIOperandInfo = OpTypes;
- dag DefaultOps = AlwaysVal;
}
/// OptionalDefOperand - This is used to define an optional definition operand
/// for an instruction. DefaultOps is the register the operand represents if
/// none is supplied, e.g. zero_reg.
class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops>
- : Operand<ty> {
+ : OperandWithDefaultOps<ty, defaultops> {
let MIOperandInfo = OpTypes;
- dag DefaultOps = defaultops;
}
@@ -631,6 +640,17 @@ class InstrInfo {
// Sparc manual specifies its instructions in the format [31..0] (big), while
// PowerPC specifies them using the format [0..31] (little).
bit isLittleEndianEncoding = 0;
+
+ // The instruction properties mayLoad, mayStore, and hasSideEffects are unset
+ // by default, and TableGen will infer their value from the instruction
+ // pattern when possible.
+ //
+ // Normally, TableGen will issue an error if it can't infer the value of a
+ // property that hasn't been set explicitly. When guessInstructionProperties
+ // is set, it will guess a safe value instead.
+ //
+ // This option is a temporary migration help. It will go away.
+ bit guessInstructionProperties = 1;
}
// Standard Pseudo Instructions.
@@ -734,6 +754,18 @@ def BUNDLE : Instruction {
let InOperandList = (ins variable_ops);
let AsmString = "BUNDLE";
}
+def LIFETIME_START : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "LIFETIME_START";
+ let neverHasSideEffects = 1;
+}
+def LIFETIME_END : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "LIFETIME_END";
+ let neverHasSideEffects = 1;
+}
}
//===----------------------------------------------------------------------===//
@@ -753,6 +785,10 @@ class AsmParser {
// function of the AsmParser class to call on every matched instruction.
// This can be used to perform target specific instruction post-processing.
string AsmParserInstCleanup = "";
+
+ // ShouldEmitMatchRegisterName - Set to false if the target needs a
+ // hand-written register name matcher.
+ bit ShouldEmitMatchRegisterName = 1;
}
def DefaultAsmParser : AsmParser;
@@ -953,12 +989,64 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
// ProcessorModel allows subtargets to specify the more general
// SchedMachineModel instead of a ProcessorItinerary. Subtargets will
// gradually move to this newer form.
+//
+// Although this class always passes NoItineraries to the Processor
+// class, the SchedMachineModel may still define valid Itineraries.
class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f>
: Processor<n, NoItineraries, f> {
let SchedModel = m;
}
//===----------------------------------------------------------------------===//
+// InstrMapping - This class is used to create mapping tables to relate
+// instructions with each other based on the values specified in RowFields,
+// ColFields, KeyCol and ValueCols.
+//
+class InstrMapping {
+ // FilterClass - Used to limit search space only to the instructions that
+ // define the relationship modeled by this InstrMapping record.
+ string FilterClass;
+
+ // RowFields - List of fields/attributes that should be same for all the
+ // instructions in a row of the relation table. Think of this as a set of
+ // properties shared by all the instructions related by this relationship
+ // model; it is used to categorize instructions into subgroups. For instance,
+ // if we want to define a relation that maps the 'Add' instruction to its
+ // predicated forms, we can define RowFields like this:
+ //
+ // let RowFields = BaseOp
+ // All add instructions, predicated or non-predicated, will have to set their BaseOp
+ // to the same value.
+ //
+ // def Add: { let BaseOp = 'ADD'; let predSense = 'nopred' }
+ // def Add_predtrue: { let BaseOp = 'ADD'; let predSense = 'true' }
+ // def Add_predfalse: { let BaseOp = 'ADD'; let predSense = 'false' }
+ list<string> RowFields = [];
+
+ // List of fields/attributes that are same for all the instructions
+ // in a column of the relation table.
+ // Ex: let ColFields = 'predSense' -- It means that the columns are arranged
+ // based on the 'predSense' values. All the instruction in a specific
+ // column have the same value and it is fixed for the column according
+ // to the values set in 'ValueCols'.
+ list<string> ColFields = [];
+
+ // Values for the fields/attributes listed in 'ColFields'.
+ // Ex: let KeyCol = 'nopred' -- It means that the key instruction (instruction
+ // that models this relation) should be non-predicated.
+ // In the example above, 'Add' is the key instruction.
+ list<string> KeyCol = [];
+
+ // List of values for the fields/attributes listed in 'ColFields', one for
+ // each column in the relation table.
+ //
+ // Ex: let ValueCols = [['true'],['false']] -- It adds two columns in the
+ // table. The first column requires all the instructions to have predSense
+ // set to 'true', and the second column requires it to be 'false'.
+ list<list<string> > ValueCols = [];
+}
+
+//===----------------------------------------------------------------------===//
// Pull in the common support for calling conventions.
//
include "llvm/Target/TargetCallingConv.td"
diff --git a/contrib/llvm/include/llvm/Target/TargetCallingConv.h b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
index f8cebef..2160e37 100644
--- a/contrib/llvm/include/llvm/Target/TargetCallingConv.h
+++ b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
@@ -113,9 +113,18 @@ namespace ISD {
MVT VT;
bool Used;
+ /// Index of the original Function's argument.
+ unsigned OrigArgIndex;
+
+ /// Offset in bytes of the current input value relative to the beginning
+ /// of the original argument. E.g. if the argument was split into four
+ /// 32-bit registers, we get 4 InputArgs with PartOffsets 0, 4, 8 and 12.
+ unsigned PartOffset;
+
InputArg() : VT(MVT::Other), Used(false) {}
- InputArg(ArgFlagsTy flags, EVT vt, bool used)
- : Flags(flags), Used(used) {
+ InputArg(ArgFlagsTy flags, EVT vt, bool used,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
VT = vt.getSimpleVT();
}
};
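// Illustration of the new fields (a sketch, not code from this patch):
// an i128 argument split into four i32 parts could be described as
//
//   for (unsigned i = 0; i != 4; ++i)
//     Ins.push_back(ISD::InputArg(Flags, MVT::i32, /*used=*/true,
//                                 /*origIdx=*/ArgNo, /*partOffs=*/i * 4));
//
// where 'Flags' and 'ArgNo' are assumed to come from the surrounding
// argument lowering code.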
@@ -131,9 +140,19 @@ namespace ISD {
/// IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
bool IsFixed;
+ /// Index of the original Function's argument.
+ unsigned OrigArgIndex;
+
+ /// Offset in bytes of the current output value relative to the beginning
+ /// of the original argument. E.g. if the argument was split into four
+ /// 32-bit registers, we get 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
+ unsigned PartOffset;
+
OutputArg() : IsFixed(false) {}
- OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed)
- : Flags(flags), IsFixed(isfixed) {
+ OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
+ PartOffset(partOffs) {
VT = vt.getSimpleVT();
}
};
diff --git a/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h b/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
deleted file mode 100644
index 5e48629..0000000
--- a/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- llvm/Target/TargetELFWriterInfo.h - ELF Writer Info -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the TargetELFWriterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETELFWRITERINFO_H
-#define LLVM_TARGET_TARGETELFWRITERINFO_H
-
-namespace llvm {
-
- //===--------------------------------------------------------------------===//
- // TargetELFWriterInfo
- //===--------------------------------------------------------------------===//
-
- class TargetELFWriterInfo {
- protected:
- // EMachine - This field is the target specific value to emit as the
- // e_machine member of the ELF header.
- unsigned short EMachine;
- bool is64Bit, isLittleEndian;
- public:
-
- // Machine architectures
- enum MachineType {
- EM_NONE = 0, // No machine
- EM_M32 = 1, // AT&T WE 32100
- EM_SPARC = 2, // SPARC
- EM_386 = 3, // Intel 386
- EM_68K = 4, // Motorola 68000
- EM_88K = 5, // Motorola 88000
- EM_486 = 6, // Intel 486 (deprecated)
- EM_860 = 7, // Intel 80860
- EM_MIPS = 8, // MIPS R3000
- EM_PPC = 20, // PowerPC
- EM_ARM = 40, // ARM
- EM_ALPHA = 41, // DEC Alpha
- EM_SPARCV9 = 43, // SPARC V9
- EM_X86_64 = 62, // AMD64
- EM_HEXAGON = 164 // Qualcomm Hexagon
- };
-
- // ELF File classes
- enum {
- ELFCLASS32 = 1, // 32-bit object file
- ELFCLASS64 = 2 // 64-bit object file
- };
-
- // ELF Endianess
- enum {
- ELFDATA2LSB = 1, // Little-endian object file
- ELFDATA2MSB = 2 // Big-endian object file
- };
-
- explicit TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
- virtual ~TargetELFWriterInfo();
-
- unsigned short getEMachine() const { return EMachine; }
- unsigned getEFlags() const { return 0; }
- unsigned getEIClass() const { return is64Bit ? ELFCLASS64 : ELFCLASS32; }
- unsigned getEIData() const {
- return isLittleEndian ? ELFDATA2LSB : ELFDATA2MSB;
- }
-
- /// ELF Header and ELF Section Header Info
- unsigned getHdrSize() const { return is64Bit ? 64 : 52; }
- unsigned getSHdrSize() const { return is64Bit ? 64 : 40; }
-
- /// Symbol Table Info
- unsigned getSymTabEntrySize() const { return is64Bit ? 24 : 16; }
-
- /// getPrefELFAlignment - Returns the preferred alignment for ELF. This
- /// is used to align some sections.
- unsigned getPrefELFAlignment() const { return is64Bit ? 8 : 4; }
-
- /// getRelocationEntrySize - Entry size used in the relocation section
- unsigned getRelocationEntrySize() const {
- return is64Bit ? (hasRelocationAddend() ? 24 : 16)
- : (hasRelocationAddend() ? 12 : 8);
- }
-
- /// getRelocationType - Returns the target specific ELF Relocation type.
- /// 'MachineRelTy' contains the object code independent relocation type
- virtual unsigned getRelocationType(unsigned MachineRelTy) const = 0;
-
- /// hasRelocationAddend - True if the target uses an addend in the
- /// ELF relocation entry.
- virtual bool hasRelocationAddend() const = 0;
-
- /// getDefaultAddendForRelTy - Gets the default addend value for a
- /// relocation entry based on the target ELF relocation type.
- virtual long int getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier = 0) const = 0;
-
- /// getRelTySize - Returns the size of relocatable field in bits
- virtual unsigned getRelocationTySize(unsigned RelTy) const = 0;
-
- /// isPCRelativeRel - True if the relocation type is pc relative
- virtual bool isPCRelativeRel(unsigned RelTy) const = 0;
-
- /// getJumpTableRelocationTy - Returns the machine relocation type used
- /// to reference a jumptable.
- virtual unsigned getAbsoluteLabelMachineRelTy() const = 0;
-
- /// computeRelocation - Some relocatable fields could be relocated
- /// directly, avoiding the relocation symbol emission, compute the
- /// final relocation value for this symbol.
- virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
- unsigned RelTy) const = 0;
- };
-
-} // end llvm namespace
-
-#endif // LLVM_TARGET_TARGETELFWRITERINFO_H
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
index da30ab8..4570813 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -45,8 +45,8 @@ template<class T> class SmallVectorImpl;
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
- TargetInstrInfo(const TargetInstrInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetInstrInfo &); // DO NOT IMPLEMENT
+ TargetInstrInfo(const TargetInstrInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetInstrInfo &) LLVM_DELETED_FUNCTION;
public:
TargetInstrInfo(int CFSetupOpcode = -1, int CFDestroyOpcode = -1)
: CallFrameSetupOpcode(CFSetupOpcode),
@@ -459,6 +459,13 @@ public:
}
/// copyPhysReg - Emit instructions to copy a pair of physical registers.
+ ///
+ /// This function should support copies within any legal register class as
+ /// well as any cross-class copies created during instruction selection.
+ ///
+ /// The source and destination registers may overlap, which may require a
+ /// careful implementation when multiple copy instructions are required for
+ /// large registers. See for example the ARM target.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
@@ -794,29 +801,6 @@ public:
const MachineInstr *UseMI, unsigned UseIdx,
bool FindMin = false) const;
- /// computeOperandLatency - Compute and return the latency of the given data
- /// dependent def and use. DefMI must be a valid def. UseMI may be NULL for
- /// an unknown use. If the subtarget allows, this may or may not need to call
- /// getOperandLatency().
- ///
- /// FindMin may be set to get the minimum vs. expected latency. Minimum
- /// latency is used for scheduling groups, while expected latency is for
- /// instruction cost and critical path.
- unsigned computeOperandLatency(const InstrItineraryData *ItinData,
- const TargetRegisterInfo *TRI,
- const MachineInstr *DefMI,
- const MachineInstr *UseMI,
- unsigned Reg, bool FindMin) const;
-
- /// getOutputLatency - Compute and return the output dependency latency of a
- /// a given pair of defs which both target the same register. This is usually
- /// one.
- virtual unsigned getOutputLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *DepMI) const {
- return 1;
- }
-
/// getInstrLatency - Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
@@ -831,6 +815,9 @@ public:
unsigned defaultDefLatency(const MCSchedModel *SchedModel,
const MachineInstr *DefMI) const;
+ int computeDefOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, bool FindMin) const;
+
/// isHighLatencyDef - Return true if this opcode has high latency to its
/// result.
virtual bool isHighLatencyDef(int opc) const { return false; }
diff --git a/contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h b/contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h
index c44b923..ce21349 100644
--- a/contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
#define LLVM_TARGET_TARGETINTRINSICINFO_H
+#include "llvm/Support/Compiler.h"
#include <string>
namespace llvm {
@@ -27,8 +28,8 @@ class Type;
/// TargetIntrinsicInfo - Interface to description of machine instruction set
///
class TargetIntrinsicInfo {
- TargetIntrinsicInfo(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT
+ TargetIntrinsicInfo(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION;
public:
TargetIntrinsicInfo();
virtual ~TargetIntrinsicInfo();
diff --git a/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h b/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
index ea2874f..a2c97d7 100644
--- a/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
@@ -18,6 +18,26 @@ namespace llvm {
namespace LibFunc {
enum Func {
+ /// void operator delete[](void*);
+ ZdaPv,
+ /// void operator delete(void*);
+ ZdlPv,
+ /// void *new[](unsigned int);
+ Znaj,
+ /// void *new[](unsigned int, nothrow);
+ ZnajRKSt9nothrow_t,
+ /// void *new[](unsigned long);
+ Znam,
+ /// void *new[](unsigned long, nothrow);
+ ZnamRKSt9nothrow_t,
+ /// void *new(unsigned int);
+ Znwj,
+ /// void *new(unsigned int, nothrow);
+ ZnwjRKSt9nothrow_t,
+ /// void *new(unsigned long);
+ Znwm,
+ /// void *new(unsigned long, nothrow);
+ ZnwmRKSt9nothrow_t,
/// int __cxa_atexit(void (*f)(void *), void *p, void *d);
cxa_atexit,
/// void __cxa_guard_abort(guard_t *guard);
@@ -33,12 +53,24 @@ namespace llvm {
acos,
/// float acosf(float x);
acosf,
+ /// double acosh(double x);
+ acosh,
+ /// float acoshf(float x);
+ acoshf,
+ /// long double acoshl(long double x);
+ acoshl,
/// long double acosl(long double x);
acosl,
/// double asin(double x);
asin,
/// float asinf(float x);
asinf,
+ /// double asinh(double x);
+ asinh,
+ /// float asinhf(float x);
+ asinhf,
+ /// long double asinhl(long double x);
+ asinhl,
/// long double asinl(long double x);
asinl,
/// double atan(double x);
@@ -51,8 +83,22 @@ namespace llvm {
atan2l,
/// float atanf(float x);
atanf,
+ /// double atanh(double x);
+ atanh,
+ /// float atanhf(float x);
+ atanhf,
+ /// long double atanhl(long double x);
+ atanhl,
/// long double atanl(long double x);
atanl,
+ /// void *calloc(size_t count, size_t size);
+ calloc,
+ /// double cbrt(double x);
+ cbrt,
+ /// float cbrtf(float x);
+ cbrtf,
+ /// long double cbrtl(long double x);
+ cbrtl,
/// double ceil(double x);
ceil,
/// float ceilf(float x);
@@ -79,6 +125,12 @@ namespace llvm {
cosl,
/// double exp(double x);
exp,
+ /// double exp10(double x);
+ exp10,
+ /// float exp10f(float x);
+ exp10f,
+ /// long double exp10l(long double x);
+ exp10l,
/// double exp2(double x);
exp2,
/// float exp2f(float x);
@@ -119,6 +171,8 @@ namespace llvm {
fputc,
/// int fputs(const char *s, FILE *stream);
fputs,
+ /// void free(void *ptr);
+ free,
/// size_t fwrite(const void *ptr, size_t size, size_t nitems,
/// FILE *stream);
fwrite,
@@ -144,10 +198,18 @@ namespace llvm {
log2f,
/// long double log2l(long double x);
log2l,
+ /// double logb(double x);
+ logb,
+ /// float logbf(float x);
+ logbf,
+ /// long double logbl(long double x);
+ logbl,
/// float logf(float x);
logf,
/// long double logl(long double x);
logl,
+ /// void *malloc(size_t size);
+ malloc,
/// void *memchr(const void *s, int c, size_t n);
memchr,
/// int memcmp(const void *s1, const void *s2, size_t n);
@@ -166,6 +228,8 @@ namespace llvm {
nearbyintf,
/// long double nearbyintl(long double x);
nearbyintl,
+ /// int posix_memalign(void **memptr, size_t alignment, size_t size);
+ posix_memalign,
/// double pow(double x, double y);
pow,
/// float powf(float x, float y);
@@ -176,6 +240,10 @@ namespace llvm {
putchar,
/// int puts(const char *s);
puts,
+ /// void *realloc(void *ptr, size_t size);
+ realloc,
+ /// void *reallocf(void *ptr, size_t size);
+ reallocf,
/// double rint(double x);
rint,
/// float rintf(float x);
@@ -208,12 +276,20 @@ namespace llvm {
sqrtf,
/// long double sqrtl(long double x);
sqrtl,
+ /// char *stpcpy(char *s1, const char *s2);
+ stpcpy,
/// char *strcat(char *s1, const char *s2);
strcat,
/// char *strchr(const char *s, int c);
strchr,
+ /// int strcmp(const char *s1, const char *s2);
+ strcmp,
/// char *strcpy(char *s1, const char *s2);
strcpy,
+ /// size_t strcspn(const char *s1, const char *s2);
+ strcspn,
+ /// char *strdup(const char *s1);
+ strdup,
/// size_t strlen(const char *s);
strlen,
/// char *strncat(char *s1, const char *s2, size_t n);
@@ -222,8 +298,33 @@ namespace llvm {
strncmp,
/// char *strncpy(char *s1, const char *s2, size_t n);
strncpy,
+ /// char *strndup(const char *s1, size_t n);
+ strndup,
/// size_t strnlen(const char *s, size_t maxlen);
strnlen,
+ /// char *strpbrk(const char *s1, const char *s2);
+ strpbrk,
+ /// char *strrchr(const char *s, int c);
+ strrchr,
+ /// size_t strspn(const char *s1, const char *s2);
+ strspn,
+ /// char *strstr(const char *s1, const char *s2);
+ strstr,
+ /// double strtod(const char *nptr, char **endptr);
+ strtod,
+ /// float strtof(const char *nptr, char **endptr);
+ strtof,
+ /// long int strtol(const char *nptr, char **endptr, int base);
+ strtol,
+ /// long double strtold(const char *nptr, char **endptr);
+ strtold,
+ /// long long int strtoll(const char *nptr, char **endptr, int base);
+ strtoll,
+ /// unsigned long int strtoul(const char *nptr, char **endptr, int base);
+ strtoul,
+ /// unsigned long long int strtoull(const char *nptr, char **endptr,
+ /// int base);
+ strtoull,
/// double tan(double x);
tan,
/// float tanf(float x);
@@ -242,6 +343,8 @@ namespace llvm {
truncf,
/// long double truncl(long double x);
truncl,
+ /// void *valloc(size_t size);
+ valloc,
NumLibFuncs
};
diff --git a/contrib/llvm/include/llvm/Target/TargetLowering.h b/contrib/llvm/include/llvm/Target/TargetLowering.h
index acf0419..580a30f 100644
--- a/contrib/llvm/include/llvm/Target/TargetLowering.h
+++ b/contrib/llvm/include/llvm/Target/TargetLowering.h
@@ -22,9 +22,11 @@
#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H
+#include "llvm/AddressingMode.h"
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
@@ -49,7 +51,7 @@ namespace llvm {
class MCContext;
class MCExpr;
template<typename T> class SmallVectorImpl;
- class TargetData;
+ class DataLayout;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetLoweringObjectFile;
@@ -76,8 +78,8 @@ namespace llvm {
/// target-specific constructs to SelectionDAG operators.
///
class TargetLowering {
- TargetLowering(const TargetLowering&); // DO NOT IMPLEMENT
- void operator=(const TargetLowering&); // DO NOT IMPLEMENT
+ TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
public:
/// LegalizeAction - This enum indicates whether operations are valid for a
/// target, and if not, what action should be used to make them valid.
@@ -101,12 +103,24 @@ public:
TypeWidenVector // This vector should be widened into a larger vector.
};
+ /// LegalizeKind holds the legalization kind that needs to happen to an EVT
+ /// in order to type-legalize it.
+ typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
+
enum BooleanContent { // How the target represents true/false values.
UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
ZeroOrOneBooleanContent, // All bits zero except for bit 0.
ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
};
+ enum SelectSupportKind {
+ ScalarValSelect, // The target supports scalar selects (ex: cmov).
+ ScalarCondVectorVal, // The target supports selects with a scalar condition
+ // and vector values (ex: cmov).
+ VectorMaskSelect // The target supports vector selects with a vector
+ // mask (ex: x86 blends).
+ };
+
static ISD::NodeType getExtendForContent(BooleanContent Content) {
switch (Content) {
case UndefinedBooleanContent:
@@ -128,22 +142,37 @@ public:
virtual ~TargetLowering();
const TargetMachine &getTargetMachine() const { return TM; }
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
- MVT getPointerTy() const { return PointerTy; }
+ // Return the pointer type for the given address space; defaults to
+ // the pointer type from the data layout.
+ // FIXME: The default needs to be removed once all the code is updated.
+ virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; }
virtual MVT getShiftAmountTy(EVT LHSTy) const;
/// isSelectExpensive - Return true if the select operation is expensive for
/// this target.
bool isSelectExpensive() const { return SelectIsExpensive; }
+ virtual bool isSelectSupported(SelectSupportKind kind) const { return true; }
+
/// isIntDivCheap() - Return true if integer divide is usually cheaper than
/// a sequence of several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }
+ /// isSlowDivBypassed - Returns true if the target has indicated that at
+ /// least one type should be bypassed.
+ bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
+
+ /// getBypassSlowDivWidths - Returns the map of slow bit widths for
+ /// division or remainder to the corresponding fast bit widths.
+ const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
+ return BypassSlowDivWidths;
+ }
+
/// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
/// srl/add/sra.
bool isPow2DivCheap() const { return Pow2DivIsCheap; }
@@ -382,6 +411,13 @@ public:
getOperationAction(Op, VT) == Custom);
}
+ /// isOperationExpand - Return true if the specified operation is illegal on
+ /// this target or unlikely to be made legal with custom lowering. This is
+ /// used to help guide high-level lowering decisions.
+ bool isOperationExpand(unsigned Op, EVT VT) const {
+ return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
+ }
+
/// isOperationLegal - Return true if the specified operation is legal on this
/// target.
bool isOperationLegal(unsigned Op, EVT VT) const {
@@ -475,8 +511,12 @@ public:
assert((unsigned)CC < array_lengthof(CondCodeActions) &&
(unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 &&
"Table isn't big enough!");
+ /// The lower 5 bits of SimpleTy select the Nth 2-bit chunk within a
+ /// 64-bit value, and the upper 27 bits index into the second dimension
+ /// of the array to select which 64-bit value to use.
LegalizeAction Action = (LegalizeAction)
- ((CondCodeActions[CC] >> (2*VT.getSimpleVT().SimpleTy)) & 3);
+ ((CondCodeActions[CC][VT.getSimpleVT().SimpleTy >> 5]
+ >> (2*(VT.getSimpleVT().SimpleTy & 0x1F))) & 3);
assert(Action != Promote && "Can't promote condition code!");
return Action;
}
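// Worked example of the packing above, assuming SimpleTy == 37:
//   37 >> 5 == 1          selects the second uint64_t for this CC, and
//   (37 & 0x1F) * 2 == 10 is the bit offset of the 2-bit action there.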
@@ -533,6 +573,7 @@ public:
}
return EVT::getEVT(Ty, AllowUnknown);
}
+
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
@@ -686,6 +727,12 @@ public:
return SupportJumpTables;
}
+ /// getMinimumJumpTableEntries - Return the integer threshold on the
+ /// number of blocks at which to use a jump table rather than an if
+ /// sequence.
+ int getMinimumJumpTableEntries() const {
+ return MinimumJumpTableEntries;
+ }
+
/// getStackPointerRegisterToSaveRestore - If a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1006,6 +1053,12 @@ protected:
SupportJumpTables = Val;
}
+ /// setMinimumJumpTableEntries - Indicate the minimum number of blocks
+ /// at which to generate jump tables rather than an if sequence.
+ void setMinimumJumpTableEntries(int Val) {
+ MinimumJumpTableEntries = Val;
+ }
+
/// setStackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1045,6 +1098,11 @@ protected:
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
+ /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass.
+ void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
+ BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
+ }
+
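// Sketch of intended use in a target's TargetLowering constructor (the
// particular widths are an assumption, not mandated by this interface):
//
//   addBypassSlowDiv(32, 8); // try an 8-bit div/rem before 32-bit div/rem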
/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
@@ -1127,8 +1185,13 @@ protected:
assert(VT < MVT::LAST_VALUETYPE &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
- CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
- CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.SimpleTy*2;
+ /// The lower 5 bits of SimpleTy select the Nth 2-bit chunk within a
+ /// 64-bit value, and the upper 27 bits index into the second dimension
+ /// of the array to select which 64-bit value to use.
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
+ &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
+ |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
}
/// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
@@ -1201,7 +1264,7 @@ protected:
public:
//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
- // the SelectionDAGLowering code knows how to lower these.
+ // the SelectionDAGBuilder code knows how to lower these.
//
/// LowerFormalArguments - This hook must be implemented to lower the
@@ -1271,9 +1334,9 @@ public:
FunctionType *FTy, bool isTailCall, SDValue callee,
ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
ImmutableCallSite &cs)
- : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
- RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
- IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
+ : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attributes::SExt)),
+ RetZExt(cs.paramHasAttr(0, Attributes::ZExt)), IsVarArg(FTy->isVarArg()),
+ IsInReg(cs.paramHasAttr(0, Attributes::InReg)),
DoesNotReturn(cs.doesNotReturn()),
IsReturnValueUsed(!cs.getInstruction()->use_empty()),
IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
@@ -1314,7 +1377,7 @@ public:
}
/// HandleByVal - Target-specific cleanup for formal ByVal parameters.
- virtual void HandleByVal(CCState *, unsigned &) const {}
+ virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
/// CanLowerReturn - This hook should be implemented to check whether the
/// return values described by the Outs array can fit into the return
@@ -1584,22 +1647,6 @@ public:
// Addressing mode description hooks (used by LSR etc).
//
- /// AddrMode - This represents an addressing mode of:
- /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
- /// If BaseGV is null, there is no BaseGV.
- /// If BaseOffs is zero, there is no base offset.
- /// If HasBaseReg is false, there is no base register.
- /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
- /// no scale.
- ///
- struct AddrMode {
- GlobalValue *BaseGV;
- int64_t BaseOffs;
- bool HasBaseReg;
- int64_t Scale;
- AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
- };
-
/// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
/// same BB as Load/Store instructions reading the address. This allows as
/// much computation as possible to be done in the address mode for that
@@ -1741,10 +1788,11 @@ public:
private:
const TargetMachine &TM;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLoweringObjectFile &TLOF;
- /// PointerTy - The type to use for pointers, usually i32 or i64.
+ /// PointerTy - The type to use for pointers for the default address space,
+ /// usually i32 or i64.
///
MVT PointerTy;
@@ -1762,6 +1810,12 @@ private:
/// set to true unconditionally.
bool IntDivIsCheap;
+ /// BypassSlowDivWidths - Tells the code generator to bypass slow divide
+ /// or remainder instructions. For example, BypassSlowDivWidths[32] = 8
+ /// tells the code generator to bypass 32-bit integer div/rem with an
+ /// 8-bit unsigned integer div/rem when the operands are positive and
+ /// less than 256.
+ DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;
+
/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
@@ -1784,6 +1838,9 @@ private:
/// If it's not true, then each jumptable must be lowered into if-then-else's.
bool SupportJumpTables;
+ /// MinimumJumpTableEntries - The minimum number of blocks at which to
+ /// use jump tables rather than an if sequence.
+ int MinimumJumpTableEntries;
+
/// BooleanContents - Information about the contents of the high-bits in
/// boolean values held in a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
@@ -1901,12 +1958,14 @@ private:
/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
/// deal with the condition code.
- uint64_t CondCodeActions[ISD::SETCC_INVALID];
+ /// Because each CC action takes up 2 bits, we need to have the array size
+ /// be large enough to fit all of the value types. This can be done by
+ /// dividing the MVT::LAST_VALUETYPE by 32 and adding one.
+ uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];
ValueTypeActionImpl ValueTypeActions;
- typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
-
+public:
LegalizeKind
getTypeConversion(LLVMContext &Context, EVT VT) const {
// If this is a simple type, use the ComputeRegisterProp mechanism.
@@ -1921,6 +1980,9 @@ private:
ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
&& "Promote may not follow Expand or Promote");
+ if (LA == TypeSplitVector)
+ NVT = EVT::getVectorVT(Context, VT.getVectorElementType(),
+ VT.getVectorNumElements() / 2);
return LegalizeKind(LA, NVT);
}
@@ -2023,6 +2085,7 @@ private:
return LegalizeKind(TypeSplitVector, NVT);
}
+private:
std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;
/// TargetDAGCombineArray - Targets can specify ISD nodes that they would
diff --git a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
index d631f58..13a6fe3 100644
--- a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
@@ -33,10 +33,11 @@ namespace llvm {
class TargetLoweringObjectFile : public MCObjectFileInfo {
MCContext *Ctx;
-
- TargetLoweringObjectFile(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT
- void operator=(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT
-
+
+ TargetLoweringObjectFile(
+ const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION;
+
public:
MCContext &getContext() const { return *Ctx; }
diff --git a/contrib/llvm/include/llvm/Target/TargetMachine.h b/contrib/llvm/include/llvm/Target/TargetMachine.h
index e4bf32b..5006647 100644
--- a/contrib/llvm/include/llvm/Target/TargetMachine.h
+++ b/contrib/llvm/include/llvm/Target/TargetMachine.h
@@ -17,6 +17,8 @@
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <string>
@@ -31,8 +33,7 @@ class MCCodeGenInfo;
class MCContext;
class PassManagerBase;
class Target;
-class TargetData;
-class TargetELFWriterInfo;
+class DataLayout;
class TargetFrameLowering;
class TargetInstrInfo;
class TargetIntrinsicInfo;
@@ -52,8 +53,8 @@ class raw_ostream;
/// through this interface.
///
class TargetMachine {
- TargetMachine(const TargetMachine &); // DO NOT IMPLEMENT
- void operator=(const TargetMachine &); // DO NOT IMPLEMENT
+ TargetMachine(const TargetMachine &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetMachine &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
TargetMachine(const Target &T, StringRef TargetTriple,
StringRef CPU, StringRef FS, const TargetOptions &Options);
@@ -106,7 +107,11 @@ public:
virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
virtual const TargetLowering *getTargetLowering() const { return 0; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
- virtual const TargetData *getTargetData() const { return 0; }
+ virtual const DataLayout *getDataLayout() const { return 0; }
+ virtual const ScalarTargetTransformInfo*
+ getScalarTargetTransformInfo() const { return 0; }
+ virtual const VectorTargetTransformInfo*
+ getVectorTargetTransformInfo() const { return 0; }
/// getMCAsmInfo - Return target specific asm information.
///
@@ -142,11 +147,6 @@ public:
return 0;
}
- /// getELFWriterInfo - If this target supports an ELF writer, return
- /// information for it, otherwise return null.
- ///
- virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; }
-
/// hasMCRelaxAll - Check whether all machine code instructions should be
/// relaxed.
bool hasMCRelaxAll() const { return MCRelaxAll; }
diff --git a/contrib/llvm/include/llvm/Target/TargetOpcodes.h b/contrib/llvm/include/llvm/Target/TargetOpcodes.h
index f0b181e..516e070 100644
--- a/contrib/llvm/include/llvm/Target/TargetOpcodes.h
+++ b/contrib/llvm/include/llvm/Target/TargetOpcodes.h
@@ -87,7 +87,11 @@ namespace TargetOpcode {
/// BUNDLE - This instruction represents an instruction bundle. Instructions
/// which immediately follow a BUNDLE instruction which are marked with
/// 'InsideBundle' flag are inside the bundle.
- BUNDLE
+ BUNDLE = 14,
+
+ /// Lifetime markers.
+ LIFETIME_START = 15,
+ LIFETIME_END = 16
};
} // end namespace TargetOpcode
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Target/TargetOptions.h b/contrib/llvm/include/llvm/Target/TargetOptions.h
index d1a07d1..68ca567 100644
--- a/contrib/llvm/include/llvm/Target/TargetOptions.h
+++ b/contrib/llvm/include/llvm/Target/TargetOptions.h
@@ -155,6 +155,10 @@ namespace llvm {
/// automatically realigned, if needed.
unsigned RealignStack : 1;
+ /// SSPBufferSize - The minimum size of buffers that will receive stack
+ /// smashing protection when -fstack-protector is used.
+ unsigned SSPBufferSize;
+
/// EnableFastISel - This flag enables fast-path instruction selection
/// which trades away generated code quality in favor of reducing
/// compile time.
diff --git a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
index df4d900..afa2ee2 100644
--- a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
@@ -221,13 +221,17 @@ public:
private:
const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
const char *const *SubRegIndexNames; // Names of subreg indexes.
+ // Pointer to array of lane masks, one per sub-reg index.
+ const unsigned *SubRegIndexLaneMasks;
+
regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
protected:
TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
regclass_iterator RegClassBegin,
regclass_iterator RegClassEnd,
- const char *const *subregindexnames);
+ const char *const *SRINames,
+ const unsigned *SRILaneMasks);
virtual ~TargetRegisterInfo();
public:
@@ -327,10 +331,36 @@ public:
/// getSubRegIndexName - Return the human-readable symbolic target-specific
/// name for the specified SubRegIndex.
const char *getSubRegIndexName(unsigned SubIdx) const {
- assert(SubIdx && "This is not a subregister index");
+ assert(SubIdx && SubIdx < getNumSubRegIndices() &&
+ "This is not a subregister index");
return SubRegIndexNames[SubIdx-1];
}
+ /// getSubRegIndexLaneMask - Return a bitmask representing the parts of a
+ /// register that are covered by SubIdx.
+ ///
+ /// Lane masks for sub-register indices are similar to register units for
+ /// physical registers. The individual bits in a lane mask can't be assigned
+ /// any specific meaning. They can be used to check if two sub-register
+ /// indices overlap.
+ ///
+ /// If the target has a register such that:
+ ///
+ /// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
+ ///
+ /// then:
+ ///
+ /// getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B) != 0
+ ///
+ /// The converse is not necessarily true. If two lane masks have a common
+ /// bit, the corresponding sub-registers may not overlap, but it can be
+ /// assumed that they usually will.
+ unsigned getSubRegIndexLaneMask(unsigned SubIdx) const {
+ // SubIdx == 0 is allowed, it has the lane mask ~0u.
+ assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
+ return SubRegIndexLaneMasks[SubIdx];
+ }
+
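// Sketch of the overlap test this enables ('TRI' is assumed to be a
// TargetRegisterInfo pointer; A and B are sub-register indices):
//
//   if (TRI->getSubRegIndexLaneMask(A) & TRI->getSubRegIndexLaneMask(B))
//     ; // getSubReg(Reg, A) and getSubReg(Reg, B) may overlap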
/// regsOverlap - Returns true if the two registers are equal or alias each
/// other. The registers may be virtual register.
bool regsOverlap(unsigned regA, unsigned regB) const {
@@ -416,18 +446,6 @@ public:
return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
}
- /// canCombineSubRegIndices - Given a register class and a list of
- /// subregister indices, return true if it's possible to combine the
- /// subregister indices into one that corresponds to a larger
- /// subregister. Return the new subregister index by reference. Note the
- /// new index may be zero if the given subregisters can be combined to
- /// form the whole register.
- virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC,
- SmallVectorImpl<unsigned> &SubIndices,
- unsigned &NewSubIdx) const {
- return 0;
- }
-
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
/// specified sub-register index which is in the specified register class B.
@@ -458,6 +476,8 @@ public:
/// composeSubRegIndices - Return the subregister index you get from composing
/// two subregister indices.
///
+ /// The special null sub-register index composes as the identity.
+ ///
/// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
/// returns c. Note that composeSubRegIndices does not tell you about illegal
/// compositions. If R does not have a subreg a, or R:a does not have a subreg
@@ -467,11 +487,19 @@ public:
/// ssub_0:S0 - ssub_3:S3 subregs.
/// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
///
- virtual unsigned composeSubRegIndices(unsigned a, unsigned b) const {
- // This default implementation is correct for most targets.
- return b;
+ unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+ if (!a) return b;
+ if (!b) return a;
+ return composeSubRegIndicesImpl(a, b);
}
+protected:
+ /// Overridden by TableGen in targets that have sub-registers.
+ virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
+ llvm_unreachable("Target has no sub-registers");
+ }
+
+public:
/// getCommonSuperRegClass - Find a common super-register class if it exists.
///
/// Find a register class, SuperRC and two sub-register indices, PreA and
diff --git a/contrib/llvm/include/llvm/Target/TargetSchedule.td b/contrib/llvm/include/llvm/Target/TargetSchedule.td
index 4dc488d..0da82fd 100644
--- a/contrib/llvm/include/llvm/Target/TargetSchedule.td
+++ b/contrib/llvm/include/llvm/Target/TargetSchedule.td
@@ -10,25 +10,77 @@
// This file defines the target-independent scheduling interfaces which should
// be implemented by each target which is using TableGen based scheduling.
//
+// The SchedMachineModel is defined by subtargets for three categories of data:
+// 1. Basic properties for a coarse-grained instruction cost model.
+// 2. Scheduler Read/Write resources for a simple per-opcode cost model.
+// 3. Instruction itineraries for detailed reservation tables.
+//
+// (1) Basic properties are defined by the SchedMachineModel
+// class. Target hooks allow subtargets to associate opcodes with
+// those properties.
+//
+// (2) A per-operand machine model can be implemented in any
+// combination of the following ways:
+//
+// A. Associate per-operand SchedReadWrite types with Instructions by
+// modifying the Instruction definition to inherit from Sched. For
+// each subtarget, define WriteRes and ReadAdvance to associate
+// processor resources and latency with each SchedReadWrite type.
+//
+// B. In each instruction definition, name an ItineraryClass. For each
+// subtarget, define ItinRW entries to map ItineraryClass to
+// per-operand SchedReadWrite types. Unlike method A, these types may
+// be subtarget specific and can be directly associated with resources
+// by defining SchedWriteRes and SchedReadAdvance.
+//
+// C. In the subtarget, map SchedReadWrite types to specific
+// opcodes. This overrides any SchedReadWrite types or
+// ItineraryClasses defined by the Instruction. As in method B, the
+// subtarget can directly associate resources with SchedReadWrite
+// types by defining SchedWriteRes and SchedReadAdvance.
+//
+// D. In either the target or subtarget, define SchedWriteVariant or
+// SchedReadVariant to map one SchedReadWrite type onto another
+// sequence of SchedReadWrite types. This allows dynamic selection of
+// an instruction's machine model via custom C++ code. It also allows
+// a machine-independent SchedReadWrite type to map to a sequence of
+// machine-dependent types.
+//
+// (3) A per-pipeline-stage machine model can be implemented by providing
+// Itineraries in addition to mapping instructions to ItineraryClasses.
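// A minimal sketch of method A above (all names are illustrative, not
// defs from this file): the target declares a SchedWrite, instructions
// reference it via Sched<>, and each subtarget binds it to resources.
//
//   def WriteALU : SchedWrite;
//   // In the instruction definitions:  ... Sched<[WriteALU]> ...
//   // In a subtarget's scheduling model:
//   def : WriteRes<WriteALU, [MyALUUnit]> { let Latency = 1; }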
//===----------------------------------------------------------------------===//
+// Include legacy support for instruction itineraries.
include "llvm/Target/TargetItinerary.td"
-// The SchedMachineModel is defined by subtargets for three categories of data:
-// 1) Basic properties for coarse grained instruction cost model.
-// 2) Scheduler Read/Write resources for simple per-opcode cost model.
-// 3) Instruction itineraties for detailed reservation tables.
+class Instruction; // Forward def
+
+// DAG operator that interprets the DAG args as Instruction defs.
+def instrs;
+
+// DAG operator that interprets each DAG arg as a regex pattern for
+// matching Instruction opcode names.
+// The regex must match the beginning of the opcode (as in Python re.match).
+// To avoid matching prefixes, append '$' to the pattern.
+def instregex;
+
+// Define the SchedMachineModel and provide basic properties for
+// a coarse-grained instruction cost model. Default values for the
+// properties are defined in MCSchedModel. A value of "-1" in the
+// target description's SchedMachineModel indicates that the property
+// is not overridden by the target.
//
-// Default values for basic properties are defined in MCSchedModel. "-1"
-// indicates that the property is not overriden by the target description.
+// Target hooks allow subtargets to associate LoadLatency and
+// HighLatency with groups of opcodes.
class SchedMachineModel {
- int IssueWidth = -1; // Max instructions that may be scheduled per cycle.
+ int IssueWidth = -1; // Max micro-ops that may be scheduled per cycle.
int MinLatency = -1; // Determines which instructions are allowed in a group.
// (-1) in-order, (0) out-of-order, (1) in-order with variable latencies.
int LoadLatency = -1; // Cycles for loads to access the cache.
int HighLatency = -1; // Approximation of cycles for "high latency" ops.
int MispredictPenalty = -1; // Extra cycles for a mispredicted branch.
+ // Per-cycle resource tables.
ProcessorItineraries Itineraries = NoItineraries;
bit NoModel = 0; // Special tag to indicate missing machine model.
@@ -38,4 +90,276 @@ def NoSchedModel : SchedMachineModel {
let NoModel = 1;
}
-// TODO: Define classes for processor and scheduler resources.
+// Define a kind of processor resource that may be common across
+// similar subtargets.
+class ProcResourceKind;
+
+// Define a number of interchangeable processor resources. NumUnits
+// determines the throughput of instructions that require the resource.
+//
+// An optional Super resource may be given to model these resources as
+// a subset of the more general super resources. Using one of these
+// resources implies using one of the super resources.
+//
+// ProcResourceUnits normally model a few buffered resources within an
+// out-of-order engine that the compiler attempts to conserve.
+// Buffered resources may be held for multiple clock cycles, but the
+// scheduler does not pin them to a particular clock cycle relative to
+// instruction dispatch. Setting Buffered=0 changes this to an
+// in-order resource. In this case, the scheduler counts down from the
+// cycle that the instruction issues in-order, forcing an interlock
+// with subsequent instructions that require the same resource until
+// the number of ResourceCycles specified in WriteRes expire.
+//
+// SchedModel ties these units to a processor for any stand-alone defs
+// of this class. Instances of subclass ProcResource will be automatically
+// attached to a processor, so SchedModel is not needed.
+class ProcResourceUnits<ProcResourceKind kind, int num> {
+ ProcResourceKind Kind = kind;
+ int NumUnits = num;
+ ProcResourceKind Super = ?;
+ bit Buffered = 1;
+ SchedMachineModel SchedModel = ?;
+}
+
+// EponymousProcResourceKind helps implement ProcResourceUnits by
+// allowing a ProcResourceUnits definition to reference itself. It
+// should not be referenced anywhere else.
+def EponymousProcResourceKind : ProcResourceKind;
+
+// Subtargets typically define processor resource kind and number of
+// units in one place.
+class ProcResource<int num> : ProcResourceKind,
+ ProcResourceUnits<EponymousProcResourceKind, num>;
+
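// Ex. (illustrative): a subtarget with two interchangeable integer ALUs
// could define
//
//   def MyALU : ProcResource<2>;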
+// A target architecture may define SchedReadWrite types and associate
+// them with instruction operands.
+class SchedReadWrite;
+
+// List the per-operand types that map to the machine model of an
+// instruction. One SchedWrite type must be listed for each explicit
+// def operand in order. Additional SchedWrite types may optionally be
+// listed for implicit def operands. SchedRead types may optionally
+// be listed for use operands in order. The order of defs relative to
+// uses is insignificant. This way, the same SchedReadWrite list may
+// be used for multiple forms of an operation. For example, a
+// two-address instruction could have two tied operands or a single
+// operand that both reads and writes a reg. In both cases we have a
+// single SchedWrite and single SchedRead in any order.
+class Sched<list<SchedReadWrite> schedrw> {
+ list<SchedReadWrite> SchedRW = schedrw;
+}
+
+// Define a scheduler resource associated with a def operand.
+class SchedWrite : SchedReadWrite;
+def NoWrite : SchedWrite;
+
+// Define a scheduler resource associated with a use operand.
+class SchedRead : SchedReadWrite;
+
+// Define a SchedWrite that is modeled as a sequence of other
+// SchedWrites with additive latency. This allows a single operand to
+// be mapped to the resources composed from a set of previously defined
+// SchedWrites.
+//
+// If the final write in this sequence is a SchedWriteVariant marked
+// Variadic, then the list of prior writes are distributed across all
+// operands after resolving the predicate for the final write.
+//
+// SchedModel silences warnings but is ignored.
+class WriteSequence<list<SchedWrite> writes, int rep = 1> : SchedWrite {
+ list<SchedWrite> Writes = writes;
+ int Repeat = rep;
+ SchedMachineModel SchedModel = ?;
+}
+
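// Ex. (illustrative): a load-op write whose latency is the sum of its
// parts could be defined as
//
//   def WriteLoadALU : WriteSequence<[WriteLoad, WriteALU]>;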
+// Define values common to WriteRes and SchedWriteRes.
+//
+// SchedModel ties these resources to a processor.
+class ProcWriteResources<list<ProcResourceKind> resources> {
+ list<ProcResourceKind> ProcResources = resources;
+ list<int> ResourceCycles = [];
+ int Latency = 1;
+ int NumMicroOps = 1;
+ bit BeginGroup = 0;
+ bit EndGroup = 0;
+ // Allow a processor to mark some scheduling classes as unsupported
+ // for stronger verification.
+ bit Unsupported = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Define the resources and latency of a SchedWrite. This will be used
+// directly by targets that have no itinerary classes. In this case,
+// SchedWrite is defined by the target, while WriteResources is
+// defined by the subtarget, and maps the SchedWrite to processor
+// resources.
+//
+// If a target already has itinerary classes, SchedWriteResources can
+// be used instead to define subtarget specific SchedWrites and map
+// them to processor resources in one place. Then ItinRW can map
+// itinerary classes to the subtarget's SchedWrites.
+//
+// ProcResources indicates the set of resources consumed by the write.
+// Optionally, ResourceCycles indicates the number of cycles the
+// resource is consumed. Each ResourceCycles item is paired with the
+// ProcResource item at the same position in its list. Since
+// ResourceCycles are rarely specialized, the list may be
+// incomplete. By default, resources are consumed for a single cycle,
+// regardless of latency, which models a fully pipelined processing
+// unit. A value of 0 for ResourceCycles means that the resource must
+// be available but is not consumed, which is only relevant for
+// unbuffered resources.
+//
+// By default, each SchedWrite takes one micro-op, which is counted
+// against the processor's IssueWidth limit. If an instruction can
+// write multiple registers with a single micro-op, the subtarget
+// should define one of the writes to be zero micro-ops. If a
+// subtarget requires multiple micro-ops to write a single result, it
+// should either override the write's NumMicroOps to be greater than 1
+// or require additional writes. Extra writes can be required either
+// by defining a WriteSequence, or simply listing extra writes in the
+// instruction's list of writers beyond the number of "def"
+// operands. The scheduler assumes that all micro-ops must be
+// dispatched in the same cycle. These micro-ops may be required to
+// begin or end the current dispatch group.
+class WriteRes<SchedWrite write, list<ProcResourceKind> resources>
+ : ProcWriteResources<resources> {
+ SchedWrite WriteType = write;
+}
+
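// Ex. (illustrative, assuming WriteDiv and MyDivUnit defs): an
// unpipelined divider held for 10 cycles with a 12-cycle result latency:
//
//   def : WriteRes<WriteDiv, [MyDivUnit]> {
//     let Latency = 12;
//     let ResourceCycles = [10];
//   }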
+// Directly name a set of WriteResources defining a new SchedWrite
+// type at the same time. This class is unaware of its SchedModel so
+// must be referenced by InstRW or ItinRW.
+class SchedWriteRes<list<ProcResourceKind> resources> : SchedWrite,
+ ProcWriteResources<resources>;
+
+// Define values common to ReadAdvance and SchedReadAdvance.
+//
+// SchedModel ties these resources to a processor.
+class ProcReadAdvance<int cycles, list<SchedWrite> writes = []> {
+ int Cycles = cycles;
+ list<SchedWrite> ValidWrites = writes;
+ // Allow a processor to mark some scheduling classes as unsupported
+ // for stronger verification.
+ bit Unsupported = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// A processor may define a ReadAdvance associated with a SchedRead
+// to reduce latency of a prior write by N cycles. A negative advance
+// effectively increases latency, which may be used for cross-domain
+// stalls.
+//
+// A ReadAdvance may be associated with a list of SchedWrites
+// to implement pipeline bypass. The Writes list may be empty to
+// indicate operands that are always read this number of Cycles later
+// than a normal register read, allowing the read's parent instruction
+// to issue earlier relative to the writer.
+class ReadAdvance<SchedRead read, int cycles, list<SchedWrite> writes = []>
+ : ProcReadAdvance<cycles, writes> {
+ SchedRead ReadType = read;
+}
+
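// Ex. (illustrative, assuming ReadMAC and WriteMul defs): an operand
// that can consume a multiply result two cycles early via forwarding:
//
//   def : ReadAdvance<ReadMAC, 2, [WriteMul]>;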
+// Directly associate a new SchedRead type with a delay and optional
+// pipeline bypass. For use with InstRW or ItinRW.
+class SchedReadAdvance<int cycles, list<SchedWrite> writes = []> : SchedRead,
+ ProcReadAdvance<cycles, writes>;
+
+// Define SchedRead defaults. Reads seldom need special treatment.
+def ReadDefault : SchedRead;
+def NoReadAdvance : SchedReadAdvance<0>;
+
+// Define shared code that will be in the same scope as all
+// SchedPredicates. Available variables are:
+// (const MachineInstr *MI, const TargetSchedModel *SchedModel)
+class PredicateProlog<code c> {
+ code Code = c;
+}
+
+// Define a predicate to determine which SchedVariant applies to a
+// particular MachineInstr. The code snippet is used as an
+// if-statement's expression. Available variables are MI, SchedModel,
+// and anything defined in a PredicateProlog.
+//
+// SchedModel silences warnings but is ignored.
+class SchedPredicate<code pred> {
+ SchedMachineModel SchedModel = ?;
+ code Predicate = pred;
+}
+def NoSchedPred : SchedPredicate<[{true}]>;
+
+// Associate a predicate with a list of SchedReadWrites. By default,
+// the selected SchedReadWrites are still associated with a single
+// operand and assumed to execute sequentially with additive
+// latency. However, if the parent SchedWriteVariant or
+// SchedReadVariant is marked "Variadic", then each Selected
+// SchedReadWrite is mapped in place to the instruction's variadic
+// operands. In this case, latency is not additive. If the current Variant
+// is already part of a Sequence, then that entire chain leading up to
+// the Variant is distributed over the variadic operands.
+class SchedVar<SchedPredicate pred, list<SchedReadWrite> selected> {
+ SchedPredicate Predicate = pred;
+ list<SchedReadWrite> Selected = selected;
+}
+
+// SchedModel silences warnings but is ignored.
+class SchedVariant<list<SchedVar> variants> {
+ list<SchedVar> Variants = variants;
+ bit Variadic = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// A SchedWriteVariant is a single SchedWrite type that maps to a list
+// of SchedWrite types under the conditions defined by its predicates.
+//
+// A Variadic write is expanded to cover multiple "def" operands. The
+// SchedVariant's Selected list is then interpreted as one write
+// per operand instead of the usual sequential writes feeding a single
+// operand.
+class SchedWriteVariant<list<SchedVar> variants> : SchedWrite,
+ SchedVariant<variants> {
+}
+
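// Ex. (illustrative): select a slower write for predicated forms, with
// the predicate body left to target C++ code:
//
//   def WriteALUvar : SchedWriteVariant<[
//     SchedVar<MyIsPredicated, [WriteALUSlow]>,
//     SchedVar<NoSchedPred,    [WriteALU]>]>;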
+// A SchedReadVariant is a single SchedRead type that maps to a list
+// of SchedRead types under the conditions defined by its predicates.
+//
+// A Variadic read is expanded to cover multiple "readsReg" operands as
+// explained above.
+class SchedReadVariant<list<SchedVar> variants> : SchedRead,
+ SchedVariant<variants> {
+}
+
+// Map a set of opcodes to a list of SchedReadWrite types. This allows
+// the subtarget to easily override specific operations.
+//
+// SchedModel ties this opcode mapping to a processor.
+class InstRW<list<SchedReadWrite> rw, dag instrlist> {
+ list<SchedReadWrite> OperandReadWrites = rw;
+ dag Instrs = instrlist;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Map a set of itinerary classes to SchedReadWrite resources. This is
+// used to bootstrap a target (e.g. ARM) when itineraries already
+// exist and changing InstrInfo is undesirable.
+//
+// SchedModel ties this ItineraryClass mapping to a processor.
+class ItinRW<list<SchedReadWrite> rw, list<InstrItinClass> iic> {
+ list<InstrItinClass> MatchedItinClasses = iic;
+ list<SchedReadWrite> OperandReadWrites = rw;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Alias a target-defined SchedReadWrite to a processor specific
+// SchedReadWrite. This allows a subtarget to easily map a
+// SchedReadWrite type onto a WriteSequence, SchedWriteVariant, or
+// SchedReadVariant.
+//
+// SchedModel will usually be provided by the surrounding let statement
+// and ties this SchedAlias mapping to a processor.
+class SchedAlias<SchedReadWrite match, SchedReadWrite alias> {
+ SchedReadWrite MatchRW = match;
+ SchedReadWrite AliasRW = alias;
+ SchedMachineModel SchedModel = ?;
+}
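// Ex. (illustrative): a subtarget overriding specific opcodes with the
// instregex operator defined earlier:
//
//   def : InstRW<[WriteALU], (instregex "ADD", "SUB")>;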
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
index 3f81c06..83bd787 100644
--- a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -445,9 +445,9 @@ def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncst (see below).
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h b/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
index c9ca722..96793bc 100644
--- a/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
@@ -20,7 +20,7 @@
namespace llvm {
-class TargetData;
+class DataLayout;
class TargetMachine;
//===----------------------------------------------------------------------===//
@@ -28,13 +28,13 @@ class TargetMachine;
/// SelectionDAG lowering and instruction selection process.
///
class TargetSelectionDAGInfo {
- TargetSelectionDAGInfo(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT
+ TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
- const TargetData *TD;
+ const DataLayout *TD;
protected:
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
public:
explicit TargetSelectionDAGInfo(const TargetMachine &TM);
diff --git a/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h b/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h
index fc23b2c..6db96d9 100644
--- a/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h
@@ -19,9 +19,11 @@
namespace llvm {
+class MachineInstr;
class SDep;
class SUnit;
class TargetRegisterClass;
+class TargetSchedModel;
template <typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
@@ -31,8 +33,8 @@ template <typename T> class SmallVectorImpl;
/// be exposed through a TargetSubtargetInfo-derived class.
///
class TargetSubtargetInfo : public MCSubtargetInfo {
- TargetSubtargetInfo(const TargetSubtargetInfo&); // DO NOT IMPLEMENT
- void operator=(const TargetSubtargetInfo&); // DO NOT IMPLEMENT
+ TargetSubtargetInfo(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses...
TargetSubtargetInfo();
public:
@@ -43,23 +45,26 @@ public:
virtual ~TargetSubtargetInfo();
- /// getSpecialAddressLatency - For targets where it is beneficial to
- /// backschedule instructions that compute addresses, return a value
- /// indicating the number of scheduling cycles of backscheduling that
- /// should be attempted.
- virtual unsigned getSpecialAddressLatency() const { return 0; }
+ /// Resolve a SchedClass at runtime, where SchedClass identifies an
+ /// MCSchedClassDesc with the isVariant property. This may return the ID of
+ /// another variant SchedClass, but repeated invocation must quickly terminate
+ /// in a nonvariant SchedClass.
+ virtual unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,
+ const TargetSchedModel* SchedModel) const {
+ return 0;
+ }
// enablePostRAScheduler - If the target can benefit from post-regalloc
// scheduling and the specified optimization level meets the requirement
// return true to enable post-register-allocation scheduling. In
// CriticalPathRCs return any register classes that should only be broken
- // if on the critical path.
+ // if on the critical path.
virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const;
// adjustSchedDependency - Perform target specific adjustments to
// the latency of a schedule dependency.
- virtual void adjustSchedDependency(SUnit *def, SUnit *use,
+ virtual void adjustSchedDependency(SUnit *def, SUnit *use,
SDep& dep) const { }
};
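
A target with variant scheduling classes would override the new hook along these lines; the subtarget class, class IDs, and predicate test below are hypothetical, and only the signature is taken from the declaration above:

    // Sketch: map a variant SchedClass to a concrete one. Per the contract
    // above, repeated invocation must quickly reach a nonvariant class.
    unsigned MySubtarget::resolveSchedClass(
        unsigned SchedClass, const MachineInstr *MI,
        const TargetSchedModel *SchedModel) const {
      if (SchedClass == MyVariantClassID)        // hypothetical variant ID
        return isPredicatedForm(MI) ? MyPredClassID : MyPlainClassID;
      return SchedClass;                         // already nonvariant
    }
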
diff --git a/contrib/llvm/include/llvm/Target/TargetTransformImpl.h b/contrib/llvm/include/llvm/Target/TargetTransformImpl.h
new file mode 100644
index 0000000..7ea2396
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetTransformImpl.h
@@ -0,0 +1,98 @@
+//=- llvm/Target/TargetTransformImpl.h - Target Loop Trans Info----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the target-specific implementations of the
+// TargetTransform interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGET_TRANSFORMATION_IMPL_H
+#define LLVM_TARGET_TARGET_TRANSFORMATION_IMPL_H
+
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/CodeGen/ValueTypes.h"
+
+namespace llvm {
+
+class TargetLowering;
+
+/// ScalarTargetTransformInfo - This is a default implementation for the
+/// ScalarTargetTransformInfo interface. Different targets can implement
+/// this interface differently.
+class ScalarTargetTransformImpl : public ScalarTargetTransformInfo {
+private:
+ const TargetLowering *TLI;
+
+public:
+ /// Ctor
+ explicit ScalarTargetTransformImpl(const TargetLowering *TL) : TLI(TL) {}
+
+ virtual bool isLegalAddImmediate(int64_t imm) const;
+
+ virtual bool isLegalICmpImmediate(int64_t imm) const;
+
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
+
+ virtual bool isTypeLegal(Type *Ty) const;
+
+ virtual unsigned getJumpBufAlignment() const;
+
+ virtual unsigned getJumpBufSize() const;
+
+ virtual bool shouldBuildLookupTables() const;
+};
+
+class VectorTargetTransformImpl : public VectorTargetTransformInfo {
+protected:
+ const TargetLowering *TLI;
+
+ /// Estimate the cost of type-legalization and the legalized type.
+ std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;
+
+ /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+ /// are set if the result needs to be inserted and/or extracted from vectors.
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
+
+ // Get the ISD node that corresponds to the Instruction class opcode.
+ int InstructionOpcodeToISD(unsigned Opcode) const;
+
+public:
+ explicit VectorTargetTransformImpl(const TargetLowering *TL) : TLI(TL) {}
+
+ virtual ~VectorTargetTransformImpl() {}
+
+ virtual unsigned getInstrCost(unsigned Opcode, Type *Ty1, Type *Ty2) const;
+
+ virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;
+
+ virtual unsigned getBroadcastCost(Type *Tp) const;
+
+ virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) const;
+
+ virtual unsigned getCFInstrCost(unsigned Opcode) const;
+
+ virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) const;
+
+ virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const;
+
+ virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) const;
+
+ virtual unsigned getNumberOfParts(Type *Tp) const;
+};
+
+} // end llvm namespace
+
+#endif
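
Targets hand these default implementations their TargetLowering object; a hedged sketch of a cost query against the defaults (obtaining the lowering object from an initialized TargetMachine is assumed, not shown):

    #include "llvm/Instruction.h"
    #include "llvm/Target/TargetTransformImpl.h"
    using namespace llvm;

    // Sketch: ask the default implementation for the cost of a vector add.
    unsigned vectorAddCost(const TargetLowering *TLI, Type *VecTy) {
      VectorTargetTransformImpl VTTI(TLI);
      return VTTI.getArithmeticInstrCost(Instruction::Add, VecTy);
    }
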
diff --git a/contrib/llvm/include/llvm/TargetTransformInfo.h b/contrib/llvm/include/llvm/TargetTransformInfo.h
new file mode 100644
index 0000000..94db490
--- /dev/null
+++ b/contrib/llvm/include/llvm/TargetTransformInfo.h
@@ -0,0 +1,204 @@
+//===- llvm/Transforms/TargetTransformInfo.h --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass exposes codegen information to IR-level passes. Every
+// transformation that uses codegen information is broken into three parts:
+// 1. The IR-level analysis pass.
+// 2. The IR-level transformation interface which provides the needed
+// information.
+// 3. Codegen-level implementation which uses target-specific hooks.
+//
+// This file defines #2, which is the interface that IR-level transformations
+// use for querying the codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_TARGET_TRANSFORM_INTERFACE
+#define LLVM_TRANSFORMS_TARGET_TRANSFORM_INTERFACE
+
+#include "llvm/Pass.h"
+#include "llvm/AddressingMode.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Type.h"
+
+namespace llvm {
+
+class ScalarTargetTransformInfo;
+class VectorTargetTransformInfo;
+
+/// TargetTransformInfo - This pass provides access to the codegen
+/// interfaces that are needed for IR-level transformations.
+class TargetTransformInfo : public ImmutablePass {
+private:
+ const ScalarTargetTransformInfo *STTI;
+ const VectorTargetTransformInfo *VTTI;
+public:
+ /// Default ctor.
+ ///
+ /// @note This has to exist, because this is a pass, but it should never be
+ /// used.
+ TargetTransformInfo();
+
+ TargetTransformInfo(const ScalarTargetTransformInfo* S,
+ const VectorTargetTransformInfo *V)
+ : ImmutablePass(ID), STTI(S), VTTI(V) {
+ initializeTargetTransformInfoPass(*PassRegistry::getPassRegistry());
+ }
+
+ TargetTransformInfo(const TargetTransformInfo &T) :
+ ImmutablePass(ID), STTI(T.STTI), VTTI(T.VTTI) { }
+
+ const ScalarTargetTransformInfo* getScalarTargetTransformInfo() const {
+ return STTI;
+ }
+ const VectorTargetTransformInfo* getVectorTargetTransformInfo() const {
+ return VTTI;
+ }
+
+ /// Pass identification, replacement for typeid.
+ static char ID;
+};
+
+// ---------------------------------------------------------------------------//
+// The classes below are inherited and implemented by target-specific classes
+// in the codegen.
+// ---------------------------------------------------------------------------//
+
+/// ScalarTargetTransformInfo - This interface is used by IR-level passes
+/// that need target-dependent information for generic scalar transformations.
+/// LSR and LowerInvoke use this interface.
+class ScalarTargetTransformInfo {
+public:
+ virtual ~ScalarTargetTransformInfo() {}
+
+ /// isLegalAddImmediate - Return true if the specified immediate is a legal
+ /// add immediate; that is, the target has add instructions which can add
+ /// a register with the immediate without having to materialize the
+ /// immediate into a register.
+ virtual bool isLegalAddImmediate(int64_t) const {
+ return false;
+ }
+ /// isLegalICmpImmediate - Return true if the specified immediate is a legal
+ /// icmp immediate; that is, the target has icmp instructions which can
+ /// compare a register against the immediate without having to materialize
+ /// the immediate into a register.
+ virtual bool isLegalICmpImmediate(int64_t) const {
+ return false;
+ }
+ /// isLegalAddressingMode - Return true if the addressing mode represented by
+ /// AM is legal for this target, for a load/store of the specified type.
+ /// The type may be VoidTy, in which case only return true if the addressing
+ /// mode is legal for a load/store of any legal type.
+ /// TODO: Handle pre/postinc as well.
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const {
+ return false;
+ }
+ /// isTruncateFree - Return true if it's free to truncate a value of
+ /// type Ty1 to type Ty2; e.g., on x86 it's free to truncate an i32 value in
+ /// register EAX to i16 by referencing its sub-register AX.
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const {
+ return false;
+ }
+ /// Is this type legal for the target?
+ virtual bool isTypeLegal(Type *Ty) const {
+ return false;
+ }
+ /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes.
+ virtual unsigned getJumpBufAlignment() const {
+ return 0;
+ }
+ /// getJumpBufSize - returns the target's jmp_buf size in bytes.
+ virtual unsigned getJumpBufSize() const {
+ return 0;
+ }
+ /// shouldBuildLookupTables - Return true if switches should be turned into
+ /// lookup tables for the target.
+ virtual bool shouldBuildLookupTables() const {
+ return true;
+ }
+};
+
+/// VectorTargetTransformInfo - This interface is used by the vectorizers
+/// to estimate the profitability of vectorization for different instructions.
+class VectorTargetTransformInfo {
+public:
+ virtual ~VectorTargetTransformInfo() {}
+
+ /// Returns the expected cost of the instruction opcode. The opcode is one of
+ /// the enums like Instruction::Add. The type arguments give the types of the
+ /// operation.
+ /// Most instructions only use the first type; in that case the second
+ /// type is ignored.
+ ///
+ /// Exceptions:
+ /// * Br instructions do not use any of the types.
+ /// * Select instructions pass the return type as Ty1 and the selector as Ty2.
+ /// * Cast instructions pass the destination as Ty1 and the source as Ty2.
+ /// * Insert/Extract element pass only the vector type as Ty1.
+ /// * ShuffleVector, Load, Store do not use this call.
+ virtual unsigned getInstrCost(unsigned Opcode,
+ Type *Ty1 = 0,
+ Type *Ty2 = 0) const {
+ return 1;
+ }
+
+ /// Returns the expected cost of arithmetic ops, such as mul, xor, fsub, etc.
+ virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const {
+ return 1;
+ }
+
+ /// Returns the cost of a vector broadcast of a scalar at element zero to a
+ /// vector of type 'Tp'.
+ virtual unsigned getBroadcastCost(Type *Tp) const {
+ return 1;
+ }
+
+ /// Returns the expected cost of cast instructions, such as bitcast, trunc,
+ /// zext, etc.
+ virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) const {
+ return 1;
+ }
+
+ /// Returns the expected cost of control-flow related instructions such as
+ /// Phi, Ret, Br.
+ virtual unsigned getCFInstrCost(unsigned Opcode) const {
+ return 1;
+ }
+
+ /// Returns the expected cost of compare and select instructions.
+ virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy = 0) const {
+ return 1;
+ }
+
+ /// Returns the expected cost of vector Insert and Extract.
+ /// Use -1 to indicate that there is no information on the index value.
+ virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index = -1) const {
+ return 1;
+ }
+
+ /// Returns the cost of Load and Store instructions.
+ virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) const {
+ return 1;
+ }
+
+ /// Returns the number of pieces into which the provided type must be
+ /// split during legalization. Zero is returned when the answer is unknown.
+ virtual unsigned getNumberOfParts(Type *Tp) const {
+ return 0;
+ }
+};
+
+} // End llvm namespace
+
+#endif
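
An IR-level pass reaches this information through the normal analysis machinery; a minimal sketch (the pass itself is hypothetical):

    #include "llvm/Function.h"
    #include "llvm/Pass.h"
    #include "llvm/TargetTransformInfo.h"
    using namespace llvm;

    namespace {
    // Hypothetical pass that consults the scalar codegen info when present.
    struct TTIQuery : public FunctionPass {
      static char ID;
      TTIQuery() : FunctionPass(ID) {}
      virtual bool runOnFunction(Function &F) {
        if (const TargetTransformInfo *TTI =
                getAnalysisIfAvailable<TargetTransformInfo>())
          if (const ScalarTargetTransformInfo *STTI =
                  TTI->getScalarTargetTransformInfo())
            (void)STTI->shouldBuildLookupTables(); // e.g. gate a transform
        return false;
      }
    };
    char TTIQuery::ID = 0;
    }
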
diff --git a/contrib/llvm/include/llvm/Transforms/IPO.h b/contrib/llvm/include/llvm/Transforms/IPO.h
index 18176e8..fc1cd59 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO.h
@@ -104,23 +104,14 @@ Pass *createPruneEHPass();
//===----------------------------------------------------------------------===//
/// createInternalizePass - This pass loops over all of the functions in the
-/// input module, internalizing all globals (functions and variables) not part
-/// of the api. If a list of symbols is specified with the
-/// -internalize-public-api-* command line options, those symbols are not
-/// internalized and all others are. Otherwise if AllButMain is set and the
-/// main function is found, all other globals are marked as internal. If no api
-/// is supplied and AllButMain is not set, or no main function is found, nothing
-/// is internalized.
-///
-ModulePass *createInternalizePass(bool AllButMain);
-
-/// createInternalizePass - This pass loops over all of the functions in the
/// input module, internalizing all globals (functions and variables) not in the
/// given exportList.
///
/// Note that command-line options that are used with the above function are not
-/// used now! Also, when exportList is empty, nothing is internalized.
+/// used now!
ModulePass *createInternalizePass(const std::vector<const char *> &exportList);
+/// createInternalizePass - Same as above, but with an empty exportList.
+ModulePass *createInternalizePass();
//===----------------------------------------------------------------------===//
/// createDeadArgEliminationPass - This pass removes arguments from functions
@@ -192,6 +183,16 @@ ModulePass *createMergeFunctionsPass();
/// createPartialInliningPass - This pass inlines parts of functions.
///
ModulePass *createPartialInliningPass();
+
+//===----------------------------------------------------------------------===//
+// createMetaRenamerPass - Rename everything with metasyntactic names.
+//
+ModulePass *createMetaRenamerPass();
+
+//===----------------------------------------------------------------------===//
+/// createBarrierNoopPass - This pass is purely a module pass barrier in a pass
+/// manager.
+ModulePass *createBarrierNoopPass();
} // End llvm namespace
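
With the AllButMain overload gone, callers pass an explicit export list (or call the new zero-argument form); a hedged sketch of the updated usage:

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"
    #include <vector>
    using namespace llvm;

    // Sketch: internalize everything except "main".
    void addInternalize(PassManager &PM) {
      std::vector<const char *> Exports;
      Exports.push_back("main");
      PM.add(createInternalizePass(Exports));
    }
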
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h b/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h
index 7c3cfc8..b036040 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h
@@ -21,7 +21,7 @@
namespace llvm {
class CallSite;
- class TargetData;
+ class DataLayout;
class InlineCost;
template<class PtrType, unsigned SmallSize>
class SmallPtrSet;
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h b/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
index 47ce902..3ea0a42 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -104,6 +104,7 @@ public:
bool DisableUnitAtATime;
bool DisableUnrollLoops;
bool Vectorize;
+ bool LoopVectorize;
private:
/// ExtensionList - This is the list of all of the extensions that are registered.
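
The new flag is driven the same way as the existing Vectorize knob; a sketch of enabling the loop vectorizer when building a pipeline:

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO/PassManagerBuilder.h"
    using namespace llvm;

    // Sketch: standard -O3-style pipeline with the new loop vectorizer on.
    void buildPipeline(PassManager &MPM) {
      PassManagerBuilder PMB;
      PMB.OptLevel = 3;
      PMB.LoopVectorize = true;   // new in this import
      PMB.populateModulePassManager(MPM);
    }
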
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation.h b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
index 4b0c448..8e63aaa 100644
--- a/contrib/llvm/include/llvm/Transforms/Instrumentation.h
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
@@ -34,7 +34,7 @@ ModulePass *createGCOVProfilerPass(bool EmitNotes = true, bool EmitData = true,
bool UseExtraChecksum = false);
// Insert AddressSanitizer (address sanity checking) instrumentation
-ModulePass *createAddressSanitizerPass();
+FunctionPass *createAddressSanitizerPass();
// Insert ThreadSanitizer (race detection) instrumentation
FunctionPass *createThreadSanitizerPass();
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar.h b/contrib/llvm/include/llvm/Transforms/Scalar.h
index 3dce6fe..a5d8eed 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar.h
@@ -70,6 +70,12 @@ FunctionPass *createAggressiveDCEPass();
//===----------------------------------------------------------------------===//
//
+// SROA - Replace aggregates or pieces of aggregates with scalar SSA values.
+//
+FunctionPass *createSROAPass(bool RequiresDomTree = true);
+
+//===----------------------------------------------------------------------===//
+//
// ScalarReplAggregates - Break up alloca's of aggregates into multiple allocas
// if possible.
//
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h b/contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h
index 90485eb..7d67283 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h
@@ -19,6 +19,7 @@
#ifndef LLVM_TRANSFORMS_UTILS_ADDRMODEMATCHER_H
#define LLVM_TRANSFORMS_UTILS_ADDRMODEMATCHER_H
+#include "llvm/AddressingMode.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Target/TargetLowering.h"
@@ -33,7 +34,7 @@ class raw_ostream;
/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
-struct ExtAddrMode : public TargetLowering::AddrMode {
+struct ExtAddrMode : public AddrMode {
Value *BaseReg;
Value *ScaledReg;
ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 8a939cc..b810f1a 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -25,8 +25,11 @@ namespace llvm {
class AliasAnalysis;
class Instruction;
+class MDNode;
class Pass;
class ReturnInst;
+class TargetLibraryInfo;
+class TerminatorInst;
/// DeleteDeadBlock - Delete the specified block, which must have no
/// predecessors.
@@ -44,7 +47,7 @@ void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = 0);
/// a result. This includes tracing the def-use list from the PHI to see if
/// it is ultimately unused or if it reaches an unused cycle. Return true
/// if any PHIs were deleted.
-bool DeleteDeadPHIs(BasicBlock *BB);
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = 0);
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
@@ -202,6 +205,29 @@ void SplitLandingPadPredecessors(BasicBlock *OrigBB,ArrayRef<BasicBlock*> Preds,
ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
BasicBlock *Pred);
+/// SplitBlockAndInsertIfThen - Split the containing block at the
+/// specified instruction - everything before and including Cmp stays
+/// in the old basic block, and everything after Cmp is moved to a
+/// new block. The two blocks are connected by a conditional branch
+/// (with value of Cmp being the condition).
+/// Before:
+/// Head
+/// Cmp
+/// Tail
+/// After:
+/// Head
+/// Cmp
+/// if (Cmp)
+/// ThenBlock
+/// Tail
+///
+/// If Unreachable is true, then ThenBlock ends with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns ThenBlock's terminator.
+TerminatorInst *SplitBlockAndInsertIfThen(Instruction *Cmp,
+ bool Unreachable, MDNode *BranchWeights = 0);
+
} // End llvm namespace
#endif
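
A typical instrumentation-style use of the new helper, splitting around a comparison and then emitting IR into the then-block (the caller and the emitted code are hypothetical):

    #include "llvm/IRBuilder.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    using namespace llvm;

    // Sketch: guard slow-path IR behind Cmp; execution falls through to Tail.
    void guardWithCheck(Instruction *Cmp) {
      TerminatorInst *Term =
          SplitBlockAndInsertIfThen(Cmp, /*Unreachable=*/false);
      IRBuilder<> B(Term);  // insert before ThenBlock's branch back to Tail
      // ... emit the guarded IR through B here ...
    }
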
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
index a6e41f0..ab9fc47 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -19,7 +19,7 @@
namespace llvm {
class Value;
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
/// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
@@ -28,52 +28,52 @@ namespace llvm {
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, and the
/// return value has 'intptr_t' type.
- Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, MaxLen must
/// be of size_t type, and the return value has 'intptr_t' type.
Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitStrChr - Emit a call to the strchr function to the builder, for the
/// specified pointer and character. Ptr is required to be some pointer type,
/// and the return value has 'i8*' type.
- Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strcpy");
/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments and length.
Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strncpy");
/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
- IRBuilder<> &B, const TargetData *TD,
+ IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitMemCmp - Emit a call to the memcmp function.
Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
/// (e.g. 'floor'). This function is known to take a single argument of type matching
@@ -85,28 +85,28 @@ namespace llvm {
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
- Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer.
- Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an i32, and File is a pointer to FILE.
Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitFPutS - Emit a call to the puts function. Str is required to be a
/// pointer and File is a pointer to FILE.
- Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// SimplifyFortifiedLibCalls - Helper class for folding checked library
/// calls (e.g. __strcpy_chk) into their unchecked counterparts.
@@ -118,7 +118,7 @@ namespace llvm {
bool isString) const = 0;
public:
virtual ~SimplifyFortifiedLibCalls();
- bool fold(CallInst *CI, const TargetData *TD, const TargetLibraryInfo *TLI);
+ bool fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI);
};
}
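
All of the emitters above now take the renamed DataLayout; a hedged call-site sketch (TD and TLI are assumed to come from the enclosing pass):

    #include "llvm/IRBuilder.h"
    #include "llvm/Transforms/Utils/BuildLibCalls.h"
    using namespace llvm;

    // Sketch: emit strlen(Ptr); the result has intptr_t type per the contract.
    Value *emitLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
                   const TargetLibraryInfo *TLI) {
      return EmitStrLen(Ptr, B, TD, TLI);
    }
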
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h b/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
new file mode 100644
index 0000000..ac8af12
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -0,0 +1,33 @@
+//===- llvm/Transforms/Utils/BypassSlowDivision.h --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an optimization for div and rem on architectures that
+// execute short instructions significantly faster than longer instructions.
+// For example, on Intel Atom 32-bit divides are slow enough that during
+// runtime it is profitable to check the value of the operands, and if they are
+// positive and less than 256 use an unsigned 8-bit divide.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+#define TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+
+#include "llvm/Function.h"
+
+namespace llvm {
+
+/// This optimization identifies DIV instructions that can be
+/// profitably bypassed and carried out with a shorter, faster divide.
+bool bypassSlowDivision(Function &F,
+ Function::iterator &I,
+ const DenseMap<unsigned int, unsigned int> &BypassWidth);
+
+} // End llvm namespace
+
+#endif
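
BypassWidth maps a slow divide width to a narrower width worth probing at run time; a sketch matching the Atom example from the header comment (the driver function is hypothetical):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/Function.h"
    #include "llvm/Transforms/Utils/BypassSlowDivision.h"
    using namespace llvm;

    // Sketch: where operands fit in 8 bits, replace 32-bit div/rem at run
    // time with an unsigned 8-bit divide.
    bool bypassInFunction(Function &F) {
      DenseMap<unsigned int, unsigned int> BypassWidth;
      BypassWidth[32] = 8;
      bool Changed = false;
      // Re-evaluate F.end(): the helper may insert new blocks behind I.
      for (Function::iterator I = F.begin(); I != F.end(); ++I)
        Changed |= bypassSlowDivision(F, I, BypassWidth);
      return Changed;
    }
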
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
index b7b5d29..1780025 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -39,7 +39,7 @@ class ReturnInst;
class CallSite;
class Trace;
class CallGraph;
-class TargetData;
+class DataLayout;
class Loop;
class LoopInfo;
class AllocaInst;
@@ -116,13 +116,6 @@ Function *CloneFunction(const Function *F,
bool ModuleLevelChanges,
ClonedCodeInfo *CodeInfo = 0);
-/// CloneFunction - Version of the function that doesn't need the VMap.
-///
-inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
- ValueToValueMapTy VMap;
- return CloneFunction(F, VMap, CodeInfo);
-}
-
/// Clone OldFunc into NewFunc, transforming the old arguments into references
/// to VMap values. Note that if NewFunc already has basic blocks, the ones
/// cloned into it will be added to the end of the function. This function
@@ -157,7 +150,7 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
Instruction *TheCall = 0);
@@ -165,13 +158,13 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// InlineFunction call, and records the auxiliary results produced by it.
class InlineFunctionInfo {
public:
- explicit InlineFunctionInfo(CallGraph *cg = 0, const TargetData *td = 0)
+ explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *td = 0)
: CG(cg), TD(td) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
- const TargetData *TD;
+ const DataLayout *TD;
/// StaticAllocas - InlineFunction fills this in with all static allocas that
/// get copied into the caller.
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/IntegerDivision.h b/contrib/llvm/include/llvm/Transforms/Utils/IntegerDivision.h
new file mode 100644
index 0000000..cecc807
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/IntegerDivision.h
@@ -0,0 +1,48 @@
+//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of 32-bit integer division for targets
+// that don't have native support. It's largely derived from compiler-rt's
+// implementation of __udivsi3, but hand-tuned for targets that prefer less
+// control flow.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TRANSFORMS_UTILS_INTEGERDIVISION_H
+#define TRANSFORMS_UTILS_INTEGERDIVISION_H
+
+namespace llvm {
+ class BinaryOperator;
+}
+
+namespace llvm {
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+ /// with the generated code. This currently generates code using the udiv
+ /// expansion, but future work includes generating more specialized code,
+ /// e.g. when more information about the operands is known. Currently only
+ /// implements 32-bit scalar division (due to the udiv expansion's
+ /// limitation), but future work will remove this limitation.
+ ///
+ /// @brief Replace Rem with generated code.
+ bool expandRemainder(BinaryOperator *Rem);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+ /// code. This currently generates code similar to compiler-rt's
+ /// implementations, but future work includes generating more specialized code
+ /// when more information about the operands is known. Currently only
+ /// implements 32-bit scalar division, but future work will remove this
+ /// limitation.
+ ///
+ /// @brief Replace Div with generated code.
+ bool expandDivision(BinaryOperator* Div);
+
+} // End llvm namespace
+
+#endif
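
A hedged sketch of driving the expansion over a function; divides are collected first because the expansion replaces (and erases) the original instruction:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Function.h"
    #include "llvm/Instructions.h"
    #include "llvm/Support/InstIterator.h"
    #include "llvm/Transforms/Utils/IntegerDivision.h"
    using namespace llvm;

    // Sketch: expand every 32-bit integer divide in F into inline IR.
    bool expandDivides(Function &F) {
      SmallVector<BinaryOperator *, 8> Divs;
      for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
        if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&*I))
          if ((BO->getOpcode() == Instruction::UDiv ||
               BO->getOpcode() == Instruction::SDiv) &&
              BO->getType()->isIntegerTy(32))  // 32-bit only at this revision
            Divs.push_back(BO);
      bool Changed = false;
      for (unsigned i = 0, e = Divs.size(); i != e; ++i)
        Changed |= expandDivision(Divs[i]);
      return Changed;
    }
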
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Local.h b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
index 495eab7..be3029e 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
@@ -18,7 +18,7 @@
#include "llvm/IRBuilder.h"
#include "llvm/Operator.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
namespace llvm {
@@ -35,7 +35,9 @@ class Pass;
class PHINode;
class AllocaInst;
class ConstantExpr;
-class TargetData;
+class DataLayout;
+class TargetLibraryInfo;
+class TargetTransformInfo;
class DIBuilder;
template<typename T> class SmallVectorImpl;
@@ -51,7 +53,8 @@ template<typename T> class SmallVectorImpl;
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
-bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false);
+bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
+ const TargetLibraryInfo *TLI = 0);
//===----------------------------------------------------------------------===//
// Local dead code elimination.
@@ -60,20 +63,21 @@ bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false);
/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
-bool isInstructionTriviallyDead(Instruction *I);
+bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=0);
/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
-bool RecursivelyDeleteTriviallyDeadInstructions(Value *V);
+bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
+ const TargetLibraryInfo *TLI=0);
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
-bool RecursivelyDeleteDeadPHINode(PHINode *PN);
+bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=0);
/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
@@ -81,7 +85,8 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN);
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
-bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0);
+bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0,
+ const TargetLibraryInfo *TLI = 0);
//===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
@@ -99,7 +104,7 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0);
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
- TargetData *TD = 0);
+ DataLayout *TD = 0);
/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
@@ -130,7 +135,8 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// of the CFG. It returns true if a modification was made, possibly deleting
/// the basic block that was pointed to.
///
-bool SimplifyCFG(BasicBlock *BB, const TargetData *TD = 0);
+bool SimplifyCFG(BasicBlock *BB, const DataLayout *TD = 0,
+ const TargetTransformInfo *TTI = 0);
/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
/// and if a predecessor branches to us and one of our successors, fold the
@@ -158,10 +164,10 @@ AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = 0);
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
- const TargetData *TD = 0);
+ const DataLayout *TD = 0);
/// getKnownAlignment - Try to infer an alignment for the specified pointer.
-static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) {
+static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
return getOrEnforceKnownAlignment(V, 0, TD);
}
@@ -171,7 +177,7 @@ static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) {
/// When NoAssumptions is true, no assumptions about index computation not
/// overflowing is made.
template<typename IRBuilderTy>
-Value *EmitGEPOffset(IRBuilderTy *Builder, const TargetData &TD, User *GEP,
+Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
bool NoAssumptions = false) {
gep_type_iterator GTI = gep_type_begin(GEP);
Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
index 4c82149..db65a47 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -109,8 +109,8 @@ public:
private:
Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
- void operator=(const SSAUpdater&); // DO NOT IMPLEMENT
- SSAUpdater(const SSAUpdater&); // DO NOT IMPLEMENT
+ void operator=(const SSAUpdater&) LLVM_DELETED_FUNCTION;
+ SSAUpdater(const SSAUpdater&) LLVM_DELETED_FUNCTION;
};
/// LoadAndStorePromoter - This little helper class provides a convenient way to
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
index 2632d18..7e97e21 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -21,8 +21,6 @@
namespace llvm {
-extern cl::opt<bool> DisableIVRewrite;
-
class CastInst;
class IVUsers;
class Loop;
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
new file mode 100644
index 0000000..fde452b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -0,0 +1,52 @@
+//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+
+namespace llvm {
+ class Value;
+ class CallInst;
+ class DataLayout;
+ class Instruction;
+ class TargetLibraryInfo;
+ class LibCallSimplifierImpl;
+
+ /// LibCallSimplifier - This class implements a collection of optimizations
+ /// that replace well-formed calls to library functions with a more optimal
+ /// form. For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
+ class LibCallSimplifier {
+ /// Impl - A pointer to the actual implementation of the library call
+ /// simplifier.
+ LibCallSimplifierImpl *Impl;
+ public:
+ LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI);
+ virtual ~LibCallSimplifier();
+
+ /// optimizeCall - Take the given call instruction and return a more
+ /// optimal value to replace the instruction with, or 0 if a more
+ /// optimal form can't be found. Note that the returned value may
+ /// be equal to the instruction being optimized. In this case all
+ /// other instructions that use the given instruction were modified
+ /// and the given instruction is dead.
+ Value *optimizeCall(CallInst *CI);
+
+ /// replaceAllUsesWith - This method is used when the library call
+ /// simplifier needs to replace instructions other than the library
+ /// call being modified.
+ virtual void replaceAllUsesWith(Instruction *I, Value *With) const;
+ };
+} // End llvm namespace
+
+#endif
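
A hedged sketch of the intended call pattern, mirroring an instruction-combining loop (caller supplies TD and TLI):

    #include "llvm/Instructions.h"
    #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
    using namespace llvm;

    // Sketch: try to fold one library call per the contract above.
    bool trySimplify(CallInst *CI, const DataLayout *TD,
                     const TargetLibraryInfo *TLI) {
      LibCallSimplifier Simplifier(TD, TLI);
      Value *V = Simplifier.optimizeCall(CI);
      if (!V)
        return false;
      // V may equal CI, in which case CI's users were already rewritten
      // and CI itself is dead; only RAUW when the values differ.
      if (V != CI)
        CI->replaceAllUsesWith(V);
      CI->eraseFromParent();
      return true;
    }
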
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h b/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
index 8594707..5390c5e 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
@@ -25,7 +25,7 @@ namespace llvm {
/// ValueMapTypeRemapper - This is a class that can be implemented by clients
/// to remap types when cloning constants and instructions.
class ValueMapTypeRemapper {
- virtual void Anchor(); // Out of line method.
+ virtual void anchor(); // Out of line method.
public:
virtual ~ValueMapTypeRemapper() {}
diff --git a/contrib/llvm/include/llvm/Transforms/Vectorize.h b/contrib/llvm/include/llvm/Transforms/Vectorize.h
index 1e49a9c..41e53a8 100644
--- a/contrib/llvm/include/llvm/Transforms/Vectorize.h
+++ b/contrib/llvm/include/llvm/Transforms/Vectorize.h
@@ -107,6 +107,12 @@ BasicBlockPass *
createBBVectorizePass(const VectorizeConfig &C = VectorizeConfig());
//===----------------------------------------------------------------------===//
+//
+// LoopVectorize - Create a loop vectorization pass.
+//
+Pass * createLoopVectorizePass();
+
+//===----------------------------------------------------------------------===//
/// @brief Vectorize the BasicBlock.
///
/// @param BB The BasicBlock to be vectorized
diff --git a/contrib/llvm/include/llvm/Type.h b/contrib/llvm/include/llvm/Type.h
index 185258d..def4575 100644
--- a/contrib/llvm/include/llvm/Type.h
+++ b/contrib/llvm/include/llvm/Type.h
@@ -153,7 +153,7 @@ public:
/// isPPC_FP128Ty - Return true if this is powerpc long double.
bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
- /// isFloatingPointTy - Return true if this is one of the five floating point
+ /// isFloatingPointTy - Return true if this is one of the six floating point
/// types
bool isFloatingPointTy() const {
return getTypeID() == HalfTyID || getTypeID() == FloatTyID ||
@@ -167,7 +167,7 @@ public:
/// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP.
///
- bool isFPOrFPVectorTy() const;
+ bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
/// isLabelTy - Return true if this is 'label'.
bool isLabelTy() const { return getTypeID() == LabelTyID; }
@@ -185,7 +185,7 @@ public:
/// isIntOrIntVectorTy - Return true if this is an integer type or a vector of
/// integer types.
///
- bool isIntOrIntVectorTy() const;
+ bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
/// isFunctionTy - True if this is an instance of FunctionType.
///
@@ -203,6 +203,11 @@ public:
///
bool isPointerTy() const { return getTypeID() == PointerTyID; }
+ /// isPtrOrPtrVectorTy - Return true if this is a pointer type or a vector of
+ /// pointer types.
+ ///
+ bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
+
/// isVectorTy - True if this is an instance of VectorType.
///
bool isVectorTy() const { return getTypeID() == VectorTyID; }
@@ -252,7 +257,7 @@ public:
/// isSized - Return true if it makes sense to take the size of this type. To
/// get the actual size for a particular target, it is reasonable to use the
- /// TargetData subsystem to do this.
+ /// DataLayout subsystem to do this.
///
bool isSized() const {
// If it's a primitive, it is always sized.
@@ -276,7 +281,7 @@ public:
///
/// Note that this may not reflect the size of memory allocated for an
/// instance of the type or the number of bytes that are written when an
- /// instance of the type is stored to memory. The TargetData class provides
+ /// instance of the type is stored to memory. The DataLayout class provides
/// additional query functions to provide this information.
///
unsigned getPrimitiveSizeInBits() const;
@@ -293,6 +298,7 @@ public:
/// getScalarType - If this is a vector type, return the element type,
/// otherwise return 'this'.
+ const Type *getScalarType() const;
Type *getScalarType();
//===--------------------------------------------------------------------===//
@@ -340,8 +346,10 @@ public:
unsigned getVectorNumElements() const;
Type *getVectorElementType() const { return getSequentialElementType(); }
- unsigned getPointerAddressSpace() const;
Type *getPointerElementType() const { return getSequentialElementType(); }
+
+ /// \brief Get the address space of this pointer or pointer vector type.
+ unsigned getPointerAddressSpace() const;
//===--------------------------------------------------------------------===//
// Static members exported by the Type class itself. Useful for getting
@@ -389,9 +397,6 @@ public:
static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Type *) { return true; }
-
/// getPointerTo - Return a pointer to the current type. This is equivalent
/// to PointerType::get(Foo, AddrSpace).
PointerType *getPointerTo(unsigned AddrSpace = 0);
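
The new scalar-aware predicates turn the common "pointer or vector of pointers" test into one call; a short sketch:

    #include "llvm/Type.h"
    using namespace llvm;

    // Sketch: address-space check that handles both i8* and <4 x i8*>.
    bool isDefaultAddrSpacePtr(Type *Ty) {
      return Ty->isPtrOrPtrVectorTy() && Ty->getPointerAddressSpace() == 0;
    }
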
diff --git a/contrib/llvm/include/llvm/Use.h b/contrib/llvm/include/llvm/Use.h
index a496325..8080445 100644
--- a/contrib/llvm/include/llvm/Use.h
+++ b/contrib/llvm/include/llvm/Use.h
@@ -26,6 +26,7 @@
#define LLVM_USE_H
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Compiler.h"
#include <cstddef>
#include <iterator>
@@ -66,7 +67,7 @@ public:
private:
/// Copy ctor - do not implement
- Use(const Use &U);
+ Use(const Use &U) LLVM_DELETED_FUNCTION;
/// Destructor - Only for zap()
~Use() {
diff --git a/contrib/llvm/include/llvm/User.h b/contrib/llvm/include/llvm/User.h
index 5d5460c..df303d0 100644
--- a/contrib/llvm/include/llvm/User.h
+++ b/contrib/llvm/include/llvm/User.h
@@ -31,8 +31,8 @@ template <class>
struct OperandTraits;
class User : public Value {
- User(const User &); // Do not implement
- void *operator new(size_t); // Do not implement
+ User(const User &) LLVM_DELETED_FUNCTION;
+ void *operator new(size_t) LLVM_DELETED_FUNCTION;
template <unsigned>
friend struct HungoffOperandTraits;
virtual void anchor();
@@ -104,7 +104,7 @@ public:
assert(i < NumOperands && "getOperandUse() out of range!");
return OperandList[i];
}
-
+
unsigned getNumOperands() const { return NumOperands; }
// ---------------------------------------------------------------------------
@@ -118,6 +118,45 @@ public:
inline op_iterator op_end() { return OperandList+NumOperands; }
inline const_op_iterator op_end() const { return OperandList+NumOperands; }
+ /// Convenience iterator for directly iterating over the Values in the
+ /// OperandList
+ class value_op_iterator : public std::iterator<std::forward_iterator_tag,
+ Value*> {
+ op_iterator OI;
+ public:
+ explicit value_op_iterator(Use *U) : OI(U) {}
+
+ bool operator==(const value_op_iterator &x) const {
+ return OI == x.OI;
+ }
+ bool operator!=(const value_op_iterator &x) const {
+ return !operator==(x);
+ }
+
+ /// Iterator traversal: forward iteration only
+ value_op_iterator &operator++() { // Preincrement
+ ++OI;
+ return *this;
+ }
+ value_op_iterator operator++(int) { // Postincrement
+ value_op_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ /// Retrieve a pointer to the current Value.
+ Value *operator*() const {
+ return *OI;
+ }
+
+ Value *operator->() const { return operator*(); }
+ };
+
+ inline value_op_iterator value_op_begin() {
+ return value_op_iterator(op_begin());
+ }
+ inline value_op_iterator value_op_end() {
+ return value_op_iterator(op_end());
+ }
+
// dropAllReferences() - This function is in charge of "letting go" of all
// objects that this User refers to. This allows one to
// 'delete' a whole class at a time, even though there may be circular
@@ -137,7 +176,6 @@ public:
void replaceUsesOfWith(Value *From, Value *To);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const User *) { return true; }
static inline bool classof(const Value *V) {
return isa<Instruction>(V) || isa<Constant>(V);
}
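
The new iterator removes the Use-to-Value indirection when only the operand Values matter; a short sketch:

    #include "llvm/User.h"
    using namespace llvm;

    // Sketch: count how many of U's operands are exactly the value V.
    unsigned countOperandsEqualTo(User *U, Value *V) {
      unsigned N = 0;
      for (User::value_op_iterator I = U->value_op_begin(),
                                   E = U->value_op_end(); I != E; ++I)
        if (*I == V)
          ++N;
      return N;
    }
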
diff --git a/contrib/llvm/include/llvm/Value.h b/contrib/llvm/include/llvm/Value.h
index a82ac45..5b19435 100644
--- a/contrib/llvm/include/llvm/Value.h
+++ b/contrib/llvm/include/llvm/Value.h
@@ -16,6 +16,7 @@
#include "llvm/Use.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -80,8 +81,8 @@ private:
friend class ValueHandleBase;
ValueName *Name;
- void operator=(const Value &); // Do not implement
- Value(const Value &); // Do not implement
+ void operator=(const Value &) LLVM_DELETED_FUNCTION;
+ Value(const Value &) LLVM_DELETED_FUNCTION;
protected:
/// printCustom - Value subclasses can override this to implement custom
@@ -120,7 +121,7 @@ public:
/// setName() - Change the name of the value, choosing a new unique name if
/// the provided name is taken.
///
- /// \arg Name - The new name; or "" if the value's name should be removed.
+ /// \param Name The new name; or "" if the value's name should be removed.
void setName(const Twine &Name);
@@ -256,11 +257,6 @@ public:
/// hasValueHandle - Return true if there is a value handle associated with
/// this value.
bool hasValueHandle() const { return HasValueHandle; }
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Value *) {
- return true; // Values are always values.
- }
/// stripPointerCasts - This method strips off any unneeded pointer casts and
/// all-zero GEPs from the specified value, returning the original uncasted
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
index 3b6aab1..752edd5 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -35,7 +35,8 @@
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
// Register the AliasAnalysis interface, providing a nice name to refer to.
@@ -451,7 +452,8 @@ AliasAnalysis::~AliasAnalysis() {}
/// AliasAnalysis interface before any other methods are called.
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
- TD = P->getAnalysisIfAvailable<TargetData>();
+ TD = P->getAnalysisIfAvailable<DataLayout>();
+ TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>();
}
@@ -461,7 +463,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>(); // All AA's chain
}
-/// getTypeStoreSize - Return the TargetData store size for the given type,
+/// getTypeStoreSize - Return the DataLayout store size for the given type,
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
@@ -501,7 +503,7 @@ bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
bool llvm::isNoAliasCall(const Value *V) {
if (isa<CallInst>(V) || isa<InvokeInst>(V))
return ImmutableCallSite(cast<Instruction>(V))
- .paramHasAttr(0, Attribute::NoAlias);
+ .paramHasAttr(0, Attributes::NoAlias);
return false;
}
diff --git a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
index 92e8906..388c755 100644
--- a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -18,7 +18,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -550,7 +550,7 @@ void AliasSetTracker::copyValue(Value *From, Value *To) {
//===----------------------------------------------------------------------===//
void AliasSet::print(raw_ostream &OS) const {
- OS << " AliasSet[" << (void*)this << ", " << RefCount << "] ";
+ OS << " AliasSet[" << (const void*)this << ", " << RefCount << "] ";
OS << (AliasTy == MustAlias ? "must" : "may") << " alias, ";
switch (AccessTy) {
case NoModRef: OS << "No access "; break;
@@ -590,8 +590,10 @@ void AliasSetTracker::print(raw_ostream &OS) const {
OS << "\n";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void AliasSet::dump() const { print(dbgs()); }
void AliasSetTracker::dump() const { print(dbgs()); }
+#endif
//===----------------------------------------------------------------------===//
// ASTCallbackVH Class Implementation
diff --git a/contrib/llvm/lib/Analysis/Analysis.cpp b/contrib/llvm/lib/Analysis/Analysis.cpp
index 0ba6af9..9dc81a6 100644
--- a/contrib/llvm/lib/Analysis/Analysis.cpp
+++ b/contrib/llvm/lib/Analysis/Analysis.cpp
@@ -26,11 +26,13 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeBasicAliasAnalysisPass(Registry);
initializeBlockFrequencyInfoPass(Registry);
initializeBranchProbabilityInfoPass(Registry);
+ initializeCostModelAnalysisPass(Registry);
initializeCFGViewerPass(Registry);
initializeCFGPrinterPass(Registry);
initializeCFGOnlyViewerPass(Registry);
initializeCFGOnlyPrinterPass(Registry);
initializePrintDbgInfoPass(Registry);
+ initializeDependenceAnalysisPass(Registry);
initializeDominanceFrontierPass(Registry);
initializeDomViewerPass(Registry);
initializeDomPrinterPass(Registry);
@@ -46,7 +48,6 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeLazyValueInfoPass(Registry);
initializeLibCallAliasAnalysisPass(Registry);
initializeLintPass(Registry);
- initializeLoopDependenceAnalysisPass(Registry);
initializeLoopInfoPass(Registry);
initializeMemDepPrinterPass(Registry);
initializeMemoryDependenceAnalysisPass(Registry);
@@ -61,6 +62,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializePathProfileLoaderPassPass(Registry);
initializeProfileVerifierPassPass(Registry);
initializePathProfileVerifierPass(Registry);
+ initializeProfileMetadataLoaderPassPass(Registry);
initializeRegionInfoPass(Registry);
initializeRegionViewerPass(Registry);
initializeRegionPrinterPass(Registry);
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 1d028c2..4bb93ee 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -29,7 +29,7 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -58,12 +58,12 @@ static bool isNonEscapingLocalObject(const Value *V) {
// then it has not escaped before entering the function. Check if it escapes
// inside the function.
if (const Argument *A = dyn_cast<Argument>(V))
- if (A->hasByValAttr() || A->hasNoAliasAttr()) {
- // Don't bother analyzing arguments already known not to escape.
- if (A->hasNoCaptureAttr())
- return true;
+ if (A->hasByValAttr() || A->hasNoAliasAttr())
+ // Note that even if the argument is marked nocapture we still need to check
+ // for copies made inside the function. The nocapture attribute only
+ // specifies that there are no copies made that outlive the function.
return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
- }
+
return false;
}
@@ -84,10 +84,11 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const TargetData &TD,
+static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
+ const TargetLibraryInfo &TLI,
bool RoundToAlign = false) {
uint64_t Size;
- if (getObjectSize(V, Size, &TD, RoundToAlign))
+ if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -95,10 +96,11 @@ static uint64_t getObjectSize(const Value *V, const TargetData &TD,
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
- const TargetData &TD) {
+ const DataLayout &TD,
+ const TargetLibraryInfo &TLI) {
// This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment.
- uint64_t ObjectSize = getObjectSize(V, TD, /*RoundToAlign*/true);
+ uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
@@ -106,8 +108,8 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
- const TargetData &TD) {
- uint64_t ObjectSize = getObjectSize(V, TD);
+ const DataLayout &TD, const TargetLibraryInfo &TLI) {
+ uint64_t ObjectSize = getObjectSize(V, TD, TLI);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}
@@ -126,6 +128,15 @@ namespace {
const Value *V;
ExtensionKind Extension;
int64_t Scale;
+
+ bool operator==(const VariableGEPIndex &Other) const {
+ return V == Other.V && Extension == Other.Extension &&
+ Scale == Other.Scale;
+ }
+
+ bool operator!=(const VariableGEPIndex &Other) const {
+ return !operator==(Other);
+ }
};
}
@@ -140,7 +151,7 @@ namespace {
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
ExtensionKind &Extension,
- const TargetData &TD, unsigned Depth) {
+ const DataLayout &TD, unsigned Depth) {
assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth.
@@ -215,14 +226,14 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
-/// When TargetData is around, this function is capable of analyzing everything
+/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices,
- const TargetData *TD) {
+ const DataLayout *TD) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = 6;
@@ -266,7 +277,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
->getElementType()->isSized())
return V;
- // If we are lacking TargetData information, we can't compute the offets of
+ // If we are lacking DataLayout information, we can't compute the offsets of
// elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs.
if (TD == 0) {
@@ -417,13 +428,7 @@ namespace {
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
static char ID; // Class identification, replacement for typeinfo
- BasicAliasAnalysis() : ImmutablePass(ID),
- // AliasCache rarely has more than 1 or 2 elements,
- // so start it off fairly small so that clear()
- // doesn't have to tromp through 64 (the default)
- // elements on each alias query. This really wants
- // something like a SmallDenseMap.
- AliasCache(8) {
+ BasicAliasAnalysis() : ImmutablePass(ID) {
initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
@@ -443,7 +448,11 @@ namespace {
"BasicAliasAnalysis doesn't support interprocedural queries.");
AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
LocB.Ptr, LocB.Size, LocB.TBAATag);
- AliasCache.clear();
+ // AliasCache rarely has more than 1 or 2 elements, so always use
+ // shrink_and_clear to return it quickly to the inline capacity of the
+ // SmallDenseMap if it ever grows larger.
+ // FIXME: This should really be shrink_to_inline_capacity_and_clear().
+ AliasCache.shrink_and_clear();
return Alias;
}
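For context, a hedged sketch of the container behavior the new code depends on: SmallDenseMap keeps a fixed number of entries in inline storage, and shrink_and_clear() releases excess capacity rather than only erasing entries (illustrative values, not part of the patch):

    #include "llvm/ADT/DenseMap.h"

    void demo() {
      // Up to 8 entries live in inline storage; beyond that it heap-allocates.
      llvm::SmallDenseMap<int, int, 8> Cache;
      for (int i = 0; i < 100; ++i)
        Cache[i] = i;             // grows past the inline capacity
      Cache.shrink_and_clear();   // release excess capacity, not just entries
                                  // (see the FIXME above about how far it shrinks)
    }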
@@ -481,7 +490,7 @@ namespace {
private:
// AliasCache - Track alias queries to guard against recursion.
typedef std::pair<Location, Location> LocPair;
- typedef DenseMap<LocPair, AliasResult> AliasCacheTy;
+ typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
AliasCacheTy AliasCache;
// Visited - Track instructions visited by pointsToConstantMemory.
@@ -490,6 +499,7 @@ namespace {
// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
// instruction against another.
AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+ const MDNode *V1TBAAInfo,
const Value *V2, uint64_t V2Size,
const MDNode *V2TBAAInfo,
const Value *UnderlyingV1, const Value *UnderlyingV2);
@@ -807,6 +817,21 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}
+static bool areVarIndicesEqual(SmallVector<VariableGEPIndex, 4> &Indices1,
+ SmallVector<VariableGEPIndex, 4> &Indices2) {
+ unsigned Size1 = Indices1.size();
+ unsigned Size2 = Indices2.size();
+
+ if (Size1 != Size2)
+ return false;
+
+ for (unsigned I = 0; I != Size1; ++I)
+ if (Indices1[I] != Indices2[I])
+ return false;
+
+ return true;
+}
+
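Since VariableGEPIndex now defines operator==, the helper above is equivalent to a size check plus std::equal. A hedged alternative sketch, not part of the patch:

    #include <algorithm>

    static bool areVarIndicesEqualAlt(const SmallVector<VariableGEPIndex, 4> &A,
                                      const SmallVector<VariableGEPIndex, 4> &B) {
      // std::equal requires equal lengths to be checked first; the element
      // comparison uses VariableGEPIndex::operator== from this patch.
      return A.size() == B.size() && std::equal(A.begin(), A.end(), B.begin());
    }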
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
@@ -814,6 +839,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
///
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
+ const MDNode *V1TBAAInfo,
const Value *V2, uint64_t V2Size,
const MDNode *V2TBAAInfo,
const Value *UnderlyingV1,
@@ -821,9 +847,41 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
int64_t GEP1BaseOffset;
SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;
- // If we have two gep instructions with must-alias'ing base pointers, figure
- // out if the indexes to the GEP tell us anything about the derived pointer.
+ // If we have two gep instructions with must-aliasing or non-aliasing base
+ // pointers, figure out if the indexes to the GEP tell us anything about the
+ // derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
+ // Check for geps of non-aliasing underlying pointers where the offsets are
+ // identical.
+ if (V1Size == V2Size) {
+ // Do the base pointers alias, assuming type and size?
+ AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
+ V1TBAAInfo, UnderlyingV2,
+ V2Size, V2TBAAInfo);
+ if (PreciseBaseAlias == NoAlias) {
+ // See if the computed offset from the common pointer tells us about the
+ // relation of the resulting pointer.
+ int64_t GEP2BaseOffset;
+ SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
+ const Value *GEP2BasePtr =
+ DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+ const Value *GEP1BasePtr =
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
+ if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
+ assert(TD == 0 &&
+ "DecomposeGEPExpression and GetUnderlyingObject disagree!");
+ return MayAlias;
+ }
+ // Same offsets.
+ if (GEP1BaseOffset == GEP2BaseOffset &&
+ areVarIndicesEqual(GEP1VariableIndices, GEP2VariableIndices))
+ return NoAlias;
+ GEP1VariableIndices.clear();
+ }
+ }
+
// Do the base pointers alias?
AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
UnderlyingV2, UnknownSize, 0);
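The effect of the new early check, in a hedged source-level example (invented code): when the bases are already NoAlias and both GEPs decompose to the same offset, the derived pointers cannot alias either.

    struct Pair { int a, b; };

    int f(Pair *__restrict x, Pair *__restrict y) {
      int *px = &x->b;   // gep x, 0, 1
      int *py = &y->b;   // gep y, 0, 1 -- identical decomposed offset
      *px = 1;           // NoAlias bases + equal offsets => NoAlias accesses
      *py = 2;
      return *px;        // can now be folded to 1
    }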
@@ -843,9 +901,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
- // If DecomposeGEPExpression isn't able to look all the way through the
- // addressing operation, we must not have TD and this is too complex for us
- // to handle without it.
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
@@ -879,9 +936,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
- // If DecomposeGEPExpression isn't able to look all the way through the
- // addressing operation, we must not have TD and this is too complex for us
- // to handle without it.
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1) {
assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
@@ -1004,12 +1060,42 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
// on corresponding edges.
if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
if (PN2->getParent() == PN->getParent()) {
+ LocPair Locs(Location(PN, PNSize, PNTBAAInfo),
+ Location(V2, V2Size, V2TBAAInfo));
+ if (PN > V2)
+ std::swap(Locs.first, Locs.second);
+
AliasResult Alias =
aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo,
PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
V2Size, V2TBAAInfo);
if (Alias == MayAlias)
return MayAlias;
+
+ // If the first incoming values of the PHI nodes are NoAlias and the
+ // remaining inputs are the PHI nodes themselves (through some amount of
+ // recursion), this does not add any new information, so just return
+ // NoAlias.
+ // bb:
+ //   ptr = ptr2 + 1
+ // loop:
+ //   ptr_phi = phi [ ptr, bb ], [ ptr_plus_one, loop ]
+ //   ptr2_phi = phi [ ptr2, bb ], [ ptr2_plus_one, loop ]
+ //   ...
+ //   ptr_plus_one = gep ptr_phi, 1
+ //   ptr2_plus_one = gep ptr2_phi, 1
+ // We assume for the recursion that the phis (ptr_phi, ptr2_phi) do
+ // not alias each other.
+ bool ArePhisAssumedNoAlias = false;
+ AliasResult OrigAliasResult = NoAlias;
+ if (Alias == NoAlias) {
+ // Pretend the phis do not alias.
+ assert(AliasCache.count(Locs) &&
+ "There must exist an entry for the phi node");
+ OrigAliasResult = AliasCache[Locs];
+ AliasCache[Locs] = NoAlias;
+ ArePhisAssumedNoAlias = true;
+ }
+
for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
AliasResult ThisAlias =
aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
@@ -1019,6 +1105,11 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
if (Alias == MayAlias)
break;
}
+
+ // Reset if speculation failed.
+ if (ArePhisAssumedNoAlias && Alias != NoAlias)
+ AliasCache[Locs] = OrigAliasResult;
+
return Alias;
}
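A source-level shape of the recursive-phi pattern handled above (hypothetical): both pointers advance in lock step, so speculatively caching NoAlias for the phi pair is self-consistent, and the cache entry breaks the recursive query cycle.

    void copy_forward(int *__restrict dst, const int *__restrict src, int n) {
      for (int i = 0; i < n; ++i) {
        // dst and src play the roles of ptr_phi and ptr2_phi: each loop
        // iteration feeds the incremented pointer back into the phi.
        *dst = *src;
        ++dst;
        ++src;
      }
    }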
@@ -1133,8 +1224,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
if (TD)
- if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) ||
- (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD)))
+ if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
@@ -1154,15 +1245,17 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
std::swap(V1, V2);
std::swap(V1Size, V2Size);
std::swap(O1, O2);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
- AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2);
+ AliasResult Result = aliasGEP(GV1, V1Size, V1TBAAInfo,
+ V2, V2Size, V2TBAAInfo, O1, O2);
if (Result != MayAlias) return AliasCache[Locs] = Result;
}
if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
@@ -1173,6 +1266,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
@@ -1184,8 +1278,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// accesses is accessing the entire object, then the accesses must
// overlap in some way.
if (TD && O1 == O2)
- if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD)) ||
- (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD)))
+ if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
return AliasCache[Locs] = PartialAlias;
AliasResult Result =
diff --git a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index b255ce6..04a6560 100644
--- a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -115,14 +115,14 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) {
return false;
}
- SmallPtrSet<BasicBlock *, 4> UnreachableEdges;
- SmallPtrSet<BasicBlock *, 4> ReachableEdges;
+ SmallVector<unsigned, 4> UnreachableEdges;
+ SmallVector<unsigned, 4> ReachableEdges;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
if (PostDominatedByUnreachable.count(*I))
- UnreachableEdges.insert(*I);
+ UnreachableEdges.push_back(I.getSuccessorIndex());
else
- ReachableEdges.insert(*I);
+ ReachableEdges.push_back(I.getSuccessorIndex());
}
// If all successors are in the set of blocks post-dominated by unreachable,
@@ -136,18 +136,19 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) {
return false;
uint32_t UnreachableWeight =
- std::max(UR_TAKEN_WEIGHT / UnreachableEdges.size(), MIN_WEIGHT);
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = UnreachableEdges.begin(),
- E = UnreachableEdges.end();
+ std::max(UR_TAKEN_WEIGHT / (unsigned)UnreachableEdges.size(), MIN_WEIGHT);
+ for (SmallVector<unsigned, 4>::iterator I = UnreachableEdges.begin(),
+ E = UnreachableEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, UnreachableWeight);
if (ReachableEdges.empty())
return true;
uint32_t ReachableWeight =
- std::max(UR_NONTAKEN_WEIGHT / ReachableEdges.size(), NORMAL_WEIGHT);
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = ReachableEdges.begin(),
- E = ReachableEdges.end();
+ std::max(UR_NONTAKEN_WEIGHT / (unsigned)ReachableEdges.size(),
+ NORMAL_WEIGHT);
+ for (SmallVector<unsigned, 4>::iterator I = ReachableEdges.begin(),
+ E = ReachableEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, ReachableWeight);
@@ -187,7 +188,7 @@ bool BranchProbabilityInfo::calcMetadataWeights(BasicBlock *BB) {
}
assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- setEdgeWeight(BB, TI->getSuccessor(i), Weights[i]);
+ setEdgeWeight(BB, i, Weights[i]);
return true;
}
@@ -211,19 +212,17 @@ bool BranchProbabilityInfo::calcPointerHeuristics(BasicBlock *BB) {
assert(CI->getOperand(1)->getType()->isPointerTy());
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
-
// p != 0 -> isProb = true
// p == 0 -> isProb = false
// p != q -> isProb = true
// p == q -> isProb = false;
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
bool isProb = CI->getPredicate() == ICmpInst::ICMP_NE;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
- setEdgeWeight(BB, Taken, PH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, PH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, TakenIdx, PH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, PH_NONTAKEN_WEIGHT);
return true;
}
@@ -234,17 +233,17 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
if (!L)
return false;
- SmallPtrSet<BasicBlock *, 8> BackEdges;
- SmallPtrSet<BasicBlock *, 8> ExitingEdges;
- SmallPtrSet<BasicBlock *, 8> InEdges; // Edges from header to the loop.
+ SmallVector<unsigned, 8> BackEdges;
+ SmallVector<unsigned, 8> ExitingEdges;
+ SmallVector<unsigned, 8> InEdges; // Edges from header to the loop.
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
if (!L->contains(*I))
- ExitingEdges.insert(*I);
+ ExitingEdges.push_back(I.getSuccessorIndex());
else if (L->getHeader() == *I)
- BackEdges.insert(*I);
+ BackEdges.push_back(I.getSuccessorIndex());
else
- InEdges.insert(*I);
+ InEdges.push_back(I.getSuccessorIndex());
}
if (uint32_t numBackEdges = BackEdges.size()) {
@@ -252,10 +251,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
if (backWeight < NORMAL_WEIGHT)
backWeight = NORMAL_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = BackEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = BackEdges.begin(),
EE = BackEdges.end(); EI != EE; ++EI) {
- BasicBlock *Back = *EI;
- setEdgeWeight(BB, Back, backWeight);
+ setEdgeWeight(BB, *EI, backWeight);
}
}
@@ -264,10 +262,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
if (inWeight < NORMAL_WEIGHT)
inWeight = NORMAL_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = InEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = InEdges.begin(),
EE = InEdges.end(); EI != EE; ++EI) {
- BasicBlock *Back = *EI;
- setEdgeWeight(BB, Back, inWeight);
+ setEdgeWeight(BB, *EI, inWeight);
}
}
@@ -276,10 +273,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
if (exitWeight < MIN_WEIGHT)
exitWeight = MIN_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = ExitingEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = ExitingEdges.begin(),
EE = ExitingEdges.end(); EI != EE; ++EI) {
- BasicBlock *Exiting = *EI;
- setEdgeWeight(BB, Exiting, exitWeight);
+ setEdgeWeight(BB, *EI, exitWeight);
}
}
@@ -335,14 +331,13 @@ bool BranchProbabilityInfo::calcZeroHeuristics(BasicBlock *BB) {
return false;
}
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
- setEdgeWeight(BB, Taken, ZH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, ZH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, TakenIdx, ZH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, ZH_NONTAKEN_WEIGHT);
return true;
}
@@ -372,14 +367,13 @@ bool BranchProbabilityInfo::calcFloatingPointHeuristics(BasicBlock *BB) {
return false;
}
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
- setEdgeWeight(BB, Taken, FPH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, FPH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, TakenIdx, FPH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, FPH_NONTAKEN_WEIGHT);
return true;
}
@@ -389,11 +383,8 @@ bool BranchProbabilityInfo::calcInvokeHeuristics(BasicBlock *BB) {
if (!II)
return false;
- BasicBlock *Normal = II->getNormalDest();
- BasicBlock *Unwind = II->getUnwindDest();
-
- setEdgeWeight(BB, Normal, IH_TAKEN_WEIGHT);
- setEdgeWeight(BB, Unwind, IH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, 0/*Index for Normal*/, IH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, 1/*Index for Unwind*/, IH_NONTAKEN_WEIGHT);
return true;
}
@@ -450,8 +441,7 @@ uint32_t BranchProbabilityInfo::getSumForBlock(const BasicBlock *BB) const {
uint32_t Sum = 0;
for (succ_const_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
- const BasicBlock *Succ = *I;
- uint32_t Weight = getEdgeWeight(BB, Succ);
+ uint32_t Weight = getEdgeWeight(BB, I.getSuccessorIndex());
uint32_t PrevSum = Sum;
Sum += Weight;
@@ -494,11 +484,13 @@ BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
return 0;
}
-// Return edge's weight. If can't find it, return DEFAULT_WEIGHT value.
+/// Get the raw edge weight for the edge. If we can't find it, return the
+/// DEFAULT_WEIGHT value. Here an edge is specified using PredBlock and an
+/// index into its successors.
uint32_t BranchProbabilityInfo::
-getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
- Edge E(Src, Dst);
- DenseMap<Edge, uint32_t>::const_iterator I = Weights.find(E);
+getEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors) const {
+ DenseMap<Edge, uint32_t>::const_iterator I =
+ Weights.find(std::make_pair(Src, IndexInSuccessors));
if (I != Weights.end())
return I->second;
@@ -506,15 +498,43 @@ getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
return DEFAULT_WEIGHT;
}
+/// Get the raw edge weight calculated for the block pair. This returns the sum
+/// of all raw edge weights from Src to Dst.
+uint32_t BranchProbabilityInfo::
+getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
+ uint32_t Weight = 0;
+ DenseMap<Edge, uint32_t>::const_iterator MapI;
+ for (succ_const_iterator I = succ_begin(Src), E = succ_end(Src); I != E; ++I)
+ if (*I == Dst) {
+ MapI = Weights.find(std::make_pair(Src, I.getSuccessorIndex()));
+ if (MapI != Weights.end())
+ Weight += MapI->second;
+ }
+ return (Weight == 0) ? DEFAULT_WEIGHT : Weight;
+}
+
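Why weights are now keyed by successor index rather than by destination block, in a hedged example: several successor slots can point at the same block, and the (Src, Dst) query above has to sum their weights rather than lose all but one.

    int classify(int v) {
      switch (v) {   // the switch block has three successor slots
      case 0:        // successor index 0
      case 1:        // successor index 1 -- same destination block as index 0
        return 1;    // getEdgeWeight(BB, thisBlock) sums both indexed weights
      default:       // successor index 2
        return 0;
      }
    }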
+/// Set the edge weight for a given edge specified by PredBlock and an index
+/// into its successors.
void BranchProbabilityInfo::
-setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst, uint32_t Weight) {
- Weights[std::make_pair(Src, Dst)] = Weight;
+setEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors,
+ uint32_t Weight) {
+ Weights[std::make_pair(Src, IndexInSuccessors)] = Weight;
DEBUG(dbgs() << "set edge " << Src->getName() << " -> "
- << Dst->getName() << " weight to " << Weight
- << (isEdgeHot(Src, Dst) ? " [is HOT now]\n" : "\n"));
+ << IndexInSuccessors << " successor weight to "
+ << Weight << "\n");
}
+/// Get an edge's probability, relative to other out-edges from Src.
+BranchProbability BranchProbabilityInfo::
+getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const {
+ uint32_t N = getEdgeWeight(Src, IndexInSuccessors);
+ uint32_t D = getSumForBlock(Src);
+
+ return BranchProbability(N, D);
+}
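The arithmetic behind BranchProbability(N, D), as a stand-alone sketch with invented numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t N = 30;   // raw weight of one out-edge (getEdgeWeight)
      uint32_t D = 120;  // sum of all out-edge weights (getSumForBlock)
      std::printf("%.2f%%\n", 100.0 * N / D);  // prints 25.00%
    }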
+/// Get the probability of going from Src to Dst. It returns the sum of all
+/// probabilities for edges from Src to Dst.
BranchProbability BranchProbabilityInfo::
getEdgeProbability(const BasicBlock *Src, const BasicBlock *Dst) const {
diff --git a/contrib/llvm/lib/Analysis/CaptureTracking.cpp b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
index 974b906..d9c0299 100644
--- a/contrib/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
@@ -23,6 +23,8 @@ using namespace llvm;
CaptureTracker::~CaptureTracker() {}
+bool CaptureTracker::shouldExplore(Use *U) { return true; }
+
namespace {
struct SimpleCaptureTracker : public CaptureTracker {
explicit SimpleCaptureTracker(bool ReturnCaptures)
@@ -30,8 +32,6 @@ namespace {
void tooManyUses() { Captured = true; }
- bool shouldExplore(Use *U) { return true; }
-
bool captured(Use *U) {
if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
return false;
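With shouldExplore given an out-of-line default definition, a custom tracker only has to implement the remaining pure virtuals. A hedged sketch against the era's interface (hypothetical subclass, not part of the patch):

    #include "llvm/Analysis/CaptureTracking.h"
    using namespace llvm;

    namespace {
      struct SimplestTracker : public CaptureTracker {
        SimplestTracker() : Captured(false) {}
        void tooManyUses() { Captured = true; }   // give up conservatively
        bool captured(Use *U) { Captured = true; return true; }
        // shouldExplore is inherited: every use is explored by default.
        bool Captured;
      };
    }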
diff --git a/contrib/llvm/lib/Analysis/CodeMetrics.cpp b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
index acda34b..651a54b 100644
--- a/contrib/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
@@ -15,7 +15,7 @@
#include "llvm/Function.h"
#include "llvm/Support/CallSite.h"
#include "llvm/IntrinsicInst.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
@@ -54,7 +54,7 @@ bool llvm::callIsSmall(ImmutableCallSite CS) {
return false;
}
-bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
+bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
if (isa<PHINode>(I))
return true;
@@ -119,7 +119,7 @@ bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
- const TargetData *TD) {
+ const DataLayout *TD) {
++NumBlocks;
unsigned NumInstsBeforeThisBB = NumInsts;
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
@@ -189,14 +189,14 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
-void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
+void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) {
// If this function contains a call that "returns twice" (e.g., setjmp or
// _setjmp) and it isn't marked with "returns twice" itself, never inline it.
// This is a hack because we depend on the user marking their local variables
// as volatile if they are live across a setjmp call, and they probably
// won't do this in callers.
exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
- !F->hasFnAttr(Attribute::ReturnsTwice);
+ !F->getFnAttributes().hasAttribute(Attributes::ReturnsTwice);
// Look at the size of the callee.
for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index f5e619c..91a5b84 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -11,7 +11,7 @@
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
-// TargetData information. These functions cannot go in VMCore due to library
+// DataLayout information. These functions cannot go in VMCore due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
@@ -25,7 +25,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -41,11 +41,11 @@ using namespace llvm;
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
-/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
-/// TargetData. This always returns a non-null constant, but it may be a
+/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
+/// DataLayout. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
- const TargetData &TD) {
+ const DataLayout &TD) {
// Catch the obvious splat cases.
if (C->isNullValue() && !DestTy->isX86_MMXTy())
return Constant::getNullValue(DestTy);
@@ -59,9 +59,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
return ConstantExpr::getBitCast(C, DestTy);
unsigned NumSrcElts = CDV->getType()->getNumElements();
-
+
Type *SrcEltTy = CDV->getType()->getElementType();
-
+
// If the vector is a vector of floating point, convert it to vector of int
// to simplify things.
if (SrcEltTy->isFloatingPointTy()) {
@@ -72,7 +72,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
C = ConstantExpr::getBitCast(C, SrcIVTy);
CDV = cast<ConstantDataVector>(C);
}
-
+
// Now that we know that the input value is a vector of integers, just shift
// and insert them into our result.
unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
@@ -84,43 +84,43 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
else
Result |= CDV->getElementAsInteger(i);
}
-
+
return ConstantInt::get(IT, Result);
}
-
+
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
}
-
+
// If this is a bitcast from constant vector -> vector, fold it.
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
-
+
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
-
- // Otherwise, we're changing the number of elements in a vector, which
+
+ // Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
// bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
// folds to (little endian):
// <4 x i32> <i32 0, i32 0, i32 1, i32 0>
// and to (big endian):
// <4 x i32> <i32 0, i32 0, i32 0, i32 1>
-
+
// First thing is first. We only want to think about integer here, so if
// we have something in FP form, recast it as integer.
if (DstEltTy->isFloatingPointTy()) {
@@ -130,11 +130,11 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
-
+
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
}
-
+
// Okay, we know the destination is integer, if the input is FP, convert
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
@@ -148,13 +148,13 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
!isa<ConstantDataVector>(C))
return C;
}
-
+
// Now we know that the input and output vectors are both integer vectors
// of the same size, and that their #elements is not the same. Do the
// conversion here, which depends on whether the input or output has
// more elements.
bool isLittleEndian = TD.isLittleEndian();
-
+
SmallVector<Constant*, 32> Result;
if (NumDstElt < NumSrcElt) {
// Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
@@ -170,15 +170,15 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
// Zero extend the element to the right size.
Src = ConstantExpr::getZExt(Src, Elt->getType());
-
+
// Shift it to the right place, depending on endianness.
- Src = ConstantExpr::getShl(Src,
+ Src = ConstantExpr::getShl(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
-
+
// Mix it in.
Elt = ConstantExpr::getOr(Elt, Src);
}
@@ -186,30 +186,30 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
return ConstantVector::get(Result);
}
-
+
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
-
+
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
- Constant *Elt = ConstantExpr::getLShr(Src,
+ Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
+
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
-
+
return ConstantVector::get(Result);
}
@@ -218,34 +218,34 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
/// from a global, return the global and the constant. Because of
/// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
- int64_t &Offset, const TargetData &TD) {
+ int64_t &Offset, const DataLayout &TD) {
// Trivial case, constant is the global.
if ((GV = dyn_cast<GlobalValue>(C))) {
Offset = 0;
return true;
}
-
+
// Otherwise, if this isn't a constant expr, bail out.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return false;
-
+
// Look through ptr->int and ptr->ptr casts.
if (CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::BitCast)
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
-
- // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
+
+ // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
if (CE->getOpcode() == Instruction::GetElementPtr) {
// Cannot compute this if the element type of the pointer is missing size
// info.
if (!cast<PointerType>(CE->getOperand(0)->getType())
->getElementType()->isSized())
return false;
-
+
// If the base isn't a global+constant, we aren't either.
if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
return false;
-
+
// Otherwise, add any offset that our operands provide.
gep_type_iterator GTI = gep_type_begin(CE);
for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
@@ -253,7 +253,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
if (CI->isZero()) continue; // Not adding anything.
-
+
if (StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
@@ -264,7 +264,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
}
return true;
}
-
+
return false;
}
@@ -274,30 +274,33 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
/// the CurPtr buffer. TD is the target data.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
unsigned char *CurPtr, unsigned BytesLeft,
- const TargetData &TD) {
+ const DataLayout &TD) {
assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
"Out of range access");
-
+
// If this element is zero or undefined, we can just return since *CurPtr is
// zero initialized.
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
return true;
-
+
if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
if (CI->getBitWidth() > 64 ||
(CI->getBitWidth() & 7) != 0)
return false;
-
+
uint64_t Val = CI->getZExtValue();
unsigned IntBytes = unsigned(CI->getBitWidth()/8);
-
+
for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
- CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
+ int n = ByteOffset;
+ if (!TD.isLittleEndian())
+ n = IntBytes - n - 1;
+ CurPtr[i] = (unsigned char)(Val >> (n * 8));
++ByteOffset;
}
return true;
}
-
+
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isDoubleTy()) {
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
@@ -309,13 +312,13 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return false;
}
-
+
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = TD.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
uint64_t CurEltOffset = SL->getElementOffset(Index);
ByteOffset -= CurEltOffset;
-
+
while (1) {
// If the element access is to the element itself and not to tail padding,
// read the bytes from the element.
@@ -325,9 +328,9 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
!ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
BytesLeft, TD))
return false;
-
+
++Index;
-
+
// Check to see if we read from the last struct element, if so we're done.
if (Index == CS->getType()->getNumElements())
return true;
@@ -375,11 +378,11 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return true;
}
-
+
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
- CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
- return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+ CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
+ return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
BytesLeft, TD);
}
@@ -388,10 +391,10 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
- const TargetData &TD) {
+ const DataLayout &TD) {
Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
-
+
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
// If this is a float/double load, we can try folding it as an int32/64 load
@@ -415,15 +418,15 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
return FoldBitCast(Res, LoadTy, TD);
return 0;
}
-
+
unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0) return 0;
-
+
GlobalValue *GVal;
int64_t Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
return 0;
-
+
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
@@ -432,20 +435,29 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
// If we're loading off the beginning of the global, some bytes may be valid,
// but we don't try to handle this.
if (Offset < 0) return 0;
-
+
// If we're not accessing anything in this constant, the result is undefined.
if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
return UndefValue::get(IntType);
-
+
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
BytesLoaded, TD))
return 0;
- APInt ResultVal = APInt(IntType->getBitWidth(), RawBytes[BytesLoaded-1]);
- for (unsigned i = 1; i != BytesLoaded; ++i) {
- ResultVal <<= 8;
- ResultVal |= RawBytes[BytesLoaded-1-i];
+ APInt ResultVal = APInt(IntType->getBitWidth(), 0);
+ if (TD.isLittleEndian()) {
+ ResultVal = RawBytes[BytesLoaded - 1];
+ for (unsigned i = 1; i != BytesLoaded; ++i) {
+ ResultVal <<= 8;
+ ResultVal |= RawBytes[BytesLoaded-1-i];
+ }
+ } else {
+ ResultVal = RawBytes[0];
+ for (unsigned i = 1; i != BytesLoaded; ++i) {
+ ResultVal <<= 8;
+ ResultVal |= RawBytes[i];
+ }
}
return ConstantInt::get(IntType->getContext(), ResultVal);
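A stand-alone sketch of the byte assembly above, with invented bytes: the same raw storage yields different integers depending on target endianness, which is exactly what the two loops encode.

    #include <cstdint>
    #include <cstdio>

    int main() {
      unsigned char RawBytes[4] = {0x01, 0x02, 0x03, 0x04};
      uint32_t LE = 0, BE = 0;
      for (int i = 3; i >= 0; --i) LE = (LE << 8) | RawBytes[i]; // little endian
      for (int i = 0; i <= 3; ++i) BE = (BE << 8) | RawBytes[i]; // big endian
      std::printf("LE=0x%08x BE=0x%08x\n", LE, BE); // 0x04030201 / 0x01020304
    }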
@@ -455,7 +467,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
- const TargetData *TD) {
+ const DataLayout *TD) {
// First, try the easy cases:
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
@@ -464,15 +476,15 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// If the loaded value isn't a constant expr, we can't handle it.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return 0;
-
+
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
- if (Constant *V =
+ if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
return V;
}
-
+
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
StringRef Str;
@@ -500,14 +512,14 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
SingleChar = 0;
StrVal = (StrVal << 8) | SingleChar;
}
-
+
Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
if (Ty->isFloatingPointTy())
Res = ConstantExpr::getBitCast(Res, Ty);
return Res;
}
}
-
+
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
if (GlobalVariable *GV =
@@ -520,18 +532,16 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
return UndefValue::get(ResTy);
}
}
-
- // Try hard to fold loads from bitcasted strange and non-type-safe things. We
- // currently don't do any of this for big endian systems. It can be
- // generalized in the future if someone is interested.
- if (TD && TD->isLittleEndian())
+
+ // Try hard to fold loads from bitcasted strange and non-type-safe things.
+ if (TD)
return FoldReinterpretLoadFromConstPtr(CE, *TD);
return 0;
}
-static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
+static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
if (LI->isVolatile()) return 0;
-
+
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, TD);
@@ -540,23 +550,23 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
-/// these together. If target data info is available, it is provided as TD,
+/// these together. If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
- Constant *Op1, const TargetData *TD){
+ Constant *Op1, const DataLayout *TD){
// SROA
-
+
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
// Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
// bits.
-
-
+
+
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
// constant. This happens frequently when iterating over a global array.
if (Opc == Instruction::Sub && TD) {
GlobalValue *GV1, *GV2;
int64_t Offs1, Offs2;
-
+
if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
GV1 == GV2) {
@@ -564,7 +574,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
return ConstantInt::get(Op0->getType(), Offs1-Offs2);
}
}
-
+
return 0;
}
@@ -572,7 +582,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
- Type *ResultTy, const TargetData *TD,
+ Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TD) return 0;
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
@@ -622,20 +632,20 @@ static Constant* StripPtrCastKeepAS(Constant* Ptr) {
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
- Type *ResultTy, const TargetData *TD,
+ Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
return 0;
-
+
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
if (!isa<ConstantInt>(Ops[i])) {
-
+
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 &&
@@ -659,7 +669,8 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
APInt Offset =
APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
- makeArrayRef((Value **)Ops.data() + 1,
+ makeArrayRef((Value *const*)
+ Ops.data() + 1,
Ops.size() - 1)));
Ptr = StripPtrCastKeepAS(Ptr);
@@ -708,12 +719,12 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
break;
-
+
// Only handle pointers to sized types, not pointers to functions.
if (!ATy->getElementType()->isSized())
return 0;
}
-
+
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
@@ -785,7 +796,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
@@ -836,7 +847,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
TD, TLI);
-
+
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
@@ -855,10 +866,10 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
}
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
-/// using the specified TargetData. If successful, the constant result is
+/// using the specified DataLayout. If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
SmallVector<Constant*, 8> Ops;
for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
@@ -886,19 +897,19 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
-Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
+Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
- const TargetData *TD,
- const TargetLibraryInfo *TLI) {
+ const DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
return C;
-
+
return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
}
-
+
switch (Opcode) {
default: return 0;
case Instruction::ICmp:
@@ -916,7 +927,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
if (TD->getPointerSizeInBits() < InWidth) {
- Constant *Mask =
+ Constant *Mask =
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
TD->getPointerSizeInBits()));
Input = ConstantExpr::getAnd(Input, Mask);
@@ -964,7 +975,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
return C;
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
return C;
-
+
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
}
}
@@ -974,8 +985,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
- Constant *Ops0, Constant *Ops1,
- const TargetData *TD,
+ Constant *Ops0, Constant *Ops1,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
@@ -995,17 +1006,17 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
-
+
// Only do this transformation if the int is intptrty in size, otherwise
// there is a truncation or extension that we aren't modeling.
- if (CE0->getOpcode() == Instruction::PtrToInt &&
+ if (CE0->getOpcode() == Instruction::PtrToInt &&
CE0->getType() == IntPtrTy) {
Constant *C = CE0->getOperand(0);
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
}
-
+
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
@@ -1029,24 +1040,24 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
CE1->getOperand(0), TD, TLI);
}
}
-
+
// icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
- Constant *LHS =
+ Constant *LHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
TD, TLI);
- Constant *RHS =
+ Constant *RHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
TD, TLI);
- unsigned OpC =
+ unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
}
}
-
+
return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
@@ -1054,7 +1065,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
-Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
+Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
if (!CE->getOperand(1)->isNullValue())
return 0; // Do not allow stepping over the value!
@@ -1124,14 +1135,14 @@ llvm::canConstantFoldCallTo(const Function *F) {
if (!F->hasName()) return false;
StringRef Name = F->getName();
-
+
// In these cases, the check of the length is required. We don't want to
// return true for a name like "cos\0blah" which strcmp would return equal to
// "cos", but has length 8.
switch (Name[0]) {
default: return false;
case 'a':
- return Name == "acos" || Name == "asin" ||
+ return Name == "acos" || Name == "asin" ||
Name == "atan" || Name == "atan2";
case 'c':
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
@@ -1151,7 +1162,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}
}
-static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
+static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V);
@@ -1159,7 +1170,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1175,7 +1186,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1269,7 +1280,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case 'e':
if (Name == "exp" && TLI->has(LibFunc::exp))
return ConstantFoldFP(exp, V, Ty);
-
+
if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
@@ -1345,7 +1356,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
// Support ConstantVector in case we have an Undef in the top.
- if (isa<ConstantVector>(Operands[0]) ||
+ if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
@@ -1364,11 +1375,11 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case Intrinsic::x86_sse2_cvttsd2si64:
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
- return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/true, Ty);
}
}
-
+
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
@@ -1382,14 +1393,14 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
- double Op1V = Ty->isFloatTy() ?
+ double Op1V = Ty->isFloatTy() ?
(double)Op1->getValueAPF().convertToFloat() :
Op1->getValueAPF().convertToDouble();
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return 0;
- double Op2V = Ty->isFloatTy() ?
+ double Op2V = Ty->isFloatTy() ?
(double)Op2->getValueAPF().convertToFloat():
Op2->getValueAPF().convertToDouble();
@@ -1416,7 +1427,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
return 0;
}
-
+
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (F->getIntrinsicID()) {
@@ -1466,7 +1477,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
}
}
-
+
return 0;
}
return 0;
diff --git a/contrib/llvm/lib/Analysis/CostModel.cpp b/contrib/llvm/lib/Analysis/CostModel.cpp
new file mode 100644
index 0000000..5adbf45
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/CostModel.cpp
@@ -0,0 +1,193 @@
+//===- CostModel.cpp ------ Cost Model Analysis ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the cost model analysis. It provides a very basic cost
+// estimation for LLVM-IR. The cost result can be thought of as cycles, but it
+// is really unit-less. The estimated cost is meant to be used for comparing
+// alternatives.
+//
+//===----------------------------------------------------------------------===//
+
+#define CM_NAME "cost-model"
+#define DEBUG_TYPE CM_NAME
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Value.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+namespace {
+ class CostModelAnalysis : public FunctionPass {
+
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ CostModelAnalysis() : FunctionPass(ID), F(0), VTTI(0) {
+ initializeCostModelAnalysisPass(
+ *PassRegistry::getPassRegistry());
+ }
+
+ /// Returns the expected cost of the instruction.
+ /// Returns (unsigned)-1 if the cost is unknown.
+ /// Note, this method does not cache the cost calculation and it
+ /// can be expensive in some cases.
+ unsigned getInstructionCost(Instruction *I) const;
+
+ private:
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual bool runOnFunction(Function &F);
+ virtual void print(raw_ostream &OS, const Module*) const;
+
+ /// The function that we analyze.
+ Function *F;
+ /// Vector target information.
+ const VectorTargetTransformInfo *VTTI;
+ };
+} // End of anonymous namespace
+
+// Register this pass.
+char CostModelAnalysis::ID = 0;
+static const char cm_name[] = "Cost Model Analysis";
+INITIALIZE_PASS_BEGIN(CostModelAnalysis, CM_NAME, cm_name, false, true)
+INITIALIZE_PASS_END (CostModelAnalysis, CM_NAME, cm_name, false, true)
+
+FunctionPass *llvm::createCostModelAnalysisPass() {
+ return new CostModelAnalysis();
+}
+
+void
+CostModelAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+}
+
+bool
+CostModelAnalysis::runOnFunction(Function &F) {
+ this->F = &F;
+
+ // Target information.
+ TargetTransformInfo *TTI;
+ TTI = getAnalysisIfAvailable<TargetTransformInfo>();
+ if (TTI)
+ VTTI = TTI->getVectorTargetTransformInfo();
+
+ return false;
+}
+
+unsigned CostModelAnalysis::getInstructionCost(Instruction *I) const {
+ if (!VTTI)
+ return -1;
+
+ switch (I->getOpcode()) {
+ case Instruction::Ret:
+ case Instruction::PHI:
+ case Instruction::Br: {
+ return VTTI->getCFInstrCost(I->getOpcode());
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ return VTTI->getArithmeticInstrCost(I->getOpcode(), I->getType());
+ }
+ case Instruction::Select: {
+ SelectInst *SI = cast<SelectInst>(I);
+ Type *CondTy = SI->getCondition()->getType();
+ return VTTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy);
+ }
+ case Instruction::ICmp:
+ case Instruction::FCmp: {
+ Type *ValTy = I->getOperand(0)->getType();
+ return VTTI->getCmpSelInstrCost(I->getOpcode(), ValTy);
+ }
+ case Instruction::Store: {
+ StoreInst *SI = cast<StoreInst>(I);
+ Type *ValTy = SI->getValueOperand()->getType();
+ return VTTI->getMemoryOpCost(I->getOpcode(), ValTy,
+ SI->getAlignment(),
+ SI->getPointerAddressSpace());
+ }
+ case Instruction::Load: {
+ LoadInst *LI = cast<LoadInst>(I);
+ return VTTI->getMemoryOpCost(I->getOpcode(), I->getType(),
+ LI->getAlignment(),
+ LI->getPointerAddressSpace());
+ }
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::SIToFP:
+ case Instruction::UIToFP:
+ case Instruction::Trunc:
+ case Instruction::FPTrunc:
+ case Instruction::BitCast: {
+ Type *SrcTy = I->getOperand(0)->getType();
+ return VTTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
+ }
+ case Instruction::ExtractElement: {
+ ExtractElementInst * EEI = cast<ExtractElementInst>(I);
+ ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
+ unsigned Idx = -1;
+ if (CI)
+ Idx = CI->getZExtValue();
+ return VTTI->getVectorInstrCost(I->getOpcode(),
+ EEI->getOperand(0)->getType(), Idx);
+ }
+ case Instruction::InsertElement: {
+ InsertElementInst * IE = cast<InsertElementInst>(I);
+ ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
+ unsigned Idx = -1;
+ if (CI)
+ Idx = CI->getZExtValue();
+ return VTTI->getVectorInstrCost(I->getOpcode(),
+ IE->getType(), Idx);
+ }
+ default:
+ // We don't have any information on this instruction.
+ return -1;
+ }
+}
+
+void CostModelAnalysis::print(raw_ostream &OS, const Module*) const {
+ if (!F)
+ return;
+
+ for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
+ for (BasicBlock::iterator it = B->begin(), e = B->end(); it != e; ++it) {
+ Instruction *Inst = it;
+ unsigned Cost = getInstructionCost(Inst);
+ if (Cost != (unsigned)-1)
+ OS << "Cost Model: Found an estimated cost of " << Cost;
+ else
+ OS << "Cost Model: Unknown cost";
+
+ OS << " for instruction: "<< *Inst << "\n";
+ }
+ }
+}
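+
+// Illustrative usage (ours, not part of this file): assuming the pass is
+// registered under the name given by CM_NAME, it can be driven from opt
+// with something like
+//
+//   opt -analyze -cost-model input.ll
+//
+// which prints one line per instruction, e.g.
+//
+//   Cost Model: Found an estimated cost of 1 for instruction:   %add = add i32 %a, %b
+//
+// The exact numbers depend on the target's VectorTargetTransformInfo.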
diff --git a/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp
new file mode 100644
index 0000000..95ac5ea
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -0,0 +1,3786 @@
+//===-- DependenceAnalysis.cpp - DA Implementation --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DependenceAnalysis is an LLVM pass that analyses dependences between memory
+// accesses. Currently, it is an (incomplete) implementation of the approach
+// described in
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+//
+// There's a single entry point that analyzes the dependence between a pair
+// of memory references in a function, returning either NULL, for no dependence,
+// or a more-or-less detailed description of the dependence between them.
+//
+// Currently, the implementation cannot propagate constraints between
+// coupled RDIV subscripts and lacks a multi-subscript MIV test.
+// Both of these are conservative weaknesses;
+// that is, not a source of correctness problems.
+//
+// The implementation depends on the GEP instruction to
+// differentiate subscripts. Since Clang linearizes subscripts
+// for most arrays, we give up some precision (though the existing MIV tests
+// will help). We trust that the GEP instruction will eventually be extended.
+// In the meantime, we should explore Maslov's ideas about delinearization.
+//
+// We should pay some careful attention to the possibility of integer overflow
+// in the implementation of the various tests. This could happen with Add,
+// Subtract, or Multiply, with both APInt's and SCEV's.
+//
+// Some non-linear subscript pairs can be handled by the GCD test
+// (and perhaps other tests).
+// Should explore how often these things occur.
+//
+// Finally, it seems like certain test cases expose weaknesses in the SCEV
+// simplification, especially in the handling of sign and zero extensions.
+// It could be useful to spend time exploring these.
+//
+// Please note that this is work in progress and the interface is subject to
+// change.
+//
+//===----------------------------------------------------------------------===//
+// //
+// In memory of Ken Kennedy, 1945 - 2007 //
+// //
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "da"
+
+#include "llvm/Analysis/DependenceAnalysis.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Operator.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/InstIterator.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// statistics
+
+STATISTIC(TotalArrayPairs, "Array pairs tested");
+STATISTIC(SeparableSubscriptPairs, "Separable subscript pairs");
+STATISTIC(CoupledSubscriptPairs, "Coupled subscript pairs");
+STATISTIC(NonlinearSubscriptPairs, "Nonlinear subscript pairs");
+STATISTIC(ZIVapplications, "ZIV applications");
+STATISTIC(ZIVindependence, "ZIV independence");
+STATISTIC(StrongSIVapplications, "Strong SIV applications");
+STATISTIC(StrongSIVsuccesses, "Strong SIV successes");
+STATISTIC(StrongSIVindependence, "Strong SIV independence");
+STATISTIC(WeakCrossingSIVapplications, "Weak-Crossing SIV applications");
+STATISTIC(WeakCrossingSIVsuccesses, "Weak-Crossing SIV successes");
+STATISTIC(WeakCrossingSIVindependence, "Weak-Crossing SIV independence");
+STATISTIC(ExactSIVapplications, "Exact SIV applications");
+STATISTIC(ExactSIVsuccesses, "Exact SIV successes");
+STATISTIC(ExactSIVindependence, "Exact SIV independence");
+STATISTIC(WeakZeroSIVapplications, "Weak-Zero SIV applications");
+STATISTIC(WeakZeroSIVsuccesses, "Weak-Zero SIV successes");
+STATISTIC(WeakZeroSIVindependence, "Weak-Zero SIV independence");
+STATISTIC(ExactRDIVapplications, "Exact RDIV applications");
+STATISTIC(ExactRDIVindependence, "Exact RDIV independence");
+STATISTIC(SymbolicRDIVapplications, "Symbolic RDIV applications");
+STATISTIC(SymbolicRDIVindependence, "Symbolic RDIV independence");
+STATISTIC(DeltaApplications, "Delta applications");
+STATISTIC(DeltaSuccesses, "Delta successes");
+STATISTIC(DeltaIndependence, "Delta independence");
+STATISTIC(DeltaPropagations, "Delta propagations");
+STATISTIC(GCDapplications, "GCD applications");
+STATISTIC(GCDsuccesses, "GCD successes");
+STATISTIC(GCDindependence, "GCD independence");
+STATISTIC(BanerjeeApplications, "Banerjee applications");
+STATISTIC(BanerjeeIndependence, "Banerjee independence");
+STATISTIC(BanerjeeSuccesses, "Banerjee successes");
+
+//===----------------------------------------------------------------------===//
+// basics
+
+INITIALIZE_PASS_BEGIN(DependenceAnalysis, "da",
+ "Dependence Analysis", true, true)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(DependenceAnalysis, "da",
+ "Dependence Analysis", true, true)
+
+char DependenceAnalysis::ID = 0;
+
+
+FunctionPass *llvm::createDependenceAnalysisPass() {
+ return new DependenceAnalysis();
+}
+
+
+bool DependenceAnalysis::runOnFunction(Function &F) {
+ this->F = &F;
+ AA = &getAnalysis<AliasAnalysis>();
+ SE = &getAnalysis<ScalarEvolution>();
+ LI = &getAnalysis<LoopInfo>();
+ return false;
+}
+
+
+void DependenceAnalysis::releaseMemory() {
+}
+
+
+void DependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<AliasAnalysis>();
+ AU.addRequiredTransitive<ScalarEvolution>();
+ AU.addRequiredTransitive<LoopInfo>();
+}
+
+
+// Used to test the dependence analyzer.
+// Looks through the function, noting the first store instruction
+// and the first load instruction
+// (which always follows the first store in our tests).
+// Calls depends() and prints out the result.
+// Ignores all other instructions.
+static
+void dumpExampleDependence(raw_ostream &OS, Function *F,
+ DependenceAnalysis *DA) {
+ for (inst_iterator SrcI = inst_begin(F), SrcE = inst_end(F);
+ SrcI != SrcE; ++SrcI) {
+ if (const StoreInst *Src = dyn_cast<StoreInst>(&*SrcI)) {
+ for (inst_iterator DstI = SrcI, DstE = inst_end(F);
+ DstI != DstE; ++DstI) {
+ if (const LoadInst *Dst = dyn_cast<LoadInst>(&*DstI)) {
+ OS << "da analyze - ";
+ if (Dependence *D = DA->depends(Src, Dst, true)) {
+ D->dump(OS);
+ for (unsigned Level = 1; Level <= D->getLevels(); Level++) {
+ if (D->isSplitable(Level)) {
+ OS << "da analyze - split level = " << Level;
+ OS << ", iteration = " << *DA->getSplitIteration(D, Level);
+ OS << "!\n";
+ }
+ }
+ delete D;
+ }
+ else
+ OS << "none!\n";
+ return;
+ }
+ }
+ }
+ }
+}
+
+
+void DependenceAnalysis::print(raw_ostream &OS, const Module*) const {
+ dumpExampleDependence(OS, F, const_cast<DependenceAnalysis *>(this));
+}
+
+//===----------------------------------------------------------------------===//
+// Dependence methods
+
+// Returns true if this is an input dependence.
+bool Dependence::isInput() const {
+ return Src->mayReadFromMemory() && Dst->mayReadFromMemory();
+}
+
+
+// Returns true if this is an output dependence.
+bool Dependence::isOutput() const {
+ return Src->mayWriteToMemory() && Dst->mayWriteToMemory();
+}
+
+
+// Returns true if this is a flow (aka true) dependence.
+bool Dependence::isFlow() const {
+ return Src->mayWriteToMemory() && Dst->mayReadFromMemory();
+}
+
+
+// Returns true if this is an anti dependence.
+bool Dependence::isAnti() const {
+ return Src->mayReadFromMemory() && Dst->mayWriteToMemory();
+}
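+
+// For illustration (ours): given a store S1: A[i] = ... followed by a
+// load S2: ... = A[i], the pair (S1, S2) is a flow (true) dependence;
+// (S2, S1) would be anti, (S1, S1) output, and (S2, S2) input.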
+
+
+// Returns true if a particular level is scalar; that is,
+// if no subscript in the source or destination mentions the induction
+// variable associated with the loop at this level.
+// Leave this out of line, so it will serve as a virtual method anchor.
+bool Dependence::isScalar(unsigned level) const {
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// FullDependence methods
+
+FullDependence::FullDependence(const Instruction *Source,
+ const Instruction *Destination,
+ bool PossiblyLoopIndependent,
+ unsigned CommonLevels) :
+ Dependence(Source, Destination),
+ Levels(CommonLevels),
+ LoopIndependent(PossiblyLoopIndependent) {
+ Consistent = true;
+ DV = CommonLevels ? new DVEntry[CommonLevels] : NULL;
+}
+
+// The rest are simple getters that hide the implementation.
+
+// getDirection - Returns the direction associated with a particular level.
+unsigned FullDependence::getDirection(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].Direction;
+}
+
+
+// Returns the distance (or NULL) associated with a particular level.
+const SCEV *FullDependence::getDistance(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].Distance;
+}
+
+
+// Returns true if a particular level is scalar; that is,
+// if no subscript in the source or destination mentions the induction
+// variable associated with the loop at this level.
+bool FullDependence::isScalar(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].Scalar;
+}
+
+
+// Returns true if peeling the first iteration from this loop
+// will break this dependence.
+bool FullDependence::isPeelFirst(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].PeelFirst;
+}
+
+
+// Returns true if peeling the last iteration from this loop
+// will break this dependence.
+bool FullDependence::isPeelLast(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].PeelLast;
+}
+
+
+// Returns true if splitting this loop will break the dependence.
+bool FullDependence::isSplitable(unsigned Level) const {
+ assert(0 < Level && Level <= Levels && "Level out of range");
+ return DV[Level - 1].Splitable;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DependenceAnalysis::Constraint methods
+
+// If constraint is a point <X, Y>, returns X.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getX() const {
+ assert(Kind == Point && "Kind should be Point");
+ return A;
+}
+
+
+// If constraint is a point <X, Y>, returns Y.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getY() const {
+ assert(Kind == Point && "Kind should be Point");
+ return B;
+}
+
+
+// If constraint is a line AX + BY = C, returns A.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getA() const {
+ assert((Kind == Line || Kind == Distance) &&
+ "Kind should be Line (or Distance)");
+ return A;
+}
+
+
+// If constraint is a line AX + BY = C, returns B.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getB() const {
+ assert((Kind == Line || Kind == Distance) &&
+ "Kind should be Line (or Distance)");
+ return B;
+}
+
+
+// If constraint is a line AX + BY = C, returns C.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getC() const {
+ assert((Kind == Line || Kind == Distance) &&
+ "Kind should be Line (or Distance)");
+ return C;
+}
+
+
+// If constraint is a distance, returns D.
+// Otherwise assert.
+const SCEV *DependenceAnalysis::Constraint::getD() const {
+ assert(Kind == Distance && "Kind should be Distance");
+ return SE->getNegativeSCEV(C);
+}
+
+
+// Returns the loop associated with this constraint.
+const Loop *DependenceAnalysis::Constraint::getAssociatedLoop() const {
+ assert((Kind == Distance || Kind == Line || Kind == Point) &&
+ "Kind should be Distance, Line, or Point");
+ return AssociatedLoop;
+}
+
+
+void DependenceAnalysis::Constraint::setPoint(const SCEV *X,
+ const SCEV *Y,
+ const Loop *CurLoop) {
+ Kind = Point;
+ A = X;
+ B = Y;
+ AssociatedLoop = CurLoop;
+}
+
+
+void DependenceAnalysis::Constraint::setLine(const SCEV *AA,
+ const SCEV *BB,
+ const SCEV *CC,
+ const Loop *CurLoop) {
+ Kind = Line;
+ A = AA;
+ B = BB;
+ C = CC;
+ AssociatedLoop = CurLoop;
+}
+
+
+void DependenceAnalysis::Constraint::setDistance(const SCEV *D,
+ const Loop *CurLoop) {
+ Kind = Distance;
+ A = SE->getConstant(D->getType(), 1);
+ B = SE->getNegativeSCEV(A);
+ C = SE->getNegativeSCEV(D);
+ AssociatedLoop = CurLoop;
+}
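+
+// Note (reader aid, ours): setDistance encodes a distance D as the line
+// 1*X + (-1)*Y = -D, i.e. Y - X = D, which is why getD() above recovers
+// D as the negation of C.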
+
+
+void DependenceAnalysis::Constraint::setEmpty() {
+ Kind = Empty;
+}
+
+
+void DependenceAnalysis::Constraint::setAny(ScalarEvolution *NewSE) {
+ SE = NewSE;
+ Kind = Any;
+}
+
+
+// For debugging purposes. Dumps the constraint out to OS.
+void DependenceAnalysis::Constraint::dump(raw_ostream &OS) const {
+ if (isEmpty())
+ OS << " Empty\n";
+ else if (isAny())
+ OS << " Any\n";
+ else if (isPoint())
+ OS << " Point is <" << *getX() << ", " << *getY() << ">\n";
+ else if (isDistance())
+ OS << " Distance is " << *getD() <<
+ " (" << *getA() << "*X + " << *getB() << "*Y = " << *getC() << ")\n";
+ else if (isLine())
+ OS << " Line is " << *getA() << "*X + " <<
+ *getB() << "*Y = " << *getC() << "\n";
+ else
+ llvm_unreachable("unknown constraint type in Constraint::dump");
+}
+
+
+// Updates X with the intersection
+// of the Constraints X and Y. Returns true if X has changed.
+// Corresponds to Figure 4 from the paper
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
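+//
+// A worked example (ours, not from the paper): intersecting the lines
+// 1*X + (-1)*Y = -2 and 1*X + 1*Y = 4 takes the "different slopes"
+// path below and produces the Point <1, 3>, since X = 1, Y = 3
+// satisfies both equations. Had either quotient carried a nonzero
+// remainder, or come out negative, the result would be Empty instead.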
+bool DependenceAnalysis::intersectConstraints(Constraint *X,
+ const Constraint *Y) {
+ ++DeltaApplications;
+ DEBUG(dbgs() << "\tintersect constraints\n");
+ DEBUG(dbgs() << "\t X ="; X->dump(dbgs()));
+ DEBUG(dbgs() << "\t Y ="; Y->dump(dbgs()));
+ assert(!Y->isPoint() && "Y must not be a Point");
+ if (X->isAny()) {
+ if (Y->isAny())
+ return false;
+ *X = *Y;
+ return true;
+ }
+ if (X->isEmpty())
+ return false;
+ if (Y->isEmpty()) {
+ X->setEmpty();
+ return true;
+ }
+
+ if (X->isDistance() && Y->isDistance()) {
+ DEBUG(dbgs() << "\t intersect 2 distances\n");
+ if (isKnownPredicate(CmpInst::ICMP_EQ, X->getD(), Y->getD()))
+ return false;
+ if (isKnownPredicate(CmpInst::ICMP_NE, X->getD(), Y->getD())) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ // Hmmm, interesting situation.
+ // I guess if either is constant, keep it and ignore the other.
+ if (isa<SCEVConstant>(Y->getD())) {
+ *X = *Y;
+ return true;
+ }
+ return false;
+ }
+
+ // At this point, the pseudo-code in Figure 4 of the paper
+ // checks if (X->isPoint() && Y->isPoint()).
+ // This case can't occur in our implementation,
+ // since a Point can only arise as the result of intersecting
+ // two Line constraints, and the right-hand value, Y, is never
+ // the result of an intersection.
+ assert(!(X->isPoint() && Y->isPoint()) &&
+ "We shouldn't ever see X->isPoint() && Y->isPoint()");
+
+ if (X->isLine() && Y->isLine()) {
+ DEBUG(dbgs() << "\t intersect 2 lines\n");
+ const SCEV *Prod1 = SE->getMulExpr(X->getA(), Y->getB());
+ const SCEV *Prod2 = SE->getMulExpr(X->getB(), Y->getA());
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2)) {
+ // slopes are equal, so lines are parallel
+ DEBUG(dbgs() << "\t\tsame slope\n");
+ Prod1 = SE->getMulExpr(X->getC(), Y->getB());
+ Prod2 = SE->getMulExpr(X->getB(), Y->getC());
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2))
+ return false;
+ if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ return false;
+ }
+ if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) {
+ // slopes differ, so lines intersect
+ DEBUG(dbgs() << "\t\tdifferent slopes\n");
+ const SCEV *C1B2 = SE->getMulExpr(X->getC(), Y->getB());
+ const SCEV *C1A2 = SE->getMulExpr(X->getC(), Y->getA());
+ const SCEV *C2B1 = SE->getMulExpr(Y->getC(), X->getB());
+ const SCEV *C2A1 = SE->getMulExpr(Y->getC(), X->getA());
+ const SCEV *A1B2 = SE->getMulExpr(X->getA(), Y->getB());
+ const SCEV *A2B1 = SE->getMulExpr(Y->getA(), X->getB());
+ const SCEVConstant *C1A2_C2A1 =
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1A2, C2A1));
+ const SCEVConstant *C1B2_C2B1 =
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1B2, C2B1));
+ const SCEVConstant *A1B2_A2B1 =
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(A1B2, A2B1));
+ const SCEVConstant *A2B1_A1B2 =
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(A2B1, A1B2));
+ if (!C1B2_C2B1 || !C1A2_C2A1 ||
+ !A1B2_A2B1 || !A2B1_A1B2)
+ return false;
+ APInt Xtop = C1B2_C2B1->getValue()->getValue();
+ APInt Xbot = A1B2_A2B1->getValue()->getValue();
+ APInt Ytop = C1A2_C2A1->getValue()->getValue();
+ APInt Ybot = A2B1_A1B2->getValue()->getValue();
+ DEBUG(dbgs() << "\t\tXtop = " << Xtop << "\n");
+ DEBUG(dbgs() << "\t\tXbot = " << Xbot << "\n");
+ DEBUG(dbgs() << "\t\tYtop = " << Ytop << "\n");
+ DEBUG(dbgs() << "\t\tYbot = " << Ybot << "\n");
+ APInt Xq = Xtop; // these need to be initialized, even
+ APInt Xr = Xtop; // though they're just going to be overwritten
+ APInt::sdivrem(Xtop, Xbot, Xq, Xr);
+ APInt Yq = Ytop;
+ APInt Yr = Ytop;
+ APInt::sdivrem(Ytop, Ybot, Yq, Yr);
+ if (Xr != 0 || Yr != 0) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ DEBUG(dbgs() << "\t\tX = " << Xq << ", Y = " << Yq << "\n");
+ if (Xq.slt(0) || Yq.slt(0)) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ if (const SCEVConstant *CUB =
+ collectConstantUpperBound(X->getAssociatedLoop(), Prod1->getType())) {
+ APInt UpperBound = CUB->getValue()->getValue();
+ DEBUG(dbgs() << "\t\tupper bound = " << UpperBound << "\n");
+ if (Xq.sgt(UpperBound) || Yq.sgt(UpperBound)) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ }
+ X->setPoint(SE->getConstant(Xq),
+ SE->getConstant(Yq),
+ X->getAssociatedLoop());
+ ++DeltaSuccesses;
+ return true;
+ }
+ return false;
+ }
+
+ // if (X->isLine() && Y->isPoint()) This case can't occur.
+ assert(!(X->isLine() && Y->isPoint()) && "This case should never occur");
+
+ if (X->isPoint() && Y->isLine()) {
+ DEBUG(dbgs() << "\t intersect Point and Line\n");
+ const SCEV *A1X1 = SE->getMulExpr(Y->getA(), X->getX());
+ const SCEV *B1Y1 = SE->getMulExpr(Y->getB(), X->getY());
+ const SCEV *Sum = SE->getAddExpr(A1X1, B1Y1);
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Sum, Y->getC()))
+ return false;
+ if (isKnownPredicate(CmpInst::ICMP_NE, Sum, Y->getC())) {
+ X->setEmpty();
+ ++DeltaSuccesses;
+ return true;
+ }
+ return false;
+ }
+
+ llvm_unreachable("shouldn't reach the end of Constraint intersection");
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DependenceAnalysis methods
+
+// For debugging purposes. Dumps a dependence to OS.
+void Dependence::dump(raw_ostream &OS) const {
+ bool Splitable = false;
+ if (isConfused())
+ OS << "confused";
+ else {
+ if (isConsistent())
+ OS << "consistent ";
+ if (isFlow())
+ OS << "flow";
+ else if (isOutput())
+ OS << "output";
+ else if (isAnti())
+ OS << "anti";
+ else if (isInput())
+ OS << "input";
+ unsigned Levels = getLevels();
+ if (Levels) {
+ OS << " [";
+ for (unsigned II = 1; II <= Levels; ++II) {
+ if (isSplitable(II))
+ Splitable = true;
+ if (isPeelFirst(II))
+ OS << 'p';
+ const SCEV *Distance = getDistance(II);
+ if (Distance)
+ OS << *Distance;
+ else if (isScalar(II))
+ OS << "S";
+ else {
+ unsigned Direction = getDirection(II);
+ if (Direction == DVEntry::ALL)
+ OS << "*";
+ else {
+ if (Direction & DVEntry::LT)
+ OS << "<";
+ if (Direction & DVEntry::EQ)
+ OS << "=";
+ if (Direction & DVEntry::GT)
+ OS << ">";
+ }
+ }
+ if (isPeelLast(II))
+ OS << 'p';
+ if (II < Levels)
+ OS << " ";
+ }
+ if (isLoopIndependent())
+ OS << "|<";
+ OS << "]";
+ if (Splitable)
+ OS << " splitable";
+ }
+ }
+ OS << "!\n";
+}
+
+
+
+static
+AliasAnalysis::AliasResult underlyingObjectsAlias(AliasAnalysis *AA,
+ const Value *A,
+ const Value *B) {
+ const Value *AObj = GetUnderlyingObject(A);
+ const Value *BObj = GetUnderlyingObject(B);
+ return AA->alias(AObj, AA->getTypeStoreSize(AObj->getType()),
+ BObj, AA->getTypeStoreSize(BObj->getType()));
+}
+
+
+// Returns true if the load or store can be analyzed. Atomic and volatile
+// operations have properties which this analysis does not understand.
+static
+bool isLoadOrStore(const Instruction *I) {
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->isUnordered();
+ else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
+ return SI->isUnordered();
+ return false;
+}
+
+
+static
+const Value *getPointerOperand(const Instruction *I) {
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->getPointerOperand();
+ if (const StoreInst *SI = dyn_cast<StoreInst>(I))
+ return SI->getPointerOperand();
+ llvm_unreachable("Value is not load or store instruction");
+ return 0;
+}
+
+
+// Examines the loop nesting of the Src and Dst
+// instructions and establishes their shared loops. Sets the variables
+// CommonLevels, SrcLevels, and MaxLevels.
+// The source and destination instructions needn't be contained in the same
+// loop. The routine establishNestingLevels finds the level of the
+// most deeply nested loop that contains them both, CommonLevels. An
+// instruction that's
+// not contained in a loop is at level = 0. MaxLevels is equal to the level
+// of the source plus the level of the destination, minus CommonLevels.
+// This lets us allocate vectors MaxLevels in length, with room for every
+// distinct loop referenced in both the source and destination subscripts.
+// The variable SrcLevels is the nesting depth of the source instruction.
+// It's used to help calculate distinct loops referenced by the destination.
+// Here's the map from loops to levels:
+// 0 - unused
+// 1 - outermost common loop
+// ... - other common loops
+// CommonLevels - innermost common loop
+// ... - loops containing Src but not Dst
+// SrcLevels - innermost loop containing Src but not Dst
+// ... - loops containing Dst but not Src
+// MaxLevels - innermost loop containing Dst but not Src
+// Consider the following code fragment:
+// for (a = ...) {
+// for (b = ...) {
+// for (c = ...) {
+// for (d = ...) {
+// A[] = ...;
+// }
+// }
+// for (e = ...) {
+// for (f = ...) {
+// for (g = ...) {
+// ... = A[];
+// }
+// }
+// }
+// }
+// }
+// If we're looking at the possibility of a dependence between the store
+// to A (the Src) and the load from A (the Dst), we'll note that they
+// have 2 loops in common, so CommonLevels will equal 2 and the direction
+// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
+// A map from loop names to loop numbers would look like
+// a - 1
+// b - 2 = CommonLevels
+// c - 3
+// d - 4 = SrcLevels
+// e - 5
+// f - 6
+// g - 7 = MaxLevels
+void DependenceAnalysis::establishNestingLevels(const Instruction *Src,
+ const Instruction *Dst) {
+ const BasicBlock *SrcBlock = Src->getParent();
+ const BasicBlock *DstBlock = Dst->getParent();
+ unsigned SrcLevel = LI->getLoopDepth(SrcBlock);
+ unsigned DstLevel = LI->getLoopDepth(DstBlock);
+ const Loop *SrcLoop = LI->getLoopFor(SrcBlock);
+ const Loop *DstLoop = LI->getLoopFor(DstBlock);
+ SrcLevels = SrcLevel;
+ MaxLevels = SrcLevel + DstLevel;
+ while (SrcLevel > DstLevel) {
+ SrcLoop = SrcLoop->getParentLoop();
+ SrcLevel--;
+ }
+ while (DstLevel > SrcLevel) {
+ DstLoop = DstLoop->getParentLoop();
+ DstLevel--;
+ }
+ while (SrcLoop != DstLoop) {
+ SrcLoop = SrcLoop->getParentLoop();
+ DstLoop = DstLoop->getParentLoop();
+ SrcLevel--;
+ }
+ CommonLevels = SrcLevel;
+ MaxLevels -= CommonLevels;
+}
+
+
+// Given one of the loops containing the source, return
+// its level index in our numbering scheme.
+unsigned DependenceAnalysis::mapSrcLoop(const Loop *SrcLoop) const {
+ return SrcLoop->getLoopDepth();
+}
+
+
+// Given one of the loops containing the destination,
+// return its level index in our numbering scheme.
+unsigned DependenceAnalysis::mapDstLoop(const Loop *DstLoop) const {
+ unsigned D = DstLoop->getLoopDepth();
+ if (D > CommonLevels)
+ return D - CommonLevels + SrcLevels;
+ else
+ return D;
+}
+
+
+// Returns true if Expression is loop invariant in LoopNest.
+bool DependenceAnalysis::isLoopInvariant(const SCEV *Expression,
+ const Loop *LoopNest) const {
+ if (!LoopNest)
+ return true;
+ return SE->isLoopInvariant(Expression, LoopNest) &&
+ isLoopInvariant(Expression, LoopNest->getParentLoop());
+}
+
+
+
+// Finds the set of loops from the LoopNest that
+// have a level <= CommonLevels and are referred to by the SCEV Expression.
+void DependenceAnalysis::collectCommonLoops(const SCEV *Expression,
+ const Loop *LoopNest,
+ SmallBitVector &Loops) const {
+ while (LoopNest) {
+ unsigned Level = LoopNest->getLoopDepth();
+ if (Level <= CommonLevels && !SE->isLoopInvariant(Expression, LoopNest))
+ Loops.set(Level);
+ LoopNest = LoopNest->getParentLoop();
+ }
+}
+
+
+// removeMatchingExtensions - Examines a subscript pair.
+// If the source and destination are identically sign (or zero)
+// extended, it strips off the extension in an effort to simplify
+// the actual analysis.
+void DependenceAnalysis::removeMatchingExtensions(Subscript *Pair) {
+ const SCEV *Src = Pair->Src;
+ const SCEV *Dst = Pair->Dst;
+ if ((isa<SCEVZeroExtendExpr>(Src) && isa<SCEVZeroExtendExpr>(Dst)) ||
+ (isa<SCEVSignExtendExpr>(Src) && isa<SCEVSignExtendExpr>(Dst))) {
+ const SCEVCastExpr *SrcCast = cast<SCEVCastExpr>(Src);
+ const SCEVCastExpr *DstCast = cast<SCEVCastExpr>(Dst);
+ if (SrcCast->getType() == DstCast->getType()) {
+ Pair->Src = SrcCast->getOperand();
+ Pair->Dst = DstCast->getOperand();
+ }
+ }
+}
+
+
+// Examine the scev and return true iff it's linear.
+// Collect any loops mentioned in the set of "Loops".
+bool DependenceAnalysis::checkSrcSubscript(const SCEV *Src,
+ const Loop *LoopNest,
+ SmallBitVector &Loops) {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Src);
+ if (!AddRec)
+ return isLoopInvariant(Src, LoopNest);
+ const SCEV *Start = AddRec->getStart();
+ const SCEV *Step = AddRec->getStepRecurrence(*SE);
+ if (!isLoopInvariant(Step, LoopNest))
+ return false;
+ Loops.set(mapSrcLoop(AddRec->getLoop()));
+ return checkSrcSubscript(Start, LoopNest, Loops);
+}
+
+
+
+// Examine the scev and return true iff it's linear.
+// Collect any loops mentioned in the set of "Loops".
+bool DependenceAnalysis::checkDstSubscript(const SCEV *Dst,
+ const Loop *LoopNest,
+ SmallBitVector &Loops) {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+ if (!AddRec)
+ return isLoopInvariant(Dst, LoopNest);
+ const SCEV *Start = AddRec->getStart();
+ const SCEV *Step = AddRec->getStepRecurrence(*SE);
+ if (!isLoopInvariant(Step, LoopNest))
+ return false;
+ Loops.set(mapDstLoop(AddRec->getLoop()));
+ return checkDstSubscript(Start, LoopNest, Loops);
+}
+
+
+// Examines the subscript pair (the Src and Dst SCEVs)
+// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear.
+// Collects the associated loops in a set.
+DependenceAnalysis::Subscript::ClassificationKind
+DependenceAnalysis::classifyPair(const SCEV *Src, const Loop *SrcLoopNest,
+ const SCEV *Dst, const Loop *DstLoopNest,
+ SmallBitVector &Loops) {
+ SmallBitVector SrcLoops(MaxLevels + 1);
+ SmallBitVector DstLoops(MaxLevels + 1);
+ if (!checkSrcSubscript(Src, SrcLoopNest, SrcLoops))
+ return Subscript::NonLinear;
+ if (!checkDstSubscript(Dst, DstLoopNest, DstLoops))
+ return Subscript::NonLinear;
+ Loops = SrcLoops;
+ Loops |= DstLoops;
+ unsigned N = Loops.count();
+ if (N == 0)
+ return Subscript::ZIV;
+ if (N == 1)
+ return Subscript::SIV;
+ if (N == 2 && (SrcLoops.count() == 0 ||
+ DstLoops.count() == 0 ||
+ (SrcLoops.count() == 1 && DstLoops.count() == 1)))
+ return Subscript::RDIV;
+ return Subscript::MIV;
+}
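+
+// Some illustrative classifications (ours): with i an induction
+// variable of a common loop and j one of a non-common loop,
+//   A[5]   vs A[5]   -> ZIV (no induction variables)
+//   A[i]   vs A[i+1] -> SIV (one common loop)
+//   A[i]   vs A[j]   -> RDIV (one loop on each side)
+//   A[i+j] vs A[i]   -> MIV (more loops involved)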
+
+
+// A wrapper around SCEV::isKnownPredicate.
+// Looks for cases where we're interested in comparing for equality.
+// If both X and Y have been identically sign or zero extended,
+// it strips off the (confusing) extensions before invoking
+// SCEV::isKnownPredicate. Perhaps, someday, the ScalarEvolution package
+// will be similarly updated.
+//
+// If SCEV::isKnownPredicate can't prove the predicate,
+// we try simple subtraction, which seems to help in some cases
+// involving symbolics.
+bool DependenceAnalysis::isKnownPredicate(ICmpInst::Predicate Pred,
+ const SCEV *X,
+ const SCEV *Y) const {
+ if (Pred == CmpInst::ICMP_EQ ||
+ Pred == CmpInst::ICMP_NE) {
+ if ((isa<SCEVSignExtendExpr>(X) &&
+ isa<SCEVSignExtendExpr>(Y)) ||
+ (isa<SCEVZeroExtendExpr>(X) &&
+ isa<SCEVZeroExtendExpr>(Y))) {
+ const SCEVCastExpr *CX = cast<SCEVCastExpr>(X);
+ const SCEVCastExpr *CY = cast<SCEVCastExpr>(Y);
+ const SCEV *Xop = CX->getOperand();
+ const SCEV *Yop = CY->getOperand();
+ if (Xop->getType() == Yop->getType()) {
+ X = Xop;
+ Y = Yop;
+ }
+ }
+ }
+ if (SE->isKnownPredicate(Pred, X, Y))
+ return true;
+ // If SE->isKnownPredicate can't prove the condition,
+ // we try the brute-force approach of subtracting
+ // and testing the difference.
+ // By testing with SE->isKnownPredicate first, we avoid
+ // the possibility of overflow when the arguments are constants.
+ const SCEV *Delta = SE->getMinusSCEV(X, Y);
+ switch (Pred) {
+ case CmpInst::ICMP_EQ:
+ return Delta->isZero();
+ case CmpInst::ICMP_NE:
+ return SE->isKnownNonZero(Delta);
+ case CmpInst::ICMP_SGE:
+ return SE->isKnownNonNegative(Delta);
+ case CmpInst::ICMP_SLE:
+ return SE->isKnownNonPositive(Delta);
+ case CmpInst::ICMP_SGT:
+ return SE->isKnownPositive(Delta);
+ case CmpInst::ICMP_SLT:
+ return SE->isKnownNegative(Delta);
+ default:
+ llvm_unreachable("unexpected predicate in isKnownPredicate");
+ }
+}
+
+
+// All subscripts are the same type.
+// Loop bound may be smaller (e.g., a char).
+// Should zero extend loop bound, since it's always >= 0.
+// This routine collects upper bound and extends if needed.
+// Return null if no bound available.
+const SCEV *DependenceAnalysis::collectUpperBound(const Loop *L,
+ Type *T) const {
+ if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
+ const SCEV *UB = SE->getBackedgeTakenCount(L);
+ return SE->getNoopOrZeroExtend(UB, T);
+ }
+ return NULL;
+}
+
+
+// Calls collectUpperBound(), then attempts to cast it to SCEVConstant.
+// If the cast fails, returns NULL.
+const SCEVConstant *DependenceAnalysis::collectConstantUpperBound(const Loop *L,
+ Type *T
+ ) const {
+ if (const SCEV *UB = collectUpperBound(L, T))
+ return dyn_cast<SCEVConstant>(UB);
+ return NULL;
+}
+
+
+// testZIV -
+// When we have a pair of subscripts of the form [c1] and [c2],
+// where c1 and c2 are both loop invariant, we attack it using
+// the ZIV test. Basically, we test by comparing the two values,
+// but there are actually three possible results:
+// 1) the values are equal, so there's a dependence
+// 2) the values are different, so there's no dependence
+// 3) the values might be equal, so we have to assume a dependence.
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::testZIV(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << " src = " << *Src << "\n");
+ DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ ++ZIVapplications;
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Src, Dst)) {
+ DEBUG(dbgs() << " provably dependent\n");
+ return false; // provably dependent
+ }
+ if (isKnownPredicate(CmpInst::ICMP_NE, Src, Dst)) {
+ DEBUG(dbgs() << " provably independent\n");
+ ++ZIVindependence;
+ return true; // provably independent
+ }
+ DEBUG(dbgs() << " possibly dependent\n");
+ Result.Consistent = false;
+ return false; // possibly dependent
+}
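+
+// For example (ours): with subscripts A[10] and A[10] the values are
+// provably equal, so testZIV reports a dependence; with A[10] and
+// A[20] it proves independence; and with A[n] and A[m], for unknown
+// loop-invariant n and m, it conservatively assumes a dependence and
+// marks the result inconsistent.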
+
+
+// strongSIVtest -
+// From the paper, Practical Dependence Testing, Section 4.2.1
+//
+// When we have a pair of subscripts of the form [c1 + a*i] and [c2 + a*i],
+// where i is an induction variable, c1 and c2 are loop invariant,
+// and a is a constant, we can solve it exactly using the Strong SIV test.
+//
+// Can prove independence. Failing that, can compute distance (and direction).
+// In the presence of symbolic terms, we can sometimes make progress.
+//
+// If there's a dependence,
+//
+// c1 + a*i = c2 + a*i'
+//
+// The dependence distance is
+//
+// d = i' - i = (c1 - c2)/a
+//
+// A dependence only exists if d is an integer and abs(d) <= U, where U is the
+// loop's upper bound. If a dependence exists, the dependence direction is
+// defined as
+//
+// { < if d > 0
+// direction = { = if d = 0
+// { > if d < 0
+//
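+// A small worked example (ours): for A[2 + 4*i] and A[8 + 4*i],
+// a = 4 and c1 - c2 = -6, so d = -6/4 is not an integer and the
+// remainder check below proves independence. For A[2 + 4*i] and
+// A[10 + 4*i], d = -2, yielding distance -2 and direction >.
+//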
+// Return true if dependence disproved.
+bool DependenceAnalysis::strongSIVtest(const SCEV *Coeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const {
+ DEBUG(dbgs() << "\tStrong SIV test\n");
+ DEBUG(dbgs() << "\t Coeff = " << *Coeff);
+ DEBUG(dbgs() << ", " << *Coeff->getType() << "\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst);
+ DEBUG(dbgs() << ", " << *SrcConst->getType() << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst);
+ DEBUG(dbgs() << ", " << *DstConst->getType() << "\n");
+ ++StrongSIVapplications;
+ assert(0 < Level && Level <= CommonLevels && "level out of range");
+ Level--;
+
+ const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst);
+ DEBUG(dbgs() << "\t Delta = " << *Delta);
+ DEBUG(dbgs() << ", " << *Delta->getType() << "\n");
+
+ // check that |Delta| < iteration count
+ if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
+ DEBUG(dbgs() << "\t UpperBound = " << *UpperBound);
+ DEBUG(dbgs() << ", " << *UpperBound->getType() << "\n");
+ const SCEV *AbsDelta =
+ SE->isKnownNonNegative(Delta) ? Delta : SE->getNegativeSCEV(Delta);
+ const SCEV *AbsCoeff =
+ SE->isKnownNonNegative(Coeff) ? Coeff : SE->getNegativeSCEV(Coeff);
+ const SCEV *Product = SE->getMulExpr(UpperBound, AbsCoeff);
+ if (isKnownPredicate(CmpInst::ICMP_SGT, AbsDelta, Product)) {
+ // Distance greater than trip count - no dependence
+ ++StrongSIVindependence;
+ ++StrongSIVsuccesses;
+ return true;
+ }
+ }
+
+ // Can we compute distance?
+ if (isa<SCEVConstant>(Delta) && isa<SCEVConstant>(Coeff)) {
+ APInt ConstDelta = cast<SCEVConstant>(Delta)->getValue()->getValue();
+ APInt ConstCoeff = cast<SCEVConstant>(Coeff)->getValue()->getValue();
+ APInt Distance = ConstDelta; // these need to be initialized
+ APInt Remainder = ConstDelta;
+ APInt::sdivrem(ConstDelta, ConstCoeff, Distance, Remainder);
+ DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
+ DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ // Make sure Coeff divides Delta exactly
+ if (Remainder != 0) {
+ // Coeff doesn't divide Distance, no dependence
+ ++StrongSIVindependence;
+ ++StrongSIVsuccesses;
+ return true;
+ }
+ Result.DV[Level].Distance = SE->getConstant(Distance);
+ NewConstraint.setDistance(SE->getConstant(Distance), CurLoop);
+ if (Distance.sgt(0))
+ Result.DV[Level].Direction &= Dependence::DVEntry::LT;
+ else if (Distance.slt(0))
+ Result.DV[Level].Direction &= Dependence::DVEntry::GT;
+ else
+ Result.DV[Level].Direction &= Dependence::DVEntry::EQ;
+ ++StrongSIVsuccesses;
+ }
+ else if (Delta->isZero()) {
+ // since 0/X == 0
+ Result.DV[Level].Distance = Delta;
+ NewConstraint.setDistance(Delta, CurLoop);
+ Result.DV[Level].Direction &= Dependence::DVEntry::EQ;
+ ++StrongSIVsuccesses;
+ }
+ else {
+ if (Coeff->isOne()) {
+ DEBUG(dbgs() << "\t Distance = " << *Delta << "\n");
+ Result.DV[Level].Distance = Delta; // since X/1 == X
+ NewConstraint.setDistance(Delta, CurLoop);
+ }
+ else {
+ Result.Consistent = false;
+ NewConstraint.setLine(Coeff,
+ SE->getNegativeSCEV(Coeff),
+ SE->getNegativeSCEV(Delta), CurLoop);
+ }
+
+ // maybe we can get a useful direction
+ bool DeltaMaybeZero = !SE->isKnownNonZero(Delta);
+ bool DeltaMaybePositive = !SE->isKnownNonPositive(Delta);
+ bool DeltaMaybeNegative = !SE->isKnownNonNegative(Delta);
+ bool CoeffMaybePositive = !SE->isKnownNonPositive(Coeff);
+ bool CoeffMaybeNegative = !SE->isKnownNonNegative(Coeff);
+ // The double negatives above are confusing.
+ // It helps to read !SE->isKnownNonZero(Delta)
+ // as "Delta might be Zero"
+ unsigned NewDirection = Dependence::DVEntry::NONE;
+ if ((DeltaMaybePositive && CoeffMaybePositive) ||
+ (DeltaMaybeNegative && CoeffMaybeNegative))
+ NewDirection = Dependence::DVEntry::LT;
+ if (DeltaMaybeZero)
+ NewDirection |= Dependence::DVEntry::EQ;
+ if ((DeltaMaybeNegative && CoeffMaybePositive) ||
+ (DeltaMaybePositive && CoeffMaybeNegative))
+ NewDirection |= Dependence::DVEntry::GT;
+ if (NewDirection < Result.DV[Level].Direction)
+ ++StrongSIVsuccesses;
+ Result.DV[Level].Direction &= NewDirection;
+ }
+ return false;
+}
+
+
+// weakCrossingSIVtest -
+// From the paper, Practical Dependence Testing, Section 4.2.2
+//
+// When we have a pair of subscripts of the form [c1 + a*i] and [c2 - a*i],
+// where i is an induction variable, c1 and c2 are loop invariant,
+// and a is a constant, we can solve it exactly using the
+// Weak-Crossing SIV test.
+//
+// Given c1 + a*i = c2 - a*i', we can look for the intersection of
+// the two lines, where i = i', yielding
+//
+// c1 + a*i = c2 - a*i
+// 2a*i = c2 - c1
+// i = (c2 - c1)/2a
+//
+// If i < 0, there is no dependence.
+// If i > upperbound, there is no dependence.
+// If i = 0 (i.e., if c1 = c2), there's a dependence with distance = 0.
+// If i = upperbound, there's a dependence with distance = 0.
+// If i is integral, there's a dependence (all directions).
+// If the non-integer part = 1/2, there's a dependence (<> directions).
+// Otherwise, there's no dependence.
+//
+// Can prove independence. Failing that,
+// can sometimes refine the directions.
+// Can determine iteration for splitting.
+//
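+// For example (ours): with A[0 + 2*i] and A[6 - 2*i] the lines cross
+// at i = (6 - 0)/(2*2) = 1.5, so SplitIter = 1. The crossing is not
+// integral, but the non-integer part is 1/2, so iterations i and i'
+// with i + i' = 3 still conflict (e.g., i = 1, i' = 2), leaving the
+// <> directions described above.
+//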
+// Return true if dependence disproved.
+bool DependenceAnalysis::weakCrossingSIVtest(const SCEV *Coeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const {
+ DEBUG(dbgs() << "\tWeak-Crossing SIV test\n");
+ DEBUG(dbgs() << "\t Coeff = " << *Coeff << "\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++WeakCrossingSIVapplications;
+ assert(0 < Level && Level <= CommonLevels && "Level out of range");
+ Level--;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ NewConstraint.setLine(Coeff, Coeff, Delta, CurLoop);
+ if (Delta->isZero()) {
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT);
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::GT);
+ ++WeakCrossingSIVsuccesses;
+ if (!Result.DV[Level].Direction) {
+ ++WeakCrossingSIVindependence;
+ return true;
+ }
+ Result.DV[Level].Distance = Delta; // = 0
+ return false;
+ }
+ const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(Coeff);
+ if (!ConstCoeff)
+ return false;
+
+ Result.DV[Level].Splitable = true;
+ if (SE->isKnownNegative(ConstCoeff)) {
+ ConstCoeff = dyn_cast<SCEVConstant>(SE->getNegativeSCEV(ConstCoeff));
+ assert(ConstCoeff &&
+ "dynamic cast of negative of ConstCoeff should yield constant");
+ Delta = SE->getNegativeSCEV(Delta);
+ }
+ assert(SE->isKnownPositive(ConstCoeff) && "ConstCoeff should be positive");
+
+ // compute SplitIter for use by DependenceAnalysis::getSplitIteration()
+ SplitIter =
+ SE->getUDivExpr(SE->getSMaxExpr(SE->getConstant(Delta->getType(), 0),
+ Delta),
+ SE->getMulExpr(SE->getConstant(Delta->getType(), 2),
+ ConstCoeff));
+ DEBUG(dbgs() << "\t Split iter = " << *SplitIter << "\n");
+
+ const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+ if (!ConstDelta)
+ return false;
+
+ // We're certain that ConstCoeff > 0; therefore,
+ // if Delta < 0, then no dependence.
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ DEBUG(dbgs() << "\t ConstCoeff = " << *ConstCoeff << "\n");
+ if (SE->isKnownNegative(Delta)) {
+ // No dependence, Delta < 0
+ ++WeakCrossingSIVindependence;
+ ++WeakCrossingSIVsuccesses;
+ return true;
+ }
+
+ // We're certain that Delta > 0 and ConstCoeff > 0.
+ // Check Delta/(2*ConstCoeff) against upper loop bound
+ if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
+ DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ const SCEV *ConstantTwo = SE->getConstant(UpperBound->getType(), 2);
+ const SCEV *ML = SE->getMulExpr(SE->getMulExpr(ConstCoeff, UpperBound),
+ ConstantTwo);
+ DEBUG(dbgs() << "\t ML = " << *ML << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, ML)) {
+ // Delta too big, no dependence
+ ++WeakCrossingSIVindependence;
+ ++WeakCrossingSIVsuccesses;
+ return true;
+ }
+ if (isKnownPredicate(CmpInst::ICMP_EQ, Delta, ML)) {
+ // i = i' = UB
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT);
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::GT);
+ ++WeakCrossingSIVsuccesses;
+ if (!Result.DV[Level].Direction) {
+ ++WeakCrossingSIVindependence;
+ return true;
+ }
+ Result.DV[Level].Splitable = false;
+ Result.DV[Level].Distance = SE->getConstant(Delta->getType(), 0);
+ return false;
+ }
+ }
+
+ // check that Coeff divides Delta
+ APInt APDelta = ConstDelta->getValue()->getValue();
+ APInt APCoeff = ConstCoeff->getValue()->getValue();
+ APInt Distance = APDelta; // these need to be initialized
+ APInt Remainder = APDelta;
+ APInt::sdivrem(APDelta, APCoeff, Distance, Remainder);
+ DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ if (Remainder != 0) {
+ // Coeff doesn't divide Delta, no dependence
+ ++WeakCrossingSIVindependence;
+ ++WeakCrossingSIVsuccesses;
+ return true;
+ }
+ DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
+
+ // if 2*Coeff doesn't divide Delta, then the equal direction isn't possible
+ APInt Two = APInt(Distance.getBitWidth(), 2, true);
+ Remainder = Distance.srem(Two);
+ DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ if (Remainder != 0) {
+ // Equal direction isn't possible
+ Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::EQ);
+ ++WeakCrossingSIVsuccesses;
+ }
+ return false;
+}
+
+
+// Kirch's algorithm, from
+//
+// Optimizing Supercompilers for Supercomputers
+// Michael Wolfe
+// MIT Press, 1989
+//
+// Program 2.1, page 29.
+// Computes the GCD of AM and BM.
+// Also finds a solution to the equation ax - by = gcd(a, b).
+// Returns true iff the gcd does not divide Delta,
+// in which case no dependence is possible.
+static
+bool findGCD(unsigned Bits, APInt AM, APInt BM, APInt Delta,
+ APInt &G, APInt &X, APInt &Y) {
+ APInt A0(Bits, 1, true), A1(Bits, 0, true);
+ APInt B0(Bits, 0, true), B1(Bits, 1, true);
+ APInt G0 = AM.abs();
+ APInt G1 = BM.abs();
+ APInt Q = G0; // these need to be initialized
+ APInt R = G0;
+ APInt::sdivrem(G0, G1, Q, R);
+ while (R != 0) {
+ APInt A2 = A0 - Q*A1; A0 = A1; A1 = A2;
+ APInt B2 = B0 - Q*B1; B0 = B1; B1 = B2;
+ G0 = G1; G1 = R;
+ APInt::sdivrem(G0, G1, Q, R);
+ }
+ G = G1;
+ DEBUG(dbgs() << "\t GCD = " << G << "\n");
+ X = AM.slt(0) ? -A1 : A1;
+ Y = BM.slt(0) ? B1 : -B1;
+
+ // make sure gcd divides Delta
+ R = Delta.srem(G);
+ if (R != 0)
+ return true; // gcd doesn't divide Delta, no dependence
+ Q = Delta.sdiv(G);
+ X *= Q;
+ Y *= Q;
+ return false;
+}
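+
+// A worked example (ours): findGCD with AM = 2, BM = 4, Delta = 6
+// computes G = 2 and X = 1, Y = 0 (since 2*1 - 4*0 = 2 = gcd), then,
+// because 2 divides 6, scales by Q = 3 to get X = 3, Y = 0, a solution
+// of 2*X - 4*Y = 6, and returns false. With Delta = 7 it would return
+// true, since 2 does not divide 7, disproving the dependence.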
+
+
+static
+APInt floorOfQuotient(APInt A, APInt B) {
+ APInt Q = A; // these need to be initialized
+ APInt R = A;
+ APInt::sdivrem(A, B, Q, R);
+ if (R == 0)
+ return Q;
+ if ((A.sgt(0) && B.sgt(0)) ||
+ (A.slt(0) && B.slt(0)))
+ return Q;
+ else
+ return Q - 1;
+}
+
+
+static
+APInt ceilingOfQuotient(APInt A, APInt B) {
+ APInt Q = A; // these need to be initialized
+ APInt R = A;
+ APInt::sdivrem(A, B, Q, R);
+ if (R == 0)
+ return Q;
+ if ((A.sgt(0) && B.sgt(0)) ||
+ (A.slt(0) && B.slt(0)))
+ return Q + 1;
+ else
+ return Q;
+}
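+
+// Unlike APInt::sdiv, which truncates toward zero, these round toward
+// -infinity and +infinity respectively. For example (ours):
+// floorOfQuotient(-7, 2) yields -4 and ceilingOfQuotient(-7, 2)
+// yields -3, where sdiv would give -3 in both cases.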
+
+
+static
+APInt maxAPInt(APInt A, APInt B) {
+ return A.sgt(B) ? A : B;
+}
+
+
+static
+APInt minAPInt(APInt A, APInt B) {
+ return A.slt(B) ? A : B;
+}
+
+
+// exactSIVtest -
+// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*i],
+// where i is an induction variable, c1 and c2 are loop invariant, and a1
+// and a2 are constant, we can solve it exactly using an algorithm developed
+// by Banerjee and Wolfe. See Section 2.5.3 in
+//
+// Optimizing Supercompilers for Supercomputers
+// Michael Wolfe
+// MIT Press, 1989
+//
+// It's slower than the specialized tests (strong SIV, weak-zero SIV, etc),
+// so use them if possible. They're also a bit better with symbolics and,
+// in the case of the strong SIV test, can compute Distances.
+//
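+// For example (ours): for A[0 + 2*i] and A[1 + 4*i], a dependence
+// requires 2*i - 4*i' = 1; since gcd(2, 4) = 2 does not divide 1,
+// findGCD below disproves the dependence immediately.
+//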
+// Return true if dependence disproved.
+bool DependenceAnalysis::exactSIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const {
+ DEBUG(dbgs() << "\tExact SIV test\n");
+ DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n");
+ DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++ExactSIVapplications;
+ assert(0 < Level && Level <= CommonLevels && "Level out of range");
+ Level--;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ NewConstraint.setLine(SrcCoeff, SE->getNegativeSCEV(DstCoeff),
+ Delta, CurLoop);
+ const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+ const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+ const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+ if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff)
+ return false;
+
+ // find gcd
+ APInt G, X, Y;
+ APInt AM = ConstSrcCoeff->getValue()->getValue();
+ APInt BM = ConstDstCoeff->getValue()->getValue();
+ unsigned Bits = AM.getBitWidth();
+ if (findGCD(Bits, AM, BM, ConstDelta->getValue()->getValue(), G, X, Y)) {
+ // gcd doesn't divide Delta, no dependence
+ ++ExactSIVindependence;
+ ++ExactSIVsuccesses;
+ return true;
+ }
+
+ DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n");
+
+ // since SCEV construction normalizes, LM = 0
+ APInt UM(Bits, 1, true);
+ bool UMvalid = false;
+ // UM is perhaps unavailable, let's check
+ if (const SCEVConstant *CUB =
+ collectConstantUpperBound(CurLoop, Delta->getType())) {
+ UM = CUB->getValue()->getValue();
+ DEBUG(dbgs() << "\t UM = " << UM << "\n");
+ UMvalid = true;
+ }
+
+ APInt TU(APInt::getSignedMaxValue(Bits));
+ APInt TL(APInt::getSignedMinValue(Bits));
+
+ // test(BM/G, LM-X) and test(-BM/G, X-UM)
+ APInt TMUL = BM.sdiv(G);
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ if (UMvalid) {
+ TU = minAPInt(TU, floorOfQuotient(UM - X, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ }
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(-X, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ if (UMvalid) {
+ TL = maxAPInt(TL, ceilingOfQuotient(UM - X, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ }
+ }
+
+ // test(AM/G, LM-Y) and test(-AM/G, Y-UM)
+ TMUL = AM.sdiv(G);
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ if (UMvalid) {
+ TU = minAPInt(TU, floorOfQuotient(UM - Y, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ }
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(-Y, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ if (UMvalid) {
+ TL = maxAPInt(TL, ceilingOfQuotient(UM - Y, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ }
+ }
+ if (TL.sgt(TU)) {
+ ++ExactSIVindependence;
+ ++ExactSIVsuccesses;
+ return true;
+ }
+
+ // explore directions
+ unsigned NewDirection = Dependence::DVEntry::NONE;
+
+ // less than
+ APInt SaveTU(TU); // save these
+ APInt SaveTL(TL);
+ DEBUG(dbgs() << "\t exploring LT direction\n");
+ TMUL = AM - BM;
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(X - Y + 1, TMUL));
+ DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(X - Y + 1, TMUL));
+ DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ }
+ if (TL.sle(TU)) {
+ NewDirection |= Dependence::DVEntry::LT;
+ ++ExactSIVsuccesses;
+ }
+
+ // equal
+ TU = SaveTU; // restore
+ TL = SaveTL;
+ DEBUG(dbgs() << "\t exploring EQ direction\n");
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(X - Y, TMUL));
+ DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(X - Y, TMUL));
+ DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ }
+ TMUL = BM - AM;
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(Y - X, TMUL));
+ DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(Y - X, TMUL));
+ DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ }
+ if (TL.sle(TU)) {
+ NewDirection |= Dependence::DVEntry::EQ;
+ ++ExactSIVsuccesses;
+ }
+
+ // greater than
+ TU = SaveTU; // restore
+ TL = SaveTL;
+ DEBUG(dbgs() << "\t exploring GT direction\n");
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(Y - X + 1, TMUL));
+ DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(Y - X + 1, TMUL));
+ DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ }
+ if (TL.sle(TU)) {
+ NewDirection |= Dependence::DVEntry::GT;
+ ++ExactSIVsuccesses;
+ }
+
+ // finished
+ Result.DV[Level].Direction &= NewDirection;
+ if (Result.DV[Level].Direction == Dependence::DVEntry::NONE)
+ ++ExactSIVindependence;
+ return Result.DV[Level].Direction == Dependence::DVEntry::NONE;
+}
+
+
+
+// Return true if the divisor evenly divides the dividend.
+static
+bool isRemainderZero(const SCEVConstant *Dividend,
+ const SCEVConstant *Divisor) {
+ APInt ConstDividend = Dividend->getValue()->getValue();
+ APInt ConstDivisor = Divisor->getValue()->getValue();
+ return ConstDividend.srem(ConstDivisor) == 0;
+}
+
+
+// weakZeroSrcSIVtest -
+// From the paper, Practical Dependence Testing, Section 4.2.2
+//
+// When we have a pair of subscripts of the form [c1] and [c2 + a*i],
+// where i is an induction variable, c1 and c2 are loop invariant,
+// and a is a constant, we can solve it exactly using the
+// Weak-Zero SIV test.
+//
+// Given
+//
+// c1 = c2 + a*i
+//
+// we get
+//
+// (c1 - c2)/a = i
+//
+// If i is not an integer, there's no dependence.
+// If i < 0 or > UB, there's no dependence.
+// If i = 0, the direction is <= and peeling the
+// 1st iteration will break the dependence.
+// If i = UB, the direction is >= and peeling the
+// last iteration will break the dependence.
+// Otherwise, the direction is *.
+//
+// Can prove independence. Failing that, we can sometimes refine
+// the directions. Can sometimes show that first or last
+// iteration carries all the dependences (so worth peeling).
+//
+// (see also weakZeroDstSIVtest)
+//
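+// For example (ours): for A[10] and A[2 + 4*i], a dependence requires
+// i = (10 - 2)/4 = 2, so if the loop's upper bound is provably less
+// than 2 the checks below disprove the dependence; and when c1 = c2
+// (i.e., i = 0), only the first iteration can conflict, so PeelFirst
+// is set.
+//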
+// Return true if dependence disproved.
+bool DependenceAnalysis::weakZeroSrcSIVtest(const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const {
+ // For the WeakSIV test, it's possible the loop isn't common to
+ // the Src and Dst loops. If it isn't, then there's no need to
+ // record a direction.
+ DEBUG(dbgs() << "\tWeak-Zero (src) SIV test\n");
+ DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << "\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++WeakZeroSIVapplications;
+ assert(0 < Level && Level <= MaxLevels && "Level out of range");
+ Level--;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst);
+ NewConstraint.setLine(SE->getConstant(Delta->getType(), 0),
+ DstCoeff, Delta, CurLoop);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_EQ, SrcConst, DstConst)) {
+ if (Level < CommonLevels) {
+ Result.DV[Level].Direction &= Dependence::DVEntry::LE;
+ Result.DV[Level].PeelFirst = true;
+ ++WeakZeroSIVsuccesses;
+ }
+ return false; // dependences caused by first iteration
+ }
+ const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+ if (!ConstCoeff)
+ return false;
+ const SCEV *AbsCoeff =
+ SE->isKnownNegative(ConstCoeff) ?
+ SE->getNegativeSCEV(ConstCoeff) : ConstCoeff;
+ const SCEV *NewDelta =
+ SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
+
+ // check that Delta/SrcCoeff < iteration count
+ // really check NewDelta < count*AbsCoeff
+ if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
+ DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound);
+ if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) {
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+ if (isKnownPredicate(CmpInst::ICMP_EQ, NewDelta, Product)) {
+ // dependences caused by last iteration
+ if (Level < CommonLevels) {
+ Result.DV[Level].Direction &= Dependence::DVEntry::GE;
+ Result.DV[Level].PeelLast = true;
+ ++WeakZeroSIVsuccesses;
+ }
+ return false;
+ }
+ }
+
+ // check that Delta/SrcCoeff >= 0
+ // really check that NewDelta >= 0
+ if (SE->isKnownNegative(NewDelta)) {
+ // No dependence, newDelta < 0
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+
+ // if SrcCoeff doesn't divide Delta, then no dependence
+ if (isa<SCEVConstant>(Delta) &&
+ !isRemainderZero(cast<SCEVConstant>(Delta), ConstCoeff)) {
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+ return false;
+}
+
+
+// weakZeroDstSIVtest -
+// From the paper, Practical Dependence Testing, Section 4.2.2
+//
+// When we have a pair of subscripts of the form [c1 + a*i] and [c2],
+// where i is an induction variable, c1 and c2 are loop invariant,
+// and a is a constant, we can solve it exactly using the
+// Weak-Zero SIV test.
+//
+// Given
+//
+// c1 + a*i = c2
+//
+// we get
+//
+// i = (c2 - c1)/a
+//
+// If i is not an integer, there's no dependence.
+// If i < 0 or > UB, there's no dependence.
+// If i = 0, the direction is <= and peeling the
+// 1st iteration will break the dependence.
+// If i = UB, the direction is >= and peeling the
+// last iteration will break the dependence.
+// Otherwise, the direction is *.
+//
+// Can prove independence. Failing that, we can sometimes refine
+// the directions. Can sometimes show that first or last
+// iteration carries all the dependences (so worth peeling).
+//
+// (see also weakZeroSrcSIVtest)
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::weakZeroDstSIVtest(const SCEV *SrcCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const {
+ // For the WeakSIV test, it's possible the loop isn't common to the
+ // Src and Dst loops. If it isn't, then there's no need to record a direction.
+ DEBUG(dbgs() << "\tWeak-Zero (dst) SIV test\n");
+ DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << "\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++WeakZeroSIVapplications;
+ assert(0 < Level && Level <= SrcLevels && "Level out of range");
+ Level--;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ NewConstraint.setLine(SrcCoeff, SE->getConstant(Delta->getType(), 0),
+ Delta, CurLoop);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_EQ, DstConst, SrcConst)) {
+ if (Level < CommonLevels) {
+ Result.DV[Level].Direction &= Dependence::DVEntry::LE;
+ Result.DV[Level].PeelFirst = true;
+ ++WeakZeroSIVsuccesses;
+ }
+ return false; // dependences caused by first iteration
+ }
+ const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+ if (!ConstCoeff)
+ return false;
+ const SCEV *AbsCoeff =
+ SE->isKnownNegative(ConstCoeff) ?
+ SE->getNegativeSCEV(ConstCoeff) : ConstCoeff;
+ const SCEV *NewDelta =
+ SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
+
+ // check that Delta/SrcCoeff < iteration count
+ // really check NewDelta < count*AbsCoeff
+ if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
+ DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound);
+ if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) {
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+ if (isKnownPredicate(CmpInst::ICMP_EQ, NewDelta, Product)) {
+ // dependences caused by last iteration
+ if (Level < CommonLevels) {
+ Result.DV[Level].Direction &= Dependence::DVEntry::GE;
+ Result.DV[Level].PeelLast = true;
+ ++WeakZeroSIVsuccesses;
+ }
+ return false;
+ }
+ }
+
+ // check that Delta/SrcCoeff >= 0
+ // really check that NewDelta >= 0
+ if (SE->isKnownNegative(NewDelta)) {
+ // No dependence, newDelta < 0
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+
+ // if SrcCoeff doesn't divide Delta, then no dependence
+ if (isa<SCEVConstant>(Delta) &&
+ !isRemainderZero(cast<SCEVConstant>(Delta), ConstCoeff)) {
+ ++WeakZeroSIVindependence;
+ ++WeakZeroSIVsuccesses;
+ return true;
+ }
+ return false;
+}
+
+
+// exactRDIVtest - Tests the RDIV subscript pair for dependence.
+// Things of the form [c1 + a*i] and [c2 + b*j],
+// where i and j are induction variable, c1 and c2 are loop invariant,
+// and a and b are constants.
+// Returns true if any possible dependence is disproved.
+// Marks the result as inconsistent.
+// Works in some cases that symbolicRDIVtest doesn't, and vice versa.
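+//
+// For example (illustrative numbers): [2*i + 2] and [4*j + 1] require
+// 2*i - 4*j = -1; gcd(2, 4) = 2 doesn't divide -1, so independence is
+// proved. With [4*j] instead, 2*i - 4*j = -2 has solutions
+// (i, j) = (1, 1), (3, 2), ..., and the loop bounds determine whether
+// any of them is feasible.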
+bool DependenceAnalysis::exactRDIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *SrcLoop,
+ const Loop *DstLoop,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << "\tExact RDIV test\n");
+ DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n");
+ DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n");
+ DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ ++ExactRDIVapplications;
+ Result.Consistent = false;
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+ const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+ const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+ if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff)
+ return false;
+
+ // find gcd
+ APInt G, X, Y;
+ APInt AM = ConstSrcCoeff->getValue()->getValue();
+ APInt BM = ConstDstCoeff->getValue()->getValue();
+ unsigned Bits = AM.getBitWidth();
+ if (findGCD(Bits, AM, BM, ConstDelta->getValue()->getValue(), G, X, Y)) {
+ // gcd doesn't divide Delta, no dependence
+ ++ExactRDIVindependence;
+ return true;
+ }
+
+ DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n");
+
+ // since SCEV construction seems to normalize, LM = 0
+ APInt SrcUM(Bits, 1, true);
+ bool SrcUMvalid = false;
+ // SrcUM is perhaps unavailable, let's check
+ if (const SCEVConstant *UpperBound =
+ collectConstantUpperBound(SrcLoop, Delta->getType())) {
+ SrcUM = UpperBound->getValue()->getValue();
+ DEBUG(dbgs() << "\t SrcUM = " << SrcUM << "\n");
+ SrcUMvalid = true;
+ }
+
+ APInt DstUM(Bits, 1, true);
+ bool DstUMvalid = false;
+ // DstUM is perhaps unavailable, let's check
+ if (const SCEVConstant *UpperBound =
+ collectConstantUpperBound(DstLoop, Delta->getType())) {
+ DstUM = UpperBound->getValue()->getValue();
+ DEBUG(dbgs() << "\t DstUM = " << DstUM << "\n");
+ DstUMvalid = true;
+ }
+
+ APInt TU(APInt::getSignedMaxValue(Bits));
+ APInt TL(APInt::getSignedMinValue(Bits));
+
+ // test(BM/G, LM-X) and test(-BM/G, X-UM)
+ APInt TMUL = BM.sdiv(G);
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ if (SrcUMvalid) {
+ TU = minAPInt(TU, floorOfQuotient(SrcUM - X, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ }
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(-X, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ if (SrcUMvalid) {
+ TL = maxAPInt(TL, ceilingOfQuotient(SrcUM - X, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ }
+ }
+
+ // test(AM/G, LM-Y) and test(-AM/G, Y-UM)
+ TMUL = AM.sdiv(G);
+ if (TMUL.sgt(0)) {
+ TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ if (DstUMvalid) {
+ TU = minAPInt(TU, floorOfQuotient(DstUM - Y, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ }
+ }
+ else {
+ TU = minAPInt(TU, floorOfQuotient(-Y, TMUL));
+ DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ if (DstUMvalid) {
+ TL = maxAPInt(TL, ceilingOfQuotient(DstUM - Y, TMUL));
+ DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ }
+ }
+ if (TL.sgt(TU))
+ ++ExactRDIVindependence;
+ return TL.sgt(TU);
+}
+
+
+// symbolicRDIVtest -
+// In Section 4.5 of the Practical Dependence Testing paper, the authors
+// introduce a special case of Banerjee's Inequalities (also called the
+// Extreme-Value Test) that can handle some of the SIV and RDIV cases,
+// particularly cases with symbolics. Since it's only able to disprove
+// dependence (not compute distances or directions), we'll use it as a
+// fallback for the other tests.
+//
+// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j]
+// where i and j are induction variables and c1 and c2 are loop invariants,
+// we can use the symbolic tests to disprove some dependences, serving as a
+// backup for the RDIV test. Note that i and j can be the same variable,
+// letting this test serve as a backup for the various SIV tests.
+//
+// For a dependence to exist, c1 + a1*i must equal c2 + a2*j for some
+// 0 <= i <= N1 and some 0 <= j <= N2, where N1 and N2 are the (normalized)
+// loop bounds for the i and j loops, respectively. So, ...
+//
+// c1 + a1*i = c2 + a2*j
+// a1*i - a2*j = c2 - c1
+//
+// To test for a dependence, we compute c2 - c1 and make sure it's in the
+// range of the maximum and minimum possible values of a1*i - a2*j.
+// Considering the signs of a1 and a2, we have 4 possible cases:
+//
+// 1) If a1 >= 0 and a2 >= 0, then
+// a1*0 - a2*N2 <= c2 - c1 <= a1*N1 - a2*0
+// -a2*N2 <= c2 - c1 <= a1*N1
+//
+// 2) If a1 >= 0 and a2 <= 0, then
+// a1*0 - a2*0 <= c2 - c1 <= a1*N1 - a2*N2
+// 0 <= c2 - c1 <= a1*N1 - a2*N2
+//
+// 3) If a1 <= 0 and a2 >= 0, then
+// a1*N1 - a2*N2 <= c2 - c1 <= a1*0 - a2*0
+// a1*N1 - a2*N2 <= c2 - c1 <= 0
+//
+// 4) If a1 <= 0 and a2 <= 0, then
+// a1*N1 - a2*0 <= c2 - c1 <= a1*0 - a2*N2
+// a1*N1 <= c2 - c1 <= -a2*N2
+//
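+// For example (illustrative numbers): with a1 = a2 = 1 and N1 = N2 = 10,
+// case 1 requires -10 <= c2 - c1 <= 10, so A[i + 20] vs. A[j]
+// (c2 - c1 = -20) is proved independent. The same reasoning applies when
+// the bounds and constants are symbolic, whenever SCEV can decide the
+// comparisons.
+//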
+// Return true if dependence disproved.
+bool DependenceAnalysis::symbolicRDIVtest(const SCEV *A1,
+ const SCEV *A2,
+ const SCEV *C1,
+ const SCEV *C2,
+ const Loop *Loop1,
+ const Loop *Loop2) const {
+ ++SymbolicRDIVapplications;
+ DEBUG(dbgs() << "\ttry symbolic RDIV test\n");
+ DEBUG(dbgs() << "\t A1 = " << *A1);
+ DEBUG(dbgs() << ", type = " << *A1->getType() << "\n");
+ DEBUG(dbgs() << "\t A2 = " << *A2 << "\n");
+ DEBUG(dbgs() << "\t C1 = " << *C1 << "\n");
+ DEBUG(dbgs() << "\t C2 = " << *C2 << "\n");
+ const SCEV *N1 = collectUpperBound(Loop1, A1->getType());
+ const SCEV *N2 = collectUpperBound(Loop2, A1->getType());
+ DEBUG(if (N1) dbgs() << "\t N1 = " << *N1 << "\n");
+ DEBUG(if (N2) dbgs() << "\t N2 = " << *N2 << "\n");
+ const SCEV *C2_C1 = SE->getMinusSCEV(C2, C1);
+ const SCEV *C1_C2 = SE->getMinusSCEV(C1, C2);
+ DEBUG(dbgs() << "\t C2 - C1 = " << *C2_C1 << "\n");
+ DEBUG(dbgs() << "\t C1 - C2 = " << *C1_C2 << "\n");
+ if (SE->isKnownNonNegative(A1)) {
+ if (SE->isKnownNonNegative(A2)) {
+ // A1 >= 0 && A2 >= 0
+ if (N1) {
+ // make sure that c2 - c1 <= a1*N1
+ const SCEV *A1N1 = SE->getMulExpr(A1, N1);
+ DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ if (N2) {
+ // make sure that -a2*N2 <= c2 - c1, or a2*N2 >= c1 - c2
+ const SCEV *A2N2 = SE->getMulExpr(A2, N2);
+ DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SLT, A2N2, C1_C2)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ }
+ else if (SE->isKnownNonPositive(A2)) {
+ // a1 >= 0 && a2 <= 0
+ if (N1 && N2) {
+ // make sure that c2 - c1 <= a1*N1 - a2*N2
+ const SCEV *A1N1 = SE->getMulExpr(A1, N1);
+ const SCEV *A2N2 = SE->getMulExpr(A2, N2);
+ const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2);
+ DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1_A2N2)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ // make sure that 0 <= c2 - c1
+ if (SE->isKnownNegative(C2_C1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ }
+ else if (SE->isKnownNonPositive(A1)) {
+ if (SE->isKnownNonNegative(A2)) {
+ // a1 <= 0 && a2 >= 0
+ if (N1 && N2) {
+ // make sure that a1*N1 - a2*N2 <= c2 - c1
+ const SCEV *A1N1 = SE->getMulExpr(A1, N1);
+ const SCEV *A2N2 = SE->getMulExpr(A2, N2);
+ const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2);
+ DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1_A2N2, C2_C1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ // make sure that c2 - c1 <= 0
+ if (SE->isKnownPositive(C2_C1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ else if (SE->isKnownNonPositive(A2)) {
+ // a1 <= 0 && a2 <= 0
+ if (N1) {
+ // make sure that a1*N1 <= c2 - c1
+ const SCEV *A1N1 = SE->getMulExpr(A1, N1);
+ DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1, C2_C1)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ if (N2) {
+ // make sure that c2 - c1 <= -a2*N2, or c1 - c2 >= a2*N2
+ const SCEV *A2N2 = SE->getMulExpr(A2, N2);
+ DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n");
+ if (isKnownPredicate(CmpInst::ICMP_SLT, C1_C2, A2N2)) {
+ ++SymbolicRDIVindependence;
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+
+// testSIV -
+// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 - a2*i]
+// where i is an induction variable, c1 and c2 are loop invariant, and a1 and
+// a2 are constant, we attack it with an SIV test. While they can all be
+// solved with the Exact SIV test, it's worthwhile to use simpler tests when
+// they apply; they're cheaper and sometimes more precise.
+//
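+// For example (illustrative subscripts): A[i + 1] vs. A[i] has equal
+// coefficients, so the Strong SIV test applies; A[i] vs. A[10 - i] has
+// SrcCoeff == -DstCoeff, so the Weak-Crossing SIV test applies; and
+// A[2*i] vs. A[3*i + 1] falls through to the Exact SIV test.
+//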
+// Return true if dependence disproved.
+bool DependenceAnalysis::testSIV(const SCEV *Src,
+ const SCEV *Dst,
+ unsigned &Level,
+ FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const {
+ DEBUG(dbgs() << " src = " << *Src << "\n");
+ DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
+ const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+ if (SrcAddRec && DstAddRec) {
+ const SCEV *SrcConst = SrcAddRec->getStart();
+ const SCEV *DstConst = DstAddRec->getStart();
+ const SCEV *SrcCoeff = SrcAddRec->getStepRecurrence(*SE);
+ const SCEV *DstCoeff = DstAddRec->getStepRecurrence(*SE);
+ const Loop *CurLoop = SrcAddRec->getLoop();
+ assert(CurLoop == DstAddRec->getLoop() &&
+ "both loops in SIV should be same");
+ Level = mapSrcLoop(CurLoop);
+ bool disproven;
+ if (SrcCoeff == DstCoeff)
+ disproven = strongSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
+ Level, Result, NewConstraint);
+ else if (SrcCoeff == SE->getNegativeSCEV(DstCoeff))
+ disproven = weakCrossingSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
+ Level, Result, NewConstraint, SplitIter);
+ else
+ disproven = exactSIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop,
+ Level, Result, NewConstraint);
+ return disproven ||
+ gcdMIVtest(Src, Dst, Result) ||
+ symbolicRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop, CurLoop);
+ }
+ if (SrcAddRec) {
+ const SCEV *SrcConst = SrcAddRec->getStart();
+ const SCEV *SrcCoeff = SrcAddRec->getStepRecurrence(*SE);
+ const SCEV *DstConst = Dst;
+ const Loop *CurLoop = SrcAddRec->getLoop();
+ Level = mapSrcLoop(CurLoop);
+ return weakZeroDstSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
+ Level, Result, NewConstraint) ||
+ gcdMIVtest(Src, Dst, Result);
+ }
+ if (DstAddRec) {
+ const SCEV *DstConst = DstAddRec->getStart();
+ const SCEV *DstCoeff = DstAddRec->getStepRecurrence(*SE);
+ const SCEV *SrcConst = Src;
+ const Loop *CurLoop = DstAddRec->getLoop();
+ Level = mapDstLoop(CurLoop);
+ return weakZeroSrcSIVtest(DstCoeff, SrcConst, DstConst,
+ CurLoop, Level, Result, NewConstraint) ||
+ gcdMIVtest(Src, Dst, Result);
+ }
+ llvm_unreachable("SIV test expected at least one AddRec");
+ return false;
+}
+
+
+// testRDIV -
+// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j]
+// where i and j are induction variables, c1 and c2 are loop invariant,
+// and a1 and a2 are constant, we can solve it exactly with an easy adaptation
+// of the Exact SIV test, the Restricted Double Index Variable (RDIV) test.
+// It doesn't make sense to talk about distance or direction in this case,
+// so there's no point in making special versions of the Strong SIV test or
+// the Weak-crossing SIV test.
+//
+// With minor algebra, this test can also be used for things like
+// [c1 + a1*i + a2*j][c2].
+//
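+// (The minor algebra: a dependence between [c1 + a1*i + a2*j] and [c2]
+// requires c1 + a1*i + a2*j = c2, i.e., c1 + a1*i = c2 + (-a2)*j, which
+// is why the code below negates the step of the outer recurrence to
+// recover the missing coefficient.)
+//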
+// Return true if dependence disproved.
+bool DependenceAnalysis::testRDIV(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const {
+ // we have 3 possible situations here:
+ // 1) [a*i + b] and [c*j + d]
+ // 2) [a*i + c*j + b] and [d]
+ // 3) [b] and [a*i + c*j + d]
+ // We need to find what we've got and get organized
+
+ const SCEV *SrcConst, *DstConst;
+ const SCEV *SrcCoeff, *DstCoeff;
+ const Loop *SrcLoop, *DstLoop;
+
+ DEBUG(dbgs() << " src = " << *Src << "\n");
+ DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
+ const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+ if (SrcAddRec && DstAddRec) {
+ SrcConst = SrcAddRec->getStart();
+ SrcCoeff = SrcAddRec->getStepRecurrence(*SE);
+ SrcLoop = SrcAddRec->getLoop();
+ DstConst = DstAddRec->getStart();
+ DstCoeff = DstAddRec->getStepRecurrence(*SE);
+ DstLoop = DstAddRec->getLoop();
+ }
+ else if (SrcAddRec) {
+ if (const SCEVAddRecExpr *tmpAddRec =
+ dyn_cast<SCEVAddRecExpr>(SrcAddRec->getStart())) {
+ SrcConst = tmpAddRec->getStart();
+ SrcCoeff = tmpAddRec->getStepRecurrence(*SE);
+ SrcLoop = tmpAddRec->getLoop();
+ DstConst = Dst;
+ DstCoeff = SE->getNegativeSCEV(SrcAddRec->getStepRecurrence(*SE));
+ DstLoop = SrcAddRec->getLoop();
+ }
+ else
+ llvm_unreachable("RDIV reached by surprising SCEVs");
+ }
+ else if (DstAddRec) {
+ if (const SCEVAddRecExpr *tmpAddRec =
+ dyn_cast<SCEVAddRecExpr>(DstAddRec->getStart())) {
+ DstConst = tmpAddRec->getStart();
+ DstCoeff = tmpAddRec->getStepRecurrence(*SE);
+ DstLoop = tmpAddRec->getLoop();
+ SrcConst = Src;
+ SrcCoeff = SE->getNegativeSCEV(DstAddRec->getStepRecurrence(*SE));
+ SrcLoop = DstAddRec->getLoop();
+ }
+ else
+ llvm_unreachable("RDIV reached by surprising SCEVs");
+ }
+ else
+ llvm_unreachable("RDIV expected at least one AddRec");
+ return exactRDIVtest(SrcCoeff, DstCoeff,
+ SrcConst, DstConst,
+ SrcLoop, DstLoop,
+ Result) ||
+ gcdMIVtest(Src, Dst, Result) ||
+ symbolicRDIVtest(SrcCoeff, DstCoeff,
+ SrcConst, DstConst,
+ SrcLoop, DstLoop);
+}
+
+
+// Tests the single-subscript MIV pair (Src and Dst) for dependence.
+// Return true if dependence disproved.
+// Can sometimes refine direction vectors.
+bool DependenceAnalysis::testMIV(const SCEV *Src,
+ const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << " src = " << *Src << "\n");
+ DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ Result.Consistent = false;
+ return gcdMIVtest(Src, Dst, Result) ||
+ banerjeeMIVtest(Src, Dst, Loops, Result);
+}
+
+
+// Given a product, e.g., 10*X*Y, returns the first constant operand,
+// in this case 10. If there is no constant part, returns NULL.
+static
+const SCEVConstant *getConstantPart(const SCEVMulExpr *Product) {
+ for (unsigned Op = 0, Ops = Product->getNumOperands(); Op < Ops; Op++) {
+ if (const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Product->getOperand(Op)))
+ return Constant;
+ }
+ return NULL;
+}
+
+
+//===----------------------------------------------------------------------===//
+// gcdMIVtest -
+// Tests an MIV subscript pair for dependence.
+// Returns true if any possible dependence is disproved.
+// Marks the result as inconsistent.
+// Can sometimes disprove the equal direction for 1 or more loops,
+// as discussed in Michael Wolfe's book,
+// High Performance Compilers for Parallel Computing, page 235.
+//
+// We spend some effort (code!) to handle cases like
+// [10*i + 5*N*j + 15*M + 6], where i and j are induction variables,
+// but M and N are just loop-invariant variables.
+// This should help us handle linearized subscripts;
+// also makes this test a useful backup to the various SIV tests.
+//
+// It occurs to me that the presence of loop-invariant variables
+// changes the nature of the test from "greatest common divisor"
+// to "a common divisor!"
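+//
+// For example (illustrative subscripts): for A[2*i + 4*j + 1] and
+// A[2*i + 4*j + 4], the running GCD of the coefficients is
+// gcd(2, 4) = 2 and Delta = 4 - 1 = 3; since 2 doesn't divide 3, any
+// possible dependence is disproved.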
+bool DependenceAnalysis::gcdMIVtest(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << "starting gcd\n");
+ ++GCDapplications;
+ unsigned BitWidth = Src->getType()->getIntegerBitWidth();
+ APInt RunningGCD = APInt::getNullValue(BitWidth);
+
+ // Examine Src coefficients.
+ // Compute running GCD and record source constant.
+ // Because we're looking for the constant at the end of the chain,
+ // we can't quit the loop just because the GCD == 1.
+ const SCEV *Coefficients = Src;
+ while (const SCEVAddRecExpr *AddRec =
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
+ const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Coeff);
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ if (!Constant)
+ return false;
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ Coefficients = AddRec->getStart();
+ }
+ const SCEV *SrcConst = Coefficients;
+
+ // Examine Dst coefficients.
+ // Compute running GCD and record destination constant.
+ // Because we're looking for the constant at the end of the chain,
+ // we can't quit the loop just because the GCD == 1.
+ Coefficients = Dst;
+ while (const SCEVAddRecExpr *AddRec =
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
+ const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Coeff);
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ if (!Constant)
+ return false;
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ Coefficients = AddRec->getStart();
+ }
+ const SCEV *DstConst = Coefficients;
+
+ APInt ExtraGCD = APInt::getNullValue(BitWidth);
+ const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
+ DEBUG(dbgs() << " Delta = " << *Delta << "\n");
+ const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Delta);
+ if (const SCEVAddExpr *Sum = dyn_cast<SCEVAddExpr>(Delta)) {
+ // If Delta is a sum of products, we may be able to make further progress.
+ for (unsigned Op = 0, Ops = Sum->getNumOperands(); Op < Ops; Op++) {
+ const SCEV *Operand = Sum->getOperand(Op);
+ if (isa<SCEVConstant>(Operand)) {
+ assert(!Constant && "Surprised to find multiple constants");
+ Constant = cast<SCEVConstant>(Operand);
+ }
+ else if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Operand)) {
+ // Search for a constant operand to participate in the GCD;
+ // if none is found, return false.
+ const SCEVConstant *ConstOp = getConstantPart(Product);
+ if (!ConstOp)
+ return false;
+ APInt ConstOpValue = ConstOp->getValue()->getValue();
+ ExtraGCD = APIntOps::GreatestCommonDivisor(ExtraGCD,
+ ConstOpValue.abs());
+ }
+ else
+ return false;
+ }
+ }
+ if (!Constant)
+ return false;
+ APInt ConstDelta = cast<SCEVConstant>(Constant)->getValue()->getValue();
+ DEBUG(dbgs() << " ConstDelta = " << ConstDelta << "\n");
+ if (ConstDelta == 0)
+ return false;
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ExtraGCD);
+ DEBUG(dbgs() << " RunningGCD = " << RunningGCD << "\n");
+ APInt Remainder = ConstDelta.srem(RunningGCD);
+ if (Remainder != 0) {
+ ++GCDindependence;
+ return true;
+ }
+
+ // Try to disprove equal directions.
+ // For example, given a subscript pair [3*i + 2*j] and [i' + 2*j' - 1],
+ // the code above can't disprove the dependence because the GCD = 1.
+ // So we consider what happens if i = i' and what happens if j = j'.
+ // If i = i', we can simplify the subscript to [2*i + 2*j] and [2*j' - 1],
+ // which is infeasible, so we can disallow the = direction for the i level.
+ // Setting j = j' doesn't help matters, so we end up with a direction vector
+ // of [<>, *]
+ //
+ // Given A[5*i + 10*j*M + 9*M*N] and A[15*i + 20*j*M - 21*N*M + 5],
+ // we need to remember that the constant part is 5 and the RunningGCD should
+ // be initialized to ExtraGCD = 30.
+ DEBUG(dbgs() << " ExtraGCD = " << ExtraGCD << '\n');
+
+ bool Improved = false;
+ Coefficients = Src;
+ while (const SCEVAddRecExpr *AddRec =
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ Coefficients = AddRec->getStart();
+ const Loop *CurLoop = AddRec->getLoop();
+ RunningGCD = ExtraGCD;
+ const SCEV *SrcCoeff = AddRec->getStepRecurrence(*SE);
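+ // Initialize DstCoeff to zero of the matching type (SrcCoeff - SrcCoeff).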
+ const SCEV *DstCoeff = SE->getMinusSCEV(SrcCoeff, SrcCoeff);
+ const SCEV *Inner = Src;
+ while (RunningGCD != 1 && isa<SCEVAddRecExpr>(Inner)) {
+ AddRec = cast<SCEVAddRecExpr>(Inner);
+ const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
+ if (CurLoop == AddRec->getLoop())
+ ; // SrcCoeff == Coeff
+ else {
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ else
+ Constant = cast<SCEVConstant>(Coeff);
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ }
+ Inner = AddRec->getStart();
+ }
+ Inner = Dst;
+ while (RunningGCD != 1 && isa<SCEVAddRecExpr>(Inner)) {
+ AddRec = cast<SCEVAddRecExpr>(Inner);
+ const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
+ if (CurLoop == AddRec->getLoop())
+ DstCoeff = Coeff;
+ else {
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ else
+ Constant = cast<SCEVConstant>(Coeff);
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ }
+ Inner = AddRec->getStart();
+ }
+ Delta = SE->getMinusSCEV(SrcCoeff, DstCoeff);
+ if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Delta))
+ // If the coefficient is the product of a constant and other stuff,
+ // we can use the constant in the GCD computation.
+ Constant = getConstantPart(Product);
+ else if (isa<SCEVConstant>(Delta))
+ Constant = cast<SCEVConstant>(Delta);
+ else {
+ // The difference of the two coefficients might not be a product
+ // or constant, in which case we give up on this direction.
+ continue;
+ }
+ APInt ConstCoeff = Constant->getValue()->getValue();
+ RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
+ DEBUG(dbgs() << "\tRunningGCD = " << RunningGCD << "\n");
+ if (RunningGCD != 0) {
+ Remainder = ConstDelta.srem(RunningGCD);
+ DEBUG(dbgs() << "\tRemainder = " << Remainder << "\n");
+ if (Remainder != 0) {
+ unsigned Level = mapSrcLoop(CurLoop);
+ Result.DV[Level - 1].Direction &= unsigned(~Dependence::DVEntry::EQ);
+ Improved = true;
+ }
+ }
+ }
+ if (Improved)
+ ++GCDsuccesses;
+ DEBUG(dbgs() << "all done\n");
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// banerjeeMIVtest -
+// Use Banerjee's Inequalities to test an MIV subscript pair.
+// (Wolfe, in the race-car book, calls this the Extreme Value Test.)
+// Generally follows the discussion in Section 2.5.2 of
+//
+// Optimizing Supercompilers for Supercomputers
+// Michael Wolfe
+//
+// The inequalities given on page 25 are simplified in that loops are
+// normalized so that the lower bound is always 0 and the stride is always 1.
+// For example, Wolfe gives
+//
+// LB^<_k = (A^-_k - B_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k
+//
+// where A_k is the coefficient of the kth index in the source subscript,
+// B_k is the coefficient of the kth index in the destination subscript,
+// U_k is the upper bound of the kth index, L_k is the lower bound of the kth
+// index, and N_k is the stride of the kth index. Since all loops are normalized
+// by the SCEV package, N_k = 1 and L_k = 0, allowing us to simplify the
+// equation to
+//
+// LB^<_k = (A^-_k - B_k)^- (U_k - 0 - 1) + (A_k - B_k)0 - B_k 1
+// = (A^-_k - B_k)^- (U_k - 1) - B_k
+//
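+// Plugging in illustrative numbers: with A_k = 1, B_k = 2, and U_k = 10,
+// A^-_k = min(1, 0) = 0, (A^-_k - B_k)^- = (0 - 2)^- = -2, so
+// LB^<_k = -2*(10 - 1) - 2 = -20.
+//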
+// Similar simplifications are possible for the other equations.
+//
+// When we can't determine the number of iterations for a loop,
+// we use NULL as an indicator for the worst case, infinity.
+// When computing the upper bound, NULL denotes +inf;
+// for the lower bound, NULL denotes -inf.
+//
+// Return true if dependence disproved.
+bool DependenceAnalysis::banerjeeMIVtest(const SCEV *Src,
+ const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const {
+ DEBUG(dbgs() << "starting Banerjee\n");
+ ++BanerjeeApplications;
+ DEBUG(dbgs() << " Src = " << *Src << '\n');
+ const SCEV *A0;
+ CoefficientInfo *A = collectCoeffInfo(Src, true, A0);
+ DEBUG(dbgs() << " Dst = " << *Dst << '\n');
+ const SCEV *B0;
+ CoefficientInfo *B = collectCoeffInfo(Dst, false, B0);
+ BoundInfo *Bound = new BoundInfo[MaxLevels + 1];
+ const SCEV *Delta = SE->getMinusSCEV(B0, A0);
+ DEBUG(dbgs() << "\tDelta = " << *Delta << '\n');
+
+ // Compute bounds for all the * directions.
+ DEBUG(dbgs() << "\tBounds[*]\n");
+ for (unsigned K = 1; K <= MaxLevels; ++K) {
+ Bound[K].Iterations = A[K].Iterations ? A[K].Iterations : B[K].Iterations;
+ Bound[K].Direction = Dependence::DVEntry::ALL;
+ Bound[K].DirSet = Dependence::DVEntry::NONE;
+ findBoundsALL(A, B, Bound, K);
+#ifndef NDEBUG
+ DEBUG(dbgs() << "\t " << K << '\t');
+ if (Bound[K].Lower[Dependence::DVEntry::ALL])
+ DEBUG(dbgs() << *Bound[K].Lower[Dependence::DVEntry::ALL] << '\t');
+ else
+ DEBUG(dbgs() << "-inf\t");
+ if (Bound[K].Upper[Dependence::DVEntry::ALL])
+ DEBUG(dbgs() << *Bound[K].Upper[Dependence::DVEntry::ALL] << '\n');
+ else
+ DEBUG(dbgs() << "+inf\n");
+#endif
+ }
+
+ // Test the *, *, *, ... case.
+ bool Disproved = false;
+ if (testBounds(Dependence::DVEntry::ALL, 0, Bound, Delta)) {
+ // Explore the direction vector hierarchy.
+ unsigned DepthExpanded = 0;
+ unsigned NewDeps = exploreDirections(1, A, B, Bound,
+ Loops, DepthExpanded, Delta);
+ if (NewDeps > 0) {
+ bool Improved = false;
+ for (unsigned K = 1; K <= CommonLevels; ++K) {
+ if (Loops[K]) {
+ unsigned Old = Result.DV[K - 1].Direction;
+ Result.DV[K - 1].Direction = Old & Bound[K].DirSet;
+ Improved |= Old != Result.DV[K - 1].Direction;
+ if (!Result.DV[K - 1].Direction) {
+ Improved = false;
+ Disproved = true;
+ break;
+ }
+ }
+ }
+ if (Improved)
+ ++BanerjeeSuccesses;
+ }
+ else {
+ ++BanerjeeIndependence;
+ Disproved = true;
+ }
+ }
+ else {
+ ++BanerjeeIndependence;
+ Disproved = true;
+ }
+ delete [] Bound;
+ delete [] A;
+ delete [] B;
+ return Disproved;
+}
+
+
+// Hierarchically expands the direction vector
+// search space, combining the directions of discovered dependences
+// in the DirSet field of Bound. Returns the number of distinct
+// dependences discovered. If the dependence is disproved,
+// it will return 0.
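+// Since each common level in Loops contributes the three directions
+// <, =, and >, a nest with L such levels explores at most 3^L direction
+// vectors; the bounds tests prune subtrees that can't satisfy Delta.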
+unsigned DependenceAnalysis::exploreDirections(unsigned Level,
+ CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ const SmallBitVector &Loops,
+ unsigned &DepthExpanded,
+ const SCEV *Delta) const {
+ if (Level > CommonLevels) {
+ // record result
+ DEBUG(dbgs() << "\t[");
+ for (unsigned K = 1; K <= CommonLevels; ++K) {
+ if (Loops[K]) {
+ Bound[K].DirSet |= Bound[K].Direction;
+#ifndef NDEBUG
+ switch (Bound[K].Direction) {
+ case Dependence::DVEntry::LT:
+ DEBUG(dbgs() << " <");
+ break;
+ case Dependence::DVEntry::EQ:
+ DEBUG(dbgs() << " =");
+ break;
+ case Dependence::DVEntry::GT:
+ DEBUG(dbgs() << " >");
+ break;
+ case Dependence::DVEntry::ALL:
+ DEBUG(dbgs() << " *");
+ break;
+ default:
+ llvm_unreachable("unexpected Bound[K].Direction");
+ }
+#endif
+ }
+ }
+ DEBUG(dbgs() << " ]\n");
+ return 1;
+ }
+ if (Loops[Level]) {
+ if (Level > DepthExpanded) {
+ DepthExpanded = Level;
+ // compute bounds for <, =, > at current level
+ findBoundsLT(A, B, Bound, Level);
+ findBoundsGT(A, B, Bound, Level);
+ findBoundsEQ(A, B, Bound, Level);
+#ifndef NDEBUG
+ DEBUG(dbgs() << "\tBound for level = " << Level << '\n');
+ DEBUG(dbgs() << "\t <\t");
+ if (Bound[Level].Lower[Dependence::DVEntry::LT])
+ DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::LT] << '\t');
+ else
+ DEBUG(dbgs() << "-inf\t");
+ if (Bound[Level].Upper[Dependence::DVEntry::LT])
+ DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::LT] << '\n');
+ else
+ DEBUG(dbgs() << "+inf\n");
+ DEBUG(dbgs() << "\t =\t");
+ if (Bound[Level].Lower[Dependence::DVEntry::EQ])
+ DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::EQ] << '\t');
+ else
+ DEBUG(dbgs() << "-inf\t");
+ if (Bound[Level].Upper[Dependence::DVEntry::EQ])
+ DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::EQ] << '\n');
+ else
+ DEBUG(dbgs() << "+inf\n");
+ DEBUG(dbgs() << "\t >\t");
+ if (Bound[Level].Lower[Dependence::DVEntry::GT])
+ DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::GT] << '\t');
+ else
+ DEBUG(dbgs() << "-inf\t");
+ if (Bound[Level].Upper[Dependence::DVEntry::GT])
+ DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::GT] << '\n');
+ else
+ DEBUG(dbgs() << "+inf\n");
+#endif
+ }
+
+ unsigned NewDeps = 0;
+
+ // test bounds for <, *, *, ...
+ if (testBounds(Dependence::DVEntry::LT, Level, Bound, Delta))
+ NewDeps += exploreDirections(Level + 1, A, B, Bound,
+ Loops, DepthExpanded, Delta);
+
+ // Test bounds for =, *, *, ...
+ if (testBounds(Dependence::DVEntry::EQ, Level, Bound, Delta))
+ NewDeps += exploreDirections(Level + 1, A, B, Bound,
+ Loops, DepthExpanded, Delta);
+
+ // test bounds for >, *, *, ...
+ if (testBounds(Dependence::DVEntry::GT, Level, Bound, Delta))
+ NewDeps += exploreDirections(Level + 1, A, B, Bound,
+ Loops, DepthExpanded, Delta);
+
+ Bound[Level].Direction = Dependence::DVEntry::ALL;
+ return NewDeps;
+ }
+ else
+ return exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded, Delta);
+}
+
+
+// Returns true iff the current bounds are plausible.
+bool DependenceAnalysis::testBounds(unsigned char DirKind,
+ unsigned Level,
+ BoundInfo *Bound,
+ const SCEV *Delta) const {
+ Bound[Level].Direction = DirKind;
+ if (const SCEV *LowerBound = getLowerBound(Bound))
+ if (isKnownPredicate(CmpInst::ICMP_SGT, LowerBound, Delta))
+ return false;
+ if (const SCEV *UpperBound = getUpperBound(Bound))
+ if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, UpperBound))
+ return false;
+ return true;
+}
+
+
+// Computes the upper and lower bounds for level K
+// using the * direction. Records them in Bound.
+// Wolfe gives the equations
+//
+// LB^*_k = (A^-_k - B^+_k)(U_k - L_k) + (A_k - B_k)L_k
+// UB^*_k = (A^+_k - B^-_k)(U_k - L_k) + (A_k - B_k)L_k
+//
+// Since we normalize loops, we can simplify these equations to
+//
+// LB^*_k = (A^-_k - B^+_k)U_k
+// UB^*_k = (A^+_k - B^-_k)U_k
+//
+// We must be careful to handle the case where the upper bound is unknown.
+// Note that the lower bound is always <= 0
+// and the upper bound is always >= 0.
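+//
+// For example (illustrative numbers): with A_k = 2, B_k = -3, and
+// U_k = 10, LB^*_k = (0 - 0)*10 = 0 and UB^*_k = (2 - (-3))*10 = 50.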
+void DependenceAnalysis::findBoundsALL(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const {
+ Bound[K].Lower[Dependence::DVEntry::ALL] = NULL; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::ALL] = NULL; // Default value = +infinity.
+ if (Bound[K].Iterations) {
+ Bound[K].Lower[Dependence::DVEntry::ALL] =
+ SE->getMulExpr(SE->getMinusSCEV(A[K].NegPart, B[K].PosPart),
+ Bound[K].Iterations);
+ Bound[K].Upper[Dependence::DVEntry::ALL] =
+ SE->getMulExpr(SE->getMinusSCEV(A[K].PosPart, B[K].NegPart),
+ Bound[K].Iterations);
+ }
+ else {
+ // If the difference is 0, we won't need to know the number of iterations.
+ if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].NegPart, B[K].PosPart))
+ Bound[K].Lower[Dependence::DVEntry::ALL] =
+ SE->getConstant(A[K].Coeff->getType(), 0);
+ if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].PosPart, B[K].NegPart))
+ Bound[K].Upper[Dependence::DVEntry::ALL] =
+ SE->getConstant(A[K].Coeff->getType(), 0);
+ }
+}
+
+
+// Computes the upper and lower bounds for level K
+// using the = direction. Records them in Bound.
+// Wolfe gives the equations
+//
+// LB^=_k = (A_k - B_k)^- (U_k - L_k) + (A_k - B_k)L_k
+// UB^=_k = (A_k - B_k)^+ (U_k - L_k) + (A_k - B_k)L_k
+//
+// Since we normalize loops, we can simplify these equations to
+//
+// LB^=_k = (A_k - B_k)^- U_k
+// UB^=_k = (A_k - B_k)^+ U_k
+//
+// We must be careful to handle the case where the upper bound is unknown.
+// Note that the lower bound is always <= 0
+// and the upper bound is always >= 0.
+void DependenceAnalysis::findBoundsEQ(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const {
+ Bound[K].Lower[Dependence::DVEntry::EQ] = NULL; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::EQ] = NULL; // Default value = +infinity.
+ if (Bound[K].Iterations) {
+ const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
+ const SCEV *NegativePart = getNegativePart(Delta);
+ Bound[K].Lower[Dependence::DVEntry::EQ] =
+ SE->getMulExpr(NegativePart, Bound[K].Iterations);
+ const SCEV *PositivePart = getPositivePart(Delta);
+ Bound[K].Upper[Dependence::DVEntry::EQ] =
+ SE->getMulExpr(PositivePart, Bound[K].Iterations);
+ }
+ else {
+ // If the positive/negative part of the difference is 0,
+ // we won't need to know the number of iterations.
+ const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
+ const SCEV *NegativePart = getNegativePart(Delta);
+ if (NegativePart->isZero())
+ Bound[K].Lower[Dependence::DVEntry::EQ] = NegativePart; // Zero
+ const SCEV *PositivePart = getPositivePart(Delta);
+ if (PositivePart->isZero())
+ Bound[K].Upper[Dependence::DVEntry::EQ] = PositivePart; // Zero
+ }
+}
+
+
+// Computes the upper and lower bounds for level K
+// using the < direction. Records them in Bound.
+// Wolfe gives the equations
+//
+// LB^<_k = (A^-_k - B_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k
+// UB^<_k = (A^+_k - B_k)^+ (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k
+//
+// Since we normalize loops, we can simplify these equations to
+//
+// LB^<_k = (A^-_k - B_k)^- (U_k - 1) - B_k
+// UB^<_k = (A^+_k - B_k)^+ (U_k - 1) - B_k
+//
+// We must be careful to handle the case where the upper bound is unknown.
+void DependenceAnalysis::findBoundsLT(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const {
+ Bound[K].Lower[Dependence::DVEntry::LT] = NULL; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::LT] = NULL; // Default value = +infinity.
+ if (Bound[K].Iterations) {
+ const SCEV *Iter_1 =
+ SE->getMinusSCEV(Bound[K].Iterations,
+ SE->getConstant(Bound[K].Iterations->getType(), 1));
+ const SCEV *NegPart =
+ getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff));
+ Bound[K].Lower[Dependence::DVEntry::LT] =
+ SE->getMinusSCEV(SE->getMulExpr(NegPart, Iter_1), B[K].Coeff);
+ const SCEV *PosPart =
+ getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff));
+ Bound[K].Upper[Dependence::DVEntry::LT] =
+ SE->getMinusSCEV(SE->getMulExpr(PosPart, Iter_1), B[K].Coeff);
+ }
+ else {
+ // If the positive/negative part of the difference is 0,
+ // we won't need to know the number of iterations.
+ const SCEV *NegPart =
+ getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff));
+ if (NegPart->isZero())
+ Bound[K].Lower[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff);
+ const SCEV *PosPart =
+ getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff));
+ if (PosPart->isZero())
+ Bound[K].Upper[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff);
+ }
+}
+
+
+// Computes the upper and lower bounds for level K
+// using the > direction. Records them in Bound.
+// Wolfe gives the equations
+//
+// LB^>_k = (A_k - B^+_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k + A_k N_k
+// UB^>_k = (A_k - B^-_k)^+ (U_k - L_k - N_k) + (A_k - B_k)L_k + A_k N_k
+//
+// Since we normalize loops, we can simplify these equations to
+//
+// LB^>_k = (A_k - B^+_k)^- (U_k - 1) + A_k
+// UB^>_k = (A_k - B^-_k)^+ (U_k - 1) + A_k
+//
+// We must be careful to handle the case where the upper bound is unknown.
+void DependenceAnalysis::findBoundsGT(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const {
+ Bound[K].Lower[Dependence::DVEntry::GT] = NULL; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::GT] = NULL; // Default value = +infinity.
+ if (Bound[K].Iterations) {
+ const SCEV *Iter_1 =
+ SE->getMinusSCEV(Bound[K].Iterations,
+ SE->getConstant(Bound[K].Iterations->getType(), 1));
+ const SCEV *NegPart =
+ getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart));
+ Bound[K].Lower[Dependence::DVEntry::GT] =
+ SE->getAddExpr(SE->getMulExpr(NegPart, Iter_1), A[K].Coeff);
+ const SCEV *PosPart =
+ getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart));
+ Bound[K].Upper[Dependence::DVEntry::GT] =
+ SE->getAddExpr(SE->getMulExpr(PosPart, Iter_1), A[K].Coeff);
+ }
+ else {
+ // If the positive/negative part of the difference is 0,
+ // we won't need to know the number of iterations.
+ const SCEV *NegPart = getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart));
+ if (NegPart->isZero())
+ Bound[K].Lower[Dependence::DVEntry::GT] = A[K].Coeff;
+ const SCEV *PosPart = getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart));
+ if (PosPart->isZero())
+ Bound[K].Upper[Dependence::DVEntry::GT] = A[K].Coeff;
+ }
+}
+
+
+// X^+ = max(X, 0)
+const SCEV *DependenceAnalysis::getPositivePart(const SCEV *X) const {
+ return SE->getSMaxExpr(X, SE->getConstant(X->getType(), 0));
+}
+
+
+// X^- = min(X, 0)
+const SCEV *DependenceAnalysis::getNegativePart(const SCEV *X) const {
+ return SE->getSMinExpr(X, SE->getConstant(X->getType(), 0));
+}
+
+
+// Walks through the subscript,
+// collecting each coefficient, the associated loop bounds,
+// and recording its positive and negative parts for later use.
+DependenceAnalysis::CoefficientInfo *
+DependenceAnalysis::collectCoeffInfo(const SCEV *Subscript,
+ bool SrcFlag,
+ const SCEV *&Constant) const {
+ const SCEV *Zero = SE->getConstant(Subscript->getType(), 0);
+ CoefficientInfo *CI = new CoefficientInfo[MaxLevels + 1];
+ for (unsigned K = 1; K <= MaxLevels; ++K) {
+ CI[K].Coeff = Zero;
+ CI[K].PosPart = Zero;
+ CI[K].NegPart = Zero;
+ CI[K].Iterations = NULL;
+ }
+ while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Subscript)) {
+ const Loop *L = AddRec->getLoop();
+ unsigned K = SrcFlag ? mapSrcLoop(L) : mapDstLoop(L);
+ CI[K].Coeff = AddRec->getStepRecurrence(*SE);
+ CI[K].PosPart = getPositivePart(CI[K].Coeff);
+ CI[K].NegPart = getNegativePart(CI[K].Coeff);
+ CI[K].Iterations = collectUpperBound(L, Subscript->getType());
+ Subscript = AddRec->getStart();
+ }
+ Constant = Subscript;
+#ifndef NDEBUG
+ DEBUG(dbgs() << "\tCoefficient Info\n");
+ for (unsigned K = 1; K <= MaxLevels; ++K) {
+ DEBUG(dbgs() << "\t " << K << "\t" << *CI[K].Coeff);
+ DEBUG(dbgs() << "\tPos Part = ");
+ DEBUG(dbgs() << *CI[K].PosPart);
+ DEBUG(dbgs() << "\tNeg Part = ");
+ DEBUG(dbgs() << *CI[K].NegPart);
+ DEBUG(dbgs() << "\tUpper Bound = ");
+ if (CI[K].Iterations)
+ DEBUG(dbgs() << *CI[K].Iterations);
+ else
+ DEBUG(dbgs() << "+inf");
+ DEBUG(dbgs() << '\n');
+ }
+ DEBUG(dbgs() << "\t Constant = " << *Subscript << '\n');
+#endif
+ return CI;
+}
+
+
+// Looks through all the bounds info and
+// computes the lower bound given the current direction settings
+// at each level. If the lower bound for any level is -inf,
+// the result is -inf.
+const SCEV *DependenceAnalysis::getLowerBound(BoundInfo *Bound) const {
+ const SCEV *Sum = Bound[1].Lower[Bound[1].Direction];
+ for (unsigned K = 2; Sum && K <= MaxLevels; ++K) {
+ if (Bound[K].Lower[Bound[K].Direction])
+ Sum = SE->getAddExpr(Sum, Bound[K].Lower[Bound[K].Direction]);
+ else
+ Sum = NULL;
+ }
+ return Sum;
+}
+
+
+// Looks through all the bounds info and
+// computes the upper bound given the current direction settings
+// at each level. If the upper bound at any level is +inf,
+// the result is +inf.
+const SCEV *DependenceAnalysis::getUpperBound(BoundInfo *Bound) const {
+ const SCEV *Sum = Bound[1].Upper[Bound[1].Direction];
+ for (unsigned K = 2; Sum && K <= MaxLevels; ++K) {
+ if (Bound[K].Upper[Bound[K].Direction])
+ Sum = SE->getAddExpr(Sum, Bound[K].Upper[Bound[K].Direction]);
+ else
+ Sum = NULL;
+ }
+ return Sum;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Constraint manipulation for Delta test.
+
+// Given a linear SCEV,
+// return the coefficient (the step)
+// corresponding to the specified loop.
+// If there isn't one, return 0.
+// For example, given a*i + b*j + c*k, the coefficient
+// corresponding to the j loop is b.
+const SCEV *DependenceAnalysis::findCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop) const {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+ if (!AddRec)
+ return SE->getConstant(Expr->getType(), 0);
+ if (AddRec->getLoop() == TargetLoop)
+ return AddRec->getStepRecurrence(*SE);
+ return findCoefficient(AddRec->getStart(), TargetLoop);
+}
+
+
+// Given a linear SCEV,
+// return the SCEV given by zeroing out the coefficient
+// corresponding to the specified loop.
+// For example, given a*i + b*j + c*k, zeroing the coefficient
+// corresponding to the j loop would yield a*i + c*k.
+const SCEV *DependenceAnalysis::zeroCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop) const {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+ if (!AddRec)
+ return Expr; // ignore
+ if (AddRec->getLoop() == TargetLoop)
+ return AddRec->getStart();
+ return SE->getAddRecExpr(zeroCoefficient(AddRec->getStart(), TargetLoop),
+ AddRec->getStepRecurrence(*SE),
+ AddRec->getLoop(),
+ AddRec->getNoWrapFlags());
+}
+
+
+// Given a linear SCEV Expr,
+// return the SCEV given by adding some Value to the
+// coefficient corresponding to the specified TargetLoop.
+// For example, given a*i + b*j + c*k, adding 1 to the coefficient
+// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
+const SCEV *DependenceAnalysis::addToCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop,
+ const SCEV *Value) const {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+ if (!AddRec) // create a new addRec
+ return SE->getAddRecExpr(Expr,
+ Value,
+ TargetLoop,
+ SCEV::FlagAnyWrap); // Worst case, with no info.
+ if (AddRec->getLoop() == TargetLoop) {
+ const SCEV *Sum = SE->getAddExpr(AddRec->getStepRecurrence(*SE), Value);
+ if (Sum->isZero())
+ return AddRec->getStart();
+ return SE->getAddRecExpr(AddRec->getStart(),
+ Sum,
+ AddRec->getLoop(),
+ AddRec->getNoWrapFlags());
+ }
+ return SE->getAddRecExpr(addToCoefficient(AddRec->getStart(),
+ TargetLoop, Value),
+ AddRec->getStepRecurrence(*SE),
+ AddRec->getLoop(),
+ AddRec->getNoWrapFlags());
+}
+
+
+// Review the constraints, looking for opportunities
+// to simplify a subscript pair (Src and Dst).
+// Return true if some simplification occurs.
+// If the simplification isn't exact (that is, if it is conservative
+// in terms of dependence), set consistent to false.
+// Corresponds to Figure 5 from the paper
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+bool DependenceAnalysis::propagate(const SCEV *&Src,
+ const SCEV *&Dst,
+ SmallBitVector &Loops,
+ SmallVector<Constraint, 4> &Constraints,
+ bool &Consistent) {
+ bool Result = false;
+ for (int LI = Loops.find_first(); LI >= 0; LI = Loops.find_next(LI)) {
+ DEBUG(dbgs() << "\t Constraint[" << LI << "] is");
+ DEBUG(Constraints[LI].dump(dbgs()));
+ if (Constraints[LI].isDistance())
+ Result |= propagateDistance(Src, Dst, Constraints[LI], Consistent);
+ else if (Constraints[LI].isLine())
+ Result |= propagateLine(Src, Dst, Constraints[LI], Consistent);
+ else if (Constraints[LI].isPoint())
+ Result |= propagatePoint(Src, Dst, Constraints[LI]);
+ }
+ return Result;
+}
+
+
+// Attempt to propagate a distance
+// constraint into a subscript pair (Src and Dst).
+// Return true if some simplification occurs.
+// If the simplification isn't exact (that is, if it is conservative
+// in terms of dependence), set consistent to false.
+bool DependenceAnalysis::propagateDistance(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint,
+ bool &Consistent) {
+ const Loop *CurLoop = CurConstraint.getAssociatedLoop();
+ DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n");
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ if (A_K->isZero())
+ return false;
+ const SCEV *DA_K = SE->getMulExpr(A_K, CurConstraint.getD());
+ Src = SE->getMinusSCEV(Src, DA_K);
+ Src = zeroCoefficient(Src, CurLoop);
+ DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n");
+ DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n");
+ Dst = addToCoefficient(Dst, CurLoop, SE->getNegativeSCEV(A_K));
+ DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n");
+ if (!findCoefficient(Dst, CurLoop)->isZero())
+ Consistent = false;
+ return true;
+}
+
+
+// Attempt to propagate a line
+// constraint into a subscript pair (Src and Dst).
+// Return true if some simplification occurs.
+// If the simplification isn't exact (that is, if it is conservative
+// in terms of dependence), set consistent to false.
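+//
+// For example (illustrative constraint): if the line is 0*i + 2*i' = 6,
+// then i' = 3, and the A->isZero() case below substitutes it by zeroing
+// the i' coefficient in Dst and subtracting 3 times that coefficient
+// from Src (the test compares Src against Dst, so moving the term
+// across is safe).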
+bool DependenceAnalysis::propagateLine(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint,
+ bool &Consistent) {
+ const Loop *CurLoop = CurConstraint.getAssociatedLoop();
+ const SCEV *A = CurConstraint.getA();
+ const SCEV *B = CurConstraint.getB();
+ const SCEV *C = CurConstraint.getC();
+ DEBUG(dbgs() << "\t\tA = " << *A << ", B = " << *B << ", C = " << *C << "\n");
+ DEBUG(dbgs() << "\t\tSrc = " << *Src << "\n");
+ DEBUG(dbgs() << "\t\tDst = " << *Dst << "\n");
+ if (A->isZero()) {
+ const SCEVConstant *Bconst = dyn_cast<SCEVConstant>(B);
+ const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+ if (!Bconst || !Cconst) return false;
+ APInt Beta = Bconst->getValue()->getValue();
+ APInt Charlie = Cconst->getValue()->getValue();
+ APInt CdivB = Charlie.sdiv(Beta);
+ assert(Charlie.srem(Beta) == 0 && "C should be evenly divisible by B");
+ const SCEV *AP_K = findCoefficient(Dst, CurLoop);
+ // Src = SE->getAddExpr(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB)));
+ Src = SE->getMinusSCEV(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB)));
+ Dst = zeroCoefficient(Dst, CurLoop);
+ if (!findCoefficient(Src, CurLoop)->isZero())
+ Consistent = false;
+ }
+ else if (B->isZero()) {
+ const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A);
+ const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+ if (!Aconst || !Cconst) return false;
+ APInt Alpha = Aconst->getValue()->getValue();
+ APInt Charlie = Cconst->getValue()->getValue();
+ APInt CdivA = Charlie.sdiv(Alpha);
+ assert(Charlie.srem(Alpha) == 0 && "C should be evenly divisible by A");
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, SE->getConstant(CdivA)));
+ Src = zeroCoefficient(Src, CurLoop);
+ if (!findCoefficient(Dst, CurLoop)->isZero())
+ Consistent = false;
+ }
+ else if (isKnownPredicate(CmpInst::ICMP_EQ, A, B)) {
+ const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A);
+ const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+ if (!Aconst || !Cconst) return false;
+ APInt Alpha = Aconst->getValue()->getValue();
+ APInt Charlie = Cconst->getValue()->getValue();
+ APInt CdivA = Charlie.sdiv(Alpha);
+ assert(Charlie.srem(Alpha) == 0 && "C should be evenly divisible by A");
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, SE->getConstant(CdivA)));
+ Src = zeroCoefficient(Src, CurLoop);
+ Dst = addToCoefficient(Dst, CurLoop, A_K);
+ if (!findCoefficient(Dst, CurLoop)->isZero())
+ Consistent = false;
+ }
+ else {
+ // paper is incorrect here, or perhaps just misleading
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ Src = SE->getMulExpr(Src, A);
+ Dst = SE->getMulExpr(Dst, A);
+ Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, C));
+ Src = zeroCoefficient(Src, CurLoop);
+ Dst = addToCoefficient(Dst, CurLoop, SE->getMulExpr(A_K, B));
+ if (!findCoefficient(Dst, CurLoop)->isZero())
+ Consistent = false;
+ }
+ DEBUG(dbgs() << "\t\tnew Src = " << *Src << "\n");
+ DEBUG(dbgs() << "\t\tnew Dst = " << *Dst << "\n");
+ return true;
+}
+
+
+// Attempt to propagate a point
+// constraint into a subscript pair (Src and Dst).
+// Return true if some simplification occurs.
+bool DependenceAnalysis::propagatePoint(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint) {
+ const Loop *CurLoop = CurConstraint.getAssociatedLoop();
+ const SCEV *A_K = findCoefficient(Src, CurLoop);
+ const SCEV *AP_K = findCoefficient(Dst, CurLoop);
+ const SCEV *XA_K = SE->getMulExpr(A_K, CurConstraint.getX());
+ const SCEV *YAP_K = SE->getMulExpr(AP_K, CurConstraint.getY());
+ DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n");
+ Src = SE->getAddExpr(Src, SE->getMinusSCEV(XA_K, YAP_K));
+ Src = zeroCoefficient(Src, CurLoop);
+ DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n");
+ DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n");
+ Dst = zeroCoefficient(Dst, CurLoop);
+ DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n");
+ return true;
+}
+
+
+// Update direction vector entry based on the current constraint.
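+// For example, a known distance of +1 is nonzero and positive, so the
+// code below narrows the direction to LT alone; a distance of unknown
+// sign leaves all of <, =, and > possible.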
+void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level,
+ const Constraint &CurConstraint
+ ) const {
+ DEBUG(dbgs() << "\tUpdate direction, constraint =");
+ DEBUG(CurConstraint.dump(dbgs()));
+ if (CurConstraint.isAny())
+ ; // use defaults
+ else if (CurConstraint.isDistance()) {
+ // this one is consistent, the others aren't
+ Level.Scalar = false;
+ Level.Distance = CurConstraint.getD();
+ unsigned NewDirection = Dependence::DVEntry::NONE;
+ if (!SE->isKnownNonZero(Level.Distance)) // if may be zero
+ NewDirection = Dependence::DVEntry::EQ;
+ if (!SE->isKnownNonPositive(Level.Distance)) // if may be positive
+ NewDirection |= Dependence::DVEntry::LT;
+ if (!SE->isKnownNonNegative(Level.Distance)) // if may be negative
+ NewDirection |= Dependence::DVEntry::GT;
+ Level.Direction &= NewDirection;
+ }
+ else if (CurConstraint.isLine()) {
+ Level.Scalar = false;
+ Level.Distance = NULL;
+ // direction should be accurate
+ }
+ else if (CurConstraint.isPoint()) {
+ Level.Scalar = false;
+ Level.Distance = NULL;
+ unsigned NewDirection = Dependence::DVEntry::NONE;
+ if (!isKnownPredicate(CmpInst::ICMP_NE,
+ CurConstraint.getY(),
+ CurConstraint.getX()))
+ // if X may be = Y
+ NewDirection |= Dependence::DVEntry::EQ;
+ if (!isKnownPredicate(CmpInst::ICMP_SLE,
+ CurConstraint.getY(),
+ CurConstraint.getX()))
+ // if Y may be > X
+ NewDirection |= Dependence::DVEntry::LT;
+ if (!isKnownPredicate(CmpInst::ICMP_SGE,
+ CurConstraint.getY(),
+ CurConstraint.getX()))
+ // if Y may be < X
+ NewDirection |= Dependence::DVEntry::GT;
+ Level.Direction &= NewDirection;
+ }
+ else
+ llvm_unreachable("constraint has unexpected kind");
+}
+
+
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+// For debugging purposes, dump a small bit vector to dbgs().
+static void dumpSmallBitVector(SmallBitVector &BV) {
+ dbgs() << "{";
+ for (int VI = BV.find_first(); VI >= 0; VI = BV.find_next(VI)) {
+ dbgs() << VI;
+ if (BV.find_next(VI) >= 0)
+ dbgs() << ' ';
+ }
+ dbgs() << "}\n";
+}
+#endif
+
+
+// depends -
+// Returns NULL if there is no dependence.
+// Otherwise, return a Dependence with as many details as possible.
+// Corresponds to Section 3.1 in the paper
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+//
+// Care is required to keep the code below up to date w.r.t. this routine.
+Dependence *DependenceAnalysis::depends(const Instruction *Src,
+ const Instruction *Dst,
+ bool PossiblyLoopIndependent) {
+ if ((!Src->mayReadFromMemory() && !Src->mayWriteToMemory()) ||
+ (!Dst->mayReadFromMemory() && !Dst->mayWriteToMemory()))
+ // if either instruction doesn't reference memory, there's no dependence
+ return NULL;
+
+ if (!isLoadOrStore(Src) || !isLoadOrStore(Dst))
+ // can only analyze simple loads and stores, i.e., no calls, invokes, etc.
+ return new Dependence(Src, Dst);
+
+ const Value *SrcPtr = getPointerOperand(Src);
+ const Value *DstPtr = getPointerOperand(Dst);
+
+ switch (underlyingObjectsAlias(AA, DstPtr, SrcPtr)) {
+ case AliasAnalysis::MayAlias:
+ case AliasAnalysis::PartialAlias:
+ // cannot analyze the objects if we don't understand their aliasing.
+ return new Dependence(Src, Dst);
+ case AliasAnalysis::NoAlias:
+ // If the objects don't alias, they're distinct; the accesses are independent.
+ return NULL;
+ case AliasAnalysis::MustAlias:
+ break; // The underlying objects alias; test accesses for dependence.
+ }
+
+ const GEPOperator *SrcGEP = dyn_cast<GEPOperator>(SrcPtr);
+ const GEPOperator *DstGEP = dyn_cast<GEPOperator>(DstPtr);
+ if (!SrcGEP || !DstGEP)
+ return new Dependence(Src, Dst); // missing GEP, assume dependence
+
+ if (SrcGEP->getPointerOperandType() != DstGEP->getPointerOperandType())
+ return new Dependence(Src, Dst); // different types, assume dependence
+
+ // establish loop nesting levels
+ establishNestingLevels(Src, Dst);
+ DEBUG(dbgs() << " common nesting levels = " << CommonLevels << "\n");
+ DEBUG(dbgs() << " maximum nesting levels = " << MaxLevels << "\n");
+
+ FullDependence Result(Src, Dst, PossiblyLoopIndependent, CommonLevels);
+ ++TotalArrayPairs;
+
+ // classify subscript pairs
+ unsigned Pairs = SrcGEP->idx_end() - SrcGEP->idx_begin();
+ SmallVector<Subscript, 4> Pair(Pairs);
+ for (unsigned SI = 0; SI < Pairs; ++SI) {
+ Pair[SI].Loops.resize(MaxLevels + 1);
+ Pair[SI].GroupLoops.resize(MaxLevels + 1);
+ Pair[SI].Group.resize(Pairs);
+ }
+ Pairs = 0;
+ for (GEPOperator::const_op_iterator SrcIdx = SrcGEP->idx_begin(),
+ SrcEnd = SrcGEP->idx_end(),
+ DstIdx = DstGEP->idx_begin(),
+ DstEnd = DstGEP->idx_end();
+ SrcIdx != SrcEnd && DstIdx != DstEnd;
+ ++SrcIdx, ++DstIdx, ++Pairs) {
+ Pair[Pairs].Src = SE->getSCEV(*SrcIdx);
+ Pair[Pairs].Dst = SE->getSCEV(*DstIdx);
+ removeMatchingExtensions(&Pair[Pairs]);
+ Pair[Pairs].Classification =
+ classifyPair(Pair[Pairs].Src, LI->getLoopFor(Src->getParent()),
+ Pair[Pairs].Dst, LI->getLoopFor(Dst->getParent()),
+ Pair[Pairs].Loops);
+ Pair[Pairs].GroupLoops = Pair[Pairs].Loops;
+ Pair[Pairs].Group.set(Pairs);
+ DEBUG(dbgs() << " subscript " << Pairs << "\n");
+ DEBUG(dbgs() << "\tsrc = " << *Pair[Pairs].Src << "\n");
+ DEBUG(dbgs() << "\tdst = " << *Pair[Pairs].Dst << "\n");
+ DEBUG(dbgs() << "\tclass = " << Pair[Pairs].Classification << "\n");
+ DEBUG(dbgs() << "\tloops = ");
+ DEBUG(dumpSmallBitVector(Pair[Pairs].Loops));
+ }
+
+ SmallBitVector Separable(Pairs);
+ SmallBitVector Coupled(Pairs);
+
+ // Partition subscripts into separable and minimally-coupled groups
+ // The algorithm in the paper is asymptotically better;
+ // this may be faster in practice. Check someday.
+ //
+ // Here's an example of how it works. Consider this code:
+ //
+ // for (i = ...) {
+ // for (j = ...) {
+ // for (k = ...) {
+ // for (l = ...) {
+ // for (m = ...) {
+ // A[i][j][k][m] = ...;
+ // ... = A[0][j][l][i + j];
+ // }
+ // }
+ // }
+ // }
+ // }
+ //
+ // There are 4 subscripts here:
+ // 0 [i] and [0]
+ // 1 [j] and [j]
+ // 2 [k] and [l]
+ // 3 [m] and [i + j]
+ //
+ // We've already classified each subscript pair as ZIV, SIV, etc.,
+ // and collected all the loops mentioned by pair P in Pair[P].Loops.
+ // In addition, we've initialized Pair[P].GroupLoops to Pair[P].Loops
+ // and set Pair[P].Group = {P}.
+ //
+ // Src Dst Classification Loops GroupLoops Group
+ // 0 [i] [0] SIV {1} {1} {0}
+ // 1 [j] [j] SIV {2} {2} {1}
+ // 2 [k] [l] RDIV {3,4} {3,4} {2}
+ // 3 [m] [i + j] MIV {1,2,5} {1,2,5} {3}
+ //
+ // For each subscript SI 0 .. 3, we consider each remaining subscript, SJ.
+ // So, 0 is compared against 1, 2, and 3; 1 is compared against 2 and 3, etc.
+ //
+ // We begin by comparing 0 and 1. The intersection of the GroupLoops is empty.
+ // Next, 0 and 2. Again, the intersection of their GroupLoops is empty.
+ // Next, 0 and 3. The intersection of their GroupLoops = {1}, not empty,
+ // so Pair[3].Group = {0,3} and Done = false (that is, 0 will not be added
+ // to either Separable or Coupled).
+ //
+ // Next, we consider 1 and 2. The intersection of the GroupLoops is empty.
+ // Next, 1 and 3. The intersection of their GroupLoops = {2}, not empty,
+ // so Pair[3].Group = {0, 1, 3} and Done = false.
+ //
+ // Next, we compare 2 against 3. The intersection of the GroupLoops is empty.
+ // Since Done remains true, we add 2 to the set of Separable pairs.
+ //
+ // Finally, we consider 3. There's nothing to compare it with,
+ // so Done remains true and we add it to the Coupled set.
+ // Pair[3].Group = {0, 1, 3} and GroupLoops = {1, 2, 5}.
+ //
+ // In the end, we've got 1 separable subscript and 1 coupled group.
+ for (unsigned SI = 0; SI < Pairs; ++SI) {
+ if (Pair[SI].Classification == Subscript::NonLinear) {
+ // ignore these, but collect loops for later
+ ++NonlinearSubscriptPairs;
+ collectCommonLoops(Pair[SI].Src,
+ LI->getLoopFor(Src->getParent()),
+ Pair[SI].Loops);
+ collectCommonLoops(Pair[SI].Dst,
+ LI->getLoopFor(Dst->getParent()),
+ Pair[SI].Loops);
+ Result.Consistent = false;
+ }
+ else if (Pair[SI].Classification == Subscript::ZIV) {
+ // always separable
+ Separable.set(SI);
+ }
+ else {
+ // SIV, RDIV, or MIV, so check for coupled group
+ bool Done = true;
+ for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) {
+ SmallBitVector Intersection = Pair[SI].GroupLoops;
+ Intersection &= Pair[SJ].GroupLoops;
+ if (Intersection.any()) {
+ // accumulate set of all the loops in group
+ Pair[SJ].GroupLoops |= Pair[SI].GroupLoops;
+ // accumulate set of all subscripts in group
+ Pair[SJ].Group |= Pair[SI].Group;
+ Done = false;
+ }
+ }
+ if (Done) {
+ if (Pair[SI].Group.count() == 1) {
+ Separable.set(SI);
+ ++SeparableSubscriptPairs;
+ }
+ else {
+ Coupled.set(SI);
+ ++CoupledSubscriptPairs;
+ }
+ }
+ }
+ }
+
+ DEBUG(dbgs() << " Separable = ");
+ DEBUG(dumpSmallBitVector(Separable));
+ DEBUG(dbgs() << " Coupled = ");
+ DEBUG(dumpSmallBitVector(Coupled));
+
+ Constraint NewConstraint;
+ NewConstraint.setAny(SE);
+
+ // test separable subscripts
+ for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) {
+ DEBUG(dbgs() << "testing subscript " << SI);
+ switch (Pair[SI].Classification) {
+ case Subscript::ZIV:
+ DEBUG(dbgs() << ", ZIV\n");
+ if (testZIV(Pair[SI].Src, Pair[SI].Dst, Result))
+ return NULL;
+ break;
+ case Subscript::SIV: {
+ DEBUG(dbgs() << ", SIV\n");
+ unsigned Level;
+ const SCEV *SplitIter = NULL;
+ if (testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
+ Result, NewConstraint, SplitIter))
+ return NULL;
+ break;
+ }
+ case Subscript::RDIV:
+ DEBUG(dbgs() << ", RDIV\n");
+ if (testRDIV(Pair[SI].Src, Pair[SI].Dst, Result))
+ return NULL;
+ break;
+ case Subscript::MIV:
+ DEBUG(dbgs() << ", MIV\n");
+ if (testMIV(Pair[SI].Src, Pair[SI].Dst, Pair[SI].Loops, Result))
+ return NULL;
+ break;
+ default:
+ llvm_unreachable("subscript has unexpected classification");
+ }
+ }
+
+ if (Coupled.count()) {
+ // test coupled subscript groups
+ DEBUG(dbgs() << "starting on coupled subscripts\n");
+ DEBUG(dbgs() << "MaxLevels + 1 = " << MaxLevels + 1 << "\n");
+ SmallVector<Constraint, 4> Constraints(MaxLevels + 1);
+ for (unsigned II = 0; II <= MaxLevels; ++II)
+ Constraints[II].setAny(SE);
+ for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) {
+ DEBUG(dbgs() << "testing subscript group " << SI << " { ");
+ SmallBitVector Group(Pair[SI].Group);
+ SmallBitVector Sivs(Pairs);
+ SmallBitVector Mivs(Pairs);
+ SmallBitVector ConstrainedLevels(MaxLevels + 1);
+ for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) {
+ DEBUG(dbgs() << SJ << " ");
+ if (Pair[SJ].Classification == Subscript::SIV)
+ Sivs.set(SJ);
+ else
+ Mivs.set(SJ);
+ }
+ DEBUG(dbgs() << "}\n");
+ while (Sivs.any()) {
+ bool Changed = false;
+ for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) {
+ DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n");
+ // SJ is an SIV subscript that's part of the current coupled group
+ unsigned Level;
+ const SCEV *SplitIter = NULL;
+ DEBUG(dbgs() << "SIV\n");
+ if (testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
+ Result, NewConstraint, SplitIter))
+ return NULL;
+ ConstrainedLevels.set(Level);
+ if (intersectConstraints(&Constraints[Level], &NewConstraint)) {
+ if (Constraints[Level].isEmpty()) {
+ ++DeltaIndependence;
+ return NULL;
+ }
+ Changed = true;
+ }
+ Sivs.reset(SJ);
+ }
+ if (Changed) {
+ // propagate, possibly creating new SIVs and ZIVs
+ DEBUG(dbgs() << " propagating\n");
+ DEBUG(dbgs() << "\tMivs = ");
+ DEBUG(dumpSmallBitVector(Mivs));
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) {
+ // SJ is an MIV subscript that's part of the current coupled group
+ DEBUG(dbgs() << "\tSJ = " << SJ << "\n");
+ if (propagate(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops,
+ Constraints, Result.Consistent)) {
+ DEBUG(dbgs() << "\t Changed\n");
+ ++DeltaPropagations;
+ Pair[SJ].Classification =
+ classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()),
+ Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()),
+ Pair[SJ].Loops);
+ switch (Pair[SJ].Classification) {
+ case Subscript::ZIV:
+ DEBUG(dbgs() << "ZIV\n");
+ if (testZIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
+ return NULL;
+ Mivs.reset(SJ);
+ break;
+ case Subscript::SIV:
+ Sivs.set(SJ);
+ Mivs.reset(SJ);
+ break;
+ case Subscript::RDIV:
+ case Subscript::MIV:
+ break;
+ default:
+ llvm_unreachable("bad subscript classification");
+ }
+ }
+ }
+ }
+ }
+
+ // test & propagate remaining RDIVs
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) {
+ if (Pair[SJ].Classification == Subscript::RDIV) {
+ DEBUG(dbgs() << "RDIV test\n");
+ if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
+ return NULL;
+ // I don't yet understand how to propagate RDIV results
+ Mivs.reset(SJ);
+ }
+ }
+
+ // test remaining MIVs
+ // This code is temporary.
+ // Better to somehow test all remaining subscripts simultaneously.
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) {
+ if (Pair[SJ].Classification == Subscript::MIV) {
+ DEBUG(dbgs() << "MIV test\n");
+ if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result))
+ return NULL;
+ }
+ else
+ llvm_unreachable("expected only MIV subscripts at this point");
+ }
+
+ // update Result.DV from constraint vector
+ DEBUG(dbgs() << " updating\n");
+ for (int SJ = ConstrainedLevels.find_first();
+ SJ >= 0; SJ = ConstrainedLevels.find_next(SJ)) {
+ updateDirection(Result.DV[SJ - 1], Constraints[SJ]);
+ if (Result.DV[SJ - 1].Direction == Dependence::DVEntry::NONE)
+ return NULL;
+ }
+ }
+ }
+
+ // make sure Scalar flags are set correctly
+ SmallBitVector CompleteLoops(MaxLevels + 1);
+ for (unsigned SI = 0; SI < Pairs; ++SI)
+ CompleteLoops |= Pair[SI].Loops;
+ for (unsigned II = 1; II <= CommonLevels; ++II)
+ if (CompleteLoops[II])
+ Result.DV[II - 1].Scalar = false;
+
+ // make sure the LoopIndependent flag is set correctly
+ if (PossiblyLoopIndependent) {
+ for (unsigned II = 1; II <= CommonLevels; ++II) {
+ if (!(Result.getDirection(II) & Dependence::DVEntry::EQ)) {
+ Result.LoopIndependent = false;
+ break;
+ }
+ }
+ }
+
+ FullDependence *Final = new FullDependence(Result);
+ Result.DV = NULL;
+ return Final;
+}
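
For orientation, here is a minimal editorial sketch (not part of the patch) of how a client might drive depends() and inspect the result. The iteration scheme and surrounding pass boilerplate are assumptions; the accessors used (getLevels, getDirection, the DVEntry direction bits) are the ones the routine above relies on.

// Hedged sketch: query a Src/Dst pair and print a direction vector.
// Assumes the usual LLVM includes and that DA has been initialized.
static void checkPair(DependenceAnalysis &DA,
                      Instruction *Src, Instruction *Dst) {
  Dependence *D = DA.depends(Src, Dst, /*PossiblyLoopIndependent=*/true);
  if (!D) {
    errs() << "no dependence\n";
    return;
  }
  for (unsigned Level = 1; Level <= D->getLevels(); ++Level) {
    unsigned Dir = D->getDirection(Level);
    if (Dir & Dependence::DVEntry::LT) errs() << '<';
    if (Dir & Dependence::DVEntry::EQ) errs() << '=';
    if (Dir & Dependence::DVEntry::GT) errs() << '>';
    errs() << ' ';
  }
  errs() << '\n';
  delete D; // depends() hands back a heap-allocated Dependence
}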
+
+
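
The partitioning loop earlier in depends() is easier to internalize outside the analysis. A self-contained editorial sketch, with std::bitset standing in for SmallBitVector, that replays the grouping on the 4-subscript example from the comment:

#include <bitset>
#include <cstdio>
#include <vector>

int main() {
  const unsigned Pairs = 4;
  // Loops mentioned by each subscript pair: bit N set <=> loop at level N.
  std::vector<std::bitset<8>> GroupLoops(Pairs);
  GroupLoops[0] = 0b000010; // [i] vs [0]    -> {1}
  GroupLoops[1] = 0b000100; // [j] vs [j]    -> {2}
  GroupLoops[2] = 0b011000; // [k] vs [l]    -> {3,4}
  GroupLoops[3] = 0b100110; // [m] vs [i+j]  -> {1,2,5}
  std::vector<std::bitset<4>> Group(Pairs);
  for (unsigned SI = 0; SI < Pairs; ++SI)
    Group[SI].set(SI); // each group starts as {SI}

  std::bitset<4> Separable, Coupled;
  for (unsigned SI = 0; SI < Pairs; ++SI) {
    bool Done = true;
    for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) {
      if ((GroupLoops[SI] & GroupLoops[SJ]).any()) {
        GroupLoops[SJ] |= GroupLoops[SI]; // merge loops into SJ's group
        Group[SJ] |= Group[SI];           // merge subscripts into SJ's group
        Done = false;                     // SI's group lives on in SJ
      }
    }
    if (Done) {
      if (Group[SI].count() == 1)
        Separable.set(SI);
      else
        Coupled.set(SI);
    }
  }
  // Prints Separable = 0100, Coupled = 1000, Group[3] = 1011:
  // subscript 2 is separable; subscript 3 carries the coupled group {0,1,3}.
  std::printf("Separable = %s\nCoupled   = %s\nGroup[3]  = %s\n",
              Separable.to_string().c_str(), Coupled.to_string().c_str(),
              Group[3].to_string().c_str());
}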
+
+//===----------------------------------------------------------------------===//
+// getSplitIteration -
+// Rather than spend rarely-used space recording the splitting iteration
+// during the Weak-Crossing SIV test, we re-compute it on demand.
+// The re-computation is basically a repeat of the entire dependence test,
+// though simplified since we know that the dependence exists.
+// It's tedious, since we must go through all propagations, etc.
+//
+// Care is required to keep this code up to date w.r.t. the code above.
+//
+// Generally, the dependence analyzer will be used to build
+// a dependence graph for a function (basically a map from instructions
+// to dependences). Looking for cycles in the graph shows us loops
+// that cannot be trivially vectorized/parallelized.
+//
+// We can try to improve the situation by examining all the dependences
+// that make up the cycle, looking for ones we can break.
+// Sometimes, peeling the first or last iteration of a loop will break
+// dependences, and we've got flags for those possibilities.
+// Sometimes, splitting a loop at some other iteration will do the trick,
+// and we've got a flag for that case. Rather than waste the space to
+// record the exact iteration (since we rarely know), we provide
+// a method that calculates the iteration. It's a drag that it must work
+// from scratch, but wonderful in that it's possible.
+//
+// Here's an example:
+//
+// for (i = 0; i < 10; i++)
+// A[i] = ...
+// ... = A[11 - i]
+//
+// There's a loop-carried flow dependence from the store to the load,
+// found by the weak-crossing SIV test. The dependence will have a flag,
+// indicating that the dependence can be broken by splitting the loop.
+// Calling getSplitIteration will return 5.
+// Splitting the loop, like so,
+//
+// for (i = 0; i <= 5; i++)
+// A[i] = ...
+// ... = A[11 - i]
+// for (i = 6; i < 10; i++)
+// A[i] = ...
+// ... = A[11 - i]
+//
+// breaks the dependence and allows us to vectorize/parallelize
+// both loops.
+const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
+ unsigned SplitLevel) {
+ assert(Dep && "expected a pointer to a Dependence");
+ assert(Dep->isSplitable(SplitLevel) &&
+ "Dep should be splitable at SplitLevel");
+ const Instruction *Src = Dep->getSrc();
+ const Instruction *Dst = Dep->getDst();
+ assert(Src->mayReadFromMemory() || Src->mayWriteToMemory());
+ assert(Dst->mayReadFromMemory() || Dst->mayWriteToMemory());
+ assert(isLoadOrStore(Src));
+ assert(isLoadOrStore(Dst));
+ const Value *SrcPtr = getPointerOperand(Src);
+ const Value *DstPtr = getPointerOperand(Dst);
+ assert(underlyingObjectsAlias(AA, DstPtr, SrcPtr) ==
+ AliasAnalysis::MustAlias);
+ const GEPOperator *SrcGEP = dyn_cast<GEPOperator>(SrcPtr);
+ const GEPOperator *DstGEP = dyn_cast<GEPOperator>(DstPtr);
+ assert(SrcGEP);
+ assert(DstGEP);
+ assert(SrcGEP->getPointerOperandType() == DstGEP->getPointerOperandType());
+
+ // establish loop nesting levels
+ establishNestingLevels(Src, Dst);
+
+ FullDependence Result(Src, Dst, false, CommonLevels);
+
+ // classify subscript pairs
+ unsigned Pairs = SrcGEP->idx_end() - SrcGEP->idx_begin();
+ SmallVector<Subscript, 4> Pair(Pairs);
+ for (unsigned SI = 0; SI < Pairs; ++SI) {
+ Pair[SI].Loops.resize(MaxLevels + 1);
+ Pair[SI].GroupLoops.resize(MaxLevels + 1);
+ Pair[SI].Group.resize(Pairs);
+ }
+ Pairs = 0;
+ for (GEPOperator::const_op_iterator SrcIdx = SrcGEP->idx_begin(),
+ SrcEnd = SrcGEP->idx_end(),
+ DstIdx = DstGEP->idx_begin(),
+ DstEnd = DstGEP->idx_end();
+ SrcIdx != SrcEnd && DstIdx != DstEnd;
+ ++SrcIdx, ++DstIdx, ++Pairs) {
+ Pair[Pairs].Src = SE->getSCEV(*SrcIdx);
+ Pair[Pairs].Dst = SE->getSCEV(*DstIdx);
+ Pair[Pairs].Classification =
+ classifyPair(Pair[Pairs].Src, LI->getLoopFor(Src->getParent()),
+ Pair[Pairs].Dst, LI->getLoopFor(Dst->getParent()),
+ Pair[Pairs].Loops);
+ Pair[Pairs].GroupLoops = Pair[Pairs].Loops;
+ Pair[Pairs].Group.set(Pairs);
+ }
+
+ SmallBitVector Separable(Pairs);
+ SmallBitVector Coupled(Pairs);
+
+ // partition subscripts into separable and minimally-coupled groups
+ for (unsigned SI = 0; SI < Pairs; ++SI) {
+ if (Pair[SI].Classification == Subscript::NonLinear) {
+ // ignore these, but collect loops for later
+ collectCommonLoops(Pair[SI].Src,
+ LI->getLoopFor(Src->getParent()),
+ Pair[SI].Loops);
+ collectCommonLoops(Pair[SI].Dst,
+ LI->getLoopFor(Dst->getParent()),
+ Pair[SI].Loops);
+ Result.Consistent = false;
+ }
+ else if (Pair[SI].Classification == Subscript::ZIV)
+ Separable.set(SI);
+ else {
+ // SIV, RDIV, or MIV, so check for coupled group
+ bool Done = true;
+ for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) {
+ SmallBitVector Intersection = Pair[SI].GroupLoops;
+ Intersection &= Pair[SJ].GroupLoops;
+ if (Intersection.any()) {
+ // accumulate set of all the loops in group
+ Pair[SJ].GroupLoops |= Pair[SI].GroupLoops;
+ // accumulate set of all subscripts in group
+ Pair[SJ].Group |= Pair[SI].Group;
+ Done = false;
+ }
+ }
+ if (Done) {
+ if (Pair[SI].Group.count() == 1)
+ Separable.set(SI);
+ else
+ Coupled.set(SI);
+ }
+ }
+ }
+
+ Constraint NewConstraint;
+ NewConstraint.setAny(SE);
+
+ // test separable subscripts
+ for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) {
+ switch (Pair[SI].Classification) {
+ case Subscript::SIV: {
+ unsigned Level;
+ const SCEV *SplitIter = NULL;
+ (void) testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
+ Result, NewConstraint, SplitIter);
+ if (Level == SplitLevel) {
+ assert(SplitIter != NULL);
+ return SplitIter;
+ }
+ break;
+ }
+ case Subscript::ZIV:
+ case Subscript::RDIV:
+ case Subscript::MIV:
+ break;
+ default:
+ llvm_unreachable("subscript has unexpected classification");
+ }
+ }
+
+ if (Coupled.count()) {
+ // test coupled subscript groups
+ SmallVector<Constraint, 4> Constraints(MaxLevels + 1);
+ for (unsigned II = 0; II <= MaxLevels; ++II)
+ Constraints[II].setAny(SE);
+ for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) {
+ SmallBitVector Group(Pair[SI].Group);
+ SmallBitVector Sivs(Pairs);
+ SmallBitVector Mivs(Pairs);
+ SmallBitVector ConstrainedLevels(MaxLevels + 1);
+ for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) {
+ if (Pair[SJ].Classification == Subscript::SIV)
+ Sivs.set(SJ);
+ else
+ Mivs.set(SJ);
+ }
+ while (Sivs.any()) {
+ bool Changed = false;
+ for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) {
+ // SJ is an SIV subscript that's part of the current coupled group
+ unsigned Level;
+ const SCEV *SplitIter = NULL;
+ (void) testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
+ Result, NewConstraint, SplitIter);
+ if (Level == SplitLevel && SplitIter)
+ return SplitIter;
+ ConstrainedLevels.set(Level);
+ if (intersectConstraints(&Constraints[Level], &NewConstraint))
+ Changed = true;
+ Sivs.reset(SJ);
+ }
+ if (Changed) {
+ // propagate, possibly creating new SIVs and ZIVs
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) {
+ // SJ is an MIV subscript that's part of the current coupled group
+ if (propagate(Pair[SJ].Src, Pair[SJ].Dst,
+ Pair[SJ].Loops, Constraints, Result.Consistent)) {
+ Pair[SJ].Classification =
+ classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()),
+ Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()),
+ Pair[SJ].Loops);
+ switch (Pair[SJ].Classification) {
+ case Subscript::ZIV:
+ Mivs.reset(SJ);
+ break;
+ case Subscript::SIV:
+ Sivs.set(SJ);
+ Mivs.reset(SJ);
+ break;
+ case Subscript::RDIV:
+ case Subscript::MIV:
+ break;
+ default:
+ llvm_unreachable("bad subscript classification");
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ llvm_unreachable("somehow reached end of routine");
+ return NULL;
+}
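
As a cross-check on the A[11 - i] example above: with subscripts i (store) and 11 - i (load), iterations i1 and i2 conflict exactly when i1 = 11 - i2; the crossing point is 11/2 = 5.5, and the split iteration is floor(5.5) = 5, which is what getSplitIteration would report. A small editorial sketch verifying that every conflicting pair straddles the split:

#include <cstdio>

int main() {
  // Conflict pairs: iteration i1 stores A[i1], iteration i2 reads A[11 - i2],
  // so they touch the same element when i1 = 11 - i2. Both range over 0..9.
  int Crossing = 11 / 2; // = 5; the split iteration for this example
  for (int i1 = 0; i1 < 10; ++i1) {
    int i2 = 11 - i1;                 // the iteration that reads A[i1]
    if (i2 < 0 || i2 >= 10) continue; // no such iteration in range
    // After splitting at 5, every conflicting pair has i1 and i2 on opposite
    // sides of the split, so neither sub-loop carries the dependence.
    bool SameHalf = (i1 <= Crossing) == (i2 <= Crossing);
    std::printf("i1=%d i2=%d sameHalf=%d\n", i1, i2, SameHalf);
  }
}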
diff --git a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
index 1604576..3e537e9 100644
--- a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
+++ b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
@@ -133,7 +133,9 @@ void DominanceFrontierBase::print(raw_ostream &OS, const Module* ) const {
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void DominanceFrontierBase::dump() const {
print(dbgs());
}
+#endif
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
index 0df3e8a..dec0ece 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
@@ -141,12 +141,13 @@ private:
for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
CallSite CS(cast<Value>(II));
- if (CS && !isa<IntrinsicInst>(II)) {
+ if (CS) {
const Function *Callee = CS.getCalledFunction();
- if (Callee)
- Node->addCalledFunction(CS, getOrInsertFunction(Callee));
- else
+ if (!Callee)
+ // Indirect calls to intrinsics are not allowed, so there is no need to check.
Node->addCalledFunction(CS, CallsExternalNode);
+ else if (!Callee->isIntrinsic())
+ Node->addCalledFunction(CS, getOrInsertFunction(Callee));
}
}
}
@@ -198,9 +199,11 @@ void CallGraph::print(raw_ostream &OS, Module*) const {
for (CallGraph::const_iterator I = begin(), E = end(); I != E; ++I)
I->second->print(OS);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void CallGraph::dump() const {
print(dbgs(), 0);
}
+#endif
//===----------------------------------------------------------------------===//
// Implementations of public modification methods
@@ -267,7 +270,9 @@ void CallGraphNode::print(raw_ostream &OS) const {
OS << '\n';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void CallGraphNode::dump() const { print(dbgs()); }
+#endif
/// removeCallEdgeFor - This method removes the edge in the node for the
/// specified call site. Note that this method takes linear time, so it
diff --git a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index 22f6e96..990caa8 100644
--- a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -263,7 +263,7 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
} else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))
return true;
- } else if (isFreeCall(U)) {
+ } else if (isFreeCall(U, TLI)) {
Writers.push_back(cast<Instruction>(U)->getParent()->getParent());
} else if (CallInst *CI = dyn_cast<CallInst>(U)) {
// Make sure that this is just the function being called, not that it is
@@ -329,7 +329,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Check the value being stored.
Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
- if (!isAllocLikeFn(Ptr))
+ if (!isAllocLikeFn(Ptr, TLI))
return false; // Too hard to analyze.
// Analyze all uses of the allocation. If any of them are used in a
@@ -458,7 +458,7 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
if (SI->isVolatile())
// Treat volatile stores as reading memory somewhere.
FunctionEffect |= Ref;
- } else if (isAllocationFn(&*II) || isFreeCall(&*II)) {
+ } else if (isAllocationFn(&*II, TLI) || isFreeCall(&*II, TLI)) {
FunctionEffect |= ModRef;
} else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
// The callgraph doesn't include intrinsic calls.
diff --git a/contrib/llvm/lib/Analysis/IVUsers.cpp b/contrib/llvm/lib/Analysis/IVUsers.cpp
index 0a6682a..d4221b8 100644
--- a/contrib/llvm/lib/Analysis/IVUsers.cpp
+++ b/contrib/llvm/lib/Analysis/IVUsers.cpp
@@ -22,7 +22,7 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
@@ -235,7 +235,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
DT = &getAnalysis<DominatorTree>();
SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
@@ -273,9 +273,11 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void IVUsers::dump() const {
print(dbgs());
}
+#endif
void IVUsers::releaseMemory() {
Processed.clear();
diff --git a/contrib/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm/lib/Analysis/InlineCost.cpp
index e9f39ab..5f51f77 100644
--- a/contrib/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm/lib/Analysis/InlineCost.cpp
@@ -24,7 +24,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/GlobalAlias.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
@@ -41,8 +41,8 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
typedef InstVisitor<CallAnalyzer, bool> Base;
friend class InstVisitor<CallAnalyzer, bool>;
- // TargetData if available, or null.
- const TargetData *const TD;
+ // DataLayout if available, or null.
+ const DataLayout *const TD;
// The called function.
Function &F;
@@ -51,9 +51,12 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
int Cost;
const bool AlwaysInline;
- bool IsRecursive;
+ bool IsCallerRecursive;
+ bool IsRecursiveCall;
bool ExposesReturnsTwice;
bool HasDynamicAlloca;
+ /// Number of bytes allocated statically by the callee.
+ uint64_t AllocatedSize;
unsigned NumInstructions, NumVectorInstructions;
int FiftyPercentVectorBonus, TenPercentVectorBonus;
int VectorBonus;
@@ -123,10 +126,11 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
bool visitCallSite(CallSite CS);
public:
- CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold)
+ CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold)
: TD(TD), F(Callee), Threshold(Threshold), Cost(0),
- AlwaysInline(F.hasFnAttr(Attribute::AlwaysInline)),
- IsRecursive(false), ExposesReturnsTwice(false), HasDynamicAlloca(false),
+ AlwaysInline(F.getFnAttributes().hasAttribute(Attributes::AlwaysInline)),
+ IsCallerRecursive(false), IsRecursiveCall(false),
+ ExposesReturnsTwice(false), HasDynamicAlloca(false), AllocatedSize(0),
NumInstructions(0), NumVectorInstructions(0),
FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
@@ -270,6 +274,13 @@ bool CallAnalyzer::visitAlloca(AllocaInst &I) {
// FIXME: Check whether inlining will turn a dynamic alloca into a static
// alloca, and handle that case.
+ // Accumulate the allocated size.
+ if (I.isStaticAlloca()) {
+ Type *Ty = I.getAllocatedType();
+ AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
+ Ty->getPrimitiveSizeInBits());
+ }
+
// We will happily inline static alloca instructions or dynamic alloca
// instructions in always-inline situations.
if (AlwaysInline || I.isStaticAlloca())
@@ -603,7 +614,7 @@ bool CallAnalyzer::visitStore(StoreInst &I) {
bool CallAnalyzer::visitCallSite(CallSite CS) {
if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
- !F.hasFnAttr(Attribute::ReturnsTwice)) {
+ !F.getFnAttributes().hasAttribute(Attributes::ReturnsTwice)) {
// This aborts the entire analysis.
ExposesReturnsTwice = true;
return false;
@@ -626,7 +637,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
if (F == CS.getInstruction()->getParent()->getParent()) {
// This flag will fully abort the analysis, so don't bother with anything
// else.
- IsRecursive = true;
+ IsRecursiveCall = true;
return false;
}
@@ -713,7 +724,14 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
Cost += InlineConstants::InstrCost;
// If visiting this instruction detected an uninlinable pattern, abort.
- if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
+ if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
+ return false;
+
+ // If the caller is a recursive function then we don't want to inline
+ // functions which allocate a lot of stack space because it would increase
+ // the caller stack usage dramatically.
+ if (IsCallerRecursive &&
+ AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
return false;
if (NumVectorInstructions > NumInstructions/2)
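
The rationale in the comment above is easy to make concrete. A hedged illustration with invented names: if big() were inlined into the recursive caller, each recursion level would pay for big()'s frame.

// Editorial illustration of the hazard guarded by
// InlineConstants::TotalAllocaSizeRecursiveCaller.
static int big(const int *p, int n) {
  int scratch[4096]; // ~16 KB of static alloca in the callee
  for (int i = 0; i < n; ++i)
    scratch[i & 4095] = p[i];
  return scratch[0];
}

int walk(const int *p, int n) { // recursive caller
  if (n == 0)
    return 0;
  // If big() is inlined here, the 16 KB frame is paid on *every* level of
  // the recursion, instead of once per outermost call.
  return big(p, n) + walk(p, n - 1);
}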
@@ -815,7 +833,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// one load and one store per word copied.
// FIXME: The maxStoresPerMemcpy setting from the target should be used
// here instead of a magic number of 8, but it's not available via
- // TargetData.
+ // DataLayout.
NumStores = std::min(NumStores, 8U);
Cost -= 2 * NumStores * InlineConstants::InstrCost;
@@ -832,12 +850,14 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
Cost += InlineConstants::LastCallToStaticBonus;
// If the instruction after the call, or if the normal destination of the
- // invoke is an unreachable instruction, the function is noreturn. As such,
- // there is little point in inlining this unless there is literally zero cost.
- if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
+ // invoke is an unreachable instruction, the function is noreturn. As such,
+ // there is little point in inlining this unless there is literally zero
+ // cost.
+ Instruction *Instr = CS.getInstruction();
+ if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
if (isa<UnreachableInst>(II->getNormalDest()->begin()))
Threshold = 1;
- } else if (isa<UnreachableInst>(++BasicBlock::iterator(CS.getInstruction())))
+ } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
Threshold = 1;
// If this function uses the coldcc calling convention, prefer not to inline
@@ -853,6 +873,20 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
if (F.empty())
return true;
+ Function *Caller = CS.getInstruction()->getParent()->getParent();
+ // Check if the caller function is recursive itself.
+ for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
+ U != E; ++U) {
+ CallSite Site(cast<Value>(*U));
+ if (!Site)
+ continue;
+ Instruction *I = Site.getInstruction();
+ if (I->getParent()->getParent() == Caller) {
+ IsCallerRecursive = true;
+ break;
+ }
+ }
+
// Track whether we've seen a return instruction. The first return
// instruction is free, as at least one will usually disappear in inlining.
bool HasReturn = false;
@@ -909,9 +943,9 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// We never want to inline functions that contain an indirectbr. This is
// incorrect because all the blockaddress's (in static global initializers
- // for example) would be referring to the original function, and this indirect
- // jump would jump from the inlined copy of the function into the original
- // function which is extremely undefined behavior.
+ // for example) would be referring to the original function, and this
+ // indirect jump would jump from the inlined copy of the function into the
+ // original function which is extremely undefined behavior.
// FIXME: This logic isn't really right; we can safely inline functions
// with indirectbr's as long as no other function or global references the
// blockaddress of a block within the current function. And as a QOI issue,
@@ -929,8 +963,16 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// Analyze the cost of this block. If we blow through the threshold, this
// returns false, and we can bail out.
if (!analyzeBlock(BB)) {
- if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
+ if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
return false;
+
+ // If the caller is a recursive function then we don't want to inline
+ // functions which allocate a lot of stack space because it would increase
+ // the caller stack usage dramatically.
+ if (IsCallerRecursive &&
+ AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
+ return false;
+
break;
}
@@ -956,7 +998,8 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// If we're unable to select a particular successor, just count all of
// them.
- for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize; ++TIdx)
+ for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
+ ++TIdx)
BBWorklist.insert(TI->getSuccessor(TIdx));
// If we had any successors at this point, then post-inlining is likely to
@@ -975,6 +1018,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
return AlwaysInline || Cost < Threshold;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
@@ -988,6 +1032,7 @@ void CallAnalyzer::dump() {
DEBUG_PRINT_STAT(SROACostSavingsLost);
#undef DEBUG_PRINT_STAT
}
+#endif
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
return getInlineCost(CS, CS.getCalledFunction(), Threshold);
@@ -999,10 +1044,12 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
// something else. Don't inline functions marked noinline or call sites
// marked noinline.
if (!Callee || Callee->mayBeOverridden() ||
- Callee->hasFnAttr(Attribute::NoInline) || CS.isNoInline())
+ Callee->getFnAttributes().hasAttribute(Attributes::NoInline) ||
+ CS.isNoInline())
return llvm::InlineCost::getNever();
- DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() << "...\n");
+ DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
+ << "...\n");
CallAnalyzer CA(TD, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index 379a35a..a76e5ad 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -31,7 +31,7 @@
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -42,11 +42,11 @@ STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
struct Query {
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
- Query(const TargetData *td, const TargetLibraryInfo *tli,
+ Query(const DataLayout *td, const TargetLibraryInfo *tli,
const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
};
@@ -651,7 +651,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -664,7 +664,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// if the GEP has all-constant indices. Returns false if any non-constant
/// index is encountered leaving the 'Offset' in an undefined state. The
/// 'Offset' APInt must be the bitwidth of the target's pointer size.
-static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP,
+static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
APInt &Offset) {
unsigned IntPtrWidth = TD.getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());
@@ -696,7 +696,7 @@ static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP,
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
-static Constant *stripAndComputeConstantOffsets(const TargetData &TD,
+static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
Value *&V) {
if (!V->getType()->isPointerTy())
return 0;
@@ -731,7 +731,7 @@ static Constant *stripAndComputeConstantOffsets(const TargetData &TD,
/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
-static Constant *computePointerDifference(const TargetData &TD,
+static Constant *computePointerDifference(const DataLayout &TD,
Value *LHS, Value *RHS) {
Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
if (!LHSOffset)
@@ -880,7 +880,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -951,7 +951,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1039,7 +1039,7 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1055,7 +1055,7 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1074,7 +1074,7 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1144,7 +1144,7 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1160,7 +1160,7 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1179,7 +1179,7 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
return 0;
}
-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1248,7 +1248,7 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -1275,7 +1275,7 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@@ -1307,7 +1307,7 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@@ -1407,7 +1407,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1501,7 +1501,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1561,7 +1561,7 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
return 0;
}
-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1591,7 +1591,7 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
return 0;
}
-static Constant *computePointerICmp(const TargetData &TD,
+static Constant *computePointerICmp(const DataLayout &TD,
CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
// We can only fold certain predicates on pointer comparisons.
@@ -2065,8 +2065,25 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (A && C && (A == C || A == D || B == C || B == D) &&
NoLHSWrapProblem && NoRHSWrapProblem) {
// Determine Y and Z in the form icmp (X+Y), (X+Z).
- Value *Y = (A == C || A == D) ? B : A;
- Value *Z = (C == A || C == B) ? D : C;
+ Value *Y, *Z;
+ if (A == C) {
+ // C + B == C + D -> B == D
+ Y = B;
+ Z = D;
+ } else if (A == D) {
+ // D + B == C + D -> B == C
+ Y = B;
+ Z = C;
+ } else if (B == C) {
+ // A + C == C + D -> A == D
+ Y = A;
+ Z = D;
+ } else {
+ assert(B == D);
+ // A + D == C + D -> A == C
+ Y = A;
+ Z = C;
+ }
if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse-1))
return V;
}
@@ -2399,7 +2416,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@@ -2496,7 +2513,7 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@@ -2531,7 +2548,7 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
@@ -2579,7 +2596,7 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD,
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
@@ -2616,7 +2633,7 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
@@ -2664,7 +2681,7 @@ static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
return 0;
}
-Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD,
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
@@ -2730,7 +2747,7 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
}
@@ -2745,7 +2762,7 @@ static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
RecursionLimit);
@@ -2761,7 +2778,7 @@ static Value *SimplifyCallInst(CallInst *CI, const Query &) {
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
+Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
Value *Result;
@@ -2881,7 +2898,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
bool Simplified = false;
@@ -2936,14 +2953,14 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
}
bool llvm::recursivelySimplifyInstruction(Instruction *I,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
diff --git a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
index 9140786..2b87d80 100644
--- a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -13,13 +13,14 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "lazy-value-info"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/ConstantRange.h"
@@ -212,7 +213,7 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
+ // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getConstant(),
@@ -238,7 +239,7 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
+ // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getNotConstant(),
@@ -294,7 +295,7 @@ raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
//===----------------------------------------------------------------------===//
namespace {
- /// LVIValueHandle - A callback value handle update the cache when
+ /// LVIValueHandle - A callback value handle updates the cache when
/// values are erased.
class LazyValueInfoCache;
struct LVIValueHandle : public CallbackVH {
@@ -470,8 +471,10 @@ bool LazyValueInfoCache::hasBlockValue(Value *Val, BasicBlock *BB) {
return true;
LVIValueHandle ValHandle(Val, this);
- if (!ValueCache.count(ValHandle)) return false;
- return ValueCache[ValHandle].count(BB);
+ std::map<LVIValueHandle, ValueCacheEntryTy>::iterator I =
+ ValueCache.find(ValHandle);
+ if (I == ValueCache.end()) return false;
+ return I->second.count(BB);
}
LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) {
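
The hasBlockValue() change above trades a count()-then-operator[] pair, which costs two lookups (and operator[] on a map would default-construct a missing entry), for a single find(). The same idiom in miniature, with hypothetical names:

#include <map>
#include <string>

bool hasEntry(const std::map<std::string, int> &Cache, const std::string &K) {
  // One lookup; no accidental default-construction of Cache[K].
  std::map<std::string, int>::const_iterator I = Cache.find(K);
  return I != Cache.end() && I->second != 0;
}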
@@ -555,13 +558,11 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(L->getPointerOperand()) ==
- GetUnderlyingObject(Ptr);
+ GetUnderlyingObject(L->getPointerOperand()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(S->getPointerOperand()) ==
- GetUnderlyingObject(Ptr);
+ GetUnderlyingObject(S->getPointerOperand()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (MI->isVolatile()) return false;
@@ -571,11 +572,11 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (!Len || Len->isZero()) return false;
if (MI->getDestAddressSpace() == 0)
- if (MI->getRawDest() == Ptr || MI->getDest() == Ptr)
+ if (GetUnderlyingObject(MI->getRawDest()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
- if (MTI->getRawSource() == Ptr || MTI->getSource() == Ptr)
+ if (GetUnderlyingObject(MTI->getRawSource()) == Ptr)
return true;
}
return false;
@@ -589,13 +590,19 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
// then we know that the pointer can't be NULL.
bool NotNull = false;
if (Val->getType()->isPointerTy()) {
- if (isa<AllocaInst>(Val)) {
+ if (isKnownNonNull(Val)) {
NotNull = true;
} else {
- for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){
- if (InstructionDereferencesPointer(BI, Val)) {
- NotNull = true;
- break;
+ Value *UnderlyingVal = GetUnderlyingObject(Val);
+ // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
+ // inside InstructionDereferencesPointer either.
+ if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, NULL, 1)) {
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+ BI != BE; ++BI) {
+ if (InstructionDereferencesPointer(BI, UnderlyingVal)) {
+ NotNull = true;
+ break;
+ }
}
}
}
@@ -845,9 +852,12 @@ static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
ConstantRange EdgeVal(i.getCaseValue()->getValue());
- if (DefaultCase)
- EdgesVals = EdgesVals.difference(EdgeVal);
- else if (i.getCaseSuccessor() == BBTo)
+ if (DefaultCase) {
+ // It is possible that the default destination is the destination of
+ // some cases; there is no need to perform the difference for those cases.
+ if (i.getCaseSuccessor() != BBTo)
+ EdgesVals = EdgesVals.difference(EdgeVal);
+ } else if (i.getCaseSuccessor() == BBTo)
EdgesVals = EdgesVals.unionWith(EdgeVal);
}
Result = LVILatticeVal::getRange(EdgesVals);
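
A hedged sketch of the case the new code handles: when a case label shares its successor with the default destination, the old code subtracted that case's value from the default edge's range, even though the edge into the shared block can still carry it.

// Editorial illustration: 'case 7' and 'default' target the same block, so
// on the edge into that block the value 7 must remain in the computed range.
int f(int x) {
  switch (x) {
  case 7:      // same successor block as default
  default:
    return x;  // LVI must not exclude 7 on this edge
  case 1:
    return 0;
  }
}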
@@ -1004,7 +1014,7 @@ bool LazyValueInfo::runOnFunction(Function &F) {
if (PImpl)
getCache(PImpl).clear();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// Fully lazy.
diff --git a/contrib/llvm/lib/Analysis/Lint.cpp b/contrib/llvm/lib/Analysis/Lint.cpp
index 83bdf52..6d6d580 100644
--- a/contrib/llvm/lib/Analysis/Lint.cpp
+++ b/contrib/llvm/lib/Analysis/Lint.cpp
@@ -43,7 +43,7 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
@@ -103,7 +103,7 @@ namespace {
Module *Mod;
AliasAnalysis *AA;
DominatorTree *DT;
- TargetData *TD;
+ DataLayout *TD;
TargetLibraryInfo *TLI;
std::string Messages;
@@ -177,7 +177,7 @@ bool Lint::runOnFunction(Function &F) {
Mod = F.getParent();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
visit(F);
dbgs() << MessagesStr.str();
@@ -411,14 +411,50 @@ void Lint::visitMemoryReference(Instruction &I,
"Undefined behavior: Branch to non-blockaddress", &I);
}
+ // Check for buffer overflows and misalignment.
if (TD) {
- if (Align == 0 && Ty) Align = TD->getABITypeAlignment(Ty);
+ // Only handles memory references that read/write something simple like an
+ // alloca instruction or a global variable.
+ int64_t Offset = 0;
+ if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *TD)) {
+ // OK, so the access is to a constant offset from Ptr. Check that Ptr is
+ // something we can handle and if so extract the size of this base object
+ // along with its alignment.
+ uint64_t BaseSize = AliasAnalysis::UnknownSize;
+ unsigned BaseAlign = 0;
+
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+ Type *ATy = AI->getAllocatedType();
+ if (!AI->isArrayAllocation() && ATy->isSized())
+ BaseSize = TD->getTypeAllocSize(ATy);
+ BaseAlign = AI->getAlignment();
+ if (BaseAlign == 0 && ATy->isSized())
+ BaseAlign = TD->getABITypeAlignment(ATy);
+ } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+ // If the global may be defined differently in another compilation unit
+ // then don't warn about funky memory accesses.
+ if (GV->hasDefinitiveInitializer()) {
+ Type *GTy = GV->getType()->getElementType();
+ if (GTy->isSized())
+ BaseSize = TD->getTypeAllocSize(GTy);
+ BaseAlign = GV->getAlignment();
+ if (BaseAlign == 0 && GTy->isSized())
+ BaseAlign = TD->getABITypeAlignment(GTy);
+ }
+ }
- if (Align != 0) {
- unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(Ptr, KnownZero, KnownOne, TD);
- Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
+ // Accesses from before the start or after the end of the object are not
+ // defined.
+ Assert1(Size == AliasAnalysis::UnknownSize ||
+ BaseSize == AliasAnalysis::UnknownSize ||
+ (Offset >= 0 && Offset + Size <= BaseSize),
+ "Undefined behavior: Buffer overflow", &I);
+
+ // Accesses that say that the memory is more aligned than it is are not
+ // defined.
+ if (Align == 0 && Ty && Ty->isSized())
+ Align = TD->getABITypeAlignment(Ty);
+ Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
"Undefined behavior: Memory reference address is misaligned", &I);
}
}
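
A hedged example of what the new checks catch once a base object and constant offset are visible (the diagnostic strings come from the Assert1 calls above):

// Editorial illustration: the load is at offset 16 with size 4 from a
// 16-byte alloca'd object, so Offset + Size > BaseSize and Lint reports
// "Undefined behavior: Buffer overflow".
int overflow(void) {
  int buf[4];
  return *(buf + 4); // one past the end, then dereferenced
}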
@@ -470,7 +506,7 @@ void Lint::visitShl(BinaryOperator &I) {
"Undefined result: Shift count out of range", &I);
}
-static bool isZero(Value *V, TargetData *TD) {
+static bool isZero(Value *V, DataLayout *TD) {
// Assume undef could be zero.
if (isa<UndefValue>(V)) return true;
diff --git a/contrib/llvm/lib/Analysis/Loads.cpp b/contrib/llvm/lib/Analysis/Loads.cpp
index 873a275..73aa8b4 100644
--- a/contrib/llvm/lib/Analysis/Loads.cpp
+++ b/contrib/llvm/lib/Analysis/Loads.cpp
@@ -13,7 +13,7 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
@@ -52,8 +52,8 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
/// bitcasts to get back to the underlying object being addressed, keeping
/// track of the offset in bytes from the GEPs relative to the result.
/// This is closely related to GetUnderlyingObject but is located
-/// here to avoid making VMCore depend on TargetData.
-static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
+/// here to avoid making VMCore depend on DataLayout.
+static Value *getUnderlyingObjectWithOffset(Value *V, const DataLayout *TD,
uint64_t &ByteOffset,
unsigned MaxLookup = 6) {
if (!V->getType()->isPointerTy())
@@ -85,7 +85,7 @@ static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD) {
+ unsigned Align, const DataLayout *TD) {
uint64_t ByteOffset = 0;
Value *Base = V;
if (TD)
diff --git a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
deleted file mode 100644
index 463269d..0000000
--- a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
+++ /dev/null
@@ -1,362 +0,0 @@
-//===- LoopDependenceAnalysis.cpp - LDA Implementation ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is the (beginning) of an implementation of a loop dependence analysis
-// framework, which is used to detect dependences in memory accesses in loops.
-//
-// Please note that this is work in progress and the interface is subject to
-// change.
-//
-// TODO: adapt as implementation progresses.
-//
-// TODO: document lingo (pair, subscript, index)
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "lda"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/LoopDependenceAnalysis.h"
-#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Instructions.h"
-#include "llvm/Operator.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
-using namespace llvm;
-
-STATISTIC(NumAnswered, "Number of dependence queries answered");
-STATISTIC(NumAnalysed, "Number of distinct dependence pairs analysed");
-STATISTIC(NumDependent, "Number of pairs with dependent accesses");
-STATISTIC(NumIndependent, "Number of pairs with independent accesses");
-STATISTIC(NumUnknown, "Number of pairs with unknown accesses");
-
-LoopPass *llvm::createLoopDependenceAnalysisPass() {
- return new LoopDependenceAnalysis();
-}
-
-INITIALIZE_PASS_BEGIN(LoopDependenceAnalysis, "lda",
- "Loop Dependence Analysis", false, true)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
-INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_END(LoopDependenceAnalysis, "lda",
- "Loop Dependence Analysis", false, true)
-char LoopDependenceAnalysis::ID = 0;
-
-//===----------------------------------------------------------------------===//
-// Utility Functions
-//===----------------------------------------------------------------------===//
-
-static inline bool IsMemRefInstr(const Value *V) {
- const Instruction *I = dyn_cast<const Instruction>(V);
- return I && (I->mayReadFromMemory() || I->mayWriteToMemory());
-}
-
-static void GetMemRefInstrs(const Loop *L,
- SmallVectorImpl<Instruction*> &Memrefs) {
- for (Loop::block_iterator b = L->block_begin(), be = L->block_end();
- b != be; ++b)
- for (BasicBlock::iterator i = (*b)->begin(), ie = (*b)->end();
- i != ie; ++i)
- if (IsMemRefInstr(i))
- Memrefs.push_back(i);
-}
-
-static bool IsLoadOrStoreInst(Value *I) {
- // Returns true if the load or store can be analyzed. Atomic and volatile
- // operations have properties which this analysis does not understand.
- if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->isUnordered();
- else if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return SI->isUnordered();
- return false;
-}
-
-static Value *GetPointerOperand(Value *I) {
- if (LoadInst *i = dyn_cast<LoadInst>(I))
- return i->getPointerOperand();
- if (StoreInst *i = dyn_cast<StoreInst>(I))
- return i->getPointerOperand();
- llvm_unreachable("Value is no load or store instruction!");
-}
-
-static AliasAnalysis::AliasResult UnderlyingObjectsAlias(AliasAnalysis *AA,
- const Value *A,
- const Value *B) {
- const Value *aObj = GetUnderlyingObject(A);
- const Value *bObj = GetUnderlyingObject(B);
- return AA->alias(aObj, AA->getTypeStoreSize(aObj->getType()),
- bObj, AA->getTypeStoreSize(bObj->getType()));
-}
-
-static inline const SCEV *GetZeroSCEV(ScalarEvolution *SE) {
- return SE->getConstant(Type::getInt32Ty(SE->getContext()), 0L);
-}
-
-//===----------------------------------------------------------------------===//
-// Dependence Testing
-//===----------------------------------------------------------------------===//
-
-bool LoopDependenceAnalysis::isDependencePair(const Value *A,
- const Value *B) const {
- return IsMemRefInstr(A) &&
- IsMemRefInstr(B) &&
- (cast<const Instruction>(A)->mayWriteToMemory() ||
- cast<const Instruction>(B)->mayWriteToMemory());
-}
-
-bool LoopDependenceAnalysis::findOrInsertDependencePair(Value *A,
- Value *B,
- DependencePair *&P) {
- void *insertPos = 0;
- FoldingSetNodeID id;
- id.AddPointer(A);
- id.AddPointer(B);
-
- P = Pairs.FindNodeOrInsertPos(id, insertPos);
- if (P) return true;
-
- P = new (PairAllocator) DependencePair(id, A, B);
- Pairs.InsertNode(P, insertPos);
- return false;
-}
-
-void LoopDependenceAnalysis::getLoops(const SCEV *S,
- DenseSet<const Loop*>* Loops) const {
- // Refactor this into an SCEVVisitor, if efficiency becomes a concern.
- for (const Loop *L = this->L; L != 0; L = L->getParentLoop())
- if (!SE->isLoopInvariant(S, L))
- Loops->insert(L);
-}
-
-bool LoopDependenceAnalysis::isLoopInvariant(const SCEV *S) const {
- DenseSet<const Loop*> loops;
- getLoops(S, &loops);
- return loops.empty();
-}
-
-bool LoopDependenceAnalysis::isAffine(const SCEV *S) const {
- const SCEVAddRecExpr *rec = dyn_cast<SCEVAddRecExpr>(S);
- return isLoopInvariant(S) || (rec && rec->isAffine());
-}
-
-bool LoopDependenceAnalysis::isZIVPair(const SCEV *A, const SCEV *B) const {
- return isLoopInvariant(A) && isLoopInvariant(B);
-}
-
-bool LoopDependenceAnalysis::isSIVPair(const SCEV *A, const SCEV *B) const {
- DenseSet<const Loop*> loops;
- getLoops(A, &loops);
- getLoops(B, &loops);
- return loops.size() == 1;
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseZIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- assert(isZIVPair(A, B) && "Attempted to ZIV-test non-ZIV SCEVs!");
- return A == B ? Dependent : Independent;
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseSIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- return Unknown; // TODO: Implement.
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseMIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- return Unknown; // TODO: Implement.
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseSubscript(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- DEBUG(dbgs() << " Testing subscript: " << *A << ", " << *B << "\n");
-
- if (A == B) {
- DEBUG(dbgs() << " -> [D] same SCEV\n");
- return Dependent;
- }
-
- if (!isAffine(A) || !isAffine(B)) {
- DEBUG(dbgs() << " -> [?] not affine\n");
- return Unknown;
- }
-
- if (isZIVPair(A, B))
- return analyseZIV(A, B, S);
-
- if (isSIVPair(A, B))
- return analyseSIV(A, B, S);
-
- return analyseMIV(A, B, S);
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analysePair(DependencePair *P) const {
- DEBUG(dbgs() << "Analysing:\n" << *P->A << "\n" << *P->B << "\n");
-
- // We only analyse loads and stores, not other possible memory accesses
- // made by, e.g., free, call, or invoke instructions.
- if (!IsLoadOrStoreInst(P->A) || !IsLoadOrStoreInst(P->B)) {
- DEBUG(dbgs() << "--> [?] no load/store\n");
- return Unknown;
- }
-
- Value *aPtr = GetPointerOperand(P->A);
- Value *bPtr = GetPointerOperand(P->B);
-
- switch (UnderlyingObjectsAlias(AA, aPtr, bPtr)) {
- case AliasAnalysis::MayAlias:
- case AliasAnalysis::PartialAlias:
- // We cannot analyse objects if we do not know about their aliasing.
- DEBUG(dbgs() << "---> [?] may alias\n");
- return Unknown;
-
- case AliasAnalysis::NoAlias:
- // If the objects noalias, they are distinct, accesses are independent.
- DEBUG(dbgs() << "---> [I] no alias\n");
- return Independent;
-
- case AliasAnalysis::MustAlias:
- break; // The underlying objects alias, test accesses for dependence.
- }
-
- const GEPOperator *aGEP = dyn_cast<GEPOperator>(aPtr);
- const GEPOperator *bGEP = dyn_cast<GEPOperator>(bPtr);
-
- if (!aGEP || !bGEP)
- return Unknown;
-
- // FIXME: Is filtering coupled subscripts necessary?
-
- // Collect GEP operand pairs (FIXME: use GetGEPOperands from BasicAA), adding
- // trailing zeroes to the smaller GEP, if needed.
- typedef SmallVector<std::pair<const SCEV*, const SCEV*>, 4> GEPOpdPairsTy;
- GEPOpdPairsTy opds;
- for(GEPOperator::const_op_iterator aIdx = aGEP->idx_begin(),
- aEnd = aGEP->idx_end(),
- bIdx = bGEP->idx_begin(),
- bEnd = bGEP->idx_end();
- aIdx != aEnd && bIdx != bEnd;
- aIdx += (aIdx != aEnd), bIdx += (bIdx != bEnd)) {
- const SCEV* aSCEV = (aIdx != aEnd) ? SE->getSCEV(*aIdx) : GetZeroSCEV(SE);
- const SCEV* bSCEV = (bIdx != bEnd) ? SE->getSCEV(*bIdx) : GetZeroSCEV(SE);
- opds.push_back(std::make_pair(aSCEV, bSCEV));
- }
-
- if (!opds.empty() && opds[0].first != opds[0].second) {
- // We cannot (yet) handle arbitrary GEP pointer offsets; bail out unless
- // the first (pointer-offset) subscripts of both GEPs are equal.
- // TODO: this could be relaxed by adding the size of the underlying object
- // to the first subscript. If we have e.g. (GEP x,0,i; GEP x,2,-i) and we
- // know that x is a [100 x i8]*, we could modify the first subscript to be
- // (i, 200-i) instead of (i, -i).
- return Unknown;
- }
-
- // Now analyse the collected operand pairs (skipping the GEP ptr offsets).
- for (GEPOpdPairsTy::const_iterator i = opds.begin() + 1, end = opds.end();
- i != end; ++i) {
- Subscript subscript;
- DependenceResult result = analyseSubscript(i->first, i->second, &subscript);
- if (result != Dependent) {
- // We either proved independence or failed to analyse this subscript.
- // Further subscripts will not improve the situation, so abort early.
- return result;
- }
- P->Subscripts.push_back(subscript);
- }
- // We successfully analysed all subscripts but failed to prove independence.
- return Dependent;
-}
-
-bool LoopDependenceAnalysis::depends(Value *A, Value *B) {
- assert(isDependencePair(A, B) && "Values form no dependence pair!");
- ++NumAnswered;
-
- DependencePair *p;
- if (!findOrInsertDependencePair(A, B, p)) {
- // The pair is not cached, so analyse it.
- ++NumAnalysed;
- switch (p->Result = analysePair(p)) {
- case Dependent: ++NumDependent; break;
- case Independent: ++NumIndependent; break;
- case Unknown: ++NumUnknown; break;
- }
- }
- return p->Result != Independent;
-}
-
-//===----------------------------------------------------------------------===//
-// LoopDependenceAnalysis Implementation
-//===----------------------------------------------------------------------===//
-
-bool LoopDependenceAnalysis::runOnLoop(Loop *L, LPPassManager &) {
- this->L = L;
- AA = &getAnalysis<AliasAnalysis>();
- SE = &getAnalysis<ScalarEvolution>();
- return false;
-}
-
-void LoopDependenceAnalysis::releaseMemory() {
- Pairs.clear();
- PairAllocator.Reset();
-}
-
-void LoopDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequiredTransitive<AliasAnalysis>();
- AU.addRequiredTransitive<ScalarEvolution>();
-}
-
-static void PrintLoopInfo(raw_ostream &OS,
- LoopDependenceAnalysis *LDA, const Loop *L) {
- if (!L->empty()) return; // ignore non-innermost loops
-
- SmallVector<Instruction*, 8> memrefs;
- GetMemRefInstrs(L, memrefs);
-
- OS << "Loop at depth " << L->getLoopDepth() << ", header block: ";
- WriteAsOperand(OS, L->getHeader(), false);
- OS << "\n";
-
- OS << " Load/store instructions: " << memrefs.size() << "\n";
- for (SmallVector<Instruction*, 8>::const_iterator x = memrefs.begin(),
- end = memrefs.end(); x != end; ++x)
- OS << "\t" << (x - memrefs.begin()) << ": " << **x << "\n";
-
- OS << " Pairwise dependence results:\n";
- for (SmallVector<Instruction*, 8>::const_iterator x = memrefs.begin(),
- end = memrefs.end(); x != end; ++x)
- for (SmallVector<Instruction*, 8>::const_iterator y = x + 1;
- y != end; ++y)
- if (LDA->isDependencePair(*x, *y))
- OS << "\t" << (x - memrefs.begin()) << "," << (y - memrefs.begin())
- << ": " << (LDA->depends(*x, *y) ? "dependent" : "independent")
- << "\n";
-}
-
-void LoopDependenceAnalysis::print(raw_ostream &OS, const Module*) const {
- // TODO: doc why const_cast is safe
- PrintLoopInfo(OS, const_cast<LoopDependenceAnalysis*>(this), this->L);
-}
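
The framework deleted above classified each subscript pair as ZIV, SIV, or MIV before testing. As an illustration only (not code from this change), the ZIV case is the one it could already decide:

    // Both subscripts below are loop-invariant, i.e. "zero index variables".
    void ziv_example(int *A, int n) {
      for (int i = 0; i < n; ++i) {
        A[3] = A[4] + 1; // distinct constant SCEVs (3 vs. 4): Independent
        A[5] = A[5] * 2; // identical SCEVs: trivially Dependent
      }
    }
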
diff --git a/contrib/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm/lib/Analysis/LoopInfo.cpp
index 20c33a3..8341f9d 100644
--- a/contrib/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LoopInfo.cpp
@@ -306,9 +306,11 @@ BasicBlock *Loop::getUniqueExitBlock() const {
return 0;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Loop::dump() const {
print(dbgs());
}
+#endif
//===----------------------------------------------------------------------===//
// UnloopUpdater implementation
@@ -429,8 +431,8 @@ void UnloopUpdater::updateSubloopParents() {
Unloop->removeChildLoop(llvm::prior(Unloop->end()));
assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop");
- if (SubloopParents[Subloop])
- SubloopParents[Subloop]->addChildLoop(Subloop);
+ if (Loop *Parent = SubloopParents[Subloop])
+ Parent->addChildLoop(Subloop);
else
LI->addTopLevelLoop(Subloop);
}
@@ -456,9 +458,8 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
assert(Subloop && "subloop is not an ancestor of the original loop");
}
// Get the current nearest parent of the Subloop exits, initially Unloop.
- if (!SubloopParents.count(Subloop))
- SubloopParents[Subloop] = Unloop;
- NearLoop = SubloopParents[Subloop];
+ NearLoop =
+ SubloopParents.insert(std::make_pair(Subloop, Unloop)).first->second;
}
succ_iterator I = succ_begin(BB), E = succ_end(BB);
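
The rewrite above replaces a count()-then-operator[] sequence with a single insert(), which probes the hash table once and yields either the cached entry or a freshly inserted default. A standalone sketch of the idiom (the helper name is ours):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include <utility>

    static llvm::Loop *nearestParent(
        llvm::DenseMap<llvm::Loop*, llvm::Loop*> &Parents,
        llvm::Loop *Subloop, llvm::Loop *Unloop) {
      // One probe: returns the cached parent, or records Unloop as default.
      return Parents.insert(std::make_pair(Subloop, Unloop)).first->second;
    }
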
diff --git a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
index e77d2ff..0a539fe 100644
--- a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -25,7 +25,8 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -39,7 +40,7 @@ enum AllocType {
};
struct AllocFnsTy {
- const char *Name;
+ LibFunc::Func Func;
AllocType AllocTy;
unsigned char NumParams;
// First and Second size parameters (or -1 if unused)
@@ -49,22 +50,22 @@ struct AllocFnsTy {
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind or noalias, which parameters are
// nocapture, etc.
static const AllocFnsTy AllocationFnData[] = {
- {"malloc", MallocLike, 1, 0, -1},
- {"valloc", MallocLike, 1, 0, -1},
- {"_Znwj", MallocLike, 1, 0, -1}, // new(unsigned int)
- {"_ZnwjRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned int, nothrow)
- {"_Znwm", MallocLike, 1, 0, -1}, // new(unsigned long)
- {"_ZnwmRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned long, nothrow)
- {"_Znaj", MallocLike, 1, 0, -1}, // new[](unsigned int)
- {"_ZnajRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow)
- {"_Znam", MallocLike, 1, 0, -1}, // new[](unsigned long)
- {"_ZnamRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow)
- {"posix_memalign", MallocLike, 3, 2, -1},
- {"calloc", CallocLike, 2, 0, 1},
- {"realloc", ReallocLike, 2, 1, -1},
- {"reallocf", ReallocLike, 2, 1, -1},
- {"strdup", StrDupLike, 1, -1, -1},
- {"strndup", StrDupLike, 2, 1, -1}
+ {LibFunc::malloc, MallocLike, 1, 0, -1},
+ {LibFunc::valloc, MallocLike, 1, 0, -1},
+ {LibFunc::Znwj, MallocLike, 1, 0, -1}, // new(unsigned int)
+ {LibFunc::ZnwjRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned int, nothrow)
+ {LibFunc::Znwm, MallocLike, 1, 0, -1}, // new(unsigned long)
+ {LibFunc::ZnwmRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned long, nothrow)
+ {LibFunc::Znaj, MallocLike, 1, 0, -1}, // new[](unsigned int)
+ {LibFunc::ZnajRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow)
+ {LibFunc::Znam, MallocLike, 1, 0, -1}, // new[](unsigned long)
+ {LibFunc::ZnamRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow)
+ {LibFunc::posix_memalign, MallocLike, 3, 2, -1},
+ {LibFunc::calloc, CallocLike, 2, 0, 1},
+ {LibFunc::realloc, ReallocLike, 2, 1, -1},
+ {LibFunc::reallocf, ReallocLike, 2, 1, -1},
+ {LibFunc::strdup, StrDupLike, 1, -1, -1},
+ {LibFunc::strndup, StrDupLike, 2, 1, -1}
};
@@ -85,15 +86,22 @@ static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) {
/// \brief Returns the allocation data for the given value if it is a call to a
/// known allocation function, and NULL otherwise.
static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
+ const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false) {
Function *Callee = getCalledFunction(V, LookThroughBitCast);
if (!Callee)
return 0;
+ // Make sure that the function is available.
+ StringRef FnName = Callee->getName();
+ LibFunc::Func TLIFn;
+ if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
+ return 0;
+
unsigned i = 0;
bool found = false;
for ( ; i < array_lengthof(AllocationFnData); ++i) {
- if (Callee->getName() == AllocationFnData[i].Name) {
+ if (AllocationFnData[i].Func == TLIFn) {
found = true;
break;
}
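
With the table keyed on LibFunc enumerators, recognition is now gated by TargetLibraryInfo instead of raw name comparison. A hedged caller-side sketch (the helper name is ours, not LLVM's):

    #include "llvm/Function.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    static bool isRecognizedAllocCall(const Function *Callee,
                                      const TargetLibraryInfo *TLI) {
      LibFunc::Func TLIFn;
      // Bail out unless TLI knows the name *and* the target provides it.
      if (!TLI || !TLI->getLibFunc(Callee->getName(), TLIFn) ||
          !TLI->has(TLIFn))
        return false;
      return TLIFn == LibFunc::malloc || TLIFn == LibFunc::calloc;
    }
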
@@ -106,7 +114,6 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
return 0;
// Check function prototype.
- // FIXME: Check the nobuiltin metadata?? (PR5130)
int FstParam = FnData->FstParam;
int SndParam = FnData->SndParam;
FunctionType *FTy = Callee->getFunctionType();
@@ -125,64 +132,72 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
ImmutableCallSite CS(LookThroughBitCast ? V->stripPointerCasts() : V);
- return CS && CS.hasFnAttr(Attribute::NoAlias);
+ return CS && CS.hasFnAttr(Attributes::NoAlias);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
-bool llvm::isAllocationFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, AnyAlloc, LookThroughBitCast);
+bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
-bool llvm::isNoAliasFn(const Value *V, bool LookThroughBitCast) {
+bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
// it's safe to consider realloc as noalias since accessing the original
// pointer is undefined behavior
- return isAllocationFn(V, LookThroughBitCast) ||
+ return isAllocationFn(V, TLI, LookThroughBitCast) ||
hasNoAliasAttr(V, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
-bool llvm::isMallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, MallocLike, LookThroughBitCast);
+bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, MallocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
-bool llvm::isCallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, CallocLike, LookThroughBitCast);
+bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, CallocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
-bool llvm::isAllocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, AllocLike, LookThroughBitCast);
+bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, AllocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// reallocates memory (such as realloc).
-bool llvm::isReallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, ReallocLike, LookThroughBitCast);
+bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast);
}
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
-const CallInst *llvm::extractMallocCall(const Value *I) {
- return isMallocLikeFn(I) ? dyn_cast<CallInst>(I) : 0;
+const CallInst *llvm::extractMallocCall(const Value *I,
+ const TargetLibraryInfo *TLI) {
+ return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
}
-static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
+static Value *computeArraySize(const CallInst *CI, const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) {
if (!CI)
return NULL;
// The size of the malloc's result type must be known to determine array size.
- Type *T = getMallocAllocatedType(CI);
+ Type *T = getMallocAllocatedType(CI, TLI);
if (!T || !T->isSized() || !TD)
return NULL;
@@ -204,9 +219,11 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL.
-const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
- const CallInst *CI = extractMallocCall(I);
- Value *ArraySize = computeArraySize(CI, TD);
+const CallInst *llvm::isArrayMalloc(const Value *I,
+ const DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
+ const CallInst *CI = extractMallocCall(I, TLI);
+ Value *ArraySize = computeArraySize(CI, TD, TLI);
if (ArraySize &&
ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
@@ -221,8 +238,9 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
/// 0: PointerType is the calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-PointerType *llvm::getMallocType(const CallInst *CI) {
- assert(isMallocLikeFn(CI) && "getMallocType and not malloc call");
+PointerType *llvm::getMallocType(const CallInst *CI,
+ const TargetLibraryInfo *TLI) {
+ assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");
PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0;
@@ -252,8 +270,9 @@ PointerType *llvm::getMallocType(const CallInst *CI) {
/// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-Type *llvm::getMallocAllocatedType(const CallInst *CI) {
- PointerType *PT = getMallocType(CI);
+Type *llvm::getMallocAllocatedType(const CallInst *CI,
+ const TargetLibraryInfo *TLI) {
+ PointerType *PT = getMallocType(CI, TLI);
return PT ? PT->getElementType() : NULL;
}
@@ -262,22 +281,24 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI) {
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
-Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
+Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
bool LookThroughSExt) {
- assert(isMallocLikeFn(CI) && "getMallocArraySize and not malloc call");
- return computeArraySize(CI, TD, LookThroughSExt);
+ assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
+ return computeArraySize(CI, TD, TLI, LookThroughSExt);
}
/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
-const CallInst *llvm::extractCallocCall(const Value *I) {
- return isCallocLikeFn(I) ? cast<CallInst>(I) : 0;
+const CallInst *llvm::extractCallocCall(const Value *I,
+ const TargetLibraryInfo *TLI) {
+ return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : 0;
}
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
-const CallInst *llvm::isFreeCall(const Value *I) {
+const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI)
return 0;
@@ -285,9 +306,14 @@ const CallInst *llvm::isFreeCall(const Value *I) {
if (Callee == 0 || !Callee->isDeclaration())
return 0;
- if (Callee->getName() != "free" &&
- Callee->getName() != "_ZdlPv" && // operator delete(void*)
- Callee->getName() != "_ZdaPv") // operator delete[](void*)
+ StringRef FnName = Callee->getName();
+ LibFunc::Func TLIFn;
+ if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
+ return 0;
+
+ if (TLIFn != LibFunc::free &&
+ TLIFn != LibFunc::ZdlPv && // operator delete(void*)
+ TLIFn != LibFunc::ZdaPv) // operator delete[](void*)
return 0;
// Check free prototype.
@@ -315,12 +341,12 @@ const CallInst *llvm::isFreeCall(const Value *I) {
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
/// byval arguments, and global variables.
-bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
- bool RoundToAlign) {
+bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
+ const TargetLibraryInfo *TLI, bool RoundToAlign) {
if (!TD)
return false;
- ObjectSizeOffsetVisitor Visitor(TD, Ptr->getContext(), RoundToAlign);
+ ObjectSizeOffsetVisitor Visitor(TD, TLI, Ptr->getContext(), RoundToAlign);
SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
if (!Visitor.bothKnown(Data))
return false;
@@ -347,10 +373,11 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
return Size;
}
-ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
+ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
LLVMContext &Context,
bool RoundToAlign)
-: TD(TD), RoundToAlign(RoundToAlign) {
+: TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
IntegerType *IntTy = TD->getIntPtrType(Context);
IntTyBits = IntTy->getBitWidth();
Zero = APInt::getNullValue(IntTyBits);
@@ -358,11 +385,16 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
V = V->stripPointerCasts();
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ // If we have already seen this instruction, bail out. Cycles can happen in
+ // unreachable code after constant propagation.
+ if (!SeenInsts.insert(I))
+ return unknown();
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
- return visitGEPOperator(*GEP);
- if (Instruction *I = dyn_cast<Instruction>(V))
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
+ return visitGEPOperator(*GEP);
return visit(*I);
+ }
if (Argument *A = dyn_cast<Argument>(V))
return visitArgument(*A);
if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
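
The new SeenInsts guard relies on SmallPtrSet::insert() returning false for a pointer that is already present (the API of this LLVM generation; later releases return a std::pair instead). Minimal sketch:

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/Instruction.h"

    static llvm::SmallPtrSet<const llvm::Instruction*, 16> Seen;

    static bool firstVisit(const llvm::Instruction *I) {
      // false => I was already in the set, i.e. we are revisiting a node;
      // this is the cycle case compute() now bails out on.
      return Seen.insert(I);
    }
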
@@ -371,9 +403,12 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
return visitGlobalVariable(*GV);
if (UndefValue *UV = dyn_cast<UndefValue>(V))
return visitUndefValue(*UV);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (CE->getOpcode() == Instruction::IntToPtr)
return unknown(); // clueless
+ if (CE->getOpcode() == Instruction::GetElementPtr)
+ return visitGEPOperator(cast<GEPOperator>(*CE));
+ }
DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V
<< '\n');
@@ -408,7 +443,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
}
SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
- const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
+ const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
+ TLI);
if (!FnData)
return unknown();
@@ -473,10 +509,6 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
}
SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
- // Ignore self-referencing GEPs, they can occur in unreachable code.
- if (&GEP == GEP.getPointerOperand())
- return unknown();
-
SizeOffsetType PtrData = compute(GEP.getPointerOperand());
if (!bothKnown(PtrData) || !GEP.hasAllConstantIndices())
return unknown();
@@ -510,10 +542,6 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
}
SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
- // ignore malformed self-looping selects
- if (I.getTrueValue() == &I || I.getFalseValue() == &I)
- return unknown();
-
SizeOffsetType TrueSide = compute(I.getTrueValue());
SizeOffsetType FalseSide = compute(I.getFalseValue());
if (bothKnown(TrueSide) && bothKnown(FalseSide) && TrueSide == FalseSide)
@@ -531,10 +559,10 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
}
-ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD,
+ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
LLVMContext &Context)
-: TD(TD), Context(Context), Builder(Context, TargetFolder(TD)),
-Visitor(TD, Context) {
+: TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
IntTy = TD->getIntPtrType(Context);
Zero = ConstantInt::get(IntTy, 0);
}
@@ -559,6 +587,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
+ ObjectSizeOffsetVisitor Visitor(TD, TLI, Context);
SizeOffsetType Const = Visitor.compute(V);
if (Visitor.bothKnown(Const))
return std::make_pair(ConstantInt::get(Context, Const.first),
@@ -621,7 +650,8 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
- const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
+ const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
+ TLI);
if (!FnData)
return unknown();
@@ -719,10 +749,6 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
- // ignore malformed self-looping selects
- if (I.getTrueValue() == &I || I.getFalseValue() == &I)
- return unknown();
-
SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());
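
Callers of the widened interface now thread a TargetLibraryInfo through as well. A usage sketch, assuming Ptr, TD, and TLI come from the surrounding pass:

    #include "llvm/Analysis/MemoryBuiltins.h"
    using namespace llvm;

    static bool objectFitsIn(const Value *Ptr, uint64_t Limit,
                             const DataLayout *TD,
                             const TargetLibraryInfo *TLI) {
      uint64_t Size;
      if (!getObjectSize(Ptr, Size, TD, TLI, /*RoundToAlign=*/true))
        return false; // size or offset not computable
      return Size <= Limit;
    }
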
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 059e574..9872890 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -30,7 +30,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
@@ -89,7 +89,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
DT = getAnalysisIfAvailable<DominatorTree>();
if (PredCache == 0)
PredCache.reset(new PredIteratorCache());
@@ -148,7 +148,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
return AliasAnalysis::ModRef;
}
- if (const CallInst *CI = isFreeCall(Inst)) {
+ if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
// calls to free() deallocate the entire structure
Loc = AliasAnalysis::Location(CI->getArgOperand(0));
return AliasAnalysis::Mod;
@@ -256,7 +256,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI,
- const TargetData *TD) {
+ const DataLayout *TD) {
// If we have no target data, we can't do this.
if (TD == 0) return false;
@@ -280,7 +280,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
@@ -327,12 +327,12 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
return 0;
if (LIOffs+NewLoadByteSize > MemLocEnd &&
- LI->getParent()->getParent()->hasFnAttr(Attribute::AddressSafety)) {
+ LI->getParent()->getParent()->getFnAttributes().
+ hasAttribute(Attributes::AddressSafety))
// We will be reading past the location accessed by the original program.
// While this is safe in a regular build, Address Safety analysis tools
// may start reporting false warnings. So, don't do widening.
return 0;
- }
// If a load of this width would include all of MemLoc, then we succeed.
if (LIOffs+NewLoadByteSize >= MemLocEnd)
@@ -479,12 +479,20 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call.
- if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst)) {
+ const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
+ if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
- continue;
+ // Be conservative if the accessed pointer may alias the allocation.
+ if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
+ return MemDepResult::getClobber(Inst);
+ // If the allocation is not aliased and does not read memory (like
+ // strdup), it is safe to ignore.
+ if (isa<AllocaInst>(Inst) ||
+ isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
+ continue;
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
@@ -975,7 +983,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
I != E; ++I) {
Visited.insert(std::make_pair(I->getBB(), Addr));
- if (!I->getResult().isNonLocal())
+ if (!I->getResult().isNonLocal() && DT->isReachableFromEntry(I->getBB()))
Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
}
++NumCacheCompleteNonLocalPtr;
@@ -1021,7 +1029,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
NumSortedEntries);
// If we got a Def or Clobber, add this to the list of results.
- if (!Dep.isNonLocal()) {
+ if (!Dep.isNonLocal() && DT->isReachableFromEntry(BB)) {
Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
continue;
}
diff --git a/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
index 101c2d5..2eb4137 100644
--- a/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
@@ -15,7 +15,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
namespace {
@@ -36,7 +36,7 @@ namespace {
virtual void initializePass() {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
}
virtual AliasResult alias(const Location &LocA, const Location &LocB) {
diff --git a/contrib/llvm/lib/Analysis/PHITransAddr.cpp b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
index 38cb1c9..c35737e 100644
--- a/contrib/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
@@ -41,6 +41,7 @@ static bool CanPHITrans(Instruction *Inst) {
return false;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void PHITransAddr::dump() const {
if (Addr == 0) {
dbgs() << "PHITransAddr: null\n";
@@ -50,6 +51,7 @@ void PHITransAddr::dump() const {
for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
dbgs() << " Input #" << i << " is " << *InstInputs[i] << "\n";
}
+#endif
static bool VerifySubExpr(Value *Expr,
diff --git a/contrib/llvm/lib/Analysis/ProfileDataLoader.cpp b/contrib/llvm/lib/Analysis/ProfileDataLoader.cpp
new file mode 100644
index 0000000..a4f634a
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/ProfileDataLoader.cpp
@@ -0,0 +1,155 @@
+//===- ProfileDataLoader.cpp - Load profile information from disk ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The ProfileDataLoader class is used to load raw profiling data from the dump
+// file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Module.h"
+#include "llvm/InstrTypes.h"
+#include "llvm/Analysis/ProfileDataLoader.h"
+#include "llvm/Analysis/ProfileDataTypes.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include <cstdio>
+#include <cstdlib>
+using namespace llvm;
+
+raw_ostream &llvm::operator<<(raw_ostream &O, std::pair<const BasicBlock *,
+ const BasicBlock *> E) {
+ O << "(";
+
+ if (E.first)
+ O << E.first->getName();
+ else
+ O << "0";
+
+ O << ",";
+
+ if (E.second)
+ O << E.second->getName();
+ else
+ O << "0";
+
+ return O << ")";
+}
+
+/// AddCounts - Add 'A' and 'B', accounting for the fact that the value of one
+/// (or both) may not be defined.
+static unsigned AddCounts(unsigned A, unsigned B) {
+ // If either value is undefined, use the other.
+ // Undefined + undefined = undefined.
+ if (A == ProfileDataLoader::Uncounted) return B;
+ if (B == ProfileDataLoader::Uncounted) return A;
+
+ return A + B;
+}
+
+/// ReadProfilingData - Load 'NumEntries' items of type 'T' from file 'F'
+template <typename T>
+static void ReadProfilingData(const char *ToolName, FILE *F,
+ T *Data, size_t NumEntries) {
+ // Read in the block of data...
+ if (fread(Data, sizeof(T), NumEntries, F) != NumEntries)
+ report_fatal_error(Twine(ToolName) + ": Profiling data truncated");
+}
+
+/// ReadProfilingNumEntries - Read how many entries are in this profiling data
+/// packet.
+static unsigned ReadProfilingNumEntries(const char *ToolName, FILE *F,
+ bool ShouldByteSwap) {
+ unsigned Entry;
+ ReadProfilingData<unsigned>(ToolName, F, &Entry, 1);
+ return ShouldByteSwap ? ByteSwap_32(Entry) : Entry;
+}
+
+/// ReadProfilingBlock - Read the number of entries in the next profiling data
+/// packet and then accumulate the entries into 'Data'.
+static void ReadProfilingBlock(const char *ToolName, FILE *F,
+ bool ShouldByteSwap,
+ SmallVector<unsigned, 32> &Data) {
+ // Read the number of entries...
+ unsigned NumEntries = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);
+
+ // Read in the data.
+ SmallVector<unsigned, 8> TempSpace(NumEntries);
+ ReadProfilingData<unsigned>(ToolName, F, TempSpace.data(), NumEntries);
+
+ // Make sure we have enough space ...
+ if (Data.size() < NumEntries)
+ Data.resize(NumEntries, ProfileDataLoader::Uncounted);
+
+ // Accumulate the data we just read into the existing data.
+ for (unsigned i = 0; i < NumEntries; ++i) {
+ unsigned Entry = ShouldByteSwap ? ByteSwap_32(TempSpace[i]) : TempSpace[i];
+ Data[i] = AddCounts(Entry, Data[i]);
+ }
+}
+
+/// ReadProfilingArgBlock - Read the command line arguments that the program was
+/// run with when the current profiling data packet(s) were generated.
+static void ReadProfilingArgBlock(const char *ToolName, FILE *F,
+ bool ShouldByteSwap,
+ SmallVector<std::string, 1> &CommandLines) {
+ // Read the number of bytes ...
+ unsigned ArgLength = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);
+
+ // Read in the arguments (if there are any to read). Round up the length to
+ // the nearest 4-byte multiple.
+ SmallVector<char, 8> Args(ArgLength+4);
+ if (ArgLength)
+ ReadProfilingData<char>(ToolName, F, Args.data(), (ArgLength+3) & ~3);
+
+ // Store the arguments.
+ CommandLines.push_back(std::string(&Args[0], &Args[ArgLength]));
+}
+
+const unsigned ProfileDataLoader::Uncounted = ~0U;
+
+/// ProfileDataLoader ctor - Read the specified profiling data file, reporting
+/// a fatal error if the file is invalid or broken.
+ProfileDataLoader::ProfileDataLoader(const char *ToolName,
+ const std::string &Filename)
+ : Filename(Filename) {
+ FILE *F = fopen(Filename.c_str(), "rb");
+ if (F == 0)
+ report_fatal_error(Twine(ToolName) + ": Error opening '" +
+ Filename + "': ");
+
+ // Keep reading packets until we run out of them.
+ unsigned PacketType;
+ while (fread(&PacketType, sizeof(unsigned), 1, F) == 1) {
+ // If the low eight bits of the packet are zero, we must be dealing with an
+ // endianness mismatch. Byteswap all words read from the profiling
+ // information. This can happen when the compiler host and target have
+ // different endianness.
+ bool ShouldByteSwap = (char)PacketType == 0;
+ PacketType = ShouldByteSwap ? ByteSwap_32(PacketType) : PacketType;
+
+ switch (PacketType) {
+ case ArgumentInfo:
+ ReadProfilingArgBlock(ToolName, F, ShouldByteSwap, CommandLines);
+ break;
+
+ case EdgeInfo:
+ ReadProfilingBlock(ToolName, F, ShouldByteSwap, EdgeCounts);
+ break;
+
+ default:
+ report_fatal_error(std::string(ToolName)
+ + ": Unknown profiling packet type");
+ break;
+ }
+ }
+
+ fclose(F);
+}
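
The low-byte test in the constructor works because packet type tags are small positive integers: written by an opposite-endian host, the tag's nonzero bits land in the high byte and the low byte reads as zero. A worked example (ByteSwap_32 comes from llvm/Support/MathExtras.h):

    #include "llvm/Support/MathExtras.h"

    static unsigned decodeTag(unsigned Raw) {
      // 0x01000000 is tag "1" as written by an opposite-endian host; its
      // low byte is zero, which a native tag never has.
      bool ShouldByteSwap = (char)Raw == 0;
      return ShouldByteSwap ? llvm::ByteSwap_32(Raw) : Raw;
    }
    // decodeTag(0x01000000) == 1; decodeTag(1) == 1.
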
diff --git a/contrib/llvm/lib/Analysis/ProfileDataLoaderPass.cpp b/contrib/llvm/lib/Analysis/ProfileDataLoaderPass.cpp
new file mode 100644
index 0000000..c43cff0
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/ProfileDataLoaderPass.cpp
@@ -0,0 +1,188 @@
+//===- ProfileDataLoaderPass.cpp - Set branch weight metadata from prof ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass loads profiling data from a dump file and sets branch weight
+// metadata.
+//
+// TODO: Replace all "profile-metadata-loader" strings with "profile-loader"
+// once ProfileInfo etc. has been removed.
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "profile-metadata-loader"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/InstrTypes.h"
+#include "llvm/Module.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/MDBuilder.h"
+#include "llvm/Metadata.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/ProfileDataLoader.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Format.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+STATISTIC(NumEdgesRead, "The # of edges read.");
+STATISTIC(NumTermsAnnotated, "The # of terminator instructions annotated.");
+
+static cl::opt<std::string>
+ProfileMetadataFilename("profile-file", cl::init("llvmprof.out"),
+ cl::value_desc("filename"),
+ cl::desc("Profile file loaded by -profile-metadata-loader"));
+
+namespace {
+ /// This pass loads profiling data from a dump file and sets branch weight
+ /// metadata.
+ class ProfileMetadataLoaderPass : public ModulePass {
+ std::string Filename;
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ explicit ProfileMetadataLoaderPass(const std::string &filename = "")
+ : ModulePass(ID), Filename(filename) {
+ initializeProfileMetadataLoaderPassPass(*PassRegistry::getPassRegistry());
+ if (filename.empty()) Filename = ProfileMetadataFilename;
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+
+ virtual const char *getPassName() const {
+ return "Profile loader";
+ }
+
+ virtual void readEdge(unsigned, ProfileData&, ProfileData::Edge,
+ ArrayRef<unsigned>);
+ virtual unsigned matchEdges(Module&, ProfileData&, ArrayRef<unsigned>);
+ virtual void setBranchWeightMetadata(Module&, ProfileData&);
+
+ virtual bool runOnModule(Module &M);
+ };
+} // End of anonymous namespace
+
+char ProfileMetadataLoaderPass::ID = 0;
+INITIALIZE_PASS_BEGIN(ProfileMetadataLoaderPass, "profile-metadata-loader",
+ "Load profile information from llvmprof.out", false, true)
+INITIALIZE_PASS_END(ProfileMetadataLoaderPass, "profile-metadata-loader",
+ "Load profile information from llvmprof.out", false, true)
+
+char &llvm::ProfileMetadataLoaderPassID = ProfileMetadataLoaderPass::ID;
+
+/// createProfileMetadataLoaderPass - This function returns a Pass that loads
+/// the profiling information for the module from the specified filename,
+/// making it available to the optimizers.
+ModulePass *llvm::createProfileMetadataLoaderPass() {
+ return new ProfileMetadataLoaderPass();
+}
+ModulePass *llvm::createProfileMetadataLoaderPass(const std::string &Filename) {
+ return new ProfileMetadataLoaderPass(Filename);
+}
+
+/// readEdge - Take the value from a profile counter and assign it to an edge.
+void ProfileMetadataLoaderPass::readEdge(unsigned ReadCount,
+ ProfileData &PB, ProfileData::Edge e,
+ ArrayRef<unsigned> Counters) {
+ if (ReadCount >= Counters.size()) return;
+
+ unsigned weight = Counters[ReadCount];
+ assert(weight != ProfileDataLoader::Uncounted);
+ PB.addEdgeWeight(e, weight);
+
+ DEBUG(dbgs() << "-- Read Edge Counter for " << e
+ << " (# "<< (ReadCount) << "): "
+ << PB.getEdgeWeight(e) << "\n");
+}
+
+/// matchEdges - Link every profile counter with an edge.
+unsigned ProfileMetadataLoaderPass::matchEdges(Module &M, ProfileData &PB,
+ ArrayRef<unsigned> Counters) {
+ if (Counters.size() == 0) return 0;
+
+ unsigned ReadCount = 0;
+
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration()) continue;
+ DEBUG(dbgs() << "Loading edges in '" << F->getName() << "'\n");
+ readEdge(ReadCount++, PB, PB.getEdge(0, &F->getEntryBlock()), Counters);
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ TerminatorInst *TI = BB->getTerminator();
+ for (unsigned s = 0, e = TI->getNumSuccessors(); s != e; ++s) {
+ readEdge(ReadCount++, PB, PB.getEdge(BB,TI->getSuccessor(s)),
+ Counters);
+ }
+ }
+ }
+
+ return ReadCount;
+}
+
+/// setBranchWeightMetadata - Translate the counter values associated with each
+/// edge into branch weights for each conditional branch (a branch with 2 or
+/// more destinations).
+void ProfileMetadataLoaderPass::setBranchWeightMetadata(Module &M,
+ ProfileData &PB) {
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration()) continue;
+ DEBUG(dbgs() << "Setting branch metadata in '" << F->getName() << "'\n");
+
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ TerminatorInst *TI = BB->getTerminator();
+ unsigned NumSuccessors = TI->getNumSuccessors();
+
+ // If there is only one successor then we cannot set a branch
+ // probability as the target is certain.
+ if (NumSuccessors < 2) continue;
+
+ // Load the weights of all edges leading from this terminator.
+ DEBUG(dbgs() << "-- Terminator with " << NumSuccessors
+ << " successors:\n");
+ SmallVector<uint32_t, 4> Weights(NumSuccessors);
+ for (unsigned s = 0 ; s < NumSuccessors ; ++s) {
+ ProfileData::Edge edge = PB.getEdge(BB, TI->getSuccessor(s));
+ Weights[s] = (uint32_t)PB.getEdgeWeight(edge);
+ DEBUG(dbgs() << "---- Edge '" << edge << "' has weight "
+ << Weights[s] << "\n");
+ }
+
+ // Set branch weight metadata. This will set branch probabilities of
+ // 100%/0% if that is true of the dynamic execution.
+ // BranchProbabilityInfo can account for this when it loads this metadata
+ // (it gives the unexecuted branch a weight of 1 for the purposes of
+ // probability calculations).
+ MDBuilder MDB(TI->getContext());
+ MDNode *Node = MDB.createBranchWeights(Weights);
+ TI->setMetadata(LLVMContext::MD_prof, Node);
+ NumTermsAnnotated++;
+ }
+ }
+}
+
+bool ProfileMetadataLoaderPass::runOnModule(Module &M) {
+ ProfileDataLoader PDL("profile-data-loader", Filename);
+ ProfileData PB;
+
+ ArrayRef<unsigned> Counters = PDL.getRawEdgeCounts();
+
+ unsigned ReadCount = matchEdges(M, PB, Counters);
+
+ if (ReadCount != Counters.size()) {
+ errs() << "WARNING: profile information is inconsistent with "
+ << "the current program!\n";
+ }
+ NumEdgesRead = ReadCount;
+
+ setBranchWeightMetadata(M, PB);
+
+ return ReadCount > 0;
+}
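
The metadata attached by setBranchWeightMetadata() is ordinary branch-weight metadata. A minimal sketch for a two-way branch observed taken 90 times and not taken 10 times (BI is an assumed BranchInst*, not a name from this change):

    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/MDBuilder.h"
    using namespace llvm;

    static void annotate(BranchInst *BI) {
      MDBuilder MDB(BI->getContext());
      BI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(90, 10));
    }
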
diff --git a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
index 63468f8..12b59e0 100644
--- a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
@@ -286,7 +286,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) {
}
}
- double fraction = floor(BBWeight/Edges.size());
+ double fraction = Edges.size() ? floor(BBWeight/Edges.size()) : 0.0;
// Finally we know what flow is still not leaving the block, distribute this
// flow onto the empty edges.
for (SmallVector<Edge, 8>::iterator ei = Edges.begin(), ee = Edges.end();
diff --git a/contrib/llvm/lib/Analysis/ProfileInfo.cpp b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
index 173de2c..b5b7ac1 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfo.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
@@ -1016,40 +1016,14 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
}
}
-raw_ostream& operator<<(raw_ostream &O, const Function *F) {
- return O << F->getName();
-}
-
raw_ostream& operator<<(raw_ostream &O, const MachineFunction *MF) {
return O << MF->getFunction()->getName() << "(MF)";
}
-raw_ostream& operator<<(raw_ostream &O, const BasicBlock *BB) {
- return O << BB->getName();
-}
-
raw_ostream& operator<<(raw_ostream &O, const MachineBasicBlock *MBB) {
return O << MBB->getBasicBlock()->getName() << "(MB)";
}
-raw_ostream& operator<<(raw_ostream &O, std::pair<const BasicBlock *, const BasicBlock *> E) {
- O << "(";
-
- if (E.first)
- O << E.first;
- else
- O << "0";
-
- O << ",";
-
- if (E.second)
- O << E.second;
- else
- O << "0";
-
- return O << ")";
-}
-
raw_ostream& operator<<(raw_ostream &O, std::pair<const MachineBasicBlock *, const MachineBasicBlock *> E) {
O << "(";
diff --git a/contrib/llvm/lib/Analysis/RegionInfo.cpp b/contrib/llvm/lib/Analysis/RegionInfo.cpp
index 868f483..30f0d2f 100644
--- a/contrib/llvm/lib/Analysis/RegionInfo.cpp
+++ b/contrib/llvm/lib/Analysis/RegionInfo.cpp
@@ -47,7 +47,7 @@ static cl::opt<enum Region::PrintStyle> printStyle("print-region-style",
cl::values(
clEnumValN(Region::PrintNone, "none", "print no details"),
clEnumValN(Region::PrintBB, "bb",
- "print regions in detail with block_node_iterator"),
+ "print regions in detail with block_iterator"),
clEnumValN(Region::PrintRN, "rn",
"print regions in detail with element_iterator"),
clEnumValEnd));
@@ -246,22 +246,6 @@ void Region::verifyRegionNest() const {
verifyRegion();
}
-Region::block_node_iterator Region::block_node_begin() {
- return GraphTraits<FlatIt<Region*> >::nodes_begin(this);
-}
-
-Region::block_node_iterator Region::block_node_end() {
- return GraphTraits<FlatIt<Region*> >::nodes_end(this);
-}
-
-Region::const_block_node_iterator Region::block_node_begin() const {
- return GraphTraits<FlatIt<const Region*> >::nodes_begin(this);
-}
-
-Region::const_block_node_iterator Region::block_node_end() const {
- return GraphTraits<FlatIt<const Region*> >::nodes_end(this);
-}
-
Region::element_iterator Region::element_begin() {
return GraphTraits<Region*>::nodes_begin(this);
}
@@ -425,10 +409,8 @@ void Region::print(raw_ostream &OS, bool print_tree, unsigned level,
OS.indent(level*2 + 2);
if (Style == PrintBB) {
- for (const_block_node_iterator I = block_node_begin(),
- E = block_node_end();
- I != E; ++I)
- OS << **I << ", "; // TODO: remove the last ","
+ for (const_block_iterator I = block_begin(), E = block_end(); I != E; ++I)
+ OS << (*I)->getName() << ", "; // TODO: remove the last ","
} else if (Style == PrintRN) {
for (const_element_iterator I = element_begin(), E = element_end(); I!=E; ++I)
OS << **I << ", "; // TODO: remove the last ",
@@ -445,9 +427,11 @@ void Region::print(raw_ostream &OS, bool print_tree, unsigned level,
OS.indent(level*2) << "} \n";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Region::dump() const {
print(dbgs(), true, getDepth(), printStyle.getValue());
}
+#endif
void Region::clearNodeCache() {
// Free the cached nodes.
diff --git a/contrib/llvm/lib/Analysis/RegionPass.cpp b/contrib/llvm/lib/Analysis/RegionPass.cpp
index c97b5eb..9208fa2 100644
--- a/contrib/llvm/lib/Analysis/RegionPass.cpp
+++ b/contrib/llvm/lib/Analysis/RegionPass.cpp
@@ -195,10 +195,9 @@ public:
virtual bool runOnRegion(Region *R, RGPassManager &RGM) {
Out << Banner;
- for (Region::block_node_iterator I = R->block_node_begin(),
- E = R->block_node_end();
+ for (Region::block_iterator I = R->block_begin(), E = R->block_end();
I != E; ++I)
- (*I)->getEntry()->print(Out);
+ (*I)->print(Out);
return false;
}
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index a654648..e3189ec 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -73,7 +73,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
@@ -105,6 +105,11 @@ MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
"derived loop"),
cl::init(100));
+// FIXME: Enable this with XDEBUG when the test suite is clean.
+static cl::opt<bool>
+VerifySCEV("verify-scev",
+ cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
+
INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
@@ -122,10 +127,12 @@ char ScalarEvolution::ID = 0;
// Implementation of the SCEV class.
//
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
print(dbgs());
dbgs() << '\n';
}
+#endif
void SCEV::print(raw_ostream &OS) const {
switch (getSCEVType()) {
@@ -2580,7 +2587,7 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
}
const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
- // If we have TargetData, we can bypass creating a target-independent
+ // If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
@@ -2606,7 +2613,7 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
unsigned FieldNo) {
- // If we have TargetData, we can bypass creating a target-independent
+ // If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
@@ -2671,7 +2678,7 @@ bool ScalarEvolution::isSCEVable(Type *Ty) const {
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
- // If we have a TargetData, use it!
+ // If we have a DataLayout, use it!
if (TD)
return TD->getTypeSizeInBits(Ty);
@@ -2679,7 +2686,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
if (Ty->isIntegerTy())
return Ty->getPrimitiveSizeInBits();
- // The only other support type is pointer. Without TargetData, conservatively
+ // The only other supported type is pointer. Without DataLayout, conservatively
// assume pointers are 64-bit.
assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
return 64;
@@ -2699,7 +2706,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
if (TD) return TD->getIntPtrType(getContext());
- // Without TargetData, conservatively assume pointers are 64-bit.
+ // Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
}
@@ -3978,8 +3985,11 @@ getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock) {
ConstantInt *Result = MulC->getValue();
- // Guard against huge trip counts.
- if (!Result || Result->getValue().getActiveBits() > 32)
+ // Guard against huge trip counts (this requires checking
+ // for zero to handle the case where the trip count == -1 and the
+ // addition wraps).
+ if (!Result || Result->getValue().getActiveBits() > 32 ||
+ Result->getValue().getActiveBits() == 0)
return 1;
return (unsigned)Result->getZExtValue();
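
The extra getActiveBits() == 0 check covers the wrap the comment mentions: a backedge-taken count of -1 yields a trip count of 0 after the +1, and an APInt holding 0 reports zero active bits. Worked example:

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    static void tripCountWrap() {
      llvm::APInt BTC = llvm::APInt::getAllOnesValue(32); // count == -1
      llvm::APInt Trip = BTC + 1;                         // wraps to 0
      assert(Trip.getActiveBits() == 0 && "caught by the new guard");
    }
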
@@ -4749,7 +4759,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
@@ -6141,7 +6151,7 @@ bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
return CmpInst::isTrueWhenEqual(Pred);
if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
if (FoundLHS == FoundRHS)
- return CmpInst::isFalseWhenEqual(Pred);
+ return CmpInst::isFalseWhenEqual(FoundPred);
// Check to see if we can make the LHS or RHS match.
if (LHS == FoundRHS || RHS == FoundLHS) {
@@ -6588,7 +6598,7 @@ ScalarEvolution::ScalarEvolution()
bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
return false;
@@ -6930,3 +6940,87 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
UnsignedRanges.erase(S);
SignedRanges.erase(S);
}
+
+typedef DenseMap<const Loop *, std::string> VerifyMap;
+
+/// replaceSubString - Replaces all occurrences of From in Str with To.
+static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
+ size_t Pos = 0;
+ while ((Pos = Str.find(From, Pos)) != std::string::npos) {
+ Str.replace(Pos, From.size(), To.data(), To.size());
+ Pos += To.size();
+ }
+}
+
+/// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
+static void
+getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
+ for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
+ getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
+
+ std::string &S = Map[L];
+ if (S.empty()) {
+ raw_string_ostream OS(S);
+ SE.getBackedgeTakenCount(L)->print(OS);
+
+ // false and 0 are semantically equivalent. This can happen in dead loops.
+ replaceSubString(OS.str(), "false", "0");
+ // Remove wrap flags, their use in SCEV is highly fragile.
+ // FIXME: Remove this when SCEV gets smarter about them.
+ replaceSubString(OS.str(), "<nw>", "");
+ replaceSubString(OS.str(), "<nsw>", "");
+ replaceSubString(OS.str(), "<nuw>", "");
+ }
+ }
+}
+
+void ScalarEvolution::verifyAnalysis() const {
+ if (!VerifySCEV)
+ return;
+
+ ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
+
+ // Gather stringified backedge taken counts for all loops using SCEV's caches.
+ // FIXME: It would be much better to store actual values instead of strings,
+ // but SCEV pointers will change if we drop the caches.
+ VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
+ for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
+ getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
+
+ // Gather stringified backedge taken counts for all loops without using
+ // SCEV's caches.
+ SE.releaseMemory();
+ for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
+ getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
+
+ // Now compare whether they're the same with and without caches. This allows
+ // verifying that no pass changed the cache.
+ assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
+ "New loops suddenly appeared!");
+
+ for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
+ OldE = BackedgeDumpsOld.end(),
+ NewI = BackedgeDumpsNew.begin();
+ OldI != OldE; ++OldI, ++NewI) {
+ assert(OldI->first == NewI->first && "Loop order changed!");
+
+ // Compare the stringified SCEVs. We don't care if an undef backedge-taken
+ // count changes.
+ // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
+ // means that a pass is buggy or SCEV has to learn a new pattern but is
+ // usually not harmful.
+ if (OldI->second != NewI->second &&
+ OldI->second.find("undef") == std::string::npos &&
+ NewI->second.find("undef") == std::string::npos &&
+ OldI->second != "***COULDNOTCOMPUTE***" &&
+ NewI->second != "***COULDNOTCOMPUTE***") {
+ dbgs() << "SCEVValidator: SCEV for loop '"
+ << OldI->first->getHeader()->getName()
+ << "' changed from '" << OldI->second
+ << "' to '" << NewI->second << "'!\n";
+ std::abort();
+ }
+ }
+
+ // TODO: Verify more things.
+}
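
The new verifyAnalysis() works purely on strings: it dumps each loop's backedge-taken count, scrubs representation-only noise (the "false"/"0" spelling and the fragile wrap flags), and compares dumps computed with and without SCEV's caches. A minimal standalone sketch of that normalize-and-compare idea, using no LLVM types:

#include <cassert>
#include <string>

// Same contract as the replaceSubString helper added above.
static void replaceSubString(std::string &Str, const std::string &From,
                             const std::string &To) {
  size_t Pos = 0;
  while ((Pos = Str.find(From, Pos)) != std::string::npos) {
    Str.replace(Pos, From.size(), To);
    Pos += To.size();
  }
}

// Scrub differences that do not change the meaning of the dump.
static std::string normalize(std::string S) {
  replaceSubString(S, "false", "0");
  replaceSubString(S, "<nw>", "");
  replaceSubString(S, "<nsw>", "");
  replaceSubString(S, "<nuw>", "");
  return S;
}

int main() {
  // Two dumps of the "same" count that differ only in fragile details.
  assert(normalize("(4 + %n)<nuw>") == normalize("(4 + %n)"));
  assert(normalize("false") == normalize("0"));
  return 0;
}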
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 62710c5..111bfb4 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -18,7 +18,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
@@ -212,7 +212,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder,
const SCEV *Factor,
ScalarEvolution &SE,
- const TargetData *TD) {
+ const DataLayout *TD) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
@@ -253,7 +253,7 @@ static bool FactorOutConstant(const SCEV *&S,
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
if (TD) {
- // With TargetData, the size is known. Check if there is a constant
+ // With DataLayout, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can
// factor it.
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
@@ -267,7 +267,7 @@ static bool FactorOutConstant(const SCEV *&S,
return true;
}
} else {
- // Without TargetData, check if Factor can be factored out of any of the
+ // Without DataLayout, check if Factor can be factored out of any of the
// Mul's operands. If so, we can just remove it.
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
@@ -458,7 +458,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
if (SE.TD) {
- // With TargetData, field offsets are known. See if a constant offset
+ // With DataLayout, field offsets are known. See if a constant offset
// falls within any of the struct fields.
if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
@@ -477,7 +477,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
}
}
} else {
- // Without TargetData, just check for an offsetof expression of the
+ // Without DataLayout, just check for an offsetof expression of the
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
@@ -1618,6 +1618,17 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
PEnd = Phis.end(); PIter != PEnd; ++PIter) {
PHINode *Phi = *PIter;
+ // Fold constant phis. They may be congruent to other constant phis and
+ // would confuse the logic below that expects proper IVs.
+ if (Value *V = Phi->hasConstantValue()) {
+ Phi->replaceAllUsesWith(V);
+ DeadInsts.push_back(Phi);
+ ++NumElim;
+ DEBUG_WITH_TYPE(DebugType, dbgs()
+ << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
+ continue;
+ }
+
if (!SE.isSCEVable(Phi->getType()))
continue;
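
Folding phis whose incoming values all agree is a prerequisite for the congruence logic that follows, since such phis are not proper IVs. A standalone sketch of the underlying check, using a hypothetical Phi stand-in rather than LLVM's PHINode::hasConstantValue():

#include <cassert>
#include <vector>

struct Phi {
  std::vector<int> Incoming; // stand-in for the incoming Values
};

// Returns the common value if every incoming value agrees, or -1
// (a stand-in for null) otherwise.
static int hasConstantValue(const Phi &P) {
  if (P.Incoming.empty()) return -1;
  for (int V : P.Incoming)
    if (V != P.Incoming.front()) return -1;
  return P.Incoming.front();
}

int main() {
  Phi Congruent{{42, 42, 42}};
  Phi Real{{1, 2}};
  assert(hasConstantValue(Congruent) == 42); // would be RAUW'd and deleted
  assert(hasConstantValue(Real) == -1);      // a proper IV, kept
  return 0;
}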
diff --git a/contrib/llvm/lib/Analysis/Trace.cpp b/contrib/llvm/lib/Analysis/Trace.cpp
index ff5010b..22da857 100644
--- a/contrib/llvm/lib/Analysis/Trace.cpp
+++ b/contrib/llvm/lib/Analysis/Trace.cpp
@@ -43,9 +43,11 @@ void Trace::print(raw_ostream &O) const {
O << "; Trace parent function: \n" << *F;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Debugger convenience method; writes trace to standard error
/// output stream.
///
void Trace::dump() const {
print(dbgs());
}
+#endif
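
The guard added here is the usual LLVM pattern for debug-only dump() methods: they exist in asserts builds, or when LLVM_ENABLE_DUMP is explicitly defined, and are compiled out of release binaries. A small self-contained illustration of the same pattern:

#include <iostream>

struct Trace {
  void print(std::ostream &OS) const { OS << "trace\n"; }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Debugger convenience method; absent from release builds.
  void dump() const { print(std::cerr); }
#endif
};

int main() {
  Trace T;
  T.print(std::cout); // always available
#ifndef NDEBUG
  T.dump();           // only in asserts builds
#endif
  return 0;
}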
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index cea34e1..3beb373 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -22,7 +22,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
@@ -36,7 +36,7 @@ const unsigned MaxDepth = 6;
/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
-static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
+static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!");
@@ -46,7 +46,7 @@ static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
if (!Add) {
if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
// We know that the top bits of C-X are clear if X contains less bits
@@ -132,7 +132,7 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = KnownZero.getBitWidth();
ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
@@ -226,7 +226,7 @@ void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = KnownZero.getBitWidth();
@@ -308,11 +308,20 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
}
if (Argument *A = dyn_cast<Argument>(V)) {
- // Get alignment information off byval arguments if specified in the IR.
- if (A->hasByValAttr())
- if (unsigned Align = A->getParamAlignment())
- KnownZero = APInt::getLowBitsSet(BitWidth,
- CountTrailingZeros_32(Align));
+ unsigned Align = 0;
+
+ if (A->hasByValAttr()) {
+ // Get alignment information off byval arguments if specified in the IR.
+ Align = A->getParamAlignment();
+ } else if (TD && A->hasStructRetAttr()) {
+ // An sret parameter has at least the ABI alignment of the return type.
+ Type *EltTy = cast<PointerType>(A->getType())->getElementType();
+ if (EltTy->isSized())
+ Align = TD->getABITypeAlignment(EltTy);
+ }
+
+ if (Align)
+ KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
return;
}
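
The sret change rests on a simple bit-level fact: a pointer aligned to Align has log2(Align) trailing zero bits, which is exactly what the KnownZero mask records. A standalone sketch in plain C++, mirroring CountTrailingZeros_32 and APInt::getLowBitsSet with ordinary integers:

#include <cassert>
#include <cstdint>

// Illustrative stand-in for CountTrailingZeros_32.
static unsigned countTrailingZeros32(uint32_t V) {
  if (V == 0) return 32;
  unsigned N = 0;
  while (!(V & 1)) { V >>= 1; ++N; }
  return N;
}

// Low-bits mask, like APInt::getLowBitsSet(BitWidth, TZ) for BitWidth = 64.
static uint64_t knownZeroLowBitsFromAlign(unsigned Align) {
  unsigned TZ = countTrailingZeros32(Align);
  return (TZ >= 64) ? ~0ULL : ((1ULL << TZ) - 1);
}

int main() {
  assert(knownZeroLowBitsFromAlign(16) == 0xF); // 16-byte aligned: 4 known-zero low bits
  assert(knownZeroLowBitsFromAlign(1) == 0x0);  // no known bits
  return 0;
}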
@@ -420,15 +429,13 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
case Instruction::ZExt:
case Instruction::Trunc: {
Type *SrcTy = I->getOperand(0)->getType();
-
+
unsigned SrcBitWidth;
// Note that we handle pointer operands here because of inttoptr/ptrtoint
// which fall through here.
- if (SrcTy->isPointerTy())
- SrcBitWidth = TD->getTypeSizeInBits(SrcTy);
- else
- SrcBitWidth = SrcTy->getScalarSizeInBits();
-
+ SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
+
+ assert(SrcBitWidth && "SrcBitWidth can't be zero");
KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
@@ -778,7 +785,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around ComputeMaskedBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = getBitWidth(V->getType(), TD);
if (!BitWidth) {
KnownZero = false;
@@ -796,7 +803,7 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
-bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
+bool llvm::isPowerOfTwo(Value *V, const DataLayout *TD, bool OrZero,
unsigned Depth) {
if (Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue())
@@ -859,7 +866,7 @@ bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
-bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
+bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
if (Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue())
return false;
@@ -986,7 +993,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
@@ -1003,10 +1010,10 @@ bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
///
/// 'Op' must have a scalar integer type.
///
-unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
+unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
unsigned Depth) {
assert((TD || V->getType()->isIntOrIntVectorTy()) &&
- "ComputeNumSignBits requires a TargetData object to operate "
+ "ComputeNumSignBits requires a DataLayout object to operate "
"on non-integer values!");
Type *Ty = V->getType();
unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
@@ -1582,7 +1589,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const TargetData &TD) {
+ const DataLayout &TD) {
Operator *PtrOp = dyn_cast<Operator>(Ptr);
if (PtrOp == 0 || Ptr->getType()->isVectorTy())
return Ptr;
@@ -1614,7 +1621,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
// right.
unsigned PtrSize = TD.getPointerSizeInBits();
if (PtrSize < 64)
- Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);
+ Offset = SignExtend64(Offset, PtrSize);
return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
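
SignExtend64(Offset, PtrSize) is equivalent to the shift pair it replaces, but states the intent directly. A minimal standalone version mirroring the MathExtras.h helper, with the left shift done on an unsigned value to avoid signed-overflow pitfalls:

#include <cassert>
#include <cstdint>

// Sign-extend the low B bits of X to 64 bits, as in
// llvm/Support/MathExtras.h.
static int64_t signExtend64(uint64_t X, unsigned B) {
  return int64_t(X << (64 - B)) >> (64 - B);
}

int main() {
  // A 32-bit pointer offset of 0xFFFFFFFF is really -1.
  assert(signExtend64(0xFFFFFFFFull, 32) == -1);
  // Positive in-range values pass through unchanged.
  assert(signExtend64(0x7FFFFFFFull, 32) == 0x7FFFFFFF);
  return 0;
}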
@@ -1768,7 +1775,7 @@ uint64_t llvm::GetStringLength(Value *V) {
}
Value *
-llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
+llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
if (!V->getType()->isPointerTy())
return V;
for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
@@ -1799,7 +1806,7 @@ llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
void
llvm::GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects,
- const TargetData *TD,
+ const DataLayout *TD,
unsigned MaxLookup) {
SmallPtrSet<Value *, 4> Visited;
SmallVector<Value *, 4> Worklist;
@@ -1844,7 +1851,7 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
}
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
- const TargetData *TD) {
+ const DataLayout *TD) {
const Operator *Inst = dyn_cast<Operator>(V);
if (!Inst)
return false;
diff --git a/contrib/llvm/lib/Archive/ArchiveInternals.h b/contrib/llvm/lib/Archive/ArchiveInternals.h
index 55684f7..639f5ac 100644
--- a/contrib/llvm/lib/Archive/ArchiveInternals.h
+++ b/contrib/llvm/lib/Archive/ArchiveInternals.h
@@ -66,7 +66,7 @@ namespace llvm {
fmag[1] = '\n';
}
- bool checkSignature() {
+ bool checkSignature() const {
return 0 == memcmp(fmag, ARFILE_MEMBER_MAGIC,2);
}
};
diff --git a/contrib/llvm/lib/Archive/ArchiveReader.cpp b/contrib/llvm/lib/Archive/ArchiveReader.cpp
index 5cfc810..5052495 100644
--- a/contrib/llvm/lib/Archive/ArchiveReader.cpp
+++ b/contrib/llvm/lib/Archive/ArchiveReader.cpp
@@ -79,7 +79,7 @@ Archive::parseMemberHeader(const char*& At, const char* End, std::string* error)
}
// Cast archive member header
- ArchiveMemberHeader* Hdr = (ArchiveMemberHeader*)At;
+ const ArchiveMemberHeader* Hdr = (const ArchiveMemberHeader*)At;
At += sizeof(ArchiveMemberHeader);
int flags = 0;
@@ -196,7 +196,7 @@ Archive::parseMemberHeader(const char*& At, const char* End, std::string* error)
/* FALL THROUGH */
default:
- char* slash = (char*) memchr(Hdr->name, '/', 16);
+ const char* slash = (const char*) memchr(Hdr->name, '/', 16);
if (slash == 0)
slash = Hdr->name + 16;
pathname.assign(Hdr->name, slash - Hdr->name);
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.cpp b/contrib/llvm/lib/AsmParser/LLLexer.cpp
index 481733d..a60e4aa 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.cpp
+++ b/contrib/llvm/lib/AsmParser/LLLexer.cpp
@@ -456,11 +456,12 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(private);
KEYWORD(linker_private);
KEYWORD(linker_private_weak);
- KEYWORD(linker_private_weak_def_auto);
+ KEYWORD(linker_private_weak_def_auto); // FIXME: For backwards compatibility.
KEYWORD(internal);
KEYWORD(available_externally);
KEYWORD(linkonce);
KEYWORD(linkonce_odr);
+ KEYWORD(linkonce_odr_auto_hide);
KEYWORD(weak);
KEYWORD(weak_odr);
KEYWORD(appending);
@@ -509,6 +510,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(asm);
KEYWORD(sideeffect);
KEYWORD(alignstack);
+ KEYWORD(inteldialect);
KEYWORD(gc);
KEYWORD(ccc);
@@ -523,6 +525,9 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(msp430_intrcc);
KEYWORD(ptx_kernel);
KEYWORD(ptx_device);
+ KEYWORD(spir_kernel);
+ KEYWORD(spir_func);
+ KEYWORD(intel_ocl_bicc);
KEYWORD(cc);
KEYWORD(c);
@@ -553,7 +558,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(naked);
KEYWORD(nonlazybind);
KEYWORD(address_safety);
- KEYWORD(ia_nsdialect);
+ KEYWORD(minsize);
KEYWORD(type);
KEYWORD(opaque);
diff --git a/contrib/llvm/lib/AsmParser/LLParser.cpp b/contrib/llvm/lib/AsmParser/LLParser.cpp
index 0ff8edd..b24291f 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.cpp
+++ b/contrib/llvm/lib/AsmParser/LLParser.cpp
@@ -184,12 +184,13 @@ bool LLParser::ParseTopLevelEntities() {
case lltok::kw_private: // OptionalLinkage
case lltok::kw_linker_private: // OptionalLinkage
case lltok::kw_linker_private_weak: // OptionalLinkage
- case lltok::kw_linker_private_weak_def_auto: // OptionalLinkage
+ case lltok::kw_linker_private_weak_def_auto: // FIXME: backwards compat.
case lltok::kw_internal: // OptionalLinkage
case lltok::kw_weak: // OptionalLinkage
case lltok::kw_weak_odr: // OptionalLinkage
case lltok::kw_linkonce: // OptionalLinkage
case lltok::kw_linkonce_odr: // OptionalLinkage
+ case lltok::kw_linkonce_odr_auto_hide: // OptionalLinkage
case lltok::kw_appending: // OptionalLinkage
case lltok::kw_dllexport: // OptionalLinkage
case lltok::kw_common: // OptionalLinkage
@@ -576,8 +577,7 @@ bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc,
Linkage != GlobalValue::InternalLinkage &&
Linkage != GlobalValue::PrivateLinkage &&
Linkage != GlobalValue::LinkerPrivateLinkage &&
- Linkage != GlobalValue::LinkerPrivateWeakLinkage &&
- Linkage != GlobalValue::LinkerPrivateWeakDefAutoLinkage)
+ Linkage != GlobalValue::LinkerPrivateWeakLinkage)
return Error(LinkageLoc, "invalid linkage type for alias");
Constant *Aliasee;
@@ -779,7 +779,9 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, Name, M);
else
FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
- GlobalValue::ExternalWeakLinkage, 0, Name);
+ GlobalValue::ExternalWeakLinkage, 0, Name,
+ 0, GlobalVariable::NotThreadLocal,
+ PTy->getAddressSpace());
ForwardRefVals[Name] = std::make_pair(FwdVal, Loc);
return FwdVal;
@@ -916,59 +918,50 @@ bool LLParser::ParseOptionalAddrSpace(unsigned &AddrSpace) {
/// ParseOptionalAttrs - Parse a potentially empty attribute list. AttrKind
/// indicates what kind of attribute list this is: 0: function arg, 1: result,
/// 2: function attr.
-bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) {
- Attrs = Attribute::None;
+bool LLParser::ParseOptionalAttrs(AttrBuilder &B, unsigned AttrKind) {
LocTy AttrLoc = Lex.getLoc();
+ bool HaveError = false;
+
+ B.clear();
while (1) {
- switch (Lex.getKind()) {
+ lltok::Kind Token = Lex.getKind();
+ switch (Token) {
default: // End of attributes.
- if (AttrKind != 2 && (Attrs & Attribute::FunctionOnly))
- return Error(AttrLoc, "invalid use of function-only attribute");
-
- // As a hack, we allow "align 2" on functions as a synonym for
- // "alignstack 2".
- if (AttrKind == 2 &&
- (Attrs & ~(Attribute::FunctionOnly | Attribute::Alignment)))
- return Error(AttrLoc, "invalid use of attribute on a function");
-
- if (AttrKind != 0 && (Attrs & Attribute::ParameterOnly))
- return Error(AttrLoc, "invalid use of parameter-only attribute");
-
- return false;
- case lltok::kw_zeroext: Attrs |= Attribute::ZExt; break;
- case lltok::kw_signext: Attrs |= Attribute::SExt; break;
- case lltok::kw_inreg: Attrs |= Attribute::InReg; break;
- case lltok::kw_sret: Attrs |= Attribute::StructRet; break;
- case lltok::kw_noalias: Attrs |= Attribute::NoAlias; break;
- case lltok::kw_nocapture: Attrs |= Attribute::NoCapture; break;
- case lltok::kw_byval: Attrs |= Attribute::ByVal; break;
- case lltok::kw_nest: Attrs |= Attribute::Nest; break;
-
- case lltok::kw_noreturn: Attrs |= Attribute::NoReturn; break;
- case lltok::kw_nounwind: Attrs |= Attribute::NoUnwind; break;
- case lltok::kw_uwtable: Attrs |= Attribute::UWTable; break;
- case lltok::kw_returns_twice: Attrs |= Attribute::ReturnsTwice; break;
- case lltok::kw_noinline: Attrs |= Attribute::NoInline; break;
- case lltok::kw_readnone: Attrs |= Attribute::ReadNone; break;
- case lltok::kw_readonly: Attrs |= Attribute::ReadOnly; break;
- case lltok::kw_inlinehint: Attrs |= Attribute::InlineHint; break;
- case lltok::kw_alwaysinline: Attrs |= Attribute::AlwaysInline; break;
- case lltok::kw_optsize: Attrs |= Attribute::OptimizeForSize; break;
- case lltok::kw_ssp: Attrs |= Attribute::StackProtect; break;
- case lltok::kw_sspreq: Attrs |= Attribute::StackProtectReq; break;
- case lltok::kw_noredzone: Attrs |= Attribute::NoRedZone; break;
- case lltok::kw_noimplicitfloat: Attrs |= Attribute::NoImplicitFloat; break;
- case lltok::kw_naked: Attrs |= Attribute::Naked; break;
- case lltok::kw_nonlazybind: Attrs |= Attribute::NonLazyBind; break;
- case lltok::kw_address_safety: Attrs |= Attribute::AddressSafety; break;
- case lltok::kw_ia_nsdialect: Attrs |= Attribute::IANSDialect; break;
+ return HaveError;
+ case lltok::kw_zeroext: B.addAttribute(Attributes::ZExt); break;
+ case lltok::kw_signext: B.addAttribute(Attributes::SExt); break;
+ case lltok::kw_inreg: B.addAttribute(Attributes::InReg); break;
+ case lltok::kw_sret: B.addAttribute(Attributes::StructRet); break;
+ case lltok::kw_noalias: B.addAttribute(Attributes::NoAlias); break;
+ case lltok::kw_nocapture: B.addAttribute(Attributes::NoCapture); break;
+ case lltok::kw_byval: B.addAttribute(Attributes::ByVal); break;
+ case lltok::kw_nest: B.addAttribute(Attributes::Nest); break;
+
+ case lltok::kw_noreturn: B.addAttribute(Attributes::NoReturn); break;
+ case lltok::kw_nounwind: B.addAttribute(Attributes::NoUnwind); break;
+ case lltok::kw_uwtable: B.addAttribute(Attributes::UWTable); break;
+ case lltok::kw_returns_twice: B.addAttribute(Attributes::ReturnsTwice); break;
+ case lltok::kw_noinline: B.addAttribute(Attributes::NoInline); break;
+ case lltok::kw_readnone: B.addAttribute(Attributes::ReadNone); break;
+ case lltok::kw_readonly: B.addAttribute(Attributes::ReadOnly); break;
+ case lltok::kw_inlinehint: B.addAttribute(Attributes::InlineHint); break;
+ case lltok::kw_alwaysinline: B.addAttribute(Attributes::AlwaysInline); break;
+ case lltok::kw_optsize: B.addAttribute(Attributes::OptimizeForSize); break;
+ case lltok::kw_ssp: B.addAttribute(Attributes::StackProtect); break;
+ case lltok::kw_sspreq: B.addAttribute(Attributes::StackProtectReq); break;
+ case lltok::kw_noredzone: B.addAttribute(Attributes::NoRedZone); break;
+ case lltok::kw_noimplicitfloat: B.addAttribute(Attributes::NoImplicitFloat); break;
+ case lltok::kw_naked: B.addAttribute(Attributes::Naked); break;
+ case lltok::kw_nonlazybind: B.addAttribute(Attributes::NonLazyBind); break;
+ case lltok::kw_address_safety: B.addAttribute(Attributes::AddressSafety); break;
+ case lltok::kw_minsize: B.addAttribute(Attributes::MinSize); break;
case lltok::kw_alignstack: {
unsigned Alignment;
if (ParseOptionalStackAlignment(Alignment))
return true;
- Attrs |= Attribute::constructStackAlignmentFromInt(Alignment);
+ B.addStackAlignmentAttr(Alignment);
continue;
}
@@ -976,11 +969,57 @@ bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) {
unsigned Alignment;
if (ParseOptionalAlignment(Alignment))
return true;
- Attrs |= Attribute::constructAlignmentFromInt(Alignment);
+ B.addAlignmentAttr(Alignment);
continue;
}
}
+
+ // Perform some error checking.
+ switch (Token) {
+ default:
+ if (AttrKind == 2)
+ HaveError |= Error(AttrLoc, "invalid use of attribute on a function");
+ break;
+ case lltok::kw_align:
+ // As a hack, we allow "align 2" on functions as a synonym for
+ // "alignstack 2".
+ break;
+
+ // Parameter Only:
+ case lltok::kw_sret:
+ case lltok::kw_nocapture:
+ case lltok::kw_byval:
+ case lltok::kw_nest:
+ if (AttrKind != 0)
+ HaveError |= Error(AttrLoc, "invalid use of parameter-only attribute");
+ break;
+
+ // Function Only:
+ case lltok::kw_noreturn:
+ case lltok::kw_nounwind:
+ case lltok::kw_readnone:
+ case lltok::kw_readonly:
+ case lltok::kw_noinline:
+ case lltok::kw_alwaysinline:
+ case lltok::kw_optsize:
+ case lltok::kw_ssp:
+ case lltok::kw_sspreq:
+ case lltok::kw_noredzone:
+ case lltok::kw_noimplicitfloat:
+ case lltok::kw_naked:
+ case lltok::kw_inlinehint:
+ case lltok::kw_alignstack:
+ case lltok::kw_uwtable:
+ case lltok::kw_nonlazybind:
+ case lltok::kw_returns_twice:
+ case lltok::kw_address_safety:
+ case lltok::kw_minsize:
+ if (AttrKind != 2)
+ HaveError |= Error(AttrLoc, "invalid use of function-only attribute");
+ break;
+ }
+
Lex.Lex();
}
}
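
The rewrite replaces bitmask OR-ing with an AttrBuilder that accumulates attributes one call at a time, which is what allows the error checking to move into a per-token switch after the fact. A sketch of that builder shape with a hypothetical MiniAttrBuilder (not LLVM's actual AttrBuilder):

#include <cassert>
#include <set>
#include <string>

class MiniAttrBuilder {
  std::set<std::string> Attrs;
  unsigned Alignment = 0;
public:
  void clear() { Attrs.clear(); Alignment = 0; }
  MiniAttrBuilder &addAttribute(const std::string &A) {
    Attrs.insert(A);
    return *this;
  }
  MiniAttrBuilder &addAlignmentAttr(unsigned Align) {
    Alignment = Align;
    return *this;
  }
  bool hasAttributes() const { return !Attrs.empty() || Alignment != 0; }
  bool contains(const std::string &A) const { return Attrs.count(A) != 0; }
};

int main() {
  MiniAttrBuilder B;
  // Attributes accumulate per token, as in the parser's loop.
  B.addAttribute("noreturn").addAttribute("nounwind").addAlignmentAttr(16);
  assert(B.hasAttributes() && B.contains("noreturn"));
  B.clear(); // the parser clears the builder on entry
  assert(!B.hasAttributes());
  return 0;
}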
@@ -990,12 +1029,12 @@ bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) {
/// ::= 'private'
/// ::= 'linker_private'
/// ::= 'linker_private_weak'
-/// ::= 'linker_private_weak_def_auto'
/// ::= 'internal'
/// ::= 'weak'
/// ::= 'weak_odr'
/// ::= 'linkonce'
/// ::= 'linkonce_odr'
+/// ::= 'linkonce_odr_auto_hide'
/// ::= 'available_externally'
/// ::= 'appending'
/// ::= 'dllexport'
@@ -1012,14 +1051,15 @@ bool LLParser::ParseOptionalLinkage(unsigned &Res, bool &HasLinkage) {
case lltok::kw_linker_private_weak:
Res = GlobalValue::LinkerPrivateWeakLinkage;
break;
- case lltok::kw_linker_private_weak_def_auto:
- Res = GlobalValue::LinkerPrivateWeakDefAutoLinkage;
- break;
case lltok::kw_internal: Res = GlobalValue::InternalLinkage; break;
case lltok::kw_weak: Res = GlobalValue::WeakAnyLinkage; break;
case lltok::kw_weak_odr: Res = GlobalValue::WeakODRLinkage; break;
case lltok::kw_linkonce: Res = GlobalValue::LinkOnceAnyLinkage; break;
case lltok::kw_linkonce_odr: Res = GlobalValue::LinkOnceODRLinkage; break;
+ case lltok::kw_linkonce_odr_auto_hide:
+ case lltok::kw_linker_private_weak_def_auto: // FIXME: For backwards compat.
+ Res = GlobalValue::LinkOnceODRAutoHideLinkage;
+ break;
case lltok::kw_available_externally:
Res = GlobalValue::AvailableExternallyLinkage;
break;
@@ -1056,6 +1096,7 @@ bool LLParser::ParseOptionalVisibility(unsigned &Res) {
/// ::= /*empty*/
/// ::= 'ccc'
/// ::= 'fastcc'
+/// ::= 'intel_ocl_bicc'
/// ::= 'coldcc'
/// ::= 'x86_stdcallcc'
/// ::= 'x86_fastcallcc'
@@ -1066,6 +1107,8 @@ bool LLParser::ParseOptionalVisibility(unsigned &Res) {
/// ::= 'msp430_intrcc'
/// ::= 'ptx_kernel'
/// ::= 'ptx_device'
+/// ::= 'spir_func'
+/// ::= 'spir_kernel'
/// ::= 'cc' UINT
///
bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
@@ -1083,6 +1126,9 @@ bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
case lltok::kw_msp430_intrcc: CC = CallingConv::MSP430_INTR; break;
case lltok::kw_ptx_kernel: CC = CallingConv::PTX_Kernel; break;
case lltok::kw_ptx_device: CC = CallingConv::PTX_Device; break;
+ case lltok::kw_spir_kernel: CC = CallingConv::SPIR_KERNEL; break;
+ case lltok::kw_spir_func: CC = CallingConv::SPIR_FUNC; break;
+ case lltok::kw_intel_ocl_bicc: CC = CallingConv::Intel_OCL_BI; break;
case lltok::kw_cc: {
unsigned ArbitraryCC;
Lex.Lex();
@@ -1395,16 +1441,16 @@ bool LLParser::ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
// Parse the argument.
LocTy ArgLoc;
Type *ArgTy = 0;
- Attributes ArgAttrs1;
- Attributes ArgAttrs2;
+ AttrBuilder ArgAttrs;
Value *V;
if (ParseType(ArgTy, ArgLoc))
return true;
// Otherwise, handle normal operands.
- if (ParseOptionalAttrs(ArgAttrs1, 0) || ParseValue(ArgTy, V, PFS))
+ if (ParseOptionalAttrs(ArgAttrs, 0) || ParseValue(ArgTy, V, PFS))
return true;
- ArgList.push_back(ParamInfo(ArgLoc, V, ArgAttrs1|ArgAttrs2));
+ ArgList.push_back(ParamInfo(ArgLoc, V, Attributes::get(V->getContext(),
+ ArgAttrs)));
}
Lex.Lex(); // Lex the ')'.
@@ -1436,7 +1482,7 @@ bool LLParser::ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList,
} else {
LocTy TypeLoc = Lex.getLoc();
Type *ArgTy = 0;
- Attributes Attrs;
+ AttrBuilder Attrs;
std::string Name;
if (ParseType(ArgTy) ||
@@ -1453,7 +1499,9 @@ bool LLParser::ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList,
if (!FunctionType::isValidArgumentType(ArgTy))
return Error(TypeLoc, "invalid type for function argument");
- ArgList.push_back(ArgInfo(TypeLoc, ArgTy, Attrs, Name));
+ ArgList.push_back(ArgInfo(TypeLoc, ArgTy,
+ Attributes::get(ArgTy->getContext(),
+ Attrs), Name));
while (EatIfPresent(lltok::comma)) {
// Handle ... at end of arg list.
@@ -1479,7 +1527,9 @@ bool LLParser::ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList,
if (!ArgTy->isFirstClassType())
return Error(TypeLoc, "invalid type for function argument");
- ArgList.push_back(ArgInfo(TypeLoc, ArgTy, Attrs, Name));
+ ArgList.push_back(ArgInfo(TypeLoc, ArgTy,
+ Attributes::get(ArgTy->getContext(), Attrs),
+ Name));
}
}
@@ -1503,7 +1553,7 @@ bool LLParser::ParseFunctionType(Type *&Result) {
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
if (!ArgList[i].Name.empty())
return Error(ArgList[i].Loc, "argument name invalid in function type");
- if (ArgList[i].Attrs)
+ if (ArgList[i].Attrs.hasAttributes())
return Error(ArgList[i].Loc,
"argument attributes invalid in function type");
}
@@ -2069,16 +2119,18 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
case lltok::kw_asm: {
// ValID ::= 'asm' SideEffect? AlignStack? STRINGCONSTANT ',' STRINGCONSTANT
- bool HasSideEffect, AlignStack;
+ bool HasSideEffect, AlignStack, AsmDialect;
Lex.Lex();
if (ParseOptionalToken(lltok::kw_sideeffect, HasSideEffect) ||
ParseOptionalToken(lltok::kw_alignstack, AlignStack) ||
+ ParseOptionalToken(lltok::kw_inteldialect, AsmDialect) ||
ParseStringConstant(ID.StrVal) ||
ParseToken(lltok::comma, "expected comma in inline asm expression") ||
ParseToken(lltok::StringConstant, "expected constraint string"))
return true;
ID.StrVal2 = Lex.getStrVal();
- ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1);
+ ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1) |
+ (unsigned(AsmDialect)<<2);
ID.Kind = ValID::t_InlineAsm;
return false;
}
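
ID.UIntVal now packs three independent flags: side effect in bit 0, alignstack in bit 1, and the asm dialect in bit 2; ConvertValIDToValue below unpacks them the same way. A standalone illustration of the packing:

#include <cassert>

int main() {
  bool HasSideEffect = true, AlignStack = false, IntelDialect = true;
  // Same layout as the parser's ID.UIntVal.
  unsigned Packed = unsigned(HasSideEffect) | (unsigned(AlignStack) << 1) |
                    (unsigned(IntelDialect) << 2);

  assert((Packed & 1) == 1);        // side effect
  assert(((Packed >> 1) & 1) == 0); // not alignstack
  assert((Packed >> 2) == 1);       // dialect: 1 = Intel, 0 = AT&T
  return 0;
}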
@@ -2495,7 +2547,8 @@ bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
PTy ? dyn_cast<FunctionType>(PTy->getElementType()) : 0;
if (!FTy || !InlineAsm::Verify(FTy, ID.StrVal2))
return Error(ID.Loc, "invalid type for inline asm constraint string");
- V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1, ID.UIntVal>>1);
+ V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1,
+ (ID.UIntVal>>1)&1, (InlineAsm::AsmDialect(ID.UIntVal>>2)));
return false;
}
case ValID::t_MDNode:
@@ -2630,7 +2683,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
unsigned Linkage;
unsigned Visibility;
- Attributes RetAttrs;
+ AttrBuilder RetAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc = Lex.getLoc();
@@ -2653,11 +2706,11 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
case GlobalValue::PrivateLinkage:
case GlobalValue::LinkerPrivateLinkage:
case GlobalValue::LinkerPrivateWeakLinkage:
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
case GlobalValue::InternalLinkage:
case GlobalValue::AvailableExternallyLinkage:
case GlobalValue::LinkOnceAnyLinkage:
case GlobalValue::LinkOnceODRLinkage:
+ case GlobalValue::LinkOnceODRAutoHideLinkage:
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
case GlobalValue::DLLExportLinkage:
@@ -2694,7 +2747,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
SmallVector<ArgInfo, 8> ArgList;
bool isVarArg;
- Attributes FuncAttrs;
+ AttrBuilder FuncAttrs;
std::string Section;
unsigned Alignment;
std::string GC;
@@ -2713,9 +2766,9 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
return true;
// If the alignment was parsed as an attribute, move to the alignment field.
- if (FuncAttrs & Attribute::Alignment) {
- Alignment = Attribute::getAlignmentFromAttrs(FuncAttrs);
- FuncAttrs &= ~Attribute::Alignment;
+ if (FuncAttrs.hasAlignmentAttr()) {
+ Alignment = FuncAttrs.getAlignment();
+ FuncAttrs.removeAttribute(Attributes::Alignment);
}
// Okay, if we got here, the function is syntactically valid. Convert types
@@ -2723,21 +2776,28 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
std::vector<Type*> ParamTypeList;
SmallVector<AttributeWithIndex, 8> Attrs;
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
+ if (RetAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ Attributes::get(RetType->getContext(),
+ RetAttrs)));
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
ParamTypeList.push_back(ArgList[i].Ty);
- if (ArgList[i].Attrs != Attribute::None)
+ if (ArgList[i].Attrs.hasAttributes())
Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
}
- if (FuncAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FuncAttrs));
+ if (FuncAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ Attributes::get(RetType->getContext(),
+ FuncAttrs)));
- AttrListPtr PAL = AttrListPtr::get(Attrs);
+ AttrListPtr PAL = AttrListPtr::get(Context, Attrs);
- if (PAL.paramHasAttr(1, Attribute::StructRet) && !RetType->isVoidTy())
+ if (PAL.getParamAttributes(1).hasAttribute(Attributes::StructRet) &&
+ !RetType->isVoidTy())
return Error(RetTypeLoc, "functions with 'sret' argument must return void");
FunctionType *FT =
@@ -2752,6 +2812,9 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
ForwardRefVals.find(FunctionName);
if (FRVI != ForwardRefVals.end()) {
Fn = M->getFunction(FunctionName);
+ if (!Fn)
+ return Error(FRVI->second.second, "invalid forward reference to "
+ "function as global value!");
if (Fn->getType() != PFT)
return Error(FRVI->second.second, "invalid forward reference to "
"function '" + FunctionName + "' with wrong type!");
@@ -3205,7 +3268,7 @@ bool LLParser::ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS) {
/// OptionalAttrs 'to' TypeAndValue 'unwind' TypeAndValue
bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
LocTy CallLoc = Lex.getLoc();
- Attributes RetAttrs, FnAttrs;
+ AttrBuilder RetAttrs, FnAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc;
@@ -3250,8 +3313,11 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
// Set up the Attributes for the function.
SmallVector<AttributeWithIndex, 8> Attrs;
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
+ if (RetAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ Attributes::get(Callee->getContext(),
+ RetAttrs)));
SmallVector<Value*, 8> Args;
@@ -3271,18 +3337,21 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
return Error(ArgList[i].Loc, "argument is not of expected type '" +
getTypeString(ExpectedTy) + "'");
Args.push_back(ArgList[i].V);
- if (ArgList[i].Attrs != Attribute::None)
+ if (ArgList[i].Attrs.hasAttributes())
Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
}
if (I != E)
return Error(CallLoc, "not enough parameters specified for call");
- if (FnAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs));
+ if (FnAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ Attributes::get(Callee->getContext(),
+ FnAttrs)));
// Finish off the Attributes and check them
- AttrListPtr PAL = AttrListPtr::get(Attrs);
+ AttrListPtr PAL = AttrListPtr::get(Context, Attrs);
InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB, Args);
II->setCallingConv(CC);
@@ -3604,7 +3673,7 @@ bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) {
/// ParameterList OptionalAttrs
bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
bool isTail) {
- Attributes RetAttrs, FnAttrs;
+ AttrBuilder RetAttrs, FnAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc;
@@ -3646,8 +3715,11 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
// Set up the Attributes for the function.
SmallVector<AttributeWithIndex, 8> Attrs;
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
+ if (RetAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ Attributes::get(Callee->getContext(),
+ RetAttrs)));
SmallVector<Value*, 8> Args;
@@ -3667,18 +3739,21 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
return Error(ArgList[i].Loc, "argument is not of expected type '" +
getTypeString(ExpectedTy) + "'");
Args.push_back(ArgList[i].V);
- if (ArgList[i].Attrs != Attribute::None)
+ if (ArgList[i].Attrs.hasAttributes())
Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
}
if (I != E)
return Error(CallLoc, "not enough parameters specified for call");
- if (FnAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs));
+ if (FnAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ Attributes::get(Callee->getContext(),
+ FnAttrs)));
// Finish off the Attributes and check them
- AttrListPtr PAL = AttrListPtr::get(Attrs);
+ AttrListPtr PAL = AttrListPtr::get(Context, Attrs);
CallInst *CI = CallInst::Create(Callee, Args);
CI->setTailCall(isTail);
diff --git a/contrib/llvm/lib/AsmParser/LLParser.h b/contrib/llvm/lib/AsmParser/LLParser.h
index 257c726..c6bbdb2 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.h
+++ b/contrib/llvm/lib/AsmParser/LLParser.h
@@ -175,7 +175,7 @@ namespace llvm {
bool ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM);
bool ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM);
bool ParseOptionalAddrSpace(unsigned &AddrSpace);
- bool ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind);
+ bool ParseOptionalAttrs(AttrBuilder &Attrs, unsigned AttrKind);
bool ParseOptionalLinkage(unsigned &Linkage, bool &HasLinkage);
bool ParseOptionalLinkage(unsigned &Linkage) {
bool HasLinkage; return ParseOptionalLinkage(Linkage, HasLinkage);
diff --git a/contrib/llvm/lib/AsmParser/LLToken.h b/contrib/llvm/lib/AsmParser/LLToken.h
index 0b0b980..036686d 100644
--- a/contrib/llvm/lib/AsmParser/LLToken.h
+++ b/contrib/llvm/lib/AsmParser/LLToken.h
@@ -37,8 +37,10 @@ namespace lltok {
kw_global, kw_constant,
kw_private, kw_linker_private, kw_linker_private_weak,
- kw_linker_private_weak_def_auto, kw_internal,
- kw_linkonce, kw_linkonce_odr, kw_weak, kw_weak_odr, kw_appending,
+ kw_linker_private_weak_def_auto, // FIXME: For backwards compatibility.
+ kw_internal,
+ kw_linkonce, kw_linkonce_odr, kw_linkonce_odr_auto_hide,
+ kw_weak, kw_weak_odr, kw_appending,
kw_dllimport, kw_dllexport, kw_common, kw_available_externally,
kw_default, kw_hidden, kw_protected,
kw_unnamed_addr,
@@ -70,14 +72,17 @@ namespace lltok {
kw_asm,
kw_sideeffect,
kw_alignstack,
+ kw_inteldialect,
kw_gc,
kw_c,
kw_cc, kw_ccc, kw_fastcc, kw_coldcc,
+ kw_intel_ocl_bicc,
kw_x86_stdcallcc, kw_x86_fastcallcc, kw_x86_thiscallcc,
kw_arm_apcscc, kw_arm_aapcscc, kw_arm_aapcs_vfpcc,
kw_msp430_intrcc,
kw_ptx_kernel, kw_ptx_device,
+ kw_spir_kernel, kw_spir_func,
kw_signext,
kw_zeroext,
@@ -105,7 +110,7 @@ namespace lltok {
kw_naked,
kw_nonlazybind,
kw_address_safety,
- kw_ia_nsdialect,
+ kw_minsize,
kw_type,
kw_opaque,
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 4ffee38..4ec9da1 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -52,6 +52,8 @@ void BitcodeReader::FreeState() {
std::vector<Function*>().swap(FunctionsWithBodies);
DeferredFunctionInfo.clear();
MDKindMap.clear();
+
+ assert(BlockAddrFwdRefs.empty() && "Unresolved blockaddress fwd references");
}
//===----------------------------------------------------------------------===//
@@ -89,7 +91,7 @@ static GlobalValue::LinkageTypes GetDecodedLinkage(unsigned Val) {
case 12: return GlobalValue::AvailableExternallyLinkage;
case 13: return GlobalValue::LinkerPrivateLinkage;
case 14: return GlobalValue::LinkerPrivateWeakLinkage;
- case 15: return GlobalValue::LinkerPrivateWeakDefAutoLinkage;
+ case 15: return GlobalValue::LinkOnceODRAutoHideLinkage;
}
}
@@ -197,7 +199,7 @@ namespace {
/// @brief A class for maintaining the slot number definition
/// as a placeholder for the actual definition for forward constants defs.
class ConstantPlaceHolder : public ConstantExpr {
- void operator=(const ConstantPlaceHolder &); // DO NOT IMPLEMENT
+ void operator=(const ConstantPlaceHolder &) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly one operand
void *operator new(size_t s) {
@@ -209,7 +211,6 @@ namespace {
}
/// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
- //static inline bool classof(const ConstantPlaceHolder *) { return true; }
static bool classof(const Value *V) {
return isa<ConstantExpr>(V) &&
cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
@@ -475,17 +476,18 @@ bool BitcodeReader::ParseAttributeBlock() {
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
Attributes ReconstitutedAttr =
- Attribute::decodeLLVMAttributesForBitcode(Record[i+1]);
+ Attributes::decodeLLVMAttributesForBitcode(Context, Record[i+1]);
Record[i+1] = ReconstitutedAttr.Raw();
}
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- if (Attributes(Record[i+1]) != Attribute::None)
+ AttrBuilder B(Record[i+1]);
+ if (B.hasAttributes())
Attrs.push_back(AttributeWithIndex::get(Record[i],
- Attributes(Record[i+1])));
+ Attributes::get(Context, B)));
}
- MAttributes.push_back(AttrListPtr::get(Attrs));
+ MAttributes.push_back(AttrListPtr::get(Context, Attrs));
Attrs.clear();
break;
}
@@ -889,9 +891,9 @@ bool BitcodeReader::ParseMetadata() {
}
}
-/// DecodeSignRotatedValue - Decode a signed value stored with the sign bit in
+/// decodeSignRotatedValue - Decode a signed value stored with the sign bit in
/// the LSB for dense VBR encoding.
-static uint64_t DecodeSignRotatedValue(uint64_t V) {
+uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) {
if ((V & 1) == 0)
return V >> 1;
if (V != 1)
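
Sign rotation stores the sign in the least significant bit so that small negative numbers stay small under VBR encoding; the special encoded value 1 (a "-0") stands for INT64_MIN. A standalone round-trip sketch, with the writer-side inverse included for context (negation done in unsigned arithmetic so INT64_MIN is well-defined):

#include <cassert>
#include <cstdint>

// Writer-side inverse: the sign goes to bit 0.
static uint64_t encodeSignRotated(int64_t V) {
  if (V >= 0) return uint64_t(V) << 1;
  return ((~uint64_t(V) + 1) << 1) | 1; // unsigned negation, then rotate
}

// Mirrors decodeSignRotatedValue above; encoded 1 ("-0") means INT64_MIN.
static uint64_t decodeSignRotated(uint64_t V) {
  if ((V & 1) == 0) return V >> 1;
  if (V != 1) return -(V >> 1); // negative value (unsigned wrap is defined)
  return 1ULL << 63;
}

int main() {
  const int64_t Cases[] = {0, 5, -1, -5, INT64_MIN};
  for (int64_t V : Cases)
    assert(int64_t(decodeSignRotated(encodeSignRotated(V))) == V);
  return 0;
}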
@@ -941,7 +943,7 @@ bool BitcodeReader::ResolveGlobalAndAliasInits() {
static APInt ReadWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
SmallVector<uint64_t, 8> Words(Vals.size());
std::transform(Vals.begin(), Vals.end(), Words.begin(),
- DecodeSignRotatedValue);
+ BitcodeReader::decodeSignRotatedValue);
return APInt(TypeBits, Words);
}
@@ -995,7 +997,7 @@ bool BitcodeReader::ParseConstants() {
case bitc::CST_CODE_INTEGER: // INTEGER: [intval]
if (!CurTy->isIntegerTy() || Record.empty())
return Error("Invalid CST_INTEGER record");
- V = ConstantInt::get(CurTy, DecodeSignRotatedValue(Record[0]));
+ V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0]));
break;
case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval]
if (!CurTy->isIntegerTy() || Record.empty())
@@ -1245,7 +1247,9 @@ bool BitcodeReader::ParseConstants() {
V = ConstantExpr::getICmp(Record[3], Op0, Op1);
break;
}
- case bitc::CST_CODE_INLINEASM: {
+ // This maintains backward compatibility with pre-asm-dialect keywords.
+ // FIXME: Remove with the 4.0 release.
+ case bitc::CST_CODE_INLINEASM_OLD: {
if (Record.size() < 2) return Error("Invalid INLINEASM record");
std::string AsmStr, ConstrStr;
bool HasSideEffects = Record[0] & 1;
@@ -1266,6 +1270,31 @@ bool BitcodeReader::ParseConstants() {
AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
break;
}
+ // This version adds support for the asm dialect keywords (e.g.,
+ // inteldialect).
+ case bitc::CST_CODE_INLINEASM: {
+ if (Record.size() < 2) return Error("Invalid INLINEASM record");
+ std::string AsmStr, ConstrStr;
+ bool HasSideEffects = Record[0] & 1;
+ bool IsAlignStack = (Record[0] >> 1) & 1;
+ unsigned AsmDialect = Record[0] >> 2;
+ unsigned AsmStrSize = Record[1];
+ if (2+AsmStrSize >= Record.size())
+ return Error("Invalid INLINEASM record");
+ unsigned ConstStrSize = Record[2+AsmStrSize];
+ if (3+AsmStrSize+ConstStrSize > Record.size())
+ return Error("Invalid INLINEASM record");
+
+ for (unsigned i = 0; i != AsmStrSize; ++i)
+ AsmStr += (char)Record[2+i];
+ for (unsigned i = 0; i != ConstStrSize; ++i)
+ ConstrStr += (char)Record[3+AsmStrSize+i];
+ PointerType *PTy = cast<PointerType>(CurTy);
+ V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
+ AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+ InlineAsm::AsmDialect(AsmDialect));
+ break;
+ }
case bitc::CST_CODE_BLOCKADDRESS:{
if (Record.size() < 3) return Error("Invalid CE_BLOCKADDRESS record");
Type *FnTy = getTypeByID(Record[0]);
@@ -1273,13 +1302,27 @@ bool BitcodeReader::ParseConstants() {
Function *Fn =
dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
if (Fn == 0) return Error("Invalid CE_BLOCKADDRESS record");
-
- GlobalVariable *FwdRef = new GlobalVariable(*Fn->getParent(),
- Type::getInt8Ty(Context),
+
+ // If the function is already parsed we can insert the block address right
+ // away.
+ if (!Fn->empty()) {
+ Function::iterator BBI = Fn->begin(), BBE = Fn->end();
+ for (size_t I = 0, E = Record[2]; I != E; ++I) {
+ if (BBI == BBE)
+ return Error("Invalid blockaddress block #");
+ ++BBI;
+ }
+ V = BlockAddress::get(Fn, BBI);
+ } else {
+ // Otherwise insert a placeholder and remember it so it can be inserted
+ // when the function is parsed.
+ GlobalVariable *FwdRef = new GlobalVariable(*Fn->getParent(),
+ Type::getInt8Ty(Context),
false, GlobalValue::InternalLinkage,
- 0, "");
- BlockAddrFwdRefs[Fn].push_back(std::make_pair(Record[2], FwdRef));
- V = FwdRef;
+ 0, "");
+ BlockAddrFwdRefs[Fn].push_back(std::make_pair(Record[2], FwdRef));
+ V = FwdRef;
+ }
break;
}
}
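
The blockaddress change follows a common two-path pattern for forward references: resolve immediately when the target is already materialized, otherwise hand out a placeholder and record it for later patching. A minimal sketch with plain C++ types (all names illustrative, not LLVM API):

#include <cassert>
#include <map>
#include <string>
#include <vector>

struct Placeholder { std::string ForBlock; };

static std::map<std::string, std::vector<Placeholder*>> FwdRefs;
static std::map<std::string, int> ParsedBlocks; // name -> resolved id

// Returns the resolved id, or -1 and a recorded placeholder when the
// target has not been parsed yet.
static int resolveOrDefer(const std::string &Block, Placeholder *&Out) {
  auto It = ParsedBlocks.find(Block);
  if (It != ParsedBlocks.end()) { Out = nullptr; return It->second; }
  Out = new Placeholder{Block}; // stands in until the body is parsed
  FwdRefs[Block].push_back(Out);
  return -1;
}

int main() {
  ParsedBlocks["entry"] = 0;
  Placeholder *P = nullptr;
  assert(resolveOrDefer("entry", P) == 0 && P == nullptr); // already parsed
  assert(resolveOrDefer("loop", P) == -1 && P != nullptr); // deferred
  assert(FwdRefs["loop"].size() == 1);                     // patched later
  delete P;
  return 0;
}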
@@ -1481,13 +1524,22 @@ bool BitcodeReader::ParseModule(bool Resume) {
// Read a record.
switch (Stream.ReadRecord(Code, Record)) {
default: break; // Default behavior, ignore unknown content.
- case bitc::MODULE_CODE_VERSION: // VERSION: [version#]
+ case bitc::MODULE_CODE_VERSION: { // VERSION: [version#]
if (Record.size() < 1)
return Error("Malformed MODULE_CODE_VERSION");
- // Only version #0 is supported so far.
- if (Record[0] != 0)
- return Error("Unknown bitstream version!");
+ // Only version #0 and #1 are supported so far.
+ unsigned module_version = Record[0];
+ switch (module_version) {
+ default: return Error("Unknown bitstream version!");
+ case 0:
+ UseRelativeIDs = false;
+ break;
+ case 1:
+ UseRelativeIDs = true;
+ break;
+ }
break;
+ }
case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
std::string S;
if (ConvertToString(Record, 0, S))
@@ -1754,13 +1806,6 @@ bool BitcodeReader::ParseModuleTriple(std::string &Triple) {
// Read a record.
switch (Stream.ReadRecord(Code, Record)) {
default: break; // Default behavior, ignore unknown content.
- case bitc::MODULE_CODE_VERSION: // VERSION: [version#]
- if (Record.size() < 1)
- return Error("Malformed MODULE_CODE_VERSION");
- // Only version #0 is supported so far.
- if (Record[0] != 0)
- return Error("Unknown bitstream version!");
- break;
case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
std::string S;
if (ConvertToString(Record, 0, S))
@@ -1973,7 +2018,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *LHS, *RHS;
if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
- getValue(Record, OpNum, LHS->getType(), RHS) ||
+ popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) ||
OpNum+1 > Record.size())
return Error("Invalid BINOP record");
@@ -2088,8 +2133,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *TrueVal, *FalseVal, *Cond;
if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
- getValue(Record, OpNum, TrueVal->getType(), FalseVal) ||
- getValue(Record, OpNum, Type::getInt1Ty(Context), Cond))
+ popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
+ popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond))
return Error("Invalid SELECT record");
I = SelectInst::Create(Cond, TrueVal, FalseVal);
@@ -2103,7 +2148,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *TrueVal, *FalseVal, *Cond;
if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
- getValue(Record, OpNum, TrueVal->getType(), FalseVal) ||
+ popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
getValueTypePair(Record, OpNum, NextValueNo, Cond))
return Error("Invalid SELECT record");
@@ -2128,7 +2173,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Vec, *Idx;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
- getValue(Record, OpNum, Type::getInt32Ty(Context), Idx))
+ popValue(Record, OpNum, NextValueNo, Type::getInt32Ty(Context), Idx))
return Error("Invalid EXTRACTELT record");
I = ExtractElementInst::Create(Vec, Idx);
InstructionList.push_back(I);
@@ -2139,9 +2184,9 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Vec, *Elt, *Idx;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<VectorType>(Vec->getType())->getElementType(), Elt) ||
- getValue(Record, OpNum, Type::getInt32Ty(Context), Idx))
+ popValue(Record, OpNum, NextValueNo, Type::getInt32Ty(Context), Idx))
return Error("Invalid INSERTELT record");
I = InsertElementInst::Create(Vec, Elt, Idx);
InstructionList.push_back(I);
@@ -2152,7 +2197,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Vec1, *Vec2, *Mask;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) ||
- getValue(Record, OpNum, Vec1->getType(), Vec2))
+ popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2))
return Error("Invalid SHUFFLEVEC record");
if (getValueTypePair(Record, OpNum, NextValueNo, Mask))
@@ -2172,7 +2217,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *LHS, *RHS;
if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
- getValue(Record, OpNum, LHS->getType(), RHS) ||
+ popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) ||
OpNum+1 != Record.size())
return Error("Invalid CMP record");
@@ -2217,7 +2262,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
}
else {
BasicBlock *FalseDest = getBasicBlock(Record[1]);
- Value *Cond = getFnValueByID(Record[2], Type::getInt1Ty(Context));
+ Value *Cond = getValue(Record, 2, NextValueNo,
+ Type::getInt1Ty(Context));
if (FalseDest == 0 || Cond == 0)
return Error("Invalid BR record");
I = BranchInst::Create(TrueDest, FalseDest, Cond);
@@ -2233,7 +2279,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
Type *OpTy = getTypeByID(Record[1]);
unsigned ValueBitWidth = cast<IntegerType>(OpTy)->getBitWidth();
- Value *Cond = getFnValueByID(Record[2], OpTy);
+ Value *Cond = getValue(Record, 2, NextValueNo, OpTy);
BasicBlock *Default = getBasicBlock(Record[3]);
if (OpTy == 0 || Cond == 0 || Default == 0)
return Error("Invalid SWITCH record");
@@ -2288,7 +2334,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (Record.size() < 3 || (Record.size() & 1) == 0)
return Error("Invalid SWITCH record");
Type *OpTy = getTypeByID(Record[0]);
- Value *Cond = getFnValueByID(Record[1], OpTy);
+ Value *Cond = getValue(Record, 1, NextValueNo, OpTy);
BasicBlock *Default = getBasicBlock(Record[2]);
if (OpTy == 0 || Cond == 0 || Default == 0)
return Error("Invalid SWITCH record");
@@ -2312,7 +2358,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (Record.size() < 2)
return Error("Invalid INDIRECTBR record");
Type *OpTy = getTypeByID(Record[0]);
- Value *Address = getFnValueByID(Record[1], OpTy);
+ Value *Address = getValue(Record, 1, NextValueNo, OpTy);
if (OpTy == 0 || Address == 0)
return Error("Invalid INDIRECTBR record");
unsigned NumDests = Record.size()-2;
@@ -2354,7 +2400,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
SmallVector<Value*, 16> Ops;
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
- Ops.push_back(getFnValueByID(Record[OpNum], FTy->getParamType(i)));
+ Ops.push_back(getValue(Record, OpNum, NextValueNo,
+ FTy->getParamType(i)));
if (Ops.back() == 0) return Error("Invalid INVOKE record");
}
@@ -2401,7 +2448,14 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
InstructionList.push_back(PN);
for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) {
- Value *V = getFnValueByID(Record[1+i], Ty);
+ Value *V;
+ // With the new function encoding, it is possible that operands have
+ // negative IDs (for forward references). Use a signed VBR
+ // representation to keep the encoding small.
+ if (UseRelativeIDs)
+ V = getValueSigned(Record, 1+i, NextValueNo, Ty);
+ else
+ V = getValue(Record, 1+i, NextValueNo, Ty);
BasicBlock *BB = getBasicBlock(Record[2+i]);
if (!V || !BB) return Error("Invalid PHI record");
PN->addIncoming(V, BB);
@@ -2499,7 +2553,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Val, *Ptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+2 != Record.size())
return Error("Invalid STORE record");
@@ -2513,7 +2567,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Val, *Ptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+4 != Record.size())
return Error("Invalid STOREATOMIC record");
@@ -2536,9 +2590,9 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Ptr, *Cmp, *New;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Cmp) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), New) ||
OpNum+3 != Record.size())
return Error("Invalid CMPXCHG record");
@@ -2556,7 +2610,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Ptr, *Val;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+4 != Record.size())
return Error("Invalid ATOMICRMW record");
@@ -2610,7 +2664,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (FTy->getParamType(i)->isLabelTy())
Args.push_back(getBasicBlock(Record[OpNum]));
else
- Args.push_back(getFnValueByID(Record[OpNum], FTy->getParamType(i)));
+ Args.push_back(getValue(Record, OpNum, NextValueNo,
+ FTy->getParamType(i)));
if (Args.back() == 0) return Error("Invalid CALL record");
}
@@ -2639,7 +2694,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (Record.size() < 3)
return Error("Invalid VAARG record");
Type *OpTy = getTypeByID(Record[0]);
- Value *Op = getFnValueByID(Record[1], OpTy);
+ Value *Op = getValue(Record, 1, NextValueNo, OpTy);
Type *ResTy = getTypeByID(Record[2]);
if (!OpTy || !Op || !ResTy)
return Error("Invalid VAARG record");
@@ -2837,7 +2892,7 @@ bool BitcodeReader::InitStream() {
}
bool BitcodeReader::InitStreamFromBuffer() {
- const unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart();
+ const unsigned char *BufPtr = (const unsigned char*)Buffer->getBufferStart();
const unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
if (Buffer->getBufferSize() & 3) {
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
index e7c4e94..3d5c0eb 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
@@ -179,18 +179,27 @@ class BitcodeReader : public GVMaterializer {
typedef std::pair<unsigned, GlobalVariable*> BlockAddrRefTy;
DenseMap<Function*, std::vector<BlockAddrRefTy> > BlockAddrFwdRefs;
+ /// UseRelativeIDs - Indicates that we are using a new encoding for
+ /// instruction operands where most operands in the current
+ /// FUNCTION_BLOCK are encoded relative to the instruction number,
+ /// for a more compact encoding. Some instruction operands are not
+ /// relative to the instruction ID: basic block numbers and types.
+ /// Once the old-style function blocks have been phased out, we will
+ /// no longer need this flag.
+ bool UseRelativeIDs;
+
public:
explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C)
: Context(C), TheModule(0), Buffer(buffer), BufferOwned(false),
LazyStreamer(0), NextUnreadBit(0), SeenValueSymbolTable(false),
ErrorString(0), ValueList(C), MDValueList(C),
- SeenFirstFunctionBody(false) {
+ SeenFirstFunctionBody(false), UseRelativeIDs(false) {
}
explicit BitcodeReader(DataStreamer *streamer, LLVMContext &C)
: Context(C), TheModule(0), Buffer(0), BufferOwned(false),
LazyStreamer(streamer), NextUnreadBit(0), SeenValueSymbolTable(false),
ErrorString(0), ValueList(C), MDValueList(C),
- SeenFirstFunctionBody(false) {
+ SeenFirstFunctionBody(false), UseRelativeIDs(false) {
}
~BitcodeReader() {
FreeState();
@@ -223,6 +232,9 @@ public:
/// @brief Cheap mechanism to just extract module triple
/// @returns true if an error occurred.
bool ParseTriple(std::string &Triple);
+
+ static uint64_t decodeSignRotatedValue(uint64_t V);
+
private:
Type *getTypeByID(unsigned ID);
Value *getFnValueByID(unsigned ID, Type *Ty) {
@@ -247,6 +259,9 @@ private:
unsigned InstNum, Value *&ResVal) {
if (Slot == Record.size()) return true;
unsigned ValNo = (unsigned)Record[Slot++];
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
if (ValNo < InstNum) {
// If this is not a forward reference, just return the value we already
// have.
@@ -255,20 +270,54 @@ private:
} else if (Slot == Record.size()) {
return true;
}
-
+
unsigned TypeNo = (unsigned)Record[Slot++];
ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo));
return ResVal == 0;
}
- bool getValue(SmallVector<uint64_t, 64> &Record, unsigned &Slot,
- Type *Ty, Value *&ResVal) {
- if (Slot == Record.size()) return true;
- unsigned ValNo = (unsigned)Record[Slot++];
- ResVal = getFnValueByID(ValNo, Ty);
+
+ /// popValue - Read a value out of the specified record from slot 'Slot'.
+ /// Increment Slot past the number of slots used by the value in the record.
+ /// Return true if there is an error.
+ bool popValue(SmallVector<uint64_t, 64> &Record, unsigned &Slot,
+ unsigned InstNum, Type *Ty, Value *&ResVal) {
+ if (getValue(Record, Slot, InstNum, Ty, ResVal))
+ return true;
+ // All values currently take a single record slot.
+ ++Slot;
+ return false;
+ }
+
+ /// getValue -- Like popValue, but does not increment the Slot number.
+ bool getValue(SmallVector<uint64_t, 64> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty, Value *&ResVal) {
+ ResVal = getValue(Record, Slot, InstNum, Ty);
return ResVal == 0;
}
-
+ /// getValue -- Version of getValue that returns ResVal directly,
+ /// or 0 if there is an error.
+ Value *getValue(SmallVector<uint64_t, 64> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty) {
+ if (Slot == Record.size()) return 0;
+ unsigned ValNo = (unsigned)Record[Slot];
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
+ return getFnValueByID(ValNo, Ty);
+ }
+
+ /// getValueSigned -- Like getValue, but decodes signed VBRs.
+ Value *getValueSigned(SmallVector<uint64_t, 64> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty) {
+ if (Slot == Record.size()) return 0;
+ unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]);
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
+ return getFnValueByID(ValNo, Ty);
+ }
+
bool ParseModule(bool Resume);
bool ParseAttributeBlock();
bool ParseTypeTable();
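These helpers are the core of the new operand encoding: when UseRelativeIDs is set, each operand slot holds InstNum - ValNo, and the subtraction is simply undone on read; popValue additionally advances the slot cursor, while getValue only peeks. Forward references wrap around in unsigned arithmetic and wrap back on decode, so ordinary VBRs still round-trip; only records written with signed VBRs (PHI) need getValueSigned. A minimal sketch, assuming 32-bit value numbers:

    unsigned encodeRelative(unsigned InstNum, unsigned ValNo) {
      return InstNum - ValNo;            // wraps for forward references
    }
    unsigned decodeRelative(unsigned InstNum, unsigned Delta) {
      return InstNum - Delta;            // wraps back to the original ValNo
    }
    // encodeRelative(10, 7)  == 3 (backward refs become small numbers)
    // encodeRelative(10, 12) == 0xFFFFFFFE, yet
    // decodeRelative(10, 0xFFFFFFFE) == 12 recovers the forward reference.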
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 5b1725f..60c657a 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -41,8 +41,6 @@ EnablePreserveUseListOrdering("enable-bc-uselist-preserve",
/// These are manifest constants used by the bitcode writer. They do not need to
/// be kept in sync with the reader, but need to be consistent within this file.
enum {
- CurVersion = 0,
-
// VALUE_SYMTAB_BLOCK abbrev id's.
VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
VST_ENTRY_7_ABBREV,
@@ -177,7 +175,7 @@ static void WriteAttributeTable(const ValueEnumerator &VE,
for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i) {
const AttributeWithIndex &PAWI = A.getSlot(i);
Record.push_back(PAWI.Index);
- Record.push_back(Attribute::encodeLLVMAttributesForBitcode(PAWI.Attrs));
+ Record.push_back(Attributes::encodeLLVMAttributesForBitcode(PAWI.Attrs));
}
Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
@@ -365,7 +363,7 @@ static unsigned getEncodedLinkage(const GlobalValue *GV) {
case GlobalValue::AvailableExternallyLinkage: return 12;
case GlobalValue::LinkerPrivateLinkage: return 13;
case GlobalValue::LinkerPrivateWeakLinkage: return 14;
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage: return 15;
+ case GlobalValue::LinkOnceODRAutoHideLinkage: return 15;
}
llvm_unreachable("Invalid linkage");
}
@@ -722,16 +720,20 @@ static void WriteModuleMetadataStore(const Module *M, BitstreamWriter &Stream) {
Stream.ExitBlock();
}
+static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) {
+ if ((int64_t)V >= 0)
+ Vals.push_back(V << 1);
+ else
+ Vals.push_back((-V << 1) | 1);
+}
+
static void EmitAPInt(SmallVectorImpl<uint64_t> &Vals,
unsigned &Code, unsigned &AbbrevToUse, const APInt &Val,
bool EmitSizeForWideNumbers = false
) {
if (Val.getBitWidth() <= 64) {
uint64_t V = Val.getSExtValue();
- if ((int64_t)V >= 0)
- Vals.push_back(V << 1);
- else
- Vals.push_back((-V << 1) | 1);
+ emitSignedInt64(Vals, V);
Code = bitc::CST_CODE_INTEGER;
AbbrevToUse = CONSTANTS_INTEGER_ABBREV;
} else {
@@ -747,11 +749,7 @@ static void EmitAPInt(SmallVectorImpl<uint64_t> &Vals,
const uint64_t *RawWords = Val.getRawData();
for (unsigned i = 0; i != NWords; ++i) {
- int64_t V = RawWords[i];
- if (V >= 0)
- Vals.push_back(V << 1);
- else
- Vals.push_back((-V << 1) | 1);
+ emitSignedInt64(Vals, RawWords[i]);
}
Code = bitc::CST_CODE_WIDE_INTEGER;
}
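emitSignedInt64 is the writer's half of the sign-rotated (zigzag-style) integer encoding: the sign moves into bit 0 so small negative values stay small VBRs. The reader's half is the decodeSignRotatedValue declared in BitcodeReader.h above; reconstructed from this encoder, the round trip looks roughly like:

    uint64_t encodeSignRotated(int64_t V) {
      return V >= 0 ? uint64_t(V) << 1 : (uint64_t(-V) << 1) | 1;
    }
    int64_t decodeSignRotated(uint64_t V) {
      return (V & 1) ? -int64_t(V >> 1) : int64_t(V >> 1);
    }
    // encodeSignRotated(0) == 0, encodeSignRotated(1) == 2,
    // encodeSignRotated(-1) == 3: magnitude stays small either way.
    // The real decoder must also special-case INT64_MIN, whose encoding
    // (-V << 1) | 1 collapses to the raw value 1.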
@@ -814,7 +812,8 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
Record.push_back(unsigned(IA->hasSideEffects()) |
- unsigned(IA->isAlignStack()) << 1);
+ unsigned(IA->isAlignStack()) << 1 |
+ unsigned(IA->getDialect()&1) << 2);
// Add the asm string.
const std::string &AsmStr = IA->getAsmString();
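The first word of the INST_INLINEASM record is now a three-bit flag field; the dialect bit is what this hunk adds. The layout implied by the expression above (higher bits currently unused):

    unsigned encodeAsmFlags(bool HasSideEffects, bool IsAlignStack,
                            unsigned Dialect /* 0 = AT&T, 1 = Intel */) {
      return unsigned(HasSideEffects) |      // bit 0
             unsigned(IsAlignStack) << 1 |   // bit 1
             (Dialect & 1) << 2;             // bit 2
    }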
@@ -1024,12 +1023,13 @@ static void WriteModuleConstants(const ValueEnumerator &VE,
///
/// This function adds V's value ID to Vals. If the value ID is higher than the
/// instruction ID, then it is a forward reference, and it also includes the
-/// type ID.
+/// type ID. The value ID that is written is encoded relative to the InstID.
static bool PushValueAndType(const Value *V, unsigned InstID,
SmallVector<unsigned, 64> &Vals,
ValueEnumerator &VE) {
unsigned ValID = VE.getValueID(V);
- Vals.push_back(ValID);
+ // Make encoding relative to the InstID.
+ Vals.push_back(InstID - ValID);
if (ValID >= InstID) {
Vals.push_back(VE.getTypeID(V->getType()));
return true;
@@ -1037,6 +1037,30 @@ static bool PushValueAndType(const Value *V, unsigned InstID,
return false;
}
+/// pushValue - Like PushValueAndType, but where the type of the value is
+/// omitted (perhaps it was already encoded in an earlier operand).
+static void pushValue(const Value *V, unsigned InstID,
+ SmallVector<unsigned, 64> &Vals,
+ ValueEnumerator &VE) {
+ unsigned ValID = VE.getValueID(V);
+ Vals.push_back(InstID - ValID);
+}
+
+static void pushValue64(const Value *V, unsigned InstID,
+ SmallVector<uint64_t, 128> &Vals,
+ ValueEnumerator &VE) {
+ uint64_t ValID = VE.getValueID(V);
+ Vals.push_back(InstID - ValID);
+}
+
+static void pushValueSigned(const Value *V, unsigned InstID,
+ SmallVector<uint64_t, 128> &Vals,
+ ValueEnumerator &VE) {
+ unsigned ValID = VE.getValueID(V);
+ int64_t diff = ((int32_t)InstID - (int32_t)ValID);
+ emitSignedInt64(Vals, diff);
+}
+
/// WriteInstruction - Emit an instruction to the specified stream.
static void WriteInstruction(const Instruction &I, unsigned InstID,
ValueEnumerator &VE, BitstreamWriter &Stream,
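pushValueSigned computes the PHI operand delta in 32-bit signed arithmetic before sign-rotating it, which assumes value IDs fit in 32 bits; a forward reference (ValID > InstID) then yields a small negative delta instead of a huge unsigned one. With invented numbers:

    unsigned InstID = 100, ValID = 103;              // operand defined later
    int64_t Diff = int32_t(InstID) - int32_t(ValID); // -3
    // emitSignedInt64(-3) stores (3 << 1) | 1 == 7, a one-chunk VBR,
    // where the unsigned relative ID would have been 0xFFFFFFFD.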
@@ -1057,7 +1081,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
Code = bitc::FUNC_CODE_INST_BINOP;
if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE))
AbbrevToUse = FUNCTION_INST_BINOP_ABBREV;
- Vals.push_back(VE.getValueID(I.getOperand(1)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
Vals.push_back(GetEncodedBinaryOpcode(I.getOpcode()));
uint64_t Flags = GetOptimizationFlags(&I);
if (Flags != 0) {
@@ -1095,32 +1119,32 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::Select:
Code = bitc::FUNC_CODE_INST_VSELECT;
PushValueAndType(I.getOperand(1), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(2)));
+ pushValue(I.getOperand(2), InstID, Vals, VE);
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
break;
case Instruction::ExtractElement:
Code = bitc::FUNC_CODE_INST_EXTRACTELT;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
break;
case Instruction::InsertElement:
Code = bitc::FUNC_CODE_INST_INSERTELT;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- Vals.push_back(VE.getValueID(I.getOperand(2)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
+ pushValue(I.getOperand(2), InstID, Vals, VE);
break;
case Instruction::ShuffleVector:
Code = bitc::FUNC_CODE_INST_SHUFFLEVEC;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- Vals.push_back(VE.getValueID(I.getOperand(2)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
+ pushValue(I.getOperand(2), InstID, Vals, VE);
break;
case Instruction::ICmp:
case Instruction::FCmp:
// compare returning Int1Ty or vector of Int1Ty
Code = bitc::FUNC_CODE_INST_CMP2;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
Vals.push_back(cast<CmpInst>(I).getPredicate());
break;
@@ -1146,7 +1170,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
Vals.push_back(VE.getValueID(II.getSuccessor(0)));
if (II.isConditional()) {
Vals.push_back(VE.getValueID(II.getSuccessor(1)));
- Vals.push_back(VE.getValueID(II.getCondition()));
+ pushValue(II.getCondition(), InstID, Vals, VE);
}
}
break;
@@ -1163,7 +1187,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
Vals64.push_back(SwitchRecordHeader);
Vals64.push_back(VE.getTypeID(SI.getCondition()->getType()));
- Vals64.push_back(VE.getValueID(SI.getCondition()));
+ pushValue64(SI.getCondition(), InstID, Vals64, VE);
Vals64.push_back(VE.getValueID(SI.getDefaultDest()));
Vals64.push_back(SI.getNumCases());
for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
@@ -1214,7 +1238,9 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::IndirectBr:
Code = bitc::FUNC_CODE_INST_INDIRECTBR;
Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
+ // Encode the address operand as relative, but not the basic blocks.
+ pushValue(I.getOperand(0), InstID, Vals, VE);
+ for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i)
Vals.push_back(VE.getValueID(I.getOperand(i)));
break;
@@ -1233,7 +1259,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
// Emit value #'s for the fixed parameters.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i))); // fixed param.
+ pushValue(I.getOperand(i), InstID, Vals, VE); // fixed param.
// Emit type/value pairs for varargs params.
if (FTy->isVarArg()) {
@@ -1255,12 +1281,19 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::PHI: {
const PHINode &PN = cast<PHINode>(I);
Code = bitc::FUNC_CODE_INST_PHI;
- Vals.push_back(VE.getTypeID(PN.getType()));
+ // With the newer instruction encoding, forward references could give
+ // negative valued IDs. This is most common for PHIs, so we use
+ // signed VBRs.
+ SmallVector<uint64_t, 128> Vals64;
+ Vals64.push_back(VE.getTypeID(PN.getType()));
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
- Vals.push_back(VE.getValueID(PN.getIncomingValue(i)));
- Vals.push_back(VE.getValueID(PN.getIncomingBlock(i)));
+ pushValueSigned(PN.getIncomingValue(i), InstID, Vals64, VE);
+ Vals64.push_back(VE.getValueID(PN.getIncomingBlock(i)));
}
- break;
+ // Emit a Vals64 vector and exit.
+ Stream.EmitRecord(Code, Vals64, AbbrevToUse);
+ Vals64.clear();
+ return;
}
case Instruction::LandingPad: {
@@ -1310,7 +1343,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
else
Code = bitc::FUNC_CODE_INST_STORE;
PushValueAndType(I.getOperand(1), InstID, Vals, VE); // ptrty + ptr
- Vals.push_back(VE.getValueID(I.getOperand(0))); // val.
+ pushValue(I.getOperand(0), InstID, Vals, VE); // val.
Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
Vals.push_back(cast<StoreInst>(I).isVolatile());
if (cast<StoreInst>(I).isAtomic()) {
@@ -1321,8 +1354,8 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::AtomicCmpXchg:
Code = bitc::FUNC_CODE_INST_CMPXCHG;
PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr
- Vals.push_back(VE.getValueID(I.getOperand(1))); // cmp.
- Vals.push_back(VE.getValueID(I.getOperand(2))); // newval.
+ pushValue(I.getOperand(1), InstID, Vals, VE); // cmp.
+ pushValue(I.getOperand(2), InstID, Vals, VE); // newval.
Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
Vals.push_back(GetEncodedOrdering(
cast<AtomicCmpXchgInst>(I).getOrdering()));
@@ -1332,7 +1365,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::AtomicRMW:
Code = bitc::FUNC_CODE_INST_ATOMICRMW;
PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr
- Vals.push_back(VE.getValueID(I.getOperand(1))); // val.
+ pushValue(I.getOperand(1), InstID, Vals, VE); // val.
Vals.push_back(GetEncodedRMWOperation(
cast<AtomicRMWInst>(I).getOperation()));
Vals.push_back(cast<AtomicRMWInst>(I).isVolatile());
@@ -1357,8 +1390,13 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
PushValueAndType(CI.getCalledValue(), InstID, Vals, VE); // Callee
// Emit value #'s for the fixed parameters.
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Vals.push_back(VE.getValueID(CI.getArgOperand(i))); // fixed param.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
+ // Check for labels (can happen with asm labels).
+ if (FTy->getParamType(i)->isLabelTy())
+ Vals.push_back(VE.getValueID(CI.getArgOperand(i)));
+ else
+ pushValue(CI.getArgOperand(i), InstID, Vals, VE); // fixed param.
+ }
// Emit type/value pairs for varargs params.
if (FTy->isVarArg()) {
@@ -1371,7 +1409,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::VAArg:
Code = bitc::FUNC_CODE_INST_VAARG;
Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); // valistty
- Vals.push_back(VE.getValueID(I.getOperand(0))); // valist.
+ pushValue(I.getOperand(0), InstID, Vals, VE); // valist.
Vals.push_back(VE.getTypeID(I.getType())); // restype.
break;
}
@@ -1513,8 +1551,8 @@ static void WriteFunction(const Function &F, ValueEnumerator &VE,
// Emit blockinfo, which defines the standard abbreviations etc.
static void WriteBlockInfo(const ValueEnumerator &VE, BitstreamWriter &Stream) {
// We only want to emit block info records for blocks that have multiple
- // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK. Other
- // blocks can defined their abbrevs inline.
+ // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK.
+ // Other blocks can define their abbrevs inline.
Stream.EnterBlockInfoBlock(2);
{ // 8-bit fixed-width VST_ENTRY/VST_BBENTRY strings.
@@ -1772,12 +1810,10 @@ static void WriteModuleUseLists(const Module *M, ValueEnumerator &VE,
static void WriteModule(const Module *M, BitstreamWriter &Stream) {
Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
- // Emit the version number if it is non-zero.
- if (CurVersion) {
- SmallVector<unsigned, 1> Vals;
- Vals.push_back(CurVersion);
- Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
- }
+ SmallVector<unsigned, 1> Vals;
+ unsigned CurVersion = 1;
+ Vals.push_back(CurVersion);
+ Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
// Analyze the module, enumerating globals, functions, etc.
ValueEnumerator VE(M);
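Emitting MODULE_CODE_VERSION unconditionally, with the value bumped to 1, is what lets the reader distinguish the new relative operand encoding from old bitcode; version-0 files keep absolute IDs. A hedged sketch of the check the reader side is expected to perform (that code is outside this diff):

    bool useRelativeIDsForVersion(uint64_t ModuleVersion) {
      // Version 0: absolute operand IDs; version >= 1: relative IDs.
      return ModuleVersion >= 1;
    }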
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
index a6ca536..75468e6 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
@@ -78,9 +78,9 @@ private:
unsigned FirstFuncConstantID;
unsigned FirstInstID;
-
- ValueEnumerator(const ValueEnumerator &); // DO NOT IMPLEMENT
- void operator=(const ValueEnumerator &); // DO NOT IMPLEMENT
+
+ ValueEnumerator(const ValueEnumerator &) LLVM_DELETED_FUNCTION;
+ void operator=(const ValueEnumerator &) LLVM_DELETED_FUNCTION;
public:
ValueEnumerator(const Module *M);
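The DO NOT IMPLEMENT comment convention gives way to the LLVM_DELETED_FUNCTION macro. Roughly per llvm/Support/Compiler.h of this era (a sketch, not the verbatim header):

    #if __has_feature(cxx_deleted_functions) || \
        defined(__GXX_EXPERIMENTAL_CXX0X__)
    #define LLVM_DELETED_FUNCTION = delete   // compile-time error on use
    #else
    #define LLVM_DELETED_FUNCTION            // falls back to the old idiom:
    #endif                                   // private and unimplemented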
diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 205480a..7a1c049 100644
--- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -635,7 +635,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
--R;
const unsigned NewSuperReg = Order[R];
// Don't consider non-allocatable registers
- if (!RegClassInfo.isAllocatable(NewSuperReg)) continue;
+ if (!MRI.isAllocatable(NewSuperReg)) continue;
// Don't replace a register with itself.
if (NewSuperReg == SuperReg) continue;
@@ -818,7 +818,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
DEBUG(dbgs() << "\tAntidep reg: " << TRI->getName(AntiDepReg));
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
- if (!RegClassInfo.isAllocatable(AntiDepReg)) {
+ if (!MRI.isAllocatable(AntiDepReg)) {
// Don't break anti-dependencies on non-allocatable registers.
DEBUG(dbgs() << " (non-allocatable)\n");
continue;
diff --git a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
index 32ad34a..7cde136 100644
--- a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
+++ b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
@@ -29,6 +29,7 @@ AllocationOrder::AllocationOrder(unsigned VirtReg,
const TargetRegisterClass *RC = VRM.getRegInfo().getRegClass(VirtReg);
std::pair<unsigned, unsigned> HintPair =
VRM.getRegInfo().getRegAllocationHint(VirtReg);
+ const MachineRegisterInfo &MRI = VRM.getRegInfo();
// HintPair.second is a register, phys or virt.
Hint = HintPair.second;
@@ -52,7 +53,7 @@ AllocationOrder::AllocationOrder(unsigned VirtReg,
unsigned *P = new unsigned[Order.size()];
Begin = P;
for (unsigned i = 0; i != Order.size(); ++i)
- if (!RCI.isReserved(Order[i]))
+ if (!MRI.isReserved(Order[i]))
*P++ = Order[i];
End = P;
@@ -69,7 +70,7 @@ AllocationOrder::AllocationOrder(unsigned VirtReg,
// The hint must be a valid physreg for allocation.
if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) ||
- !RC->contains(Hint) || RCI.isReserved(Hint)))
+ !RC->contains(Hint) || MRI.isReserved(Hint)))
Hint = 0;
}
diff --git a/contrib/llvm/lib/CodeGen/Analysis.cpp b/contrib/llvm/lib/CodeGen/Analysis.cpp
index 447f398..5162ad7 100644
--- a/contrib/llvm/lib/CodeGen/Analysis.cpp
+++ b/contrib/llvm/lib/CodeGen/Analysis.cpp
@@ -21,7 +21,7 @@
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/ErrorHandling.h"
@@ -79,7 +79,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
+ const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
@@ -91,7 +91,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
- uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
+ uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
StartingOffset + i * EltSize);
@@ -314,11 +314,13 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
// the return. Ignore noalias because it doesn't affect the call sequence.
const Function *F = ExitBB->getParent();
Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
- if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
+ if (AttrBuilder(CalleeRetAttr).removeAttribute(Attributes::NoAlias) !=
+ AttrBuilder(CallerRetAttr).removeAttribute(Attributes::NoAlias))
return false;
// It's not safe to eliminate the sign / zero extension of the return value.
- if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ if (CallerRetAttr.hasAttribute(Attributes::ZExt) ||
+ CallerRetAttr.hasAttribute(Attributes::SExt))
return false;
// Otherwise, make sure the unmodified return value of I is the return value.
@@ -354,11 +356,13 @@ bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
// Conservatively require the attributes of the call to match those of
// the return. Ignore noalias because it doesn't affect the call sequence.
Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
- if (CallerRetAttr & ~Attribute::NoAlias)
+ if (AttrBuilder(CallerRetAttr)
+ .removeAttribute(Attributes::NoAlias).hasAttributes())
return false;
// It's not safe to eliminate the sign / zero extension of the return value.
- if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ if (CallerRetAttr.hasAttribute(Attributes::ZExt) ||
+ CallerRetAttr.hasAttribute(Attributes::SExt))
return false;
// Check if the only use is a function return node.
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
index bf5d8c4..b2ebf04 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
@@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index d9be7a1..d74a703 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -33,7 +33,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
@@ -67,7 +67,7 @@ static gcp_map_type &getGCMap(void *&P) {
/// getGVAlignmentLog2 - Return the alignment to use for the specified global
/// value in log2 form. This rounds up to the preferred alignment if possible
/// and legal.
-static unsigned getGVAlignmentLog2(const GlobalValue *GV, const TargetData &TD,
+static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &TD,
unsigned InBits = 0) {
unsigned NumBits = 0;
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
@@ -131,9 +131,9 @@ const TargetLoweringObjectFile &AsmPrinter::getObjFileLowering() const {
}
-/// getTargetData - Return information about data layout.
-const TargetData &AsmPrinter::getTargetData() const {
- return *TM.getTargetData();
+/// getDataLayout - Return information about data layout.
+const DataLayout &AsmPrinter::getDataLayout() const {
+ return *TM.getDataLayout();
}
/// getCurrentSection() - Return the current section we are emitting to.
@@ -160,7 +160,7 @@ bool AsmPrinter::doInitialization(Module &M) {
const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
.Initialize(OutContext, TM);
- Mang = new Mangler(OutContext, *TM.getTargetData());
+ Mang = new Mangler(OutContext, *TM.getDataLayout());
// Allow the target to emit any magic that it wants at the start of the file.
EmitStartOfAsmFile(M);
@@ -213,16 +213,16 @@ void AsmPrinter::EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const {
case GlobalValue::CommonLinkage:
case GlobalValue::LinkOnceAnyLinkage:
case GlobalValue::LinkOnceODRLinkage:
+ case GlobalValue::LinkOnceODRAutoHideLinkage:
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
case GlobalValue::LinkerPrivateWeakLinkage:
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
if (MAI->getWeakDefDirective() != 0) {
// .globl _foo
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
if ((GlobalValue::LinkageTypes)Linkage !=
- GlobalValue::LinkerPrivateWeakDefAutoLinkage)
+ GlobalValue::LinkOnceODRAutoHideLinkage)
// .weak_definition _foo
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_WeakDefinition);
else
@@ -280,7 +280,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM);
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
// If the alignment is specified, we *must* obey it. Overaligning a global
@@ -312,8 +312,8 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
return;
}
- if (MAI->getLCOMMDirectiveType() != LCOMM::None &&
- (MAI->getLCOMMDirectiveType() != LCOMM::NoAlignment || Align == 1)) {
+ if (Align == 1 ||
+ MAI->getLCOMMDirectiveAlignmentType() != LCOMM::NoAlignment) {
// .lcomm _foo, 42
OutStreamer.EmitLocalCommonSymbol(GVSym, Size, Align);
return;
@@ -482,9 +482,8 @@ void AsmPrinter::EmitFunctionEntryLabel() {
"' label emitted multiple times to assembly file");
}
-
-/// EmitComments - Pretty-print comments for instructions.
-static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
+/// emitComments - Pretty-print comments for instructions.
+static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
const MachineFunction *MF = MI.getParent()->getParent();
const TargetMachine &TM = MF->getTarget();
@@ -519,16 +518,16 @@ static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
CommentOS << " Reload Reuse\n";
}
-/// EmitImplicitDef - This method emits the specified machine instruction
+/// emitImplicitDef - This method emits the specified machine instruction
/// that is an implicit def.
-static void EmitImplicitDef(const MachineInstr *MI, AsmPrinter &AP) {
+static void emitImplicitDef(const MachineInstr *MI, AsmPrinter &AP) {
unsigned RegNo = MI->getOperand(0).getReg();
AP.OutStreamer.AddComment(Twine("implicit-def: ") +
AP.TM.getRegisterInfo()->getName(RegNo));
AP.OutStreamer.AddBlankLine();
}
-static void EmitKill(const MachineInstr *MI, AsmPrinter &AP) {
+static void emitKill(const MachineInstr *MI, AsmPrinter &AP) {
std::string Str = "kill:";
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &Op = MI->getOperand(i);
@@ -541,10 +540,10 @@ static void EmitKill(const MachineInstr *MI, AsmPrinter &AP) {
AP.OutStreamer.AddBlankLine();
}
-/// EmitDebugValueComment - This method handles the target-independent form
+/// emitDebugValueComment - This method handles the target-independent form
/// of DBG_VALUE, returning true if it was able to do so. A false return
/// means the target will need to handle MI in EmitInstruction.
-static bool EmitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
+static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
// This code handles only the 3-operand target-independent form.
if (MI->getNumOperands() != 3)
return false;
@@ -674,7 +673,7 @@ void AsmPrinter::EmitFunctionBody() {
}
if (isVerbose())
- EmitComments(*II, OutStreamer.GetCommentOS());
+ emitComments(*II, OutStreamer.GetCommentOS());
switch (II->getOpcode()) {
case TargetOpcode::PROLOG_LABEL:
@@ -690,15 +689,15 @@ void AsmPrinter::EmitFunctionBody() {
break;
case TargetOpcode::DBG_VALUE:
if (isVerbose()) {
- if (!EmitDebugValueComment(II, *this))
+ if (!emitDebugValueComment(II, *this))
EmitInstruction(II);
}
break;
case TargetOpcode::IMPLICIT_DEF:
- if (isVerbose()) EmitImplicitDef(II, *this);
+ if (isVerbose()) emitImplicitDef(II, *this);
break;
case TargetOpcode::KILL:
- if (isVerbose()) EmitKill(II, *this);
+ if (isVerbose()) emitKill(II, *this);
break;
default:
if (!TM.hasMCUseLoc())
@@ -992,7 +991,7 @@ void AsmPrinter::EmitConstantPool() {
Kind = SectionKind::getReadOnlyWithRelLocal();
break;
case 0:
- switch (TM.getTargetData()->getTypeAllocSize(CPE.getType())) {
+ switch (TM.getDataLayout()->getTypeAllocSize(CPE.getType())) {
case 4: Kind = SectionKind::getMergeableConst4(); break;
case 8: Kind = SectionKind::getMergeableConst8(); break;
case 16: Kind = SectionKind::getMergeableConst16();break;
@@ -1038,7 +1037,7 @@ void AsmPrinter::EmitConstantPool() {
OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/);
Type *Ty = CPE.getType();
- Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
+ Offset = NewOffset + TM.getDataLayout()->getTypeAllocSize(Ty);
OutStreamer.EmitLabel(GetCPISymbol(CPI));
if (CPE.isMachineConstantPoolEntry())
@@ -1081,7 +1080,12 @@ void AsmPrinter::EmitJumpTableInfo() {
JTInDiffSection = true;
}
- EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getTargetData())));
+ EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getDataLayout())));
+
+ // Jump tables in code sections are marked with a data_region directive
+ // where that's supported.
+ if (!JTInDiffSection)
+ OutStreamer.EmitDataRegion(MCDR_DataRegionJT32);
for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
@@ -1123,6 +1127,8 @@ void AsmPrinter::EmitJumpTableInfo() {
for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii)
EmitJumpTableEntry(MJTI, JTBBs[ii], JTI);
}
+ if (!JTInDiffSection)
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
}
/// EmitJumpTableEntry - Emit a jump table entry for the specified MBB to the
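The paired EmitDataRegion calls bracket jump tables that are placed in the text section, so tools such as disassemblers can tell the table words are data rather than instructions; the JT32 variant marks 32-bit entries. The pattern added above, in sketch form (on streamers without data-region support the calls are expected to be ignored):

    OutStreamer.EmitDataRegion(MCDR_DataRegionJT32); // e.g. ".data_region jt32"
    // ... jump table entries ...
    OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);  // e.g. ".end_data_region"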
@@ -1190,7 +1196,7 @@ void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
assert(Value && "Unknown entry kind!");
- unsigned EntrySize = MJTI->getEntrySize(*TM.getTargetData());
+ unsigned EntrySize = MJTI->getEntrySize(*TM.getDataLayout());
OutStreamer.EmitValue(Value, EntrySize, /*addrspace*/0);
}
@@ -1292,7 +1298,7 @@ void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) {
}
// Emit the function pointers in the target-specific order
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
unsigned Align = Log2_32(TD->getPointerPrefAlignment());
std::stable_sort(Structors.begin(), Structors.end(), priority_order);
for (unsigned i = 0, e = Structors.size(); i != e; ++i) {
@@ -1408,7 +1414,7 @@ void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
// if required for correctness.
//
void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const {
- if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getTargetData(), NumBits);
+ if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getDataLayout(), NumBits);
if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment.
@@ -1422,9 +1428,9 @@ void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const {
// Constant emission.
//===----------------------------------------------------------------------===//
-/// LowerConstant - Lower the specified LLVM Constant to an MCExpr.
+/// lowerConstant - Lower the specified LLVM Constant to an MCExpr.
///
-static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
+static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
MCContext &Ctx = AP.OutContext;
if (CV->isNullValue() || isa<UndefValue>(CV))
@@ -1447,12 +1453,12 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
switch (CE->getOpcode()) {
default:
// If the code isn't optimized, there may be outstanding folding
- // opportunities. Attempt to fold the expression using TargetData as a
+ // opportunities. Attempt to fold the expression using DataLayout as a
// last resort before giving up.
if (Constant *C =
- ConstantFoldConstantExpression(CE, AP.TM.getTargetData()))
+ ConstantFoldConstantExpression(CE, AP.TM.getDataLayout()))
if (C != CE)
- return LowerConstant(C, AP);
+ return lowerConstant(C, AP);
// Otherwise report the problem to the user.
{
@@ -1464,21 +1470,20 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
report_fatal_error(OS.str());
}
case Instruction::GetElementPtr: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Generate a symbolic expression for the byte address
const Constant *PtrVal = CE->getOperand(0);
SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
int64_t Offset = TD.getIndexedOffset(PtrVal->getType(), IdxVec);
- const MCExpr *Base = LowerConstant(CE->getOperand(0), AP);
+ const MCExpr *Base = lowerConstant(CE->getOperand(0), AP);
if (Offset == 0)
return Base;
// Truncate/sext the offset to the pointer size.
- if (TD.getPointerSizeInBits() != 64) {
- int SExtAmount = 64-TD.getPointerSizeInBits();
- Offset = (Offset << SExtAmount) >> SExtAmount;
- }
+ unsigned Width = TD.getPointerSizeInBits();
+ if (Width < 64)
+ Offset = SignExtend64(Offset, Width);
return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx),
Ctx);
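SignExtend64 replaces the open-coded shift pair with a named helper that sign-extends the low Width bits of the offset; per llvm/Support/MathExtras.h it amounts to (sketch, assuming 0 < B <= 64):

    int64_t signExtend64(uint64_t X, unsigned B) {
      return int64_t(X << (64 - B)) >> (64 - B);
    }
    // e.g. signExtend64(0xFFFFFFFF, 32) == -1, so a GEP offset that wrapped
    // in 32-bit pointer arithmetic stays negative instead of becoming 2^32-1.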
@@ -1491,26 +1496,26 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
// is reasonable to treat their delta as a 32-bit value.
// FALL THROUGH.
case Instruction::BitCast:
- return LowerConstant(CE->getOperand(0), AP);
+ return lowerConstant(CE->getOperand(0), AP);
case Instruction::IntToPtr: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Handle casts to pointers by changing them into casts to the appropriate
// integer type. This promotes constant folding and simplifies this code.
Constant *Op = CE->getOperand(0);
Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
false/*ZExt*/);
- return LowerConstant(Op, AP);
+ return lowerConstant(Op, AP);
}
case Instruction::PtrToInt: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Support only foldable casts to/from pointers that can be eliminated by
// changing the pointer to the appropriately sized integer type.
Constant *Op = CE->getOperand(0);
Type *Ty = CE->getType();
- const MCExpr *OpExpr = LowerConstant(Op, AP);
+ const MCExpr *OpExpr = lowerConstant(Op, AP);
// We can emit the pointer value into this slot if the slot is an
// integer slot equal to the size of the pointer.
@@ -1536,8 +1541,8 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
- const MCExpr *LHS = LowerConstant(CE->getOperand(0), AP);
- const MCExpr *RHS = LowerConstant(CE->getOperand(1), AP);
+ const MCExpr *LHS = lowerConstant(CE->getOperand(0), AP);
+ const MCExpr *RHS = lowerConstant(CE->getOperand(1), AP);
switch (CE->getOpcode()) {
default: llvm_unreachable("Unknown binary operator constant cast expr");
case Instruction::Add: return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx);
@@ -1554,7 +1559,7 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
}
}
-static void EmitGlobalConstantImpl(const Constant *C, unsigned AddrSpace,
+static void emitGlobalConstantImpl(const Constant *C, unsigned AddrSpace,
AsmPrinter &AP);
/// isRepeatedByteSequence - Determine whether the given value is
@@ -1578,7 +1583,7 @@ static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64) return -1;
- uint64_t Size = TM.getTargetData()->getTypeAllocSize(V->getType());
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSize(V->getType());
uint64_t Value = CI->getZExtValue();
// Make sure the constant is at least 8 bits long and has a power
@@ -1616,13 +1621,13 @@ static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) {
return -1;
}
-static void EmitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
+static void emitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
unsigned AddrSpace,AsmPrinter &AP){
// See if we can aggregate this into a .fill, if so, emit it as such.
int Value = isRepeatedByteSequence(CDS, AP.TM);
if (Value != -1) {
- uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CDS->getType());
+ uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CDS->getType());
// Don't emit a 1-byte object as a .fill.
if (Bytes > 1)
return AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
@@ -1672,7 +1677,7 @@ static void EmitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
}
}
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
unsigned Size = TD.getTypeAllocSize(CDS->getType());
unsigned EmittedSize = TD.getTypeAllocSize(CDS->getType()->getElementType()) *
CDS->getNumElements();
@@ -1681,28 +1686,28 @@ static void EmitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
}
-static void EmitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace,
+static void emitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace,
AsmPrinter &AP) {
// See if we can aggregate some values. Make sure it can be
// represented as a series of bytes of the constant value.
int Value = isRepeatedByteSequence(CA, AP.TM);
if (Value != -1) {
- uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CA->getType());
+ uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CA->getType());
AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
}
else {
for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i)
- EmitGlobalConstantImpl(CA->getOperand(i), AddrSpace, AP);
+ emitGlobalConstantImpl(CA->getOperand(i), AddrSpace, AP);
}
}
-static void EmitGlobalConstantVector(const ConstantVector *CV,
+static void emitGlobalConstantVector(const ConstantVector *CV,
unsigned AddrSpace, AsmPrinter &AP) {
for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
- EmitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP);
+ emitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP);
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
unsigned Size = TD.getTypeAllocSize(CV->getType());
unsigned EmittedSize = TD.getTypeAllocSize(CV->getType()->getElementType()) *
CV->getType()->getNumElements();
@@ -1710,10 +1715,10 @@ static void EmitGlobalConstantVector(const ConstantVector *CV,
AP.OutStreamer.EmitZeros(Padding, AddrSpace);
}
-static void EmitGlobalConstantStruct(const ConstantStruct *CS,
+static void emitGlobalConstantStruct(const ConstantStruct *CS,
unsigned AddrSpace, AsmPrinter &AP) {
// Print the fields in successive locations. Pad to align if needed!
- const TargetData *TD = AP.TM.getTargetData();
+ const DataLayout *TD = AP.TM.getDataLayout();
unsigned Size = TD->getTypeAllocSize(CS->getType());
const StructLayout *Layout = TD->getStructLayout(CS->getType());
uint64_t SizeSoFar = 0;
@@ -1727,7 +1732,7 @@ static void EmitGlobalConstantStruct(const ConstantStruct *CS,
SizeSoFar += FieldSize + PadSize;
// Now print the actual field value.
- EmitGlobalConstantImpl(Field, AddrSpace, AP);
+ emitGlobalConstantImpl(Field, AddrSpace, AP);
// Insert padding - this may include padding to increase the size of the
// current field up to the ABI size (if the struct is not packed) as well
@@ -1738,7 +1743,7 @@ static void EmitGlobalConstantStruct(const ConstantStruct *CS,
"Layout of constant struct may be incorrect!");
}
-static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
+static void emitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
AsmPrinter &AP) {
if (CFP->getType()->isHalfTy()) {
if (AP.isVerbose()) {
@@ -1793,7 +1798,7 @@ static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
<< DoubleVal.convertToDouble() << '\n';
}
- if (AP.TM.getTargetData()->isBigEndian()) {
+ if (AP.TM.getDataLayout()->isBigEndian()) {
AP.OutStreamer.EmitIntValue(p[1], 2, AddrSpace);
AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
} else {
@@ -1802,7 +1807,7 @@ static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
}
// Emit the tail padding for the long double.
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
AP.OutStreamer.EmitZeros(TD.getTypeAllocSize(CFP->getType()) -
TD.getTypeStoreSize(CFP->getType()), AddrSpace);
return;
@@ -1814,7 +1819,7 @@ static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
// API needed to prevent premature destruction.
APInt API = CFP->getValueAPF().bitcastToAPInt();
const uint64_t *p = API.getRawData();
- if (AP.TM.getTargetData()->isBigEndian()) {
+ if (AP.TM.getDataLayout()->isBigEndian()) {
AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
AP.OutStreamer.EmitIntValue(p[1], 8, AddrSpace);
} else {
@@ -1823,9 +1828,9 @@ static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
}
}
-static void EmitGlobalConstantLargeInt(const ConstantInt *CI,
+static void emitGlobalConstantLargeInt(const ConstantInt *CI,
unsigned AddrSpace, AsmPrinter &AP) {
- const TargetData *TD = AP.TM.getTargetData();
+ const DataLayout *TD = AP.TM.getDataLayout();
unsigned BitWidth = CI->getBitWidth();
assert((BitWidth & 63) == 0 && "only support multiples of 64-bits");
@@ -1839,9 +1844,9 @@ static void EmitGlobalConstantLargeInt(const ConstantInt *CI,
}
}
-static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
+static void emitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
AsmPrinter &AP) {
- const TargetData *TD = AP.TM.getTargetData();
+ const DataLayout *TD = AP.TM.getDataLayout();
uint64_t Size = TD->getTypeAllocSize(CV->getType());
if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
return AP.OutStreamer.EmitZeros(Size, AddrSpace);
@@ -1858,13 +1863,13 @@ static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
AP.OutStreamer.EmitIntValue(CI->getZExtValue(), Size, AddrSpace);
return;
default:
- EmitGlobalConstantLargeInt(CI, AddrSpace, AP);
+ emitGlobalConstantLargeInt(CI, AddrSpace, AP);
return;
}
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV))
- return EmitGlobalConstantFP(CFP, AddrSpace, AP);
+ return emitGlobalConstantFP(CFP, AddrSpace, AP);
if (isa<ConstantPointerNull>(CV)) {
AP.OutStreamer.EmitIntValue(0, Size, AddrSpace);
@@ -1872,19 +1877,19 @@ static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
}
if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(CV))
- return EmitGlobalConstantDataSequential(CDS, AddrSpace, AP);
+ return emitGlobalConstantDataSequential(CDS, AddrSpace, AP);
if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV))
- return EmitGlobalConstantArray(CVA, AddrSpace, AP);
+ return emitGlobalConstantArray(CVA, AddrSpace, AP);
if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV))
- return EmitGlobalConstantStruct(CVS, AddrSpace, AP);
+ return emitGlobalConstantStruct(CVS, AddrSpace, AP);
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
// Look through bitcasts, which might not be able to be MCExpr'ized (e.g. of
// vectors).
if (CE->getOpcode() == Instruction::BitCast)
- return EmitGlobalConstantImpl(CE->getOperand(0), AddrSpace, AP);
+ return emitGlobalConstantImpl(CE->getOperand(0), AddrSpace, AP);
if (Size > 8) {
// If the constant expression's size is greater than 64-bits, then we have
@@ -1892,23 +1897,23 @@ static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
// that way.
Constant *New = ConstantFoldConstantExpression(CE, TD);
if (New && New != CE)
- return EmitGlobalConstantImpl(New, AddrSpace, AP);
+ return emitGlobalConstantImpl(New, AddrSpace, AP);
}
}
if (const ConstantVector *V = dyn_cast<ConstantVector>(CV))
- return EmitGlobalConstantVector(V, AddrSpace, AP);
+ return emitGlobalConstantVector(V, AddrSpace, AP);
// Otherwise, it must be a ConstantExpr. Lower it to an MCExpr, then emit it
// through the streamer with EmitValue.
- AP.OutStreamer.EmitValue(LowerConstant(CV, AP), Size, AddrSpace);
+ AP.OutStreamer.EmitValue(lowerConstant(CV, AP), Size, AddrSpace);
}
/// EmitGlobalConstant - Print a general LLVM constant to the .s file.
void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) {
- uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
if (Size)
- EmitGlobalConstantImpl(CV, AddrSpace, *this);
+ emitGlobalConstantImpl(CV, AddrSpace, *this);
else if (MAI->hasSubsectionsViaSymbols()) {
// If the global has zero size, emit a single byte so that two labels don't
// look like they are at the same location.
@@ -2023,8 +2028,8 @@ static void PrintChildLoopComment(raw_ostream &OS, const MachineLoop *Loop,
}
}
-/// EmitBasicBlockLoopComments - Pretty-print comments for basic blocks.
-static void EmitBasicBlockLoopComments(const MachineBasicBlock &MBB,
+/// emitBasicBlockLoopComments - Pretty-print comments for basic blocks.
+static void emitBasicBlockLoopComments(const MachineBasicBlock &MBB,
const MachineLoopInfo *LI,
const AsmPrinter &AP) {
// Add loop depth information
@@ -2090,7 +2095,7 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock *MBB) const {
if (const BasicBlock *BB = MBB->getBasicBlock())
if (BB->hasName())
OutStreamer.AddComment("%" + BB->getName());
- EmitBasicBlockLoopComments(*MBB, LI, *this);
+ emitBasicBlockLoopComments(*MBB, LI, *this);
}
// Print the main label for the block.
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index 90d511c..d94e1fe 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -18,7 +18,7 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
@@ -112,7 +112,7 @@ unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const {
switch (Encoding & 0x07) {
default: llvm_unreachable("Invalid encoded value.");
- case dwarf::DW_EH_PE_absptr: return TM.getTargetData()->getPointerSize();
+ case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize();
case dwarf::DW_EH_PE_udata2: return 2;
case dwarf::DW_EH_PE_udata4: return 4;
case dwarf::DW_EH_PE_udata8: return 8;
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index db43b06..50f0fc3 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -43,10 +43,10 @@ namespace {
};
}
-/// SrcMgrDiagHandler - This callback is invoked when the SourceMgr for an
+/// srcMgrDiagHandler - This callback is invoked when the SourceMgr for an
/// inline asm has an error in it. diagInfo is a pointer to the SrcMgrDiagInfo
/// struct above.
-static void SrcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) {
+static void srcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) {
SrcMgrDiagInfo *DiagInfo = static_cast<SrcMgrDiagInfo *>(diagInfo);
assert(DiagInfo && "Diagnostic context not passed down?");
@@ -68,7 +68,8 @@ static void SrcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) {
}
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
-void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const {
+void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode,
+ InlineAsm::AsmDialect Dialect) const {
assert(!Str.empty() && "Can't emit empty inline asm block");
// Remember if the buffer is nul terminated or not so we can avoid a copy.
@@ -91,12 +92,12 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const {
LLVMContext &LLVMCtx = MMI->getModule()->getContext();
bool HasDiagHandler = false;
if (LLVMCtx.getInlineAsmDiagnosticHandler() != 0) {
- // If the source manager has an issue, we arrange for SrcMgrDiagHandler
+ // If the source manager has an issue, we arrange for srcMgrDiagHandler
// to be invoked, getting DiagInfo passed into it.
DiagInfo.LocInfo = LocMDNode;
DiagInfo.DiagHandler = LLVMCtx.getInlineAsmDiagnosticHandler();
DiagInfo.DiagContext = LLVMCtx.getInlineAsmDiagnosticContext();
- SrcMgr.setDiagHandler(SrcMgrDiagHandler, &DiagInfo);
+ SrcMgr.setDiagHandler(srcMgrDiagHandler, &DiagInfo);
HasDiagHandler = true;
}
@@ -126,6 +127,7 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const {
if (!TAP)
report_fatal_error("Inline asm not supported by this streamer because"
" we don't have an asm parser for this target\n");
+ Parser->setAssemblerDialect(Dialect);
Parser->setTargetParser(*TAP.get());
// Don't implicitly switch to the text section before the asm.
@@ -135,71 +137,113 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const {
report_fatal_error("Error parsing inline asm\n");
}
+static void EmitMSInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
+ MachineModuleInfo *MMI, int InlineAsmVariant,
+ AsmPrinter *AP, unsigned LocCookie,
+ raw_ostream &OS) {
+ // Switch to the inline assembly variant.
+ OS << "\t.intel_syntax\n\t";
-/// EmitInlineAsm - This method formats and emits the specified machine
-/// instruction that is an inline asm.
-void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
- assert(MI->isInlineAsm() && "printInlineAsm only works on inline asms");
-
+ const char *LastEmitted = AsmStr; // One past the last character emitted.
unsigned NumOperands = MI->getNumOperands();
- // Count the number of register definitions to find the asm string.
- unsigned NumDefs = 0;
- for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
- ++NumDefs)
- assert(NumDefs != NumOperands-2 && "No asm string?");
+ while (*LastEmitted) {
+ switch (*LastEmitted) {
+ default: {
+ // Not a special case, emit the string section literally.
+ const char *LiteralEnd = LastEmitted+1;
+ while (*LiteralEnd && *LiteralEnd != '{' && *LiteralEnd != '|' &&
+ *LiteralEnd != '}' && *LiteralEnd != '$' && *LiteralEnd != '\n')
+ ++LiteralEnd;
- assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
+ OS.write(LastEmitted, LiteralEnd-LastEmitted);
+ LastEmitted = LiteralEnd;
+ break;
+ }
+ case '\n':
+ ++LastEmitted; // Consume newline character.
+ OS << '\n'; // Indent code with newline.
+ break;
+ case '$': {
+ ++LastEmitted; // Consume '$' character.
+ bool Done = true;
- // Disassemble the AsmStr, printing out the literal pieces, the operands, etc.
- const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
+ // Handle escapes.
+ switch (*LastEmitted) {
+ default: Done = false; break;
+ case '$':
+ ++LastEmitted; // Consume second '$' character.
+ break;
+ }
+ if (Done) break;
- // If this asmstr is empty, just print the #APP/#NOAPP markers.
- // These are useful to see where empty asm's wound up.
- if (AsmStr[0] == 0) {
- // Don't emit the comments if writing to a .o file.
- if (!OutStreamer.hasRawTextSupport()) return;
+ const char *IDStart = LastEmitted;
+ const char *IDEnd = IDStart;
+ while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd;
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmStart());
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmEnd());
- return;
- }
+ unsigned Val;
+ if (StringRef(IDStart, IDEnd-IDStart).getAsInteger(10, Val))
+ report_fatal_error("Bad $ operand number in inline asm string: '" +
+ Twine(AsmStr) + "'");
+ LastEmitted = IDEnd;
- // Emit the #APP start marker. This has to happen even if verbose-asm isn't
- // enabled, so we use EmitRawText.
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmStart());
+ if (Val >= NumOperands-1)
+ report_fatal_error("Invalid $ operand number in inline asm string: '" +
+ Twine(AsmStr) + "'");
- // Get the !srcloc metadata node if we have it, and decode the loc cookie from
- // it.
- unsigned LocCookie = 0;
- const MDNode *LocMD = 0;
- for (unsigned i = MI->getNumOperands(); i != 0; --i) {
- if (MI->getOperand(i-1).isMetadata() &&
- (LocMD = MI->getOperand(i-1).getMetadata()) &&
- LocMD->getNumOperands() != 0) {
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(LocMD->getOperand(0))) {
- LocCookie = CI->getZExtValue();
- break;
- }
- }
- }
+ // Okay, we finally have a value number. Ask the target to print this
+ // operand!
+ unsigned OpNo = InlineAsm::MIOp_FirstOperand;
- // Emit the inline asm to a temporary string so we can emit it through
- // EmitInlineAsm.
- SmallString<256> StringData;
- raw_svector_ostream OS(StringData);
+ bool Error = false;
- OS << '\t';
+ // Scan to find the machine operand number for the operand.
+ for (; Val; --Val) {
+ if (OpNo >= MI->getNumOperands()) break;
+ unsigned OpFlags = MI->getOperand(OpNo).getImm();
+ OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1;
+ }
- // The variant of the current asmprinter.
- int AsmPrinterVariant = MAI->getAssemblerDialect();
+ // A location metadata operand may be attached to the end of the
+ // instruction, but we should never see metadata at any other
+ // operand position while processing. It's an error if we do.
+ if (OpNo >= MI->getNumOperands() ||
+ MI->getOperand(OpNo).isMetadata()) {
+ Error = true;
+ } else {
+ unsigned OpFlags = MI->getOperand(OpNo).getImm();
+ ++OpNo; // Skip over the ID number.
+
+ if (InlineAsm::isMemKind(OpFlags)) {
+ Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
+ /*Modifier*/ 0, OS);
+ } else {
+ Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant,
+ /*Modifier*/ 0, OS);
+ }
+ }
+ if (Error) {
+ std::string msg;
+ raw_string_ostream Msg(msg);
+ Msg << "invalid operand in inline asm: '" << AsmStr << "'";
+ MMI->getModule()->getContext().emitError(LocCookie, Msg.str());
+ }
+ break;
+ }
+ }
+ }
+ OS << "\n\t.att_syntax\n" << (char)0; // null terminate string.
+}
+static void EmitGCCInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
+ MachineModuleInfo *MMI, int InlineAsmVariant,
+ int AsmPrinterVariant, AsmPrinter *AP,
+ unsigned LocCookie, raw_ostream &OS) {
int CurVariant = -1; // The number of the {.|.|.} region we are in.
const char *LastEmitted = AsmStr; // One past the last character emitted.
+ unsigned NumOperands = MI->getNumOperands();
+
+ OS << '\t';
while (*LastEmitted) {
switch (*LastEmitted) {
@@ -272,7 +316,7 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
" string: '" + Twine(AsmStr) + "'");
std::string Val(StrStart, StrEnd);
- PrintSpecial(MI, OS, Val.c_str());
+ AP->PrintSpecial(MI, OS, Val.c_str());
LastEmitted = StrEnd+1;
break;
}
@@ -340,13 +384,12 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
// FIXME: What if the operand isn't an MBB, report error?
OS << *MI->getOperand(OpNo).getMBB()->getSymbol();
else {
- AsmPrinter *AP = const_cast<AsmPrinter*>(this);
if (InlineAsm::isMemKind(OpFlags)) {
- Error = AP->PrintAsmMemoryOperand(MI, OpNo, AsmPrinterVariant,
+ Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
Modifier[0] ? Modifier : 0,
OS);
} else {
- Error = AP->PrintAsmOperand(MI, OpNo, AsmPrinterVariant,
+ Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant,
Modifier[0] ? Modifier : 0, OS);
}
}
@@ -363,7 +406,74 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
}
}
OS << '\n' << (char)0; // null terminate string.
- EmitInlineAsm(OS.str(), LocMD);
+}
+
+/// EmitInlineAsm - This method formats and emits the specified machine
+/// instruction that is an inline asm.
+void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
+ assert(MI->isInlineAsm() && "printInlineAsm only works on inline asms");
+
+ // Count the number of register definitions to find the asm string.
+ unsigned NumDefs = 0;
+ for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
+ ++NumDefs)
+ assert(NumDefs != MI->getNumOperands()-2 && "No asm string?");
+
+ assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
+
+ // Disassemble the AsmStr, printing out the literal pieces, the operands, etc.
+ const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
+
+ // If this asmstr is empty, just print the #APP/#NOAPP markers.
+ // These are useful for seeing where empty asms wound up.
+ if (AsmStr[0] == 0) {
+ // Don't emit the comments if writing to a .o file.
+ if (!OutStreamer.hasRawTextSupport()) return;
+
+ OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
+ MAI->getInlineAsmStart());
+ OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
+ MAI->getInlineAsmEnd());
+ return;
+ }
+
+ // Emit the #APP start marker. This has to happen even if verbose-asm isn't
+ // enabled, so we use EmitRawText.
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
+ MAI->getInlineAsmStart());
+
+ // Get the !srcloc metadata node if we have it, and decode the loc cookie from
+ // it.
+ unsigned LocCookie = 0;
+ const MDNode *LocMD = 0;
+ for (unsigned i = MI->getNumOperands(); i != 0; --i) {
+ if (MI->getOperand(i-1).isMetadata() &&
+ (LocMD = MI->getOperand(i-1).getMetadata()) &&
+ LocMD->getNumOperands() != 0) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(LocMD->getOperand(0))) {
+ LocCookie = CI->getZExtValue();
+ break;
+ }
+ }
+ }
+
+ // Emit the inline asm to a temporary string so we can emit it through
+ // EmitInlineAsm.
+ SmallString<256> StringData;
+ raw_svector_ostream OS(StringData);
+
+ // The variant of the current asmprinter.
+ int AsmPrinterVariant = MAI->getAssemblerDialect();
+ InlineAsm::AsmDialect InlineAsmVariant = MI->getInlineAsmDialect();
+ AsmPrinter *AP = const_cast<AsmPrinter*>(this);
+ if (InlineAsmVariant == InlineAsm::AD_ATT)
+ EmitGCCInlineAsmStr(AsmStr, MI, MMI, InlineAsmVariant, AsmPrinterVariant,
+ AP, LocCookie, OS);
+ else
+ EmitMSInlineAsmStr(AsmStr, MI, MMI, InlineAsmVariant, AP, LocCookie, OS);
+
+ EmitInlineAsm(OS.str(), LocMD, MI->getInlineAsmDialect());
// Emit the #NOAPP end marker. This has to happen even if verbose-asm isn't
// enabled, so we use EmitRawText.
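
The cookie decoded from the !srcloc metadata above exists only to be handed back to the front end along with any diagnostic; LLVMContext::emitError takes it directly. A hedged sketch of the reporting side:

#include "llvm/LLVMContext.h"
#include "llvm/ADT/StringRef.h"
using namespace llvm;

// Report an inline-asm problem; a nonzero LocCookie lets the front end
// map the message back to the original source location of the asm.
static void reportInlineAsmError(LLVMContext &Ctx, unsigned LocCookie,
                                 StringRef Msg) {
  Ctx.emitError(LocCookie, Msg);
}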
@@ -409,8 +519,8 @@ void AsmPrinter::PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
/// instruction, using the specified assembler variant. Targets should
/// override this to format as appropriate.
bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O) {
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) {
// Does this asm operand have a single letter operand modifier?
if (ExtraCode && ExtraCode[0]) {
if (ExtraCode[1] != 0) return true; // Unknown modifier.
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
index 3776848..4d73b3c 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -17,7 +17,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -182,6 +182,12 @@ void DIEValue::dump() {
void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const {
unsigned Size = ~0U;
switch (Form) {
+ case dwarf::DW_FORM_flag_present:
+ // Emit something to keep the lines and comments in sync.
+ // FIXME: Is there a better way to do this?
+ if (Asm->OutStreamer.hasRawTextSupport())
+ Asm->OutStreamer.EmitRawText(StringRef(""));
+ return;
case dwarf::DW_FORM_flag: // Fall thru
case dwarf::DW_FORM_ref1: // Fall thru
case dwarf::DW_FORM_data1: Size = 1; break;
@@ -193,7 +199,8 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const {
case dwarf::DW_FORM_data8: Size = 8; break;
case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
- case dwarf::DW_FORM_addr: Size = Asm->getTargetData().getPointerSize(); break;
+ case dwarf::DW_FORM_addr:
+ Size = Asm->getDataLayout().getPointerSize(); break;
default: llvm_unreachable("DIE Value form not supported yet");
}
Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
@@ -203,6 +210,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const {
///
unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const {
switch (Form) {
+ case dwarf::DW_FORM_flag_present: return 0;
case dwarf::DW_FORM_flag: // Fall thru
case dwarf::DW_FORM_ref1: // Fall thru
case dwarf::DW_FORM_data1: return sizeof(int8_t);
@@ -214,7 +222,7 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const {
case dwarf::DW_FORM_data8: return sizeof(int64_t);
case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
- case dwarf::DW_FORM_addr: return AP->getTargetData().getPointerSize();
+ case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize();
default: llvm_unreachable("DIE Value form not supported yet");
}
}
@@ -241,7 +249,7 @@ void DIELabel::EmitValue(AsmPrinter *AP, unsigned Form) const {
unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
- return AP->getTargetData().getPointerSize();
+ return AP->getDataLayout().getPointerSize();
}
#ifndef NDEBUG
@@ -265,7 +273,7 @@ void DIEDelta::EmitValue(AsmPrinter *AP, unsigned Form) const {
unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
- return AP->getTargetData().getPointerSize();
+ return AP->getDataLayout().getPointerSize();
}
#ifndef NDEBUG
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
index f93ea1b..28a96f3 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
@@ -214,9 +214,6 @@ namespace llvm {
///
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const = 0;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *) { return true; }
-
#ifndef NDEBUG
virtual void print(raw_ostream &O) = 0;
void dump();
@@ -257,7 +254,6 @@ namespace llvm {
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const;
// Implement isa/cast/dyncast.
- static bool classof(const DIEInteger *) { return true; }
static bool classof(const DIEValue *I) { return I->getType() == isInteger; }
#ifndef NDEBUG
@@ -286,7 +282,6 @@ namespace llvm {
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const;
// Implement isa/cast/dyncast.
- static bool classof(const DIELabel *) { return true; }
static bool classof(const DIEValue *L) { return L->getType() == isLabel; }
#ifndef NDEBUG
@@ -313,7 +308,6 @@ namespace llvm {
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const;
// Implement isa/cast/dyncast.
- static bool classof(const DIEDelta *) { return true; }
static bool classof(const DIEValue *D) { return D->getType() == isDelta; }
#ifndef NDEBUG
@@ -343,7 +337,6 @@ namespace llvm {
}
// Implement isa/cast/dyncast.
- static bool classof(const DIEEntry *) { return true; }
static bool classof(const DIEValue *E) { return E->getType() == isEntry; }
#ifndef NDEBUG
@@ -383,7 +376,6 @@ namespace llvm {
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const;
// Implement isa/cast/dyncast.
- static bool classof(const DIEBlock *) { return true; }
static bool classof(const DIEValue *E) { return E->getType() == isBlock; }
#ifndef NDEBUG
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
index 454a923..05e0f2f 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
@@ -133,8 +133,8 @@ void DwarfAccelTable::EmitHeader(AsmPrinter *Asm) {
}
}
-// Walk through and emit the buckets for the table. This will look
-// like a list of numbers of how many elements are in each bucket.
+// Walk through and emit the buckets for the table. Each index is
+// an offset into the list of hashes.
void DwarfAccelTable::EmitBuckets(AsmPrinter *Asm) {
unsigned index = 0;
for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
index 963b8cd..92d1bbe 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
@@ -237,8 +237,8 @@ private:
#endif
};
- DwarfAccelTable(const DwarfAccelTable&); // DO NOT IMPLEMENT
- void operator=(const DwarfAccelTable&); // DO NOT IMPLEMENT
+ DwarfAccelTable(const DwarfAccelTable&) LLVM_DELETED_FUNCTION;
+ void operator=(const DwarfAccelTable&) LLVM_DELETED_FUNCTION;
// Internal Functions
void EmitHeader(AsmPrinter *);
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
index d975f1f..4fdd5ca 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
@@ -25,7 +25,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index d30e5bb..2b07dda 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -22,7 +22,7 @@
#include "llvm/Instructions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -51,6 +51,15 @@ DIEEntry *CompileUnit::createDIEEntry(DIE *Entry) {
return Value;
}
+/// addFlag - Add a flag that is true.
+void CompileUnit::addFlag(DIE *Die, unsigned Attribute) {
+ if (!DD->useDarwinGDBCompat())
+ Die->addValue(Attribute, dwarf::DW_FORM_flag_present,
+ DIEIntegerOne);
+ else
+ addUInt(Die, Attribute, dwarf::DW_FORM_flag, 1);
+}
+
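DW_FORM_flag_present (DWARF 4) encodes a true flag by the attribute's mere presence, so it costs zero bytes in .debug_info, while the Darwin-gdb compatibility path keeps the one-byte DW_FORM_flag. A usage sketch, assuming a CompileUnit *CU and DIE *Die as in this file:

// Emits no data under DW_FORM_flag_present; one byte of 1 under the
// Darwin-gdb compatible DW_FORM_flag. The form choice is internal.
CU->addFlag(Die, dwarf::DW_AT_external);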
/// addUInt - Add an unsigned integer attribute data and value.
///
void CompileUnit::addUInt(DIE *Die, unsigned Attribute,
@@ -501,7 +510,7 @@ bool CompileUnit::addConstantFPValue(DIE *Die, const MachineOperand &MO) {
const char *FltPtr = (const char*)FltVal.getRawData();
int NumBytes = FltVal.getBitWidth() / 8; // 8 bits per byte.
- bool LittleEndian = Asm->getTargetData().isLittleEndian();
+ bool LittleEndian = Asm->getDataLayout().isLittleEndian();
int Incr = (LittleEndian ? 1 : -1);
int Start = (LittleEndian ? 0 : NumBytes - 1);
int Stop = (LittleEndian ? NumBytes : -1);
@@ -543,7 +552,7 @@ bool CompileUnit::addConstantValue(DIE *Die, const ConstantInt *CI,
const uint64_t *Ptr64 = Val.getRawData();
int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte.
- bool LittleEndian = Asm->getTargetData().isLittleEndian();
+ bool LittleEndian = Asm->getDataLayout().isLittleEndian();
// Output the constant to DWARF one byte at a time.
for (int i = 0; i < NumBytes; i++) {
@@ -794,7 +803,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
(Language == dwarf::DW_LANG_C89 ||
Language == dwarf::DW_LANG_C99 ||
Language == dwarf::DW_LANG_ObjC))
- addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_prototyped);
}
break;
case dwarf::DW_TAG_structure_type:
@@ -825,15 +834,15 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_public);
if (SP.isExplicit())
- addUInt(ElemDie, dwarf::DW_AT_explicit, dwarf::DW_FORM_flag, 1);
+ addFlag(ElemDie, dwarf::DW_AT_explicit);
}
else if (Element.isVariable()) {
DIVariable DV(Element);
ElemDie = new DIE(dwarf::DW_TAG_variable);
addString(ElemDie, dwarf::DW_AT_name, DV.getName());
addType(ElemDie, DV.getType());
- addUInt(ElemDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
- addUInt(ElemDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
+ addFlag(ElemDie, dwarf::DW_AT_declaration);
+ addFlag(ElemDie, dwarf::DW_AT_external);
addSourceLine(ElemDie, DV);
} else if (Element.isDerivedType()) {
DIDerivedType DDTy(Element);
@@ -883,7 +892,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
}
if (CTy.isAppleBlockExtension())
- addUInt(&Buffer, dwarf::DW_AT_APPLE_block, dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_APPLE_block);
DICompositeType ContainingType = CTy.getContainingType();
if (DIDescriptor(ContainingType).isCompositeType())
@@ -895,8 +904,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
}
if (CTy.isObjcClassComplete())
- addUInt(&Buffer, dwarf::DW_AT_APPLE_objc_complete_type,
- dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_APPLE_objc_complete_type);
// Add template parameters to a class, structure or union types.
// FIXME: The support isn't in the metadata for this yet.
@@ -929,7 +937,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
// If we're a forward decl, say so.
if (CTy.isForwardDecl())
- addUInt(&Buffer, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_declaration);
// Add source line info if available.
if (!CTy.isForwardDecl())
@@ -1028,8 +1036,10 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
// AT_specification code in order to work around a bug in older
// gdbs that requires the linkage name to resolve multiple template
// functions.
+ // TODO: Remove this set of code when we get rid of the old gdb
+ // compatibility.
StringRef LinkageName = SP.getLinkageName();
- if (!LinkageName.empty())
+ if (!LinkageName.empty() && DD->useDarwinGDBCompat())
addString(SPDie, dwarf::DW_AT_MIPS_linkage_name,
getRealLinkageName(LinkageName));
@@ -1043,6 +1053,11 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
return SPDie;
}
+ // Add the linkage name if we have one.
+ if (!LinkageName.empty() && !DD->useDarwinGDBCompat())
+ addString(SPDie, dwarf::DW_AT_MIPS_linkage_name,
+ getRealLinkageName(LinkageName));
+
// Constructors and operators for anonymous aggregates do not have names.
if (!SP.getName().empty())
addString(SPDie, dwarf::DW_AT_name, SP.getName());
@@ -1055,7 +1070,7 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
(Language == dwarf::DW_LANG_C89 ||
Language == dwarf::DW_LANG_C99 ||
Language == dwarf::DW_LANG_ObjC))
- addUInt(SPDie, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_prototyped);
// Add Return Type.
DICompositeType SPTy = SP.getType();
@@ -1079,7 +1094,7 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
}
if (!SP.isDefinition()) {
- addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_declaration);
// Add arguments. Do not add arguments for subprogram definition. They will
// be handled while processing variables.
@@ -1090,22 +1105,22 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
if (SPTag == dwarf::DW_TAG_subroutine_type)
for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
- DIType ATy = DIType(DIType(Args.getElement(i)));
+ DIType ATy = DIType(Args.getElement(i));
addType(Arg, ATy);
if (ATy.isArtificial())
- addUInt(Arg, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
+ addFlag(Arg, dwarf::DW_AT_artificial);
SPDie->addChild(Arg);
}
}
if (SP.isArtificial())
- addUInt(SPDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_artificial);
if (!SP.isLocalToUnit())
- addUInt(SPDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_external);
if (SP.isOptimized())
- addUInt(SPDie, dwarf::DW_AT_APPLE_optimized, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_APPLE_optimized);
if (unsigned isa = Asm->getISAEncoding()) {
addUInt(SPDie, dwarf::DW_AT_APPLE_isa, dwarf::DW_FORM_flag, isa);
@@ -1168,7 +1183,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
// Add scoping info.
if (!GV.isLocalToUnit())
- addUInt(VariableDIE, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
+ addFlag(VariableDIE, dwarf::DW_AT_external);
// Add line number info.
addSourceLine(VariableDIE, GV);
@@ -1193,8 +1208,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification,
dwarf::DW_FORM_ref4, VariableDIE);
addBlock(VariableSpecDIE, dwarf::DW_AT_location, 0, Block);
- addUInt(VariableDIE, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag,
- 1);
+ addFlag(VariableDIE, dwarf::DW_AT_declaration);
addDie(VariableSpecDIE);
} else {
addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
@@ -1213,7 +1227,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
SmallVector<Value*, 3> Idx(CE->op_begin()+1, CE->op_end());
addUInt(Block, 0, dwarf::DW_FORM_udata,
- Asm->getTargetData().getIndexedOffset(Ptr->getType(), Idx));
+ Asm->getDataLayout().getIndexedOffset(Ptr->getType(), Idx));
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
}
@@ -1260,7 +1274,7 @@ void CompileUnit::constructArrayTypeDIE(DIE &Buffer,
DICompositeType *CTy) {
Buffer.setTag(dwarf::DW_TAG_array_type);
if (CTy->getTag() == dwarf::DW_TAG_vector_type)
- addUInt(&Buffer, dwarf::DW_AT_GNU_vector, dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_GNU_vector);
// Emit derived type.
addType(&Buffer, CTy->getTypeDerivedFrom());
@@ -1333,8 +1347,7 @@ DIE *CompileUnit::constructVariableDIE(DbgVariable *DV, bool isScopeAbstract) {
}
if (DV->isArtificial())
- addUInt(VariableDie, dwarf::DW_AT_artificial,
- dwarf::DW_FORM_flag, 1);
+ addFlag(VariableDie, dwarf::DW_AT_artificial);
if (isScopeAbstract) {
DV->setDIE(VariableDie);
@@ -1446,7 +1459,7 @@ DIE *CompileUnit::createMemberDIE(DIDerivedType DT) {
Offset -= FieldOffset;
// Maybe we need to work from the other end.
- if (Asm->getTargetData().isLittleEndian())
+ if (Asm->getDataLayout().isLittleEndian())
Offset = FieldSize - (Offset + Size);
addUInt(MemberDie, dwarf::DW_AT_bit_offset, 0, Offset);
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
index b4ff9e8..fad9b6e 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
@@ -176,6 +176,9 @@ public:
}
public:
+ /// addFlag - Add a flag that is true to the DIE.
+ void addFlag(DIE *Die, unsigned Attribute);
+
/// addUInt - Add an unsigned integer attribute data and value.
///
void addUInt(DIE *Die, unsigned Attribute, unsigned Form, uint64_t Integer);
@@ -280,8 +283,8 @@ public:
/// for the given DITemplateTypeParameter.
DIE *getOrCreateTemplateTypeParameterDIE(DITemplateTypeParameter TP);
- /// getOrCreateTemplateValueParameterDIE - Find existing DIE or create new DIE
- /// for the given DITemplateValueParameter.
+ /// getOrCreateTemplateValueParameterDIE - Find existing DIE or create
+ /// new DIE for the given DITemplateValueParameter.
DIE *getOrCreateTemplateValueParameterDIE(DITemplateValueParameter TVP);
/// createDIEEntry - Creates a new DIEEntry to be a proxy for a debug
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 649684a..367b523 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -27,7 +27,7 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
@@ -54,9 +54,29 @@ static cl::opt<bool> UnknownLocations("use-unknown-locations", cl::Hidden,
cl::desc("Make an absence of debug location information explicit."),
cl::init(false));
-static cl::opt<bool> DwarfAccelTables("dwarf-accel-tables", cl::Hidden,
+namespace {
+ enum DefaultOnOff {
+ Default, Enable, Disable
+ };
+}
+
+static cl::opt<DefaultOnOff> DwarfAccelTables("dwarf-accel-tables", cl::Hidden,
cl::desc("Output prototype dwarf accelerator tables."),
- cl::init(false));
+ cl::values(
+ clEnumVal(Default, "Default for platform"),
+ clEnumVal(Enable, "Enabled"),
+ clEnumVal(Disable, "Disabled"),
+ clEnumValEnd),
+ cl::init(Default));
+
+static cl::opt<DefaultOnOff> DarwinGDBCompat("darwin-gdb-compat", cl::Hidden,
+ cl::desc("Compatibility with Darwin gdb."),
+ cl::values(
+ clEnumVal(Default, "Default for platform"),
+ clEnumVal(Enable, "Enabled"),
+ clEnumVal(Disable, "Disabled"),
+ clEnumValEnd),
+ cl::init(Default));
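
The two options above follow a tri-state pattern: a platform default wins unless the user forces Enable or Disable. A self-contained sketch of the idiom against this tree's cl API (the option name here is illustrative, not part of the patch):

#include "llvm/Support/CommandLine.h"
using namespace llvm;

namespace { enum DefaultOnOff { Default, Enable, Disable }; }

static cl::opt<DefaultOnOff>
Demo("demo-feature", cl::Hidden, cl::desc("Tri-state demo flag."),
     cl::values(clEnumVal(Default, "Default for platform"),
                clEnumVal(Enable, "Enabled"),
                clEnumVal(Disable, "Disabled"),
                clEnumValEnd),
     cl::init(Default));

// Resolve the tri-state against a platform-derived default.
static bool resolve(bool PlatformDefault) {
  return Demo == Default ? PlatformDefault : Demo == Enable;
}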
namespace {
const char *DWARFGroupName = "DWARF Emission";
@@ -135,10 +155,25 @@ DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0;
FunctionBeginSym = FunctionEndSym = 0;
- // Turn on accelerator tables for Darwin.
- if (Triple(M->getTargetTriple()).isOSDarwin())
- DwarfAccelTables = true;
-
+ // Turn on accelerator tables and older gdb compatibility
+ // for Darwin.
+ bool isDarwin = Triple(M->getTargetTriple()).isOSDarwin();
+ if (DarwinGDBCompat == Default)
+ isDarwinGDBCompat = isDarwin;
+ else
+ isDarwinGDBCompat = (DarwinGDBCompat == Enable);
+
+ if (DwarfAccelTables == Default)
+ hasDwarfAccelTables = isDarwin;
+ else
+ hasDwarfAccelTables = (DwarfAccelTables == Enable);
+
{
NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
beginModule(M);
@@ -272,44 +307,51 @@ DIE *DwarfDebug::updateSubprogramScopeDIE(CompileUnit *SPCU,
assert(SPDie && "Unable to find subprogram DIE!");
DISubprogram SP(SPNode);
- DISubprogram SPDecl = SP.getFunctionDeclaration();
- if (!SPDecl.isSubprogram()) {
- // There is not any need to generate specification DIE for a function
- // defined at compile unit level. If a function is defined inside another
- // function then gdb prefers the definition at top level and but does not
- // expect specification DIE in parent function. So avoid creating
- // specification DIE for a function defined inside a function.
- if (SP.isDefinition() && !SP.getContext().isCompileUnit() &&
- !SP.getContext().isFile() &&
- !isSubprogramContext(SP.getContext())) {
- SPCU->addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
-
- // Add arguments.
- DICompositeType SPTy = SP.getType();
- DIArray Args = SPTy.getTypeArray();
- unsigned SPTag = SPTy.getTag();
- if (SPTag == dwarf::DW_TAG_subroutine_type)
- for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
- DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
- DIType ATy = DIType(DIType(Args.getElement(i)));
- SPCU->addType(Arg, ATy);
- if (ATy.isArtificial())
- SPCU->addUInt(Arg, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
- SPDie->addChild(Arg);
- }
- DIE *SPDeclDie = SPDie;
- SPDie = new DIE(dwarf::DW_TAG_subprogram);
- SPCU->addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4,
- SPDeclDie);
- SPCU->addDie(SPDie);
- }
- }
- // Pick up abstract subprogram DIE.
+ // If we're updating an abstract DIE, then we will be adding the children and
+ // object pointer later on. But what we don't want to do is process the
+ // concrete DIE twice.
if (DIE *AbsSPDIE = AbstractSPDies.lookup(SPNode)) {
+ // Pick up abstract subprogram DIE.
SPDie = new DIE(dwarf::DW_TAG_subprogram);
SPCU->addDIEEntry(SPDie, dwarf::DW_AT_abstract_origin,
dwarf::DW_FORM_ref4, AbsSPDIE);
SPCU->addDie(SPDie);
+ } else {
+ DISubprogram SPDecl = SP.getFunctionDeclaration();
+ if (!SPDecl.isSubprogram()) {
+ // There is no need to generate a specification DIE for a function
+ // defined at compile unit level. If a function is defined inside
+ // another function, gdb prefers the definition at the top level but
+ // does not expect a specification DIE in the parent function. So
+ // avoid creating a specification DIE for a function defined inside
+ // a function.
+ if (SP.isDefinition() && !SP.getContext().isCompileUnit() &&
+ !SP.getContext().isFile() &&
+ !isSubprogramContext(SP.getContext())) {
+ SPCU->addFlag(SPDie, dwarf::DW_AT_declaration);
+
+ // Add arguments.
+ DICompositeType SPTy = SP.getType();
+ DIArray Args = SPTy.getTypeArray();
+ unsigned SPTag = SPTy.getTag();
+ if (SPTag == dwarf::DW_TAG_subroutine_type)
+ for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
+ DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
+ DIType ATy = DIType(Args.getElement(i));
+ SPCU->addType(Arg, ATy);
+ if (ATy.isArtificial())
+ SPCU->addFlag(Arg, dwarf::DW_AT_artificial);
+ if (ATy.isObjectPointer())
+ SPCU->addDIEEntry(SPDie, dwarf::DW_AT_object_pointer,
+ dwarf::DW_FORM_ref4, Arg);
+ SPDie->addChild(Arg);
+ }
+ DIE *SPDeclDie = SPDie;
+ SPDie = new DIE(dwarf::DW_TAG_subprogram);
+ SPCU->addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4,
+ SPDeclDie);
+ SPCU->addDie(SPDie);
+ }
+ }
}
SPCU->addLabel(SPDie, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr,
@@ -346,7 +388,7 @@ DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU,
// DW_AT_ranges appropriately.
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
DebugRangeSymbols.size()
- * Asm->getTargetData().getPointerSize());
+ * Asm->getDataLayout().getPointerSize());
for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
RE = Ranges.end(); RI != RE; ++RI) {
DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -386,7 +428,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
DISubprogram InlinedSP = getDISubprogram(DS);
DIE *OriginDIE = TheCU->getDIE(InlinedSP);
if (!OriginDIE) {
- DEBUG(dbgs() << "Unable to find original DIE for inlined subprogram.");
+ DEBUG(dbgs() << "Unable to find original DIE for an inlined subprogram.");
return NULL;
}
@@ -395,7 +437,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
const MCSymbol *EndLabel = getLabelAfterInsn(RI->second);
if (StartLabel == 0 || EndLabel == 0) {
- llvm_unreachable("Unexpected Start and End labels for a inlined scope!");
+ llvm_unreachable("Unexpected Start and End labels for an inlined scope!");
}
assert(StartLabel->isDefined() &&
"Invalid starting label for an inlined scope!");
@@ -412,7 +454,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
// DW_AT_ranges appropriately.
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
DebugRangeSymbols.size()
- * Asm->getTargetData().getPointerSize());
+ * Asm->getDataLayout().getPointerSize());
for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
RE = Ranges.end(); RI != RE; ++RI) {
DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -461,21 +503,26 @@ DIE *DwarfDebug::constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope) {
return NULL;
SmallVector<DIE *, 8> Children;
+ DIE *ObjectPointer = NULL;
// Collect arguments for current function.
if (LScopes.isCurrentFunctionScope(Scope))
for (unsigned i = 0, N = CurrentFnArguments.size(); i < N; ++i)
if (DbgVariable *ArgDV = CurrentFnArguments[i])
if (DIE *Arg =
- TheCU->constructVariableDIE(ArgDV, Scope->isAbstractScope()))
+ TheCU->constructVariableDIE(ArgDV, Scope->isAbstractScope())) {
Children.push_back(Arg);
+ if (ArgDV->isObjectPointer()) ObjectPointer = Arg;
+ }
// Collect lexical scope children first.
const SmallVector<DbgVariable *, 8> &Variables = ScopeVariables.lookup(Scope);
for (unsigned i = 0, N = Variables.size(); i < N; ++i)
if (DIE *Variable =
- TheCU->constructVariableDIE(Variables[i], Scope->isAbstractScope()))
+ TheCU->constructVariableDIE(Variables[i], Scope->isAbstractScope())) {
Children.push_back(Variable);
+ if (Variables[i]->isObjectPointer()) ObjectPointer = Variable;
+ }
const SmallVector<LexicalScope *, 4> &Scopes = Scope->getChildren();
for (unsigned j = 0, M = Scopes.size(); j < M; ++j)
if (DIE *Nested = constructScopeDIE(TheCU, Scopes[j]))
@@ -509,6 +556,10 @@ DIE *DwarfDebug::constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope) {
E = Children.end(); I != E; ++I)
ScopeDIE->addChild(*I);
+ if (DS.isSubprogram() && ObjectPointer != NULL)
+ TheCU->addDIEEntry(ScopeDIE, dwarf::DW_AT_object_pointer,
+ dwarf::DW_FORM_ref4, ObjectPointer);
+
if (DS.isSubprogram())
TheCU->addPubTypes(DISubprogram(DS));
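
DW_AT_object_pointer gives debuggers a direct reference to the implicit this/self parameter. After the change above, a method's subprogram DIE ends up shaped roughly like this (a sketch of the DWARF tree, not exact emitter output):

// DW_TAG_subprogram
//   DW_AT_object_pointer -> DW_FORM_ref4 reference to the parameter below
//   DW_TAG_formal_parameter      // the 'this'/'self' argument
//     DW_AT_artificial           // DW_FORM_flag_present, unless the
//                                // Darwin-gdb compatible form is in use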
@@ -556,7 +607,8 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) {
unsigned ID = GetOrCreateSourceID(FN, CompilationDir);
DIE *Die = new DIE(dwarf::DW_TAG_compile_unit);
- CompileUnit *NewCU = new CompileUnit(ID, DIUnit.getLanguage(), Die, Asm, this);
+ CompileUnit *NewCU = new CompileUnit(ID, DIUnit.getLanguage(), Die,
+ Asm, this);
NewCU->addString(Die, dwarf::DW_AT_producer, DIUnit.getProducer());
NewCU->addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
DIUnit.getLanguage());
@@ -575,7 +627,7 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) {
if (!CompilationDir.empty())
NewCU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
if (DIUnit.isOptimized())
- NewCU->addUInt(Die, dwarf::DW_AT_APPLE_optimized, dwarf::DW_FORM_flag, 1);
+ NewCU->addFlag(Die, dwarf::DW_AT_APPLE_optimized);
StringRef Flags = DIUnit.getFlags();
if (!Flags.empty())
@@ -755,7 +807,7 @@ void DwarfDebug::endModule() {
LexicalScope *Scope =
new LexicalScope(NULL, DIDescriptor(SP), NULL, false);
DeadFnScopeMap[SP] = Scope;
-
+
// Construct subprogram DIE and add variables DIEs.
CompileUnit *SPCU = CUMap.lookup(TheCU);
assert(SPCU && "Unable to find Compile Unit!");
@@ -802,9 +854,9 @@ void DwarfDebug::endModule() {
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("data_end"));
// End text sections.
- for (unsigned i = 1, N = SectionMap.size(); i <= N; ++i) {
- Asm->OutStreamer.SwitchSection(SectionMap[i]);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("section_end", i));
+ for (unsigned I = 0, E = SectionMap.size(); I != E; ++I) {
+ Asm->OutStreamer.SwitchSection(SectionMap[I]);
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("section_end", I+1));
}
// Compute DIE offsets and sizes.
@@ -816,8 +868,8 @@ void DwarfDebug::endModule() {
// Corresponding abbreviations into a abbrev section.
emitAbbreviations();
- // Emit info into a dwarf accelerator table sections.
- if (DwarfAccelTables) {
+ // Emit info into the dwarf accelerator table sections.
+ if (useDwarfAccelTables()) {
emitAccelNames();
emitAccelObjC();
emitAccelNamespaces();
@@ -825,7 +877,10 @@ void DwarfDebug::endModule() {
}
// Emit info into a debug pubtypes section.
- emitDebugPubTypes();
+ // TODO: When we don't need the option anymore we can
+ // remove all of the code that adds to the table.
+ if (useDarwinGDBCompat())
+ emitDebugPubTypes();
// Emit info into a debug loc section.
emitDebugLoc();
@@ -840,7 +895,11 @@ void DwarfDebug::endModule() {
emitDebugMacInfo();
// Emit inline info.
- emitDebugInlineInfo();
+ // TODO: When we don't need the option anymore we
+ // can remove all of the code that this section
+ // depends upon.
+ if (useDarwinGDBCompat())
+ emitDebugInlineInfo();
// Emit info into a debug str section.
emitDebugStr();
@@ -1014,7 +1073,7 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF,
if (AbsVar)
AbsVar->setMInsn(MInsn);
- // Simple ranges that are fully coalesced.
+ // Simplify ranges that are fully coalesced.
if (History.size() <= 1 || (History.size() == 2 &&
MInsn->isIdenticalTo(History.back()))) {
RegVar->setMInsn(MInsn);
@@ -1267,7 +1326,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
// Coalesce identical entries at the end of History.
if (History.size() >= 2 &&
Prev->isIdenticalTo(History[History.size() - 2])) {
- DEBUG(dbgs() << "Coalesce identical DBG_VALUE entries:\n"
+ DEBUG(dbgs() << "Coalescing identical DBG_VALUE entries:\n"
<< "\t" << *Prev
<< "\t" << *History[History.size() - 2] << "\n");
History.pop_back();
@@ -1283,7 +1342,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
PrevMBB->getLastNonDebugInstr();
if (LastMI == PrevMBB->end()) {
// Drop DBG_VALUE for empty range.
- DEBUG(dbgs() << "Drop DBG_VALUE for empty range:\n"
+ DEBUG(dbgs() << "Dropping DBG_VALUE for empty range:\n"
<< "\t" << *Prev << "\n");
History.pop_back();
}
@@ -1300,9 +1359,10 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
if (!MI->isLabel())
AtBlockEntry = false;
- // First known non DBG_VALUE location marks beginning of function
- // body.
- if (PrologEndLoc.isUnknown() && !MI->getDebugLoc().isUnknown())
+ // First known non-DBG_VALUE and non-frame setup location marks
+ // the beginning of the function body.
+ if (!MI->getFlag(MachineInstr::FrameSetup) &&
+ (PrologEndLoc.isUnknown() && !MI->getDebugLoc().isUnknown()))
PrologEndLoc = MI->getDebugLoc();
// Check if the instruction clobbers any registers with debug vars.
@@ -1382,7 +1442,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
MF->getFunction()->getContext());
recordSourceLine(FnStartDL.getLine(), FnStartDL.getCol(),
FnStartDL.getScope(MF->getFunction()->getContext()),
- DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0);
+ 0);
}
}
@@ -1439,8 +1499,7 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
DIE *CurFnDIE = constructScopeDIE(TheCU, FnScope);
if (!MF->getTarget().Options.DisableFramePointerElim(*MF))
- TheCU->addUInt(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr,
- dwarf::DW_FORM_flag, 1);
+ TheCU->addFlag(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr);
DebugFrames.push_back(FunctionDebugFrameInfo(Asm->getFunctionNumber(),
MMI->getFrameMoves()));
@@ -1710,7 +1769,7 @@ void DwarfDebug::emitDebugInfo() {
Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"),
DwarfAbbrevSectionSym);
Asm->OutStreamer.AddComment("Address Size (in bytes)");
- Asm->EmitInt8(Asm->getTargetData().getPointerSize());
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
emitDIE(Die);
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID()));
@@ -1756,14 +1815,14 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
Asm->EmitInt8(0);
Asm->OutStreamer.AddComment("Op size");
- Asm->EmitInt8(Asm->getTargetData().getPointerSize() + 1);
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize() + 1);
Asm->OutStreamer.AddComment("DW_LNE_set_address");
Asm->EmitInt8(dwarf::DW_LNE_set_address);
Asm->OutStreamer.AddComment("Section end label");
Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd),
- Asm->getTargetData().getPointerSize(),
+ Asm->getDataLayout().getPointerSize(),
0/*AddrSpace*/);
// Mark end of matrix.
@@ -1992,7 +2051,7 @@ void DwarfDebug::emitDebugLoc() {
// Start the dwarf loc section.
Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfLocSection());
- unsigned char Size = Asm->getTargetData().getPointerSize();
+ unsigned char Size = Asm->getDataLayout().getPointerSize();
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0));
unsigned index = 1;
for (SmallVector<DotDebugLocEntry, 4>::iterator
@@ -2089,7 +2148,7 @@ void DwarfDebug::emitDebugRanges() {
// Start the dwarf ranges section.
Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfRangesSection());
- unsigned char Size = Asm->getTargetData().getPointerSize();
+ unsigned char Size = Asm->getDataLayout().getPointerSize();
for (SmallVector<const MCSymbol *, 8>::iterator
I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end();
I != E; ++I) {
@@ -2147,7 +2206,7 @@ void DwarfDebug::emitDebugInlineInfo() {
Asm->OutStreamer.AddComment("Dwarf Version");
Asm->EmitInt16(dwarf::DWARF_VERSION);
Asm->OutStreamer.AddComment("Address Size (in bytes)");
- Asm->EmitInt8(Asm->getTargetData().getPointerSize());
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
for (SmallVector<const MDNode *, 4>::iterator I = InlinedSPNodes.begin(),
E = InlinedSPNodes.end(); I != E; ++I) {
@@ -2178,7 +2237,7 @@ void DwarfDebug::emitDebugInlineInfo() {
if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc");
Asm->OutStreamer.EmitSymbolValue(LI->first,
- Asm->getTargetData().getPointerSize(),0);
+ Asm->getDataLayout().getPointerSize(),0);
}
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index d1d6512..61d9a51 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -21,9 +21,9 @@
#include "llvm/MC/MachineLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/UniqueVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DebugLoc.h"
@@ -96,7 +96,8 @@ typedef struct DotDebugLocEntry {
DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, const ConstantFP *FPtr)
: Begin(B), End(E), Variable(0), Merged(false),
Constant(true) { Constants.CFP = FPtr; EntryKind = E_ConstantFP; }
- DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, const ConstantInt *IPtr)
+ DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E,
+ const ConstantInt *IPtr)
: Begin(B), End(E), Variable(0), Merged(false),
Constant(true) { Constants.CIP = IPtr; EntryKind = E_ConstantInt; }
@@ -158,11 +159,19 @@ public:
bool isArtificial() const {
if (Var.isArtificial())
return true;
- if (Var.getTag() == dwarf::DW_TAG_arg_variable
- && getType().isArtificial())
+ if (getType().isArtificial())
return true;
return false;
}
+
+ bool isObjectPointer() const {
+ if (Var.isObjectPointer())
+ return true;
+ if (getType().isObjectPointer())
+ return true;
+ return false;
+ }
+
bool variableHasComplexAddress() const {
assert(Var.Verify() && "Invalid complex DbgVariable!");
return Var.hasComplexAddress();
@@ -222,7 +231,7 @@ class DwarfDebug {
/// SectionMap - Provides a unique id per text section.
///
- UniqueVector<const MCSection*> SectionMap;
+ SetVector<const MCSection*> SectionMap;
/// CurrentFnArguments - List of Arguments (DbgValues) for current function.
SmallVector<DbgVariable *, 8> CurrentFnArguments;
@@ -307,6 +316,9 @@ class DwarfDebug {
// table for the same directory as DW_at_comp_dir.
StringRef CompilationDir;
+ // A holder for the DarwinGDBCompat flag so that the compile unit can use it.
+ bool isDarwinGDBCompat;
+ bool hasDwarfAccelTables;
private:
/// assignAbbrevNumber - Define a unique number for the abbreviation.
@@ -520,6 +532,11 @@ public:
/// getStringPoolEntry - returns an entry into the string pool with the given
/// string text.
MCSymbol *getStringPoolEntry(StringRef Str);
+
+ /// useDarwinGDBCompat - returns whether to limit some of our debug
+ /// output to what darwin gdb can handle.
+ bool useDarwinGDBCompat() { return isDarwinGDBCompat; }
+ bool useDwarfAccelTables() { return hasDwarfAccelTables; }
};
} // End of namespace llvm
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
index 70cc2e5..08fb6b3 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
@@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
@@ -417,7 +417,7 @@ void DwarfException::EmitExceptionTable() {
// that we're omitting that bit.
TTypeEncoding = dwarf::DW_EH_PE_omit;
// dwarf::DW_EH_PE_absptr
- TypeFormatSize = Asm->getTargetData().getPointerSize();
+ TypeFormatSize = Asm->getDataLayout().getPointerSize();
} else {
// Okay, we have actual filters or typeinfos to emit. As such, we need to
// pick a type encoding for them. We're about to emit a list of pointers to
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
index 75f6056..fe9e493 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
@@ -43,26 +43,6 @@ protected:
/// MMI - Collected machine module information.
MachineModuleInfo *MMI;
- /// EmitExceptionTable - Emit landing pads and actions.
- ///
- /// The general organization of the table is complex, but the basic concepts
- /// are easy. First there is a header which describes the location and
- /// organization of the three components that follow.
- /// 1. The landing pad site information describes the range of code covered
- /// by the try. In our case it's an accumulation of the ranges covered
- /// by the invokes in the try. There is also a reference to the landing
- /// pad that handles the exception once processed. Finally an index into
- /// the actions table.
- /// 2. The action table, in our case, is composed of pairs of type ids
- /// and next action offset. Starting with the action index from the
- /// landing pad site, each type Id is checked for a match to the current
- /// exception. If it matches then the exception and type id are passed
- /// on to the landing pad. Otherwise the next action is looked up. This
- /// chain is terminated with a next action of zero. If no type id is
- /// found the frame is unwound and handling continues.
- /// 3. Type id table contains references to all the C++ typeinfo for all
- /// catches in the function. This tables is reversed indexed base 1.
-
/// SharedTypeIds - How many leading type ids two landing pads have in common.
static unsigned SharedTypeIds(const LandingPadInfo *L,
const LandingPadInfo *R);
@@ -119,6 +99,26 @@ protected:
const RangeMapType &PadMap,
const SmallVectorImpl<const LandingPadInfo *> &LPs,
const SmallVectorImpl<unsigned> &FirstActions);
+
+ /// EmitExceptionTable - Emit landing pads and actions.
+ ///
+ /// The general organization of the table is complex, but the basic concepts
+ /// are easy. First there is a header which describes the location and
+ /// organization of the three components that follow.
+ /// 1. The landing pad site information describes the range of code covered
+ /// by the try. In our case it's an accumulation of the ranges covered
+ /// by the invokes in the try. There is also a reference to the landing
+ /// pad that handles the exception once processed. Finally an index into
+ /// the actions table.
+ /// 2. The action table, in our case, is composed of pairs of type ids
+ /// and next action offset. Starting with the action index from the
+ /// landing pad site, each type Id is checked for a match to the current
+ /// exception. If it matches then the exception and type id are passed
+ /// on to the landing pad. Otherwise the next action is looked up. This
+ /// chain is terminated with a next action of zero. If no type id is
+ /// found the frame is unwound and handling continues.
+ /// 3. Type id table contains references to all the C++ typeinfo for all
+ /// catches in the function. This table is reverse indexed, base 1.
void EmitExceptionTable();
public:
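
The moved comment describes the GCC-style LSDA. As a rough picture of that layout (encodings and offsets vary per target; illustrative only):

// LSDA layout, conceptually:
//   header      : LPStart/TType encodings and offset to the type table
//   call sites  : [region start, length, landing pad, action index]...
//   action table: [type filter, next-action offset]... (0 ends the chain)
//   type table  : typeinfo references, indexed from 1 in reverse order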
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
index 1153817..f7c0119 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
@@ -20,7 +20,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/SmallString.h"
@@ -91,7 +91,7 @@ void OcamlGCMetadataPrinter::beginAssembly(AsmPrinter &AP) {
/// either condition is detected in a function which uses the GC.
///
void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) {
- unsigned IntPtrSize = AP.TM.getTargetData()->getPointerSize();
+ unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
EmitCamlGlobal(getModule(), AP, "code_end");
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp
index b83aa5a..70742a8 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp
@@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
diff --git a/contrib/llvm/lib/CodeGen/BranchFolding.cpp b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
index fb65bb7..6f4c5a2 100644
--- a/contrib/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
@@ -357,9 +357,8 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
if (I1 == MBB1->begin() && I2 != MBB2->begin()) {
--I2;
while (I2->isDebugValue()) {
- if (I2 == MBB2->begin()) {
+ if (I2 == MBB2->begin())
return TailLen;
- }
--I2;
}
++I2;
@@ -482,21 +481,19 @@ bool
BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
if (getHash() < o.getHash())
return true;
- else if (getHash() > o.getHash())
+ if (getHash() > o.getHash())
return false;
- else if (getBlock()->getNumber() < o.getBlock()->getNumber())
+ if (getBlock()->getNumber() < o.getBlock()->getNumber())
return true;
- else if (getBlock()->getNumber() > o.getBlock()->getNumber())
+ if (getBlock()->getNumber() > o.getBlock()->getNumber())
return false;
- else {
- // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
- // an object with itself.
+ // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
+ // an object with itself.
#ifndef _GLIBCXX_DEBUG
- llvm_unreachable("Predecessor appears twice");
+ llvm_unreachable("Predecessor appears twice");
#else
- return false;
+ return false;
#endif
- }
}
/// CountTerminators - Count the number of terminators in the given
@@ -574,7 +571,8 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1,
// instructions that would be deleted in the merge.
MachineFunction *MF = MBB1->getParent();
if (EffectiveTailLen >= 2 &&
- MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
+ MF->getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize) &&
(I1 == MBB1->begin() || I2 == MBB2->begin()))
return true;
@@ -1554,8 +1552,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Uses.insert(*AI);
} else {
- if (Uses.count(Reg)) {
- Uses.erase(Reg);
+ if (Uses.erase(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
Uses.erase(*SubRegs); // Use sub-registers to be conservative
}
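
The hunk above folds a count()+erase() pair into a single call: set-like containers report how many elements erase removed, so the membership test comes for free. A sketch with std::set (Uses in the patched code is an LLVM small set whose erase likewise reports success):

#include <set>

// True iff Reg was present (and has now been removed).
static bool removeIfPresent(std::set<unsigned> &Uses, unsigned Reg) {
  return Uses.erase(Reg) != 0;
}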
diff --git a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
index 939af3f..dee339a 100644
--- a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -9,7 +9,6 @@
#define DEBUG_TYPE "calcspillweights"
-#include "llvm/Function.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -42,8 +41,7 @@ void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const {
bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
- << "********** Function: "
- << MF.getFunction()->getName() << '\n');
+ << "********** Function: " << MF.getName() << '\n');
LiveIntervals &LIS = getAnalysis<LiveIntervals>();
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -166,7 +164,7 @@ void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
continue;
float hweight = Hint[hint] += weight;
if (TargetRegisterInfo::isPhysicalRegister(hint)) {
- if (hweight > bestPhys && LIS.isAllocatable(hint))
+ if (hweight > bestPhys && mri.isAllocatable(hint))
bestPhys = hweight, hintPhys = hint;
} else {
if (hweight > bestVirt)
diff --git a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
index 0b747fd..22b9140 100644
--- a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -18,7 +18,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;
@@ -50,7 +50,7 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
if (MinAlign > (int)Align)
Align = MinAlign;
MF.getFrameInfo()->ensureMaxAlignment(Align);
- TM.getTargetLowering()->HandleByVal(this, Size);
+ TM.getTargetLowering()->HandleByVal(this, Size, Align);
unsigned Offset = AllocateStack(Size, Align);
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
diff --git a/contrib/llvm/lib/CodeGen/CodeGen.cpp b/contrib/llvm/lib/CodeGen/CodeGen.cpp
index fb2c2e8..a53f6f8 100644
--- a/contrib/llvm/lib/CodeGen/CodeGen.cpp
+++ b/contrib/llvm/lib/CodeGen/CodeGen.cpp
@@ -41,6 +41,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeMachineCopyPropagationPass(Registry);
initializeMachineCSEPass(Registry);
initializeMachineDominatorTreePass(Registry);
+ initializeMachinePostDominatorTreePass(Registry);
initializeMachineLICMPass(Registry);
initializeMachineLoopInfoPass(Registry);
initializeMachineModuleInfoPass(Registry);
@@ -56,6 +57,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeRegisterCoalescerPass(Registry);
initializeSlotIndexesPass(Registry);
initializeStackProtectorPass(Registry);
+ initializeStackColoringPass(Registry);
initializeStackSlotColoringPass(Registry);
initializeStrongPHIEliminationPass(Registry);
initializeTailDuplicatePassPass(Registry);
diff --git a/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp b/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
index 99233df..d8e06c3 100644
--- a/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
+++ b/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
@@ -373,7 +373,7 @@ bool CodePlacementOpt::OptimizeIntraLoopEdges(MachineFunction &MF) {
///
bool CodePlacementOpt::AlignLoops(MachineFunction &MF) {
const Function *F = MF.getFunction();
- if (F->hasFnAttr(Attribute::OptimizeForSize))
+ if (F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize))
return false;
unsigned Align = TLI->getPrefLoopAlignment();
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index a9de1c749..377b471 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -527,7 +527,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
if (Edge->getKind() == SDep::Anti) {
AntiDepReg = Edge->getReg();
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
- if (!RegClassInfo.isAllocatable(AntiDepReg))
+ if (!MRI.isAllocatable(AntiDepReg))
// Don't break anti-dependencies on non-allocatable registers.
AntiDepReg = 0;
else if (KeepRegs.test(AntiDepReg))
diff --git a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index b4394e8..8964269 100644
--- a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -33,7 +33,6 @@ namespace {
const MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
BitVector LivePhysRegs;
- BitVector ReservedRegs;
public:
static char ID; // Pass identification, replacement for typeid
@@ -70,7 +69,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
// Don't delete live physreg defs, or any reserved register defs.
- if (LivePhysRegs.test(Reg) || ReservedRegs.test(Reg))
+ if (LivePhysRegs.test(Reg) || MRI->isReserved(Reg))
return false;
} else {
if (!MRI->use_nodbg_empty(Reg))
@@ -90,9 +89,6 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getTarget().getRegisterInfo();
TII = MF.getTarget().getInstrInfo();
- // Treat reserved registers as always live.
- ReservedRegs = TRI->getReservedRegs(MF);
-
// Loop over all instructions in all blocks, from bottom to top, so that it's
// more likely that chains of dependent but ultimately dead instructions will
// be cleaned up.
@@ -101,7 +97,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
MachineBasicBlock *MBB = &*I;
// Start out assuming that reserved registers are live out of this block.
- LivePhysRegs = ReservedRegs;
+ LivePhysRegs = MRI->getReservedRegs();
// Also add any explicit live-out physregs for this block.
if (!MBB->empty() && MBB->back().isReturn())
diff --git a/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp b/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp
index f9347ef..d5d8404 100644
--- a/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp
+++ b/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp
@@ -18,7 +18,6 @@
#define DEBUG_TYPE "early-ifcvt"
#include "MachineTraceMetrics.h"
-#include "llvm/Function.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
@@ -32,9 +31,9 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -775,11 +774,11 @@ bool EarlyIfConverter::tryConvertIf(MachineBasicBlock *MBB) {
bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
- << "********** Function: "
- << ((Value*)MF.getFunction())->getName() << '\n');
+ << "********** Function: " << MF.getName() << '\n');
TII = MF.getTarget().getInstrInfo();
TRI = MF.getTarget().getRegisterInfo();
- SchedModel = MF.getTarget().getInstrItineraryData()->SchedModel;
+ SchedModel =
+ MF.getTarget().getSubtarget<TargetSubtargetInfo>().getSchedModel();
MRI = &MF.getRegInfo();
DomTree = &getAnalysis<MachineDominatorTree>();
Loops = getAnalysisIfAvailable<MachineLoopInfo>();
@@ -798,6 +797,5 @@ bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
if (tryConvertIf(I->getBlock()))
Changed = true;
- MF.verify(this, "After early if-conversion");
return Changed;
}
diff --git a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
index fee8e47..ed78f19 100644
--- a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
@@ -626,9 +626,12 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
}
dv->Instrs.push_back(mi);
- // Finally set all defs and non-collapsed uses to dv.
- for (unsigned i = 0, e = mi->getDesc().getNumOperands(); i != e; ++i) {
- MachineOperand &mo = mi->getOperand(i);
+ // Finally set all defs and non-collapsed uses to dv. We must iterate through
+ // all the operands, including imp-def ones.
+ for (MachineInstr::mop_iterator ii = mi->operands_begin(),
+ ee = mi->operands_end();
+ ii != ee; ++ii) {
+ MachineOperand &mo = *ii;
if (!mo.isReg()) continue;
int rx = regIndex(mo.getReg());
if (rx < 0) continue;
@@ -654,7 +657,7 @@ bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
bool anyregs = false;
for (TargetRegisterClass::const_iterator I = RC->begin(), E = RC->end();
I != E; ++I)
- if (MF->getRegInfo().isPhysRegOrOverlapUsed(*I)) {
+ if (MF->getRegInfo().isPhysRegUsed(*I)) {
anyregs = true;
break;
}
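
The fix above matters because getDesc().getNumOperands() counts only the operands
declared in the instruction description, so implicit operands appended later
(imp-def/imp-use) were silently skipped. A sketch of the full-operand walk, using only
the iterator API from the hunk:

    #include "llvm/CodeGen/MachineInstr.h"

    // Visit every register operand of MI, implicit ones included.
    static void visitAllRegOperands(llvm::MachineInstr *MI) {
      for (llvm::MachineInstr::mop_iterator I = MI->operands_begin(),
                                            E = MI->operands_end();
           I != E; ++I) {
        llvm::MachineOperand &MO = *I;
        if (!MO.isReg() || MO.getReg() == 0)
          continue;
        // ... process MO; imp-defs reach this point too ...
      }
    }
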
diff --git a/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp b/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
index 7a17331..ffe4b63 100644
--- a/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -14,7 +14,6 @@
#define DEBUG_TYPE "postrapseudos"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -190,8 +189,7 @@ bool ExpandPostRA::LowerCopy(MachineInstr *MI) {
bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "Machine Function\n"
<< "********** EXPANDING POST-RA PSEUDO INSTRS **********\n"
- << "********** Function: "
- << MF.getFunction()->getName() << '\n');
+ << "********** Function: " << MF.getName() << '\n');
TRI = MF.getTarget().getRegisterInfo();
TII = MF.getTarget().getInstrInfo();
diff --git a/contrib/llvm/lib/CodeGen/GCStrategy.cpp b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
index 506b5cf..f4755bb 100644
--- a/contrib/llvm/lib/CodeGen/GCStrategy.cpp
+++ b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
@@ -20,6 +20,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/DominatorInternals.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -387,9 +388,16 @@ void GCMachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) {
const TargetFrameLowering *TFI = TM->getFrameLowering();
assert(TFI && "TargetRegisterInfo not available!");
- for (GCFunctionInfo::roots_iterator RI = FI->roots_begin(),
- RE = FI->roots_end(); RI != RE; ++RI)
- RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num);
+ for (GCFunctionInfo::roots_iterator RI = FI->roots_begin();
+ RI != FI->roots_end();) {
+ // If the root references a dead object, no need to keep it.
+ if (MF.getFrameInfo()->isDeadObjectIndex(RI->Num)) {
+ RI = FI->removeStackRoot(RI);
+ } else {
+ RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num);
+ ++RI;
+ }
+ }
}
bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
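
The loop above is the standard erase-or-advance idiom: the iterator advances only when
an element is kept, and is otherwise reseated from the removal call's return value so
no root is skipped. A self-contained illustration with a std::list (isDead() is a
hypothetical stand-in for MachineFrameInfo::isDeadObjectIndex()):

    #include <list>

    static bool isDead(int FrameIdx) { return FrameIdx < 0; } // stand-in

    static void pruneDeadRoots(std::list<int> &Roots) {
      for (std::list<int>::iterator I = Roots.begin(); I != Roots.end();) {
        if (isDead(*I))
          I = Roots.erase(I); // erase returns the next valid iterator
        else
          ++I;                // advance only when the element is kept
      }
    }
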
diff --git a/contrib/llvm/lib/CodeGen/IfConversion.cpp b/contrib/llvm/lib/CodeGen/IfConversion.cpp
index 4214ba1..31e36f0 100644
--- a/contrib/llvm/lib/CodeGen/IfConversion.cpp
+++ b/contrib/llvm/lib/CodeGen/IfConversion.cpp
@@ -13,7 +13,6 @@
#define DEBUG_TYPE "ifcvt"
#include "BranchFolding.h"
-#include "llvm/Function.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
@@ -282,7 +281,7 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
}
DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
- << MF.getFunction()->getName() << "\'");
+ << MF.getName() << "\'");
if (FnNum < IfCvtFnStart || (IfCvtFnStop != -1 && FnNum > IfCvtFnStop)) {
DEBUG(dbgs() << " skipped\n");
@@ -997,14 +996,13 @@ static void UpdatePredRedefs(MachineInstr *MI, SmallSet<unsigned,4> &Redefs,
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
unsigned Reg = Defs[i];
- if (Redefs.count(Reg)) {
+ if (!Redefs.insert(Reg)) {
if (AddImpUse)
// Treat predicated update as read + write.
MI->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
true/*IsImp*/,false/*IsKill*/,
false/*IsDead*/,true/*IsUndef*/));
} else {
- Redefs.insert(Reg);
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
Redefs.insert(*SubRegs);
}
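
The IfConversion hunk folds a count()-then-insert() pair into a single insert():
LLVM's SmallSet::insert returns false when the element was already present, which is
exactly what the old count() test checked. The same collapse with std::set, whose
insert reports novelty via the .second of its return value:

    #include <set>

    // Returns true if Reg had already been recorded (i.e. this is a redef).
    static bool recordRedef(std::set<unsigned> &Redefs, unsigned Reg) {
      bool WasNew = Redefs.insert(Reg).second;
      return !WasNew; // SmallSet returns this bool directly in the hunk above
    }
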
diff --git a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
index 07e37af..37828a7 100644
--- a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -613,7 +613,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
propagateSiblingValue(SVI);
} while (!WorkList.empty());
- // Look up the value we were looking for. We already did this lokup at the
+ // Look up the value we were looking for. We already did this lookup at the
// top of the function, but SibValues may have been invalidated.
SVI = SibValues.find(UseVNI);
assert(SVI != SibValues.end() && "Didn't compute requested info");
@@ -863,7 +863,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
// If the instruction also writes VirtReg.reg, it had better not require the
// same register for uses and defs.
SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
- MIBundleOperands::RegInfo RI =
+ MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
if (RI.Tied) {
markValueUsed(&VirtReg, ParentVNI);
@@ -1142,7 +1142,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
// Analyze instruction.
SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
- MIBundleOperands::RegInfo RI =
+ MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
// Find the slot index where this instruction reads and writes OldLI.
diff --git a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 8d2282a..6120ae56 100644
--- a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -21,7 +21,7 @@
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
template <class ArgIt>
@@ -457,7 +457,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break; // Strip out annotate intrinsic
case Intrinsic::memcpy: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -468,7 +468,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memmove: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -479,7 +479,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memset: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
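
All three memcpy/memmove/memset cases make the same change: the pointer-sized integer
type is now held as a plain Type*, and the size operand is still widened or truncated
to it before building the libc call. A hedged sketch of that cast, assuming the
DataLayout and IRBuilder APIs used in the hunk (header paths are those of this tree):

    #include "llvm/DataLayout.h"
    #include "llvm/IRBuilder.h"

    // Widen or truncate Size to the target's pointer-sized integer type.
    static llvm::Value *castSizeToIntPtr(llvm::IRBuilder<> &Builder,
                                         const llvm::DataLayout &TD,
                                         llvm::LLVMContext &Context,
                                         llvm::Value *Size) {
      llvm::Type *IntPtr = TD.getIntPtrType(Context);
      return Builder.CreateIntCast(Size, IntPtr, /*isSigned=*/false);
    }
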
diff --git a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
index d631726..defc127 100644
--- a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
@@ -687,8 +687,7 @@ bool LDVImpl::runOnMachineFunction(MachineFunction &mf) {
clear();
LS.initialize(mf);
DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: "
- << ((Value*)mf.getFunction())->getName()
- << " **********\n");
+ << mf.getName() << " **********\n");
bool Changed = collectDebugValues(mf);
computeIntervals();
diff --git a/contrib/llvm/lib/CodeGen/LiveInterval.cpp b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
index 0a795e6..8585cbb 100644
--- a/contrib/llvm/lib/CodeGen/LiveInterval.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
@@ -27,6 +27,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "RegisterCoalescer.h"
#include <algorithm>
using namespace llvm;
@@ -58,8 +59,16 @@ VNInfo *LiveInterval::createDeadDef(SlotIndex Def,
return VNI;
}
if (SlotIndex::isSameInstr(Def, I->start)) {
- assert(I->start == Def && "Cannot insert def, already live");
- assert(I->valno->def == Def && "Inconsistent existing value def");
+ assert(I->valno->def == I->start && "Inconsistent existing value def");
+
+ // It is possible to have both normal and early-clobber defs of the same
+ // register on an instruction. It doesn't make a lot of sense, but it is
+ // possible to specify in inline assembly.
+ //
+ // Just convert everything to early-clobber.
+ Def = std::min(Def, I->start);
+ if (Def != I->start)
+ I->start = I->valno->def = Def;
return I->valno;
}
assert(SlotIndex::isEarlierInstr(Def, I->start) && "Already live at def");
@@ -68,21 +77,6 @@ VNInfo *LiveInterval::createDeadDef(SlotIndex Def,
return VNI;
}
-/// killedInRange - Return true if the interval has kills in [Start,End).
-bool LiveInterval::killedInRange(SlotIndex Start, SlotIndex End) const {
- Ranges::const_iterator r =
- std::lower_bound(ranges.begin(), ranges.end(), End);
-
- // Now r points to the first interval with start >= End, or ranges.end().
- if (r == ranges.begin())
- return false;
-
- --r;
- // Now r points to the last interval with end <= End.
- // r->end is the kill point.
- return r->end >= Start && r->end < End;
-}
-
// overlaps - Return true if the intersection of the two live intervals is
// not empty.
//
@@ -142,6 +136,48 @@ bool LiveInterval::overlapsFrom(const LiveInterval& other,
return false;
}
+bool LiveInterval::overlaps(const LiveInterval &Other,
+ const CoalescerPair &CP,
+ const SlotIndexes &Indexes) const {
+ assert(!empty() && "empty interval");
+ if (Other.empty())
+ return false;
+
+ // Use binary searches to find initial positions.
+ const_iterator I = find(Other.beginIndex());
+ const_iterator IE = end();
+ if (I == IE)
+ return false;
+ const_iterator J = Other.find(I->start);
+ const_iterator JE = Other.end();
+ if (J == JE)
+ return false;
+
+ for (;;) {
+ // J has just been advanced to satisfy:
+ assert(J->end >= I->start);
+ // Check for an overlap.
+ if (J->start < I->end) {
+ // I and J are overlapping. Find the later start.
+ SlotIndex Def = std::max(I->start, J->start);
+ // Allow the overlap if Def is a coalescable copy.
+ if (Def.isBlock() ||
+ !CP.isCoalescable(Indexes.getInstructionFromIndex(Def)))
+ return true;
+ }
+ // Advance the iterator that ends first to check for more overlaps.
+ if (J->end > I->end) {
+ std::swap(I, J);
+ std::swap(IE, JE);
+ }
+ // Advance J until J->end >= I->start.
+ do
+ if (++J == JE)
+ return false;
+ while (J->end < I->start);
+ }
+}
+
/// overlaps - Return true if the live interval overlaps a range specified
/// by [Start, End).
bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const {
@@ -399,7 +435,7 @@ void LiveInterval::join(LiveInterval &Other,
// If we have to apply a mapping to our base interval assignment, rewrite it
// now.
- if (MustMapCurValNos) {
+ if (MustMapCurValNos && !empty()) {
// Map the first live range.
iterator OutIt = begin();
@@ -673,27 +709,6 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
return V2;
}
-void LiveInterval::Copy(const LiveInterval &RHS,
- MachineRegisterInfo *MRI,
- VNInfo::Allocator &VNInfoAllocator) {
- ranges.clear();
- valnos.clear();
- std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(RHS.reg);
- MRI->setRegAllocationHint(reg, Hint.first, Hint.second);
-
- weight = RHS.weight;
- for (unsigned i = 0, e = RHS.getNumValNums(); i != e; ++i) {
- const VNInfo *VNI = RHS.getValNumInfo(i);
- createValueCopy(VNI, VNInfoAllocator);
- }
- for (unsigned i = 0, e = RHS.ranges.size(); i != e; ++i) {
- const LiveRange &LR = RHS.ranges[i];
- addRange(LiveRange(LR.start, LR.end, getValNumInfo(LR.valno->id)));
- }
-
- verify();
-}
-
unsigned LiveInterval::getSize() const {
unsigned Sum = 0;
for (const_iterator I = begin(), E = end(); I != E; ++I)
@@ -705,9 +720,11 @@ raw_ostream& llvm::operator<<(raw_ostream& os, const LiveRange &LR) {
return os << '[' << LR.start << ',' << LR.end << ':' << LR.valno->id << ")";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveRange::dump() const {
dbgs() << *this << "\n";
}
+#endif
void LiveInterval::print(raw_ostream &OS) const {
if (empty())
@@ -740,9 +757,11 @@ void LiveInterval::print(raw_ostream &OS) const {
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveInterval::dump() const {
dbgs() << *this << "\n";
}
+#endif
#ifndef NDEBUG
void LiveInterval::verify() const {
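
The new overlaps(Other, CP, Indexes) above is a classic two-cursor sweep over sorted,
disjoint segment lists: always advance the cursor whose segment ends first, and report
an overlap when neither segment ends before the other begins. A self-contained version
of the core walk (plain STL; the coalescable-copy exemption in the LLVM code is
deliberately elided here):

    #include <utility>
    #include <vector>

    typedef std::pair<int, int> Seg; // [start, end), sorted and disjoint

    static bool overlaps(const std::vector<Seg> &A, const std::vector<Seg> &B) {
      size_t i = 0, j = 0;
      while (i < A.size() && j < B.size()) {
        if (A[i].second <= B[j].first)
          ++i;         // A's segment ends before B's begins: advance A
        else if (B[j].second <= A[i].first)
          ++j;         // B's segment ends before A's begins: advance B
        else
          return true; // the segments intersect
      }
      return false;
    }
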
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index d0f8ae1..4e75d89 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -34,6 +34,7 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "LiveRangeCalc.h"
+#include "VirtRegMap.h"
#include <algorithm>
#include <limits>
#include <cmath>
@@ -109,8 +110,6 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
DomTree = &getAnalysis<MachineDominatorTree>();
if (!LRCalc)
LRCalc = new LiveRangeCalc();
- AllocatableRegs = TRI->getAllocatableSet(fn);
- ReservedRegs = TRI->getReservedRegs(fn);
// Allocate space for all virtual registers.
VirtRegIntervals.resize(MRI->getNumVirtRegs());
@@ -147,6 +146,11 @@ void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
OS << PrintReg(Reg) << " = " << getInterval(Reg) << '\n';
}
+ OS << "RegMasks:";
+ for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i)
+ OS << ' ' << RegMaskSlots[i];
+ OS << '\n';
+
printInstrs(OS);
}
@@ -155,9 +159,11 @@ void LiveIntervals::printInstrs(raw_ostream &OS) const {
MF->print(OS, Indexes);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveIntervals::dumpInstrs() const {
printInstrs(dbgs());
}
+#endif
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
@@ -382,8 +388,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
/// which a variable is live
void LiveIntervals::computeIntervals() {
DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
- << "********** Function: "
- << ((Value*)MF->getFunction())->getName() << '\n');
+ << "********** Function: " << MF->getName() << '\n');
RegMaskBlocks.resize(MF->getNumBlockIDs());
@@ -440,7 +445,7 @@ void LiveIntervals::computeIntervals() {
// Compute the number of register mask instructions in this block.
std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
- RMB.second = RegMaskSlots.size() - RMB.first;;
+ RMB.second = RegMaskSlots.size() - RMB.first;
}
// Create empty intervals for registers defined by implicit_def's (except
@@ -497,7 +502,7 @@ void LiveIntervals::computeRegMasks() {
RegMaskBits.push_back(MO->getRegMask());
}
// Compute the number of register mask instructions in this block.
- RMB.second = RegMaskSlots.size() - RMB.first;;
+ RMB.second = RegMaskSlots.size() - RMB.first;
}
}
@@ -540,11 +545,11 @@ void LiveIntervals::computeRegUnitInterval(LiveInterval *LI) {
// Ignore uses of reserved registers. We only track defs of those.
for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
unsigned Root = *Roots;
- if (!isReserved(Root) && !MRI->reg_empty(Root))
+ if (!MRI->isReserved(Root) && !MRI->reg_empty(Root))
LRCalc->extendToUses(LI, Root);
for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
unsigned Reg = *Supers;
- if (!isReserved(Reg) && !MRI->reg_empty(Reg))
+ if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
LRCalc->extendToUses(LI, Reg);
}
}
@@ -729,17 +734,100 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
return CanSeparate;
}
+void LiveIntervals::extendToIndices(LiveInterval *LI,
+ ArrayRef<SlotIndex> Indices) {
+ assert(LRCalc && "LRCalc not initialized.");
+ LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ LRCalc->extend(LI, Indices[i]);
+}
+
+void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill,
+ SmallVectorImpl<SlotIndex> *EndPoints) {
+ LiveRangeQuery LRQ(*LI, Kill);
+ VNInfo *VNI = LRQ.valueOut();
+ if (!VNI)
+ return;
+
+ MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
+ SlotIndex MBBStart, MBBEnd;
+ tie(MBBStart, MBBEnd) = Indexes->getMBBRange(KillMBB);
+
+ // If VNI isn't live out from KillMBB, the value is trivially pruned.
+ if (LRQ.endPoint() < MBBEnd) {
+ LI->removeRange(Kill, LRQ.endPoint());
+ if (EndPoints) EndPoints->push_back(LRQ.endPoint());
+ return;
+ }
+
+ // VNI is live out of KillMBB.
+ LI->removeRange(Kill, MBBEnd);
+ if (EndPoints) EndPoints->push_back(MBBEnd);
+
+ // Find all blocks that are reachable from KillMBB without leaving VNI's live
+ // range. It is possible that KillMBB itself is reachable, so start a DFS
+ // from each successor.
+ typedef SmallPtrSet<MachineBasicBlock*, 9> VisitedTy;
+ VisitedTy Visited;
+ for (MachineBasicBlock::succ_iterator
+ SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
+ SuccI != SuccE; ++SuccI) {
+ for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
+ I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
+ I != E;) {
+ MachineBasicBlock *MBB = *I;
+
+ // Check if VNI is live in to MBB.
+ tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
+ LiveRangeQuery LRQ(*LI, MBBStart);
+ if (LRQ.valueIn() != VNI) {
+ // This block isn't part of the VNI live range. Prune the search.
+ I.skipChildren();
+ continue;
+ }
+
+ // Prune the search if VNI is killed in MBB.
+ if (LRQ.endPoint() < MBBEnd) {
+ LI->removeRange(MBBStart, LRQ.endPoint());
+ if (EndPoints) EndPoints->push_back(LRQ.endPoint());
+ I.skipChildren();
+ continue;
+ }
+
+ // VNI is live through MBB.
+ LI->removeRange(MBBStart, MBBEnd);
+ if (EndPoints) EndPoints->push_back(MBBEnd);
+ ++I;
+ }
+ }
+}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
-void LiveIntervals::addKillFlags() {
+void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
+ // Keep track of regunit ranges.
+ SmallVector<std::pair<LiveInterval*, LiveInterval::iterator>, 8> RU;
+
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (MRI->reg_nodbg_empty(Reg))
continue;
LiveInterval *LI = &getInterval(Reg);
+ if (LI->empty())
+ continue;
+
+ // Find the regunit intervals for the assigned register. They may overlap
+ // the virtual register live range, cancelling any kills.
+ RU.clear();
+ for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
+ ++Units) {
+ LiveInterval *RUInt = &getRegUnit(*Units);
+ if (RUInt->empty())
+ continue;
+ RU.push_back(std::make_pair(RUInt, RUInt->find(LI->begin()->end)));
+ }
// Every instruction that kills Reg corresponds to a live range end point.
for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
@@ -750,7 +838,32 @@ void LiveIntervals::addKillFlags() {
MachineInstr *MI = getInstructionFromIndex(RI->end);
if (!MI)
continue;
- MI->addRegisterKilled(Reg, NULL);
+
+ // Check if any of the regunits are live beyond the end of RI. That could
+ // happen when a physreg is defined as a copy of a virtreg:
+ //
+ // %EAX = COPY %vreg5
+ // FOO %vreg5 <--- MI, cancel kill because %EAX is live.
+ // BAR %EAX<kill>
+ //
+ // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
+ bool CancelKill = false;
+ for (unsigned u = 0, e = RU.size(); u != e; ++u) {
+ LiveInterval *RInt = RU[u].first;
+ LiveInterval::iterator &I = RU[u].second;
+ if (I == RInt->end())
+ continue;
+ I = RInt->advanceTo(I, RI->end);
+ if (I == RInt->end() || I->start >= RI->end)
+ continue;
+ // I is overlapping RI.
+ CancelKill = true;
+ break;
+ }
+ if (CancelKill)
+ MI->clearRegisterKills(Reg, NULL);
+ else
+ MI->addRegisterKilled(Reg, NULL);
}
}
}
@@ -900,497 +1013,321 @@ private:
LiveIntervals& LIS;
const MachineRegisterInfo& MRI;
const TargetRegisterInfo& TRI;
+ SlotIndex OldIdx;
SlotIndex NewIdx;
-
- typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
- typedef DenseSet<IntRangePair> RangeSet;
-
- struct RegRanges {
- LiveRange* Use;
- LiveRange* EC;
- LiveRange* Dead;
- LiveRange* Def;
- RegRanges() : Use(0), EC(0), Dead(0), Def(0) {}
- };
- typedef DenseMap<unsigned, RegRanges> BundleRanges;
+ SmallPtrSet<LiveInterval*, 8> Updated;
+ bool UpdateFlags;
public:
HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
- const TargetRegisterInfo& TRI, SlotIndex NewIdx)
- : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}
-
- // Update intervals for all operands of MI from OldIdx to NewIdx.
- // This assumes that MI used to be at OldIdx, and now resides at
- // NewIdx.
- void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) {
- assert(NewIdx != OldIdx && "No-op move? That's a bit strange.");
-
- // Collect the operands.
- RangeSet Entering, Internal, Exiting;
- bool hasRegMaskOp = false;
- collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
-
- // To keep the LiveRanges valid within an interval, move the ranges closest
- // to the destination first. This prevents ranges from overlapping, so that
- // APIs like removeRange still work.
- if (NewIdx < OldIdx) {
- moveAllEnteringFrom(OldIdx, Entering);
- moveAllInternalFrom(OldIdx, Internal);
- moveAllExitingFrom(OldIdx, Exiting);
- }
- else {
- moveAllExitingFrom(OldIdx, Exiting);
- moveAllInternalFrom(OldIdx, Internal);
- moveAllEnteringFrom(OldIdx, Entering);
- }
-
- if (hasRegMaskOp)
- updateRegMaskSlots(OldIdx);
-
-#ifndef NDEBUG
- LIValidator validator;
- validator = std::for_each(Entering.begin(), Entering.end(), validator);
- validator = std::for_each(Internal.begin(), Internal.end(), validator);
- validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
- assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness.");
-#endif
-
+ const TargetRegisterInfo& TRI,
+ SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
+ : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
+ UpdateFlags(UpdateFlags) {}
+
+ // FIXME: UpdateFlags is a workaround that creates live intervals for all
+ // physregs, even those that aren't needed for regalloc, in order to update
+ // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
+ // flags, and postRA passes will use a live register utility instead.
+ LiveInterval *getRegUnitLI(unsigned Unit) {
+ if (UpdateFlags)
+ return &LIS.getRegUnit(Unit);
+ return LIS.getCachedRegUnit(Unit);
}
- // Update intervals for all operands of MI to refer to BundleStart's
- // SlotIndex.
- void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) {
- if (MI == BundleStart)
- return; // Bundling instr with itself - nothing to do.
-
- SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI);
- assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI &&
- "SlotIndex <-> Instruction mapping broken for MI");
-
- // Collect all ranges already in the bundle.
- MachineBasicBlock::instr_iterator BII(BundleStart);
- RangeSet Entering, Internal, Exiting;
- bool hasRegMaskOp = false;
- collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
- assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
- for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) {
- if (&*BII == MI)
+ /// Update all live ranges touched by MI, assuming a move from OldIdx to
+ /// NewIdx.
+ void updateAllRanges(MachineInstr *MI) {
+ DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
+ bool hasRegMask = false;
+ for (MIOperands MO(MI); MO.isValid(); ++MO) {
+ if (MO->isRegMask())
+ hasRegMask = true;
+ if (!MO->isReg())
continue;
- collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
- assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
- }
-
- BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);
-
- Entering.clear();
- Internal.clear();
- Exiting.clear();
- collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
- assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
+ // Aggressively clear all kill flags.
+ // They are reinserted by VirtRegRewriter.
+ if (MO->isUse())
+ MO->setIsKill(false);
- DEBUG(dbgs() << "Entering: " << Entering.size() << "\n");
- DEBUG(dbgs() << "Internal: " << Internal.size() << "\n");
- DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n");
-
- moveAllEnteringFromInto(OldIdx, Entering, BR);
- moveAllInternalFromInto(OldIdx, Internal, BR);
- moveAllExitingFromInto(OldIdx, Exiting, BR);
-
-
-#ifndef NDEBUG
- LIValidator validator;
- validator = std::for_each(Entering.begin(), Entering.end(), validator);
- validator = std::for_each(Internal.begin(), Internal.end(), validator);
- validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
- assert(validator.rangesOk() && "moveAllOperandsInto broke liveness.");
-#endif
- }
-
-private:
-
-#ifndef NDEBUG
- class LIValidator {
- private:
- DenseSet<const LiveInterval*> Checked, Bogus;
- public:
- void operator()(const IntRangePair& P) {
- const LiveInterval* LI = P.first;
- if (Checked.count(LI))
- return;
- Checked.insert(LI);
- if (LI->empty())
- return;
- SlotIndex LastEnd = LI->begin()->start;
- for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
- LRI != LRE; ++LRI) {
- const LiveRange& LR = *LRI;
- if (LastEnd > LR.start || LR.start >= LR.end)
- Bogus.insert(LI);
- LastEnd = LR.end;
- }
- }
-
- bool rangesOk() const {
- return Bogus.empty();
- }
- };
-#endif
-
- // Collect IntRangePairs for all operands of MI that may need fixing.
- // Treats MI's index as OldIdx (regardless of what it is in SlotIndexes'
- // maps).
- void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
- RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
- hasRegMaskOp = false;
- for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
- MOE = MI->operands_end();
- MOI != MOE; ++MOI) {
- const MachineOperand& MO = *MOI;
-
- if (MO.isRegMask()) {
- hasRegMaskOp = true;
+ unsigned Reg = MO->getReg();
+ if (!Reg)
continue;
- }
-
- if (!MO.isReg() || MO.getReg() == 0)
- continue;
-
- unsigned Reg = MO.getReg();
-
- // TODO: Currently we're skipping uses that are reserved or have no
- // interval, but we're not updating their kills. This should be
- // fixed.
- if (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg))
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ updateRange(LIS.getInterval(Reg));
continue;
-
- // Collect ranges for register units. These live ranges are computed on
- // demand, so just skip any that haven't been computed yet.
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
- if (LiveInterval *LI = LIS.getCachedRegUnit(*Units))
- collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx);
- } else {
- // Collect ranges for individual virtual registers.
- collectRanges(MO, &LIS.getInterval(Reg),
- Entering, Internal, Exiting, OldIdx);
}
+
+ // For physregs, only update the regunits that actually have a
+ // precomputed live range.
+ for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
+ if (LiveInterval *LI = getRegUnitLI(*Units))
+ updateRange(*LI);
}
+ if (hasRegMask)
+ updateRegMaskSlots();
}
- void collectRanges(const MachineOperand &MO, LiveInterval *LI,
- RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting,
- SlotIndex OldIdx) {
- if (MO.readsReg()) {
- LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
- if (LR != 0)
- Entering.insert(std::make_pair(LI, LR));
- }
- if (MO.isDef()) {
- LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
- assert(LR != 0 && "No live range for def?");
- if (LR->end > OldIdx.getDeadSlot())
- Exiting.insert(std::make_pair(LI, LR));
+private:
+ /// Update a single live range, assuming an instruction has been moved from
+ /// OldIdx to NewIdx.
+ void updateRange(LiveInterval &LI) {
+ if (!Updated.insert(&LI))
+ return;
+ DEBUG({
+ dbgs() << " ";
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg))
+ dbgs() << PrintReg(LI.reg);
else
- Internal.insert(std::make_pair(LI, LR));
- }
+ dbgs() << PrintRegUnit(LI.reg, &TRI);
+ dbgs() << ":\t" << LI << '\n';
+ });
+ if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
+ handleMoveDown(LI);
+ else
+ handleMoveUp(LI);
+ DEBUG(dbgs() << " -->\t" << LI << '\n');
+ LI.verify();
}
- BundleRanges createBundleRanges(RangeSet& Entering,
- RangeSet& Internal,
- RangeSet& Exiting) {
- BundleRanges BR;
-
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI) {
- LiveInterval* LI = EI->first;
- LiveRange* LR = EI->second;
- BR[LI->reg].Use = LR;
- }
+ /// Update LI to reflect an instruction has been moved downwards from OldIdx
+ /// to NewIdx.
+ ///
+ /// 1. Live def at OldIdx:
+ /// Move def to NewIdx, assert endpoint after NewIdx.
+ ///
+ /// 2. Live def at OldIdx, killed at NewIdx:
+ /// Change to dead def at NewIdx.
+ /// (Happens when bundling def+kill together).
+ ///
+ /// 3. Dead def at OldIdx:
+ /// Move def to NewIdx, possibly across another live value.
+ ///
+ /// 4. Def at OldIdx AND at NewIdx:
+ /// Remove live range [OldIdx;NewIdx) and value defined at OldIdx.
+ /// (Happens when bundling multiple defs together).
+ ///
+ /// 5. Value read at OldIdx, killed before NewIdx:
+ /// Extend kill to NewIdx.
+ ///
+ void handleMoveDown(LiveInterval &LI) {
+ // First look for a kill at OldIdx.
+ LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex());
+ LiveInterval::iterator E = LI.end();
+ // Is LI even live at OldIdx?
+ if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
+ return;
- for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
- II != IE; ++II) {
- LiveInterval* LI = II->first;
- LiveRange* LR = II->second;
- if (LR->end.isDead()) {
- BR[LI->reg].Dead = LR;
- } else {
- BR[LI->reg].EC = LR;
- }
+ // Handle a live-in value.
+ if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
+ bool isKill = SlotIndex::isSameInstr(OldIdx, I->end);
+ // If the live-in value already extends to NewIdx, there is nothing to do.
+ if (!SlotIndex::isEarlierInstr(I->end, NewIdx))
+ return;
+ // Aggressively remove all kill flags from the old kill point.
+ // Kill flags shouldn't be used while live intervals exist, they will be
+ // reinserted by VirtRegRewriter.
+ if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end))
+ for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO)
+ if (MO->isReg() && MO->isUse())
+ MO->setIsKill(false);
+ // Adjust I->end to reach NewIdx. This may temporarily make LI invalid by
+ // overlapping ranges. Case 5 above.
+ I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
+ // If this was a kill, there may also be a def. Otherwise we're done.
+ if (!isKill)
+ return;
+ ++I;
}
- for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
- EI != EE; ++EI) {
- LiveInterval* LI = EI->first;
- LiveRange* LR = EI->second;
- BR[LI->reg].Def = LR;
+ // Check for a def at OldIdx.
+ if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start))
+ return;
+ // We have a def at OldIdx.
+ VNInfo *DefVNI = I->valno;
+ assert(DefVNI->def == I->start && "Inconsistent def");
+ DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
+ // If the defined value extends beyond NewIdx, just move the def down.
+ // This is case 1 above.
+ if (SlotIndex::isEarlierInstr(NewIdx, I->end)) {
+ I->start = DefVNI->def;
+ return;
}
-
- return BR;
- }
-
- void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
- MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
- if (!OldKillMI->killsRegister(reg))
- return; // Bail out if we don't have kill flags on the old register.
- MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
- assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
- assert(!NewKillMI->killsRegister(reg) &&
- "New kill instr is already a kill.");
- OldKillMI->clearRegisterKills(reg, &TRI);
- NewKillMI->addRegisterKilled(reg, &TRI);
- }
-
- void updateRegMaskSlots(SlotIndex OldIdx) {
- SmallVectorImpl<SlotIndex>::iterator RI =
- std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
- OldIdx);
- assert(*RI == OldIdx && "No RegMask at OldIdx.");
- *RI = NewIdx;
- assert(*prior(RI) < *RI && *RI < *next(RI) &&
- "RegSlots out of order. Did you move one call across another?");
- }
-
- // Return the last use of reg between NewIdx and OldIdx.
- SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
- SlotIndex LastUse = NewIdx;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI.use_nodbg_begin(Reg),
- UE = MRI.use_nodbg_end();
- UI != UE; UI.skipInstruction()) {
- const MachineInstr* MI = &*UI;
- SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
- if (InstSlot > LastUse && InstSlot < OldIdx)
- LastUse = InstSlot;
+ // The remaining possibilities are now:
+ // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx).
+ // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot().
+ // In either case, it is possible that there is an existing def at NewIdx.
+ assert((I->end == OldIdx.getDeadSlot() ||
+ SlotIndex::isSameInstr(I->end, NewIdx)) &&
+ "Cannot move def below kill");
+ LiveInterval::iterator NewI = LI.advanceTo(I, NewIdx.getRegSlot());
+ if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) {
+ // There is an existing def at NewIdx, case 4 above. The def at OldIdx is
+ // coalesced into that value.
+ assert(NewI->valno != DefVNI && "Multiple defs of value?");
+ LI.removeValNo(DefVNI);
+ return;
}
- return LastUse;
+ // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx.
+ // If the def at OldIdx was dead, we allow it to be moved across other LI
+ // values. The new range should be placed immediately before NewI, move any
+ // intermediate ranges up.
+ assert(NewI != I && "Inconsistent iterators");
+ std::copy(llvm::next(I), NewI, I);
+ *llvm::prior(NewI) = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
- void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- bool LiveThrough = LR->end > OldIdx.getRegSlot();
- if (LiveThrough)
+ /// Update LI to reflect an instruction has been moved upwards from OldIdx
+ /// to NewIdx.
+ ///
+ /// 1. Live def at OldIdx:
+ /// Hoist def to NewIdx.
+ ///
+ /// 2. Dead def at OldIdx:
+ /// Hoist def+end to NewIdx, possibly move across other values.
+ ///
+ /// 3. Dead def at OldIdx AND existing def at NewIdx:
+ /// Remove value defined at OldIdx, coalescing it with existing value.
+ ///
+ /// 4. Live def at OldIdx AND existing def at NewIdx:
+ /// Remove value defined at NewIdx, hoist OldIdx def to NewIdx.
+ /// (Happens when bundling multiple defs together).
+ ///
+ /// 5. Value killed at OldIdx:
+ /// Hoist kill to NewIdx, then scan for last kill between NewIdx and
+ /// OldIdx.
+ ///
+ void handleMoveUp(LiveInterval &LI) {
+ // First look for a kill at OldIdx.
+ LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex());
+ LiveInterval::iterator E = LI.end();
+ // Is LI even live at OldIdx?
+ if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
return;
- SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
- if (LastUse != NewIdx)
- moveKillFlags(LI->reg, NewIdx, LastUse);
- LR->end = LastUse.getRegSlot();
- }
- void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- // Extend the LiveRange if NewIdx is past the end.
- if (NewIdx > LR->end) {
- // Move kill flags if OldIdx was not originally the end
- // (otherwise LR->end points to an invalid slot).
- if (LR->end.getRegSlot() != OldIdx.getRegSlot()) {
- assert(LR->end > OldIdx && "LiveRange does not cover original slot");
- moveKillFlags(LI->reg, LR->end, NewIdx);
+ // Handle a live-in value.
+ if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
+ // If the live-in value isn't killed here, there is nothing to do.
+ if (!SlotIndex::isSameInstr(OldIdx, I->end))
+ return;
+ // Adjust I->end to end at NewIdx. If we are hoisting a kill above
+ // another use, we need to search for that use. Case 5 above.
+ I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
+ ++I;
+ // If OldIdx also defines a value, there couldn't have been another use.
+ if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
+ // No def, search for the new kill.
+ // This can never be an early clobber kill since there is no def.
+ llvm::prior(I)->end = findLastUseBefore(LI.reg).getRegSlot();
+ return;
}
- LR->end = NewIdx.getRegSlot();
- }
- }
-
- void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
- bool GoingUp = NewIdx < OldIdx;
-
- if (GoingUp) {
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI)
- moveEnteringUpFrom(OldIdx, *EI);
- } else {
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI)
- moveEnteringDownFrom(OldIdx, *EI);
}
- }
-
- void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
- LR->end <= OldIdx.getDeadSlot() &&
- "Range should be internal to OldIdx.");
- LiveRange Tmp(*LR);
- Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
- Tmp.valno->def = Tmp.start;
- Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
- LI->removeRange(*LR);
- LI->addRange(Tmp);
- }
-
- void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
- for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
- II != IE; ++II)
- moveInternalFrom(OldIdx, *II);
- }
-
- void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
- LiveRange* LR = P.second;
- assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
- "Range should start in OldIdx.");
- assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
- SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
- LR->start = NewStart;
- LR->valno->def = NewStart;
- }
-
- void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
- for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
- EI != EE; ++EI)
- moveExitingFrom(OldIdx, *EI);
- }
- void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P,
- BundleRanges& BR) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- bool LiveThrough = LR->end > OldIdx.getRegSlot();
- if (LiveThrough) {
- assert((LR->start < NewIdx || BR[LI->reg].Def == LR) &&
- "Def in bundle should be def range.");
- assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
- "If bundle has use for this reg it should be LR.");
- BR[LI->reg].Use = LR;
+ // Now deal with the def at OldIdx.
+ assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?");
+ VNInfo *DefVNI = I->valno;
+ assert(DefVNI->def == I->start && "Inconsistent def");
+ DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
+
+ // Check for an existing def at NewIdx.
+ LiveInterval::iterator NewI = LI.find(NewIdx.getRegSlot());
+ if (SlotIndex::isSameInstr(NewI->start, NewIdx)) {
+ assert(NewI->valno != DefVNI && "Same value defined more than once?");
+ // There is an existing def at NewIdx.
+ if (I->end.isDead()) {
+ // Case 3: Remove the dead def at OldIdx.
+ LI.removeValNo(DefVNI);
+ return;
+ }
+ // Case 4: Replace def at NewIdx with live def at OldIdx.
+ I->start = DefVNI->def;
+ LI.removeValNo(NewI->valno);
return;
}
- SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
- moveKillFlags(LI->reg, OldIdx, LastUse);
-
- if (LR->start < NewIdx) {
- // Becoming a new entering range.
- assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 &&
- "Bundle shouldn't be re-defining reg mid-range.");
- assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
- "Bundle shouldn't have different use range for same reg.");
- LR->end = LastUse.getRegSlot();
- BR[LI->reg].Use = LR;
- } else {
- // Becoming a new Dead-def.
- assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) &&
- "Live range starting at unexpected slot.");
- assert(BR[LI->reg].Def == LR && "Reg should have def range.");
- assert(BR[LI->reg].Dead == 0 &&
- "Can't have def and dead def of same reg in a bundle.");
- LR->end = LastUse.getDeadSlot();
- BR[LI->reg].Dead = BR[LI->reg].Def;
- BR[LI->reg].Def = 0;
- }
- }
-
- void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P,
- BundleRanges& BR) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- if (NewIdx > LR->end) {
- // Range extended to bundle. Add to bundle uses.
- // Note: Currently adds kill flags to bundle start.
- assert(BR[LI->reg].Use == 0 &&
- "Bundle already has use range for reg.");
- moveKillFlags(LI->reg, LR->end, NewIdx);
- LR->end = NewIdx.getRegSlot();
- BR[LI->reg].Use = LR;
- } else {
- assert(BR[LI->reg].Use != 0 &&
- "Bundle should already have a use range for reg.");
- }
- }
-
- void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering,
- BundleRanges& BR) {
- bool GoingUp = NewIdx < OldIdx;
-
- if (GoingUp) {
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI)
- moveEnteringUpFromInto(OldIdx, *EI, BR);
- } else {
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI)
- moveEnteringDownFromInto(OldIdx, *EI, BR);
+ // There is no existing def at NewIdx. Hoist DefVNI.
+ if (!I->end.isDead()) {
+ // Leave the end point of a live def.
+ I->start = DefVNI->def;
+ return;
}
- }
- void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P,
- BundleRanges& BR) {
- // TODO: Sane rules for moving ranges into bundles.
+ // DefVNI is a dead def. It may have been moved across other values in LI,
+ // so move I up to NewI. Slide [NewI;I) down one position.
+ std::copy_backward(NewI, I, llvm::next(I));
+ *NewI = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
- void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal,
- BundleRanges& BR) {
- for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
- II != IE; ++II)
- moveInternalFromInto(OldIdx, *II, BR);
+ void updateRegMaskSlots() {
+ SmallVectorImpl<SlotIndex>::iterator RI =
+ std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
+ OldIdx);
+ assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
+ "No RegMask at OldIdx.");
+ *RI = NewIdx.getRegSlot();
+ assert((RI == LIS.RegMaskSlots.begin() ||
+ SlotIndex::isEarlierInstr(*llvm::prior(RI), *RI)) &&
+ "Cannot move regmask instruction above another call");
+ assert((llvm::next(RI) == LIS.RegMaskSlots.end() ||
+ SlotIndex::isEarlierInstr(*RI, *llvm::next(RI))) &&
+ "Cannot move regmask instruction below another call");
}
- void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P,
- BundleRanges& BR) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
-
- assert(LR->start.isRegister() &&
- "Don't know how to merge exiting ECs into bundles yet.");
+ // Return the last use of reg between NewIdx and OldIdx.
+ SlotIndex findLastUseBefore(unsigned Reg) {
+ SlotIndex LastUse = NewIdx;
- if (LR->end > NewIdx.getDeadSlot()) {
- // This range is becoming an exiting range on the bundle.
- // If there was an old dead-def of this reg, delete it.
- if (BR[LI->reg].Dead != 0) {
- LI->removeRange(*BR[LI->reg].Dead);
- BR[LI->reg].Dead = 0;
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI.use_nodbg_begin(Reg),
+ UE = MRI.use_nodbg_end();
+ UI != UE; UI.skipInstruction()) {
+ const MachineInstr* MI = &*UI;
+ SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
+ if (InstSlot > LastUse && InstSlot < OldIdx)
+ LastUse = InstSlot;
}
- assert(BR[LI->reg].Def == 0 &&
- "Can't have two defs for the same variable exiting a bundle.");
- LR->start = NewIdx.getRegSlot();
- LR->valno->def = LR->start;
- BR[LI->reg].Def = LR;
} else {
- // This range is becoming internal to the bundle.
- assert(LR->end == NewIdx.getRegSlot() &&
- "Can't bundle def whose kill is before the bundle");
- if (BR[LI->reg].Dead || BR[LI->reg].Def) {
- // Already have a def for this. Just delete range.
- LI->removeRange(*LR);
- } else {
- // Make range dead, record.
- LR->end = NewIdx.getDeadSlot();
- BR[LI->reg].Dead = LR;
- assert(BR[LI->reg].Use == LR &&
- "Range becoming dead should currently be use.");
+ MachineInstr* MI = LIS.getSlotIndexes()->getInstructionFromIndex(NewIdx);
+ MachineBasicBlock::iterator MII(MI);
+ ++MII;
+ MachineBasicBlock* MBB = MI->getParent();
+ for (; MII != MBB->end() && LIS.getInstructionIndex(MII) < OldIdx; ++MII){
+ for (MachineInstr::mop_iterator MOI = MII->operands_begin(),
+ MOE = MII->operands_end();
+ MOI != MOE; ++MOI) {
+ const MachineOperand& mop = *MOI;
+ if (!mop.isReg() || mop.getReg() == 0 ||
+ TargetRegisterInfo::isVirtualRegister(mop.getReg()))
+ continue;
+
+ if (TRI.hasRegUnit(mop.getReg(), Reg))
+ LastUse = LIS.getInstructionIndex(MII);
+ }
}
- // In both cases the range is no longer a use on the bundle.
- BR[LI->reg].Use = 0;
}
+ return LastUse;
}
-
- void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting,
- BundleRanges& BR) {
- for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
- EI != EE; ++EI)
- moveExitingFromInto(OldIdx, *EI, BR);
- }
-
};
-void LiveIntervals::handleMove(MachineInstr* MI) {
+void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) {
+ assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
Indexes->removeMachineInstrFromMaps(MI);
- SlotIndex NewIndex = MI->isInsideBundle() ?
- Indexes->getInstructionIndex(MI) :
- Indexes->insertMachineInstrInMaps(MI);
+ SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
OldIndex < getMBBEndIdx(MI->getParent()) &&
"Cannot handle moves across basic block boundaries.");
- assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
- HMEditor HME(*this, *MRI, *TRI, NewIndex);
- HME.moveAllRangesFrom(MI, OldIndex);
+ HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
+ HME.updateAllRanges(MI);
}
void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
- MachineInstr* BundleStart) {
+ MachineInstr* BundleStart,
+ bool UpdateFlags) {
+ SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
- HMEditor HME(*this, *MRI, *TRI, NewIndex);
- HME.moveAllRangesInto(MI, BundleStart);
+ HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
+ HME.updateAllRanges(MI);
}
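
Two patterns in the LiveIntervalAnalysis rewrite deserve a note. pruneValue() walks
successor blocks with df_ext_iterator and prunes the search with skipChildren() once
the value stops being live-in; handleMoveDown()/handleMoveUp() then do all range
surgery in place. A self-contained sketch of the pruned depth-first walk (plain STL
with an explicit stack; Block, liveInHere() and pruneBlock() are hypothetical
stand-ins for the machine CFG and the LiveIntervals queries):

    #include <set>
    #include <vector>

    struct Block { std::vector<Block *> Succs; };

    static bool liveInHere(const Block *) { return true; } // stand-in query
    static void pruneBlock(Block *) {}                     // stand-in surgery

    static void pruneFromSuccessors(Block *KillMBB) {
      std::set<const Block *> Visited;
      std::vector<Block *> Work(KillMBB->Succs.begin(), KillMBB->Succs.end());
      while (!Work.empty()) {
        Block *B = Work.back();
        Work.pop_back();
        if (!Visited.insert(B).second)
          continue;        // the bookkeeping df_ext_iterator does implicitly
        if (!liveInHere(B))
          continue;        // the equivalent of I.skipChildren()
        pruneBlock(B);
        Work.insert(Work.end(), B->Succs.begin(), B->Succs.end());
      }
    }
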
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
index cd4e690..4d41fca 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
@@ -178,8 +178,8 @@ public:
bool checkLoopInterference(MachineLoopRange*);
private:
- Query(const Query&); // DO NOT IMPLEMENT
- void operator=(const Query&); // DO NOT IMPLEMENT
+ Query(const Query&) LLVM_DELETED_FUNCTION;
+ void operator=(const Query&) LLVM_DELETED_FUNCTION;
};
// Array of LiveIntervalUnions.
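
LLVM_DELETED_FUNCTION replaces the old "declare private, never define" comment
convention: under C++11 it expands to "= delete", so misuse is a compile-time error,
and it expands to nothing on older compilers, where the private declaration still does
the work. A minimal non-copyable class in the same style (the macro comes from
llvm/Support/Compiler.h):

    #include "llvm/Support/Compiler.h"

    class Query {
    public:
      Query() {}
    private:
      Query(const Query &) LLVM_DELETED_FUNCTION;          // no copying
      void operator=(const Query &) LLVM_DELETED_FUNCTION; // no assignment
    };
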
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp b/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
index d828f25..c3ff4f1 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
@@ -65,7 +65,11 @@ void LiveRangeCalc::extendToUses(LiveInterval *LI, unsigned Reg) {
// Visit all operands that read Reg. This may include partial defs.
for (MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(Reg),
E = MRI->reg_nodbg_end(); I != E; ++I) {
- const MachineOperand &MO = I.getOperand();
+ MachineOperand &MO = I.getOperand();
+ // Clear all kill flags. They will be reinserted after register allocation
+ // by LiveIntervalAnalysis::addKillFlags().
+ if (MO.isUse())
+ MO.setIsKill(false);
if (!MO.readsReg())
continue;
// MI is reading Reg. We may have visited MI before if it happens to be
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
index b4ce9aa..f8fbc7d 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -87,7 +87,7 @@ bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
// We can't remat physreg uses, unless it is a constant.
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
- if (MRI.isConstantPhysReg(MO.getReg(), VRM->getMachineFunction()))
+ if (MRI.isConstantPhysReg(MO.getReg(), *OrigMI->getParent()->getParent()))
continue;
return false;
}
@@ -96,6 +96,13 @@ bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
if (!OVNI)
continue;
+
+ // Don't allow rematerialization immediately after the original def.
+ // It would be incorrect if OrigMI redefines the register.
+ // See PR14098.
+ if (SlotIndex::isSameInstr(OrigIdx, UseIdx))
+ return false;
+
if (OVNI != li.getVNInfoAt(UseIdx))
return false;
}
@@ -249,7 +256,7 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
unsigned Reg = MOI->getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
// Check if MI reads any unreserved physregs.
- if (Reg && MOI->readsReg() && !LIS.isReserved(Reg))
+ if (Reg && MOI->readsReg() && !MRI.isReserved(Reg))
ReadsPhysRegs = true;
continue;
}
diff --git a/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp b/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
index cdb1776..7f22478 100644
--- a/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
@@ -13,6 +13,7 @@
#define DEBUG_TYPE "regalloc"
#include "LiveRegMatrix.h"
+#include "RegisterCoalescer.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -117,8 +118,9 @@ bool LiveRegMatrix::checkRegUnitInterference(LiveInterval &VirtReg,
unsigned PhysReg) {
if (VirtReg.empty())
return false;
+ CoalescerPair CP(VirtReg.reg, PhysReg, *TRI);
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
- if (VirtReg.overlaps(LIS->getRegUnit(*Units)))
+ if (VirtReg.overlaps(LIS->getRegUnit(*Units), CP, *LIS->getSlotIndexes()))
return true;
return false;
}
diff --git a/contrib/llvm/lib/CodeGen/LiveRegMatrix.h b/contrib/llvm/lib/CodeGen/LiveRegMatrix.h
index b3e2d7f..8f22c24 100644
--- a/contrib/llvm/lib/CodeGen/LiveRegMatrix.h
+++ b/contrib/llvm/lib/CodeGen/LiveRegMatrix.h
@@ -15,7 +15,7 @@
// Register units are defined in MCRegisterInfo.h, they represent the smallest
// unit of interference when dealing with overlapping physical registers. The
// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
-// a virtual register is assigned to a physicval register, the live range for
+// a virtual register is assigned to a physical register, the live range for
// the virtual register is inserted into the LiveIntervalUnion for each regunit
// in the physreg.
//
diff --git a/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
index 939e795..f0b522b 100644
--- a/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
@@ -25,7 +25,10 @@
using namespace llvm;
char LiveStacks::ID = 0;
-INITIALIZE_PASS(LiveStacks, "livestacks",
+INITIALIZE_PASS_BEGIN(LiveStacks, "livestacks",
+ "Live Stack Slot Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_END(LiveStacks, "livestacks",
"Live Stack Slot Analysis", false, false)
char &llvm::LiveStacksID = LiveStacks::ID;
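
The BEGIN/DEPENDENCY/END triple registers the pass together with the analyses it
requires, so initializing LiveStacks now transitively initializes SlotIndexes. A
hedged sketch of the same pattern for a hypothetical pass (MyPass and its strings are
illustrative only; a real in-tree pass declares its initializer in InitializePasses.h
instead of the inline declaration used here):

    #include "llvm/CodeGen/MachineFunctionPass.h"
    #include "llvm/CodeGen/SlotIndexes.h"
    using namespace llvm;

    namespace llvm { void initializeMyPassPass(PassRegistry &); }

    namespace {
    struct MyPass : public MachineFunctionPass { // hypothetical example pass
      static char ID;
      MyPass() : MachineFunctionPass(ID) {}
      virtual void getAnalysisUsage(AnalysisUsage &AU) const {
        AU.addRequired<SlotIndexes>(); // the dependency declared below
        MachineFunctionPass::getAnalysisUsage(AU);
      }
      virtual bool runOnMachineFunction(MachineFunction &) { return false; }
    };
    }
    char MyPass::ID = 0;

    INITIALIZE_PASS_BEGIN(MyPass, "mypass", "My Example Pass", false, false)
    INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
    INITIALIZE_PASS_END(MyPass, "mypass", "My Example Pass", false, false)
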
diff --git a/contrib/llvm/lib/CodeGen/LiveVariables.cpp b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
index 348ed3a..6ea933d 100644
--- a/contrib/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
@@ -65,6 +65,7 @@ LiveVariables::VarInfo::findKill(const MachineBasicBlock *MBB) const {
}
void LiveVariables::VarInfo::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dbgs() << " Alive in blocks: ";
for (SparseBitVector<>::iterator I = AliveBlocks.begin(),
E = AliveBlocks.end(); I != E; ++I)
@@ -77,6 +78,7 @@ void LiveVariables::VarInfo::dump() const {
dbgs() << "\n #" << i << ": " << *Kills[i];
dbgs() << "\n";
}
+#endif
}
/// getVarInfo - Get (possibly creating) a VarInfo object for the given vreg.
@@ -501,8 +503,6 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
MRI = &mf.getRegInfo();
TRI = MF->getTarget().getRegisterInfo();
- ReservedRegisters = TRI->getReservedRegs(mf);
-
unsigned NumRegs = TRI->getNumRegs();
PhysRegDef = new MachineInstr*[NumRegs];
PhysRegUse = new MachineInstr*[NumRegs];
@@ -586,7 +586,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
unsigned MOReg = UseRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
HandleVirtRegUse(MOReg, MBB, MI);
- else if (!ReservedRegisters[MOReg])
+ else if (!MRI->isReserved(MOReg))
HandlePhysRegUse(MOReg, MI);
}
@@ -599,7 +599,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
unsigned MOReg = DefRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
HandleVirtRegDef(MOReg, MI);
- else if (!ReservedRegisters[MOReg])
+ else if (!MRI->isReserved(MOReg))
HandlePhysRegDef(MOReg, MI, Defs);
}
UpdatePhysRegDefs(MI, Defs);
@@ -806,18 +806,44 @@ void LiveVariables::addNewBlock(MachineBasicBlock *BB,
MachineBasicBlock *SuccBB) {
const unsigned NumNew = BB->getNumber();
- // All registers used by PHI nodes in SuccBB must be live through BB.
- for (MachineBasicBlock::iterator BBI = SuccBB->begin(),
- BBE = SuccBB->end(); BBI != BBE && BBI->isPHI(); ++BBI)
+ SmallSet<unsigned, 16> Defs, Kills;
+
+ MachineBasicBlock::iterator BBI = SuccBB->begin(), BBE = SuccBB->end();
+ for (; BBI != BBE && BBI->isPHI(); ++BBI) {
+ // Record the def of the PHI node.
+ Defs.insert(BBI->getOperand(0).getReg());
+
+ // All registers used by PHI nodes in SuccBB must be live through BB.
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2)
if (BBI->getOperand(i+1).getMBB() == BB)
getVarInfo(BBI->getOperand(i).getReg()).AliveBlocks.set(NumNew);
+ }
+
+ // Record all vreg defs and kills of all instructions in SuccBB.
+ for (; BBI != BBE; ++BBI) {
+ for (MachineInstr::mop_iterator I = BBI->operands_begin(),
+ E = BBI->operands_end(); I != E; ++I) {
+ if (I->isReg() && TargetRegisterInfo::isVirtualRegister(I->getReg())) {
+ if (I->isDef())
+ Defs.insert(I->getReg());
+ else if (I->isKill())
+ Kills.insert(I->getReg());
+ }
+ }
+ }
// Update info for all live variables
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+
+ // If the Defs is defined in the successor it can't be live in BB.
+ if (Defs.count(Reg))
+ continue;
+
+ // If the register is either killed in or live through SuccBB it's also live
+ // through BB.
VarInfo &VI = getVarInfo(Reg);
- if (!VI.AliveBlocks.test(NumNew) && VI.isLiveIn(*SuccBB, Reg, *MRI))
+ if (Kills.count(Reg) || VI.AliveBlocks.test(SuccBB->getNumber()))
VI.AliveBlocks.set(NumNew);
}
}
diff --git a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
index fa6b450..18d021d 100644
--- a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -21,7 +21,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Assembly/Writer.h"
@@ -145,7 +145,8 @@ MachineBasicBlock::iterator MachineBasicBlock::getFirstNonPHI() {
instr_iterator I = instr_begin(), E = instr_end();
while (I != E && I->isPHI())
++I;
- assert(!I->isInsideBundle() && "First non-phi MI cannot be inside a bundle!");
+ assert((I == E || !I->isInsideBundle()) &&
+ "First non-phi MI cannot be inside a bundle!");
return I;
}
@@ -156,7 +157,7 @@ MachineBasicBlock::SkipPHIsAndLabels(MachineBasicBlock::iterator I) {
++I;
// FIXME: This needs to change if we wish to bundle labels / dbg_values
// inside the bundle.
- assert(!I->isInsideBundle() &&
+ assert((I == E || !I->isInsideBundle()) &&
"First non-phi / non-label instruction is inside a bundle!");
return I;
}
@@ -228,9 +229,11 @@ const MachineBasicBlock *MachineBasicBlock::getLandingPadSuccessor() const {
return 0;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineBasicBlock::dump() const {
print(dbgs());
}
+#endif
StringRef MachineBasicBlock::getName() const {
if (const BasicBlock *LBB = getBasicBlock())
@@ -243,7 +246,7 @@ StringRef MachineBasicBlock::getName() const {
std::string MachineBasicBlock::getFullName() const {
std::string Name;
if (getParent())
- Name = (getParent()->getFunction()->getName() + ":").str();
+ Name = (getParent()->getName() + ":").str();
if (getBasicBlock())
Name += getBasicBlock()->getName();
else
@@ -942,12 +945,11 @@ MachineBasicBlock::findDebugLoc(instr_iterator MBBI) {
/// getSuccWeight - Return weight of the edge from this block to MBB.
///
-uint32_t MachineBasicBlock::getSuccWeight(const MachineBasicBlock *succ) const {
+uint32_t MachineBasicBlock::getSuccWeight(const_succ_iterator Succ) const {
if (Weights.empty())
return 0;
- const_succ_iterator I = std::find(Successors.begin(), Successors.end(), succ);
- return *getWeightIterator(I);
+ return *getWeightIterator(Succ);
}
/// getWeightIterator - Return weight iterator corresponding to the I successor
@@ -970,6 +972,80 @@ getWeightIterator(MachineBasicBlock::const_succ_iterator I) const {
return Weights.begin() + index;
}
+/// Return whether (physical) register "Reg" has been <def>ined and not <kill>ed
+/// as of just before "MI".
+///
+/// The search is localised to a neighborhood of Neighborhood instructions
+/// before (searching for defs or kills) and Neighborhood instructions after
+/// (searching just for defs) MI.
+MachineBasicBlock::LivenessQueryResult
+MachineBasicBlock::computeRegisterLiveness(const TargetRegisterInfo *TRI,
+ unsigned Reg, MachineInstr *MI,
+ unsigned Neighborhood) {
+
+ unsigned N = Neighborhood;
+ MachineBasicBlock *MBB = MI->getParent();
+
+ // Start by searching backwards from MI, looking for kills, reads or defs.
+
+ MachineBasicBlock::iterator I(MI);
+ // If this is the first insn in the block, don't search backwards.
+ if (I != MBB->begin()) {
+ do {
+ --I;
+
+ MachineOperandIteratorBase::PhysRegInfo Analysis =
+ MIOperands(I).analyzePhysReg(Reg, TRI);
+
+ if (Analysis.Kills)
+ // Register killed, so isn't live.
+ return LQR_Dead;
+
+ else if (Analysis.DefinesOverlap || Analysis.ReadsOverlap)
+ // Defined or read without a previous kill - live.
+ return (Analysis.Defines || Analysis.Reads) ?
+ LQR_Live : LQR_OverlappingLive;
+
+ } while (I != MBB->begin() && --N > 0);
+ }
+
+ // Did we get to the start of the block?
+ if (I == MBB->begin()) {
+ // If so, the register's state is definitely defined by the live-in state.
+ for (MCRegAliasIterator RAI(Reg, TRI, /*IncludeSelf=*/true);
+ RAI.isValid(); ++RAI) {
+ if (MBB->isLiveIn(*RAI))
+ return (*RAI == Reg) ? LQR_Live : LQR_OverlappingLive;
+ }
+
+ return LQR_Dead;
+ }
+
+ N = Neighborhood;
+
+ // Try searching forwards from MI, looking for reads or defs.
+ I = MachineBasicBlock::iterator(MI);
+ // If this is the last insn in the block, don't search forwards.
+ if (I != MBB->end()) {
+ for (++I; I != MBB->end() && N > 0; ++I, --N) {
+ MachineOperandIteratorBase::PhysRegInfo Analysis =
+ MIOperands(I).analyzePhysReg(Reg, TRI);
+
+ if (Analysis.ReadsOverlap)
+ // Used, therefore must have been live.
+ return (Analysis.Reads) ?
+ LQR_Live : LQR_OverlappingLive;
+
+ else if (Analysis.DefinesOverlap)
+ // Defined (but not read), therefore it cannot have been live.
+ return LQR_Dead;
+ }
+ }
+
+ // At this point we have no idea of the liveness of the register.
+ return LQR_Unknown;
+}
+
void llvm::WriteAsOperand(raw_ostream &OS, const MachineBasicBlock *MBB,
bool t) {
OS << "BB#" << MBB->getNumber();
diff --git a/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index c4dca2c..cd3f199 100644
--- a/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -500,11 +500,10 @@ void MachineBlockPlacement::buildChain(
assert(BB);
assert(BlockToChain[BB] == &Chain);
assert(*llvm::prior(Chain.end()) == BB);
- MachineBasicBlock *BestSucc = 0;
// Look for the best viable successor if there is one to place immediately
// after this block.
- BestSucc = selectBestSuccessor(BB, Chain, BlockFilter);
+ MachineBasicBlock *BestSucc = selectBestSuccessor(BB, Chain, BlockFilter);
// If an immediate successor isn't available, look for the best viable
// block among those we've identified as not violating the loop's CFG at
@@ -1014,7 +1013,8 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// exclusively on the loop info here so that we can align backedges in
// unnatural CFGs and backedges that were introduced purely because of the
// loop rotations done during this layout pass.
- if (F.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ if (F.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize))
return;
unsigned Align = TLI->getPrefLoopAlignment();
if (!Align)
diff --git a/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp b/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
index 0cc1af0..4479211 100644
--- a/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
@@ -38,7 +38,7 @@ getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const {
Scale = 1;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
- uint32_t Weight = getEdgeWeight(MBB, *I);
+ uint32_t Weight = getEdgeWeight(MBB, I);
Sum += Weight;
}
@@ -53,22 +53,30 @@ getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const {
Sum = 0;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
- uint32_t Weight = getEdgeWeight(MBB, *I);
+ uint32_t Weight = getEdgeWeight(MBB, I);
Sum += Weight / Scale;
}
assert(Sum <= UINT32_MAX);
return Sum;
}
-uint32_t
-MachineBranchProbabilityInfo::getEdgeWeight(const MachineBasicBlock *Src,
- const MachineBasicBlock *Dst) const {
+uint32_t MachineBranchProbabilityInfo::
+getEdgeWeight(const MachineBasicBlock *Src,
+ MachineBasicBlock::const_succ_iterator Dst) const {
uint32_t Weight = Src->getSuccWeight(Dst);
if (!Weight)
return DEFAULT_WEIGHT;
return Weight;
}
+uint32_t MachineBranchProbabilityInfo::
+getEdgeWeight(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const {
+ // This is a linear search. Try to use the const_succ_iterator version when
+ // possible.
+ return getEdgeWeight(Src, std::find(Src->succ_begin(), Src->succ_end(), Dst));
+}
+
bool MachineBranchProbabilityInfo::isEdgeHot(MachineBasicBlock *Src,
MachineBasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
@@ -82,7 +90,7 @@ MachineBranchProbabilityInfo::getHotSucc(MachineBasicBlock *MBB) const {
MachineBasicBlock *MaxSucc = 0;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
- uint32_t Weight = getEdgeWeight(MBB, *I);
+ uint32_t Weight = getEdgeWeight(MBB, I);
if (Weight > MaxWeight) {
MaxWeight = Weight;
MaxSucc = *I;
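
The new const_succ_iterator overload exists because successor weights are stored in an array parallel to the successor list, so a pointer-based lookup needs a linear search first. A toy version of the bookkeeping (invented types, not the MachineBasicBlock API):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Block {
  std::vector<const Block *> Succs; // parallel arrays: Weights[i] is the
  std::vector<uint32_t> Weights;    // weight of the edge to Succs[i]

  // O(1): the caller already holds an iterator into Succs.
  uint32_t weight(std::vector<const Block *>::const_iterator I) const {
    return Weights[I - Succs.begin()];
  }
  // O(n): must locate Dst first; prefer the overload above inside
  // succ_begin()/succ_end() loops, as the patched callers do.
  uint32_t weight(const Block *Dst) const {
    return weight(std::find(Succs.begin(), Succs.end(), Dst));
  }
};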
diff --git a/contrib/llvm/lib/CodeGen/MachineCSE.cpp b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
index 896461f..dbc41de 100644
--- a/contrib/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
@@ -63,8 +63,6 @@ namespace {
virtual void releaseMemory() {
ScopeMap.clear();
Exps.clear();
- AllocatableRegs.clear();
- ReservedRegs.clear();
}
private:
@@ -78,8 +76,6 @@ namespace {
ScopedHTType VNT;
SmallVector<MachineInstr*, 64> Exps;
unsigned CurrVN;
- BitVector AllocatableRegs;
- BitVector ReservedRegs;
bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
bool isPhysDefTriviallyDead(unsigned Reg,
@@ -88,7 +84,8 @@ namespace {
bool hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
- SmallVector<unsigned,2> &PhysDefs) const;
+ SmallVector<unsigned,2> &PhysDefs,
+ bool &PhysUseDef) const;
bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
SmallSet<unsigned,8> &PhysRefs,
SmallVector<unsigned,2> &PhysDefs,
@@ -198,31 +195,52 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
- SmallVector<unsigned,2> &PhysDefs) const{
- MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
+ SmallVector<unsigned,2> &PhysDefs,
+ bool &PhysUseDef) const{
+ // First, add all uses to PhysRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg())
+ if (!MO.isReg() || MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- // If the def is dead, it's ok. But the def may not marked "dead". That's
- // common since this pass is run before livevariables. We can scan
- // forward a few instructions and check if it is obviously dead.
- if (MO.isDef() &&
- (MO.isDead() || isPhysDefTriviallyDead(Reg, I, MBB->end())))
- continue;
// Reading constant physregs is ok.
if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
PhysRefs.insert(*AI);
- if (MO.isDef())
+ }
+
+ // Next, collect all defs into PhysDefs. If any is already in PhysRefs
+ // (which currently contains only uses), set the PhysUseDef flag.
+ PhysUseDef = false;
+ MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+ // Check against PhysRefs even if the def is "dead".
+ if (PhysRefs.count(Reg))
+ PhysUseDef = true;
+ // If the def is dead, it's ok. But the def may not be marked "dead". That's
+ // common since this pass is run before livevariables. We can scan
+ // forward a few instructions and check if it is obviously dead.
+ if (!MO.isDead() && !isPhysDefTriviallyDead(Reg, I, MBB->end()))
PhysDefs.push_back(Reg);
}
+ // Finally, add all defs to PhysRefs as well.
+ for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i)
+ for (MCRegAliasIterator AI(PhysDefs[i], TRI, true); AI.isValid(); ++AI)
+ PhysRefs.insert(*AI);
+
return !PhysRefs.empty();
}
@@ -242,7 +260,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
return false;
for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
- if (AllocatableRegs.test(PhysDefs[i]) || ReservedRegs.test(PhysDefs[i]))
+ if (MRI->isAllocatable(PhysDefs[i]) || MRI->isReserved(PhysDefs[i]))
// Avoid extending live range of physical registers if they are
// allocatable or reserved.
return false;
@@ -411,8 +429,8 @@ void MachineCSE::ExitScope(MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB);
assert(SI != ScopeMap.end());
- ScopeMap.erase(SI);
delete SI->second;
+ ScopeMap.erase(SI);
}
bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
@@ -463,16 +481,22 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
bool CrossMBBPhysDef = false;
SmallSet<unsigned, 8> PhysRefs;
SmallVector<unsigned, 2> PhysDefs;
- if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs, PhysDefs)) {
+ bool PhysUseDef = false;
+ if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs,
+ PhysDefs, PhysUseDef)) {
FoundCSE = false;
// ... Unless the CS is local or is in the sole predecessor block
// and it also defines the physical register which is not clobbered
// in between and the physical register uses were not clobbered.
- unsigned CSVN = VNT.lookup(MI);
- MachineInstr *CSMI = Exps[CSVN];
- if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
- FoundCSE = true;
+ // This can never be the case if the instruction both uses and
+ // defines the same physical register, which was detected above.
+ if (!PhysUseDef) {
+ unsigned CSVN = VNT.lookup(MI);
+ MachineInstr *CSMI = Exps[CSVN];
+ if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
+ FoundCSE = true;
+ }
}
if (!FoundCSE) {
@@ -635,7 +659,5 @@ bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<MachineDominatorTree>();
- AllocatableRegs = TRI->getAllocatableSet(MF);
- ReservedRegs = TRI->getReservedRegs(MF);
return PerformCSE(DT->getRootNode());
}
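
The reworked hasLivePhysRegDefUses() is a two-pass scan: collect uses first, then check defs against them. A reduced standalone version of that idea (toy Operand type, invented names):

#include <set>
#include <vector>

struct Operand { unsigned Reg; bool IsDef; };

// True if some physreg is both read and written by one instruction: the
// PhysUseDef case the patch uses to reject CSE candidates outright.
bool hasPhysUseDef(const std::vector<Operand> &Ops) {
  std::set<unsigned> Uses;
  for (const Operand &Op : Ops) // pass 1: collect all uses
    if (!Op.IsDef)
      Uses.insert(Op.Reg);
  for (const Operand &Op : Ops) // pass 2: does any def collide with a use?
    if (Op.IsDef && Uses.count(Op.Reg))
      return true;
  return false;
}

int main() {
  std::vector<Operand> Ops = {{5, false}, {5, true}};
  return hasPhysUseDef(Ops) ? 0 : 1; // reg 5 is both used and defined
}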
diff --git a/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index bac3aa2..4a79328 100644
--- a/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -16,6 +16,7 @@
#include "llvm/Pass.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -32,7 +33,7 @@ STATISTIC(NumDeletes, "Number of dead copies deleted");
namespace {
class MachineCopyPropagation : public MachineFunctionPass {
const TargetRegisterInfo *TRI;
- BitVector ReservedRegs;
+ MachineRegisterInfo *MRI;
public:
static char ID; // Pass identification, replacement for typeid
@@ -146,8 +147,8 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
DenseMap<unsigned, MachineInstr*>::iterator CI = AvailCopyMap.find(Src);
if (CI != AvailCopyMap.end()) {
MachineInstr *CopyMI = CI->second;
- if (!ReservedRegs.test(Def) &&
- (!ReservedRegs.test(Src) || NoInterveningSideEffect(CopyMI, MI)) &&
+ if (!MRI->isReserved(Def) &&
+ (!MRI->isReserved(Src) || NoInterveningSideEffect(CopyMI, MI)) &&
isNopCopy(CopyMI, Def, Src, TRI)) {
// The two copies cancel out and the source of the first copy
// hasn't been overridden, eliminate the second one. e.g.
@@ -259,7 +260,7 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
DI = MaybeDeadCopies.begin(), DE = MaybeDeadCopies.end();
DI != DE; ++DI) {
unsigned Reg = (*DI)->getOperand(0).getReg();
- if (ReservedRegs.test(Reg) || !MaskMO.clobbersPhysReg(Reg))
+ if (MRI->isReserved(Reg) || !MaskMO.clobbersPhysReg(Reg))
continue;
(*DI)->eraseFromParent();
Changed = true;
@@ -296,7 +297,7 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
for (SmallSetVector<MachineInstr*, 8>::iterator
DI = MaybeDeadCopies.begin(), DE = MaybeDeadCopies.end();
DI != DE; ++DI) {
- if (!ReservedRegs.test((*DI)->getOperand(0).getReg())) {
+ if (!MRI->isReserved((*DI)->getOperand(0).getReg())) {
(*DI)->eraseFromParent();
Changed = true;
++NumDeletes;
@@ -311,7 +312,7 @@ bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
TRI = MF.getTarget().getRegisterInfo();
- ReservedRegs = TRI->getReservedRegs(MF);
+ MRI = &MF.getRegInfo();
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
Changed |= CopyPropagateBlock(*I);
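
The isReserved() checks above guard this pass's nop-copy elimination. The cancellation rule itself, in isolation (a toy sketch with invented types; the real pass also verifies nothing clobbers the registers in between):

#include <utility>

typedef std::pair<unsigned, unsigned> Copy; // (Dst, Src)

// "Def = Src" is a nop if an earlier, still-available copy already made the
// two registers equal, in either direction: A = B followed by B = A cancels.
bool isNopCopy(const Copy &Earlier, unsigned Def, unsigned Src) {
  return (Earlier.first == Def && Earlier.second == Src) ||
         (Earlier.first == Src && Earlier.second == Def);
}

int main() {
  return isNopCopy(Copy(1, 2), 2, 1) ? 0 : 1; // R1 = R2 then R2 = R1: a nop
}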
diff --git a/contrib/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
index d4aede8a..91d5211 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
@@ -28,7 +28,7 @@
#include "llvm/MC/MCContext.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetFrameLowering.h"
@@ -59,13 +59,13 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
RegInfo = 0;
MFInfo = 0;
FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering());
- if (Fn->hasFnAttr(Attribute::StackAlignment))
- FrameInfo->ensureMaxAlignment(Attribute::getStackAlignmentFromAttrs(
- Fn->getAttributes().getFnAttributes()));
- ConstantPool = new (Allocator) MachineConstantPool(TM.getTargetData());
+ if (Fn->getFnAttributes().hasAttribute(Attributes::StackAlignment))
+ FrameInfo->ensureMaxAlignment(Fn->getAttributes().
+ getFnAttributes().getStackAlignment());
+ ConstantPool = new (Allocator) MachineConstantPool(TM.getDataLayout());
Alignment = TM.getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
- if (!Fn->hasFnAttr(Attribute::OptimizeForSize))
+ if (!Fn->getFnAttributes().hasAttribute(Attributes::OptimizeForSize))
Alignment = std::max(Alignment,
TM.getTargetLowering()->getPrefFunctionAlignment());
FunctionNumber = FunctionNum;
@@ -284,12 +284,19 @@ MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
return std::make_pair(Result, Result + Num);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineFunction::dump() const {
print(dbgs());
}
+#endif
+
+StringRef MachineFunction::getName() const {
+ assert(getFunction() && "No function!");
+ return getFunction()->getName();
+}
void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
- OS << "# Machine code for function " << Fn->getName() << ": ";
+ OS << "# Machine code for function " << getName() << ": ";
if (RegInfo) {
OS << (RegInfo->isSSA() ? "SSA" : "Post SSA");
if (!RegInfo->tracksLiveness())
@@ -334,7 +341,7 @@ void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
BB->print(OS, Indexes);
}
- OS << "\n# End machine code for function " << Fn->getName() << ".\n\n";
+ OS << "\n# End machine code for function " << getName() << ".\n\n";
}
namespace llvm {
@@ -344,7 +351,7 @@ namespace llvm {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const MachineFunction *F) {
- return "CFG for '" + F->getFunction()->getName().str() + "' function";
+ return "CFG for '" + F->getName().str() + "' function";
}
std::string getNodeLabel(const MachineBasicBlock *Node,
@@ -377,7 +384,7 @@ namespace llvm {
void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
- ViewGraph(this, "mf" + getFunction()->getName());
+ ViewGraph(this, "mf" + getName());
#else
errs() << "MachineFunction::viewCFG is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
@@ -387,7 +394,7 @@ void MachineFunction::viewCFG() const
void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
- ViewGraph(this, "mf" + getFunction()->getName(), true);
+ ViewGraph(this, "mf" + getName(), true);
#else
errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
@@ -453,7 +460,9 @@ int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
unsigned StackAlign = TFI.getStackAlignment();
unsigned Align = MinAlign(SPOffset, StackAlign);
Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
- /*isSS*/false, false));
+ /*isSS*/ false,
+ /*NeedSP*/ false,
+ /*Alloca*/ 0));
return -++NumFixedObjects;
}
@@ -525,16 +534,18 @@ void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineFrameInfo::dump(const MachineFunction &MF) const {
print(MF, dbgs());
}
+#endif
//===----------------------------------------------------------------------===//
// MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//
/// getEntrySize - Return the size of each entry in the jump table.
-unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const {
+unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
// The size of a jump table entry is 4 bytes unless the entry is just the
// address of a block, in which case it is the pointer size.
switch (getEntryKind()) {
@@ -553,7 +564,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const {
}
/// getEntryAlignment - Return the alignment of each entry in the jump table.
-unsigned MachineJumpTableInfo::getEntryAlignment(const TargetData &TD) const {
+unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
// The alignment of a jump table entry is the alignment of int32 unless the
// entry is just the address of a block, in which case it is the pointer
// alignment.
@@ -622,7 +633,9 @@ void MachineJumpTableInfo::print(raw_ostream &OS) const {
OS << '\n';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineJumpTableInfo::dump() const { print(dbgs()); }
+#endif
//===----------------------------------------------------------------------===//
@@ -657,7 +670,7 @@ MachineConstantPool::~MachineConstantPool() {
/// CanShareConstantPoolEntry - Test whether the given two constants
/// can be allocated the same constant pool entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
- const TargetData *TD) {
+ const DataLayout *TD) {
// Handle the trivial case quickly.
if (A == B) return true;
@@ -681,7 +694,7 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
// Try constant folding a bitcast of both instructions to an integer. If we
// get two identical ConstantInt's, then we are good to share them. We use
// the constant folding APIs to do this so that we get the benefit of
- // TargetData.
+ // DataLayout.
if (isa<PointerType>(A->getType()))
A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
const_cast<Constant*>(A), TD);
@@ -749,10 +762,12 @@ void MachineConstantPool::print(raw_ostream &OS) const {
if (Constants[i].isMachineConstantPoolEntry())
Constants[i].Val.MachineCPVal->print(OS);
else
- OS << *(Value*)Constants[i].Val.ConstVal;
+ OS << *(const Value*)Constants[i].Val.ConstVal;
OS << ", align=" << Constants[i].getAlignment();
OS << "\n";
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineConstantPool::dump() const { print(dbgs()); }
+#endif
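
Several hunks in this commit wrap dump() in the same preprocessor guard, so release builds drop the method while LLVM_ENABLE_DUMP can opt back in. The pattern in isolation (a sketch, not the LLVM sources):

#include <cstdio>

struct Widget {
  void print(std::FILE *OS) const { std::fputs("widget\n", OS); }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Only compiled into asserts-enabled builds, or when LLVM_ENABLE_DUMP is
  // defined explicitly; callers in release code must not rely on dump().
  void dump() const { print(stderr); }
#endif
};

int main() {
  Widget W;
  W.print(stdout); // always available, unlike dump()
  return 0;
}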
diff --git a/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp b/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
index 0102ac7..ed94efb 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
@@ -51,7 +51,7 @@ struct MachineFunctionPrinterPass : public MachineFunctionPass {
char MachineFunctionPrinterPass::ID = 0;
}
-char &MachineFunctionPrinterPassID = MachineFunctionPrinterPass::ID;
+char &llvm::MachineFunctionPrinterPassID = MachineFunctionPrinterPass::ID;
INITIALIZE_PASS(MachineFunctionPrinterPass, "print-machineinstrs",
"Machine Function Printer", false, false)
diff --git a/contrib/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
index b166849..ce8d520 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
@@ -111,6 +111,7 @@ void MachineOperand::setIsDef(bool Val) {
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
void MachineOperand::ChangeToImmediate(int64_t ImmVal) {
+ assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");
// If this operand is currently a register operand, and if this is in a
// function, deregister the operand from the register's use/def list.
if (isReg() && isOnRegUseList())
@@ -136,7 +137,8 @@ void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
RegInfo = &MF->getRegInfo();
// If this operand is already a register operand, remove it from the
// register's use/def lists.
- if (RegInfo && isReg())
+ bool WasReg = isReg();
+ if (RegInfo && WasReg)
RegInfo->removeRegOperandFromUseList(this);
// Change this to a register and set the reg#.
@@ -153,6 +155,9 @@ void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
IsDebug = isDebug;
// Ensure isOnRegUseList() returns false.
Contents.Reg.Prev = 0;
+ // Preserve the tie when the operand was already a register.
+ if (!WasReg)
+ TiedTo = 0;
// If this operand is embedded in a function, add the operand to the
// register's use/def list.
@@ -193,7 +198,8 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
return !strcmp(getSymbolName(), Other.getSymbolName()) &&
getOffset() == Other.getOffset();
case MachineOperand::MO_BlockAddress:
- return getBlockAddress() == Other.getBlockAddress();
+ return getBlockAddress() == Other.getBlockAddress() &&
+ getOffset() == Other.getOffset();
case MO_RegisterMask:
return getRegMask() == Other.getRegMask();
case MachineOperand::MO_MCSymbol:
@@ -208,8 +214,8 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
hash_code llvm::hash_value(const MachineOperand &MO) {
switch (MO.getType()) {
case MachineOperand::MO_Register:
- return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getReg(),
- MO.getSubReg(), MO.isDef());
+ // Register operands don't have target flags.
+ return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
case MachineOperand::MO_Immediate:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
case MachineOperand::MO_CImmediate:
@@ -234,7 +240,7 @@ hash_code llvm::hash_value(const MachineOperand &MO) {
MO.getOffset());
case MachineOperand::MO_BlockAddress:
return hash_combine(MO.getType(), MO.getTargetFlags(),
- MO.getBlockAddress());
+ MO.getBlockAddress(), MO.getOffset());
case MachineOperand::MO_RegisterMask:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask());
case MachineOperand::MO_Metadata:
@@ -262,7 +268,7 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << PrintReg(getReg(), TRI, getSubReg());
if (isDef() || isKill() || isDead() || isImplicit() || isUndef() ||
- isInternalRead() || isEarlyClobber()) {
+ isInternalRead() || isEarlyClobber() || isTied()) {
OS << '<';
bool NeedComma = false;
if (isDef()) {
@@ -282,27 +288,32 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
NeedComma = true;
}
- if (isKill() || isDead() || (isUndef() && isUse()) || isInternalRead()) {
+ if (isKill()) {
if (NeedComma) OS << ',';
- NeedComma = false;
- if (isKill()) {
- OS << "kill";
- NeedComma = true;
- }
- if (isDead()) {
- OS << "dead";
- NeedComma = true;
- }
- if (isUndef() && isUse()) {
- if (NeedComma) OS << ',';
- OS << "undef";
- NeedComma = true;
- }
- if (isInternalRead()) {
- if (NeedComma) OS << ',';
- OS << "internal";
- NeedComma = true;
- }
+ OS << "kill";
+ NeedComma = true;
+ }
+ if (isDead()) {
+ if (NeedComma) OS << ',';
+ OS << "dead";
+ NeedComma = true;
+ }
+ if (isUndef() && isUse()) {
+ if (NeedComma) OS << ',';
+ OS << "undef";
+ NeedComma = true;
+ }
+ if (isInternalRead()) {
+ if (NeedComma) OS << ',';
+ OS << "internal";
+ NeedComma = true;
+ }
+ if (isTied()) {
+ if (NeedComma) OS << ',';
+ OS << "tied";
+ if (TiedTo != 15)
+ OS << unsigned(TiedTo - 1);
+ NeedComma = true;
}
OS << '>';
}
@@ -352,6 +363,7 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
case MachineOperand::MO_BlockAddress:
OS << '<';
WriteAsOperand(OS, getBlockAddress(), /*PrintType=*/false);
+ if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
case MachineOperand::MO_RegisterMask:
@@ -528,20 +540,6 @@ void MachineInstr::addImplicitDefUseOperands() {
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
-MachineInstr::MachineInstr(const MCInstrDesc &tid, bool NoImp)
- : MCID(&tid), Flags(0), AsmPrinterFlags(0),
- NumMemRefs(0), MemRefs(0), Parent(0) {
- unsigned NumImplicitOps = 0;
- if (!NoImp)
- NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
- Operands.reserve(NumImplicitOps + MCID->getNumOperands());
- if (!NoImp)
- addImplicitDefUseOperands();
- // Make sure that we get added to a machine basicblock
- LeakDetector::addGarbageObject(this);
-}
-
-/// MachineInstr ctor - As above, but with a DebugLoc.
MachineInstr::MachineInstr(const MCInstrDesc &tid, const DebugLoc dl,
bool NoImp)
: MCID(&tid), Flags(0), AsmPrinterFlags(0),
@@ -559,21 +557,6 @@ MachineInstr::MachineInstr(const MCInstrDesc &tid, const DebugLoc dl,
/// MachineInstr ctor - Work exactly the same as the ctor two above, except
/// that the MachineInstr is created and added to the end of the specified
/// basic block.
-MachineInstr::MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &tid)
- : MCID(&tid), Flags(0), AsmPrinterFlags(0),
- NumMemRefs(0), MemRefs(0), Parent(0) {
- assert(MBB && "Cannot use inserting ctor with null basic block!");
- unsigned NumImplicitOps =
- MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
- Operands.reserve(NumImplicitOps + MCID->getNumOperands());
- addImplicitDefUseOperands();
- // Make sure that we get added to a machine basicblock
- LeakDetector::addGarbageObject(this);
- MBB->push_back(this); // Add instruction to end of basic block!
-}
-
-/// MachineInstr ctor - As above, but with a DebugLoc.
-///
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
const MCInstrDesc &tid)
: MCID(&tid), Flags(0), AsmPrinterFlags(0),
@@ -673,6 +656,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
if (!isImpReg && !isInlineAsm()) {
while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
--OpNo;
+ assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
if (RegInfo)
RegInfo->removeRegOperandFromUseList(&Operands[OpNo]);
}
@@ -708,12 +692,25 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
if (Operands[OpNo].isReg()) {
// Ensure isOnRegUseList() returns false, regardless of Op's status.
Operands[OpNo].Contents.Reg.Prev = 0;
+ // Ignore existing ties. This is not a property that can be copied.
+ Operands[OpNo].TiedTo = 0;
// Add the new operand to RegInfo.
if (RegInfo)
RegInfo->addRegOperandToUseList(&Operands[OpNo]);
- // If the register operand is flagged as early, mark the operand as such.
- if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
- Operands[OpNo].setIsEarlyClobber(true);
+ // The MCID operand information isn't accurate until we start adding
+ // explicit operands. The implicit operands are added first, then the
+ // explicits are inserted before them.
+ if (!isImpReg) {
+ // Tie uses to defs as indicated in MCInstrDesc.
+ if (Operands[OpNo].isUse()) {
+ int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
+ if (DefIdx != -1)
+ tieOperands(DefIdx, OpNo);
+ }
+ // If the register operand is flagged as early, mark the operand as such.
+ if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
+ Operands[OpNo].setIsEarlyClobber(true);
+ }
}
// Re-add all the implicit ops.
@@ -730,6 +727,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
assert(OpNo < Operands.size() && "Invalid operand number");
+ untieRegOperand(OpNo);
MachineRegisterInfo *RegInfo = getRegInfo();
// Special case removing the last one.
@@ -752,6 +750,13 @@ void MachineInstr::RemoveOperand(unsigned OpNo) {
}
}
+#ifndef NDEBUG
+ // Moving tied operands would break the ties.
+ for (unsigned i = OpNo + 1, e = Operands.size(); i != e; ++i)
+ if (Operands[i].isReg())
+ assert(!Operands[i].isTied() && "Cannot move tied operands");
+#endif
+
Operands.erase(Operands.begin()+OpNo);
if (RegInfo) {
@@ -935,6 +940,12 @@ bool MachineInstr::isStackAligningInlineAsm() const {
return false;
}
+InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
+ assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
+ unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
+}
+
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
unsigned *GroupNo) const {
assert(isInlineAsm() && "Expected an inline asm instruction");
@@ -1004,9 +1015,10 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx,
unsigned MachineInstr::getBundleSize() const {
assert(isBundle() && "Expecting a bundle");
- MachineBasicBlock::const_instr_iterator I = *this;
+ const MachineBasicBlock *MBB = getParent();
+ MachineBasicBlock::const_instr_iterator I = *this, E = MBB->instr_end();
unsigned Size = 0;
- while ((++I)->isInsideBundle()) {
+ while ((++I != E) && I->isInsideBundle()) {
++Size;
}
assert(Size > 1 && "Malformed bundle");
@@ -1114,107 +1126,99 @@ int MachineInstr::findFirstPredOperandIdx() const {
return -1;
}
-/// isRegTiedToUseOperand - Given the index of a register def operand,
-/// check if the register def is tied to a source operand, due to either
-/// two-address elimination or inline assembly constraints. Returns the
-/// first tied use operand index by reference is UseOpIdx is not null.
-bool MachineInstr::
-isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
- if (isInlineAsm()) {
- assert(DefOpIdx > InlineAsm::MIOp_FirstOperand);
- const MachineOperand &MO = getOperand(DefOpIdx);
- if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0)
- return false;
- // Determine the actual operand index that corresponds to this index.
- unsigned DefNo = 0;
- int FlagIdx = findInlineAsmFlagIdx(DefOpIdx, &DefNo);
- if (FlagIdx < 0)
- return false;
+// MachineOperand::TiedTo is 4 bits wide.
+const unsigned TiedMax = 15;
- // Which part of the group is DefOpIdx?
- unsigned DefPart = DefOpIdx - (FlagIdx + 1);
-
- for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands();
- i != e; ++i) {
- const MachineOperand &FMO = getOperand(i);
- if (!FMO.isImm())
- continue;
- if (i+1 >= e || !getOperand(i+1).isReg() || !getOperand(i+1).isUse())
- continue;
- unsigned Idx;
- if (InlineAsm::isUseOperandTiedToDef(FMO.getImm(), Idx) &&
- Idx == DefNo) {
- if (UseOpIdx)
- *UseOpIdx = (unsigned)i + 1 + DefPart;
- return true;
- }
- }
- return false;
+/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
+///
+/// Use and def operands can be tied together, indicated by a non-zero TiedTo
+/// field. TiedTo can have these values:
+///
+/// 0: Operand is not tied to anything.
+/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
+/// TiedMax: Tied to an operand >= TiedMax-1.
+///
+/// The tied def must be one of the first TiedMax operands on a normal
+/// instruction. INLINEASM instructions allow more tied defs.
+///
+void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
+ MachineOperand &DefMO = getOperand(DefIdx);
+ MachineOperand &UseMO = getOperand(UseIdx);
+ assert(DefMO.isDef() && "DefIdx must be a def operand");
+ assert(UseMO.isUse() && "UseIdx must be a use operand");
+ assert(!DefMO.isTied() && "Def is already tied to another use");
+ assert(!UseMO.isTied() && "Use is already tied to another def");
+
+ if (DefIdx < TiedMax)
+ UseMO.TiedTo = DefIdx + 1;
+ else {
+ // Inline asm can use the group descriptors to find tied operands, but on
+ // normal instructions, the tied def must be within the first TiedMax
+ // operands.
+ assert(isInlineAsm() && "DefIdx out of range");
+ UseMO.TiedTo = TiedMax;
}
- assert(getOperand(DefOpIdx).isDef() && "DefOpIdx is not a def!");
- const MCInstrDesc &MCID = getDesc();
- for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = getOperand(i);
- if (MO.isReg() && MO.isUse() &&
- MCID.getOperandConstraint(i, MCOI::TIED_TO) == (int)DefOpIdx) {
- if (UseOpIdx)
- *UseOpIdx = (unsigned)i;
- return true;
- }
- }
- return false;
+ // UseIdx can be out of range; we'll search for it in findTiedOperandIdx().
+ DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
-/// isRegTiedToDefOperand - Return true if the operand of the specified index
-/// is a register use and it is tied to an def operand. It also returns the def
-/// operand index by reference.
-bool MachineInstr::
-isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
- if (isInlineAsm()) {
- const MachineOperand &MO = getOperand(UseOpIdx);
- if (!MO.isReg() || !MO.isUse() || MO.getReg() == 0)
- return false;
+/// Given the index of a tied register operand, find the operand it is tied to.
+/// Defs are tied to uses and vice versa. Returns the index of the tied operand
+/// which must exist.
+unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
+ const MachineOperand &MO = getOperand(OpIdx);
+ assert(MO.isTied() && "Operand isn't tied");
- // Find the flag operand corresponding to UseOpIdx
- int FlagIdx = findInlineAsmFlagIdx(UseOpIdx);
- if (FlagIdx < 0)
- return false;
+ // Normally TiedTo is in range.
+ if (MO.TiedTo < TiedMax)
+ return MO.TiedTo - 1;
- const MachineOperand &UFMO = getOperand(FlagIdx);
- unsigned DefNo;
- if (InlineAsm::isUseOperandTiedToDef(UFMO.getImm(), DefNo)) {
- if (!DefOpIdx)
- return true;
-
- unsigned DefIdx = InlineAsm::MIOp_FirstOperand;
- // Remember to adjust the index. First operand is asm string, second is
- // the HasSideEffects and AlignStack bits, then there is a flag for each.
- while (DefNo) {
- const MachineOperand &FMO = getOperand(DefIdx);
- assert(FMO.isImm());
- // Skip over this def.
- DefIdx += InlineAsm::getNumOperandRegisters(FMO.getImm()) + 1;
- --DefNo;
- }
- *DefOpIdx = DefIdx + UseOpIdx - FlagIdx;
- return true;
+ // Uses on normal instructions can be out of range.
+ if (!isInlineAsm()) {
+ // Normal tied defs must be in the 0..TiedMax-1 range.
+ if (MO.isUse())
+ return TiedMax - 1;
+ // MO is a def. Search for the tied use.
+ for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
+ const MachineOperand &UseMO = getOperand(i);
+ if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
+ return i;
}
- return false;
+ llvm_unreachable("Can't find tied use");
}
- const MCInstrDesc &MCID = getDesc();
- if (UseOpIdx >= MCID.getNumOperands())
- return false;
- const MachineOperand &MO = getOperand(UseOpIdx);
- if (!MO.isReg() || !MO.isUse())
- return false;
- int DefIdx = MCID.getOperandConstraint(UseOpIdx, MCOI::TIED_TO);
- if (DefIdx == -1)
- return false;
- if (DefOpIdx)
- *DefOpIdx = (unsigned)DefIdx;
- return true;
+ // Now deal with inline asm by parsing the operand group descriptor flags.
+ // Find the beginning of each operand group.
+ SmallVector<unsigned, 8> GroupIdx;
+ unsigned OpIdxGroup = ~0u;
+ unsigned NumOps;
+ for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
+ i += NumOps) {
+ const MachineOperand &FlagMO = getOperand(i);
+ assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
+ unsigned CurGroup = GroupIdx.size();
+ GroupIdx.push_back(i);
+ NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
+ // OpIdx belongs to this operand group.
+ if (OpIdx > i && OpIdx < i + NumOps)
+ OpIdxGroup = CurGroup;
+ unsigned TiedGroup;
+ if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
+ continue;
+ // Operands in this group are tied to operands in TiedGroup which must be
+ // earlier. Find the number of operands between the two groups.
+ unsigned Delta = i - GroupIdx[TiedGroup];
+
+ // OpIdx is a use tied to TiedGroup.
+ if (OpIdxGroup == CurGroup)
+ return OpIdx - Delta;
+
+ // OpIdx is a def tied to this use group.
+ if (OpIdxGroup == TiedGroup)
+ return OpIdx + Delta;
+ }
+ llvm_unreachable("Invalid tied operand on inline asm");
}
/// clearKillInfo - Clears kill flags on all operands.
@@ -1292,7 +1296,12 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
AliasAnalysis *AA,
bool &SawStore) const {
// Ignore stuff that we obviously can't move.
- if (mayStore() || isCall()) {
+ //
+ // Treat volatile loads as stores. This is not strictly necessary for
+ // volatiles, but it is required for atomic loads. It is not allowed to move
+ // a load across an atomic load with Ordering > Monotonic.
+ if (mayStore() || isCall() ||
+ (mayLoad() && hasOrderedMemoryRef())) {
SawStore = true;
return false;
}
@@ -1308,8 +1317,8 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
// load.
if (mayLoad() && !isInvariantLoad(AA))
// Otherwise, this is a real load. If there is a store between the load and
- // end of block, or if the load is volatile, we can't move it.
- return !SawStore && !hasVolatileMemoryRef();
+ // end of block, we can't move it.
+ return !SawStore;
return true;
}
@@ -1340,11 +1349,11 @@ bool MachineInstr::isSafeToReMat(const TargetInstrInfo *TII,
return true;
}
-/// hasVolatileMemoryRef - Return true if this instruction may have a
-/// volatile memory reference, or if the information describing the
-/// memory reference is not available. Return false if it is known to
-/// have no volatile memory references.
-bool MachineInstr::hasVolatileMemoryRef() const {
+/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
+/// or volatile memory reference, or if the information describing the memory
+/// reference is not available. Return false if it is known to have no ordered
+/// memory references.
+bool MachineInstr::hasOrderedMemoryRef() const {
// An instruction known never to access memory won't have an ordered or
// volatile access.
if (!mayStore() &&
!mayLoad() &&
@@ -1357,9 +1366,9 @@ bool MachineInstr::hasVolatileMemoryRef() const {
if (memoperands_empty())
return true;
- // Check the memory reference information for volatile references.
+ // Check the memory reference information for ordered references.
for (mmo_iterator I = memoperands_begin(), E = memoperands_end(); I != E; ++I)
- if ((*I)->isVolatile())
+ if (!(*I)->isUnordered())
return true;
return false;
@@ -1461,7 +1470,9 @@ void MachineInstr::copyImplicitOps(const MachineInstr *MI) {
}
void MachineInstr::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dbgs() << " " << *this;
+#endif
}
static void printDebugLoc(DebugLoc DL, const MachineFunction *MF,
@@ -1540,6 +1551,10 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << " [sideeffect]";
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
OS << " [alignstack]";
+ if (getInlineAsmDialect() == InlineAsm::AD_ATT)
+ OS << " [attdialect]";
+ if (getInlineAsmDialect() == InlineAsm::AD_Intel)
+ OS << " [inteldialect]";
StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
FirstOp = false;
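
The tieOperands() comment above defines a small saturating encoding for the 4-bit TiedTo field. A standalone round-trip of just that encoding (the helper names are invented for this sketch):

#include <algorithm>
#include <cassert>

static const unsigned TiedMax = 15; // TiedTo is 4 bits wide: values 0..15

// 0 = not tied; 1..TiedMax-1 = tied to getOperand(TiedTo-1);
// TiedMax = tied to some operand at index >= TiedMax-1.
unsigned encodeTiedTo(unsigned PartnerIdx) {
  return std::min(PartnerIdx + 1, TiedMax);
}

// Returns true and the exact index when it is in range; false means the
// caller must search the operand list, as findTiedOperandIdx() does above.
bool decodeTiedTo(unsigned TiedTo, unsigned &Idx) {
  assert(TiedTo != 0 && "operand is not tied");
  if (TiedTo == TiedMax)
    return false;
  Idx = TiedTo - 1;
  return true;
}

int main() {
  unsigned Idx = 0;
  assert(encodeTiedTo(3) == 4 && decodeTiedTo(4, Idx) && Idx == 3);
  assert(encodeTiedTo(20) == TiedMax && !decodeTiedTo(TiedMax, Idx));
  return 0;
}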
diff --git a/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp b/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
index b7de7bf..1f7fbfc 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
@@ -109,10 +109,10 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB,
MachineInstrBuilder MIB = BuildMI(MBB, FirstMI, FirstMI->getDebugLoc(),
TII->get(TargetOpcode::BUNDLE));
- SmallVector<unsigned, 8> LocalDefs;
- SmallSet<unsigned, 8> LocalDefSet;
+ SmallVector<unsigned, 32> LocalDefs;
+ SmallSet<unsigned, 32> LocalDefSet;
SmallSet<unsigned, 8> DeadDefSet;
- SmallSet<unsigned, 8> KilledDefSet;
+ SmallSet<unsigned, 16> KilledDefSet;
SmallVector<unsigned, 8> ExternUses;
SmallSet<unsigned, 8> ExternUseSet;
SmallSet<unsigned, 8> KilledUseSet;
@@ -181,7 +181,7 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB,
Defs.clear();
}
- SmallSet<unsigned, 8> Added;
+ SmallSet<unsigned, 32> Added;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
if (Added.insert(Reg)) {
@@ -248,10 +248,10 @@ bool llvm::finalizeBundles(MachineFunction &MF) {
// MachineOperand iterator
//===----------------------------------------------------------------------===//
-MachineOperandIteratorBase::RegInfo
+MachineOperandIteratorBase::VirtRegInfo
MachineOperandIteratorBase::analyzeVirtReg(unsigned Reg,
SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops) {
- RegInfo RI = { false, false, false };
+ VirtRegInfo RI = { false, false, false };
for(; isValid(); ++*this) {
MachineOperand &MO = deref();
if (!MO.isReg() || MO.getReg() != Reg)
@@ -276,3 +276,53 @@ MachineOperandIteratorBase::analyzeVirtReg(unsigned Reg,
}
return RI;
}
+
+MachineOperandIteratorBase::PhysRegInfo
+MachineOperandIteratorBase::analyzePhysReg(unsigned Reg,
+ const TargetRegisterInfo *TRI) {
+ bool AllDefsDead = true;
+ PhysRegInfo PRI = {false, false, false, false, false, false, false};
+
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
+ "analyzePhysReg not given a physical register!");
+ for (; isValid(); ++*this) {
+ MachineOperand &MO = deref();
+
+ if (MO.isRegMask() && MO.clobbersPhysReg(Reg))
+ PRI.Clobbers = true; // Regmask clobbers Reg.
+
+ if (!MO.isReg())
+ continue;
+
+ unsigned MOReg = MO.getReg();
+ if (!MOReg || !TargetRegisterInfo::isPhysicalRegister(MOReg))
+ continue;
+
+ bool IsRegOrSuperReg = MOReg == Reg || TRI->isSubRegister(MOReg, Reg);
+ bool IsRegOrOverlapping = MOReg == Reg || TRI->regsOverlap(MOReg, Reg);
+
+ if (IsRegOrSuperReg && MO.readsReg()) {
+ // Reg or a super-reg is read, and perhaps killed also.
+ PRI.Reads = true;
+ PRI.Kills = MO.isKill();
+ }
+ if (IsRegOrOverlapping && MO.readsReg()) {
+ PRI.ReadsOverlap = true; // Reg or an overlapping register is read.
+ }
+
+ if (!MO.isDef())
+ continue;
+
+ if (IsRegOrSuperReg) {
+ PRI.Defines = true; // Reg or a super-register is defined.
+ if (!MO.isDead())
+ AllDefsDead = false;
+ }
+ if (IsRegOrOverlapping)
+ PRI.Clobbers = true; // Reg or an overlapping reg is defined.
+ }
+
+ if (AllDefsDead && PRI.Defines)
+ PRI.DefinesDead = true; // Reg or super-register was defined and was dead.
+
+ return PRI;
+}
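
analyzePhysReg() above distinguishes exact or super-register accesses from merely overlapping ones, with the former implying the latter. A compact model of that flag discipline (toy struct and invented helper):

struct PhysRegInfo {
  bool Reads, ReadsOverlap, Defines, Clobbers;
};

// Exact/super-register accesses always overlap, so Reads implies ReadsOverlap
// and Defines implies Clobbers, matching the updates in the hunk above.
void noteAccess(PhysRegInfo &PRI, bool ExactOrSuper, bool Overlaps,
                bool IsDef) {
  if (IsDef) {
    if (ExactOrSuper) PRI.Defines = true;
    if (Overlaps)     PRI.Clobbers = true;
  } else {
    if (ExactOrSuper) PRI.Reads = true;
    if (Overlaps)     PRI.ReadsOverlap = true;
  }
}

int main() {
  PhysRegInfo PRI = {false, false, false, false};
  noteAccess(PRI, true, true, false); // an exact read is also an overlap read
  return (PRI.Reads && PRI.ReadsOverlap) ? 0 : 1;
}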
diff --git a/contrib/llvm/lib/CodeGen/MachineLICM.cpp b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
index efec481..169443e 100644
--- a/contrib/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
@@ -334,7 +334,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
else
DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
- DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
+ DEBUG(dbgs() << MF.getName() << " ********\n");
if (PreRegAlloc) {
// Estimate register pressure during pre-regalloc pass.
diff --git a/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp b/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
index 9f3829e..27afeec 100644
--- a/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
@@ -74,6 +74,8 @@ MachineBasicBlock *MachineLoop::getBottomBlock() {
return BotMBB;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineLoop::dump() const {
print(dbgs());
}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
index ea98b23..005bf78 100644
--- a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
@@ -25,7 +25,7 @@
using namespace llvm;
using namespace llvm::dwarf;
-// Handle the Pass registration stuff necessary to use TargetData's.
+// Handle the Pass registration stuff necessary to use DataLayout's.
INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo",
"Machine Module Information", false, false)
char MachineModuleInfo::ID = 0;
diff --git a/contrib/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp b/contrib/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp
index 5ab56c0..a1c7e9f 100644
--- a/contrib/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp
@@ -21,8 +21,8 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
// Out of line virtual method.
-void MachineModuleInfoMachO::Anchor() {}
-void MachineModuleInfoELF::Anchor() {}
+void MachineModuleInfoMachO::anchor() {}
+void MachineModuleInfoELF::anchor() {}
static int SortSymbolPair(const void *LHS, const void *RHS) {
typedef std::pair<MCSymbol*, MachineModuleInfoImpl::StubValueTy> PairTy;
diff --git a/contrib/llvm/lib/CodeGen/MachinePostDominators.cpp b/contrib/llvm/lib/CodeGen/MachinePostDominators.cpp
new file mode 100644
index 0000000..c3f6e92
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachinePostDominators.cpp
@@ -0,0 +1,55 @@
+//===- MachinePostDominators.cpp - Machine Post Dominator Calculation ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements simple dominator construction algorithms for finding
+// post dominators on machine functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachinePostDominators.h"
+
+using namespace llvm;
+
+char MachinePostDominatorTree::ID = 0;
+
+// Declare initializeMachinePostDominatorTreePass.
+INITIALIZE_PASS(MachinePostDominatorTree, "machinepostdomtree",
+ "MachinePostDominator Tree Construction", true, true)
+
+MachinePostDominatorTree::MachinePostDominatorTree() : MachineFunctionPass(ID) {
+ initializeMachinePostDominatorTreePass(*PassRegistry::getPassRegistry());
+ DT = new DominatorTreeBase<MachineBasicBlock>(true); // true indicates a
+ // postdominator tree
+}
+
+FunctionPass *
+MachinePostDominatorTree::createMachinePostDominatorTreePass() {
+ return new MachinePostDominatorTree();
+}
+
+bool
+MachinePostDominatorTree::runOnMachineFunction(MachineFunction &F) {
+ DT->recalculate(F);
+ return false;
+}
+
+MachinePostDominatorTree::~MachinePostDominatorTree() {
+ delete DT;
+}
+
+void
+MachinePostDominatorTree::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+void
+MachinePostDominatorTree::print(llvm::raw_ostream &OS, const Module *M) const {
+ DT->print(OS);
+}
diff --git a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index 5fb938f..95d7a7d 100644
--- a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -21,7 +21,7 @@ MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI)
: TRI(&TRI), IsSSA(true), TracksLiveness(true) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
- UsedPhysRegs.resize(TRI.getNumRegs());
+ UsedRegUnits.resize(TRI.getNumRegUnits());
UsedPhysRegMask.resize(TRI.getNumRegs());
// Create the physreg use/def lists.
@@ -32,7 +32,7 @@ MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI)
MachineRegisterInfo::~MachineRegisterInfo() {
#ifndef NDEBUG
clearVirtRegs();
- for (unsigned i = 0, e = UsedPhysRegs.size(); i != e; ++i)
+ for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i)
assert(!PhysRegUseDefLists[i] &&
"PhysRegUseDefLists has entries after all instructions are deleted");
#endif
@@ -306,22 +306,18 @@ void MachineRegisterInfo::dumpUses(unsigned Reg) const {
void MachineRegisterInfo::freezeReservedRegs(const MachineFunction &MF) {
ReservedRegs = TRI->getReservedRegs(MF);
+ assert(ReservedRegs.size() == TRI->getNumRegs() &&
+ "Invalid ReservedRegs vector from target");
}
bool MachineRegisterInfo::isConstantPhysReg(unsigned PhysReg,
const MachineFunction &MF) const {
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
- // Check if any overlapping register is modified.
+ // Check if any overlapping register is modified, or allocatable so it may be
+ // used later.
for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI)
- if (!def_empty(*AI))
- return false;
-
- // Check if any overlapping register is allocatable so it may be used later.
- if (AllocatableRegs.empty())
- AllocatableRegs = TRI->getAllocatableSet(MF);
- for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI)
- if (AllocatableRegs.test(*AI))
+ if (!def_empty(*AI) || isAllocatable(*AI))
return false;
return true;
}
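
The switch from UsedPhysRegs to UsedRegUnits relies on aliasing registers sharing register units. A toy numbering makes the idea concrete (the unit layout below is invented, not a real target description):

#include <bitset>
#include <cstdio>

// Pretend x86-ish model: AL and AH are the two units covered by AX, so
// marking AX used marks both units, and any register overlapping AX (AL, AH,
// EAX, ...) then tests as used without per-alias bookkeeping.
enum RegUnit { UnitAL, UnitAH, NumUnits };

int main() {
  std::bitset<NumUnits> UsedRegUnits;
  UsedRegUnits.set(UnitAL); // "AX is used": mark every unit it covers
  UsedRegUnits.set(UnitAH);
  std::printf("AL used: %d\n", (int)UsedRegUnits.test(UnitAL)); // prints 1
  return 0;
}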
diff --git a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
index a1dc948..a4817d0 100644
--- a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -18,11 +18,8 @@
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
-#include "llvm/CodeGen/RegisterPressure.h"
-#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/ScheduleDAGILP.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -35,10 +32,12 @@
using namespace llvm;
-static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
- cl::desc("Force top-down list scheduling"));
-static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
- cl::desc("Force bottom-up list scheduling"));
+namespace llvm {
+cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
+ cl::desc("Force top-down list scheduling"));
+cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
+ cl::desc("Force bottom-up list scheduling"));
+}
#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
@@ -50,6 +49,15 @@ static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
static bool ViewMISchedDAGs = false;
#endif // NDEBUG
+// Threshold to very roughly model an out-of-order processor's instruction
+// buffers. If the actual value of this threshold matters much in practice, then
+// it can be specified by the machine model. For now, it's an experimental
+// tuning knob to determine when and if it matters.
+static cl::opt<unsigned> ILPWindow("ilp-window", cl::Hidden,
+ cl::desc("Allow expected latency to exceed the critical path by N cycles "
+ "before attempting to balance ILP"),
+ cl::init(10U));
+
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//
@@ -221,7 +229,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
// The Scheduler may insert instructions during either schedule() or
// exitRegion(), even for empty regions. So the local iterators 'I' and
// 'RegionEnd' are invalid across these calls.
- unsigned RemainingCount = MBB->size();
+ unsigned RemainingInstrs = MBB->size();
for(MachineBasicBlock::iterator RegionEnd = MBB->end();
RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {
@@ -230,19 +238,19 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
|| TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
--RegionEnd;
// Count the boundary instruction.
- --RemainingCount;
+ --RemainingInstrs;
}
// The next region starts above the previous region. Look backward in the
// instruction stream until we find the nearest boundary.
MachineBasicBlock::iterator I = RegionEnd;
- for(;I != MBB->begin(); --I, --RemainingCount) {
+ for(;I != MBB->begin(); --I, --RemainingInstrs) {
if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
break;
}
// Notify the scheduler of the region, even if we may skip scheduling
// it. Perhaps it still needs to be bundled.
- Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount);
+ Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);
// Skip empty scheduling regions (0 or 1 schedulable instructions).
if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
@@ -252,11 +260,11 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
continue;
}
DEBUG(dbgs() << "********** MI Scheduling **********\n");
- DEBUG(dbgs() << MF->getFunction()->getName()
+ DEBUG(dbgs() << MF->getName()
<< ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: ";
if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
else dbgs() << "End";
- dbgs() << " Remaining: " << RemainingCount << "\n");
+ dbgs() << " Remaining: " << RemainingInstrs << "\n");
// Schedule a region: possibly reorder instructions.
// This invalidates 'RegionEnd' and 'I'.
@@ -269,7 +277,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
// scheduler for the top of its scheduled region.
RegionEnd = Scheduler->begin();
}
- assert(RemainingCount == 0 && "Instruction count mismatch!");
+ assert(RemainingInstrs == 0 && "Instruction count mismatch!");
Scheduler->finishBlock();
}
Scheduler->finalizeSchedule();
@@ -281,157 +289,20 @@ void MachineScheduler::print(raw_ostream &O, const Module* m) const {
// unimplemented
}
-//===----------------------------------------------------------------------===//
-// MachineSchedStrategy - Interface to a machine scheduling algorithm.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class ScheduleDAGMI;
-
-/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected
-/// scheduling algorithm.
-///
-/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it
-/// in ScheduleDAGInstrs.h
-class MachineSchedStrategy {
-public:
- virtual ~MachineSchedStrategy() {}
-
- /// Initialize the strategy after building the DAG for a new region.
- virtual void initialize(ScheduleDAGMI *DAG) = 0;
-
- /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
- /// schedule the node at the top of the unscheduled region. Otherwise it will
- /// be scheduled at the bottom.
- virtual SUnit *pickNode(bool &IsTopNode) = 0;
-
- /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node.
- virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
-
- /// When all predecessor dependencies have been resolved, free this node for
- /// top-down scheduling.
- virtual void releaseTopNode(SUnit *SU) = 0;
- /// When all successor dependencies have been resolved, free this node for
- /// bottom-up scheduling.
- virtual void releaseBottomNode(SUnit *SU) = 0;
-};
-} // namespace
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void ReadyQueue::dump() {
+ dbgs() << Name << ": ";
+ for (unsigned i = 0, e = Queue.size(); i < e; ++i)
+ dbgs() << Queue[i]->NodeNum << " ";
+ dbgs() << "\n";
+}
+#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//
-namespace {
-/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
-/// machine instructions while updating LiveIntervals.
-class ScheduleDAGMI : public ScheduleDAGInstrs {
- AliasAnalysis *AA;
- RegisterClassInfo *RegClassInfo;
- MachineSchedStrategy *SchedImpl;
-
- MachineBasicBlock::iterator LiveRegionEnd;
-
- /// Register pressure in this region computed by buildSchedGraph.
- IntervalPressure RegPressure;
- RegPressureTracker RPTracker;
-
- /// List of pressure sets that exceed the target's pressure limit before
- /// scheduling, listed in increasing set ID order. Each pressure set is paired
- /// with its max pressure in the currently scheduled regions.
- std::vector<PressureElement> RegionCriticalPSets;
-
- /// The top of the unscheduled zone.
- MachineBasicBlock::iterator CurrentTop;
- IntervalPressure TopPressure;
- RegPressureTracker TopRPTracker;
-
- /// The bottom of the unscheduled zone.
- MachineBasicBlock::iterator CurrentBottom;
- IntervalPressure BotPressure;
- RegPressureTracker BotRPTracker;
-
-#ifndef NDEBUG
- /// The number of instructions scheduled so far. Used to cut off the
- /// scheduler at the point determined by misched-cutoff.
- unsigned NumInstrsScheduled;
-#endif
-public:
- ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
- ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
- AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
- RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
- CurrentBottom(), BotRPTracker(BotPressure) {
-#ifndef NDEBUG
- NumInstrsScheduled = 0;
-#endif
- }
-
- ~ScheduleDAGMI() {
- delete SchedImpl;
- }
-
- MachineBasicBlock::iterator top() const { return CurrentTop; }
- MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
-
- /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
- /// region. This covers all instructions in a block, while schedule() may only
- /// cover a subset.
- void enterRegion(MachineBasicBlock *bb,
- MachineBasicBlock::iterator begin,
- MachineBasicBlock::iterator end,
- unsigned endcount);
-
- /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
- /// reorderable instructions.
- void schedule();
-
- /// Get current register pressure for the top scheduled instructions.
- const IntervalPressure &getTopPressure() const { return TopPressure; }
- const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
-
- /// Get current register pressure for the bottom scheduled instructions.
- const IntervalPressure &getBotPressure() const { return BotPressure; }
- const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
-
- /// Get register pressure for the entire scheduling region before scheduling.
- const IntervalPressure &getRegPressure() const { return RegPressure; }
-
- const std::vector<PressureElement> &getRegionCriticalPSets() const {
- return RegionCriticalPSets;
- }
-
- /// getIssueWidth - Return the max instructions per scheduling group.
- unsigned getIssueWidth() const {
- return (InstrItins && InstrItins->SchedModel)
- ? InstrItins->SchedModel->IssueWidth : 1;
- }
-
- /// getNumMicroOps - Return the number of issue slots required for this MI.
- unsigned getNumMicroOps(MachineInstr *MI) const {
- if (!InstrItins) return 1;
- int UOps = InstrItins->getNumMicroOps(MI->getDesc().getSchedClass());
- return (UOps >= 0) ? UOps : TII->getNumMicroOps(InstrItins, MI);
- }
-
-protected:
- void initRegPressure();
- void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);
-
- void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
- bool checkSchedLimit();
-
- void releaseRoots();
-
- void releaseSucc(SUnit *SU, SDep *SuccEdge);
- void releaseSuccessors(SUnit *SU);
- void releasePred(SUnit *SU, SDep *PredEdge);
- void releasePredecessors(SUnit *SU);
-
- void placeDebugValues();
-};
-} // namespace
-
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
@@ -498,7 +369,7 @@ void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
BB->splice(InsertPos, BB, MI);
// Update LiveIntervals
- LIS->handleMove(MI);
+ LIS->handleMove(MI, /*UpdateFlags=*/true);
// Recede RegionBegin if an instruction moves above the first.
if (RegionBegin == InsertPos)
@@ -565,6 +436,9 @@ void ScheduleDAGMI::initRegPressure() {
std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
unsigned Limit = TRI->getRegPressureSetLimit(i);
+    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
+          << " Limit " << Limit
+          << " Actual " << RegionPressure[i] << "\n");
if (RegionPressure[i] > Limit)
RegionCriticalPSets.push_back(PressureElement(i, 0));
}
@@ -587,6 +461,74 @@ updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
}
}
+/// schedule - Called back from MachineScheduler::runOnMachineFunction
+/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
+/// only includes instructions that have DAG nodes, not scheduling boundaries.
+///
+/// This is a skeletal driver, with all the functionality pushed into helpers,
+/// so that it can be easily extended by experimental schedulers. Generally,
+/// implementing MachineSchedStrategy should be sufficient to implement a new
+/// scheduling algorithm. However, if a scheduler further subclasses
+/// ScheduleDAGMI then it will want to override this virtual method in order to
+/// update any specialized state.
+void ScheduleDAGMI::schedule() {
+ buildDAGWithRegPressure();
+
+ postprocessDAG();
+
+ DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
+ SUnits[su].dumpAll(this));
+
+ if (ViewMISchedDAGs) viewGraph();
+
+ initQueues();
+
+ bool IsTopNode = false;
+ while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
+ assert(!SU->isScheduled && "Node already scheduled");
+ if (!checkSchedLimit())
+ break;
+
+ scheduleMI(SU, IsTopNode);
+
+ updateQueues(SU, IsTopNode);
+ }
+ assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
+
+ placeDebugValues();
+
+ DEBUG({
+ unsigned BBNum = top()->getParent()->getNumber();
+ dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
+}
+
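As the comment above says, implementing MachineSchedStrategy should be sufficient for a new scheduling algorithm. A minimal sketch of such a strategy (hypothetical class name, shown only to illustrate the hooks schedule() drives; it relies on the interface this patch relocates out of this file):

  namespace {
  // Sketch: schedule strictly bottom-up, in the reverse of release order.
  class SketchStrategy : public MachineSchedStrategy {
    std::vector<SUnit*> ReadyQ;
  public:
    virtual void initialize(ScheduleDAGMI *DAG) { ReadyQ.clear(); }
    virtual void registerRoots() {}
    virtual SUnit *pickNode(bool &IsTopNode) {
      if (ReadyQ.empty()) return NULL;  // NULL ends the scheduling loop
      IsTopNode = false;                // always extend the bottom zone
      SUnit *SU = ReadyQ.back();
      ReadyQ.pop_back();
      return SU;
    }
    virtual void schedNode(SUnit *SU, bool IsTopNode) {}
    virtual void releaseTopNode(SUnit *SU) {}
    virtual void releaseBottomNode(SUnit *SU) { ReadyQ.push_back(SU); }
  };
  } // namespace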
+/// Build the DAG and setup three register pressure trackers.
+void ScheduleDAGMI::buildDAGWithRegPressure() {
+ // Initialize the register pressure tracker used by buildSchedGraph.
+ RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
+
+  // Account for liveness generated by the region boundary.
+ if (LiveRegionEnd != RegionEnd)
+ RPTracker.recede();
+
+ // Build the DAG, and compute current register pressure.
+ buildSchedGraph(AA, &RPTracker);
+ if (ViewMISchedDAGs) viewGraph();
+
+ // Initialize top/bottom trackers after computing region pressure.
+ initRegPressure();
+}
+
+/// Apply each ScheduleDAGMutation step in order.
+void ScheduleDAGMI::postprocessDAG() {
+ for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
+ Mutations[i]->apply(this);
+ }
+}
+
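postprocessDAG is the new mutation hook: each ScheduleDAGMutation runs once per region, after buildDAGWithRegPressure and before the strategy sees the DAG. A sketch under the assumption that the interface exposes a single apply() method (the class name here is made up):

  // Sketch: a mutation that could add artificial SDep edges, e.g. to keep
  // related memory operations adjacent, before any node is picked.
  struct SketchMutation : public ScheduleDAGMutation {
    virtual void apply(ScheduleDAGInstrs *DAG) {
      // Walk DAG->SUnits and call SUnit::addPred(...) on chosen pairs.
    }
  };

How mutations reach the Mutations vector (an addMutation-style setter on ScheduleDAGMI) is outside this hunk.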
// Release all DAG roots for scheduling.
void ScheduleDAGMI::releaseRoots() {
SmallVector<SUnit*, 16> BotRoots;
@@ -607,28 +549,10 @@ void ScheduleDAGMI::releaseRoots() {
SchedImpl->releaseBottomNode(*I);
}
-/// schedule - Called back from MachineScheduler::runOnMachineFunction
-/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
-/// only includes instructions that have DAG nodes, not scheduling boundaries.
-void ScheduleDAGMI::schedule() {
- // Initialize the register pressure tracker used by buildSchedGraph.
- RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
-
- // Account for liveness generate by the region boundary.
- if (LiveRegionEnd != RegionEnd)
- RPTracker.recede();
-
- // Build the DAG, and compute current register pressure.
- buildSchedGraph(AA, &RPTracker);
-
- // Initialize top/bottom trackers after computing region pressure.
- initRegPressure();
-
- DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
- SUnits[su].dumpAll(this));
-
- if (ViewMISchedDAGs) viewGraph();
+/// Identify DAG roots and setup scheduler queues.
+void ScheduleDAGMI::initQueues() {
+ // Initialize the strategy before modifying the DAG.
SchedImpl->initialize(this);
// Release edges from the special Entry node or to the special Exit node.
@@ -638,61 +562,64 @@ void ScheduleDAGMI::schedule() {
// Release all DAG roots for scheduling.
releaseRoots();
+ SchedImpl->registerRoots();
+
CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
CurrentBottom = RegionEnd;
- bool IsTopNode = false;
- while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
- if (!checkSchedLimit())
- break;
-
- // Move the instruction to its new location in the instruction stream.
- MachineInstr *MI = SU->getInstr();
-
- if (IsTopNode) {
- assert(SU->isTopReady() && "node still has unscheduled dependencies");
- if (&*CurrentTop == MI)
- CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
- else {
- moveInstruction(MI, CurrentTop);
- TopRPTracker.setPos(MI);
- }
+}
- // Update top scheduled pressure.
- TopRPTracker.advance();
- assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
- updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
+/// Move an instruction and update register pressure.
+void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
+ // Move the instruction to its new location in the instruction stream.
+ MachineInstr *MI = SU->getInstr();
- // Release dependent instructions for scheduling.
- releaseSuccessors(SU);
+ if (IsTopNode) {
+ assert(SU->isTopReady() && "node still has unscheduled dependencies");
+ if (&*CurrentTop == MI)
+ CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
+ else {
+ moveInstruction(MI, CurrentTop);
+ TopRPTracker.setPos(MI);
}
+
+ // Update top scheduled pressure.
+ TopRPTracker.advance();
+ assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
+ updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
+ }
+ else {
+ assert(SU->isBottomReady() && "node still has unscheduled dependencies");
+ MachineBasicBlock::iterator priorII =
+ priorNonDebug(CurrentBottom, CurrentTop);
+ if (&*priorII == MI)
+ CurrentBottom = priorII;
else {
- assert(SU->isBottomReady() && "node still has unscheduled dependencies");
- MachineBasicBlock::iterator priorII =
- priorNonDebug(CurrentBottom, CurrentTop);
- if (&*priorII == MI)
- CurrentBottom = priorII;
- else {
- if (&*CurrentTop == MI) {
- CurrentTop = nextIfDebug(++CurrentTop, priorII);
- TopRPTracker.setPos(CurrentTop);
- }
- moveInstruction(MI, CurrentBottom);
- CurrentBottom = MI;
+ if (&*CurrentTop == MI) {
+ CurrentTop = nextIfDebug(++CurrentTop, priorII);
+ TopRPTracker.setPos(CurrentTop);
}
- // Update bottom scheduled pressure.
- BotRPTracker.recede();
- assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
- updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
-
- // Release dependent instructions for scheduling.
- releasePredecessors(SU);
+ moveInstruction(MI, CurrentBottom);
+ CurrentBottom = MI;
}
- SU->isScheduled = true;
- SchedImpl->schedNode(SU, IsTopNode);
+ // Update bottom scheduled pressure.
+ BotRPTracker.recede();
+ assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
+ updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
}
- assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
+}
- placeDebugValues();
+/// Update scheduler queues after scheduling an instruction.
+void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
+ // Release dependent instructions for scheduling.
+ if (IsTopNode)
+ releaseSuccessors(SU);
+ else
+ releasePredecessors(SU);
+
+ SU->isScheduled = true;
+
+ // Notify the scheduling strategy after updating the DAG.
+ SchedImpl->schedNode(SU, IsTopNode);
}
/// Reinsert any remaining debug_values, just like the PostRA scheduler.
@@ -716,91 +643,146 @@ void ScheduleDAGMI::placeDebugValues() {
FirstDbgValue = NULL;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void ScheduleDAGMI::dumpSchedule() const {
+ for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
+ if (SUnit *SU = getSUnit(&(*MI)))
+ SU->dump(this);
+ else
+ dbgs() << "Missing SUnit\n";
+ }
+}
+#endif
+
//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//
namespace {
-/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
-/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
-/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
-class ReadyQueue {
- unsigned ID;
- std::string Name;
- std::vector<SUnit*> Queue;
-
+/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
+/// the schedule.
+class ConvergingScheduler : public MachineSchedStrategy {
public:
- ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
-
- unsigned getID() const { return ID; }
-
- StringRef getName() const { return Name; }
-
- // SU is in this queue if it's NodeQueueID is a superset of this ID.
- bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
-
- bool empty() const { return Queue.empty(); }
-
- unsigned size() const { return Queue.size(); }
-
- typedef std::vector<SUnit*>::iterator iterator;
+ /// Represent the type of SchedCandidate found within a single queue.
+ /// pickNodeBidirectional depends on these listed by decreasing priority.
+ enum CandReason {
+ NoCand, SingleExcess, SingleCritical, ResourceReduce, ResourceDemand,
+ BotHeightReduce, BotPathReduce, TopDepthReduce, TopPathReduce,
+ SingleMax, MultiPressure, NextDefUse, NodeOrder};
- iterator begin() { return Queue.begin(); }
+#ifndef NDEBUG
+ static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
+#endif
- iterator end() { return Queue.end(); }
+ /// Policy for scheduling the next instruction in the candidate's zone.
+ struct CandPolicy {
+ bool ReduceLatency;
+ unsigned ReduceResIdx;
+ unsigned DemandResIdx;
- iterator find(SUnit *SU) {
- return std::find(Queue.begin(), Queue.end(), SU);
- }
+ CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
+ };
- void push(SUnit *SU) {
- Queue.push_back(SU);
- SU->NodeQueueId |= ID;
- }
+ /// Status of an instruction's critical resource consumption.
+ struct SchedResourceDelta {
+ // Count critical resources in the scheduled region required by SU.
+ unsigned CritResources;
- void remove(iterator I) {
- (*I)->NodeQueueId &= ~ID;
- *I = Queue.back();
- Queue.pop_back();
- }
+ // Count critical resources from another region consumed by SU.
+ unsigned DemandedResources;
- void dump() {
- dbgs() << Name << ": ";
- for (unsigned i = 0, e = Queue.size(); i < e; ++i)
- dbgs() << Queue[i]->NodeNum << " ";
- dbgs() << "\n";
- }
-};
+ SchedResourceDelta(): CritResources(0), DemandedResources(0) {}
-/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
-/// the schedule.
-class ConvergingScheduler : public MachineSchedStrategy {
+ bool operator==(const SchedResourceDelta &RHS) const {
+ return CritResources == RHS.CritResources
+ && DemandedResources == RHS.DemandedResources;
+ }
+ bool operator!=(const SchedResourceDelta &RHS) const {
+ return !operator==(RHS);
+ }
+ };
/// Store the state used by ConvergingScheduler heuristics, required for the
/// lifetime of one invocation of pickNode().
struct SchedCandidate {
+ CandPolicy Policy;
+
// The best SUnit candidate.
SUnit *SU;
+ // The reason for this candidate.
+ CandReason Reason;
+
// Register pressure values for the best candidate.
RegPressureDelta RPDelta;
- SchedCandidate(): SU(NULL) {}
+ // Critical resource consumption of the best candidate.
+ SchedResourceDelta ResDelta;
+
+ SchedCandidate(const CandPolicy &policy)
+ : Policy(policy), SU(NULL), Reason(NoCand) {}
+
+ bool isValid() const { return SU; }
+
+ // Copy the status of another candidate without changing policy.
+ void setBest(SchedCandidate &Best) {
+ assert(Best.Reason != NoCand && "uninitialized Sched candidate");
+ SU = Best.SU;
+ Reason = Best.Reason;
+ RPDelta = Best.RPDelta;
+ ResDelta = Best.ResDelta;
+ }
+
+ void initResourceDelta(const ScheduleDAGMI *DAG,
+ const TargetSchedModel *SchedModel);
+ };
+
+ /// Summarize the unscheduled region.
+ struct SchedRemainder {
+ // Critical path through the DAG in expected latency.
+ unsigned CriticalPath;
+
+ // Unscheduled resources
+ SmallVector<unsigned, 16> RemainingCounts;
+ // Critical resource for the unscheduled zone.
+ unsigned CritResIdx;
+ // Number of micro-ops left to schedule.
+ unsigned RemainingMicroOps;
+ // Is the unscheduled zone resource limited.
+ bool IsResourceLimited;
+
+ unsigned MaxRemainingCount;
+
+ void reset() {
+ CriticalPath = 0;
+ RemainingCounts.clear();
+ CritResIdx = 0;
+ RemainingMicroOps = 0;
+ IsResourceLimited = false;
+ MaxRemainingCount = 0;
+ }
+
+ SchedRemainder() { reset(); }
+
+ void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
};
- /// Represent the type of SchedCandidate found within a single queue.
- enum CandResult {
- NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure };
/// Each Scheduling boundary is associated with ready queues. It tracks the
- /// current cycle in whichever direction at has moved, and maintains the state
+ /// current cycle in the direction of movement, and maintains the state
/// of "hazards" and other interlocks at the current cycle.
struct SchedBoundary {
ScheduleDAGMI *DAG;
+ const TargetSchedModel *SchedModel;
+ SchedRemainder *Rem;
ReadyQueue Available;
ReadyQueue Pending;
bool CheckPending;
+ // For heuristics, keep a list of the nodes that immediately depend on the
+ // most recently scheduled node.
+ SmallPtrSet<const SUnit*, 8> NextSUs;
+
ScheduleHazardRecognizer *HazardRec;
unsigned CurrCycle;
@@ -809,29 +791,88 @@ class ConvergingScheduler : public MachineSchedStrategy {
/// MinReadyCycle - Cycle of the soonest available instruction.
unsigned MinReadyCycle;
+ // The expected latency of the critical path in this scheduled zone.
+ unsigned ExpectedLatency;
+
+ // Resources used in the scheduled zone beyond this boundary.
+ SmallVector<unsigned, 16> ResourceCounts;
+
+ // Cache the critical resources ID in this scheduled zone.
+ unsigned CritResIdx;
+
+ // Is the scheduled region resource limited vs. latency limited.
+ bool IsResourceLimited;
+
+ unsigned ExpectedCount;
+
+ // Policy flag: attempt to find ILP until expected latency is covered.
+ bool ShouldIncreaseILP;
+
+#ifndef NDEBUG
// Remember the greatest min operand latency.
unsigned MaxMinLatency;
+#endif
+
+ void reset() {
+ Available.clear();
+ Pending.clear();
+ CheckPending = false;
+ NextSUs.clear();
+ HazardRec = 0;
+ CurrCycle = 0;
+ IssueCount = 0;
+ MinReadyCycle = UINT_MAX;
+ ExpectedLatency = 0;
+    // Reserve a zero-count for invalid CritResIdx.
+    ResourceCounts.resize(1);
+ assert(!ResourceCounts[0] && "nonzero count for bad resource");
+ CritResIdx = 0;
+ IsResourceLimited = false;
+ ExpectedCount = 0;
+ ShouldIncreaseILP = false;
+#ifndef NDEBUG
+ MaxMinLatency = 0;
+#endif
+ }
/// Pending queues extend the ready queues with the same ID and the
/// PendingFlag set.
SchedBoundary(unsigned ID, const Twine &Name):
- DAG(0), Available(ID, Name+".A"),
- Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
- CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0),
- MinReadyCycle(UINT_MAX), MaxMinLatency(0) {}
+ DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
+ Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P") {
+ reset();
+ }
~SchedBoundary() { delete HazardRec; }
+ void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
+ SchedRemainder *rem);
+
bool isTop() const {
return Available.getID() == ConvergingScheduler::TopQID;
}
+ unsigned getUnscheduledLatency(SUnit *SU) const {
+ if (isTop())
+ return SU->getHeight();
+ return SU->getDepth();
+ }
+
+ unsigned getCriticalCount() const {
+ return ResourceCounts[CritResIdx];
+ }
+
bool checkHazard(SUnit *SU);
+ void checkILPPolicy();
+
void releaseNode(SUnit *SU, unsigned ReadyCycle);
void bumpCycle();
+ void countResource(unsigned PIdx, unsigned Cycles);
+
void bumpNode(SUnit *SU);
void releasePending();
@@ -841,10 +882,13 @@ class ConvergingScheduler : public MachineSchedStrategy {
SUnit *pickOnlyChoice();
};
+private:
ScheduleDAGMI *DAG;
+ const TargetSchedModel *SchedModel;
const TargetRegisterInfo *TRI;
// State of the top and bottom scheduled instruction boundaries.
+ SchedRemainder Rem;
SchedBoundary Top;
SchedBoundary Bot;
@@ -857,7 +901,7 @@ public:
};
ConvergingScheduler():
- DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
+ DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
virtual void initialize(ScheduleDAGMI *dag);
@@ -869,28 +913,80 @@ public:
virtual void releaseBottomNode(SUnit *SU);
+ virtual void registerRoots();
+
protected:
- SUnit *pickNodeBidrectional(bool &IsTopNode);
+ void balanceZones(
+ ConvergingScheduler::SchedBoundary &CriticalZone,
+ ConvergingScheduler::SchedCandidate &CriticalCand,
+ ConvergingScheduler::SchedBoundary &OppositeZone,
+ ConvergingScheduler::SchedCandidate &OppositeCand);
+
+ void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand,
+ ConvergingScheduler::SchedCandidate &BotCand);
+
+ void tryCandidate(SchedCandidate &Cand,
+ SchedCandidate &TryCand,
+ SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ RegPressureTracker &TempTracker);
+
+ SUnit *pickNodeBidirectional(bool &IsTopNode);
+
+ void pickNodeFromQueue(SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate);
- CandResult pickNodeFromQueue(ReadyQueue &Q,
- const RegPressureTracker &RPTracker,
- SchedCandidate &Candidate);
#ifndef NDEBUG
- void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
- PressureElement P = PressureElement());
+ void traceCandidate(const SchedCandidate &Cand, const SchedBoundary &Zone);
#endif
};
} // namespace
+void ConvergingScheduler::SchedRemainder::
+init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
+ reset();
+ if (!SchedModel->hasInstrSchedModel())
+ return;
+ RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
+ for (std::vector<SUnit>::iterator
+ I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
+ const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
+ RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC);
+ for (TargetSchedModel::ProcResIter
+ PI = SchedModel->getWriteProcResBegin(SC),
+ PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
+ unsigned PIdx = PI->ProcResourceIdx;
+ unsigned Factor = SchedModel->getResourceFactor(PIdx);
+ RemainingCounts[PIdx] += (Factor * PI->Cycles);
+ }
+ }
+}
+
+void ConvergingScheduler::SchedBoundary::
+init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
+ reset();
+ DAG = dag;
+ SchedModel = smodel;
+ Rem = rem;
+ if (SchedModel->hasInstrSchedModel())
+ ResourceCounts.resize(SchedModel->getNumProcResourceKinds());
+}
+
void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
DAG = dag;
+ SchedModel = DAG->getSchedModel();
TRI = DAG->TRI;
- Top.DAG = dag;
- Bot.DAG = dag;
+ Rem.init(DAG, SchedModel);
+ Top.init(DAG, SchedModel, &Rem);
+ Bot.init(DAG, SchedModel, &Rem);
+
+ // Initialize resource counts.
- // Initialize the HazardRecognizers.
+ // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
+ // are disabled, then these HazardRecs will be disabled.
+ const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
const TargetMachine &TM = DAG->MF.getTarget();
- const InstrItineraryData *Itin = TM.getInstrItineraryData();
Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
@@ -905,13 +1001,12 @@ void ConvergingScheduler::releaseTopNode(SUnit *SU) {
for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
- unsigned Latency =
- DAG->computeOperandLatency(I->getSUnit(), SU, *I, /*FindMin=*/true);
+ unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
- Top.MaxMinLatency = std::max(Latency, Top.MaxMinLatency);
+ Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
- if (SU->TopReadyCycle < PredReadyCycle + Latency)
- SU->TopReadyCycle = PredReadyCycle + Latency;
+ if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
+ SU->TopReadyCycle = PredReadyCycle + MinLatency;
}
Top.releaseNode(SU, SU->TopReadyCycle);
}
@@ -925,17 +1020,27 @@ void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
- unsigned Latency =
- DAG->computeOperandLatency(SU, I->getSUnit(), *I, /*FindMin=*/true);
+ unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
- Bot.MaxMinLatency = std::max(Latency, Bot.MaxMinLatency);
+ Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
- if (SU->BotReadyCycle < SuccReadyCycle + Latency)
- SU->BotReadyCycle = SuccReadyCycle + Latency;
+ if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
+ SU->BotReadyCycle = SuccReadyCycle + MinLatency;
}
Bot.releaseNode(SU, SU->BotReadyCycle);
}
+void ConvergingScheduler::registerRoots() {
+ Rem.CriticalPath = DAG->ExitSU.getDepth();
+ // Some roots may not feed into ExitSU. Check all of them in case.
+ for (std::vector<SUnit*>::const_iterator
+ I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
+ if ((*I)->getDepth() > Rem.CriticalPath)
+ Rem.CriticalPath = (*I)->getDepth();
+ }
+ DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
+}
+
/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
@@ -953,14 +1058,27 @@ bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
if (HazardRec->isEnabled())
return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
- if (IssueCount + DAG->getNumMicroOps(SU->getInstr()) > DAG->getIssueWidth())
+ unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
+ if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) {
+ DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
+ << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
return true;
-
+ }
return false;
}
+/// If expected latency is covered, disable ILP policy.
+void ConvergingScheduler::SchedBoundary::checkILPPolicy() {
+ if (ShouldIncreaseILP
+ && (IsResourceLimited || ExpectedLatency <= CurrCycle)) {
+ ShouldIncreaseILP = false;
+ DEBUG(dbgs() << "Disable ILP: " << Available.getName() << '\n');
+ }
+}
+
void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
unsigned ReadyCycle) {
+
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
@@ -970,15 +1088,31 @@ void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
Pending.push(SU);
else
Available.push(SU);
+
+ // Record this node as an immediate dependent of the scheduled node.
+ NextSUs.insert(SU);
+
+ // If CriticalPath has been computed, then check if the unscheduled nodes
+ // exceed the ILP window. Before registerRoots, CriticalPath==0.
+ if (Rem->CriticalPath && (ExpectedLatency + getUnscheduledLatency(SU)
+ > Rem->CriticalPath + ILPWindow)) {
+ ShouldIncreaseILP = true;
+ DEBUG(dbgs() << "Increase ILP: " << Available.getName() << " "
+ << ExpectedLatency << " + " << getUnscheduledLatency(SU) << '\n');
+ }
}
/// Move the boundary of scheduled code by one cycle.
void ConvergingScheduler::SchedBoundary::bumpCycle() {
- unsigned Width = DAG->getIssueWidth();
+ unsigned Width = SchedModel->getIssueWidth();
IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
+ unsigned NextCycle = CurrCycle + 1;
assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
- unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
+ if (MinReadyCycle > NextCycle) {
+ IssueCount = 0;
+ NextCycle = MinReadyCycle;
+ }
if (!HazardRec->isEnabled()) {
// Bypass HazardRec virtual calls.
@@ -994,11 +1128,39 @@ void ConvergingScheduler::SchedBoundary::bumpCycle() {
}
}
CheckPending = true;
+ IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
- DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
+ DEBUG(dbgs() << " *** " << Available.getName() << " cycle "
<< CurrCycle << '\n');
}
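A worked example of the issue-count arithmetic above, with invented numbers:

  // Width = SchedModel->getIssueWidth() = 4:
  //   IssueCount 6 -> 2 after the bump: an instruction's micro-ops spilled
  //                   into the next instruction group;
  //   IssueCount 3 -> 0: the group had slack.
  // If MinReadyCycle lies beyond CurrCycle + 1, the zone fast-forwards to it
  // and IssueCount resets, since nothing can issue in the skipped cycles.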
+/// Add the given processor resource to this scheduled zone.
+void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx,
+ unsigned Cycles) {
+ unsigned Factor = SchedModel->getResourceFactor(PIdx);
+ DEBUG(dbgs() << " " << SchedModel->getProcResource(PIdx)->Name
+ << " +(" << Cycles << "x" << Factor
+ << ") / " << SchedModel->getLatencyFactor() << '\n');
+
+ unsigned Count = Factor * Cycles;
+ ResourceCounts[PIdx] += Count;
+ assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
+ Rem->RemainingCounts[PIdx] -= Count;
+
+ // Reset MaxRemainingCount for sanity.
+ Rem->MaxRemainingCount = 0;
+
+ // Check if this resource exceeds the current critical resource by a full
+ // cycle. If so, it becomes the critical resource.
+ if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
+ >= (int)SchedModel->getLatencyFactor()) {
+ CritResIdx = PIdx;
+ DEBUG(dbgs() << " *** Critical resource "
+ << SchedModel->getProcResource(PIdx)->Name << " x"
+ << ResourceCounts[PIdx] << '\n');
+ }
+}
+
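To make the normalization concrete (numbers invented, not from any real target): with a latency factor of 2 and a resource whose factor is also 2, an instruction occupying that resource for 3 cycles contributes 6 normalized units:

  unsigned Factor = 2, Cycles = 3;   // invented values
  unsigned Count = Factor * Cycles;  // 6 normalized units
  // The resource displaces the current critical resource only once
  // ResourceCounts[PIdx] - ResourceCounts[CritResIdx] reaches
  // SchedModel->getLatencyFactor() (= 2 here), i.e. a full cycle's lead.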
/// Move the boundary of scheduled code by one SUnit.
void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
// Update the reservation table.
@@ -1010,11 +1172,38 @@ void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
}
HazardRec->EmitInstruction(SU);
}
+ // Update resource counts and critical resource.
+ if (SchedModel->hasInstrSchedModel()) {
+ const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
+ Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC);
+ for (TargetSchedModel::ProcResIter
+ PI = SchedModel->getWriteProcResBegin(SC),
+ PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
+ countResource(PI->ProcResourceIdx, PI->Cycles);
+ }
+ }
+ if (isTop()) {
+ if (SU->getDepth() > ExpectedLatency)
+ ExpectedLatency = SU->getDepth();
+ }
+ else {
+ if (SU->getHeight() > ExpectedLatency)
+ ExpectedLatency = SU->getHeight();
+ }
+
+ IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);
+
// Check the instruction group dispatch limit.
// TODO: Check if this SU must end a dispatch group.
- IssueCount += DAG->getNumMicroOps(SU->getInstr());
- if (IssueCount >= DAG->getIssueWidth()) {
- DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
+ IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
+
+ // checkHazard prevents scheduling multiple instructions per cycle that exceed
+ // issue width. However, we commonly reach the maximum. In this case
+ // opportunistically bump the cycle to avoid uselessly checking everything in
+ // the readyQ. Furthermore, a single instruction may produce more than one
+ // cycle's worth of micro-ops.
+ if (IssueCount >= SchedModel->getIssueWidth()) {
+ DEBUG(dbgs() << " *** Max instrs at cycle " << CurrCycle << '\n');
bumpCycle();
}
}
@@ -1045,6 +1234,7 @@ void ConvergingScheduler::SchedBoundary::releasePending() {
Pending.remove(Pending.begin()+i);
--i; --e;
}
+ DEBUG(if (!Pending.empty()) Pending.dump());
CheckPending = false;
}
@@ -1059,12 +1249,23 @@ void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
}
/// If this queue only has one ready candidate, return it. As a side effect,
-/// advance the cycle until at least one node is ready. If multiple instructions
-/// are ready, return NULL.
+/// defer any nodes that now hit a hazard, and advance the cycle until at least
+/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
if (CheckPending)
releasePending();
+ if (IssueCount > 0) {
+ // Defer any ready instrs that now have a hazard.
+ for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
+ if (checkHazard(*I)) {
+ Pending.push(*I);
+ I = Available.remove(I);
+ continue;
+ }
+ ++I;
+ }
+ }
for (unsigned i = 0; Available.empty(); ++i) {
assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
"permanent hazard"); (void)i;
@@ -1076,18 +1277,262 @@ SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
return NULL;
}
-#ifndef NDEBUG
-void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q,
- SUnit *SU, PressureElement P) {
- dbgs() << Label << " " << Q.getName() << " ";
- if (P.isValid())
- dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
- << " ";
- else
- dbgs() << " ";
- SU->dump(DAG);
+/// Record the candidate policy for opposite zones with different critical
+/// resources.
+///
+/// If the CriticalZone is latency limited, don't force a policy for the
+/// candidates here. Instead, when releasing each candidate, releaseNode
+/// compares the region's critical path to the candidate's height or depth and
+/// the scheduled zone's expected latency, then sets ShouldIncreaseILP.
+void ConvergingScheduler::balanceZones(
+ ConvergingScheduler::SchedBoundary &CriticalZone,
+ ConvergingScheduler::SchedCandidate &CriticalCand,
+ ConvergingScheduler::SchedBoundary &OppositeZone,
+ ConvergingScheduler::SchedCandidate &OppositeCand) {
+
+ if (!CriticalZone.IsResourceLimited)
+ return;
+
+ SchedRemainder *Rem = CriticalZone.Rem;
+
+ // If the critical zone is overconsuming a resource relative to the
+ // remainder, try to reduce it.
+ unsigned RemainingCritCount =
+ Rem->RemainingCounts[CriticalZone.CritResIdx];
+ if ((int)(Rem->MaxRemainingCount - RemainingCritCount)
+ > (int)SchedModel->getLatencyFactor()) {
+ CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx;
+ DEBUG(dbgs() << "Balance " << CriticalZone.Available.getName() << " reduce "
+ << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name
+ << '\n');
+ }
+ // If the other zone is underconsuming a resource relative to the full zone,
+ // try to increase it.
+ unsigned OppositeCount =
+ OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
+ if ((int)(OppositeZone.ExpectedCount - OppositeCount)
+ > (int)SchedModel->getLatencyFactor()) {
+ OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
+ DEBUG(dbgs() << "Balance " << OppositeZone.Available.getName() << " demand "
+          << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name
+ << '\n');
+ }
+}
+
+/// Determine if the scheduled zones exceed resource limits or critical path and
+/// set each candidate's ReduceHeight policy accordingly.
+void ConvergingScheduler::checkResourceLimits(
+ ConvergingScheduler::SchedCandidate &TopCand,
+ ConvergingScheduler::SchedCandidate &BotCand) {
+
+ Bot.checkILPPolicy();
+ Top.checkILPPolicy();
+ if (Bot.ShouldIncreaseILP)
+ BotCand.Policy.ReduceLatency = true;
+ if (Top.ShouldIncreaseILP)
+ TopCand.Policy.ReduceLatency = true;
+
+ // Handle resource-limited regions.
+ if (Top.IsResourceLimited && Bot.IsResourceLimited
+ && Top.CritResIdx == Bot.CritResIdx) {
+ // If the scheduled critical resource in both zones is no longer the
+ // critical remaining resource, attempt to reduce resource height both ways.
+ if (Top.CritResIdx != Rem.CritResIdx) {
+ TopCand.Policy.ReduceResIdx = Top.CritResIdx;
+ BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
+ DEBUG(dbgs() << "Reduce scheduled "
+ << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
+ }
+ return;
+ }
+ // Handle latency-limited regions.
+ if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
+ // If the total scheduled expected latency exceeds the region's critical
+ // path then reduce latency both ways.
+ //
+ // Just because a zone is not resource limited does not mean it is latency
+    // limited. Unbuffered resources, such as max micro-ops, may cause
+    // CurrCycle to exceed expected latency.
+ if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
+ && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
+ TopCand.Policy.ReduceLatency = true;
+ BotCand.Policy.ReduceLatency = true;
+ DEBUG(dbgs() << "Reduce scheduled latency " << Top.ExpectedLatency
+ << " + " << Bot.ExpectedLatency << '\n');
+ }
+ return;
+ }
+ // The critical resource is different in each zone, so request balancing.
+
+ // Compute the cost of each zone.
+ Rem.MaxRemainingCount = std::max(
+ Rem.RemainingMicroOps * SchedModel->getMicroOpFactor(),
+ Rem.RemainingCounts[Rem.CritResIdx]);
+ Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
+ Top.ExpectedCount = std::max(
+ Top.getCriticalCount(),
+ Top.ExpectedCount * SchedModel->getLatencyFactor());
+ Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle);
+ Bot.ExpectedCount = std::max(
+ Bot.getCriticalCount(),
+ Bot.ExpectedCount * SchedModel->getLatencyFactor());
+
+ balanceZones(Top, TopCand, Bot, BotCand);
+ balanceZones(Bot, BotCand, Top, TopCand);
+}
+
+void ConvergingScheduler::SchedCandidate::
+initResourceDelta(const ScheduleDAGMI *DAG,
+ const TargetSchedModel *SchedModel) {
+ if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
+ return;
+
+ const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
+ for (TargetSchedModel::ProcResIter
+ PI = SchedModel->getWriteProcResBegin(SC),
+ PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
+ if (PI->ProcResourceIdx == Policy.ReduceResIdx)
+ ResDelta.CritResources += PI->Cycles;
+ if (PI->ProcResourceIdx == Policy.DemandResIdx)
+ ResDelta.DemandedResources += PI->Cycles;
+ }
+}
+
+/// Return true if this heuristic determines order.
+static bool tryLess(unsigned TryVal, unsigned CandVal,
+ ConvergingScheduler::SchedCandidate &TryCand,
+ ConvergingScheduler::SchedCandidate &Cand,
+ ConvergingScheduler::CandReason Reason) {
+ if (TryVal < CandVal) {
+ TryCand.Reason = Reason;
+ return true;
+ }
+ if (TryVal > CandVal) {
+ if (Cand.Reason > Reason)
+ Cand.Reason = Reason;
+ return true;
+ }
+ return false;
+}
+static bool tryGreater(unsigned TryVal, unsigned CandVal,
+ ConvergingScheduler::SchedCandidate &TryCand,
+ ConvergingScheduler::SchedCandidate &Cand,
+ ConvergingScheduler::CandReason Reason) {
+ if (TryVal > CandVal) {
+ TryCand.Reason = Reason;
+ return true;
+ }
+ if (TryVal < CandVal) {
+ if (Cand.Reason > Reason)
+ Cand.Reason = Reason;
+ return true;
+ }
+ return false;
+}
+
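These helpers encode a three-way protocol; illustrative calls (values invented):

  // tryLess(2, 5, TryCand, Cand, SingleExcess) -> true, sets TryCand.Reason:
  //   TryCand wins on this heuristic.
  // tryLess(5, 2, TryCand, Cand, SingleExcess) -> true, upgrades Cand.Reason
  //   to SingleExcess if it was weaker (larger enum value): Cand wins.
  // tryLess(3, 3, ...)                         -> false: a tie, so the caller
  //   falls through to the next heuristic in the hierarchy.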
+/// Apply a set of heuristics to a new candidate. Heuristics are currently
+/// hierarchical. This may be more efficient than a graduated cost model because
+/// we don't need to evaluate all aspects of the model for each node in the
+/// queue. But it's really done to make the heuristics easier to debug and
+/// statistically analyze.
+///
+/// \param Cand provides the policy and current best candidate.
+/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
+/// \param Zone describes the scheduled zone that we are extending.
+/// \param RPTracker describes reg pressure within the scheduled zone.
+/// \param TempTracker is a scratch pressure tracker to reuse in queries.
+void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
+ SchedCandidate &TryCand,
+ SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ RegPressureTracker &TempTracker) {
+
+ // Always initialize TryCand's RPDelta.
+ TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
+ DAG->getRegionCriticalPSets(),
+ DAG->getRegPressure().MaxSetPressure);
+
+ // Initialize the candidate if needed.
+ if (!Cand.isValid()) {
+ TryCand.Reason = NodeOrder;
+ return;
+ }
+ // Avoid exceeding the target's limit.
+ if (tryLess(TryCand.RPDelta.Excess.UnitIncrease,
+ Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess))
+ return;
+ if (Cand.Reason == SingleExcess)
+ Cand.Reason = MultiPressure;
+
+ // Avoid increasing the max critical pressure in the scheduled region.
+ if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease,
+ Cand.RPDelta.CriticalMax.UnitIncrease,
+ TryCand, Cand, SingleCritical))
+ return;
+ if (Cand.Reason == SingleCritical)
+ Cand.Reason = MultiPressure;
+
+ // Avoid critical resource consumption and balance the schedule.
+ TryCand.initResourceDelta(DAG, SchedModel);
+ if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
+ TryCand, Cand, ResourceReduce))
+ return;
+ if (tryGreater(TryCand.ResDelta.DemandedResources,
+ Cand.ResDelta.DemandedResources,
+ TryCand, Cand, ResourceDemand))
+ return;
+
+ // Avoid serializing long latency dependence chains.
+ if (Cand.Policy.ReduceLatency) {
+ if (Zone.isTop()) {
+ if (Cand.SU->getDepth() * SchedModel->getLatencyFactor()
+ > Zone.ExpectedCount) {
+ if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
+ TryCand, Cand, TopDepthReduce))
+ return;
+ }
+ if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
+ TryCand, Cand, TopPathReduce))
+ return;
+ }
+ else {
+ if (Cand.SU->getHeight() * SchedModel->getLatencyFactor()
+ > Zone.ExpectedCount) {
+ if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
+ TryCand, Cand, BotHeightReduce))
+ return;
+ }
+ if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
+ TryCand, Cand, BotPathReduce))
+ return;
+ }
+ }
+
+ // Avoid increasing the max pressure of the entire region.
+ if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease,
+ Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax))
+ return;
+ if (Cand.Reason == SingleMax)
+ Cand.Reason = MultiPressure;
+
+ // Prefer immediate defs/users of the last scheduled instruction. This is a
+ // nice pressure avoidance strategy that also conserves the processor's
+ // register renaming resources and keeps the machine code readable.
+ if (Zone.NextSUs.count(TryCand.SU) && !Zone.NextSUs.count(Cand.SU)) {
+ TryCand.Reason = NextDefUse;
+ return;
+ }
+ if (!Zone.NextSUs.count(TryCand.SU) && Zone.NextSUs.count(Cand.SU)) {
+ if (Cand.Reason > NextDefUse)
+ Cand.Reason = NextDefUse;
+ return;
+ }
+ // Fall through to original instruction order.
+ if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
+ || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
+ TryCand.Reason = NodeOrder;
+ }
}
-#endif
/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
/// more desirable than RHS from scheduling standpoint.
@@ -1098,109 +1543,144 @@ static bool compareRPDelta(const RegPressureDelta &LHS,
// have UnitIncrease==0, so are neutral.
// Avoid increasing the max critical pressure in the scheduled region.
- if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
+ if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) {
+ DEBUG(dbgs() << "RP excess top - bot: "
+ << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n');
return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
-
+ }
// Avoid increasing the max critical pressure in the scheduled region.
- if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
+ if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) {
+ DEBUG(dbgs() << "RP critical top - bot: "
+ << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease)
+ << '\n');
return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
-
+ }
// Avoid increasing the max pressure of the entire region.
- if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
+ if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) {
+ DEBUG(dbgs() << "RP current top - bot: "
+ << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease)
+ << '\n');
return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
-
+ }
return false;
}
+#ifndef NDEBUG
+const char *ConvergingScheduler::getReasonStr(
+ ConvergingScheduler::CandReason Reason) {
+ switch (Reason) {
+ case NoCand: return "NOCAND ";
+ case SingleExcess: return "REG-EXCESS";
+ case SingleCritical: return "REG-CRIT ";
+ case SingleMax: return "REG-MAX ";
+ case MultiPressure: return "REG-MULTI ";
+ case ResourceReduce: return "RES-REDUCE";
+ case ResourceDemand: return "RES-DEMAND";
+ case TopDepthReduce: return "TOP-DEPTH ";
+ case TopPathReduce: return "TOP-PATH ";
+ case BotHeightReduce:return "BOT-HEIGHT";
+ case BotPathReduce: return "BOT-PATH ";
+ case NextDefUse: return "DEF-USE ";
+ case NodeOrder: return "ORDER ";
+  }
+ llvm_unreachable("Unknown reason!");
+}
+
+void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand,
+ const SchedBoundary &Zone) {
+ const char *Label = getReasonStr(Cand.Reason);
+ PressureElement P;
+ unsigned ResIdx = 0;
+ unsigned Latency = 0;
+ switch (Cand.Reason) {
+ default:
+ break;
+ case SingleExcess:
+ P = Cand.RPDelta.Excess;
+ break;
+ case SingleCritical:
+ P = Cand.RPDelta.CriticalMax;
+ break;
+ case SingleMax:
+ P = Cand.RPDelta.CurrentMax;
+ break;
+ case ResourceReduce:
+ ResIdx = Cand.Policy.ReduceResIdx;
+ break;
+ case ResourceDemand:
+ ResIdx = Cand.Policy.DemandResIdx;
+ break;
+ case TopDepthReduce:
+ Latency = Cand.SU->getDepth();
+ break;
+ case TopPathReduce:
+ Latency = Cand.SU->getHeight();
+ break;
+ case BotHeightReduce:
+ Latency = Cand.SU->getHeight();
+ break;
+ case BotPathReduce:
+ Latency = Cand.SU->getDepth();
+ break;
+ }
+ dbgs() << Label << " " << Zone.Available.getName() << " ";
+ if (P.isValid())
+ dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
+ << " ";
+ else
+ dbgs() << " ";
+ if (ResIdx)
+ dbgs() << SchedModel->getProcResource(ResIdx)->Name << " ";
+ else
+ dbgs() << " ";
+ if (Latency)
+ dbgs() << Latency << " cycles ";
+ else
+ dbgs() << " ";
+ Cand.SU->dump(DAG);
+}
+#endif
+
/// Pick the best candidate from the top queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
-ConvergingScheduler::CandResult ConvergingScheduler::
-pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
- SchedCandidate &Candidate) {
+void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ SchedCandidate &Cand) {
+ ReadyQueue &Q = Zone.Available;
+
DEBUG(Q.dump());
// getMaxPressureDelta temporarily modifies the tracker.
RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
- // BestSU remains NULL if no top candidates beat the best existing candidate.
- CandResult FoundCandidate = NoCand;
for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
- RegPressureDelta RPDelta;
- TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
- DAG->getRegionCriticalPSets(),
- DAG->getRegPressure().MaxSetPressure);
-
- // Initialize the candidate if needed.
- if (!Candidate.SU) {
- Candidate.SU = *I;
- Candidate.RPDelta = RPDelta;
- FoundCandidate = NodeOrder;
- continue;
- }
- // Avoid exceeding the target's limit.
- if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
- DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess));
- Candidate.SU = *I;
- Candidate.RPDelta = RPDelta;
- FoundCandidate = SingleExcess;
- continue;
- }
- if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
- continue;
- if (FoundCandidate == SingleExcess)
- FoundCandidate = MultiPressure;
-
- // Avoid increasing the max critical pressure in the scheduled region.
- if (RPDelta.CriticalMax.UnitIncrease
- < Candidate.RPDelta.CriticalMax.UnitIncrease) {
- DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax));
- Candidate.SU = *I;
- Candidate.RPDelta = RPDelta;
- FoundCandidate = SingleCritical;
- continue;
- }
- if (RPDelta.CriticalMax.UnitIncrease
- > Candidate.RPDelta.CriticalMax.UnitIncrease)
- continue;
- if (FoundCandidate == SingleCritical)
- FoundCandidate = MultiPressure;
-
- // Avoid increasing the max pressure of the entire region.
- if (RPDelta.CurrentMax.UnitIncrease
- < Candidate.RPDelta.CurrentMax.UnitIncrease) {
- DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax));
- Candidate.SU = *I;
- Candidate.RPDelta = RPDelta;
- FoundCandidate = SingleMax;
- continue;
- }
- if (RPDelta.CurrentMax.UnitIncrease
- > Candidate.RPDelta.CurrentMax.UnitIncrease)
- continue;
- if (FoundCandidate == SingleMax)
- FoundCandidate = MultiPressure;
-
- // Fall through to original instruction order.
- // Only consider node order if Candidate was chosen from this Q.
- if (FoundCandidate == NoCand)
- continue;
- if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
- || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
- DEBUG(traceCandidate("NCAND", Q, *I));
- Candidate.SU = *I;
- Candidate.RPDelta = RPDelta;
- FoundCandidate = NodeOrder;
+ SchedCandidate TryCand(Cand.Policy);
+ TryCand.SU = *I;
+ tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
+ if (TryCand.Reason != NoCand) {
+ // Initialize resource delta if needed in case future heuristics query it.
+ if (TryCand.ResDelta == SchedResourceDelta())
+ TryCand.initResourceDelta(DAG, SchedModel);
+ Cand.setBest(TryCand);
+ DEBUG(traceCandidate(Cand, Zone));
}
+ TryCand.SU = *I;
}
- return FoundCandidate;
+}
+
+static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
+ bool IsTop) {
+ DEBUG(dbgs() << "Pick " << (IsTop ? "top" : "bot")
+ << " SU(" << Cand.SU->NodeNum << ") "
+ << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
}
/// Pick the best candidate node from either the top or bottom queue.
-SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) {
+SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
// Schedule as far as possible in the direction of no choice. This is most
// efficient, but also provides the best heuristics for CriticalPSets.
if (SUnit *SU = Bot.pickOnlyChoice()) {
@@ -1211,11 +1691,14 @@ SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) {
IsTopNode = true;
return SU;
}
- SchedCandidate BotCand;
+ CandPolicy NoPolicy;
+ SchedCandidate BotCand(NoPolicy);
+ SchedCandidate TopCand(NoPolicy);
+ checkResourceLimits(TopCand, BotCand);
+
// Prefer bottom scheduling when heuristics are silent.
- CandResult BotResult = pickNodeFromQueue(Bot.Available,
- DAG->getBotRPTracker(), BotCand);
- assert(BotResult != NoCand && "failed to find the first candidate");
+ pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
+ assert(BotCand.Reason != NoCand && "failed to find the first candidate");
// If either Q has a single candidate that provides the least increase in
// Excess pressure, we can immediately schedule from that Q.
@@ -1224,37 +1707,41 @@ SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) {
// affects picking from either Q. If scheduling in one direction must
// increase pressure for one of the excess PSets, then schedule in that
// direction first to provide more freedom in the other direction.
- if (BotResult == SingleExcess || BotResult == SingleCritical) {
+ if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) {
IsTopNode = false;
+ tracePick(BotCand, IsTopNode);
return BotCand.SU;
}
// Check if the top Q has a better candidate.
- SchedCandidate TopCand;
- CandResult TopResult = pickNodeFromQueue(Top.Available,
- DAG->getTopRPTracker(), TopCand);
- assert(TopResult != NoCand && "failed to find the first candidate");
+ pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
+ assert(TopCand.Reason != NoCand && "failed to find the first candidate");
- if (TopResult == SingleExcess || TopResult == SingleCritical) {
- IsTopNode = true;
- return TopCand.SU;
- }
// If either Q has a single candidate that minimizes pressure above the
// original region's pressure pick it.
- if (BotResult == SingleMax) {
+ if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
+ if (TopCand.Reason < BotCand.Reason) {
+ IsTopNode = true;
+ tracePick(TopCand, IsTopNode);
+ return TopCand.SU;
+ }
IsTopNode = false;
+ tracePick(BotCand, IsTopNode);
return BotCand.SU;
}
- if (TopResult == SingleMax) {
+ // Check for a salient pressure difference and pick the best from either side.
+ if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
IsTopNode = true;
+ tracePick(TopCand, IsTopNode);
return TopCand.SU;
}
- // Check for a salient pressure difference and pick the best from either side.
- if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
+ // Otherwise prefer the bottom candidate, in node order if all else failed.
+ if (TopCand.Reason < BotCand.Reason) {
IsTopNode = true;
+ tracePick(TopCand, IsTopNode);
return TopCand.SU;
}
- // Otherwise prefer the bottom candidate in node order.
IsTopNode = false;
+ tracePick(BotCand, IsTopNode);
return BotCand.SU;
}
@@ -1266,33 +1753,34 @@ SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
return NULL;
}
SUnit *SU;
- if (ForceTopDown) {
- SU = Top.pickOnlyChoice();
- if (!SU) {
- SchedCandidate TopCand;
- CandResult TopResult =
- pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
- assert(TopResult != NoCand && "failed to find the first candidate");
- (void)TopResult;
- SU = TopCand.SU;
+ do {
+ if (ForceTopDown) {
+ SU = Top.pickOnlyChoice();
+ if (!SU) {
+ CandPolicy NoPolicy;
+ SchedCandidate TopCand(NoPolicy);
+ pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
+ assert(TopCand.Reason != NoCand && "failed to find the first candidate");
+ SU = TopCand.SU;
+ }
+ IsTopNode = true;
}
- IsTopNode = true;
- }
- else if (ForceBottomUp) {
- SU = Bot.pickOnlyChoice();
- if (!SU) {
- SchedCandidate BotCand;
- CandResult BotResult =
- pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
- assert(BotResult != NoCand && "failed to find the first candidate");
- (void)BotResult;
- SU = BotCand.SU;
+ else if (ForceBottomUp) {
+ SU = Bot.pickOnlyChoice();
+ if (!SU) {
+ CandPolicy NoPolicy;
+ SchedCandidate BotCand(NoPolicy);
+ pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
+ assert(BotCand.Reason != NoCand && "failed to find the first candidate");
+ SU = BotCand.SU;
+ }
+ IsTopNode = false;
}
- IsTopNode = false;
- }
- else {
- SU = pickNodeBidrectional(IsTopNode);
- }
+ else {
+ SU = pickNodeBidirectional(IsTopNode);
+ }
+ } while (SU->isScheduled);
+
if (SU->isTopReady())
Top.removeReady(SU);
if (SU->isBottomReady())
@@ -1331,6 +1819,86 @@ ConvergingSchedRegistry("converge", "Standard converging scheduler.",
createConvergingSched);
//===----------------------------------------------------------------------===//
+// ILP Scheduler. Currently for experimental analysis of heuristics.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// \brief Order nodes by the ILP metric.
+struct ILPOrder {
+ ScheduleDAGILP *ILP;
+ bool MaximizeILP;
+
+ ILPOrder(ScheduleDAGILP *ilp, bool MaxILP): ILP(ilp), MaximizeILP(MaxILP) {}
+
+ /// \brief Apply a less-than relation on node priority.
+ bool operator()(const SUnit *A, const SUnit *B) const {
+ // Return true if A comes after B in the Q.
+ if (MaximizeILP)
+ return ILP->getILP(A) < ILP->getILP(B);
+ else
+ return ILP->getILP(A) > ILP->getILP(B);
+ }
+};
+
+/// \brief Schedule based on the ILP metric.
+class ILPScheduler : public MachineSchedStrategy {
+ ScheduleDAGILP ILP;
+ ILPOrder Cmp;
+
+ std::vector<SUnit*> ReadyQ;
+public:
+ ILPScheduler(bool MaximizeILP)
+ : ILP(/*BottomUp=*/true), Cmp(&ILP, MaximizeILP) {}
+
+ virtual void initialize(ScheduleDAGMI *DAG) {
+ ReadyQ.clear();
+ ILP.resize(DAG->SUnits.size());
+ }
+
+ virtual void registerRoots() {
+ for (std::vector<SUnit*>::const_iterator
+ I = ReadyQ.begin(), E = ReadyQ.end(); I != E; ++I) {
+ ILP.computeILP(*I);
+ }
+ }
+
+ /// Implement MachineSchedStrategy interface.
+ /// -----------------------------------------
+
+ virtual SUnit *pickNode(bool &IsTopNode) {
+ if (ReadyQ.empty()) return NULL;
+ std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
+ SUnit *SU = ReadyQ.back();
+ ReadyQ.pop_back();
+ IsTopNode = false;
+ DEBUG(dbgs() << "*** Scheduling " << *SU->getInstr()
+ << " ILP: " << ILP.getILP(SU) << '\n');
+ return SU;
+ }
+
+ virtual void schedNode(SUnit *, bool) {}
+
+ virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
+
+ virtual void releaseBottomNode(SUnit *SU) {
+ ReadyQ.push_back(SU);
+ std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
+ }
+};
+} // namespace
+
+static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
+ return new ScheduleDAGMI(C, new ILPScheduler(true));
+}
+static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
+ return new ScheduleDAGMI(C, new ILPScheduler(false));
+}
+static MachineSchedRegistry ILPMaxRegistry(
+ "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
+static MachineSchedRegistry ILPMinRegistry(
+ "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
+
+//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/CodeGen/MachineSink.cpp b/contrib/llvm/lib/CodeGen/MachineSink.cpp
index bc383cb..b117f8c 100644
--- a/contrib/llvm/lib/CodeGen/MachineSink.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSink.cpp
@@ -49,7 +49,6 @@ namespace {
MachineDominatorTree *DT; // Machine dominator tree
MachineLoopInfo *LI;
AliasAnalysis *AA;
- BitVector AllocatableSet; // Which physregs are allocatable?
// Remember which edges have been considered for breaking.
SmallSet<std::pair<MachineBasicBlock*,MachineBasicBlock*>, 8>
@@ -229,7 +228,6 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
DT = &getAnalysis<MachineDominatorTree>();
LI = &getAnalysis<MachineLoopInfo>();
AA = &getAnalysis<AliasAnalysis>();
- AllocatableSet = TRI->getAllocatableSet(MF);
bool EverMadeChange = false;
diff --git a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
index 1a3aa60..9686b04 100644
--- a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
@@ -14,9 +14,10 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/PostOrderIterator.h"
@@ -50,9 +51,11 @@ bool MachineTraceMetrics::runOnMachineFunction(MachineFunction &Func) {
MF = &Func;
TII = MF->getTarget().getInstrInfo();
TRI = MF->getTarget().getRegisterInfo();
- ItinData = MF->getTarget().getInstrItineraryData();
MRI = &MF->getRegInfo();
Loops = &getAnalysis<MachineLoopInfo>();
+ const TargetSubtargetInfo &ST =
+ MF->getTarget().getSubtarget<TargetSubtargetInfo>();
+ SchedModel.init(*ST.getSchedModel(), &ST, TII);
BlockInfo.resize(MF->getNumBlockIDs());
return false;
}
@@ -674,7 +677,7 @@ computeCrossBlockCriticalPath(const TraceBlockInfo &TBI) {
const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
// Ignore dependencies outside the current trace.
const TraceBlockInfo &DefTBI = BlockInfo[DefMI->getParent()->getNumber()];
- if (!DefTBI.hasValidDepth() || DefTBI.Head != TBI.Head)
+ if (!DefTBI.isEarlierInSameTrace(TBI))
continue;
unsigned Len = LIR.Height + Cycles[DefMI].Depth;
MaxLen = std::max(MaxLen, Len);
@@ -737,16 +740,15 @@ computeInstrDepths(const MachineBasicBlock *MBB) {
const TraceBlockInfo&DepTBI =
BlockInfo[Dep.DefMI->getParent()->getNumber()];
// Ignore dependencies from outside the current trace.
- if (!DepTBI.hasValidDepth() || DepTBI.Head != TBI.Head)
+ if (!DepTBI.isEarlierInSameTrace(TBI))
continue;
assert(DepTBI.HasValidInstrDepths && "Inconsistent dependency");
unsigned DepCycle = Cycles.lookup(Dep.DefMI).Depth;
// Add latency if DefMI is a real instruction. Transients get latency 0.
if (!Dep.DefMI->isTransient())
- DepCycle += MTM.TII->computeOperandLatency(MTM.ItinData,
- Dep.DefMI, Dep.DefOp,
- UseMI, Dep.UseOp,
- /* FindMin = */ false);
+ DepCycle += MTM.SchedModel
+ .computeOperandLatency(Dep.DefMI, Dep.DefOp, UseMI, Dep.UseOp,
+ /* FindMin = */ false);
Cycle = std::max(Cycle, DepCycle);
}
// Remember the instruction depth.
@@ -769,7 +771,7 @@ computeInstrDepths(const MachineBasicBlock *MBB) {
// Height is the issue height computed from virtual register dependencies alone.
static unsigned updatePhysDepsUpwards(const MachineInstr *MI, unsigned Height,
SparseSet<LiveRegUnit> &RegUnits,
- const InstrItineraryData *ItinData,
+ const TargetSchedModel &SchedModel,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
SmallVector<unsigned, 8> ReadOps;
@@ -792,14 +794,10 @@ static unsigned updatePhysDepsUpwards(const MachineInstr *MI, unsigned Height,
unsigned DepHeight = I->Cycle;
if (!MI->isTransient()) {
// We may not know the UseMI of this dependency, if it came from the
- // live-in list.
- if (I->MI)
- DepHeight += TII->computeOperandLatency(ItinData,
- MI, MO.getOperandNo(),
- I->MI, I->Op);
- else
- // No UseMI. Just use the MI latency instead.
- DepHeight += TII->getInstrLatency(ItinData, MI);
+ // live-in list. SchedModel can handle a NULL UseMI.
+ DepHeight += SchedModel
+ .computeOperandLatency(MI, MO.getOperandNo(), I->MI, I->Op,
+ /* FindMin = */ false);
}
Height = std::max(Height, DepHeight);
// This regunit is dead above MI.
@@ -832,12 +830,12 @@ typedef DenseMap<const MachineInstr *, unsigned> MIHeightMap;
static bool pushDepHeight(const DataDep &Dep,
const MachineInstr *UseMI, unsigned UseHeight,
MIHeightMap &Heights,
- const InstrItineraryData *ItinData,
+ const TargetSchedModel &SchedModel,
const TargetInstrInfo *TII) {
// Adjust height by Dep.DefMI latency.
if (!Dep.DefMI->isTransient())
- UseHeight += TII->computeOperandLatency(ItinData, Dep.DefMI, Dep.DefOp,
- UseMI, Dep.UseOp);
+ UseHeight += SchedModel.computeOperandLatency(Dep.DefMI, Dep.DefOp,
+ UseMI, Dep.UseOp, false);
// Update Heights[DefMI] to be the maximum height seen.
MIHeightMap::iterator I;
@@ -852,14 +850,14 @@ static bool pushDepHeight(const DataDep &Dep,
return false;
}
-/// Assuming that DefMI was used by Trace.back(), add it to the live-in lists
-/// of all the blocks in Trace. Stop when reaching the block that contains
-/// DefMI.
+/// Assuming that the virtual register defined by DefMI:DefOp was used by
+/// Trace.back(), add it to the live-in lists of all the blocks in Trace. Stop
+/// when reaching the block that contains DefMI.
void MachineTraceMetrics::Ensemble::
-addLiveIns(const MachineInstr *DefMI,
+addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
ArrayRef<const MachineBasicBlock*> Trace) {
assert(!Trace.empty() && "Trace should contain at least one block");
- unsigned Reg = DefMI->getOperand(0).getReg();
+ unsigned Reg = DefMI->getOperand(DefOp).getReg();
assert(TargetRegisterInfo::isVirtualRegister(Reg));
const MachineBasicBlock *DefMBB = DefMI->getParent();
@@ -951,8 +949,8 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
unsigned Height = TBI.Succ ? Cycles.lookup(PHI).Height : 0;
DEBUG(dbgs() << "pred\t" << Height << '\t' << *PHI);
if (pushDepHeight(Deps.front(), PHI, Height,
- Heights, MTM.ItinData, MTM.TII))
- addLiveIns(Deps.front().DefMI, Stack);
+ Heights, MTM.SchedModel, MTM.TII))
+ addLiveIns(Deps.front().DefMI, Deps.front().DefOp, Stack);
}
}
}
@@ -980,12 +978,12 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
// There may also be regunit dependencies to include in the height.
if (HasPhysRegs)
Cycle = updatePhysDepsUpwards(MI, Cycle, RegUnits,
- MTM.ItinData, MTM.TII, MTM.TRI);
+ MTM.SchedModel, MTM.TII, MTM.TRI);
// Update the required height of any virtual registers read by MI.
for (unsigned i = 0, e = Deps.size(); i != e; ++i)
- if (pushDepHeight(Deps[i], MI, Cycle, Heights, MTM.ItinData, MTM.TII))
- addLiveIns(Deps[i].DefMI, Stack);
+ if (pushDepHeight(Deps[i], MI, Cycle, Heights, MTM.SchedModel, MTM.TII))
+ addLiveIns(Deps[i].DefMI, Deps[i].DefOp, Stack);
InstrCycles &MICycles = Cycles[MI];
MICycles.Height = Cycle;
@@ -1054,10 +1052,8 @@ MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr *PHI) const {
unsigned DepCycle = getInstrCycles(Dep.DefMI).Depth;
// Add latency if DefMI is a real instruction. Transients get latency 0.
if (!Dep.DefMI->isTransient())
- DepCycle += TE.MTM.TII->computeOperandLatency(TE.MTM.ItinData,
- Dep.DefMI, Dep.DefOp,
- PHI, Dep.UseOp,
- /* FindMin = */ false);
+ DepCycle += TE.MTM.SchedModel
+ .computeOperandLatency(Dep.DefMI, Dep.DefOp, PHI, Dep.UseOp, false);
return DepCycle;
}
@@ -1068,9 +1064,8 @@ unsigned MachineTraceMetrics::Trace::getResourceDepth(bool Bottom) const {
unsigned Instrs = TBI.InstrDepth;
if (Bottom)
Instrs += TE.MTM.BlockInfo[getBlockNum()].InstrCount;
- if (const MCSchedModel *Model = TE.MTM.ItinData->SchedModel)
- if (Model->IssueWidth != 0)
- return Instrs / Model->IssueWidth;
+ if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
+ Instrs /= IW;
// Assume issue width 1 without a schedule model.
return Instrs;
}
@@ -1080,9 +1075,8 @@ getResourceLength(ArrayRef<const MachineBasicBlock*> Extrablocks) const {
unsigned Instrs = TBI.InstrDepth + TBI.InstrHeight;
for (unsigned i = 0, e = Extrablocks.size(); i != e; ++i)
Instrs += TE.MTM.getResources(Extrablocks[i])->InstrCount;
- if (const MCSchedModel *Model = TE.MTM.ItinData->SchedModel)
- if (Model->IssueWidth != 0)
- return Instrs / Model->IssueWidth;
+ if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
+ Instrs /= IW;
// Assume issue width 1 without a schedule model.
return Instrs;
}
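Worth spelling out: after this change both getResourceDepth and getResourceLength reduce to one piece of arithmetic — instruction count divided by issue width is a lower bound on execution cycles, independent of the dependence-height bound. A sketch of the combined estimate, under assumed inputs:

#include <algorithm>

// A superscalar core retires at most IssueWidth instructions per cycle and
// can never beat the data-dependence critical path, so a trace needs at
// least the larger of the two bounds. IssueWidth == 0 means no schedule
// model; assume width 1, as the code above does.
unsigned traceCycles(unsigned Instrs, unsigned CriticalPath,
                     unsigned IssueWidth) {
  unsigned Resource = IssueWidth ? Instrs / IssueWidth : Instrs;
  return std::max(Resource, CriticalPath);
}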
diff --git a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.h b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.h
index c5b86f3..460730b 100644
--- a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.h
+++ b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.h
@@ -50,6 +50,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetSchedule.h"
namespace llvm {
@@ -67,9 +68,9 @@ class MachineTraceMetrics : public MachineFunctionPass {
const MachineFunction *MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
- const InstrItineraryData *ItinData;
const MachineRegisterInfo *MRI;
const MachineLoopInfo *Loops;
+ TargetSchedModel SchedModel;
public:
class Ensemble;
@@ -164,6 +165,14 @@ public:
/// Invalidate height resources when a block below this one has changed.
void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; }
+ /// Determine if this block belongs to the same trace as TBI and comes
+ /// before it in the trace.
+ /// Also returns true when TBI == this.
+ bool isEarlierInSameTrace(const TraceBlockInfo &TBI) const {
+ return hasValidDepth() && TBI.hasValidDepth() &&
+ Head == TBI.Head && InstrDepth <= TBI.InstrDepth;
+ }
+
// Data-dependency-related information. Per-instruction depth and height
// are computed from data dependencies in the current trace, using
// itinerary data.
@@ -270,7 +279,7 @@ public:
unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&);
void computeInstrDepths(const MachineBasicBlock*);
void computeInstrHeights(const MachineBasicBlock*);
- void addLiveIns(const MachineInstr *DefMI,
+ void addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
ArrayRef<const MachineBasicBlock*> Trace);
protected:
diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
index f745b41..69a3ae8 100644
--- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -23,8 +23,9 @@
// the verifier errors.
//===----------------------------------------------------------------------===//
+#include "llvm/BasicBlock.h"
+#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
-#include "llvm/Function.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
@@ -73,11 +74,12 @@ namespace {
typedef SmallVector<const uint32_t*, 4> RegMaskVector;
typedef DenseSet<unsigned> RegSet;
typedef DenseMap<unsigned, const MachineInstr*> RegMap;
+ typedef SmallPtrSet<const MachineBasicBlock*, 8> BlockSet;
const MachineInstr *FirstTerminator;
+ BlockSet FunctionBlocks;
BitVector regsReserved;
- BitVector regsAllocatable;
RegSet regsLive;
RegVector regsDefined, regsDead, regsKilled;
RegMaskVector regMasks;
@@ -117,6 +119,9 @@ namespace {
// block. This set is disjoint from regsLiveOut.
RegSet vregsRequired;
+ // Set versions of block's predecessor and successor lists.
+ BlockSet Preds, Succs;
+
BBInfo() : reachable(false) {}
// Add register to vregsPassed if it belongs there. Return true if
@@ -180,7 +185,7 @@ namespace {
}
bool isAllocatable(unsigned Reg) {
- return Reg < regsAllocatable.size() && regsAllocatable.test(Reg);
+ return Reg < TRI->getNumRegs() && MRI->isAllocatable(Reg);
}
// Analysis information if available
@@ -208,6 +213,8 @@ namespace {
void report(const char *msg, const MachineBasicBlock *MBB,
const LiveInterval &LI);
+ void verifyInlineAsm(const MachineInstr *MI);
+
void checkLiveness(const MachineOperand *MO, unsigned MONum);
void markReachable(const MachineBasicBlock *MBB);
void calcRegsPassed();
@@ -352,7 +359,7 @@ void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
MF->print(*OS, Indexes);
}
*OS << "*** Bad machine code: " << msg << " ***\n"
- << "- function: " << MF->getFunction()->getName() << "\n";
+ << "- function: " << MF->getName() << "\n";
}
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
@@ -360,7 +367,7 @@ void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
report(msg, MBB->getParent());
*OS << "- basic block: BB#" << MBB->getNumber()
<< ' ' << MBB->getName()
- << " (" << (void*)MBB << ')';
+ << " (" << (const void*)MBB << ')';
if (Indexes)
*OS << " [" << Indexes->getMBBStartIdx(MBB)
<< ';' << Indexes->getMBBEndIdx(MBB) << ')';
@@ -419,7 +426,7 @@ void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
void MachineVerifier::visitMachineFunctionBefore() {
lastIndex = SlotIndex();
- regsReserved = TRI->getReservedRegs(*MF);
+ regsReserved = MRI->getReservedRegs();
// A sub-register of a reserved register is also reserved
for (int Reg = regsReserved.find_first(); Reg>=0;
@@ -431,9 +438,23 @@ void MachineVerifier::visitMachineFunctionBefore() {
}
}
- regsAllocatable = TRI->getAllocatableSet(*MF);
-
markReachable(&MF->front());
+
+ // Build a set of the basic blocks in the function.
+ FunctionBlocks.clear();
+ for (MachineFunction::const_iterator
+ I = MF->begin(), E = MF->end(); I != E; ++I) {
+ FunctionBlocks.insert(I);
+ BBInfo &MInfo = MBBInfoMap[I];
+
+ MInfo.Preds.insert(I->pred_begin(), I->pred_end());
+ if (MInfo.Preds.size() != I->pred_size())
+ report("MBB has duplicate entries in its predecessor list.", I);
+
+ MInfo.Succs.insert(I->succ_begin(), I->succ_end());
+ if (MInfo.Succs.size() != I->succ_size())
+ report("MBB has duplicate entries in its successor list.", I);
+ }
}
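In passing: the duplicate check above needs nothing beyond set semantics — inserting a list into a set and comparing sizes exposes repeated entries without sorting. In miniature:

#include <set>
#include <vector>

bool hasDuplicates(const std::vector<const void*> &List) {
  std::set<const void*> S(List.begin(), List.end());
  return S.size() != List.size(); // any shrinkage means a repeated entry
}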
// Does iterator point to a and b as the first two elements?
@@ -470,6 +491,25 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
E = MBB->succ_end(); I != E; ++I) {
if ((*I)->isLandingPad())
LandingPadSuccs.insert(*I);
+ if (!FunctionBlocks.count(*I))
+ report("MBB has successor that isn't part of the function.", MBB);
+ if (!MBBInfoMap[*I].Preds.count(MBB)) {
+ report("Inconsistent CFG", MBB);
+ *OS << "MBB is not in the predecessor list of the successor BB#"
+ << (*I)->getNumber() << ".\n";
+ }
+ }
+
+ // Check the predecessor list.
+ for (MachineBasicBlock::const_pred_iterator I = MBB->pred_begin(),
+ E = MBB->pred_end(); I != E; ++I) {
+ if (!FunctionBlocks.count(*I))
+ report("MBB has predecessor that isn't part of the function.", MBB);
+ if (!MBBInfoMap[*I].Succs.count(MBB)) {
+ report("Inconsistent CFG", MBB);
+ *OS << "MBB is not in the successor list of the predecessor BB#"
+ << (*I)->getNumber() << ".\n";
+ }
}
const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
@@ -540,7 +580,15 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
++MBBI;
if (MBBI == MF->end()) {
report("MBB conditionally falls through out of function!", MBB);
- } if (MBB->succ_size() != 2) {
+ } else if (MBB->succ_size() == 1) {
+ // A conditional branch with only one successor is weird, but allowed.
+ if (&*MBBI != TBB)
+ report("MBB exits via conditional branch/fall-through but only has "
+ "one CFG successor!", MBB);
+ else if (TBB != *MBB->succ_begin())
+ report("MBB exits via conditional branch/fall-through but the CFG "
+ "successor don't match the actual successor!", MBB);
+ } else if (MBB->succ_size() != 2) {
report("MBB exits via conditional branch/fall-through but doesn't have "
"exactly two CFG successors!", MBB);
} else if (!matchPair(MBB->succ_begin(), TBB, MBBI)) {
@@ -560,7 +608,15 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
} else if (TBB && FBB) {
// Block conditionally branches somewhere, otherwise branches
// somewhere else.
- if (MBB->succ_size() != 2) {
+ if (MBB->succ_size() == 1) {
+ // A conditional branch with only one successor is weird, but allowed.
+ if (FBB != TBB)
+ report("MBB exits via conditional branch/branch through but only has "
+ "one CFG successor!", MBB);
+ else if (TBB != *MBB->succ_begin())
+ report("MBB exits via conditional branch/branch through but the CFG "
+ "successor don't match the actual successor!", MBB);
+ } else if (MBB->succ_size() != 2) {
report("MBB exits via conditional branch/branch but doesn't have "
"exactly two CFG successors!", MBB);
} else if (!matchPair(MBB->succ_begin(), TBB, FBB)) {
@@ -639,6 +695,50 @@ void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
}
}
+// The operands on an INLINEASM instruction must follow a template.
+// Verify that the flag operands make sense.
+void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
+ // The first two operands on INLINEASM are the asm string and global flags.
+ if (MI->getNumOperands() < 2) {
+ report("Too few operands on inline asm", MI);
+ return;
+ }
+ if (!MI->getOperand(0).isSymbol())
+ report("Asm string must be an external symbol", MI);
+ if (!MI->getOperand(1).isImm())
+ report("Asm flags must be an immediate", MI);
+ // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
+ // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16.
+ if (!isUInt<5>(MI->getOperand(1).getImm()))
+ report("Unknown asm flags", &MI->getOperand(1), 1);
+
+ assert(InlineAsm::MIOp_FirstOperand == 2 && "Asm format changed");
+
+ unsigned OpNo = InlineAsm::MIOp_FirstOperand;
+ unsigned NumOps;
+ for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ // There may be implicit ops after the fixed operands.
+ if (!MO.isImm())
+ break;
+ NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
+ }
+
+ if (OpNo > MI->getNumOperands())
+ report("Missing operands in last group", MI);
+
+ // An optional MDNode follows the groups.
+ if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
+ ++OpNo;
+
+ // All trailing operands must be implicit registers.
+ for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ if (!MO.isReg() || !MO.isImplicit())
+ report("Expected implicit register after groups", &MO, OpNo);
+ }
+}
+
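A note on the walk above: verifyInlineAsm visits the operands group by group, where each group leader is an immediate flag word encoding how many register operands follow it. The same walk in miniature over a fake operand list, with decodeNumRegs standing in for InlineAsm::getNumOperandRegisters:

#include <cstddef>
#include <vector>

struct Op { bool IsImm; unsigned Imm; };

// Hypothetical encoding: pretend the low three bits of a flag word hold
// the group's register count; the real InlineAsm encoding differs.
static unsigned decodeNumRegs(unsigned Flags) { return Flags & 7; }

// Count groups, stopping at the first non-immediate operand, which marks
// the start of the trailing implicit operands.
unsigned countGroups(const std::vector<Op> &Ops, size_t First) {
  unsigned Groups = 0;
  for (size_t I = First; I < Ops.size() && Ops[I].IsImm;
       I += 1 + decodeNumRegs(Ops[I].Imm)) // skip leader plus registers
    ++Groups;
  return Groups;
}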
void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
const MCInstrDesc &MCID = MI->getDesc();
if (MI->getNumOperands() < MCID.getNumOperands()) {
@@ -647,6 +747,10 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
<< MI->getNumExplicitOperands() << " given.\n";
}
+ // Check the tied operands.
+ if (MI->isInlineAsm())
+ verifyInlineAsm(MI);
+
// Check the MachineMemOperands for basic consistency.
for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
E = MI->memoperands_end(); I != E; ++I) {
@@ -702,6 +806,17 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
if (MO->isImplicit())
report("Explicit operand marked as implicit", MO, MONum);
}
+
+ int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
+ if (TiedTo != -1) {
+ if (!MO->isReg())
+ report("Tied use must be a register", MO, MONum);
+ else if (!MO->isTied())
+ report("Operand should be tied", MO, MONum);
+ else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
+ report("Tied def doesn't match MCInstrDesc", MO, MONum);
+ } else if (MO->isReg() && MO->isTied())
+ report("Explicit operand should not be tied", MO, MONum);
} else {
// ARM adds %reg0 operands to indicate predicates. We'll allow that.
if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
@@ -716,6 +831,28 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
if (MRI->tracksLiveness() && !MI->isDebugValue())
checkLiveness(MO, MONum);
+ // Verify the consistency of tied operands.
+ if (MO->isTied()) {
+ unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
+ const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
+ if (!OtherMO.isReg())
+ report("Must be tied to a register", MO, MONum);
+ if (!OtherMO.isTied())
+ report("Missing tie flags on tied operand", MO, MONum);
+ if (MI->findTiedOperandIdx(OtherIdx) != MONum)
+ report("Inconsistent tie links", MO, MONum);
+ if (MONum < MCID.getNumDefs()) {
+ if (OtherIdx < MCID.getNumOperands()) {
+ if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
+ report("Explicit def tied to explicit use without tie constraint",
+ MO, MONum);
+ } else {
+ if (!OtherMO.isImplicit())
+ report("Explicit def should be tied to implicit use", MO, MONum);
+ }
+ }
+ }
+
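The invariant being enforced here, stated directly: tie links form an involution — following the link from operand A must reach B, and following it from B must land back on A. A toy check over an index array, with ~0u marking untied operands:

#include <vector>

// TiedTo[i] holds the operand index i is tied to, or ~0u when untied.
// Valid links always come in symmetric pairs.
bool tiesAreConsistent(const std::vector<unsigned> &TiedTo) {
  for (unsigned I = 0, E = TiedTo.size(); I != E; ++I) {
    unsigned J = TiedTo[I];
    if (J == ~0u)
      continue;                 // untied operand
    if (J >= E || TiedTo[J] != I)
      return false;             // dangling or asymmetric link
  }
  return true;
}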
// Verify two-address constraints after leaving SSA form.
unsigned DefIdx;
if (!MRI->isSSA() && MO->isUse() &&
diff --git a/contrib/llvm/lib/CodeGen/Passes.cpp b/contrib/llvm/lib/CodeGen/Passes.cpp
index cfa3eec..4ea21d4 100644
--- a/contrib/llvm/lib/CodeGen/Passes.cpp
+++ b/contrib/llvm/lib/CodeGen/Passes.cpp
@@ -49,8 +49,8 @@ static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
cl::desc("Disable Stack Slot Coloring"));
static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden,
cl::desc("Disable Machine Dead Code Elimination"));
-static cl::opt<bool> EnableEarlyIfConversion("enable-early-ifcvt", cl::Hidden,
- cl::desc("Enable Early If-conversion"));
+static cl::opt<bool> DisableEarlyIfConversion("disable-early-ifcvt", cl::Hidden,
+ cl::desc("Disable Early If-conversion"));
static cl::opt<bool> DisableMachineLICM("disable-machine-licm", cl::Hidden,
cl::desc("Disable Machine LICM"));
static cl::opt<bool> DisableMachineCSE("disable-machine-cse", cl::Hidden,
@@ -161,7 +161,7 @@ static AnalysisID overridePass(AnalysisID StandardID, AnalysisID TargetID) {
return applyDisable(TargetID, DisableMachineDCE);
if (StandardID == &EarlyIfConverterID)
- return applyDisable(TargetID, !EnableEarlyIfConversion);
+ return applyDisable(TargetID, DisableEarlyIfConversion);
if (StandardID == &MachineLICMID)
return applyDisable(TargetID, DisableMachineLICM);
@@ -447,8 +447,8 @@ void TargetPassConfig::addMachinePasses() {
const PassInfo *TPI = PR->getPassInfo(PrintMachineInstrs.getValue());
const PassInfo *IPI = PR->getPassInfo(StringRef("print-machineinstrs"));
assert (TPI && IPI && "Pass ID not registered!");
- const char *TID = (char *)(TPI->getTypeInfo());
- const char *IID = (char *)(IPI->getTypeInfo());
+ const char *TID = (const char *)(TPI->getTypeInfo());
+ const char *IID = (const char *)(IPI->getTypeInfo());
insertPass(TID, IID);
}
@@ -456,7 +456,8 @@ void TargetPassConfig::addMachinePasses() {
printAndVerify("After Instruction Selection");
// Expand pseudo-instructions emitted by ISel.
- addPass(&ExpandISelPseudosID);
+ if (addPass(&ExpandISelPseudosID))
+ printAndVerify("After ExpandISelPseudos");
// Add passes that optimize machine instructions in SSA form.
if (getOptLevel() != CodeGenOpt::None) {
@@ -528,6 +529,10 @@ void TargetPassConfig::addMachineSSAOptimization() {
// instructions dead.
addPass(&OptimizePHIsID);
+ // This pass merges large allocas. StackSlotColoring is a different pass
+ // which merges spill slots.
+ addPass(&StackColoringID);
+
// If the target requests it, assign local variables to stack slots relative
// to one another and simplify frame index references where possible.
addPass(&LocalStackSlotAllocationID);
diff --git a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 9099862..a795ac8 100644
--- a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -527,6 +527,11 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
SeenMoveImm = true;
} else {
Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
+ // optimizeExtInstr might have created new instructions after MI
+ // and before the already incremented MII. Adjust MII so that the
+ // next iteration sees the new instructions.
+ MII = MI;
+ ++MII;
if (SeenMoveImm)
Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
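This is the classic iterator re-anchoring fix: the next-iterator was captured before optimizeExtInstr inserted instructions after MI, so it would silently skip them; recomputing it from MI restores the scan. The same bug and fix sketched on a std::list:

#include <iterator>
#include <list>

void visitAll(std::list<int> &L) {
  for (auto I = L.begin(); I != L.end();) {
    auto Next = std::next(I);  // captured early, like MII
    if (*I == 42)
      L.insert(Next, 43);      // new element now sits between I and Next
    Next = std::next(I);       // re-anchor so the insertion is visited too
    I = Next;
  }
}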
diff --git a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
index 7449ff5..d57bc73 100644
--- a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -240,6 +240,7 @@ void SchedulePostRATDList::exitRegion() {
ScheduleDAGInstrs::exitRegion();
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
@@ -249,6 +250,7 @@ void SchedulePostRATDList::dumpSchedule() const {
dbgs() << "**** NOOP ****\n";
}
}
+#endif
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
TII = Fn.getTarget().getInstrInfo();
@@ -298,7 +300,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
static int bbcnt = 0;
if (bbcnt++ % DebugDiv != DebugMod)
continue;
- dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getName()
+ dbgs() << "*** DEBUG scheduling " << Fn.getName()
<< ":BB#" << MBB->getNumber() << " ***\n";
}
#endif
@@ -488,7 +490,6 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
BitVector killedRegs(TRI->getNumRegs());
- BitVector ReservedRegs = TRI->getReservedRegs(MF);
StartBlockForKills(MBB);
@@ -529,7 +530,7 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse()) continue;
unsigned Reg = MO.getReg();
- if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
+ if ((Reg == 0) || MRI.isReserved(Reg)) continue;
bool kill = false;
if (!killedRegs.test(Reg)) {
@@ -564,7 +565,7 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
unsigned Reg = MO.getReg();
- if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
+ if ((Reg == 0) || MRI.isReserved(Reg)) continue;
LiveRegs.set(Reg);
diff --git a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
index 34d075c..e4e18c3 100644
--- a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -137,8 +137,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
- << "********** Function: "
- << ((Value*)MF.getFunction())->getName() << '\n');
+ << "********** Function: " << MF.getName() << '\n');
bool Changed = false;
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index c791ffb..77554d6 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -96,7 +96,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
placeCSRSpillsAndRestores(Fn);
// Add the code to save and restore the callee saved registers
- if (!F->hasFnAttr(Attribute::Naked))
+ if (!F->getFnAttributes().hasAttribute(Attributes::Naked))
insertCSRSpillsAndRestores(Fn);
// Allow the target machine to make final modifications to the function
@@ -111,7 +111,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
// called functions. Because of this, calculateCalleeSavedRegisters()
// must be called before this function in order to set the AdjustsStack
// and MaxCallFrameSize variables.
- if (!F->hasFnAttr(Attribute::Naked))
+ if (!F->getFnAttributes().hasAttribute(Attributes::Naked))
insertPrologEpilogCode(Fn);
// Replace all MO_FrameIndex operands with physical register references
@@ -221,13 +221,13 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
return;
// In Naked functions we aren't going to save any registers.
- if (Fn.getFunction()->hasFnAttr(Attribute::Naked))
+ if (Fn.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked))
return;
std::vector<CalleeSavedInfo> CSI;
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
- if (Fn.getRegInfo().isPhysRegOrOverlapUsed(Reg)) {
+ if (Fn.getRegInfo().isPhysRegUsed(Reg)) {
// If the reg is modified, save it!
CSI.push_back(CalleeSavedInfo(Reg));
}
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
index 3a03807..8a49609 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
@@ -20,7 +20,6 @@
#include "VirtRegMap.h"
#include "LiveRegMatrix.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -273,7 +272,7 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
bool RABasic::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "********** BASIC REGISTER ALLOCATION **********\n"
<< "********** Function: "
- << ((Value*)mf.getFunction())->getName() << '\n');
+ << mf.getName() << '\n');
MF = &mf;
RegAllocBase::init(getAnalysis<VirtRegMap>(),
diff --git a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
index 6b3a48e..8892216 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -113,9 +113,11 @@ namespace {
// PhysRegState - One of the RegState enums, or a virtreg.
std::vector<unsigned> PhysRegState;
- // UsedInInstr - BitVector of physregs that are used in the current
- // instruction, and so cannot be allocated.
- BitVector UsedInInstr;
+ typedef SparseSet<unsigned> UsedInInstrSet;
+
+ // UsedInInstr - Set of physregs that are used in the current instruction,
+ // and so cannot be allocated.
+ UsedInInstrSet UsedInInstr;
// SkippedInstrs - Descriptors of instructions whose clobber list was
// ignored because all registers were spilled. It is still necessary to
@@ -173,7 +175,7 @@ namespace {
unsigned VirtReg, unsigned Hint);
LiveRegMap::iterator reloadVirtReg(MachineInstr *MI, unsigned OpNum,
unsigned VirtReg, unsigned Hint);
- void spillAll(MachineInstr *MI);
+ void spillAll(MachineBasicBlock::iterator MI);
bool setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg);
void addRetOperands(MachineBasicBlock *MBB);
};
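On the data structure swap: replacing the BitVector with a SparseSet changes the cost model — clearing and iterating now cost time proportional to the registers actually touched by the instruction, not to the total number of physregs. A minimal sparse set in the Briggs-Torczon style, assuming keys below a fixed universe size (a sketch, not the LLVM ADT itself):

#include <cassert>
#include <vector>

// Sparse[key] points into Dense; membership is validated by the round
// trip, so stale Sparse entries are harmless and clear() is O(1).
class MiniSparseSet {
  std::vector<unsigned> Dense;
  std::vector<unsigned> Sparse;
public:
  void setUniverse(unsigned U) { Sparse.assign(U, 0); Dense.clear(); }
  bool count(unsigned Key) const {
    unsigned Idx = Sparse[Key];
    return Idx < Dense.size() && Dense[Idx] == Key;
  }
  void insert(unsigned Key) {
    assert(Key < Sparse.size() && "key outside universe");
    if (count(Key)) return;
    Sparse[Key] = Dense.size();
    Dense.push_back(Key);
  }
  void clear() { Dense.clear(); } // O(1), unlike BitVector::reset()
};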
@@ -312,7 +314,7 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
}
/// spillAll - Spill all dirty virtregs without killing them.
-void RAFast::spillAll(MachineInstr *MI) {
+void RAFast::spillAll(MachineBasicBlock::iterator MI) {
if (LiveVirtRegs.empty()) return;
isBulkSpilling = true;
// The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
@@ -340,7 +342,7 @@ void RAFast::usePhysReg(MachineOperand &MO) {
PhysRegState[PhysReg] = regFree;
// Fall through
case regFree:
- UsedInInstr.set(PhysReg);
+ UsedInInstr.insert(PhysReg);
MO.setIsKill();
return;
default:
@@ -360,13 +362,13 @@ void RAFast::usePhysReg(MachineOperand &MO) {
"Instruction is not using a subregister of a reserved register");
// Leave the superregister in the working set.
PhysRegState[Alias] = regFree;
- UsedInInstr.set(Alias);
+ UsedInInstr.insert(Alias);
MO.getParent()->addRegisterKilled(Alias, TRI, true);
return;
case regFree:
if (TRI->isSuperRegister(PhysReg, Alias)) {
// Leave the superregister in the working set.
- UsedInInstr.set(Alias);
+ UsedInInstr.insert(Alias);
MO.getParent()->addRegisterKilled(Alias, TRI, true);
return;
}
@@ -380,7 +382,7 @@ void RAFast::usePhysReg(MachineOperand &MO) {
// All aliases are disabled, bring register into working set.
PhysRegState[PhysReg] = regFree;
- UsedInInstr.set(PhysReg);
+ UsedInInstr.insert(PhysReg);
MO.setIsKill();
}
@@ -389,7 +391,7 @@ void RAFast::usePhysReg(MachineOperand &MO) {
/// reserved instead of allocated.
void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg,
RegState NewState) {
- UsedInInstr.set(PhysReg);
+ UsedInInstr.insert(PhysReg);
switch (unsigned VirtReg = PhysRegState[PhysReg]) {
case regDisabled:
break;
@@ -429,7 +431,7 @@ void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg,
// can be allocated directly.
// Returns spillImpossible when PhysReg or an alias can't be spilled.
unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
- if (UsedInInstr.test(PhysReg)) {
+ if (UsedInInstr.count(PhysReg)) {
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is already used in instr.\n");
return spillImpossible;
}
@@ -454,7 +456,7 @@ unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
unsigned Cost = 0;
for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
unsigned Alias = *AI;
- if (UsedInInstr.test(Alias))
+ if (UsedInInstr.count(Alias))
return spillImpossible;
switch (unsigned VirtReg = PhysRegState[Alias]) {
case regDisabled:
@@ -509,7 +511,7 @@ RAFast::LiveRegMap::iterator RAFast::allocVirtReg(MachineInstr *MI,
// Ignore invalid hints.
if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) ||
- !RC->contains(Hint) || !RegClassInfo.isAllocatable(Hint)))
+ !RC->contains(Hint) || !MRI->isAllocatable(Hint)))
Hint = 0;
// Take hint when possible.
@@ -530,7 +532,7 @@ RAFast::LiveRegMap::iterator RAFast::allocVirtReg(MachineInstr *MI,
// First try to find a completely free register.
for (ArrayRef<unsigned>::iterator I = AO.begin(), E = AO.end(); I != E; ++I) {
unsigned PhysReg = *I;
- if (PhysRegState[PhysReg] == regFree && !UsedInInstr.test(PhysReg)) {
+ if (PhysRegState[PhysReg] == regFree && !UsedInInstr.count(PhysReg)) {
assignVirtToPhysReg(*LRI, PhysReg);
return LRI;
}
@@ -596,7 +598,7 @@ RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum,
LRI->LastUse = MI;
LRI->LastOpNum = OpNum;
LRI->Dirty = true;
- UsedInInstr.set(LRI->PhysReg);
+ UsedInInstr.insert(LRI->PhysReg);
return LRI;
}
@@ -646,7 +648,7 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
assert(LRI->PhysReg && "Register not assigned");
LRI->LastUse = MI;
LRI->LastOpNum = OpNum;
- UsedInInstr.set(LRI->PhysReg);
+ UsedInInstr.insert(LRI->PhysReg);
return LRI;
}
@@ -708,7 +710,7 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
- UsedInInstr.set(*AI);
+ UsedInInstr.insert(*AI);
if (ThroughRegs.count(PhysRegState[*AI]))
definePhysReg(MI, *AI, regFree);
}
@@ -756,7 +758,7 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
}
// Restore UsedInInstr to a state usable for allocating normal virtual uses.
- UsedInInstr.reset();
+ UsedInInstr.clear();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
@@ -764,12 +766,12 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
DEBUG(dbgs() << "\tSetting " << PrintReg(Reg, TRI)
<< " as used in instr\n");
- UsedInInstr.set(Reg);
+ UsedInInstr.insert(Reg);
}
// Also mark PartialDefs as used to avoid reallocation.
for (unsigned i = 0, e = PartialDefs.size(); i != e; ++i)
- UsedInInstr.set(PartialDefs[i]);
+ UsedInInstr.insert(PartialDefs[i]);
}
/// addRetOperand - ensure that a return instruction has an operand for each
@@ -838,7 +840,7 @@ void RAFast::AllocateBasicBlock() {
// Add live-in registers as live.
for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
E = MBB->livein_end(); I != E; ++I)
- if (RegClassInfo.isAllocatable(*I))
+ if (MRI->isAllocatable(*I))
definePhysReg(MII, *I, regReserved);
SmallVector<unsigned, 8> VirtDead;
@@ -942,7 +944,7 @@ void RAFast::AllocateBasicBlock() {
}
// Track registers used by instruction.
- UsedInInstr.reset();
+ UsedInInstr.clear();
// First scan.
// Mark physreg uses and early clobbers as used.
@@ -954,6 +956,11 @@ void RAFast::AllocateBasicBlock() {
bool hasPhysDefs = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
+ // Make sure MRI knows about registers clobbered by regmasks.
+ if (MO.isRegMask()) {
+ MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
+ continue;
+ }
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!Reg) continue;
@@ -970,7 +977,7 @@ void RAFast::AllocateBasicBlock() {
}
continue;
}
- if (!RegClassInfo.isAllocatable(Reg)) continue;
+ if (!MRI->isAllocatable(Reg)) continue;
if (MO.isUse()) {
usePhysReg(MO);
} else if (MO.isEarlyClobber()) {
@@ -1016,11 +1023,13 @@ void RAFast::AllocateBasicBlock() {
}
}
- MRI->addPhysRegsUsed(UsedInInstr);
+ for (UsedInInstrSet::iterator
+ I = UsedInInstr.begin(), E = UsedInInstr.end(); I != E; ++I)
+ MRI->setPhysRegUsed(*I);
// Track registers defined by instruction - early clobbers and tied uses at
// this point.
- UsedInInstr.reset();
+ UsedInInstr.clear();
if (hasEarlyClobbers) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
@@ -1030,7 +1039,7 @@ void RAFast::AllocateBasicBlock() {
// Look for physreg defs and tied uses.
if (!MO.isDef() && !MI->isRegTiedToDefOperand(i)) continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
- UsedInInstr.set(*AI);
+ UsedInInstr.insert(*AI);
}
}
@@ -1058,7 +1067,7 @@ void RAFast::AllocateBasicBlock() {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- if (!RegClassInfo.isAllocatable(Reg)) continue;
+ if (!MRI->isAllocatable(Reg)) continue;
definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ?
regFree : regReserved);
continue;
@@ -1080,7 +1089,9 @@ void RAFast::AllocateBasicBlock() {
killVirtReg(VirtDead[i]);
VirtDead.clear();
- MRI->addPhysRegsUsed(UsedInInstr);
+ for (UsedInInstrSet::iterator
+ I = UsedInInstr.begin(), E = UsedInInstr.end(); I != E; ++I)
+ MRI->setPhysRegUsed(*I);
if (CopyDst && CopyDst == CopySrc && CopyDstSub == CopySrcSub) {
DEBUG(dbgs() << "-- coalescing: " << *MI);
@@ -1110,8 +1121,7 @@ void RAFast::AllocateBasicBlock() {
///
bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
- << "********** Function: "
- << ((Value*)Fn.getFunction())->getName() << '\n');
+ << "********** Function: " << Fn.getName() << '\n');
MF = &Fn;
MRI = &MF->getRegInfo();
TM = &Fn.getTarget();
@@ -1119,7 +1129,8 @@ bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
TII = TM->getInstrInfo();
MRI->freezeReservedRegs(Fn);
RegClassInfo.runOnMachineFunction(Fn);
- UsedInInstr.resize(TRI->getNumRegs());
+ UsedInInstr.clear();
+ UsedInInstr.setUniverse(TRI->getNumRegs());
assert(!MRI->isSSA() && "regalloc requires leaving SSA");
diff --git a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
index 6ac5428..06f69c1e 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -24,7 +24,6 @@
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
@@ -331,9 +330,9 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addPreserved<LiveDebugVariables>();
- AU.addRequired<CalculateSpillWeights>();
AU.addRequired<LiveStacks>();
AU.addPreserved<LiveStacks>();
+ AU.addRequired<CalculateSpillWeights>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
@@ -509,7 +508,7 @@ bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
-/// @prarm IsHint True when PhysReg is VirtReg's preferred register.
+/// @param IsHint True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
/// when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
@@ -1746,8 +1745,7 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
- << "********** Function: "
- << ((Value*)mf.getFunction())->getName() << '\n');
+ << "********** Function: " << mf.getName() << '\n');
MF = &mf;
if (VerifyEnabled)
diff --git a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
index d0db26b..02ebce7 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
@@ -118,7 +118,6 @@ private:
typedef std::vector<AllowedSet> AllowedSetMap;
typedef std::pair<unsigned, unsigned> RegPair;
typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
- typedef std::vector<PBQP::Graph::NodeItr> NodeVector;
typedef std::set<unsigned> RegSet;
@@ -192,7 +191,6 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
const MachineLoopInfo *loopInfo,
const RegSet &vregs) {
- typedef std::vector<const LiveInterval*> LIVector;
LiveIntervals *LIS = const_cast<LiveIntervals*>(lis);
MachineRegisterInfo *mri = &mf->getRegInfo();
const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();
@@ -209,8 +207,6 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
mri->setPhysRegUsed(Reg);
}
- BitVector reservedRegs = tri->getReservedRegs(*mf);
-
// Iterate over vregs.
for (RegSet::const_iterator vregItr = vregs.begin(), vregEnd = vregs.end();
vregItr != vregEnd; ++vregItr) {
@@ -219,7 +215,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
LiveInterval *vregLI = &LIS->getInterval(vreg);
// Record any overlaps with regmask operands.
- BitVector regMaskOverlaps(tri->getNumRegs());
+ BitVector regMaskOverlaps;
LIS->checkRegMaskInterference(*vregLI, regMaskOverlaps);
// Compute an initial allowed set for the current vreg.
@@ -228,7 +224,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
ArrayRef<uint16_t> rawOrder = trc->getRawAllocationOrder(*mf);
for (unsigned i = 0; i != rawOrder.size(); ++i) {
unsigned preg = rawOrder[i];
- if (reservedRegs.test(preg))
+ if (mri->isReserved(preg))
continue;
// vregLI crosses a regmask operand that clobbers preg.
@@ -358,7 +354,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build(
loopInfo->getLoopDepth(mbb));
if (cp.isPhys()) {
- if (!lis->isAllocatable(dst)) {
+ if (!mf->getRegInfo().isAllocatable(dst)) {
continue;
}
@@ -433,6 +429,7 @@ void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
au.addRequired<SlotIndexes>();
au.addPreserved<SlotIndexes>();
au.addRequired<LiveIntervals>();
+ au.addPreserved<LiveIntervals>();
//au.addRequiredID(SplitCriticalEdgesID);
if (customPassID)
au.addRequiredID(*customPassID);
@@ -444,6 +441,7 @@ void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
au.addRequired<MachineLoopInfo>();
au.addPreserved<MachineLoopInfo>();
au.addRequired<VirtRegMap>();
+ au.addPreserved<VirtRegMap>();
MachineFunctionPass::getAnalysisUsage(au);
}
@@ -556,7 +554,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
mri->freezeReservedRegs(MF);
- DEBUG(dbgs() << "PBQP Register Allocating for " << mf->getFunction()->getName() << "\n");
+ DEBUG(dbgs() << "PBQP Register Allocating for " << mf->getName() << "\n");
// Allocator main loop:
//
@@ -570,11 +568,12 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
// Find the vreg intervals in need of allocation.
findVRegIntervalsToAlloc();
+#ifndef NDEBUG
const Function* func = mf->getFunction();
std::string fqn =
func->getParent()->getModuleIdentifier() + "." +
func->getName().str();
- (void)fqn;
+#endif
// If there are non-empty intervals allocate them using pbqp.
if (!vregsToAlloc.empty()) {
diff --git a/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp b/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
index 652bc30..805d235 100644
--- a/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
@@ -15,8 +15,9 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -57,10 +58,11 @@ void RegisterClassInfo::runOnMachineFunction(const MachineFunction &mf) {
CalleeSaved = CSR;
// Different reserved registers?
- BitVector RR = TRI->getReservedRegs(*MF);
- if (RR != Reserved)
+ const BitVector &RR = MF->getRegInfo().getReservedRegs();
+ if (Reserved.size() != RR.size() || RR != Reserved) {
Update = true;
- Reserved = RR;
+ Reserved = RR;
+ }
// Invalidate cached information from previous function.
if (Update)
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 9906334..2538f10 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -55,6 +55,8 @@ STATISTIC(numCommutes , "Number of instruction commuting performed");
STATISTIC(numExtends , "Number of copies extended");
STATISTIC(NumReMats , "Number of instructions re-materialized");
STATISTIC(NumInflated , "Number of register classes inflated");
+STATISTIC(NumLaneConflicts, "Number of dead lane conflicts tested");
+STATISTIC(NumLaneResolves, "Number of dead lane conflicts resolved");
static cl::opt<bool>
EnableJoining("join-liveintervals",
@@ -123,6 +125,9 @@ namespace {
/// can use this information below to update aliases.
bool joinIntervals(CoalescerPair &CP);
+ /// Attempt joining two virtual registers. Return true on success.
+ bool joinVirtRegs(CoalescerPair &CP);
+
/// Attempt joining with a reserved physreg.
bool joinReservedPhysReg(CoalescerPair &CP);
@@ -193,12 +198,6 @@ INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing",
char RegisterCoalescer::ID = 0;
-static unsigned compose(const TargetRegisterInfo &tri, unsigned a, unsigned b) {
- if (!a) return b;
- if (!b) return a;
- return tri.composeSubRegIndices(a, b);
-}
-
static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
unsigned &Src, unsigned &Dst,
unsigned &SrcSub, unsigned &DstSub) {
@@ -209,8 +208,8 @@ static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
SrcSub = MI->getOperand(1).getSubReg();
} else if (MI->isSubregToReg()) {
Dst = MI->getOperand(0).getReg();
- DstSub = compose(tri, MI->getOperand(0).getSubReg(),
- MI->getOperand(3).getImm());
+ DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
+ MI->getOperand(3).getImm());
Src = MI->getOperand(2).getReg();
SrcSub = MI->getOperand(2).getSubReg();
} else
@@ -349,7 +348,8 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
if (DstReg != Dst)
return false;
// Registers match, do the subregisters line up?
- return compose(TRI, SrcIdx, SrcSub) == compose(TRI, DstIdx, DstSub);
+ return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
+ TRI.composeSubRegIndices(DstIdx, DstSub);
}
}
@@ -425,7 +425,8 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
// If AValNo is defined as a copy from IntB, we can potentially process this.
// Get the instruction that defines this value number.
MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def);
- if (!CP.isCoalescable(ACopyMI))
+ // Don't allow any partial copies, even if isCoalescable() allows them.
+ if (!CP.isCoalescable(ACopyMI) || !ACopyMI->isFullCopy())
return false;
// Get the LiveRange in IntB that this value number starts with.
@@ -583,7 +584,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
unsigned NewReg = NewDstMO.getReg();
- if (NewReg != IntB.reg || !NewDstMO.isKill())
+ if (NewReg != IntB.reg || !LiveRangeQuery(IntB, AValNo->def).isKill())
return false;
// Make sure there are no other definitions of IntB that would reach the
@@ -849,8 +850,17 @@ void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
// Update LiveDebugVariables.
LDV->renameRegister(SrcReg, DstReg, SubIdx);
+ SmallPtrSet<MachineInstr*, 8> Visited;
for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg);
MachineInstr *UseMI = I.skipInstruction();) {
+ // Each instruction can only be rewritten once because sub-register
+ // composition is not always idempotent. When SrcReg != DstReg, rewriting
+ // the UseMI operands removes them from the SrcReg use-def chain, but when
+ // SrcReg is DstReg we could encounter UseMI twice if it has multiple
+ // operands mentioning the virtual register.
+ if (SrcReg == DstReg && !Visited.insert(UseMI))
+ continue;
+
SmallVector<unsigned,8> Ops;
bool Reads, Writes;
tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
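The Visited set here leans on insert's return value: the second encounter of an instruction fails to insert and gets skipped. It is only needed when SrcReg == DstReg, because otherwise rewriting an instruction removes it from the use-def chain being walked. The idiom, sketched with the standard library:

#include <set>

struct Instr;

// True the first time MI is seen, false on every later visit.
bool firstVisit(std::set<const Instr*> &Visited, const Instr *MI) {
  return Visited.insert(MI).second;
}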
@@ -890,7 +900,7 @@ bool RegisterCoalescer::canJoinPhys(CoalescerPair &CP) {
/// Always join simple intervals that are defined by a single copy from a
/// reserved register. This doesn't increase register pressure, so it is
/// always beneficial.
- if (!RegClassInfo.isReserved(CP.getDstReg())) {
+ if (!MRI->isReserved(CP.getDstReg())) {
DEBUG(dbgs() << "\tCan only merge into reserved registers.\n");
return false;
}
@@ -1065,7 +1075,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
/// Attempt joining with a reserved physreg.
bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
assert(CP.isPhys() && "Must be a physreg copy");
- assert(RegClassInfo.isReserved(CP.getDstReg()) && "Not a reserved register");
+ assert(MRI->isReserved(CP.getDstReg()) && "Not a reserved register");
LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS
<< '\n');
@@ -1102,347 +1112,797 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
return true;
}
-/// ComputeUltimateVN - Assuming we are going to join two live intervals,
-/// compute what the resultant value numbers for each value in the input two
-/// ranges will be. This is complicated by copies between the two which can
-/// and will commonly cause multiple value numbers to be merged into one.
-///
-/// VN is the value number that we're trying to resolve. InstDefiningValue
-/// keeps track of the new InstDefiningValue assignment for the result
-/// LiveInterval. ThisFromOther/OtherFromThis are sets that keep track of
-/// whether a value in this or other is a copy from the opposite set.
-/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that have
-/// already been assigned.
-///
-/// ThisFromOther[x] - If x is defined as a copy from the other interval, this
-/// contains the value number the copy is from.
-///
-static unsigned ComputeUltimateVN(VNInfo *VNI,
- SmallVector<VNInfo*, 16> &NewVNInfo,
- DenseMap<VNInfo*, VNInfo*> &ThisFromOther,
- DenseMap<VNInfo*, VNInfo*> &OtherFromThis,
- SmallVector<int, 16> &ThisValNoAssignments,
- SmallVector<int, 16> &OtherValNoAssignments) {
- unsigned VN = VNI->id;
-
- // If the VN has already been computed, just return it.
- if (ThisValNoAssignments[VN] >= 0)
- return ThisValNoAssignments[VN];
- assert(ThisValNoAssignments[VN] != -2 && "Cyclic value numbers");
-
- // If this val is not a copy from the other val, then it must be a new value
- // number in the destination.
- DenseMap<VNInfo*, VNInfo*>::iterator I = ThisFromOther.find(VNI);
- if (I == ThisFromOther.end()) {
- NewVNInfo.push_back(VNI);
- return ThisValNoAssignments[VN] = NewVNInfo.size()-1;
- }
- VNInfo *OtherValNo = I->second;
-
- // Otherwise, this *is* a copy from the RHS. If the other side has already
- // been computed, return it.
- if (OtherValNoAssignments[OtherValNo->id] >= 0)
- return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id];
-
- // Mark this value number as currently being computed, then ask what the
- // ultimate value # of the other value is.
- ThisValNoAssignments[VN] = -2;
- unsigned UltimateVN =
- ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther,
- OtherValNoAssignments, ThisValNoAssignments);
- return ThisValNoAssignments[VN] = UltimateVN;
-}
+//===----------------------------------------------------------------------===//
+// Interference checking and interval joining
+//===----------------------------------------------------------------------===//
+//
+// In the easiest case, the two live ranges being joined are disjoint, and
+// there is no interference to consider. It is quite common, though, to have
+// overlapping live ranges, and we need to check if the interference can be
+// resolved.
+//
+// The live range of a single SSA value forms a sub-tree of the dominator tree.
+// This means that two SSA values overlap if and only if the def of one value
+// is contained in the live range of the other value. As a special case, the
+// overlapping values can be defined at the same index.
+//
+// The interference from an overlapping def can be resolved in these cases:
+//
+// 1. Coalescable copies. The value is defined by a copy that would become an
+// identity copy after joining SrcReg and DstReg. The copy instruction will
+// be removed, and the value will be merged with the source value.
+//
+// There can be several copies back and forth, causing many values to be
+// merged into one. We compute a list of ultimate values in the joined live
+// range as well as a mapping from the old value numbers.
+//
+// 2. IMPLICIT_DEF. This instruction is only inserted to ensure all PHI
+// predecessors have a live out value. It doesn't cause real interference,
+// and can be merged into the value it overlaps. Like a coalescable copy, it
+// can be erased after joining.
+//
+// 3. Copy of external value. The overlapping def may be a copy of a value that
+// is already in the other register. This is like a coalescable copy, but
+// the live range of the source register must be trimmed after erasing the
+// copy instruction:
+//
+// %src = COPY %ext
+// %dst = COPY %ext <-- Remove this COPY, trim the live range of %ext.
+//
+// 4. Clobbering undefined lanes. Vector registers are sometimes built by
+// defining one lane at a time:
+//
+// %dst:ssub0<def,read-undef> = FOO
+// %src = BAR
+// %dst:ssub1<def> = COPY %src
+//
+// The live range of %src overlaps the %dst value defined by FOO, but
+// merging %src into %dst:ssub1 is only going to clobber the ssub1 lane
+// which was undef anyway.
+//
+// The value mapping is more complicated in this case. The final live range
+// will have different value numbers for both FOO and BAR, but there is no
+// simple mapping from old to new values. It may even be necessary to add
+// new PHI values.
+//
+// 5. Clobbering dead lanes. A def may clobber a lane of a vector register that
+// is live, but never read. This can happen because we don't compute
+// individual live ranges per lane.
+//
+// %dst<def> = FOO
+// %src = BAR
+// %dst:ssub1<def> = COPY %src
+//
+// This kind of interference is only resolved locally. If the clobbered
+// lane value escapes the block, the join is aborted.
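// For illustration only (hypothetical MIR, not part of this patch), case 1
// can chain: several copies back and forth all collapse into one ultimate
// value once SrcReg and DstReg are joined:
//
//   %dst = FOO          ;; value V0
//   %src = COPY %dst    ;; coalescable copy, merges into V0
//   %dst = COPY %src    ;; coalescable copy, also merges into V0
//
// Both copies become identity copies after joining, so they are erased and
// all three defs map to a single entry in NewVNInfo.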
+namespace {
+/// Track information about values in a single virtual register about to be
+/// joined. Objects of this class are always created in pairs - one for each
+/// side of the CoalescerPair.
+class JoinVals {
+ LiveInterval &LI;
+
+ // Location of this register in the final joined register.
+ // Either CP.DstIdx or CP.SrcIdx.
+ unsigned SubIdx;
+
+ // Values that will be present in the final live range.
+ SmallVectorImpl<VNInfo*> &NewVNInfo;
+
+ const CoalescerPair &CP;
+ LiveIntervals *LIS;
+ SlotIndexes *Indexes;
+ const TargetRegisterInfo *TRI;
+
+ // Value number assignments. Maps value numbers in LI to entries in NewVNInfo.
+ // This is suitable for passing to LiveInterval::join().
+ SmallVector<int, 8> Assignments;
+
+ // Conflict resolution for overlapping values.
+ enum ConflictResolution {
+ // No overlap, simply keep this value.
+ CR_Keep,
+
+ // Merge this value into OtherVNI and erase the defining instruction.
+ // Used for IMPLICIT_DEF, coalescable copies, and copies from external
+ // values.
+ CR_Erase,
+
+ // Merge this value into OtherVNI but keep the defining instruction.
+ // This is for the special case where OtherVNI is defined by the same
+ // instruction.
+ CR_Merge,
+
+ // Keep this value, and have it replace OtherVNI where possible. This
+ // complicates value mapping since OtherVNI maps to two different values
+ // before and after this def.
+ // Used when clobbering undefined or dead lanes.
+ CR_Replace,
+
+ // Unresolved conflict. Visit later when all values have been mapped.
+ CR_Unresolved,
+
+ // Unresolvable conflict. Abort the join.
+ CR_Impossible
+ };
-// Find out if we have something like
-// A = X
-// B = X
-// if so, we can pretend this is actually
-// A = X
-// B = A
-// which allows us to coalesce A and B.
-// VNI is the definition of B. LR is the life range of A that includes
-// the slot just before B. If we return true, we add "B = X" to DupCopies.
-// This implies that A dominates B.
-static bool RegistersDefinedFromSameValue(LiveIntervals &li,
- const TargetRegisterInfo &tri,
- CoalescerPair &CP,
- VNInfo *VNI,
- VNInfo *OtherVNI,
- SmallVector<MachineInstr*, 8> &DupCopies) {
- // FIXME: This is very conservative. For example, we don't handle
- // physical registers.
-
- MachineInstr *MI = li.getInstructionFromIndex(VNI->def);
-
- if (!MI || CP.isPartial() || CP.isPhys())
- return false;
+ // Per-value info for LI. The lane bit masks are all relative to the final
+ // joined register, so they can be compared directly between SrcReg and
+ // DstReg.
+ struct Val {
+ ConflictResolution Resolution;
- unsigned A = CP.getDstReg();
- if (!TargetRegisterInfo::isVirtualRegister(A))
- return false;
+ // Lanes written by this def, 0 for unanalyzed values.
+ unsigned WriteLanes;
- unsigned B = CP.getSrcReg();
- if (!TargetRegisterInfo::isVirtualRegister(B))
- return false;
+ // Lanes with defined values in this register. Other lanes are undef and
+ // safe to clobber.
+ unsigned ValidLanes;
- MachineInstr *OtherMI = li.getInstructionFromIndex(OtherVNI->def);
- if (!OtherMI)
- return false;
+ // Value in LI being redefined by this def.
+ VNInfo *RedefVNI;
- if (MI->isImplicitDef()) {
- DupCopies.push_back(MI);
- return true;
- } else {
- if (!MI->isFullCopy())
- return false;
- unsigned Src = MI->getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Src))
- return false;
- if (!OtherMI->isFullCopy())
- return false;
- unsigned OtherSrc = OtherMI->getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(OtherSrc))
- return false;
+ // Value in the other live range that overlaps this def, if any.
+ VNInfo *OtherVNI;
- if (Src != OtherSrc)
- return false;
+ // Is this value an IMPLICIT_DEF?
+ bool IsImplicitDef;
- // If the copies use two different value numbers of X, we cannot merge
- // A and B.
- LiveInterval &SrcInt = li.getInterval(Src);
- // getVNInfoBefore returns NULL for undef copies. In this case, the
- // optimization is still safe.
- if (SrcInt.getVNInfoBefore(OtherVNI->def) !=
- SrcInt.getVNInfoBefore(VNI->def))
- return false;
+ // True when the live range of this value will be pruned because of an
+ // overlapping CR_Replace value in the other live range.
+ bool Pruned;
- DupCopies.push_back(MI);
- return true;
- }
-}
+ // True once Pruned above has been computed.
+ bool PrunedComputed;
-/// joinIntervals - Attempt to join these two intervals. On failure, this
-/// returns false.
-bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) {
- // Handle physreg joins separately.
- if (CP.isPhys())
- return joinReservedPhysReg(CP);
+ Val() : Resolution(CR_Keep), WriteLanes(0), ValidLanes(0),
+ RedefVNI(0), OtherVNI(0), IsImplicitDef(false), Pruned(false),
+ PrunedComputed(false) {}
- LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
- DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS
- << '\n');
+ bool isAnalyzed() const { return WriteLanes != 0; }
+ };
- // Compute the final value assignment, assuming that the live ranges can be
- // coalesced.
- SmallVector<int, 16> LHSValNoAssignments;
- SmallVector<int, 16> RHSValNoAssignments;
- DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
- DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
- SmallVector<VNInfo*, 16> NewVNInfo;
+ // One entry per value number in LI.
+ SmallVector<Val, 8> Vals;
+
+ unsigned computeWriteLanes(const MachineInstr *DefMI, bool &Redef);
+ VNInfo *stripCopies(VNInfo *VNI);
+ ConflictResolution analyzeValue(unsigned ValNo, JoinVals &Other);
+ void computeAssignment(unsigned ValNo, JoinVals &Other);
+ bool taintExtent(unsigned, unsigned, JoinVals&,
+ SmallVectorImpl<std::pair<SlotIndex, unsigned> >&);
+ bool usesLanes(MachineInstr *MI, unsigned, unsigned, unsigned);
+ bool isPrunedValue(unsigned ValNo, JoinVals &Other);
+
+public:
+ JoinVals(LiveInterval &li, unsigned subIdx,
+ SmallVectorImpl<VNInfo*> &newVNInfo,
+ const CoalescerPair &cp,
+ LiveIntervals *lis,
+ const TargetRegisterInfo *tri)
+ : LI(li), SubIdx(subIdx), NewVNInfo(newVNInfo), CP(cp), LIS(lis),
+ Indexes(LIS->getSlotIndexes()), TRI(tri),
+ Assignments(LI.getNumValNums(), -1), Vals(LI.getNumValNums())
+ {}
+
+ /// Analyze defs in LI and compute a value mapping in NewVNInfo.
+ /// Returns false if any conflicts were impossible to resolve.
+ bool mapValues(JoinVals &Other);
+
+ /// Try to resolve conflicts that require all values to be mapped.
+ /// Returns false if any conflicts were impossible to resolve.
+ bool resolveConflicts(JoinVals &Other);
+
+ /// Prune the live range of values in Other.LI where they would conflict with
+ /// CR_Replace values in LI. Collect end points for restoring the live range
+ /// after joining.
+ void pruneValues(JoinVals &Other, SmallVectorImpl<SlotIndex> &EndPoints);
+
+ /// Erase any machine instructions that have been coalesced away.
+ /// Add erased instructions to ErasedInstrs.
+ /// Add foreign virtual registers to ShrinkRegs if their live range ended at
+ /// the erased instrs.
+ void eraseInstrs(SmallPtrSet<MachineInstr*, 8> &ErasedInstrs,
+ SmallVectorImpl<unsigned> &ShrinkRegs);
+
+ /// Get the value assignments suitable for passing to LiveInterval::join.
+ const int *getAssignments() const { return Assignments.data(); }
+};
+} // end anonymous namespace
+
+/// Compute the bitmask of lanes actually written by DefMI.
+/// Set Redef if there are any partial register definitions that depend on the
+/// previous value of the register.
+unsigned JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef) {
+ unsigned L = 0;
+ for (ConstMIOperands MO(DefMI); MO.isValid(); ++MO) {
+ if (!MO->isReg() || MO->getReg() != LI.reg || !MO->isDef())
+ continue;
+ L |= TRI->getSubRegIndexLaneMask(
+ TRI->composeSubRegIndices(SubIdx, MO->getSubReg()));
+ if (MO->readsReg())
+ Redef = true;
+ }
+ return L;
+}
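// A stand-alone sketch of the accumulation above, using invented operand
// records and plain uint32_t lane masks in place of the LLVM operand and
// TargetRegisterInfo APIs; illustrative only, not part of this patch.
#include <cstdint>
#include <vector>

struct FakeOperand {
  unsigned Reg;         // register touched by this operand
  std::uint32_t Lanes;  // lane mask, already composed with SubIdx
  bool IsDef;
  bool ReadsReg;        // a partial <def> also reads the old value
};

static std::uint32_t writeLanes(const std::vector<FakeOperand> &Ops,
                                unsigned Reg, bool &Redef) {
  std::uint32_t L = 0;
  for (const FakeOperand &MO : Ops) {
    if (!MO.IsDef || MO.Reg != Reg)
      continue;
    L |= MO.Lanes;      // accumulate every lane written by a def of Reg
    if (MO.ReadsReg)
      Redef = true;     // read-modify-write: the old lane values matter
  }
  return L;
}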
- SmallVector<MachineInstr*, 8> DupCopies;
- SmallVector<MachineInstr*, 8> DeadCopies;
+/// Find the ultimate value that VNI was copied from.
+VNInfo *JoinVals::stripCopies(VNInfo *VNI) {
+ while (!VNI->isPHIDef()) {
+ MachineInstr *MI = Indexes->getInstructionFromIndex(VNI->def);
+ assert(MI && "No defining instruction");
+ if (!MI->isFullCopy())
+ break;
+ unsigned Reg = MI->getOperand(1).getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ break;
+ LiveRangeQuery LRQ(LIS->getInterval(Reg), VNI->def);
+ if (!LRQ.valueIn())
+ break;
+ VNI = LRQ.valueIn();
+ }
+ return VNI;
+}
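// The same walk in miniature, with an invented value node standing in for
// VNInfo plus LiveRangeQuery; illustrative only, not part of this patch.
struct ValNode {
  bool IsPHIDef;
  const ValNode *CopiedFrom; // non-null only for a full copy of a live value
};

static const ValNode *stripValNodeCopies(const ValNode *V) {
  // Walk up the chain of full copies until a PHI or a real def is reached.
  while (!V->IsPHIDef && V->CopiedFrom)
    V = V->CopiedFrom;
  return V;
}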
- LiveInterval &LHS = LIS->getOrCreateInterval(CP.getDstReg());
- DEBUG(dbgs() << "\t\tLHS = " << PrintReg(CP.getDstReg(), TRI) << ' ' << LHS
- << '\n');
+/// Analyze ValNo in this live range, and set all fields of Vals[ValNo].
+/// Return a conflict resolution when possible, but leave the hard cases as
+/// CR_Unresolved.
+/// Recursively calls computeAssignment() on this and Other, guaranteeing that
+/// both OtherVNI and RedefVNI have been analyzed and mapped before returning.
+/// The recursion always goes upwards in the dominator tree, making loops
+/// impossible.
+JoinVals::ConflictResolution
+JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
+ Val &V = Vals[ValNo];
+ assert(!V.isAnalyzed() && "Value has already been analyzed!");
+ VNInfo *VNI = LI.getValNumInfo(ValNo);
+ if (VNI->isUnused()) {
+ V.WriteLanes = ~0u;
+ return CR_Keep;
+ }
- // Loop over the value numbers of the LHS, seeing if any are defined from
- // the RHS.
- for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- if (VNI->isUnused() || VNI->isPHIDef())
- continue;
- MachineInstr *MI = LIS->getInstructionFromIndex(VNI->def);
- assert(MI && "Missing def");
- if (!MI->isCopyLike() && !MI->isImplicitDef()) // Src not defined by a copy?
- continue;
+ // Get the instruction defining this value, compute the lanes written.
+ const MachineInstr *DefMI = 0;
+ if (VNI->isPHIDef()) {
+ // Conservatively assume that all lanes in a PHI are valid.
+ V.ValidLanes = V.WriteLanes = TRI->getSubRegIndexLaneMask(SubIdx);
+ } else {
+ DefMI = Indexes->getInstructionFromIndex(VNI->def);
+ bool Redef = false;
+ V.ValidLanes = V.WriteLanes = computeWriteLanes(DefMI, Redef);
+
+ // If this is a read-modify-write instruction, there may be more valid
+ // lanes than the ones written by this instruction.
+ // This only covers partial redef operands. DefMI may have normal use
+ // operands reading the register. They don't contribute valid lanes.
+ //
+ // This adds ssub1 to the set of valid lanes in %src:
+ //
+ // %src:ssub1<def> = FOO
+ //
+ // This leaves only ssub1 valid, making any other lanes undef:
+ //
+ // %src:ssub1<def,read-undef> = FOO %src:ssub2
+ //
+ // The <read-undef> flag on the def operand means that old lane values are
+ // not important.
+ if (Redef) {
+ V.RedefVNI = LiveRangeQuery(LI, VNI->def).valueIn();
+ assert(V.RedefVNI && "Instruction is reading nonexistent value");
+ computeAssignment(V.RedefVNI->id, Other);
+ V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes;
+ }
- // Figure out the value # from the RHS.
- VNInfo *OtherVNI = RHS.getVNInfoBefore(VNI->def);
- // The copy could be to an aliased physreg.
- if (!OtherVNI)
- continue;
+ // An IMPLICIT_DEF writes undef values.
+ if (DefMI->isImplicitDef()) {
+ V.IsImplicitDef = true;
+ V.ValidLanes &= ~V.WriteLanes;
+ }
+ }
- // DstReg is known to be a register in the LHS interval. If the src is
- // from the RHS interval, we can use its value #.
- if (CP.isCoalescable(MI))
- DeadCopies.push_back(MI);
- else if (!RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, OtherVNI,
- DupCopies))
- continue;
+ // Find the value in Other that overlaps VNI->def, if any.
+ LiveRangeQuery OtherLRQ(Other.LI, VNI->def);
+
+ // It is possible that both values are defined by the same instruction, or
+ // the values are PHIs defined in the same block. When that happens, the two
+ // values should be merged into one, but not into any preceding value.
+ // The first value defined or visited gets CR_Keep, the other gets CR_Merge.
+ if (VNInfo *OtherVNI = OtherLRQ.valueDefined()) {
+ assert(SlotIndex::isSameInstr(VNI->def, OtherVNI->def) && "Broken LRQ");
+
+ // One value stays, the other is merged. Keep the earlier one, or the first
+ // one we see.
+ if (OtherVNI->def < VNI->def)
+ Other.computeAssignment(OtherVNI->id, *this);
+ else if (VNI->def < OtherVNI->def && OtherLRQ.valueIn()) {
+ // This is an early-clobber def overlapping a live-in value in the other
+ // register. Not mergeable.
+ V.OtherVNI = OtherLRQ.valueIn();
+ return CR_Impossible;
+ }
+ V.OtherVNI = OtherVNI;
+ Val &OtherV = Other.Vals[OtherVNI->id];
+ // Keep this value, check for conflicts when analyzing OtherVNI.
+ if (!OtherV.isAnalyzed())
+ return CR_Keep;
+ // Both sides have been analyzed now.
+ // Allow overlapping PHI values. Any real interference would show up in a
+ // predecessor, the PHI itself can't introduce any conflicts.
+ if (VNI->isPHIDef())
+ return CR_Merge;
+ if (V.ValidLanes & OtherV.ValidLanes)
+ // Overlapping lanes can't be resolved.
+ return CR_Impossible;
+ else
+ return CR_Merge;
+ }
- LHSValsDefinedFromRHS[VNI] = OtherVNI;
+ // No simultaneous def. Is Other live at the def?
+ V.OtherVNI = OtherLRQ.valueIn();
+ if (!V.OtherVNI)
+ // No overlap, no conflict.
+ return CR_Keep;
+
+ assert(!SlotIndex::isSameInstr(VNI->def, V.OtherVNI->def) && "Broken LRQ");
+
+ // We have overlapping values, or possibly a kill of Other.
+ // Recursively compute assignments up the dominator tree.
+ Other.computeAssignment(V.OtherVNI->id, *this);
+ const Val &OtherV = Other.Vals[V.OtherVNI->id];
+
+ // Allow overlapping PHI values. Any real interference would show up in a
+ // predecessor, the PHI itself can't introduce any conflicts.
+ if (VNI->isPHIDef())
+ return CR_Replace;
+
+ // Check for simple erasable conflicts.
+ if (DefMI->isImplicitDef())
+ return CR_Erase;
+
+ // Include the non-conflict where DefMI is a coalescable copy that kills
+ // OtherVNI. We still want the copy erased and value numbers merged.
+ if (CP.isCoalescable(DefMI)) {
+ // Some of the lanes copied from OtherVNI may be undef, making them undef
+ // here too.
+ V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
+ return CR_Erase;
}
- // Loop over the value numbers of the RHS, seeing if any are defined from
- // the LHS.
- for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- if (VNI->isUnused() || VNI->isPHIDef())
- continue;
- MachineInstr *MI = LIS->getInstructionFromIndex(VNI->def);
- assert(MI && "Missing def");
- if (!MI->isCopyLike() && !MI->isImplicitDef()) // Src not defined by a copy?
- continue;
+ // This may not be a real conflict if DefMI simply kills Other and defines
+ // VNI.
+ if (OtherLRQ.isKill() && OtherLRQ.endPoint() <= VNI->def)
+ return CR_Keep;
+
+ // Handle the case where VNI and OtherVNI can be proven to be identical:
+ //
+ // %other = COPY %ext
+ // %this = COPY %ext <-- Erase this copy
+ //
+ if (DefMI->isFullCopy() && !CP.isPartial() &&
+ stripCopies(VNI) == stripCopies(V.OtherVNI))
+ return CR_Erase;
+
+ // If the lanes written by this instruction were all undef in OtherVNI, it is
+ // still safe to join the live ranges. This can't be done with a simple value
+ // mapping, though - OtherVNI will map to multiple values:
+ //
+ // 1 %dst:ssub0 = FOO <-- OtherVNI
+ // 2 %src = BAR <-- VNI
+ // 3 %dst:ssub1 = COPY %src<kill> <-- Eliminate this copy.
+ // 4 BAZ %dst<kill>
+ // 5 QUUX %src<kill>
+ //
+ // Here OtherVNI will map to itself in [1;2), but to VNI in [2;5). CR_Replace
+ // handles this complex value mapping.
+ if ((V.WriteLanes & OtherV.ValidLanes) == 0)
+ return CR_Replace;
+
+ // If the other live range is killed by DefMI and the live ranges are still
+ // overlapping, it must be because we're looking at an early clobber def:
+ //
+ // %dst<def,early-clobber> = ASM %src<kill>
+ //
+ // In this case, it is illegal to merge the two live ranges since the early
+ // clobber def would clobber %src before it was read.
+ if (OtherLRQ.isKill()) {
+    // The case where the def doesn't overlap the kill is handled above.
+ assert(VNI->def.isEarlyClobber() &&
+ "Only early clobber defs can overlap a kill");
+ return CR_Impossible;
+ }
- // Figure out the value # from the LHS.
- VNInfo *OtherVNI = LHS.getVNInfoBefore(VNI->def);
- // The copy could be to an aliased physreg.
- if (!OtherVNI)
- continue;
+ // VNI is clobbering live lanes in OtherVNI, but there is still the
+ // possibility that no instructions actually read the clobbered lanes.
+ // If we're clobbering all the lanes in OtherVNI, at least one must be read.
+ // Otherwise Other.LI wouldn't be live here.
+ if ((TRI->getSubRegIndexLaneMask(Other.SubIdx) & ~V.WriteLanes) == 0)
+ return CR_Impossible;
+
+ // We need to verify that no instructions are reading the clobbered lanes. To
+ // save compile time, we'll only check that locally. Don't allow the tainted
+ // value to escape the basic block.
+ MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
+ if (OtherLRQ.endPoint() >= Indexes->getMBBEndIdx(MBB))
+ return CR_Impossible;
+
+ // There are still some things that could go wrong besides clobbered lanes
+  // being read; for example, OtherVNI may be only partially redefined in MBB,
+ // and some clobbered lanes could escape the block. Save this analysis for
+ // resolveConflicts() when all values have been mapped. We need to know
+ // RedefVNI and WriteLanes for any later defs in MBB, and we can't compute
+ // that now - the recursive analyzeValue() calls must go upwards in the
+ // dominator tree.
+ return CR_Unresolved;
+}
- // DstReg is known to be a register in the RHS interval. If the src is
- // from the LHS interval, we can use its value #.
- if (CP.isCoalescable(MI))
- DeadCopies.push_back(MI);
- else if (!RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, OtherVNI,
- DupCopies))
- continue;
+/// Compute the value assignment for ValNo in LI.
+/// This may be called recursively by analyzeValue(), but never for a ValNo on
+/// the stack.
+void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) {
+ Val &V = Vals[ValNo];
+ if (V.isAnalyzed()) {
+ // Recursion should always move up the dominator tree, so ValNo is not
+ // supposed to reappear before it has been assigned.
+ assert(Assignments[ValNo] != -1 && "Bad recursion?");
+ return;
+ }
+ switch ((V.Resolution = analyzeValue(ValNo, Other))) {
+ case CR_Erase:
+ case CR_Merge:
+ // Merge this ValNo into OtherVNI.
+ assert(V.OtherVNI && "OtherVNI not assigned, can't merge.");
+ assert(Other.Vals[V.OtherVNI->id].isAnalyzed() && "Missing recursion");
+ Assignments[ValNo] = Other.Assignments[V.OtherVNI->id];
+ DEBUG(dbgs() << "\t\tmerge " << PrintReg(LI.reg) << ':' << ValNo << '@'
+ << LI.getValNumInfo(ValNo)->def << " into "
+ << PrintReg(Other.LI.reg) << ':' << V.OtherVNI->id << '@'
+ << V.OtherVNI->def << " --> @"
+ << NewVNInfo[Assignments[ValNo]]->def << '\n');
+ break;
+ case CR_Replace:
+ case CR_Unresolved:
+ // The other value is going to be pruned if this join is successful.
+ assert(V.OtherVNI && "OtherVNI not assigned, can't prune");
+ Other.Vals[V.OtherVNI->id].Pruned = true;
+ // Fall through.
+ default:
+ // This value number needs to go in the final joined live range.
+ Assignments[ValNo] = NewVNInfo.size();
+ NewVNInfo.push_back(LI.getValNumInfo(ValNo));
+ break;
+ }
+}
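// The mapping rule above, reduced to a toy (not part of this patch): values
// resolved as CR_Erase or CR_Merge reuse the slot already assigned to the
// value they merge into; every other value gets a fresh slot.
#include <vector>

static int assignSlot(std::vector<int> &Assignments,
                      std::vector<int> &NewVals, // stand-in for NewVNInfo
                      int ValNo, int MergedIntoSlot /* -1 if none */) {
  if (MergedIntoSlot >= 0)
    return Assignments[ValNo] = MergedIntoSlot;  // CR_Erase / CR_Merge
  Assignments[ValNo] = static_cast<int>(NewVals.size());
  NewVals.push_back(ValNo);                      // CR_Keep / CR_Replace
  return Assignments[ValNo];
}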
- RHSValsDefinedFromLHS[VNI] = OtherVNI;
+bool JoinVals::mapValues(JoinVals &Other) {
+ for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) {
+ computeAssignment(i, Other);
+ if (Vals[i].Resolution == CR_Impossible) {
+ DEBUG(dbgs() << "\t\tinterference at " << PrintReg(LI.reg) << ':' << i
+ << '@' << LI.getValNumInfo(i)->def << '\n');
+ return false;
+ }
}
+ return true;
+}
- LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
- RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
- NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
+/// Assuming ValNo is going to clobber some valid lanes in Other.LI, compute
+/// the extent of the tainted lanes in the block.
+///
+/// Multiple values in Other.LI can be affected since partial redefinitions can
+/// preserve previously tainted lanes.
+///
+/// 1 %dst = VLOAD <-- Define all lanes in %dst
+/// 2 %src = FOO <-- ValNo to be joined with %dst:ssub0
+/// 3 %dst:ssub1 = BAR <-- Partial redef doesn't clear taint in ssub0
+/// 4 %dst:ssub0 = COPY %src <-- Conflict resolved, ssub0 wasn't read
+///
+/// For each ValNo in Other that is affected, add an (EndIndex, TaintedLanes)
+/// entry to TaintedVals.
+///
+/// Returns false if the tainted lanes extend beyond the basic block.
+bool JoinVals::
+taintExtent(unsigned ValNo, unsigned TaintedLanes, JoinVals &Other,
+ SmallVectorImpl<std::pair<SlotIndex, unsigned> > &TaintExtent) {
+ VNInfo *VNI = LI.getValNumInfo(ValNo);
+ MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
+ SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB);
+
+ // Scan Other.LI from VNI.def to MBBEnd.
+ LiveInterval::iterator OtherI = Other.LI.find(VNI->def);
+ assert(OtherI != Other.LI.end() && "No conflict?");
+ do {
+ // OtherI is pointing to a tainted value. Abort the join if the tainted
+ // lanes escape the block.
+ SlotIndex End = OtherI->end;
+ if (End >= MBBEnd) {
+ DEBUG(dbgs() << "\t\ttaints global " << PrintReg(Other.LI.reg) << ':'
+ << OtherI->valno->id << '@' << OtherI->start << '\n');
+ return false;
+ }
+ DEBUG(dbgs() << "\t\ttaints local " << PrintReg(Other.LI.reg) << ':'
+ << OtherI->valno->id << '@' << OtherI->start
+ << " to " << End << '\n');
+ // A dead def is not a problem.
+ if (End.isDead())
+ break;
+ TaintExtent.push_back(std::make_pair(End, TaintedLanes));
+
+ // Check for another def in the MBB.
+ if (++OtherI == Other.LI.end() || OtherI->start >= MBBEnd)
+ break;
+
+ // Lanes written by the new def are no longer tainted.
+ const Val &OV = Other.Vals[OtherI->valno->id];
+ TaintedLanes &= ~OV.WriteLanes;
+ if (!OV.RedefVNI)
+ break;
+ } while (TaintedLanes);
+ return true;
+}
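// A compressed model of that scan (invented segment records instead of
// LiveInterval iterators; illustrative only, not part of this patch): record
// (end, lanes) pairs and clear lanes that the next def rewrites.
#include <cstdint>
#include <utility>
#include <vector>

struct TaintSeg {
  int End;                       // where this value's range ends
  bool EscapesBlock;             // range reaches the end of the block
  std::uint32_t NextWriteLanes;  // lanes written by the following def
};

static bool taintExtentSketch(const std::vector<TaintSeg> &Segs,
                              std::uint32_t Tainted,
                              std::vector<std::pair<int, std::uint32_t> > &Out) {
  for (const TaintSeg &S : Segs) {
    if (S.EscapesBlock)
      return false;               // tainted lanes leave the block: abort join
    Out.push_back(std::make_pair(S.End, Tainted));
    Tainted &= ~S.NextWriteLanes; // rewritten lanes are no longer tainted
    if (!Tainted)
      break;                      // every tainted lane has been overwritten
  }
  return true;
}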
- for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+/// Return true if MI uses any of the given Lanes from Reg.
+/// This does not include partial redefinitions of Reg.
+bool JoinVals::usesLanes(MachineInstr *MI, unsigned Reg, unsigned SubIdx,
+ unsigned Lanes) {
+ if (MI->isDebugValue())
+ return false;
+ for (ConstMIOperands MO(MI); MO.isValid(); ++MO) {
+ if (!MO->isReg() || MO->isDef() || MO->getReg() != Reg)
continue;
- ComputeUltimateVN(VNI, NewVNInfo,
- LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
- LHSValNoAssignments, RHSValNoAssignments);
- }
- for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+ if (!MO->readsReg())
continue;
- // If this value number isn't a copy from the LHS, it's a new number.
- if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
- NewVNInfo.push_back(VNI);
- RHSValNoAssignments[VN] = NewVNInfo.size()-1;
+ if (Lanes & TRI->getSubRegIndexLaneMask(
+ TRI->composeSubRegIndices(SubIdx, MO->getSubReg())))
+ return true;
+ }
+ return false;
+}
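// At its core the test is a mask intersection; a minimal analogue with
// invented use records, illustrative only, not part of this patch.
#include <cstdint>
#include <vector>

struct UseRecord {
  unsigned Reg;
  std::uint32_t ReadLanes; // lanes this use reads, composed with SubIdx
};

static bool usesTaintedLanes(const std::vector<UseRecord> &Uses,
                             unsigned Reg, std::uint32_t Lanes) {
  for (const UseRecord &MO : Uses)
    if (MO.Reg == Reg && (MO.ReadLanes & Lanes))
      return true;  // some tainted lane really is read
  return false;
}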
+
+bool JoinVals::resolveConflicts(JoinVals &Other) {
+ for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) {
+ Val &V = Vals[i];
+    assert(V.Resolution != CR_Impossible && "Unresolvable conflict");
+ if (V.Resolution != CR_Unresolved)
continue;
- }
+ DEBUG(dbgs() << "\t\tconflict at " << PrintReg(LI.reg) << ':' << i
+ << '@' << LI.getValNumInfo(i)->def << '\n');
+ ++NumLaneConflicts;
+ assert(V.OtherVNI && "Inconsistent conflict resolution.");
+ VNInfo *VNI = LI.getValNumInfo(i);
+ const Val &OtherV = Other.Vals[V.OtherVNI->id];
+
+ // VNI is known to clobber some lanes in OtherVNI. If we go ahead with the
+ // join, those lanes will be tainted with a wrong value. Get the extent of
+ // the tainted lanes.
+ unsigned TaintedLanes = V.WriteLanes & OtherV.ValidLanes;
+ SmallVector<std::pair<SlotIndex, unsigned>, 8> TaintExtent;
+ if (!taintExtent(i, TaintedLanes, Other, TaintExtent))
+ // Tainted lanes would extend beyond the basic block.
+ return false;
- ComputeUltimateVN(VNI, NewVNInfo,
- RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
- RHSValNoAssignments, LHSValNoAssignments);
- }
+ assert(!TaintExtent.empty() && "There should be at least one conflict.");
- // Armed with the mappings of LHS/RHS values to ultimate values, walk the
- // interval lists to see if these intervals are coalescable.
- LiveInterval::const_iterator I = LHS.begin();
- LiveInterval::const_iterator IE = LHS.end();
- LiveInterval::const_iterator J = RHS.begin();
- LiveInterval::const_iterator JE = RHS.end();
-
- // Collect interval end points that will no longer be kills.
- SmallVector<MachineInstr*, 8> LHSOldKills;
- SmallVector<MachineInstr*, 8> RHSOldKills;
-
- // Skip ahead until the first place of potential sharing.
- if (I != IE && J != JE) {
- if (I->start < J->start) {
- I = std::upper_bound(I, IE, J->start);
- if (I != LHS.begin()) --I;
- } else if (J->start < I->start) {
- J = std::upper_bound(J, JE, I->start);
- if (J != RHS.begin()) --J;
+ // Now look at the instructions from VNI->def to TaintExtent (inclusive).
+ MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
+ MachineBasicBlock::iterator MI = MBB->begin();
+ if (!VNI->isPHIDef()) {
+ MI = Indexes->getInstructionFromIndex(VNI->def);
+ // No need to check the instruction defining VNI for reads.
+ ++MI;
}
- }
-
- while (I != IE && J != JE) {
- // Determine if these two live ranges overlap.
- // If so, check value # info to determine if they are really different.
- if (I->end > J->start && J->end > I->start) {
- // If the live range overlap will map to the same value number in the
- // result liverange, we can still coalesce them. If not, we can't.
- if (LHSValNoAssignments[I->valno->id] !=
- RHSValNoAssignments[J->valno->id])
+ assert(!SlotIndex::isSameInstr(VNI->def, TaintExtent.front().first) &&
+ "Interference ends on VNI->def. Should have been handled earlier");
+ MachineInstr *LastMI =
+ Indexes->getInstructionFromIndex(TaintExtent.front().first);
+ assert(LastMI && "Range must end at a proper instruction");
+ unsigned TaintNum = 0;
+    for (;;) {
+ assert(MI != MBB->end() && "Bad LastMI");
+ if (usesLanes(MI, Other.LI.reg, Other.SubIdx, TaintedLanes)) {
+ DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI);
return false;
-
- // Extended live ranges should no longer be killed.
- if (!I->end.isBlock() && I->end < J->end)
- if (MachineInstr *MI = LIS->getInstructionFromIndex(I->end))
- LHSOldKills.push_back(MI);
- if (!J->end.isBlock() && J->end < I->end)
- if (MachineInstr *MI = LIS->getInstructionFromIndex(J->end))
- RHSOldKills.push_back(MI);
+ }
+ // LastMI is the last instruction to use the current value.
+ if (&*MI == LastMI) {
+ if (++TaintNum == TaintExtent.size())
+ break;
+ LastMI = Indexes->getInstructionFromIndex(TaintExtent[TaintNum].first);
+ assert(LastMI && "Range must end at a proper instruction");
+ TaintedLanes = TaintExtent[TaintNum].second;
+ }
+ ++MI;
}
- if (I->end < J->end)
- ++I;
- else
- ++J;
- }
-
- // Clear kill flags where live ranges are extended.
- while (!LHSOldKills.empty())
- LHSOldKills.pop_back_val()->clearRegisterKills(LHS.reg, TRI);
- while (!RHSOldKills.empty())
- RHSOldKills.pop_back_val()->clearRegisterKills(RHS.reg, TRI);
-
- if (LHSValNoAssignments.empty())
- LHSValNoAssignments.push_back(-1);
- if (RHSValNoAssignments.empty())
- RHSValNoAssignments.push_back(-1);
-
- // Now erase all the redundant copies.
- for (unsigned i = 0, e = DeadCopies.size(); i != e; ++i) {
- MachineInstr *MI = DeadCopies[i];
- if (!ErasedInstrs.insert(MI))
- continue;
- DEBUG(dbgs() << "\t\terased:\t" << LIS->getInstructionIndex(MI)
- << '\t' << *MI);
- LIS->RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
+ // The tainted lanes are unused.
+ V.Resolution = CR_Replace;
+ ++NumLaneResolves;
}
+ return true;
+}
- SmallVector<unsigned, 8> SourceRegisters;
- for (SmallVector<MachineInstr*, 8>::iterator I = DupCopies.begin(),
- E = DupCopies.end(); I != E; ++I) {
- MachineInstr *MI = *I;
- if (!ErasedInstrs.insert(MI))
- continue;
+// Determine if ValNo is a copy of a value number in LI or Other.LI that will
+// be pruned:
+//
+// %dst = COPY %src
+//   %src = COPY %dst  <-- This value will be pruned.
+// %dst = COPY %src <-- This value is a copy of a pruned value.
+//
+bool JoinVals::isPrunedValue(unsigned ValNo, JoinVals &Other) {
+ Val &V = Vals[ValNo];
+ if (V.Pruned || V.PrunedComputed)
+ return V.Pruned;
+
+ if (V.Resolution != CR_Erase && V.Resolution != CR_Merge)
+ return V.Pruned;
+
+ // Follow copies up the dominator tree and check if any intermediate value
+ // has been pruned.
+ V.PrunedComputed = true;
+ V.Pruned = Other.isPrunedValue(V.OtherVNI->id, *this);
+ return V.Pruned;
+}
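// The Pruned/PrunedComputed pair is memoized recursion up an acyclic copy
// chain; the same pattern in isolation, not part of this patch.
struct PruneNode {
  bool Pruned;
  bool PrunedComputed;
  PruneNode *CopyOf; // value this one was copied from, or null
};

static bool isPrunedNode(PruneNode *N) {
  if (N->Pruned || N->PrunedComputed)
    return N->Pruned;            // memoized answer
  N->PrunedComputed = true;      // safe to set first: the chain is acyclic
  if (N->CopyOf)
    N->Pruned = isPrunedNode(N->CopyOf);
  return N->Pruned;
}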
- // If MI is a copy, then we have pretended that the assignment to B in
- // A = X
- // B = X
- // was actually a copy from A. Now that we decided to coalesce A and B,
- // transform the code into
- // A = X
- // In the case of the implicit_def, we just have to remove it.
- if (!MI->isImplicitDef()) {
- unsigned Src = MI->getOperand(1).getReg();
- SourceRegisters.push_back(Src);
+void JoinVals::pruneValues(JoinVals &Other,
+ SmallVectorImpl<SlotIndex> &EndPoints) {
+ for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) {
+ SlotIndex Def = LI.getValNumInfo(i)->def;
+ switch (Vals[i].Resolution) {
+ case CR_Keep:
+ break;
+ case CR_Replace: {
+ // This value takes precedence over the value in Other.LI.
+ LIS->pruneValue(&Other.LI, Def, &EndPoints);
+ // Check if we're replacing an IMPLICIT_DEF value. The IMPLICIT_DEF
+ // instructions are only inserted to provide a live-out value for PHI
+ // predecessors, so the instruction should simply go away once its value
+ // has been replaced.
+ Val &OtherV = Other.Vals[Vals[i].OtherVNI->id];
+ bool EraseImpDef = OtherV.IsImplicitDef && OtherV.Resolution == CR_Keep;
+ if (!Def.isBlock()) {
+ // Remove <def,read-undef> flags. This def is now a partial redef.
+ // Also remove <def,dead> flags since the joined live range will
+ // continue past this instruction.
+ for (MIOperands MO(Indexes->getInstructionFromIndex(Def));
+ MO.isValid(); ++MO)
+ if (MO->isReg() && MO->isDef() && MO->getReg() == LI.reg) {
+ MO->setIsUndef(EraseImpDef);
+ MO->setIsDead(false);
+ }
+ // This value will reach instructions below, but we need to make sure
+ // the live range also reaches the instruction at Def.
+ if (!EraseImpDef)
+ EndPoints.push_back(Def);
+ }
+ DEBUG(dbgs() << "\t\tpruned " << PrintReg(Other.LI.reg) << " at " << Def
+ << ": " << Other.LI << '\n');
+ break;
+ }
+ case CR_Erase:
+ case CR_Merge:
+ if (isPrunedValue(i, Other)) {
+ // This value is ultimately a copy of a pruned value in LI or Other.LI.
+ // We can no longer trust the value mapping computed by
+ // computeAssignment(), the value that was originally copied could have
+ // been replaced.
+ LIS->pruneValue(&LI, Def, &EndPoints);
+ DEBUG(dbgs() << "\t\tpruned all of " << PrintReg(LI.reg) << " at "
+ << Def << ": " << LI << '\n');
+ }
+ break;
+ case CR_Unresolved:
+ case CR_Impossible:
+ llvm_unreachable("Unresolved conflicts");
}
- LIS->RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
}
+}
- // If B = X was the last use of X in a liverange, we have to shrink it now
- // that B = X is gone.
- for (SmallVector<unsigned, 8>::iterator I = SourceRegisters.begin(),
- E = SourceRegisters.end(); I != E; ++I) {
- LIS->shrinkToUses(&LIS->getInterval(*I));
+void JoinVals::eraseInstrs(SmallPtrSet<MachineInstr*, 8> &ErasedInstrs,
+ SmallVectorImpl<unsigned> &ShrinkRegs) {
+ for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) {
+ // Get the def location before markUnused() below invalidates it.
+ SlotIndex Def = LI.getValNumInfo(i)->def;
+ switch (Vals[i].Resolution) {
+ case CR_Keep:
+ // If an IMPLICIT_DEF value is pruned, it doesn't serve a purpose any
+ // longer. The IMPLICIT_DEF instructions are only inserted by
+ // PHIElimination to guarantee that all PHI predecessors have a value.
+ if (!Vals[i].IsImplicitDef || !Vals[i].Pruned)
+ break;
+ // Remove value number i from LI. Note that this VNInfo is still present
+ // in NewVNInfo, so it will appear as an unused value number in the final
+ // joined interval.
+ LI.getValNumInfo(i)->markUnused();
+ LI.removeValNo(LI.getValNumInfo(i));
+ DEBUG(dbgs() << "\t\tremoved " << i << '@' << Def << ": " << LI << '\n');
+ // FALL THROUGH.
+
+ case CR_Erase: {
+ MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
+ assert(MI && "No instruction to erase");
+ if (MI->isCopy()) {
+ unsigned Reg = MI->getOperand(1).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg) &&
+ Reg != CP.getSrcReg() && Reg != CP.getDstReg())
+ ShrinkRegs.push_back(Reg);
+ }
+ ErasedInstrs.insert(MI);
+ DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI);
+ LIS->RemoveMachineInstrFromMaps(MI);
+ MI->eraseFromParent();
+ break;
+ }
+ default:
+ break;
+ }
}
+}
+
+bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
+ SmallVector<VNInfo*, 16> NewVNInfo;
+ LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
+ LiveInterval &LHS = LIS->getInterval(CP.getDstReg());
+ JoinVals RHSVals(RHS, CP.getSrcIdx(), NewVNInfo, CP, LIS, TRI);
+ JoinVals LHSVals(LHS, CP.getDstIdx(), NewVNInfo, CP, LIS, TRI);
+
+ DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS
+ << "\n\t\tLHS = " << PrintReg(CP.getDstReg()) << ' ' << LHS
+ << '\n');
+
+ // First compute NewVNInfo and the simple value mappings.
+ // Detect impossible conflicts early.
+ if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
+ return false;
+
+ // Some conflicts can only be resolved after all values have been mapped.
+ if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
+ return false;
- // If we get here, we know that we can coalesce the live ranges. Ask the
- // intervals to coalesce themselves now.
- LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
+ // All clear, the live ranges can be merged.
+
+ // The merging algorithm in LiveInterval::join() can't handle conflicting
+ // value mappings, so we need to remove any live ranges that overlap a
+ // CR_Replace resolution. Collect a set of end points that can be used to
+ // restore the live range after joining.
+ SmallVector<SlotIndex, 8> EndPoints;
+ LHSVals.pruneValues(RHSVals, EndPoints);
+ RHSVals.pruneValues(LHSVals, EndPoints);
+
+ // Erase COPY and IMPLICIT_DEF instructions. This may cause some external
+ // registers to require trimming.
+ SmallVector<unsigned, 8> ShrinkRegs;
+ LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
+ RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
+ while (!ShrinkRegs.empty())
+ LIS->shrinkToUses(&LIS->getInterval(ShrinkRegs.pop_back_val()));
+
+ // Join RHS into LHS.
+ LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo,
MRI);
+
+ // Kill flags are going to be wrong if the live ranges were overlapping.
+ // Eventually, we should simply clear all kill flags when computing live
+ // ranges. They are reinserted after register allocation.
+ MRI->clearKillFlags(LHS.reg);
+ MRI->clearKillFlags(RHS.reg);
+
+ if (EndPoints.empty())
+ return true;
+
+ // Recompute the parts of the live range we had to remove because of
+ // CR_Replace conflicts.
+ DEBUG(dbgs() << "\t\trestoring liveness to " << EndPoints.size()
+ << " points: " << LHS << '\n');
+ LIS->extendToIndices(&LHS, EndPoints);
return true;
}
+/// joinIntervals - Attempt to join these two intervals. On failure, this
+/// returns false.
+bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) {
+ return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(CP);
+}
+
namespace {
// DepthMBBCompare - Comparison predicate that sort first based on the loop
// depth of the basic block (the unsigned), and then on the MBB number.
@@ -1564,8 +2024,7 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
Loops = &getAnalysis<MachineLoopInfo>();
DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
- << "********** Function: "
- << ((Value*)MF->getFunction())->getName() << '\n');
+ << "********** Function: " << MF->getName() << '\n');
if (VerifyCoalescing)
MF->verify(this, "Before register coalescing");
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.h b/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
index 8a6df98..47c3df1 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
@@ -63,6 +63,13 @@ namespace llvm {
: TRI(tri), DstReg(0), SrcReg(0), DstIdx(0), SrcIdx(0),
Partial(false), CrossClass(false), Flipped(false), NewRC(0) {}
+ /// Create a CoalescerPair representing a virtreg-to-physreg copy.
+ /// No need to call setRegisters().
+ CoalescerPair(unsigned VirtReg, unsigned PhysReg,
+ const TargetRegisterInfo &tri)
+ : TRI(tri), DstReg(PhysReg), SrcReg(VirtReg), DstIdx(0), SrcIdx(0),
+ Partial(false), CrossClass(false), Flipped(false), NewRC(0) {}
+
/// setRegisters - set registers to match the copy instruction MI. Return
/// false if MI is not a coalescable copy instruction.
bool setRegisters(const MachineInstr*);
diff --git a/contrib/llvm/lib/CodeGen/RegisterPressure.cpp b/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
index 43448c8..543c426 100644
--- a/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
@@ -63,7 +63,8 @@ void RegisterPressure::decrease(const TargetRegisterClass *RC,
decreaseSetPressure(MaxSetPressure, RC, TRI);
}
-void RegisterPressure::dump(const TargetRegisterInfo *TRI) {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void RegisterPressure::dump(const TargetRegisterInfo *TRI) const {
dbgs() << "Live In: ";
for (unsigned i = 0, e = LiveInRegs.size(); i < e; ++i)
dbgs() << PrintReg(LiveInRegs[i], TRI) << " ";
@@ -78,6 +79,7 @@ void RegisterPressure::dump(const TargetRegisterInfo *TRI) {
<< '\n';
}
}
+#endif
/// Increase the current pressure as impacted by these physical registers and
/// bump the high water mark if needed.
@@ -320,10 +322,8 @@ struct RegisterOperands {
if (findReg(MO.getReg(), isVReg, DeadDefs, TRI) == DeadDefs.end())
DeadDefs.push_back(MO.getReg());
}
- else {
- if (findReg(MO.getReg(), isVReg, Defs, TRI) == Defs.end())
- Defs.push_back(MO.getReg());
- }
+ else if (findReg(MO.getReg(), isVReg, Defs, TRI) == Defs.end())
+ Defs.push_back(MO.getReg());
}
}
};
@@ -335,7 +335,7 @@ static void collectOperands(const MachineInstr *MI,
PhysRegOperands &PhysRegOpers,
VirtRegOperands &VirtRegOpers,
const TargetRegisterInfo *TRI,
- const RegisterClassInfo *RCI) {
+ const MachineRegisterInfo *MRI) {
for(ConstMIBundleOperands OperI(MI); OperI.isValid(); ++OperI) {
const MachineOperand &MO = *OperI;
if (!MO.isReg() || !MO.getReg())
@@ -343,7 +343,7 @@ static void collectOperands(const MachineInstr *MI,
if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
VirtRegOpers.collect(MO, TRI);
- else if (RCI->isAllocatable(MO.getReg()))
+ else if (MRI->isAllocatable(MO.getReg()))
PhysRegOpers.collect(MO, TRI);
}
// Remove redundant physreg dead defs.
@@ -449,7 +449,7 @@ bool RegPressureTracker::recede() {
PhysRegOperands PhysRegOpers;
VirtRegOperands VirtRegOpers;
- collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, RCI);
+ collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, MRI);
// Boost pressure for all dead defs together.
increasePhysRegPressure(PhysRegOpers.DeadDefs);
@@ -522,7 +522,7 @@ bool RegPressureTracker::advance() {
PhysRegOperands PhysRegOpers;
VirtRegOperands VirtRegOpers;
- collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, RCI);
+ collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, MRI);
// Kill liveness at last uses.
for (unsigned i = 0, e = PhysRegOpers.Uses.size(); i < e; ++i) {
@@ -664,7 +664,7 @@ void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) {
// Account for register pressure similar to RegPressureTracker::recede().
PhysRegOperands PhysRegOpers;
VirtRegOperands VirtRegOpers;
- collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, RCI);
+ collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, MRI);
// Boost max pressure for all dead defs together.
// Since CurrSetPressure and MaxSetPressure
@@ -674,9 +674,16 @@ void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) {
decreaseVirtRegPressure(VirtRegOpers.DeadDefs);
// Kill liveness at live defs.
- decreasePhysRegPressure(PhysRegOpers.Defs);
- decreaseVirtRegPressure(VirtRegOpers.Defs);
-
+ for (unsigned i = 0, e = PhysRegOpers.Defs.size(); i < e; ++i) {
+ unsigned Reg = PhysRegOpers.Defs[i];
+ if (!findReg(Reg, false, PhysRegOpers.Uses, TRI))
+ decreasePhysRegPressure(PhysRegOpers.Defs);
+ }
+ for (unsigned i = 0, e = VirtRegOpers.Defs.size(); i < e; ++i) {
+ unsigned Reg = VirtRegOpers.Defs[i];
+ if (!findReg(Reg, true, VirtRegOpers.Uses, TRI))
+ decreaseVirtRegPressure(VirtRegOpers.Defs);
+ }
// Generate liveness for uses.
for (unsigned i = 0, e = PhysRegOpers.Uses.size(); i < e; ++i) {
unsigned Reg = PhysRegOpers.Uses[i];
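// What the two def loops above express, in isolation: a def only kills
// liveness (and so lowers pressure) when MI does not also use the register.
// A minimal sketch assuming a per-register decrease; the hunk itself passes
// the whole Defs list to the existing decrease*Pressure helpers. Not part
// of this patch.
#include <algorithm>
#include <vector>

static void killLiveDefs(const std::vector<unsigned> &Defs,
                         const std::vector<unsigned> &Uses,
                         int &CurrPressure) {
  for (unsigned Reg : Defs)
    if (std::find(Uses.begin(), Uses.end(), Reg) == Uses.end())
      --CurrPressure; // one pressure unit per register, single set assumed
}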
@@ -750,7 +757,7 @@ void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) {
// Account for register pressure similar to RegPressureTracker::recede().
PhysRegOperands PhysRegOpers;
VirtRegOperands VirtRegOpers;
- collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, RCI);
+ collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, MRI);
// Kill liveness at last uses. Assume allocatable physregs are single-use
// rather than checking LiveIntervals.
diff --git a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
index d673794..5ec6564 100644
--- a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -92,9 +92,6 @@ void RegScavenger::enterBasicBlock(MachineBasicBlock *mbb) {
KillRegs.resize(NumPhysRegs);
DefRegs.resize(NumPhysRegs);
- // Create reserved registers bitvector.
- ReservedRegs = TRI->getReservedRegs(MF);
-
// Create callee-saved registers bitvector.
CalleeSavedRegs.resize(NumPhysRegs);
const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF);
@@ -225,9 +222,9 @@ void RegScavenger::getRegsUsed(BitVector &used, bool includeReserved) {
used = RegsAvailable;
used.flip();
if (includeReserved)
- used |= ReservedRegs;
+ used |= MRI->getReservedRegs();
else
- used.reset(ReservedRegs);
+ used.reset(MRI->getReservedRegs());
}
unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
index 752f8e4..9a65071 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -279,6 +279,7 @@ void SUnit::ComputeHeight() {
} while (!WorkList.empty());
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
/// a group of nodes flagged together.
void SUnit::dump(const ScheduleDAG *G) const {
@@ -336,6 +337,7 @@ void SUnit::dumpAll(const ScheduleDAG *G) const {
}
dbgs() << "\n";
}
+#endif
#ifndef NDEBUG
/// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index 9c1dba3..a4d4a93 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ScheduleDAGILP.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
@@ -30,6 +31,7 @@
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -44,14 +46,15 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineDominatorTree &mdt,
bool IsPostRAFlag,
LiveIntervals *lis)
- : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
- InstrItins(mf.getTarget().getInstrItineraryData()), LIS(lis),
- IsPostRA(IsPostRAFlag), UnitLatencies(false), CanHandleTerminators(false),
- LoopRegs(MDT), FirstDbgValue(0) {
+ : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), LIS(lis),
+ IsPostRA(IsPostRAFlag), CanHandleTerminators(false), FirstDbgValue(0) {
assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
DbgValues.clear();
assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
"Virtual registers must be removed prior to PostRA scheduling");
+
+ const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+ SchedModel.init(*ST.getSchedModel(), &ST, TII);
}
/// getUnderlyingObjectFromInt - This is the function that does the work of
@@ -68,7 +71,7 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) {
// object. We don't have to worry about the case where the
// object address is somehow being computed by the multiply,
// because our callers only care when the result is an
- // identifibale object.
+ // identifiable object.
if (U->getOpcode() != Instruction::Add ||
(!isa<ConstantInt>(U->getOperand(1)) &&
Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
@@ -135,10 +138,6 @@ static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
BB = bb;
- LoopRegs.Deps.clear();
- if (MachineLoop *ML = MLI.getLoopFor(BB))
- if (BB == ML->getLoopLatch())
- LoopRegs.VisitLoop(ML);
}
void ScheduleDAGInstrs::finishBlock() {
@@ -174,9 +173,6 @@ void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
EndIndex = endcount;
MISUnitMap.clear();
- // Check to see if the scheduler cares about latencies.
- UnitLatencies = forceUnitLatencies();
-
ScheduleDAG::clearDAG();
}
@@ -209,7 +205,7 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() {
if (Reg == 0) continue;
if (TRI->isPhysicalRegister(Reg))
- Uses[Reg].push_back(&ExitSU);
+ Uses[Reg].push_back(PhysRegSUOper(&ExitSU, -1));
else {
assert(!IsPostRA && "Virtual register encountered after regalloc.");
addVRegUseDeps(&ExitSU, i);
@@ -225,59 +221,44 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() {
E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
if (!Uses.contains(Reg))
- Uses[Reg].push_back(&ExitSU);
+ Uses[Reg].push_back(PhysRegSUOper(&ExitSU, -1));
}
}
}
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
-void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
- const MachineOperand &MO) {
+void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
+ const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
assert(MO.isDef() && "expect physreg def");
// Ask the target if address-backscheduling is desirable, and if so how much.
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
- unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
- unsigned DataLatency = SU->Latency;
for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
Alias.isValid(); ++Alias) {
if (!Uses.contains(*Alias))
continue;
- std::vector<SUnit*> &UseList = Uses[*Alias];
+ std::vector<PhysRegSUOper> &UseList = Uses[*Alias];
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
- SUnit *UseSU = UseList[i];
+ SUnit *UseSU = UseList[i].SU;
if (UseSU == SU)
continue;
- unsigned LDataLatency = DataLatency;
- // Optionally add in a special extra latency for nodes that
- // feed addresses.
- // TODO: Perhaps we should get rid of
- // SpecialAddressLatency and just move this into
- // adjustSchedDependency for the targets that care about it.
- if (SpecialAddressLatency != 0 && !UnitLatencies &&
- UseSU != &ExitSU) {
- MachineInstr *UseMI = UseSU->getInstr();
- const MCInstrDesc &UseMCID = UseMI->getDesc();
- int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
- assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
- if (RegUseIndex >= 0 &&
- (UseMI->mayLoad() || UseMI->mayStore()) &&
- (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
- UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
- LDataLatency += SpecialAddressLatency;
- }
- // Adjust the dependence latency using operand def/use
- // information (if any), and then allow the target to
- // perform its own adjustments.
- SDep dep(SU, SDep::Data, LDataLatency, *Alias);
- if (!UnitLatencies) {
- unsigned Latency = computeOperandLatency(SU, UseSU, dep);
- dep.setLatency(Latency);
-
- ST.adjustSchedDependency(SU, UseSU, dep);
- }
+
+ SDep dep(SU, SDep::Data, *Alias);
+
+ // Adjust the dependence latency using operand def/use information,
+ // then allow the target to perform its own adjustments.
+ int UseOp = UseList[i].OpIdx;
+ MachineInstr *RegUse = UseOp < 0 ? 0 : UseSU->getInstr();
+ dep.setLatency(
+ SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
+ RegUse, UseOp, /*FindMin=*/false));
+ dep.setMinLatency(
+ SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
+ RegUse, UseOp, /*FindMin=*/true));
+
+ ST.adjustSchedDependency(SU, UseSU, dep);
UseSU->addPred(dep);
}
}
@@ -301,20 +282,23 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
Alias.isValid(); ++Alias) {
if (!Defs.contains(*Alias))
continue;
- std::vector<SUnit *> &DefList = Defs[*Alias];
+ std::vector<PhysRegSUOper> &DefList = Defs[*Alias];
for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
- SUnit *DefSU = DefList[i];
+ SUnit *DefSU = DefList[i].SU;
if (DefSU == &ExitSU)
continue;
if (DefSU != SU &&
(Kind != SDep::Output || !MO.isDead() ||
!DefSU->getInstr()->registerDefIsDead(*Alias))) {
if (Kind == SDep::Anti)
- DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
+ DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
else {
- unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
- DefSU->getInstr());
- DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
+ SDep Dep(SU, Kind, /*Reg=*/*Alias);
+ unsigned OutLatency =
+ SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr());
+ Dep.setMinLatency(OutLatency);
+ Dep.setLatency(OutLatency);
+ DefSU->addPred(Dep);
}
}
}
@@ -324,61 +308,14 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
// Either insert a new Reg2SUnits entry with an empty SUnits list, or
// retrieve the existing SUnits list for this register's uses.
// Push this SUnit on the use list.
- Uses[MO.getReg()].push_back(SU);
+ Uses[MO.getReg()].push_back(PhysRegSUOper(SU, OperIdx));
}
else {
- addPhysRegDataDeps(SU, MO);
+ addPhysRegDataDeps(SU, OperIdx);
// Either insert a new Reg2SUnits entry with an empty SUnits list, or
// retrieve the existing SUnits list for this register's defs.
- std::vector<SUnit *> &DefList = Defs[MO.getReg()];
-
- // If a def is going to wrap back around to the top of the loop,
- // backschedule it.
- if (!UnitLatencies && DefList.empty()) {
- LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(MO.getReg());
- if (I != LoopRegs.Deps.end()) {
- const MachineOperand *UseMO = I->second.first;
- unsigned Count = I->second.second;
- const MachineInstr *UseMI = UseMO->getParent();
- unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
- const MCInstrDesc &UseMCID = UseMI->getDesc();
- const TargetSubtargetInfo &ST =
- TM.getSubtarget<TargetSubtargetInfo>();
- unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
- // TODO: If we knew the total depth of the region here, we could
- // handle the case where the whole loop is inside the region but
- // is large enough that the isScheduleHigh trick isn't needed.
- if (UseMOIdx < UseMCID.getNumOperands()) {
- // Currently, we only support scheduling regions consisting of
- // single basic blocks. Check to see if the instruction is in
- // the same region by checking to see if it has the same parent.
- if (UseMI->getParent() != MI->getParent()) {
- unsigned Latency = SU->Latency;
- if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
- Latency += SpecialAddressLatency;
- // This is a wild guess as to the portion of the latency which
- // will be overlapped by work done outside the current
- // scheduling region.
- Latency -= std::min(Latency, Count);
- // Add the artificial edge.
- ExitSU.addPred(SDep(SU, SDep::Order, Latency,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false,
- /*isArtificial=*/true));
- } else if (SpecialAddressLatency > 0 &&
- UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
- // The entire loop body is within the current scheduling region
- // and the latency of this operation is assumed to be greater
- // than the latency of the loop.
- // TODO: Recursively mark data-edge predecessors as
- // isScheduleHigh too.
- SU->isScheduleHigh = true;
- }
- }
- LoopRegs.Deps.erase(I);
- }
- }
+ std::vector<PhysRegSUOper> &DefList = Defs[MO.getReg()];
// clear this register's use list
if (Uses.contains(MO.getReg()))
@@ -393,11 +330,11 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
// the block. Instead, we leave only one call at the back of the
// DefList.
if (SU->isCall) {
- while (!DefList.empty() && DefList.back()->isCall)
+ while (!DefList.empty() && DefList.back().SU->isCall)
DefList.pop_back();
}
// Defs are pushed in the order they are visited and never reordered.
- DefList.push_back(SU);
+ DefList.push_back(PhysRegSUOper(SU, OperIdx));
}
}
@@ -430,9 +367,12 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
else {
SUnit *DefSU = DefI->SU;
if (DefSU != SU && DefSU != &ExitSU) {
- unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx,
- DefSU->getInstr());
- DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg));
+ SDep Dep(SU, SDep::Output, Reg);
+ unsigned OutLatency =
+ SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr());
+ Dep.setMinLatency(OutLatency);
+ Dep.setLatency(OutLatency);
+ DefSU->addPred(Dep);
}
DefI->SU = SU;
}
@@ -462,18 +402,17 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
if (DefSU) {
// The reaching Def lives within this scheduling region.
// Create a data dependence.
- //
- // TODO: Handle "special" address latencies cleanly.
- SDep dep(DefSU, SDep::Data, DefSU->Latency, Reg);
- if (!UnitLatencies) {
- // Adjust the dependence latency using operand def/use information, then
- // allow the target to perform its own adjustments.
- unsigned Latency = computeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
- dep.setLatency(Latency);
-
- const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
- ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
- }
+ SDep dep(DefSU, SDep::Data, Reg);
+ // Adjust the dependence latency using operand def/use information, then
+ // allow the target to perform its own adjustments.
+ int DefOp = Def->findRegisterDefOperandIdx(Reg);
+ dep.setLatency(
+ SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx, false));
+ dep.setMinLatency(
+ SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx, true));
+
+ const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+ ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
SU->addPred(dep);
}
}
@@ -481,14 +420,14 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
// Add antidependence to the following def of the vreg it uses.
VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
if (DefI != VRegDefs.end() && DefI->SU != SU)
- DefI->SU->addPred(SDep(SU, SDep::Anti, 0, Reg));
+ DefI->SU->addPred(SDep(SU, SDep::Anti, Reg));
}
/// Return true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
- (MI->hasVolatileMemoryRef() &&
+ (MI->hasOrderedMemoryRef() &&
(!MI->mayLoad() || !MI->isInvariantLoad(AA))))
return true;
return false;
@@ -621,8 +560,7 @@ iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
// and stop descending.
if (*Depth > 200 ||
MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
- SUb->addPred(SDep(SUa, SDep::Order, /*Latency=*/0, /*Reg=*/0,
- /*isNormalMemory=*/true));
+ SUb->addPred(SDep(SUa, SDep::MayAliasMem));
return *Depth;
}
// Track current depth.
@@ -653,9 +591,9 @@ static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
if (SU == *I)
continue;
if (MIsNeedChainEdge(AA, MFI, SU->getInstr(), (*I)->getInstr())) {
- unsigned Latency = ((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0;
- (*I)->addPred(SDep(SU, SDep::Order, Latency, /*Reg=*/0,
- /*isNormalMemory=*/true));
+ SDep Dep(SU, SDep::MayAliasMem);
+ Dep.setLatency(((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0);
+ (*I)->addPred(Dep);
}
// Now go through all the chain successors and iterate from them.
// Keep track of visited nodes.
@@ -678,9 +616,11 @@ void addChainDependency (AliasAnalysis *AA, const MachineFrameInfo *MFI,
// If this is a false dependency,
// do not add the edge, but remember the rejected node.
if (!EnableAASchedMI ||
- MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr()))
- SUb->addPred(SDep(SUa, SDep::Order, TrueMemOrderLatency, /*Reg=*/0,
- isNormalMemory));
+ MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
+ SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier);
+ Dep.setLatency(TrueMemOrderLatency);
+ SUb->addPred(Dep);
+ }
else {
// Duplicate entries should be ignored.
RejectList.insert(SUb);
@@ -718,10 +658,7 @@ void ScheduleDAGInstrs::initSUnits() {
SU->isCommutable = MI->isCommutable();
// Assign the Latency field of SU using target-provided information.
- if (UnitLatencies)
- SU->Latency = 1;
- else
- computeLatency(SU);
+ SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());
}
}
@@ -825,16 +762,19 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// references, even those that are known to not alias.
for (std::map<const Value *, SUnit *>::iterator I =
NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
- I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ I->second->addPred(SDep(SU, SDep::Barrier));
}
for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
- for (unsigned i = 0, e = I->second.size(); i != e; ++i)
- I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
+ SDep Dep(SU, SDep::Barrier);
+ Dep.setLatency(TrueMemOrderLatency);
+ I->second[i]->addPred(Dep);
+ }
}
// Add SU to the barrier chain.
if (BarrierChain)
- BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ BarrierChain->addPred(SDep(SU, SDep::Barrier));
BarrierChain = SU;
// This is a barrier event that acts as a pivotal node in the DAG,
// so it is safe to clear list of exposed nodes.
@@ -922,7 +862,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// SU and barrier _could_ be reordered; they should not be. In addition,
// we have lost all RejectMemNodes below barrier.
if (BarrierChain)
- BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ BarrierChain->addPred(SDep(SU, SDep::Barrier));
} else {
// Treat all other stores conservatively.
goto new_alias_chain;
@@ -931,10 +871,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
if (!ExitSU.isPred(SU))
// Push stores up a bit to avoid them getting in between cmp
// and branches.
- ExitSU.addPred(SDep(SU, SDep::Order, 0,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false,
- /*isArtificial=*/true));
+ ExitSU.addPred(SDep(SU, SDep::Artificial));
} else if (MI->mayLoad()) {
bool MayAlias = true;
if (MI->isInvariantLoad(AA)) {
@@ -969,7 +906,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
if (MayAlias && AliasChain)
addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes);
if (BarrierChain)
- BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ BarrierChain->addPred(SDep(SU, SDep::Barrier));
}
}
}
@@ -982,34 +919,10 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
PendingLoads.clear();
}
-void ScheduleDAGInstrs::computeLatency(SUnit *SU) {
- // Compute the latency for the node. We only provide a default for missing
- // itineraries. Empty itineraries still have latency properties.
- if (!InstrItins) {
- SU->Latency = 1;
-
- // Simplistic target-independent heuristic: assume that loads take
- // extra time.
- if (SU->getInstr()->mayLoad())
- SU->Latency += 2;
- } else {
- SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
- }
-}
-
-unsigned ScheduleDAGInstrs::computeOperandLatency(SUnit *Def, SUnit *Use,
- const SDep& dep,
- bool FindMin) const {
- // For a data dependency with a known register...
- if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
- return 1;
-
- return TII->computeOperandLatency(InstrItins, TRI, Def->getInstr(),
- Use->getInstr(), dep.getReg(), FindMin);
-}
-
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
SU->getInstr()->dump();
+#endif
}
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
@@ -1029,3 +942,94 @@ std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
std::string ScheduleDAGInstrs::getDAGName() const {
return "dag." + BB->getFullName();
}
+
+namespace {
+/// \brief Manage the stack used by a reverse depth-first search over the DAG.
+class SchedDAGReverseDFS {
+ std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator> > DFSStack;
+public:
+ bool isComplete() const { return DFSStack.empty(); }
+
+ void follow(const SUnit *SU) {
+ DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
+ }
+ void advance() { ++DFSStack.back().second; }
+
+ void backtrack() { DFSStack.pop_back(); }
+
+ const SUnit *getCurr() const { return DFSStack.back().first; }
+
+ SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }
+
+ SUnit::const_pred_iterator getPredEnd() const {
+ return getCurr()->Preds.end();
+ }
+};
+} // anonymous
+
+void ScheduleDAGILP::resize(unsigned NumSUnits) {
+ ILPValues.resize(NumSUnits);
+}
+
+ILPValue ScheduleDAGILP::getILP(const SUnit *SU) {
+ return ILPValues[SU->NodeNum];
+}
+
+// A leaf node has an ILP of 1/1.
+static ILPValue initILP(const SUnit *SU) {
+ unsigned Cnt = SU->getInstr()->isTransient() ? 0 : 1;
+ return ILPValue(Cnt, 1 + SU->getDepth());
+}
+
+/// Compute an ILP metric for all nodes in the subDAG reachable via depth-first
+/// search from this root.
+void ScheduleDAGILP::computeILP(const SUnit *Root) {
+ if (!IsBottomUp)
+ llvm_unreachable("Top-down ILP metric is unimplemnted");
+
+ SchedDAGReverseDFS DFS;
+ // Mark a node visited by validating it.
+ ILPValues[Root->NodeNum] = initILP(Root);
+ DFS.follow(Root);
+ for (;;) {
+ // Traverse the leftmost path as far as possible.
+ while (DFS.getPred() != DFS.getPredEnd()) {
+ const SUnit *PredSU = DFS.getPred()->getSUnit();
+ DFS.advance();
+ // If the pred is already valid, skip it.
+ if (ILPValues[PredSU->NodeNum].isValid())
+ continue;
+ ILPValues[PredSU->NodeNum] = initILP(PredSU);
+ DFS.follow(PredSU);
+ }
+ // Visit the top of the stack in postorder and backtrack.
+ unsigned PredCount = ILPValues[DFS.getCurr()->NodeNum].InstrCount;
+ DFS.backtrack();
+ if (DFS.isComplete())
+ break;
+    // Add the recently finished predecessor's bottom-up descendant count.
+ ILPValues[DFS.getCurr()->NodeNum].InstrCount += PredCount;
+ }
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void ILPValue::print(raw_ostream &OS) const {
+ if (!isValid())
+ OS << "BADILP";
+ OS << InstrCount << " / " << Cycles << " = "
+ << format("%g", ((double)InstrCount / Cycles));
+}
+
+void ILPValue::dump() const {
+ dbgs() << *this << '\n';
+}
+
+namespace llvm {
+
+raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
+ Val.print(OS);
+ return OS;
+}
+
+} // namespace llvm
+#endif // !NDEBUG || LLVM_ENABLE_DUMP
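The ILPValue machinery added above pairs a bottom-up instruction count with a critical-path length (Cycles = 1 + depth), so InstrCount/Cycles estimates how much parallelism scheduling a subDAG exposes. A rough standalone sketch of the same iterative postorder walk, using hypothetical Node/ILP types rather than the patch's SUnit/ILPValue, and counting every node as one instruction:

#include <cstddef>
#include <utility>
#include <vector>

struct Node {
  std::vector<Node*> Preds; // the subDAG "below" this node
  unsigned Depth;           // assumed precomputed, like SUnit::getDepth()
};

struct ILP {
  unsigned InstrCount, Cycles;
  ILP(unsigned IC = 0, unsigned Cy = 1) : InstrCount(IC), Cycles(Cy) {}
  bool valid() const { return InstrCount != 0; } // 0 doubles as "unvisited"
};

// Iterative reverse DFS mirroring SchedDAGReverseDFS: descend into the first
// unvisited predecessor; once a node's preds are exhausted, pop it and fold
// its count into the node now on top of the stack. Out must be sized to
// Pool.size() before the call.
void computeILP(Node *Root, std::vector<Node> &Pool, std::vector<ILP> &Out) {
  auto id = [&](Node *N) { return size_t(N - &Pool[0]); };
  std::vector<std::pair<Node*, size_t> > Stack; // (node, next pred index)
  Out[id(Root)] = ILP(1, 1 + Root->Depth);
  Stack.push_back(std::make_pair(Root, size_t(0)));
  for (;;) {
    while (Stack.back().second != Stack.back().first->Preds.size()) {
      Node *Pred = Stack.back().first->Preds[Stack.back().second++];
      if (Out[id(Pred)].valid())
        continue; // already counted via another path through the DAG
      Out[id(Pred)] = ILP(1, 1 + Pred->Depth);
      Stack.push_back(std::make_pair(Pred, size_t(0)));
    }
    unsigned Finished = Out[id(Stack.back().first)].InstrCount;
    Stack.pop_back();
    if (Stack.empty())
      break;
    Out[id(Stack.back().first)].InstrCount += Finished;
  }
}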
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
index 38feee9..6e781b1 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Constants.h"
-#include "llvm/Function.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -35,7 +34,7 @@ namespace llvm {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const ScheduleDAG *G) {
- return G->MF.getFunction()->getName();
+ return G->MF.getName();
}
static bool renderGraphFromBottomUp() {
diff --git a/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
index e675366..2cd84d6 100644
--- a/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
+++ b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
@@ -89,6 +89,7 @@ void ScoreboardHazardRecognizer::Reset() {
ReservedScoreboard.reset();
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScoreboardHazardRecognizer::Scoreboard::dump() const {
dbgs() << "Scoreboard:\n";
@@ -104,6 +105,7 @@ void ScoreboardHazardRecognizer::Scoreboard::dump() const {
dbgs() << '\n';
}
}
+#endif
bool ScoreboardHazardRecognizer::atIssueLimit() const {
if (IssueWidth == 0)
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4e29879..37d7731 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -23,7 +23,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -194,6 +194,7 @@ namespace {
SDValue visitOR(SDNode *N);
SDValue visitXOR(SDNode *N);
SDValue SimplifyVBinOp(SDNode *N);
+ SDValue SimplifyVUnaryOp(SDNode *N);
SDValue visitSHL(SDNode *N);
SDValue visitSRA(SDNode *N);
SDValue visitSRL(SDNode *N);
@@ -269,6 +270,8 @@ namespace {
SDValue ReduceLoadWidth(SDNode *N);
SDValue ReduceLoadOpStoreWidth(SDNode *N);
SDValue TransformFPLoadStorePair(SDNode *N);
+ SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
+ SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
SDValue GetDemandedBits(SDValue V, const APInt &Mask);
@@ -300,6 +303,11 @@ namespace {
/// looking for a better chain (aliasing node.)
SDValue FindBetterChain(SDNode *N, SDValue Chain);
+ /// Merge consecutive store operations into a wide store.
+ /// This optimization uses wide integers or vectors when possible.
+ /// \return True if some memory operations were changed.
+ bool MergeConsecutiveStores(StoreSDNode *N);
+
public:
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
@@ -385,10 +393,6 @@ static char isNegatibleForFree(SDValue Op, bool LegalOperations,
const TargetLowering &TLI,
const TargetOptions *Options,
unsigned Depth = 0) {
- // No compile time optimizations on this type.
- if (Op.getValueType() == MVT::ppcf128)
- return 0;
-
// fneg is removable even if it has multiple uses.
if (Op.getOpcode() == ISD::FNEG) return 2;
@@ -413,7 +417,7 @@ static char isNegatibleForFree(SDValue Op, bool LegalOperations,
!TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType()))
return 0;
- // fold (fsub (fadd A, B)) -> (fsub (fneg A), B)
+ // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
Options, Depth + 1))
return V;
@@ -1643,7 +1647,8 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
return N0.getOperand(0);
// fold C2-(A+C1) -> (C2-C1)-A
if (N1.getOpcode() == ISD::ADD && N0C && N1C1) {
- SDValue NewC = DAG.getConstant((N0C->getAPIntValue() - N1C1->getAPIntValue()), VT);
+ SDValue NewC = DAG.getConstant(N0C->getAPIntValue() - N1C1->getAPIntValue(),
+ VT);
return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, NewC,
N1.getOperand(0));
}
@@ -2345,16 +2350,19 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
// we don't want to undo this promotion.
// We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
// on scalars.
- if ((N0.getOpcode() == ISD::BITCAST || N0.getOpcode() == ISD::SCALAR_TO_VECTOR)
- && Level == AfterLegalizeTypes) {
+ if ((N0.getOpcode() == ISD::BITCAST ||
+ N0.getOpcode() == ISD::SCALAR_TO_VECTOR) &&
+ Level == AfterLegalizeTypes) {
SDValue In0 = N0.getOperand(0);
SDValue In1 = N1.getOperand(0);
EVT In0Ty = In0.getValueType();
EVT In1Ty = In1.getValueType();
- // If both incoming values are integers, and the original types are the same.
+ DebugLoc DL = N->getDebugLoc();
+ // If both incoming values are integers, and the original types are the
+ // same.
if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
- SDValue Op = DAG.getNode(N->getOpcode(), N->getDebugLoc(), In0Ty, In0, In1);
- SDValue BC = DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, Op);
+ SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1);
+ SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op);
AddToWorkList(Op.getNode());
return BC;
}
@@ -2496,8 +2504,18 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// lanes of the constant together.
EVT VT = Vector->getValueType(0);
unsigned BitWidth = VT.getVectorElementType().getSizeInBits();
+
+ // If the splat value has been compressed to a bitlength lower
+ // than the size of the vector lane, we need to re-expand it to
+ // the lane size.
+ if (BitWidth > SplatBitSize)
+ for (SplatValue = SplatValue.zextOrTrunc(BitWidth);
+ SplatBitSize < BitWidth;
+ SplatBitSize = SplatBitSize * 2)
+ SplatValue |= SplatValue.shl(SplatBitSize);
+
Constant = APInt::getAllOnesValue(BitWidth);
- for (unsigned i = 0, n = VT.getVectorNumElements(); i < n; ++i)
+ for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i)
Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth);
}
}
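The re-expansion loop above rebuilds a full-lane splat from a compressed pattern by repeated shift-and-or doubling. The same idea with plain integers instead of APInt (hypothetical helper; assumes the pattern evenly divides the lane and the lane fits in 64 bits):

#include <cassert>
#include <cstdint>

uint64_t expandSplat(uint64_t Splat, unsigned SplatBits, unsigned LaneBits) {
  assert(SplatBits && LaneBits % SplatBits == 0 && LaneBits <= 64);
  // Double the pattern until it fills the lane, as the APInt loop does.
  for (; SplatBits < LaneBits; SplatBits *= 2)
    Splat |= Splat << SplatBits;
  return Splat;
}
// e.g. expandSplat(0xAB, 8, 32) == 0xABABABAB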
@@ -2984,7 +3002,7 @@ SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
SDValue ShAmt = DAG.getConstant(16, getShiftAmountTy(VT));
if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
return DAG.getNode(ISD::ROTL, N->getDebugLoc(), VT, BSwap, ShAmt);
- else if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
+ if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, BSwap, ShAmt);
return DAG.getNode(ISD::OR, N->getDebugLoc(), VT,
DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, BSwap, ShAmt),
@@ -3202,11 +3220,8 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) {
if ((LShVal + RShVal) != OpSizeInBits)
return 0;
- SDValue Rot;
- if (HasROTL)
- Rot = DAG.getNode(ISD::ROTL, DL, VT, LHSShiftArg, LHSShiftAmt);
- else
- Rot = DAG.getNode(ISD::ROTR, DL, VT, LHSShiftArg, RHSShiftAmt);
+ SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
+ LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);
// If there is an AND of either shifted operand, apply it to the result.
if (LHSMask.getNode() || RHSMask.getNode()) {
@@ -3239,12 +3254,8 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) {
if (ConstantSDNode *SUBC =
dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) {
if (SUBC->getAPIntValue() == OpSizeInBits) {
- if (HasROTL)
- return DAG.getNode(ISD::ROTL, DL, VT,
- LHSShiftArg, LHSShiftAmt).getNode();
- else
- return DAG.getNode(ISD::ROTR, DL, VT,
- LHSShiftArg, RHSShiftAmt).getNode();
+ return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg,
+ HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode();
}
}
}
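Both rewritten branches rely on the rotate identity (x << c) | (x >> (w - c)) == rotl(x, c) for 0 < c < w, with ROTR as the mirror case. A quick sanity check of the identity in plain C++ (hypothetical helper, fixed at 32 bits):

#include <cassert>
#include <cstdint>

static uint32_t rotl32(uint32_t X, unsigned C) {
  return (X << C) | (X >> (32 - C)); // only valid for 0 < C < 32
}

int main() {
  uint32_t X = 0xDEADBEEF;
  // Rotating left by C and then by 32-C is a full rotation: the identity.
  for (unsigned C = 1; C < 32; ++C)
    assert(rotl32(rotl32(X, C), 32 - C) == X);
  return 0;
}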
@@ -3256,25 +3267,21 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) {
if (ConstantSDNode *SUBC =
dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) {
if (SUBC->getAPIntValue() == OpSizeInBits) {
- if (HasROTR)
- return DAG.getNode(ISD::ROTR, DL, VT,
- LHSShiftArg, RHSShiftAmt).getNode();
- else
- return DAG.getNode(ISD::ROTL, DL, VT,
- LHSShiftArg, LHSShiftAmt).getNode();
+ return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT, LHSShiftArg,
+ HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode();
}
}
}
// Look for sign/zext/any-extended or truncate cases:
- if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
- || LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
- || LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND
- || LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
- (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
- || RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
- || RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND
- || RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
+ if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
+ LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
+ LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
+ LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
+ (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
+ RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
+ RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
+ RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
SDValue LExtOp0 = LHSShiftAmt.getOperand(0);
SDValue RExtOp0 = RHSShiftAmt.getOperand(0);
if (RExtOp0.getOpcode() == ISD::SUB &&
@@ -4046,7 +4053,8 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
if (VT.isInteger() &&
(VT0 == MVT::i1 ||
(VT0.isInteger() &&
- TLI.getBooleanContents(false) == TargetLowering::ZeroOrOneBooleanContent)) &&
+ TLI.getBooleanContents(false) ==
+ TargetLowering::ZeroOrOneBooleanContent)) &&
N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
SDValue XORNode;
if (VT == VT0)
@@ -4412,20 +4420,18 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
// truncate/sign extend
- else {
- EVT MatchingElementType =
- EVT::getIntegerVT(*DAG.getContext(),
- N0VT.getScalarType().getSizeInBits());
- EVT MatchingVectorType =
- EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
- N0VT.getVectorNumElements());
+ EVT MatchingElementType =
+ EVT::getIntegerVT(*DAG.getContext(),
+ N0VT.getScalarType().getSizeInBits());
+ EVT MatchingVectorType =
+ EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
+ N0VT.getVectorNumElements());
- if (SVT == MatchingVectorType) {
- SDValue VsetCC = DAG.getSetCC(N->getDebugLoc(), MatchingVectorType,
- N0.getOperand(0), N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get());
- return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
- }
+ if (SVT == MatchingVectorType) {
+ SDValue VsetCC = DAG.getSetCC(N->getDebugLoc(), MatchingVectorType,
+ N0.getOperand(0), N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
}
}
@@ -5235,13 +5241,12 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
// if the source is smaller than the dest, we still need an extend
return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
N0.getOperand(0));
- else if (N0.getOperand(0).getValueType().bitsGT(VT))
+ if (N0.getOperand(0).getValueType().bitsGT(VT))
// if the source is larger than the dest, then we just need the truncate
return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
- else
- // if the source and dest are the same type, we can drop both the extend
- // and the truncate.
- return N0.getOperand(0);
+ // if the source and dest are the same type, we can drop both the extend
+ // and the truncate.
+ return N0.getOperand(0);
}
// Fold extract-and-trunc into a narrow extract. For example:
@@ -5301,6 +5306,48 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
if (Reduced.getNode())
return Reduced;
}
+ // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...)),
+ // where ... are all 'undef'.
+ if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
+ SmallVector<EVT, 8> VTs;
+ SDValue V;
+ unsigned Idx = 0;
+ unsigned NumDefs = 0;
+
+ for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
+ SDValue X = N0.getOperand(i);
+ if (X.getOpcode() != ISD::UNDEF) {
+ V = X;
+ Idx = i;
+ NumDefs++;
+ }
+      // Stop if more than one member is non-undef.
+ if (NumDefs > 1)
+ break;
+ VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
+ VT.getVectorElementType(),
+ X.getValueType().getVectorNumElements()));
+ }
+
+ if (NumDefs == 0)
+ return DAG.getUNDEF(VT);
+
+ if (NumDefs == 1) {
+ assert(V.getNode() && "The single defined operand is empty!");
+ SmallVector<SDValue, 8> Opnds;
+ for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+ if (i != Idx) {
+ Opnds.push_back(DAG.getUNDEF(VTs[i]));
+ continue;
+ }
+ SDValue NV = DAG.getNode(ISD::TRUNCATE, V.getDebugLoc(), VTs[i], V);
+ AddToWorkList(NV.getNode());
+ Opnds.push_back(NV);
+ }
+ return DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
+ &Opnds[0], Opnds.size());
+ }
+ }
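The fold above narrows only the single defined operand and re-wraps it in undefs. A small data-level check of why that is sound, with plain arrays standing in for vector values and "undef" lanes modeled as arbitrary junk (only the defined lanes of the result are observable):

#include <cassert>
#include <cstdint>
#include <vector>

static std::vector<uint16_t> truncEach(const std::vector<uint32_t> &V) {
  std::vector<uint16_t> R;
  for (size_t i = 0; i < V.size(); ++i)
    R.push_back(uint16_t(V[i])); // per-lane truncate, like ISD::TRUNCATE
  return R;
}

int main() {
  std::vector<uint32_t> Undef, X;
  Undef.push_back(0xDEAD0001); Undef.push_back(0xDEAD0002); // junk lanes
  X.push_back(0x00010203);     X.push_back(0x00040506);     // live lanes
  // trunc(concat(undef, x)):
  std::vector<uint32_t> Concat = Undef;
  Concat.insert(Concat.end(), X.begin(), X.end());
  std::vector<uint16_t> Whole = truncEach(Concat);
  // concat(undef', trunc(x)): the live lanes must agree.
  std::vector<uint16_t> Live = truncEach(X);
  for (size_t i = 0; i < X.size(); ++i)
    assert(Whole[Undef.size() + i] == Live[i]);
  return 0;
}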
// Simplify the operands using demanded-bits information.
if (!VT.isVector() &&
@@ -5338,7 +5385,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
!LD2->isVolatile() &&
DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
unsigned Align = LD1->getAlignment();
- unsigned NewAlign = TLI.getTargetData()->
+ unsigned NewAlign = TLI.getDataLayout()->
getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
if (NewAlign <= Align &&
@@ -5407,7 +5454,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
!cast<LoadSDNode>(N0)->isVolatile() &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- unsigned Align = TLI.getTargetData()->
+ unsigned Align = TLI.getDataLayout()->
getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
unsigned OrigAlign = LN0->getAlignment();
@@ -5430,7 +5477,8 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
// This often reduces constant pool loads.
if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(VT)) ||
(N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(VT))) &&
- N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
+ N0.getNode()->hasOneUse() && VT.isInteger() &&
+ !VT.isVector() && !N0.getValueType().isVector()) {
SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT,
N0.getOperand(0));
AddToWorkList(NewConv.getNode());
@@ -5653,7 +5701,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
}
// fold (fadd c1, c2) -> c1 + c2
- if (N0CFP && N1CFP && VT != MVT::ppcf128)
+ if (N0CFP && N1CFP)
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1);
// canonicalize constant to RHS
if (N0CFP && !N1CFP)
@@ -5664,12 +5712,12 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
return N0;
// fold (fadd A, (fneg B)) -> (fsub A, B)
if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
- isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
+ isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0,
GetNegatedExpression(N1, DAG, LegalOperations));
// fold (fadd (fneg A), B) -> (fsub B, A)
if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
- isNegatibleForFree(N0, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
+ isNegatibleForFree(N0, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1,
GetNegatedExpression(N0, DAG, LegalOperations));
@@ -5681,6 +5729,139 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
N0.getOperand(1), N1));
+  // If allowed, fold (fadd (fneg x), x) -> 0.0
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1) {
+ return DAG.getConstantFP(0.0, VT);
+ }
+
+  // If allowed, fold (fadd x, (fneg x)) -> 0.0
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0) {
+ return DAG.getConstantFP(0.0, VT);
+ }
+
+ // In unsafe math mode, we can fold chains of FADD's of the same value
+ // into multiplications. This transform is not safe in general because
+ // we are reducing the number of rounding steps.
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ TLI.isOperationLegalOrCustom(ISD::FMUL, VT) &&
+ !N0CFP && !N1CFP) {
+ if (N0.getOpcode() == ISD::FMUL) {
+ ConstantFPSDNode *CFP00 = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
+ ConstantFPSDNode *CFP01 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
+
+ // (fadd (fmul c, x), x) -> (fmul c+1, x)
+ if (CFP00 && !CFP01 && N0.getOperand(1) == N1) {
+ SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
+ SDValue(CFP00, 0),
+ DAG.getConstantFP(1.0, VT));
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N1, NewCFP);
+ }
+
+ // (fadd (fmul x, c), x) -> (fmul c+1, x)
+ if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
+ SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
+ SDValue(CFP01, 0),
+ DAG.getConstantFP(1.0, VT));
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N1, NewCFP);
+ }
+
+ // (fadd (fadd x, x), x) -> (fmul 3.0, x)
+ if (!CFP00 && !CFP01 && N0.getOperand(0) == N0.getOperand(1) &&
+ N0.getOperand(0) == N1) {
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N1, DAG.getConstantFP(3.0, VT));
+ }
+
+ // (fadd (fmul c, x), (fadd x, x)) -> (fmul c+2, x)
+ if (CFP00 && !CFP01 && N1.getOpcode() == ISD::FADD &&
+ N1.getOperand(0) == N1.getOperand(1) &&
+ N0.getOperand(1) == N1.getOperand(0)) {
+ SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
+ SDValue(CFP00, 0),
+ DAG.getConstantFP(2.0, VT));
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0.getOperand(1), NewCFP);
+ }
+
+ // (fadd (fmul x, c), (fadd x, x)) -> (fmul c+2, x)
+ if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
+ N1.getOperand(0) == N1.getOperand(1) &&
+ N0.getOperand(0) == N1.getOperand(0)) {
+ SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
+ SDValue(CFP01, 0),
+ DAG.getConstantFP(2.0, VT));
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0.getOperand(0), NewCFP);
+ }
+ }
+
+ if (N1.getOpcode() == ISD::FMUL) {
+ ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
+ ConstantFPSDNode *CFP11 = dyn_cast<ConstantFPSDNode>(N1.getOperand(1));
+
+ // (fadd x, (fmul c, x)) -> (fmul c+1, x)
+ if (CFP10 && !CFP11 && N1.getOperand(1) == N0) {
+ SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
+ SDValue(CFP10, 0),
+ DAG.getConstantFP(1.0, VT));
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0, NewCFP);
+ }
+
+ // (fadd x, (fmul x, c)) -> (fmul c+1, x)
+ if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
+ SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
+ SDValue(CFP11, 0),
+ DAG.getConstantFP(1.0, VT));
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0, NewCFP);
+ }
+
+ // (fadd x, (fadd x, x)) -> (fmul 3.0, x)
+ if (!CFP10 && !CFP11 && N1.getOperand(0) == N1.getOperand(1) &&
+ N1.getOperand(0) == N0) {
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0, DAG.getConstantFP(3.0, VT));
+ }
+
+ // (fadd (fadd x, x), (fmul c, x)) -> (fmul c+2, x)
+ if (CFP10 && !CFP11 && N1.getOpcode() == ISD::FADD &&
+ N1.getOperand(0) == N1.getOperand(1) &&
+ N0.getOperand(1) == N1.getOperand(0)) {
+ SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
+ SDValue(CFP10, 0),
+ DAG.getConstantFP(2.0, VT));
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0.getOperand(1), NewCFP);
+ }
+
+ // (fadd (fadd x, x), (fmul x, c)) -> (fmul c+2, x)
+ if (CFP11 && !CFP10 && N1.getOpcode() == ISD::FADD &&
+ N1.getOperand(0) == N1.getOperand(1) &&
+ N0.getOperand(0) == N1.getOperand(0)) {
+ SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
+ SDValue(CFP11, 0),
+ DAG.getConstantFP(2.0, VT));
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0.getOperand(0), NewCFP);
+ }
+ }
+
+ // (fadd (fadd x, x), (fadd x, x)) -> (fmul 4.0, x)
+ if (N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
+ N0.getOperand(0) == N0.getOperand(1) &&
+ N1.getOperand(0) == N1.getOperand(1) &&
+ N0.getOperand(0) == N1.getOperand(0)) {
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0.getOperand(0),
+ DAG.getConstantFP(4.0, VT));
+ }
+ }
+
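These FADD-to-FMUL rewrites trade one sequence of roundings for another, which is why they all sit behind UnsafeFPMath. A minimal illustration of the hazard (values picked arbitrarily; the two results are permitted, though not guaranteed, to differ in the last ulp):

#include <cstdio>

int main() {
  float c = 0.1f, x = 0.3f;
  float lhs = c * x + x;        // round(round(c*x) + x)
  float rhs = (c + 1.0f) * x;   // round(round(c+1) * x), as after the fold
  std::printf("%.9g vs %.9g\n", lhs, rhs); // equality is not guaranteed
  return 0;
}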
// FADD -> FMA combines:
if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
DAG.getTarget().Options.UnsafeFPMath) &&
@@ -5692,8 +5873,8 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT,
N0.getOperand(0), N0.getOperand(1), N1);
}
-
- // fold (fadd x, (fmul y, z)) -> (fma x, y, z)
+
+ // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
// Note: Commutes FADD operands.
if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) {
return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT,
@@ -5719,7 +5900,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
}
// fold (fsub c1, c2) -> c1-c2
- if (N0CFP && N1CFP && VT != MVT::ppcf128)
+ if (N0CFP && N1CFP)
return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1);
// fold (fsub A, 0) -> A
if (DAG.getTarget().Options.UnsafeFPMath &&
@@ -5811,7 +5992,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
}
// fold (fmul c1, c2) -> c1*c2
- if (N0CFP && N1CFP && VT != MVT::ppcf128)
+ if (N0CFP && N1CFP)
return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, N1);
// canonicalize constant to RHS
if (N0CFP && !N1CFP)
@@ -5867,7 +6048,14 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
+ DebugLoc dl = N->getDebugLoc();
+ if (DAG.getTarget().Options.UnsafeFPMath) {
+ if (N0CFP && N0CFP->isZero())
+ return N2;
+ if (N1CFP && N1CFP->isZero())
+ return N2;
+ }
if (N0CFP && N0CFP->isExactlyValue(1.0))
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N2);
if (N1CFP && N1CFP->isExactlyValue(1.0))
@@ -5877,6 +6065,58 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
if (N0CFP && !N1CFP)
return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, N1, N0, N2);
+ // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
+ if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
+ N2.getOpcode() == ISD::FMUL &&
+ N0 == N2.getOperand(0) &&
+ N2.getOperand(1).getOpcode() == ISD::ConstantFP) {
+ return DAG.getNode(ISD::FMUL, dl, VT, N0,
+ DAG.getNode(ISD::FADD, dl, VT, N1, N2.getOperand(1)));
+ }
+
+
+ // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ N0.getOpcode() == ISD::FMUL && N1CFP &&
+ N0.getOperand(1).getOpcode() == ISD::ConstantFP) {
+ return DAG.getNode(ISD::FMA, dl, VT,
+ N0.getOperand(0),
+ DAG.getNode(ISD::FMUL, dl, VT, N1, N0.getOperand(1)),
+ N2);
+ }
+
+ // (fma x, 1, y) -> (fadd x, y)
+ // (fma x, -1, y) -> (fadd (fneg x), y)
+ if (N1CFP) {
+ if (N1CFP->isExactlyValue(1.0))
+ return DAG.getNode(ISD::FADD, dl, VT, N0, N2);
+
+ if (N1CFP->isExactlyValue(-1.0) &&
+ (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
+ SDValue RHSNeg = DAG.getNode(ISD::FNEG, dl, VT, N0);
+ AddToWorkList(RHSNeg.getNode());
+ return DAG.getNode(ISD::FADD, dl, VT, N2, RHSNeg);
+ }
+ }
+
+ // (fma x, c, x) -> (fmul x, (c+1))
+ if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && N0 == N2) {
+ return DAG.getNode(ISD::FMUL, dl, VT,
+ N0,
+ DAG.getNode(ISD::FADD, dl, VT,
+ N1, DAG.getConstantFP(1.0, VT)));
+ }
+
+ // (fma x, c, (fneg x)) -> (fmul x, (c-1))
+ if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
+ N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) {
+ return DAG.getNode(ISD::FMUL, dl, VT,
+ N0,
+ DAG.getNode(ISD::FADD, dl, VT,
+ N1, DAG.getConstantFP(-1.0, VT)));
+ }
+
+
return SDValue();
}
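Of the folds above, the x*1+y and x*(-1)+y rewrites are exact IEEE identities (one rounding on each side), while the zero-operand fold needs UnsafeFPMath because 0*inf and 0*NaN are NaN, not 0. A quick numeric check with std::fma, using exactly representable operands:

#include <cassert>
#include <cmath>

int main() {
  double x = 1.25, y = -3.5;
  assert(std::fma(x, 1.0, y) == x + y);   // (fma x, 1, y)  -> (fadd x, y)
  assert(std::fma(x, -1.0, y) == y - x);  // (fma x, -1, y) -> (fadd (fneg x), y)
  assert(std::fma(0.0, x, y) == y);       // the UnsafeFPMath fold: fine here,
                                          // but wrong if x were inf or NaN
  return 0;
}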
@@ -5895,11 +6135,11 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
}
// fold (fdiv c1, c2) -> c1/c2
- if (N0CFP && N1CFP && VT != MVT::ppcf128)
+ if (N0CFP && N1CFP)
return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1);
// fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
- if (N1CFP && VT != MVT::ppcf128 && DAG.getTarget().Options.UnsafeFPMath) {
+ if (N1CFP && DAG.getTarget().Options.UnsafeFPMath) {
// Compute the reciprocal 1.0 / c2.
APFloat N1APF = N1CFP->getValueAPF();
APFloat Recip(N1APF.getSemantics(), 1); // 1.0
@@ -5942,7 +6182,7 @@ SDValue DAGCombiner::visitFREM(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (frem c1, c2) -> fmod(c1,c2)
- if (N0CFP && N1CFP && VT != MVT::ppcf128)
+ if (N0CFP && N1CFP)
return DAG.getNode(ISD::FREM, N->getDebugLoc(), VT, N0, N1);
return SDValue();
@@ -5955,7 +6195,7 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
- if (N0CFP && N1CFP && VT != MVT::ppcf128) // Constant fold
+ if (N0CFP && N1CFP) // Constant fold
return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1);
if (N1CFP) {
@@ -6005,7 +6245,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
EVT OpVT = N0.getValueType();
// fold (sint_to_fp c1) -> c1fp
- if (N0C && OpVT != MVT::ppcf128 &&
+ if (N0C &&
// ...but only if the target supports immediate floating-point values
(!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
@@ -6062,7 +6302,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
EVT OpVT = N0.getValueType();
// fold (uint_to_fp c1) -> c1fp
- if (N0C && OpVT != MVT::ppcf128 &&
+ if (N0C &&
// ...but only if the target supports immediate floating-point values
(!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
@@ -6117,7 +6357,7 @@ SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (fp_to_uint c1fp) -> c1
- if (N0CFP && VT != MVT::ppcf128)
+ if (N0CFP)
return DAG.getNode(ISD::FP_TO_UINT, N->getDebugLoc(), VT, N0);
return SDValue();
@@ -6130,7 +6370,7 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (fp_round c1fp) -> c1fp
- if (N0CFP && N0.getValueType() != MVT::ppcf128)
+ if (N0CFP)
return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0, N1);
// fold (fp_round (fp_extend x)) -> x
@@ -6184,7 +6424,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
return SDValue();
// fold (fp_extend c1fp) -> c1fp
- if (N0CFP && VT != MVT::ppcf128)
+ if (N0CFP)
return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, N0);
// Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the
@@ -6225,6 +6465,11 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
+ if (VT.isVector()) {
+ SDValue FoldedVOp = SimplifyVUnaryOp(N);
+ if (FoldedVOp.getNode()) return FoldedVOp;
+ }
+
if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(),
&DAG.getTarget().Options))
return GetNegatedExpression(N0, DAG, LegalOperations);
@@ -6246,6 +6491,17 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
}
}
+ // (fneg (fmul c, x)) -> (fmul -c, x)
+ if (N0.getOpcode() == ISD::FMUL) {
+ ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
+ if (CFP1) {
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
+ N0.getOperand(0),
+ DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT,
+ N0.getOperand(1)));
+ }
+ }
+
return SDValue();
}
@@ -6255,7 +6511,7 @@ SDValue DAGCombiner::visitFCEIL(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (fceil c1) -> fceil(c1)
- if (N0CFP && VT != MVT::ppcf128)
+ if (N0CFP)
return DAG.getNode(ISD::FCEIL, N->getDebugLoc(), VT, N0);
return SDValue();
@@ -6267,7 +6523,7 @@ SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (ftrunc c1) -> ftrunc(c1)
- if (N0CFP && VT != MVT::ppcf128)
+ if (N0CFP)
return DAG.getNode(ISD::FTRUNC, N->getDebugLoc(), VT, N0);
return SDValue();
@@ -6279,7 +6535,7 @@ SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (ffloor c1) -> ffloor(c1)
- if (N0CFP && VT != MVT::ppcf128)
+ if (N0CFP)
return DAG.getNode(ISD::FFLOOR, N->getDebugLoc(), VT, N0);
return SDValue();
@@ -6290,8 +6546,13 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
EVT VT = N->getValueType(0);
+ if (VT.isVector()) {
+ SDValue FoldedVOp = SimplifyVUnaryOp(N);
+ if (FoldedVOp.getNode()) return FoldedVOp;
+ }
+
// fold (fabs c1) -> fabs(c1)
- if (N0CFP && VT != MVT::ppcf128)
+ if (N0CFP)
return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
// fold (fabs (fabs x)) -> (fabs x)
if (N0.getOpcode() == ISD::FABS)
@@ -6511,7 +6772,7 @@ static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
} else
return false;
- TargetLowering::AddrMode AM;
+ AddrMode AM;
if (N->getOpcode() == ISD::ADD) {
ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (Offset)
@@ -7138,7 +7399,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
- if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
+ if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy))
return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
@@ -7200,7 +7461,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment();
Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
- unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy);
+ unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue();
@@ -7225,6 +7486,433 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
return SDValue();
}
+/// Returns the base pointer and an integer offset from that object.
+static std::pair<SDValue, int64_t> GetPointerBaseAndOffset(SDValue Ptr) {
+ if (Ptr->getOpcode() == ISD::ADD && isa<ConstantSDNode>(Ptr->getOperand(1))) {
+ int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue();
+ SDValue Base = Ptr->getOperand(0);
+ return std::make_pair(Base, Offset);
+ }
+
+ return std::make_pair(Ptr, 0);
+}
+
+/// Holds a pointer to an LSBaseSDNode as well as information on where it
+/// is located in a sequence of memory operations connected by a chain.
+struct MemOpLink {
+ MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq):
+ MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { }
+ // Ptr to the mem node.
+ LSBaseSDNode *MemNode;
+ // Offset from the base ptr.
+ int64_t OffsetFromBase;
+ // What is the sequence number of this mem node.
+ // Lowest mem operand in the DAG starts at zero.
+ unsigned SequenceNum;
+};
+
+/// Sorts store nodes in a link according to their offset from a shared
+/// base ptr.
+struct ConsecutiveMemoryChainSorter {
+ bool operator()(MemOpLink LHS, MemOpLink RHS) {
+ return LHS.OffsetFromBase < RHS.OffsetFromBase;
+ }
+};
+
+bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
+ EVT MemVT = St->getMemoryVT();
+ int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;
+
+ // Don't merge vectors into wider inputs.
+ if (MemVT.isVector() || !MemVT.isSimple())
+ return false;
+
+ // Perform an early exit check. Do not bother looking at stored values that
+ // are not constants or loads.
+ SDValue StoredVal = St->getValue();
+ bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
+ if (!isa<ConstantSDNode>(StoredVal) && !isa<ConstantFPSDNode>(StoredVal) &&
+ !IsLoadSrc)
+ return false;
+
+ // Only look at ends of store sequences.
+ SDValue Chain = SDValue(St, 1);
+ if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
+ return false;
+
+ // This holds the base pointer and the offset in bytes from the base pointer.
+ std::pair<SDValue, int64_t> BasePtr =
+ GetPointerBaseAndOffset(St->getBasePtr());
+
+ // We must have a base and an offset.
+ if (!BasePtr.first.getNode())
+ return false;
+
+ // Do not handle stores to undef base pointers.
+ if (BasePtr.first.getOpcode() == ISD::UNDEF)
+ return false;
+
+ SmallVector<MemOpLink, 8> StoreNodes;
+ // Walk up the chain and look for nodes with offsets from the same
+ // base pointer. Stop when reaching an instruction with a different kind
+ // or instruction which has a different base pointer.
+ unsigned Seq = 0;
+ StoreSDNode *Index = St;
+ while (Index) {
+ // If the chain has more than one use, then we can't reorder the mem ops.
+ if (Index != St && !SDValue(Index, 1)->hasOneUse())
+ break;
+
+ // Find the base pointer and offset for this memory node.
+ std::pair<SDValue, int64_t> Ptr =
+ GetPointerBaseAndOffset(Index->getBasePtr());
+
+ // Check that the base pointer is the same as the original one.
+ if (Ptr.first.getNode() != BasePtr.first.getNode())
+ break;
+
+ // Check that the alignment is the same.
+ if (Index->getAlignment() != St->getAlignment())
+ break;
+
+ // The memory operands must not be volatile.
+ if (Index->isVolatile() || Index->isIndexed())
+ break;
+
+ // No truncation.
+ if (StoreSDNode *St = dyn_cast<StoreSDNode>(Index))
+ if (St->isTruncatingStore())
+ break;
+
+ // The stored memory type must be the same.
+ if (Index->getMemoryVT() != MemVT)
+ break;
+
+ // We do not allow unaligned stores because we want to prevent overriding
+ // stores.
+ if (Index->getAlignment()*8 != MemVT.getSizeInBits())
+ break;
+
+ // We found a potential memory operand to merge.
+ StoreNodes.push_back(MemOpLink(Index, Ptr.second, Seq++));
+
+ // Move up the chain to the next memory operation.
+ Index = dyn_cast<StoreSDNode>(Index->getChain().getNode());
+ }
+
+ // Check if there is anything to merge.
+ if (StoreNodes.size() < 2)
+ return false;
+
+ // Sort the memory operands according to their distance from the base pointer.
+ std::sort(StoreNodes.begin(), StoreNodes.end(),
+ ConsecutiveMemoryChainSorter());
+
+ // Scan the memory operations on the chain and find the first non-consecutive
+ // store memory address.
+ unsigned LastConsecutiveStore = 0;
+ int64_t StartAddress = StoreNodes[0].OffsetFromBase;
+ for (unsigned i=1; i<StoreNodes.size(); ++i) {
+ int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
+ if (CurrAddress - StartAddress != (ElementSizeBytes * i))
+ break;
+
+ // Mark this node as useful.
+ LastConsecutiveStore = i;
+ }
+
+ // The node with the lowest store address.
+ LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
+
+ // Store the constants into memory as one consecutive store.
+ if (!IsLoadSrc) {
+ unsigned LastLegalType = 0;
+ unsigned LastLegalVectorType = 0;
+ bool NonZero = false;
+ for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
+ StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
+ SDValue StoredVal = St->getValue();
+
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) {
+ NonZero |= !C->isNullValue();
+ } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) {
+ NonZero |= !C->getConstantFPValue()->isNullValue();
+ } else {
+ // Non constant.
+ break;
+ }
+
+ // Find a legal type for the constant store.
+ unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
+ EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ if (TLI.isTypeLegal(StoreTy))
+ LastLegalType = i+1;
+
+ // Find a legal type for the vector store.
+ EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
+ if (TLI.isTypeLegal(Ty))
+ LastLegalVectorType = i + 1;
+ }
+
+ // We only use vectors if the constant is known to be zero.
+ if (NonZero)
+ LastLegalVectorType = 0;
+
+ // Check if we found a legal integer type to store.
+ if (LastLegalType == 0 && LastLegalVectorType == 0)
+ return false;
+
+ bool UseVector = LastLegalVectorType > LastLegalType;
+ unsigned NumElem = UseVector ? LastLegalVectorType : LastLegalType;
+
+ // Make sure we have something to merge.
+ if (NumElem < 2)
+ return false;
+
+ unsigned EarliestNodeUsed = 0;
+ for (unsigned i=0; i < NumElem; ++i) {
+ // Find a chain for the new wide-store operand. Notice that some
+ // of the store nodes that we found may not be selected for inclusion
+ // in the wide store. The chain we use needs to be the chain of the
+ // earliest store node which is *used* and replaced by the wide store.
+ if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
+ EarliestNodeUsed = i;
+ }
+
+ // The earliest Node in the DAG.
+ LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
+ DebugLoc DL = StoreNodes[0].MemNode->getDebugLoc();
+
+ SDValue StoredVal;
+ if (UseVector) {
+ // Find a legal type for the vector store.
+ EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
+ assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
+ StoredVal = DAG.getConstant(0, Ty);
+ } else {
+ unsigned StoreBW = NumElem * ElementSizeBytes * 8;
+ APInt StoreInt(StoreBW, 0);
+
+ // Construct a single integer constant which is made of the smaller
+ // constant inputs.
+ bool IsLE = TLI.isLittleEndian();
+ for (unsigned i = 0; i < NumElem ; ++i) {
+      unsigned Idx = IsLE ? (NumElem - 1 - i) : i;
+      StoreSDNode *St  = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
+      SDValue Val = St->getValue();
+      StoreInt <<= ElementSizeBytes * 8;
+      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
+        StoreInt |= C->getAPIntValue().zext(StoreBW);
+      } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
+        StoreInt |= C->getValueAPF().bitcastToAPInt().zext(StoreBW);
+ } else {
+ assert(false && "Invalid constant element type");
+ }
+ }
+
+ // Create the new Load and Store operations.
+ EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ StoredVal = DAG.getConstant(StoreInt, StoreTy);
+ }
+
+ SDValue NewStore = DAG.getStore(EarliestOp->getChain(), DL, StoredVal,
+ FirstInChain->getBasePtr(),
+ FirstInChain->getPointerInfo(),
+ false, false,
+ FirstInChain->getAlignment());
+
+ // Replace the first store with the new store
+ CombineTo(EarliestOp, NewStore);
+ // Erase all other stores.
+ for (unsigned i = 0; i < NumElem ; ++i) {
+ if (StoreNodes[i].MemNode == EarliestOp)
+ continue;
+ StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
+ // ReplaceAllUsesWith will replace all uses that existed when it was
+ // called, but graph optimizations may cause new ones to appear. For
+ // example, the case in pr14333 looks like
+ //
+ // St's chain -> St -> another store -> X
+ //
+ // And the only difference from St to the other store is the chain.
+      // When we change its chain to be St's chain they become identical,
+ // get CSEed and the net result is that X is now a use of St.
+ // Since we know that St is redundant, just iterate.
+ while (!St->use_empty())
+ DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
+ removeFromWorkList(St);
+ DAG.DeleteNode(St);
+ }
+
+ return true;
+ }
+
+ // Below we handle the case of multiple consecutive stores that
+ // come from multiple consecutive loads. We merge them into a single
+ // wide load and a single wide store.
+
+ // Look for load nodes which are used by the stored values.
+ SmallVector<MemOpLink, 8> LoadNodes;
+
+ // Find acceptable loads. Loads need to have the same chain (token factor),
+ // must not be zext, volatile, indexed, and they must be consecutive.
+ SDValue LdBasePtr;
+ for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
+ StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
+ LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue());
+ if (!Ld) break;
+
+ // Loads must only have one use.
+ if (!Ld->hasNUsesOfValue(1, 0))
+ break;
+
+ // Check that the alignment is the same as the stores.
+ if (Ld->getAlignment() != St->getAlignment())
+ break;
+
+ // The memory operands must not be volatile.
+ if (Ld->isVolatile() || Ld->isIndexed())
+ break;
+
+ // We do not accept ext loads.
+ if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
+ break;
+
+ // The stored memory type must be the same.
+ if (Ld->getMemoryVT() != MemVT)
+ break;
+
+ std::pair<SDValue, int64_t> LdPtr =
+ GetPointerBaseAndOffset(Ld->getBasePtr());
+
+ // If this is not the first ptr that we check.
+ if (LdBasePtr.getNode()) {
+ // The base ptr must be the same.
+ if (LdPtr.first != LdBasePtr)
+ break;
+ } else {
+ // Check that all other base pointers are the same as this one.
+ LdBasePtr = LdPtr.first;
+ }
+
+ // We found a potential memory operand to merge.
+ LoadNodes.push_back(MemOpLink(Ld, LdPtr.second, 0));
+ }
+
+ if (LoadNodes.size() < 2)
+ return false;
+
+ // Scan the memory operations on the chain and find the first non-consecutive
+ // load memory address. These variables hold the index in the store node
+ // array.
+ unsigned LastConsecutiveLoad = 0;
+ // This variable refers to the size and not index in the array.
+ unsigned LastLegalVectorType = 0;
+ unsigned LastLegalIntegerType = 0;
+ StartAddress = LoadNodes[0].OffsetFromBase;
+ SDValue FirstChain = LoadNodes[0].MemNode->getChain();
+ for (unsigned i = 1; i < LoadNodes.size(); ++i) {
+    // All loads must share the same chain.
+ if (LoadNodes[i].MemNode->getChain() != FirstChain)
+ break;
+
+ int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
+ if (CurrAddress - StartAddress != (ElementSizeBytes * i))
+ break;
+ LastConsecutiveLoad = i;
+
+ // Find a legal type for the vector store.
+ EVT StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
+ if (TLI.isTypeLegal(StoreTy))
+ LastLegalVectorType = i + 1;
+
+ // Find a legal type for the integer store.
+ unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
+ StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ if (TLI.isTypeLegal(StoreTy))
+ LastLegalIntegerType = i + 1;
+ }
+
+ // Only use vector types if the vector type is larger than the integer type.
+ // If they are the same, use integers.
+ bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType;
+ unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType);
+
+  // We add +1 here because the LastXXX variables refer to an array location
+  // while NumElem refers to an array size.
+ unsigned NumElem = std::min(LastConsecutiveStore, LastConsecutiveLoad) + 1;
+ NumElem = std::min(LastLegalType, NumElem);
+
+ if (NumElem < 2)
+ return false;
+
+ // The earliest Node in the DAG.
+ unsigned EarliestNodeUsed = 0;
+ LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
+ for (unsigned i=1; i<NumElem; ++i) {
+ // Find a chain for the new wide-store operand. Notice that some
+ // of the store nodes that we found may not be selected for inclusion
+ // in the wide store. The chain we use needs to be the chain of the
+ // earliest store node which is *used* and replaced by the wide store.
+ if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
+ EarliestNodeUsed = i;
+ }
+
+ // Find if it is better to use vectors or integers to load and store
+ // to memory.
+ EVT JointMemOpVT;
+ if (UseVectorTy) {
+ JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
+ } else {
+ unsigned StoreBW = NumElem * ElementSizeBytes * 8;
+ JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ }
+
+ DebugLoc LoadDL = LoadNodes[0].MemNode->getDebugLoc();
+ DebugLoc StoreDL = StoreNodes[0].MemNode->getDebugLoc();
+
+ LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
+ SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL,
+ FirstLoad->getChain(),
+ FirstLoad->getBasePtr(),
+ FirstLoad->getPointerInfo(),
+ false, false, false,
+ FirstLoad->getAlignment());
+
+ SDValue NewStore = DAG.getStore(EarliestOp->getChain(), StoreDL, NewLoad,
+ FirstInChain->getBasePtr(),
+ FirstInChain->getPointerInfo(), false, false,
+ FirstInChain->getAlignment());
+
+ // Replace one of the loads with the new load.
+ LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[0].MemNode);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
+ SDValue(NewLoad.getNode(), 1));
+
+ // Remove the rest of the load chains.
+ for (unsigned i = 1; i < NumElem ; ++i) {
+ // Replace all chain users of the old load nodes with the chain of the new
+ // load node.
+ LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain());
+ }
+
+ // Replace the first store with the new store.
+ CombineTo(EarliestOp, NewStore);
+ // Erase all other stores.
+ for (unsigned i = 0; i < NumElem ; ++i) {
+ // Remove all Store nodes.
+ if (StoreNodes[i].MemNode == EarliestOp)
+ continue;
+ StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
+ removeFromWorkList(St);
+ DAG.DeleteNode(St);
+ }
+
+ return true;
+}
+
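A standalone sketch of the constant-assembly step inside MergeConsecutiveStores above: four adjacent i8 constant stores collapse into one i32 store whose value is built by shifting elements in from the highest address first on little-endian targets (hypothetical helper; the real code does this with APInt and then emits a single wide DAG store):

#include <cstdint>
#include <cstdio>

static uint32_t mergeConstantStores(const uint8_t Vals[4], bool LittleEndian) {
  uint32_t StoreInt = 0;
  for (unsigned i = 0; i < 4; ++i) {
    // Mirrors: Idx = IsLE ? (NumElem - 1 - i) : i
    unsigned Idx = LittleEndian ? (4 - 1 - i) : i;
    StoreInt <<= 8;
    StoreInt |= Vals[Idx];
  }
  return StoreInt;
}

int main() {
  uint8_t V[4] = {0x11, 0x22, 0x33, 0x44}; // stores to base+0 .. base+3
  // On a little-endian target the merged value is 0x44332211, so the i32
  // store writes bytes 11 22 33 44 back at base+0..3 -- the same memory
  // image the four narrow stores produced.
  std::printf("0x%08x\n", (unsigned)mergeConstantStores(V, true));
  return 0;
}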
SDValue DAGCombiner::visitSTORE(SDNode *N) {
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Chain = ST->getChain();
@@ -7237,7 +7925,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
ST->isUnindexed()) {
unsigned OrigAlign = ST->getAlignment();
EVT SVT = Value.getOperand(0).getValueType();
- unsigned Align = TLI.getTargetData()->
+ unsigned Align = TLI.getDataLayout()->
getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
if (Align <= OrigAlign &&
((!LegalOperations && !ST->isVolatile()) ||
@@ -7426,6 +8114,11 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
ST->getAlignment());
}
+ // Only perform this optimization before the types are legal, because we
+ // don't want to perform this optimization on every DAGCombine invocation.
+ if (!LegalTypes && MergeConsecutiveStores(ST))
+ return SDValue(N, 0);
+
return ReduceLoadOpStoreWidth(N);
}
@@ -7504,9 +8197,9 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
// We only perform this optimization before the op legalization phase because
- // we may introduce new vector instructions which are not backed by TD patterns.
- // For example on AVX, extracting elements from a wide vector without using
- // extract_subvector.
+ // we may introduce new vector instructions which are not backed by TD
+ // patterns. For example on AVX, extracting elements from a wide vector
+ // without using extract_subvector.
if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
&& ConstEltNo && !LegalOperations) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
@@ -7625,7 +8318,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// Check the resultant load doesn't need a higher alignment than the
// original load.
unsigned NewAlign =
- TLI.getTargetData()
+ TLI.getDataLayout()
->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
@@ -7690,15 +8383,21 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
return SDValue();
}
-SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
+// Simplify (build_vec (ext x)) to (bitcast (build_vec x))
+SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
+ // We perform this optimization post type-legalization because
+ // the type-legalizer often scalarizes integer-promoted vectors.
+ // Performing this optimization before may create bit-casts which
+ // will be type-legalized to complex code sequences.
+ // We perform this optimization only before the operation legalizer because we
+ // may introduce illegal operations.
+ if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
+ return SDValue();
+
unsigned NumInScalars = N->getNumOperands();
DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0);
- // A vector built entirely of undefs is undef.
- if (ISD::allOperandsUndef(N))
- return DAG.getUNDEF(VT);
-
// Check to see if this is a BUILD_VECTOR of a bunch of values
// which come from any_extend or zero_extend nodes. If so, we can create
// a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
@@ -7741,64 +8440,141 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
// In order to have valid types, all of the inputs must be extended from the
// same source type and all of the inputs must be any or zero extend.
// Scalar sizes must be a power of two.
- EVT OutScalarTy = N->getValueType(0).getScalarType();
+ EVT OutScalarTy = VT.getScalarType();
bool ValidTypes = SourceType != MVT::Other &&
isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
isPowerOf2_32(SourceType.getSizeInBits());
- // We perform this optimization post type-legalization because
- // the type-legalizer often scalarizes integer-promoted vectors.
- // Performing this optimization before may create bit-casts which
- // will be type-legalized to complex code sequences.
- // We perform this optimization only before the operation legalizer because we
- // may introduce illegal operations.
// Create a new simpler BUILD_VECTOR sequence which other optimizations can
// turn into a single shuffle instruction.
- if ((Level == AfterLegalizeVectorOps || Level == AfterLegalizeTypes) &&
- ValidTypes) {
- bool isLE = TLI.isLittleEndian();
- unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
- assert(ElemRatio > 1 && "Invalid element size ratio");
- SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
- DAG.getConstant(0, SourceType);
-
- unsigned NewBVElems = ElemRatio * N->getValueType(0).getVectorNumElements();
- SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
-
- // Populate the new build_vector
- for (unsigned i=0; i < N->getNumOperands(); ++i) {
- SDValue Cast = N->getOperand(i);
- assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
- Cast.getOpcode() == ISD::ZERO_EXTEND ||
- Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode");
- SDValue In;
- if (Cast.getOpcode() == ISD::UNDEF)
- In = DAG.getUNDEF(SourceType);
- else
- In = Cast->getOperand(0);
- unsigned Index = isLE ? (i * ElemRatio) :
- (i * ElemRatio + (ElemRatio - 1));
+ if (!ValidTypes)
+ return SDValue();
+
+ bool isLE = TLI.isLittleEndian();
+ unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
+ assert(ElemRatio > 1 && "Invalid element size ratio");
+ SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
+ DAG.getConstant(0, SourceType);
+
+ unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
+ SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
+
+ // Populate the new build_vector
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ SDValue Cast = N->getOperand(i);
+ assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
+ Cast.getOpcode() == ISD::ZERO_EXTEND ||
+ Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode");
+ SDValue In;
+ if (Cast.getOpcode() == ISD::UNDEF)
+ In = DAG.getUNDEF(SourceType);
+ else
+ In = Cast->getOperand(0);
+ unsigned Index = isLE ? (i * ElemRatio) :
+ (i * ElemRatio + (ElemRatio - 1));
+
+ assert(Index < Ops.size() && "Invalid index");
+ Ops[Index] = In;
+ }
+
+ // The type of the new BUILD_VECTOR node.
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
+ assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
+ "Invalid vector size");
+ // Check if the new vector type is legal.
+ if (!isTypeLegal(VecVT)) return SDValue();
+
+ // Make the new BUILD_VECTOR.
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], Ops.size());
+
+ // The new BUILD_VECTOR node has the potential to be further optimized.
+ AddToWorkList(BV.getNode());
+ // Bitcast to the desired type.
+ return DAG.getNode(ISD::BITCAST, dl, VT, BV);
+}
+
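+// reduceBuildVecConvertToConvertBuildVec - Turn a BUILD_VECTOR whose defined
+// elements are all [su]int_to_fp conversions from the same integer type into
+// a single [su]int_to_fp of a BUILD_VECTOR of the integer operands.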
+SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
+ EVT VT = N->getValueType(0);
+
+ unsigned NumInScalars = N->getNumOperands();
+ DebugLoc dl = N->getDebugLoc();
+
+ EVT SrcVT = MVT::Other;
+ unsigned Opcode = ISD::DELETED_NODE;
+ unsigned NumDefs = 0;
- assert(Index < Ops.size() && "Invalid index");
- Ops[Index] = In;
+ for (unsigned i = 0; i != NumInScalars; ++i) {
+ SDValue In = N->getOperand(i);
+ unsigned Opc = In.getOpcode();
+
+ if (Opc == ISD::UNDEF)
+ continue;
+
+ // All of the defined scalar values must be floats converted from integers.
+ if (Opcode == ISD::DELETED_NODE &&
+ (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) {
+ Opcode = Opc;
+ // If not supported by target, bail out.
+ if (TLI.getOperationAction(Opcode, VT) != TargetLowering::Legal &&
+ TLI.getOperationAction(Opcode, VT) != TargetLowering::Custom)
+ return SDValue();
}
+ if (Opc != Opcode)
+ return SDValue();
- // The type of the new BUILD_VECTOR node.
- EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
- assert(VecVT.getSizeInBits() == N->getValueType(0).getSizeInBits() &&
- "Invalid vector size");
- // Check if the new vector type is legal.
- if (!isTypeLegal(VecVT)) return SDValue();
+ EVT InVT = In.getOperand(0).getValueType();
- // Make the new BUILD_VECTOR.
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
- VecVT, &Ops[0], Ops.size());
+ // If the scalar values are not all of the same source type, bail out.
+ // This restriction keeps the integer BUILD_VECTOR handling simple.
+ if (SrcVT == MVT::Other)
+ SrcVT = InVT;
+ if (SrcVT != InVT)
+ return SDValue();
+ NumDefs++;
+ }
+
+ // If the vector has just one element defined, it's not worth folding it
+ // into a vectorized one.
+ if (NumDefs < 2)
+ return SDValue();
- // The new BUILD_VECTOR node has the potential to be further optimized.
- AddToWorkList(BV.getNode());
- // Bitcast to the desired type.
- return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), BV);
+ assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP)
+ && "Should only handle conversion from integer to float.");
+ assert(SrcVT != MVT::Other && "Cannot determine source type!");
+
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars);
+ SmallVector<SDValue, 8> Opnds;
+ for (unsigned i = 0; i != NumInScalars; ++i) {
+ SDValue In = N->getOperand(i);
+
+ if (In.getOpcode() == ISD::UNDEF)
+ Opnds.push_back(DAG.getUNDEF(SrcVT));
+ else
+ Opnds.push_back(In.getOperand(0));
}
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT,
+ &Opnds[0], Opnds.size());
+ AddToWorkList(BV.getNode());
+
+ return DAG.getNode(Opcode, dl, VT, BV);
+}
+
+SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
+ unsigned NumInScalars = N->getNumOperands();
+ DebugLoc dl = N->getDebugLoc();
+ EVT VT = N->getValueType(0);
+
+ // A vector built entirely of undefs is undef.
+ if (ISD::allOperandsUndef(N))
+ return DAG.getUNDEF(VT);
+
+ SDValue V = reduceBuildVecExtToExtBuildVec(N);
+ if (V.getNode())
+ return V;
+
+ V = reduceBuildVecConvertToConvertBuildVec(N);
+ if (V.getNode())
+ return V;
// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
// operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
@@ -7876,15 +8652,22 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
if (VecIn1.getValueType().getSizeInBits()*2 != VT.getSizeInBits())
return SDValue();
+ // If the input vector type has a different element type from the output
+ // vector type, bail out.
+ if (VecIn1.getValueType().getVectorElementType() !=
+ VT.getVectorElementType())
+ return SDValue();
+
// Widen the input vector by adding undef values.
- VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
+ VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
VecIn1, DAG.getUNDEF(VecIn1.getValueType()));
}
// If VecIn2 is unused then change it to undef.
VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
- // Check that we were able to transform all incoming values to the same type.
+ // Check that we were able to transform all incoming values to the same
+ // type.
if (VecIn2.getValueType() != VecIn1.getValueType() ||
VecIn1.getValueType() != VT)
return SDValue();
@@ -7897,7 +8680,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
SDValue Ops[2];
Ops[0] = VecIn1;
Ops[1] = VecIn2;
- return DAG.getVectorShuffle(VT, N->getDebugLoc(), Ops[0], Ops[1], &Mask[0]);
+ return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], &Mask[0]);
}
return SDValue();
@@ -7933,8 +8716,8 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
return SDValue();
// Only handle cases where both indexes are constants with the same type.
- ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
- ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));
+ ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));
if (InsIdx && ExtIdx &&
InsIdx->getValueType(0).getSizeInBits() <= 64 &&
@@ -7951,6 +8734,21 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
}
}
+ if (V->getOpcode() == ISD::CONCAT_VECTORS) {
+ // Combine:
+ // (extract_subvec (concat V1, V2, ...), i)
+ // Into:
+ // Vi if possible
+ // Only operand 0 is checked, as 'concat' assumes all inputs have the same type.
+ if (V->getOperand(0).getValueType() != NVT)
+ return SDValue();
+ unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned NumElems = NVT.getVectorNumElements();
+ assert((Idx % NumElems) == 0 &&
+ "IDX in concat is not a multiple of the result vector length.");
+ return V->getOperand(Idx / NumElems);
+ }
+
return SDValue();
}
@@ -8266,6 +9064,44 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
return SDValue();
}
+/// SimplifyVUnaryOp - Visit a unary vector operation, like FABS/FNEG.
+SDValue DAGCombiner::SimplifyVUnaryOp(SDNode *N) {
+ // After legalize, the target may be depending on adds and other
+ // binary ops to provide legal ways to construct constants or other
+ // things. Simplifying them may result in a loss of legality.
+ if (LegalOperations) return SDValue();
+
+ assert(N->getValueType(0).isVector() &&
+ "SimplifyVUnaryOp only works on vectors!");
+
+ SDValue N0 = N->getOperand(0);
+
+ if (N0.getOpcode() != ISD::BUILD_VECTOR)
+ return SDValue();
+
+ // Operand is a BUILD_VECTOR node, see if we can constant fold it.
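+ // E.g. (fneg (build_vector c0, c1, ...)) becomes
+ // (build_vector (fneg c0), (fneg c1), ...) if every element folds to a
+ // constant or undef.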
+ SmallVector<SDValue, 8> Ops;
+ for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
+ SDValue Op = N0.getOperand(i);
+ if (Op.getOpcode() != ISD::UNDEF &&
+ Op.getOpcode() != ISD::ConstantFP)
+ break;
+ EVT EltVT = Op.getValueType();
+ SDValue FoldOp = DAG.getNode(N->getOpcode(), N0.getDebugLoc(), EltVT, Op);
+ if (FoldOp.getOpcode() != ISD::UNDEF &&
+ FoldOp.getOpcode() != ISD::ConstantFP)
+ break;
+ Ops.push_back(FoldOp);
+ AddToWorkList(FoldOp.getNode());
+ }
+
+ if (Ops.size() != N0.getNumOperands())
+ return SDValue();
+
+ return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
+ N0.getValueType(), &Ops[0], Ops.size());
+}
+
SDValue DAGCombiner::SimplifySelect(DebugLoc DL, SDValue N0,
SDValue N1, SDValue N2){
assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");
@@ -8349,6 +9185,10 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
(RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
return false;
+ // The loads must not depend on one another.
+ if (LLD->isPredecessorOf(RLD) ||
+ RLD->isPredecessorOf(LLD))
+ return false;
Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
LLD->getBasePtr().getValueType(),
TheSelect->getOperand(0), LLD->getBasePtr(),
@@ -8468,7 +9308,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
const_cast<ConstantFP*>(TV->getConstantFPValue())
};
Type *FPTy = Elts[0]->getType();
- const TargetData &TD = *TLI.getTargetData();
+ const DataLayout &TD = *TLI.getDataLayout();
// Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
@@ -8583,34 +9423,38 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
return SDValue();
// Get a SetCC of the condition
- // FIXME: Should probably make sure that setcc is legal if we ever have a
- // target where it isn't.
- SDValue Temp, SCC;
- // cast from setcc result type to select result type
- if (LegalTypes) {
- SCC = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()),
- N0, N1, CC);
- if (N2.getValueType().bitsLT(SCC.getValueType()))
- Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(), N2.getValueType());
- else
+ // NOTE: Don't create a SETCC if it's not legal on this target.
+ if (!LegalOperations ||
+ TLI.isOperationLegal(ISD::SETCC,
+ LegalTypes ? TLI.getSetCCResultType(N0.getValueType()) : MVT::i1)) {
+ SDValue Temp, SCC;
+ // cast from setcc result type to select result type
+ if (LegalTypes) {
+ SCC = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()),
+ N0, N1, CC);
+ if (N2.getValueType().bitsLT(SCC.getValueType()))
+ Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(),
+ N2.getValueType());
+ else
+ Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
+ N2.getValueType(), SCC);
+ } else {
+ SCC = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC);
Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
N2.getValueType(), SCC);
- } else {
- SCC = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC);
- Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
- N2.getValueType(), SCC);
- }
+ }
- AddToWorkList(SCC.getNode());
- AddToWorkList(Temp.getNode());
+ AddToWorkList(SCC.getNode());
+ AddToWorkList(Temp.getNode());
- if (N2C->getAPIntValue() == 1)
- return Temp;
+ if (N2C->getAPIntValue() == 1)
+ return Temp;
- // shl setcc result by log2 n2c
- return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
- DAG.getConstant(N2C->getAPIntValue().logBase2(),
- getShiftAmountTy(Temp.getValueType())));
+ // shl setcc result by log2 n2c
+ return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
+ DAG.getConstant(N2C->getAPIntValue().logBase2(),
+ getShiftAmountTy(Temp.getValueType())));
+ }
}
// Check to see if this is the equivalent of setcc
@@ -8729,7 +9573,7 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
// to alias with anything but itself. Provides base object and offset as
// results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
- const GlobalValue *&GV, void *&CV) {
+ const GlobalValue *&GV, const void *&CV) {
// Assume it is a primitive operation.
Base = Ptr; Offset = 0; GV = 0; CV = 0;
@@ -8754,8 +9598,8 @@ static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
// for ConstantSDNodes since the same constant pool entry may be represented
// by multiple nodes with different offsets.
if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
- CV = C->isMachineConstantPoolEntry() ? (void *)C->getMachineCPVal()
- : (void *)C->getConstVal();
+ CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal()
+ : (const void *)C->getConstVal();
Offset += C->getOffset();
return false;
}
@@ -8780,7 +9624,7 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
SDValue Base1, Base2;
int64_t Offset1, Offset2;
const GlobalValue *GV1, *GV2;
- void *CV1, *CV2;
+ const void *CV1, *CV2;
bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1);
bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 683fac6..4854cf7 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -53,7 +53,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/Loads.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
@@ -1059,7 +1059,7 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo,
MFI(*FuncInfo.MF->getFrameInfo()),
MCP(*FuncInfo.MF->getConstantPool()),
TM(FuncInfo.MF->getTarget()),
- TD(*TM.getTargetData()),
+ TD(*TM.getDataLayout()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
TRI(*TM.getRegisterInfo()),
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 3e18ea7..a418290 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -29,7 +29,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
@@ -80,9 +80,9 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
+ uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
+ std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
AI->getAlignment());
TySize *= CUI->getZExtValue(); // Get total allocated size.
@@ -97,7 +97,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
cast<ArrayType>(Ty)->getElementType()->isIntegerTy(8)));
StaticAllocaMap[AI] =
MF->getFrameInfo()->CreateStackObject(TySize, Align, false,
- MayNeedSP);
+ MayNeedSP, AI);
}
for (; BB != EB; ++BB)
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 4488d27..a8381b2 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -20,7 +20,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
@@ -55,7 +55,8 @@ unsigned InstrEmitter::CountResults(SDNode *Node) {
///
/// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
/// the chain and glue. These operands may be implicit on the machine instr.
-static unsigned countOperands(SDNode *Node, unsigned &NumImpUses) {
+static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
+ unsigned &NumImpUses) {
unsigned N = Node->getNumOperands();
while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
--N;
@@ -63,7 +64,8 @@ static unsigned countOperands(SDNode *Node, unsigned &NumImpUses) {
--N; // Ignore chain if it exists.
// Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
- for (unsigned I = N; I; --I) {
+ NumImpUses = N - NumExpUses;
+ for (unsigned I = N; I > NumExpUses; --I) {
if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
continue;
if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
@@ -312,8 +314,6 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
const TargetRegisterClass *DstRC = 0;
if (IIOpNum < II->getNumOperands())
DstRC = TRI->getAllocatableClass(TII->getRegClass(*II,IIOpNum,TRI,*MF));
- assert((DstRC || (MI->isVariadic() && IIOpNum >= MCID.getNumOperands())) &&
- "Don't have operand info for this instruction!");
if (DstRC && !MRI->constrainRegClass(VReg, DstRC, MinRCSize)) {
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
@@ -390,10 +390,10 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment.
if (Align == 0) {
- Align = TM->getTargetData()->getPrefTypeAlignment(Type);
+ Align = TM->getDataLayout()->getPrefTypeAlignment(Type);
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = TM->getTargetData()->getTypeAllocSize(Type);
+ Align = TM->getDataLayout()->getTypeAllocSize(Type);
}
}
@@ -410,6 +410,7 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
ES->getTargetFlags()));
} else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateBA(BA->getBlockAddress(),
+ BA->getOffset(),
BA->getTargetFlags()));
} else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateTargetIndex(TI->getIndex(),
@@ -720,7 +721,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
const MCInstrDesc &II = TII->get(Opc);
unsigned NumResults = CountResults(Node);
unsigned NumImpUses = 0;
- unsigned NodeOperands = countOperands(Node, NumImpUses);
+ unsigned NodeOperands =
+ countOperands(Node, II.getNumOperands() - II.getNumDefs(), NumImpUses);
bool HasPhysRegOuts = NumResults > II.getNumDefs() && II.getImplicitDefs()!=0;
#ifndef NDEBUG
unsigned NumMIOperands = NodeOperands + NumResults;
@@ -870,6 +872,17 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
break;
}
+ case ISD::LIFETIME_START:
+ case ISD::LIFETIME_END: {
+ unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START) ?
+ TargetOpcode::LIFETIME_START : TargetOpcode::LIFETIME_END;
+
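+ // Operand 1 is expected to be a frame index identifying the object
+ // whose lifetime is being marked.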
+ FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Node->getOperand(1));
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
+ .addFrameIndex(FI->getIndex());
+ break;
+ }
+
case ISD::INLINEASM: {
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
@@ -884,25 +897,30 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
MI->addOperand(MachineOperand::CreateES(AsmStr));
- // Add the HasSideEffect and isAlignStack bits.
+ // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
+ // bits.
int64_t ExtraInfo =
cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
getZExtValue();
MI->addOperand(MachineOperand::CreateImm(ExtraInfo));
+ // Remember the operand index of the group flags.
+ SmallVector<unsigned, 8> GroupIdx;
+
// Add all of the operand registers to the instruction.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
- unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+ const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+ GroupIdx.push_back(MI->getNumOperands());
MI->addOperand(MachineOperand::CreateImm(Flags));
++i; // Skip the ID value.
switch (InlineAsm::getKind(Flags)) {
default: llvm_unreachable("Bad flags!");
case InlineAsm::Kind_RegDef:
- for (; NumVals; --NumVals, ++i) {
+ for (unsigned j = 0; j != NumVals; ++j, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
// FIXME: Add dead flags for physical and virtual registers defined.
// For now, mark physical register defs as implicit to help fast
@@ -913,7 +931,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
break;
case InlineAsm::Kind_RegDefEarlyClobber:
case InlineAsm::Kind_Clobber:
- for (; NumVals; --NumVals, ++i) {
+ for (unsigned j = 0; j != NumVals; ++j, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
MI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/ true,
/*isImp=*/ TargetRegisterInfo::isPhysicalRegister(Reg),
@@ -928,9 +946,20 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
case InlineAsm::Kind_Mem: // Addressing mode.
// The addressing mode has been selected, just add all of the
// operands to the machine instruction.
- for (; NumVals; --NumVals, ++i)
+ for (unsigned j = 0; j != NumVals; ++j, ++i)
AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap,
/*IsDebug=*/false, IsClone, IsCloned);
+
+ // Manually set isTied bits.
+ if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
+ unsigned DefGroup = 0;
+ if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
+ unsigned DefIdx = GroupIdx[DefGroup] + 1;
+ unsigned UseIdx = GroupIdx.back() + 1;
+ for (unsigned j = 0; j != NumVals; ++j)
+ MI->tieOperands(DefIdx + j, UseIdx + j);
+ }
+ }
break;
}
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 908ebb9..abf40b7 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -22,7 +22,7 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -718,7 +718,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node),
DAG, TLI, this);
@@ -824,7 +824,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
}
@@ -869,25 +869,24 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
- // If this is an unaligned load and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment =
- TLI.getTargetData()->getABITypeAlignment(Ty);
- if (LD->getAlignment() < ABIAlignment){
- ExpandUnalignedLoad(cast<LoadSDNode>(Node),
- DAG, TLI, RVal, RChain);
- }
- }
- break;
+ // If this is an unaligned load and the target doesn't support it,
+ // expand it.
+ if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment =
+ TLI.getDataLayout()->getABITypeAlignment(Ty);
+ if (LD->getAlignment() < ABIAlignment){
+ ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
+ }
+ }
+ break;
case TargetLowering::Custom: {
- SDValue Res = TLI.LowerOperation(RVal, DAG);
- if (Res.getNode()) {
- RVal = Res;
- RChain = Res.getValue(1);
- }
- break;
+ SDValue Res = TLI.LowerOperation(RVal, DAG);
+ if (Res.getNode()) {
+ RVal = Res;
+ RChain = Res.getValue(1);
+ }
+ break;
}
case TargetLowering::Promote: {
// Only promote a load of vector type to another.
@@ -1060,7 +1059,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
Type *Ty =
LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment =
- TLI.getTargetData()->getABITypeAlignment(Ty);
+ TLI.getDataLayout()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
ExpandUnalignedLoad(cast<LoadSDNode>(Node),
DAG, TLI, Value, Chain);
@@ -1241,6 +1240,19 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
if (Action == TargetLowering::Legal)
Action = TargetLowering::Custom;
break;
+ case ISD::DEBUGTRAP:
+ Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
+ if (Action == TargetLowering::Expand) {
+ // Replace ISD::DEBUGTRAP with ISD::TRAP.
+ SDValue NewVal;
+ NewVal = DAG.getNode(ISD::TRAP, Node->getDebugLoc(), Node->getVTList(),
+ Node->getOperand(0));
+ ReplaceNode(Node, NewVal.getNode());
+ LegalizeOp(NewVal.getNode());
+ return;
+ }
+ break;
+
default:
if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
Action = TargetLowering::Legal;
@@ -1588,26 +1600,71 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
break;
case TargetLowering::Expand: {
ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
+ ISD::CondCode InvCC = ISD::SETCC_INVALID;
unsigned Opc = 0;
switch (CCCode) {
default: llvm_unreachable("Don't know how to expand this condition!");
- case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break;
- case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break;
- case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break;
- case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break;
- case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break;
- case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = ISD::AND; break;
- case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break;
- case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break;
- case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break;
- case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break;
- case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break;
- case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break;
- // FIXME: Implement more expansions.
- }
-
- SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1);
- SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2);
+ case ISD::SETO:
+ assert(TLI.getCondCodeAction(ISD::SETOEQ, OpVT)
+ == TargetLowering::Legal
+ && "If SETO is expanded, SETOEQ must be legal!");
+ CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break;
+ case ISD::SETUO:
+ assert(TLI.getCondCodeAction(ISD::SETUNE, OpVT)
+ == TargetLowering::Legal
+ && "If SETUO is expanded, SETUNE must be legal!");
+ CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break;
+ case ISD::SETOEQ:
+ case ISD::SETOGT:
+ case ISD::SETOGE:
+ case ISD::SETOLT:
+ case ISD::SETOLE:
+ case ISD::SETONE:
+ case ISD::SETUEQ:
+ case ISD::SETUNE:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ case ISD::SETULT:
+ case ISD::SETULE:
+ // If the operands are floating point, assign and break; otherwise fall through.
+ if (!OpVT.isInteger()) {
+ // We can use the 4th bit to tell if we are the unordered
+ // or ordered version of the opcode.
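+ // E.g. SETUGT becomes (LHS SETGT RHS) | (LHS SETUO RHS), while
+ // SETOGT becomes (LHS SETGT RHS) & (LHS SETO RHS).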
+ CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
+ Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
+ CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
+ break;
+ }
+ // Fall through if we are an unsigned integer.
+ case ISD::SETLE:
+ case ISD::SETGT:
+ case ISD::SETGE:
+ case ISD::SETLT:
+ case ISD::SETNE:
+ case ISD::SETEQ:
+ InvCC = ISD::getSetCCSwappedOperands(CCCode);
+ if (TLI.getCondCodeAction(InvCC, OpVT) == TargetLowering::Expand) {
+ // We only support using the inverted operation and not a
+ // different manner of supporting expanding these cases.
+ llvm_unreachable("Don't know how to expand this condition!");
+ }
+ LHS = DAG.getSetCC(dl, VT, RHS, LHS, InvCC);
+ RHS = SDValue();
+ CC = SDValue();
+ return;
+ }
+
+ SDValue SetCC1, SetCC2;
+ if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
+ // If we aren't the ordered or unordered operation,
+ // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
+ SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1);
+ SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2);
+ } else {
+ // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS)
+ SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1);
+ SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2);
+ }
LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
RHS = SDValue();
CC = SDValue();
@@ -1626,7 +1683,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
DebugLoc dl) {
// Create the stack frame object.
unsigned SrcAlign =
- TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType().
+ TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType().
getTypeForEVT(*DAG.getContext()));
SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
@@ -1638,7 +1695,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits();
Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
- unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType);
+ unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType);
// Emit a store to the stack slot. Use a truncstore if the input value is
// later than DestVT.
@@ -2042,7 +2099,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Op0,
EVT DestVT,
DebugLoc dl) {
- if (Op0.getValueType() == MVT::i32) {
+ if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) {
// simple 32-bit [signed|unsigned] integer to float/double expansion
// Get the stack frame index of a 8 byte buffer.
@@ -2787,7 +2844,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
- DAG.getConstant(TLI.getTargetData()->
+ DAG.getConstant(TLI.getDataLayout()->
getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer
@@ -3109,6 +3166,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Tmp3 = Node->getOperand(1);
if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
(isDivRemLibcallAvailable(Node, isSigned, TLI) &&
+ // If div is legal, it's better to do the normal expansion
+ !TLI.isOperationLegalOrCustom(DivOpc, Node->getValueType(0)) &&
useDivRem(Node, isSigned, false))) {
Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
} else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
@@ -3366,7 +3425,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
EVT PTy = TLI.getPointerTy();
- const TargetData &TD = *TLI.getTargetData();
+ const DataLayout &TD = *TLI.getDataLayout();
unsigned EntrySize =
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index e393896..92dc5a9 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -1245,32 +1245,30 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
DEBUG(dbgs() << "Expand float operand: "; N->dump(&DAG); dbgs() << "\n");
SDValue Res = SDValue();
- if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType())
- == TargetLowering::Custom)
- Res = TLI.LowerOperation(SDValue(N, 0), DAG);
-
- if (Res.getNode() == 0) {
- switch (N->getOpcode()) {
- default:
- #ifndef NDEBUG
- dbgs() << "ExpandFloatOperand Op #" << OpNo << ": ";
- N->dump(&DAG); dbgs() << "\n";
- #endif
- llvm_unreachable("Do not know how to expand this operator's operand!");
-
- case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
- case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
- case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
-
- case ISD::BR_CC: Res = ExpandFloatOp_BR_CC(N); break;
- case ISD::FP_ROUND: Res = ExpandFloatOp_FP_ROUND(N); break;
- case ISD::FP_TO_SINT: Res = ExpandFloatOp_FP_TO_SINT(N); break;
- case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_UINT(N); break;
- case ISD::SELECT_CC: Res = ExpandFloatOp_SELECT_CC(N); break;
- case ISD::SETCC: Res = ExpandFloatOp_SETCC(N); break;
- case ISD::STORE: Res = ExpandFloatOp_STORE(cast<StoreSDNode>(N),
- OpNo); break;
- }
+ // See if the target wants to custom expand this node.
+ if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
+ return false;
+
+ switch (N->getOpcode()) {
+ default:
+#ifndef NDEBUG
+ dbgs() << "ExpandFloatOperand Op #" << OpNo << ": ";
+ N->dump(&DAG); dbgs() << "\n";
+#endif
+ llvm_unreachable("Do not know how to expand this operator's operand!");
+
+ case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
+ case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
+ case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
+
+ case ISD::BR_CC: Res = ExpandFloatOp_BR_CC(N); break;
+ case ISD::FP_ROUND: Res = ExpandFloatOp_FP_ROUND(N); break;
+ case ISD::FP_TO_SINT: Res = ExpandFloatOp_FP_TO_SINT(N); break;
+ case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_UINT(N); break;
+ case ISD::SELECT_CC: Res = ExpandFloatOp_SELECT_CC(N); break;
+ case ISD::SETCC: Res = ExpandFloatOp_SETCC(N); break;
+ case ISD::STORE: Res = ExpandFloatOp_STORE(cast<StoreSDNode>(N),
+ OpNo); break;
}
// If the result is null, the sub-method took care of registering results etc.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index e8e968a..a370fae 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -644,8 +644,9 @@ SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) {
EVT SmallVT = LHS.getValueType();
// To determine if the result overflowed in a larger type, we extend the
- // input to the larger type, do the multiply, then check the high bits of
- // the result to see if the overflow happened.
+ // input to the larger type, do the multiply (checking if it overflows),
+ // then also check the high bits of the result to see if overflow happened
+ // there.
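+ // E.g. for an i8 smulo promoted to, say, i32: sign-extend the operands
+ // to i32, do an i32 smulo, then report overflow if either the i32
+ // multiply overflowed or its result does not sign-extend its low 8 bits.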
if (N->getOpcode() == ISD::SMULO) {
LHS = SExtPromotedInteger(LHS);
RHS = SExtPromotedInteger(RHS);
@@ -653,24 +654,31 @@ SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) {
LHS = ZExtPromotedInteger(LHS);
RHS = ZExtPromotedInteger(RHS);
}
- SDValue Mul = DAG.getNode(ISD::MUL, DL, LHS.getValueType(), LHS, RHS);
+ SDVTList VTs = DAG.getVTList(LHS.getValueType(), N->getValueType(1));
+ SDValue Mul = DAG.getNode(N->getOpcode(), DL, VTs, LHS, RHS);
- // Overflow occurred iff the high part of the result does not
- // zero/sign-extend the low part.
+ // Overflow occurred if it occurred in the larger type, or if the high part
+ // of the result does not zero/sign-extend the low part. Check this second
+ // possibility first.
SDValue Overflow;
if (N->getOpcode() == ISD::UMULO) {
- // Unsigned overflow occurred iff the high part is non-zero.
+ // Unsigned overflow occurred if the high part is non-zero.
SDValue Hi = DAG.getNode(ISD::SRL, DL, Mul.getValueType(), Mul,
DAG.getIntPtrConstant(SmallVT.getSizeInBits()));
Overflow = DAG.getSetCC(DL, N->getValueType(1), Hi,
DAG.getConstant(0, Hi.getValueType()), ISD::SETNE);
} else {
- // Signed overflow occurred iff the high part does not sign extend the low.
+ // Signed overflow occurred if the high part does not sign extend the low.
SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Mul.getValueType(),
Mul, DAG.getValueType(SmallVT));
Overflow = DAG.getSetCC(DL, N->getValueType(1), SExt, Mul, ISD::SETNE);
}
+ // The only other way for overflow to occur is if the multiplication in the
+ // larger type itself overflowed.
+ Overflow = DAG.getNode(ISD::OR, DL, N->getValueType(1), Overflow,
+ SDValue(Mul.getNode(), 1));
+
// Use the calculated overflow everywhere.
ReplaceValueWith(SDValue(N, 1), Overflow);
return Mul;
@@ -2253,32 +2261,35 @@ void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
- Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
- EVT PtrVT = TLI.getPointerTy();
- Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
DebugLoc dl = N->getDebugLoc();
// A divide for UMULO should be faster than a function call.
if (N->getOpcode() == ISD::UMULO) {
SDValue LHS = N->getOperand(0), RHS = N->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
- SDValue MUL = DAG.getNode(ISD::MUL, DL, LHS.getValueType(), LHS, RHS);
+ SDValue MUL = DAG.getNode(ISD::MUL, dl, LHS.getValueType(), LHS, RHS);
SplitInteger(MUL, Lo, Hi);
// A divide for UMULO will be faster than a function call. Select to
// make sure we aren't using 0.
SDValue isZero = DAG.getSetCC(dl, TLI.getSetCCResultType(VT),
- RHS, DAG.getConstant(0, VT), ISD::SETNE);
+ RHS, DAG.getConstant(0, VT), ISD::SETEQ);
SDValue NotZero = DAG.getNode(ISD::SELECT, dl, VT, isZero,
DAG.getConstant(1, VT), RHS);
- SDValue DIV = DAG.getNode(ISD::UDIV, DL, LHS.getValueType(), MUL, NotZero);
- SDValue Overflow;
- Overflow = DAG.getSetCC(DL, N->getValueType(1), DIV, LHS, ISD::SETNE);
+ SDValue DIV = DAG.getNode(ISD::UDIV, dl, VT, MUL, NotZero);
+ SDValue Overflow = DAG.getSetCC(dl, N->getValueType(1), DIV, LHS,
+ ISD::SETNE);
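+ // isZero is true when RHS was zero; in that case we divided by the
+ // substituted constant 1, so force the overflow flag to false (a zero
+ // multiplier can never overflow).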
+ Overflow = DAG.getNode(ISD::SELECT, dl, N->getValueType(1), isZero,
+ DAG.getConstant(0, N->getValueType(1)),
+ Overflow);
ReplaceValueWith(SDValue(N, 1), Overflow);
return;
}
+ Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
+ EVT PtrVT = TLI.getPointerTy();
+ Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
+
// Replace this with a libcall that will check overflow.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i32)
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 39337ff..644e36e 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -15,7 +15,7 @@
#include "LegalizeTypes.h"
#include "llvm/CallingConv.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 94fc976..20b7ce6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -625,6 +625,7 @@ private:
SDValue WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N);
SDValue WidenVecRes_VSETCC(SDNode* N);
+ SDValue WidenVecRes_Ternary(SDNode *N);
SDValue WidenVecRes_Binary(SDNode *N);
SDValue WidenVecRes_Convert(SDNode *N);
SDValue WidenVecRes_POWI(SDNode *N);
@@ -633,7 +634,7 @@ private:
SDValue WidenVecRes_InregOp(SDNode *N);
// Widen Vector Operand.
- bool WidenVectorOperand(SDNode *N, unsigned ResNo);
+ bool WidenVectorOperand(SDNode *N, unsigned OpNo);
SDValue WidenVecOp_BITCAST(SDNode *N);
SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N);
SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 06f6bd6..6bcb3b2 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -20,7 +20,7 @@
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -94,14 +94,48 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
if (InVT.isVector() && OutVT.isInteger()) {
// Handle cases like i64 = BITCAST v1i64 on x86, where the operand
// is legal but the result is not.
- EVT NVT = EVT::getVectorVT(*DAG.getContext(), NOutVT, 2);
+ unsigned NumElems = 2;
+ EVT ElemVT = NOutVT;
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, NumElems);
+
+ // If <ElemVT * N> is not a legal type, try <ElemVT/2 * (N*2)>.
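+ // E.g. if v2i32 is not legal, try v4i16, then v8i8, giving up once the
+ // element type would become narrower than a byte.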
+ while (!isTypeLegal(NVT)) {
+ unsigned NewSizeInBits = ElemVT.getSizeInBits() / 2;
+ // If the element size is smaller than a byte, bail.
+ if (NewSizeInBits < 8)
+ break;
+ NumElems *= 2;
+ ElemVT = EVT::getIntegerVT(*DAG.getContext(), NewSizeInBits);
+ NVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, NumElems);
+ }
if (isTypeLegal(NVT)) {
SDValue CastInOp = DAG.getNode(ISD::BITCAST, dl, NVT, InOp);
- Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
- DAG.getIntPtrConstant(0));
- Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
- DAG.getIntPtrConstant(1));
+
+ SmallVector<SDValue, 8> Vals;
+ for (unsigned i = 0; i < NumElems; ++i)
+ Vals.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ElemVT,
+ CastInOp, DAG.getIntPtrConstant(i)));
+
+ // Build Lo, Hi pair by pairing extracted elements if needed.
+ unsigned Slot = 0;
+ for (unsigned e = Vals.size(); e - Slot > 2; Slot += 2, e += 1) {
+ // Each iteration will BUILD_PAIR two nodes and append the result until
+ // there are only two nodes left, i.e. Lo and Hi.
+ SDValue LHS = Vals[Slot];
+ SDValue RHS = Vals[Slot + 1];
+
+ if (TLI.isBigEndian())
+ std::swap(LHS, RHS);
+
+ Vals.push_back(DAG.getNode(ISD::BUILD_PAIR, dl,
+ EVT::getIntegerVT(
+ *DAG.getContext(),
+ LHS.getValueType().getSizeInBits() << 1),
+ LHS, RHS));
+ }
+ Lo = Vals[Slot++];
+ Hi = Vals[Slot++];
if (TLI.isBigEndian())
std::swap(Lo, Hi);
@@ -116,7 +150,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Create the stack frame object. Make sure it is aligned for both
// the source and expanded destination types.
unsigned Alignment =
- TLI.getTargetData()->getPrefTypeAlignment(NOutVT.
+ TLI.getDataLayout()->getPrefTypeAlignment(NOutVT.
getTypeForEVT(*DAG.getContext()));
SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 704f99b..22f8d51 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -64,6 +64,7 @@ class VectorLegalizer {
// Implement vselect in terms of XOR, AND, OR when blend is not supported
// by the target.
SDValue ExpandVSELECT(SDValue Op);
+ SDValue ExpandSELECT(SDValue Op);
SDValue ExpandLoad(SDValue Op);
SDValue ExpandStore(SDValue Op);
SDValue ExpandFNEG(SDValue Op);
@@ -220,6 +221,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::FRINT:
case ISD::FNEARBYINT:
case ISD::FFLOOR:
+ case ISD::FMA:
case ISD::SIGN_EXTEND_INREG:
QueryType = Node->getValueType(0);
break;
@@ -260,6 +262,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case TargetLowering::Expand:
if (Node->getOpcode() == ISD::VSELECT)
Result = ExpandVSELECT(Op);
+ else if (Node->getOpcode() == ISD::SELECT)
+ Result = ExpandSELECT(Op);
else if (Node->getOpcode() == ISD::UINT_TO_FP)
Result = ExpandUINT_TO_FLOAT(Op);
else if (Node->getOpcode() == ISD::FNEG)
@@ -435,6 +439,66 @@ SDValue VectorLegalizer::ExpandStore(SDValue Op) {
return TF;
}
+SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
+ // Lower a select instruction where the condition is a scalar and the
+ // operands are vectors. Lower this select to VSELECT and implement it
+ // using XOR, AND, OR. The selector bit is broadcast.
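+ // For example, (select i1 %c, v4i32 %a, v4i32 %b) is lowered roughly as
+ //   Mask = build_vector of (%c ? all-ones : 0) repeated four times
+ //   Result = (%a & Mask) | (%b & ~Mask)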
+ EVT VT = Op.getValueType();
+ DebugLoc DL = Op.getDebugLoc();
+
+ SDValue Mask = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+ SDValue Op2 = Op.getOperand(2);
+
+ assert(VT.isVector() && !Mask.getValueType().isVector()
+ && Op1.getValueType() == Op2.getValueType() && "Invalid type");
+
+ unsigned NumElem = VT.getVectorNumElements();
+
+ // If we can't even use the basic vector operations of
+ // AND, OR, XOR, we will have to scalarize the op.
+ // Notice that the operation may be 'promoted', which means that it is
+ // 'bitcast' to another type which is then handled.
+ // Also, we need to be able to construct a splat vector using BUILD_VECTOR.
+ if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::BUILD_VECTOR, VT) == TargetLowering::Expand)
+ return DAG.UnrollVectorOp(Op.getNode());
+
+ // Generate a mask operand.
+ EVT MaskTy = TLI.getSetCCResultType(VT);
+ assert(MaskTy.isVector() && "Invalid CC type");
+ assert(MaskTy.getSizeInBits() == Op1.getValueType().getSizeInBits()
+ && "Invalid mask size");
+
+ // What is the size of each element in the vector mask.
+ EVT BitTy = MaskTy.getScalarType();
+
+ Mask = DAG.getNode(ISD::SELECT, DL, BitTy, Mask,
+ DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), BitTy),
+ DAG.getConstant(0, BitTy));
+
+ // Broadcast the mask so that the entire vector is all ones or all zeros.
+ SmallVector<SDValue, 8> Ops(NumElem, Mask);
+ Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskTy, &Ops[0], Ops.size());
+
+ // Bitcast the operands to be the same type as the mask.
+ // This is needed when we select between FP types because
+ // the mask is a vector of integers.
+ Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
+ Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);
+
+ SDValue AllOnes = DAG.getConstant(
+ APInt::getAllOnesValue(BitTy.getSizeInBits()), MaskTy);
+ SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);
+
+ Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
+ Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
+ SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
+ return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
+}
+
SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
// Implement VSELECT in terms of XOR, AND, OR
// on platforms which do not support blend natively.
@@ -449,12 +513,17 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
// AND,OR,XOR, we will have to scalarize the op.
// Notice that the operation may be 'promoted' which means that it is
// 'bitcasted' to another type which is handled.
+ // This operation also isn't safe with AND, OR, XOR when the boolean
+ // type is 0/1 as we need an all-ones vector constant to mask with.
+ // FIXME: Sign extend 1 to all ones if that's legal on the target.
if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
- TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand)
+ TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
+ TLI.getBooleanContents(true) !=
+ TargetLowering::ZeroOrNegativeOneBooleanContent)
return DAG.UnrollVectorOp(Op.getNode());
- assert(VT.getSizeInBits() == Op.getOperand(1).getValueType().getSizeInBits()
+ assert(VT.getSizeInBits() == Op1.getValueType().getSizeInBits()
&& "Invalid mask size");
// Bitcast the operands to be the same type as the mask.
// This is needed when we select between FP types because
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 4709202..d51a6eb 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -21,7 +21,7 @@
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -749,7 +749,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment =
- TLI.getTargetData()->getPrefTypeAlignment(VecType);
+ TLI.getDataLayout()->getPrefTypeAlignment(VecType);
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
false, false, 0);
@@ -1366,6 +1366,9 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FTRUNC:
Res = WidenVecRes_Unary(N);
break;
+ case ISD::FMA:
+ Res = WidenVecRes_Ternary(N);
+ break;
}
// If Res is null, the sub-method took care of registering the result.
@@ -1373,6 +1376,16 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
SetWidenedVector(SDValue(N, ResNo), Res);
}
+SDValue DAGTypeLegalizer::WidenVecRes_Ternary(SDNode *N) {
+ // Ternary op widening.
+ DebugLoc dl = N->getDebugLoc();
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ SDValue InOp1 = GetWidenedVector(N->getOperand(0));
+ SDValue InOp2 = GetWidenedVector(N->getOperand(1));
+ SDValue InOp3 = GetWidenedVector(N->getOperand(2));
+ return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
+}
+
SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
// Binary op widening.
unsigned Opcode = N->getOpcode();
@@ -2069,16 +2082,20 @@ SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) {
//===----------------------------------------------------------------------===//
// Widen Vector Operand
//===----------------------------------------------------------------------===//
-bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned ResNo) {
- DEBUG(dbgs() << "Widen node operand " << ResNo << ": ";
+bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
+ DEBUG(dbgs() << "Widen node operand " << OpNo << ": ";
N->dump(&DAG);
dbgs() << "\n");
SDValue Res = SDValue();
+ // See if the target wants to custom widen this node.
+ if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
+ return false;
+
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- dbgs() << "WidenVectorOperand op #" << ResNo << ": ";
+ dbgs() << "WidenVectorOperand op #" << OpNo << ": ";
N->dump(&DAG);
dbgs() << "\n";
#endif
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeOrdering.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeOrdering.h
index f88b26d..d2269f8 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeOrdering.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeOrdering.h
@@ -28,8 +28,8 @@ class SDNode;
class SDNodeOrdering {
DenseMap<const SDNode*, unsigned> OrderMap;
- void operator=(const SDNodeOrdering&); // Do not implement.
- SDNodeOrdering(const SDNodeOrdering&); // Do not implement.
+ void operator=(const SDNodeOrdering&) LLVM_DELETED_FUNCTION;
+ SDNodeOrdering(const SDNodeOrdering&) LLVM_DELETED_FUNCTION;
public:
SDNodeOrdering() {}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index b7ce48a..2ecdd89 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -13,11 +13,12 @@
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
+#include "InstrEmitter.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallSet.h"
@@ -34,6 +35,10 @@ STATISTIC(NumPRCopies, "Number of physical copies");
static RegisterScheduler
fastDAGScheduler("fast", "Fast suboptimal list scheduling",
createFastDAGScheduler);
+static RegisterScheduler
+ linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
+ createDAGLinearizer);
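+// (The linearizer can be requested by name, e.g. -pre-RA-sched=linearize.)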
+
namespace {
/// FastPriorityQueue - A degenerate priority queue that considers
@@ -331,7 +336,9 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
}
}
if (isNewLoad) {
- AddPred(NewSU, SDep(LoadSU, SDep::Order, LoadSU->Latency));
+ SDep D(LoadSU, SDep::Barrier);
+ D.setLatency(LoadSU->Latency);
+ AddPred(NewSU, D);
}
++NumUnfolds;
@@ -407,9 +414,12 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
RemovePred(DelDeps[i].first, DelDeps[i].second);
}
-
- AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
- AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));
+ SDep FromDep(SU, SDep::Data, Reg);
+ FromDep.setLatency(SU->Latency);
+ AddPred(CopyFromSU, FromDep);
+ SDep ToDep(CopyFromSU, SDep::Data, 0);
+ ToDep.setLatency(CopyFromSU->Latency);
+ AddPred(CopyToSU, ToDep);
Copies.push_back(CopyFromSU);
Copies.push_back(CopyToSU);
@@ -586,18 +596,14 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
<< " to SU #" << Copies.front()->NodeNum << "\n");
- AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false, /*isArtificial=*/true));
+ AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
NewDef = Copies.back();
}
DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
<< " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
- AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false, /*isArtificial=*/true));
+ AddPred(NewDef, SDep(TrySU, SDep::Artificial));
TrySU->isAvailable = false;
CurSU = NewDef;
}
@@ -629,6 +635,155 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
#endif
}
+
+namespace {
+//===----------------------------------------------------------------------===//
+// ScheduleDAGLinearize - A no-op scheduler; it simply linearizes the
+// DAG in topological order.
+// IMPORTANT: this may not work for targets with physreg dependencies.
+//
+class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
+public:
+ ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}
+
+ void Schedule();
+
+ MachineBasicBlock *EmitSchedule(MachineBasicBlock::iterator &InsertPos);
+
+private:
+ std::vector<SDNode*> Sequence;
+ DenseMap<SDNode*, SDNode*> GluedMap; // Cache glue to its user
+
+ void ScheduleNode(SDNode *N);
+};
+} // end anonymous namespace
+
+void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
+ if (N->getNodeId() != 0)
+ llvm_unreachable(0);
+
+ if (!N->isMachineOpcode() &&
+ (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
+ // These nodes do not need to be translated into MIs.
+ return;
+
+ DEBUG(dbgs() << "\n*** Scheduling: ");
+ DEBUG(N->dump(DAG));
+ Sequence.push_back(N);
+
+ unsigned NumOps = N->getNumOperands();
+ if (unsigned NumLeft = NumOps) {
+ SDNode *GluedOpN = 0;
+ do {
+ const SDValue &Op = N->getOperand(NumLeft-1);
+ SDNode *OpN = Op.getNode();
+
+ if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
+ // Schedule glue operand right above N.
+ GluedOpN = OpN;
+ assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
+ OpN->setNodeId(0);
+ ScheduleNode(OpN);
+ continue;
+ }
+
+ if (OpN == GluedOpN)
+ // Glue operand is already scheduled.
+ continue;
+
+ DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
+ if (DI != GluedMap.end() && DI->second != N)
+ // Uses of a glue-producing node are counted against its glued user.
+ OpN = DI->second;
+
+ unsigned Degree = OpN->getNodeId();
+ assert(Degree > 0 && "Predecessor over-released!");
+ OpN->setNodeId(--Degree);
+ if (Degree == 0)
+ ScheduleNode(OpN);
+ } while (--NumLeft);
+ }
+}
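
ScheduleNode is a reverse topological walk: each node's id caches its outstanding-use count, and an operand is scheduled as soon as its last user releases it. A self-contained sketch of the same idea on a plain dependency graph (the Node struct and its fields are illustrative, not LLVM API):

    #include <vector>

    struct Node {
      std::vector<Node*> Ops; // operands (predecessors)
      unsigned Pending;       // unscheduled uses, like the node id above
    };

    // Record N, then release each operand; recurse when its count reaches 0.
    static void scheduleNode(Node *N, std::vector<Node*> &Seq) {
      Seq.push_back(N);
      for (Node *Op : N->Ops)
        if (--Op->Pending == 0)
          scheduleNode(Op, Seq);
    }
    // Reading Seq back-to-front yields a topological order: every node is
    // emitted only after all of its operands, which is exactly what
    // EmitSchedule below relies on.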
+
+/// findGluedUser - Find the representative use of a glue value by walking
+/// the use chain.
+static SDNode *findGluedUser(SDNode *N) {
+ while (SDNode *Glued = N->getGluedUser())
+ N = Glued;
+ return N;
+}
+
+void ScheduleDAGLinearize::Schedule() {
+ DEBUG(dbgs() << "********** DAG Linearization **********\n");
+
+ SmallVector<SDNode*, 8> Glues;
+ unsigned DAGSize = 0;
+ for (SelectionDAG::allnodes_iterator I = DAG->allnodes_begin(),
+ E = DAG->allnodes_end(); I != E; ++I) {
+ SDNode *N = I;
+
+ // Use node id to record degree.
+ unsigned Degree = N->use_size();
+ N->setNodeId(Degree);
+ unsigned NumVals = N->getNumValues();
+ if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
+ N->hasAnyUseOfValue(NumVals-1)) {
+ SDNode *User = findGluedUser(N);
+ if (User) {
+ Glues.push_back(N);
+ GluedMap.insert(std::make_pair(N, User));
+ }
+ }
+
+ if (N->isMachineOpcode() ||
+ (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
+ ++DAGSize;
+ }
+
+ for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
+ SDNode *Glue = Glues[i];
+ SDNode *GUser = GluedMap[Glue];
+ unsigned Degree = Glue->getNodeId();
+ unsigned UDegree = GUser->getNodeId();
+
+ // Glue user must be scheduled together with the glue operand. So other
+ // users of the glue operand must be treated as its users.
+ SDNode *ImmGUser = Glue->getGluedUser();
+ for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end();
+ ui != ue; ++ui)
+ if (*ui == ImmGUser)
+ --Degree;
+ GUser->setNodeId(UDegree + Degree);
+ Glue->setNodeId(1);
+ }
+
+ Sequence.reserve(DAGSize);
+ ScheduleNode(DAG->getRoot().getNode());
+}
+
+MachineBasicBlock*
+ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
+ InstrEmitter Emitter(BB, InsertPos);
+ DenseMap<SDValue, unsigned> VRBaseMap;
+
+ DEBUG({
+ dbgs() << "\n*** Final schedule ***\n";
+ });
+
+ // FIXME: Handle dbg_values.
+ unsigned NumNodes = Sequence.size();
+ for (unsigned i = 0; i != NumNodes; ++i) {
+ SDNode *N = Sequence[NumNodes-i-1];
+ DEBUG(N->dump(DAG));
+ Emitter.EmitNode(N, false, false, VRBaseMap);
+ }
+
+ DEBUG(dbgs() << '\n');
+
+ InsertPos = Emitter.getInsertPos();
+ return Emitter.getBlock();
+}
+
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
@@ -637,3 +792,8 @@ llvm::ScheduleDAGSDNodes *
llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
return new ScheduleDAGFast(*IS->MF);
}
+
+llvm::ScheduleDAGSDNodes *
+llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ return new ScheduleDAGLinearize(*IS->MF);
+}
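
createDAGLinearizer only exposes the factory; to be selectable the scheduler still has to be registered with LLVM's scheduler registry. A plausible hookup, sketched with the RegisterScheduler helper (the option name and description strings are assumptions, and the real registration may live elsewhere in this patch):

    static RegisterScheduler
      linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
                            createDAGLinearizer);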
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index bf0a437..c554569 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -22,7 +22,7 @@
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
@@ -656,6 +656,8 @@ void ScheduleDAGRRList::EmitNode(SUnit *SU) {
break;
case ISD::MERGE_VALUES:
case ISD::TokenFactor:
+ case ISD::LIFETIME_START:
+ case ISD::LIFETIME_END:
case ISD::CopyToReg:
case ISD::CopyFromReg:
case ISD::EH_LABEL:
@@ -1056,7 +1058,9 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
// Add a data dependency to reflect that NewSU reads the value defined
// by LoadSU.
- AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));
+ SDep D(LoadSU, SDep::Data, 0);
+ D.setLatency(LoadSU->Latency);
+ AddPred(NewSU, D);
if (isNewLoad)
AvailableQueue->addNode(LoadSU);
@@ -1138,17 +1142,18 @@ void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
// Avoid scheduling the def-side copy before other successors. Otherwise
// we could introduce another physreg interference on the copy and
// continue inserting copies indefinitely.
- SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false, /*isArtificial=*/true);
- AddPred(SuccSU, D);
+ AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
}
}
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
RemovePred(DelDeps[i].first, DelDeps[i].second);
- AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
- AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));
+ SDep FromDep(SU, SDep::Data, Reg);
+ FromDep.setLatency(SU->Latency);
+ AddPred(CopyFromSU, FromDep);
+ SDep ToDep(CopyFromSU, SDep::Data, 0);
+ ToDep.setLatency(CopyFromSU->Latency);
+ AddPred(CopyToSU, ToDep);
AvailableQueue->updateNode(SU);
AvailableQueue->addNode(CopyFromSU);
@@ -1357,9 +1362,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
if (!BtSU->isPending)
AvailableQueue->remove(BtSU);
}
- AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false, /*isArtificial=*/true));
+ AddPred(TrySU, SDep(BtSU, SDep::Artificial));
 // If one or more successors have been unscheduled, then the current
 // node is no longer available. Schedule a successor that's now
@@ -1411,20 +1414,14 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
<< " to SU #" << Copies.front()->NodeNum << "\n");
- AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false,
- /*isArtificial=*/true));
+ AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
NewDef = Copies.back();
}
DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
<< " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
- AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false,
- /*isArtificial=*/true));
+ AddPred(NewDef, SDep(TrySU, SDep::Artificial));
TrySU->isAvailable = false;
CurSU = NewDef;
}
@@ -1756,6 +1753,7 @@ public:
return V;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void dump(ScheduleDAG *DAG) const {
// Emulate pop() without clobbering NodeQueueIds.
std::vector<SUnit*> DumpQueue = Queue;
@@ -1766,6 +1764,7 @@ public:
SU->dump(DAG);
}
}
+#endif
};
typedef RegReductionPriorityQueue<bu_ls_rr_sort>
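
The #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) guards introduced throughout this commit compile dump routines only into asserts builds, unless a release build opts back in by defining LLVM_ENABLE_DUMP. The shape of the pattern in isolation (illustrative function, not from this patch):

    #include <vector>

    #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    // Present in asserts builds, or when LLVM_ENABLE_DUMP is defined
    // explicitly for a release build; otherwise compiled out entirely.
    static void dumpQueue(const std::vector<SUnit*> &Q, ScheduleDAG *DAG) {
      for (SUnit *SU : Q)
        SU->dump(DAG);
    }
    #endif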
@@ -1893,6 +1892,7 @@ unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
//===----------------------------------------------------------------------===//
void RegReductionPQBase::dumpRegPressure() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
E = TRI->regclass_end(); I != E; ++I) {
const TargetRegisterClass *RC = *I;
@@ -1902,6 +1902,7 @@ void RegReductionPQBase::dumpRegPressure() const {
DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
<< '\n');
}
+#endif
}
bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
@@ -2930,10 +2931,7 @@ void RegReductionPQBase::AddPseudoTwoAddrDeps() {
!scheduleDAG->IsReachable(SuccSU, SU)) {
DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
<< SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
- scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false,
- /*isArtificial=*/true));
+ scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial));
}
}
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 748668c..a197fcb 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -485,14 +485,15 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if(isChain && OpN->getOpcode() == ISD::TokenFactor)
OpLatency = 0;
- const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
- OpLatency, PhysReg);
+ SDep Dep = isChain ? SDep(OpSU, SDep::Barrier)
+ : SDep(OpSU, SDep::Data, PhysReg);
+ Dep.setLatency(OpLatency);
if (!isChain && !UnitLatencies) {
- computeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
- ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
+ computeOperandLatency(OpN, N, i, Dep);
+ ST.adjustSchedDependency(OpSU, SU, Dep);
}
- if (!SU->addPred(dep) && !dep.isCtrl() && OpSU->NumRegDefsLeft > 1) {
+ if (!SU->addPred(Dep) && !Dep.isCtrl() && OpSU->NumRegDefsLeft > 1) {
// Multiple register uses are combined in the same SUnit. For example,
// we could have a set of glued nodes with all their defs consumed by
// another set of glued nodes. Register pressure tracking sees this as
@@ -643,6 +644,7 @@ void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
}
void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
if (!SU->getNode()) {
dbgs() << "PHYS REG COPY\n";
return;
@@ -659,8 +661,10 @@ void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
dbgs() << "\n";
GluedNodes.pop_back();
}
+#endif
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGSDNodes::dumpSchedule() const {
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
if (SUnit *SU = Sequence[i])
@@ -669,6 +673,7 @@ void ScheduleDAGSDNodes::dumpSchedule() const {
dbgs() << "**** NOOP ****\n";
}
}
+#endif
#ifndef NDEBUG
/// VerifyScheduledSequence - Verify that all SUnits were scheduled and that
@@ -827,8 +832,7 @@ EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
}
SmallVector<SDNode *, 4> GluedNodes;
- for (SDNode *N = SU->getNode()->getGluedNode(); N;
- N = N->getGluedNode())
+ for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
GluedNodes.push_back(N);
while (!GluedNodes.empty()) {
SDNode *N = GluedNodes.back();
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 84e41fc..907356f 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -114,7 +114,8 @@ namespace llvm {
/// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock
/// according to the order specified in Sequence.
///
- MachineBasicBlock *EmitSchedule(MachineBasicBlock::iterator &InsertPos);
+ virtual MachineBasicBlock*
+ EmitSchedule(MachineBasicBlock::iterator &InsertPos);
virtual void dumpNode(const SUnit *SU) const;
@@ -158,6 +159,12 @@ namespace llvm {
void InitNodeNumDefs();
};
+ protected:
+ /// forceUnitLatencies - Return true if all scheduling edges should be given
+ /// a latency value of one. The default is to return false; schedulers may
+ /// override this as needed.
+ virtual bool forceUnitLatencies() const { return false; }
+
private:
/// ClusterNeighboringLoads - Cluster loads from "near" addresses into
/// combined SUnits.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
index c851291..30f03ac 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
@@ -25,7 +25,7 @@
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index f4fe892..f000ce3 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -29,7 +29,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetOptions.h"
@@ -91,11 +91,6 @@ bool ConstantFPSDNode::isValueValidForType(EVT VT,
const APFloat& Val) {
assert(VT.isFloatingPoint() && "Can only convert between FP types");
- // PPC long double cannot be converted to any other type.
- if (VT == MVT::ppcf128 ||
- &Val.getSemantics() == &APFloat::PPCDoubleDouble)
- return false;
-
// convert modifies in place, so make a copy.
APFloat Val2 = APFloat(Val);
bool losesInfo;
@@ -136,13 +131,11 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) {
// constants are.
SDValue NotZero = N->getOperand(i);
unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
- if (isa<ConstantSDNode>(NotZero)) {
- if (cast<ConstantSDNode>(NotZero)->getAPIntValue().countTrailingOnes() <
- EltSize)
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
+ if (CN->getAPIntValue().countTrailingOnes() < EltSize)
return false;
- } else if (isa<ConstantFPSDNode>(NotZero)) {
- if (cast<ConstantFPSDNode>(NotZero)->getValueAPF()
- .bitcastToAPInt().countTrailingOnes() < EltSize)
+ } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
+ if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
return false;
} else
return false;
@@ -179,11 +172,11 @@ bool ISD::isBuildVectorAllZeros(const SDNode *N) {
// Do not accept build_vectors that aren't all constants or which have non-0
// elements.
SDValue Zero = N->getOperand(i);
- if (isa<ConstantSDNode>(Zero)) {
- if (!cast<ConstantSDNode>(Zero)->isNullValue())
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
+ if (!CN->isNullValue())
return false;
- } else if (isa<ConstantFPSDNode>(Zero)) {
- if (!cast<ConstantFPSDNode>(Zero)->getValueAPF().isPosZero())
+ } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
+ if (!CFPN->getValueAPF().isPosZero())
return false;
} else
return false;
@@ -494,8 +487,10 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
}
case ISD::TargetBlockAddress:
case ISD::BlockAddress: {
- ID.AddPointer(cast<BlockAddressSDNode>(N)->getBlockAddress());
- ID.AddInteger(cast<BlockAddressSDNode>(N)->getTargetFlags());
+ const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
+ ID.AddPointer(BA->getBlockAddress());
+ ID.AddInteger(BA->getOffset());
+ ID.AddInteger(BA->getTargetFlags());
break;
}
} // end switch (N->getOpcode())
@@ -883,7 +878,7 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext());
- return TLI.getTargetData()->getABITypeAlignment(Ty);
+ return TLI.getDataLayout()->getABITypeAlignment(Ty);
}
// EntryNode could meaningfully have debug info if we can find it...
@@ -1097,10 +1092,9 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL,
"Cannot set target flags on target-independent globals");
// Truncate (with sign-extension) the offset value to the pointer size.
- EVT PTy = TLI.getPointerTy();
- unsigned BitWidth = PTy.getSizeInBits();
+ unsigned BitWidth = TLI.getPointerTy().getSizeInBits();
if (BitWidth < 64)
- Offset = (Offset << (64 - BitWidth) >> (64 - BitWidth));
+ Offset = SignExtend64(Offset, BitWidth);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
if (!GVar) {
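
SignExtend64(Offset, BitWidth) replaces the open-coded shift pair deleted above; both sign-extend the low BitWidth bits of the offset to 64 bits. A self-contained equivalent (the function name here is illustrative; the real helper lives in MathExtras.h):

    #include <cstdint>

    // Sign-extend the low B bits of X to a full 64-bit signed value.
    inline int64_t signExtend64(uint64_t X, unsigned B) {
      return int64_t(X << (64 - B)) >> (64 - B);
    }
    // signExtend64(0x80000000u, 32) == -2147483648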
@@ -1174,7 +1168,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
+ Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@@ -1201,7 +1195,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
+ Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@@ -1471,6 +1465,7 @@ SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
+ int64_t Offset,
bool isTarget,
unsigned char TargetFlags) {
unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
@@ -1478,12 +1473,14 @@ SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
ID.AddPointer(BA);
+ ID.AddInteger(Offset);
ID.AddInteger(TargetFlags);
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, TargetFlags);
+ SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
+ TargetFlags);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1542,7 +1539,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
unsigned ByteSize = VT.getStoreSize();
Type *Ty = VT.getTypeForEVT(*getContext());
unsigned StackAlign =
- std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);
+ std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
return getFrameIndex(FrameIdx, TLI.getPointerTy());
@@ -1555,7 +1552,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
VT2.getStoreSizeInBits())/8;
Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext());
- const TargetData *TD = TLI.getTargetData();
+ const DataLayout *TD = TLI.getDataLayout();
unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
TD->getPrefTypeAlignment(Ty2));
@@ -1610,10 +1607,6 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
}
if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
- // No compile time operations on this type yet.
- if (N1C->getValueType(0) == MVT::ppcf128)
- return SDValue();
-
APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
switch (Cond) {
default: break;
@@ -2445,8 +2438,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: {
- // No compile time operations on ppcf128.
- if (VT == MVT::ppcf128) break;
APFloat apf(APInt::getNullValue(VT.getSizeInBits()));
(void)apf.convertFromAPInt(Val,
Opcode==ISD::SINT_TO_FP,
@@ -2455,9 +2446,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
}
case ISD::BITCAST:
if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
- return getConstantFP(Val.bitsToFloat(), VT);
+ return getConstantFP(APFloat(Val), VT);
else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
- return getConstantFP(Val.bitsToDouble(), VT);
+ return getConstantFP(APFloat(Val), VT);
break;
case ISD::BSWAP:
return getConstant(Val.byteSwap(), VT);
@@ -2475,61 +2466,59 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
// Constant fold unary operations with a floating point constant operand.
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
APFloat V = C->getValueAPF(); // make copy
- if (VT != MVT::ppcf128 && Operand.getValueType() != MVT::ppcf128) {
- switch (Opcode) {
- case ISD::FNEG:
- V.changeSign();
+ switch (Opcode) {
+ case ISD::FNEG:
+ V.changeSign();
+ return getConstantFP(V, VT);
+ case ISD::FABS:
+ V.clearSign();
+ return getConstantFP(V, VT);
+ case ISD::FCEIL: {
+ APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
+ if (fs == APFloat::opOK || fs == APFloat::opInexact)
return getConstantFP(V, VT);
- case ISD::FABS:
- V.clearSign();
+ break;
+ }
+ case ISD::FTRUNC: {
+ APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
+ if (fs == APFloat::opOK || fs == APFloat::opInexact)
return getConstantFP(V, VT);
- case ISD::FCEIL: {
- APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
- if (fs == APFloat::opOK || fs == APFloat::opInexact)
- return getConstantFP(V, VT);
- break;
- }
- case ISD::FTRUNC: {
- APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
- if (fs == APFloat::opOK || fs == APFloat::opInexact)
- return getConstantFP(V, VT);
- break;
- }
- case ISD::FFLOOR: {
- APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
- if (fs == APFloat::opOK || fs == APFloat::opInexact)
- return getConstantFP(V, VT);
- break;
- }
- case ISD::FP_EXTEND: {
- bool ignored;
- // This can return overflow, underflow, or inexact; we don't care.
- // FIXME need to be more flexible about rounding mode.
- (void)V.convert(*EVTToAPFloatSemantics(VT),
- APFloat::rmNearestTiesToEven, &ignored);
+ break;
+ }
+ case ISD::FFLOOR: {
+ APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
+ if (fs == APFloat::opOK || fs == APFloat::opInexact)
return getConstantFP(V, VT);
- }
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT: {
- integerPart x[2];
- bool ignored;
- assert(integerPartWidth >= 64);
- // FIXME need to be more flexible about rounding mode.
- APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
- Opcode==ISD::FP_TO_SINT,
- APFloat::rmTowardZero, &ignored);
- if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
- break;
- APInt api(VT.getSizeInBits(), x);
- return getConstant(api, VT);
- }
- case ISD::BITCAST:
- if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
- return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
- else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
- return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
+ break;
+ }
+ case ISD::FP_EXTEND: {
+ bool ignored;
+ // This can return overflow, underflow, or inexact; we don't care.
+ // FIXME need to be more flexible about rounding mode.
+ (void)V.convert(*EVTToAPFloatSemantics(VT),
+ APFloat::rmNearestTiesToEven, &ignored);
+ return getConstantFP(V, VT);
+ }
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT: {
+ integerPart x[2];
+ bool ignored;
+ assert(integerPartWidth >= 64);
+ // FIXME need to be more flexible about rounding mode.
+ APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
+ Opcode==ISD::FP_TO_SINT,
+ APFloat::rmTowardZero, &ignored);
+ if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
break;
- }
+ APInt api(VT.getSizeInBits(), x);
+ return getConstant(api, VT);
+ }
+ case ISD::BITCAST:
+ if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
+ return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
+ else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
+ return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
+ break;
}
}
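
Dropping the ppcf128 guard means the unary constant folds above now run for every floating-point type. A usage sketch of the roundToIntegral-based folding, mirroring the FCEIL case (values chosen for illustration):

    #include "llvm/ADT/APFloat.h"
    #include <cassert>
    using llvm::APFloat;

    static void foldCeilExample() {
      // Fold fceil(2.5) -> 3.0 the same way the FCEIL case above does.
      APFloat V(2.5);
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        assert(V.convertToDouble() == 3.0); // fold succeeded; reuse V
    }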
@@ -2817,6 +2806,24 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
if (CFP->getValueAPF().isZero())
return N1;
+ } else if (Opcode == ISD::FMUL) {
+ ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
+ SDValue V = N2;
+
+ // If the first operand isn't the constant, try the second
+ if (!CFP) {
+ CFP = dyn_cast<ConstantFPSDNode>(N2);
+ V = N1;
+ }
+
+ if (CFP) {
+ // 0*x --> 0
+ if (CFP->isZero())
+ return SDValue(CFP,0);
+ // 1*x --> x
+ if (CFP->isExactlyValue(1.0))
+ return V;
+ }
}
}
assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
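
One caution on the new FMUL folds: 1.0*x -> x is always exact, but 0.0*x -> 0.0 is not IEEE-754-safe in general, since x may be NaN, an infinity, or negative (where the exact product is -0.0). A standalone demonstration of the hazard:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%g %g %g\n",
                  0.0 * -1.0,          // -0, not +0
                  0.0 * std::nan(""),  // nan
                  0.0 * HUGE_VAL);     // nan: 0 * inf is invalid
      return 0;
    }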
@@ -2935,17 +2942,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// expanding large vector constants.
if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
SDValue Elt = N1.getOperand(N2C->getZExtValue());
- EVT VEltTy = N1.getValueType().getVectorElementType();
- if (Elt.getValueType() != VEltTy) {
+
+ if (VT != Elt.getValueType())
// If the vector element type is not legal, the BUILD_VECTOR operands
- // are promoted and implicitly truncated. Make that explicit here.
- Elt = getNode(ISD::TRUNCATE, DL, VEltTy, Elt);
- }
- if (VT != VEltTy) {
- // If the vector element type is not legal, the EXTRACT_VECTOR_ELT
- // result is implicitly extended.
- Elt = getNode(ISD::ANY_EXTEND, DL, VT, Elt);
- }
+ // are promoted and implicitly truncated, and the result implicitly
+ // extended. Make that explicit here.
+ Elt = getAnyExtOrTrunc(Elt, DL, VT);
+
return Elt;
}
@@ -3036,7 +3039,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
 // Canonicalize constant to RHS if commutative
std::swap(N1CFP, N2CFP);
std::swap(N1, N2);
- } else if (N2CFP && VT != MVT::ppcf128) {
+ } else if (N2CFP) {
APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
APFloat::opStatus s;
switch (Opcode) {
@@ -3435,7 +3438,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
DAG.getMachineFunction());
if (VT == MVT::Other) {
- if (DstAlign >= TLI.getTargetData()->getPointerPrefAlignment() ||
+ if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
TLI.allowsUnalignedMemoryAccesses(VT)) {
VT = TLI.getPointerTy();
} else {
@@ -3503,7 +3506,9 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
bool DstAlignCanChange = false;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+ bool OptSize =
+ MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
@@ -3523,7 +3528,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
- unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@@ -3596,7 +3601,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
bool DstAlignCanChange = false;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+ bool OptSize = MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
@@ -3612,7 +3618,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
- unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@@ -3674,7 +3680,8 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
bool DstAlignCanChange = false;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+ bool OptSize = MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
@@ -3687,7 +3694,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
- unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@@ -3781,7 +3788,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
+ Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
@@ -3836,7 +3843,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
+ Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
@@ -3885,7 +3892,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
return Result;
// Emit a library call.
- Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
+ Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
@@ -3923,17 +3930,21 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
+ SynchronizationScope SynchScope) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
MachineFunction &MF = getMachineFunction();
- unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+ // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
// For now, atomics are considered to be volatile always.
// FIXME: Volatile isn't really correct; we should keep track of atomic
// orderings in the memoperand.
- Flags |= MachineMemOperand::MOVolatile;
+ unsigned Flags = MachineMemOperand::MOVolatile;
+ if (Opcode != ISD::ATOMIC_STORE)
+ Flags |= MachineMemOperand::MOLoad;
+ if (Opcode != ISD::ATOMIC_LOAD)
+ Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
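
All three getAtomic overloads in this file now derive their memory-operand flags the same way: always volatile (for now), plus MOLoad unless the node is a pure store, and MOStore unless it is a pure load. Factored into a hypothetical helper (not part of this patch):

    // Hypothetical helper: memory-operand flags for an atomic opcode.
    static unsigned getAtomicMMOFlags(unsigned Opcode) {
      unsigned Flags = MachineMemOperand::MOVolatile; // FIXME: track ordering
      if (Opcode != ISD::ATOMIC_STORE)
        Flags |= MachineMemOperand::MOLoad;  // everything but a store loads
      if (Opcode != ISD::ATOMIC_LOAD)
        Flags |= MachineMemOperand::MOStore; // everything but a load stores
      return Flags;
    }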
@@ -3983,17 +3994,17 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
Alignment = getEVTAlignment(MemVT);
MachineFunction &MF = getMachineFunction();
- // A monotonic store does not load; a release store "loads" in the sense
- // that other stores cannot be sunk past it.
+ // An atomic store does not load. An atomic load does not store.
// (An atomicrmw obviously both loads and stores.)
- unsigned Flags = MachineMemOperand::MOStore;
- if (Opcode != ISD::ATOMIC_STORE || Ordering > Monotonic)
- Flags |= MachineMemOperand::MOLoad;
-
- // For now, atomics are considered to be volatile always.
+ // For now, atomics are considered to be volatile always, and they are
+ // chained as such.
// FIXME: Volatile isn't really correct; we should keep track of atomic
// orderings in the memoperand.
- Flags |= MachineMemOperand::MOVolatile;
+ unsigned Flags = MachineMemOperand::MOVolatile;
+ if (Opcode != ISD::ATOMIC_STORE)
+ Flags |= MachineMemOperand::MOLoad;
+ if (Opcode != ISD::ATOMIC_LOAD)
+ Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
@@ -4056,16 +4067,17 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
Alignment = getEVTAlignment(MemVT);
MachineFunction &MF = getMachineFunction();
- // A monotonic load does not store; an acquire load "stores" in the sense
- // that other loads cannot be hoisted past it.
- unsigned Flags = MachineMemOperand::MOLoad;
- if (Ordering > Monotonic)
- Flags |= MachineMemOperand::MOStore;
-
- // For now, atomics are considered to be volatile always.
+ // An atomic store does not load. An atomic load does not store.
+ // (An atomicrmw obviously both loads and stores.)
+ // For now, atomics are considered to be volatile always, and they are
+ // chained as such.
// FIXME: Volatile isn't really correct; we should keep track of atomic
// orderings in the memoperand.
- Flags |= MachineMemOperand::MOVolatile;
+ unsigned Flags = MachineMemOperand::MOVolatile;
+ if (Opcode != ISD::ATOMIC_STORE)
+ Flags |= MachineMemOperand::MOLoad;
+ if (Opcode != ISD::ATOMIC_LOAD)
+ Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
@@ -4157,6 +4169,8 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
assert((Opcode == ISD::INTRINSIC_VOID ||
Opcode == ISD::INTRINSIC_W_CHAIN ||
Opcode == ISD::PREFETCH ||
+ Opcode == ISD::LIFETIME_START ||
+ Opcode == ISD::LIFETIME_END ||
(Opcode <= INT_MAX &&
(int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
"Opcode is not a memory-accessing opcode!");
@@ -4226,7 +4240,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
bool isVolatile, bool isNonTemporal, bool isInvariant,
unsigned Alignment, const MDNode *TBAAInfo,
const MDNode *Ranges) {
- assert(Chain.getValueType() == MVT::Other &&
+ assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(VT);
@@ -4284,7 +4298,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
ID.AddInteger(MemVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
- MMO->isNonTemporal(),
+ MMO->isNonTemporal(),
MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = 0;
@@ -4303,7 +4317,7 @@ SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo,
bool isVolatile, bool isNonTemporal,
- bool isInvariant, unsigned Alignment,
+ bool isInvariant, unsigned Alignment,
const MDNode *TBAAInfo,
const MDNode *Ranges) {
SDValue Undef = getUNDEF(Ptr.getValueType());
@@ -4332,7 +4346,7 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
 "Load is already an indexed load!");
return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
LD->getChain(), Base, Offset, LD->getPointerInfo(),
- LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
+ LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
false, LD->getAlignment());
}
@@ -4340,7 +4354,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue Ptr, MachinePointerInfo PtrInfo,
bool isVolatile, bool isNonTemporal,
unsigned Alignment, const MDNode *TBAAInfo) {
- assert(Chain.getValueType() == MVT::Other &&
+ assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(Val.getValueType());
@@ -4365,7 +4379,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue Ptr, MachineMemOperand *MMO) {
- assert(Chain.getValueType() == MVT::Other &&
+ assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
EVT VT = Val.getValueType();
SDVTList VTs = getVTList(MVT::Other);
@@ -4394,7 +4408,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
EVT SVT,bool isVolatile, bool isNonTemporal,
unsigned Alignment,
const MDNode *TBAAInfo) {
- assert(Chain.getValueType() == MVT::Other &&
+ assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(SVT);
@@ -4421,7 +4435,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
MachineMemOperand *MMO) {
EVT VT = Val.getValueType();
- assert(Chain.getValueType() == MVT::Other &&
+ assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (VT == SVT)
return getStore(Chain, dl, Val, Ptr, MMO);
@@ -6074,7 +6088,7 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
- TLI.getTargetData());
+ TLI.getDataLayout());
unsigned AlignBits = KnownZero.countTrailingOnes();
unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
if (Align)
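
InferPtrAlignment turns known-zero low bits into an alignment: if the low k bits of an address are provably zero, the pointer is 2^k-aligned, with k clamped to 31 so the shift stays in range. The arithmetic in isolation (illustrative helper):

    #include <cstdint>

    // If the low K bits of Addr are zero, Addr is (1u << K)-aligned.
    unsigned alignmentFromAddress(uint64_t Addr) {
      unsigned K = 0;
      while (K < 31 && ((Addr >> K) & 1) == 0)
        ++K;
      return Addr ? (1u << K) : 0; // all-zero address proves nothing useful
    }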
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ba5bd79..3fbf7c2 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DebugInfo.h"
@@ -43,7 +44,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
@@ -88,7 +89,7 @@ static const unsigned MaxParallelChains = 64;
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
const SDValue *Parts, unsigned NumParts,
- EVT PartVT, EVT ValueVT);
+ EVT PartVT, EVT ValueVT, const Value *V);
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
@@ -98,9 +99,11 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
const SDValue *Parts,
unsigned NumParts, EVT PartVT, EVT ValueVT,
+ const Value *V,
ISD::NodeType AssertOp = ISD::DELETED_NODE) {
if (ValueVT.isVector())
- return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT);
+ return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
+ PartVT, ValueVT, V);
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -124,9 +127,9 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
if (RoundParts > 2) {
Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
- PartVT, HalfVT);
+ PartVT, HalfVT, V);
Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
- RoundParts / 2, PartVT, HalfVT);
+ RoundParts / 2, PartVT, HalfVT, V);
} else {
Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
@@ -142,7 +145,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
unsigned OddParts = NumParts - RoundParts;
EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
Hi = getCopyFromParts(DAG, DL,
- Parts + RoundParts, OddParts, PartVT, OddVT);
+ Parts + RoundParts, OddParts, PartVT, OddVT, V);
// Combine the round and odd parts.
Lo = Val;
@@ -171,7 +174,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
!PartVT.isVector() && "Unexpected split");
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
- Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT);
+ Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
}
}
@@ -209,14 +212,14 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
llvm_unreachable("Unknown mismatch!");
}
-/// getCopyFromParts - Create a value that contains the specified legal parts
-/// combined into the value they represent. If the parts combine to a type
-/// larger then ValueVT then AssertOp can be used to specify whether the extra
-/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
-/// (ISD::AssertSext).
+/// getCopyFromPartsVector - Create a value that contains the specified legal
+/// parts combined into the value they represent. If the parts combine to a
+/// type larger than ValueVT then AssertOp can be used to specify whether the
+/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
+/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
const SDValue *Parts, unsigned NumParts,
- EVT PartVT, EVT ValueVT) {
+ EVT PartVT, EVT ValueVT, const Value *V) {
assert(ValueVT.isVector() && "Not a vector value");
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -242,7 +245,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
- PartVT, IntermediateVT);
+ PartVT, IntermediateVT, V);
} else if (NumParts > 0) {
// If the intermediate type was expanded, build the intermediate
// operands from the parts.
@@ -251,7 +254,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
- PartVT, IntermediateVT);
+ PartVT, IntermediateVT, V);
}
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
@@ -299,8 +302,19 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
// Handle cases such as i8 -> <1 x i1>
- assert(ValueVT.getVectorNumElements() == 1 &&
- "Only trivial scalar-to-vector conversions should get here!");
+ if (ValueVT.getVectorNumElements() != 1) {
+ LLVMContext &Ctx = *DAG.getContext();
+ Twine ErrMsg("non-trivial scalar-to-vector conversion");
+ if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
+ if (const CallInst *CI = dyn_cast<CallInst>(I))
+ if (isa<InlineAsm>(CI->getCalledValue()))
+ ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
+ Ctx.emitError(I, ErrMsg);
+ } else {
+ Ctx.emitError(ErrMsg);
+ }
+ report_fatal_error("Cannot handle scalar-to-vector conversion!");
+ }
if (ValueVT.getVectorNumElements() == 1 &&
ValueVT.getVectorElementType() != PartVT) {
@@ -312,25 +326,22 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
}
-
-
-
static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc dl,
SDValue Val, SDValue *Parts, unsigned NumParts,
- EVT PartVT);
+ EVT PartVT, const Value *V);
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
- EVT PartVT,
+ EVT PartVT, const Value *V,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
EVT ValueVT = Val.getValueType();
// Handle the vector case separately.
if (ValueVT.isVector())
- return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT);
+ return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned PartBits = PartVT.getSizeInBits();
@@ -382,7 +393,19 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
"Failed to tile the value with PartVT!");
if (NumParts == 1) {
- assert(PartVT == ValueVT && "Type conversion failed!");
+ if (PartVT != ValueVT) {
+ LLVMContext &Ctx = *DAG.getContext();
+ Twine ErrMsg("scalar-to-vector conversion failed");
+ if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
+ if (const CallInst *CI = dyn_cast<CallInst>(I))
+ if (isa<InlineAsm>(CI->getCalledValue()))
+ ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
+ Ctx.emitError(I, ErrMsg);
+ } else {
+ Ctx.emitError(ErrMsg);
+ }
+ }
+
Parts[0] = Val;
return;
}
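
One caution about the diagnostic pattern in this hunk (and in the getCopyFromPartsVector hunk above): Twine is a non-owning view meant to live only for the duration of a call, so rebinding ErrMsg with `ErrMsg = ErrMsg + "..."` leaves it referring to a destroyed temporary. A safer sketch accumulates into an owning string first (LooksLikeInlineAsmCall is an assumed, caller-provided condition):

    #include <string>

    std::string Msg = "scalar-to-vector conversion failed";
    if (LooksLikeInlineAsmCall) // assumption: the inline-asm test from above
      Msg += ", possible invalid constraint for vector type";
    Ctx.emitError(I, Msg); // the Twine is built at the call and not stored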
@@ -397,7 +420,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
unsigned OddParts = NumParts - RoundParts;
SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
DAG.getIntPtrConstant(RoundBits));
- getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT);
+ getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
if (TLI.isBigEndian())
// The odd parts were reversed by getCopyToParts - unreverse them.
@@ -443,7 +466,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
- EVT PartVT) {
+ EVT PartVT, const Value *V) {
EVT ValueVT = Val.getValueType();
assert(ValueVT.isVector() && "Not a vector");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -529,7 +552,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
// If the register was not expanded, promote or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
- getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT);
+ getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
} else if (NumParts > 0) {
 // If the intermediate type was expanded, split each value into
// legal parts.
@@ -537,13 +560,10 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
- getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT);
+ getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
}
}
-
-
-
namespace {
/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information
@@ -621,14 +641,15 @@ namespace {
/// If the Flag pointer is NULL, no flag is used.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const;
+ SDValue &Chain, SDValue *Flag,
+ const Value *V = 0) const;
/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const;
+ SDValue &Chain, SDValue *Flag, const Value *V) const;
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list. This adds the code marker, matching input operand index
@@ -647,7 +668,8 @@ namespace {
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
FunctionLoweringInfo &FuncInfo,
DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const {
+ SDValue &Chain, SDValue *Flag,
+ const Value *V) const {
// A Value with type {} or [0 x %t] needs no registers.
if (ValueVTs.empty())
return SDValue();
@@ -721,7 +743,7 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
}
Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
- NumRegs, RegisterVT, ValueVT);
+ NumRegs, RegisterVT, ValueVT, V);
Part += NumRegs;
Parts.clear();
}
@@ -736,7 +758,8 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const {
+ SDValue &Chain, SDValue *Flag,
+ const Value *V) const {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Get the list of the values's legal parts.
@@ -748,7 +771,7 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
EVT RegisterVT = RegVTs[Value];
getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
- &Parts[Part], NumParts, RegisterVT);
+ &Parts[Part], NumParts, RegisterVT, V);
Part += NumParts;
}
@@ -824,7 +847,8 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
AA = &aa;
GFI = gfi;
LibInfo = li;
- TD = DAG.getTarget().getTargetData();
+ TD = DAG.getTarget().getDataLayout();
+ Context = DAG.getContext();
LPadToCallSiteMap.clear();
}
@@ -992,7 +1016,7 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
unsigned InReg = It->second;
RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
SDValue Chain = DAG.getEntryNode();
- N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
+ N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL, V);
resolveDanglingDebugInfo(V, N);
return N;
}
@@ -1147,7 +1171,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
SDValue Chain = DAG.getEntryNode();
- return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
+ return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL, V);
}
llvm_unreachable("Can't get register for value!");
@@ -1203,9 +1227,9 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
const Function *F = I.getParent()->getParent();
- if (F->paramHasAttr(0, Attribute::SExt))
+ if (F->getRetAttributes().hasAttribute(Attributes::SExt))
ExtendKind = ISD::SIGN_EXTEND;
- else if (F->paramHasAttr(0, Attribute::ZExt))
+ else if (F->getRetAttributes().hasAttribute(Attributes::ZExt))
ExtendKind = ISD::ZERO_EXTEND;
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
@@ -1216,11 +1240,11 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
SmallVector<SDValue, 4> Parts(NumParts);
getCopyToParts(DAG, getCurDebugLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
- &Parts[0], NumParts, PartVT, ExtendKind);
+ &Parts[0], NumParts, PartVT, &I, ExtendKind);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
- if (F->paramHasAttr(0, Attribute::InReg))
+ if (F->getRetAttributes().hasAttribute(Attributes::InReg))
Flags.setInReg();
// Propagate extension type if any
@@ -1231,7 +1255,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
for (unsigned i = 0; i < NumParts; ++i) {
Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
- /*isfixed=*/true));
+ /*isfixed=*/true, 0, 0));
OutVals.push_back(Parts[i]);
}
}
@@ -1601,7 +1625,10 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
// Update successor info
addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
- addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
+ // TrueBB and FalseBB are always different unless the incoming IR is
+ // degenerate. In practice that only happens when running llc on hand-written IR.
+ if (CB.TrueBB != CB.FalseBB)
+ addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
@@ -1762,6 +1789,7 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
/// visitBitTestCase - this function produces one "bit test"
void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
MachineBasicBlock* NextMBB,
+ uint32_t BranchWeightToNext,
unsigned Reg,
BitTestCase &B,
MachineBasicBlock *SwitchBB) {
@@ -1799,8 +1827,10 @@ void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
ISD::SETNE);
}
- addSuccessorWithWeight(SwitchBB, B.TargetBB);
- addSuccessorWithWeight(SwitchBB, NextMBB);
+ // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
+ addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
+ // The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
+ addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
MVT::Other, getControlRoot(),
@@ -1923,6 +1953,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
+ BranchProbabilityInfo *BPI = FuncInfo.BPI;
// If any two of the cases has the same destination, and if one value
// is the same as the other, but has one bit unset that the other has set,
// use bit manipulation to do two compares at once. For example:
@@ -1956,8 +1987,12 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
ISD::SETEQ);
// Update successor info.
- addSuccessorWithWeight(SwitchBB, Small.BB);
- addSuccessorWithWeight(SwitchBB, Default);
+ // Both Small and Big will jump to Small.BB, so we sum up the weights.
+ addSuccessorWithWeight(SwitchBB, Small.BB,
+ Small.ExtraWeight + Big.ExtraWeight);
+ addSuccessorWithWeight(SwitchBB, Default,
+ // The default destination is the first successor in IR.
+ BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0);
// Insert the true branch.
SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other,
@@ -1975,14 +2010,13 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
}
// Order cases by weight so the most likely case will be checked first.
- BranchProbabilityInfo *BPI = FuncInfo.BPI;
+ uint32_t UnhandledWeights = 0;
if (BPI) {
for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) {
- uint32_t IWeight = BPI->getEdgeWeight(SwitchBB->getBasicBlock(),
- I->BB->getBasicBlock());
+ uint32_t IWeight = I->ExtraWeight;
+ UnhandledWeights += IWeight;
for (CaseItr J = CR.Range.first; J < I; ++J) {
- uint32_t JWeight = BPI->getEdgeWeight(SwitchBB->getBasicBlock(),
- J->BB->getBasicBlock());
+ uint32_t JWeight = J->ExtraWeight;
if (IWeight > JWeight)
std::swap(*I, *J);
}
@@ -2031,10 +2065,12 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
LHS = I->Low; MHS = SV; RHS = I->High;
}
- uint32_t ExtraWeight = I->ExtraWeight;
+ // The false weight is the sum of all unhandled cases.
+ UnhandledWeights -= I->ExtraWeight;
CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough,
/* me */ CurBlock,
- /* trueweight */ ExtraWeight / 2, /* falseweight */ ExtraWeight / 2);
+ /* trueweight */ I->ExtraWeight,
+ /* falseweight */ UnhandledWeights);
// If emitting the first comparison, just call visitSwitchCase to emit the
// code into the current block. Otherwise, push the CaseBlock onto the
@@ -2079,7 +2115,7 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
TSize += I->size();
- if (!areJTsAllowed(TLI) || TSize.ult(4))
+ if (!areJTsAllowed(TLI) || TSize.ult(TLI.getMinimumJumpTableEntries()))
return false;
APInt Range = ComputeRange(First, Last);
@@ -2134,13 +2170,28 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
}
}
+ // Calculate weight for each unique destination in CR.
+ DenseMap<MachineBasicBlock*, uint32_t> DestWeights;
+ if (FuncInfo.BPI)
+ for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
+ DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
+ DestWeights.find(I->BB);
+ if (Itr != DestWeights.end())
+ Itr->second += I->ExtraWeight;
+ else
+ DestWeights[I->BB] = I->ExtraWeight;
+ }
+
// Update successor info. Add one edge to each unique successor.
BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
E = DestBBs.end(); I != E; ++I) {
if (!SuccsHandled[(*I)->getNumber()]) {
SuccsHandled[(*I)->getNumber()] = true;
- addSuccessorWithWeight(JumpTableBB, *I);
+ DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
+ DestWeights.find(*I);
+ addSuccessorWithWeight(JumpTableBB, *I,
+ Itr != DestWeights.end() ? Itr->second : 0);
}
}
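
The weight-aggregation loop above performs a find followed by an insert for each case; since DenseMap::operator[] value-initializes a missing uint32_t to zero, the same accumulation collapses to a single lookup (sketch reusing the hunk's own names):

    // Equivalent accumulation in one lookup per case.
    for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
      DestWeights[I->BB] += I->ExtraWeight;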
@@ -2371,7 +2422,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
if (i == count) {
 assert((count < 3) && "Too many destinations to test!");
- CasesBits.push_back(CaseBits(0, Dest, 0));
+ CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/));
count++;
}
@@ -2380,6 +2431,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
uint64_t lo = (lowValue - lowBound).getZExtValue();
uint64_t hi = (highValue - lowBound).getZExtValue();
+ CasesBits[i].ExtraWeight += I->ExtraWeight;
for (uint64_t j = lo; j <= hi; j++) {
CasesBits[i].Mask |= 1ULL << j;
@@ -2407,7 +2459,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
CurMF->insert(BBI, CaseBB);
BTC.push_back(BitTestCase(CasesBits[i].Mask,
CaseBB,
- CasesBits[i].BB));
+ CasesBits[i].BB, CasesBits[i].ExtraWeight));
// Put SV in a virtual register to make it available from the new blocks.
ExportFromCurrentBlock(SV);
@@ -2435,30 +2487,25 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
Clusterifier TheClusterifier;
+ BranchProbabilityInfo *BPI = FuncInfo.BPI;
// Start with "simple" cases
for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
i != e; ++i) {
const BasicBlock *SuccBB = i.getCaseSuccessor();
MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
- TheClusterifier.add(i.getCaseValueEx(), SMBB);
+ TheClusterifier.add(i.getCaseValueEx(), SMBB,
+ BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0);
}
TheClusterifier.optimize();
- BranchProbabilityInfo *BPI = FuncInfo.BPI;
size_t numCmps = 0;
for (Clusterifier::RangeIterator i = TheClusterifier.begin(),
e = TheClusterifier.end(); i != e; ++i, ++numCmps) {
Clusterifier::Cluster &C = *i;
- unsigned W = 0;
- if (BPI) {
- W = BPI->getEdgeWeight(SI.getParent(), C.second->getBasicBlock());
- if (!W)
- W = 16;
- W *= C.first.Weight;
- BPI->setEdgeWeight(SI.getParent(), C.second->getBasicBlock(), W);
- }
+ // Update edge weight for the cluster.
+ unsigned W = C.first.Weight;
// FIXME: Currently work with ConstantInt based numbers.
// Changing it to APInt based is pretty heavy for this commit.
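
A toy version of the clustering whose weight handling this hunk simplifies: runs of consecutive case values with the same destination are merged into one cluster whose weight is simply the sum of the member weights (no more scaling or writing back through BPI). Types and values here are illustrative only.

#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>
#include <utility>

int main() {
  // Hypothetical case value -> (destination id, edge weight).
  std::map<int, std::pair<int, uint32_t>> Cases =
      {{1, {7, 4}}, {2, {7, 6}}, {5, {9, 3}}};
  for (auto It = Cases.begin(); It != Cases.end();) {
    int Lo = It->first, Hi = Lo, Dest = It->second.first;
    uint32_t W = It->second.second;
    auto Next = std::next(It);
    // Extend the cluster while values stay consecutive and share a target.
    while (Next != Cases.end() && Next->first == Hi + 1 &&
           Next->second.first == Dest) {
      Hi = Next->first;
      W += Next->second.second;
      ++Next;
    }
    std::cout << "[" << Lo << "," << Hi << "] -> dest " << Dest
              << ", weight " << W << "\n";
    It = Next;
  }
  return 0;
}
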
@@ -2540,9 +2587,10 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB))
continue;
- // If the switch has more than 5 blocks, and at least 40% dense, and the
+ // If the switch has more than N blocks, and is at least 40% dense, and the
// target supports indirect branches, then emit a jump table rather than
// lowering the switch to a binary tree of conditional branches.
+ // N defaults to 4 and is controlled via TLI.getMinimumJumpTableEntries().
if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
continue;
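
A minimal sketch of the jump-table heuristic described in the comment above, assuming integer arithmetic in place of the APInt math; the default of 4 mirrors the new MinimumJumpTableEntries value, and the helper name is made up for illustration.

#include <cstdint>
#include <iostream>

// NumCases = handled case values, Range = span from lowest to highest value.
static bool shouldUseJumpTable(uint64_t NumCases, uint64_t Range,
                               uint64_t MinEntries = 4) {
  if (NumCases < MinEntries)
    return false;                    // too few entries, keep the binary tree
  return NumCases * 10 >= Range * 4; // at least 40% dense
}

int main() {
  std::cout << shouldUseJumpTable(5, 10) << "\n";  // 1: 50% dense
  std::cout << shouldUseJumpTable(3, 4) << "\n";   // 0: too few cases
  std::cout << shouldUseJumpTable(4, 100) << "\n"; // 0: only 4% dense
  return 0;
}
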
@@ -2556,14 +2604,14 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
// Update machine-CFG edges with unique successors.
- SmallVector<BasicBlock*, 32> succs;
- succs.reserve(I.getNumSuccessors());
- for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i)
- succs.push_back(I.getSuccessor(i));
- array_pod_sort(succs.begin(), succs.end());
- succs.erase(std::unique(succs.begin(), succs.end()), succs.end());
- for (unsigned i = 0, e = succs.size(); i != e; ++i) {
- MachineBasicBlock *Succ = FuncInfo.MBBMap[succs[i]];
+ SmallSet<BasicBlock*, 32> Done;
+ for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
+ BasicBlock *BB = I.getSuccessor(i);
+ bool Inserted = Done.insert(BB);
+ if (!Inserted)
+ continue;
+
+ MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
addSuccessorWithWeight(IndirectBrMBB, Succ);
}
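
A sketch of the deduplication change above: the sort/unique pass over the successor list is replaced by a single membership test, which also preserves the order in which successors first appear. std::set stands in for LLVM's SmallSet, whose insert returns a plain bool rather than a pair.

#include <iostream>
#include <set>
#include <vector>

int main() {
  // Hypothetical successor ids; an indirectbr may list a block twice.
  std::vector<int> Succs = {3, 1, 3, 2, 1};
  std::set<int> Done; // stands in for SmallSet<BasicBlock*, 32>
  for (int S : Succs) {
    // insert().second is false if S was already present.
    if (!Done.insert(S).second)
      continue;
    std::cout << "add CFG edge to successor " << S << "\n";
  }
  return 0;
}
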
@@ -3160,9 +3208,9 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
return; // getValue will auto-populate this.
Type *Ty = I.getAllocatedType();
- uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
+ uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
+ std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
I.getAlignment());
SDValue AllocSize = getValue(I.getArraySize());
@@ -3460,7 +3508,7 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
SDValue InChain = getRoot();
- EVT VT = EVT::getEVT(I.getType());
+ EVT VT = TLI.getValueType(I.getType());
if (I.getAlignment() * 8 < VT.getSizeInBits())
report_fatal_error("Cannot generate unaligned atomic load");
@@ -3490,7 +3538,7 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
SDValue InChain = getRoot();
- EVT VT = EVT::getEVT(I.getValueOperand()->getType());
+ EVT VT = TLI.getValueType(I.getValueOperand()->getType());
if (I.getAlignment() * 8 < VT.getSizeInBits())
report_fatal_error("Cannot generate unaligned atomic store");
@@ -4352,7 +4400,7 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
return DAG.getConstantFP(1.0, LHS.getValueType());
const Function *F = DAG.getMachineFunction().getFunction();
- if (!F->hasFnAttr(Attribute::OptimizeForSize) ||
+ if (!F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize) ||
// If optimizing for size, don't insert too many multiplies. This
// inserts up to 5 multiplies.
CountPopulation_32(Val)+Log2_32(Val) < 7) {
@@ -4850,7 +4898,21 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, DestVT,
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
- DAG.getConstant(Idx, MVT::i32));
+ DAG.getIntPtrConstant(Idx));
+ setValue(&I, Res);
+ return 0;
+ }
+ case Intrinsic::x86_avx_vextractf128_pd_256:
+ case Intrinsic::x86_avx_vextractf128_ps_256:
+ case Intrinsic::x86_avx_vextractf128_si_256:
+ case Intrinsic::x86_avx2_vextracti128: {
+ DebugLoc dl = getCurDebugLoc();
+ EVT DestVT = TLI.getValueType(I.getType());
+ uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) *
+ DestVT.getVectorNumElements();
+ Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT,
+ getValue(I.getArgOperand(0)),
+ DAG.getIntPtrConstant(Idx));
setValue(&I, Res);
return 0;
}
@@ -5113,10 +5175,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0;
}
+ case Intrinsic::debugtrap:
case Intrinsic::trap: {
StringRef TrapFuncName = TM.Options.getTrapFunctionName();
if (TrapFuncName.empty()) {
- DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()));
+ ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
+ ISD::TRAP : ISD::DEBUGTRAP;
+ DAG.setRoot(DAG.getNode(Op, dl,MVT::Other, getRoot()));
return 0;
}
TargetLowering::ArgListTy Args;
@@ -5131,10 +5196,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.setRoot(Result.second);
return 0;
}
- case Intrinsic::debugtrap: {
- DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, dl,MVT::Other, getRoot()));
- return 0;
- }
+
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::usub_with_overflow:
@@ -5177,14 +5239,40 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
rw==1)); /* write */
return 0;
}
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end: {
+ bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
+ // Stack coloring is not enabled in O0, discard region information.
+ if (TM.getOptLevel() == CodeGenOpt::None)
+ return 0;
+
+ SmallVector<Value *, 4> Allocas;
+ GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD);
+
+ for (SmallVector<Value*, 4>::iterator Object = Allocas.begin(),
+ E = Allocas.end(); Object != E; ++Object) {
+ AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
+ // Could not find an Alloca.
+ if (!LifetimeObject)
+ continue;
+
+ int FI = FuncInfo.StaticAllocaMap[LifetimeObject];
+
+ SDValue Ops[2];
+ Ops[0] = getRoot();
+ Ops[1] = DAG.getFrameIndex(FI, TLI.getPointerTy(), true);
+ unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
+
+ Res = DAG.getNode(Opcode, dl, MVT::Other, Ops, 2);
+ DAG.setRoot(Res);
+ }
+ return 0;
+ }
case Intrinsic::invariant_start:
- case Intrinsic::lifetime_start:
// Discard region information.
setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
return 0;
case Intrinsic::invariant_end:
- case Intrinsic::lifetime_end:
// Discard region information.
return 0;
case Intrinsic::donothing:
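
A self-contained sketch of the new lifetime lowering, assuming a string-keyed map in place of FuncInfo.StaticAllocaMap and ignoring the SelectionDAG plumbing: each marker's underlying object is resolved to a frame index and a LIFETIME_START or LIFETIME_END node is emitted, while markers whose pointer cannot be traced to a static alloca are dropped.

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main() {
  // Alloca name -> frame index, standing in for the static alloca map.
  std::map<std::string, int> StaticAllocaMap = {{"a", 0}, {"b", 1}};
  // (is-start, underlying object) pairs for the markers in program order.
  std::vector<std::pair<bool, std::string>> Markers = {
      {true, "a"}, {true, "b"}, {false, "a"}, {false, "b"}};
  for (const auto &M : Markers) {
    auto It = StaticAllocaMap.find(M.second);
    if (It == StaticAllocaMap.end())
      continue; // could not resolve the pointer to a static alloca
    std::cout << (M.first ? "LIFETIME_START" : "LIFETIME_END")
              << " fi#" << It->second << "\n";
  }
  return 0;
}
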
@@ -5220,9 +5308,9 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
int DemoteStackIdx = -100;
if (!CanLowerReturn) {
- uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(
+ uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(
FTy->getReturnType());
- unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(
+ unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(
FTy->getReturnType());
MachineFunction &MF = DAG.getMachineFunction();
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
@@ -5254,12 +5342,12 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
Entry.Node = ArgNode; Entry.Ty = V->getType();
unsigned attrInd = i - CS.arg_begin() + 1;
- Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
- Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
- Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
- Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
- Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
- Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
+ Entry.isSExt = CS.paramHasAttr(attrInd, Attributes::SExt);
+ Entry.isZExt = CS.paramHasAttr(attrInd, Attributes::ZExt);
+ Entry.isInReg = CS.paramHasAttr(attrInd, Attributes::InReg);
+ Entry.isSRet = CS.paramHasAttr(attrInd, Attributes::StructRet);
+ Entry.isNest = CS.paramHasAttr(attrInd, Attributes::Nest);
+ Entry.isByVal = CS.paramHasAttr(attrInd, Attributes::ByVal);
Entry.Alignment = CS.getParamAlignment(attrInd);
Args.push_back(Entry);
}
@@ -5687,7 +5775,7 @@ public:
/// MVT::Other.
EVT getCallOperandValEVT(LLVMContext &Context,
const TargetLowering &TLI,
- const TargetData *TD) const {
+ const DataLayout *TD) const {
if (CallOperandVal == 0) return MVT::Other;
if (isa<BasicBlock>(CallOperandVal))
@@ -5991,8 +6079,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
Type *Ty = OpVal->getType();
- uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
- unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
+ uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
+ unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
@@ -6040,12 +6128,36 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
- // Remember the HasSideEffect and AlignStack bits as operand 3.
+ // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
+ // bits as operand 3.
unsigned ExtraInfo = 0;
if (IA->hasSideEffects())
ExtraInfo |= InlineAsm::Extra_HasSideEffects;
if (IA->isAlignStack())
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
+ // Set the asm dialect.
+ ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
+
+ // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
+ for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
+ TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
+
+ // Compute the constraint code and ConstraintType to use.
+ TLI.ComputeConstraintToUse(OpInfo, SDValue());
+
+ // Ideally, we would only check against memory constraints. However, the
+ // meaning of an "other" constraint can be target-specific and we can't easily
+ // reason about it. Therefore, be conservative and set MayLoad/MayStore
+ // for "other" constraints as well.
+ if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
+ OpInfo.ConstraintType == TargetLowering::C_Other) {
+ if (OpInfo.Type == InlineAsm::isInput)
+ ExtraInfo |= InlineAsm::Extra_MayLoad;
+ else if (OpInfo.Type == InlineAsm::isOutput)
+ ExtraInfo |= InlineAsm::Extra_MayStore;
+ }
+ }
+
AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
TLI.getPointerTy()));
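
A sketch of the operand-3 encoding this hunk extends: independent facts about the inline asm are OR'ed into one integer. The bit positions below are illustrative, not the actual InlineAsm::Extra_* constants.

#include <cstdint>
#include <iostream>

enum : uint32_t {
  HasSideEffects = 1u << 0,
  IsAlignStack   = 1u << 1,
  AsmDialectBit  = 1u << 2, // scaled by the dialect number (0 or 1)
  MayLoad        = 1u << 3,
  MayStore       = 1u << 4
};

int main() {
  uint32_t ExtraInfo = 0;
  unsigned Dialect = 1;                 // say, Intel rather than AT&T
  bool SideEffects = true;
  bool MemInput = true, MemOutput = false;

  if (SideEffects)
    ExtraInfo |= HasSideEffects;
  ExtraInfo |= Dialect * AsmDialectBit; // same multiply trick as the patch
  if (MemInput)
    ExtraInfo |= MayLoad;               // conservative: memory/"other" input
  if (MemOutput)
    ExtraInfo |= MayStore;              // conservative: memory/"other" output

  std::cout << "ExtraInfo = 0x" << std::hex << ExtraInfo << "\n";
  return 0;
}
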
@@ -6155,7 +6267,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Use the produced MatchedRegs object to
MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
- Chain, &Flag);
+ Chain, &Flag, CS.getInstruction());
MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
true, OpInfo.getMatchedOperand(),
DAG, AsmNodeOperands);
@@ -6237,7 +6349,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
- Chain, &Flag);
+ Chain, &Flag, CS.getInstruction());
OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
DAG, AsmNodeOperands);
@@ -6268,7 +6380,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// and set it as the value of the call.
if (!RetValRegs.Regs.empty()) {
SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
- Chain, &Flag);
+ Chain, &Flag, CS.getInstruction());
// FIXME: Why don't we do this for inline asms with MRVs?
if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
@@ -6308,7 +6420,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
const Value *Ptr = IndirectStoresToEmit[i].second;
SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
- Chain, &Flag);
+ Chain, &Flag, IA);
StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
}
@@ -6338,7 +6450,7 @@ void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
}
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
- const TargetData &TD = *TLI.getTargetData();
+ const DataLayout &TD = *TLI.getDataLayout();
SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)),
@@ -6384,7 +6496,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment =
- getTargetData()->getABITypeAlignment(ArgTy);
+ getDataLayout()->getABITypeAlignment(ArgTy);
if (Args[i].isZExt)
Flags.setZExt();
@@ -6398,7 +6510,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Flags.setByVal();
PointerType *Ty = cast<PointerType>(Args[i].Ty);
Type *ElementTy = Ty->getElementType();
- Flags.setByValSize(getTargetData()->getTypeAllocSize(ElementTy));
+ Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
// For ByVal, alignment should come from FE. BE will guess if this
// info is not there but there are cases it cannot get right.
unsigned FrameAlign;
@@ -6423,12 +6535,13 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
ExtendKind = ISD::ZERO_EXTEND;
getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts,
- PartVT, ExtendKind);
+ PartVT, CLI.CS ? CLI.CS->getInstruction() : 0, ExtendKind);
for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't first piece, alignment must be 1
ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(),
- i < CLI.NumFixedArgs);
+ i < CLI.NumFixedArgs,
+ i, j*Parts[j].getValueType().getStoreSize());
if (NumParts > 1 && j == 0)
MyFlags.Flags.setSplit();
else if (j != 0)
@@ -6504,7 +6617,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
- NumRegs, RegisterVT, VT,
+ NumRegs, RegisterVT, VT, NULL,
AssertOp));
CurReg += NumRegs;
}
@@ -6543,7 +6656,7 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
SDValue Chain = DAG.getEntryNode();
- RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
+ RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0, V);
PendingExports.push_back(Chain);
}
@@ -6573,7 +6686,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
const Function &F = *LLVMBB->getParent();
SelectionDAG &DAG = SDB->DAG;
DebugLoc dl = SDB->getCurDebugLoc();
- const TargetData *TD = TLI.getTargetData();
+ const DataLayout *TD = TLI.getDataLayout();
SmallVector<ISD::InputArg, 16> Ins;
// Check whether the function can return without sret-demotion.
@@ -6591,7 +6704,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
ISD::ArgFlagsTy Flags;
Flags.setSRet();
EVT RegisterVT = TLI.getRegisterType(*DAG.getContext(), ValueVTs[0]);
- ISD::InputArg RetArg(Flags, RegisterVT, true);
+ ISD::InputArg RetArg(Flags, RegisterVT, true, 0, 0);
Ins.push_back(RetArg);
}
@@ -6610,15 +6723,15 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
unsigned OriginalAlignment =
TD->getABITypeAlignment(ArgTy);
- if (F.paramHasAttr(Idx, Attribute::ZExt))
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::ZExt))
Flags.setZExt();
- if (F.paramHasAttr(Idx, Attribute::SExt))
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt))
Flags.setSExt();
- if (F.paramHasAttr(Idx, Attribute::InReg))
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::InReg))
Flags.setInReg();
- if (F.paramHasAttr(Idx, Attribute::StructRet))
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::StructRet))
Flags.setSRet();
- if (F.paramHasAttr(Idx, Attribute::ByVal)) {
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::ByVal)) {
Flags.setByVal();
PointerType *Ty = cast<PointerType>(I->getType());
Type *ElementTy = Ty->getElementType();
@@ -6632,14 +6745,15 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
FrameAlign = TLI.getByValTypeAlignment(ElementTy);
Flags.setByValAlign(FrameAlign);
}
- if (F.paramHasAttr(Idx, Attribute::Nest))
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::Nest))
Flags.setNest();
Flags.setOrigAlign(OriginalAlignment);
EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
- ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
+ ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed,
+ Idx-1, i*RegisterVT.getStoreSize());
if (NumRegs > 1 && i == 0)
MyFlags.Flags.setSplit();
// if it isn't first piece, alignment must be 1
@@ -6685,7 +6799,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
EVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
ISD::NodeType AssertOp = ISD::DELETED_NODE;
SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
- RegVT, VT, AssertOp);
+ RegVT, VT, NULL, AssertOp);
MachineFunction& MF = SDB->DAG.getMachineFunction();
MachineRegisterInfo& RegInfo = MF.getRegInfo();
@@ -6719,14 +6833,14 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
if (!I->use_empty()) {
ISD::NodeType AssertOp = ISD::DELETED_NODE;
- if (F.paramHasAttr(Idx, Attribute::SExt))
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt))
AssertOp = ISD::AssertSext;
- else if (F.paramHasAttr(Idx, Attribute::ZExt))
+ else if (F.getParamAttributes(Idx).hasAttribute(Attributes::ZExt))
AssertOp = ISD::AssertZext;
ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
NumParts, PartVT, VT,
- AssertOp));
+ NULL, AssertOp));
}
i += NumParts;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 4090002..9e46d96 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -66,7 +66,7 @@ class ShuffleVectorInst;
class SIToFPInst;
class StoreInst;
class SwitchInst;
-class TargetData;
+class DataLayout;
class TargetLibraryInfo;
class TargetLowering;
class TruncInst;
@@ -150,9 +150,11 @@ private:
uint64_t Mask;
MachineBasicBlock* BB;
unsigned Bits;
+ uint32_t ExtraWeight;
- CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits):
- Mask(mask), BB(bb), Bits(bits) { }
+ CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits,
+ uint32_t Weight):
+ Mask(mask), BB(bb), Bits(bits), ExtraWeight(Weight) { }
};
typedef std::vector<Case> CaseVector;
@@ -247,11 +249,13 @@ private:
typedef std::pair<JumpTableHeader, JumpTable> JumpTableBlock;
struct BitTestCase {
- BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr):
- Mask(M), ThisBB(T), TargetBB(Tr) { }
+ BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr,
+ uint32_t Weight):
+ Mask(M), ThisBB(T), TargetBB(Tr), ExtraWeight(Weight) { }
uint64_t Mask;
MachineBasicBlock *ThisBB;
MachineBasicBlock *TargetBB;
+ uint32_t ExtraWeight;
};
typedef SmallVector<BitTestCase, 3> BitTestInfo;
@@ -281,7 +285,7 @@ public:
const TargetMachine &TM;
const TargetLowering &TLI;
SelectionDAG &DAG;
- const TargetData *TD;
+ const DataLayout *TD;
AliasAnalysis *AA;
const TargetLibraryInfo *LibInfo;
@@ -325,7 +329,7 @@ public:
CodeGenOpt::Level ol)
: SDNodeOrder(0), TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
- HasTailCall(false), Context(dag.getContext()) {
+ HasTailCall(false) {
}
void init(GCFunctionInfo *gfi, AliasAnalysis &aa,
@@ -452,6 +456,7 @@ public:
void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
void visitBitTestCase(BitTestBlock &BB,
MachineBasicBlock* NextMBB,
+ uint32_t BranchWeightToNext,
unsigned Reg,
BitTestCase &B,
MachineBasicBlock *SwitchBB);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 13cd011..6f3ce7a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -267,6 +267,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::STACKRESTORE: return "stackrestore";
case ISD::TRAP: return "trap";
case ISD::DEBUGTRAP: return "debugtrap";
+ case ISD::LIFETIME_START: return "lifetime.start";
+ case ISD::LIFETIME_END: return "lifetime.end";
// Bit manipulation
case ISD::BSWAP: return "bswap";
@@ -331,7 +333,7 @@ void SDNode::dump(const SelectionDAG *G) const {
}
void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
- OS << (void*)this << ": ";
+ OS << (const void*)this << ": ";
for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
if (i) OS << ",";
@@ -473,11 +475,16 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << "<" << *M->getMemOperand() << ">";
} else if (const BlockAddressSDNode *BA =
dyn_cast<BlockAddressSDNode>(this)) {
+ int64_t offset = BA->getOffset();
OS << "<";
WriteAsOperand(OS, BA->getBlockAddress()->getFunction(), false);
OS << ", ";
WriteAsOperand(OS, BA->getBlockAddress()->getBasicBlock(), false);
OS << ">";
+ if (offset > 0)
+ OS << " + " << offset;
+ else
+ OS << " " << offset;
if (unsigned int TF = BA->getTargetFlags())
OS << " [TF=" << TF << ']';
}
@@ -559,7 +566,7 @@ static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
child->printr(OS, G);
once.insert(child);
} else { // Just the address. FIXME: also print the child's opcode.
- OS << (void*)child;
+ OS << (const void*)child;
if (unsigned RN = N->getOperand(i).getResNo())
OS << ":" << RN;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 4e5e3ba..c314fa5 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -474,6 +474,11 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
MRI.replaceRegWith(From, To);
}
+ // Freeze the set of reserved registers now that MachineFrameInfo has been
+ // set up. All the information required by getReservedRegs() should be
+ // available now.
+ MRI.freezeReservedRegs(*MF);
+
// Release function-specific state. SDB and CurDAG are already cleared
// at this point.
FuncInfo->clear();
@@ -554,7 +559,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
#endif
{
BlockNumber = FuncInfo->MBB->getNumber();
- BlockName = MF->getFunction()->getName().str() + ":" +
+ BlockName = MF->getName().str() + ":" +
FuncInfo->MBB->getBasicBlock()->getName().str();
}
DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber
@@ -1209,7 +1214,12 @@ SelectionDAGISel::FinishBasicBlock() {
CodeGenAndEmitDAG();
}
+ uint32_t UnhandledWeight = 0;
+ for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j)
+ UnhandledWeight += SDB->BitTestCases[i].Cases[j].ExtraWeight;
+
for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
+ UnhandledWeight -= SDB->BitTestCases[i].Cases[j].ExtraWeight;
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
@@ -1217,12 +1227,14 @@ SelectionDAGISel::FinishBasicBlock() {
if (j+1 != ej)
SDB->visitBitTestCase(SDB->BitTestCases[i],
SDB->BitTestCases[i].Cases[j+1].ThisBB,
+ UnhandledWeight,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
FuncInfo->MBB);
else
SDB->visitBitTestCase(SDB->BitTestCases[i],
SDB->BitTestCases[i].Default,
+ UnhandledWeight,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
FuncInfo->MBB);
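
A sketch of the two-pass weight bookkeeping added here, with hypothetical per-test weights in place of BitTestCase::ExtraWeight: the first pass totals the weight of all bit tests, and before emitting test j the branch to the next test (or the default) carries whatever weight is still unhandled.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<uint32_t> CaseWeights = {8, 4, 2}; // illustrative values
  uint32_t UnhandledWeight = 0;
  for (uint32_t W : CaseWeights)
    UnhandledWeight += W;
  for (std::size_t j = 0; j < CaseWeights.size(); ++j) {
    UnhandledWeight -= CaseWeights[j];
    std::cout << "test " << j << ": taken=" << CaseWeights[j]
              << " weight-to-next=" << UnhandledWeight << "\n";
  }
  return 0;
}
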
@@ -1794,10 +1806,13 @@ WalkChainUsers(const SDNode *ChainedNode,
User->getOpcode() == ISD::HANDLENODE) // Root of the graph.
continue;
- if (User->getOpcode() == ISD::CopyToReg ||
- User->getOpcode() == ISD::CopyFromReg ||
- User->getOpcode() == ISD::INLINEASM ||
- User->getOpcode() == ISD::EH_LABEL) {
+ unsigned UserOpcode = User->getOpcode();
+ if (UserOpcode == ISD::CopyToReg ||
+ UserOpcode == ISD::CopyFromReg ||
+ UserOpcode == ISD::INLINEASM ||
+ UserOpcode == ISD::EH_LABEL ||
+ UserOpcode == ISD::LIFETIME_START ||
+ UserOpcode == ISD::LIFETIME_END) {
// If their node ID got reset to -1 then they've already been selected.
// Treat them like a MachineOpcode.
if (User->getNodeId() == -1)
@@ -1994,7 +2009,7 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
return Res;
}
-/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
+/// CheckSame - Implements OP_CheckSame.
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N,
@@ -2213,6 +2228,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case ISD::CopyFromReg:
case ISD::CopyToReg:
case ISD::EH_LABEL:
+ case ISD::LIFETIME_START:
+ case ISD::LIFETIME_END:
NodeToMatch->setNodeId(-1); // Mark selected.
return 0;
case ISD::AssertSext:
@@ -2981,7 +2998,7 @@ void SelectionDAGISel::CannotYetSelect(SDNode *N) {
N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_VOID) {
N->printrFull(Msg, CurDAG);
- Msg << "\nIn function: " << MF->getFunction()->getName();
+ Msg << "\nIn function: " << MF->getName();
} else {
bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
unsigned iid =
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index 173ffac..3921635 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -14,7 +14,6 @@
#include "ScheduleDAGSDNodes.h"
#include "llvm/Constants.h"
#include "llvm/DebugInfo.h"
-#include "llvm/Function.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -50,7 +49,7 @@ namespace llvm {
template<typename EdgeIter>
static std::string getEdgeSourceLabel(const void *Node, EdgeIter I) {
- return itostr(I - SDNodeIterator::begin((SDNode *) Node));
+ return itostr(I - SDNodeIterator::begin((const SDNode *) Node));
}
/// edgeTargetsEdgeSource - This method returns true if this outgoing edge
@@ -73,7 +72,7 @@ namespace llvm {
}
static std::string getGraphName(const SelectionDAG *G) {
- return G->getMachineFunction().getFunction()->getName();
+ return G->getMachineFunction().getName();
}
static bool renderGraphFromBottomUp() {
@@ -146,7 +145,7 @@ std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node,
void SelectionDAG::viewGraph(const std::string &Title) {
// This code is only for debugging!
#ifndef NDEBUG
- ViewGraph(this, "dag." + getMachineFunction().getFunction()->getName(),
+ ViewGraph(this, "dag." + getMachineFunction().getName(),
false, Title);
#else
errs() << "SelectionDAG::viewGraph is only available in debug builds on "
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 6820175..49f55e2 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -14,7 +14,7 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -515,7 +515,7 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
/// NOTE: The constructor takes ownership of TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm,
const TargetLoweringObjectFile *tlof)
- : TM(tm), TD(TM.getTargetData()), TLOF(*tlof) {
+ : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
memset(LoadExtActions, 0, sizeof(LoadExtActions));
@@ -583,8 +583,13 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
// Default ISD::TRAP to expand (which turns it into abort).
setOperationAction(ISD::TRAP, MVT::Other, Expand);
+ // On most systems, DEBUGTRAP and TRAP behave identically. Marking DEBUGTRAP
+ // as "Expand" tells the DAG legalizer to replace it with plain TRAP.
+ //
+ setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
+
IsLittleEndian = TD->isLittleEndian();
- PointerTy = MVT::getIntegerVT(8*TD->getPointerSize());
+ PointerTy = MVT::getIntegerVT(8*TD->getPointerSize(0));
memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
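
A toy model of the fallback this default creates: debugtrap lowers to DEBUGTRAP, and a target that leaves DEBUGTRAP at "Expand" automatically gets plain TRAP. The enum and helper below are illustrative, not the real TargetLowering types.

#include <iostream>
#include <string>

enum Action { Legal, Expand };

static std::string lowerTrap(bool IsDebugTrap, Action DebugTrapAction) {
  if (!IsDebugTrap)
    return "TRAP";
  return DebugTrapAction == Legal ? "DEBUGTRAP" : "TRAP"; // expanded
}

int main() {
  std::cout << lowerTrap(true, Expand) << "\n";  // TRAP on most targets
  std::cout << lowerTrap(true, Legal) << "\n";   // DEBUGTRAP where supported
  std::cout << lowerTrap(false, Expand) << "\n"; // TRAP
  return 0;
}
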
@@ -613,6 +618,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
ShouldFoldAtomicFences = false;
InsertFencesForAtomic = false;
SupportJumpTables = true;
+ MinimumJumpTableEntries = 4;
InitLibcallNames(LibcallRoutineNames);
InitCmpLibcallCCs(CmpLibcallCCs);
@@ -624,7 +630,7 @@ TargetLowering::~TargetLowering() {
}
MVT TargetLowering::getShiftAmountTy(EVT LHSTy) const {
- return MVT::getIntegerVT(8*TD->getPointerSize());
+ return MVT::getIntegerVT(8*TD->getPointerSize(0));
}
/// canOpTrap - Returns true if the operation can trap for the value type.
@@ -772,7 +778,7 @@ void TargetLowering::computeRegisterProperties() {
LegalIntReg = IntReg;
} else {
RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
- (MVT::SimpleValueType)LegalIntReg;
+ (const MVT::SimpleValueType)LegalIntReg;
ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
}
}
@@ -898,10 +904,9 @@ const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
return NULL;
}
-
EVT TargetLowering::getSetCCResultType(EVT VT) const {
assert(!VT.isVector() && "No default SetCC type for vectors!");
- return PointerTy.SimpleTy;
+ return getPointerTy(0).SimpleTy;
}
MVT::SimpleValueType TargetLowering::getCmpLibcallReturnType() const {
@@ -997,9 +1002,9 @@ void llvm::GetReturnInfo(Type* ReturnType, Attributes attr,
EVT VT = ValueVTs[j];
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
- if (attr & Attribute::SExt)
+ if (attr.hasAttribute(Attributes::SExt))
ExtendKind = ISD::SIGN_EXTEND;
- else if (attr & Attribute::ZExt)
+ else if (attr.hasAttribute(Attributes::ZExt))
ExtendKind = ISD::ZERO_EXTEND;
// FIXME: C calling convention requires the return type to be promoted to
@@ -1017,18 +1022,17 @@ void llvm::GetReturnInfo(Type* ReturnType, Attributes attr,
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
- if (attr & Attribute::InReg)
+ if (attr.hasAttribute(Attributes::InReg))
Flags.setInReg();
// Propagate extension type if any
- if (attr & Attribute::SExt)
+ if (attr.hasAttribute(Attributes::SExt))
Flags.setSExt();
- else if (attr & Attribute::ZExt)
+ else if (attr.hasAttribute(Attributes::ZExt))
Flags.setZExt();
- for (unsigned i = 0; i < NumParts; ++i) {
- Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true));
- }
+ for (unsigned i = 0; i < NumParts; ++i)
+ Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true, 0, 0));
}
}
@@ -1062,7 +1066,7 @@ SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
(JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
- return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy());
+ return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(0));
return Table;
}
@@ -2441,7 +2445,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (N0 == N1) {
// The sext(setcc()) => setcc() optimization relies on the appropriate
// constant being emitted.
- uint64_t EqVal;
+ uint64_t EqVal = 0;
switch (getBooleanContents(N0.getValueType().isVector())) {
case UndefinedBooleanContent:
case ZeroOrOneBooleanContent:
@@ -2954,8 +2958,9 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
EVT::getEVT(IntegerType::get(OpTy->getContext(), BitSize), true);
break;
}
- } else if (dyn_cast<PointerType>(OpTy)) {
- OpInfo.ConstraintVT = MVT::getIntegerVT(8*TD->getPointerSize());
+ } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
+ OpInfo.ConstraintVT = MVT::getIntegerVT(
+ 8*TD->getPointerSize(PT->getAddressSpace()));
} else {
OpInfo.ConstraintVT = EVT::getEVT(OpTy, true);
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
index a081e3c..f769b44 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
@@ -16,7 +16,7 @@
using namespace llvm;
TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM)
- : TD(TM.getTargetData()) {
+ : TD(TM.getDataLayout()) {
}
TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {
diff --git a/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp b/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
index 21ae2f5..4fbe1b3 100644
--- a/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
+++ b/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
@@ -159,7 +159,7 @@ void PEI::initShrinkWrappingInfo() {
// via --shrink-wrap-func=<funcname>.
#ifndef NDEBUG
if (ShrinkWrapFunc != "") {
- std::string MFName = MF->getFunction()->getName().str();
+ std::string MFName = MF->getName().str();
ShrinkWrapThisFunction = (MFName == ShrinkWrapFunc);
}
#endif
@@ -187,7 +187,7 @@ void PEI::placeCSRSpillsAndRestores(MachineFunction &Fn) {
DEBUG(if (ShrinkWrapThisFunction) {
dbgs() << "Place CSR spills/restores for "
- << MF->getFunction()->getName() << "\n";
+ << MF->getName() << "\n";
});
if (calculateSets(Fn))
@@ -364,7 +364,7 @@ bool PEI::calculateSets(MachineFunction &Fn) {
// If no CSRs used, we are done.
if (CSI.empty()) {
DEBUG(if (ShrinkWrapThisFunction)
- dbgs() << "DISABLED: " << Fn.getFunction()->getName()
+ dbgs() << "DISABLED: " << Fn.getName()
<< ": uses no callee-saved registers\n");
return false;
}
@@ -384,7 +384,7 @@ bool PEI::calculateSets(MachineFunction &Fn) {
// implementation to functions with <= 500 MBBs.
if (Fn.size() > 500) {
DEBUG(if (ShrinkWrapThisFunction)
- dbgs() << "DISABLED: " << Fn.getFunction()->getName()
+ dbgs() << "DISABLED: " << Fn.getName()
<< ": too large (" << Fn.size() << " MBBs)\n");
ShrinkWrapThisFunction = false;
}
@@ -466,7 +466,7 @@ bool PEI::calculateSets(MachineFunction &Fn) {
}
if (allCSRUsesInEntryBlock) {
- DEBUG(dbgs() << "DISABLED: " << Fn.getFunction()->getName()
+ DEBUG(dbgs() << "DISABLED: " << Fn.getName()
<< ": all CSRs used in EntryBlock\n");
ShrinkWrapThisFunction = false;
} else {
@@ -478,7 +478,7 @@ bool PEI::calculateSets(MachineFunction &Fn) {
allCSRsUsedInEntryFanout = false;
}
if (allCSRsUsedInEntryFanout) {
- DEBUG(dbgs() << "DISABLED: " << Fn.getFunction()->getName()
+ DEBUG(dbgs() << "DISABLED: " << Fn.getName()
<< ": all CSRs used in imm successors of EntryBlock\n");
ShrinkWrapThisFunction = false;
}
@@ -505,7 +505,7 @@ bool PEI::calculateSets(MachineFunction &Fn) {
if (dominatesExitNodes) {
CSRUsedInChokePoints |= CSRUsed[MBB];
if (CSRUsedInChokePoints == UsedCSRegs) {
- DEBUG(dbgs() << "DISABLED: " << Fn.getFunction()->getName()
+ DEBUG(dbgs() << "DISABLED: " << Fn.getName()
<< ": all CSRs used in choke point(s) at "
<< getBasicBlockName(MBB) << "\n");
ShrinkWrapThisFunction = false;
@@ -521,7 +521,7 @@ bool PEI::calculateSets(MachineFunction &Fn) {
return false;
DEBUG({
- dbgs() << "ENABLED: " << Fn.getFunction()->getName();
+ dbgs() << "ENABLED: " << Fn.getName();
if (HasFastExitPath)
dbgs() << " (fast exit path)";
dbgs() << "\n";
@@ -861,7 +861,7 @@ void PEI::placeSpillsAndRestores(MachineFunction &Fn) {
DEBUG(if (ShrinkWrapDebugging >= BasicInfo) {
dbgs() << "-----------------------------------------------------------\n";
dbgs() << "total iterations = " << iterations << " ( "
- << Fn.getFunction()->getName()
+ << Fn.getName()
<< " " << numSRReducedThisFunc
<< " " << Fn.size()
<< " )\n";
@@ -984,7 +984,7 @@ void PEI::verifySpillRestorePlacement() {
if (isReturnBlock(SBB) || SBB->succ_size() == 0) {
if (restored != spilled) {
CSRegSet notRestored = (spilled - restored);
- DEBUG(dbgs() << MF->getFunction()->getName() << ": "
+ DEBUG(dbgs() << MF->getName() << ": "
<< stringifyCSRegSet(notRestored)
<< " spilled at " << getBasicBlockName(MBB)
<< " are never restored on path to return "
@@ -1032,7 +1032,7 @@ void PEI::verifySpillRestorePlacement() {
}
if (spilled != restored) {
CSRegSet notSpilled = (restored - spilled);
- DEBUG(dbgs() << MF->getFunction()->getName() << ": "
+ DEBUG(dbgs() << MF->getName() << ": "
<< stringifyCSRegSet(notSpilled)
<< " restored at " << getBasicBlockName(MBB)
<< " are never spilled\n");
diff --git a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 980bd74..4b566fc 100644
--- a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -30,7 +30,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -191,58 +191,43 @@ setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads) {
// that needs to be restored on all exits from the function. This is an alloca
// because the value needs to be added to the global context list.
unsigned Align =
- TLI->getTargetData()->getPrefTypeAlignment(FunctionContextTy);
+ TLI->getDataLayout()->getPrefTypeAlignment(FunctionContextTy);
FuncCtx =
new AllocaInst(FunctionContextTy, 0, Align, "fn_context", EntryBB->begin());
// Fill in the function context structure.
- Type *Int32Ty = Type::getInt32Ty(F.getContext());
- Value *Zero = ConstantInt::get(Int32Ty, 0);
- Value *One = ConstantInt::get(Int32Ty, 1);
- Value *Two = ConstantInt::get(Int32Ty, 2);
- Value *Three = ConstantInt::get(Int32Ty, 3);
- Value *Four = ConstantInt::get(Int32Ty, 4);
-
- Value *Idxs[2] = { Zero, 0 };
-
for (unsigned I = 0, E = LPads.size(); I != E; ++I) {
LandingPadInst *LPI = LPads[I];
IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt());
// Reference the __data field.
- Idxs[1] = Two;
- Value *FCData = Builder.CreateGEP(FuncCtx, Idxs, "__data");
+ Value *FCData = Builder.CreateConstGEP2_32(FuncCtx, 0, 2, "__data");
// The exception values come back in context->__data[0].
- Idxs[1] = Zero;
- Value *ExceptionAddr = Builder.CreateGEP(FCData, Idxs, "exception_gep");
+ Value *ExceptionAddr = Builder.CreateConstGEP2_32(FCData, 0, 0,
+ "exception_gep");
Value *ExnVal = Builder.CreateLoad(ExceptionAddr, true, "exn_val");
- ExnVal = Builder.CreateIntToPtr(ExnVal, Type::getInt8PtrTy(F.getContext()));
+ ExnVal = Builder.CreateIntToPtr(ExnVal, Builder.getInt8PtrTy());
- Idxs[1] = One;
- Value *SelectorAddr = Builder.CreateGEP(FCData, Idxs, "exn_selector_gep");
+ Value *SelectorAddr = Builder.CreateConstGEP2_32(FCData, 0, 1,
+ "exn_selector_gep");
Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val");
substituteLPadValues(LPI, ExnVal, SelVal);
}
// Personality function
- Idxs[1] = Three;
+ IRBuilder<> Builder(EntryBB->getTerminator());
if (!PersonalityFn)
PersonalityFn = LPads[0]->getPersonalityFn();
- Value *PersonalityFieldPtr =
- GetElementPtrInst::Create(FuncCtx, Idxs, "pers_fn_gep",
- EntryBB->getTerminator());
- new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
- EntryBB->getTerminator());
+ Value *PersonalityFieldPtr = Builder.CreateConstGEP2_32(FuncCtx, 0, 3,
+ "pers_fn_gep");
+ Builder.CreateStore(PersonalityFn, PersonalityFieldPtr, /*isVolatile=*/true);
// LSDA address
- Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
- EntryBB->getTerminator());
- Idxs[1] = Four;
- Value *LSDAFieldPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "lsda_gep",
- EntryBB->getTerminator());
- new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());
+ Value *LSDA = Builder.CreateCall(LSDAAddrFn, "lsda_addr");
+ Value *LSDAFieldPtr = Builder.CreateConstGEP2_32(FuncCtx, 0, 4, "lsda_gep");
+ Builder.CreateStore(LSDA, LSDAFieldPtr, /*isVolatile=*/true);
return FuncCtx;
}
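
A plain-C++ analogy for what the CreateConstGEP2_32 calls in this rewrite compute: FuncCtx points at a struct, the first index steps over zero whole structs, and the second index selects a field, so CreateConstGEP2_32(FuncCtx, 0, 3, "pers_fn_gep") is "address of field 3". The struct layout below is made up; only the field numbering matters.

#include <iostream>

struct FunctionContext {
  int CallSite;      // index 0
  int Data[4];       // index 1
  void *Exception;   // index 2
  void *Personality; // index 3
  void *LSDA;        // index 4
};

int main() {
  FunctionContext Ctx = {};
  void **PersFnGEP = &Ctx.Personality; // the GEP: &FuncCtx->field3
  *PersFnGEP = nullptr;                // the subsequent volatile store
  std::cout << static_cast<void *>(PersFnGEP) << "\n";
  return 0;
}
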
@@ -417,48 +402,31 @@ bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
Value *FuncCtx =
setupFunctionContext(F, makeArrayRef(LPads.begin(), LPads.end()));
BasicBlock *EntryBB = F.begin();
- Type *Int32Ty = Type::getInt32Ty(F.getContext());
-
- Value *Idxs[2] = {
- ConstantInt::get(Int32Ty, 0), 0
- };
+ IRBuilder<> Builder(EntryBB->getTerminator());
// Get a reference to the jump buffer.
- Idxs[1] = ConstantInt::get(Int32Ty, 5);
- Value *JBufPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "jbuf_gep",
- EntryBB->getTerminator());
+ Value *JBufPtr = Builder.CreateConstGEP2_32(FuncCtx, 0, 5, "jbuf_gep");
// Save the frame pointer.
- Idxs[1] = ConstantInt::get(Int32Ty, 0);
- Value *FramePtr = GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_fp_gep",
- EntryBB->getTerminator());
+ Value *FramePtr = Builder.CreateConstGEP2_32(JBufPtr, 0, 0, "jbuf_fp_gep");
- Value *Val = CallInst::Create(FrameAddrFn,
- ConstantInt::get(Int32Ty, 0),
- "fp",
- EntryBB->getTerminator());
- new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());
+ Value *Val = Builder.CreateCall(FrameAddrFn, Builder.getInt32(0), "fp");
+ Builder.CreateStore(Val, FramePtr, /*isVolatile=*/true);
// Save the stack pointer.
- Idxs[1] = ConstantInt::get(Int32Ty, 2);
- Value *StackPtr = GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_sp_gep",
- EntryBB->getTerminator());
+ Value *StackPtr = Builder.CreateConstGEP2_32(JBufPtr, 0, 2, "jbuf_sp_gep");
- Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
- new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());
+ Val = Builder.CreateCall(StackAddrFn, "sp");
+ Builder.CreateStore(Val, StackPtr, /*isVolatile=*/true);
// Call the setjmp intrinsic. It fills in the rest of the jmpbuf.
- Value *SetjmpArg = CastInst::Create(Instruction::BitCast, JBufPtr,
- Type::getInt8PtrTy(F.getContext()), "",
- EntryBB->getTerminator());
- CallInst::Create(BuiltinSetjmpFn, SetjmpArg, "", EntryBB->getTerminator());
+ Value *SetjmpArg = Builder.CreateBitCast(JBufPtr, Builder.getInt8PtrTy());
+ Builder.CreateCall(BuiltinSetjmpFn, SetjmpArg);
// Store a pointer to the function context so that the back-end will know
// where to look for it.
- Value *FuncCtxArg = CastInst::Create(Instruction::BitCast, FuncCtx,
- Type::getInt8PtrTy(F.getContext()), "",
- EntryBB->getTerminator());
- CallInst::Create(FuncCtxFn, FuncCtxArg, "", EntryBB->getTerminator());
+ Value *FuncCtxArg = Builder.CreateBitCast(FuncCtx, Builder.getInt8PtrTy());
+ Builder.CreateCall(FuncCtxFn, FuncCtxArg);
// At this point, we are all set up, update the invoke instructions to mark
// their call_site values.
diff --git a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
index c8c3fb3..95faafab 100644
--- a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
+++ b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
@@ -143,6 +143,7 @@ void SlotIndexes::renumberIndexes(IndexList::iterator curItr) {
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SlotIndexes::dump() const {
for (IndexList::const_iterator itr = indexList.begin();
itr != indexList.end(); ++itr) {
@@ -159,6 +160,7 @@ void SlotIndexes::dump() const {
dbgs() << "BB#" << i << "\t[" << MBBRanges[i].first << ';'
<< MBBRanges[i].second << ")\n";
}
+#endif
// Print a SlotIndex to a raw_ostream.
void SlotIndex::print(raw_ostream &os) const {
@@ -168,9 +170,11 @@ void SlotIndex::print(raw_ostream &os) const {
os << "invalid";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Dump a SlotIndex to stderr.
void SlotIndex::dump() const {
print(dbgs());
dbgs() << "\n";
}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/SplitKit.cpp b/contrib/llvm/lib/CodeGen/SplitKit.cpp
index 4a2b7ec..dca15ee 100644
--- a/contrib/llvm/lib/CodeGen/SplitKit.cpp
+++ b/contrib/llvm/lib/CodeGen/SplitKit.cpp
@@ -356,6 +356,7 @@ void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) {
Edit->anyRematerializable(0);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SplitEditor::dump() const {
if (RegAssign.empty()) {
dbgs() << " empty\n";
@@ -366,6 +367,7 @@ void SplitEditor::dump() const {
dbgs() << " [" << I.start() << ';' << I.stop() << "):" << I.value();
dbgs() << '\n';
}
+#endif
VNInfo *SplitEditor::defValue(unsigned RegIdx,
const VNInfo *ParentVNI,
diff --git a/contrib/llvm/lib/CodeGen/StackColoring.cpp b/contrib/llvm/lib/CodeGen/StackColoring.cpp
new file mode 100644
index 0000000..1cbee84
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/StackColoring.cpp
@@ -0,0 +1,783 @@
+//===-- StackColoring.cpp -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements the stack-coloring optimization that looks for
+// lifetime marker machine instructions (LIFETIME_START and LIFETIME_END),
+// which represent the possible lifetime of stack slots. It attempts to
+// merge disjoint stack slots and reduce the used stack space.
+// NOTE: This pass is not StackSlotColoring, which optimizes spill slots.
+//
+// TODO: In the future we plan to improve stack coloring in the following ways:
+// 1. Allow merging multiple small slots into a single larger slot at different
+// offsets.
+// 2. Merge this pass with StackSlotColoring and allow merging of allocas with
+// spill slots.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "stackcoloring"
+#include "MachineTraceMetrics.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/Instructions.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+DisableColoring("no-stack-coloring",
+ cl::init(false), cl::Hidden,
+ cl::desc("Disable stack coloring"));
+
+/// The user may write code that uses allocas outside of the declared lifetime
+/// zone. This can happen when the user returns a reference to a local
+/// data-structure. We can detect these cases and decide not to optimize the
+/// code. If this flag is enabled, we try to save the user.
+static cl::opt<bool>
+ProtectFromEscapedAllocas("protect-from-escaped-allocas",
+ cl::init(false), cl::Hidden,
+ cl::desc("Do not optimize lifetime zones that are broken"));
+
+STATISTIC(NumMarkerSeen, "Number of lifetime markers found.");
+STATISTIC(StackSpaceSaved, "Number of bytes saved due to merging slots.");
+STATISTIC(StackSlotMerged, "Number of stack slots merged.");
+STATISTIC(EscapedAllocas,
+ "Number of allocas that escaped the lifetime region");
+
+//===----------------------------------------------------------------------===//
+// StackColoring Pass
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// StackColoring - A machine pass for merging disjoint stack allocations,
+/// marked by the LIFETIME_START and LIFETIME_END pseudo instructions.
+class StackColoring : public MachineFunctionPass {
+ MachineFrameInfo *MFI;
+ MachineFunction *MF;
+
+ /// A class representing liveness information for a single basic block.
+ /// Each bit in the BitVector represents the liveness property
+ /// for a different stack slot.
+ struct BlockLifetimeInfo {
+ /// Which slots BEGINs in each basic block.
+ BitVector Begin;
+ /// Which slots ENDs in each basic block.
+ BitVector End;
+ /// Which slots are marked as LIVE_IN, coming into each basic block.
+ BitVector LiveIn;
+ /// Which slots are marked as LIVE_OUT, coming out of each basic block.
+ BitVector LiveOut;
+ };
+
+ /// Maps active slots (per bit) for each basic block.
+ DenseMap<MachineBasicBlock*, BlockLifetimeInfo> BlockLiveness;
+
+ /// Maps each basic block to its serial number.
+ DenseMap<MachineBasicBlock*, int> BasicBlocks;
+ /// Maps serial numbers back to basic blocks.
+ SmallVector<MachineBasicBlock*, 8> BasicBlockNumbering;
+
+ /// Liveness intervals for each slot.
+ SmallVector<LiveInterval*, 16> Intervals;
+ /// VNInfo is used for the construction of LiveIntervals.
+ VNInfo::Allocator VNInfoAllocator;
+ /// SlotIndex analysis object.
+ SlotIndexes *Indexes;
+
+ /// The list of lifetime markers found. These markers are to be removed
+ /// once the coloring is done.
+ SmallVector<MachineInstr*, 8> Markers;
+
+ /// SlotSizeSorter - A Sort utility for arranging stack slots according
+ /// to their size.
+ struct SlotSizeSorter {
+ MachineFrameInfo *MFI;
+ SlotSizeSorter(MachineFrameInfo *mfi) : MFI(mfi) { }
+ bool operator()(int LHS, int RHS) {
+ // We use -1 to denote an uninteresting slot. Place these slots at the end.
+ if (LHS == -1) return false;
+ if (RHS == -1) return true;
+ // Sort according to size.
+ return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
+ }
+};
+
+public:
+ static char ID;
+ StackColoring() : MachineFunctionPass(ID) {
+ initializeStackColoringPass(*PassRegistry::getPassRegistry());
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnMachineFunction(MachineFunction &MF);
+
+private:
+ /// Debug.
+ void dump();
+
+ /// Removes all of the lifetime marker instructions from the function.
+ /// \returns true if any markers were removed.
+ bool removeAllMarkers();
+
+ /// Scan the machine function and find all of the lifetime markers.
+ /// Record the findings in the BEGIN and END vectors.
+ /// \returns the number of markers found.
+ unsigned collectMarkers(unsigned NumSlot);
+
+ /// Perform the dataflow calculation and calculate the lifetime for each of
+ /// the slots, based on the BEGIN/END vectors. Set the LifetimeLIVE_IN and
+ /// LifetimeLIVE_OUT maps that represent which stack slots are live coming
+ /// in and out of blocks.
+ void calculateLocalLiveness();
+
+ /// Construct the LiveIntervals for the slots.
+ void calculateLiveIntervals(unsigned NumSlots);
+
+ /// Go over the machine function and change instructions which use stack
+ /// slots to use the joint slots.
+ void remapInstructions(DenseMap<int, int> &SlotRemap);
+
+ /// The input program may contain instructions which are not inside lifetime
+ /// markers. This can happen due to a bug in the compiler or due to a bug in
+ /// user code (for example, returning a reference to a local variable).
+ /// This procedure checks all of the instructions in the function and
+ /// invalidates lifetime ranges which do not contain all of the instructions
+ /// which access that frame slot.
+ void removeInvalidSlotRanges();
+
+ /// Map entries which point to other entries to their destination.
+ /// A->B->C becomes A->C.
+ void expungeSlotMap(DenseMap<int, int> &SlotRemap, unsigned NumSlots);
+};
+} // end anonymous namespace
+
+char StackColoring::ID = 0;
+char &llvm::StackColoringID = StackColoring::ID;
+
+INITIALIZE_PASS_BEGIN(StackColoring,
+ "stack-coloring", "Merge disjoint stack slots", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_END(StackColoring,
+ "stack-coloring", "Merge disjoint stack slots", false, false)
+
+void StackColoring::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<SlotIndexes>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+void StackColoring::dump() {
+ for (df_iterator<MachineFunction*> FI = df_begin(MF), FE = df_end(MF);
+ FI != FE; ++FI) {
+ unsigned Num = BasicBlocks[*FI];
+ DEBUG(dbgs()<<"Inspecting block #"<<Num<<" ["<<FI->getName()<<"]\n");
+ Num = 0;
+ DEBUG(dbgs()<<"BEGIN : {");
+ for (unsigned i=0; i < BlockLiveness[*FI].Begin.size(); ++i)
+ DEBUG(dbgs()<<BlockLiveness[*FI].Begin.test(i)<<" ");
+ DEBUG(dbgs()<<"}\n");
+
+ DEBUG(dbgs()<<"END : {");
+ for (unsigned i=0; i < BlockLiveness[*FI].End.size(); ++i)
+ DEBUG(dbgs()<<BlockLiveness[*FI].End.test(i)<<" ");
+
+ DEBUG(dbgs()<<"}\n");
+
+ DEBUG(dbgs()<<"LIVE_IN: {");
+ for (unsigned i=0; i < BlockLiveness[*FI].LiveIn.size(); ++i)
+ DEBUG(dbgs()<<BlockLiveness[*FI].LiveIn.test(i)<<" ");
+
+ DEBUG(dbgs()<<"}\n");
+ DEBUG(dbgs()<<"LIVEOUT: {");
+ for (unsigned i=0; i < BlockLiveness[*FI].LiveOut.size(); ++i)
+ DEBUG(dbgs()<<BlockLiveness[*FI].LiveOut.test(i)<<" ");
+ DEBUG(dbgs()<<"}\n");
+ }
+}
+
+unsigned StackColoring::collectMarkers(unsigned NumSlot) {
+ unsigned MarkersFound = 0;
+ // Scan the function to find all lifetime markers.
+ // NOTE: We use a reverse-post-order iteration to ensure that we obtain a
+ // deterministic numbering, and because we'll need a post-order iteration
+ // later for solving the liveness dataflow problem.
+ for (df_iterator<MachineFunction*> FI = df_begin(MF), FE = df_end(MF);
+ FI != FE; ++FI) {
+
+ // Assign a serial number to this basic block.
+ BasicBlocks[*FI] = BasicBlockNumbering.size();
+ BasicBlockNumbering.push_back(*FI);
+
+ BlockLiveness[*FI].Begin.resize(NumSlot);
+ BlockLiveness[*FI].End.resize(NumSlot);
+
+ for (MachineBasicBlock::iterator BI = (*FI)->begin(), BE = (*FI)->end();
+ BI != BE; ++BI) {
+
+ if (BI->getOpcode() != TargetOpcode::LIFETIME_START &&
+ BI->getOpcode() != TargetOpcode::LIFETIME_END)
+ continue;
+
+ Markers.push_back(BI);
+
+ bool IsStart = BI->getOpcode() == TargetOpcode::LIFETIME_START;
+ MachineOperand &MI = BI->getOperand(0);
+ unsigned Slot = MI.getIndex();
+
+ MarkersFound++;
+
+ const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
+ if (Allocation) {
+ DEBUG(dbgs()<<"Found a lifetime marker for slot #"<<Slot<<
+ " with allocation: "<< Allocation->getName()<<"\n");
+ }
+
+ if (IsStart) {
+ BlockLiveness[*FI].Begin.set(Slot);
+ } else {
+ if (BlockLiveness[*FI].Begin.test(Slot)) {
+ // Allocas that start and end within a single block are handled
+ // specially when computing the LiveIntervals to avoid pessimizing
+ // the liveness propagation.
+ BlockLiveness[*FI].Begin.reset(Slot);
+ } else {
+ BlockLiveness[*FI].End.set(Slot);
+ }
+ }
+ }
+ }
+
+ // Update statistics.
+ NumMarkerSeen += MarkersFound;
+ return MarkersFound;
+}
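As an aside, the Begin/End classification above can be traced on a hypothetical block (a comment-form sketch; the marker shapes are illustrative, not taken from this commit):

// Hypothetical block, slot #5:
//   LIFETIME_START %stack.5  -> Begin.set(5)
//   ... loads/stores to slot 5 ...
//   LIFETIME_END %stack.5    -> Begin.test(5) is true, so Begin.reset(5)
// The lifetime is local to this block: neither Begin nor End survives, the
// slot never enters the dataflow, and its interval is rebuilt later in
// calculateLiveIntervals() directly from the marker SlotIndexes.
// Had only LIFETIME_END appeared here, End.set(5) would record that the
// slot dies in this block, and liveness would flow in from predecessors.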
+
+void StackColoring::calculateLocalLiveness() {
+ // Perform a standard reverse dataflow computation to solve for
+ // global liveness. The BEGIN set here is equivalent to KILL in the standard
+ // formulation, and END is equivalent to GEN. The result of this computation
+ // is a map from blocks to bitvectors where the bitvectors represent which
+ // allocas are live in/out of that block.
+ SmallPtrSet<MachineBasicBlock*, 8> BBSet(BasicBlockNumbering.begin(),
+ BasicBlockNumbering.end());
+ unsigned NumSSMIters = 0;
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ ++NumSSMIters;
+
+ SmallPtrSet<MachineBasicBlock*, 8> NextBBSet;
+
+ for (SmallVector<MachineBasicBlock*, 8>::iterator
+ PI = BasicBlockNumbering.begin(), PE = BasicBlockNumbering.end();
+ PI != PE; ++PI) {
+
+ MachineBasicBlock *BB = *PI;
+ if (!BBSet.count(BB)) continue;
+
+ BitVector LocalLiveIn;
+ BitVector LocalLiveOut;
+
+ // Forward propagation from begins to ends.
+ for (MachineBasicBlock::pred_iterator PI = BB->pred_begin(),
+ PE = BB->pred_end(); PI != PE; ++PI)
+ LocalLiveIn |= BlockLiveness[*PI].LiveOut;
+ LocalLiveIn |= BlockLiveness[BB].End;
+ LocalLiveIn.reset(BlockLiveness[BB].Begin);
+
+ // Reverse propagation from ends to begins.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ SE = BB->succ_end(); SI != SE; ++SI)
+ LocalLiveOut |= BlockLiveness[*SI].LiveIn;
+ LocalLiveOut |= BlockLiveness[BB].Begin;
+ LocalLiveOut.reset(BlockLiveness[BB].End);
+
+ LocalLiveIn |= LocalLiveOut;
+ LocalLiveOut |= LocalLiveIn;
+
+ // After adopting the live bits, we need to turn off the bits that
+ // are deactivated in this block.
+ LocalLiveOut.reset(BlockLiveness[BB].End);
+ LocalLiveIn.reset(BlockLiveness[BB].Begin);
+
+ // If we have both BEGIN and END markers in the same basic block then
+ // we know that the BEGIN marker comes after the END, because we already
+ // handle the case where the BEGIN comes before the END when collecting
+ // the markers (and building the BEGIN/END vectors).
+ // We want to enable the LIVE_IN and LIVE_OUT bits of slots that have
+ // both BEGIN and END markers, because that means the value lives before
+ // and after this basic block.
+ BitVector LocalEndBegin = BlockLiveness[BB].End;
+ LocalEndBegin &= BlockLiveness[BB].Begin;
+ LocalLiveIn |= LocalEndBegin;
+ LocalLiveOut |= LocalEndBegin;
+
+ if (LocalLiveIn.test(BlockLiveness[BB].LiveIn)) {
+ changed = true;
+ BlockLiveness[BB].LiveIn |= LocalLiveIn;
+
+ for (MachineBasicBlock::pred_iterator PI = BB->pred_begin(),
+ PE = BB->pred_end(); PI != PE; ++PI)
+ NextBBSet.insert(*PI);
+ }
+
+ if (LocalLiveOut.test(BlockLiveness[BB].LiveOut)) {
+ changed = true;
+ BlockLiveness[BB].LiveOut |= LocalLiveOut;
+
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ SE = BB->succ_end(); SI != SE; ++SI)
+ NextBBSet.insert(*SI);
+ }
+ }
+
+ BBSet = NextBBSet;
+ }// while changed.
+}
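For readers who want to run the fixed-point in isolation, here is a minimal standalone sketch. It is illustrative only: std::bitset stands in for llvm::BitVector, plain ints for blocks, and it omits the worklist and the extra in/out merging steps the pass applies above.

// Minimal sketch of the liveness fixed-point (hypothetical types).
#include <bitset>
#include <vector>

constexpr int NumSlots = 4;

struct Block {
  std::vector<int> Preds, Succs;
  std::bitset<NumSlots> Begin, End, LiveIn, LiveOut;
};

void solveLiveness(std::vector<Block> &Blocks) {
  bool Changed = true;
  while (Changed) {
    Changed = false;
    for (Block &B : Blocks) {
      std::bitset<NumSlots> In, Out;
      for (int P : B.Preds)      // slots live out of a predecessor flow in,
        In |= Blocks[P].LiveOut;
      In |= B.End;               // as do slots whose END marker is here,
      In &= ~B.Begin;            // unless their lifetime only begins here.
      for (int S : B.Succs)      // symmetrically for live-out.
        Out |= Blocks[S].LiveIn;
      Out |= B.Begin;
      Out &= ~B.End;
      if ((B.LiveIn | In) != B.LiveIn)    { B.LiveIn |= In;   Changed = true; }
      if ((B.LiveOut | Out) != B.LiveOut) { B.LiveOut |= Out; Changed = true; }
    }
  }
}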
+
+void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
+ SmallVector<SlotIndex, 16> Starts;
+ SmallVector<SlotIndex, 16> Finishes;
+
+ // For each block, find which slots are active within this block
+ // and update the live intervals.
+ for (MachineFunction::iterator MBB = MF->begin(), MBBe = MF->end();
+ MBB != MBBe; ++MBB) {
+ Starts.clear();
+ Starts.resize(NumSlots);
+ Finishes.clear();
+ Finishes.resize(NumSlots);
+
+ // Create intervals for the basic blocks that contain lifetime markers.
+ for (SmallVector<MachineInstr*, 8>::iterator it = Markers.begin(),
+ e = Markers.end(); it != e; ++it) {
+ MachineInstr *MI = *it;
+ if (MI->getParent() != MBB)
+ continue;
+
+ assert((MI->getOpcode() == TargetOpcode::LIFETIME_START ||
+ MI->getOpcode() == TargetOpcode::LIFETIME_END) &&
+ "Invalid Lifetime marker");
+
+ bool IsStart = MI->getOpcode() == TargetOpcode::LIFETIME_START;
+ MachineOperand &Mo = MI->getOperand(0);
+ int Slot = Mo.getIndex();
+ assert(Slot >= 0 && "Invalid slot");
+
+ SlotIndex ThisIndex = Indexes->getInstructionIndex(MI);
+
+ if (IsStart) {
+ if (!Starts[Slot].isValid() || Starts[Slot] > ThisIndex)
+ Starts[Slot] = ThisIndex;
+ } else {
+ if (!Finishes[Slot].isValid() || Finishes[Slot] < ThisIndex)
+ Finishes[Slot] = ThisIndex;
+ }
+ }
+
+ // Create intervals for the slots that we previously found to be 'alive'.
+ BitVector Alive = BlockLiveness[MBB].LiveIn;
+ Alive |= BlockLiveness[MBB].LiveOut;
+
+ if (Alive.any()) {
+ for (int pos = Alive.find_first(); pos != -1;
+ pos = Alive.find_next(pos)) {
+ if (!Starts[pos].isValid())
+ Starts[pos] = Indexes->getMBBStartIdx(MBB);
+ if (!Finishes[pos].isValid())
+ Finishes[pos] = Indexes->getMBBEndIdx(MBB);
+ }
+ }
+
+ for (unsigned i = 0; i < NumSlots; ++i) {
+ assert(Starts[i].isValid() == Finishes[i].isValid() && "Unmatched range");
+ if (!Starts[i].isValid())
+ continue;
+
+ assert(Starts[i] && Finishes[i] && "Invalid interval");
+ VNInfo *ValNum = Intervals[i]->getValNumInfo(0);
+ SlotIndex S = Starts[i];
+ SlotIndex F = Finishes[i];
+ if (S < F) {
+ // We have a single consecutive region.
+ Intervals[i]->addRange(LiveRange(S, F, ValNum));
+ } else {
+ // We have two non-consecutive regions. This happens when
+ // LIFETIME_START appears after the LIFETIME_END marker.
+ SlotIndex NewStart = Indexes->getMBBStartIdx(MBB);
+ SlotIndex NewFin = Indexes->getMBBEndIdx(MBB);
+ Intervals[i]->addRange(LiveRange(NewStart, F, ValNum));
+ Intervals[i]->addRange(LiveRange(S, NewFin, ValNum));
+ }
+ }
+ }
+}
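A worked example of the wrapped case above, with made-up slot indexes:

// Block covering SlotIndexes [100, 200), slot #0:
//   LIFETIME_END   at 120  -> Finishes[0] = 120
//   LIFETIME_START at 180  -> Starts[0]   = 180
// Starts[0] > Finishes[0], so the slot is live across the block boundary
// and two ranges are added instead of one:
//   [100, 120)   // block start up to the END marker
//   [180, 200)   // the START marker up to block end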
+
+bool StackColoring::removeAllMarkers() {
+ unsigned Count = 0;
+ for (unsigned i = 0; i < Markers.size(); ++i) {
+ Markers[i]->eraseFromParent();
+ Count++;
+ }
+ Markers.clear();
+
+ DEBUG(dbgs()<<"Removed "<<Count<<" markers.\n");
+ return Count;
+}
+
+void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
+ unsigned FixedInstr = 0;
+ unsigned FixedMemOp = 0;
+ unsigned FixedDbg = 0;
+ MachineModuleInfo *MMI = &MF->getMMI();
+
+ // Remap debug information that refers to stack slots.
+ MachineModuleInfo::VariableDbgInfoMapTy &VMap = MMI->getVariableDbgInfo();
+ for (MachineModuleInfo::VariableDbgInfoMapTy::iterator VI = VMap.begin(),
+ VE = VMap.end(); VI != VE; ++VI) {
+ const MDNode *Var = VI->first;
+ if (!Var) continue;
+ std::pair<unsigned, DebugLoc> &VP = VI->second;
+ if (SlotRemap.count(VP.first)) {
+ DEBUG(dbgs()<<"Remapping debug info for ["<<Var->getName()<<"].\n");
+ VP.first = SlotRemap[VP.first];
+ FixedDbg++;
+ }
+ }
+
+ // Keep a list of *allocas* which need to be remapped.
+ DenseMap<const AllocaInst*, const AllocaInst*> Allocas;
+ for (DenseMap<int, int>::iterator it = SlotRemap.begin(),
+ e = SlotRemap.end(); it != e; ++it) {
+ const AllocaInst *From = MFI->getObjectAllocation(it->first);
+ const AllocaInst *To = MFI->getObjectAllocation(it->second);
+ assert(To && From && "Invalid allocation object");
+ Allocas[From] = To;
+ }
+
+ // Remap all instructions to the new stack slots.
+ MachineFunction::iterator BB, BBE;
+ MachineBasicBlock::iterator I, IE;
+ for (BB = MF->begin(), BBE = MF->end(); BB != BBE; ++BB)
+ for (I = BB->begin(), IE = BB->end(); I != IE; ++I) {
+
+ // Skip lifetime markers. We'll remove them soon.
+ if (I->getOpcode() == TargetOpcode::LIFETIME_START ||
+ I->getOpcode() == TargetOpcode::LIFETIME_END)
+ continue;
+
+ // Update the MachineMemOperand to use the new alloca.
+ for (MachineInstr::mmo_iterator MM = I->memoperands_begin(),
+ E = I->memoperands_end(); MM != E; ++MM) {
+ MachineMemOperand *MMO = *MM;
+
+ const Value *V = MMO->getValue();
+
+ if (!V)
+ continue;
+
+ // Climb up and find the original alloca.
+ V = GetUnderlyingObject(V);
+ // If we did not find one, or if the one that we found is not in our
+ // map, then move on.
+ if (!V || !isa<AllocaInst>(V)) {
+ // Clear mem operand since we don't know for sure that it doesn't
+ // alias a merged alloca.
+ MMO->setValue(0);
+ continue;
+ }
+ const AllocaInst *AI = cast<AllocaInst>(V);
+ if (!Allocas.count(AI))
+ continue;
+
+ MMO->setValue(Allocas[AI]);
+ FixedMemOp++;
+ }
+
+ // Update all of the machine instruction operands.
+ for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+ MachineOperand &MO = I->getOperand(i);
+
+ if (!MO.isFI())
+ continue;
+ int FromSlot = MO.getIndex();
+
+ // Don't touch arguments.
+ if (FromSlot<0)
+ continue;
+
+ // Only look at mapped slots.
+ if (!SlotRemap.count(FromSlot))
+ continue;
+
+ // In a debug build, check that the instruction that we are modifying is
+ // inside the expected live range. If the instruction is not inside
+ // the calculated range then it means that the alloca usage moved
+ // outside of the lifetime markers, or that the user has a bug.
+ // NOTE: Alloca address calculations which happen outside the lifetime
+ // zone are okay, despite the fact that we don't have a good way
+ // of validating all of the usages of the calculation.
+#ifndef NDEBUG
+ bool TouchesMemory = I->mayLoad() || I->mayStore();
+ // If we *don't* protect the user from escaped allocas, don't bother
+ // validating the instructions.
+ if (!I->isDebugValue() && TouchesMemory && ProtectFromEscapedAllocas) {
+ SlotIndex Index = Indexes->getInstructionIndex(I);
+ LiveInterval *Interval = Intervals[FromSlot];
+ assert(Interval->find(Index) != Interval->end() &&
+ "Found instruction usage outside of live range.");
+ }
+#endif
+
+ // Fix the machine instructions.
+ int ToSlot = SlotRemap[FromSlot];
+ MO.setIndex(ToSlot);
+ FixedInstr++;
+ }
+ }
+
+ DEBUG(dbgs()<<"Fixed "<<FixedMemOp<<" machine memory operands.\n");
+ DEBUG(dbgs()<<"Fixed "<<FixedDbg<<" debug locations.\n");
+ DEBUG(dbgs()<<"Fixed "<<FixedInstr<<" machine instructions.\n");
+}
+
+void StackColoring::removeInvalidSlotRanges() {
+ MachineFunction::iterator BB, BBE;
+ MachineBasicBlock::iterator I, IE;
+ for (BB = MF->begin(), BBE = MF->end(); BB != BBE; ++BB)
+ for (I = BB->begin(), IE = BB->end(); I != IE; ++I) {
+
+ if (I->getOpcode() == TargetOpcode::LIFETIME_START ||
+ I->getOpcode() == TargetOpcode::LIFETIME_END || I->isDebugValue())
+ continue;
+
+ // Some intervals are suspicious! In some cases we find address
+ // calculations outside of the lifetime zone, but no actual memory
+ // read or write. Memory accesses outside of the lifetime zone are a clear
+ // violation, but address calculations are okay. This can happen when
+ // GEPs are hoisted outside of the lifetime zone.
+ // So, in here we only check instructions which can read or write memory.
+ if (!I->mayLoad() && !I->mayStore())
+ continue;
+
+ // Check all of the machine operands.
+ for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+ MachineOperand &MO = I->getOperand(i);
+
+ if (!MO.isFI())
+ continue;
+
+ int Slot = MO.getIndex();
+
+ if (Slot<0)
+ continue;
+
+ if (Intervals[Slot]->empty())
+ continue;
+
+ // Check that the used slot is inside the calculated lifetime range.
+ // If it is not, warn about it and invalidate the range.
+ LiveInterval *Interval = Intervals[Slot];
+ SlotIndex Index = Indexes->getInstructionIndex(I);
+ if (Interval->find(Index) == Interval->end()) {
+ Intervals[Slot]->clear();
+ DEBUG(dbgs()<<"Invalidating range #"<<Slot<<"\n");
+ EscapedAllocas++;
+ }
+ }
+ }
+}
+
+void StackColoring::expungeSlotMap(DenseMap<int, int> &SlotRemap,
+ unsigned NumSlots) {
+ // Expunge slot remap map.
+ for (unsigned i=0; i < NumSlots; ++i) {
+ // If slot 'i' is mapped, follow the chain of mappings to its final target.
+ if (SlotRemap.count(i)) {
+ int Target = SlotRemap[i];
+ // As long as our target is mapped to something else, follow it.
+ while (SlotRemap.count(Target)) {
+ Target = SlotRemap[Target];
+ SlotRemap[i] = Target;
+ }
+ }
+ }
+}
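The same flattening can be sketched standalone (illustrative only; std::map in place of llvm::DenseMap). A->B->C becomes A->C:

#include <cassert>
#include <map>

void expunge(std::map<int, int> &SlotRemap, unsigned NumSlots) {
  for (unsigned i = 0; i < NumSlots; ++i) {
    if (!SlotRemap.count(i))
      continue;
    int Target = SlotRemap[i];
    // Follow the chain until the target is not itself remapped. Merging
    // never creates cycles, so this terminates.
    while (SlotRemap.count(Target)) {
      Target = SlotRemap[Target];
      SlotRemap[i] = Target;
    }
  }
}

int main() {
  std::map<int, int> M = {{0, 1}, {1, 2}}; // slot 0 -> 1 -> 2
  expunge(M, 3);
  assert(M[0] == 2 && M[1] == 2);          // both chains now end at slot 2
}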
+
+bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
+ DEBUG(dbgs() << "********** Stack Coloring **********\n"
+ << "********** Function: "
+ << ((const Value*)Func.getFunction())->getName() << '\n');
+ MF = &Func;
+ MFI = MF->getFrameInfo();
+ Indexes = &getAnalysis<SlotIndexes>();
+ BlockLiveness.clear();
+ BasicBlocks.clear();
+ BasicBlockNumbering.clear();
+ Markers.clear();
+ Intervals.clear();
+ VNInfoAllocator.Reset();
+
+ unsigned NumSlots = MFI->getObjectIndexEnd();
+
+ // If there are no stack slots then there are no markers to remove.
+ if (!NumSlots)
+ return false;
+
+ SmallVector<int, 8> SortedSlots;
+
+ SortedSlots.reserve(NumSlots);
+ Intervals.reserve(NumSlots);
+
+ unsigned NumMarkers = collectMarkers(NumSlots);
+
+ unsigned TotalSize = 0;
+ DEBUG(dbgs()<<"Found "<<NumMarkers<<" markers and "<<NumSlots<<" slots\n");
+ DEBUG(dbgs()<<"Slot structure:\n");
+
+ for (int i=0; i < MFI->getObjectIndexEnd(); ++i) {
+ DEBUG(dbgs()<<"Slot #"<<i<<" - "<<MFI->getObjectSize(i)<<" bytes.\n");
+ TotalSize += MFI->getObjectSize(i);
+ }
+
+ DEBUG(dbgs()<<"Total Stack size: "<<TotalSize<<" bytes\n\n");
+
+ // Don't continue if there are not enough lifetime markers, the stack
+ // is too small, or we were told not to optimize the slots.
+ if (NumMarkers < 2 || TotalSize < 16 || DisableColoring) {
+ DEBUG(dbgs()<<"Will not try to merge slots.\n");
+ return removeAllMarkers();
+ }
+
+ for (unsigned i=0; i < NumSlots; ++i) {
+ LiveInterval *LI = new LiveInterval(i, 0);
+ Intervals.push_back(LI);
+ LI->getNextValue(Indexes->getZeroIndex(), VNInfoAllocator);
+ SortedSlots.push_back(i);
+ }
+
+ // Calculate the liveness of each block.
+ calculateLocalLiveness();
+
+ // Propagate the liveness information.
+ calculateLiveIntervals(NumSlots);
+
+ // Search for allocas which are used outside of the declared lifetime
+ // markers.
+ if (ProtectFromEscapedAllocas)
+ removeInvalidSlotRanges();
+
+ // Maps old slots to new slots.
+ DenseMap<int, int> SlotRemap;
+ unsigned RemovedSlots = 0;
+ unsigned ReducedSize = 0;
+
+ // Do not bother looking at empty intervals.
+ for (unsigned I = 0; I < NumSlots; ++I) {
+ if (Intervals[SortedSlots[I]]->empty())
+ SortedSlots[I] = -1;
+ }
+
+ // This is a simple greedy algorithm for merging allocas. First, sort the
+ // slots, placing the largest slots first. Next, perform an n^2 scan and look
+ // for disjoint slots. When you find disjoint slots, merge the smaller one
+ // into the bigger one and update the live interval. Remove the small alloca
+ // and continue.
+
+ // Sort the slots according to their size. Place unused slots at the end.
+ std::sort(SortedSlots.begin(), SortedSlots.end(), SlotSizeSorter(MFI));
+
+ bool Changed = true;
+ while (Changed) {
+ Changed = false;
+ for (unsigned I = 0; I < NumSlots; ++I) {
+ if (SortedSlots[I] == -1)
+ continue;
+
+ for (unsigned J=I+1; J < NumSlots; ++J) {
+ if (SortedSlots[J] == -1)
+ continue;
+
+ int FirstSlot = SortedSlots[I];
+ int SecondSlot = SortedSlots[J];
+ LiveInterval *First = Intervals[FirstSlot];
+ LiveInterval *Second = Intervals[SecondSlot];
+ assert(!First->empty() && !Second->empty() && "Found an empty range");
+
+ // Merge disjoint slots.
+ if (!First->overlaps(*Second)) {
+ Changed = true;
+ First->MergeRangesInAsValue(*Second, First->getValNumInfo(0));
+ SlotRemap[SecondSlot] = FirstSlot;
+ SortedSlots[J] = -1;
+ DEBUG(dbgs()<<"Merging #"<<FirstSlot<<" and slots #"<<
+ SecondSlot<<" together.\n");
+ unsigned MaxAlignment = std::max(MFI->getObjectAlignment(FirstSlot),
+ MFI->getObjectAlignment(SecondSlot));
+
+ assert(MFI->getObjectSize(FirstSlot) >=
+ MFI->getObjectSize(SecondSlot) &&
+ "Merging a small object into a larger one");
+
+ RemovedSlots+=1;
+ ReducedSize += MFI->getObjectSize(SecondSlot);
+ MFI->setObjectAlignment(FirstSlot, MaxAlignment);
+ MFI->RemoveStackObject(SecondSlot);
+ }
+ }
+ }
+ }// while changed.
+
+ // Record statistics.
+ StackSpaceSaved += ReducedSize;
+ StackSlotMerged += RemovedSlots;
+ DEBUG(dbgs()<<"Merge "<<RemovedSlots<<" slots. Saved "<<
+ ReducedSize<<" bytes\n");
+
+ // Scan the entire function and update all machine operands that use frame
+ // indices to use the remapped frame index.
+ expungeSlotMap(SlotRemap, NumSlots);
+ remapInstructions(SlotRemap);
+
+ // Release the intervals.
+ for (unsigned I = 0; I < NumSlots; ++I) {
+ delete Intervals[I];
+ }
+
+ return removeAllMarkers();
+}
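The greedy merge at the heart of runOnMachineFunction can be sketched on plain ranges. The types here are illustrative only: LiveInterval keeps a precise multi-range union, while this sketch widens the surviving slot to the hull of both ranges (conservative but safe) and omits the outer fixed-point loop.

#include <algorithm>
#include <utility>
#include <vector>

struct Slot {
  unsigned Size;
  std::pair<int, int> Range; // half-open [start, end)
  bool Dead = false;
};

static bool overlaps(std::pair<int, int> A, std::pair<int, int> B) {
  return A.first < B.second && B.first < A.second;
}

// Remap[i] must start out equal to i; on return, Remap[small] == big for
// every merged pair.
void mergeDisjoint(std::vector<Slot> &Slots, std::vector<int> &Remap) {
  std::vector<unsigned> Order(Slots.size());
  for (unsigned i = 0; i < Order.size(); ++i)
    Order[i] = i;
  // Largest slots first, so the smaller slot always folds into the bigger.
  std::sort(Order.begin(), Order.end(), [&](unsigned A, unsigned B) {
    return Slots[A].Size > Slots[B].Size;
  });
  for (unsigned I = 0; I < Order.size(); ++I) {
    if (Slots[Order[I]].Dead)
      continue;
    for (unsigned J = I + 1; J < Order.size(); ++J) {
      if (Slots[Order[J]].Dead ||
          overlaps(Slots[Order[I]].Range, Slots[Order[J]].Range))
        continue;
      // Disjoint lifetimes: the small slot can live inside the big one.
      Slot &Big = Slots[Order[I]], &Small = Slots[Order[J]];
      Big.Range = {std::min(Big.Range.first, Small.Range.first),
                   std::max(Big.Range.second, Small.Range.second)};
      Remap[Order[J]] = Order[I];
      Small.Dead = true;
    }
  }
}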
diff --git a/contrib/llvm/lib/CodeGen/StackProtector.cpp b/contrib/llvm/lib/CodeGen/StackProtector.cpp
index f1eab1f..31e9ec0 100644
--- a/contrib/llvm/lib/CodeGen/StackProtector.cpp
+++ b/contrib/llvm/lib/CodeGen/StackProtector.cpp
@@ -26,18 +26,12 @@
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/Triple.h"
using namespace llvm;
-// SSPBufferSize - The lower bound for a buffer to be considered for stack
-// smashing protection.
-static cl::opt<unsigned>
-SSPBufferSize("stack-protector-buffer-size", cl::init(8),
- cl::desc("Lower bound for a buffer to be considered for "
- "stack protection"));
-
namespace {
class StackProtector : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
@@ -61,6 +55,11 @@ namespace {
/// check fails.
BasicBlock *CreateFailBB();
+ /// ContainsProtectableArray - Check whether the type either is an array or
+ /// contains an array of sufficient size so that we need stack protectors
+ /// for it.
+ bool ContainsProtectableArray(Type *Ty, bool InStruct = false) const;
+
/// RequiresStackProtector - Check whether or not this function needs a
/// stack protector based upon the stack protector level.
bool RequiresStackProtector() const;
@@ -100,21 +99,50 @@ bool StackProtector::runOnFunction(Function &Fn) {
return InsertStackProtectors();
}
+/// ContainsProtectableArray - Check whether the type either is an array or
+/// contains a char array of sufficient size so that we need stack protectors
+/// for it.
+bool StackProtector::ContainsProtectableArray(Type *Ty, bool InStruct) const {
+ if (!Ty) return false;
+ if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ const TargetMachine &TM = TLI->getTargetMachine();
+ if (!AT->getElementType()->isIntegerTy(8)) {
+ Triple Trip(TM.getTargetTriple());
+
+ // If we're on a non-Darwin platform or we're inside a structure, don't
+ // add stack protectors unless the array is a character array.
+ if (InStruct || !Trip.isOSDarwin())
+ return false;
+ }
+
+ // If an array has at least SSPBufferSize bytes of allocated space, then we
+ // emit stack protectors.
+ if (TM.Options.SSPBufferSize <= TLI->getDataLayout()->getTypeAllocSize(AT))
+ return true;
+ }
+
+ const StructType *ST = dyn_cast<StructType>(Ty);
+ if (!ST) return false;
+
+ for (StructType::element_iterator I = ST->element_begin(),
+ E = ST->element_end(); I != E; ++I)
+ if (ContainsProtectableArray(*I, true))
+ return true;
+
+ return false;
+}
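For example (hypothetical IR types, assuming the default SSPBufferSize of 8 carried over from the option this patch replaces):

//   [16 x i8]                       -> protectable everywhere: a char array
//                                      of 16 >= 8 bytes.
//   [4 x i32] as a direct alloca    -> protectable only on Darwin; elsewhere
//                                      non-char arrays are skipped.
//   struct { int x; [4 x i32] a; }  -> not protectable: the recursion passes
//                                      InStruct = true, which rejects nested
//                                      non-char arrays on every platform.
//   struct { int x; [16 x i8] b; }  -> protectable: a nested char array
//                                      still qualifies.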
+
/// RequiresStackProtector - Check whether or not this function needs a stack
/// protector based upon the stack protector level. The heuristic we use is to
/// add a guard variable to functions that call alloca, and functions with
/// buffers larger than SSPBufferSize bytes.
bool StackProtector::RequiresStackProtector() const {
- if (F->hasFnAttr(Attribute::StackProtectReq))
+ if (F->getFnAttributes().hasAttribute(Attributes::StackProtectReq))
return true;
- if (!F->hasFnAttr(Attribute::StackProtect))
+ if (!F->getFnAttributes().hasAttribute(Attributes::StackProtect))
return false;
- const TargetData *TD = TLI->getTargetData();
- const TargetMachine &TM = TLI->getTargetMachine();
- Triple Trip(TM.getTargetTriple());
-
for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
BasicBlock *BB = I;
@@ -126,17 +154,8 @@ bool StackProtector::RequiresStackProtector() const {
// protectors.
return true;
- if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
- // If we're on a non-Darwin platform, don't add stack protectors
- // unless the array is a character array.
- if (!Trip.isOSDarwin() && !AT->getElementType()->isIntegerTy(8))
- continue;
-
- // If an array has more than SSPBufferSize bytes of allocated space,
- // then we emit stack protectors.
- if (SSPBufferSize <= TD->getTypeAllocSize(AT))
- return true;
- }
+ if (ContainsProtectableArray(AI->getAllocatedType()))
+ return true;
}
}
diff --git a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
index 20da36e..d349abc 100644
--- a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -11,8 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "stackcoloring"
-#include "llvm/Function.h"
+#define DEBUG_TYPE "stackslotcoloring"
#include "llvm/Module.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -391,8 +390,7 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
DEBUG({
dbgs() << "********** Stack Slot Coloring **********\n"
- << "********** Function: "
- << MF.getFunction()->getName() << '\n';
+ << "********** Function: " << MF.getName() << '\n';
});
MFI = MF.getFrameInfo();
diff --git a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
index 5b06195..39fd600 100644
--- a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
@@ -404,9 +404,9 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &MF) {
}
void StrongPHIElimination::addReg(unsigned Reg) {
- if (RegNodeMap.count(Reg))
- return;
- RegNodeMap[Reg] = new (Allocator) Node(Reg);
+ Node *&N = RegNodeMap[Reg];
+ if (!N)
+ N = new (Allocator) Node(Reg);
}
StrongPHIElimination::Node*
@@ -714,8 +714,9 @@ void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
assert(getRegColor(CopyReg) == CopyReg);
}
- if (!InsertedSrcCopyMap.count(std::make_pair(PredBB, PHIColor)))
- InsertedSrcCopyMap[std::make_pair(PredBB, PHIColor)] = CopyInstr;
+ // Insert into map if not already there.
+ InsertedSrcCopyMap.insert(std::make_pair(std::make_pair(PredBB, PHIColor),
+ CopyInstr));
}
SrcMO.setReg(CopyReg);
diff --git a/contrib/llvm/lib/CodeGen/TailDuplication.cpp b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
index a813fa6..1497d1b 100644
--- a/contrib/llvm/lib/CodeGen/TailDuplication.cpp
+++ b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
@@ -552,7 +552,8 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
// compensate for the duplication.
unsigned MaxDuplicateCount;
if (TailDuplicateSize.getNumOccurrences() == 0 &&
- MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize))
MaxDuplicateCount = 1;
else
MaxDuplicateCount = TailDuplicateSize;
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
index ddee6b2..4439192 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -99,17 +99,8 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
if (NewMI) {
// Create a new instruction.
- bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false;
MachineFunction &MF = *MI->getParent()->getParent();
- if (HasDef)
- return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
- .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead), SubReg0)
- .addReg(Reg2, getKillRegState(Reg2IsKill), SubReg2)
- .addReg(Reg1, getKillRegState(Reg1IsKill), SubReg1);
- else
- return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
- .addReg(Reg2, getKillRegState(Reg2IsKill), SubReg2)
- .addReg(Reg1, getKillRegState(Reg1IsKill), SubReg1);
+ MI = MF.CloneMachineInstr(MI);
}
if (HasDef) {
@@ -572,6 +563,8 @@ TargetInstrInfoImpl::getNumMicroOps(const InstrItineraryData *ItinData,
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
const MachineInstr *DefMI) const {
+ if (DefMI->isTransient())
+ return 0;
if (DefMI->mayLoad())
return SchedModel->LoadLatency;
if (isHighLatencyDef(DefMI->getOpcode()))
@@ -615,13 +608,13 @@ getOperandLatency(const InstrItineraryData *ItinData,
/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
-static int computeDefOperandLatency(
- const TargetInstrInfo *TII, const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, bool FindMin) {
+int TargetInstrInfo::computeDefOperandLatency(
+ const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, bool FindMin) const {
// Let the target hook getInstrLatency handle missing itineraries.
if (!ItinData)
- return TII->getInstrLatency(ItinData, DefMI);
+ return getInstrLatency(ItinData, DefMI);
// Return a latency based on the itinerary properties and defining instruction
// if possible. Some common subtargets don't require per-operand latency,
@@ -630,7 +623,7 @@ static int computeDefOperandLatency(
// If MinLatency is valid, call getInstrLatency. This uses Stage latency if
// it exists before defaulting to MinLatency.
if (ItinData->SchedModel->MinLatency >= 0)
- return TII->getInstrLatency(ItinData, DefMI);
+ return getInstrLatency(ItinData, DefMI);
// If MinLatency is invalid, OperandLatency is interpreted as MinLatency.
// For empty itineraries, short-circuit the check and default to one cycle.
@@ -638,29 +631,42 @@ static int computeDefOperandLatency(
return 1;
}
else if(ItinData->isEmpty())
- return TII->defaultDefLatency(ItinData->SchedModel, DefMI);
+ return defaultDefLatency(ItinData->SchedModel, DefMI);
// ...operand lookup required
return -1;
}
/// computeOperandLatency - Compute and return the latency of the given data
-/// dependent def and use when the operand indices are already known.
+/// dependent def and use when the operand indices are already known. UseMI may
+/// be NULL for an unknown use.
+///
+/// FindMin may be set to get the minimum vs. expected latency. Minimum
+/// latency is used for scheduling groups, while expected latency is for
+/// instruction cost and critical path.
///
-/// FindMin may be set to get the minimum vs. expected latency.
+/// Depending on the subtarget's itinerary properties, this may or may not need
+/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
+/// UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx,
bool FindMin) const {
- int DefLatency = computeDefOperandLatency(this, ItinData, DefMI, FindMin);
+ int DefLatency = computeDefOperandLatency(ItinData, DefMI, FindMin);
if (DefLatency >= 0)
return DefLatency;
assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
- int OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
+ int OperLatency = 0;
+ if (UseMI)
+ OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
+ else {
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
+ }
if (OperLatency >= 0)
return OperLatency;
@@ -673,77 +679,3 @@ computeOperandLatency(const InstrItineraryData *ItinData,
defaultDefLatency(ItinData->SchedModel, DefMI));
return InstrLatency;
}
-
-/// computeOperandLatency - Compute and return the latency of the given data
-/// dependent def and use. DefMI must be a valid def. UseMI may be NULL for an
-/// unknown use. Depending on the subtarget's itinerary properties, this may or
-/// may not need to call getOperandLatency().
-///
-/// FindMin may be set to get the minimum vs. expected latency. Minimum
-/// latency is used for scheduling groups, while expected latency is for
-/// instruction cost and critical path.
-///
-/// For most subtargets, we don't need DefIdx or UseIdx to compute min latency.
-/// DefMI must be a valid definition, but UseMI may be NULL for an unknown use.
-unsigned TargetInstrInfo::
-computeOperandLatency(const InstrItineraryData *ItinData,
- const TargetRegisterInfo *TRI,
- const MachineInstr *DefMI, const MachineInstr *UseMI,
- unsigned Reg, bool FindMin) const {
-
- int DefLatency = computeDefOperandLatency(this, ItinData, DefMI, FindMin);
- if (DefLatency >= 0)
- return DefLatency;
-
- assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
-
- // Find the definition of the register in the defining instruction.
- int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
- if (DefIdx != -1) {
- const MachineOperand &MO = DefMI->getOperand(DefIdx);
- if (MO.isReg() && MO.isImplicit() &&
- DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
- // This is an implicit def, getOperandLatency() won't return the correct
- // latency. e.g.
- // %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
- // %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
- // What we want is to compute latency between def of %D6/%D7 and use of
- // %Q3 instead.
- unsigned Op2 = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
- if (DefMI->getOperand(Op2).isReg())
- DefIdx = Op2;
- }
- // For all uses of the register, calculate the maxmimum latency
- int OperLatency = -1;
-
- // UseMI is null, then it must be a scheduling barrier.
- if (!UseMI) {
- unsigned DefClass = DefMI->getDesc().getSchedClass();
- OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
- }
- else {
- for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = UseMI->getOperand(i);
- if (!MO.isReg() || !MO.isUse())
- continue;
- unsigned MOReg = MO.getReg();
- if (MOReg != Reg)
- continue;
-
- int UseCycle = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, i);
- OperLatency = std::max(OperLatency, UseCycle);
- }
- }
- // If we found an operand latency, we're done.
- if (OperLatency >= 0)
- return OperLatency;
- }
- // No operand latency was found.
- unsigned InstrLatency = getInstrLatency(ItinData, DefMI);
-
- // Expected latency is the max of the stage latency and itinerary props.
- if (!FindMin)
- InstrLatency = std::max(InstrLatency,
- defaultDefLatency(ItinData->SchedModel, DefMI));
- return InstrLatency;
-}
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 2a2fa9e..8f5d770 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -27,7 +27,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Dwarf.h"
@@ -77,9 +77,9 @@ void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer,
Flags,
SectionKind::getDataRel(),
0, Label->getName());
- unsigned Size = TM.getTargetData()->getPointerSize();
+ unsigned Size = TM.getDataLayout()->getPointerSize();
Streamer.SwitchSection(Sec);
- Streamer.EmitValueToAlignment(TM.getTargetData()->getPointerABIAlignment());
+ Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment());
Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject);
const MCExpr *E = MCConstantExpr::Create(Size, getContext());
Streamer.EmitELFSize(Label, E);
@@ -247,7 +247,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
// FIXME: this is getting the alignment of the character, not the
// alignment of the global!
unsigned Align =
- TM.getTargetData()->getPreferredAlignment(cast<GlobalVariable>(GV));
+ TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV));
const char *SizeSpec = ".rodata.str1.";
if (Kind.isMergeable2ByteCString())
@@ -522,14 +522,14 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
// FIXME: Alignment check should be handled by section classifier.
if (Kind.isMergeable1ByteCString() &&
- TM.getTargetData()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
+ TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
return CStringSection;
// Do not put 16-bit arrays in the UString section if they have an
// externally visible label, this runs into issues with certain linker
// versions.
if (Kind.isMergeable2ByteCString() && !GV->hasExternalLinkage() &&
- TM.getTargetData()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
+ TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
return UStringSection;
if (Kind.isMergeableConst()) {
diff --git a/contrib/llvm/lib/CodeGen/TargetSchedule.cpp b/contrib/llvm/lib/CodeGen/TargetSchedule.cpp
new file mode 100644
index 0000000..ca3b0e0
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/TargetSchedule.cpp
@@ -0,0 +1,306 @@
+//===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a wrapper around MCSchedModel that allows the interface
+// to benefit from information currently only available in TargetInstrInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
+ cl::desc("Use TargetSchedModel for latency lookup"));
+
+static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
+ cl::desc("Use InstrItineraryData for latency lookup"));
+
+bool TargetSchedModel::hasInstrSchedModel() const {
+ return EnableSchedModel && SchedModel.hasInstrSchedModel();
+}
+
+bool TargetSchedModel::hasInstrItineraries() const {
+ return EnableSchedItins && !InstrItins.isEmpty();
+}
+
+static unsigned gcd(unsigned Dividend, unsigned Divisor) {
+ // Dividend and Divisor will be naturally swapped as needed.
+ while (Divisor) {
+ unsigned Rem = Dividend % Divisor;
+ Dividend = Divisor;
+ Divisor = Rem;
+ }
+ return Dividend;
+}
+static unsigned lcm(unsigned A, unsigned B) {
+ unsigned LCM = (uint64_t(A) * B) / gcd(A, B);
+ assert((LCM >= A && LCM >= B) && "LCM overflow");
+ return LCM;
+}
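A quick sanity check of the two helpers (illustrative only; it assumes compilation in the same translation unit, since both helpers are static, and that lcm's 64-bit intermediate product avoids overflow):

#include <cassert>
int main() {
  assert(gcd(12, 18) == 6);
  assert(gcd(7, 0) == 7);  // gcd(x, 0) == x by construction
  assert(lcm(4, 6) == 12);
  assert(lcm(1, 7) == 7);  // an IssueWidth of 1 leaves NumUnits unchanged
}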
+
+void TargetSchedModel::init(const MCSchedModel &sm,
+ const TargetSubtargetInfo *sti,
+ const TargetInstrInfo *tii) {
+ SchedModel = sm;
+ STI = sti;
+ TII = tii;
+ STI->initInstrItins(InstrItins);
+
+ unsigned NumRes = SchedModel.getNumProcResourceKinds();
+ ResourceFactors.resize(NumRes);
+ ResourceLCM = SchedModel.IssueWidth;
+ for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
+ unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
+ if (NumUnits > 0)
+ ResourceLCM = lcm(ResourceLCM, NumUnits);
+ }
+ MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
+ for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
+ unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
+ ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
+ }
+}
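A worked example of the normalization, with made-up machine parameters:

// Hypothetical machine: IssueWidth = 2, resource A has 3 units, B has 1.
//   ResourceLCM        = lcm(lcm(2, 3), 1) = 6
//   MicroOpFactor      = 6 / 2 = 3   // weight of one issue slot
//   ResourceFactors[A] = 6 / 3 = 2   // weight of one cycle on A
//   ResourceFactors[B] = 6 / 1 = 6   // weight of one cycle on B
// Scaling everything to a common LCM lets per-resource cycle counts be
// compared and summed as integers even when unit counts differ.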
+
+unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
+ const MCSchedClassDesc *SC) const {
+ if (hasInstrItineraries()) {
+ int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
+ return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI);
+ }
+ if (hasInstrSchedModel()) {
+ if (!SC)
+ SC = resolveSchedClass(MI);
+ if (SC->isValid())
+ return SC->NumMicroOps;
+ }
+ return MI->isTransient() ? 0 : 1;
+}
+
+// The machine model may explicitly specify an invalid latency, which
+// effectively means infinite latency. Since users of the TargetSchedule API
+// don't know how to handle this, we convert it to a very large latency that is
+// easy to distinguish when debugging the DAG but won't induce overflow.
+static unsigned convertLatency(int Cycles) {
+ return Cycles >= 0 ? Cycles : 1000;
+}
+
+/// If we can determine the operand latency from the def only, without machine
+/// model or itinerary lookup, do so. Otherwise return -1.
+int TargetSchedModel::getDefLatency(const MachineInstr *DefMI,
+ bool FindMin) const {
+
+ // Return a latency based on the itinerary properties and defining instruction
+ // if possible. Some common subtargets don't require per-operand latency,
+ // especially for minimum latencies.
+ if (FindMin) {
+ // If MinLatency is invalid, then use the itinerary for MinLatency. If no
+ // itinerary exists either, then use single cycle latency.
+ if (SchedModel.MinLatency < 0 && !hasInstrItineraries()) {
+ return 1;
+ }
+ return SchedModel.MinLatency;
+ }
+ else if (!hasInstrSchedModel() && !hasInstrItineraries()) {
+ return TII->defaultDefLatency(&SchedModel, DefMI);
+ }
+ // ...operand lookup required
+ return -1;
+}
+
+/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
+/// evaluation of predicates that depend on instruction operands or flags.
+const MCSchedClassDesc *TargetSchedModel::
+resolveSchedClass(const MachineInstr *MI) const {
+
+ // Get the definition's scheduling class descriptor from this machine model.
+ unsigned SchedClass = MI->getDesc().getSchedClass();
+ const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
+
+#ifndef NDEBUG
+ unsigned NIter = 0;
+#endif
+ while (SCDesc->isVariant()) {
+ assert(++NIter < 6 && "Variants are nested deeper than the magic number");
+
+ SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
+ SCDesc = SchedModel.getSchedClassDesc(SchedClass);
+ }
+ return SCDesc;
+}
+
+/// Find the def index of this operand. This index maps to the machine model and
+/// is independent of use operands. Def operands may be reordered with uses or
+/// merged with uses without affecting the def index (e.g. before/after
+/// regalloc). However, an instruction's def operands must never be reordered
+/// with respect to each other.
+static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
+ unsigned DefIdx = 0;
+ for (unsigned i = 0; i != DefOperIdx; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDef())
+ ++DefIdx;
+ }
+ return DefIdx;
+}
+
+/// Find the use index of this operand. This is independent of the instruction's
+/// def operands.
+///
+/// Note that uses are not determined by the operand's isUse property, which
+/// is simply the inverse of isDef. Here we consider any readsReg operand to be
+/// a "use". The machine model allows an operand to be both a Def and Use.
+static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
+ unsigned UseIdx = 0;
+ for (unsigned i = 0; i != UseOperIdx; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.readsReg())
+ ++UseIdx;
+ }
+ return UseIdx;
+}
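For illustration, consider a hypothetical instruction with two register defs followed by two register uses:

//   %d0<def>, %d1<def> = FOO %r0, %r1
//    oper 0    oper 1         oper 2 oper 3
// findDefIdx(MI, 1) == 1   // %d1 is the second register def
// findUseIdx(MI, 3) == 1   // %r1 is the second readsReg operand
// The machine model numbers writes and reads independently, so a pure def
// does not shift the use indices, and vice versa.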
+
+// Top-level API for clients that know the operand indices.
+unsigned TargetSchedModel::computeOperandLatency(
+ const MachineInstr *DefMI, unsigned DefOperIdx,
+ const MachineInstr *UseMI, unsigned UseOperIdx,
+ bool FindMin) const {
+
+ int DefLatency = getDefLatency(DefMI, FindMin);
+ if (DefLatency >= 0)
+ return DefLatency;
+
+ if (hasInstrItineraries()) {
+ int OperLatency = 0;
+ if (UseMI) {
+ OperLatency =
+ TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx, UseMI, UseOperIdx);
+ }
+ else {
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
+ }
+ if (OperLatency >= 0)
+ return OperLatency;
+
+ // No operand latency was found.
+ unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);
+
+ // Expected latency is the max of the stage latency and itinerary props.
+ // Rather than directly querying InstrItins stage latency, we call a TII
+ // hook to allow subtargets to specialize latency. This hook is only
+ // applicable to the InstrItins model. InstrSchedModel should model all
+ // special cases without TII hooks.
+ if (!FindMin)
+ InstrLatency = std::max(InstrLatency,
+ TII->defaultDefLatency(&SchedModel, DefMI));
+ return InstrLatency;
+ }
+ assert(!FindMin && hasInstrSchedModel() &&
+ "Expected a SchedModel for this cpu");
+ const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
+ unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
+ if (DefIdx < SCDesc->NumWriteLatencyEntries) {
+ // Lookup the definition's write latency in SubtargetInfo.
+ const MCWriteLatencyEntry *WLEntry =
+ STI->getWriteLatencyEntry(SCDesc, DefIdx);
+ unsigned WriteID = WLEntry->WriteResourceID;
+ unsigned Latency = convertLatency(WLEntry->Cycles);
+ if (!UseMI)
+ return Latency;
+
+ // Lookup the use's latency adjustment in SubtargetInfo.
+ const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
+ if (UseDesc->NumReadAdvanceEntries == 0)
+ return Latency;
+ unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
+ return Latency - STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
+ }
+ // If DefIdx does not exist in the model (e.g. implicit defs), then return
+ // unit latency (defaultDefLatency may be too conservative).
+#ifndef NDEBUG
+ if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
+ && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()) {
+ std::string Err;
+ raw_string_ostream ss(Err);
+ ss << "DefIdx " << DefIdx << " exceeds machine model writes for "
+ << *DefMI;
+ report_fatal_error(ss.str());
+ }
+#endif
+ return DefMI->isTransient() ? 0 : 1;
+}
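As a numeric illustration of the SchedModel path, with made-up table entries:

// Suppose the def's write latency entry says Cycles = 4 and the use's
// ReadAdvance entry for that WriteResourceID says 2. Then:
//   computeOperandLatency(Def, ..., Use, ...) == 4 - 2 == 2
// ReadAdvance models a consumer that reads the operand some cycles after
// issue (e.g. an address operand consumed late in the pipeline), shrinking
// the effective dependence latency.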
+
+unsigned TargetSchedModel::computeInstrLatency(const MachineInstr *MI) const {
+ // For the itinerary model, fall back to the old subtarget hook.
+ // Allow subtargets to compute Bundle latencies outside the machine model.
+ if (hasInstrItineraries() || MI->isBundle())
+ return TII->getInstrLatency(&InstrItins, MI);
+
+ if (hasInstrSchedModel()) {
+ const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
+ if (SCDesc->isValid()) {
+ unsigned Latency = 0;
+ for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries;
+ DefIdx != DefEnd; ++DefIdx) {
+ // Lookup the definition's write latency in SubtargetInfo.
+ const MCWriteLatencyEntry *WLEntry =
+ STI->getWriteLatencyEntry(SCDesc, DefIdx);
+ Latency = std::max(Latency, convertLatency(WLEntry->Cycles));
+ }
+ return Latency;
+ }
+ }
+ return TII->defaultDefLatency(&SchedModel, MI);
+}
+
+unsigned TargetSchedModel::
+computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
+ const MachineInstr *DepMI) const {
+ // MinLatency == -1 is for in-order processors that always have unit
+ // MinLatency. MinLatency > 0 is for in-order processors with varying min
+ // latencies, but since this is not a RAW dep, we always use unit latency.
+ if (SchedModel.MinLatency != 0)
+ return 1;
+
+ // MinLatency == 0 indicates an out-of-order processor that can dispatch
+ // WAW dependencies in the same cycle.
+
+ // Treat predication as a data dependency for out-of-order cpus. In-order
+ // cpus do not need to treat predicated writes specially.
+ //
+ // TODO: The following hack exists because predication passes do not
+ // correctly append imp-use operands, and readsReg() strangely returns false
+ // for predicated defs.
+ unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
+ const MachineFunction &MF = *DefMI->getParent()->getParent();
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(DepMI))
+ return computeInstrLatency(DefMI);
+
+ // If we have a per operand scheduling model, check if this def is writing
+ // an unbuffered resource. If so, it is treated like an in-order cpu.
+ if (hasInstrSchedModel()) {
+ const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
+ if (SCDesc->isValid()) {
+ for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
+ *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
+ if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->IsBuffered)
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
diff --git a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index aa601af..a9058bc 100644
--- a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -60,116 +60,108 @@ STATISTIC(NumReSchedUps, "Number of instructions re-scheduled up");
STATISTIC(NumReSchedDowns, "Number of instructions re-scheduled down");
namespace {
- class TwoAddressInstructionPass : public MachineFunctionPass {
- MachineFunction *MF;
- const TargetInstrInfo *TII;
- const TargetRegisterInfo *TRI;
- const InstrItineraryData *InstrItins;
- MachineRegisterInfo *MRI;
- LiveVariables *LV;
- SlotIndexes *Indexes;
- LiveIntervals *LIS;
- AliasAnalysis *AA;
- CodeGenOpt::Level OptLevel;
-
- // DistanceMap - Keep track the distance of a MI from the start of the
- // current basic block.
- DenseMap<MachineInstr*, unsigned> DistanceMap;
-
- // SrcRegMap - A map from virtual registers to physical registers which
- // are likely targets to be coalesced to due to copies from physical
- // registers to virtual registers. e.g. v1024 = move r0.
- DenseMap<unsigned, unsigned> SrcRegMap;
-
- // DstRegMap - A map from virtual registers to physical registers which
- // are likely targets to be coalesced to due to copies to physical
- // registers from virtual registers. e.g. r1 = move v1024.
- DenseMap<unsigned, unsigned> DstRegMap;
-
- /// RegSequences - Keep track the list of REG_SEQUENCE instructions seen
- /// during the initial walk of the machine function.
- SmallVector<MachineInstr*, 16> RegSequences;
-
- bool Sink3AddrInstruction(MachineBasicBlock *MBB, MachineInstr *MI,
- unsigned Reg,
- MachineBasicBlock::iterator OldPos);
-
- bool NoUseAfterLastDef(unsigned Reg, MachineBasicBlock *MBB, unsigned Dist,
- unsigned &LastDef);
-
- bool isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC,
- MachineInstr *MI, MachineBasicBlock *MBB,
- unsigned Dist);
+class TwoAddressInstructionPass : public MachineFunctionPass {
+ MachineFunction *MF;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ const InstrItineraryData *InstrItins;
+ MachineRegisterInfo *MRI;
+ LiveVariables *LV;
+ SlotIndexes *Indexes;
+ LiveIntervals *LIS;
+ AliasAnalysis *AA;
+ CodeGenOpt::Level OptLevel;
+
+ // The current basic block being processed.
+ MachineBasicBlock *MBB;
+
+ // DistanceMap - Keep track of the distance of an MI from the start of
+ // the current basic block.
+ DenseMap<MachineInstr*, unsigned> DistanceMap;
+
+ // Set of already processed instructions in the current block.
+ SmallPtrSet<MachineInstr*, 8> Processed;
- bool CommuteInstruction(MachineBasicBlock::iterator &mi,
- MachineFunction::iterator &mbbi,
- unsigned RegB, unsigned RegC, unsigned Dist);
+ // SrcRegMap - A map from virtual registers to physical registers which are
+ // likely targets to be coalesced to due to copies from physical registers to
+ // virtual registers. e.g. v1024 = move r0.
+ DenseMap<unsigned, unsigned> SrcRegMap;
- bool isProfitableToConv3Addr(unsigned RegA, unsigned RegB);
+ // DstRegMap - A map from virtual registers to physical registers which are
+ // likely targets to be coalesced to due to copies to physical registers from
+ // virtual registers. e.g. r1 = move v1024.
+ DenseMap<unsigned, unsigned> DstRegMap;
- bool ConvertInstTo3Addr(MachineBasicBlock::iterator &mi,
- MachineBasicBlock::iterator &nmi,
- MachineFunction::iterator &mbbi,
- unsigned RegA, unsigned RegB, unsigned Dist);
+ /// RegSequences - Keep track of the list of REG_SEQUENCE instructions seen
+ /// during the initial walk of the machine function.
+ SmallVector<MachineInstr*, 16> RegSequences;
- bool isDefTooClose(unsigned Reg, unsigned Dist,
- MachineInstr *MI, MachineBasicBlock *MBB);
+ bool sink3AddrInstruction(MachineInstr *MI, unsigned Reg,
+ MachineBasicBlock::iterator OldPos);
- bool RescheduleMIBelowKill(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator &mi,
- MachineBasicBlock::iterator &nmi,
- unsigned Reg);
- bool RescheduleKillAboveMI(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator &mi,
- MachineBasicBlock::iterator &nmi,
- unsigned Reg);
+ bool noUseAfterLastDef(unsigned Reg, unsigned Dist, unsigned &LastDef);
- bool TryInstructionTransform(MachineBasicBlock::iterator &mi,
- MachineBasicBlock::iterator &nmi,
- MachineFunction::iterator &mbbi,
- unsigned SrcIdx, unsigned DstIdx,
- unsigned Dist,
- SmallPtrSet<MachineInstr*, 8> &Processed);
+ bool isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC,
+ MachineInstr *MI, unsigned Dist);
- void ScanUses(unsigned DstReg, MachineBasicBlock *MBB,
- SmallPtrSet<MachineInstr*, 8> &Processed);
+ bool commuteInstruction(MachineBasicBlock::iterator &mi,
+ unsigned RegB, unsigned RegC, unsigned Dist);
- void ProcessCopy(MachineInstr *MI, MachineBasicBlock *MBB,
- SmallPtrSet<MachineInstr*, 8> &Processed);
+ bool isProfitableToConv3Addr(unsigned RegA, unsigned RegB);
- typedef SmallVector<std::pair<unsigned, unsigned>, 4> TiedPairList;
- typedef SmallDenseMap<unsigned, TiedPairList> TiedOperandMap;
- bool collectTiedOperands(MachineInstr *MI, TiedOperandMap&);
- void processTiedPairs(MachineInstr *MI, TiedPairList&, unsigned &Dist);
+ bool convertInstTo3Addr(MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned RegA, unsigned RegB, unsigned Dist);
- void CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs, unsigned DstReg);
+ bool isDefTooClose(unsigned Reg, unsigned Dist, MachineInstr *MI);
- /// EliminateRegSequences - Eliminate REG_SEQUENCE instructions as part
- /// of the de-ssa process. This replaces sources of REG_SEQUENCE as
- /// sub-register references of the register defined by REG_SEQUENCE.
- bool EliminateRegSequences();
+ bool rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned Reg);
+ bool rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned Reg);
- public:
- static char ID; // Pass identification, replacement for typeid
- TwoAddressInstructionPass() : MachineFunctionPass(ID) {
- initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
- }
+ bool tryInstructionTransform(MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned SrcIdx, unsigned DstIdx,
+ unsigned Dist);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequired<AliasAnalysis>();
- AU.addPreserved<LiveVariables>();
- AU.addPreserved<SlotIndexes>();
- AU.addPreserved<LiveIntervals>();
- AU.addPreservedID(MachineLoopInfoID);
- AU.addPreservedID(MachineDominatorsID);
- MachineFunctionPass::getAnalysisUsage(AU);
- }
+ void scanUses(unsigned DstReg);
- /// runOnMachineFunction - Pass entry point.
- bool runOnMachineFunction(MachineFunction&);
- };
-}
+ void processCopy(MachineInstr *MI);
+
+ typedef SmallVector<std::pair<unsigned, unsigned>, 4> TiedPairList;
+ typedef SmallDenseMap<unsigned, TiedPairList> TiedOperandMap;
+ bool collectTiedOperands(MachineInstr *MI, TiedOperandMap&);
+ void processTiedPairs(MachineInstr *MI, TiedPairList&, unsigned &Dist);
+
+ /// eliminateRegSequences - Eliminate REG_SEQUENCE instructions as part of
+ /// the de-ssa process. This replaces sources of REG_SEQUENCE as sub-register
+ /// references of the register defined by REG_SEQUENCE.
+ bool eliminateRegSequences();
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ TwoAddressInstructionPass() : MachineFunctionPass(ID) {
+ initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<LiveVariables>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addPreservedID(MachineLoopInfoID);
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ /// runOnMachineFunction - Pass entry point.
+ bool runOnMachineFunction(MachineFunction&);
+};
+} // end anonymous namespace
char TwoAddressInstructionPass::ID = 0;
INITIALIZE_PASS_BEGIN(TwoAddressInstructionPass, "twoaddressinstruction",
@@ -180,13 +172,13 @@ INITIALIZE_PASS_END(TwoAddressInstructionPass, "twoaddressinstruction",
char &llvm::TwoAddressInstructionPassID = TwoAddressInstructionPass::ID;
-/// Sink3AddrInstruction - A two-address instruction has been converted to a
+/// sink3AddrInstruction - A two-address instruction has been converted to a
/// three-address instruction to avoid clobbering a register. Try to sink it
/// past the instruction that would kill the above mentioned register to reduce
/// register pressure.
-bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
- MachineInstr *MI, unsigned SavedReg,
- MachineBasicBlock::iterator OldPos) {
+bool TwoAddressInstructionPass::
+sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg,
+ MachineBasicBlock::iterator OldPos) {
// FIXME: Shouldn't we be trying to do this before we three-addressify the
// instruction? After this transformation is done, we no longer need
// the instruction to be in three-address form.
@@ -299,13 +291,12 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
return true;
}
-/// NoUseAfterLastDef - Return true if there are no intervening uses between the
+/// noUseAfterLastDef - Return true if there are no intervening uses between the
/// last instruction in the MBB that defines the specified register and the
/// two-address instruction which is being processed. It also returns the last
/// def location by reference.
-bool TwoAddressInstructionPass::NoUseAfterLastDef(unsigned Reg,
- MachineBasicBlock *MBB, unsigned Dist,
- unsigned &LastDef) {
+bool TwoAddressInstructionPass::noUseAfterLastDef(unsigned Reg, unsigned Dist,
+ unsigned &LastDef) {
LastDef = 0;
unsigned LastUse = Dist;
for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Reg),
@@ -465,10 +456,9 @@ regsAreCompatible(unsigned RegA, unsigned RegB, const TargetRegisterInfo *TRI) {
/// isProfitableToCommute - Return true if it's potentially profitable to commute
/// the two-address instruction that's being processed.
bool
-TwoAddressInstructionPass::isProfitableToCommute(unsigned regA, unsigned regB,
- unsigned regC,
- MachineInstr *MI, MachineBasicBlock *MBB,
- unsigned Dist) {
+TwoAddressInstructionPass::
+isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC,
+ MachineInstr *MI, unsigned Dist) {
if (OptLevel == CodeGenOpt::None)
return false;
@@ -516,13 +506,13 @@ TwoAddressInstructionPass::isProfitableToCommute(unsigned regA, unsigned regB,
// If there is a use of regC between its last def (could be livein) and this
// instruction, then bail.
unsigned LastDefC = 0;
- if (!NoUseAfterLastDef(regC, MBB, Dist, LastDefC))
+ if (!noUseAfterLastDef(regC, Dist, LastDefC))
return false;
// If there is a use of regB between its last def (could be livein) and this
// instruction, then go ahead and make this transformation.
unsigned LastDefB = 0;
- if (!NoUseAfterLastDef(regB, MBB, Dist, LastDefB))
+ if (!noUseAfterLastDef(regB, Dist, LastDefB))
return true;
// Since there are no intervening uses for both registers, then commute
@@ -530,13 +520,12 @@ TwoAddressInstructionPass::isProfitableToCommute(unsigned regA, unsigned regB,
return LastDefB && LastDefC && LastDefC > LastDefB;
}
-/// CommuteInstruction - Commute a two-address instruction and update the basic
+/// commuteInstruction - Commute a two-address instruction and update the basic
/// block, distance map, and live variables if needed. Return true if it is
/// successful.
-bool
-TwoAddressInstructionPass::CommuteInstruction(MachineBasicBlock::iterator &mi,
- MachineFunction::iterator &mbbi,
- unsigned RegB, unsigned RegC, unsigned Dist) {
+bool TwoAddressInstructionPass::
+commuteInstruction(MachineBasicBlock::iterator &mi,
+ unsigned RegB, unsigned RegC, unsigned Dist) {
MachineInstr *MI = mi;
DEBUG(dbgs() << "2addr: COMMUTING : " << *MI);
MachineInstr *NewMI = TII->commuteInstruction(MI);
@@ -555,8 +544,8 @@ TwoAddressInstructionPass::CommuteInstruction(MachineBasicBlock::iterator &mi,
if (Indexes)
Indexes->replaceMachineInstrInMaps(MI, NewMI);
- mbbi->insert(mi, NewMI); // Insert the new inst
- mbbi->erase(mi); // Nuke the old inst.
+ MBB->insert(mi, NewMI); // Insert the new inst
+ MBB->erase(mi); // Nuke the old inst.
mi = NewMI;
DistanceMap.insert(std::make_pair(NewMI, Dist));
}
@@ -588,51 +577,51 @@ TwoAddressInstructionPass::isProfitableToConv3Addr(unsigned RegA,unsigned RegB){
return (ToRegA && !regsAreCompatible(FromRegB, ToRegA, TRI));
}
-/// ConvertInstTo3Addr - Convert the specified two-address instruction into a
+/// convertInstTo3Addr - Convert the specified two-address instruction into a
/// three address one. Return true if this transformation was successful.
bool
-TwoAddressInstructionPass::ConvertInstTo3Addr(MachineBasicBlock::iterator &mi,
+TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
- MachineFunction::iterator &mbbi,
unsigned RegA, unsigned RegB,
unsigned Dist) {
- MachineInstr *NewMI = TII->convertToThreeAddress(mbbi, mi, LV);
- if (NewMI) {
- DEBUG(dbgs() << "2addr: CONVERTING 2-ADDR: " << *mi);
- DEBUG(dbgs() << "2addr: TO 3-ADDR: " << *NewMI);
- bool Sunk = false;
+ // FIXME: Why does convertToThreeAddress() need an iterator reference?
+ MachineFunction::iterator MFI = MBB;
+ MachineInstr *NewMI = TII->convertToThreeAddress(MFI, mi, LV);
+ assert(MBB == MFI && "convertToThreeAddress changed iterator reference");
+ if (!NewMI)
+ return false;
- if (Indexes)
- Indexes->replaceMachineInstrInMaps(mi, NewMI);
+ DEBUG(dbgs() << "2addr: CONVERTING 2-ADDR: " << *mi);
+ DEBUG(dbgs() << "2addr: TO 3-ADDR: " << *NewMI);
+ bool Sunk = false;
- if (NewMI->findRegisterUseOperand(RegB, false, TRI))
- // FIXME: Temporary workaround. If the new instruction doesn't
- // uses RegB, convertToThreeAddress must have created more
- // then one instruction.
- Sunk = Sink3AddrInstruction(mbbi, NewMI, RegB, mi);
+ if (Indexes)
+ Indexes->replaceMachineInstrInMaps(mi, NewMI);
- mbbi->erase(mi); // Nuke the old inst.
+ if (NewMI->findRegisterUseOperand(RegB, false, TRI))
+ // FIXME: Temporary workaround. If the new instruction doesn't
+ // use RegB, convertToThreeAddress must have created more
+ // than one instruction.
+ Sunk = sink3AddrInstruction(NewMI, RegB, mi);
- if (!Sunk) {
- DistanceMap.insert(std::make_pair(NewMI, Dist));
- mi = NewMI;
- nmi = llvm::next(mi);
- }
+ MBB->erase(mi); // Nuke the old inst.
- // Update source and destination register maps.
- SrcRegMap.erase(RegA);
- DstRegMap.erase(RegB);
- return true;
+ if (!Sunk) {
+ DistanceMap.insert(std::make_pair(NewMI, Dist));
+ mi = NewMI;
+ nmi = llvm::next(mi);
}
- return false;
+ // Update source and destination register maps.
+ SrcRegMap.erase(RegA);
+ DstRegMap.erase(RegB);
+ return true;
}
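For context, the classic profitable conversion on x86 turns a tied add into a load-effective-address, which has no tied operands. Schematically (opcode choice and operand order vary by subtarget; this is not quoted from the pass):

// Two-address form: the destination must reuse the first source.
//     %eax = ADD32rr %eax<tied>, %ebx
// Three-address form produced by the target's convertToThreeAddress:
//     %ecx = LEA32r %eax, 1, %ebx, 0   // destination untied
// Untying the operands frees the register allocator to place the
// result anywhere, at the cost of a usually cheap LEA.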
-/// ScanUses - Scan forward recursively for only uses, update maps if the use
+/// scanUses - Scan forward recursively for only uses, update maps if the use
/// is a copy or a two-address instruction.
void
-TwoAddressInstructionPass::ScanUses(unsigned DstReg, MachineBasicBlock *MBB,
- SmallPtrSet<MachineInstr*, 8> &Processed) {
+TwoAddressInstructionPass::scanUses(unsigned DstReg) {
SmallVector<unsigned, 4> VirtRegPairs;
bool IsDstPhys;
bool IsCopy = false;
@@ -676,7 +665,7 @@ TwoAddressInstructionPass::ScanUses(unsigned DstReg, MachineBasicBlock *MBB,
}
}
-/// ProcessCopy - If the specified instruction is not yet processed, process it
+/// processCopy - If the specified instruction is not yet processed, process it
/// if it's a copy. For a copy instruction, we find the physical registers the
/// source and destination registers might be mapped to. These are kept in
/// point-to maps used to determine future optimizations. e.g.
@@ -688,9 +677,7 @@ TwoAddressInstructionPass::ScanUses(unsigned DstReg, MachineBasicBlock *MBB,
/// coalesced to r0 (from the input side). v1025 is mapped to r1. v1026 is
/// potentially joined with r1 on the output side. It's worthwhile to commute
/// 'add' to eliminate a copy.
-void TwoAddressInstructionPass::ProcessCopy(MachineInstr *MI,
- MachineBasicBlock *MBB,
- SmallPtrSet<MachineInstr*, 8> &Processed) {
+void TwoAddressInstructionPass::processCopy(MachineInstr *MI) {
if (Processed.count(MI))
return;
@@ -707,21 +694,20 @@ void TwoAddressInstructionPass::ProcessCopy(MachineInstr *MI,
assert(SrcRegMap[DstReg] == SrcReg &&
"Can't map to two src physical registers!");
- ScanUses(DstReg, MBB, Processed);
+ scanUses(DstReg);
}
Processed.insert(MI);
return;
}
-/// RescheduleMIBelowKill - If there is one more local instruction that reads
+/// rescheduleMIBelowKill - If there is one more local instruction that reads
/// 'Reg' and it kills 'Reg', consider moving the instruction below the kill
/// instruction in order to eliminate the need for the copy.
-bool
-TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator &mi,
- MachineBasicBlock::iterator &nmi,
- unsigned Reg) {
+bool TwoAddressInstructionPass::
+rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned Reg) {
// Bail immediately if we don't have LV available. We use it to find kills
// efficiently.
if (!LV)
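The opportunity being detected, as a schematic sketch (virtual register names are illustrative):

// Before:                          After sinking the ADD below the kill:
//   %a = ADD %reg, %x              //   %b = SUB %reg<kill>
//   %b = SUB %reg<kill>            //   %a = ADD %reg, %x   // now the kill
// Once the two-address ADD reads %reg last, tying %a to %reg no longer
// requires a copy to keep %reg alive past the ADD.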
@@ -853,8 +839,7 @@ TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB,
/// isDefTooClose - Return true if the re-scheduling will put the given
/// instruction too close to the defs of its register dependencies.
bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist,
- MachineInstr *MI,
- MachineBasicBlock *MBB) {
+ MachineInstr *MI) {
for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(Reg),
DE = MRI->def_end(); DI != DE; ++DI) {
MachineInstr *DefMI = &*DI;
@@ -873,15 +858,14 @@ bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist,
return false;
}
-/// RescheduleKillAboveMI - If there is one more local instruction that reads
+/// rescheduleKillAboveMI - If there is one more local instruction that reads
/// 'Reg' and it kills 'Reg', consider moving the kill instruction above the
/// current two-address instruction in order to eliminate the need for the
/// copy.
-bool
-TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator &mi,
- MachineBasicBlock::iterator &nmi,
- unsigned Reg) {
+bool TwoAddressInstructionPass::
+rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned Reg) {
// Bail immediately if we don't have LV available. We use it to find kills
// efficiently.
if (!LV)
@@ -918,7 +902,7 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
if (MO.isUse()) {
if (!MOReg)
continue;
- if (isDefTooClose(MOReg, DI->second, MI, MBB))
+ if (isDefTooClose(MOReg, DI->second, MI))
return false;
if (MOReg == Reg && !MO.isKill())
return false;
@@ -1006,18 +990,16 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
return true;
}
-/// TryInstructionTransform - For the case where an instruction has a single
+/// tryInstructionTransform - For the case where an instruction has a single
/// pair of tied register operands, attempt some transformations that may
/// either eliminate the tied operands or improve the opportunities for
/// coalescing away the register copy. Returns true if no copy needs to be
/// inserted to untie mi's operands (either because they were untied, or
/// because mi was rescheduled, and will be visited again later).
bool TwoAddressInstructionPass::
-TryInstructionTransform(MachineBasicBlock::iterator &mi,
+tryInstructionTransform(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
- MachineFunction::iterator &mbbi,
- unsigned SrcIdx, unsigned DstIdx, unsigned Dist,
- SmallPtrSet<MachineInstr*, 8> &Processed) {
+ unsigned SrcIdx, unsigned DstIdx, unsigned Dist) {
if (OptLevel == CodeGenOpt::None)
return false;
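The function body spread over the following hunks tries its transformations in a fixed order; in outline:

// 1. Commute the operands when isProfitableToCommute says so
//    (commuteInstruction).
// 2. Re-schedule this instruction below a later kill of regB
//    (rescheduleMIBelowKill).
// 3. Convert to a three-address instruction if the target supports it
//    (convertInstTo3Addr).
// 4. Re-schedule a later kill of regB above this instruction
//    (rescheduleKillAboveMI).
// 5. As a last resort, unfold a folded load and retry the transform
//    on the unfolded instruction.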
@@ -1030,7 +1012,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
bool regBKilled = isKilled(MI, regB, MRI, TII);
if (TargetRegisterInfo::isVirtualRegister(regA))
- ScanUses(regA, &*mbbi, Processed);
+ scanUses(regA);
// Check if it is profitable to commute the operands.
unsigned SrcOp1, SrcOp2;
@@ -1051,7 +1033,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// If C dies but B does not, swap the B and C operands.
// This makes the live ranges of A and C joinable.
TryCommute = true;
- else if (isProfitableToCommute(regA, regB, regC, &MI, mbbi, Dist)) {
+ else if (isProfitableToCommute(regA, regB, regC, &MI, Dist)) {
TryCommute = true;
AggressiveCommute = true;
}
@@ -1059,7 +1041,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
}
// If it's profitable to commute, try to do so.
- if (TryCommute && CommuteInstruction(mi, mbbi, regB, regC, Dist)) {
+ if (TryCommute && commuteInstruction(mi, regB, regC, Dist)) {
++NumCommuted;
if (AggressiveCommute)
++NumAggrCommuted;
@@ -1068,7 +1050,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// If there is one more use of regB later in the same MBB, consider
// re-scheduling this MI below it.
- if (RescheduleMIBelowKill(mbbi, mi, nmi, regB)) {
+ if (rescheduleMIBelowKill(mi, nmi, regB)) {
++NumReSchedDowns;
return true;
}
@@ -1078,7 +1060,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// three-address instruction. Check if it is profitable.
if (!regBKilled || isProfitableToConv3Addr(regA, regB)) {
// Try to convert it.
- if (ConvertInstTo3Addr(mi, nmi, mbbi, regA, regB, Dist)) {
+ if (convertInstTo3Addr(mi, nmi, regA, regB, Dist)) {
++NumConvertedTo3Addr;
return true; // Done with this instruction.
}
@@ -1087,7 +1069,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// If there is one more use of regB later in the same MBB, consider
// re-scheduling it before this MI if it's legal.
- if (RescheduleKillAboveMI(mbbi, mi, nmi, regB)) {
+ if (rescheduleKillAboveMI(mi, nmi, regB)) {
++NumReSchedUps;
return true;
}
@@ -1131,8 +1113,8 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// Tentatively insert the instructions into the block so that they
// look "normal" to the transformation logic.
- mbbi->insert(mi, NewMIs[0]);
- mbbi->insert(mi, NewMIs[1]);
+ MBB->insert(mi, NewMIs[0]);
+ MBB->insert(mi, NewMIs[1]);
DEBUG(dbgs() << "2addr: NEW LOAD: " << *NewMIs[0]
<< "2addr: NEW INST: " << *NewMIs[1]);
@@ -1142,8 +1124,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
unsigned NewSrcIdx = NewMIs[1]->findRegisterUseOperandIdx(regB);
MachineBasicBlock::iterator NewMI = NewMIs[1];
bool TransformSuccess =
- TryInstructionTransform(NewMI, mi, mbbi,
- NewSrcIdx, NewDstIdx, Dist, Processed);
+ tryInstructionTransform(NewMI, mi, NewSrcIdx, NewDstIdx, Dist);
if (TransformSuccess ||
NewMIs[1]->getOperand(NewSrcIdx).isKill()) {
// Success, or at least we made an improvement. Keep the unfolded
@@ -1202,8 +1183,7 @@ bool TwoAddressInstructionPass::
collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) {
const MCInstrDesc &MCID = MI->getDesc();
bool AnyOps = false;
- unsigned NumOps = MI->isInlineAsm() ?
- MI->getNumOperands() : MCID.getNumOperands();
+ unsigned NumOps = MI->getNumOperands();
for (unsigned SrcIdx = 0; SrcIdx < NumOps; ++SrcIdx) {
unsigned DstIdx = 0;
@@ -1373,22 +1353,21 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
DEBUG(dbgs() << "********** REWRITING TWO-ADDR INSTRS **********\n");
DEBUG(dbgs() << "********** Function: "
- << MF->getFunction()->getName() << '\n');
+ << MF->getName() << '\n');
// This pass takes the function out of SSA form.
MRI->leaveSSA();
TiedOperandMap TiedOperands;
-
- SmallPtrSet<MachineInstr*, 8> Processed;
- for (MachineFunction::iterator mbbi = MF->begin(), mbbe = MF->end();
- mbbi != mbbe; ++mbbi) {
+ for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
+ MBBI != MBBE; ++MBBI) {
+ MBB = MBBI;
unsigned Dist = 0;
DistanceMap.clear();
SrcRegMap.clear();
DstRegMap.clear();
Processed.clear();
- for (MachineBasicBlock::iterator mi = mbbi->begin(), me = mbbi->end();
+ for (MachineBasicBlock::iterator mi = MBB->begin(), me = MBB->end();
mi != me; ) {
MachineBasicBlock::iterator nmi = llvm::next(mi);
if (mi->isDebugValue()) {
@@ -1402,7 +1381,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
DistanceMap.insert(std::make_pair(mi, ++Dist));
- ProcessCopy(&*mi, &*mbbi, Processed);
+ processCopy(&*mi);
// First scan through all the tied register uses in this instruction
// and record a list of pairs of tied operands for each register.
@@ -1427,8 +1406,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
unsigned SrcReg = mi->getOperand(SrcIdx).getReg();
unsigned DstReg = mi->getOperand(DstIdx).getReg();
if (SrcReg != DstReg &&
- TryInstructionTransform(mi, nmi, mbbi, SrcIdx, DstIdx, Dist,
- Processed)) {
+ tryInstructionTransform(mi, nmi, SrcIdx, DstIdx, Dist)) {
// The tied operands have been eliminated or shifted further down the
// block to ease elimination. Continue processing with 'nmi'.
TiedOperands.clear();
@@ -1468,7 +1446,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
// Eliminate REG_SEQUENCE instructions. Their whole purpose was to preserve
// SSA form. It's now safe to de-SSA.
- MadeChange |= EliminateRegSequences();
+ MadeChange |= eliminateRegSequences();
return MadeChange;
}
@@ -1515,127 +1493,6 @@ static MachineInstr *findFirstDef(unsigned Reg, MachineRegisterInfo *MRI) {
return First;
}
-/// CoalesceExtSubRegs - If a number of sources of the REG_SEQUENCE are
-/// EXTRACT_SUBREG from the same register and to the same virtual register
-/// with different sub-register indices, attempt to combine the
-/// EXTRACT_SUBREGs and pre-coalesce them. e.g.
-/// %reg1026<def> = VLDMQ %reg1025<kill>, 260, pred:14, pred:%reg0
-/// %reg1029:6<def> = EXTRACT_SUBREG %reg1026, 6
-/// %reg1029:5<def> = EXTRACT_SUBREG %reg1026<kill>, 5
-/// Since D subregs 5, 6 can combine to a Q register, we can coalesce
-/// reg1026 to reg1029.
-void
-TwoAddressInstructionPass::CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs,
- unsigned DstReg) {
- SmallSet<unsigned, 4> Seen;
- for (unsigned i = 0, e = Srcs.size(); i != e; ++i) {
- unsigned SrcReg = Srcs[i];
- if (!Seen.insert(SrcReg))
- continue;
-
- // Check that the instructions are all in the same basic block.
- MachineInstr *SrcDefMI = MRI->getUniqueVRegDef(SrcReg);
- MachineInstr *DstDefMI = MRI->getUniqueVRegDef(DstReg);
- if (!SrcDefMI || !DstDefMI ||
- SrcDefMI->getParent() != DstDefMI->getParent())
- continue;
-
- // If there are no other uses than copies which feed into
- // the reg_sequence, then we might be able to coalesce them.
- bool CanCoalesce = true;
- SmallVector<unsigned, 4> SrcSubIndices, DstSubIndices;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI->use_nodbg_begin(SrcReg),
- UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
- MachineInstr *UseMI = &*UI;
- if (!UseMI->isCopy() || UseMI->getOperand(0).getReg() != DstReg) {
- CanCoalesce = false;
- break;
- }
- SrcSubIndices.push_back(UseMI->getOperand(1).getSubReg());
- DstSubIndices.push_back(UseMI->getOperand(0).getSubReg());
- }
-
- if (!CanCoalesce || SrcSubIndices.size() < 2)
- continue;
-
- // Check that the source subregisters can be combined.
- std::sort(SrcSubIndices.begin(), SrcSubIndices.end());
- unsigned NewSrcSubIdx = 0;
- if (!TRI->canCombineSubRegIndices(MRI->getRegClass(SrcReg), SrcSubIndices,
- NewSrcSubIdx))
- continue;
-
- // Check that the destination subregisters can also be combined.
- std::sort(DstSubIndices.begin(), DstSubIndices.end());
- unsigned NewDstSubIdx = 0;
- if (!TRI->canCombineSubRegIndices(MRI->getRegClass(DstReg), DstSubIndices,
- NewDstSubIdx))
- continue;
-
- // If neither source nor destination can be combined to the full register,
- // just give up. This could be improved if it ever matters.
- if (NewSrcSubIdx != 0 && NewDstSubIdx != 0)
- continue;
-
- // Now that we know that all the uses are extract_subregs and that those
- // subregs can somehow be combined, scan all the extract_subregs again to
- // make sure the subregs are in the right order and can be composed.
- MachineInstr *SomeMI = 0;
- CanCoalesce = true;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI->use_nodbg_begin(SrcReg),
- UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
- MachineInstr *UseMI = &*UI;
- assert(UseMI->isCopy());
- unsigned DstSubIdx = UseMI->getOperand(0).getSubReg();
- unsigned SrcSubIdx = UseMI->getOperand(1).getSubReg();
- assert(DstSubIdx != 0 && "missing subreg from RegSequence elimination");
- if ((NewDstSubIdx == 0 &&
- TRI->composeSubRegIndices(NewSrcSubIdx, DstSubIdx) != SrcSubIdx) ||
- (NewSrcSubIdx == 0 &&
- TRI->composeSubRegIndices(NewDstSubIdx, SrcSubIdx) != DstSubIdx)) {
- CanCoalesce = false;
- break;
- }
- // Keep track of one of the uses. Preferably the first one which has a
- // <def,undef> flag.
- if (!SomeMI || UseMI->getOperand(0).isUndef())
- SomeMI = UseMI;
- }
- if (!CanCoalesce)
- continue;
-
- // Insert a copy to replace the original.
- MachineInstr *CopyMI = BuildMI(*SomeMI->getParent(), SomeMI,
- SomeMI->getDebugLoc(),
- TII->get(TargetOpcode::COPY))
- .addReg(DstReg, RegState::Define |
- getUndefRegState(SomeMI->getOperand(0).isUndef()),
- NewDstSubIdx)
- .addReg(SrcReg, 0, NewSrcSubIdx);
-
- // Remove all the old extract instructions.
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI->use_nodbg_begin(SrcReg),
- UE = MRI->use_nodbg_end(); UI != UE; ) {
- MachineInstr *UseMI = &*UI;
- ++UI;
- if (UseMI == CopyMI)
- continue;
- assert(UseMI->isCopy());
- // Move any kills to the new copy or extract instruction.
- if (UseMI->getOperand(1).isKill()) {
- CopyMI->getOperand(1).setIsKill();
- if (LV)
- // Update live variables
- LV->replaceKillInstruction(SrcReg, UseMI, &*CopyMI);
- }
- UseMI->eraseFromParent();
- }
- }
-}
-
static bool HasOtherRegSequenceUses(unsigned Reg, MachineInstr *RegSeq,
MachineRegisterInfo *MRI) {
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
@@ -1647,7 +1504,7 @@ static bool HasOtherRegSequenceUses(unsigned Reg, MachineInstr *RegSeq,
return false;
}
-/// EliminateRegSequences - Eliminate REG_SEQUENCE instructions as part
+/// eliminateRegSequences - Eliminate REG_SEQUENCE instructions as part
/// of the de-ssa process. This replaces sources of REG_SEQUENCE as
/// sub-register references of the register defined by REG_SEQUENCE. e.g.
///
@@ -1655,7 +1512,7 @@ static bool HasOtherRegSequenceUses(unsigned Reg, MachineInstr *RegSeq,
/// %reg1031<def> = REG_SEQUENCE %reg1029<kill>, 5, %reg1030<kill>, 6
/// =>
/// %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
-bool TwoAddressInstructionPass::EliminateRegSequences() {
+bool TwoAddressInstructionPass::eliminateRegSequences() {
if (RegSequences.empty())
return false;
@@ -1759,10 +1616,6 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
if (MO.isReg() && MO.isDef() && MO.getReg() == DstReg)
MO.setIsUndef();
}
- // Make sure there is a full non-subreg imp-def operand on the
- // instruction. This shouldn't be necessary, but it seems that at least
- // RAFast requires it.
- Def->addRegisterDefined(DstReg, TRI);
DEBUG(dbgs() << "First def: " << *Def);
}
@@ -1775,12 +1628,6 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
DEBUG(dbgs() << "Eliminated: " << *MI);
MI->eraseFromParent();
}
-
- // Try coalescing some EXTRACT_SUBREG instructions. This can create
- // INSERT_SUBREG instructions that must have <undef> flags added by
- // LiveIntervalAnalysis, so only run it when LiveVariables is available.
- if (LV)
- CoalesceExtSubRegs(RealSrcs, DstReg);
}
RegSequences.clear();
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
index 93840f0..bb93bdc 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
@@ -19,8 +19,8 @@
#define DEBUG_TYPE "regalloc"
#include "VirtRegMap.h"
#include "LiveDebugVariables.h"
-#include "llvm/Function.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -127,9 +127,11 @@ void VirtRegMap::print(raw_ostream &OS, const Module*) const {
OS << '\n';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VirtRegMap::dump() const {
print(dbgs());
}
+#endif
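The new guard is the usual LLVM idiom for compiling dump() methods out of release builds unless dumps are explicitly requested; a minimal reproduction of the pattern:

#include <cstdio>

struct Widget {
  void print() const { std::printf("widget state\n"); }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Present in asserts builds, or when LLVM_ENABLE_DUMP is defined.
  void dump() const { print(); }
#endif
};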
//===----------------------------------------------------------------------===//
// VirtRegRewriter
@@ -170,6 +172,7 @@ INITIALIZE_PASS_BEGIN(VirtRegRewriter, "virtregrewriter",
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
+INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_END(VirtRegRewriter, "virtregrewriter",
"Virtual Register Rewriter", false, false)
@@ -182,6 +185,8 @@ void VirtRegRewriter::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
+ AU.addRequired<LiveStacks>();
+ AU.addPreserved<LiveStacks>();
AU.addRequired<VirtRegMap>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -197,11 +202,11 @@ bool VirtRegRewriter::runOnMachineFunction(MachineFunction &fn) {
VRM = &getAnalysis<VirtRegMap>();
DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
<< "********** Function: "
- << MF->getFunction()->getName() << '\n');
+ << MF->getName() << '\n');
DEBUG(VRM->dump());
// Add kill flags while we still have virtual registers.
- LIS->addKillFlags();
+ LIS->addKillFlags(VRM);
// Live-in lists on basic blocks are required for physregs.
addMBBLiveIns();
@@ -252,9 +257,6 @@ void VirtRegRewriter::rewrite() {
SmallVector<unsigned, 8> SuperDeads;
SmallVector<unsigned, 8> SuperDefs;
SmallVector<unsigned, 8> SuperKills;
-#ifndef NDEBUG
- BitVector Reserved = TRI->getReservedRegs(*MF);
-#endif
for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
MBBI != MBBE; ++MBBI) {
@@ -278,7 +280,7 @@ void VirtRegRewriter::rewrite() {
unsigned PhysReg = VRM->getPhys(VirtReg);
assert(PhysReg != VirtRegMap::NO_PHYS_REG &&
"Instruction uses unmapped VirtReg");
- assert(!Reserved.test(PhysReg) && "Reserved register assignment");
+ assert(!MRI->isReserved(PhysReg) && "Reserved register assignment");
// Preserve semantics of sub-register operands.
if (MO.getSubReg()) {
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.h b/contrib/llvm/lib/CodeGen/VirtRegMap.h
index c320985..7974dda 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.h
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.h
@@ -63,8 +63,8 @@ namespace llvm {
/// createSpillSlot - Allocate a spill slot for RC from MFI.
unsigned createSpillSlot(const TargetRegisterClass *RC);
- VirtRegMap(const VirtRegMap&); // DO NOT IMPLEMENT
- void operator=(const VirtRegMap&); // DO NOT IMPLEMENT
+ VirtRegMap(const VirtRegMap&) LLVM_DELETED_FUNCTION;
+ void operator=(const VirtRegMap&) LLVM_DELETED_FUNCTION;
public:
static char ID;
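LLVM_DELETED_FUNCTION expands to the C++11 '= delete' specifier when the compiler supports it and to nothing otherwise, so on older compilers the declarations above still behave as private-and-unimplemented. A simplified sketch of the macro (the in-tree version in llvm/Support/Compiler.h carries more feature tests):

#ifndef __has_feature
#define __has_feature(x) 0        // non-Clang compilers lack __has_feature
#endif

#if __has_feature(cxx_deleted_functions)
#define LLVM_DELETED_FUNCTION = delete
#else
#define LLVM_DELETED_FUNCTION     // fall back to declared-but-unimplemented
#endif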
diff --git a/contrib/llvm/lib/DebugInfo/DIContext.cpp b/contrib/llvm/lib/DebugInfo/DIContext.cpp
index e2fd55f..691a92c 100644
--- a/contrib/llvm/lib/DebugInfo/DIContext.cpp
+++ b/contrib/llvm/lib/DebugInfo/DIContext.cpp
@@ -18,7 +18,10 @@ DIContext *DIContext::getDWARFContext(bool isLittleEndian,
StringRef abbrevSection,
StringRef aRangeSection,
StringRef lineSection,
- StringRef stringSection) {
+ StringRef stringSection,
+ StringRef rangeSection,
+ const RelocAddrMap &Map) {
return new DWARFContextInMemory(isLittleEndian, infoSection, abbrevSection,
- aRangeSection, lineSection, stringSection);
+ aRangeSection, lineSection, stringSection,
+ rangeSection, Map);
}
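A hedged usage sketch of the widened factory: callers now also supply the .debug_ranges contents and a relocation map (left empty below; extracting sections and relocations from an object file is elided):

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DIContext.h"
using namespace llvm;

DIContext *makeContext(StringRef Info, StringRef Abbrev, StringRef ARanges,
                       StringRef Line, StringRef Str, StringRef Ranges) {
  RelocAddrMap NoRelocs; // no relocated addresses in this sketch
  return DIContext::getDWARFContext(/*isLittleEndian=*/true, Info, Abbrev,
                                    ARanges, Line, Str, Ranges, NoRelocs);
}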
diff --git a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp
index b27d57b..bdd65b7 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp
@@ -63,7 +63,7 @@ DWARFCompileUnit::extract(uint32_t offset, DataExtractor debug_info_data,
Version = debug_info_data.getU16(&offset);
bool abbrevsOK = debug_info_data.getU32(&offset) == abbrevs->getOffset();
Abbrevs = abbrevs;
- AddrSize = debug_info_data.getU8 (&offset);
+ AddrSize = debug_info_data.getU8(&offset);
bool versionOK = DWARFContext::isSupportedVersion(Version);
bool addrSizeOK = AddrSize == 4 || AddrSize == 8;
@@ -75,6 +75,15 @@ DWARFCompileUnit::extract(uint32_t offset, DataExtractor debug_info_data,
return 0;
}
+bool DWARFCompileUnit::extractRangeList(uint32_t RangeListOffset,
+ DWARFDebugRangeList &RangeList) const {
+ // Require that the compile unit is extracted.
+ assert(DieArray.size() > 0);
+ DataExtractor RangesData(Context.getRangeSection(),
+ Context.isLittleEndian(), AddrSize);
+ return RangeList.extract(RangesData, &RangeListOffset);
+}
+
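Typical use mirrors the caller added later in this patch (DWARFDebugInfoEntryMinimal::addressRangeContainsAddress): resolve a DW_AT_ranges offset through the owning compile unit and query the resulting list:

#include "DWARFCompileUnit.h"
#include "DWARFDebugRangeList.h"
using namespace llvm;

static bool dieRangesContain(const DWARFCompileUnit *CU,
                             uint32_t RangesOffset, uint64_t Address) {
  DWARFDebugRangeList RangeList;
  if (!CU->extractRangeList(RangesOffset, RangeList))
    return false; // missing or malformed .debug_ranges data
  return RangeList.containsAddress(CU->getBaseAddress(), Address);
}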
void DWARFCompileUnit::clear() {
Offset = 0;
Length = 0;
@@ -94,7 +103,9 @@ void DWARFCompileUnit::dump(raw_ostream &OS) {
<< " (next CU at " << format("0x%08x", getNextCompileUnitOffset())
<< ")\n";
- getCompileUnitDIE(false)->dump(OS, this, -1U);
+ const DWARFDebugInfoEntryMinimal *CU = getCompileUnitDIE(false);
+ assert(CU && "Null Compile Unit?");
+ CU->dump(OS, this, -1U);
}
const char *DWARFCompileUnit::getCompilationDir() {
@@ -174,11 +185,11 @@ size_t DWARFCompileUnit::extractDIEsIfNeeded(bool cu_die_only) {
addDIE(die);
return 1;
}
- else if (depth == 0 && initial_die_array_size == 1) {
+ else if (depth == 0 && initial_die_array_size == 1)
// Don't append the CU die as we already did that
- } else {
- addDIE (die);
- }
+ ;
+ else
+ addDIE(die);
const DWARFAbbreviationDeclaration *abbrDecl =
die.getAbbreviationDeclarationPtr();
@@ -199,9 +210,9 @@ size_t DWARFCompileUnit::extractDIEsIfNeeded(bool cu_die_only) {
// Give a little bit of info if we encounter corrupt DWARF (our offset
// should always terminate at or before the start of the next compilation
// unit header).
- if (offset > next_cu_offset) {
- fprintf (stderr, "warning: DWARF compile unit extends beyond its bounds cu 0x%8.8x at 0x%8.8x'\n", getOffset(), offset);
- }
+ if (offset > next_cu_offset)
+ fprintf(stderr, "warning: DWARF compile unit extends beyond its "
+ "bounds cu 0x%8.8x at 0x%8.8x'\n", getOffset(), offset);
setDIERelations();
return DieArray.size();
@@ -244,12 +255,21 @@ DWARFCompileUnit::buildAddressRangeTable(DWARFDebugAranges *debug_aranges,
clearDIEs(true);
}
-const DWARFDebugInfoEntryMinimal*
-DWARFCompileUnit::getFunctionDIEForAddress(int64_t address) {
+DWARFDebugInfoEntryMinimal::InlinedChain
+DWARFCompileUnit::getInlinedChainForAddress(uint64_t Address) {
+ // First, find a subprogram that contains the given address (the root
+ // of the inlined chain).
extractDIEsIfNeeded(false);
+ const DWARFDebugInfoEntryMinimal *SubprogramDIE = 0;
for (size_t i = 0, n = DieArray.size(); i != n; i++) {
- if (DieArray[i].addressRangeContainsAddress(this, address))
- return &DieArray[i];
+ if (DieArray[i].isSubprogramDIE() &&
+ DieArray[i].addressRangeContainsAddress(this, Address)) {
+ SubprogramDIE = &DieArray[i];
+ break;
+ }
}
- return 0;
+ // Get inlined chain rooted at this subprogram DIE.
+ if (!SubprogramDIE)
+ return DWARFDebugInfoEntryMinimal::InlinedChain();
+ return SubprogramDIE->getInlinedChainForAddress(this, Address);
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
index b34a596..03e2862 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
@@ -12,6 +12,7 @@
#include "DWARFDebugAbbrev.h"
#include "DWARFDebugInfoEntry.h"
+#include "DWARFDebugRangeList.h"
#include <vector>
namespace llvm {
@@ -45,6 +46,11 @@ public:
/// extractDIEsIfNeeded - Parses a compile unit and indexes its DIEs if it
/// hasn't already been done. Returns the number of DIEs parsed at this call.
size_t extractDIEsIfNeeded(bool cu_die_only);
+ /// extractRangeList - extracts the range list referenced by this compile
+ /// unit from the .debug_ranges section. Returns true on success.
+ /// Requires that compile unit is already extracted.
+ bool extractRangeList(uint32_t RangeListOffset,
+ DWARFDebugRangeList &RangeList) const;
void clear();
void dump(raw_ostream &OS);
uint32_t getOffset() const { return Offset; }
@@ -106,11 +112,11 @@ public:
void buildAddressRangeTable(DWARFDebugAranges *debug_aranges,
bool clear_dies_if_already_not_parsed);
- /// getFunctionDIEForAddress - Returns pointer to parsed subprogram DIE,
- /// address ranges of which contain the provided address,
- /// or NULL if there is no such subprogram. The pointer
- /// is valid until DWARFCompileUnit::clear() or clearDIEs() is called.
- const DWARFDebugInfoEntryMinimal *getFunctionDIEForAddress(int64_t address);
+
+ /// getInlinedChainForAddress - fetches the inlined chain for a given address.
+ /// Returns an empty chain if there is no subprogram containing the address.
+ DWARFDebugInfoEntryMinimal::InlinedChain getInlinedChainForAddress(
+ uint64_t Address);
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFContext.cpp b/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
index 797662b..afd614c 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
@@ -17,6 +17,8 @@
using namespace llvm;
using namespace dwarf;
+typedef DWARFDebugLine::LineTable DWARFLineTable;
+
void DWARFContext::dump(raw_ostream &OS) {
OS << ".debug_abbrev contents:\n";
getDebugAbbrev()->dump(OS);
@@ -32,15 +34,17 @@ void DWARFContext::dump(raw_ostream &OS) {
while (set.extract(arangesData, &offset))
set.dump(OS);
+ uint8_t savedAddressByteSize = 0;
OS << "\n.debug_lines contents:\n";
for (unsigned i = 0, e = getNumCompileUnits(); i != e; ++i) {
DWARFCompileUnit *cu = getCompileUnitAtIndex(i);
+ savedAddressByteSize = cu->getAddressByteSize();
unsigned stmtOffset =
cu->getCompileUnitDIE()->getAttributeValueAsUnsigned(cu, DW_AT_stmt_list,
-1U);
if (stmtOffset != -1U) {
DataExtractor lineData(getLineSection(), isLittleEndian(),
- cu->getAddressByteSize());
+ savedAddressByteSize);
DWARFDebugLine::DumpingState state(OS);
DWARFDebugLine::parseStatementTable(lineData, &stmtOffset, state);
}
@@ -54,6 +58,18 @@ void DWARFContext::dump(raw_ostream &OS) {
OS << format("0x%8.8x: \"%s\"\n", lastOffset, s);
lastOffset = offset;
}
+
+ OS << "\n.debug_ranges contents:\n";
+ // In fact, different compile units may have different address byte
+ // sizes, but for simplicity we just use the address byte size of the last
+ // compile unit (there is no easy and fast way to associate an address
+ // range list with the compile unit it describes).
+ DataExtractor rangesData(getRangeSection(), isLittleEndian(),
+ savedAddressByteSize);
+ offset = 0;
+ DWARFDebugRangeList rangeList;
+ while (rangeList.extract(rangesData, &offset))
+ rangeList.dump(OS);
}
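For readers unfamiliar with the section being dumped: a DWARF range list is a sequence of (begin, end) offset pairs terminated by a (0, 0) entry, and a pair whose first word is the maximum representable address selects a new base address instead of describing a range. A self-contained sketch of the containment test, fixed to a 64-bit address size for brevity:

#include <cstdint>
#include <vector>

struct RangeEntry { uint64_t Begin, End; };

bool rangeListContains(const std::vector<RangeEntry> &List,
                       uint64_t BaseAddress, uint64_t Address) {
  uint64_t Base = BaseAddress;     // normally the CU's base address
  for (size_t i = 0; i != List.size(); ++i) {
    if (List[i].Begin == 0 && List[i].End == 0)
      break;                       // end-of-list entry
    if (List[i].Begin == UINT64_MAX) {
      Base = List[i].End;          // base address selection entry
      continue;
    }
    if (Base + List[i].Begin <= Address && Address < Base + List[i].End)
      return true;                 // entries are offsets from Base
  }
  return false;
}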
const DWARFDebugAbbrev *DWARFContext::getDebugAbbrev() {
@@ -80,7 +96,7 @@ const DWARFDebugAranges *DWARFContext::getDebugAranges() {
return Aranges.get();
}
-const DWARFDebugLine::LineTable *
+const DWARFLineTable *
DWARFContext::getLineTableForCompileUnit(DWARFCompileUnit *cu) {
if (!Line)
Line.reset(new DWARFDebugLine());
@@ -92,7 +108,7 @@ DWARFContext::getLineTableForCompileUnit(DWARFCompileUnit *cu) {
return 0; // No line table for this compile unit.
// See if the line table is cached.
- if (const DWARFDebugLine::LineTable *lt = Line->getLineTable(stmtOffset))
+ if (const DWARFLineTable *lt = Line->getLineTable(stmtOffset))
return lt;
// We have to parse it first.
@@ -103,11 +119,11 @@ DWARFContext::getLineTableForCompileUnit(DWARFCompileUnit *cu) {
void DWARFContext::parseCompileUnits() {
uint32_t offset = 0;
- const DataExtractor &debug_info_data = DataExtractor(getInfoSection(),
- isLittleEndian(), 0);
- while (debug_info_data.isValidOffset(offset)) {
+ const DataExtractor &DIData = DataExtractor(getInfoSection(),
+ isLittleEndian(), 0);
+ while (DIData.isValidOffset(offset)) {
CUs.push_back(DWARFCompileUnit(*this));
- if (!CUs.back().extract(debug_info_data, &offset)) {
+ if (!CUs.back().extract(DIData, &offset)) {
CUs.pop_back();
break;
}
@@ -131,75 +147,155 @@ namespace {
};
}
-DWARFCompileUnit *DWARFContext::getCompileUnitForOffset(uint32_t offset) {
+DWARFCompileUnit *DWARFContext::getCompileUnitForOffset(uint32_t Offset) {
if (CUs.empty())
parseCompileUnits();
- DWARFCompileUnit *i = std::lower_bound(CUs.begin(), CUs.end(), offset,
- OffsetComparator());
- if (i != CUs.end())
- return &*i;
+ DWARFCompileUnit *CU = std::lower_bound(CUs.begin(), CUs.end(), Offset,
+ OffsetComparator());
+ if (CU != CUs.end())
+ return &*CU;
return 0;
}
-DILineInfo DWARFContext::getLineInfoForAddress(uint64_t address,
- DILineInfoSpecifier specifier) {
+DWARFCompileUnit *DWARFContext::getCompileUnitForAddress(uint64_t Address) {
// First, get the offset of the compile unit.
- uint32_t cuOffset = getDebugAranges()->findAddress(address);
+ uint32_t CUOffset = getDebugAranges()->findAddress(Address);
// Retrieve the compile unit.
- DWARFCompileUnit *cu = getCompileUnitForOffset(cuOffset);
- if (!cu)
+ return getCompileUnitForOffset(CUOffset);
+}
+
+static bool getFileNameForCompileUnit(DWARFCompileUnit *CU,
+ const DWARFLineTable *LineTable,
+ uint64_t FileIndex,
+ bool NeedsAbsoluteFilePath,
+ std::string &FileName) {
+ if (CU == 0 ||
+ LineTable == 0 ||
+ !LineTable->getFileNameByIndex(FileIndex, NeedsAbsoluteFilePath,
+ FileName))
+ return false;
+ if (NeedsAbsoluteFilePath && sys::path::is_relative(FileName)) {
+ // We may still need to append compilation directory of compile unit.
+ SmallString<16> AbsolutePath;
+ if (const char *CompilationDir = CU->getCompilationDir()) {
+ sys::path::append(AbsolutePath, CompilationDir);
+ }
+ sys::path::append(AbsolutePath, FileName);
+ FileName = AbsolutePath.str();
+ }
+ return true;
+}
+
+static bool getFileLineInfoForCompileUnit(DWARFCompileUnit *CU,
+ const DWARFLineTable *LineTable,
+ uint64_t Address,
+ bool NeedsAbsoluteFilePath,
+ std::string &FileName,
+ uint32_t &Line, uint32_t &Column) {
+ if (CU == 0 || LineTable == 0)
+ return false;
+ // Get the index of the row we're looking for in the line table.
+ uint32_t RowIndex = LineTable->lookupAddress(Address);
+ if (RowIndex == -1U)
+ return false;
+ // Take file number and line/column from the row.
+ const DWARFDebugLine::Row &Row = LineTable->Rows[RowIndex];
+ if (!getFileNameForCompileUnit(CU, LineTable, Row.File,
+ NeedsAbsoluteFilePath, FileName))
+ return false;
+ Line = Row.Line;
+ Column = Row.Column;
+ return true;
+}
+
+DILineInfo DWARFContext::getLineInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier) {
+ DWARFCompileUnit *CU = getCompileUnitForAddress(Address);
+ if (!CU)
return DILineInfo();
- SmallString<16> fileName("<invalid>");
- SmallString<16> functionName("<invalid>");
- uint32_t line = 0;
- uint32_t column = 0;
- if (specifier.needs(DILineInfoSpecifier::FunctionName)) {
- const DWARFDebugInfoEntryMinimal *function_die =
- cu->getFunctionDIEForAddress(address);
- if (function_die) {
- if (const char *name = function_die->getSubprogramName(cu))
- functionName = name;
+ std::string FileName = "<invalid>";
+ std::string FunctionName = "<invalid>";
+ uint32_t Line = 0;
+ uint32_t Column = 0;
+ if (Specifier.needs(DILineInfoSpecifier::FunctionName)) {
+ // The address may correspond to an instruction in some inlined function,
+ // so we have to build the chain of inlined functions and take the
+ // name of the topmost function in it.
+ const DWARFDebugInfoEntryMinimal::InlinedChain &InlinedChain =
+ CU->getInlinedChainForAddress(Address);
+ if (InlinedChain.size() > 0) {
+ const DWARFDebugInfoEntryMinimal &TopFunctionDIE = InlinedChain[0];
+ if (const char *Name = TopFunctionDIE.getSubroutineName(CU))
+ FunctionName = Name;
}
}
- if (specifier.needs(DILineInfoSpecifier::FileLineInfo)) {
- // Get the line table for this compile unit.
- const DWARFDebugLine::LineTable *lineTable = getLineTableForCompileUnit(cu);
- if (lineTable) {
- // Get the index of the row we're looking for in the line table.
- uint32_t rowIndex = lineTable->lookupAddress(address);
- if (rowIndex != -1U) {
- const DWARFDebugLine::Row &row = lineTable->Rows[rowIndex];
- // Take file/line info from the line table.
- const DWARFDebugLine::FileNameEntry &fileNameEntry =
- lineTable->Prologue.FileNames[row.File - 1];
- fileName = fileNameEntry.Name;
- if (specifier.needs(DILineInfoSpecifier::AbsoluteFilePath) &&
- sys::path::is_relative(fileName.str())) {
- // Append include directory of file (if it is present in line table)
- // and compilation directory of compile unit to make path absolute.
- const char *includeDir = 0;
- if (uint64_t includeDirIndex = fileNameEntry.DirIdx) {
- includeDir = lineTable->Prologue
- .IncludeDirectories[includeDirIndex - 1];
- }
- SmallString<16> absFileName;
- if (includeDir == 0 || sys::path::is_relative(includeDir)) {
- if (const char *compilationDir = cu->getCompilationDir())
- sys::path::append(absFileName, compilationDir);
- }
- if (includeDir) {
- sys::path::append(absFileName, includeDir);
- }
- sys::path::append(absFileName, fileName.str());
- fileName = absFileName;
- }
- line = row.Line;
- column = row.Column;
+ if (Specifier.needs(DILineInfoSpecifier::FileLineInfo)) {
+ const DWARFLineTable *LineTable = getLineTableForCompileUnit(CU);
+ const bool NeedsAbsoluteFilePath =
+ Specifier.needs(DILineInfoSpecifier::AbsoluteFilePath);
+ getFileLineInfoForCompileUnit(CU, LineTable, Address,
+ NeedsAbsoluteFilePath,
+ FileName, Line, Column);
+ }
+ return DILineInfo(StringRef(FileName), StringRef(FunctionName),
+ Line, Column);
+}
+
+DIInliningInfo DWARFContext::getInliningInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier) {
+ DWARFCompileUnit *CU = getCompileUnitForAddress(Address);
+ if (!CU)
+ return DIInliningInfo();
+
+ const DWARFDebugInfoEntryMinimal::InlinedChain &InlinedChain =
+ CU->getInlinedChainForAddress(Address);
+ if (InlinedChain.size() == 0)
+ return DIInliningInfo();
+
+ DIInliningInfo InliningInfo;
+ uint32_t CallFile = 0, CallLine = 0, CallColumn = 0;
+ const DWARFLineTable *LineTable = 0;
+ for (uint32_t i = 0, n = InlinedChain.size(); i != n; i++) {
+ const DWARFDebugInfoEntryMinimal &FunctionDIE = InlinedChain[i];
+ std::string FileName = "<invalid>";
+ std::string FunctionName = "<invalid>";
+ uint32_t Line = 0;
+ uint32_t Column = 0;
+ // Get function name if necessary.
+ if (Specifier.needs(DILineInfoSpecifier::FunctionName)) {
+ if (const char *Name = FunctionDIE.getSubroutineName(CU))
+ FunctionName = Name;
+ }
+ if (Specifier.needs(DILineInfoSpecifier::FileLineInfo)) {
+ const bool NeedsAbsoluteFilePath =
+ Specifier.needs(DILineInfoSpecifier::AbsoluteFilePath);
+ if (i == 0) {
+ // For the topmost frame, initialize the line table of this
+ // compile unit and fetch file/line info from it.
+ LineTable = getLineTableForCompileUnit(CU);
+ // For the topmost routine, get file/line info from line table.
+ getFileLineInfoForCompileUnit(CU, LineTable, Address,
+ NeedsAbsoluteFilePath,
+ FileName, Line, Column);
+ } else {
+ // Otherwise, use call file, call line and call column from
+ // previous DIE in inlined chain.
+ getFileNameForCompileUnit(CU, LineTable, CallFile,
+ NeedsAbsoluteFilePath, FileName);
+ Line = CallLine;
+ Column = CallColumn;
+ }
+ // Get the call file/line/column of the current DIE.
+ if (i + 1 < n) {
+ FunctionDIE.getCallerFrame(CU, CallFile, CallLine, CallColumn);
}
}
+ DILineInfo Frame(StringRef(FileName), StringRef(FunctionName),
+ Line, Column);
+ InliningInfo.addFrame(Frame);
}
- return DILineInfo(fileName, functionName, line, column);
+ return InliningInfo;
}
void DWARFContextInMemory::anchor() { }
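A hedged consumer sketch for the new entry point. The frame accessors used below (getNumberOfFrames, getFrame) and the DILineInfo getters are assumed from DIContext.h rather than shown in this patch; verify against the header before relying on them:

#include "llvm/DebugInfo/DIContext.h"
#include <cstdio>
using namespace llvm;

void printInlineStack(DIContext &Ctx, uint64_t Address) {
  DIInliningInfo Info = Ctx.getInliningInfoForAddress(Address);
  for (uint32_t i = 0, n = Info.getNumberOfFrames(); i != n; ++i) {
    DILineInfo Frame = Info.getFrame(i);
    std::printf("%s at %s:%u\n", Frame.getFunctionName(),
                Frame.getFileName(), Frame.getLine());
  }
}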
diff --git a/contrib/llvm/lib/DebugInfo/DWARFContext.h b/contrib/llvm/lib/DebugInfo/DWARFContext.h
index e55a27e..4001792 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFContext.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFContext.h
@@ -13,6 +13,7 @@
#include "DWARFCompileUnit.h"
#include "DWARFDebugAranges.h"
#include "DWARFDebugLine.h"
+#include "DWARFDebugRangeList.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
@@ -25,21 +26,24 @@ namespace llvm {
/// methods that a concrete implementation provides.
class DWARFContext : public DIContext {
bool IsLittleEndian;
+ const RelocAddrMap &RelocMap;
SmallVector<DWARFCompileUnit, 1> CUs;
OwningPtr<DWARFDebugAbbrev> Abbrev;
OwningPtr<DWARFDebugAranges> Aranges;
OwningPtr<DWARFDebugLine> Line;
- DWARFContext(DWARFContext &); // = delete
- DWARFContext &operator=(DWARFContext &); // = delete
+ DWARFContext(DWARFContext &) LLVM_DELETED_FUNCTION;
+ DWARFContext &operator=(DWARFContext &) LLVM_DELETED_FUNCTION;
/// Read compile units from the debug_info section and store them in CUs.
void parseCompileUnits();
protected:
- DWARFContext(bool isLittleEndian) : IsLittleEndian(isLittleEndian) {}
+ DWARFContext(bool isLittleEndian, const RelocAddrMap &Map) :
+ IsLittleEndian(isLittleEndian), RelocMap(Map) {}
public:
virtual void dump(raw_ostream &OS);
+
/// Get the number of compile units in this context.
unsigned getNumCompileUnits() {
if (CUs.empty())
@@ -53,9 +57,6 @@ public:
return &CUs[index];
}
- /// Return the compile unit that includes an offset (relative to .debug_info).
- DWARFCompileUnit *getCompileUnitForOffset(uint32_t offset);
-
/// Get a pointer to the parsed DebugAbbrev object.
const DWARFDebugAbbrev *getDebugAbbrev();
@@ -66,22 +67,32 @@ public:
const DWARFDebugLine::LineTable *
getLineTableForCompileUnit(DWARFCompileUnit *cu);
- virtual DILineInfo getLineInfoForAddress(uint64_t address,
- DILineInfoSpecifier specifier = DILineInfoSpecifier());
+ virtual DILineInfo getLineInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier());
+ virtual DIInliningInfo getInliningInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier());
bool isLittleEndian() const { return IsLittleEndian; }
+ const RelocAddrMap &relocMap() const { return RelocMap; }
virtual StringRef getInfoSection() = 0;
virtual StringRef getAbbrevSection() = 0;
virtual StringRef getARangeSection() = 0;
virtual StringRef getLineSection() = 0;
virtual StringRef getStringSection() = 0;
+ virtual StringRef getRangeSection() = 0;
static bool isSupportedVersion(unsigned version) {
return version == 2 || version == 3;
}
-};
+private:
+ /// Return the compile unit that includes an offset (relative to .debug_info).
+ DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset);
+ /// Return the compile unit that contains the instruction with the
+ /// provided address.
+ DWARFCompileUnit *getCompileUnitForAddress(uint64_t Address);
+};
/// DWARFContextInMemory is the simplest possible implementation of a
/// DWARFContext. It assumes all content is available in memory and stores
@@ -93,19 +104,23 @@ class DWARFContextInMemory : public DWARFContext {
StringRef ARangeSection;
StringRef LineSection;
StringRef StringSection;
+ StringRef RangeSection;
public:
DWARFContextInMemory(bool isLittleEndian,
StringRef infoSection,
StringRef abbrevSection,
StringRef aRangeSection,
StringRef lineSection,
- StringRef stringSection)
- : DWARFContext(isLittleEndian),
+ StringRef stringSection,
+ StringRef rangeSection,
+ const RelocAddrMap &Map = RelocAddrMap())
+ : DWARFContext(isLittleEndian, Map),
InfoSection(infoSection),
AbbrevSection(abbrevSection),
ARangeSection(aRangeSection),
LineSection(lineSection),
- StringSection(stringSection)
+ StringSection(stringSection),
+ RangeSection(rangeSection)
{}
virtual StringRef getInfoSection() { return InfoSection; }
@@ -113,6 +128,7 @@ public:
virtual StringRef getARangeSection() { return ARangeSection; }
virtual StringRef getLineSection() { return LineSection; }
virtual StringRef getStringSection() { return StringSection; }
+ virtual StringRef getRangeSection() { return RangeSection; }
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
index ef470e5..f9a34c9 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
@@ -62,7 +62,6 @@ bool DWARFDebugAranges::extract(DataExtractor debug_aranges_data) {
uint32_t offset = 0;
typedef std::vector<DWARFDebugArangeSet> SetCollection;
- typedef SetCollection::const_iterator SetCollectionIter;
SetCollection sets;
DWARFDebugArangeSet set;
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
index 429a36c..ab67464 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
@@ -1,4 +1,4 @@
-//===-- DWARFDebugInfoEntry.cpp --------------------------------------------===//
+//===-- DWARFDebugInfoEntry.cpp -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -101,7 +101,7 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu,
DataExtractor debug_info_data = cu->getDebugInfoExtractor();
uint64_t abbrCode = debug_info_data.getULEB128(offset_ptr);
- assert (fixed_form_sizes); // For best performance this should be specified!
+ assert(fixed_form_sizes); // For best performance this should be specified!
if (abbrCode) {
uint32_t offset = *offset_ptr;
@@ -126,6 +126,7 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu,
switch (form) {
// Block forms of inlined data that have a length field and the data
// bytes inlined in the .debug_info.
+ case DW_FORM_exprloc:
case DW_FORM_block:
form_size = debug_info_data.getULEB128(&offset);
break;
@@ -150,6 +151,11 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu,
form_size = cu->getAddressByteSize();
break;
+ // 0 sized form.
+ case DW_FORM_flag_present:
+ form_size = 0;
+ break;
+
// 1 byte values
case DW_FORM_data1:
case DW_FORM_flag:
@@ -173,6 +179,7 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu,
// 8 byte values
case DW_FORM_data8:
case DW_FORM_ref8:
+ case DW_FORM_ref_sig8:
form_size = 8;
break;
@@ -188,6 +195,13 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu,
form = debug_info_data.getULEB128(&offset);
break;
+ case DW_FORM_sec_offset:
+ if (cu->getAddressByteSize() == 4)
+ debug_info_data.getU32(offset_ptr);
+ else
+ debug_info_data.getU64(offset_ptr);
+ break;
+
default:
*offset_ptr = Offset;
return false;
@@ -249,6 +263,7 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu,
switch (form) {
// Block forms of inlined data that have a length field and the data
// bytes inlined in the .debug_info.
+ case DW_FORM_exprloc:
case DW_FORM_block:
form_size = debug_info_data.getULEB128(&offset);
break;
@@ -273,6 +288,11 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu,
form_size = cu_addr_size;
break;
+ // 0 byte value
+ case DW_FORM_flag_present:
+ form_size = 0;
+ break;
+
// 1 byte values
case DW_FORM_data1:
case DW_FORM_flag:
@@ -299,6 +319,7 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu,
// 8 byte values
case DW_FORM_data8:
case DW_FORM_ref8:
+ case DW_FORM_ref_sig8:
form_size = 8;
break;
@@ -314,6 +335,13 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu,
form_is_indirect = true;
break;
+ case DW_FORM_sec_offset:
+ if (cu->getAddressByteSize() == 4)
+ debug_info_data.getU32(offset_ptr);
+ else
+ debug_info_data.getU64(offset_ptr);
+ break;
+
default:
*offset_ptr = offset;
return false;
@@ -336,6 +364,16 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu,
return false;
}
+bool DWARFDebugInfoEntryMinimal::isSubprogramDIE() const {
+ return getTag() == DW_TAG_subprogram;
+}
+
+bool DWARFDebugInfoEntryMinimal::isSubroutineDIE() const {
+ uint32_t Tag = getTag();
+ return Tag == DW_TAG_subprogram ||
+ Tag == DW_TAG_inlined_subroutine;
+}
+
uint32_t
DWARFDebugInfoEntryMinimal::getAttributeValue(const DWARFCompileUnit *cu,
const uint16_t attr,
@@ -373,9 +411,10 @@ DWARFDebugInfoEntryMinimal::getAttributeValue(const DWARFCompileUnit *cu,
const char*
DWARFDebugInfoEntryMinimal::getAttributeValueAsString(
- const DWARFCompileUnit* cu,
- const uint16_t attr,
- const char* fail_value) const {
+ const DWARFCompileUnit* cu,
+ const uint16_t attr,
+ const char* fail_value)
+ const {
DWARFFormValue form_value;
if (getAttributeValue(cu, attr, form_value)) {
DataExtractor stringExtractor(cu->getContext().getStringSection(),
@@ -387,9 +426,9 @@ DWARFDebugInfoEntryMinimal::getAttributeValueAsString(
uint64_t
DWARFDebugInfoEntryMinimal::getAttributeValueAsUnsigned(
- const DWARFCompileUnit* cu,
- const uint16_t attr,
- uint64_t fail_value) const {
+ const DWARFCompileUnit* cu,
+ const uint16_t attr,
+ uint64_t fail_value) const {
DWARFFormValue form_value;
if (getAttributeValue(cu, attr, form_value))
return form_value.getUnsigned();
@@ -398,9 +437,9 @@ DWARFDebugInfoEntryMinimal::getAttributeValueAsUnsigned(
int64_t
DWARFDebugInfoEntryMinimal::getAttributeValueAsSigned(
- const DWARFCompileUnit* cu,
- const uint16_t attr,
- int64_t fail_value) const {
+ const DWARFCompileUnit* cu,
+ const uint16_t attr,
+ int64_t fail_value) const {
DWARFFormValue form_value;
if (getAttributeValue(cu, attr, form_value))
return form_value.getSigned();
@@ -409,33 +448,42 @@ DWARFDebugInfoEntryMinimal::getAttributeValueAsSigned(
uint64_t
DWARFDebugInfoEntryMinimal::getAttributeValueAsReference(
- const DWARFCompileUnit* cu,
- const uint16_t attr,
- uint64_t fail_value) const {
+ const DWARFCompileUnit* cu,
+ const uint16_t attr,
+ uint64_t fail_value)
+ const {
DWARFFormValue form_value;
if (getAttributeValue(cu, attr, form_value))
return form_value.getReference(cu);
return fail_value;
}
+bool DWARFDebugInfoEntryMinimal::getLowAndHighPC(const DWARFCompileUnit *CU,
+ uint64_t &LowPC,
+ uint64_t &HighPC) const {
+ HighPC = -1ULL;
+ LowPC = getAttributeValueAsUnsigned(CU, DW_AT_low_pc, -1ULL);
+ if (LowPC != -1ULL)
+ HighPC = getAttributeValueAsUnsigned(CU, DW_AT_high_pc, -1ULL);
+ return (HighPC != -1ULL);
+}
+
void
-DWARFDebugInfoEntryMinimal::buildAddressRangeTable(const DWARFCompileUnit *cu,
- DWARFDebugAranges *debug_aranges)
+DWARFDebugInfoEntryMinimal::buildAddressRangeTable(const DWARFCompileUnit *CU,
+ DWARFDebugAranges *DebugAranges)
const {
if (AbbrevDecl) {
- uint16_t tag = AbbrevDecl->getTag();
- if (tag == DW_TAG_subprogram) {
- uint64_t hi_pc = -1ULL;
- uint64_t lo_pc = getAttributeValueAsUnsigned(cu, DW_AT_low_pc, -1ULL);
- if (lo_pc != -1ULL)
- hi_pc = getAttributeValueAsUnsigned(cu, DW_AT_high_pc, -1ULL);
- if (hi_pc != -1ULL)
- debug_aranges->appendRange(cu->getOffset(), lo_pc, hi_pc);
+ if (isSubprogramDIE()) {
+ uint64_t LowPC, HighPC;
+ if (getLowAndHighPC(CU, LowPC, HighPC)) {
+ DebugAranges->appendRange(CU->getOffset(), LowPC, HighPC);
+ }
+ // FIXME: try to append ranges from .debug_ranges section.
}
const DWARFDebugInfoEntryMinimal *child = getFirstChild();
while (child) {
- child->buildAddressRangeTable(cu, debug_aranges);
+ child->buildAddressRangeTable(CU, DebugAranges);
child = child->getSibling();
}
}
@@ -443,51 +491,95 @@ DWARFDebugInfoEntryMinimal::buildAddressRangeTable(const DWARFCompileUnit *cu,
bool
DWARFDebugInfoEntryMinimal::addressRangeContainsAddress(
- const DWARFCompileUnit *cu, const uint64_t address) const {
- if (!isNULL() && getTag() == DW_TAG_subprogram) {
- uint64_t hi_pc = -1ULL;
- uint64_t lo_pc = getAttributeValueAsUnsigned(cu, DW_AT_low_pc, -1ULL);
- if (lo_pc != -1ULL)
- hi_pc = getAttributeValueAsUnsigned(cu, DW_AT_high_pc, -1ULL);
- if (hi_pc != -1ULL) {
- return (lo_pc <= address && address < hi_pc);
- }
+ const DWARFCompileUnit *CU,
+ const uint64_t Address)
+ const {
+ if (isNULL())
+ return false;
+ uint64_t LowPC, HighPC;
+ if (getLowAndHighPC(CU, LowPC, HighPC))
+ return (LowPC <= Address && Address <= HighPC);
+ // Try to get address ranges from .debug_ranges section.
+ uint32_t RangesOffset = getAttributeValueAsReference(CU, DW_AT_ranges, -1U);
+ if (RangesOffset != -1U) {
+ DWARFDebugRangeList RangeList;
+ if (CU->extractRangeList(RangesOffset, RangeList))
+ return RangeList.containsAddress(CU->getBaseAddress(), Address);
}
return false;
}
const char*
-DWARFDebugInfoEntryMinimal::getSubprogramName(
- const DWARFCompileUnit *cu) const {
- if (isNULL() || getTag() != DW_TAG_subprogram)
+DWARFDebugInfoEntryMinimal::getSubroutineName(const DWARFCompileUnit *CU)
+ const {
+ if (!isSubroutineDIE())
return 0;
// Try to get mangled name if possible.
if (const char *name =
- getAttributeValueAsString(cu, DW_AT_MIPS_linkage_name, 0))
+ getAttributeValueAsString(CU, DW_AT_MIPS_linkage_name, 0))
return name;
- if (const char *name = getAttributeValueAsString(cu, DW_AT_linkage_name, 0))
+ if (const char *name = getAttributeValueAsString(CU, DW_AT_linkage_name, 0))
return name;
- if (const char *name = getAttributeValueAsString(cu, DW_AT_name, 0))
+ if (const char *name = getAttributeValueAsString(CU, DW_AT_name, 0))
return name;
// Try to get name from specification DIE.
uint32_t spec_ref =
- getAttributeValueAsReference(cu, DW_AT_specification, -1U);
+ getAttributeValueAsReference(CU, DW_AT_specification, -1U);
if (spec_ref != -1U) {
DWARFDebugInfoEntryMinimal spec_die;
- if (spec_die.extract(cu, &spec_ref)) {
- if (const char *name = spec_die.getSubprogramName(cu))
+ if (spec_die.extract(CU, &spec_ref)) {
+ if (const char *name = spec_die.getSubroutineName(CU))
return name;
}
}
// Try to get name from abstract origin DIE.
uint32_t abs_origin_ref =
- getAttributeValueAsReference(cu, DW_AT_abstract_origin, -1U);
+ getAttributeValueAsReference(CU, DW_AT_abstract_origin, -1U);
if (abs_origin_ref != -1U) {
DWARFDebugInfoEntryMinimal abs_origin_die;
- if (abs_origin_die.extract(cu, &abs_origin_ref)) {
- if (const char *name = abs_origin_die.getSubprogramName(cu))
+ if (abs_origin_die.extract(CU, &abs_origin_ref)) {
+ if (const char *name = abs_origin_die.getSubroutineName(CU))
return name;
}
}
return 0;
}
+
+void DWARFDebugInfoEntryMinimal::getCallerFrame(const DWARFCompileUnit *CU,
+ uint32_t &CallFile,
+ uint32_t &CallLine,
+ uint32_t &CallColumn) const {
+ CallFile = getAttributeValueAsUnsigned(CU, DW_AT_call_file, 0);
+ CallLine = getAttributeValueAsUnsigned(CU, DW_AT_call_line, 0);
+ CallColumn = getAttributeValueAsUnsigned(CU, DW_AT_call_column, 0);
+}
+
+DWARFDebugInfoEntryMinimal::InlinedChain
+DWARFDebugInfoEntryMinimal::getInlinedChainForAddress(
+ const DWARFCompileUnit *CU,
+ const uint64_t Address)
+ const {
+ DWARFDebugInfoEntryMinimal::InlinedChain InlinedChain;
+ if (isNULL())
+ return InlinedChain;
+ for (const DWARFDebugInfoEntryMinimal *DIE = this; DIE; ) {
+ // Append the current DIE to the inlined chain only if it has the
+ // correct tag (e.g. it is not a lexical block).
+ if (DIE->isSubroutineDIE()) {
+ InlinedChain.push_back(*DIE);
+ }
+ // Try to get a child that also contains the provided address.
+ const DWARFDebugInfoEntryMinimal *Child = DIE->getFirstChild();
+ while (Child) {
+ if (Child->addressRangeContainsAddress(CU, Address)) {
+ // Assume there is only one such child.
+ break;
+ }
+ Child = Child->getSibling();
+ }
+ DIE = Child;
+ }
+ // Reverse the obtained chain to make the root of the inlined chain last.
+ std::reverse(InlinedChain.begin(), InlinedChain.end());
+ return InlinedChain;
+}
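The descent restated over a generic tree, to make its shape clear (the Node type is illustrative; the real code follows first-child/sibling links and DWARF address-range checks):

#include <algorithm>
#include <vector>

struct Node {
  bool IsSubroutine;            // subprogram or inlined_subroutine tag
  bool ContainsAddress;         // address range covers the query address
  std::vector<Node*> Children;
};

std::vector<Node*> inlinedChain(Node *Root) {
  std::vector<Node*> Chain;
  for (Node *N = Root; N; ) {
    if (N->IsSubroutine)
      Chain.push_back(N);
    Node *Next = 0;
    for (size_t i = 0; i != N->Children.size(); ++i) {
      if (N->Children[i]->ContainsAddress) {
        Next = N->Children[i];  // assume at most one covering child
        break;
      }
    }
    N = Next;                   // descend into the covering child
  }
  std::reverse(Chain.begin(), Chain.end()); // deepest inline first, root last
  return Chain;
}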
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
index d5d86b9..9c1b2be 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
@@ -11,6 +11,7 @@
#define LLVM_DEBUGINFO_DWARFDEBUGINFOENTRY_H
#include "DWARFAbbreviationDeclaration.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -19,6 +20,7 @@ class DWARFDebugAranges;
class DWARFCompileUnit;
class DWARFContext;
class DWARFFormValue;
+class DWARFInlinedSubroutineChain;
/// DWARFDebugInfoEntryMinimal - A DIE with only the minimum required data.
class DWARFDebugInfoEntryMinimal {
@@ -52,6 +54,13 @@ public:
uint32_t getTag() const { return AbbrevDecl ? AbbrevDecl->getTag() : 0; }
bool isNULL() const { return AbbrevDecl == 0; }
+
+ /// Returns true if DIE represents a subprogram (not inlined).
+ bool isSubprogramDIE() const;
+ /// Returns true if DIE represents a subprogram or an inlined
+ /// subroutine.
+ bool isSubroutineDIE() const;
+
uint32_t getOffset() const { return Offset; }
uint32_t getNumAttributes() const {
return !isNULL() ? AbbrevDecl->getNumAttributes() : 0;
@@ -126,17 +135,40 @@ public:
const uint16_t attr,
int64_t fail_value) const;
- void buildAddressRangeTable(const DWARFCompileUnit *cu,
- DWARFDebugAranges *debug_aranges) const;
-
- bool addressRangeContainsAddress(const DWARFCompileUnit *cu,
- const uint64_t address) const;
-
- // If a DIE represents a subprogram, returns its mangled name
- // (or short name, if mangled is missing). This name may be fetched
- // from specification or abstract origin for this subprogram.
- // Returns null if no name is found.
- const char* getSubprogramName(const DWARFCompileUnit *cu) const;
+ /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
+ /// Returns true if both attributes are present.
+ bool getLowAndHighPC(const DWARFCompileUnit *CU,
+ uint64_t &LowPC, uint64_t &HighPC) const;
+
+ void buildAddressRangeTable(const DWARFCompileUnit *CU,
+ DWARFDebugAranges *DebugAranges) const;
+
+ bool addressRangeContainsAddress(const DWARFCompileUnit *CU,
+ const uint64_t Address) const;
+
+ /// If a DIE represents a subprogram (or inlined subroutine),
+ /// returns its mangled name (or short name, if mangled is missing).
+ /// This name may be fetched from specification or abstract origin
+ /// for this subprogram. Returns null if no name is found.
+ const char* getSubroutineName(const DWARFCompileUnit *CU) const;
+
+ /// Retrieves values of DW_AT_call_file, DW_AT_call_line and
+ /// DW_AT_call_column from DIE (or zeroes if they are missing).
+ void getCallerFrame(const DWARFCompileUnit *CU, uint32_t &CallFile,
+ uint32_t &CallLine, uint32_t &CallColumn) const;
+
+ /// InlinedChain - represents a chain of inlined_subroutine
+ /// DIEs (possibly ending with a subprogram DIE), all of which are contained
+ /// in some concrete inlined instance tree. The address range of each DIE
+ /// in this chain (except the last) is contained in the address range of
+ /// the next DIE in the chain.
+ typedef SmallVector<DWARFDebugInfoEntryMinimal, 4> InlinedChain;
+
+ /// Get the inlined chain for a given address, rooted at the current DIE.
+ /// Returns an empty chain if the address is not contained in the address
+ /// range of the current DIE.
+ InlinedChain getInlinedChainForAddress(const DWARFCompileUnit *CU,
+ const uint64_t Address) const;
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
index d99575d..267364a 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
@@ -10,6 +10,7 @@
#include "DWARFDebugLine.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
@@ -513,3 +514,29 @@ DWARFDebugLine::LineTable::lookupAddress(uint64_t address) const {
}
return index;
}
+
+bool
+DWARFDebugLine::LineTable::getFileNameByIndex(uint64_t FileIndex,
+ bool NeedsAbsoluteFilePath,
+ std::string &Result) const {
+ if (FileIndex == 0 || FileIndex > Prologue.FileNames.size())
+ return false;
+ const FileNameEntry &Entry = Prologue.FileNames[FileIndex - 1];
+ const char *FileName = Entry.Name;
+ if (!NeedsAbsoluteFilePath ||
+ sys::path::is_absolute(FileName)) {
+ Result = FileName;
+ return true;
+ }
+ SmallString<16> FilePath;
+ uint64_t IncludeDirIndex = Entry.DirIdx;
+ // Be defensive about the contents of Entry.
+ if (IncludeDirIndex > 0 &&
+ IncludeDirIndex <= Prologue.IncludeDirectories.size()) {
+ const char *IncludeDir = Prologue.IncludeDirectories[IncludeDirIndex - 1];
+ sys::path::append(FilePath, IncludeDir);
+ }
+ sys::path::append(FilePath, FileName);
+ Result = FilePath.str();
+ return true;
+}
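
The 1-based table lookup and path joining performed by getFileNameByIndex can
be shown with a self-contained sketch of the absolute-path case; the helper
names and the POSIX-only isAbsolute check are assumptions of this example,
not LLVM API:

#include <cassert>
#include <string>
#include <vector>

struct FileEntry { std::string Name; size_t DirIdx; }; // DirIdx is 1-based

static bool isAbsolute(const std::string &P) {
  return !P.empty() && P[0] == '/';      // POSIX-only check for brevity
}

std::string fileNameByIndex(const std::vector<std::string> &IncludeDirs,
                            const std::vector<FileEntry> &Files,
                            size_t FileIndex) {  // FileIndex is 1-based
  assert(FileIndex >= 1 && FileIndex <= Files.size());
  const FileEntry &E = Files[FileIndex - 1];
  if (isAbsolute(E.Name))
    return E.Name;                       // already absolute: use as-is
  std::string Path;
  // An out-of-range DirIdx simply leaves the name relative, mirroring the
  // defensive check in the code above.
  if (E.DirIdx >= 1 && E.DirIdx <= IncludeDirs.size())
    Path = IncludeDirs[E.DirIdx - 1] + "/";
  return Path + E.Name;
}
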
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
index 6382b45..586dd7e 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
@@ -12,6 +12,7 @@
#include "llvm/Support/DataExtractor.h"
#include <map>
+#include <string>
#include <vector>
namespace llvm {
@@ -174,6 +175,13 @@ public:
// Returns the index of the row with file/line info for a given address,
// or -1 if there is no such row.
uint32_t lookupAddress(uint64_t address) const;
+
+ // Extracts a file name by its index in the file name table in the
+ // prologue. Returns true on success.
+ bool getFileNameByIndex(uint64_t FileIndex,
+ bool NeedsAbsoluteFilePath,
+ std::string &Result) const;
+
void dump(raw_ostream &OS) const;
struct Prologue Prologue;
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp
new file mode 100644
index 0000000..1806bee
--- /dev/null
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp
@@ -0,0 +1,67 @@
+//===-- DWARFDebugRangeList.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DWARFDebugRangeList.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+void DWARFDebugRangeList::clear() {
+ Offset = -1U;
+ AddressSize = 0;
+ Entries.clear();
+}
+
+bool DWARFDebugRangeList::extract(DataExtractor data, uint32_t *offset_ptr) {
+ clear();
+ if (!data.isValidOffset(*offset_ptr))
+ return false;
+ AddressSize = data.getAddressSize();
+ if (AddressSize != 4 && AddressSize != 8)
+ return false;
+ Offset = *offset_ptr;
+ while (true) {
+ RangeListEntry entry;
+ uint32_t prev_offset = *offset_ptr;
+ entry.StartAddress = data.getAddress(offset_ptr);
+ entry.EndAddress = data.getAddress(offset_ptr);
+ // Check that both values were extracted correctly.
+ if (*offset_ptr != prev_offset + 2 * AddressSize) {
+ clear();
+ return false;
+ }
+ if (entry.isEndOfListEntry())
+ break;
+ Entries.push_back(entry);
+ }
+ return true;
+}
+
+void DWARFDebugRangeList::dump(raw_ostream &OS) const {
+ for (int i = 0, n = Entries.size(); i != n; ++i) {
+ const char *format_str = (AddressSize == 4
+ ? "%08x %08" PRIx64 " %08" PRIx64 "\n"
+ : "%08x %016" PRIx64 " %016" PRIx64 "\n");
+ OS << format(format_str, Offset, Entries[i].StartAddress,
+ Entries[i].EndAddress);
+ }
+ OS << format("%08x <End of list>\n", Offset);
+}
+
+bool DWARFDebugRangeList::containsAddress(uint64_t BaseAddress,
+ uint64_t Address) const {
+ for (int i = 0, n = Entries.size(); i != n; ++i) {
+ if (Entries[i].isBaseAddressSelectionEntry(AddressSize))
+ BaseAddress = Entries[i].EndAddress;
+ else if (Entries[i].containsAddress(BaseAddress, Address))
+ return true;
+ }
+ return false;
+}
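
How a base address selection entry rebases the entries after it is easier to
see in reduced form. This sketch assumes 8-byte addresses (so the sentinel
start value is UINT64_MAX) and invented names; the loop mirrors
containsAddress above:

#include <cstdint>
#include <vector>

struct Range { uint64_t Start, End; };   // CU-relative offsets

bool rangesContain(const std::vector<Range> &List, uint64_t Base,
                   uint64_t Addr) {
  for (size_t i = 0, e = List.size(); i != e; ++i) {
    if (List[i].Start == UINT64_MAX) {   // base address selection entry
      Base = List[i].End;                // End holds the new base address
      continue;
    }
    if (Base + List[i].Start <= Addr && Addr < Base + List[i].End)
      return true;
  }
  return false;
}
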
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h b/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h
new file mode 100644
index 0000000..4e34a91
--- /dev/null
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h
@@ -0,0 +1,78 @@
+//===-- DWARFDebugRangeList.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFDEBUGRANGELIST_H
+#define LLVM_DEBUGINFO_DWARFDEBUGRANGELIST_H
+
+#include "llvm/Support/DataExtractor.h"
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFDebugRangeList {
+public:
+ struct RangeListEntry {
+ // A beginning address offset. This address offset has the size of an
+ // address and is relative to the applicable base address of the
+ // compilation unit referencing this range list. It marks the beginning
+ // of an address range.
+ uint64_t StartAddress;
+ // An ending address offset. This address offset again has the size of
+ // an address and is relative to the applicable base address of the
+ // compilation unit referencing this range list. It marks the first
+ // address past the end of the address range. The ending address must
+ // be greater than or equal to the beginning address.
+ uint64_t EndAddress;
+ // The end of any given range list is marked by an end of list entry,
+ // which consists of a 0 for the beginning address offset
+ // and a 0 for the ending address offset.
+ bool isEndOfListEntry() const {
+ return (StartAddress == 0) && (EndAddress == 0);
+ }
+ // A base address selection entry consists of:
+ // 1. The value of the largest representable address offset
+ // (for example, 0xffffffff when the size of an address is 32 bits).
+ // 2. An address, which defines the appropriate base address for
+ // use in interpreting the beginning and ending address offsets of
+ // subsequent entries of the range list.
+ bool isBaseAddressSelectionEntry(uint8_t AddressSize) const {
+ assert(AddressSize == 4 || AddressSize == 8);
+ if (AddressSize == 4)
+ return StartAddress == -1U;
+ else
+ return StartAddress == -1ULL;
+ }
+ bool containsAddress(uint64_t BaseAddress, uint64_t Address) const {
+ return (BaseAddress + StartAddress <= Address) &&
+ (Address < BaseAddress + EndAddress);
+ }
+ };
+
+private:
+ // Offset in .debug_ranges section.
+ uint32_t Offset;
+ uint8_t AddressSize;
+ std::vector<RangeListEntry> Entries;
+
+public:
+ DWARFDebugRangeList() { clear(); }
+ void clear();
+ void dump(raw_ostream &OS) const;
+ bool extract(DataExtractor data, uint32_t *offset_ptr);
+ /// containsAddress - Returns true if the range list contains the given
+ /// address. Must be passed the base address of the compile unit that
+ /// references this range list.
+ bool containsAddress(uint64_t BaseAddress, uint64_t Address) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDEBUGRANGELIST_H
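
For reference, the extract() loop amounts to reading (start, end) address
pairs until the (0, 0) end-of-list entry, and failing if the section ends
mid-entry. A minimal sketch, assuming 8-byte little-endian addresses and
hypothetical names:

#include <cstdint>
#include <cstring>
#include <vector>

struct RangePair { uint64_t Start, End; };

bool parseRangeList(const uint8_t *Data, size_t Size,
                    std::vector<RangePair> &Out) {
  size_t Off = 0;
  while (Off + 16 <= Size) {             // both addresses must fit
    RangePair P;
    std::memcpy(&P.Start, Data + Off, 8);
    std::memcpy(&P.End, Data + Off + 8, 8);
    Off += 16;
    if (P.Start == 0 && P.End == 0)      // end-of-list entry
      return true;
    Out.push_back(P);
  }
  return false;                          // ran off the section: malformed
}
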
diff --git a/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp b/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
index ee2a3ab..fea9fd7 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
@@ -10,6 +10,7 @@
#include "DWARFFormValue.h"
#include "DWARFCompileUnit.h"
#include "DWARFContext.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
@@ -41,6 +42,10 @@ static const uint8_t form_sizes_addr4[] = {
8, // 0x14 DW_FORM_ref8
0, // 0x15 DW_FORM_ref_udata
0, // 0x16 DW_FORM_indirect
+ 4, // 0x17 DW_FORM_sec_offset
+ 0, // 0x18 DW_FORM_exprloc
+ 0, // 0x19 DW_FORM_flag_present
+ 8, // 0x20 DW_FORM_ref_sig8
};
static const uint8_t form_sizes_addr8[] = {
@@ -67,6 +72,10 @@ static const uint8_t form_sizes_addr8[] = {
8, // 0x14 DW_FORM_ref8
0, // 0x15 DW_FORM_ref_udata
0, // 0x16 DW_FORM_indirect
+ 8, // 0x17 DW_FORM_sec_offset
+ 0, // 0x18 DW_FORM_exprloc
+ 0, // 0x19 DW_FORM_flag_present
+ 8, // 0x20 DW_FORM_ref_sig8
};
const uint8_t *
@@ -90,9 +99,18 @@ DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr,
indirect = false;
switch (Form) {
case DW_FORM_addr:
- case DW_FORM_ref_addr:
- Value.uval = data.getUnsigned(offset_ptr, cu->getAddressByteSize());
+ case DW_FORM_ref_addr: {
+ RelocAddrMap::const_iterator AI
+ = cu->getContext().relocMap().find(*offset_ptr);
+ if (AI != cu->getContext().relocMap().end()) {
+ const std::pair<uint8_t, int64_t> &R = AI->second;
+ Value.uval = R.second;
+ *offset_ptr += R.first;
+ } else
+ Value.uval = data.getUnsigned(offset_ptr, cu->getAddressByteSize());
+ }
break;
+ case DW_FORM_exprloc:
case DW_FORM_block:
Value.uval = data.getULEB128(offset_ptr);
is_block = true;
@@ -129,9 +147,17 @@ DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr,
case DW_FORM_sdata:
Value.sval = data.getSLEB128(offset_ptr);
break;
- case DW_FORM_strp:
- Value.uval = data.getU32(offset_ptr);
+ case DW_FORM_strp: {
+ RelocAddrMap::const_iterator AI
+ = cu->getContext().relocMap().find(*offset_ptr);
+ if (AI != cu->getContext().relocMap().end()) {
+ const std::pair<uint8_t, int64_t> &R = AI->second;
+ Value.uval = R.second;
+ *offset_ptr += R.first;
+ } else
+ Value.uval = data.getU32(offset_ptr);
break;
+ }
case DW_FORM_udata:
case DW_FORM_ref_udata:
Value.uval = data.getULEB128(offset_ptr);
@@ -141,12 +167,24 @@ DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr,
// Set the string value to also be the data for inlined cstr form
// values only so we can tell the difference between DW_FORM_string
// and DW_FORM_strp form values
- Value.data = (uint8_t*)Value.cstr;
+ Value.data = (const uint8_t*)Value.cstr;
break;
case DW_FORM_indirect:
Form = data.getULEB128(offset_ptr);
indirect = true;
break;
+ case DW_FORM_sec_offset:
+ if (cu->getAddressByteSize() == 4)
+ Value.uval = data.getU32(offset_ptr);
+ else
+ Value.uval = data.getU64(offset_ptr);
+ break;
+ case DW_FORM_flag_present:
+ Value.uval = 1;
+ break;
+ case DW_FORM_ref_sig8:
+ Value.uval = data.getU64(offset_ptr);
+ break;
default:
return false;
}
@@ -179,6 +217,7 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data,
switch (form) {
// Blocks of inlined data that have a length field and the data bytes
// inlined in the .debug_info
+ case DW_FORM_exprloc:
case DW_FORM_block: {
uint64_t size = debug_info_data.getULEB128(offset_ptr);
*offset_ptr += size;
@@ -211,6 +250,10 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data,
*offset_ptr += cu->getAddressByteSize();
return true;
+ // 0 byte values - implied from the form.
+ case DW_FORM_flag_present:
+ return true;
+
// 1 byte values
case DW_FORM_data1:
case DW_FORM_flag:
@@ -234,6 +277,7 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data,
// 8 byte values
case DW_FORM_data8:
case DW_FORM_ref8:
+ case DW_FORM_ref_sig8:
*offset_ptr += 8;
return true;
@@ -249,6 +293,15 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data,
indirect = true;
form = debug_info_data.getULEB128(offset_ptr);
break;
+
+ // 4 for DWARF32, 8 for DWARF64.
+ case DW_FORM_sec_offset:
+ if (cu->getAddressByteSize() == 4)
+ *offset_ptr += 4;
+ else
+ *offset_ptr += 8;
+ return true;
+
default:
return false;
}
@@ -264,22 +317,26 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const {
switch (Form) {
case DW_FORM_addr: OS << format("0x%016" PRIx64, uvalue); break;
+ case DW_FORM_flag_present: OS << "true"; break;
case DW_FORM_flag:
case DW_FORM_data1: OS << format("0x%02x", (uint8_t)uvalue); break;
case DW_FORM_data2: OS << format("0x%04x", (uint16_t)uvalue); break;
case DW_FORM_data4: OS << format("0x%08x", (uint32_t)uvalue); break;
+ case DW_FORM_ref_sig8:
case DW_FORM_data8: OS << format("0x%016" PRIx64, uvalue); break;
case DW_FORM_string:
OS << '"';
OS.write_escaped(getAsCString(NULL));
OS << '"';
break;
+ case DW_FORM_exprloc:
case DW_FORM_block:
case DW_FORM_block1:
case DW_FORM_block2:
case DW_FORM_block4:
if (uvalue > 0) {
switch (Form) {
+ case DW_FORM_exprloc:
case DW_FORM_block: OS << format("<0x%" PRIx64 "> ", uvalue); break;
case DW_FORM_block1: OS << format("<0x%2.2x> ", (uint8_t)uvalue); break;
case DW_FORM_block2: OS << format("<0x%4.4x> ", (uint16_t)uvalue); break;
@@ -342,6 +399,14 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const {
case DW_FORM_indirect:
OS << "DW_FORM_indirect";
break;
+
+ case DW_FORM_sec_offset:
+ if (cu->getAddressByteSize() == 4)
+ OS << format("0x%08x", (uint32_t)uvalue);
+ else
+ OS << format("0x%016" PRIx64, uvalue);
+ break;
+
default:
OS << format("DW_FORM(0x%4.4x)", Form);
break;
@@ -404,6 +469,7 @@ const uint8_t *DWARFFormValue::BlockData() const {
bool DWARFFormValue::isBlockForm(uint16_t form) {
switch (form) {
+ case DW_FORM_exprloc:
case DW_FORM_block:
case DW_FORM_block1:
case DW_FORM_block2:
diff --git a/contrib/llvm/lib/DebugInfo/DWARFFormValue.h b/contrib/llvm/lib/DebugInfo/DWARFFormValue.h
index 22ac011..c5b590d 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFFormValue.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFFormValue.h
@@ -52,7 +52,7 @@ public:
bool extractValue(DataExtractor data, uint32_t *offset_ptr,
const DWARFCompileUnit *cu);
bool isInlinedCStr() const {
- return Value.data != NULL && Value.data == (uint8_t*)Value.cstr;
+ return Value.data != NULL && Value.data == (const uint8_t*)Value.cstr;
}
const uint8_t *BlockData() const;
uint64_t getReference(const DWARFCompileUnit* cu) const;
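
The forms added in this change have sizes the skip logic depends on:
DW_FORM_flag_present occupies no bytes, DW_FORM_ref_sig8 is always 8 bytes,
and DW_FORM_sec_offset is 4 or 8 bytes. A small sketch of that dispatch;
like the patch, it keys DW_FORM_sec_offset on the address byte size rather
than on the DWARF32/DWARF64 format:

#include <cstdint>

// Form codes from the tables above.
const uint16_t FORM_sec_offset   = 0x17;
const uint16_t FORM_flag_present = 0x19;
const uint16_t FORM_ref_sig8     = 0x20;

// Returns the fixed byte size of a form, or -1 if it is variable-sized or
// unknown to this sketch.
int formSize(uint16_t Form, uint8_t AddrByteSize) {
  switch (Form) {
  case FORM_flag_present: return 0;    // value implied by presence alone
  case FORM_ref_sig8:     return 8;    // 8-byte type signature
  case FORM_sec_offset:   return AddrByteSize == 4 ? 4 : 8;
  default:                return -1;
  }
}
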
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index a744d0c..05987f2 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -29,7 +29,7 @@
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include <cmath>
#include <cstring>
@@ -91,11 +91,11 @@ class GVMemoryBlock : public CallbackVH {
public:
/// \brief Returns the address the GlobalVariable should be written into. The
/// GVMemoryBlock object prefixes that.
- static char *Create(const GlobalVariable *GV, const TargetData& TD) {
+ static char *Create(const GlobalVariable *GV, const DataLayout& TD) {
Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
void *RawMemory = ::operator new(
- TargetData::RoundUpAlignment(sizeof(GVMemoryBlock),
+ DataLayout::RoundUpAlignment(sizeof(GVMemoryBlock),
TD.getPreferredAlignment(GV))
+ GVSize);
new(RawMemory) GVMemoryBlock(GV);
@@ -113,7 +113,7 @@ public:
} // anonymous namespace
char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
- return GVMemoryBlock::Create(GV, *getTargetData());
+ return GVMemoryBlock::Create(GV, *getDataLayout());
}
bool ExecutionEngine::removeModule(Module *M) {
@@ -267,7 +267,7 @@ public:
void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
const std::vector<std::string> &InputArgv) {
clear(); // Free the old contents.
- unsigned PtrSize = EE->getTargetData()->getPointerSize();
+ unsigned PtrSize = EE->getDataLayout()->getPointerSize();
Array = new char[(InputArgv.size()+1)*PtrSize];
DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n");
@@ -342,7 +342,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
#ifndef NDEBUG
/// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
- unsigned PtrSize = EE->getTargetData()->getPointerSize();
+ unsigned PtrSize = EE->getDataLayout()->getPointerSize();
for (unsigned i = 0; i < PtrSize; ++i)
if (*(i + (uint8_t*)Loc))
return false;
@@ -501,7 +501,8 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
return 0;
}
- if ((WhichEngine & EngineKind::JIT) && ExecutionEngine::JITCtor == 0) {
+ if ((WhichEngine & EngineKind::JIT) && ExecutionEngine::JITCtor == 0 &&
+ ExecutionEngine::MCJITCtor == 0) {
if (ErrorStr)
*ErrorStr = "JIT has not been linked in.";
}
@@ -643,15 +644,17 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
}
case Instruction::PtrToInt: {
GenericValue GV = getConstantValue(Op0);
- uint32_t PtrWidth = TD->getPointerSizeInBits();
+ uint32_t PtrWidth = TD->getTypeSizeInBits(Op0->getType());
+ assert(PtrWidth <= 64 && "Bad pointer width");
GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
+ uint32_t IntWidth = TD->getTypeSizeInBits(CE->getType());
+ GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
return GV;
}
case Instruction::IntToPtr: {
GenericValue GV = getConstantValue(Op0);
- uint32_t PtrWidth = TD->getPointerSizeInBits();
- if (PtrWidth != GV.IntVal.getBitWidth())
- GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
+ uint32_t PtrWidth = TD->getTypeSizeInBits(CE->getType());
+ GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
return GV;
@@ -832,7 +835,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
unsigned StoreBytes) {
assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
- uint8_t *Src = (uint8_t *)IntVal.getRawData();
+ const uint8_t *Src = (const uint8_t *)IntVal.getRawData();
if (sys::isLittleEndianHost()) {
// Little-endian host - the source is ordered from LSB to MSB. Order the
@@ -855,7 +858,7 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
GenericValue *Ptr, Type *Ty) {
- const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty);
+ const unsigned StoreBytes = getDataLayout()->getTypeStoreSize(Ty);
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
@@ -881,7 +884,7 @@ void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
dbgs() << "Cannot store value of type " << *Ty << "!\n";
}
- if (sys::isLittleEndianHost() != getTargetData()->isLittleEndian())
+ if (sys::isLittleEndianHost() != getDataLayout()->isLittleEndian())
// Host and target are different endian - reverse the stored bytes.
std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
}
@@ -917,7 +920,7 @@ static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
GenericValue *Ptr,
Type *Ty) {
- const unsigned LoadBytes = getTargetData()->getTypeStoreSize(Ty);
+ const unsigned LoadBytes = getDataLayout()->getTypeStoreSize(Ty);
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
@@ -958,20 +961,20 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
unsigned ElementSize =
- getTargetData()->getTypeAllocSize(CP->getType()->getElementType());
+ getDataLayout()->getTypeAllocSize(CP->getType()->getElementType());
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
return;
}
if (isa<ConstantAggregateZero>(Init)) {
- memset(Addr, 0, (size_t)getTargetData()->getTypeAllocSize(Init->getType()));
+ memset(Addr, 0, (size_t)getDataLayout()->getTypeAllocSize(Init->getType()));
return;
}
if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
unsigned ElementSize =
- getTargetData()->getTypeAllocSize(CPA->getType()->getElementType());
+ getDataLayout()->getTypeAllocSize(CPA->getType()->getElementType());
for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
return;
@@ -979,7 +982,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
const StructLayout *SL =
- getTargetData()->getStructLayout(cast<StructType>(CPS->getType()));
+ getDataLayout()->getStructLayout(cast<StructType>(CPS->getType()));
for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
return;
@@ -1126,7 +1129,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
InitializeMemory(GV->getInitializer(), GA);
Type *ElTy = GV->getType()->getElementType();
- size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
+ size_t GVSize = (size_t)getDataLayout()->getTypeAllocSize(ElTy);
NumInitBytes += (unsigned)GVSize;
++NumGlobals;
}
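
The PtrToInt/IntToPtr fix sizes the folded integer by the destination type
and resizes it with APInt::zextOrTrunc. A toy model of that resize on plain
64-bit values; zextOrTrunc here is a stand-in for the APInt method, limited
to widths of at most 64 bits:

#include <cassert>
#include <cstdint>

uint64_t zextOrTrunc(uint64_t V, unsigned FromBits, unsigned ToBits) {
  assert(FromBits >= 1 && FromBits <= 64 && ToBits >= 1 && ToBits <= 64);
  if (ToBits >= FromBits)
    return V;                         // zero-extension leaves the value as-is
  return V & ((1ULL << ToBits) - 1);  // truncation keeps the low ToBits bits
}

// For example, folding ptrtoint of a 64-bit pointer value to i32 keeps the
// low 32 bits: zextOrTrunc(0x1122334455667788ULL, 64, 32) == 0x55667788.
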
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
index 75e680a..1e790e7 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -239,7 +239,7 @@ void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE, LLVMValueRef Fn)
}
LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
- return wrap(unwrap(EE)->getTargetData());
+ return wrap(unwrap(EE)->getDataLayout());
}
void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
index c11c17e..4cb0270 100644
--- a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
@@ -22,12 +22,12 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/ExecutionEngine/IntelJITEventsWrapper.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/ValueHandle.h"
#include "EventListenerCommon.h"
+#include "IntelJITEventsWrapper.h"
using namespace llvm;
using namespace llvm::jitprofiling;
@@ -37,13 +37,13 @@ namespace {
class IntelJITEventListener : public JITEventListener {
typedef DenseMap<void*, unsigned int> MethodIDMap;
- IntelJITEventsWrapper& Wrapper;
+ OwningPtr<IntelJITEventsWrapper> Wrapper;
MethodIDMap MethodIDs;
FilenameCache Filenames;
public:
- IntelJITEventListener(IntelJITEventsWrapper& libraryWrapper)
- : Wrapper(libraryWrapper) {
+ IntelJITEventListener(IntelJITEventsWrapper* libraryWrapper) {
+ Wrapper.reset(libraryWrapper);
}
~IntelJITEventListener() {
@@ -54,6 +54,10 @@ public:
const EmittedFunctionDetails &Details);
virtual void NotifyFreeingMachineCode(void *OldPtr);
+
+ virtual void NotifyObjectEmitted(const ObjectImage &Obj);
+
+ virtual void NotifyFreeingObject(const ObjectImage &Obj);
};
static LineNumberInfo LineStartToIntelJITFormat(
@@ -94,7 +98,7 @@ static iJIT_Method_Load FunctionDescToIntelJITFormat(
void IntelJITEventListener::NotifyFunctionEmitted(
const Function &F, void *FnStart, size_t FnSize,
const EmittedFunctionDetails &Details) {
- iJIT_Method_Load FunctionMessage = FunctionDescToIntelJITFormat(Wrapper,
+ iJIT_Method_Load FunctionMessage = FunctionDescToIntelJITFormat(*Wrapper,
F.getName().data(),
reinterpret_cast<uint64_t>(FnStart),
FnSize);
@@ -151,32 +155,36 @@ void IntelJITEventListener::NotifyFunctionEmitted(
FunctionMessage.line_number_table = 0;
}
- Wrapper.iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
- &FunctionMessage);
+ Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ &FunctionMessage);
MethodIDs[FnStart] = FunctionMessage.method_id;
}
void IntelJITEventListener::NotifyFreeingMachineCode(void *FnStart) {
MethodIDMap::iterator I = MethodIDs.find(FnStart);
if (I != MethodIDs.end()) {
- Wrapper.iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_UNLOAD_START, &I->second);
+ Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_UNLOAD_START, &I->second);
MethodIDs.erase(I);
}
}
+void IntelJITEventListener::NotifyObjectEmitted(const ObjectImage &Obj) {
+}
+
+void IntelJITEventListener::NotifyFreeingObject(const ObjectImage &Obj) {
+}
+
} // anonymous namespace.
namespace llvm {
JITEventListener *JITEventListener::createIntelJITEventListener() {
- static OwningPtr<IntelJITEventsWrapper> JITProfilingWrapper(
- new IntelJITEventsWrapper);
- return new IntelJITEventListener(*JITProfilingWrapper);
+ return new IntelJITEventListener(new IntelJITEventsWrapper);
}
// for testing
JITEventListener *JITEventListener::createIntelJITEventListener(
IntelJITEventsWrapper* TestImpl) {
- return new IntelJITEventListener(*TestImpl);
+ return new IntelJITEventListener(TestImpl);
}
} // namespace llvm
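
The listener change replaces a reference to a function-local static wrapper
with an OwningPtr member, so each listener owns and eventually deletes its
wrapper. A compact sketch of the pattern; ScopedPtr, Wrapper and Listener
are illustrative stand-ins, not the LLVM classes:

struct Wrapper { void notify() {} };

template <typename T> class ScopedPtr {
  T *Ptr;
  ScopedPtr(const ScopedPtr&);             // non-copyable
  ScopedPtr &operator=(const ScopedPtr&);
public:
  explicit ScopedPtr(T *P = 0) : Ptr(P) {}
  ~ScopedPtr() { delete Ptr; }             // owner frees the object
  T *operator->() const { return Ptr; }
};

class Listener {
  ScopedPtr<Wrapper> W;
public:
  explicit Listener(Wrapper *P) : W(P) {}  // takes ownership of P
  void fire() { W->notify(); }
};                                         // Wrapper dies with the Listener

// Usage mirrors createIntelJITEventListener():
//   Listener *L = new Listener(new Wrapper);
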
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h
index ca87342..7ab08e1 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h
@@ -18,7 +18,7 @@
#ifndef INTEL_JIT_EVENTS_WRAPPER_H
#define INTEL_JIT_EVENTS_WRAPPER_H
-#include <jitprofiling.h>
+#include "jitprofiling.h"
namespace llvm {
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h
new file mode 100644
index 0000000..1f029fb
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h
@@ -0,0 +1,454 @@
+/*===-- ittnotify_config.h - JIT Profiling API internal config ----*- C -*-===*
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API internal config.
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef _ITTNOTIFY_CONFIG_H_
+#define _ITTNOTIFY_CONFIG_H_
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+# define ITT_OS_WIN 1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+# define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+# define ITT_OS_MAC 3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+# if defined WIN32 || defined _WIN32
+# define ITT_OS ITT_OS_WIN
+# elif defined( __APPLE__ ) && defined( __MACH__ )
+# define ITT_OS ITT_OS_MAC
+# else
+# define ITT_OS ITT_OS_LINUX
+# endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+# define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+# define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+# if ITT_OS==ITT_OS_WIN
+# define ITT_PLATFORM ITT_PLATFORM_WIN
+# else
+# define ITT_PLATFORM ITT_PLATFORM_POSIX
+# endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#if defined(_UNICODE) && !defined(UNICODE)
+#define UNICODE
+#endif
+
+#include <stddef.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <stdint.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE || _UNICODE */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define CDECL __cdecl
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not actual on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define STDCALL __stdcall
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define STDCALL /* not supported on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define STDCALL __attribute__ ((stdcall))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI CDECL
+#define LIBITTAPI CDECL
+
+/* TODO: Temporary for compatibility! */
+#define ITTAPI_CALL CDECL
+#define LIBITTAPI_CALL CDECL
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+/* use __forceinline (VC++ specific) */
+#define ITT_INLINE __forceinline
+#define ITT_INLINE_ATTRIBUTE /* nothing */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/*
+ * Generally, functions are not inlined unless optimization is specified.
+ * For functions declared inline, this attribute inlines the function even
+ * if no optimization level was specified.
+ */
+#ifdef __STRICT_ANSI__
+#define ITT_INLINE static
+#else /* __STRICT_ANSI__ */
+#define ITT_INLINE static inline
+#endif /* __STRICT_ANSI__ */
+#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/** @endcond */
+
+#ifndef ITT_ARCH_IA32
+# define ITT_ARCH_IA32 1
+#endif /* ITT_ARCH_IA32 */
+
+#ifndef ITT_ARCH_IA32E
+# define ITT_ARCH_IA32E 2
+#endif /* ITT_ARCH_IA32E */
+
+#ifndef ITT_ARCH_IA64
+# define ITT_ARCH_IA64 3
+#endif /* ITT_ARCH_IA64 */
+
+#ifndef ITT_ARCH
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define ITT_ARCH ITT_ARCH_IA32E
+# elif defined _M_IA64 || defined __ia64
+# define ITT_ARCH ITT_ARCH_IA64
+# else
+# define ITT_ARCH ITT_ARCH_IA32
+# endif
+#endif
+
+#ifdef __cplusplus
+# define ITT_EXTERN_C extern "C"
+#else
+# define ITT_EXTERN_C /* nothing */
+#endif /* __cplusplus */
+
+#define ITT_TO_STR_AUX(x) #x
+#define ITT_TO_STR(x) ITT_TO_STR_AUX(x)
+
+#define __ITT_BUILD_ASSERT(expr, suffix) do { \
+ static char __itt_build_check_##suffix[(expr) ? 1 : -1]; \
+ __itt_build_check_##suffix[0] = 0; \
+} while(0)
+#define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix)
+#define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__)
+
+#define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 }
+
+/* Replace with snapshot date YYYYMMDD for promotion build. */
+#define API_VERSION_BUILD 20111111
+
+#ifndef API_VERSION_NUM
+#define API_VERSION_NUM 0.0.0
+#endif /* API_VERSION_NUM */
+
+#define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) \
+ " (" ITT_TO_STR(API_VERSION_BUILD) ")"
+
+/* OS communication functions */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+typedef HMODULE lib_t;
+typedef DWORD TIDT;
+typedef CRITICAL_SECTION mutex_t;
+#define MUTEX_INITIALIZER { 0 }
+#define strong_alias(name, aliasname) /* empty for Windows */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <dlfcn.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1 /* needed for PTHREAD_MUTEX_RECURSIVE */
+#endif /* _GNU_SOURCE */
+#include <pthread.h>
+typedef void* lib_t;
+typedef pthread_t TIDT;
+typedef pthread_mutex_t mutex_t;
+#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+#define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_get_proc(lib, name) GetProcAddress(lib, name)
+#define __itt_mutex_init(mutex) InitializeCriticalSection(mutex)
+#define __itt_mutex_lock(mutex) EnterCriticalSection(mutex)
+#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)
+#define __itt_load_lib(name) LoadLibraryA(name)
+#define __itt_unload_lib(handle) FreeLibrary(handle)
+#define __itt_system_error() (int)GetLastError()
+#define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2)
+#define __itt_fstrlen(s) lstrlenA(s)
+#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l)
+#define __itt_fstrdup(s) _strdup(s)
+#define __itt_thread_id() GetCurrentThreadId()
+#define __itt_thread_yield() SwitchToThread()
+#ifndef ITT_SIMPLE_INIT
+ITT_INLINE long
+__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
+{
+ return InterlockedIncrement(ptr);
+}
+#endif /* ITT_SIMPLE_INIT */
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+#define __itt_get_proc(lib, name) dlsym(lib, name)
+#define __itt_mutex_init(mutex) {\
+ pthread_mutexattr_t mutex_attr; \
+ int error_code = pthread_mutexattr_init(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_init", \
+ error_code); \
+ error_code = pthread_mutexattr_settype(&mutex_attr, \
+ PTHREAD_MUTEX_RECURSIVE); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", \
+ error_code); \
+ error_code = pthread_mutex_init(mutex, &mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutex_init", \
+ error_code); \
+ error_code = pthread_mutexattr_destroy(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", \
+ error_code); \
+}
+#define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex)
+#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)
+#define __itt_load_lib(name) dlopen(name, RTLD_LAZY)
+#define __itt_unload_lib(handle) dlclose(handle)
+#define __itt_system_error() errno
+#define __itt_fstrcmp(s1, s2) strcmp(s1, s2)
+#define __itt_fstrlen(s) strlen(s)
+#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l)
+#define __itt_fstrdup(s) strdup(s)
+#define __itt_thread_id() pthread_self()
+#define __itt_thread_yield() sched_yield()
+#if ITT_ARCH==ITT_ARCH_IA64
+#ifdef __INTEL_COMPILER
+#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)
+#else /* __INTEL_COMPILER */
+/* TODO: Add Support for not Intel compilers for IA64 */
+#endif /* __INTEL_COMPILER */
+#else /* ITT_ARCH!=ITT_ARCH_IA64 */
+ITT_INLINE long
+__TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
+{
+ long result;
+ __asm__ __volatile__("lock\nxadd %0,%1"
+ : "=r"(result),"=m"(*(long*)ptr)
+ : "0"(addend), "m"(*(long*)ptr)
+ : "memory");
+ return result;
+}
+#endif /* ITT_ARCH==ITT_ARCH_IA64 */
+#ifndef ITT_SIMPLE_INIT
+ITT_INLINE long
+__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
+{
+ return __TBB_machine_fetchadd4(ptr, 1) + 1L;
+}
+#endif /* ITT_SIMPLE_INIT */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+typedef enum {
+ __itt_collection_normal = 0,
+ __itt_collection_paused = 1
+} __itt_collection_state;
+
+typedef enum {
+ __itt_thread_normal = 0,
+ __itt_thread_ignored = 1
+} __itt_thread_state;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_thread_info
+{
+ const char* nameA; /*!< Copy of original name in ASCII. */
+#if defined(UNICODE) || defined(_UNICODE)
+ const wchar_t* nameW; /*!< Copy of original name in UNICODE. */
+#else /* UNICODE || _UNICODE */
+ void* nameW;
+#endif /* UNICODE || _UNICODE */
+ TIDT tid;
+ __itt_thread_state state; /*!< Thread state (paused or normal) */
+ int extra1; /*!< Reserved to the runtime */
+ void* extra2; /*!< Reserved to the runtime */
+ struct ___itt_thread_info* next;
+} __itt_thread_info;
+
+#include "ittnotify_types.h" /* For __itt_group_id definition */
+
+typedef struct ___itt_api_info_20101001
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ __itt_group_id group;
+} __itt_api_info_20101001;
+
+typedef struct ___itt_api_info
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ void* null_func;
+ __itt_group_id group;
+} __itt_api_info;
+
+struct ___itt_domain;
+struct ___itt_string_handle;
+
+typedef struct ___itt_global
+{
+ unsigned char magic[8];
+ unsigned long version_major;
+ unsigned long version_minor;
+ unsigned long version_build;
+ volatile long api_initialized;
+ volatile long mutex_initialized;
+ volatile long atomic_counter;
+ mutex_t mutex;
+ lib_t lib;
+ void* error_handler;
+ const char** dll_path_ptr;
+ __itt_api_info* api_list_ptr;
+ struct ___itt_global* next;
+ /* Joinable structures below */
+ __itt_thread_info* thread_list;
+ struct ___itt_domain* domain_list;
+ struct ___itt_string_handle* string_list;
+ __itt_collection_state state;
+} __itt_global;
+
+#pragma pack(pop)
+
+#define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = NULL; \
+ h->nameW = n ? _wcsdup(n) : NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = n ? __itt_fstrdup(n) : NULL; \
+ h->nameW = NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_W(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = NULL; \
+ h->nameW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_A(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = name ? __itt_fstrdup(name) : NULL; \
+ h->nameW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = NULL; \
+ h->strW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = name ? __itt_fstrdup(name) : NULL; \
+ h->strW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#endif /* _ITTNOTIFY_CONFIG_H_ */
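
On non-Windows targets __itt_interlocked_increment is built on a
fetch-and-add written in inline assembly. An illustrative equivalent for
GCC-compatible compilers, using the __sync_fetch_and_add builtin in place of
the header's lock xadd sequence:

static inline long interlockedIncrement(volatile long *Ptr) {
  // Atomically add 1 and return the incremented value, matching the
  // semantics of __itt_interlocked_increment above.
  return __sync_fetch_and_add(Ptr, 1L) + 1L;
}
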
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h
new file mode 100644
index 0000000..5df752f
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h
@@ -0,0 +1,70 @@
+/*===-- ittnotify_types.h - JIT Profiling API internal types -------*- C -*-===*
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef _ITTNOTIFY_TYPES_H_
+#define _ITTNOTIFY_TYPES_H_
+
+typedef enum ___itt_group_id
+{
+ __itt_group_none = 0,
+ __itt_group_legacy = 1<<0,
+ __itt_group_control = 1<<1,
+ __itt_group_thread = 1<<2,
+ __itt_group_mark = 1<<3,
+ __itt_group_sync = 1<<4,
+ __itt_group_fsync = 1<<5,
+ __itt_group_jit = 1<<6,
+ __itt_group_model = 1<<7,
+ __itt_group_splitter_min = 1<<7,
+ __itt_group_counter = 1<<8,
+ __itt_group_frame = 1<<9,
+ __itt_group_stitch = 1<<10,
+ __itt_group_heap = 1<<11,
+ __itt_group_splitter_max = 1<<12,
+ __itt_group_structure = 1<<12,
+ __itt_group_suppress = 1<<13,
+ __itt_group_all = -1
+} __itt_group_id;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_group_list
+{
+ __itt_group_id id;
+ const char* name;
+} __itt_group_list;
+
+#pragma pack(pop)
+
+#define ITT_GROUP_LIST(varname) \
+ static __itt_group_list varname[] = { \
+ { __itt_group_all, "all" }, \
+ { __itt_group_control, "control" }, \
+ { __itt_group_thread, "thread" }, \
+ { __itt_group_mark, "mark" }, \
+ { __itt_group_sync, "sync" }, \
+ { __itt_group_fsync, "fsync" }, \
+ { __itt_group_jit, "jit" }, \
+ { __itt_group_model, "model" }, \
+ { __itt_group_counter, "counter" }, \
+ { __itt_group_frame, "frame" }, \
+ { __itt_group_stitch, "stitch" }, \
+ { __itt_group_heap, "heap" }, \
+ { __itt_group_structure, "structure" }, \
+ { __itt_group_suppress, "suppress" }, \
+ { __itt_group_none, NULL } \
+ }
+
+#endif /* _ITTNOTIFY_TYPES_H_ */
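
Since the group ids above are bit flags, masks are combined and tested
bitwise. A trivial example that repeats two of the values for illustration:

#include <cstdio>

enum GroupId { GroupJit = 1 << 6, GroupModel = 1 << 7 };

int main() {
  unsigned Mask = GroupJit | GroupModel;   // enable two groups
  if (Mask & GroupJit)                     // bitwise membership test
    std::printf("jit group enabled\n");
  return 0;
}
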
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c
new file mode 100644
index 0000000..7b507de
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c
@@ -0,0 +1,481 @@
+/*===-- jitprofiling.c - JIT (Just-In-Time) Profiling API ---------*- C -*-===*
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API implementation.
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#include "ittnotify_config.h"
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+#pragma optimize("", off)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <pthread.h>
+#include <dlfcn.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <malloc.h>
+#include <stdlib.h>
+
+#include "jitprofiling.h"
+
+static const char rcsid[] = "\n@(#) $Revision: 243501 $\n";
+
+#define DLL_ENVIRONMENT_VAR "VS_PROFILER"
+
+#ifndef NEW_DLL_ENVIRONMENT_VAR
+#if ITT_ARCH==ITT_ARCH_IA32
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER32"
+#else
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER64"
+#endif
+#endif /* NEW_DLL_ENVIRONMENT_VAR */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define DEFAULT_DLLNAME "JitPI.dll"
+HINSTANCE m_libHandle = NULL;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define DEFAULT_DLLNAME "libJitPI.so"
+void* m_libHandle = NULL;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/* default location of JIT profiling agent on Android */
+#define ANDROID_JIT_AGENT_PATH "/data/intel/libittnotify.so"
+
+/* the function pointers */
+typedef unsigned int(*TPInitialize)(void);
+static TPInitialize FUNC_Initialize=NULL;
+
+typedef unsigned int(*TPNotify)(unsigned int, void*);
+static TPNotify FUNC_NotifyEvent=NULL;
+
+static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING;
+
+/* end collector dll part. */
+
+/* loadiJIT_Funcs() : this function is called once at startup and is
+ * responsible for loading the functions from BistroJavaCollector.dll
+ * result:
+ * on success: the functions loads, iJIT_DLL_is_missing=0, return value = 1
+ * on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0
+ */
+static int loadiJIT_Funcs(void);
+
+/* global representing whether the BistroJavaCollector can't be loaded */
+static int iJIT_DLL_is_missing = 0;
+
+/* Virtual stack - the struct is used as a virtual stack for each thread.
+ * Every thread is initialized with a stack of size INIT_TOP_Stack.
+ * Every method entry decrements the current stack pointer, and when a
+ * thread's stack reaches its top of stack (i.e. on return from the global
+ * function), the top of stack and the current stack are increased. Notice
+ * that when returning from a function the stack pointer is the address of
+ * the function return.
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+static DWORD threadLocalStorageHandle = 0;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#define INIT_TOP_Stack 10000
+
+typedef struct
+{
+ unsigned int TopStack;
+ unsigned int CurrentStack;
+} ThreadStack, *pThreadStack;
+
+/* end of virtual stack. */
+
+/*
+ * The function for reporting virtual-machine related events to VTune.
+ * Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill
+ * in the stack_id field in the iJIT_Method_NIDS structure, as VTune fills it.
+ * The return value for iJVM_EVENT_TYPE_ENTER_NIDS and
+ * iJVM_EVENT_TYPE_LEAVE_NIDS events will be 0 in case of failure.
+ * For the iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event it will be -1
+ * if EventSpecificData == 0, otherwise 0.
+ */
+
+ITT_EXTERN_C int JITAPI
+iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData)
+{
+ int ReturnValue;
+
+ /*
+ * This section is for debugging outside of VTune.
+ * It creates the environment variable that indicates call graph mode.
+ * If running outside of VTune, uncomment the code below.
+ *
+ *
+ * static int firstTime = 1;
+ * char DoCallGraph[12] = "DoCallGraph";
+ * if (firstTime)
+ * {
+ * firstTime = 0;
+ * SetEnvironmentVariable( "BISTRO_COLLECTORS_DO_CALLGRAPH", DoCallGraph);
+ * }
+ *
+ * end of section.
+ */
+
+ /* initialization part - the functions have not been loaded yet. This part
+ * will load the functions, and check if we are in Call Graph mode.
+ * (for special treatment).
+ */
+ if (!FUNC_NotifyEvent)
+ {
+ if (iJIT_DLL_is_missing)
+ return 0;
+
+ /* load the Function from the DLL */
+ if (!loadiJIT_Funcs())
+ return 0;
+
+ /* Call Graph initialization. */
+ }
+
+ /* If the event is method entry/exit, check that in the current mode
+ * VTune is allowed to receive it
+ */
+ if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS ||
+ event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
+ (executionMode != iJIT_CALLGRAPH_ON))
+ {
+ return 0;
+ }
+ /* This section is performed when method enter event occurs.
+ * It updates the virtual stack, or creates it if this is the first
+ * method entry in the thread. The stack pointer is decreased.
+ */
+ if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ /* initialize the stack. */
+ threadStack = (pThreadStack) calloc (sizeof(ThreadStack), 1);
+ threadStack->TopStack = INIT_TOP_Stack;
+ threadStack->CurrentStack = INIT_TOP_Stack;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue(threadLocalStorageHandle,(void*)threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle,(void*)threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ /* decrease the stack. */
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ (threadStack->CurrentStack)--;
+ }
+
+ /* This section is performed when a method leave event occurs.
+ * It updates the virtual stack.
+ * Increases the stack pointer.
+ * If the stack pointer reached the top (left the global function)
+ * increase the pointer and the top pointer.
+ */
+ if (event_type == iJVM_EVENT_TYPE_LEAVE_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ /* Error: first report in this thread is method exit */
+ exit (1);
+ }
+
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ ++(threadStack->CurrentStack) + 1;
+
+ if (((piJIT_Method_NIDS) EventSpecificData)->stack_id
+ > threadStack->TopStack)
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ (unsigned int)-1;
+ }
+
+ if (event_type == iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED)
+ {
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_Load) EventSpecificData)->method_id <= 999 )
+ return 0;
+ }
+
+ ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
+
+ return ReturnValue;
+}
+
+/* The new mode call back routine */
+ITT_EXTERN_C void JITAPI
+iJIT_RegisterCallbackEx(void *userdata, iJIT_ModeChangedEx
+ NewModeCallBackFuncEx)
+{
+ /* is it already missing... or the load of functions from the DLL failed */
+ if (iJIT_DLL_is_missing || !loadiJIT_Funcs())
+ {
+ /* then do not bother with notifications */
+ NewModeCallBackFuncEx(userdata, iJIT_NO_NOTIFICATIONS);
+ /* Error: could not load JIT functions. */
+ return;
+ }
+ /* nothing to do with the callback */
+}
+
+/*
+ * This function allows the user to query in which mode, if at all,
+ * VTune is running.
+ */
+ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive()
+{
+ if (!iJIT_DLL_is_missing)
+ {
+ loadiJIT_Funcs();
+ }
+
+ return executionMode;
+}
+
+/* This function loads the collector dll (BistroJavaCollector)
+ * and the relevant functions.
+ * on success: all functions load, iJIT_DLL_is_missing = 0, return value = 1
+ * on failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0
+ */
+static int loadiJIT_Funcs()
+{
+ static int bDllWasLoaded = 0;
+ char *dllName = (char*)rcsid; /* !! Just to avoid unused code elimination */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ DWORD dNameLength = 0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if(bDllWasLoaded)
+ {
+ /* dll was already loaded, no need to do it for the second time */
+ return 1;
+ }
+
+ /* Assumes that the DLL will not be found */
+ iJIT_DLL_is_missing = 1;
+ FUNC_NotifyEvent = NULL;
+
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ /* Try to get the dll name from the environment */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ dNameLength = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR,
+ dllName, dNameLength);
+ if (envret)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = LoadLibraryExA(dllName,
+ NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+ }
+ free(dllName);
+ } else {
+ /* Try to use old VS_PROFILER variable */
+ dNameLength = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR,
+ dllName, dNameLength);
+ if (envret)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = LoadLibraryA(dllName);
+ }
+ free(dllName);
+ }
+ }
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dllName = getenv(NEW_DLL_ENVIRONMENT_VAR);
+ if (!dllName)
+ dllName = getenv(DLL_ENVIRONMENT_VAR);
+#ifdef ANDROID
+ if (!dllName)
+ dllName = ANDROID_JIT_AGENT_PATH;
+#endif
+ if (dllName)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = dlopen(dllName, RTLD_LAZY);
+ }
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if (!m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ m_libHandle = LoadLibraryA(DEFAULT_DLLNAME);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = dlopen(DEFAULT_DLLNAME, RTLD_LAZY);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ /* if the dll wasn't loaded - exit. */
+ if (!m_libHandle)
+ {
+ iJIT_DLL_is_missing = 1; /* don't try to initialize
+ * JIT agent the second time
+ */
+ return 0;
+ }
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_NotifyEvent = (TPNotify)GetProcAddress(m_libHandle, "NotifyEvent");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_NotifyEvent = (TPNotify)dlsym(m_libHandle, "NotifyEvent");
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_NotifyEvent)
+ {
+ FUNC_Initialize = NULL;
+ return 0;
+ }
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_Initialize = (TPInitialize)GetProcAddress(m_libHandle, "Initialize");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_Initialize = (TPInitialize)dlsym(m_libHandle, "Initialize");
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_Initialize)
+ {
+ FUNC_NotifyEvent = NULL;
+ return 0;
+ }
+
+ executionMode = (iJIT_IsProfilingActiveFlags)FUNC_Initialize();
+
+ bDllWasLoaded = 1;
+ iJIT_DLL_is_missing = 0; /* DLL is ok. */
+
+ /*
+ * Call Graph mode: init the thread local storage
+ * (need to store the virtual stack there).
+ */
+ if ( executionMode == iJIT_CALLGRAPH_ON )
+ {
+ /* Allocate a thread local storage slot for the thread "stack" */
+ if (!threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ threadLocalStorageHandle = TlsAlloc();
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_create(&threadLocalStorageHandle, NULL);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ return 1;
+}
+
+/*
+ * This function should be called by the user whenever a thread ends,
+ * to free the thread "virtual stack" storage
+ */
+ITT_EXTERN_C void JITAPI FinalizeThread()
+{
+ if (threadLocalStorageHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (threadStack)
+ {
+ free (threadStack);
+ threadStack = NULL;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue (threadLocalStorageHandle, threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle, threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+ }
+}
+
+/*
+ * This function should be called by the user when the process ends,
+ * to free the local storage index
+ */
+ITT_EXTERN_C void JITAPI FinalizeProcess()
+{
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ if (threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsFree (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_delete(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+}
+
+/*
+ * This function should be called by the user for any method once.
+ * The function will return a unique method ID, the user should maintain
+ * the ID for each method
+ */
+ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID()
+{
+ static unsigned int methodID = 0x100000;
+
+ if (methodID == 0)
+ return 0; /* ERROR: this is not a valid value */
+
+ return methodID++;
+}
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
new file mode 100644
index 0000000..f08e287
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
@@ -0,0 +1,259 @@
+/*===-- jitprofiling.h - JIT Profiling API ------------------------*- C -*-===*
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API declaration.
+ *
+ * NOTE: This file comes in a style different from the rest of the LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef __JITPROFILING_H__
+#define __JITPROFILING_H__
+
+/*
+ * Various constants used by functions
+ */
+
+/* event notification */
+typedef enum iJIT_jvm_event
+{
+
+ /* shutdown */
+
+ /*
+ * Program exiting EventSpecificData NA
+ */
+ iJVM_EVENT_TYPE_SHUTDOWN = 2,
+
+ /* JIT profiling */
+
+ /*
+ * issued after method code has been jitted into memory but before the
+ * code is executed. EventSpecificData is an iJIT_Method_Load
+ */
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED = 13,
+
+ /* issued before unload. Method code will no longer be executed, but the
+ * code and info are still in memory. The VTune profiler may capture the
+ * method code only at this point. EventSpecificData is iJIT_Method_Id
+ */
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+
+ /* Method Profiling */
+
+ /* method name, ID, and stack are supplied
+ * issued when a method is about to be entered. EventSpecificData is
+ * iJIT_Method_NIDS
+ */
+ iJVM_EVENT_TYPE_ENTER_NIDS = 19,
+
+ /* method name, ID, and stack are supplied
+ * issued when a method is about to be left. EventSpecificData is
+ * iJIT_Method_NIDS
+ */
+ iJVM_EVENT_TYPE_LEAVE_NIDS
+} iJIT_JVM_EVENT;
+
+typedef enum _iJIT_ModeFlags
+{
+ /* No need to notify VTune, since VTune is not running */
+ iJIT_NO_NOTIFICATIONS = 0x0000,
+
+ /* when turned on the jit must call
+ * iJIT_NotifyEvent
+ * (
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ * )
+ * for all the methods already jitted
+ */
+ iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
+
+ /* when turned on the jit must call
+ * iJIT_NotifyEvent
+ * (
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
+ * ) for all the methods that are unloaded
+ */
+ iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
+
+ /* when turned on the jit must instrument all
+ * the currently jitted code with calls on
+ * method entries
+ */
+ iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
+
+ /* when turned on the jit must instrument all
+ * the currently jitted code with calls
+ * on method exit
+ */
+ iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
+
+} iJIT_ModeFlags;
+
+
+ /* Flags used by iJIT_IsProfilingActive() */
+typedef enum _iJIT_IsProfilingActiveFlags
+{
+ /* No profiler is running. Currently not used */
+ iJIT_NOTHING_RUNNING = 0x0000,
+
+ /* Sampling is running. This is the default value
+ * returned by iJIT_IsProfilingActive()
+ */
+ iJIT_SAMPLING_ON = 0x0001,
+
+ /* Call Graph is running */
+ iJIT_CALLGRAPH_ON = 0x0002
+
+} iJIT_IsProfilingActiveFlags;
+
+/* Enumerator for the environment of methods */
+typedef enum _iJDEnvironmentType
+{
+ iJDE_JittingAPI = 2
+} iJDEnvironmentType;
+
+/**********************************
+ * Data structures for the events *
+ **********************************/
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_START
+ */
+
+typedef struct _iJIT_Method_Id
+{
+ /* Id of the method (same as the one passed in
+ * the iJIT_Method_Load struct)
+ */
+ unsigned int method_id;
+
+} *piJIT_Method_Id, iJIT_Method_Id;
+
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_ENTER_NIDS,
+ * iJVM_EVENT_TYPE_LEAVE_NIDS,
+ * iJVM_EVENT_TYPE_EXCEPTION_OCCURRED_NIDS
+ */
+
+typedef struct _iJIT_Method_NIDS
+{
+ /* unique method ID */
+ unsigned int method_id;
+
+ /* NOTE: no need to fill this field, it's filled by VTune */
+ unsigned int stack_id;
+
+ /* method name (just the method, without the class) */
+ char* method_name;
+} *piJIT_Method_NIDS, iJIT_Method_NIDS;
+
+/* structures for the events:
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED
+ */
+
+typedef struct _LineNumberInfo
+{
+ /* x86 Offset from the beginning of the method */
+ unsigned int Offset;
+
+ /* source line number from the beginning of the source file */
+ unsigned int LineNumber;
+
+} *pLineNumberInfo, LineNumberInfo;
+
+typedef struct _iJIT_Method_Load
+{
+ /* unique method ID - can be any unique value except 0 - 999 */
+ unsigned int method_id;
+
+ /* method name (can be with or without the class and signature; in either case
+ * the class name will be added to it)
+ */
+ char* method_name;
+
+ /* virtual address of that method - This determines the method range for the
+ * iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
+ */
+ void* method_load_address;
+
+ /* Size in memory - Must be exact */
+ unsigned int method_size;
+
+ /* Line Table size in number of entries - Zero if none */
+ unsigned int line_number_size;
+
+ /* Pointer to the beginning of the line number info array */
+ pLineNumberInfo line_number_table;
+
+ /* unique class ID */
+ unsigned int class_id;
+
+ /* class file name */
+ char* class_file_name;
+
+ /* source file name */
+ char* source_file_name;
+
+ /* bits supplied by the user for saving in the JIT file */
+ void* user_data;
+
+ /* the size of the user data buffer */
+ unsigned int user_data_size;
+
+ /* NOTE: no need to fill this field, it's filled by VTune */
+ iJDEnvironmentType env;
+
+} *piJIT_Method_Load, iJIT_Method_Load;
+
+/* API Functions */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CDECL
+# if defined WIN32 || defined _WIN32
+# define CDECL __cdecl
+# else /* defined WIN32 || defined _WIN32 */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not applicable on the x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* defined WIN32 || defined _WIN32 */
+#endif /* CDECL */
+
+#define JITAPI CDECL
+
+/* called when the settings are changed with new settings */
+typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
+
+int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
+
+/* The new mode call back routine */
+void JITAPI iJIT_RegisterCallbackEx(void *userdata,
+ iJIT_ModeChangedEx NewModeCallBackFuncEx);
+
+iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
+
+void JITAPI FinalizeThread(void);
+
+void JITAPI FinalizeProcess(void);
+
+unsigned int JITAPI iJIT_GetNewMethodID(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __JITPROFILING_H__ */
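
A hedged usage sketch for the API declared above: allocate one ID per method with iJIT_GetNewMethodID(), then announce the generated code with a METHOD_LOAD_FINISHED event. Everything except the API calls (the function name, code pointer, and size) is invented for illustration:

    /* Sketch: announce one jitted method to the profiling agent. Optional
     * fields (stack_id, env, line table, ...) are zeroed; method_size must
     * be the exact size of the emitted code. */
    #include <string.h>
    #include "jitprofiling.h"

    static void announce_method(void *code, unsigned int size)
    {
        iJIT_Method_Load ml;
        memset(&ml, 0, sizeof(ml));
        ml.method_id = iJIT_GetNewMethodID();      /* one unique ID per method */
        ml.method_name = (char *)"demo_function";  /* hypothetical name */
        ml.method_load_address = code;
        ml.method_size = size;
        iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, &ml);
    }
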
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index 7a206eb..e16e2d1 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -25,7 +25,7 @@
#include "llvm/Config/config.h" // Detect libffi
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Mutex.h"
#include <csignal>
@@ -180,7 +180,7 @@ static void *ffiValueFor(Type *Ty, const GenericValue &AV,
static bool ffiInvoke(RawFunc Fn, Function *F,
const std::vector<GenericValue> &ArgVals,
- const TargetData *TD, GenericValue &Result) {
+ const DataLayout *TD, GenericValue &Result) {
ffi_cif cif;
FunctionType *FTy = F->getFunctionType();
const unsigned NumArgs = F->arg_size();
@@ -276,7 +276,7 @@ GenericValue Interpreter::callExternalFunction(Function *F,
FunctionsLock->release();
GenericValue Result;
- if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getTargetData(), Result))
+ if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result))
return Result;
#endif // USE_LIBFFI
@@ -376,7 +376,7 @@ GenericValue lle_X_sprintf(FunctionType *FT,
case 'x': case 'X':
if (HowLong >= 1) {
if (HowLong == 1 &&
- TheInterpreter->getTargetData()->getPointerSizeInBits() == 64 &&
+ TheInterpreter->getDataLayout()->getPointerSizeInBits() == 64 &&
sizeof(long) < sizeof(int64_t)) {
// Make sure we use %lld with a 64 bit argument because we might be
// compiling LLI on a 32 bit compiler.
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
index 43e3453..55152db 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -48,7 +48,7 @@ Interpreter::Interpreter(Module *M)
: ExecutionEngine(M), TD(M) {
memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
- setTargetData(&TD);
+ setDataLayout(&TD);
// Initialize the "backend"
initializeExecutionEngine();
initializeExternalFunctions();
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
index 28c5775..72c42c1 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -17,7 +17,7 @@
#include "llvm/Function.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
@@ -82,7 +82,7 @@ struct ExecutionContext {
//
class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
GenericValue ExitValue; // The return value of the called function
- TargetData TD;
+ DataLayout TD;
IntrinsicLowering *IL;
// The runtime stack of executing code. The top of the stack is the current
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index 97995ad..1ad3382 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -24,7 +24,7 @@
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Support/Dwarf.h"
@@ -272,7 +272,7 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
: ExecutionEngine(M), TM(tm), TJI(tji),
JMM(jmm ? jmm : JITMemoryManager::CreateDefaultMemManager()),
AllocateGVsWithCode(GVsWithCode), isAlreadyCodeGenerating(false) {
- setTargetData(TM.getTargetData());
+ setDataLayout(TM.getDataLayout());
jitstate = new JITState(M);
@@ -285,7 +285,7 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
// Add target data
MutexGuard locked(lock);
FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new TargetData(*TM.getTargetData()));
+ PM.add(new DataLayout(*TM.getDataLayout()));
// Turn the machine code intermediate representation into bytes in memory that
// may be executed.
@@ -339,7 +339,7 @@ void JIT::addModule(Module *M) {
jitstate = new JITState(M);
FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new TargetData(*TM.getTargetData()));
+ PM.add(new DataLayout(*TM.getDataLayout()));
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
@@ -370,7 +370,7 @@ bool JIT::removeModule(Module *M) {
jitstate = new JITState(Modules[0]);
FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new TargetData(*TM.getTargetData()));
+ PM.add(new DataLayout(*TM.getDataLayout()));
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
@@ -815,8 +815,8 @@ char* JIT::getMemoryForGV(const GlobalVariable* GV) {
// through the memory manager which puts them near the code but not in the
// same buffer.
Type *GlobalType = GV->getType()->getElementType();
- size_t S = getTargetData()->getTypeAllocSize(GlobalType);
- size_t A = getTargetData()->getPreferredAlignment(GV);
+ size_t S = getDataLayout()->getTypeAllocSize(GlobalType);
+ size_t A = getDataLayout()->getPreferredAlignment(GV);
if (GV->isThreadLocal()) {
MutexGuard locked(lock);
Ptr = TJI.allocateThreadLocalMemory(S);
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
index 42a136e..19c1979 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
@@ -24,7 +24,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
@@ -42,7 +42,7 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
assert(MMI && "MachineModuleInfo not registered!");
const TargetMachine& TM = F.getTarget();
- TD = TM.getTargetData();
+ TD = TM.getDataLayout();
stackGrowthDirection = TM.getFrameLowering()->getStackGrowthDirection();
RI = TM.getRegisterInfo();
MAI = TM.getMCAsmInfo();
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
index 8dc99ab..9cdbeac 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
@@ -23,12 +23,12 @@ class MachineFunction;
class MachineModuleInfo;
class MachineMove;
class MCAsmInfo;
-class TargetData;
+class DataLayout;
class TargetMachine;
class TargetRegisterInfo;
class JITDwarfEmitter {
- const TargetData* TD;
+ const DataLayout* TD;
JITCodeEmitter* JCE;
const TargetRegisterInfo* RI;
const MCAsmInfo *MAI;
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index ff3a9dc..ecafda7 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -30,7 +30,7 @@
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -384,11 +384,6 @@ namespace {
delete MemMgr;
}
- /// classof - Methods for support type inquiry through isa, cast, and
- /// dyn_cast:
- ///
- static inline bool classof(const MachineCodeEmitter*) { return true; }
-
JITResolver &getJITResolver() { return Resolver; }
virtual void startFunction(MachineFunction &F);
@@ -763,7 +758,7 @@ void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
}
static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
- const TargetData *TD) {
+ const DataLayout *TD) {
const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
if (Constants.empty()) return 0;
@@ -780,7 +775,7 @@ static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
void JITEmitter::startFunction(MachineFunction &F) {
DEBUG(dbgs() << "JIT: Starting CodeGen of Function "
- << F.getFunction()->getName() << "\n");
+ << F.getName() << "\n");
uintptr_t ActualSize = 0;
// Set the memory writable, if it's not already
@@ -929,7 +924,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
PrevDL = DebugLoc();
DEBUG(dbgs() << "JIT: Finished CodeGen of [" << (void*)FnStart
- << "] Function: " << F.getFunction()->getName()
+ << "] Function: " << F.getName()
<< ": " << (FnEnd-FnStart) << " bytes of text, "
<< Relocations.size() << " relocations\n");
@@ -1058,7 +1053,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
if (Constants.empty()) return;
- unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
+ unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getDataLayout());
unsigned Align = MCP->getConstantPoolAlignment();
ConstantPoolBase = allocateSpace(Size, Align);
ConstantPool = MCP;
@@ -1087,7 +1082,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
dbgs().write_hex(CAddr) << "]\n");
Type *Ty = CPE.Val.ConstVal->getType();
- Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty);
+ Offset += TheJIT->getDataLayout()->getTypeAllocSize(Ty);
}
}
@@ -1104,14 +1099,14 @@ void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
for (unsigned i = 0, e = JT.size(); i != e; ++i)
NumEntries += JT[i].MBBs.size();
- unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getTargetData());
+ unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getDataLayout());
// Just allocate space for all the jump tables now. We will fix up the actual
// MBB entries in the tables after we emit the code for each block, since then
// we will know the final locations of the MBBs in memory.
JumpTable = MJTI;
JumpTableBase = allocateSpace(NumEntries * EntrySize,
- MJTI->getEntryAlignment(*TheJIT->getTargetData()));
+ MJTI->getEntryAlignment(*TheJIT->getDataLayout()));
}
void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
@@ -1128,7 +1123,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
case MachineJumpTableInfo::EK_BlockAddress: {
// EK_BlockAddress - Each entry is a plain address of block, e.g.:
// .word LBB123
- assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == sizeof(void*) &&
+ assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == sizeof(void*) &&
"Cross JIT'ing?");
// For each jump table, map each target in the jump table to the address of
@@ -1148,7 +1143,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
case MachineJumpTableInfo::EK_Custom32:
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32: {
- assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == 4&&"Cross JIT'ing?");
+ assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == 4&&"Cross JIT'ing?");
// For each jump table, place the offset from the beginning of the table
// to the target address.
int *SlotPtr = (int*)JumpTableBase;
@@ -1224,7 +1219,7 @@ uintptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
assert(Index < JT.size() && "Invalid jump table index!");
- unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getTargetData());
+ unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getDataLayout());
unsigned Offset = 0;
for (unsigned i = 0; i < Index; ++i)
@@ -1265,15 +1260,13 @@ void *JIT::getPointerToFunctionOrStub(Function *F) {
return Addr;
// Get a stub if the target supports it.
- assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
- JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
+ JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter());
return JE->getJITResolver().getLazyFunctionStub(F);
}
void JIT::updateFunctionStub(Function *F) {
// Get the empty stub we generated earlier.
- assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
- JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
+ JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter());
void *Stub = JE->getJITResolver().getLazyFunctionStub(F);
void *Addr = getPointerToGlobalIfAvailable(F);
assert(Addr != Stub && "Function must have non-stub address to be updated.");
@@ -1294,6 +1287,5 @@ void JIT::freeMachineCodeForFunction(Function *F) {
updateGlobalMapping(F, 0);
// Free the actual memory for the function body and related stuff.
- assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
- cast<JITEmitter>(JCE)->deallocateMemForFunction(F);
+ static_cast<JITEmitter*>(JCE)->deallocateMemForFunction(F);
}
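
The hunk above deletes JITEmitter's always-true classof() and downgrades the isa<>/cast<> uses to static_cast, since an unconditional classof makes the LLVM-style RTTI check vacuous. A self-contained sketch of the pattern (types invented; no LLVM headers):

    // LLVM-style RTTI: classof() must actually discriminate kinds for
    // isa<>-style checks to mean anything. A classof that always returns
    // true (as removed above) reduces the check to a static_cast.
    struct EmitterBase {
      enum Kind { K_Machine, K_JIT };
      Kind TheKind;
      explicit EmitterBase(Kind K) : TheKind(K) {}
    };
    struct JITEmitterLike : EmitterBase {
      JITEmitterLike() : EmitterBase(K_JIT) {}
      static bool classof(const EmitterBase *E) { return E->TheKind == K_JIT; }
    };
    template <typename To, typename From> bool isa_like(const From *V) {
      return To::classof(V);   // dispatch to the target type's predicate
    }
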
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 739ffd7d8..752c5b7 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -8,18 +8,20 @@
//===----------------------------------------------------------------------===//
#include "MCJIT.h"
-#include "MCJITMemoryManager.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/ExecutionEngine/GenericValue.h"
-#include "llvm/ExecutionEngine/MCJIT.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/ExecutionEngine/MCJIT.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
+#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
@@ -44,24 +46,20 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
// FIXME: Don't do this here.
sys::DynamicLibrary::LoadLibraryPermanently(0, NULL);
- // If the target supports JIT code generation, create the JIT.
- if (TargetJITInfo *TJ = TM->getJITInfo())
- return new MCJIT(M, TM, *TJ, new MCJITMemoryManager(JMM), GVsWithCode);
-
- if (ErrorStr)
- *ErrorStr = "target does not support JIT code generation";
- return 0;
+ return new MCJIT(M, TM, JMM, GVsWithCode);
}
-MCJIT::MCJIT(Module *m, TargetMachine *tm, TargetJITInfo &tji,
- RTDyldMemoryManager *MM, bool AllocateGVsWithCode)
- : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM),
- isCompiled(false), M(m), OS(Buffer) {
+MCJIT::MCJIT(Module *m, TargetMachine *tm, RTDyldMemoryManager *MM,
+ bool AllocateGVsWithCode)
+ : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM),
+ isCompiled(false), M(m) {
- setTargetData(TM->getTargetData());
+ setDataLayout(TM->getDataLayout());
}
MCJIT::~MCJIT() {
+ if (LoadedObject)
+ NotifyFreeingObject(*LoadedObject.get());
delete MemMgr;
delete TM;
}
@@ -69,7 +67,7 @@ MCJIT::~MCJIT() {
void MCJIT::emitObject(Module *m) {
/// Currently, MCJIT only supports a single module and the module passed to
/// this function call is expected to be the contained module. The module
- /// is passed as a parameter here to prepare for multiple module support in
+ /// is passed as a parameter here to prepare for multiple module support in
/// the future.
assert(M == m);
@@ -84,41 +82,65 @@ void MCJIT::emitObject(Module *m) {
PassManager PM;
- PM.add(new TargetData(*TM->getTargetData()));
+ PM.add(new DataLayout(*TM->getDataLayout()));
+
+ // The RuntimeDyld will take ownership of this shortly
+ OwningPtr<ObjectBufferStream> Buffer(new ObjectBufferStream());
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM->addPassesToEmitMC(PM, Ctx, OS, false)) {
+ if (TM->addPassesToEmitMC(PM, Ctx, Buffer->getOStream(), false)) {
report_fatal_error("Target does not support MC emission!");
}
// Initialize passes.
- // FIXME: When we support multiple modules, we'll want to move the code
- // gen and finalization out of the constructor here and do it more
- // on-demand as part of getPointerToFunction().
PM.run(*m);
- // Flush the output buffer so the SmallVector gets its data.
- OS.flush();
+ // Flush the output buffer to get the generated code into memory
+ Buffer->flush();
// Load the object into the dynamic linker.
- MemoryBuffer* MB = MemoryBuffer::getMemBuffer(StringRef(Buffer.data(),
- Buffer.size()),
- "", false);
- if (Dyld.loadObject(MB))
+ // handing off ownership of the buffer
+ LoadedObject.reset(Dyld.loadObject(Buffer.take()));
+ if (!LoadedObject)
report_fatal_error(Dyld.getErrorString());
// Resolve any relocations.
Dyld.resolveRelocations();
+ // FIXME: Make this optional, maybe even move it to a JIT event listener
+ LoadedObject->registerWithDebugger();
+
+ NotifyObjectEmitted(*LoadedObject);
+
// FIXME: Add support for per-module compilation state
isCompiled = true;
}
+// FIXME: Add a parameter to identify which object is being finalized when
+// MCJIT supports multiple modules.
+void MCJIT::finalizeObject() {
+ // If the module hasn't been compiled, just do that.
+ if (!isCompiled) {
+ // If the call to Dyld.resolveRelocations() is removed from emitObject()
+ // we'll need to do that here.
+ emitObject(M);
+ return;
+ }
+
+ // Resolve any relocations.
+ Dyld.resolveRelocations();
+}
+
void *MCJIT::getPointerToBasicBlock(BasicBlock *BB) {
report_fatal_error("not yet implemented");
}
void *MCJIT::getPointerToFunction(Function *F) {
+ // FIXME: This should really return a uint64_t since it's a pointer in the
+ // target address space, not our local address space. That's part of the
+ // ExecutionEngine interface, though. Fix that when the old JIT finally
+ // dies.
+
// FIXME: Add support for per-module compilation state
if (!isCompiled)
emitObject(M);
@@ -132,10 +154,13 @@ void *MCJIT::getPointerToFunction(Function *F) {
// FIXME: Should the Dyld be retaining module information? Probably not.
// FIXME: Should we be using the mangler for this? Probably.
+ //
+ // This is the accessor for the target address, so make sure to check the
+ // load address of the symbol, not the local address.
StringRef BaseName = F->getName();
if (BaseName[0] == '\1')
- return (void*)Dyld.getSymbolAddress(BaseName.substr(1));
- return (void*)Dyld.getSymbolAddress((TM->getMCAsmInfo()->getGlobalPrefix()
+ return (void*)Dyld.getSymbolLoadAddress(BaseName.substr(1));
+ return (void*)Dyld.getSymbolLoadAddress((TM->getMCAsmInfo()->getGlobalPrefix()
+ BaseName).str());
}
@@ -270,3 +295,33 @@ void *MCJIT::getPointerToNamedFunction(const std::string &Name,
}
return 0;
}
+
+void MCJIT::RegisterJITEventListener(JITEventListener *L) {
+ if (L == NULL)
+ return;
+ MutexGuard locked(lock);
+ EventListeners.push_back(L);
+}
+void MCJIT::UnregisterJITEventListener(JITEventListener *L) {
+ if (L == NULL)
+ return;
+ MutexGuard locked(lock);
+ SmallVector<JITEventListener*, 2>::reverse_iterator I=
+ std::find(EventListeners.rbegin(), EventListeners.rend(), L);
+ if (I != EventListeners.rend()) {
+ std::swap(*I, EventListeners.back());
+ EventListeners.pop_back();
+ }
+}
+void MCJIT::NotifyObjectEmitted(const ObjectImage& Obj) {
+ MutexGuard locked(lock);
+ for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) {
+ EventListeners[I]->NotifyObjectEmitted(Obj);
+ }
+}
+void MCJIT::NotifyFreeingObject(const ObjectImage& Obj) {
+ MutexGuard locked(lock);
+ for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) {
+ EventListeners[I]->NotifyFreeingObject(Obj);
+ }
+}
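
UnregisterJITEventListener above removes a listener with the reverse-find / swap-with-back / pop_back idiom, which erases the most recently registered match in O(1) at the cost of listener order. A stand-alone sketch of the same idiom:

    // Sketch: erase the last occurrence of X from V in O(1), exactly the
    // reverse_iterator + swap + pop_back pattern used above.
    #include <algorithm>
    #include <vector>

    template <typename T>
    void erase_last_occurrence(std::vector<T> &V, const T &X) {
      typename std::vector<T>::reverse_iterator I =
          std::find(V.rbegin(), V.rend(), X);   // search newest-first
      if (I != V.rend()) {
        std::swap(*I, V.back());                // move the victim to the end
        V.pop_back();                           // and drop it
      }
    }
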
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
index 1d272e9..571080d 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -11,33 +11,32 @@
#define LLVM_LIB_EXECUTIONENGINE_MCJIT_H
#include "llvm/PassManager.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/raw_ostream.h"
namespace llvm {
+class ObjectImage;
+
// FIXME: This makes all kinds of horrible assumptions for the time being,
// like only having one module, not needing to worry about multi-threading,
// blah blah. Purely in get-it-up-and-limping mode for now.
class MCJIT : public ExecutionEngine {
- MCJIT(Module *M, TargetMachine *tm, TargetJITInfo &tji,
- RTDyldMemoryManager *MemMgr, bool AllocateGVsWithCode);
+ MCJIT(Module *M, TargetMachine *tm, RTDyldMemoryManager *MemMgr,
+ bool AllocateGVsWithCode);
TargetMachine *TM;
MCContext *Ctx;
RTDyldMemoryManager *MemMgr;
RuntimeDyld Dyld;
+ SmallVector<JITEventListener*, 2> EventListeners;
// FIXME: Add support for multiple modules
bool isCompiled;
Module *M;
-
- // FIXME: Move these to a single container which manages JITed objects
- SmallVector<char, 4096> Buffer; // Working buffer into which we JIT.
- raw_svector_ostream OS;
+ OwningPtr<ObjectImage> LoadedObject;
public:
~MCJIT();
@@ -45,6 +44,8 @@ public:
/// @name ExecutionEngine interface implementation
/// @{
+ virtual void finalizeObject();
+
virtual void *getPointerToBasicBlock(BasicBlock *BB);
virtual void *getPointerToFunction(Function *F);
@@ -71,10 +72,14 @@ public:
/// Map the address of a JIT section as returned from the memory manager
/// to the address in the target process as the running code will see it.
/// This is the address which will be used for relocation resolution.
- virtual void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress) {
+ virtual void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) {
Dyld.mapSectionAddress(LocalAddress, TargetAddress);
}
+ virtual void RegisterJITEventListener(JITEventListener *L);
+ virtual void UnregisterJITEventListener(JITEventListener *L);
+
/// @}
/// @name (Private) Registration Interfaces
/// @{
@@ -98,6 +103,9 @@ protected:
/// is passed as a parameter here to prepare for multiple module support in
/// the future.
void emitObject(Module *M);
+
+ void NotifyObjectEmitted(const ObjectImage& Obj);
+ void NotifyFreeingObject(const ObjectImage& Obj);
};
} // End llvm namespace
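
The new LoadedObject member above is held in an OwningPtr, whose reset()/take() handoff drives the buffer and image lifetimes in MCJIT::emitObject. A minimal stand-in for readers unfamiliar with this pre-C++11 LLVM type (simplified; not the real llvm/ADT/OwningPtr.h):

    // Sketch of OwningPtr-style ownership: reset() adopts a new pointer and
    // deletes the old one; take() releases ownership to the caller.
    template <typename T> class OwningPtrSketch {
      T *Ptr;
      OwningPtrSketch(const OwningPtrSketch &);            // non-copyable
      OwningPtrSketch &operator=(const OwningPtrSketch &); // non-assignable
    public:
      explicit OwningPtrSketch(T *P = 0) : Ptr(P) {}
      ~OwningPtrSketch() { delete Ptr; }
      void reset(T *P = 0) { if (P != Ptr) { delete Ptr; Ptr = P; } }
      T *take() { T *Tmp = Ptr; Ptr = 0; return Tmp; }     // caller owns Tmp
      T *get() const { return Ptr; }
    };
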
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp
deleted file mode 100644
index 457fe5e..0000000
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-//==-- MCJITMemoryManager.cpp - Definition for the Memory Manager -*-C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCJITMemoryManager.h"
-
-using namespace llvm;
-
-void MCJITMemoryManager::anchor() { }
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h
deleted file mode 100644
index 441aaeb..0000000
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h
+++ /dev/null
@@ -1,50 +0,0 @@
-//===-- MCJITMemoryManager.h - Definition for the Memory Manager ---C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_EXECUTIONENGINE_MCJITMEMORYMANAGER_H
-#define LLVM_LIB_EXECUTIONENGINE_MCJITMEMORYMANAGER_H
-
-#include "llvm/Module.h"
-#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/ExecutionEngine/RuntimeDyld.h"
-#include <assert.h>
-
-namespace llvm {
-
-// The MCJIT memory manager is a layer between the standard JITMemoryManager
-// and the RuntimeDyld interface that maps objects, by name, onto their
-// matching LLVM IR counterparts in the module(s) being compiled.
-class MCJITMemoryManager : public RTDyldMemoryManager {
- virtual void anchor();
- OwningPtr<JITMemoryManager> JMM;
-
-public:
- MCJITMemoryManager(JITMemoryManager *jmm) :
- JMM(jmm?jmm:JITMemoryManager::CreateDefaultMemManager()) {}
-
- uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID) {
- return JMM->allocateDataSection(Size, Alignment, SectionID);
- }
-
- uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID) {
- return JMM->allocateCodeSection(Size, Alignment, SectionID);
- }
-
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true) {
- return JMM->getPointerToNamedFunction(Name, AbortOnFailure);
- }
-
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
index 8b50101..50cd072 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
@@ -78,12 +78,12 @@ public:
/// Creates an entry in the JIT registry for the buffer @p Object,
/// which must contain an object file in executable memory with any
/// debug information for the debugger.
- void registerObject(const MemoryBuffer &Object);
+ void registerObject(const ObjectBuffer &Object);
/// Removes the internal registration of @p Object, and
/// frees associated resources.
/// Returns true if @p Object was found in ObjectBufferMap.
- bool deregisterObject(const MemoryBuffer &Object);
+ bool deregisterObject(const ObjectBuffer &Object);
private:
/// Deregister the debug info for the given object file from the debugger
@@ -124,7 +124,7 @@ GDBJITRegistrar::~GDBJITRegistrar() {
ObjectBufferMap.clear();
}
-void GDBJITRegistrar::registerObject(const MemoryBuffer &Object) {
+void GDBJITRegistrar::registerObject(const ObjectBuffer &Object) {
const char *Buffer = Object.getBufferStart();
size_t Size = Object.getBufferSize();
@@ -147,7 +147,7 @@ void GDBJITRegistrar::registerObject(const MemoryBuffer &Object) {
}
}
-bool GDBJITRegistrar::deregisterObject(const MemoryBuffer& Object) {
+bool GDBJITRegistrar::deregisterObject(const ObjectBuffer& Object) {
const char *Buffer = Object.getBufferStart();
RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(Buffer);
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h
index f964bc6..69e9dbe 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h
@@ -10,7 +10,7 @@
#ifndef LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H
#define LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H
-#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
namespace llvm {
@@ -27,12 +27,12 @@ public:
/// Creates an entry in the JIT registry for the buffer @p Object,
/// which must contain an object file in executable memory with any
/// debug information for the debugger.
- virtual void registerObject(const MemoryBuffer &Object) = 0;
+ virtual void registerObject(const ObjectBuffer &Object) = 0;
/// Removes the internal registration of @p Object, and
/// frees associated resources.
/// Returns true if @p Object was previously registered.
- virtual bool deregisterObject(const MemoryBuffer &Object) = 0;
+ virtual bool deregisterObject(const ObjectBuffer &Object) = 0;
/// Returns a reference to a GDB JIT registrar singleton
static JITRegistrar& getGDBRegistrar();
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h
index c3e3572..17f3a21 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h
@@ -1,59 +1,76 @@
-//===---- ObjectImage.h - Format independent executuable object image -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares a file format independent ObjectImage class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H
-#define LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H
-
-#include "llvm/Object/ObjectFile.h"
-
-namespace llvm {
-
-class ObjectImage {
- ObjectImage(); // = delete
- ObjectImage(const ObjectImage &other); // = delete
-protected:
- object::ObjectFile *ObjFile;
-
-public:
- ObjectImage(object::ObjectFile *Obj) { ObjFile = Obj; }
- virtual ~ObjectImage() {}
-
- virtual object::symbol_iterator begin_symbols() const
- { return ObjFile->begin_symbols(); }
- virtual object::symbol_iterator end_symbols() const
- { return ObjFile->end_symbols(); }
-
- virtual object::section_iterator begin_sections() const
- { return ObjFile->begin_sections(); }
- virtual object::section_iterator end_sections() const
- { return ObjFile->end_sections(); }
-
- virtual /* Triple::ArchType */ unsigned getArch() const
- { return ObjFile->getArch(); }
-
- // Subclasses can override these methods to update the image with loaded
- // addresses for sections and common symbols
- virtual void updateSectionAddress(const object::SectionRef &Sec,
- uint64_t Addr) {}
- virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr)
- {}
-
- // Subclasses can override these methods to provide JIT debugging support
- virtual void registerWithDebugger() {}
- virtual void deregisterWithDebugger() {}
-};
-
-} // end namespace llvm
-
-#endif // LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H
-
+//===-- ObjectImageCommon.h - Format independent executable object image --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a file format independent ObjectImage class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLD_OBJECTIMAGECOMMON_H
+#define LLVM_RUNTIMEDYLD_OBJECTIMAGECOMMON_H
+
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ExecutionEngine/ObjectImage.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
+
+namespace llvm {
+
+class ObjectImageCommon : public ObjectImage {
+ ObjectImageCommon(); // = delete
+ ObjectImageCommon(const ObjectImageCommon &other); // = delete
+
+protected:
+ object::ObjectFile *ObjFile;
+
+ // This form of the constructor allows subclasses to use
+ // format-specific subclasses of ObjectFile directly
+ ObjectImageCommon(ObjectBuffer *Input, object::ObjectFile *Obj)
+ : ObjectImage(Input), // saves Input as Buffer and takes ownership
+ ObjFile(Obj)
+ {
+ }
+
+public:
+ ObjectImageCommon(ObjectBuffer* Input)
+ : ObjectImage(Input) // saves Input as Buffer and takes ownership
+ {
+ ObjFile = object::ObjectFile::createObjectFile(Buffer->getMemBuffer());
+ }
+ virtual ~ObjectImageCommon() { delete ObjFile; }
+
+ virtual object::symbol_iterator begin_symbols() const
+ { return ObjFile->begin_symbols(); }
+ virtual object::symbol_iterator end_symbols() const
+ { return ObjFile->end_symbols(); }
+
+ virtual object::section_iterator begin_sections() const
+ { return ObjFile->begin_sections(); }
+ virtual object::section_iterator end_sections() const
+ { return ObjFile->end_sections(); }
+
+ virtual /* Triple::ArchType */ unsigned getArch() const
+ { return ObjFile->getArch(); }
+
+ virtual StringRef getData() const { return ObjFile->getData(); }
+
+ // Subclasses can override these methods to update the image with loaded
+ // addresses for sections and common symbols
+ virtual void updateSectionAddress(const object::SectionRef &Sec,
+ uint64_t Addr) {}
+ virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr)
+ {}
+
+ // Subclasses can override these methods to provide JIT debugging support
+ virtual void registerWithDebugger() {}
+ virtual void deregisterWithDebugger() {}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_RUNTIMEDYLD_OBJECTIMAGECOMMON_H
+
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index b464040..f6dccb1 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -12,10 +12,12 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "dyld"
+#include "ObjectImageCommon.h"
#include "RuntimeDyldImpl.h"
#include "RuntimeDyldELF.h"
#include "RuntimeDyldMachO.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/MathExtras.h"
using namespace llvm;
using namespace llvm::object;
@@ -26,16 +28,6 @@ RuntimeDyldImpl::~RuntimeDyldImpl() {}
namespace llvm {
-namespace {
- // Helper for extensive error checking in debug builds.
- error_code Check(error_code Err) {
- if (Err) {
- report_fatal_error(Err.message());
- }
- return Err;
- }
-} // end anonymous namespace
-
// Resolve the relocations for all symbols we currently know about.
void RuntimeDyldImpl::resolveRelocations() {
// First, resolve relocations associated with external symbols.
@@ -44,11 +36,15 @@ void RuntimeDyldImpl::resolveRelocations() {
// Just iterate over the sections we have and resolve all the relocations
// in them. Gross overkill, but it gets the job done.
for (int i = 0, e = Sections.size(); i != e; ++i) {
- reassignSectionAddress(i, Sections[i].LoadAddress);
+ uint64_t Addr = Sections[i].LoadAddress;
+ DEBUG(dbgs() << "Resolving relocations Section #" << i
+ << "\t" << format("%p", (uint8_t *)Addr)
+ << "\n");
+ resolveRelocationList(Relocations[i], Addr);
}
}
-void RuntimeDyldImpl::mapSectionAddress(void *LocalAddress,
+void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
uint64_t TargetAddress) {
for (unsigned i = 0, e = Sections.size(); i != e; ++i) {
if (Sections[i].Address == LocalAddress) {
@@ -61,14 +57,11 @@ void RuntimeDyldImpl::mapSectionAddress(void *LocalAddress,
// Subclasses can implement this method to create specialized image instances.
// The caller owns the pointer that is returned.
-ObjectImage *RuntimeDyldImpl::createObjectImage(const MemoryBuffer *InputBuffer) {
- ObjectFile *ObjFile = ObjectFile::createObjectFile(const_cast<MemoryBuffer*>
- (InputBuffer));
- ObjectImage *Obj = new ObjectImage(ObjFile);
- return Obj;
+ObjectImage *RuntimeDyldImpl::createObjectImage(ObjectBuffer *InputBuffer) {
+ return new ObjectImageCommon(InputBuffer);
}
-bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
+ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
OwningPtr<ObjectImage> obj(createObjectImage(InputBuffer));
if (!obj)
report_fatal_error("Unable to create object image from memory buffer!");
@@ -80,9 +73,9 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
// Used sections from the object file
ObjSectionToIDMap LocalSections;
- // Common symbols requiring allocation, and the total size required to
- // allocate all common symbols.
+ // Common symbols requiring allocation, with their sizes and alignments
CommonSymbolMap CommonSymbols;
+ // Maximum required total memory to allocate all common symbols
uint64_t CommonSize = 0;
error_code err;
@@ -102,13 +95,15 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
bool isCommon = flags & SymbolRef::SF_Common;
if (isCommon) {
// Add the common symbols to a list. We'll allocate them all below.
+ uint64_t Align = getCommonSymbolAlignment(*i);
uint64_t Size = 0;
Check(i->getSize(Size));
- CommonSize += Size;
- CommonSymbols[*i] = Size;
+ CommonSize += Size + Align;
+ CommonSymbols[*i] = CommonSymbolInfo(Size, Align);
} else {
if (SymType == object::SymbolRef::ST_Function ||
- SymType == object::SymbolRef::ST_Data) {
+ SymType == object::SymbolRef::ST_Data ||
+ SymType == object::SymbolRef::ST_Unknown) {
uint64_t FileOffset;
StringRef SectionData;
section_iterator si = obj->end_sections();
@@ -177,9 +172,7 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
}
}
- handleObjectLoaded(obj.take());
-
- return false;
+ return obj.take();
}
void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
@@ -193,7 +186,7 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
if (!Addr)
report_fatal_error("Unable to allocate memory for common symbols!");
uint64_t Offset = 0;
- Sections.push_back(SectionEntry(Addr, TotalSize, TotalSize, 0));
+ Sections.push_back(SectionEntry(StringRef(), Addr, TotalSize, TotalSize, 0));
memset(Addr, 0, TotalSize);
DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
@@ -204,11 +197,20 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
// Assign the address of each symbol
for (CommonSymbolMap::const_iterator it = CommonSymbols.begin(),
itEnd = CommonSymbols.end(); it != itEnd; it++) {
+ uint64_t Size = it->second.first;
+ uint64_t Align = it->second.second;
StringRef Name;
it->first.getName(Name);
+ if (Align) {
+ // This symbol has an alignment requirement.
+ uint64_t AlignOffset = OffsetToAlignment((uint64_t)Addr, Align);
+ Addr += AlignOffset;
+ Offset += AlignOffset;
+ DEBUG(dbgs() << "Allocating common symbol " << Name << " address " <<
+ format("%p\n", Addr));
+ }
Obj.updateSymbolAddress(it->first, (uint64_t)Addr);
SymbolTable[Name.data()] = SymbolLoc(SectionID, Offset);
- uint64_t Size = it->second;
Offset += Size;
Addr += Size;
}
@@ -236,10 +238,12 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
bool IsVirtual;
bool IsZeroInit;
uint64_t DataSize;
+ StringRef Name;
Check(Section.isRequiredForExecution(IsRequired));
Check(Section.isVirtual(IsVirtual));
Check(Section.isZeroInit(IsZeroInit));
Check(Section.getSize(DataSize));
+ Check(Section.getName(Name));
unsigned Allocate;
unsigned SectionID = Sections.size();
@@ -267,6 +271,7 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
memcpy(Addr, pData, DataSize);
DEBUG(dbgs() << "emitSection SectionID: " << SectionID
+ << " Name: " << Name
<< " obj addr: " << format("%p", pData)
<< " new addr: " << format("%p", Addr)
<< " DataSize: " << DataSize
@@ -282,6 +287,7 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
Allocate = 0;
Addr = 0;
DEBUG(dbgs() << "emitSection SectionID: " << SectionID
+ << " Name: " << Name
<< " obj addr: " << format("%p", data.data())
<< " new addr: 0"
<< " DataSize: " << DataSize
@@ -290,7 +296,8 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
<< "\n");
}
- Sections.push_back(SectionEntry(Addr, Allocate, DataSize,(uintptr_t)pData));
+ Sections.push_back(SectionEntry(Name, Addr, Allocate, DataSize,
+ (uintptr_t)pData));
return SectionID;
}
@@ -333,15 +340,49 @@ void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
}
uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
- // TODO: There is only ARM far stub now. We should add the Thumb stub,
- // and stubs for branches Thumb - ARM and ARM - Thumb.
if (Arch == Triple::arm) {
+ // TODO: There is only ARM far stub now. We should add the Thumb stub,
+ // and stubs for branches Thumb - ARM and ARM - Thumb.
uint32_t *StubAddr = (uint32_t*)Addr;
*StubAddr = 0xe51ff004; // ldr pc,<label>
return (uint8_t*)++StubAddr;
- }
- else
+ } else if (Arch == Triple::mipsel) {
+ uint32_t *StubAddr = (uint32_t*)Addr;
+ // 0: 3c190000 lui t9,%hi(addr).
+ // 4: 27390000 addiu t9,t9,%lo(addr).
+ // 8: 03200008 jr t9.
+ // c: 00000000 nop.
+ const unsigned LuiT9Instr = 0x3c190000, AdduiT9Instr = 0x27390000;
+ const unsigned JrT9Instr = 0x03200008, NopInstr = 0x0;
+
+ *StubAddr = LuiT9Instr;
+ StubAddr++;
+ *StubAddr = AdduiT9Instr;
+ StubAddr++;
+ *StubAddr = JrT9Instr;
+ StubAddr++;
+ *StubAddr = NopInstr;
return Addr;
+ } else if (Arch == Triple::ppc64) {
+ // PowerPC64 stub: the address points to a function descriptor
+ // instead of the function itself. Load the function address into
+ // r11 and move it to the count register; also load the function's
+ // TOC into r2 and its environment pointer into r11.
+ writeInt32BE(Addr, 0x3D800000); // lis r12, highest(addr)
+ writeInt32BE(Addr+4, 0x618C0000); // ori r12, r12, higher(addr)
+ writeInt32BE(Addr+8, 0x798C07C6); // sldi r12, r12, 32
+ writeInt32BE(Addr+12, 0x658C0000); // oris r12, r12, h(addr)
+ writeInt32BE(Addr+16, 0x618C0000); // ori r12, r12, l(addr)
+ writeInt32BE(Addr+20, 0xF8410028); // std r2, 40(r1)
+ writeInt32BE(Addr+24, 0xE96C0000); // ld r11, 0(r12)
+ writeInt32BE(Addr+28, 0xE84C0008); // ld r2, 8(r12)
+ writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
+ writeInt32BE(Addr+36, 0xE96C0010); // ld r11, 16(r12)
+ writeInt32BE(Addr+40, 0x4E800420); // bctr
+
+ return Addr;
+ }
+ return Addr;
}
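
writeInt32BE is used above but defined elsewhere in RuntimeDyldImpl; a plausible implementation, assuming it simply stores the value most-significant byte first regardless of host byte order:

    // Sketch of the big-endian store assumed by the PowerPC64 stub above.
    #include <stdint.h>

    static void writeInt32BE_sketch(uint8_t *Addr, uint32_t Value) {
      Addr[0] = (uint8_t)(Value >> 24);   // most-significant byte first
      Addr[1] = (uint8_t)(Value >> 16);
      Addr[2] = (uint8_t)(Value >> 8);
      Addr[3] = (uint8_t)(Value);
    }
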
// Assign an address to a symbol name and resolve all the relocations
@@ -350,32 +391,30 @@ void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
uint64_t Addr) {
// The address to use for relocation resolution is not
// the address of the local section buffer. We must be doing
- // a remote execution environment of some sort. Re-apply any
- // relocations referencing this section with the given address.
+ // a remote execution environment of some sort. Relocations can't
+ // be applied until all the sections have been moved. The client must
+ // trigger this with a call to MCJIT::finalize() or
+ // RuntimeDyld::resolveRelocations().
//
// Addr is a uint64_t because we can't assume the pointer width
// of the target is the same as that of the host. Just use a generic
// "big enough" type.
Sections[SectionID].LoadAddress = Addr;
- DEBUG(dbgs() << "Resolving relocations Section #" << SectionID
- << "\t" << format("%p", (uint8_t *)Addr)
- << "\n");
- resolveRelocationList(Relocations[SectionID], Addr);
}
void RuntimeDyldImpl::resolveRelocationEntry(const RelocationEntry &RE,
uint64_t Value) {
- // Ignore relocations for sections that were not loaded
- if (Sections[RE.SectionID].Address != 0) {
- uint8_t *Target = Sections[RE.SectionID].Address + RE.Offset;
- DEBUG(dbgs() << "\tSectionID: " << RE.SectionID
- << " + " << RE.Offset << " (" << format("%p", Target) << ")"
- << " RelType: " << RE.RelType
- << " Addend: " << RE.Addend
- << "\n");
+ // Ignore relocations for sections that were not loaded
+ if (Sections[RE.SectionID].Address != 0) {
+ DEBUG(dbgs() << "\tSectionID: " << RE.SectionID
+ << " + " << RE.Offset << " ("
+ << format("%p", Sections[RE.SectionID].Address + RE.Offset) << ")"
+ << " RelType: " << RE.RelType
+ << " Addend: " << RE.Addend
+ << "\n");
- resolveRelocation(Target, Sections[RE.SectionID].LoadAddress + RE.Offset,
- Value, RE.RelType, RE.Addend);
+ resolveRelocation(Sections[RE.SectionID], RE.Offset,
+ Value, RE.RelType, RE.Addend);
}
}
@@ -420,7 +459,7 @@ RuntimeDyld::~RuntimeDyld() {
delete Dyld;
}
-bool RuntimeDyld::loadObject(MemoryBuffer *InputBuffer) {
+ObjectImage *RuntimeDyld::loadObject(ObjectBuffer *InputBuffer) {
if (!Dyld) {
sys::LLVMFileType type = sys::IdentifyFileType(
InputBuffer->getBufferStart(),
@@ -462,6 +501,10 @@ void *RuntimeDyld::getSymbolAddress(StringRef Name) {
return Dyld->getSymbolAddress(Name);
}
+uint64_t RuntimeDyld::getSymbolLoadAddress(StringRef Name) {
+ return Dyld->getSymbolLoadAddress(Name);
+}
+
void RuntimeDyld::resolveRelocations() {
Dyld->resolveRelocations();
}
@@ -471,7 +514,7 @@ void RuntimeDyld::reassignSectionAddress(unsigned SectionID,
Dyld->reassignSectionAddress(SectionID, Addr);
}
-void RuntimeDyld::mapSectionAddress(void *LocalAddress,
+void RuntimeDyld::mapSectionAddress(const void *LocalAddress,
uint64_t TargetAddress) {
Dyld->mapSectionAddress(LocalAddress, TargetAddress);
}
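
The emitCommonSymbols hunk in this file pads each common symbol to its alignment with OffsetToAlignment from llvm/Support/MathExtras.h. The arithmetic, reimplemented here so the example is self-contained (an Align of 0 means no requirement, as in the code above):

    // Sketch: bytes of padding needed to round Value up to a multiple of
    // Align, mirroring the AlignOffset computation in emitCommonSymbols.
    #include <stdint.h>

    static uint64_t offset_to_alignment(uint64_t Value, uint64_t Align) {
      return Align ? (Align - (Value % Align)) % Align : 0;
    }
    // Usage: Addr += offset_to_alignment((uint64_t)Addr, Align); then place
    // the symbol and advance Addr/Offset by its size.
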
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index 75bb586..1ebcaf7 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -12,21 +12,32 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "dyld"
+#include "RuntimeDyldELF.h"
+#include "JITRegistrar.h"
+#include "ObjectImageCommon.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/IntervalMap.h"
-#include "RuntimeDyldELF.h"
#include "llvm/Object/ObjectFile.h"
+#include "llvm/ExecutionEngine/ObjectImage.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
#include "llvm/Support/ELF.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Object/ELF.h"
-#include "JITRegistrar.h"
using namespace llvm;
using namespace llvm::object;
namespace {
+static inline
+error_code check(error_code Err) {
+ if (Err) {
+ report_fatal_error(Err.message());
+ }
+ return Err;
+}
+
template<support::endianness target_endianness, bool is64Bits>
class DyldELFObject : public ELFObjectFile<target_endianness, is64Bits> {
LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
@@ -36,25 +47,17 @@ class DyldELFObject : public ELFObjectFile<target_endianness, is64Bits> {
typedef Elf_Rel_Impl<target_endianness, is64Bits, false> Elf_Rel;
typedef Elf_Rel_Impl<target_endianness, is64Bits, true> Elf_Rela;
- typedef typename ELFObjectFile<target_endianness, is64Bits>::
- Elf_Ehdr Elf_Ehdr;
+ typedef Elf_Ehdr_Impl<target_endianness, is64Bits> Elf_Ehdr;
typedef typename ELFDataTypeTypedefHelper<
target_endianness, is64Bits>::value_type addr_type;
-protected:
- // This duplicates the 'Data' member in the 'Binary' base class
- // but it is necessary to workaround a bug in gcc 4.2
- MemoryBuffer *InputData;
-
public:
- DyldELFObject(MemoryBuffer *Object, error_code &ec);
+ DyldELFObject(MemoryBuffer *Wrapper, error_code &ec);
void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr);
- const MemoryBuffer& getBuffer() const { return *InputData; }
-
// Methods for type inquiry through isa, cast and dyn_cast
static inline bool classof(const Binary *v) {
return (isa<ELFObjectFile<target_endianness, is64Bits> >(v)
@@ -64,20 +67,18 @@ public:
const ELFObjectFile<target_endianness, is64Bits> *v) {
return v->isDyldType();
}
- static inline bool classof(const DyldELFObject *v) {
- return true;
- }
};
template<support::endianness target_endianness, bool is64Bits>
-class ELFObjectImage : public ObjectImage {
+class ELFObjectImage : public ObjectImageCommon {
protected:
DyldELFObject<target_endianness, is64Bits> *DyldObj;
bool Registered;
public:
- ELFObjectImage(DyldELFObject<target_endianness, is64Bits> *Obj)
- : ObjectImage(Obj),
+ ELFObjectImage(ObjectBuffer *Input,
+ DyldELFObject<target_endianness, is64Bits> *Obj)
+ : ObjectImageCommon(Input, Obj),
DyldObj(Obj),
Registered(false) {}
@@ -100,20 +101,22 @@ class ELFObjectImage : public ObjectImage {
virtual void registerWithDebugger()
{
- JITRegistrar::getGDBRegistrar().registerObject(DyldObj->getBuffer());
+ JITRegistrar::getGDBRegistrar().registerObject(*Buffer);
Registered = true;
}
virtual void deregisterWithDebugger()
{
- JITRegistrar::getGDBRegistrar().deregisterObject(DyldObj->getBuffer());
+ JITRegistrar::getGDBRegistrar().deregisterObject(*Buffer);
}
};
+// The MemoryBuffer passed into this constructor is just a wrapper around the
+// actual memory. Ultimately, the Binary parent class will take ownership of
+// this MemoryBuffer object but not the underlying memory.
template<support::endianness target_endianness, bool is64Bits>
-DyldELFObject<target_endianness, is64Bits>::DyldELFObject(MemoryBuffer *Object,
+DyldELFObject<target_endianness, is64Bits>::DyldELFObject(MemoryBuffer *Wrapper,
error_code &ec)
- : ELFObjectFile<target_endianness, is64Bits>(Object, ec),
- InputData(Object) {
+ : ELFObjectFile<target_endianness, is64Bits>(Wrapper, ec) {
this->isDyldELFObject = true;
}
@@ -149,50 +152,43 @@ void DyldELFObject<target_endianness, is64Bits>::updateSymbolAddress(
namespace llvm {
-ObjectImage *RuntimeDyldELF::createObjectImage(
- const MemoryBuffer *ConstInputBuffer) {
- MemoryBuffer *InputBuffer = const_cast<MemoryBuffer*>(ConstInputBuffer);
- std::pair<unsigned char, unsigned char> Ident = getElfArchType(InputBuffer);
+ObjectImage *RuntimeDyldELF::createObjectImage(ObjectBuffer *Buffer) {
+ if (Buffer->getBufferSize() < ELF::EI_NIDENT)
+ llvm_unreachable("Unexpected ELF object size");
+ std::pair<unsigned char, unsigned char> Ident = std::make_pair(
+ (uint8_t)Buffer->getBufferStart()[ELF::EI_CLASS],
+ (uint8_t)Buffer->getBufferStart()[ELF::EI_DATA]);
error_code ec;
if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB) {
DyldELFObject<support::little, false> *Obj =
- new DyldELFObject<support::little, false>(InputBuffer, ec);
- return new ELFObjectImage<support::little, false>(Obj);
+ new DyldELFObject<support::little, false>(Buffer->getMemBuffer(), ec);
+ return new ELFObjectImage<support::little, false>(Buffer, Obj);
}
else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB) {
DyldELFObject<support::big, false> *Obj =
- new DyldELFObject<support::big, false>(InputBuffer, ec);
- return new ELFObjectImage<support::big, false>(Obj);
+ new DyldELFObject<support::big, false>(Buffer->getMemBuffer(), ec);
+ return new ELFObjectImage<support::big, false>(Buffer, Obj);
}
else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB) {
DyldELFObject<support::big, true> *Obj =
- new DyldELFObject<support::big, true>(InputBuffer, ec);
- return new ELFObjectImage<support::big, true>(Obj);
+ new DyldELFObject<support::big, true>(Buffer->getMemBuffer(), ec);
+ return new ELFObjectImage<support::big, true>(Buffer, Obj);
}
else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB) {
DyldELFObject<support::little, true> *Obj =
- new DyldELFObject<support::little, true>(InputBuffer, ec);
- return new ELFObjectImage<support::little, true>(Obj);
+ new DyldELFObject<support::little, true>(Buffer->getMemBuffer(), ec);
+ return new ELFObjectImage<support::little, true>(Buffer, Obj);
}
else
llvm_unreachable("Unexpected ELF format");
}
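// Illustrative sketch, not part of the patch: the dispatch above keys off
// two bytes of the ELF identification, e_ident[EI_CLASS] (32- vs 64-bit)
// and e_ident[EI_DATA] (byte order). A minimal standalone classifier over
// the same two bytes, using the ELF constants already included here:
static const char *classifyELFIdent(const uint8_t *Ident) {
  bool Is64 = Ident[ELF::EI_CLASS] == ELF::ELFCLASS64;
  bool IsBE = Ident[ELF::EI_DATA] == ELF::ELFDATA2MSB;
  if (Is64)
    return IsBE ? "ELF64 big-endian" : "ELF64 little-endian";
  return IsBE ? "ELF32 big-endian" : "ELF32 little-endian";
}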
-void RuntimeDyldELF::handleObjectLoaded(ObjectImage *Obj)
-{
- Obj->registerWithDebugger();
- // Save the loaded object. It will deregister itself when deleted
- LoadedObject = Obj;
-}
-
RuntimeDyldELF::~RuntimeDyldELF() {
- if (LoadedObject)
- delete LoadedObject;
}
-void RuntimeDyldELF::resolveX86_64Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
+void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
+ uint64_t Offset,
uint64_t Value,
uint32_t Type,
int64_t Addend) {
@@ -201,8 +197,10 @@ void RuntimeDyldELF::resolveX86_64Relocation(uint8_t *LocalAddress,
llvm_unreachable("Relocation type not implemented yet!");
break;
case ELF::R_X86_64_64: {
- uint64_t *Target = (uint64_t*)(LocalAddress);
+ uint64_t *Target = reinterpret_cast<uint64_t*>(Section.Address + Offset);
*Target = Value + Addend;
+ DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend))
+ << " at " << format("%p\n",Target));
break;
}
case ELF::R_X86_64_32:
@@ -212,37 +210,52 @@ void RuntimeDyldELF::resolveX86_64Relocation(uint8_t *LocalAddress,
(Type == ELF::R_X86_64_32S &&
((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
- uint32_t *Target = reinterpret_cast<uint32_t*>(LocalAddress);
+ uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
*Target = TruncatedAddr;
+ DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr)
+ << " at " << format("%p\n",Target));
break;
}
case ELF::R_X86_64_PC32: {
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(LocalAddress);
+ // Get the placeholder value from the generated object since
+ // a previous relocation attempt may have overwritten the loaded version
+ uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
+ + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
int64_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
assert(RealOffset <= INT32_MAX && RealOffset >= INT32_MIN);
int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
- *Placeholder = TruncOffset;
+ *Target = TruncOffset;
break;
}
}
}
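// Worked example (editorial, derived from the code above): for
// R_X86_64_PC32 the patched word is PC-relative,
//   *Target = Placeholder + Value + Addend - FinalAddress
// with FinalAddress = Section.LoadAddress + Offset. With a call-immediate
// placeholder of -4, Value = 0x4010, Addend = 0 and FinalAddress = 0x3000,
// the stored 32-bit word is -4 + 0x4010 - 0x3000 = 0x100C. Reading the
// placeholder from Section.ObjAddress (the unmodified object) keeps the
// computation idempotent when the relocation is resolved more than once.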
-void RuntimeDyldELF::resolveX86Relocation(uint8_t *LocalAddress,
- uint32_t FinalAddress,
+void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
+ uint64_t Offset,
uint32_t Value,
uint32_t Type,
int32_t Addend) {
switch (Type) {
case ELF::R_386_32: {
- uint32_t *Target = (uint32_t*)(LocalAddress);
- uint32_t Placeholder = *Target;
- *Target = Placeholder + Value + Addend;
+ // Get the placeholder value from the generated object since
+ // a previous relocation attempt may have overwritten the loaded version
+ uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
+ + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ *Target = *Placeholder + Value + Addend;
break;
}
case ELF::R_386_PC32: {
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(LocalAddress);
+ // Get the placeholder value from the generated object since
+ // a previous relocation attempt may have overwritten the loaded version
+ uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
+ + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
uint32_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
- *Placeholder = RealOffset;
+ *Target = RealOffset;
break;
}
default:
@@ -253,16 +266,18 @@ void RuntimeDyldELF::resolveX86Relocation(uint8_t *LocalAddress,
}
}
-void RuntimeDyldELF::resolveARMRelocation(uint8_t *LocalAddress,
- uint32_t FinalAddress,
+void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
+ uint64_t Offset,
uint32_t Value,
uint32_t Type,
int32_t Addend) {
// TODO: Add Thumb relocations.
- uint32_t* TargetPtr = (uint32_t*)LocalAddress;
+ uint32_t* TargetPtr = (uint32_t*)(Section.Address + Offset);
+ uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
Value += Addend;
- DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: " << LocalAddress
+ DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
+ << Section.Address + Offset
<< " FinalAddress: " << format("%p",FinalAddress)
<< " Value: " << format("%x",Value)
<< " Type: " << format("%x",Type)
@@ -273,14 +288,19 @@ void RuntimeDyldELF::resolveARMRelocation(uint8_t *LocalAddress,
default:
llvm_unreachable("Not implemented relocation type!");
- // Just write 32bit value to relocation address
+ // Write a 32bit value to relocation address, taking into account the
+ // implicit addend encoded in the target.
case ELF::R_ARM_ABS32 :
- *TargetPtr = Value;
+ *TargetPtr += Value;
break;
// Write the low 16 bits of the 32-bit value into the movw instruction.
// The top 4 bits of the immediate are shifted into a separate field.
case ELF::R_ARM_MOVW_ABS_NC :
+ // We are not expecting any other addend in the relocation address.
+ // Using 0x000F0FFF because MOVW has its 16 bit immediate split into 2
+ // non-contiguous fields.
+ assert((*TargetPtr & 0x000F0FFF) == 0);
Value = Value & 0xFFFF;
*TargetPtr |= Value & 0xFFF;
*TargetPtr |= ((Value >> 12) & 0xF) << 16;
@@ -289,6 +309,9 @@ void RuntimeDyldELF::resolveARMRelocation(uint8_t *LocalAddress,
// Write the high 16 bits of the 32-bit value into the movt instruction.
// The top 4 bits of the immediate are shifted into a separate field.
case ELF::R_ARM_MOVT_ABS :
+ // We are not expecting any other addend in the relocation address.
+ // Use 0x000F0FFF for the same reason as R_ARM_MOVW_ABS_NC.
+ assert((*TargetPtr & 0x000F0FFF) == 0);
Value = (Value >> 16) & 0xFFFF;
*TargetPtr |= Value & 0xFFF;
*TargetPtr |= ((Value >> 12) & 0xF) << 16;
@@ -306,26 +329,250 @@ void RuntimeDyldELF::resolveARMRelocation(uint8_t *LocalAddress,
}
}
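// Illustrative sketch, not part of the patch: the MOVW/MOVT fixups above
// patch an immediate that ARM splits across imm4 (bits 19:16) and imm12
// (bits 11:0). A minimal encoder for one 16-bit half, mirroring the
// masking used by the resolver:
static uint32_t encodeARMMovImm16(uint32_t Instr, uint16_t Imm) {
  Instr &= ~0x000F0FFFu;               // clear the imm4 and imm12 fields
  Instr |= Imm & 0xFFF;                // low 12 bits -> imm12
  Instr |= ((Imm >> 12) & 0xF) << 16;  // high 4 bits -> imm4
  return Instr;
}
// A movw/movt pair encoded this way materializes a full 32-bit address:
// movw takes (Value & 0xFFFF) and movt takes (Value >> 16).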
-void RuntimeDyldELF::resolveRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
+void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint32_t Value,
+ uint32_t Type,
+ int32_t Addend) {
+ uint32_t* TargetPtr = (uint32_t*)(Section.Address + Offset);
+ Value += Addend;
+
+ DEBUG(dbgs() << "resolveMipselocation, LocalAddress: "
+ << Section.Address + Offset
+ << " FinalAddress: "
+ << format("%p",Section.LoadAddress + Offset)
+ << " Value: " << format("%x",Value)
+ << " Type: " << format("%x",Type)
+ << " Addend: " << format("%x",Addend)
+ << "\n");
+
+ switch(Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+ break;
+ case ELF::R_MIPS_32:
+ *TargetPtr = Value + (*TargetPtr);
+ break;
+ case ELF::R_MIPS_26:
+    *TargetPtr = ((*TargetPtr) & 0xfc000000) | ((Value & 0x0fffffff) >> 2);
+ break;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ Value += ((*TargetPtr) & 0x0000ffff) << 16;
+ *TargetPtr = ((*TargetPtr) & 0xffff0000) |
+ (((Value + 0x8000) >> 16) & 0xffff);
+ break;
+ case ELF::R_MIPS_LO16:
+ Value += ((*TargetPtr) & 0x0000ffff);
+ *TargetPtr = ((*TargetPtr) & 0xffff0000) | (Value & 0xffff);
+ break;
+ }
+}
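// Worked example (editorial): the +0x8000 in R_MIPS_HI16 compensates for
// the matching R_MIPS_LO16 being sign-extended when the pair is combined:
//   addr == (hi16 << 16) + (int16_t)lo16
// e.g. for addr = 0x12348765: lo16 = 0x8765 is negative as an int16_t, so
// hi16 = (0x12348765 + 0x8000) >> 16 = 0x1235, and
// (0x1235 << 16) + (int16_t)0x8765 = 0x12350000 - 0x789B = 0x12348765.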
+
+// Return the .TOC. section address for R_PPC64_TOC relocations.
+uint64_t RuntimeDyldELF::findPPC64TOC() const {
+ // The TOC consists of sections .got, .toc, .tocbss, .plt in that
+ // order. The TOC starts where the first of these sections starts.
+ SectionList::const_iterator it = Sections.begin();
+ SectionList::const_iterator ite = Sections.end();
+ for (; it != ite; ++it) {
+ if (it->Name == ".got" ||
+ it->Name == ".toc" ||
+ it->Name == ".tocbss" ||
+ it->Name == ".plt")
+ break;
+ }
+ if (it == ite) {
+    // This may happen for
+    // * references to the TOC base (sym@toc, .opd relocation) without
+    //   a .toc directive.
+    // In this case just use the first section (which is usually
+    // the .opd) since the code won't reference the .toc base
+    // directly.
+ it = Sections.begin();
+ }
+  assert(it != ite);
+  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
+  // thus permitting a full 64 Kbyte segment.
+ return it->LoadAddress + 0x8000;
+}
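// Editorial note on the 0x8000 bias: it centers the TOC pointer in its
// segment. A signed 16-bit displacement spans [-0x8000, 0x7FFF], so a base
// at start + 0x8000 lets TOC16 relocations address the whole 64 KB window
// [start, start + 0xFFFF] from a single register.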
+
+// Returns the section and offset associated with the OPD entry referenced
+// by Symbol.
+void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+  // Get the ELF symbol value (st_value) to compare with the relocation
+  // offset in .opd entries.
+
+ error_code err;
+ for (section_iterator si = Obj.begin_sections(),
+ se = Obj.end_sections(); si != se; si.increment(err)) {
+ StringRef SectionName;
+ check(si->getName(SectionName));
+ if (SectionName != ".opd")
+ continue;
+
+ for (relocation_iterator i = si->begin_relocations(),
+ e = si->end_relocations(); i != e;) {
+ check(err);
+
+      // The R_PPC64_ADDR64 relocation indicates the first field
+      // of an .opd entry.
+ uint64_t TypeFunc;
+ check(i->getType(TypeFunc));
+ if (TypeFunc != ELF::R_PPC64_ADDR64) {
+ i.increment(err);
+ continue;
+ }
+
+ SymbolRef TargetSymbol;
+ uint64_t TargetSymbolOffset;
+ int64_t TargetAdditionalInfo;
+ check(i->getSymbol(TargetSymbol));
+ check(i->getOffset(TargetSymbolOffset));
+ check(i->getAdditionalInfo(TargetAdditionalInfo));
+
+ i = i.increment(err);
+ if (i == e)
+ break;
+ check(err);
+
+      // Just check whether the following relocation is an R_PPC64_TOC.
+ uint64_t TypeTOC;
+ check(i->getType(TypeTOC));
+ if (TypeTOC != ELF::R_PPC64_TOC)
+ continue;
+
+      // Finally, compare the symbol value and the target symbol offset
+      // to check whether this .opd entry refers to the symbol the
+      // relocation points to.
+ if (Rel.Addend != (intptr_t)TargetSymbolOffset)
+ continue;
+
+ section_iterator tsi(Obj.end_sections());
+ check(TargetSymbol.getSection(tsi));
+ Rel.SectionID = findOrEmitSection(Obj, (*tsi), true, LocalSections);
+ Rel.Addend = (intptr_t)TargetAdditionalInfo;
+ return;
+ }
+ }
+ llvm_unreachable("Attempting to get address of ODP entry!");
+}
+
+// Relocation masks following the #lo(value), #hi(value), #higher(value),
+// and #highest(value) macros defined in section 4.5.1 (Relocation Types)
+// of the PPC64 ELF ABI document.
+//
+static inline
+uint16_t applyPPClo (uint64_t value)
+{
+ return value & 0xffff;
+}
+
+static inline
+uint16_t applyPPChi (uint64_t value)
+{
+ return (value >> 16) & 0xffff;
+}
+
+static inline
+uint16_t applyPPChigher (uint64_t value)
+{
+ return (value >> 32) & 0xffff;
+}
+
+static inline
+uint16_t applyPPChighest (uint64_t value)
+{
+ return (value >> 48) & 0xffff;
+}
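// Illustrative self-check, not part of the patch (the helper name is
// ours): the four masks partition a 64-bit value into 16-bit halves that
// reassemble the original address.
static void checkPPC64Halves(uint64_t Value) {
  uint64_t Reassembled = ((uint64_t)applyPPChighest(Value) << 48) |
                         ((uint64_t)applyPPChigher(Value) << 32) |
                         ((uint64_t)applyPPChi(Value) << 16) |
                         (uint64_t)applyPPClo(Value);
  assert(Reassembled == Value && "halves must reassemble the value");
  (void)Reassembled;
}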
+
+void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend) {
+ uint8_t* LocalAddress = Section.Address + Offset;
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_PPC64_ADDR16_LO :
+ writeInt16BE(LocalAddress, applyPPClo (Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HI :
+ writeInt16BE(LocalAddress, applyPPChi (Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHER :
+ writeInt16BE(LocalAddress, applyPPChigher (Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHEST :
+ writeInt16BE(LocalAddress, applyPPChighest (Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR14 : {
+ assert(((Value + Addend) & 3) == 0);
+ // Preserve the AA/LK bits in the branch instruction
+ uint8_t aalk = *(LocalAddress+3);
+ writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
+ } break;
+ case ELF::R_PPC64_REL24 : {
+ uint64_t FinalAddress = (Section.LoadAddress + Offset);
+ int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
+ if (SignExtend32<24>(delta) != delta)
+ llvm_unreachable("Relocation R_PPC64_REL24 overflow");
+ // Generates a 'bl <address>' instruction
+ writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC));
+ } break;
+ case ELF::R_PPC64_ADDR64 :
+ writeInt64BE(LocalAddress, Value + Addend);
+ break;
+ case ELF::R_PPC64_TOC :
+ writeInt64BE(LocalAddress, findPPC64TOC());
+ break;
+ case ELF::R_PPC64_TOC16 : {
+ uint64_t TOCStart = findPPC64TOC();
+ Value = applyPPClo((Value + Addend) - TOCStart);
+ writeInt16BE(LocalAddress, applyPPClo(Value));
+ } break;
+ case ELF::R_PPC64_TOC16_DS : {
+ uint64_t TOCStart = findPPC64TOC();
+ Value = ((Value + Addend) - TOCStart);
+ writeInt16BE(LocalAddress, applyPPClo(Value));
+ } break;
+ }
+}
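// Editorial note on R_PPC64_REL24: the resolver rewrites the entire
// instruction. 0x48000001 is the I-form branch (opcode 18) with AA=0 and
// LK=1, i.e. 'bl', and the signed 24-bit displacement occupies bits 25:2.
// For example, delta = 0x1234 gives 0x48000001 | 0x1234 = 0x48001235,
// a 'bl .+0x1234'.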
+
+
+void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
+ uint64_t Offset,
uint64_t Value,
uint32_t Type,
int64_t Addend) {
switch (Arch) {
case Triple::x86_64:
- resolveX86_64Relocation(LocalAddress, FinalAddress, Value, Type, Addend);
+ resolveX86_64Relocation(Section, Offset, Value, Type, Addend);
break;
case Triple::x86:
- resolveX86Relocation(LocalAddress, (uint32_t)(FinalAddress & 0xffffffffL),
+ resolveX86Relocation(Section, Offset,
(uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
case Triple::arm: // Fall through.
case Triple::thumb:
- resolveARMRelocation(LocalAddress, (uint32_t)(FinalAddress & 0xffffffffL),
+ resolveARMRelocation(Section, Offset,
(uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
+ case Triple::mips: // Fall through.
+ case Triple::mipsel:
+ resolveMIPSRelocation(Section, Offset,
+ (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ case Triple::ppc64:
+ resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
+ break;
default: llvm_unreachable("Unsupported CPU type!");
}
}
@@ -350,6 +597,8 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
RelocationValueRef Value;
// First search for the symbol in the local symbol table
SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data());
+ SymbolRef::Type SymType;
+ Symbol.getType(SymType);
if (lsi != Symbols.end()) {
Value.SectionID = lsi->second.first;
Value.Addend = lsi->second.second;
@@ -361,8 +610,6 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
Value.SectionID = gsi->second.first;
Value.Addend = gsi->second.second;
} else {
- SymbolRef::Type SymType;
- Symbol.getType(SymType);
switch (SymType) {
case SymbolRef::ST_Debug: {
// TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously
@@ -373,7 +620,13 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
if (si == Obj.end_sections())
llvm_unreachable("Symbol section not found, bad object file format!");
DEBUG(dbgs() << "\t\tThis is section symbol\n");
- Value.SectionID = findOrEmitSection(Obj, (*si), true, ObjSectionToID);
+ // Default to 'true' in case isText fails (though it never does).
+ bool isCode = true;
+ si->isText(isCode);
+ Value.SectionID = findOrEmitSection(Obj,
+ (*si),
+ isCode,
+ ObjSectionToID);
Value.Addend = Addend;
break;
}
@@ -398,13 +651,12 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
// This is an ARM branch relocation, need to use a stub function.
DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
SectionEntry &Section = Sections[Rel.SectionID];
- uint8_t *Target = Section.Address + Rel.Offset;
- // Look up for existing stub.
+ // Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
- resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address +
- i->second, RelType, 0);
+ resolveRelocation(Section, Rel.Offset,
+ (uint64_t)Section.Address + i->second, RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
@@ -419,10 +671,145 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
else
addRelocationForSection(RE, Value.SectionID);
- resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address +
- Section.StubOffset, RelType, 0);
+ resolveRelocation(Section, Rel.Offset,
+ (uint64_t)Section.Address + Section.StubOffset,
+ RelType, 0);
Section.StubOffset += getMaxStubSize();
}
+ } else if (Arch == Triple::mipsel && RelType == ELF::R_MIPS_26) {
+    // This is a MIPS branch relocation; it needs a stub function.
+    DEBUG(dbgs() << "\t\tThis is a MIPS branch relocation.");
+ SectionEntry &Section = Sections[Rel.SectionID];
+ uint8_t *Target = Section.Address + Rel.Offset;
+ uint32_t *TargetAddress = (uint32_t *)Target;
+
+ // Extract the addend from the instruction.
+ uint32_t Addend = ((*TargetAddress) & 0x03ffffff) << 2;
+
+ Value.Addend += Addend;
+
+    // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ resolveRelocation(Section, Rel.Offset,
+ (uint64_t)Section.Address + i->second, RelType, 0);
+ DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *StubTargetAddr = createStubFunction(Section.Address +
+ Section.StubOffset);
+
+      // Create HI16 and LO16 relocations for the filled stub instructions.
+ RelocationEntry REHi(Rel.SectionID,
+ StubTargetAddr - Section.Address,
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(Rel.SectionID,
+ StubTargetAddr - Section.Address + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+
+ resolveRelocation(Section, Rel.Offset,
+ (uint64_t)Section.Address + Section.StubOffset,
+ RelType, 0);
+ Section.StubOffset += getMaxStubSize();
+ }
+ } else if (Arch == Triple::ppc64) {
+ if (RelType == ELF::R_PPC64_REL24) {
+      // A PPC branch relocation will need a stub function if the target is
+      // an external symbol (SymbolRef::ST_Unknown) or if the target address
+      // is not within the signed 24-bit branch range.
+ SectionEntry &Section = Sections[Rel.SectionID];
+ uint8_t *Target = Section.Address + Rel.Offset;
+ bool RangeOverflow = false;
+ if (SymType != SymbolRef::ST_Unknown) {
+        // A function call may point to the .opd entry, so the final symbol
+        // value is calculated based on the relocation values in the .opd
+        // section.
+ findOPDEntrySection(Obj, ObjSectionToID, Value);
+ uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
+ int32_t delta = static_cast<int32_t>(Target - RelocTarget);
+        // If it is within the signed 24-bit branch range, just set the
+        // branch target.
+ if (SignExtend32<24>(delta) == delta) {
+ RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else {
+ RangeOverflow = true;
+ }
+ }
+      if (SymType == SymbolRef::ST_Unknown || RangeOverflow) {
+        // It is an external symbol (SymbolRef::ST_Unknown) or out of the
+        // signed 24-bit branch range.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+          // The stub for this symbol was already created; just relocate to it.
+ resolveRelocation(Section, Rel.Offset,
+ (uint64_t)Section.Address + i->second, RelType, 0);
+ DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *StubTargetAddr = createStubFunction(Section.Address +
+ Section.StubOffset);
+ RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
+ ELF::R_PPC64_ADDR64, Value.Addend);
+
+          // Generate the 64-bit address load sequence as exemplified in
+          // section 4.5.1 of the PPC64 ELF ABI.
+ RelocationEntry REhst(Rel.SectionID,
+ StubTargetAddr - Section.Address + 2,
+ ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
+ RelocationEntry REhr(Rel.SectionID,
+ StubTargetAddr - Section.Address + 6,
+ ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
+ RelocationEntry REh(Rel.SectionID,
+ StubTargetAddr - Section.Address + 14,
+ ELF::R_PPC64_ADDR16_HI, Value.Addend);
+ RelocationEntry REl(Rel.SectionID,
+ StubTargetAddr - Section.Address + 18,
+ ELF::R_PPC64_ADDR16_LO, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REhst, Value.SymbolName);
+ addRelocationForSymbol(REhr, Value.SymbolName);
+ addRelocationForSymbol(REh, Value.SymbolName);
+ addRelocationForSymbol(REl, Value.SymbolName);
+ } else {
+ addRelocationForSection(REhst, Value.SectionID);
+ addRelocationForSection(REhr, Value.SectionID);
+ addRelocationForSection(REh, Value.SectionID);
+ addRelocationForSection(REl, Value.SectionID);
+ }
+
+ resolveRelocation(Section, Rel.Offset,
+ (uint64_t)Section.Address + Section.StubOffset,
+ RelType, 0);
+ if (SymType == SymbolRef::ST_Unknown)
+ // Restore the TOC for external calls
+ writeInt32BE(Target+4, 0xE8410028); // ld r2,40(r1)
+ Section.StubOffset += getMaxStubSize();
+ }
+ }
+ } else {
+ RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+      // Extra check to avoid relocation against empty symbols (usually
+      // the R_PPC64_TOC).
+ if (Value.SymbolName && !TargetName.empty())
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
} else {
RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
if (Value.SymbolName)
@@ -432,8 +819,16 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
}
}
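// Editorial note: the PPC64 stub relocation offsets above (+2, +6, +14 and
// +18) are the big-endian 16-bit immediate fields of the ABI 4.5.1
// address-materializing sequence. Assuming createStubFunction lays out the
// instructions at offsets 0, 4, 8, 12 and 16, the correspondence would be:
//   0:  lis    r12, highest(addr)     ; halfword at byte 2
//   4:  ori    r12, r12, higher(addr) ; halfword at byte 6
//   8:  rldicr r12, r12, 32, 31       ; no relocation
//   12: oris   r12, r12, hi(addr)     ; halfword at byte 14
//   16: ori    r12, r12, lo(addr)     ; halfword at byte 18
// The R_PPC64_ADDR16_* resolvers then patch those halfwords via
// writeInt16BE.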
-bool RuntimeDyldELF::isCompatibleFormat(const MemoryBuffer *InputBuffer) const {
- StringRef Magic = InputBuffer->getBuffer().slice(0, ELF::EI_NIDENT);
- return (memcmp(Magic.data(), ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+unsigned RuntimeDyldELF::getCommonSymbolAlignment(const SymbolRef &Sym) {
+ // In ELF, the value of an SHN_COMMON symbol is its alignment requirement.
+ uint64_t Align;
+ Check(Sym.getValue(Align));
+ return Align;
+}
+
+bool RuntimeDyldELF::isCompatibleFormat(const ObjectBuffer *Buffer) const {
+ if (Buffer->getBufferSize() < strlen(ELF::ElfMagic))
+ return false;
+ return (memcmp(Buffer->getBufferStart(), ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
}
} // namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
index e413f78..07e704b 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -18,32 +18,52 @@
using namespace llvm;
-
namespace llvm {
+
+namespace {
+ // Helper for extensive error checking in debug builds.
+ error_code Check(error_code Err) {
+ if (Err) {
+ report_fatal_error(Err.message());
+ }
+ return Err;
+ }
+} // end anonymous namespace
+
class RuntimeDyldELF : public RuntimeDyldImpl {
protected:
- ObjectImage *LoadedObject;
-
- void resolveX86_64Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
+ void resolveX86_64Relocation(const SectionEntry &Section,
+ uint64_t Offset,
uint64_t Value,
uint32_t Type,
int64_t Addend);
- void resolveX86Relocation(uint8_t *LocalAddress,
- uint32_t FinalAddress,
+ void resolveX86Relocation(const SectionEntry &Section,
+ uint64_t Offset,
uint32_t Value,
uint32_t Type,
int32_t Addend);
- void resolveARMRelocation(uint8_t *LocalAddress,
- uint32_t FinalAddress,
+ void resolveARMRelocation(const SectionEntry &Section,
+ uint64_t Offset,
uint32_t Value,
uint32_t Type,
int32_t Addend);
- virtual void resolveRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
+ void resolveMIPSRelocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint32_t Value,
+ uint32_t Type,
+ int32_t Addend);
+
+ void resolvePPC64Relocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend);
+
+ virtual void resolveRelocation(const SectionEntry &Section,
+ uint64_t Offset,
uint64_t Value,
uint32_t Type,
int64_t Addend);
@@ -54,16 +74,22 @@ protected:
const SymbolTableMap &Symbols,
StubMap &Stubs);
- virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer);
- virtual void handleObjectLoaded(ObjectImage *Obj);
+ unsigned getCommonSymbolAlignment(const SymbolRef &Sym);
+
+ virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
+
+ uint64_t findPPC64TOC() const;
+ void findOPDEntrySection(ObjectImage &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
public:
RuntimeDyldELF(RTDyldMemoryManager *mm)
- : RuntimeDyldImpl(mm), LoadedObject(0) {}
+ : RuntimeDyldImpl(mm) {}
virtual ~RuntimeDyldELF();
- bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const;
+ bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index c38ca69..829fd6c 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -14,8 +14,8 @@
#ifndef LLVM_RUNTIME_DYLD_IMPL_H
#define LLVM_RUNTIME_DYLD_IMPL_H
-#include "ObjectImage.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -24,6 +24,8 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
#include <map>
@@ -33,7 +35,7 @@ using namespace llvm::object;
namespace llvm {
-class MemoryBuffer;
+class ObjectBuffer;
class Twine;
@@ -41,6 +43,9 @@ class Twine;
/// linker.
class SectionEntry {
public:
+ /// Name - section name.
+ StringRef Name;
+
/// Address - address in the linker's memory where the section resides.
uint8_t *Address;
@@ -61,9 +66,9 @@ public:
/// for calculating relocations in some object formats (like MachO).
uintptr_t ObjAddress;
- SectionEntry(uint8_t *address, size_t size, uintptr_t stubOffset,
- uintptr_t objAddress)
- : Address(address), Size(size), LoadAddress((uintptr_t)address),
+ SectionEntry(StringRef name, uint8_t *address, size_t size,
+ uintptr_t stubOffset, uintptr_t objAddress)
+ : Name(name), Address(address), Size(size), LoadAddress((uintptr_t)address),
StubOffset(stubOffset), ObjAddress(objAddress) {}
};
@@ -135,8 +140,10 @@ protected:
typedef StringMap<SymbolLoc> SymbolTableMap;
SymbolTableMap GlobalSymbolTable;
- // Keep a map of common symbols to their sizes
- typedef std::map<SymbolRef, unsigned> CommonSymbolMap;
+ // Pair representing the size and alignment requirement for a common symbol.
+ typedef std::pair<unsigned, unsigned> CommonSymbolInfo;
+ // Keep a map of common symbols to their info pairs
+ typedef std::map<SymbolRef, CommonSymbolInfo> CommonSymbolMap;
// For each symbol, keep a list of relocations based on it. Anytime
// its address is reassigned (the JIT re-compiled the function, e.g.),
@@ -161,6 +168,10 @@ protected:
inline unsigned getMaxStubSize() {
if (Arch == Triple::arm || Arch == Triple::thumb)
return 8; // 32-bit instruction and 32-bit address
+ else if (Arch == Triple::mipsel)
+ return 16;
+ else if (Arch == Triple::ppc64)
+ return 44;
else
return 0;
}
@@ -175,10 +186,50 @@ protected:
return true;
}
+ uint64_t getSectionLoadAddress(unsigned SectionID) {
+ return Sections[SectionID].LoadAddress;
+ }
+
uint8_t *getSectionAddress(unsigned SectionID) {
return (uint8_t*)Sections[SectionID].Address;
}
+  // Subclasses can override this method to get the alignment requirement
+  // of a common symbol. The base implementation returns 0 (no requirement).
+ virtual unsigned getCommonSymbolAlignment(const SymbolRef &Sym) {
+ return 0;
+ }
+
+ void writeInt16BE(uint8_t *Addr, uint16_t Value) {
+ if (sys::isLittleEndianHost())
+ Value = sys::SwapByteOrder(Value);
+ *Addr = (Value >> 8) & 0xFF;
+ *(Addr+1) = Value & 0xFF;
+ }
+
+ void writeInt32BE(uint8_t *Addr, uint32_t Value) {
+ if (sys::isLittleEndianHost())
+ Value = sys::SwapByteOrder(Value);
+ *Addr = (Value >> 24) & 0xFF;
+ *(Addr+1) = (Value >> 16) & 0xFF;
+ *(Addr+2) = (Value >> 8) & 0xFF;
+ *(Addr+3) = Value & 0xFF;
+ }
+
+ void writeInt64BE(uint8_t *Addr, uint64_t Value) {
+ if (sys::isLittleEndianHost())
+ Value = sys::SwapByteOrder(Value);
+ *Addr = (Value >> 56) & 0xFF;
+ *(Addr+1) = (Value >> 48) & 0xFF;
+ *(Addr+2) = (Value >> 40) & 0xFF;
+ *(Addr+3) = (Value >> 32) & 0xFF;
+ *(Addr+4) = (Value >> 24) & 0xFF;
+ *(Addr+5) = (Value >> 16) & 0xFF;
+ *(Addr+6) = (Value >> 8) & 0xFF;
+ *(Addr+7) = Value & 0xFF;
+ }
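// Editorial observation, derived from the code above: because the writers
// swap on little-endian hosts and then store the most significant byte of
// the (swapped) value first, the net effect is a store in host byte order.
// A shift-based store is big-endian on any host with no conditional swap;
// a minimal sketch:
static void writeBE32(uint8_t *Addr, uint32_t Value) {
  Addr[0] = uint8_t(Value >> 24);  // most significant byte first
  Addr[1] = uint8_t(Value >> 16);
  Addr[2] = uint8_t(Value >> 8);
  Addr[3] = uint8_t(Value);
}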
+
/// \brief Given the common symbols discovered in the object file, emit a
/// new section for them and update the symbol mappings in the object and
/// symbol table.
@@ -221,13 +272,14 @@ protected:
void resolveRelocationEntry(const RelocationEntry &RE, uint64_t Value);
/// \brief A object file specific relocation resolver
- /// \param Address Address to apply the relocation action
+ /// \param Section The section where the relocation is being applied
+ /// \param Offset The offset into the section for this relocation
/// \param Value Target symbol address to apply the relocation action
/// \param Type object file specific relocation type
/// \param Addend A constant addend used to compute the value to be stored
/// into the relocatable field
- virtual void resolveRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
+ virtual void resolveRelocation(const SectionEntry &Section,
+ uint64_t Offset,
uint64_t Value,
uint32_t Type,
int64_t Addend) = 0;
@@ -242,19 +294,13 @@ protected:
/// \brief Resolve relocations to external symbols.
void resolveExternalSymbols();
- virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer);
- virtual void handleObjectLoaded(ObjectImage *Obj)
- {
- // Subclasses may choose to retain this image if they have a use for it
- delete Obj;
- }
-
+ virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
public:
RuntimeDyldImpl(RTDyldMemoryManager *mm) : MemMgr(mm), HasError(false) {}
virtual ~RuntimeDyldImpl();
- bool loadObject(const MemoryBuffer *InputBuffer);
+ ObjectImage *loadObject(ObjectBuffer *InputBuffer);
void *getSymbolAddress(StringRef Name) {
// FIXME: Just look up as a function for now. Overly simple of course.
@@ -265,11 +311,20 @@ public:
return getSectionAddress(Loc.first) + Loc.second;
}
+ uint64_t getSymbolLoadAddress(StringRef Name) {
+ // FIXME: Just look up as a function for now. Overly simple of course.
+ // Work in progress.
+ if (GlobalSymbolTable.find(Name) == GlobalSymbolTable.end())
+ return 0;
+ SymbolLoc Loc = GlobalSymbolTable.lookup(Name);
+ return getSectionLoadAddress(Loc.first) + Loc.second;
+ }
+
void resolveRelocations();
void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
- void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress);
+ void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
// Is the linker in an error state?
bool hasError() { return HasError; }
@@ -280,8 +335,7 @@ public:
// Get the error message.
StringRef getErrorString() { return ErrorStr; }
- virtual bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const = 0;
-
+ virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const = 0;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index 0e3a9d4..987c0c3 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -21,11 +21,13 @@ using namespace llvm::object;
namespace llvm {
-void RuntimeDyldMachO::resolveRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
+void RuntimeDyldMachO::resolveRelocation(const SectionEntry &Section,
+ uint64_t Offset,
uint64_t Value,
uint32_t Type,
int64_t Addend) {
+ uint8_t *LocalAddress = Section.Address + Offset;
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
bool isPCRel = (Type >> 24) & 1;
unsigned MachoType = (Type >> 28) & 0xf;
unsigned Size = 1 << ((Type >> 25) & 3);
@@ -57,7 +59,7 @@ void RuntimeDyldMachO::resolveRelocation(uint8_t *LocalAddress,
FinalAddress,
(uintptr_t)Value,
isPCRel,
- Type,
+ MachoType,
Size,
Addend);
break;
@@ -211,7 +213,6 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
uint32_t RelType = (uint32_t) (Rel.Type & 0xffffffffL);
RelocationValueRef Value;
SectionEntry &Section = Sections[Rel.SectionID];
- uint8_t *Target = Section.Address + Rel.Offset;
bool isExtern = (RelType >> 27) & 1;
if (isExtern) {
@@ -246,7 +247,12 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
}
assert(si != se && "No section containing relocation!");
Value.SectionID = findOrEmitSection(Obj, *si, true, ObjSectionToID);
- Value.Addend = *(const intptr_t *)Target;
+ Value.Addend = 0;
+ // FIXME: The size and type of the relocation determines if we can
+ // encode an Addend in the target location itself, and if so, how many
+ // bytes we should read in order to get it. We don't yet support doing
+ // that, and just assuming it's sizeof(intptr_t) is blatantly wrong.
+ //Value.Addend = *(const intptr_t *)Target;
if (Value.Addend) {
// The MachO addend is an offset from the current section. We need it
// to be an offset from the destination section
@@ -254,13 +260,13 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
}
}
- if (Arch == Triple::arm && RelType == macho::RIT_ARM_Branch24Bit) {
+ if (Arch == Triple::arm && (RelType & 0xf) == macho::RIT_ARM_Branch24Bit) {
// This is an ARM branch relocation, need to use a stub function.
// Look up for existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end())
- resolveRelocation(Target, (uint64_t)Target,
+ resolveRelocation(Section, Rel.Offset,
(uint64_t)Section.Address + i->second,
RelType, 0);
else {
@@ -274,7 +280,7 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
- resolveRelocation(Target, (uint64_t)Target,
+ resolveRelocation(Section, Rel.Offset,
(uint64_t)Section.Address + Section.StubOffset,
RelType, 0);
Section.StubOffset += getMaxStubSize();
@@ -290,8 +296,10 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
bool RuntimeDyldMachO::isCompatibleFormat(
- const MemoryBuffer *InputBuffer) const {
- StringRef Magic = InputBuffer->getBuffer().slice(0, 4);
+ const ObjectBuffer *InputBuffer) const {
+ if (InputBuffer->getBufferSize() < 4)
+ return false;
+ StringRef Magic(InputBuffer->getBufferStart(), 4);
if (Magic == "\xFE\xED\xFA\xCE") return true;
if (Magic == "\xCE\xFA\xED\xFE") return true;
if (Magic == "\xFE\xED\xFA\xCF") return true;
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
index 707664c..fe3539d 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -55,15 +55,15 @@ protected:
StubMap &Stubs);
public:
- virtual void resolveRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
+ virtual void resolveRelocation(const SectionEntry &Section,
+ uint64_t Offset,
uint64_t Value,
uint32_t Type,
int64_t Addend);
RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
- bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const;
+ bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
index 7cdd669..8b6104f 100644
--- a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
@@ -26,7 +26,14 @@
using namespace llvm;
TargetMachine *EngineBuilder::selectTarget() {
- Triple TT(LLVM_HOSTTRIPLE);
+ Triple TT;
+
+ // MCJIT can generate code for remote targets, but the old JIT and Interpreter
+ // must use the host architecture.
+ if (UseMCJIT && WhichEngine != EngineKind::Interpreter && M)
+ TT.setTriple(M->getTargetTriple());
+ else
+ TT.setTriple(LLVM_HOSTTRIPLE);
return selectTarget(TT, MArch, MCPU, MAttrs);
}
diff --git a/contrib/llvm/lib/MC/ELFObjectWriter.cpp b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
index 7203b9a..eda0623 100644
--- a/contrib/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
@@ -133,6 +133,11 @@ class ELFObjectWriter : public MCObjectWriter {
bool IsPCRel) const {
return TargetObjectWriter->ExplicitRelSym(Asm, Target, F, Fixup, IsPCRel);
}
+ const MCSymbol *undefinedExplicitRelSym(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ return TargetObjectWriter->undefinedExplicitRelSym(Target, Fixup, IsPCRel);
+ }
bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
bool hasRelocationAddend() const {
@@ -270,9 +275,10 @@ class ELFObjectWriter : public MCObjectWriter {
/// ComputeSymbolTable - Compute the symbol table data
///
- /// \param StringTable [out] - The string table data.
- /// \param StringIndexMap [out] - Map from symbol names to offsets in the
- /// string table.
+ /// \param Asm - The assembler.
+ /// \param SectionIndexMap - Maps a section to its index.
+ /// \param RevGroupMap - Maps a signature symbol to the group section.
+ /// \param NumRegularSections - Number of non-relocation sections.
void ComputeSymbolTable(MCAssembler &Asm,
const SectionIndexMapTy &SectionIndexMap,
RevGroupMapTy RevGroupMap,
@@ -638,7 +644,7 @@ const MCSymbol *ELFObjectWriter::SymbolToReloc(const MCAssembler &Asm,
if (ASymbol.isUndefined()) {
if (Renamed)
return Renamed;
- return &ASymbol;
+ return undefinedExplicitRelSym(Target, Fixup, IsPCRel);
}
if (SD.isExternal()) {
@@ -720,10 +726,13 @@ void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
MCSymbolData &SD = Asm.getSymbolData(ASymbol);
MCFragment *F = SD.getFragment();
- Index = F->getParent()->getOrdinal() + 1;
-
- // Offset of the symbol in the section
- Value += Layout.getSymbolOffset(&SD);
+ if (F) {
+ Index = F->getParent()->getOrdinal() + 1;
+ // Offset of the symbol in the section
+ Value += Layout.getSymbolOffset(&SD);
+ } else {
+ Index = 0;
+ }
} else {
if (Asm.getSymbolData(Symbol).getFlags() & ELF_Other_Weakref)
WeakrefUsedInReloc.insert(RelocSymbol);
@@ -732,8 +741,7 @@ void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
Index = -1;
}
Addend = Value;
- // Compensate for the addend on i386.
- if (is64Bit())
+ if (hasRelocationAddend())
Value = 0;
}
diff --git a/contrib/llvm/lib/MC/MCAsmBackend.cpp b/contrib/llvm/lib/MC/MCAsmBackend.cpp
index 2e447b0..53960e7 100644
--- a/contrib/llvm/lib/MC/MCAsmBackend.cpp
+++ b/contrib/llvm/lib/MC/MCAsmBackend.cpp
@@ -12,12 +12,9 @@
using namespace llvm;
MCAsmBackend::MCAsmBackend()
- : HasReliableSymbolDifference(false)
-{
-}
+ : HasReliableSymbolDifference(false), HasDataInCodeSupport(false) {}
-MCAsmBackend::~MCAsmBackend() {
-}
+MCAsmBackend::~MCAsmBackend() {}
const MCFixupKindInfo &
MCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
diff --git a/contrib/llvm/lib/MC/MCAsmInfo.cpp b/contrib/llvm/lib/MC/MCAsmInfo.cpp
index 8da2e0e..7ea0f3b 100644
--- a/contrib/llvm/lib/MC/MCAsmInfo.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfo.cpp
@@ -68,8 +68,8 @@ MCAsmInfo::MCAsmInfo() {
GlobalDirective = "\t.globl\t";
HasSetDirective = true;
HasAggressiveSymbolFolding = true;
- LCOMMDirectiveType = LCOMM::None;
COMMDirectiveAlignmentIsInBytes = true;
+ LCOMMDirectiveAlignmentType = LCOMM::NoAlignment;
HasDotTypeDotSizeDirective = true;
HasSingleParameterDotFile = true;
HasNoDeadStrip = false;
diff --git a/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp b/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
index 678e75a..fd79193 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
@@ -19,8 +19,10 @@ void MCAsmInfoCOFF::anchor() { }
MCAsmInfoCOFF::MCAsmInfoCOFF() {
GlobalPrefix = "_";
+  // MinGW 4.5 and later support .comm with log2 alignment, but .lcomm uses
+  // byte alignment.
COMMDirectiveAlignmentIsInBytes = false;
- LCOMMDirectiveType = LCOMM::ByteAlignment;
+ LCOMMDirectiveAlignmentType = LCOMM::ByteAlignment;
HasDotTypeDotSizeDirective = false;
HasSingleParameterDotFile = false;
PrivateGlobalPrefix = "L"; // Prefix for private global symbols
diff --git a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
index 8e0ac23..a0e3eba 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
@@ -32,6 +32,7 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
AlignmentIsInBytes = false;
COMMDirectiveAlignmentIsInBytes = false;
+ LCOMMDirectiveAlignmentType = LCOMM::Log2Alignment;
InlineAsmStart = " InlineAsm Start";
InlineAsmEnd = " InlineAsm End";
diff --git a/contrib/llvm/lib/MC/MCAsmStreamer.cpp b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
index 373df4b..17a6323 100644
--- a/contrib/llvm/lib/MC/MCAsmStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
@@ -166,7 +166,7 @@ public:
///
/// @param Symbol - The common symbol to emit.
/// @param Size - The size of the common symbol.
- /// @param Size - The alignment of the common symbol in bytes.
+ /// @param ByteAlignment - The alignment of the common symbol in bytes.
virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment);
@@ -251,6 +251,7 @@ public:
virtual void EmitPad(int64_t Offset);
virtual void EmitRegSave(const SmallVectorImpl<unsigned> &RegList, bool);
+ virtual void EmitTCEntry(const MCSymbol &S);
virtual void EmitInstruction(const MCInst &Inst);
@@ -517,13 +518,19 @@ void MCAsmStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
/// @param Size - The size of the common symbol.
void MCAsmStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlign) {
- assert(MAI.getLCOMMDirectiveType() != LCOMM::None &&
- "Doesn't have .lcomm, can't emit it!");
OS << "\t.lcomm\t" << *Symbol << ',' << Size;
if (ByteAlign > 1) {
- assert(MAI.getLCOMMDirectiveType() == LCOMM::ByteAlignment &&
- "Alignment not supported on .lcomm!");
- OS << ',' << ByteAlign;
+ switch (MAI.getLCOMMDirectiveAlignmentType()) {
+ case LCOMM::NoAlignment:
+ llvm_unreachable("alignment not supported on .lcomm!");
+ case LCOMM::ByteAlignment:
+ OS << ',' << ByteAlign;
+ break;
+ case LCOMM::Log2Alignment:
+ assert(isPowerOf2_32(ByteAlign) && "alignment must be a power of 2");
+ OS << ',' << Log2_32(ByteAlign);
+ break;
+ }
}
EmitEOL();
}
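// Editorial example: for a local common symbol of size 8 with 16-byte
// alignment this now prints, depending on LCOMMDirectiveAlignmentType:
//   ByteAlignment (e.g. COFF):   .lcomm  sym,8,16
//   Log2Alignment (e.g. Darwin): .lcomm  sym,8,4
// and NoAlignment targets hit the llvm_unreachable when alignment > 1.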
@@ -1293,6 +1300,14 @@ void MCAsmStreamer::EmitRegSave(const SmallVectorImpl<unsigned> &RegList,
EmitEOL();
}
+void MCAsmStreamer::EmitTCEntry(const MCSymbol &S) {
+ OS << "\t.tc ";
+ OS << S.getName();
+ OS << "[TC],";
+ OS << S.getName();
+ EmitEOL();
+}
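// Editorial example: for a symbol 'foo' this prints the PPC64 TOC-entry
// directive '.tc foo[TC],foo', naming the TOC slot and the address it
// holds.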
+
void MCAsmStreamer::EmitInstruction(const MCInst &Inst) {
assert(getCurrentSection() && "Cannot emit contents before setting section!");
diff --git a/contrib/llvm/lib/MC/MCAssembler.cpp b/contrib/llvm/lib/MC/MCAssembler.cpp
index 05519b5..726ec5a 100644
--- a/contrib/llvm/lib/MC/MCAssembler.cpp
+++ b/contrib/llvm/lib/MC/MCAssembler.cpp
@@ -199,8 +199,7 @@ MCAssembler::MCAssembler(MCContext &Context_, MCAsmBackend &Backend_,
MCCodeEmitter &Emitter_, MCObjectWriter &Writer_,
raw_ostream &OS_)
: Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(Writer_),
- OS(OS_), RelaxAll(false), NoExecStack(false), SubsectionsViaSymbols(false)
-{
+ OS(OS_), RelaxAll(false), NoExecStack(false), SubsectionsViaSymbols(false) {
}
MCAssembler::~MCAssembler() {
@@ -325,6 +324,12 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
const MCAlignFragment &AF = cast<MCAlignFragment>(F);
unsigned Offset = Layout.getFragmentOffset(&AF);
unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
+      // If we are padding with nops, force the padding to be a multiple of
+      // the minimum nop size.
+ if (Size > 0 && AF.hasEmitNops()) {
+ while (Size % getBackend().getMinimumNopSize())
+ Size += AF.getAlignment();
+ }
if (Size > AF.getMaxBytesToEmit())
return 0;
return Size;
@@ -375,7 +380,7 @@ void MCAsmLayout::LayoutFragment(MCFragment *F) {
LastValidFragment[F->getParent()] = F;
}
-/// WriteFragmentData - Write the \arg F data to the output file.
+/// WriteFragmentData - Write the \p F data to the output file.
static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment &F) {
MCObjectWriter *OW = &Asm.getWriter();
@@ -527,7 +532,7 @@ void MCAssembler::writeSectionData(const MCSectionData *SD,
}
uint64_t Start = getWriter().getStream().tell();
- (void) Start;
+ (void)Start;
for (MCSectionData::const_iterator it = SD->begin(),
ie = SD->end(); it != ie; ++it)
@@ -824,6 +829,7 @@ raw_ostream &operator<<(raw_ostream &OS, const MCFixup &AF) {
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCFragment::dump() {
raw_ostream &OS = llvm::errs();
@@ -964,6 +970,7 @@ void MCAssembler::dump() {
}
OS << "]>\n";
}
+#endif
// anchors for MC*Fragment vtables
void MCDataFragment::anchor() { }
diff --git a/contrib/llvm/lib/MC/MCContext.cpp b/contrib/llvm/lib/MC/MCContext.cpp
index b5b14b9..477bd17 100644
--- a/contrib/llvm/lib/MC/MCContext.cpp
+++ b/contrib/llvm/lib/MC/MCContext.cpp
@@ -153,6 +153,12 @@ MCSymbol *MCContext::LookupSymbol(StringRef Name) const {
return Symbols.lookup(Name);
}
+MCSymbol *MCContext::LookupSymbol(const Twine &Name) const {
+ SmallString<128> NameSV;
+ Name.toVector(NameSV);
+ return LookupSymbol(NameSV.str());
+}
+
//===----------------------------------------------------------------------===//
// Section Management
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
index 35f675d..5189c9da 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
@@ -184,3 +184,17 @@ size_t LLVMDisasmInstruction(LLVMDisasmContextRef DCR, uint8_t *Bytes,
}
llvm_unreachable("Invalid DecodeStatus!");
}
+
+//
+// LLVMSetDisasmOptions() sets the disassembler's options. It returns 1 if it
+// can set all the Options and 0 otherwise.
+//
+int LLVMSetDisasmOptions(LLVMDisasmContextRef DCR, uint64_t Options) {
+  if (Options & LLVMDisassembler_Option_UseMarkup) {
+ LLVMDisasmContext *DC = (LLVMDisasmContext *)DCR;
+ MCInstPrinter *IP = DC->getIP();
+ IP->setUseMarkup(1);
+ Options &= ~LLVMDisassembler_Option_UseMarkup;
+ }
+ return (Options == 0);
+}
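// Illustrative usage sketch, not part of the patch; the creation call is
// the pre-existing LLVMCreateDisasm entry point:
//   LLVMDisasmContextRef DC =
//       LLVMCreateDisasm("x86_64-unknown-linux-gnu", 0, 0, 0, 0);
//   if (!LLVMSetDisasmOptions(DC, LLVMDisassembler_Option_UseMarkup))
//     ; /* option not supported by this build */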
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
index 1226f1a..eed7a77 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
@@ -366,8 +366,9 @@ int EDDisassembler::parseInst(SmallVectorImpl<MCParsedAsmOperand*> &operands,
instName = OpcodeToken.getString();
instLoc = OpcodeToken.getLoc();
+ ParseInstructionInfo Info;
if (NextToken.isNot(AsmToken::Eof) &&
- TargetParser->ParseInstruction(instName, instLoc, operands))
+ TargetParser->ParseInstruction(Info, instName, instLoc, operands))
ret = -1;
} else {
ret = -1;
diff --git a/contrib/llvm/lib/MC/MCDwarf.cpp b/contrib/llvm/lib/MC/MCDwarf.cpp
index 4c63e43..f71b266 100644
--- a/contrib/llvm/lib/MC/MCDwarf.cpp
+++ b/contrib/llvm/lib/MC/MCDwarf.cpp
@@ -425,9 +425,11 @@ void MCDwarfFile::print(raw_ostream &OS) const {
OS << '"' << getName() << '"';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCDwarfFile::dump() const {
print(dbgs());
}
+#endif
// Utility function to write a tuple for .debug_abbrev.
static void EmitAbbrev(MCStreamer *MCOS, uint64_t Name, uint64_t Form) {
diff --git a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
index 6eb6914..74cd042 100644
--- a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
+++ b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
@@ -9,6 +9,8 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
using namespace llvm;
@@ -35,6 +37,12 @@ const MCSymbol *MCELFObjectTargetWriter::ExplicitRelSym(const MCAssembler &Asm,
return NULL;
}
+const MCSymbol *MCELFObjectTargetWriter::undefinedExplicitRelSym(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ const MCSymbol &Symbol = Target.getSymA()->getSymbol();
+ return &Symbol.AliasedSymbol();
+}
void MCELFObjectTargetWriter::adjustFixupOffset(const MCFixup &Fixup,
uint64_t &RelocOffset) {
diff --git a/contrib/llvm/lib/MC/MCELFStreamer.cpp b/contrib/llvm/lib/MC/MCELFStreamer.cpp
index 2d342dc..14fbc1e 100644
--- a/contrib/llvm/lib/MC/MCELFStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCELFStreamer.cpp
@@ -98,17 +98,13 @@ public:
uint64_t Size, unsigned ByteAlignment = 0) {
llvm_unreachable("ELF doesn't support this directive");
}
- virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
- unsigned ValueSize = 1,
- unsigned MaxBytesToEmit = 0);
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit = 0);
virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
unsigned AddrSpace);
virtual void EmitFileDirective(StringRef Filename);
+ virtual void EmitTCEntry(const MCSymbol &S);
+
virtual void FinishImpl();
private:
@@ -247,7 +243,6 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
switch (Attribute) {
case MCSA_LazyReference:
case MCSA_Reference:
- case MCSA_NoDeadStrip:
case MCSA_SymbolResolver:
case MCSA_PrivateExtern:
case MCSA_WeakDefinition:
@@ -256,6 +251,7 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_IndirectSymbol:
llvm_unreachable("Invalid symbol attribute for ELF!");
+ case MCSA_NoDeadStrip:
case MCSA_ELF_TypeGnuUniqueObject:
// Ignore for now.
break;
@@ -355,42 +351,6 @@ void MCELFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
EmitCommonSymbol(Symbol, Size, ByteAlignment);
}
-void MCELFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
-}
-
-void MCELFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
- int64_t Value, unsigned ValueSize,
- unsigned MaxBytesToEmit) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- if (MaxBytesToEmit == 0)
- MaxBytesToEmit = ByteAlignment;
- new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
- getCurrentSectionData());
-
- // Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > getCurrentSectionData()->getAlignment())
- getCurrentSectionData()->setAlignment(ByteAlignment);
-}
-
-void MCELFStreamer::EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- if (MaxBytesToEmit == 0)
- MaxBytesToEmit = ByteAlignment;
- MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
- getCurrentSectionData());
- F->setEmitNops(true);
-
- // Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > getCurrentSectionData()->getAlignment())
- getCurrentSectionData()->setAlignment(ByteAlignment);
-}
-
void MCELFStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
unsigned AddrSpace) {
fixSymbolsInTLSFixups(Value);
@@ -511,6 +471,12 @@ void MCELFStreamer::FinishImpl() {
this->MCObjectStreamer::FinishImpl();
}
+void MCELFStreamer::EmitTCEntry(const MCSymbol &S) {
+  // Creates an R_PPC64_TOC relocation.
+ MCObjectStreamer::EmitSymbolValue(&S, 8, 0);
+}
+
MCStreamer *llvm::createELFStreamer(MCContext &Context, MCAsmBackend &MAB,
raw_ostream &OS, MCCodeEmitter *CE,
bool RelaxAll, bool NoExecStack) {
diff --git a/contrib/llvm/lib/MC/MCExpr.cpp b/contrib/llvm/lib/MC/MCExpr.cpp
index 0eb7fcc..e033634 100644
--- a/contrib/llvm/lib/MC/MCExpr.cpp
+++ b/contrib/llvm/lib/MC/MCExpr.cpp
@@ -60,7 +60,8 @@ void MCExpr::print(raw_ostream &OS) const {
SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTOFF ||
SRE.getKind() == MCSymbolRefExpr::VK_ARM_TPOFF ||
SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTTPOFF ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET1)
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET1 ||
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET2)
OS << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
else if (SRE.getKind() != MCSymbolRefExpr::VK_None &&
SRE.getKind() != MCSymbolRefExpr::VK_PPC_DARWIN_HA16 &&
@@ -136,10 +137,12 @@ void MCExpr::print(raw_ostream &OS) const {
llvm_unreachable("Invalid expression kind!");
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCExpr::dump() const {
print(dbgs());
dbgs() << '\n';
}
+#endif
/* *** */
@@ -197,7 +200,9 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
case VK_ARM_GOTTPOFF: return "(gottpoff)";
case VK_ARM_TLSGD: return "(tlsgd)";
case VK_ARM_TARGET1: return "(target1)";
- case VK_PPC_TOC: return "toc";
+ case VK_ARM_TARGET2: return "(target2)";
+ case VK_PPC_TOC: return "tocbase";
+ case VK_PPC_TOC_ENTRY: return "toc";
case VK_PPC_DARWIN_HA16: return "ha16";
case VK_PPC_DARWIN_LO16: return "lo16";
case VK_PPC_GAS_HA16: return "ha";
@@ -264,7 +269,7 @@ MCSymbolRefExpr::getVariantKindForName(StringRef Name) {
/* *** */
-void MCTargetExpr::Anchor() {}
+void MCTargetExpr::anchor() {}
/* *** */
diff --git a/contrib/llvm/lib/MC/MCInst.cpp b/contrib/llvm/lib/MC/MCInst.cpp
index 7bbfd2e..124cc14 100644
--- a/contrib/llvm/lib/MC/MCInst.cpp
+++ b/contrib/llvm/lib/MC/MCInst.cpp
@@ -32,10 +32,12 @@ void MCOperand::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
OS << ">";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCOperand::dump() const {
print(dbgs(), 0);
dbgs() << "\n";
}
+#endif
void MCInst::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
OS << "<MCInst " << getOpcode();
@@ -62,7 +64,9 @@ void MCInst::dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI,
OS << ">";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCInst::dump() const {
print(dbgs(), 0);
dbgs() << "\n";
}
+#endif
diff --git a/contrib/llvm/lib/MC/MCInstPrinter.cpp b/contrib/llvm/lib/MC/MCInstPrinter.cpp
index 847bcc0..41d90ab 100644
--- a/contrib/llvm/lib/MC/MCInstPrinter.cpp
+++ b/contrib/llvm/lib/MC/MCInstPrinter.cpp
@@ -36,3 +36,17 @@ void MCInstPrinter::printAnnotation(raw_ostream &OS, StringRef Annot) {
OS << " " << MAI.getCommentString() << " " << Annot;
}
}
+
+/// Utility functions to make adding markups simpler.
+StringRef MCInstPrinter::markup(StringRef s) const {
+ if (getUseMarkup())
+ return s;
+ else
+ return "";
+}
+StringRef MCInstPrinter::markup(StringRef a, StringRef b) const {
+ if (getUseMarkup())
+ return a;
+ else
+ return b;
+}
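
A sketch of how a target printer might use the new helpers; markup("<imm:") returns its argument only when markup output is enabled, so the same printing code serves both plain and annotated disassembly (operand value hypothetical):

    #include "llvm/MC/MCInstPrinter.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void printImm(MCInstPrinter &IP, raw_ostream &OS, int64_t Imm) {
      // With markup on: "<imm:#42>"; with markup off: "#42".
      OS << IP.markup("<imm:") << '#' << Imm << IP.markup(">");
    }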
diff --git a/contrib/llvm/lib/MC/MCLabel.cpp b/contrib/llvm/lib/MC/MCLabel.cpp
index 9c0fc92..1d3022a 100644
--- a/contrib/llvm/lib/MC/MCLabel.cpp
+++ b/contrib/llvm/lib/MC/MCLabel.cpp
@@ -16,6 +16,8 @@ void MCLabel::print(raw_ostream &OS) const {
OS << '"' << getInstance() << '"';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCLabel::dump() const {
print(dbgs());
}
+#endif
diff --git a/contrib/llvm/lib/MC/MCMachOStreamer.cpp b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
index b75fe2c..04b0e86 100644
--- a/contrib/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
@@ -70,19 +70,11 @@ public:
llvm_unreachable("macho doesn't support this directive");
}
virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {
- llvm_unreachable("macho doesn't support this directive");
- }
+ unsigned ByteAlignment);
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
uint64_t Size = 0, unsigned ByteAlignment = 0);
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment = 0);
- virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
- unsigned ValueSize = 1,
- unsigned MaxBytesToEmit = 0);
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit = 0);
virtual void EmitFileDirective(StringRef Filename) {
// FIXME: Just ignore the .file; it isn't important enough to fail the
@@ -141,6 +133,8 @@ void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
}
void MCMachOStreamer::EmitDataRegion(DataRegionData::KindTy Kind) {
+ if (!getAssembler().getBackend().hasDataInCodeSupport())
+ return;
// Create a temporary label to mark the start of the data region.
MCSymbol *Start = getContext().CreateTempSymbol();
EmitLabel(Start);
@@ -151,6 +145,8 @@ void MCMachOStreamer::EmitDataRegion(DataRegionData::KindTy Kind) {
}
void MCMachOStreamer::EmitDataRegionEnd() {
+ if (!getAssembler().getBackend().hasDataInCodeSupport())
+ return;
std::vector<DataRegionData> &Regions = getAssembler().getDataRegions();
assert(Regions.size() && "Mismatched .end_data_region!");
DataRegionData &Data = Regions.back();
@@ -325,6 +321,15 @@ void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
SD.setCommon(Size, ByteAlignment);
}
+void MCMachOStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+ // '.lcomm' is equivalent to '.zerofill'.
+ return EmitZerofill(getContext().getMachOSection("__DATA", "__bss",
+ MCSectionMachO::S_ZEROFILL,
+ 0, SectionKind::getBSS()),
+ Symbol, Size, ByteAlignment);
+}
+
void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment) {
MCSectionData &SectData = getAssembler().getOrCreateSectionData(*Section);
@@ -361,42 +366,6 @@ void MCMachOStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
return;
}
-void MCMachOStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
-}
-
-void MCMachOStreamer::EmitValueToAlignment(unsigned ByteAlignment,
- int64_t Value, unsigned ValueSize,
- unsigned MaxBytesToEmit) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- if (MaxBytesToEmit == 0)
- MaxBytesToEmit = ByteAlignment;
- new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
- getCurrentSectionData());
-
- // Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > getCurrentSectionData()->getAlignment())
- getCurrentSectionData()->setAlignment(ByteAlignment);
-}
-
-void MCMachOStreamer::EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- if (MaxBytesToEmit == 0)
- MaxBytesToEmit = ByteAlignment;
- MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
- getCurrentSectionData());
- F->setEmitNops(true);
-
- // Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > getCurrentSectionData()->getAlignment())
- getCurrentSectionData()->setAlignment(ByteAlignment);
-}
-
void MCMachOStreamer::EmitInstToData(const MCInst &Inst) {
MCDataFragment *DF = getOrCreateDataFragment();
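
With EmitLocalCommonSymbol in place, '.lcomm' on Mach-O goes through the streamer rather than being rejected; a hedged sketch of the equivalent call (symbol name and sizes hypothetical):

    #include "llvm/MC/MCContext.h"
    #include "llvm/MC/MCStreamer.h"
    using namespace llvm;

    static void emitLocalBuffer(MCStreamer &Out, MCContext &Ctx) {
      MCSymbol *Sym = Ctx.GetOrCreateSymbol("_buf"); // hypothetical symbol
      // Equivalent to: .lcomm _buf, 16, 3  -- a 16-byte, 2^3-aligned
      // zerofill in __DATA,__bss.
      Out.EmitLocalCommonSymbol(Sym, 16, 1 << 3);
    }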
diff --git a/contrib/llvm/lib/MC/MCObjectFileInfo.cpp b/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
index 29b4a94..2e1604d 100644
--- a/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
+++ b/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
@@ -392,6 +392,18 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
DwarfMacroInfoSection =
Ctx->getELFSection(".debug_macinfo", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
+ DwarfAccelNamesSection =
+ Ctx->getELFSection(".apple_names", ELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
+ DwarfAccelObjCSection =
+ Ctx->getELFSection(".apple_objc", ELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
+ DwarfAccelNamespaceSection =
+ Ctx->getELFSection(".apple_namespaces", ELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
+ DwarfAccelTypesSection =
+ Ctx->getELFSection(".apple_types", ELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
}
@@ -430,12 +442,20 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
}
- StaticDtorSection =
- Ctx->getCOFFSection(".dtors",
- COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ |
- COFF::IMAGE_SCN_MEM_WRITE,
- SectionKind::getDataRel());
+ if (T.getOS() == Triple::Win32) {
+ StaticDtorSection =
+ Ctx->getCOFFSection(".CRT$XTX",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getReadOnly());
+ } else {
+ StaticDtorSection =
+ Ctx->getCOFFSection(".dtors",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
+ }
// FIXME: We're emitting LSDA info into a readonly section on COFF, even
// though it contains relocatable pointers. In PIC mode, this is probably a
@@ -557,6 +577,7 @@ void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model relocm,
Env = IsMachO;
InitMachOMCObjectFileInfo(T);
} else if ((Arch == Triple::x86 || Arch == Triple::x86_64) &&
+ (T.getEnvironment() != Triple::ELF) &&
(T.getOS() == Triple::MinGW32 || T.getOS() == Triple::Cygwin ||
T.getOS() == Triple::Win32)) {
Env = IsCOFF;
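
A sketch of the dispatch this hunk tightens: an explicit ELF environment on an x86 Windows triple now avoids the COFF path (triple strings hypothetical):

    #include "llvm/ADT/Triple.h"
    using namespace llvm;

    static bool takesCOFFPath(const Triple &T) {
      return (T.getArch() == Triple::x86 || T.getArch() == Triple::x86_64) &&
             T.getEnvironment() != Triple::ELF &&
             (T.getOS() == Triple::MinGW32 || T.getOS() == Triple::Cygwin ||
              T.getOS() == Triple::Win32);
    }
    // takesCOFFPath(Triple("i686-pc-win32"))     -> true
    // takesCOFFPath(Triple("i686-pc-win32-elf")) -> false (new behavior)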
diff --git a/contrib/llvm/lib/MC/MCObjectStreamer.cpp b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
index bad7cfe..7746323 100644
--- a/contrib/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
@@ -232,6 +232,31 @@ void MCObjectStreamer::EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
new MCDwarfCallFrameFragment(*AddrDelta, getCurrentSectionData());
}
+void MCObjectStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
+ assert(AddrSpace == 0 && "Address space must be 0!");
+ getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
+}
+
+void MCObjectStreamer::EmitValueToAlignment(unsigned ByteAlignment,
+ int64_t Value,
+ unsigned ValueSize,
+ unsigned MaxBytesToEmit) {
+ if (MaxBytesToEmit == 0)
+ MaxBytesToEmit = ByteAlignment;
+ new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
+ getCurrentSectionData());
+
+ // Update the maximum alignment on the current section if necessary.
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
+}
+
+void MCObjectStreamer::EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit) {
+ EmitValueToAlignment(ByteAlignment, 0, 1, MaxBytesToEmit);
+ cast<MCAlignFragment>(getCurrentFragment())->setEmitNops(true);
+}
+
bool MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset,
unsigned char Value) {
int64_t Res;
@@ -258,12 +283,26 @@ bool MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset,
void MCObjectStreamer::EmitGPRel32Value(const MCExpr *Value) {
MCDataFragment *DF = getOrCreateDataFragment();
- DF->addFixup(MCFixup::Create(DF->getContents().size(),
- Value,
- FK_GPRel_4));
+ DF->addFixup(MCFixup::Create(DF->getContents().size(), Value, FK_GPRel_4));
DF->getContents().resize(DF->getContents().size() + 4, 0);
}
+// Associate a GPRel fixup with the data and resize the data area (8 bytes)
+void MCObjectStreamer::EmitGPRel64Value(const MCExpr *Value) {
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ DF->addFixup(MCFixup::Create(DF->getContents().size(), Value, FK_GPRel_4));
+ DF->getContents().resize(DF->getContents().size() + 8, 0);
+}
+
+void MCObjectStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
+ unsigned AddrSpace) {
+ assert(AddrSpace == 0 && "Address space must be 0!");
+ // FIXME: A MCFillFragment would be more memory efficient but MCExpr has
+ // problems evaluating expressions across multiple fragments.
+ getOrCreateDataFragment()->getContents().append(NumBytes, FillValue);
+}
+
void MCObjectStreamer::FinishImpl() {
// Dump out the dwarf file & directory tables and line tables.
const MCSymbol *LineSectionSymbol = NULL;
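
EmitBytes, EmitValueToAlignment, and EmitCodeAlignment now live here once instead of being copied into each object streamer; a sketch of a caller (alignment values hypothetical):

    #include "llvm/MC/MCStreamer.h"
    using namespace llvm;

    static void padToBoundary(MCStreamer &Out) {
      // Pad with NOPs to a 16-byte boundary, but give up after 15 bytes.
      // EmitCodeAlignment forwards to EmitValueToAlignment and then marks
      // the fresh MCAlignFragment to emit NOPs rather than zero fill.
      Out.EmitCodeAlignment(16, 15);
    }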
diff --git a/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp b/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
index c76052d..f93f685 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
@@ -396,8 +396,17 @@ AsmToken AsmLexer::LexToken() {
case 0:
case ' ':
case '\t':
- // Ignore whitespace.
- return LexToken();
+ if (SkipSpace) {
+ // Ignore whitespace.
+ return LexToken();
+ } else {
+ int len = 1;
+      while (*CurPtr == ' ' || *CurPtr == '\t') {
+ CurPtr++;
+ len++;
+ }
+ return AsmToken(AsmToken::Space, StringRef(TokStart, len));
+ }
case '\n': // FALL THROUGH.
case '\r':
isAtStartOfLine = true;
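
A sketch of the new lexer mode: with SkipSpace disabled, a run of blanks or tabs comes back as one AsmToken::Space token instead of being swallowed:

    #include "llvm/MC/MCParser/MCAsmLexer.h"
    using namespace llvm;

    static bool nextIsSpace(MCAsmLexer &Lexer) {
      Lexer.setSkipSpace(false);  // e.g. while collecting macro arguments
      bool IsSpace = Lexer.Lex().is(AsmToken::Space);
      Lexer.setSkipSpace(true);   // restore the default
      return IsSpace;
    }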
diff --git a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
index b67c769..6f2e85e 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -19,6 +19,8 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/AsmCond.h"
#include "llvm/MC/MCParser/AsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
@@ -35,6 +37,8 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <cctype>
+#include <set>
+#include <string>
#include <vector>
using namespace llvm;
@@ -42,12 +46,14 @@ static cl::opt<bool>
FatalAssemblerWarnings("fatal-assembler-warnings",
cl::desc("Consider warnings as error"));
+MCAsmParserSemaCallback::~MCAsmParserSemaCallback() {}
+
namespace {
/// \brief Helper class for tracking macro definitions.
typedef std::vector<AsmToken> MacroArgument;
typedef std::vector<MacroArgument> MacroArguments;
-typedef StringRef MacroParameter;
+typedef std::pair<StringRef, MacroArgument> MacroParameter;
typedef std::vector<MacroParameter> MacroParameters;
struct Macro {
@@ -80,12 +86,34 @@ public:
MemoryBuffer *I);
};
+struct ParseStatementInfo {
+ /// ParsedOperands - The parsed operands from the last parsed statement.
+ SmallVector<MCParsedAsmOperand*, 8> ParsedOperands;
+
+ /// Opcode - The opcode from the last parsed instruction.
+ unsigned Opcode;
+
+ SmallVectorImpl<AsmRewrite> *AsmRewrites;
+
+ ParseStatementInfo() : Opcode(~0U), AsmRewrites(0) {}
+ ParseStatementInfo(SmallVectorImpl<AsmRewrite> *rewrites)
+ : Opcode(~0), AsmRewrites(rewrites) {}
+
+ ~ParseStatementInfo() {
+ // Free any parsed operands.
+ for (unsigned i = 0, e = ParsedOperands.size(); i != e; ++i)
+ delete ParsedOperands[i];
+ ParsedOperands.clear();
+ }
+};
+
/// \brief The concrete assembly parser instance.
class AsmParser : public MCAsmParser {
friend class GenericAsmParser;
- AsmParser(const AsmParser &); // DO NOT IMPLEMENT
- void operator=(const AsmParser &); // DO NOT IMPLEMENT
+ AsmParser(const AsmParser &) LLVM_DELETED_FUNCTION;
+ void operator=(const AsmParser &) LLVM_DELETED_FUNCTION;
private:
AsmLexer Lexer;
MCContext &Ctx;
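
ParseStatementInfo (added above) owns the operands it accumulates, so callers no longer free them by hand; a fragment in the style of the parse loop, assuming the surrounding AsmParser context:

    {
      ParseStatementInfo Info;
      if (ParseStatement(Info)) {
        // Error already reported; Info still cleans up after itself.
      }
    } // ~ParseStatementInfo deletes every entry in Info.ParsedOperands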
@@ -126,20 +154,27 @@ private:
StringRef CppHashFilename;
int64_t CppHashLineNumber;
SMLoc CppHashLoc;
+ int CppHashBuf;
/// AssemblerDialect. ~0U means an unset value; use the value provided by MAI.
unsigned AssemblerDialect;
+ /// IsDarwin - is Darwin compatibility enabled?
+ bool IsDarwin;
+
+ /// ParsingInlineAsm - Are we parsing ms-style inline assembly?
+ bool ParsingInlineAsm;
+
public:
AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
const MCAsmInfo &MAI);
- ~AsmParser();
+ virtual ~AsmParser();
virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false);
- void AddDirectiveHandler(MCAsmParserExtension *Object,
- StringRef Directive,
- DirectiveHandler Handler) {
+ virtual void AddDirectiveHandler(MCAsmParserExtension *Object,
+ StringRef Directive,
+ DirectiveHandler Handler) {
DirectiveMap[Directive] = std::make_pair(Object, Handler);
}
@@ -166,7 +201,19 @@ public:
virtual bool Error(SMLoc L, const Twine &Msg,
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>());
- const AsmToken &Lex();
+ virtual const AsmToken &Lex();
+
+ void setParsingInlineAsm(bool V) { ParsingInlineAsm = V; }
+ bool isParsingInlineAsm() { return ParsingInlineAsm; }
+
+ bool ParseMSInlineAsm(void *AsmLoc, std::string &AsmString,
+ unsigned &NumOutputs, unsigned &NumInputs,
+ SmallVectorImpl<std::pair<void *,bool> > &OpDecls,
+ SmallVectorImpl<std::string> &Constraints,
+ SmallVectorImpl<std::string> &Clobbers,
+ const MCInstrInfo *MII,
+ const MCInstPrinter *IP,
+ MCAsmParserSemaCallback &SI);
bool ParseExpression(const MCExpr *&Res);
virtual bool ParseExpression(const MCExpr *&Res, SMLoc &EndLoc);
@@ -178,7 +225,7 @@ public:
private:
void CheckForValidSection();
- bool ParseStatement();
+ bool ParseStatement(ParseStatementInfo &Info);
void EatToEndOfLine();
bool ParseCppHashLineFilenameComment(const SMLoc &L);
@@ -202,26 +249,28 @@ private:
/// This returns true on failure.
bool ProcessIncbinFile(const std::string &Filename);
- /// \brief Reset the current lexer position to that given by \arg Loc. The
+ /// \brief Reset the current lexer position to that given by \p Loc. The
/// current token is not set; clients should ensure Lex() is called
/// subsequently.
void JumpToLoc(SMLoc Loc);
- void EatToEndOfStatement();
+ virtual void EatToEndOfStatement();
- bool ParseMacroArgument(MacroArgument &MA);
+ bool ParseMacroArgument(MacroArgument &MA,
+ AsmToken::TokenKind &ArgumentDelimiter);
bool ParseMacroArguments(const Macro *M, MacroArguments &A);
/// \brief Parse up to the end of statement and a return the contents from the
/// current token until the end of the statement; the current token on exit
/// will be either the EndOfStatement or EOF.
- StringRef ParseStringToEndOfStatement();
+ virtual StringRef ParseStringToEndOfStatement();
/// \brief Parse until the end of a statement or a comma is encountered,
/// return the contents from the current token up to the end or comma.
StringRef ParseStringToComma();
- bool ParseAssignment(StringRef Name, bool allow_redef);
+ bool ParseAssignment(StringRef Name, bool allow_redef,
+ bool NoDeadStrip = false);
bool ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc);
bool ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res, SMLoc &EndLoc);
@@ -229,8 +278,8 @@ private:
bool ParseBracketExpr(const MCExpr *&Res, SMLoc &EndLoc);
/// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
- /// and set \arg Res to the identifier contents.
- bool ParseIdentifier(StringRef &Res);
+ /// and set \p Res to the identifier contents.
+ virtual bool ParseIdentifier(StringRef &Res);
// Directive Parsing.
@@ -282,6 +331,9 @@ private:
bool ParseDirectiveIrp(SMLoc DirectiveLoc); // ".irp"
bool ParseDirectiveIrpc(SMLoc DirectiveLoc); // ".irpc"
bool ParseDirectiveEndr(SMLoc DirectiveLoc); // ".endr"
+
+ // "_emit"
+ bool ParseDirectiveEmit(SMLoc DirectiveLoc, ParseStatementInfo &Info);
};
/// \brief Generic implementations of directive handling, etc. which is shared
@@ -406,8 +458,8 @@ AsmParser::AsmParser(SourceMgr &_SM, MCContext &_Ctx,
MCStreamer &_Out, const MCAsmInfo &_MAI)
: Lexer(_MAI), Ctx(_Ctx), Out(_Out), MAI(_MAI), SrcMgr(_SM),
GenericParser(new GenericAsmParser), PlatformParser(0),
- CurBuffer(0), MacrosEnabled(true), CppHashLineNumber(0),
- AssemblerDialect(~0U) {
+ CurBuffer(0), MacrosEnabled(true), CppHashLineNumber(0),
+ AssemblerDialect(~0U), IsDarwin(false), ParsingInlineAsm(false) {
// Save the old handler.
SavedDiagHandler = SrcMgr.getDiagHandler();
SavedDiagContext = SrcMgr.getDiagContext();
@@ -428,6 +480,7 @@ AsmParser::AsmParser(SourceMgr &_SM, MCContext &_Ctx,
} else if (_MAI.hasSubsectionsViaSymbols()) {
PlatformParser = createDarwinAsmParser();
PlatformParser->Initialize(*this);
+ IsDarwin = true;
} else {
PlatformParser = createELFAsmParser();
PlatformParser->Initialize(*this);
@@ -545,7 +598,8 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
// While we have input, parse each statement.
while (Lexer.isNot(AsmToken::Eof)) {
- if (!ParseStatement()) continue;
+ ParseStatementInfo Info;
+ if (!ParseStatement(Info)) continue;
// We had an error, validate that one was emitted and recover by skipping to
// the next line.
@@ -598,7 +652,7 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
}
void AsmParser::CheckForValidSection() {
- if (!getStreamer().getCurrentSection()) {
+ if (!ParsingInlineAsm && !getStreamer().getCurrentSection()) {
TokError("expected section directive before assembly directive");
Out.SwitchSection(Ctx.getMachOSection(
"__TEXT", "__text",
@@ -1024,14 +1078,11 @@ bool AsmParser::ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res,
}
}
-
-
-
/// ParseStatement:
/// ::= EndOfStatement
/// ::= Label* Directive ...Operands... EndOfStatement
/// ::= Label* Identifier OperandList* EndOfStatement
-bool AsmParser::ParseStatement() {
+bool AsmParser::ParseStatement(ParseStatementInfo &Info) {
if (Lexer.is(AsmToken::EndOfStatement)) {
Out.AddBlankLine();
Lex();
@@ -1150,7 +1201,7 @@ bool AsmParser::ParseStatement() {
return false;
}
- return ParseStatement();
+ return false;
}
case AsmToken::Equal:
@@ -1304,26 +1355,30 @@ bool AsmParser::ParseStatement() {
return Error(IDLoc, "unknown directive");
}
+ // _emit
+ if (ParsingInlineAsm && IDVal == "_emit")
+ return ParseDirectiveEmit(IDLoc, Info);
+
CheckForValidSection();
// Canonicalize the opcode to lower case.
- SmallString<128> Opcode;
+ SmallString<128> OpcodeStr;
for (unsigned i = 0, e = IDVal.size(); i != e; ++i)
- Opcode.push_back(tolower(IDVal[i]));
+ OpcodeStr.push_back(tolower(IDVal[i]));
- SmallVector<MCParsedAsmOperand*, 8> ParsedOperands;
- bool HadError = getTargetParser().ParseInstruction(Opcode.str(), IDLoc,
- ParsedOperands);
+ ParseInstructionInfo IInfo(Info.AsmRewrites);
+ bool HadError = getTargetParser().ParseInstruction(IInfo, OpcodeStr.str(),
+                                                     IDLoc, Info.ParsedOperands);
// Dump the parsed representation, if requested.
if (getShowParsedOperands()) {
SmallString<256> Str;
raw_svector_ostream OS(Str);
OS << "parsed instruction: [";
- for (unsigned i = 0; i != ParsedOperands.size(); ++i) {
+ for (unsigned i = 0; i != Info.ParsedOperands.size(); ++i) {
if (i != 0)
OS << ", ";
- ParsedOperands[i]->print(OS);
+ Info.ParsedOperands[i]->print(OS);
}
OS << "]";
@@ -1335,21 +1390,38 @@ bool AsmParser::ParseStatement() {
// the instruction.
if (!HadError && getContext().getGenDwarfForAssembly() &&
getContext().getGenDwarfSection() == getStreamer().getCurrentSection() ) {
+
+ unsigned Line = SrcMgr.FindLineNumber(IDLoc, CurBuffer);
+
+    // If we previously parsed a cpp hash file line comment, then make sure the
+    // current Dwarf File is for the CppHashFilename; if not, emit the
+    // Dwarf File table entry for it and adjust the line number for the .loc.
+ const std::vector<MCDwarfFile *> &MCDwarfFiles =
+ getContext().getMCDwarfFiles();
+ if (CppHashFilename.size() != 0) {
+      if (MCDwarfFiles[getContext().getGenDwarfFileNumber()]->getName() !=
+ CppHashFilename)
+ getStreamer().EmitDwarfFileDirective(
+ getContext().nextGenDwarfFileNumber(), StringRef(), CppHashFilename);
+
+      unsigned CppHashLocLineNo = SrcMgr.FindLineNumber(CppHashLoc, CppHashBuf);
+ Line = CppHashLineNumber - 1 + (Line - CppHashLocLineNo);
+ }
+
getStreamer().EmitDwarfLocDirective(getContext().getGenDwarfFileNumber(),
- SrcMgr.FindLineNumber(IDLoc, CurBuffer),
- 0, DWARF2_LINE_DEFAULT_IS_STMT ?
+ Line, 0, DWARF2_LINE_DEFAULT_IS_STMT ?
DWARF2_FLAG_IS_STMT : 0, 0, 0,
StringRef());
}
// If parsing succeeded, match the instruction.
- if (!HadError)
- HadError = getTargetParser().MatchAndEmitInstruction(IDLoc, ParsedOperands,
- Out);
-
- // Free any parsed operands.
- for (unsigned i = 0, e = ParsedOperands.size(); i != e; ++i)
- delete ParsedOperands[i];
+ if (!HadError) {
+ unsigned ErrorInfo;
+ HadError = getTargetParser().MatchAndEmitInstruction(IDLoc, Info.Opcode,
+ Info.ParsedOperands,
+ Out, ErrorInfo,
+ ParsingInlineAsm);
+ }
// Don't skip the rest of the line, the instruction parser is responsible for
// that.
@@ -1394,6 +1466,7 @@ bool AsmParser::ParseCppHashLineFilenameComment(const SMLoc &L) {
CppHashLoc = L;
CppHashFilename = Filename;
CppHashLineNumber = LineNumber;
+ CppHashBuf = CurBuffer;
// Ignore any trailing characters; they're just comment text.
EatToEndOfLine();
@@ -1454,6 +1527,14 @@ void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
NewDiag.print(0, OS);
}
+// FIXME: This is mostly duplicated from the function in AsmLexer.cpp. The
+// difference being that that function accepts '@' as part of identifiers and
+// we can't do that. AsmLexer.cpp should probably be changed to handle
+// '@' as a special case when needed.
+static bool isIdentifierChar(char c) {
+ return isalnum(c) || c == '_' || c == '$' || c == '.';
+}
+
bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
const MacroParameters &Parameters,
const MacroArguments &A,
@@ -1462,6 +1543,8 @@ bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
if (NParameters != 0 && NParameters != A.size())
return Error(L, "Wrong number of arguments");
+  // A macro without parameters is handled differently on Darwin: gas
+  // accepts no arguments and performs no substitutions.
while (!Body.empty()) {
// Scan for the next substitution.
std::size_t End = Body.size(), Pos = 0;
@@ -1518,25 +1601,33 @@ bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
Pos += 2;
} else {
unsigned I = Pos + 1;
- while (isalnum(Body[I]) && I + 1 != End)
+ while (isIdentifierChar(Body[I]) && I + 1 != End)
++I;
const char *Begin = Body.data() + Pos +1;
StringRef Argument(Begin, I - (Pos +1));
unsigned Index = 0;
for (; Index < NParameters; ++Index)
- if (Parameters[Index] == Argument)
+ if (Parameters[Index].first == Argument)
break;
- // FIXME: We should error at the macro definition.
- if (Index == NParameters)
- return Error(L, "Parameter not found");
-
- for (MacroArgument::const_iterator it = A[Index].begin(),
- ie = A[Index].end(); it != ie; ++it)
- OS << it->getString();
+ if (Index == NParameters) {
+ if (Body[Pos+1] == '(' && Body[Pos+2] == ')')
+ Pos += 3;
+ else {
+ OS << '\\' << Argument;
+ Pos = I;
+ }
+ } else {
+ for (MacroArgument::const_iterator it = A[Index].begin(),
+ ie = A[Index].end(); it != ie; ++it)
+ if (it->getKind() == AsmToken::String)
+ OS << it->getStringContents();
+ else
+ OS << it->getString();
- Pos += 1 + Argument.size();
+ Pos += 1 + Argument.size();
+ }
}
// Update the scan point.
Body = Body.substr(Pos);
@@ -1551,24 +1642,97 @@ MacroInstantiation::MacroInstantiation(const Macro *M, SMLoc IL, SMLoc EL,
{
}
+static bool IsOperator(AsmToken::TokenKind kind) {
+  switch (kind) {
+ default:
+ return false;
+ case AsmToken::Plus:
+ case AsmToken::Minus:
+ case AsmToken::Tilde:
+ case AsmToken::Slash:
+ case AsmToken::Star:
+ case AsmToken::Dot:
+ case AsmToken::Equal:
+ case AsmToken::EqualEqual:
+ case AsmToken::Pipe:
+ case AsmToken::PipePipe:
+ case AsmToken::Caret:
+ case AsmToken::Amp:
+ case AsmToken::AmpAmp:
+ case AsmToken::Exclaim:
+ case AsmToken::ExclaimEqual:
+ case AsmToken::Percent:
+ case AsmToken::Less:
+ case AsmToken::LessEqual:
+ case AsmToken::LessLess:
+ case AsmToken::LessGreater:
+ case AsmToken::Greater:
+ case AsmToken::GreaterEqual:
+ case AsmToken::GreaterGreater:
+ return true;
+ }
+}
+
/// ParseMacroArgument - Extract AsmTokens for a macro argument.
/// This is used for both default macro parameter values and the
/// arguments in macro invocations.
-bool AsmParser::ParseMacroArgument(MacroArgument &MA) {
+bool AsmParser::ParseMacroArgument(MacroArgument &MA,
+ AsmToken::TokenKind &ArgumentDelimiter) {
unsigned ParenLevel = 0;
+ unsigned AddTokens = 0;
- for (;;) {
- SMLoc LastTokenLoc;
+ // gas accepts arguments separated by whitespace, except on Darwin
+ if (!IsDarwin)
+ Lexer.setSkipSpace(false);
- if (Lexer.is(AsmToken::Eof) || Lexer.is(AsmToken::Equal))
+ for (;;) {
+ if (Lexer.is(AsmToken::Eof) || Lexer.is(AsmToken::Equal)) {
+ Lexer.setSkipSpace(true);
return TokError("unexpected token in macro instantiation");
+ }
+
+ if (ParenLevel == 0 && Lexer.is(AsmToken::Comma)) {
+ // Spaces and commas cannot be mixed to delimit parameters
+ if (ArgumentDelimiter == AsmToken::Eof)
+ ArgumentDelimiter = AsmToken::Comma;
+ else if (ArgumentDelimiter != AsmToken::Comma) {
+ Lexer.setSkipSpace(true);
+ return TokError("expected ' ' for macro argument separator");
+ }
+ break;
+ }
+
+ if (Lexer.is(AsmToken::Space)) {
+ Lex(); // Eat spaces
+
+      // Spaces can delimit parameters, but could also be part of an expression.
+      // If the token after a space is an operator, add that token and the next
+      // one to this argument.
+ if (ArgumentDelimiter == AsmToken::Space ||
+ ArgumentDelimiter == AsmToken::Eof) {
+ if (IsOperator(Lexer.getKind())) {
+ // Check to see whether the token is used as an operator,
+ // or part of an identifier
+ const char *NextChar = getTok().getEndLoc().getPointer() + 1;
+ if (*NextChar == ' ')
+ AddTokens = 2;
+ }
+
+ if (!AddTokens && ParenLevel == 0) {
+ if (ArgumentDelimiter == AsmToken::Eof &&
+ !IsOperator(Lexer.getKind()))
+ ArgumentDelimiter = AsmToken::Space;
+ break;
+ }
+ }
+ }
// HandleMacroEntry relies on not advancing the lexer here
// to be able to fill in the remaining default parameter values
if (Lexer.is(AsmToken::EndOfStatement))
break;
- if (ParenLevel == 0 && Lexer.is(AsmToken::Comma))
- break;
// Adjust the current parentheses level.
if (Lexer.is(AsmToken::LParen))
@@ -1578,16 +1742,23 @@ bool AsmParser::ParseMacroArgument(MacroArgument &MA) {
// Append the token to the current argument list.
MA.push_back(getTok());
+ if (AddTokens)
+ AddTokens--;
Lex();
}
+
+ Lexer.setSkipSpace(true);
if (ParenLevel != 0)
- return TokError("unbalanced parenthesises in macro argument");
+ return TokError("unbalanced parentheses in macro argument");
return false;
}
// Parse the macro instantiation arguments.
bool AsmParser::ParseMacroArguments(const Macro *M, MacroArguments &A) {
const unsigned NParameters = M ? M->Parameters.size() : 0;
+ // Argument delimiter is initially unknown. It will be set by
+ // ParseMacroArgument()
+ AsmToken::TokenKind ArgumentDelimiter = AsmToken::Eof;
// Parse two kinds of macro invocations:
// - macros defined without any parameters accept an arbitrary number of them
@@ -1596,13 +1767,30 @@ bool AsmParser::ParseMacroArguments(const Macro *M, MacroArguments &A) {
++Parameter) {
MacroArgument MA;
- if (ParseMacroArgument(MA))
+ if (ParseMacroArgument(MA, ArgumentDelimiter))
return true;
- A.push_back(MA);
+ if (!MA.empty() || !NParameters)
+ A.push_back(MA);
+ else if (NParameters) {
+ if (!M->Parameters[Parameter].second.empty())
+ A.push_back(M->Parameters[Parameter].second);
+ }
- if (Lexer.is(AsmToken::EndOfStatement))
+ // At the end of the statement, fill in remaining arguments that have
+ // default values. If there aren't any, then the next argument is
+ // required but missing
+ if (Lexer.is(AsmToken::EndOfStatement)) {
+ if (NParameters && Parameter < NParameters - 1) {
+ if (M->Parameters[Parameter + 1].second.empty())
+ return TokError("macro argument '" +
+ Twine(M->Parameters[Parameter + 1].first) +
+ "' is missing");
+ else
+ continue;
+ }
return false;
+ }
if (Lexer.is(AsmToken::Comma))
Lex();
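
As a rough illustration of the defaulting and delimiter rules implemented above (directive text hypothetical; see ParseDirectiveMacro below for where defaults are parsed):

    // .macro inc reg, amt=1   -- 'amt' stores the default tokens "1"
    // inc x                   -- missing argument filled from the default
    // inc x, 5                -- comma-delimited, amt = 5
    // inc x 5                 -- space-delimited, also accepted
    // inc x, 5 6              -- error: commas and spaces cannot be mixed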
@@ -1691,7 +1879,8 @@ static bool IsUsedIn(const MCSymbol *Sym, const MCExpr *Value) {
llvm_unreachable("Unknown expr kind!");
}
-bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef) {
+bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef,
+ bool NoDeadStrip) {
// FIXME: Use better location, we should use proper tokens.
SMLoc EqualLoc = Lexer.getLoc();
@@ -1746,6 +1935,9 @@ bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef) {
// Do the assignment.
Out.EmitAssignment(Sym, Value);
+ if (NoDeadStrip)
+ Out.EmitSymbolAttribute(Sym, MCSA_NoDeadStrip);
+
return false;
}
@@ -1803,7 +1995,7 @@ bool AsmParser::ParseDirectiveSet(StringRef IDVal, bool allow_redef) {
return TokError("unexpected token in '" + Twine(IDVal) + "'");
Lex();
- return ParseAssignment(Name, allow_redef);
+ return ParseAssignment(Name, allow_redef, true);
}
bool AsmParser::ParseEscapedString(std::string &Data) {
@@ -2274,8 +2466,13 @@ bool AsmParser::ParseDirectiveComm(bool IsLocal) {
if (ParseAbsoluteExpression(Pow2Alignment))
return true;
+ LCOMM::LCOMMType LCOMM = Lexer.getMAI().getLCOMMDirectiveAlignmentType();
+ if (IsLocal && LCOMM == LCOMM::NoAlignment)
+ return Error(Pow2AlignmentLoc, "alignment not supported on this target");
+
// If this target takes alignments in bytes (not log) validate and convert.
- if (Lexer.getMAI().getAlignmentIsInBytes()) {
+ if ((!IsLocal && Lexer.getMAI().getCOMMDirectiveAlignmentIsInBytes()) ||
+ (IsLocal && LCOMM == LCOMM::ByteAlignment)) {
if (!isPowerOf2_64(Pow2Alignment))
return Error(Pow2AlignmentLoc, "alignment must be a power of 2");
Pow2Alignment = Log2_64(Pow2Alignment);
@@ -2303,13 +2500,9 @@ bool AsmParser::ParseDirectiveComm(bool IsLocal) {
if (!Sym->isUndefined())
return Error(IDLoc, "invalid symbol redefinition");
- // '.lcomm' is equivalent to '.zerofill'.
// Create the Symbol as a common or local common with Size and Pow2Alignment
if (IsLocal) {
- getStreamer().EmitZerofill(Ctx.getMachOSection(
- "__DATA", "__bss", MCSectionMachO::S_ZEROFILL,
- 0, SectionKind::getBSS()),
- Sym, Size, 1 << Pow2Alignment);
+ getStreamer().EmitLocalCommonSymbol(Sym, Size, 1 << Pow2Alignment);
return false;
}
@@ -3073,25 +3266,33 @@ bool GenericAsmParser::ParseDirectiveMacro(StringRef Directive,
SMLoc DirectiveLoc) {
StringRef Name;
if (getParser().ParseIdentifier(Name))
- return TokError("expected identifier in directive");
+ return TokError("expected identifier in '.macro' directive");
MacroParameters Parameters;
+ // Argument delimiter is initially unknown. It will be set by
+ // ParseMacroArgument()
+ AsmToken::TokenKind ArgumentDelimiter = AsmToken::Eof;
if (getLexer().isNot(AsmToken::EndOfStatement)) {
- for(;;) {
- StringRef Parameter;
- if (getParser().ParseIdentifier(Parameter))
- return TokError("expected identifier in directive");
+ for (;;) {
+ MacroParameter Parameter;
+ if (getParser().ParseIdentifier(Parameter.first))
+ return TokError("expected identifier in '.macro' directive");
+
+ if (getLexer().is(AsmToken::Equal)) {
+ Lex();
+ if (getParser().ParseMacroArgument(Parameter.second, ArgumentDelimiter))
+ return true;
+ }
+
Parameters.push_back(Parameter);
- if (getLexer().isNot(AsmToken::Comma))
+ if (getLexer().is(AsmToken::Comma))
+ Lex();
+ else if (getLexer().is(AsmToken::EndOfStatement))
break;
- Lex();
}
}
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.macro' directive");
-
// Eat the end of statement.
Lex();
@@ -3296,7 +3497,7 @@ bool AsmParser::ParseDirectiveIrp(SMLoc DirectiveLoc) {
MacroParameters Parameters;
MacroParameter Parameter;
- if (ParseIdentifier(Parameter))
+ if (ParseIdentifier(Parameter.first))
return TokError("expected identifier in '.irp' directive");
Parameters.push_back(Parameter);
@@ -3323,9 +3524,8 @@ bool AsmParser::ParseDirectiveIrp(SMLoc DirectiveLoc) {
SmallString<256> Buf;
raw_svector_ostream OS(Buf);
- for (std::vector<MacroArgument>::iterator i = A.begin(), e = A.end(); i != e;
- ++i) {
- std::vector<MacroArgument> Args;
+ for (MacroArguments::iterator i = A.begin(), e = A.end(); i != e; ++i) {
+ MacroArguments Args;
Args.push_back(*i);
if (expandMacro(OS, M->Body, Parameters, Args, getTok().getLoc()))
@@ -3343,7 +3543,7 @@ bool AsmParser::ParseDirectiveIrpc(SMLoc DirectiveLoc) {
MacroParameters Parameters;
MacroParameter Parameter;
- if (ParseIdentifier(Parameter))
+ if (ParseIdentifier(Parameter.first))
return TokError("expected identifier in '.irpc' directive");
Parameters.push_back(Parameter);
@@ -3393,7 +3593,7 @@ bool AsmParser::ParseDirectiveIrpc(SMLoc DirectiveLoc) {
bool AsmParser::ParseDirectiveEndr(SMLoc DirectiveLoc) {
if (ActiveMacros.empty())
- return TokError("unexpected '.endr' directive, no current .rept");
+ return TokError("unmatched '.endr' directive");
// The only .rept blocks that should get here are the ones created by
// InstantiateMacroLikeBody.
@@ -3403,6 +3603,214 @@ bool AsmParser::ParseDirectiveEndr(SMLoc DirectiveLoc) {
return false;
}
+bool AsmParser::ParseDirectiveEmit(SMLoc IDLoc, ParseStatementInfo &Info) {
+ const MCExpr *Value;
+ SMLoc ExprLoc = getLexer().getLoc();
+ if (ParseExpression(Value))
+ return true;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value);
+ if (!MCE)
+ return Error(ExprLoc, "unexpected expression in _emit");
+ uint64_t IntValue = MCE->getValue();
+ if (!isUIntN(8, IntValue) && !isIntN(8, IntValue))
+ return Error(ExprLoc, "literal value out of range for directive");
+
+ Info.AsmRewrites->push_back(AsmRewrite(AOK_Emit, IDLoc, 5));
+ return false;
+}
+
+bool AsmParser::ParseMSInlineAsm(void *AsmLoc, std::string &AsmString,
+ unsigned &NumOutputs, unsigned &NumInputs,
+ SmallVectorImpl<std::pair<void *, bool> > &OpDecls,
+ SmallVectorImpl<std::string> &Constraints,
+ SmallVectorImpl<std::string> &Clobbers,
+ const MCInstrInfo *MII,
+ const MCInstPrinter *IP,
+ MCAsmParserSemaCallback &SI) {
+ SmallVector<void *, 4> InputDecls;
+ SmallVector<void *, 4> OutputDecls;
+ SmallVector<bool, 4> InputDeclsOffsetOf;
+ SmallVector<bool, 4> OutputDeclsOffsetOf;
+ SmallVector<std::string, 4> InputConstraints;
+ SmallVector<std::string, 4> OutputConstraints;
+ std::set<std::string> ClobberRegs;
+
+ SmallVector<struct AsmRewrite, 4> AsmStrRewrites;
+
+ // Prime the lexer.
+ Lex();
+
+ // While we have input, parse each statement.
+ unsigned InputIdx = 0;
+ unsigned OutputIdx = 0;
+ while (getLexer().isNot(AsmToken::Eof)) {
+ ParseStatementInfo Info(&AsmStrRewrites);
+ if (ParseStatement(Info))
+ return true;
+
+ if (Info.Opcode != ~0U) {
+ const MCInstrDesc &Desc = MII->get(Info.Opcode);
+
+ // Build the list of clobbers, outputs and inputs.
+ for (unsigned i = 1, e = Info.ParsedOperands.size(); i != e; ++i) {
+ MCParsedAsmOperand *Operand = Info.ParsedOperands[i];
+
+ // Immediate.
+ if (Operand->isImm()) {
+ if (Operand->needAsmRewrite())
+ AsmStrRewrites.push_back(AsmRewrite(AOK_ImmPrefix,
+ Operand->getStartLoc()));
+ continue;
+ }
+
+ // Register operand.
+ if (Operand->isReg() && !Operand->isOffsetOf()) {
+ unsigned NumDefs = Desc.getNumDefs();
+ // Clobber.
+ if (NumDefs && Operand->getMCOperandNum() < NumDefs) {
+ std::string Reg;
+ raw_string_ostream OS(Reg);
+ IP->printRegName(OS, Operand->getReg());
+ ClobberRegs.insert(StringRef(OS.str()));
+ }
+ continue;
+ }
+
+ // Expr/Input or Output.
+ unsigned Size;
+ void *OpDecl = SI.LookupInlineAsmIdentifier(Operand->getName(), AsmLoc,
+ Size);
+ if (OpDecl) {
+ bool isOutput = (i == 1) && Desc.mayStore();
+ if (!Operand->isOffsetOf() && Operand->needSizeDirective())
+ AsmStrRewrites.push_back(AsmRewrite(AOK_SizeDirective,
+ Operand->getStartLoc(),
+ /*Len*/0,
+ Operand->getMemSize()));
+ if (isOutput) {
+ std::string Constraint = "=";
+ ++InputIdx;
+ OutputDecls.push_back(OpDecl);
+ OutputDeclsOffsetOf.push_back(Operand->isOffsetOf());
+ Constraint += Operand->getConstraint().str();
+ OutputConstraints.push_back(Constraint);
+ AsmStrRewrites.push_back(AsmRewrite(AOK_Output,
+ Operand->getStartLoc(),
+ Operand->getNameLen()));
+ } else {
+ InputDecls.push_back(OpDecl);
+ InputDeclsOffsetOf.push_back(Operand->isOffsetOf());
+ InputConstraints.push_back(Operand->getConstraint().str());
+ AsmStrRewrites.push_back(AsmRewrite(AOK_Input,
+ Operand->getStartLoc(),
+ Operand->getNameLen()));
+ }
+ }
+ }
+ }
+ }
+
+ // Set the number of Outputs and Inputs.
+ NumOutputs = OutputDecls.size();
+ NumInputs = InputDecls.size();
+
+ // Set the unique clobbers.
+ for (std::set<std::string>::iterator I = ClobberRegs.begin(),
+ E = ClobberRegs.end(); I != E; ++I)
+ Clobbers.push_back(*I);
+
+ // Merge the various outputs and inputs. Output are expected first.
+ if (NumOutputs || NumInputs) {
+ unsigned NumExprs = NumOutputs + NumInputs;
+ OpDecls.resize(NumExprs);
+ Constraints.resize(NumExprs);
+ // FIXME: Constraints are hard coded to 'm', but we need an 'r'
+ // constraint for offsetof. This needs to be cleaned up!
+ for (unsigned i = 0; i < NumOutputs; ++i) {
+ OpDecls[i] = std::make_pair(OutputDecls[i], OutputDeclsOffsetOf[i]);
+ Constraints[i] = OutputDeclsOffsetOf[i] ? "=r" : OutputConstraints[i];
+ }
+ for (unsigned i = 0, j = NumOutputs; i < NumInputs; ++i, ++j) {
+ OpDecls[j] = std::make_pair(InputDecls[i], InputDeclsOffsetOf[i]);
+ Constraints[j] = InputDeclsOffsetOf[i] ? "r" : InputConstraints[i];
+ }
+ }
+
+ // Build the IR assembly string.
+ std::string AsmStringIR;
+ AsmRewriteKind PrevKind = AOK_Imm;
+ raw_string_ostream OS(AsmStringIR);
+ const char *Start = SrcMgr.getMemoryBuffer(0)->getBufferStart();
+ for (SmallVectorImpl<struct AsmRewrite>::iterator
+ I = AsmStrRewrites.begin(), E = AsmStrRewrites.end(); I != E; ++I) {
+ const char *Loc = (*I).Loc.getPointer();
+
+ AsmRewriteKind Kind = (*I).Kind;
+
+ // Emit everything up to the immediate/expression. If the previous rewrite
+ // was a size directive, then this has already been done.
+ if (PrevKind != AOK_SizeDirective)
+ OS << StringRef(Start, Loc - Start);
+ PrevKind = Kind;
+
+ // Skip the original expression.
+ if (Kind == AOK_Skip) {
+ Start = Loc + (*I).Len;
+ continue;
+ }
+
+ // Rewrite expressions in $N notation.
+ switch (Kind) {
+ default: break;
+ case AOK_Imm:
+ OS << Twine("$$");
+ OS << (*I).Val;
+ break;
+ case AOK_ImmPrefix:
+ OS << Twine("$$");
+ break;
+ case AOK_Input:
+ OS << '$';
+ OS << InputIdx++;
+ break;
+ case AOK_Output:
+ OS << '$';
+ OS << OutputIdx++;
+ break;
+ case AOK_SizeDirective:
+ switch((*I).Val) {
+ default: break;
+ case 8: OS << "byte ptr "; break;
+ case 16: OS << "word ptr "; break;
+ case 32: OS << "dword ptr "; break;
+ case 64: OS << "qword ptr "; break;
+ case 80: OS << "xword ptr "; break;
+ case 128: OS << "xmmword ptr "; break;
+ case 256: OS << "ymmword ptr "; break;
+ }
+ break;
+ case AOK_Emit:
+ OS << ".byte";
+ break;
+ case AOK_DotOperator:
+ OS << (*I).Val;
+ break;
+ }
+
+ // Skip the original expression.
+ if (Kind != AOK_SizeDirective)
+ Start = Loc + (*I).Len;
+ }
+
+ // Emit the remainder of the asm string.
+ const char *AsmEnd = SrcMgr.getMemoryBuffer(0)->getBufferEnd();
+ if (Start != AsmEnd)
+ OS << StringRef(Start, AsmEnd - Start);
+
+ AsmString = OS.str();
+ return false;
+}
+
/// \brief Create an MCAsmParser instance.
MCAsmParser *llvm::createMCAsmParser(SourceMgr &SM,
MCContext &C, MCStreamer &Out,
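
An end-to-end sketch of what the rewrite loop in ParseMSInlineAsm produces for one MS-style memory operand (identifier and sizes hypothetical):

    // Source statement:  mov eax, Count
    // 'Count' resolves via LookupInlineAsmIdentifier to a 32-bit variable,
    // so AOK_SizeDirective and AOK_Input rewrites fire, yielding:
    //   rewritten asm string:  mov eax, dword ptr $0
    //   OpDecls[0]     = { decl for 'Count', /*IsOffsetOf=*/false }
    //   Constraints[0] = "m"   (outputs, when present, come first as "=...")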
diff --git a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index 9316bb1..d55de1f 100644
--- a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -203,7 +203,7 @@ bool ELFAsmParser::ParseDirectiveSize(StringRef, SMLoc) {
StringRef Name;
if (getParser().ParseIdentifier(Name))
return TokError("expected identifier in directive");
- MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);;
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in directive");
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
index 3a3ff14..384b341 100644
--- a/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
@@ -12,7 +12,8 @@
using namespace llvm;
-MCAsmLexer::MCAsmLexer() : CurTok(AsmToken::Error, StringRef()), TokStart(0) {
+MCAsmLexer::MCAsmLexer() : CurTok(AsmToken::Error, StringRef()),
+ TokStart(0), SkipSpace(true) {
}
MCAsmLexer::~MCAsmLexer() {
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
index 3a825f0..6967fee 100644
--- a/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
@@ -44,5 +44,7 @@ bool MCAsmParser::ParseExpression(const MCExpr *&Res) {
}
void MCParsedAsmOperand::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dbgs() << " " << *this;
+#endif
}
diff --git a/contrib/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp
index 6fb1ba4..60a3a3b 100644
--- a/contrib/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp
@@ -11,7 +11,7 @@
using namespace llvm;
MCTargetAsmParser::MCTargetAsmParser()
- : AvailableFeatures(0)
+ : AvailableFeatures(0), ParsingInlineAsm(false)
{
}
diff --git a/contrib/llvm/lib/MC/MCRegisterInfo.cpp b/contrib/llvm/lib/MC/MCRegisterInfo.cpp
index 4d1aff3..5c71106 100644
--- a/contrib/llvm/lib/MC/MCRegisterInfo.cpp
+++ b/contrib/llvm/lib/MC/MCRegisterInfo.cpp
@@ -24,6 +24,8 @@ unsigned MCRegisterInfo::getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
}
unsigned MCRegisterInfo::getSubReg(unsigned Reg, unsigned Idx) const {
+ assert(Idx && Idx < getNumSubRegIndices() &&
+ "This is not a subregister index");
// Get a pointer to the corresponding SubRegIndices list. This list has the
// name of each sub-register in the same order as MCSubRegIterator.
const uint16_t *SRI = SubRegIndices + get(Reg).SubRegIndices;
@@ -34,6 +36,7 @@ unsigned MCRegisterInfo::getSubReg(unsigned Reg, unsigned Idx) const {
}
unsigned MCRegisterInfo::getSubRegIndex(unsigned Reg, unsigned SubReg) const {
+ assert(SubReg && SubReg < getNumRegs() && "This is not a register");
// Get a pointer to the corresponding SubRegIndices list. This list has the
// name of each sub-register in the same order as MCSubRegIterator.
const uint16_t *SRI = SubRegIndices + get(Reg).SubRegIndices;
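
The new assertions tighten the contract of the sub-register queries: index 0 is no longer a silent no-op. A sketch of a caller that respects it (arguments hypothetical):

    #include "llvm/MC/MCRegisterInfo.h"
    using namespace llvm;

    static unsigned subRegOrSelf(const MCRegisterInfo &MRI, unsigned Reg,
                                 unsigned Idx) {
      // getSubReg(Reg, 0) now asserts, so guard the query; a return of 0
      // still means "Reg has no sub-register at Idx".
      return Idx ? MRI.getSubReg(Reg, Idx) : Reg;
    }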
diff --git a/contrib/llvm/lib/MC/MCStreamer.cpp b/contrib/llvm/lib/MC/MCStreamer.cpp
index 0bac24d..afece0b 100644
--- a/contrib/llvm/lib/MC/MCStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCStreamer.cpp
@@ -561,6 +561,10 @@ void MCStreamer::EmitRegSave(const SmallVectorImpl<unsigned> &RegList, bool) {
abort();
}
+void MCStreamer::EmitTCEntry(const MCSymbol &S) {
+ llvm_unreachable("Unsupported method");
+}
+
/// EmitRawText - If this file is backed by an assembly streamer, this dumps
/// the specified string in the output .s file. This capability is
/// indicated by the hasRawTextSupport() predicate.
diff --git a/contrib/llvm/lib/MC/MCSubtargetInfo.cpp b/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
index 05c83f7..80a1f02 100644
--- a/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
+++ b/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
@@ -19,11 +19,28 @@ using namespace llvm;
MCSchedModel MCSchedModel::DefaultSchedModel; // For unknown processors.
+/// InitMCProcessorInfo - Set or change the CPU (optionally supplemented
+/// with feature string). Recompute feature bits and scheduling model.
+void
+MCSubtargetInfo::InitMCProcessorInfo(StringRef CPU, StringRef FS) {
+ SubtargetFeatures Features(FS);
+ FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs,
+ ProcFeatures, NumFeatures);
+
+ if (!CPU.empty())
+ CPUSchedModel = getSchedModelForCPU(CPU);
+ else
+ CPUSchedModel = &MCSchedModel::DefaultSchedModel;
+}
+
void
MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
const SubtargetFeatureKV *PF,
const SubtargetFeatureKV *PD,
const SubtargetInfoKV *ProcSched,
+ const MCWriteProcResEntry *WPR,
+ const MCWriteLatencyEntry *WL,
+ const MCReadAdvanceEntry *RA,
const InstrStage *IS,
const unsigned *OC,
const unsigned *FP,
@@ -31,26 +48,18 @@ MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
TargetTriple = TT;
ProcFeatures = PF;
ProcDesc = PD;
- ProcSchedModel = ProcSched;
+ ProcSchedModels = ProcSched;
+ WriteProcResTable = WPR;
+ WriteLatencyTable = WL;
+ ReadAdvanceTable = RA;
+
Stages = IS;
OperandCycles = OC;
ForwardingPaths = FP;
NumFeatures = NF;
NumProcs = NP;
- SubtargetFeatures Features(FS);
- FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs,
- ProcFeatures, NumFeatures);
-}
-
-
-/// ReInitMCSubtargetInfo - Change CPU (and optionally supplemented with
-/// feature string) and recompute feature bits.
-uint64_t MCSubtargetInfo::ReInitMCSubtargetInfo(StringRef CPU, StringRef FS) {
- SubtargetFeatures Features(FS);
- FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs,
- ProcFeatures, NumFeatures);
- return FeatureBits;
+ InitMCProcessorInfo(CPU, FS);
}
/// ToggleFeature - Toggle a feature and returns the re-computed feature
@@ -70,13 +79,13 @@ uint64_t MCSubtargetInfo::ToggleFeature(StringRef FS) {
}
-MCSchedModel *
+const MCSchedModel *
MCSubtargetInfo::getSchedModelForCPU(StringRef CPU) const {
- assert(ProcSchedModel && "Processor machine model not available!");
+ assert(ProcSchedModels && "Processor machine model not available!");
#ifndef NDEBUG
for (size_t i = 1; i < NumProcs; i++) {
- assert(strcmp(ProcSchedModel[i - 1].Key, ProcSchedModel[i].Key) < 0 &&
+ assert(strcmp(ProcSchedModels[i - 1].Key, ProcSchedModels[i].Key) < 0 &&
"Processor machine model table is not sorted");
}
#endif
@@ -85,19 +94,25 @@ MCSubtargetInfo::getSchedModelForCPU(StringRef CPU) const {
SubtargetInfoKV KV;
KV.Key = CPU.data();
const SubtargetInfoKV *Found =
- std::lower_bound(ProcSchedModel, ProcSchedModel+NumProcs, KV);
- if (Found == ProcSchedModel+NumProcs || StringRef(Found->Key) != CPU) {
+ std::lower_bound(ProcSchedModels, ProcSchedModels+NumProcs, KV);
+ if (Found == ProcSchedModels+NumProcs || StringRef(Found->Key) != CPU) {
errs() << "'" << CPU
<< "' is not a recognized processor for this target"
<< " (ignoring processor)\n";
return &MCSchedModel::DefaultSchedModel;
}
assert(Found->Value && "Missing processor SchedModel value");
- return (MCSchedModel *)Found->Value;
+ return (const MCSchedModel *)Found->Value;
}
InstrItineraryData
MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
- MCSchedModel *SchedModel = getSchedModelForCPU(CPU);
+ const MCSchedModel *SchedModel = getSchedModelForCPU(CPU);
return InstrItineraryData(SchedModel, Stages, OperandCycles, ForwardingPaths);
}
+
+/// Initialize an InstrItineraryData instance.
+void MCSubtargetInfo::initInstrItins(InstrItineraryData &InstrItins) const {
+ InstrItins =
+ InstrItineraryData(CPUSchedModel, Stages, OperandCycles, ForwardingPaths);
+}
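
A sketch of re-targeting through the refactored hooks (CPU and feature strings hypothetical); this replaces the removed ReInitMCSubtargetInfo:

    #include "llvm/MC/MCInstrItineraries.h"
    #include "llvm/MC/MCSubtargetInfo.h"
    using namespace llvm;

    static void retarget(MCSubtargetInfo &STI) {
      // Recompute feature bits and adopt the CPU's scheduling model.
      STI.InitMCProcessorInfo("cortex-a9", "+neon");

      InstrItineraryData Itins;
      STI.initInstrItins(Itins); // itineraries now come from CPUSchedModel
    }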
diff --git a/contrib/llvm/lib/MC/MCSymbol.cpp b/contrib/llvm/lib/MC/MCSymbol.cpp
index f7f9184..b973c57 100644
--- a/contrib/llvm/lib/MC/MCSymbol.cpp
+++ b/contrib/llvm/lib/MC/MCSymbol.cpp
@@ -26,7 +26,7 @@ static bool isAcceptableChar(char C) {
return true;
}
-/// NameNeedsQuoting - Return true if the identifier \arg Str needs quotes to be
+/// NameNeedsQuoting - Return true if the identifier \p Str needs quotes to be
/// syntactically correct.
static bool NameNeedsQuoting(StringRef Str) {
assert(!Str.empty() && "Cannot create an empty MCSymbol");
@@ -76,6 +76,8 @@ void MCSymbol::print(raw_ostream &OS) const {
OS << '"' << getName() << '"';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCSymbol::dump() const {
print(dbgs());
}
+#endif
diff --git a/contrib/llvm/lib/MC/MCValue.cpp b/contrib/llvm/lib/MC/MCValue.cpp
index c6ea16c..4393777 100644
--- a/contrib/llvm/lib/MC/MCValue.cpp
+++ b/contrib/llvm/lib/MC/MCValue.cpp
@@ -31,6 +31,8 @@ void MCValue::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
OS << " + " << getConstant();
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCValue::dump() const {
print(dbgs(), 0);
}
+#endif
diff --git a/contrib/llvm/lib/MC/MachObjectWriter.cpp b/contrib/llvm/lib/MC/MachObjectWriter.cpp
index 5820a22..a94b214 100644
--- a/contrib/llvm/lib/MC/MachObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MachObjectWriter.cpp
@@ -68,6 +68,11 @@ uint64_t MachObjectWriter::getSymbolAddress(const MCSymbolData* SD,
// If this is a variable, then recursively evaluate now.
if (S.isVariable()) {
+ if (const MCConstantExpr *C =
+ dyn_cast<const MCConstantExpr>(S.getVariableValue()))
+ return C->getValue();
+
MCValue Target;
if (!S.getVariableValue()->EvaluateAsRelocatable(Target, Layout))
report_fatal_error("unable to evaluate offset for variable '" +
@@ -140,8 +145,8 @@ void MachObjectWriter::WriteHeader(unsigned NumLoadCommands,
/// WriteSegmentLoadCommand - Write a segment load command.
///
-/// \arg NumSections - The number of sections in this segment.
-/// \arg SectionDataSize - The total size of the sections.
+/// \param NumSections The number of sections in this segment.
+/// \param SectionDataSize The total size of the sections.
void MachObjectWriter::WriteSegmentLoadCommand(unsigned NumSections,
uint64_t VMSize,
uint64_t SectionDataStartOffset,
@@ -315,11 +320,7 @@ void MachObjectWriter::WriteNlist(MachSymbolData &MSD,
// Compute the symbol address.
if (Symbol.isDefined()) {
- if (Symbol.isAbsolute()) {
- Address = cast<MCConstantExpr>(Symbol.getVariableValue())->getValue();
- } else {
- Address = getSymbolAddress(&Data, Layout);
- }
+ Address = getSymbolAddress(&Data, Layout);
} else if (Data.isCommon()) {
// Common symbols are encoded with the size in the address
// field, and their alignment in the flags.
@@ -396,8 +397,7 @@ void MachObjectWriter::BindIndirectSymbols(MCAssembler &Asm) {
continue;
// Initialize the section indirect symbol base, if necessary.
- if (!IndirectSymBase.count(it->SectionData))
- IndirectSymBase[it->SectionData] = IndirectIndex;
+ IndirectSymBase.insert(std::make_pair(it->SectionData, IndirectIndex));
Asm.getOrCreateSymbolData(*it->Symbol);
}
@@ -414,8 +414,7 @@ void MachObjectWriter::BindIndirectSymbols(MCAssembler &Asm) {
continue;
// Initialize the section indirect symbol base, if necessary.
- if (!IndirectSymBase.count(it->SectionData))
- IndirectSymBase[it->SectionData] = IndirectIndex;
+ IndirectSymBase.insert(std::make_pair(it->SectionData, IndirectIndex));
// Set the symbol type to undefined lazy, but only on construction.
//
@@ -559,6 +558,26 @@ void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm,
}
}
+void MachObjectWriter::markAbsoluteVariableSymbols(MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ for (MCAssembler::symbol_iterator i = Asm.symbol_begin(),
+ e = Asm.symbol_end();
+ i != e; ++i) {
+ MCSymbolData &SD = *i;
+ if (!SD.getSymbol().isVariable())
+ continue;
+
+    // If the variable is a symbol difference (SA - SB + C) expression,
+ // and neither symbol is external, mark the variable as absolute.
+ const MCExpr *Expr = SD.getSymbol().getVariableValue();
+ MCValue Value;
+ if (Expr->EvaluateAsRelocatable(Value, Layout)) {
+ if (Value.getSymA() && Value.getSymB())
+ const_cast<MCSymbol*>(&SD.getSymbol())->setAbsolute();
+ }
+ }
+}
+
void MachObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
const MCAsmLayout &Layout) {
computeSectionAddresses(Asm, Layout);
@@ -566,6 +585,10 @@ void MachObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
// Create symbol data for any indirect symbols.
BindIndirectSymbols(Asm);
+ // Mark symbol difference expressions in variables (from .set or = directives)
+ // as absolute.
+ markAbsoluteVariableSymbols(Asm, Layout);
+
// Compute symbol table information and bind symbol indices.
ComputeSymbolTable(Asm, StringTable, LocalSymbolData, ExternalSymbolData,
UndefinedSymbolData);
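
The practical effect of markAbsoluteVariableSymbols, sketched at the assembly level (labels hypothetical):

    // .set delta, _end - _start   -- neither symbol external
    // 'delta' evaluates to SA - SB + C, so it is marked absolute and
    // WriteNlist stores its constant value directly; no relocation is
    // emitted for it.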
@@ -797,8 +820,12 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
it = Asm.data_region_begin(), ie = Asm.data_region_end();
it != ie; ++it) {
const DataRegionData *Data = &(*it);
- uint64_t Start = getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->Start), Layout);
- uint64_t End = getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->End), Layout);
+ uint64_t Start =
+ getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->Start),
+ Layout);
+ uint64_t End =
+ getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->End),
+ Layout);
DEBUG(dbgs() << "data in code region-- kind: " << Data->Kind
<< " start: " << Start << "(" << Data->Start->getName() << ")"
<< " end: " << End << "(" << Data->End->getName() << ")"
diff --git a/contrib/llvm/lib/MC/SubtargetFeature.cpp b/contrib/llvm/lib/MC/SubtargetFeature.cpp
index 0a44e77..7625abd 100644
--- a/contrib/llvm/lib/MC/SubtargetFeature.cpp
+++ b/contrib/llvm/lib/MC/SubtargetFeature.cpp
@@ -119,14 +119,15 @@ void SubtargetFeatures::AddFeature(const StringRef String,
}
/// Find KV in array using binary search.
-template<typename T> const T *Find(const StringRef S, const T *A, size_t L) {
+static const SubtargetFeatureKV *Find(StringRef S, const SubtargetFeatureKV *A,
+ size_t L) {
// Make the lower bound element we're looking for
- T KV;
+ SubtargetFeatureKV KV;
KV.Key = S.data();
// Determine the end of the array
- const T *Hi = A + L;
+ const SubtargetFeatureKV *Hi = A + L;
// Binary search the array
- const T *F = std::lower_bound(A, Hi, KV);
+ const SubtargetFeatureKV *F = std::lower_bound(A, Hi, KV);
// If not found then return NULL
if (F == Hi || StringRef(F->Key) != S) return NULL;
// Return the found array item
@@ -336,30 +337,6 @@ uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU,
return Bits;
}
-/// Get scheduling itinerary of a CPU.
-void *SubtargetFeatures::getItinerary(const StringRef CPU,
- const SubtargetInfoKV *Table,
- size_t TableSize) {
- assert(Table && "missing table");
-#ifndef NDEBUG
- for (size_t i = 1; i < TableSize; i++) {
- assert(strcmp(Table[i - 1].Key, Table[i].Key) < 0 && "Table is not sorted");
- }
-#endif
-
- // Find entry
- const SubtargetInfoKV *Entry = Find(CPU, Table, TableSize);
-
- if (Entry) {
- return Entry->Value;
- } else {
- errs() << "'" << CPU
- << "' is not a recognized processor for this target"
- << " (ignoring processor)\n";
- return NULL;
- }
-}
-
/// print - Print feature string.
///
void SubtargetFeatures::print(raw_ostream &OS) const {
@@ -368,11 +345,13 @@ void SubtargetFeatures::print(raw_ostream &OS) const {
OS << "\n";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Dump feature info.
///
void SubtargetFeatures::dump() const {
print(dbgs());
}
+#endif
/// getDefaultSubtargetFeatures - Return a string listing the features
/// associated with the target triple.
diff --git a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
index b026277..702eec0 100644
--- a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
+++ b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
@@ -70,11 +70,6 @@ public:
uint64_t Size,unsigned ByteAlignment);
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment);
- virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
- unsigned ValueSize, unsigned MaxBytesToEmit);
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit);
virtual void EmitFileDirective(StringRef Filename);
virtual void EmitInstruction(const MCInst &Instruction);
virtual void EmitWin64EHHandlerData();
@@ -333,43 +328,6 @@ void WinCOFFStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
llvm_unreachable("not implemented");
}
-void WinCOFFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
- // TODO: This is copied exactly from the MachOStreamer. Consider merging into
- // MCObjectStreamer?
- getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
-}
-
-void WinCOFFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
- int64_t Value,
- unsigned ValueSize,
- unsigned MaxBytesToEmit) {
- // TODO: This is copied exactly from the MachOStreamer. Consider merging into
- // MCObjectStreamer?
- if (MaxBytesToEmit == 0)
- MaxBytesToEmit = ByteAlignment;
- new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
- getCurrentSectionData());
-
- // Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > getCurrentSectionData()->getAlignment())
- getCurrentSectionData()->setAlignment(ByteAlignment);
-}
-
-void WinCOFFStreamer::EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit) {
- // TODO: This is copied exactly from the MachOStreamer. Consider merging into
- // MCObjectStreamer?
- if (MaxBytesToEmit == 0)
- MaxBytesToEmit = ByteAlignment;
- MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
- getCurrentSectionData());
- F->setEmitNops(true);
-
- // Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > getCurrentSectionData()->getAlignment())
- getCurrentSectionData()->setAlignment(ByteAlignment);
-}
-
void WinCOFFStreamer::EmitFileDirective(StringRef Filename) {
// Ignore for now, linkers don't care, and proper debug
// info will be a much larger effort.
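
These deletions resolve the TODO comments they carried: EmitBytes, EmitValueToAlignment, and EmitCodeAlignment were byte-for-byte copies of the MachOStreamer versions, and the shared code now lives once in the common MCObjectStreamer base class, so WinCOFFStreamer simply inherits it. The shape of the refactor, in a deliberately simplified sketch (class and member names illustrative):

#include <cstddef>

class ObjectStreamerBase {
public:
  virtual ~ObjectStreamerBase() {}
  // Formerly duplicated in every object-file streamer; defined once here.
  void emitBytes(const char *Data, size_t Len) { /* append to data fragment */ }
};

class CoffStreamer : public ObjectStreamerBase {
  // No emitBytes override needed: the inherited behavior is already correct.
};
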
diff --git a/contrib/llvm/lib/Object/COFFObjectFile.cpp b/contrib/llvm/lib/Object/COFFObjectFile.cpp
index 8ab54c6..0b7ee34 100644
--- a/contrib/llvm/lib/Object/COFFObjectFile.cpp
+++ b/contrib/llvm/lib/Object/COFFObjectFile.cpp
@@ -288,6 +288,11 @@ error_code COFFObjectFile::getSymbolSection(DataRefImpl Symb,
return object_error::success;
}
+error_code COFFObjectFile::getSymbolValue(DataRefImpl Symb,
+ uint64_t &Val) const {
+ report_fatal_error("getSymbolValue unimplemented in COFFObjectFile");
+}
+
error_code COFFObjectFile::getSectionNext(DataRefImpl Sec,
SectionRef &Result) const {
const coff_section *sec = toSec(Sec);
@@ -372,7 +377,14 @@ error_code COFFObjectFile::isSectionVirtual(DataRefImpl Sec,
error_code COFFObjectFile::isSectionZeroInit(DataRefImpl Sec,
bool &Result) const {
- // FIXME: Unimplemented
+ // FIXME: Unimplemented.
+ Result = false;
+ return object_error::success;
+}
+
+error_code COFFObjectFile::isSectionReadOnlyData(DataRefImpl Sec,
+ bool &Result) const {
+ // FIXME: Unimplemented.
Result = false;
return object_error::success;
}
diff --git a/contrib/llvm/lib/Object/MachOObjectFile.cpp b/contrib/llvm/lib/Object/MachOObjectFile.cpp
index d229671..45aeaac 100644
--- a/contrib/llvm/lib/Object/MachOObjectFile.cpp
+++ b/contrib/llvm/lib/Object/MachOObjectFile.cpp
@@ -363,6 +363,10 @@ error_code MachOObjectFile::getSymbolType(DataRefImpl Symb,
return object_error::success;
}
+error_code MachOObjectFile::getSymbolValue(DataRefImpl Symb,
+ uint64_t &Val) const {
+ report_fatal_error("getSymbolValue unimplemented in MachOObjectFile");
+}
symbol_iterator MachOObjectFile::begin_symbols() const {
// DRI.d.a = segment number; DRI.d.b = symbol index.
@@ -581,14 +585,14 @@ error_code MachOObjectFile::isSectionBSS(DataRefImpl DRI,
error_code MachOObjectFile::isSectionRequiredForExecution(DataRefImpl Sec,
bool &Result) const {
- // FIXME: Unimplemented
+ // FIXME: Unimplemented.
Result = true;
return object_error::success;
}
error_code MachOObjectFile::isSectionVirtual(DataRefImpl Sec,
- bool &Result) const {
- // FIXME: Unimplemented
+ bool &Result) const {
+ // FIXME: Unimplemented.
Result = false;
return object_error::success;
}
@@ -612,6 +616,17 @@ error_code MachOObjectFile::isSectionZeroInit(DataRefImpl DRI,
return object_error::success;
}
+error_code MachOObjectFile::isSectionReadOnlyData(DataRefImpl Sec,
+ bool &Result) const {
+ // Consider using the code from isSectionText to look for __const sections.
+  // Alternatively, emit S_ATTR_PURE_INSTRUCTIONS and/or S_ATTR_SOME_INSTRUCTIONS
+ // to use section attributes to distinguish code from data.
+
+ // FIXME: Unimplemented.
+ Result = false;
+ return object_error::success;
+}
+
error_code MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec,
DataRefImpl Symb,
bool &Result) const {
diff --git a/contrib/llvm/lib/Support/APFloat.cpp b/contrib/llvm/lib/Support/APFloat.cpp
index ed261a4..7e8b4a3 100644
--- a/contrib/llvm/lib/Support/APFloat.cpp
+++ b/contrib/llvm/lib/Support/APFloat.cpp
@@ -46,22 +46,27 @@ namespace llvm {
/* Number of bits in the significand. This includes the integer
bit. */
unsigned int precision;
-
- /* True if arithmetic is supported. */
- unsigned int arithmeticOK;
};
- const fltSemantics APFloat::IEEEhalf = { 15, -14, 11, true };
- const fltSemantics APFloat::IEEEsingle = { 127, -126, 24, true };
- const fltSemantics APFloat::IEEEdouble = { 1023, -1022, 53, true };
- const fltSemantics APFloat::IEEEquad = { 16383, -16382, 113, true };
- const fltSemantics APFloat::x87DoubleExtended = { 16383, -16382, 64, true };
- const fltSemantics APFloat::Bogus = { 0, 0, 0, true };
-
- // The PowerPC format consists of two doubles. It does not map cleanly
- // onto the usual format above. For now only storage of constants of
- // this type is supported, no arithmetic.
- const fltSemantics APFloat::PPCDoubleDouble = { 1023, -1022, 106, false };
+ const fltSemantics APFloat::IEEEhalf = { 15, -14, 11 };
+ const fltSemantics APFloat::IEEEsingle = { 127, -126, 24 };
+ const fltSemantics APFloat::IEEEdouble = { 1023, -1022, 53 };
+ const fltSemantics APFloat::IEEEquad = { 16383, -16382, 113 };
+ const fltSemantics APFloat::x87DoubleExtended = { 16383, -16382, 64 };
+ const fltSemantics APFloat::Bogus = { 0, 0, 0 };
+
+ /* The PowerPC format consists of two doubles. It does not map cleanly
+ onto the usual format above. It is approximated using twice the
+ mantissa bits. Note that for exponents near the double minimum,
+ we no longer can represent the full 106 mantissa bits, so those
+ will be treated as denormal numbers.
+
+ FIXME: While this approximation is equivalent to what GCC uses for
+ compile-time arithmetic on PPC double-double numbers, it is not able
+ to represent all possible values held by a PPC double-double number,
+ for example: (long double) 1.0 + (long double) 0x1p-106
+ Should this be replaced by a full emulation of PPC double-double? */
+ const fltSemantics APFloat::PPCDoubleDouble = { 1023, -1022 + 53, 53 + 53 };
/* A tight upper bound on number of parts required to hold the value
pow(5, power) is
@@ -116,12 +121,6 @@ hexDigitValue(unsigned int c)
return -1U;
}
-static inline void
-assertArithmeticOK(const llvm::fltSemantics &semantics) {
- assert(semantics.arithmeticOK &&
- "Compile-time arithmetic does not support these semantics");
-}
-
/* Return the value of a decimal exponent of the form
[+-]ddddddd.
@@ -196,8 +195,10 @@ totalExponent(StringRef::iterator p, StringRef::iterator end,
assert(value < 10U && "Invalid character in exponent");
unsignedExponent = unsignedExponent * 10 + value;
- if (unsignedExponent > 32767)
+ if (unsignedExponent > 32767) {
overflow = true;
+ break;
+ }
}
if (exponentAdjustment > 32767 || exponentAdjustment < -32768)
@@ -610,8 +611,6 @@ APFloat::assign(const APFloat &rhs)
sign = rhs.sign;
category = rhs.category;
exponent = rhs.exponent;
- sign2 = rhs.sign2;
- exponent2 = rhs.exponent2;
if (category == fcNormal || category == fcNaN)
copySignificand(rhs);
}
@@ -705,16 +704,10 @@ APFloat::bitwiseIsEqual(const APFloat &rhs) const {
category != rhs.category ||
sign != rhs.sign)
return false;
- if (semantics==(const llvm::fltSemantics*)&PPCDoubleDouble &&
- sign2 != rhs.sign2)
- return false;
if (category==fcZero || category==fcInfinity)
return true;
else if (category==fcNormal && exponent!=rhs.exponent)
return false;
- else if (semantics==(const llvm::fltSemantics*)&PPCDoubleDouble &&
- exponent2!=rhs.exponent2)
- return false;
else {
int i= partCount();
const integerPart* p=significandParts();
@@ -727,9 +720,7 @@ APFloat::bitwiseIsEqual(const APFloat &rhs) const {
}
}
-APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value)
- : exponent2(0), sign2(0) {
- assertArithmeticOK(ourSemantics);
+APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value) {
initialize(&ourSemantics);
sign = 0;
zeroSignificand();
@@ -738,24 +729,19 @@ APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value)
normalize(rmNearestTiesToEven, lfExactlyZero);
}
-APFloat::APFloat(const fltSemantics &ourSemantics) : exponent2(0), sign2(0) {
- assertArithmeticOK(ourSemantics);
+APFloat::APFloat(const fltSemantics &ourSemantics) {
initialize(&ourSemantics);
category = fcZero;
sign = false;
}
-APFloat::APFloat(const fltSemantics &ourSemantics, uninitializedTag tag)
- : exponent2(0), sign2(0) {
- assertArithmeticOK(ourSemantics);
+APFloat::APFloat(const fltSemantics &ourSemantics, uninitializedTag tag) {
// Allocates storage if necessary but does not initialize it.
initialize(&ourSemantics);
}
APFloat::APFloat(const fltSemantics &ourSemantics,
- fltCategory ourCategory, bool negative)
- : exponent2(0), sign2(0) {
- assertArithmeticOK(ourSemantics);
+ fltCategory ourCategory, bool negative) {
initialize(&ourSemantics);
category = ourCategory;
sign = negative;
@@ -765,14 +751,12 @@ APFloat::APFloat(const fltSemantics &ourSemantics,
makeNaN();
}
-APFloat::APFloat(const fltSemantics &ourSemantics, StringRef text)
- : exponent2(0), sign2(0) {
- assertArithmeticOK(ourSemantics);
+APFloat::APFloat(const fltSemantics &ourSemantics, StringRef text) {
initialize(&ourSemantics);
convertFromString(text, rmNearestTiesToEven);
}
-APFloat::APFloat(const APFloat &rhs) : exponent2(0), sign2(0) {
+APFloat::APFloat(const APFloat &rhs) {
initialize(rhs.semantics);
assign(rhs);
}
@@ -1559,8 +1543,6 @@ APFloat::addOrSubtract(const APFloat &rhs, roundingMode rounding_mode,
{
opStatus fs;
- assertArithmeticOK(*semantics);
-
fs = addOrSubtractSpecials(rhs, subtract);
/* This return code means it was not a simple case. */
@@ -1605,7 +1587,6 @@ APFloat::multiply(const APFloat &rhs, roundingMode rounding_mode)
{
opStatus fs;
- assertArithmeticOK(*semantics);
sign ^= rhs.sign;
fs = multiplySpecials(rhs);
@@ -1625,7 +1606,6 @@ APFloat::divide(const APFloat &rhs, roundingMode rounding_mode)
{
opStatus fs;
- assertArithmeticOK(*semantics);
sign ^= rhs.sign;
fs = divideSpecials(rhs);
@@ -1647,7 +1627,6 @@ APFloat::remainder(const APFloat &rhs)
APFloat V = *this;
unsigned int origSign = sign;
- assertArithmeticOK(*semantics);
fs = V.divide(rhs, rmNearestTiesToEven);
if (fs == opDivByZero)
return fs;
@@ -1682,7 +1661,6 @@ APFloat::opStatus
APFloat::mod(const APFloat &rhs, roundingMode rounding_mode)
{
opStatus fs;
- assertArithmeticOK(*semantics);
fs = modSpecials(rhs);
if (category == fcNormal && rhs.category == fcNormal) {
@@ -1726,8 +1704,6 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand,
{
opStatus fs;
- assertArithmeticOK(*semantics);
-
/* Post-multiplication sign, before addition. */
sign ^= multiplicand.sign;
@@ -1768,12 +1744,11 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand,
/* Rounding-mode correct round to integral value. */
APFloat::opStatus APFloat::roundToIntegral(roundingMode rounding_mode) {
opStatus fs;
- assertArithmeticOK(*semantics);
// If the exponent is large enough, we know that this value is already
// integral, and the arithmetic below would potentially cause it to saturate
// to +/-Inf. Bail out early instead.
- if (exponent+1 >= (int)semanticsPrecision(*semantics))
+ if (category == fcNormal && exponent+1 >= (int)semanticsPrecision(*semantics))
return opOK;
// The algorithm here is quite simple: we add 2^(p-1), where p is the
@@ -1815,7 +1790,6 @@ APFloat::compare(const APFloat &rhs) const
{
cmpResult result;
- assertArithmeticOK(*semantics);
assert(semantics == rhs.semantics);
switch (convolve(category, rhs.category)) {
@@ -1900,8 +1874,6 @@ APFloat::convert(const fltSemantics &toSemantics,
int shift;
const fltSemantics &fromSemantics = *semantics;
- assertArithmeticOK(fromSemantics);
- assertArithmeticOK(toSemantics);
lostFraction = lfExactlyZero;
newPartCount = partCountForBits(toSemantics.precision + 1);
oldPartCount = partCount();
@@ -1986,8 +1958,6 @@ APFloat::convertToSignExtendedInteger(integerPart *parts, unsigned int width,
const integerPart *src;
unsigned int dstPartsCount, truncatedBits;
- assertArithmeticOK(*semantics);
-
*isExact = false;
/* Handle the three special cases first. */
@@ -2149,7 +2119,6 @@ APFloat::convertFromUnsignedParts(const integerPart *src,
integerPart *dst;
lostFraction lost_fraction;
- assertArithmeticOK(*semantics);
category = fcNormal;
omsb = APInt::tcMSB(src, srcCount) + 1;
dst = significandParts();
@@ -2200,7 +2169,6 @@ APFloat::convertFromSignExtendedInteger(const integerPart *src,
{
opStatus status;
- assertArithmeticOK(*semantics);
if (isSigned &&
APInt::tcExtractBit(src, srcCount * integerPartWidth - 1)) {
integerPart *copy;
@@ -2334,7 +2302,7 @@ APFloat::roundSignificandWithExponent(const integerPart *decSigParts,
roundingMode rounding_mode)
{
unsigned int parts, pow5PartCount;
- fltSemantics calcSemantics = { 32767, -32767, 0, true };
+ fltSemantics calcSemantics = { 32767, -32767, 0 };
integerPart pow5Parts[maxPowerOfFiveParts];
bool isNearest;
@@ -2526,7 +2494,6 @@ APFloat::convertFromDecimalString(StringRef str, roundingMode rounding_mode)
APFloat::opStatus
APFloat::convertFromString(StringRef str, roundingMode rounding_mode)
{
- assertArithmeticOK(*semantics);
assert(!str.empty() && "Invalid string length");
/* Handle a leading minus sign. */
@@ -2578,8 +2545,6 @@ APFloat::convertToHexString(char *dst, unsigned int hexDigits,
{
char *p;
- assertArithmeticOK(*semantics);
-
p = dst;
if (sign)
*dst++ = '-';
@@ -2788,42 +2753,46 @@ APFloat::convertPPCDoubleDoubleAPFloatToAPInt() const
assert(semantics == (const llvm::fltSemantics*)&PPCDoubleDouble);
assert(partCount()==2);
- uint64_t myexponent, mysignificand, myexponent2, mysignificand2;
-
- if (category==fcNormal) {
- myexponent = exponent + 1023; //bias
- myexponent2 = exponent2 + 1023;
- mysignificand = significandParts()[0];
- mysignificand2 = significandParts()[1];
- if (myexponent==1 && !(mysignificand & 0x10000000000000LL))
- myexponent = 0; // denormal
- if (myexponent2==1 && !(mysignificand2 & 0x10000000000000LL))
- myexponent2 = 0; // denormal
- } else if (category==fcZero) {
- myexponent = 0;
- mysignificand = 0;
- myexponent2 = 0;
- mysignificand2 = 0;
- } else if (category==fcInfinity) {
- myexponent = 0x7ff;
- myexponent2 = 0;
- mysignificand = 0;
- mysignificand2 = 0;
+ uint64_t words[2];
+ opStatus fs;
+ bool losesInfo;
+
+ // Convert number to double. To avoid spurious underflows, we re-
+ // normalize against the "double" minExponent first, and only *then*
+ // truncate the mantissa. The result of that second conversion
+ // may be inexact, but should never underflow.
+ APFloat extended(*this);
+ fltSemantics extendedSemantics = *semantics;
+ extendedSemantics.minExponent = IEEEdouble.minExponent;
+ fs = extended.convert(extendedSemantics, rmNearestTiesToEven, &losesInfo);
+ assert(fs == opOK && !losesInfo);
+ (void)fs;
+
+ APFloat u(extended);
+ fs = u.convert(IEEEdouble, rmNearestTiesToEven, &losesInfo);
+ assert(fs == opOK || fs == opInexact);
+ (void)fs;
+ words[0] = *u.convertDoubleAPFloatToAPInt().getRawData();
+
+ // If conversion was exact or resulted in a special case, we're done;
+ // just set the second double to zero. Otherwise, re-convert back to
+ // the extended format and compute the difference. This now should
+ // convert exactly to double.
+ if (u.category == fcNormal && losesInfo) {
+ fs = u.convert(extendedSemantics, rmNearestTiesToEven, &losesInfo);
+ assert(fs == opOK && !losesInfo);
+ (void)fs;
+
+ APFloat v(extended);
+ v.subtract(u, rmNearestTiesToEven);
+ fs = v.convert(IEEEdouble, rmNearestTiesToEven, &losesInfo);
+ assert(fs == opOK && !losesInfo);
+ (void)fs;
+ words[1] = *v.convertDoubleAPFloatToAPInt().getRawData();
} else {
- assert(category == fcNaN && "Unknown category");
- myexponent = 0x7ff;
- mysignificand = significandParts()[0];
- myexponent2 = exponent2;
- mysignificand2 = significandParts()[1];
+ words[1] = 0;
}
- uint64_t words[2];
- words[0] = ((uint64_t)(sign & 1) << 63) |
- ((myexponent & 0x7ff) << 52) |
- (mysignificand & 0xfffffffffffffLL);
- words[1] = ((uint64_t)(sign2 & 1) << 63) |
- ((myexponent2 & 0x7ff) << 52) |
- (mysignificand2 & 0xfffffffffffffLL);
return APInt(128, words);
}
@@ -3043,47 +3012,23 @@ APFloat::initFromPPCDoubleDoubleAPInt(const APInt &api)
assert(api.getBitWidth()==128);
uint64_t i1 = api.getRawData()[0];
uint64_t i2 = api.getRawData()[1];
- uint64_t myexponent = (i1 >> 52) & 0x7ff;
- uint64_t mysignificand = i1 & 0xfffffffffffffLL;
- uint64_t myexponent2 = (i2 >> 52) & 0x7ff;
- uint64_t mysignificand2 = i2 & 0xfffffffffffffLL;
+ opStatus fs;
+ bool losesInfo;
- initialize(&APFloat::PPCDoubleDouble);
- assert(partCount()==2);
+ // Get the first double and convert to our format.
+ initFromDoubleAPInt(APInt(64, i1));
+ fs = convert(PPCDoubleDouble, rmNearestTiesToEven, &losesInfo);
+ assert(fs == opOK && !losesInfo);
+ (void)fs;
- sign = static_cast<unsigned int>(i1>>63);
- sign2 = static_cast<unsigned int>(i2>>63);
- if (myexponent==0 && mysignificand==0) {
- // exponent, significand meaningless
- // exponent2 and significand2 are required to be 0; we don't check
- category = fcZero;
- } else if (myexponent==0x7ff && mysignificand==0) {
- // exponent, significand meaningless
- // exponent2 and significand2 are required to be 0; we don't check
- category = fcInfinity;
- } else if (myexponent==0x7ff && mysignificand!=0) {
- // exponent meaningless. So is the whole second word, but keep it
- // for determinism.
- category = fcNaN;
- exponent2 = myexponent2;
- significandParts()[0] = mysignificand;
- significandParts()[1] = mysignificand2;
- } else {
- category = fcNormal;
- // Note there is no category2; the second word is treated as if it is
- // fcNormal, although it might be something else considered by itself.
- exponent = myexponent - 1023;
- exponent2 = myexponent2 - 1023;
- significandParts()[0] = mysignificand;
- significandParts()[1] = mysignificand2;
- if (myexponent==0) // denormal
- exponent = -1022;
- else
- significandParts()[0] |= 0x10000000000000LL; // integer bit
- if (myexponent2==0)
- exponent2 = -1022;
- else
- significandParts()[1] |= 0x10000000000000LL; // integer bit
+ // Unless we have a special case, add in second double.
+ if (category == fcNormal) {
+ APFloat v(APInt(64, i2));
+ fs = v.convert(PPCDoubleDouble, rmNearestTiesToEven, &losesInfo);
+ assert(fs == opOK && !losesInfo);
+ (void)fs;
+
+ add(v, rmNearestTiesToEven);
}
}
@@ -3309,15 +3254,15 @@ APFloat APFloat::getSmallestNormalized(const fltSemantics &Sem, bool Negative) {
return Val;
}
-APFloat::APFloat(const APInt& api, bool isIEEE) : exponent2(0), sign2(0) {
+APFloat::APFloat(const APInt& api, bool isIEEE) {
initFromAPInt(api, isIEEE);
}
-APFloat::APFloat(float f) : exponent2(0), sign2(0) {
+APFloat::APFloat(float f) {
initFromAPInt(APInt::floatToBits(f));
}
-APFloat::APFloat(double d) : exponent2(0), sign2(0) {
+APFloat::APFloat(double d) {
initFromAPInt(APInt::doubleToBits(d));
}
@@ -3608,11 +3553,6 @@ void APFloat::toString(SmallVectorImpl<char> &Str,
}
bool APFloat::getExactInverse(APFloat *inv) const {
- // We can only guarantee the existence of an exact inverse for IEEE floats.
- if (semantics != &IEEEhalf && semantics != &IEEEsingle &&
- semantics != &IEEEdouble && semantics != &IEEEquad)
- return false;
-
// Special floats and denormals have no exact inverse.
if (category != fcNormal)
return false;
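
The rewritten PPC double-double conversions above stop tracking a second sign/exponent pair by hand and instead use APFloat arithmetic directly: round the full value to the nearest IEEE double for the first word, then round the residual for the second. The same hi/lo split, sketched with host types (long double stands in for the wider value; this is illustrative, not the APFloat code path):

#include <cmath>
#include <cstdio>

// Split x into (hi, lo) doubles with hi = round-to-nearest(x) and
// lo picking up the part hi could not represent.
static void splitDoubleDouble(long double x, double &hi, double &lo) {
  hi = static_cast<double>(x);       // first rounding
  lo = static_cast<double>(x - hi);  // residual, rounded again
}

int main() {
  // On x86 hosts long double has a 64-bit significand, so this value
  // is exact before the split.
  long double x = 1.0L + std::ldexp(1.0L, -60);
  double hi, lo;
  splitDoubleDouble(x, hi, lo);
  std::printf("hi=%a lo=%a\n", hi, lo);  // hi=0x1p+0 lo=0x1p-60
  return 0;
}
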
diff --git a/contrib/llvm/lib/Support/Atomic.cpp b/contrib/llvm/lib/Support/Atomic.cpp
index 3001f6c..9559ad7 100644
--- a/contrib/llvm/lib/Support/Atomic.cpp
+++ b/contrib/llvm/lib/Support/Atomic.cpp
@@ -21,11 +21,15 @@ using namespace llvm;
#undef MemoryFence
#endif
+#if defined(__GNUC__) || (defined(__IBMCPP__) && __IBMCPP__ >= 1210)
+#define GNU_ATOMICS
+#endif
+
void sys::MemoryFence() {
#if LLVM_HAS_ATOMICS == 0
return;
#else
-# if defined(__GNUC__)
+# if defined(GNU_ATOMICS)
__sync_synchronize();
# elif defined(_MSC_VER)
MemoryBarrier();
@@ -43,7 +47,7 @@ sys::cas_flag sys::CompareAndSwap(volatile sys::cas_flag* ptr,
if (result == old_value)
*ptr = new_value;
return result;
-#elif defined(__GNUC__)
+#elif defined(GNU_ATOMICS)
return __sync_val_compare_and_swap(ptr, old_value, new_value);
#elif defined(_MSC_VER)
return InterlockedCompareExchange(ptr, new_value, old_value);
@@ -56,7 +60,7 @@ sys::cas_flag sys::AtomicIncrement(volatile sys::cas_flag* ptr) {
#if LLVM_HAS_ATOMICS == 0
++(*ptr);
return *ptr;
-#elif defined(__GNUC__)
+#elif defined(GNU_ATOMICS)
return __sync_add_and_fetch(ptr, 1);
#elif defined(_MSC_VER)
return InterlockedIncrement(ptr);
@@ -69,7 +73,7 @@ sys::cas_flag sys::AtomicDecrement(volatile sys::cas_flag* ptr) {
#if LLVM_HAS_ATOMICS == 0
--(*ptr);
return *ptr;
-#elif defined(__GNUC__)
+#elif defined(GNU_ATOMICS)
return __sync_sub_and_fetch(ptr, 1);
#elif defined(_MSC_VER)
return InterlockedDecrement(ptr);
@@ -82,7 +86,7 @@ sys::cas_flag sys::AtomicAdd(volatile sys::cas_flag* ptr, sys::cas_flag val) {
#if LLVM_HAS_ATOMICS == 0
*ptr += val;
return *ptr;
-#elif defined(__GNUC__)
+#elif defined(GNU_ATOMICS)
return __sync_add_and_fetch(ptr, val);
#elif defined(_MSC_VER)
return InterlockedExchangeAdd(ptr, val) + val;
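
GNU_ATOMICS widens the old __GNUC__ test so that IBM xlC 12.1 and later, which also implements the __sync builtins, takes the same code path. The guarded-builtin pattern in isolation (a sketch, assuming a compiler that provides the builtins):

#if defined(__GNUC__) || (defined(__IBMCPP__) && __IBMCPP__ >= 1210)
#define GNU_ATOMICS
#endif

typedef unsigned int cas_flag;

cas_flag compareAndSwap(volatile cas_flag *Ptr, cas_flag New, cas_flag Old) {
#if defined(GNU_ATOMICS)
  // Returns the prior value of *Ptr; the swap took effect iff it equals Old.
  return __sync_val_compare_and_swap(Ptr, Old, New);
#else
#error "no atomics support detected; add a fallback for this compiler"
#endif
}
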
diff --git a/contrib/llvm/lib/Support/CommandLine.cpp b/contrib/llvm/lib/Support/CommandLine.cpp
index 593315d1..fc4f189 100644
--- a/contrib/llvm/lib/Support/CommandLine.cpp
+++ b/contrib/llvm/lib/Support/CommandLine.cpp
@@ -464,7 +464,7 @@ static void ParseCStringVector(std::vector<char *> &OutputVector,
/// an environment variable (whose name is given in ENVVAR).
///
void cl::ParseEnvironmentOptions(const char *progName, const char *envVar,
- const char *Overview, bool ReadResponseFiles) {
+ const char *Overview) {
// Check args.
assert(progName && "Program name not specified");
assert(envVar && "Environment variable name missing");
@@ -483,7 +483,7 @@ void cl::ParseEnvironmentOptions(const char *progName, const char *envVar,
// and hand it off to ParseCommandLineOptions().
ParseCStringVector(newArgv, envValue);
int newArgc = static_cast<int>(newArgv.size());
- ParseCommandLineOptions(newArgc, &newArgv[0], Overview, ReadResponseFiles);
+ ParseCommandLineOptions(newArgc, &newArgv[0], Overview);
// Free all the strdup()ed strings.
for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end();
@@ -529,7 +529,7 @@ static void ExpandResponseFiles(unsigned argc, const char*const* argv,
}
void cl::ParseCommandLineOptions(int argc, const char * const *argv,
- const char *Overview, bool ReadResponseFiles) {
+ const char *Overview) {
// Process all registered options.
SmallVector<Option*, 4> PositionalOpts;
SmallVector<Option*, 4> SinkOpts;
@@ -541,12 +541,10 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
// Expand response files.
std::vector<char*> newArgv;
- if (ReadResponseFiles) {
- newArgv.push_back(strdup(argv[0]));
- ExpandResponseFiles(argc, argv, newArgv);
- argv = &newArgv[0];
- argc = static_cast<int>(newArgv.size());
- }
+ newArgv.push_back(strdup(argv[0]));
+ ExpandResponseFiles(argc, argv, newArgv);
+ argv = &newArgv[0];
+ argc = static_cast<int>(newArgv.size());
// Copy the program name into ProgName, making sure not to overflow it.
std::string ProgName = sys::path::filename(argv[0]);
@@ -839,12 +837,10 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
MoreHelp->clear();
// Free the memory allocated by ExpandResponseFiles.
- if (ReadResponseFiles) {
- // Free all the strdup()ed strings.
- for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end();
- i != e; ++i)
- free(*i);
- }
+ // Free all the strdup()ed strings.
+ for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end();
+ i != e; ++i)
+ free(*i);
// If we had an error processing our arguments, don't let the program execute
if (ErrorParsing) exit(1);
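
With the ReadResponseFiles parameter gone, @file expansion is unconditional: every ParseCommandLineOptions caller now gets response files without opting in. A hypothetical tool using the slimmed-down signature (the option name is illustrative):

#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<std::string> OutputFilename("o", cl::desc("Output file"));

int main(int argc, char **argv) {
  // "mytool @args.rsp -o out" behaves as if the contents of args.rsp
  // had been spliced into argv; no extra flag is required anymore.
  cl::ParseCommandLineOptions(argc, argv, "my tool\n");
  return 0;
}
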
diff --git a/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp b/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
index 1e89c6a..34e82cf 100644
--- a/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
+++ b/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
@@ -122,7 +122,7 @@ private:
DDA.UpdatedSearchState(Changes, Sets, Required);
}
- /// ExecuteOneTest - Execute a single test predicate on the change set \arg S.
+ /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
bool ExecuteOneTest(const changeset_ty &S) {
// Check dependencies invariant.
DEBUG({
@@ -143,8 +143,8 @@ public:
changeset_ty Run();
- /// GetTestResult - Get the test result for the active set \arg Changes with
- /// \arg Required changes from the cache, executing the test if necessary.
+ /// GetTestResult - Get the test result for the active set \p Changes with
+ /// \p Required changes from the cache, executing the test if necessary.
///
/// \param Changes - The set of active changes being minimized, which should
/// have their pred closure included in the test.
@@ -163,11 +163,11 @@ class DeltaActiveSetHelper : public DeltaAlgorithm {
protected:
/// UpdatedSearchState - Callback used when the search state changes.
virtual void UpdatedSearchState(const changeset_ty &Changes,
- const changesetlist_ty &Sets) {
+ const changesetlist_ty &Sets) LLVM_OVERRIDE {
DDAI.UpdatedSearchState(Changes, Sets, Required);
}
- virtual bool ExecuteOneTest(const changeset_ty &S) {
+ virtual bool ExecuteOneTest(const changeset_ty &S) LLVM_OVERRIDE {
return DDAI.GetTestResult(S, Required);
}
diff --git a/contrib/llvm/lib/Support/DataExtractor.cpp b/contrib/llvm/lib/Support/DataExtractor.cpp
index dc21155..3d5cce0 100644
--- a/contrib/llvm/lib/Support/DataExtractor.cpp
+++ b/contrib/llvm/lib/Support/DataExtractor.cpp
@@ -139,7 +139,7 @@ uint64_t DataExtractor::getULEB128(uint32_t *offset_ptr) const {
while (isValidOffset(offset)) {
byte = Data[offset++];
- result |= (byte & 0x7f) << shift;
+ result |= uint64_t(byte & 0x7f) << shift;
shift += 7;
if ((byte & 0x80) == 0)
break;
@@ -160,7 +160,7 @@ int64_t DataExtractor::getSLEB128(uint32_t *offset_ptr) const {
while (isValidOffset(offset)) {
byte = Data[offset++];
- result |= (byte & 0x7f) << shift;
+ result |= uint64_t(byte & 0x7f) << shift;
shift += 7;
if ((byte & 0x80) == 0)
break;
@@ -168,7 +168,7 @@ int64_t DataExtractor::getSLEB128(uint32_t *offset_ptr) const {
// Sign bit of byte is 2nd high order bit (0x40)
if (shift < 64 && (byte & 0x40))
- result |= -(1 << shift);
+ result |= -(1ULL << shift);
*offset_ptr = offset;
return result;
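
The uint64_t casts matter because byte & 0x7f has type int: once shift reaches 32, shifting an int that far is undefined and silently drops the high LEB128 groups. A self-contained check, decoding the six-byte ULEB128 form of 2^35 (which is exactly 128^5):

#include <cassert>
#include <cstdint>

static uint64_t decodeULEB128(const uint8_t *P) {
  uint64_t Result = 0;
  unsigned Shift = 0;
  uint8_t Byte;
  do {
    Byte = *P++;
    Result |= uint64_t(Byte & 0x7f) << Shift;  // widen BEFORE shifting
    Shift += 7;
  } while (Byte & 0x80);
  return Result;
}

int main() {
  const uint8_t Enc[] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x01 };
  assert(decodeULEB128(Enc) == (uint64_t(1) << 35));
  return 0;
}
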
diff --git a/contrib/llvm/lib/Support/DataStream.cpp b/contrib/llvm/lib/Support/DataStream.cpp
index 94d14a5..3a38e2a 100644
--- a/contrib/llvm/lib/Support/DataStream.cpp
+++ b/contrib/llvm/lib/Support/DataStream.cpp
@@ -58,7 +58,7 @@ public:
virtual ~DataFileStreamer() {
close(Fd);
}
- virtual size_t GetBytes(unsigned char *buf, size_t len) {
+ virtual size_t GetBytes(unsigned char *buf, size_t len) LLVM_OVERRIDE {
NumStreamFetches++;
return read(Fd, buf, len);
}
diff --git a/contrib/llvm/lib/Support/DynamicLibrary.cpp b/contrib/llvm/lib/Support/DynamicLibrary.cpp
index fb02c07..45fec36 100644
--- a/contrib/llvm/lib/Support/DynamicLibrary.cpp
+++ b/contrib/llvm/lib/Support/DynamicLibrary.cpp
@@ -160,7 +160,7 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char *symbolName) {
// On linux we have a weird situation. The stderr/out/in symbols are both
// macros and global variables because of standards requirements. So, we
// boldly use the EXPLICIT_SYMBOL macro without checking for a #define first.
-#if defined(__linux__)
+#if defined(__linux__) && !defined(__ANDROID__)
{
EXPLICIT_SYMBOL(stderr);
EXPLICIT_SYMBOL(stdout);
diff --git a/contrib/llvm/lib/Support/Errno.cpp b/contrib/llvm/lib/Support/Errno.cpp
index dd218f6..730220f 100644
--- a/contrib/llvm/lib/Support/Errno.cpp
+++ b/contrib/llvm/lib/Support/Errno.cpp
@@ -13,6 +13,7 @@
#include "llvm/Support/Errno.h"
#include "llvm/Config/config.h" // Get autoconf configuration settings
+#include "llvm/Support/raw_ostream.h"
#if HAVE_STRING_H
#include <string.h>
@@ -39,7 +40,7 @@ std::string StrError(int errnum) {
const int MaxErrStrLen = 2000;
char buffer[MaxErrStrLen];
buffer[0] = '\0';
- char* str = buffer;
+ std::string str;
#ifdef HAVE_STRERROR_R
// strerror_r is thread-safe.
if (errnum)
@@ -49,21 +50,25 @@ std::string StrError(int errnum) {
str = strerror_r(errnum,buffer,MaxErrStrLen-1);
# else
strerror_r(errnum,buffer,MaxErrStrLen-1);
+ str = buffer;
# endif
#elif HAVE_DECL_STRERROR_S // "Windows Secure API"
- if (errnum)
+ if (errnum) {
strerror_s(buffer, MaxErrStrLen - 1, errnum);
+ str = buffer;
+ }
#elif defined(HAVE_STRERROR)
// Copy the thread un-safe result of strerror into
// the buffer as fast as possible to minimize impact
// of collision of strerror in multiple threads.
if (errnum)
- strncpy(buffer,strerror(errnum),MaxErrStrLen-1);
- buffer[MaxErrStrLen-1] = '\0';
+ str = strerror(errnum);
#else
// Strange that this system doesn't even have strerror
// but, oh well, just use a generic message
- sprintf(buffer, "Error #%d", errnum);
+ raw_string_ostream stream(str);
+ stream << "Error #" << errnum;
+ stream.flush();
#endif
return str;
}
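
The preprocessor ladder in StrError exists because strerror_r comes in two incompatible flavors: the XSI version returns an int status and fills the caller's buffer, while the GNU version returns a char* that may point at a static string and ignore the buffer entirely. A compressed sketch of coping with both, assuming the usual glibc feature-test macros:

#include <cstring>
#include <string>

std::string strError(int errnum) {
  char buffer[256];
  buffer[0] = '\0';
#if defined(__GLIBC__) && defined(_GNU_SOURCE)
  // GNU flavor: use the returned pointer, which need not be `buffer`.
  return strerror_r(errnum, buffer, sizeof(buffer));
#else
  // XSI flavor: zero on success, message written into `buffer`.
  if (strerror_r(errnum, buffer, sizeof(buffer)) != 0)
    return "Unknown error";
  return buffer;
#endif
}
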
diff --git a/contrib/llvm/lib/Support/FoldingSet.cpp b/contrib/llvm/lib/Support/FoldingSet.cpp
index c6282c6..4d489a8 100644
--- a/contrib/llvm/lib/Support/FoldingSet.cpp
+++ b/contrib/llvm/lib/Support/FoldingSet.cpp
@@ -38,6 +38,14 @@ bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const {
return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0;
}
+/// Used to compare the "ordering" of two nodes, as defined by the
+/// profiled bits and the memcmp() order over them.
+bool FoldingSetNodeIDRef::operator<(FoldingSetNodeIDRef RHS) const {
+ if (Size != RHS.Size)
+ return Size < RHS.Size;
+ return memcmp(Data, RHS.Data, Size*sizeof(*Data)) < 0;
+}
+
//===----------------------------------------------------------------------===//
// FoldingSetNodeID Implementation
@@ -152,6 +160,16 @@ bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const {
return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS;
}
+/// Used to compare the "ordering" of two nodes, as defined by the
+/// profiled bits and the memcmp() order over them.
+bool FoldingSetNodeID::operator<(const FoldingSetNodeID &RHS) const {
+ return *this < FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size());
+}
+
+bool FoldingSetNodeID::operator<(FoldingSetNodeIDRef RHS) const {
+ return FoldingSetNodeIDRef(Bits.data(), Bits.size()) < RHS;
+}
+
/// Intern - Copy this node's data to a memory region allocated from the
/// given allocator and return a FoldingSetNodeIDRef describing the
/// interned data.
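
The new operator< gives node profiles a total order: shorter profiles sort first, and equal-length ones fall back to memcmp over the profile words. That makes FoldingSetNodeID usable as a key in ordered containers, for instance to get deterministic iteration; a hypothetical use:

#include "llvm/ADT/FoldingSet.h"
#include <map>
using namespace llvm;

void example() {
  FoldingSetNodeID A, B;
  A.AddInteger(1u);
  B.AddInteger(2u);
  // Ordered by size, then by memcmp over the profiled bits.
  std::map<FoldingSetNodeID, int> Counts;
  Counts[A] = 1;
  Counts[B] = 2;
}
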
diff --git a/contrib/llvm/lib/Support/Host.cpp b/contrib/llvm/lib/Support/Host.cpp
index a13b9e2..34e32b8 100644
--- a/contrib/llvm/lib/Support/Host.cpp
+++ b/contrib/llvm/lib/Support/Host.cpp
@@ -234,6 +234,8 @@ std::string sys::getHostCPUName() {
case 37: // Intel Core i7, laptop version.
case 44: // Intel Core i7 processor and Intel Xeon processor. All
// processors are manufactured using the 32 nm process.
+ case 46: // Nehalem EX
+ case 47: // Westmere EX
return "corei7";
// SandyBridge:
@@ -501,6 +503,7 @@ std::string sys::getHostCPUName() {
.Case("0xb76", "arm1176jz-s")
.Case("0xc08", "cortex-a8")
.Case("0xc09", "cortex-a9")
+ .Case("0xc0f", "cortex-a15")
.Case("0xc20", "cortex-m0")
.Case("0xc23", "cortex-m3")
.Case("0xc24", "cortex-m4")
diff --git a/contrib/llvm/lib/Support/LockFileManager.cpp b/contrib/llvm/lib/Support/LockFileManager.cpp
index 64404a1..59bfcfc 100644
--- a/contrib/llvm/lib/Support/LockFileManager.cpp
+++ b/contrib/llvm/lib/Support/LockFileManager.cpp
@@ -49,7 +49,7 @@ LockFileManager::readLockFile(StringRef LockFileName) {
}
bool LockFileManager::processStillExecuting(StringRef Hostname, int PID) {
-#if LLVM_ON_UNIX
+#if LLVM_ON_UNIX && !defined(__ANDROID__)
char MyHostname[256];
MyHostname[255] = 0;
MyHostname[0] = 0;
diff --git a/contrib/llvm/lib/Support/Memory.cpp b/contrib/llvm/lib/Support/Memory.cpp
index 9229b0c..12f0838 100644
--- a/contrib/llvm/lib/Support/Memory.cpp
+++ b/contrib/llvm/lib/Support/Memory.cpp
@@ -16,14 +16,6 @@
#include "llvm/Support/Valgrind.h"
#include "llvm/Config/config.h"
-#if defined(__mips__)
-#include <sys/cachectl.h>
-#endif
-
-namespace llvm {
-using namespace sys;
-}
-
// Include the platform-specific parts of this class.
#ifdef LLVM_ON_UNIX
#include "Unix/Memory.inc"
@@ -31,51 +23,3 @@ using namespace sys;
#ifdef LLVM_ON_WIN32
#include "Windows/Memory.inc"
#endif
-
-extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
-
-/// InvalidateInstructionCache - Before the JIT can run a block of code
-/// that has been emitted it must invalidate the instruction cache on some
-/// platforms.
-void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
- size_t Len) {
-
-// icache invalidation for PPC and ARM.
-#if defined(__APPLE__)
-
-# if (defined(__POWERPC__) || defined (__ppc__) || \
- defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
- sys_icache_invalidate(const_cast<void *>(Addr), Len);
-# endif
-
-#else
-
-# if (defined(__POWERPC__) || defined (__ppc__) || \
- defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
- const size_t LineSize = 32;
-
- const intptr_t Mask = ~(LineSize - 1);
- const intptr_t StartLine = ((intptr_t) Addr) & Mask;
- const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
-
- for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
- asm volatile("dcbf 0, %0" : : "r"(Line));
- asm volatile("sync");
-
- for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
- asm volatile("icbi 0, %0" : : "r"(Line));
- asm volatile("isync");
-# elif defined(__arm__) && defined(__GNUC__) && !defined(__FreeBSD__)
- // FIXME: Can we safely always call this for __GNUC__ everywhere?
- const char *Start = static_cast<const char *>(Addr);
- const char *End = Start + Len;
- __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
-# elif defined(__mips__)
- const char *Start = static_cast<const char *>(Addr);
- cacheflush(const_cast<char *>(Start), Len, BCACHE);
-# endif
-
-#endif // end apple
-
- ValgrindDiscardTranslations(Addr, Len);
-}
diff --git a/contrib/llvm/lib/Support/MemoryBuffer.cpp b/contrib/llvm/lib/Support/MemoryBuffer.cpp
index 992f03c..ec373e7 100644
--- a/contrib/llvm/lib/Support/MemoryBuffer.cpp
+++ b/contrib/llvm/lib/Support/MemoryBuffer.cpp
@@ -33,6 +33,9 @@
#include <unistd.h>
#else
#include <io.h>
+#ifndef S_ISFIFO
+#define S_ISFIFO(x) (0)
+#endif
#endif
#include <fcntl.h>
using namespace llvm;
@@ -81,12 +84,12 @@ public:
init(InputData.begin(), InputData.end(), RequiresNullTerminator);
}
- virtual const char *getBufferIdentifier() const {
+ virtual const char *getBufferIdentifier() const LLVM_OVERRIDE {
// The name is stored after the class itself.
return reinterpret_cast<const char*>(this + 1);
}
-
- virtual BufferKind getBufferKind() const {
+
+ virtual BufferKind getBufferKind() const LLVM_OVERRIDE {
return MemoryBuffer_Malloc;
}
};
@@ -194,13 +197,34 @@ public:
sys::Path::UnMapFilePages(reinterpret_cast<const char*>(RealStart),
RealSize);
}
-
- virtual BufferKind getBufferKind() const {
+
+ virtual BufferKind getBufferKind() const LLVM_OVERRIDE {
return MemoryBuffer_MMap;
}
};
}
+static error_code getMemoryBufferForStream(int FD,
+ StringRef BufferName,
+ OwningPtr<MemoryBuffer> &result) {
+ const ssize_t ChunkSize = 4096*4;
+ SmallString<ChunkSize> Buffer;
+ ssize_t ReadBytes;
+ // Read into Buffer until we hit EOF.
+ do {
+ Buffer.reserve(Buffer.size() + ChunkSize);
+ ReadBytes = read(FD, Buffer.end(), ChunkSize);
+ if (ReadBytes == -1) {
+ if (errno == EINTR) continue;
+ return error_code(errno, posix_category());
+ }
+ Buffer.set_size(Buffer.size() + ReadBytes);
+ } while (ReadBytes != 0);
+
+ result.reset(MemoryBuffer::getMemBufferCopy(Buffer, BufferName));
+ return error_code::success();
+}
+
error_code MemoryBuffer::getFile(StringRef Filename,
OwningPtr<MemoryBuffer> &result,
int64_t FileSize,
@@ -297,6 +321,13 @@ error_code MemoryBuffer::getOpenFile(int FD, const char *Filename,
if (fstat(FD, &FileInfo) == -1) {
return error_code(errno, posix_category());
}
+
+ // If this is a named pipe, we can't trust the size. Create the memory
+ // buffer by copying off the stream.
+ if (S_ISFIFO(FileInfo.st_mode)) {
+ return getMemoryBufferForStream(FD, Filename, result);
+ }
+
FileSize = FileInfo.st_size;
}
MapSize = FileSize;
@@ -370,20 +401,5 @@ error_code MemoryBuffer::getSTDIN(OwningPtr<MemoryBuffer> &result) {
// fallback if it fails.
sys::Program::ChangeStdinToBinary();
- const ssize_t ChunkSize = 4096*4;
- SmallString<ChunkSize> Buffer;
- ssize_t ReadBytes;
- // Read into Buffer until we hit EOF.
- do {
- Buffer.reserve(Buffer.size() + ChunkSize);
- ReadBytes = read(0, Buffer.end(), ChunkSize);
- if (ReadBytes == -1) {
- if (errno == EINTR) continue;
- return error_code(errno, posix_category());
- }
- Buffer.set_size(Buffer.size() + ReadBytes);
- } while (ReadBytes != 0);
-
- result.reset(getMemBufferCopy(Buffer, "<stdin>"));
- return error_code::success();
+ return getMemoryBufferForStream(0, "<stdin>", result);
}
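
The motivation for getMemoryBufferForStream is that fstat lies about pipes: a FIFO's st_size is typically zero, so sizing the buffer from it would yield an empty MemoryBuffer for "producer | tool" pipelines. Hence the chunked read to EOF. A standalone POSIX illustration of the untrustworthy size (assumes a Unix host):

#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>

int main() {
  struct stat SB;
  if (fstat(STDIN_FILENO, &SB) != 0)
    return 1;
  if (S_ISFIFO(SB.st_mode))
    // e.g. "echo hi | ./a.out": st_size does not reflect the data,
    // so a consumer must read to EOF rather than pre-size a mapping.
    std::printf("stdin is a pipe; st_size=%lld is meaningless\n",
                (long long)SB.st_size);
  return 0;
}
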
diff --git a/contrib/llvm/lib/Support/SmallVector.cpp b/contrib/llvm/lib/Support/SmallVector.cpp
index a89f149..f9c0e78 100644
--- a/contrib/llvm/lib/Support/SmallVector.cpp
+++ b/contrib/llvm/lib/Support/SmallVector.cpp
@@ -16,14 +16,15 @@ using namespace llvm;
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like datatypes and is out of line to reduce code duplication.
-void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) {
+void SmallVectorBase::grow_pod(void *FirstEl, size_t MinSizeInBytes,
+ size_t TSize) {
size_t CurSizeBytes = size_in_bytes();
size_t NewCapacityInBytes = 2 * capacity_in_bytes() + TSize; // Always grow.
if (NewCapacityInBytes < MinSizeInBytes)
NewCapacityInBytes = MinSizeInBytes;
void *NewElts;
- if (this->isSmall()) {
+ if (BeginX == FirstEl) {
NewElts = malloc(NewCapacityInBytes);
// Copy the elements over. No need to run dtors on PODs.
@@ -37,4 +38,3 @@ void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) {
this->BeginX = NewElts;
this->CapacityX = (char*)this->BeginX + NewCapacityInBytes;
}
-
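
Threading FirstEl (the address of the derived class's inline storage) into grow_pod lets the base class decide "is the data still in the inline buffer?" with one pointer comparison, instead of the isSmall() call that required knowledge of the derived layout. The mechanism, in a simplified sketch that is not the real SmallVector layout:

#include <cstdlib>
#include <cstring>

struct VecBase {
  void *BeginX, *EndX, *CapacityX;

  void grow(void *FirstEl, size_t NewBytes) {
    size_t SizeBytes = (char*)EndX - (char*)BeginX;
    void *Mem;
    if (BeginX == FirstEl) {          // still in the inline buffer:
      Mem = std::malloc(NewBytes);    // first heap allocation, copy out
      std::memcpy(Mem, BeginX, SizeBytes);
    } else {                          // already on the heap:
      Mem = std::realloc(BeginX, NewBytes);
    }
    BeginX = Mem;
    EndX = (char*)Mem + SizeBytes;
    CapacityX = (char*)Mem + NewBytes;
  }
};
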
diff --git a/contrib/llvm/lib/Support/StreamableMemoryObject.cpp b/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
index fe3752a..59e27a2 100644
--- a/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
+++ b/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/StreamableMemoryObject.h"
+#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstring>
@@ -23,18 +24,23 @@ public:
assert(LastChar >= FirstChar && "Invalid start/end range");
}
- virtual uint64_t getBase() const { return 0; }
- virtual uint64_t getExtent() const { return LastChar - FirstChar; }
- virtual int readByte(uint64_t address, uint8_t* ptr) const;
+ virtual uint64_t getBase() const LLVM_OVERRIDE { return 0; }
+ virtual uint64_t getExtent() const LLVM_OVERRIDE {
+ return LastChar - FirstChar;
+ }
+ virtual int readByte(uint64_t address, uint8_t* ptr) const LLVM_OVERRIDE;
virtual int readBytes(uint64_t address,
uint64_t size,
uint8_t* buf,
- uint64_t* copied) const;
- virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const;
- virtual bool isValidAddress(uint64_t address) const {
+ uint64_t* copied) const LLVM_OVERRIDE;
+ virtual const uint8_t *getPointer(uint64_t address,
+ uint64_t size) const LLVM_OVERRIDE;
+ virtual bool isValidAddress(uint64_t address) const LLVM_OVERRIDE {
return validAddress(address);
}
- virtual bool isObjectEnd(uint64_t address) const {return objectEnd(address);}
+ virtual bool isObjectEnd(uint64_t address) const LLVM_OVERRIDE {
+ return objectEnd(address);
+ }
private:
const uint8_t* const FirstChar;
@@ -49,8 +55,8 @@ private:
return static_cast<ptrdiff_t>(address) == LastChar - FirstChar;
}
- RawMemoryObject(const RawMemoryObject&); // DO NOT IMPLEMENT
- void operator=(const RawMemoryObject&); // DO NOT IMPLEMENT
+ RawMemoryObject(const RawMemoryObject&) LLVM_DELETED_FUNCTION;
+ void operator=(const RawMemoryObject&) LLVM_DELETED_FUNCTION;
};
int RawMemoryObject::readByte(uint64_t address, uint8_t* ptr) const {
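
LLVM_OVERRIDE and LLVM_DELETED_FUNCTION come from llvm/Support/Compiler.h, newly included above: they expand to C++11 override control and deleted functions when the compiler supports them, and to nothing otherwise (the declarations stay private and unimplemented, preserving the old DO-NOT-IMPLEMENT behavior). Roughly, from memory; Compiler.h is authoritative:

#ifndef __has_feature
#define __has_feature(x) 0  // shim for compilers without the Clang extension
#endif

#if __has_feature(cxx_override_control)
#define LLVM_OVERRIDE override
#else
#define LLVM_OVERRIDE
#endif

#if __has_feature(cxx_deleted_functions)
#define LLVM_DELETED_FUNCTION = delete
#else
#define LLVM_DELETED_FUNCTION
#endif
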
diff --git a/contrib/llvm/lib/Support/StringMap.cpp b/contrib/llvm/lib/Support/StringMap.cpp
index c2fc261..9ac1f86 100644
--- a/contrib/llvm/lib/Support/StringMap.cpp
+++ b/contrib/llvm/lib/Support/StringMap.cpp
@@ -13,6 +13,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Compiler.h"
#include <cassert>
using namespace llvm;
@@ -69,7 +70,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name) {
while (1) {
StringMapEntryBase *BucketItem = TheTable[BucketNo];
// If we found an empty bucket, this key isn't in the table yet, return it.
- if (BucketItem == 0) {
+ if (LLVM_LIKELY(BucketItem == 0)) {
// If we found a tombstone, we want to reuse the tombstone instead of an
// empty bucket. This reduces probing.
if (FirstTombstone != -1) {
@@ -84,7 +85,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name) {
if (BucketItem == getTombstoneVal()) {
// Skip over tombstones. However, remember the first one we see.
if (FirstTombstone == -1) FirstTombstone = BucketNo;
- } else if (HashTable[BucketNo] == FullHashValue) {
+ } else if (LLVM_LIKELY(HashTable[BucketNo] == FullHashValue)) {
// If the full hash value matches, check deeply for a match. The common
// case here is that we are only looking at the buckets (for item info
// being non-null and for the full hash value) not at the items. This
@@ -123,12 +124,12 @@ int StringMapImpl::FindKey(StringRef Key) const {
while (1) {
StringMapEntryBase *BucketItem = TheTable[BucketNo];
// If we found an empty bucket, this key isn't in the table yet, return.
- if (BucketItem == 0)
+ if (LLVM_LIKELY(BucketItem == 0))
return -1;
if (BucketItem == getTombstoneVal()) {
// Ignore tombstones.
- } else if (HashTable[BucketNo] == FullHashValue) {
+ } else if (LLVM_LIKELY(HashTable[BucketNo] == FullHashValue)) {
// If the full hash value matches, check deeply for a match. The common
// case here is that we are only looking at the buckets (for item info
// being non-null and for the full hash value) not at the items. This
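
LLVM_LIKELY, also from Compiler.h, wraps __builtin_expect so the annotated branches of these probing loops get favorable block layout from GCC-compatible compilers. Its definition is approximately the following, with Compiler.h again being authoritative:

#if defined(__GNUC__)
#define LLVM_LIKELY(EXPR)   __builtin_expect((bool)(EXPR), true)
#define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
#else
#define LLVM_LIKELY(EXPR)   (EXPR)
#define LLVM_UNLIKELY(EXPR) (EXPR)
#endif
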
diff --git a/contrib/llvm/lib/Support/StringRef.cpp b/contrib/llvm/lib/Support/StringRef.cpp
index 8aab4b2..f8e9208 100644
--- a/contrib/llvm/lib/Support/StringRef.cpp
+++ b/contrib/llvm/lib/Support/StringRef.cpp
@@ -350,8 +350,8 @@ bool llvm::getAsUnsignedInteger(StringRef Str, unsigned Radix,
unsigned long long PrevResult = Result;
Result = Result*Radix+CharVal;
- // Check for overflow.
- if (Result < PrevResult)
+ // Check for overflow by shifting back and seeing if bits were lost.
+ if (Result/Radix < PrevResult)
return true;
Str = Str.substr(1);
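
The old test only catches an overflow that wraps to a value below PrevResult, and for radices above 2 a wrap can easily land above it; dividing back is reliable, since Result/Radix < PrevResult exactly when high bits were lost. A concrete case the old check misses, taken from parsing "25000000000000000000" (larger than UINT64_MAX) in base 10:

#include <cassert>
#include <cstdint>

int main() {
  // State at the final digit of the string above.
  uint64_t PrevResult = 2500000000000000000ULL;  // 2.5e18, still in range
  uint64_t Result = PrevResult * 10;             // wraps to 6553255926290448384
  assert(Result >= PrevResult);      // the old `Result < PrevResult` test misses this
  assert(Result / 10 < PrevResult);  // the new test detects the lost high bits
  return 0;
}
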
diff --git a/contrib/llvm/lib/Support/Triple.cpp b/contrib/llvm/lib/Support/Triple.cpp
index cca549d..c058c05 100644
--- a/contrib/llvm/lib/Support/Triple.cpp
+++ b/contrib/llvm/lib/Support/Triple.cpp
@@ -42,6 +42,8 @@ const char *Triple::getArchTypeName(ArchType Kind) {
case nvptx64: return "nvptx64";
case le32: return "le32";
case amdil: return "amdil";
+ case spir: return "spir";
+ case spir64: return "spir64";
}
llvm_unreachable("Invalid ArchType!");
@@ -83,6 +85,8 @@ const char *Triple::getArchTypePrefix(ArchType Kind) {
case nvptx64: return "nvptx";
case le32: return "le32";
case amdil: return "amdil";
+ case spir: return "spir";
+ case spir64: return "spir";
}
}
@@ -95,6 +99,8 @@ const char *Triple::getVendorTypeName(VendorType Kind) {
case SCEI: return "scei";
case BGP: return "bgp";
case BGQ: return "bgq";
+ case Freescale: return "fsl";
+ case IBM: return "ibm";
}
llvm_unreachable("Invalid VendorType!");
@@ -125,6 +131,7 @@ const char *Triple::getOSTypeName(OSType Kind) {
case NativeClient: return "nacl";
case CNK: return "cnk";
case Bitrig: return "bitrig";
+ case AIX: return "aix";
}
llvm_unreachable("Invalid OSType");
@@ -138,7 +145,8 @@ const char *Triple::getEnvironmentTypeName(EnvironmentType Kind) {
case GNUEABI: return "gnueabi";
case EABI: return "eabi";
case MachO: return "macho";
- case ANDROIDEABI: return "androideabi";
+ case Android: return "android";
+ case ELF: return "elf";
}
llvm_unreachable("Invalid EnvironmentType!");
@@ -170,40 +178,11 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
.Case("nvptx64", nvptx64)
.Case("le32", le32)
.Case("amdil", amdil)
+ .Case("spir", spir)
+ .Case("spir64", spir64)
.Default(UnknownArch);
}
-Triple::ArchType Triple::getArchTypeForDarwinArchName(StringRef Str) {
- // See arch(3) and llvm-gcc's driver-driver.c. We don't implement support for
- // archs which Darwin doesn't use.
-
- // The matching this routine does is fairly pointless, since it is neither the
- // complete architecture list, nor a reasonable subset. The problem is that
- // historically the driver driver accepts this and also ties its -march=
- // handling to the architecture name, so we need to be careful before removing
- // support for it.
-
- // This code must be kept in sync with Clang's Darwin specific argument
- // translation.
-
- return StringSwitch<ArchType>(Str)
- .Cases("ppc", "ppc601", "ppc603", "ppc604", "ppc604e", Triple::ppc)
- .Cases("ppc750", "ppc7400", "ppc7450", "ppc970", Triple::ppc)
- .Case("ppc64", Triple::ppc64)
- .Cases("i386", "i486", "i486SX", "i586", "i686", Triple::x86)
- .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4",
- Triple::x86)
- .Case("x86_64", Triple::x86_64)
- // This is derived from the driver driver.
- .Cases("arm", "armv4t", "armv5", "armv6", Triple::arm)
- .Cases("armv7", "armv7f", "armv7k", "armv7s", "xscale", Triple::arm)
- .Case("r600", Triple::r600)
- .Case("nvptx", Triple::nvptx)
- .Case("nvptx64", Triple::nvptx64)
- .Case("amdil", Triple::amdil)
- .Default(Triple::UnknownArch);
-}
-
// Returns architecture name that is understood by the target assembler.
const char *Triple::getArchNameForAssembler() {
if (!isOSDarwin() && getVendor() != Triple::Apple)
@@ -225,6 +204,8 @@ const char *Triple::getArchNameForAssembler() {
.Case("nvptx64", "nvptx64")
.Case("le32", "le32")
.Case("amdil", "amdil")
+ .Case("spir", "spir")
+ .Case("spir64", "spir64")
.Default(NULL);
}
@@ -259,6 +240,8 @@ static Triple::ArchType parseArch(StringRef ArchName) {
.Case("nvptx64", Triple::nvptx64)
.Case("le32", Triple::le32)
.Case("amdil", Triple::amdil)
+ .Case("spir", Triple::spir)
+ .Case("spir64", Triple::spir64)
.Default(Triple::UnknownArch);
}
@@ -269,6 +252,8 @@ static Triple::VendorType parseVendor(StringRef VendorName) {
.Case("scei", Triple::SCEI)
.Case("bgp", Triple::BGP)
.Case("bgq", Triple::BGQ)
+ .Case("fsl", Triple::Freescale)
+ .Case("ibm", Triple::IBM)
.Default(Triple::UnknownVendor);
}
@@ -295,6 +280,7 @@ static Triple::OSType parseOS(StringRef OSName) {
.StartsWith("nacl", Triple::NativeClient)
.StartsWith("cnk", Triple::CNK)
.StartsWith("bitrig", Triple::Bitrig)
+ .StartsWith("aix", Triple::AIX)
.Default(Triple::UnknownOS);
}
@@ -305,7 +291,8 @@ static Triple::EnvironmentType parseEnvironment(StringRef EnvironmentName) {
.StartsWith("gnueabi", Triple::GNUEABI)
.StartsWith("gnu", Triple::GNU)
.StartsWith("macho", Triple::MachO)
- .StartsWith("androideabi", Triple::ANDROIDEABI)
+ .StartsWith("android", Triple::Android)
+ .StartsWith("elf", Triple::ELF)
.Default(Triple::UnknownEnvironment);
}
@@ -690,6 +677,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::thumb:
case llvm::Triple::x86:
case llvm::Triple::xcore:
+ case llvm::Triple::spir:
return 32;
case llvm::Triple::mips64:
@@ -698,6 +686,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::ppc64:
case llvm::Triple::sparcv9:
case llvm::Triple::x86_64:
+ case llvm::Triple::spir64:
return 64;
}
llvm_unreachable("Invalid architecture value");
@@ -724,6 +713,7 @@ Triple Triple::get32BitArchVariant() const {
break;
case Triple::amdil:
+ case Triple::spir:
case Triple::arm:
case Triple::cellspu:
case Triple::hexagon:
@@ -748,6 +738,7 @@ Triple Triple::get32BitArchVariant() const {
case Triple::ppc64: T.setArch(Triple::ppc); break;
case Triple::sparcv9: T.setArch(Triple::sparc); break;
case Triple::x86_64: T.setArch(Triple::x86); break;
+ case Triple::spir64: T.setArch(Triple::spir); break;
}
return T;
}
@@ -770,6 +761,7 @@ Triple Triple::get64BitArchVariant() const {
T.setArch(UnknownArch);
break;
+ case Triple::spir64:
case Triple::mips64:
case Triple::mips64el:
case Triple::nvptx64:
@@ -785,6 +777,7 @@ Triple Triple::get64BitArchVariant() const {
case Triple::ppc: T.setArch(Triple::ppc64); break;
case Triple::sparc: T.setArch(Triple::sparcv9); break;
case Triple::x86: T.setArch(Triple::x86_64); break;
+ case Triple::spir: T.setArch(Triple::spir64); break;
}
return T;
}
diff --git a/contrib/llvm/lib/Support/Unix/Memory.inc b/contrib/llvm/lib/Support/Unix/Memory.inc
index 5a57a28..1647e7b 100644
--- a/contrib/llvm/lib/Support/Unix/Memory.inc
+++ b/contrib/llvm/lib/Support/Unix/Memory.inc
@@ -13,6 +13,7 @@
#include "Unix.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#ifdef HAVE_SYS_MMAN_H
@@ -23,14 +24,146 @@
#include <mach/mach.h>
#endif
+#if defined(__mips__)
+# if defined(__OpenBSD__)
+# include <mips64/sysarch.h>
+# else
+# include <sys/cachectl.h>
+# endif
+#endif
+
+extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
+
+namespace {
+
+int getPosixProtectionFlags(unsigned Flags) {
+ switch (Flags) {
+ case llvm::sys::Memory::MF_READ:
+ return PROT_READ;
+ case llvm::sys::Memory::MF_WRITE:
+ return PROT_WRITE;
+ case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
+ return PROT_READ | PROT_WRITE;
+ case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
+ return PROT_READ | PROT_EXEC;
+ case llvm::sys::Memory::MF_READ |
+ llvm::sys::Memory::MF_WRITE |
+ llvm::sys::Memory::MF_EXEC:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ case llvm::sys::Memory::MF_EXEC:
+ return PROT_EXEC;
+ default:
+ llvm_unreachable("Illegal memory protection flag specified!");
+ }
+ // Provide a default return value as required by some compilers.
+ return PROT_NONE;
+}
+
+} // namespace
+
+namespace llvm {
+namespace sys {
+
+MemoryBlock
+Memory::allocateMappedMemory(size_t NumBytes,
+ const MemoryBlock *const NearBlock,
+ unsigned PFlags,
+ error_code &EC) {
+ EC = error_code::success();
+ if (NumBytes == 0)
+ return MemoryBlock();
+
+ static const size_t PageSize = Process::GetPageSize();
+ const size_t NumPages = (NumBytes+PageSize-1)/PageSize;
+
+ int fd = -1;
+#ifdef NEED_DEV_ZERO_FOR_MMAP
+ static int zero_fd = open("/dev/zero", O_RDWR);
+ if (zero_fd == -1) {
+ EC = error_code(errno, system_category());
+ return MemoryBlock();
+ }
+ fd = zero_fd;
+#endif
+
+ int MMFlags = MAP_PRIVATE |
+#ifdef HAVE_MMAP_ANONYMOUS
+ MAP_ANONYMOUS
+#else
+ MAP_ANON
+#endif
+ ; // Ends statement above
+
+ int Protect = getPosixProtectionFlags(PFlags);
+
+ // Use any near hint and the page size to set a page-aligned starting address
+ uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
+ NearBlock->size() : 0;
+ if (Start && Start % PageSize)
+ Start += PageSize - Start % PageSize;
+
+ void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
+ Protect, MMFlags, fd, 0);
+ if (Addr == MAP_FAILED) {
+  if (NearBlock) // Try again without a near hint
+ return allocateMappedMemory(NumBytes, 0, PFlags, EC);
+
+ EC = error_code(errno, system_category());
+ return MemoryBlock();
+ }
+
+ MemoryBlock Result;
+ Result.Address = Addr;
+ Result.Size = NumPages*PageSize;
+
+ if (PFlags & MF_EXEC)
+ Memory::InvalidateInstructionCache(Result.Address, Result.Size);
+
+ return Result;
+}
+
+error_code
+Memory::releaseMappedMemory(MemoryBlock &M) {
+ if (M.Address == 0 || M.Size == 0)
+ return error_code::success();
+
+ if (0 != ::munmap(M.Address, M.Size))
+ return error_code(errno, system_category());
+
+ M.Address = 0;
+ M.Size = 0;
+
+ return error_code::success();
+}
+
+error_code
+Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
+ if (M.Address == 0 || M.Size == 0)
+ return error_code::success();
+
+ if (!Flags)
+ return error_code(EINVAL, generic_category());
+
+ int Protect = getPosixProtectionFlags(Flags);
+
+ int Result = ::mprotect(M.Address, M.Size, Protect);
+ if (Result != 0)
+ return error_code(errno, system_category());
+
+ if (Flags & MF_EXEC)
+ Memory::InvalidateInstructionCache(M.Address, M.Size);
+
+ return error_code::success();
+}
+
/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific.
///
-llvm::sys::MemoryBlock
-llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
- std::string *ErrMsg) {
+MemoryBlock
+Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
+ std::string *ErrMsg) {
if (NumBytes == 0) return MemoryBlock();
size_t pageSize = Process::GetPageSize();
@@ -78,7 +211,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
if (KERN_SUCCESS != kr) {
MakeErrMsg(ErrMsg, "vm_protect max RX failed");
- return sys::MemoryBlock();
+ return MemoryBlock();
}
kr = vm_protect(mach_task_self(), (vm_address_t)pa,
@@ -86,7 +219,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
VM_PROT_READ | VM_PROT_WRITE);
if (KERN_SUCCESS != kr) {
MakeErrMsg(ErrMsg, "vm_protect RW failed");
- return sys::MemoryBlock();
+ return MemoryBlock();
}
#endif
@@ -97,17 +230,17 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
return result;
}
-bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
if (M.Address == 0 || M.Size == 0) return false;
if (0 != ::munmap(M.Address, M.Size))
return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
return false;
}
-bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
if (M.Address == 0 || M.Size == 0) return false;
- sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
+ Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
(vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
return KERN_SUCCESS == kr;
@@ -116,10 +249,10 @@ bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#endif
}
-bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
if (M.Address == 0 || M.Size == 0) return false;
- sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
+ Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
(vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
return KERN_SUCCESS == kr;
@@ -128,7 +261,7 @@ bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#endif
}
-bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
+bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
@@ -139,7 +272,7 @@ bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
#endif
}
-bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
+bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
@@ -149,3 +282,52 @@ bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
return true;
#endif
}
+
+/// InvalidateInstructionCache - Before the JIT can run a block of code
+/// that has been emitted it must invalidate the instruction cache on some
+/// platforms.
+void Memory::InvalidateInstructionCache(const void *Addr,
+ size_t Len) {
+
+// icache invalidation for PPC and ARM.
+#if defined(__APPLE__)
+
+# if (defined(__POWERPC__) || defined (__ppc__) || \
+ defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
+ sys_icache_invalidate(const_cast<void *>(Addr), Len);
+# endif
+
+#else
+
+# if (defined(__POWERPC__) || defined (__ppc__) || \
+ defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
+ const size_t LineSize = 32;
+
+ const intptr_t Mask = ~(LineSize - 1);
+ const intptr_t StartLine = ((intptr_t) Addr) & Mask;
+ const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("dcbf 0, %0" : : "r"(Line));
+ asm volatile("sync");
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("icbi 0, %0" : : "r"(Line));
+ asm volatile("isync");
+# elif defined(__arm__) && defined(__GNUC__) && !defined(__FreeBSD__)
+ // FIXME: Can we safely always call this for __GNUC__ everywhere?
+ const char *Start = static_cast<const char *>(Addr);
+ const char *End = Start + Len;
+ __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
+# elif defined(__mips__)
+ const char *Start = static_cast<const char *>(Addr);
+ cacheflush(const_cast<char *>(Start), Len, BCACHE);
+# endif
+
+#endif // end apple
+
+ ValgrindDiscardTranslations(Addr, Len);
+}
+
+} // namespace sys
+} // namespace llvm
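
For context (not part of the patch): a minimal sketch of how a JIT client is expected to use the sys::Memory entry points touched above, assuming the AllocateRWX/ReleaseRWX signatures shown in these hunks and the usual MemoryBlock base()/size() accessors. The instruction cache must be invalidated after writing code and before executing it; names and error handling are illustrative.

#include "llvm/Support/Memory.h"
#include <cstring>
#include <string>
using namespace llvm;

typedef int (*JitFn)(void);

static int runEmittedCode(const unsigned char *Code, size_t Len) {
  std::string Err;
  sys::MemoryBlock MB = sys::Memory::AllocateRWX(Len, 0, &Err);
  if (MB.base() == 0)
    return -1;                                  // allocation failed
  std::memcpy(MB.base(), Code, Len);            // copy in the emitted code
  // Flush the icache before jumping to freshly written instructions.
  sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());
  int Result = reinterpret_cast<JitFn>(MB.base())(); // conditionally-supported cast
  sys::Memory::ReleaseRWX(MB, &Err);
  return Result;
}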
diff --git a/contrib/llvm/lib/Support/Unix/Path.inc b/contrib/llvm/lib/Support/Unix/Path.inc
index 6bddbdf..6a5ebb8 100644
--- a/contrib/llvm/lib/Support/Unix/Path.inc
+++ b/contrib/llvm/lib/Support/Unix/Path.inc
@@ -261,7 +261,8 @@ Path::GetCurrentDirectory() {
}
#if defined(__FreeBSD__) || defined (__NetBSD__) || defined(__Bitrig__) || \
- defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__)
+ defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__) || \
+ defined(__linux__) || defined(__CYGWIN__)
static int
test_dir(char buf[PATH_MAX], char ret[PATH_MAX],
const char *dir, const char *bin)
@@ -337,9 +338,17 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
return Path(exe_path);
#elif defined(__linux__) || defined(__CYGWIN__)
char exe_path[MAXPATHLEN];
- ssize_t len = readlink("/proc/self/exe", exe_path, sizeof(exe_path));
- if (len >= 0)
- return Path(StringRef(exe_path, len));
+ StringRef aPath("/proc/self/exe");
+ if (sys::fs::exists(aPath)) {
+ // /proc is not always mounted under Linux (chroot for example).
+ ssize_t len = readlink(aPath.str().c_str(), exe_path, sizeof(exe_path));
+ if (len >= 0)
+ return Path(StringRef(exe_path, len));
+ } else {
+ // Fall back to the classical detection.
+ if (getprogpath(exe_path, argv0) != NULL)
+ return Path(exe_path);
+ }
#elif defined(HAVE_DLFCN_H)
// Use dladdr to get executable path if available.
Dl_info DLInfo;
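
A standalone sketch of the fallback logic added above (illustrative, not the patch's code): readlink on /proc/self/exe yields the true executable path when /proc is mounted; an empty result tells the caller to fall back to the classical argv[0]-based search, e.g. inside a chroot.

#include <unistd.h>
#include <limits.h>
#include <string>

static std::string exePathFromProc() {
  char Buf[PATH_MAX];
  ssize_t Len = ::readlink("/proc/self/exe", Buf, sizeof(Buf));
  if (Len < 0)
    return std::string();           // /proc unavailable: use the fallback
  return std::string(Buf, Len);     // readlink does not NUL-terminate
}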
diff --git a/contrib/llvm/lib/Support/Unix/Signals.inc b/contrib/llvm/lib/Support/Unix/Signals.inc
index 5195116..9e94068 100644
--- a/contrib/llvm/lib/Support/Unix/Signals.inc
+++ b/contrib/llvm/lib/Support/Unix/Signals.inc
@@ -121,17 +121,29 @@ static void UnregisterHandlers() {
/// NB: This must be an async signal safe function. It cannot allocate or free
/// memory, even in debug builds.
static void RemoveFilesToRemove() {
- // Note: avoid iterators in case of debug iterators that allocate or release
+ // We avoid iterators in case of debug iterators that allocate or release
// memory.
for (unsigned i = 0, e = FilesToRemove.size(); i != e; ++i) {
- // Note that we don't want to use any external code here, and we don't care
- // about errors. We're going to try as hard as we can as often as we need
- // to to make these files go away. If these aren't files, too bad.
- //
- // We do however rely on a std::string implementation for which repeated
- // calls to 'c_str()' don't allocate memory. We pre-call 'c_str()' on all
- // of these strings to try to ensure this is safe.
- unlink(FilesToRemove[i].c_str());
+ // We rely on a std::string implementation for which repeated calls to
+ // 'c_str()' don't allocate memory. We pre-call 'c_str()' on all of these
+ // strings to try to ensure this is safe.
+ const char *path = FilesToRemove[i].c_str();
+
+ // Get the status so we can determine if it's a file or directory. If we
+ // can't stat the file, ignore it.
+ struct stat buf;
+ if (stat(path, &buf) != 0)
+ continue;
+
+ // If this is not a regular file, ignore it. We want to prevent removal of
+ // special files like /dev/null, even if the compiler is being run with the
+ // super-user permissions.
+ if (!S_ISREG(buf.st_mode))
+ continue;
+
+ // Otherwise, remove the file. We ignore any errors here as there is nothing
+ // else we can do.
+ unlink(path);
}
}
@@ -243,7 +255,7 @@ void llvm::sys::AddSignalHandler(void (*FnPtr)(void *), void *Cookie) {
// On glibc systems we have the 'backtrace' function, which works nicely, but
// doesn't demangle symbols.
static void PrintStackTrace(void *) {
-#ifdef HAVE_BACKTRACE
+#if defined(HAVE_BACKTRACE) && defined(ENABLE_BACKTRACES)
static void* StackTrace[256];
// Use backtrace() to output a backtrace on Linux systems with glibc.
int depth = backtrace(StackTrace,
@@ -293,7 +305,7 @@ static void PrintStackTrace(void *) {
#endif
}
-/// PrintStackTraceOnErrorSignal - When an error signal (such as SIBABRT or
+/// PrintStackTraceOnErrorSignal - When an error signal (such as SIGABRT or
/// SIGSEGV) is delivered to the process, print a stack trace and then exit.
void llvm::sys::PrintStackTraceOnErrorSignal() {
AddSignalHandler(PrintStackTrace, 0);
@@ -305,10 +317,10 @@ void llvm::sys::PrintStackTraceOnErrorSignal() {
exception_mask_t mask = EXC_MASK_CRASH;
- kern_return_t ret = task_set_exception_ports(self,
+ kern_return_t ret = task_set_exception_ports(self,
mask,
MACH_PORT_NULL,
- EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES,
+ EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES,
THREAD_STATE_NONE);
(void)ret;
}
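
The new guard in RemoveFilesToRemove, distilled into a self-contained sketch (the helper name is illustrative): stat each candidate and only unlink regular files, so special files such as /dev/null survive even when the compiler runs with super-user permissions.

#include <sys/stat.h>
#include <unistd.h>

static void removeIfRegularFile(const char *Path) {
  struct stat Buf;
  if (::stat(Path, &Buf) != 0)
    return;                  // can't stat it: ignore
  if (!S_ISREG(Buf.st_mode))
    return;                  // never remove special files
  ::unlink(Path);            // errors deliberately ignored
}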
diff --git a/contrib/llvm/lib/Support/Windows/Memory.inc b/contrib/llvm/lib/Support/Windows/Memory.inc
index fcc7283..cb80f28 100644
--- a/contrib/llvm/lib/Support/Windows/Memory.inc
+++ b/contrib/llvm/lib/Support/Windows/Memory.inc
@@ -12,51 +12,163 @@
//
//===----------------------------------------------------------------------===//
-#include "Windows.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
+#include "Windows.h"
+
+namespace {
+
+DWORD getWindowsProtectionFlags(unsigned Flags) {
+ switch (Flags) {
+ // Contrary to what you might expect, the Windows page protection flags
+ // are not a bitwise combination of RWX values
+ case llvm::sys::Memory::MF_READ:
+ return PAGE_READONLY;
+ case llvm::sys::Memory::MF_WRITE:
+ // Note: PAGE_WRITE is not supported by VirtualProtect
+ return PAGE_READWRITE;
+ case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
+ return PAGE_READWRITE;
+ case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
+ return PAGE_EXECUTE_READ;
+ case llvm::sys::Memory::MF_READ |
+ llvm::sys::Memory::MF_WRITE |
+ llvm::sys::Memory::MF_EXEC:
+ return PAGE_EXECUTE_READWRITE;
+ case llvm::sys::Memory::MF_EXEC:
+ return PAGE_EXECUTE;
+ default:
+ llvm_unreachable("Illegal memory protection flag specified!");
+ }
+ // Provide a default return value as required by some compilers.
+ return PAGE_NOACCESS;
+}
+
+size_t getAllocationGranularity() {
+ SYSTEM_INFO Info;
+ ::GetSystemInfo(&Info);
+ if (Info.dwPageSize > Info.dwAllocationGranularity)
+ return Info.dwPageSize;
+ else
+ return Info.dwAllocationGranularity;
+}
+
+} // namespace
namespace llvm {
-using namespace sys;
+namespace sys {
//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only Win32 specific code
//=== and must not be UNIX code
//===----------------------------------------------------------------------===//
-MemoryBlock Memory::AllocateRWX(size_t NumBytes,
- const MemoryBlock *NearBlock,
- std::string *ErrMsg) {
- if (NumBytes == 0) return MemoryBlock();
+MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
+ const MemoryBlock *const NearBlock,
+ unsigned Flags,
+ error_code &EC) {
+ EC = error_code::success();
+ if (NumBytes == 0)
+ return MemoryBlock();
+
+ // While we'd be happy to allocate single pages, the Windows allocation
+ // granularity may be larger than a single page (in practice, it is 64K)
+ // so mapping less than that will create an unreachable fragment of memory.
+ static const size_t Granularity = getAllocationGranularity();
+ const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity;
- static const size_t pageSize = Process::GetPageSize();
- size_t NumPages = (NumBytes+pageSize-1)/pageSize;
+ uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
+ NearBlock->size()
+ : NULL;
- PVOID start = NearBlock ? static_cast<unsigned char *>(NearBlock->base()) +
- NearBlock->size() : NULL;
+ // If the requested address is not aligned to the allocation granularity,
+ // round up to get beyond NearBlock. VirtualAlloc would have rounded down.
+ if (Start && Start % Granularity != 0)
+ Start += Granularity - Start % Granularity;
- void *pa = VirtualAlloc(start, NumPages*pageSize, MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
- if (pa == NULL) {
+ DWORD Protect = getWindowsProtectionFlags(Flags);
+
+ void *PA = ::VirtualAlloc(reinterpret_cast<void*>(Start),
+ NumBlocks*Granularity,
+ MEM_RESERVE | MEM_COMMIT, Protect);
+ if (PA == NULL) {
if (NearBlock) {
// Try again without the NearBlock hint
- return AllocateRWX(NumBytes, NULL, ErrMsg);
+ return allocateMappedMemory(NumBytes, NULL, Flags, EC);
}
- MakeErrMsg(ErrMsg, "Can't allocate RWX Memory: ");
+ EC = error_code(::GetLastError(), system_category());
return MemoryBlock();
}
- MemoryBlock result;
- result.Address = pa;
- result.Size = NumPages*pageSize;
- return result;
+ MemoryBlock Result;
+ Result.Address = PA;
+ Result.Size = NumBlocks*Granularity;
+ if (Flags & MF_EXEC)
+ Memory::InvalidateInstructionCache(Result.Address, Result.Size);
+
+ return Result;
}
-bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
- if (M.Address == 0 || M.Size == 0) return false;
+error_code Memory::releaseMappedMemory(MemoryBlock &M) {
+ if (M.Address == 0 || M.Size == 0)
+ return error_code::success();
+
if (!VirtualFree(M.Address, 0, MEM_RELEASE))
- return MakeErrMsg(ErrMsg, "Can't release RWX Memory: ");
- return false;
+ return error_code(::GetLastError(), system_category());
+
+ M.Address = 0;
+ M.Size = 0;
+
+ return error_code::success();
+}
+
+error_code Memory::protectMappedMemory(const MemoryBlock &M,
+ unsigned Flags) {
+ if (M.Address == 0 || M.Size == 0)
+ return error_code::success();
+
+ DWORD Protect = getWindowsProtectionFlags(Flags);
+
+ DWORD OldFlags;
+ if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags))
+ return error_code(::GetLastError(), system_category());
+
+ if (Flags & MF_EXEC)
+ Memory::InvalidateInstructionCache(M.Address, M.Size);
+
+ return error_code::success();
+}
+
+/// InvalidateInstructionCache - Before the JIT can run a block of code
+/// that has been emitted it must invalidate the instruction cache on some
+/// platforms.
+void Memory::InvalidateInstructionCache(
+ const void *Addr, size_t Len) {
+ FlushInstructionCache(GetCurrentProcess(), Addr, Len);
+}
+
+
+MemoryBlock Memory::AllocateRWX(size_t NumBytes,
+ const MemoryBlock *NearBlock,
+ std::string *ErrMsg) {
+ MemoryBlock MB;
+ error_code EC;
+ MB = allocateMappedMemory(NumBytes, NearBlock,
+ MF_READ|MF_WRITE|MF_EXEC, EC);
+ if (EC != error_code::success() && ErrMsg) {
+ MakeErrMsg(ErrMsg, EC.message());
+ }
+ return MB;
+}
+
+bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
+ error_code EC = releaseMappedMemory(M);
+ if (EC == error_code::success())
+ return false;
+ MakeErrMsg(ErrMsg, EC.message());
+ return true;
}
static DWORD getProtection(const void *addr) {
@@ -93,7 +205,7 @@ bool Memory::setRangeWritable(const void *Addr, size_t Size) {
}
DWORD oldProt;
- sys::Memory::InvalidateInstructionCache(Addr, Size);
+ Memory::InvalidateInstructionCache(Addr, Size);
return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
== TRUE;
}
@@ -112,9 +224,10 @@ bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
}
DWORD oldProt;
- sys::Memory::InvalidateInstructionCache(Addr, Size);
+ Memory::InvalidateInstructionCache(Addr, Size);
return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
== TRUE;
}
-}
+} // namespace sys
+} // namespace llvm
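
The allocation-granularity arithmetic above, restated as a sketch (helper names are illustrative): requests are rounded up to whole granularity blocks — in practice 64K on Windows — and a near-block hint is rounded up past the hint, since VirtualAlloc would round it down.

#include <cstddef>
#include <cstdint>

static size_t roundSize(size_t NumBytes, size_t Granularity) {
  size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity;
  return NumBlocks * Granularity;   // e.g. roundSize(1, 0x10000) == 0x10000
}

static uintptr_t roundHintUp(uintptr_t Start, size_t Granularity) {
  if (Start % Granularity != 0)     // not aligned: step past the hint
    Start += Granularity - Start % Granularity;
  return Start;                     // roundHintUp(0x10001, 0x10000) == 0x20000
}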
diff --git a/contrib/llvm/lib/Support/Windows/PathV2.inc b/contrib/llvm/lib/Support/Windows/PathV2.inc
index 696768b..3dfac66 100644
--- a/contrib/llvm/lib/Support/Windows/PathV2.inc
+++ b/contrib/llvm/lib/Support/Windows/PathV2.inc
@@ -794,7 +794,7 @@ mapped_file_region::mapped_file_region(const Twine &path,
SmallVector<wchar_t, 128> path_utf16;
// Convert path to UTF-16.
- if (ec = UTF8ToUTF16(path.toStringRef(path_storage), path_utf16))
+ if ((ec = UTF8ToUTF16(path.toStringRef(path_storage), path_utf16)))
return;
// Get file handle for creating a file mapping.
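
The PathV2.inc change is purely syntactic: doubling the parentheses tells the compiler the assignment inside the condition is intentional, silencing -Wparentheses without changing behavior. A minimal illustration (getStatus is a stand-in):

static int getStatus() { return 0; }   // illustrative stand-in

static void demo() {
  int rc;
  if ((rc = getStatus()))   // assign, then test the result for nonzero
    (void)rc;
}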
diff --git a/contrib/llvm/lib/Support/YAMLParser.cpp b/contrib/llvm/lib/Support/YAMLParser.cpp
index 7c353c8..34df636 100644
--- a/contrib/llvm/lib/Support/YAMLParser.cpp
+++ b/contrib/llvm/lib/Support/YAMLParser.cpp
@@ -903,6 +903,7 @@ bool Scanner::consume(uint32_t Expected) {
void Scanner::skip(uint32_t Distance) {
Current += Distance;
Column += Distance;
+ assert(Current <= End && "Skipped past the end");
}
bool Scanner::isBlankOrBreak(StringRef::iterator Position) {
@@ -1239,6 +1240,12 @@ bool Scanner::scanFlowScalar(bool IsDoubleQuoted) {
}
}
}
+
+ if (Current == End) {
+ setError("Expected quote at end of scalar", Current);
+ return false;
+ }
+
skip(1); // Skip ending quote.
Token T;
T.Kind = Token::TK_Scalar;
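
The scanner hardening above, in miniature (types and names are illustrative): skip() must never advance past the end of the buffer, and a flow scalar whose closing quote is missing becomes a parse error rather than an out-of-bounds read.

#include <cassert>

struct MiniScanner {
  const char *Current, *End;
  void skip(unsigned Distance) {
    Current += Distance;
    assert(Current <= End && "Skipped past the end");
  }
  bool scanQuoted() {                 // returns false on unterminated scalar
    while (Current != End && *Current != '"')
      skip(1);
    if (Current == End)
      return false;                   // expected quote at end of scalar
    skip(1);                          // consume the closing quote
    return true;
  }
};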
diff --git a/contrib/llvm/lib/Support/raw_ostream.cpp b/contrib/llvm/lib/Support/raw_ostream.cpp
index fa69c2d..7cd5364 100644
--- a/contrib/llvm/lib/Support/raw_ostream.cpp
+++ b/contrib/llvm/lib/Support/raw_ostream.cpp
@@ -266,8 +266,8 @@ void raw_ostream::flush_nonempty() {
raw_ostream &raw_ostream::write(unsigned char C) {
// Group exceptional cases into a single branch.
- if (BUILTIN_EXPECT(OutBufCur >= OutBufEnd, false)) {
- if (BUILTIN_EXPECT(!OutBufStart, false)) {
+ if (LLVM_UNLIKELY(OutBufCur >= OutBufEnd)) {
+ if (LLVM_UNLIKELY(!OutBufStart)) {
if (BufferMode == Unbuffered) {
write_impl(reinterpret_cast<char*>(&C), 1);
return *this;
@@ -286,8 +286,8 @@ raw_ostream &raw_ostream::write(unsigned char C) {
raw_ostream &raw_ostream::write(const char *Ptr, size_t Size) {
// Group exceptional cases into a single branch.
- if (BUILTIN_EXPECT(size_t(OutBufEnd - OutBufCur) < Size, false)) {
- if (BUILTIN_EXPECT(!OutBufStart, false)) {
+ if (LLVM_UNLIKELY(size_t(OutBufEnd - OutBufCur) < Size)) {
+ if (LLVM_UNLIKELY(!OutBufStart)) {
if (BufferMode == Unbuffered) {
write_impl(Ptr, Size);
return *this;
@@ -302,7 +302,7 @@ raw_ostream &raw_ostream::write(const char *Ptr, size_t Size) {
// If the buffer is empty at this point we have a string that is larger
// than the buffer. Directly write the chunk that is a multiple of the
// preferred buffer size and put the remainder in the buffer.
- if (BUILTIN_EXPECT(OutBufCur == OutBufStart, false)) {
+ if (LLVM_UNLIKELY(OutBufCur == OutBufStart)) {
size_t BytesToWrite = Size - (Size % NumBytes);
write_impl(Ptr, BytesToWrite);
copy_to_buffer(Ptr + BytesToWrite, Size - BytesToWrite);
@@ -523,7 +523,7 @@ void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) {
ssize_t ret;
// Check whether we should attempt to use atomic writes.
- if (BUILTIN_EXPECT(!UseAtomicWrites, true)) {
+ if (LLVM_LIKELY(!UseAtomicWrites)) {
ret = ::write(FD, Ptr, Size);
} else {
// Use ::writev() where available.
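
LLVM_LIKELY/LLVM_UNLIKELY replace the older BUILTIN_EXPECT spelling. A sketch of how such hints are typically defined (the real macros live in llvm/Support/Compiler.h and degrade to a no-op on compilers without __builtin_expect):

#if defined(__GNUC__)
#define MY_LIKELY(EXPR)   __builtin_expect((bool)(EXPR), true)
#define MY_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
#else
#define MY_LIKELY(EXPR)   (EXPR)
#define MY_UNLIKELY(EXPR) (EXPR)
#endif

static int safeDiv(int A, int B) {
  if (MY_UNLIKELY(B == 0))   // mark the error path as cold
    return 0;
  return A / B;
}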
diff --git a/contrib/llvm/lib/Support/regexec.c b/contrib/llvm/lib/Support/regexec.c
index 0078616..bd5e72d 100644
--- a/contrib/llvm/lib/Support/regexec.c
+++ b/contrib/llvm/lib/Support/regexec.c
@@ -69,7 +69,7 @@
#define SETUP(v) ((v) = 0)
#define onestate long
#define INIT(o, n) ((o) = (unsigned long)1 << (n))
-#define INC(o) ((o) <<= 1)
+#define INC(o) ((o) = (unsigned long)(o) << 1)
#define ISSTATEIN(v, o) (((v) & (o)) != 0)
/* some abbreviations; note that some of these know variable names! */
/* do "if I'm here, I can also be there" etc without branches */
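
The regexec.c change avoids shifting a 1 into the sign bit of a signed long, which is undefined behavior once the state set uses the top bit; performing the shift on unsigned long is well defined and simply wraps. A small demonstration:

#include <climits>
#include <cstdio>

int main() {
  // One bit below the sign bit of long:
  unsigned long o = 1UL << (sizeof(long) * CHAR_BIT - 2);
  o = (unsigned long)o << 1;   // well defined: sets the top bit
  std::printf("%#lx\n", o);    // 0x8000000000000000 on LP64
  return 0;
}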
diff --git a/contrib/llvm/lib/Support/system_error.cpp b/contrib/llvm/lib/Support/system_error.cpp
index 56898de..2df223c 100644
--- a/contrib/llvm/lib/Support/system_error.cpp
+++ b/contrib/llvm/lib/Support/system_error.cpp
@@ -48,8 +48,8 @@ _do_message::message(int ev) const {
class _generic_error_category : public _do_message {
public:
- virtual const char* name() const;
- virtual std::string message(int ev) const;
+ virtual const char* name() const LLVM_OVERRIDE;
+ virtual std::string message(int ev) const LLVM_OVERRIDE;
};
const char*
@@ -74,9 +74,9 @@ generic_category() {
class _system_error_category : public _do_message {
public:
- virtual const char* name() const;
- virtual std::string message(int ev) const;
- virtual error_condition default_error_condition(int ev) const;
+ virtual const char* name() const LLVM_OVERRIDE;
+ virtual std::string message(int ev) const LLVM_OVERRIDE;
+ virtual error_condition default_error_condition(int ev) const LLVM_OVERRIDE;
};
const char*
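
LLVM_OVERRIDE annotates the virtual overrides so signature mismatches become compile errors where C++11 is available. A sketch in the same spirit (the real macro in llvm/Support/Compiler.h keys off compiler feature detection rather than __cplusplus):

#if __cplusplus >= 201103L
#define MY_OVERRIDE override
#else
#define MY_OVERRIDE
#endif

struct ErrorCategory {
  virtual ~ErrorCategory() {}
  virtual const char *name() const { return "generic"; }
};
struct SystemCategory : ErrorCategory {
  // Misspelling this signature would now be a hard error in C++11 mode.
  virtual const char *name() const MY_OVERRIDE { return "system"; }
};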
diff --git a/contrib/llvm/lib/TableGen/Error.cpp b/contrib/llvm/lib/TableGen/Error.cpp
index 1463b68..0bb86b0 100644
--- a/contrib/llvm/lib/TableGen/Error.cpp
+++ b/contrib/llvm/lib/TableGen/Error.cpp
@@ -16,12 +16,25 @@
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
+#include <cstdlib>
+
namespace llvm {
SourceMgr SrcMgr;
-void PrintWarning(SMLoc WarningLoc, const Twine &Msg) {
- SrcMgr.PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg);
+static void PrintMessage(ArrayRef<SMLoc> Loc, SourceMgr::DiagKind Kind,
+ const Twine &Msg) {
+ SMLoc NullLoc;
+ if (Loc.empty())
+ Loc = NullLoc;
+ SrcMgr.PrintMessage(Loc.front(), Kind, Msg);
+ for (unsigned i = 1; i < Loc.size(); ++i)
+ SrcMgr.PrintMessage(Loc[i], SourceMgr::DK_Note,
+ "instantiated from multiclass");
+}
+
+void PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg) {
+ PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg);
}
void PrintWarning(const char *Loc, const Twine &Msg) {
@@ -32,12 +45,8 @@ void PrintWarning(const Twine &Msg) {
errs() << "warning:" << Msg << "\n";
}
-void PrintWarning(const TGError &Warning) {
- PrintWarning(Warning.getLoc(), Warning.getMessage());
-}
-
-void PrintError(SMLoc ErrorLoc, const Twine &Msg) {
- SrcMgr.PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg);
+void PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg) {
+ PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg);
}
void PrintError(const char *Loc, const Twine &Msg) {
@@ -48,8 +57,14 @@ void PrintError(const Twine &Msg) {
errs() << "error:" << Msg << "\n";
}
-void PrintError(const TGError &Error) {
- PrintError(Error.getLoc(), Error.getMessage());
+void PrintFatalError(const std::string &Msg) {
+ PrintError(Twine(Msg));
+ std::exit(1);
+}
+
+void PrintFatalError(ArrayRef<SMLoc> ErrorLoc, const std::string &Msg) {
+ PrintError(ErrorLoc, Msg);
+ std::exit(1);
}
} // end namespace llvm
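
With this change, TableGen reports hard errors by printing and exiting instead of throwing TGError. A hypothetical call site, assuming only the PrintFatalError overloads declared in this patch; the helper name is illustrative:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/TableGen/Error.h"

// Prints the diagnostic (with "instantiated from multiclass" notes for any
// extra locations) and calls std::exit(1); it never returns.
static void checkHasField(llvm::ArrayRef<llvm::SMLoc> Loc, bool HasField) {
  if (!HasField)
    llvm::PrintFatalError(Loc, "record is missing a required field");
}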
diff --git a/contrib/llvm/lib/TableGen/Main.cpp b/contrib/llvm/lib/TableGen/Main.cpp
index 7aeef56..d0ca756 100644
--- a/contrib/llvm/lib/TableGen/Main.cpp
+++ b/contrib/llvm/lib/TableGen/Main.cpp
@@ -22,8 +22,8 @@
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/system_error.h"
#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"
-#include "llvm/TableGen/TableGenAction.h"
#include <algorithm>
#include <cstdio>
using namespace llvm;
@@ -47,79 +47,79 @@ namespace {
cl::value_desc("directory"), cl::Prefix);
}
+/// \brief Create a dependency file for `-d` option.
+///
+/// This functionality is really only for the benefit of the build system.
+/// It is similar to GCC's `-M*` family of options.
+static int createDependencyFile(const TGParser &Parser, const char *argv0) {
+ if (OutputFilename == "-") {
+ errs() << argv0 << ": the option -d must be used together with -o\n";
+ return 1;
+ }
+ std::string Error;
+ tool_output_file DepOut(DependFilename.c_str(), Error);
+ if (!Error.empty()) {
+ errs() << argv0 << ": error opening " << DependFilename
+ << ":" << Error << "\n";
+ return 1;
+ }
+ DepOut.os() << OutputFilename << ":";
+ const std::vector<std::string> &Dependencies = Parser.getDependencies();
+ for (std::vector<std::string>::const_iterator I = Dependencies.begin(),
+ E = Dependencies.end();
+ I != E; ++I) {
+ DepOut.os() << " " << (*I);
+ }
+ DepOut.os() << "\n";
+ DepOut.keep();
+ return 0;
+}
+
namespace llvm {
-int TableGenMain(char *argv0, TableGenAction &Action) {
+int TableGenMain(char *argv0, TableGenMainFn *MainFn) {
RecordKeeper Records;
- try {
- // Parse the input file.
- OwningPtr<MemoryBuffer> File;
- if (error_code ec =
- MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), File)) {
- errs() << "Could not open input file '" << InputFilename << "': "
- << ec.message() <<"\n";
- return 1;
- }
- MemoryBuffer *F = File.take();
-
- // Tell SrcMgr about this buffer, which is what TGParser will pick up.
- SrcMgr.AddNewSourceBuffer(F, SMLoc());
-
- // Record the location of the include directory so that the lexer can find
- // it later.
- SrcMgr.setIncludeDirs(IncludeDirs);
-
- TGParser Parser(SrcMgr, Records);
-
- if (Parser.ParseFile())
- return 1;
-
- std::string Error;
- tool_output_file Out(OutputFilename.c_str(), Error);
- if (!Error.empty()) {
- errs() << argv0 << ": error opening " << OutputFilename
- << ":" << Error << "\n";
- return 1;
- }
- if (!DependFilename.empty()) {
- if (OutputFilename == "-") {
- errs() << argv0 << ": the option -d must be used together with -o\n";
- return 1;
- }
- tool_output_file DepOut(DependFilename.c_str(), Error);
- if (!Error.empty()) {
- errs() << argv0 << ": error opening " << DependFilename
- << ":" << Error << "\n";
- return 1;
- }
- DepOut.os() << OutputFilename << ":";
- const std::vector<std::string> &Dependencies = Parser.getDependencies();
- for (std::vector<std::string>::const_iterator I = Dependencies.begin(),
- E = Dependencies.end();
- I != E; ++I) {
- DepOut.os() << " " << (*I);
- }
- DepOut.os() << "\n";
- DepOut.keep();
- }
-
- if (Action(Out.os(), Records))
- return 1;
-
- // Declare success.
- Out.keep();
- return 0;
-
- } catch (const TGError &Error) {
- PrintError(Error);
- } catch (const std::string &Error) {
- PrintError(Error);
- } catch (const char *Error) {
- PrintError(Error);
- } catch (...) {
- errs() << argv0 << ": Unknown unexpected exception occurred.\n";
+ // Parse the input file.
+ OwningPtr<MemoryBuffer> File;
+ if (error_code ec =
+ MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), File)) {
+ errs() << "Could not open input file '" << InputFilename << "': "
+ << ec.message() <<"\n";
+ return 1;
+ }
+ MemoryBuffer *F = File.take();
+
+ // Tell SrcMgr about this buffer, which is what TGParser will pick up.
+ SrcMgr.AddNewSourceBuffer(F, SMLoc());
+
+ // Record the location of the include directory so that the lexer can find
+ // it later.
+ SrcMgr.setIncludeDirs(IncludeDirs);
+
+ TGParser Parser(SrcMgr, Records);
+
+ if (Parser.ParseFile())
+ return 1;
+
+ std::string Error;
+ tool_output_file Out(OutputFilename.c_str(), Error);
+ if (!Error.empty()) {
+ errs() << argv0 << ": error opening " << OutputFilename
+ << ":" << Error << "\n";
+ return 1;
}
+ if (!DependFilename.empty()) {
+ if (int Ret = createDependencyFile(Parser, argv0))
+ return Ret;
+ }
+
+ if (MainFn(Out.os(), Records))
+ return 1;
+
+ // Declare success.
+ Out.keep();
+ return 0;
return 1;
}
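
The TableGenAction class is gone; backends now hand TableGenMain a plain function. A sketch of a minimal tool under the new interface, assuming the TableGenMainFn typedef in llvm/TableGen/Main.h matches the call MainFn(Out.os(), Records) above:

#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;

static bool EmitStats(raw_ostream &OS, RecordKeeper &Records) {
  OS << "// parsed " << Records.getDefs().size() << " defs\n";
  return false;                      // false means success
}

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);
  return TableGenMain(argv[0], &EmitStats);
}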
diff --git a/contrib/llvm/lib/TableGen/Record.cpp b/contrib/llvm/lib/TableGen/Record.cpp
index 99fdc1f..11feb435 100644
--- a/contrib/llvm/lib/TableGen/Record.cpp
+++ b/contrib/llvm/lib/TableGen/Record.cpp
@@ -112,7 +112,8 @@ Init *BitRecTy::convertValue(IntInit *II) {
}
Init *BitRecTy::convertValue(TypedInit *VI) {
- if (dynamic_cast<BitRecTy*>(VI->getType()))
+ RecTy *Ty = VI->getType();
+ if (isa<BitRecTy>(Ty) || isa<BitsRecTy>(Ty) || isa<IntRecTy>(Ty))
return VI; // Accept variable if it is already of bit type!
return 0;
}
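
The bulk of this Record.cpp diff swaps C++ RTTI (dynamic_cast) for LLVM's Casting.h templates, which dispatch through each class's classof() and need no compiler RTTI. The idioms, in a sketch using the TableGen types this file already defines:

#include "llvm/Support/Casting.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;

static void castingDemo(RecTy *Ty) {
  if (isa<BitRecTy>(Ty)) {
    // Predicate only: no pointer produced.
  }
  if (BitsRecTy *BRT = dyn_cast<BitsRecTy>(Ty)) {
    (void)BRT->getNumBits();   // null if Ty is not a BitsRecTy
  }
}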
@@ -178,60 +179,15 @@ Init *BitsRecTy::convertValue(BitsInit *BI) {
}
Init *BitsRecTy::convertValue(TypedInit *VI) {
- if (BitsRecTy *BRT = dynamic_cast<BitsRecTy*>(VI->getType()))
- if (BRT->Size == Size) {
- SmallVector<Init *, 16> NewBits(Size);
-
- for (unsigned i = 0; i != Size; ++i)
- NewBits[i] = VarBitInit::get(VI, i);
- return BitsInit::get(NewBits);
- }
-
- if (Size == 1 && dynamic_cast<BitRecTy*>(VI->getType()))
+ if (Size == 1 && isa<BitRecTy>(VI->getType()))
return BitsInit::get(VI);
- if (TernOpInit *Tern = dynamic_cast<TernOpInit*>(VI)) {
- if (Tern->getOpcode() == TernOpInit::IF) {
- Init *LHS = Tern->getLHS();
- Init *MHS = Tern->getMHS();
- Init *RHS = Tern->getRHS();
-
- IntInit *MHSi = dynamic_cast<IntInit*>(MHS);
- IntInit *RHSi = dynamic_cast<IntInit*>(RHS);
-
- if (MHSi && RHSi) {
- int64_t MHSVal = MHSi->getValue();
- int64_t RHSVal = RHSi->getValue();
-
- if (canFitInBitfield(MHSVal, Size) && canFitInBitfield(RHSVal, Size)) {
- SmallVector<Init *, 16> NewBits(Size);
-
- for (unsigned i = 0; i != Size; ++i)
- NewBits[i] =
- TernOpInit::get(TernOpInit::IF, LHS,
- IntInit::get((MHSVal & (1LL << i)) ? 1 : 0),
- IntInit::get((RHSVal & (1LL << i)) ? 1 : 0),
- VI->getType());
-
- return BitsInit::get(NewBits);
- }
- } else {
- BitsInit *MHSbs = dynamic_cast<BitsInit*>(MHS);
- BitsInit *RHSbs = dynamic_cast<BitsInit*>(RHS);
-
- if (MHSbs && RHSbs) {
- SmallVector<Init *, 16> NewBits(Size);
-
- for (unsigned i = 0; i != Size; ++i)
- NewBits[i] = TernOpInit::get(TernOpInit::IF, LHS,
- MHSbs->getBit(i),
- RHSbs->getBit(i),
- VI->getType());
+ if (VI->getType()->typeIsConvertibleTo(this)) {
+ SmallVector<Init *, 16> NewBits(Size);
- return BitsInit::get(NewBits);
- }
- }
- }
+ for (unsigned i = 0; i != Size; ++i)
+ NewBits[i] = VarBitInit::get(VI, i);
+ return BitsInit::get(NewBits);
}
return 0;
@@ -244,7 +200,7 @@ Init *IntRecTy::convertValue(BitInit *BI) {
Init *IntRecTy::convertValue(BitsInit *BI) {
int64_t Result = 0;
for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i)
- if (BitInit *Bit = dynamic_cast<BitInit*>(BI->getBit(i))) {
+ if (BitInit *Bit = dyn_cast<BitInit>(BI->getBit(i))) {
Result |= Bit->getValue() << i;
} else {
return 0;
@@ -285,7 +241,7 @@ Init *StringRecTy::convertValue(BinOpInit *BO) {
Init *StringRecTy::convertValue(TypedInit *TI) {
- if (dynamic_cast<StringRecTy*>(TI->getType()))
+ if (isa<StringRecTy>(TI->getType()))
return TI; // Accept variable if already of the right type!
return 0;
}
@@ -305,17 +261,15 @@ Init *ListRecTy::convertValue(ListInit *LI) {
else
return 0;
- ListRecTy *LType = dynamic_cast<ListRecTy*>(LI->getType());
- if (LType == 0) {
+ if (!isa<ListRecTy>(LI->getType()))
return 0;
- }
return ListInit::get(Elements, this);
}
Init *ListRecTy::convertValue(TypedInit *TI) {
// Ensure that TI is compatible with our class.
- if (ListRecTy *LRT = dynamic_cast<ListRecTy*>(TI->getType()))
+ if (ListRecTy *LRT = dyn_cast<ListRecTy>(TI->getType()))
if (LRT->getElementType()->typeIsConvertibleTo(getElementType()))
return TI;
return 0;
@@ -351,7 +305,7 @@ Init *DagRecTy::convertValue(BinOpInit *BO) {
}
RecordRecTy *RecordRecTy::get(Record *R) {
- return &dynamic_cast<RecordRecTy&>(*R->getDefInit()->getType());
+ return dyn_cast<RecordRecTy>(R->getDefInit()->getType());
}
std::string RecordRecTy::getAsString() const {
@@ -367,7 +321,7 @@ Init *RecordRecTy::convertValue(DefInit *DI) {
Init *RecordRecTy::convertValue(TypedInit *TI) {
// Ensure that TI is compatible with Rec.
- if (RecordRecTy *RRT = dynamic_cast<RecordRecTy*>(TI->getType()))
+ if (RecordRecTy *RRT = dyn_cast<RecordRecTy>(TI->getType()))
if (RRT->getRecord()->isSubClassOf(getRecord()) ||
RRT->getRecord() == getRecord())
return TI;
@@ -386,57 +340,53 @@ bool RecordRecTy::baseClassOf(const RecordRecTy *RHS) const {
return false;
}
-
/// resolveTypes - Find a common type that T1 and T2 convert to.
/// Return 0 if no such type exists.
///
RecTy *llvm::resolveTypes(RecTy *T1, RecTy *T2) {
- if (!T1->typeIsConvertibleTo(T2)) {
- if (!T2->typeIsConvertibleTo(T1)) {
- // If one is a Record type, check superclasses
- RecordRecTy *RecTy1 = dynamic_cast<RecordRecTy*>(T1);
- if (RecTy1) {
- // See if T2 inherits from a type T1 also inherits from
- const std::vector<Record *> &T1SuperClasses =
- RecTy1->getRecord()->getSuperClasses();
- for(std::vector<Record *>::const_iterator i = T1SuperClasses.begin(),
- iend = T1SuperClasses.end();
- i != iend;
- ++i) {
- RecordRecTy *SuperRecTy1 = RecordRecTy::get(*i);
- RecTy *NewType1 = resolveTypes(SuperRecTy1, T2);
- if (NewType1 != 0) {
- if (NewType1 != SuperRecTy1) {
- delete SuperRecTy1;
- }
- return NewType1;
- }
+ if (T1->typeIsConvertibleTo(T2))
+ return T2;
+ if (T2->typeIsConvertibleTo(T1))
+ return T1;
+
+ // If one is a Record type, check superclasses
+ if (RecordRecTy *RecTy1 = dyn_cast<RecordRecTy>(T1)) {
+ // See if T2 inherits from a type T1 also inherits from
+ const std::vector<Record *> &T1SuperClasses =
+ RecTy1->getRecord()->getSuperClasses();
+ for(std::vector<Record *>::const_iterator i = T1SuperClasses.begin(),
+ iend = T1SuperClasses.end();
+ i != iend;
+ ++i) {
+ RecordRecTy *SuperRecTy1 = RecordRecTy::get(*i);
+ RecTy *NewType1 = resolveTypes(SuperRecTy1, T2);
+ if (NewType1 != 0) {
+ if (NewType1 != SuperRecTy1) {
+ delete SuperRecTy1;
}
+ return NewType1;
}
- RecordRecTy *RecTy2 = dynamic_cast<RecordRecTy*>(T2);
- if (RecTy2) {
- // See if T1 inherits from a type T2 also inherits from
- const std::vector<Record *> &T2SuperClasses =
- RecTy2->getRecord()->getSuperClasses();
- for (std::vector<Record *>::const_iterator i = T2SuperClasses.begin(),
- iend = T2SuperClasses.end();
- i != iend;
- ++i) {
- RecordRecTy *SuperRecTy2 = RecordRecTy::get(*i);
- RecTy *NewType2 = resolveTypes(T1, SuperRecTy2);
- if (NewType2 != 0) {
- if (NewType2 != SuperRecTy2) {
- delete SuperRecTy2;
- }
- return NewType2;
- }
+ }
+ }
+ if (RecordRecTy *RecTy2 = dyn_cast<RecordRecTy>(T2)) {
+ // See if T1 inherits from a type T2 also inherits from
+ const std::vector<Record *> &T2SuperClasses =
+ RecTy2->getRecord()->getSuperClasses();
+ for (std::vector<Record *>::const_iterator i = T2SuperClasses.begin(),
+ iend = T2SuperClasses.end();
+ i != iend;
+ ++i) {
+ RecordRecTy *SuperRecTy2 = RecordRecTy::get(*i);
+ RecTy *NewType2 = resolveTypes(T1, SuperRecTy2);
+ if (NewType2 != 0) {
+ if (NewType2 != SuperRecTy2) {
+ delete SuperRecTy2;
}
+ return NewType2;
}
- return 0;
}
- return T2;
}
- return T1;
+ return 0;
}
@@ -519,6 +469,15 @@ std::string BitsInit::getAsString() const {
return Result + " }";
}
+// Fix bit initializer to preserve the behavior that a bit reference from an
+// unset bits initializer will resolve into VarBitInit to keep the field name
+// and bit number used in targets with fixed insn length.
+static Init *fixBitInit(const RecordVal *RV, Init *Before, Init *After) {
+ if (RV || After != UnsetInit::get())
+ return After;
+ return Before;
+}
+
// resolveReferences - If there are any field references that refer to fields
// that have been filled in, we can propagate the values now.
//
@@ -526,16 +485,39 @@ Init *BitsInit::resolveReferences(Record &R, const RecordVal *RV) const {
bool Changed = false;
SmallVector<Init *, 16> NewBits(getNumBits());
- for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
- Init *B;
- Init *CurBit = getBit(i);
+ Init *CachedInit = 0;
+ Init *CachedBitVar = 0;
+ bool CachedBitVarChanged = false;
+
+ for (unsigned i = 0, e = getNumBits(); i != e; ++i) {
+ Init *CurBit = Bits[i];
+ Init *CurBitVar = CurBit->getBitVar();
- do {
- B = CurBit;
- CurBit = CurBit->resolveReferences(R, RV);
- Changed |= B != CurBit;
- } while (B != CurBit);
NewBits[i] = CurBit;
+
+ if (CurBitVar == CachedBitVar) {
+ if (CachedBitVarChanged) {
+ Init *Bit = CachedInit->getBit(CurBit->getBitNum());
+ NewBits[i] = fixBitInit(RV, CurBit, Bit);
+ }
+ continue;
+ }
+ CachedBitVar = CurBitVar;
+ CachedBitVarChanged = false;
+
+ Init *B;
+ do {
+ B = CurBitVar;
+ CurBitVar = CurBitVar->resolveReferences(R, RV);
+ CachedBitVarChanged |= B != CurBitVar;
+ Changed |= B != CurBitVar;
+ } while (B != CurBitVar);
+ CachedInit = CurBitVar;
+
+ if (CachedBitVarChanged) {
+ Init *Bit = CurBitVar->getBit(CurBit->getBitNum());
+ NewBits[i] = fixBitInit(RV, CurBit, Bit);
+ }
}
if (Changed)
@@ -613,7 +595,7 @@ ListInit *ListInit::get(ArrayRef<Init *> Range, RecTy *EltTy) {
}
void ListInit::Profile(FoldingSetNodeID &ID) const {
- ListRecTy *ListType = dynamic_cast<ListRecTy *>(getType());
+ ListRecTy *ListType = dyn_cast<ListRecTy>(getType());
assert(ListType && "Bad type for ListInit!");
RecTy *EltTy = ListType->getElementType();
@@ -633,8 +615,9 @@ ListInit::convertInitListSlice(const std::vector<unsigned> &Elements) const {
Record *ListInit::getElementAsRecord(unsigned i) const {
assert(i < Values.size() && "List element index out of range!");
- DefInit *DI = dynamic_cast<DefInit*>(Values[i]);
- if (DI == 0) throw "Expected record in list!";
+ DefInit *DI = dyn_cast<DefInit>(Values[i]);
+ if (DI == 0)
+ PrintFatalError("Expected record in list!");
return DI->getDef();
}
@@ -668,7 +651,7 @@ Init *ListInit::resolveListElementReference(Record &R, const RecordVal *IRV,
// If the element is set to some value, or if we are resolving a reference
// to a specific variable and that variable is explicitly unset, then
// replace the VarListElementInit with it.
- if (IRV || !dynamic_cast<UnsetInit*>(E))
+ if (IRV || !isa<UnsetInit>(E))
return E;
return 0;
}
@@ -682,30 +665,16 @@ std::string ListInit::getAsString() const {
return Result + "]";
}
-Init *OpInit::resolveBitReference(Record &R, const RecordVal *IRV,
- unsigned Bit) const {
- Init *Folded = Fold(&R, 0);
-
- if (Folded != this) {
- TypedInit *Typed = dynamic_cast<TypedInit *>(Folded);
- if (Typed) {
- return Typed->resolveBitReference(R, IRV, Bit);
- }
- }
-
- return 0;
-}
-
Init *OpInit::resolveListElementReference(Record &R, const RecordVal *IRV,
unsigned Elt) const {
Init *Resolved = resolveReferences(R, IRV);
- OpInit *OResolved = dynamic_cast<OpInit *>(Resolved);
+ OpInit *OResolved = dyn_cast<OpInit>(Resolved);
if (OResolved) {
Resolved = OResolved->Fold(&R, 0);
}
if (Resolved != this) {
- TypedInit *Typed = dynamic_cast<TypedInit *>(Resolved);
+ TypedInit *Typed = dyn_cast<TypedInit>(Resolved);
assert(Typed && "Expected typed init for list reference");
if (Typed) {
Init *New = Typed->resolveListElementReference(R, IRV, Elt);
@@ -718,6 +687,12 @@ Init *OpInit::resolveListElementReference(Record &R, const RecordVal *IRV,
return 0;
}
+Init *OpInit::getBit(unsigned Bit) const {
+ if (getType() == BitRecTy::get())
+ return const_cast<OpInit*>(this);
+ return VarBitInit::get(const_cast<OpInit*>(this), Bit);
+}
+
UnOpInit *UnOpInit::get(UnaryOp opc, Init *lhs, RecTy *Type) {
typedef std::pair<std::pair<unsigned, Init *>, RecTy *> Key;
@@ -735,30 +710,23 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
switch (getOpcode()) {
case CAST: {
if (getType()->getAsString() == "string") {
- StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
- if (LHSs) {
+ if (StringInit *LHSs = dyn_cast<StringInit>(LHS))
return LHSs;
- }
- DefInit *LHSd = dynamic_cast<DefInit*>(LHS);
- if (LHSd) {
+ if (DefInit *LHSd = dyn_cast<DefInit>(LHS))
return StringInit::get(LHSd->getDef()->getName());
- }
- IntInit *LHSi = dynamic_cast<IntInit*>(LHS);
- if (LHSi) {
+ if (IntInit *LHSi = dyn_cast<IntInit>(LHS))
return StringInit::get(LHSi->getAsString());
- }
} else {
- StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
- if (LHSs) {
+ if (StringInit *LHSs = dyn_cast<StringInit>(LHS)) {
std::string Name = LHSs->getValue();
// From TGParser::ParseIDValue
if (CurRec) {
if (const RecordVal *RV = CurRec->getValue(Name)) {
if (RV->getType() != getType())
- throw "type mismatch in cast";
+ PrintFatalError("type mismatch in cast");
return VarInit::get(Name, RV->getType());
}
@@ -770,7 +738,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
assert(RV && "Template arg doesn't exist??");
if (RV->getType() != getType())
- throw "type mismatch in cast";
+ PrintFatalError("type mismatch in cast");
return VarInit::get(TemplateArgName, RV->getType());
}
@@ -784,7 +752,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
assert(RV && "Template arg doesn't exist??");
if (RV->getType() != getType())
- throw "type mismatch in cast";
+ PrintFatalError("type mismatch in cast");
return VarInit::get(MCName, RV->getType());
}
@@ -793,14 +761,14 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
if (Record *D = (CurRec->getRecords()).getDef(Name))
return DefInit::get(D);
- throw TGError(CurRec->getLoc(), "Undefined reference:'" + Name + "'\n");
+ PrintFatalError(CurRec->getLoc(),
+ "Undefined reference:'" + Name + "'\n");
}
}
break;
}
case HEAD: {
- ListInit *LHSl = dynamic_cast<ListInit*>(LHS);
- if (LHSl) {
+ if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
if (LHSl->getSize() == 0) {
assert(0 && "Empty list in car");
return 0;
@@ -810,8 +778,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
break;
}
case TAIL: {
- ListInit *LHSl = dynamic_cast<ListInit*>(LHS);
- if (LHSl) {
+ if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
if (LHSl->getSize() == 0) {
assert(0 && "Empty list in cdr");
return 0;
@@ -828,16 +795,14 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
break;
}
case EMPTY: {
- ListInit *LHSl = dynamic_cast<ListInit*>(LHS);
- if (LHSl) {
+ if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
if (LHSl->getSize() == 0) {
return IntInit::get(1);
} else {
return IntInit::get(0);
}
}
- StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
- if (LHSs) {
+ if (StringInit *LHSs = dyn_cast<StringInit>(LHS)) {
if (LHSs->getValue().empty()) {
return IntInit::get(1);
} else {
@@ -891,13 +856,13 @@ BinOpInit *BinOpInit::get(BinaryOp opc, Init *lhs,
Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
switch (getOpcode()) {
case CONCAT: {
- DagInit *LHSs = dynamic_cast<DagInit*>(LHS);
- DagInit *RHSs = dynamic_cast<DagInit*>(RHS);
+ DagInit *LHSs = dyn_cast<DagInit>(LHS);
+ DagInit *RHSs = dyn_cast<DagInit>(RHS);
if (LHSs && RHSs) {
- DefInit *LOp = dynamic_cast<DefInit*>(LHSs->getOperator());
- DefInit *ROp = dynamic_cast<DefInit*>(RHSs->getOperator());
+ DefInit *LOp = dyn_cast<DefInit>(LHSs->getOperator());
+ DefInit *ROp = dyn_cast<DefInit>(RHSs->getOperator());
if (LOp == 0 || ROp == 0 || LOp->getDef() != ROp->getDef())
- throw "Concated Dag operators do not match!";
+ PrintFatalError("Concated Dag operators do not match!");
std::vector<Init*> Args;
std::vector<std::string> ArgNames;
for (unsigned i = 0, e = LHSs->getNumArgs(); i != e; ++i) {
@@ -913,8 +878,8 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
break;
}
case STRCONCAT: {
- StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
- StringInit *RHSs = dynamic_cast<StringInit*>(RHS);
+ StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ StringInit *RHSs = dyn_cast<StringInit>(RHS);
if (LHSs && RHSs)
return StringInit::get(LHSs->getValue() + RHSs->getValue());
break;
@@ -922,16 +887,16 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
case EQ: {
// try to fold eq comparison for 'bit' and 'int', otherwise fallback
// to string objects.
- IntInit* L =
- dynamic_cast<IntInit*>(LHS->convertInitializerTo(IntRecTy::get()));
- IntInit* R =
- dynamic_cast<IntInit*>(RHS->convertInitializerTo(IntRecTy::get()));
+ IntInit *L =
+ dyn_cast_or_null<IntInit>(LHS->convertInitializerTo(IntRecTy::get()));
+ IntInit *R =
+ dyn_cast_or_null<IntInit>(RHS->convertInitializerTo(IntRecTy::get()));
if (L && R)
return IntInit::get(L->getValue() == R->getValue());
- StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
- StringInit *RHSs = dynamic_cast<StringInit*>(RHS);
+ StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ StringInit *RHSs = dyn_cast<StringInit>(RHS);
// Make sure we've resolved
if (LHSs && RHSs)
@@ -942,8 +907,8 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
case SHL:
case SRA:
case SRL: {
- IntInit *LHSi = dynamic_cast<IntInit*>(LHS);
- IntInit *RHSi = dynamic_cast<IntInit*>(RHS);
+ IntInit *LHSi = dyn_cast<IntInit>(LHS);
+ IntInit *RHSi = dyn_cast<IntInit>(RHS);
if (LHSi && RHSi) {
int64_t LHSv = LHSi->getValue(), RHSv = RHSi->getValue();
int64_t Result;
@@ -1016,7 +981,7 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg,
MultiClass *CurMultiClass) {
std::vector<Init *> NewOperands;
- TypedInit *TArg = dynamic_cast<TypedInit*>(Arg);
+ TypedInit *TArg = dyn_cast<TypedInit>(Arg);
// If this is a dag, recurse
if (TArg && TArg->getType()->getAsString() == "dag") {
@@ -1030,7 +995,7 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg,
}
for (int i = 0; i < RHSo->getNumOperands(); ++i) {
- OpInit *RHSoo = dynamic_cast<OpInit*>(RHSo->getOperand(i));
+ OpInit *RHSoo = dyn_cast<OpInit>(RHSo->getOperand(i));
if (RHSoo) {
Init *Result = EvaluateOperation(RHSoo, LHS, Arg,
@@ -1058,25 +1023,21 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg,
static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type,
Record *CurRec, MultiClass *CurMultiClass) {
- DagInit *MHSd = dynamic_cast<DagInit*>(MHS);
- ListInit *MHSl = dynamic_cast<ListInit*>(MHS);
+ DagInit *MHSd = dyn_cast<DagInit>(MHS);
+ ListInit *MHSl = dyn_cast<ListInit>(MHS);
- DagRecTy *DagType = dynamic_cast<DagRecTy*>(Type);
- ListRecTy *ListType = dynamic_cast<ListRecTy*>(Type);
-
- OpInit *RHSo = dynamic_cast<OpInit*>(RHS);
+ OpInit *RHSo = dyn_cast<OpInit>(RHS);
if (!RHSo) {
- throw TGError(CurRec->getLoc(), "!foreach requires an operator\n");
+ PrintFatalError(CurRec->getLoc(), "!foreach requires an operator\n");
}
- TypedInit *LHSt = dynamic_cast<TypedInit*>(LHS);
+ TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
- if (!LHSt) {
- throw TGError(CurRec->getLoc(), "!foreach requires typed variable\n");
- }
+ if (!LHSt)
+ PrintFatalError(CurRec->getLoc(), "!foreach requires typed variable\n");
- if ((MHSd && DagType) || (MHSl && ListType)) {
+ if ((MHSd && isa<DagRecTy>(Type)) || (MHSl && isa<ListRecTy>(Type))) {
if (MHSd) {
Init *Val = MHSd->getOperator();
Init *Result = EvaluateOperation(RHSo, LHS, Val,
@@ -1139,17 +1100,17 @@ static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type,
Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
switch (getOpcode()) {
case SUBST: {
- DefInit *LHSd = dynamic_cast<DefInit*>(LHS);
- VarInit *LHSv = dynamic_cast<VarInit*>(LHS);
- StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
+ DefInit *LHSd = dyn_cast<DefInit>(LHS);
+ VarInit *LHSv = dyn_cast<VarInit>(LHS);
+ StringInit *LHSs = dyn_cast<StringInit>(LHS);
- DefInit *MHSd = dynamic_cast<DefInit*>(MHS);
- VarInit *MHSv = dynamic_cast<VarInit*>(MHS);
- StringInit *MHSs = dynamic_cast<StringInit*>(MHS);
+ DefInit *MHSd = dyn_cast<DefInit>(MHS);
+ VarInit *MHSv = dyn_cast<VarInit>(MHS);
+ StringInit *MHSs = dyn_cast<StringInit>(MHS);
- DefInit *RHSd = dynamic_cast<DefInit*>(RHS);
- VarInit *RHSv = dynamic_cast<VarInit*>(RHS);
- StringInit *RHSs = dynamic_cast<StringInit*>(RHS);
+ DefInit *RHSd = dyn_cast<DefInit>(RHS);
+ VarInit *RHSv = dyn_cast<VarInit>(RHS);
+ StringInit *RHSs = dyn_cast<StringInit>(RHS);
if ((LHSd && MHSd && RHSd)
|| (LHSv && MHSv && RHSv)
@@ -1197,9 +1158,9 @@ Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
}
case IF: {
- IntInit *LHSi = dynamic_cast<IntInit*>(LHS);
+ IntInit *LHSi = dyn_cast<IntInit>(LHS);
if (Init *I = LHS->convertInitializerTo(IntRecTy::get()))
- LHSi = dynamic_cast<IntInit*>(I);
+ LHSi = dyn_cast<IntInit>(I);
if (LHSi) {
if (LHSi->getValue()) {
return MHS;
@@ -1219,9 +1180,9 @@ Init *TernOpInit::resolveReferences(Record &R,
Init *lhs = LHS->resolveReferences(R, RV);
if (Opc == IF && lhs != LHS) {
- IntInit *Value = dynamic_cast<IntInit*>(lhs);
+ IntInit *Value = dyn_cast<IntInit>(lhs);
if (Init *I = lhs->convertInitializerTo(IntRecTy::get()))
- Value = dynamic_cast<IntInit*>(I);
+ Value = dyn_cast<IntInit>(I);
if (Value != 0) {
// Short-circuit
if (Value->getValue()) {
@@ -1257,19 +1218,15 @@ std::string TernOpInit::getAsString() const {
}
RecTy *TypedInit::getFieldType(const std::string &FieldName) const {
- RecordRecTy *RecordType = dynamic_cast<RecordRecTy *>(getType());
- if (RecordType) {
- RecordVal *Field = RecordType->getRecord()->getValue(FieldName);
- if (Field) {
+ if (RecordRecTy *RecordType = dyn_cast<RecordRecTy>(getType()))
+ if (RecordVal *Field = RecordType->getRecord()->getValue(FieldName))
return Field->getType();
- }
- }
return 0;
}
Init *
TypedInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
- BitsRecTy *T = dynamic_cast<BitsRecTy*>(getType());
+ BitsRecTy *T = dyn_cast<BitsRecTy>(getType());
if (T == 0) return 0; // Cannot subscript a non-bits variable.
unsigned NumBits = T->getNumBits();
@@ -1285,7 +1242,7 @@ TypedInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
Init *
TypedInit::convertInitListSlice(const std::vector<unsigned> &Elements) const {
- ListRecTy *T = dynamic_cast<ListRecTy*>(getType());
+ ListRecTy *T = dyn_cast<ListRecTy>(getType());
if (T == 0) return 0; // Cannot subscript a non-list variable.
if (Elements.size() == 1)
@@ -1318,31 +1275,15 @@ VarInit *VarInit::get(Init *VN, RecTy *T) {
}
const std::string &VarInit::getName() const {
- StringInit *NameString =
- dynamic_cast<StringInit *>(getNameInit());
+ StringInit *NameString = dyn_cast<StringInit>(getNameInit());
assert(NameString && "VarInit name is not a string!");
return NameString->getValue();
}
-Init *VarInit::resolveBitReference(Record &R, const RecordVal *IRV,
- unsigned Bit) const {
- if (R.isTemplateArg(getNameInit())) return 0;
- if (IRV && IRV->getNameInit() != getNameInit()) return 0;
-
- RecordVal *RV = R.getValue(getNameInit());
- assert(RV && "Reference to a non-existent variable?");
- assert(dynamic_cast<BitsInit*>(RV->getValue()));
- BitsInit *BI = (BitsInit*)RV->getValue();
-
- assert(Bit < BI->getNumBits() && "Bit reference out of range!");
- Init *B = BI->getBit(Bit);
-
- // If the bit is set to some value, or if we are resolving a reference to a
- // specific variable and that variable is explicitly unset, then replace the
- // VarBitInit with it.
- if (IRV || !dynamic_cast<UnsetInit*>(B))
- return B;
- return 0;
+Init *VarInit::getBit(unsigned Bit) const {
+ if (getType() == BitRecTy::get())
+ return const_cast<VarInit*>(this);
+ return VarBitInit::get(const_cast<VarInit*>(this), Bit);
}
Init *VarInit::resolveListElementReference(Record &R,
@@ -1353,9 +1294,9 @@ Init *VarInit::resolveListElementReference(Record &R,
RecordVal *RV = R.getValue(getNameInit());
assert(RV && "Reference to a non-existent variable?");
- ListInit *LI = dynamic_cast<ListInit*>(RV->getValue());
+ ListInit *LI = dyn_cast<ListInit>(RV->getValue());
if (!LI) {
- TypedInit *VI = dynamic_cast<TypedInit*>(RV->getValue());
+ TypedInit *VI = dyn_cast<TypedInit>(RV->getValue());
assert(VI && "Invalid list element!");
return VarListElementInit::get(VI, Elt);
}
@@ -1366,14 +1307,14 @@ Init *VarInit::resolveListElementReference(Record &R,
// If the element is set to some value, or if we are resolving a reference
// to a specific variable and that variable is explicitly unset, then
// replace the VarListElementInit with it.
- if (IRV || !dynamic_cast<UnsetInit*>(E))
+ if (IRV || !isa<UnsetInit>(E))
return E;
return 0;
}
RecTy *VarInit::getFieldType(const std::string &FieldName) const {
- if (RecordRecTy *RTy = dynamic_cast<RecordRecTy*>(getType()))
+ if (RecordRecTy *RTy = dyn_cast<RecordRecTy>(getType()))
if (const RecordVal *RV = RTy->getRecord()->getValue(FieldName))
return RV->getType();
return 0;
@@ -1381,9 +1322,9 @@ RecTy *VarInit::getFieldType(const std::string &FieldName) const {
Init *VarInit::getFieldInit(Record &R, const RecordVal *RV,
const std::string &FieldName) const {
- if (dynamic_cast<RecordRecTy*>(getType()))
+ if (isa<RecordRecTy>(getType()))
if (const RecordVal *Val = R.getValue(VarName)) {
- if (RV != Val && (RV || dynamic_cast<UnsetInit*>(Val->getValue())))
+ if (RV != Val && (RV || isa<UnsetInit>(Val->getValue())))
return 0;
Init *TheInit = Val->getValue();
assert(TheInit != this && "Infinite loop detected!");
@@ -1402,7 +1343,7 @@ Init *VarInit::getFieldInit(Record &R, const RecordVal *RV,
///
Init *VarInit::resolveReferences(Record &R, const RecordVal *RV) const {
if (RecordVal *Val = R.getValue(VarName))
- if (RV == Val || (RV == 0 && !dynamic_cast<UnsetInit*>(Val->getValue())))
+ if (RV == Val || (RV == 0 && !isa<UnsetInit>(Val->getValue())))
return Val->getValue();
return const_cast<VarInit *>(this);
}
@@ -1425,9 +1366,11 @@ std::string VarBitInit::getAsString() const {
}
Init *VarBitInit::resolveReferences(Record &R, const RecordVal *RV) const {
- if (Init *I = getVariable()->resolveBitReference(R, RV, getBitNum()))
- return I;
- return const_cast<VarBitInit *>(this);
+ Init *I = TI->resolveReferences(R, RV);
+ if (TI != I)
+ return I->getBit(getBitNum());
+
+ return const_cast<VarBitInit*>(this);
}
VarListElementInit *VarListElementInit::get(TypedInit *T,
@@ -1456,11 +1399,10 @@ VarListElementInit::resolveReferences(Record &R, const RecordVal *RV) const {
return const_cast<VarListElementInit *>(this);
}
-Init *VarListElementInit::resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
- // FIXME: This should be implemented, to support references like:
- // bit B = AA[0]{1};
- return 0;
+Init *VarListElementInit::getBit(unsigned Bit) const {
+ if (getType() == BitRecTy::get())
+ return const_cast<VarListElementInit*>(this);
+ return VarBitInit::get(const_cast<VarListElementInit*>(this), Bit);
}
Init *VarListElementInit:: resolveListElementReference(Record &R,
@@ -1469,8 +1411,7 @@ Init *VarListElementInit:: resolveListElementReference(Record &R,
Init *Result = TI->resolveListElementReference(R, RV, Element);
if (Result) {
- TypedInit *TInit = dynamic_cast<TypedInit *>(Result);
- if (TInit) {
+ if (TypedInit *TInit = dyn_cast<TypedInit>(Result)) {
Init *Result2 = TInit->resolveListElementReference(R, RV, Elt);
if (Result2) return Result2;
return new VarListElementInit(TInit, Elt);
@@ -1513,30 +1454,23 @@ FieldInit *FieldInit::get(Init *R, const std::string &FN) {
return I;
}
-Init *FieldInit::resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
- if (Init *BitsVal = Rec->getFieldInit(R, RV, FieldName))
- if (BitsInit *BI = dynamic_cast<BitsInit*>(BitsVal)) {
- assert(Bit < BI->getNumBits() && "Bit reference out of range!");
- Init *B = BI->getBit(Bit);
-
- if (dynamic_cast<BitInit*>(B)) // If the bit is set.
- return B; // Replace the VarBitInit with it.
- }
- return 0;
+Init *FieldInit::getBit(unsigned Bit) const {
+ if (getType() == BitRecTy::get())
+ return const_cast<FieldInit*>(this);
+ return VarBitInit::get(const_cast<FieldInit*>(this), Bit);
}
Init *FieldInit::resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const {
if (Init *ListVal = Rec->getFieldInit(R, RV, FieldName))
- if (ListInit *LI = dynamic_cast<ListInit*>(ListVal)) {
+ if (ListInit *LI = dyn_cast<ListInit>(ListVal)) {
if (Elt >= LI->getSize()) return 0;
Init *E = LI->getElement(Elt);
// If the element is set to some value, or if we are resolving a
// reference to a specific variable and that variable is explicitly
// unset, then replace the VarListElementInit with it.
- if (RV || !dynamic_cast<UnsetInit*>(E))
+ if (RV || !isa<UnsetInit>(E))
return E;
}
return 0;
@@ -1665,7 +1599,7 @@ RecordVal::RecordVal(const std::string &N, RecTy *T, unsigned P)
}
const std::string &RecordVal::getName() const {
- StringInit *NameString = dynamic_cast<StringInit *>(Name);
+ StringInit *NameString = dyn_cast<StringInit>(Name);
assert(NameString && "RecordVal name is not a string!");
return NameString->getValue();
}
@@ -1695,12 +1629,11 @@ void Record::init() {
void Record::checkName() {
// Ensure the record name has string type.
- const TypedInit *TypedName = dynamic_cast<const TypedInit *>(Name);
+ const TypedInit *TypedName = dyn_cast<const TypedInit>(Name);
assert(TypedName && "Record name is not typed!");
RecTy *Type = TypedName->getType();
- if (dynamic_cast<StringRecTy *>(Type) == 0) {
- throw TGError(getLoc(), "Record name is not a string!");
- }
+ if (!isa<StringRecTy>(Type))
+ PrintFatalError(getLoc(), "Record name is not a string!");
}
DefInit *Record::getDefInit() {
@@ -1710,8 +1643,7 @@ DefInit *Record::getDefInit() {
}
const std::string &Record::getName() const {
- const StringInit *NameString =
- dynamic_cast<const StringInit *>(Name);
+ const StringInit *NameString = dyn_cast<StringInit>(Name);
assert(NameString && "Record name is not a string!");
return NameString->getValue();
}
@@ -1751,7 +1683,15 @@ void Record::resolveReferencesTo(const RecordVal *RV) {
if (RV == &Values[i]) // Skip resolve the same field as the given one
continue;
if (Init *V = Values[i].getValue())
- Values[i].setValue(V->resolveReferences(*this, RV));
+ if (Values[i].setValue(V->resolveReferences(*this, RV)))
+ PrintFatalError(getLoc(), "Invalid value is found when setting '"
+ + Values[i].getNameInitAsString()
+ + "' after resolving references"
+ + (RV ? " against '" + RV->getNameInitAsString()
+ + "' of ("
+ + RV->getValue()->getAsUnquotedString() + ")"
+ : "")
+ + "\n");
}
Init *OldName = getNameInit();
Init *NewName = Name->resolveReferences(*this, RV);
@@ -1799,184 +1739,201 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const Record &R) {
}
/// getValueInit - Return the initializer for a value with the specified name,
-/// or throw an exception if the field does not exist.
+/// or abort if the field does not exist.
///
Init *Record::getValueInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
return R->getValue();
}
/// getValueAsString - This method looks up the specified field and returns its
-/// value as a string, throwing an exception if the field does not exist or if
+/// value as a string, aborting if the field does not exist or if
/// the value is not a string.
///
std::string Record::getValueAsString(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
- if (StringInit *SI = dynamic_cast<StringInit*>(R->getValue()))
+ if (StringInit *SI = dyn_cast<StringInit>(R->getValue()))
return SI->getValue();
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a string initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a string initializer!");
}
/// getValueAsBitsInit - This method looks up the specified field and returns
-/// its value as a BitsInit, throwing an exception if the field does not exist
-/// or if the value is not the right type.
+/// its value as a BitsInit, aborting if the field does not exist or if
+/// the value is not the right type.
///
BitsInit *Record::getValueAsBitsInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
- if (BitsInit *BI = dynamic_cast<BitsInit*>(R->getValue()))
+ if (BitsInit *BI = dyn_cast<BitsInit>(R->getValue()))
return BI;
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a BitsInit initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a BitsInit initializer!");
}
/// getValueAsListInit - This method looks up the specified field and returns
-/// its value as a ListInit, throwing an exception if the field does not exist
-/// or if the value is not the right type.
+/// its value as a ListInit, aborting if the field does not exist or if
+/// the value is not the right type.
///
ListInit *Record::getValueAsListInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
- if (ListInit *LI = dynamic_cast<ListInit*>(R->getValue()))
+ if (ListInit *LI = dyn_cast<ListInit>(R->getValue()))
return LI;
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a list initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a list initializer!");
}
/// getValueAsListOfDefs - This method looks up the specified field and returns
-/// its value as a vector of records, throwing an exception if the field does
-/// not exist or if the value is not the right type.
+/// its value as a vector of records, aborting if the field does not exist
+/// or if the value is not the right type.
///
std::vector<Record*>
Record::getValueAsListOfDefs(StringRef FieldName) const {
ListInit *List = getValueAsListInit(FieldName);
std::vector<Record*> Defs;
for (unsigned i = 0; i < List->getSize(); i++) {
- if (DefInit *DI = dynamic_cast<DefInit*>(List->getElement(i))) {
+ if (DefInit *DI = dyn_cast<DefInit>(List->getElement(i))) {
Defs.push_back(DI->getDef());
} else {
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' list is not entirely DefInit!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' list is not entirely DefInit!");
}
}
return Defs;
}
/// getValueAsInt - This method looks up the specified field and returns its
-/// value as an int64_t, throwing an exception if the field does not exist or if
-/// the value is not the right type.
+/// value as an int64_t, aborting if the field does not exist or if the value
+/// is not the right type.
///
int64_t Record::getValueAsInt(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
- if (IntInit *II = dynamic_cast<IntInit*>(R->getValue()))
+ if (IntInit *II = dyn_cast<IntInit>(R->getValue()))
return II->getValue();
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have an int initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have an int initializer!");
}
/// getValueAsListOfInts - This method looks up the specified field and returns
-/// its value as a vector of integers, throwing an exception if the field does
-/// not exist or if the value is not the right type.
+/// its value as a vector of integers, aborting if the field does not exist or
+/// if the value is not the right type.
///
std::vector<int64_t>
Record::getValueAsListOfInts(StringRef FieldName) const {
ListInit *List = getValueAsListInit(FieldName);
std::vector<int64_t> Ints;
for (unsigned i = 0; i < List->getSize(); i++) {
- if (IntInit *II = dynamic_cast<IntInit*>(List->getElement(i))) {
+ if (IntInit *II = dyn_cast<IntInit>(List->getElement(i))) {
Ints.push_back(II->getValue());
} else {
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a list of ints initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a list of ints initializer!");
}
}
return Ints;
}
/// getValueAsListOfStrings - This method looks up the specified field and
-/// returns its value as a vector of strings, throwing an exception if the
-/// field does not exist or if the value is not the right type.
+/// returns its value as a vector of strings, aborting if the field does not
+/// exist or if the value is not the right type.
///
std::vector<std::string>
Record::getValueAsListOfStrings(StringRef FieldName) const {
ListInit *List = getValueAsListInit(FieldName);
std::vector<std::string> Strings;
for (unsigned i = 0; i < List->getSize(); i++) {
- if (StringInit *II = dynamic_cast<StringInit*>(List->getElement(i))) {
+ if (StringInit *II = dyn_cast<StringInit>(List->getElement(i))) {
Strings.push_back(II->getValue());
} else {
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a list of strings initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a list of strings initializer!");
}
}
return Strings;
}
/// getValueAsDef - This method looks up the specified field and returns its
-/// value as a Record, throwing an exception if the field does not exist or if
-/// the value is not the right type.
+/// value as a Record, aborting if the field does not exist or if the value
+/// is not the right type.
///
Record *Record::getValueAsDef(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
- if (DefInit *DI = dynamic_cast<DefInit*>(R->getValue()))
+ if (DefInit *DI = dyn_cast<DefInit>(R->getValue()))
return DI->getDef();
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a def initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a def initializer!");
}
/// getValueAsBit - This method looks up the specified field and returns its
-/// value as a bit, throwing an exception if the field does not exist or if
-/// the value is not the right type.
+/// value as a bit, aborting if the field does not exist or if the value is
+/// not the right type.
///
bool Record::getValueAsBit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
- if (BitInit *BI = dynamic_cast<BitInit*>(R->getValue()))
+ if (BitInit *BI = dyn_cast<BitInit>(R->getValue()))
return BI->getValue();
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a bit initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a bit initializer!");
+}
+
+bool Record::getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const {
+ const RecordVal *R = getValue(FieldName);
+ if (R == 0 || R->getValue() == 0)
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
+
+ if (R->getValue() == UnsetInit::get()) {
+ Unset = true;
+ return false;
+ }
+ Unset = false;
+ if (BitInit *BI = dyn_cast<BitInit>(R->getValue()))
+ return BI->getValue();
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a bit initializer!");
}
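
The new getValueAsBitOrUnset() accessor lets a backend distinguish an explicit 'false' from a bit field left as '?'. A minimal usage sketch, assuming a Record *R; the field name "isPredicable" and the chosen default are placeholders, not part of this patch:

  bool Unset;
  bool Predicable = R->getValueAsBitOrUnset("isPredicable", Unset);
  if (Unset) {
    // The .td file left the bit as '?', so ignore the returned false
    // and apply a backend-chosen default instead.
    Predicable = true;
  }
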
/// getValueAsDag - This method looks up the specified field and returns its
-/// value as an Dag, throwing an exception if the field does not exist or if
-/// the value is not the right type.
+/// value as a Dag, aborting if the field does not exist or if the value is
+/// not the right type.
///
DagInit *Record::getValueAsDag(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
+ PrintFatalError(getLoc(), "Record `" + getName() +
+ "' does not have a field named `" + FieldName.str() + "'!\n");
- if (DagInit *DI = dynamic_cast<DagInit*>(R->getValue()))
+ if (DagInit *DI = dyn_cast<DagInit>(R->getValue()))
return DI;
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a dag initializer!";
+ PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
+ FieldName.str() + "' does not have a dag initializer!");
}
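
With the accessors above rewritten to call PrintFatalError(), a bad field lookup now prints a diagnostic at the record's source location and exits rather than throwing. A minimal backend-style sketch of the calling convention this implies, assuming a RecordKeeper Records and a raw_ostream OS; the class name "MyInst" and field "AsmString" are placeholders:

  std::vector<Record*> Insts = Records.getAllDerivedDefinitions("MyInst");
  for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
    Record *R = Insts[i];
    // Exits with a located fatal error if "AsmString" is missing or is
    // not a string; there is no longer an exception to catch.
    OS << R->getName() << ": " << R->getValueAsString("AsmString") << "\n";
  }
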
@@ -2019,7 +1976,7 @@ std::vector<Record*>
RecordKeeper::getAllDerivedDefinitions(const std::string &ClassName) const {
Record *Class = getClass(ClassName);
if (!Class)
- throw "ERROR: Couldn't find the `" + ClassName + "' class!\n";
+ PrintFatalError("ERROR: Couldn't find the `" + ClassName + "' class!\n");
std::vector<Record*> Defs;
for (std::map<std::string, Record*>::const_iterator I = getDefs().begin(),
@@ -2034,7 +1991,7 @@ RecordKeeper::getAllDerivedDefinitions(const std::string &ClassName) const {
/// to CurRec's name.
Init *llvm::QualifyName(Record &CurRec, MultiClass *CurMultiClass,
Init *Name, const std::string &Scoper) {
- RecTy *Type = dynamic_cast<TypedInit *>(Name)->getType();
+ RecTy *Type = dyn_cast<TypedInit>(Name)->getType();
BinOpInit *NewName =
BinOpInit::get(BinOpInit::STRCONCAT,
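
The dynamic_cast-to-dyn_cast conversion running through this file swaps C++ RTTI for LLVM's hand-rolled RTTI, which dispatches through each class's classof() and needs no compiler RTTI support. A short sketch of the idiom, assuming some Init *I and a std::vector<std::string> Strings:

  if (StringInit *SI = dyn_cast<StringInit>(I))  // 0 when I is not a StringInit
    Strings.push_back(SI->getValue());
  else if (isa<UnsetInit>(I))                    // pure type test, no cast
    Strings.push_back("?");
  // cast<StringInit>(I) would instead assert on a type mismatch rather
  // than return 0; none of the three touches C++ RTTI.
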
diff --git a/contrib/llvm/lib/TableGen/TGParser.cpp b/contrib/llvm/lib/TableGen/TGParser.cpp
index b9c7ff6..b1f9f72 100644
--- a/contrib/llvm/lib/TableGen/TGParser.cpp
+++ b/contrib/llvm/lib/TableGen/TGParser.cpp
@@ -93,7 +93,7 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
// Do not allow assignments like 'X = X'. This will just cause infinite loops
// in the resolution machinery.
if (BitList.empty())
- if (VarInit *VI = dynamic_cast<VarInit*>(V))
+ if (VarInit *VI = dyn_cast<VarInit>(V))
if (VI->getNameInit() == ValName)
return false;
@@ -102,7 +102,7 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
// initializer.
//
if (!BitList.empty()) {
- BitsInit *CurVal = dynamic_cast<BitsInit*>(RV->getValue());
+ BitsInit *CurVal = dyn_cast<BitsInit>(RV->getValue());
if (CurVal == 0)
return Error(Loc, "Value '" + ValName->getAsUnquotedString()
+ "' is not a bits type");
@@ -110,12 +110,11 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
// Convert the incoming value to a bits type of the appropriate size...
Init *BI = V->convertInitializerTo(BitsRecTy::get(BitList.size()));
if (BI == 0) {
- V->convertInitializerTo(BitsRecTy::get(BitList.size()));
return Error(Loc, "Initializer is not compatible with bit range");
}
// We should have a BitsInit type now.
- BitsInit *BInit = dynamic_cast<BitsInit*>(BI);
+ BitsInit *BInit = dyn_cast<BitsInit>(BI);
assert(BInit != 0);
SmallVector<Init *, 16> NewBits(CurVal->getNumBits());
@@ -311,7 +310,7 @@ bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals){
if (IterVals.size() != Loops.size()) {
assert(IterVals.size() < Loops.size());
ForeachLoop &CurLoop = Loops[IterVals.size()];
- ListInit *List = dynamic_cast<ListInit *>(CurLoop.ListValue);
+ ListInit *List = dyn_cast<ListInit>(CurLoop.ListValue);
if (List == 0) {
Error(Loc, "Loop list is not a list");
return true;
@@ -336,7 +335,7 @@ bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals){
// Set the iterator values now.
for (unsigned i = 0, e = IterVals.size(); i != e; ++i) {
VarInit *IterVar = IterVals[i].IterVar;
- TypedInit *IVal = dynamic_cast<TypedInit *>(IterVals[i].IterValue);
+ TypedInit *IVal = dyn_cast<TypedInit>(IterVals[i].IterValue);
if (IVal == 0) {
Error(Loc, "foreach iterator value is untyped");
return true;
@@ -407,8 +406,7 @@ Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) {
RecTy *Type = 0;
if (CurRec) {
- const TypedInit *CurRecName =
- dynamic_cast<const TypedInit *>(CurRec->getNameInit());
+ const TypedInit *CurRecName = dyn_cast<TypedInit>(CurRec->getNameInit());
if (!CurRecName) {
TokError("Record name is not typed!");
return 0;
@@ -781,7 +779,7 @@ Init *TGParser::ParseIDValue(Record *CurRec,
for (LoopVector::iterator i = Loops.begin(), iend = Loops.end();
i != iend;
++i) {
- VarInit *IterVar = dynamic_cast<VarInit *>(i->IterVar);
+ VarInit *IterVar = dyn_cast<VarInit>(i->IterVar);
if (IterVar && IterVar->getName() == Name)
return IterVar;
}
@@ -856,16 +854,16 @@ Init *TGParser::ParseOperation(Record *CurRec) {
if (Code == UnOpInit::HEAD
|| Code == UnOpInit::TAIL
|| Code == UnOpInit::EMPTY) {
- ListInit *LHSl = dynamic_cast<ListInit*>(LHS);
- StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
- TypedInit *LHSt = dynamic_cast<TypedInit*>(LHS);
+ ListInit *LHSl = dyn_cast<ListInit>(LHS);
+ StringInit *LHSs = dyn_cast<StringInit>(LHS);
+ TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
if (LHSl == 0 && LHSs == 0 && LHSt == 0) {
TokError("expected list or string type argument in unary operator");
return 0;
}
if (LHSt) {
- ListRecTy *LType = dynamic_cast<ListRecTy*>(LHSt->getType());
- StringRecTy *SType = dynamic_cast<StringRecTy*>(LHSt->getType());
+ ListRecTy *LType = dyn_cast<ListRecTy>(LHSt->getType());
+ StringRecTy *SType = dyn_cast<StringRecTy>(LHSt->getType());
if (LType == 0 && SType == 0) {
TokError("expected list or string type argumnet in unary operator");
return 0;
@@ -885,7 +883,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
}
if (LHSl) {
Init *Item = LHSl->getElement(0);
- TypedInit *Itemt = dynamic_cast<TypedInit*>(Item);
+ TypedInit *Itemt = dyn_cast<TypedInit>(Item);
if (Itemt == 0) {
TokError("untyped list element in unary operator");
return 0;
@@ -897,7 +895,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
}
} else {
assert(LHSt && "expected list type argument in unary operator");
- ListRecTy *LType = dynamic_cast<ListRecTy*>(LHSt->getType());
+ ListRecTy *LType = dyn_cast<ListRecTy>(LHSt->getType());
if (LType == 0) {
TokError("expected list type argumnet in unary operator");
return 0;
@@ -1044,35 +1042,28 @@ Init *TGParser::ParseOperation(Record *CurRec) {
switch (LexCode) {
default: llvm_unreachable("Unhandled code!");
case tgtok::XIf: {
- // FIXME: The `!if' operator doesn't handle non-TypedInit well at
- // all. This can be made much more robust.
- TypedInit *MHSt = dynamic_cast<TypedInit*>(MHS);
- TypedInit *RHSt = dynamic_cast<TypedInit*>(RHS);
-
RecTy *MHSTy = 0;
RecTy *RHSTy = 0;
- if (MHSt == 0 && RHSt == 0) {
- BitsInit *MHSbits = dynamic_cast<BitsInit*>(MHS);
- BitsInit *RHSbits = dynamic_cast<BitsInit*>(RHS);
-
- if (MHSbits && RHSbits &&
- MHSbits->getNumBits() == RHSbits->getNumBits()) {
- Type = BitRecTy::get();
- break;
- } else {
- BitInit *MHSbit = dynamic_cast<BitInit*>(MHS);
- BitInit *RHSbit = dynamic_cast<BitInit*>(RHS);
-
- if (MHSbit && RHSbit) {
- Type = BitRecTy::get();
- break;
- }
- }
- } else if (MHSt != 0 && RHSt != 0) {
+ if (TypedInit *MHSt = dyn_cast<TypedInit>(MHS))
MHSTy = MHSt->getType();
+ if (BitsInit *MHSbits = dyn_cast<BitsInit>(MHS))
+ MHSTy = BitsRecTy::get(MHSbits->getNumBits());
+ if (isa<BitInit>(MHS))
+ MHSTy = BitRecTy::get();
+
+ if (TypedInit *RHSt = dyn_cast<TypedInit>(RHS))
RHSTy = RHSt->getType();
- }
+ if (BitsInit *RHSbits = dyn_cast<BitsInit>(RHS))
+ RHSTy = BitsRecTy::get(RHSbits->getNumBits());
+ if (isa<BitInit>(RHS))
+ RHSTy = BitRecTy::get();
+
+ // For UnsetInit, the type is inferred from the other operand.
+ if (isa<UnsetInit>(MHS))
+ MHSTy = RHSTy;
+ if (isa<UnsetInit>(RHS))
+ RHSTy = MHSTy;
if (!MHSTy || !RHSTy) {
TokError("could not get type for !if");
@@ -1090,7 +1081,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
break;
}
case tgtok::XForEach: {
- TypedInit *MHSt = dynamic_cast<TypedInit *>(MHS);
+ TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
if (MHSt == 0) {
TokError("could not get type for !foreach");
return 0;
@@ -1099,7 +1090,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
break;
}
case tgtok::XSubst: {
- TypedInit *RHSt = dynamic_cast<TypedInit *>(RHS);
+ TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
if (RHSt == 0) {
TokError("could not get type for !subst");
return 0;
@@ -1278,7 +1269,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
ListRecTy *GivenListTy = 0;
if (ItemType != 0) {
- ListRecTy *ListType = dynamic_cast<ListRecTy*>(ItemType);
+ ListRecTy *ListType = dyn_cast<ListRecTy>(ItemType);
if (ListType == 0) {
std::stringstream s;
s << "Type mismatch for list, expected list type, got "
@@ -1323,7 +1314,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
for (std::vector<Init *>::iterator i = Vals.begin(), ie = Vals.end();
i != ie;
++i) {
- TypedInit *TArg = dynamic_cast<TypedInit*>(*i);
+ TypedInit *TArg = dyn_cast<TypedInit>(*i);
if (TArg == 0) {
TokError("Untyped list element");
return 0;
@@ -1506,7 +1497,7 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
// Create a !strconcat() operation, first casting each operand to
// a string if necessary.
- TypedInit *LHS = dynamic_cast<TypedInit *>(Result);
+ TypedInit *LHS = dyn_cast<TypedInit>(Result);
if (!LHS) {
Error(PasteLoc, "LHS of paste is not typed!");
return 0;
@@ -1533,7 +1524,7 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
default:
Init *RHSResult = ParseValue(CurRec, ItemType, ParseNameMode);
- RHS = dynamic_cast<TypedInit *>(RHSResult);
+ RHS = dyn_cast<TypedInit>(RHSResult);
if (!RHS) {
Error(PasteLoc, "RHS of paste is not typed!");
return 0;
@@ -1724,13 +1715,13 @@ VarInit *TGParser::ParseForeachDeclaration(ListInit *&ForeachListValue) {
default: TokError("Unknown token when expecting a range list"); return 0;
case tgtok::l_square: { // '[' ValueList ']'
Init *List = ParseSimpleValue(0, 0, ParseForeachMode);
- ForeachListValue = dynamic_cast<ListInit*>(List);
+ ForeachListValue = dyn_cast<ListInit>(List);
if (ForeachListValue == 0) {
TokError("Expected a Value list");
return 0;
}
RecTy *ValueType = ForeachListValue->getType();
- ListRecTy *ListType = dynamic_cast<ListRecTy *>(ValueType);
+ ListRecTy *ListType = dyn_cast<ListRecTy>(ValueType);
if (ListType == 0) {
TokError("Value list is not of list type");
return 0;
@@ -2265,7 +2256,7 @@ InstantiateMulticlassDef(MultiClass &MC,
Init *DefName = DefProto->getNameInit();
- StringInit *DefNameString = dynamic_cast<StringInit *>(DefName);
+ StringInit *DefNameString = dyn_cast<StringInit>(DefName);
if (DefNameString != 0) {
// We have a fully expanded string so there are no operators to
@@ -2277,7 +2268,10 @@ InstantiateMulticlassDef(MultiClass &MC,
DefName, StringRecTy::get())->Fold(DefProto, &MC);
}
- Record *CurRec = new Record(DefName, DefmPrefixLoc, Records);
+ // Make a trail of SMLocs from the multiclass instantiations.
+ SmallVector<SMLoc, 4> Locs(1, DefmPrefixLoc);
+ Locs.append(DefProto->getLoc().begin(), DefProto->getLoc().end());
+ Record *CurRec = new Record(DefName, Locs, Records);
SubClassReference Ref;
Ref.RefLoc = DefmPrefixLoc;
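
The Locs trail built above means Record::getLoc() now carries one SMLoc per instantiation level instead of a single location, so diagnostics on a multiclass-generated def can point at both the defm and the prototype it expanded. A reduced sketch of the accumulation, with Proto, Name and DefmLoc standing in for the patch's DefProto, DefName and DefmPrefixLoc:

  SmallVector<SMLoc, 4> Locs(1, DefmLoc);  // the defm use site comes first
  Locs.append(Proto->getLoc().begin(), Proto->getLoc().end());
  // Proto's own trail may already hold SMLocs from enclosing
  // multiclasses, so nested expansion concatenates naturally into
  // [defm site, outer proto, ..., innermost proto].
  Record *R = new Record(Name, Locs, Records);
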
diff --git a/contrib/llvm/lib/TableGen/TGParser.h b/contrib/llvm/lib/TableGen/TGParser.h
index 3d2c72c..9c2ad43 100644
--- a/contrib/llvm/lib/TableGen/TGParser.h
+++ b/contrib/llvm/lib/TableGen/TGParser.h
@@ -30,7 +30,7 @@ namespace llvm {
struct MultiClass;
struct SubClassReference;
struct SubMultiClassReference;
-
+
struct LetRecord {
std::string Name;
std::vector<unsigned> Bits;
@@ -41,7 +41,7 @@ namespace llvm {
: Name(N), Bits(B), Value(V), Loc(L) {
}
};
-
+
/// ForeachLoop - Record the iteration state associated with a for loop.
/// This is used to instantiate items in the loop body.
struct ForeachLoop {
@@ -56,13 +56,13 @@ class TGParser {
TGLexer Lex;
std::vector<std::vector<LetRecord> > LetStack;
std::map<std::string, MultiClass*> MultiClasses;
-
+
/// Loops - Keep track of any foreach loops we are within.
///
typedef std::vector<ForeachLoop> LoopVector;
LoopVector Loops;
- /// CurMultiClass - If we are parsing a 'multiclass' definition, this is the
+ /// CurMultiClass - If we are parsing a 'multiclass' definition, this is the
/// current value.
MultiClass *CurMultiClass;
@@ -82,13 +82,13 @@ class TGParser {
};
public:
- TGParser(SourceMgr &SrcMgr, RecordKeeper &records) :
+ TGParser(SourceMgr &SrcMgr, RecordKeeper &records) :
Lex(SrcMgr), CurMultiClass(0), Records(records) {}
-
+
/// ParseFile - Main entrypoint for parsing a tblgen file. These parser
/// routines return true on error, or false on success.
bool ParseFile();
-
+
bool Error(SMLoc L, const Twine &Msg) const {
PrintError(L, Msg);
return true;
@@ -102,9 +102,9 @@ public:
private: // Semantic analysis methods.
bool AddValue(Record *TheRec, SMLoc Loc, const RecordVal &RV);
- bool SetValue(Record *TheRec, SMLoc Loc, Init *ValName,
+ bool SetValue(Record *TheRec, SMLoc Loc, Init *ValName,
const std::vector<unsigned> &BitList, Init *V);
- bool SetValue(Record *TheRec, SMLoc Loc, const std::string &ValName,
+ bool SetValue(Record *TheRec, SMLoc Loc, const std::string &ValName,
const std::vector<unsigned> &BitList, Init *V) {
return SetValue(TheRec, Loc, StringInit::get(ValName), BitList, V);
}
@@ -170,7 +170,8 @@ private: // Parser methods.
IDParseMode Mode = ParseValueMode);
Init *ParseValue(Record *CurRec, RecTy *ItemType = 0,
IDParseMode Mode = ParseValueMode);
- std::vector<Init*> ParseValueList(Record *CurRec, Record *ArgsRec = 0, RecTy *EltTy = 0);
+ std::vector<Init*> ParseValueList(Record *CurRec, Record *ArgsRec = 0,
+ RecTy *EltTy = 0);
std::vector<std::pair<llvm::Init*, std::string> > ParseDagArgList(Record *);
bool ParseOptionalRangeList(std::vector<unsigned> &Ranges);
bool ParseOptionalBitList(std::vector<unsigned> &Ranges);
@@ -184,7 +185,7 @@ private: // Parser methods.
MultiClass *ParseMultiClassID();
Record *ParseDefmID();
};
-
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/TableGen/TableGenAction.cpp b/contrib/llvm/lib/TableGen/TableGenAction.cpp
deleted file mode 100644
index 54e5083..0000000
--- a/contrib/llvm/lib/TableGen/TableGenAction.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-//===- TableGenAction.cpp - defines TableGenAction --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/TableGen/TableGenAction.h"
-
-using namespace llvm;
-
-void TableGenAction::anchor() { }
-
diff --git a/contrib/llvm/lib/Target/ARM/ARM.h b/contrib/llvm/lib/Target/ARM/ARM.h
index 2a1e8e4..1446bbb 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.h
+++ b/contrib/llvm/lib/Target/ARM/ARM.h
@@ -37,6 +37,7 @@ FunctionPass *createARMJITCodeEmitterPass(ARMBaseTargetMachine &TM,
FunctionPass *createARMLoadStoreOptimizationPass(bool PreAlloc = false);
FunctionPass *createARMExpandPseudoPass();
+FunctionPass *createARMGlobalBaseRegPass();
FunctionPass *createARMGlobalMergePass(const TargetLowering* tli);
FunctionPass *createARMConstantIslandPass();
FunctionPass *createMLxExpansionPass();
diff --git a/contrib/llvm/lib/Target/ARM/ARM.td b/contrib/llvm/lib/Target/ARM/ARM.td
index 69e2346..23974ad 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.td
+++ b/contrib/llvm/lib/Target/ARM/ARM.td
@@ -32,9 +32,6 @@ def FeatureVFP2 : SubtargetFeature<"vfp2", "HasVFPv2", "true",
def FeatureVFP3 : SubtargetFeature<"vfp3", "HasVFPv3", "true",
"Enable VFP3 instructions",
[FeatureVFP2]>;
-def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true",
- "Enable VFP4 instructions",
- [FeatureVFP3]>;
def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true",
"Enable NEON instructions",
[FeatureVFP3]>;
@@ -44,10 +41,16 @@ def FeatureNoARM : SubtargetFeature<"noarm", "NoARM", "true",
"Does not support ARM mode execution">;
def FeatureFP16 : SubtargetFeature<"fp16", "HasFP16", "true",
"Enable half-precision floating point">;
+def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true",
+ "Enable VFP4 instructions",
+ [FeatureVFP3, FeatureFP16]>;
def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true",
"Restrict VFP3 to 16 double registers">;
def FeatureHWDiv : SubtargetFeature<"hwdiv", "HasHardwareDivide", "true",
"Enable divide instructions">;
+def FeatureHWDivARM : SubtargetFeature<"hwdiv-arm",
+ "HasHardwareDivideInARM", "true",
+ "Enable divide instructions in ARM mode">;
def FeatureT2XtPk : SubtargetFeature<"t2xtpk", "HasT2ExtractPack", "true",
"Enable Thumb2 extract and pack instructions">;
def FeatureDB : SubtargetFeature<"db", "HasDataBarrier", "true",
@@ -139,6 +142,18 @@ def ProcA9 : SubtargetFeature<"a9", "ARMProcFamily", "CortexA9",
[FeatureVMLxForwarding,
FeatureT2XtPk, FeatureFP16,
FeatureAvoidPartialCPSR]>;
+def ProcSwift : SubtargetFeature<"swift", "ARMProcFamily", "Swift",
+ "Swift ARM processors",
+ [FeatureNEONForFP, FeatureT2XtPk,
+ FeatureVFP4, FeatureMP, FeatureHWDiv,
+ FeatureHWDivARM, FeatureAvoidPartialCPSR,
+ FeatureHasSlowFPVMLx]>;
+
+// FIXME: It has not been determined if A15 has these features.
+def ProcA15 : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15",
+ "Cortex-A15 ARM processors",
+ [FeatureT2XtPk, FeatureFP16,
+ FeatureAvoidPartialCPSR]>;
class ProcNoItin<string Name, list<SubtargetFeature> Features>
: Processor<Name, NoItineraries, Features>;
@@ -214,6 +229,10 @@ def : ProcessorModel<"cortex-a9-mp", CortexA9Model,
[ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
FeatureDSPThumb2, FeatureMP,
FeatureHasRAS]>;
+// FIXME: A15 currently has the same ProcessorModel as A9.
+def : ProcessorModel<"cortex-a15", CortexA9Model,
+ [ProcA15, HasV7Ops, FeatureNEON, FeatureDB,
+ FeatureDSPThumb2, FeatureHasRAS]>;
// V7M Processors.
def : ProcNoItin<"cortex-m3", [HasV7Ops,
@@ -227,6 +246,12 @@ def : ProcNoItin<"cortex-m4", [HasV7Ops,
FeatureT2XtPk, FeatureVFP4,
FeatureVFPOnlySP, FeatureMClass]>;
+// Swift uArch Processors.
+def : ProcessorModel<"swift", SwiftModel,
+ [ProcSwift, HasV7Ops, FeatureNEON,
+ FeatureDB, FeatureDSPThumb2,
+ FeatureHasRAS]>;
+
//===----------------------------------------------------------------------===//
// Register File Description
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index e9e2803..d439d1d 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -23,6 +23,8 @@
#include "InstPrinter/ARMInstPrinter.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Constants.h"
#include "llvm/DebugInfo.h"
#include "llvm/Module.h"
@@ -40,9 +42,8 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -302,7 +303,7 @@ void ARMAsmPrinter::EmitFunctionEntryLabel() {
}
void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
- uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
assert(Size && "C++ constructor pointer had zero size!");
const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
@@ -389,16 +390,6 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
//===--------------------------------------------------------------------===//
MCSymbol *ARMAsmPrinter::
-GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2,
- const MachineBasicBlock *MBB) const {
- SmallString<60> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix()
- << getFunctionNumber() << '_' << uid << '_' << uid2
- << "_set_" << MBB->getNumber();
- return OutContext.GetOrCreateSymbol(Name.str());
-}
-
-MCSymbol *ARMAsmPrinter::
GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const {
SmallString<60> Name;
raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix() << "JTI"
@@ -592,9 +583,24 @@ void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
const TargetLoweringObjectFileMachO &TLOFMacho =
static_cast<const TargetLoweringObjectFileMachO &>(
getObjFileLowering());
- OutStreamer.SwitchSection(TLOFMacho.getTextSection());
- OutStreamer.SwitchSection(TLOFMacho.getTextCoalSection());
- OutStreamer.SwitchSection(TLOFMacho.getConstTextCoalSection());
+
+ // Collect the set of sections our functions will go into.
+ SetVector<const MCSection *, SmallVector<const MCSection *, 8>,
+ SmallPtrSet<const MCSection *, 8> > TextSections;
+ // Default text section comes first.
+ TextSections.insert(TLOFMacho.getTextSection());
+ // Now any user defined text sections from function attributes.
+ for (Module::iterator F = M.begin(), e = M.end(); F != e; ++F)
+ if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage())
+ TextSections.insert(TLOFMacho.SectionForGlobal(F, Mang, TM));
+ // Now the coalescable sections.
+ TextSections.insert(TLOFMacho.getTextCoalSection());
+ TextSections.insert(TLOFMacho.getConstTextCoalSection());
+
+ // Emit the sections in the .s file header to fix the order.
+ for (unsigned i = 0, e = TextSections.size(); i != e; ++i)
+ OutStreamer.SwitchSection(TextSections[i]);
+
if (RelocM == Reloc::DynamicNoPIC) {
const MCSection *sect =
OutContext.getMachOSection("__TEXT", "__symbol_stub4",
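
The SetVector above is doing real work: user-specified text sections must be emitted in a deterministic first-seen order, without duplicates, and with the default text section always first. A reduced sketch of that contract, with Default, UserSect and Streamer as placeholders:

  SetVector<const MCSection *, SmallVector<const MCSection *, 8>,
            SmallPtrSet<const MCSection *, 8> > Sections;
  Sections.insert(Default);   // slot 0: default text section stays first
  Sections.insert(UserSect);  // later sections keep encounter order
  Sections.insert(Default);   // duplicate insert is a no-op
  for (unsigned i = 0, e = Sections.size(); i != e; ++i)
    Streamer.SwitchSection(Sections[i]);
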
@@ -743,13 +749,28 @@ void ARMAsmPrinter::emitAttributes() {
AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
ARMBuildAttrs::Allowed);
} else if (CPUString == "generic") {
- // FIXME: Why these defaults?
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v4T);
+ // For a generic CPU, we assume a standard v7a architecture in Subtarget.
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::ApplicationProfile);
AttrEmitter->EmitAttribute(ARMBuildAttrs::ARM_ISA_use,
ARMBuildAttrs::Allowed);
AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
- ARMBuildAttrs::Allowed);
- }
+ ARMBuildAttrs::AllowThumb32);
+ } else if (Subtarget->hasV7Ops()) {
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
+ ARMBuildAttrs::AllowThumb32);
+ } else if (Subtarget->hasV6T2Ops())
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v6T2);
+ else if (Subtarget->hasV6Ops())
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v6);
+ else if (Subtarget->hasV5TEOps())
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v5TE);
+ else if (Subtarget->hasV5TOps())
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v5T);
+ else if (Subtarget->hasV4TOps())
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v4T);
if (Subtarget->hasNEON() && emitFPU) {
/* NEON is not exactly a VFP architecture, but GAS emits one of
@@ -893,7 +914,7 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV) {
void ARMAsmPrinter::
EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
- int Size = TM.getTargetData()->getTypeAllocSize(MCPV->getType());
+ int Size = TM.getDataLayout()->getTypeAllocSize(MCPV->getType());
ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
@@ -1091,16 +1112,6 @@ static void populateADROperands(MCInst &Inst, unsigned Dest,
Inst.addOperand(MCOperand::CreateReg(ccreg));
}
-void ARMAsmPrinter::EmitPatchedInstruction(const MachineInstr *MI,
- unsigned Opcode) {
- MCInst TmpInst;
-
- // Emit the instruction as usual, just patch the opcode.
- LowerARMMachineInstrToMCInst(MI, TmpInst, *this);
- TmpInst.setOpcode(Opcode);
- OutStreamer.EmitInstruction(TmpInst);
-}
-
void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
assert(MI->getFlag(MachineInstr::FrameSetup) &&
"Only instruction which are involved into frame setup code are allowed");
@@ -1402,31 +1413,6 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
return;
}
- case ARM::t2BMOVPCB_CALL: {
- {
- MCInst TmpInst;
- TmpInst.setOpcode(ARM::tMOVr);
- TmpInst.addOperand(MCOperand::CreateReg(ARM::LR));
- TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
- // Add predicate operands.
- TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
- TmpInst.addOperand(MCOperand::CreateReg(0));
- OutStreamer.EmitInstruction(TmpInst);
- }
- {
- MCInst TmpInst;
- TmpInst.setOpcode(ARM::t2B);
- const GlobalValue *GV = MI->getOperand(0).getGlobal();
- MCSymbol *GVSym = Mang->getSymbol(GV);
- const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
- TmpInst.addOperand(MCOperand::CreateExpr(GVSymExpr));
- // Add predicate operands.
- TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
- TmpInst.addOperand(MCOperand::CreateReg(0));
- OutStreamer.EmitInstruction(TmpInst);
- }
- return;
- }
case ARM::MOVi16_ga_pcrel:
case ARM::t2MOVi16_ga_pcrel: {
MCInst TmpInst;
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
index 3555e8f5..c875b2c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
@@ -53,7 +53,7 @@ public:
Subtarget = &TM.getSubtarget<ARMSubtarget>();
}
- virtual const char *getPassName() const {
+ virtual const char *getPassName() const LLVM_OVERRIDE {
return "ARM Assembly Printer";
}
@@ -62,22 +62,24 @@ public:
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) LLVM_OVERRIDE;
virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant,
- const char *ExtraCode, raw_ostream &O);
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) LLVM_OVERRIDE;
void EmitJumpTable(const MachineInstr *MI);
void EmitJump2Table(const MachineInstr *MI);
- virtual void EmitInstruction(const MachineInstr *MI);
- bool runOnMachineFunction(MachineFunction &F);
+ virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE;
+ virtual bool runOnMachineFunction(MachineFunction &F) LLVM_OVERRIDE;
- virtual void EmitConstantPool() {} // we emit constant pools customly!
- virtual void EmitFunctionBodyEnd();
- virtual void EmitFunctionEntryLabel();
- void EmitStartOfAsmFile(Module &M);
- void EmitEndOfAsmFile(Module &M);
- void EmitXXStructor(const Constant *CV);
+ virtual void EmitConstantPool() LLVM_OVERRIDE {
+ // We emit constant pools ourselves!
+ }
+ virtual void EmitFunctionBodyEnd() LLVM_OVERRIDE;
+ virtual void EmitFunctionEntryLabel() LLVM_OVERRIDE;
+ virtual void EmitStartOfAsmFile(Module &M) LLVM_OVERRIDE;
+ virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE;
+ virtual void EmitXXStructor(const Constant *CV) LLVM_OVERRIDE;
// lowerOperand - Convert a MachineOperand into the equivalent MCOperand.
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
@@ -101,12 +103,13 @@ private:
public:
void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
- MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
+ virtual MachineLocation
+ getDebugValueLocation(const MachineInstr *MI) const LLVM_OVERRIDE;
/// EmitDwarfRegOp - Emit dwarf register operation.
- virtual void EmitDwarfRegOp(const MachineLocation &MLoc) const;
+ virtual void EmitDwarfRegOp(const MachineLocation &MLoc) const LLVM_OVERRIDE;
- virtual unsigned getISAEncoding() {
+ virtual unsigned getISAEncoding() LLVM_OVERRIDE {
// ARM/Darwin adds ISA to the DWARF info for each function.
if (!Subtarget->isTargetDarwin())
return 0;
@@ -114,18 +117,19 @@ public:
ARM::DW_ISA_ARM_thumb : ARM::DW_ISA_ARM_arm;
}
+private:
MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol);
- MCSymbol *GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2,
- const MachineBasicBlock *MBB) const;
MCSymbol *GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const;
MCSymbol *GetARMSJLJEHLabel(void) const;
MCSymbol *GetARMGVSymbol(const GlobalValue *GV);
+public:
/// EmitMachineConstantPoolValue - Print a machine constantpool value to
/// the .s file.
- virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+ virtual void
+ EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) LLVM_OVERRIDE;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 1cc5a17..3c7bb24 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -49,6 +49,11 @@ static cl::opt<bool>
WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
cl::desc("Widen ARM vmovs to vmovd when possible"));
+static cl::opt<unsigned>
+SwiftPartialUpdateClearance("swift-partial-update-clearance",
+ cl::Hidden, cl::init(12),
+ cl::desc("Clearance before partial register updates"));
+
/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
uint16_t MLxOpc; // MLA / MLS opcode
@@ -683,7 +688,7 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// Handle register classes that require multiple instructions.
unsigned BeginIdx = 0;
unsigned SubRegs = 0;
- unsigned Spacing = 1;
+ int Spacing = 1;
// Use VORRq when possible.
if (ARM::QQPRRegClass.contains(DestReg, SrcReg))
@@ -697,6 +702,8 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3;
else if (ARM::DQuadRegClass.contains(DestReg, SrcReg))
Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4;
+ else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::MOVr, BeginIdx = ARM::gsub_0, SubRegs = 2;
else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg))
Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2, Spacing = 2;
@@ -705,27 +712,38 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg))
Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4, Spacing = 2;
- if (Opc) {
- const TargetRegisterInfo *TRI = &getRegisterInfo();
- MachineInstrBuilder Mov;
- for (unsigned i = 0; i != SubRegs; ++i) {
- unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing);
- unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i*Spacing);
- assert(Dst && Src && "Bad sub-register");
- Mov = AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst)
- .addReg(Src));
- // VORR takes two source operands.
- if (Opc == ARM::VORRq)
- Mov.addReg(Src);
- }
- // Add implicit super-register defs and kills to the last instruction.
- Mov->addRegisterDefined(DestReg, TRI);
- if (KillSrc)
- Mov->addRegisterKilled(SrcReg, TRI);
- return;
- }
+ assert(Opc && "Impossible reg-to-reg copy");
+
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ MachineInstrBuilder Mov;
- llvm_unreachable("Impossible reg-to-reg copy");
+ // Copy register tuples backward when the first Dest reg overlaps with SrcReg.
+ if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
+ BeginIdx = BeginIdx + ((SubRegs-1)*Spacing);
+ Spacing = -Spacing;
+ }
+#ifndef NDEBUG
+ SmallSet<unsigned, 4> DstRegs;
+#endif
+ for (unsigned i = 0; i != SubRegs; ++i) {
+ unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing);
+ unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i*Spacing);
+ assert(Dst && Src && "Bad sub-register");
+#ifndef NDEBUG
+ assert(!DstRegs.count(Src) && "destructive vector copy");
+ DstRegs.insert(Dst);
+#endif
+ Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst)
+ .addReg(Src);
+ // VORR takes two source operands.
+ if (Opc == ARM::VORRq)
+ Mov.addReg(Src);
+ Mov = AddDefaultPred(Mov);
+ }
+ // Add implicit super-register defs and kills to the last instruction.
+ Mov->addRegisterDefined(DestReg, TRI);
+ if (KillSrc)
+ Mov->addRegisterKilled(SrcReg, TRI);
}
static const
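
The regsOverlap() check introduced in copyPhysReg() exists because a register-tuple copy whose destination overlaps its source (for example, a D-register tuple shifted by one) would clobber not-yet-read source sub-registers if copied front to back; flipping the start index and stride walks the tuple back to front instead. A reduced sketch of just the index arithmetic, with copySub() standing in for the BuildMI sequence above:

  int Begin = BeginIdx, Step = Spacing;
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    Begin += (SubRegs - 1) * Spacing;  // start from the far end...
    Step = -Step;                      // ...and walk backward
  }
  for (unsigned i = 0; i != SubRegs; ++i)
    copySub(TRI->getSubReg(DestReg, unsigned(Begin + int(i) * Step)),
            TRI->getSubReg(SrcReg,  unsigned(Begin + int(i) * Step)));
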
@@ -775,6 +793,13 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+ } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
+ MachineInstrBuilder MIB =
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STMIA))
+ .addFrameIndex(FI))
+ .addMemOperand(MMO);
+ MIB = AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
+ AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
} else
llvm_unreachable("Unknown reg class!");
break;
@@ -922,6 +947,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineFunction &MF = *MBB.getParent();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
MachineFrameInfo &MFI = *MF.getFrameInfo();
unsigned Align = MFI.getObjectAlignment(FI);
MachineMemOperand *MMO =
@@ -947,6 +973,15 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
if (ARM::DPRRegClass.hasSubClassEq(RC)) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+ } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
+ unsigned LdmOpc = AFI->isThumbFunction() ? ARM::t2LDMIA : ARM::LDMIA;
+ MachineInstrBuilder MIB =
+ AddDefaultPred(BuildMI(MBB, I, DL, get(LdmOpc))
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
+ MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
+ if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ MIB.addReg(DestReg, RegState::ImplicitDefine);
} else
llvm_unreachable("Unknown reg class!");
break;
@@ -1378,7 +1413,6 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case ARM::VLDRD:
case ARM::VLDRS:
case ARM::t2LDRi8:
- case ARM::t2LDRDi8:
case ARM::t2LDRSHi8:
case ARM::t2LDRi12:
case ARM::t2LDRSHi12:
@@ -1517,6 +1551,14 @@ isProfitableToIfCvt(MachineBasicBlock &TMBB,
return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost;
}
+bool
+ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+ MachineBasicBlock &FMBB) const {
+ // Reduce false anti-dependencies to let Swift's out-of-order execution
+ // engine do its thing.
+ return Subtarget.isSwift();
+}
+
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
@@ -1569,71 +1611,41 @@ ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
}
/// Identify instructions that can be folded into a MOVCC instruction, and
-/// return the corresponding opcode for the predicated pseudo-instruction.
-static unsigned canFoldIntoMOVCC(unsigned Reg, MachineInstr *&MI,
- const MachineRegisterInfo &MRI) {
+/// return the defining instruction.
+static MachineInstr *canFoldIntoMOVCC(unsigned Reg,
+ const MachineRegisterInfo &MRI,
+ const TargetInstrInfo *TII) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
return 0;
if (!MRI.hasOneNonDBGUse(Reg))
return 0;
- MI = MRI.getVRegDef(Reg);
+ MachineInstr *MI = MRI.getVRegDef(Reg);
if (!MI)
return 0;
+ // MI is folded into the MOVCC by predicating it.
+ if (!MI->isPredicable())
+ return 0;
// Check if MI has any non-dead defs or physreg uses. This also detects
// predicated instructions which will be reading CPSR.
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
+ // Reject frame index operands, PEI can't handle the predicated pseudos.
+ if (MO.isFI() || MO.isCPI() || MO.isJTI())
+ return 0;
if (!MO.isReg())
continue;
+ // MI can't have any tied operands; that would conflict with predication.
+ if (MO.isTied())
+ return 0;
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
return 0;
if (MO.isDef() && !MO.isDead())
return 0;
}
- switch (MI->getOpcode()) {
- default: return 0;
- case ARM::ANDri: return ARM::ANDCCri;
- case ARM::ANDrr: return ARM::ANDCCrr;
- case ARM::ANDrsi: return ARM::ANDCCrsi;
- case ARM::ANDrsr: return ARM::ANDCCrsr;
- case ARM::t2ANDri: return ARM::t2ANDCCri;
- case ARM::t2ANDrr: return ARM::t2ANDCCrr;
- case ARM::t2ANDrs: return ARM::t2ANDCCrs;
- case ARM::EORri: return ARM::EORCCri;
- case ARM::EORrr: return ARM::EORCCrr;
- case ARM::EORrsi: return ARM::EORCCrsi;
- case ARM::EORrsr: return ARM::EORCCrsr;
- case ARM::t2EORri: return ARM::t2EORCCri;
- case ARM::t2EORrr: return ARM::t2EORCCrr;
- case ARM::t2EORrs: return ARM::t2EORCCrs;
- case ARM::ORRri: return ARM::ORRCCri;
- case ARM::ORRrr: return ARM::ORRCCrr;
- case ARM::ORRrsi: return ARM::ORRCCrsi;
- case ARM::ORRrsr: return ARM::ORRCCrsr;
- case ARM::t2ORRri: return ARM::t2ORRCCri;
- case ARM::t2ORRrr: return ARM::t2ORRCCrr;
- case ARM::t2ORRrs: return ARM::t2ORRCCrs;
-
- // ARM ADD/SUB
- case ARM::ADDri: return ARM::ADDCCri;
- case ARM::ADDrr: return ARM::ADDCCrr;
- case ARM::ADDrsi: return ARM::ADDCCrsi;
- case ARM::ADDrsr: return ARM::ADDCCrsr;
- case ARM::SUBri: return ARM::SUBCCri;
- case ARM::SUBrr: return ARM::SUBCCrr;
- case ARM::SUBrsi: return ARM::SUBCCrsi;
- case ARM::SUBrsr: return ARM::SUBCCrsr;
-
- // Thumb2 ADD/SUB
- case ARM::t2ADDri: return ARM::t2ADDCCri;
- case ARM::t2ADDri12: return ARM::t2ADDCCri12;
- case ARM::t2ADDrr: return ARM::t2ADDCCrr;
- case ARM::t2ADDrs: return ARM::t2ADDCCrs;
- case ARM::t2SUBri: return ARM::t2SUBCCri;
- case ARM::t2SUBri12: return ARM::t2SUBCCri12;
- case ARM::t2SUBrr: return ARM::t2SUBCCrr;
- case ARM::t2SUBrs: return ARM::t2SUBCCrs;
- }
+ bool DontMoveAcrossStores = true;
+ if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ 0, DontMoveAcrossStores))
+ return 0;
+ return MI;
}
bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI,
@@ -1662,19 +1674,18 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
"Unknown select instruction");
const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
- MachineInstr *DefMI = 0;
- unsigned Opc = canFoldIntoMOVCC(MI->getOperand(2).getReg(), DefMI, MRI);
- bool Invert = !Opc;
- if (!Opc)
- Opc = canFoldIntoMOVCC(MI->getOperand(1).getReg(), DefMI, MRI);
- if (!Opc)
+ MachineInstr *DefMI = canFoldIntoMOVCC(MI->getOperand(2).getReg(), MRI, this);
+ bool Invert = !DefMI;
+ if (!DefMI)
+ DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this);
+ if (!DefMI)
return 0;
// Create a new predicated version of DefMI.
// Rfalse is the first use.
MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- get(Opc), MI->getOperand(0).getReg())
- .addOperand(MI->getOperand(Invert ? 2 : 1));
+ DefMI->getDesc(),
+ MI->getOperand(0).getReg());
// Copy all the DefMI operands, excluding its (null) predicate.
const MCInstrDesc &DefDesc = DefMI->getDesc();
@@ -1693,6 +1704,15 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
if (NewMI->hasOptionalDef())
AddDefaultCC(NewMI);
+ // The output register value when the predicate is false is an implicit
+ // register operand tied to the first def.
+ // The tie makes the register allocator ensure the FalseReg is allocated the
+ // same register as operand 0.
+ MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1);
+ FalseReg.setImplicit();
+ NewMI->addOperand(FalseReg);
+ NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
+
// The caller will erase MI, but not DefMI.
DefMI->eraseFromParent();
return NewMI;
@@ -2039,13 +2059,14 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// Masked compares sometimes use the same register as the corresponding 'and'.
if (CmpMask != ~0) {
- if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) {
+ if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) {
MI = 0;
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
UE = MRI->use_end(); UI != UE; ++UI) {
if (UI->getParent() != CmpInstr->getParent()) continue;
MachineInstr *PotentialAND = &*UI;
- if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true))
+ if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
+ isPredicated(PotentialAND))
continue;
MI = PotentialAND;
break;
@@ -2111,6 +2132,10 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// The single candidate is called MI.
if (!MI) MI = Sub;
+ // We can't use a predicated instruction - it doesn't always write the flags.
+ if (isPredicated(MI))
+ return false;
+
switch (MI->getOpcode()) {
default: break;
case ARM::RSBrr:
@@ -2217,6 +2242,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// Toggle the optional operand to CPSR.
MI->getOperand(5).setReg(ARM::CPSR);
MI->getOperand(5).setIsDef(true);
+ assert(!isPredicated(MI) && "Can't use flags from predicated instruction");
CmpInstr->eraseFromParent();
// Modify the condition code of operands in OperandsToUpdate.
@@ -2347,6 +2373,260 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
return true;
}
+static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ default: {
+ const MCInstrDesc &Desc = MI->getDesc();
+ int UOps = ItinData->getNumMicroOps(Desc.getSchedClass());
+ assert(UOps >= 0 && "bad # UOps");
+ return UOps;
+ }
+
+ case ARM::LDRrs:
+ case ARM::LDRBrs:
+ case ARM::STRrs:
+ case ARM::STRBrs: {
+ unsigned ShOpVal = MI->getOperand(3).getImm();
+ bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (!isSub &&
+ (ShImm == 0 ||
+ ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
+ ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
+ return 1;
+ return 2;
+ }
+
+ case ARM::LDRH:
+ case ARM::STRH: {
+ if (!MI->getOperand(2).getReg())
+ return 1;
+
+ unsigned ShOpVal = MI->getOperand(3).getImm();
+ bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (!isSub &&
+ (ShImm == 0 ||
+ ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
+ ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
+ return 1;
+ return 2;
+ }
+
+ case ARM::LDRSB:
+ case ARM::LDRSH:
+ return (ARM_AM::getAM3Op(MI->getOperand(3).getImm()) == ARM_AM::sub) ? 3:2;
+
+ case ARM::LDRSB_POST:
+ case ARM::LDRSH_POST: {
+ unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rm = MI->getOperand(3).getReg();
+ return (Rt == Rm) ? 4 : 3;
+ }
+
+ case ARM::LDR_PRE_REG:
+ case ARM::LDRB_PRE_REG: {
+ unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rm = MI->getOperand(3).getReg();
+ if (Rt == Rm)
+ return 3;
+ unsigned ShOpVal = MI->getOperand(4).getImm();
+ bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (!isSub &&
+ (ShImm == 0 ||
+ ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
+ ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
+ return 2;
+ return 3;
+ }
+
+ case ARM::STR_PRE_REG:
+ case ARM::STRB_PRE_REG: {
+ unsigned ShOpVal = MI->getOperand(4).getImm();
+ bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (!isSub &&
+ (ShImm == 0 ||
+ ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
+ ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
+ return 2;
+ return 3;
+ }
+
+ case ARM::LDRH_PRE:
+ case ARM::STRH_PRE: {
+ unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rm = MI->getOperand(3).getReg();
+ if (!Rm)
+ return 2;
+ if (Rt == Rm)
+ return 3;
+ return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub)
+ ? 3 : 2;
+ }
+
+ case ARM::LDR_POST_REG:
+ case ARM::LDRB_POST_REG:
+ case ARM::LDRH_POST: {
+ unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rm = MI->getOperand(3).getReg();
+ return (Rt == Rm) ? 3 : 2;
+ }
+
+ case ARM::LDR_PRE_IMM:
+ case ARM::LDRB_PRE_IMM:
+ case ARM::LDR_POST_IMM:
+ case ARM::LDRB_POST_IMM:
+ case ARM::STRB_POST_IMM:
+ case ARM::STRB_POST_REG:
+ case ARM::STRB_PRE_IMM:
+ case ARM::STRH_POST:
+ case ARM::STR_POST_IMM:
+ case ARM::STR_POST_REG:
+ case ARM::STR_PRE_IMM:
+ return 2;
+
+ case ARM::LDRSB_PRE:
+ case ARM::LDRSH_PRE: {
+ unsigned Rm = MI->getOperand(3).getReg();
+ if (Rm == 0)
+ return 3;
+ unsigned Rt = MI->getOperand(0).getReg();
+ if (Rt == Rm)
+ return 4;
+ unsigned ShOpVal = MI->getOperand(4).getImm();
+ bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (!isSub &&
+ (ShImm == 0 ||
+ ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
+ ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
+ return 3;
+ return 4;
+ }
+
+ case ARM::LDRD: {
+ unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rn = MI->getOperand(2).getReg();
+ unsigned Rm = MI->getOperand(3).getReg();
+ if (Rm)
+ return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3;
+ return (Rt == Rn) ? 3 : 2;
+ }
+
+ case ARM::STRD: {
+ unsigned Rm = MI->getOperand(3).getReg();
+ if (Rm)
+ return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3;
+ return 2;
+ }
+
+ case ARM::LDRD_POST:
+ case ARM::t2LDRD_POST:
+ return 3;
+
+ case ARM::STRD_POST:
+ case ARM::t2STRD_POST:
+ return 4;
+
+ case ARM::LDRD_PRE: {
+ unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rn = MI->getOperand(3).getReg();
+ unsigned Rm = MI->getOperand(4).getReg();
+ if (Rm)
+ return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4;
+ return (Rt == Rn) ? 4 : 3;
+ }
+
+ case ARM::t2LDRD_PRE: {
+ unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rn = MI->getOperand(3).getReg();
+ return (Rt == Rn) ? 4 : 3;
+ }
+
+ case ARM::STRD_PRE: {
+ unsigned Rm = MI->getOperand(4).getReg();
+ if (Rm)
+ return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4;
+ return 3;
+ }
+
+ case ARM::t2STRD_PRE:
+ return 3;
+
+ case ARM::t2LDR_POST:
+ case ARM::t2LDRB_POST:
+ case ARM::t2LDRB_PRE:
+ case ARM::t2LDRSBi12:
+ case ARM::t2LDRSBi8:
+ case ARM::t2LDRSBpci:
+ case ARM::t2LDRSBs:
+ case ARM::t2LDRH_POST:
+ case ARM::t2LDRH_PRE:
+ case ARM::t2LDRSBT:
+ case ARM::t2LDRSB_POST:
+ case ARM::t2LDRSB_PRE:
+ case ARM::t2LDRSH_POST:
+ case ARM::t2LDRSH_PRE:
+ case ARM::t2LDRSHi12:
+ case ARM::t2LDRSHi8:
+ case ARM::t2LDRSHpci:
+ case ARM::t2LDRSHs:
+ return 2;
+
+ case ARM::t2LDRDi8: {
+ unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rn = MI->getOperand(2).getReg();
+ return (Rt == Rn) ? 3 : 2;
+ }
+
+ case ARM::t2STRB_POST:
+ case ARM::t2STRB_PRE:
+ case ARM::t2STRBs:
+ case ARM::t2STRDi8:
+ case ARM::t2STRH_POST:
+ case ARM::t2STRH_PRE:
+ case ARM::t2STRHs:
+ case ARM::t2STR_POST:
+ case ARM::t2STR_PRE:
+ case ARM::t2STRs:
+ return 2;
+ }
+}
+
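
The load/store cases above keep re-testing the same addrmode2 shifter-operand property: Swift issues a single uop only when there is no subtraction and the shift is absent or an LSL by 1-3 that folds into the addressing mode. A hypothetical helper capturing that predicate (the patch inlines it at each use):

  static bool isFoldableAM2Shift(unsigned ShOpVal) {
    if (ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub)
      return false;  // subtracting forms always cost the extra uop
    unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
    return ShImm == 0 ||
           ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
            ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl);
  }
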
+// Return the number of 32-bit words loaded by LDM or stored by STM. If this
+// can't be easily determined, return 0 (missing MachineMemOperand).
+//
+// FIXME: The current MachineInstr design does not support relying on machine
+// mem operands to determine the width of a memory access. Instead, we expect
+// the target to provide this information based on the instruction opcode and
+// operands. However, using MachineMemOperand is the best solution now for
+// two reasons:
+//
+// 1) getNumMicroOps tries to infer LDM memory width from the total number of MI
+// operands. This is much more dangerous than using the MachineMemOperand
+// sizes because CodeGen passes can insert/remove optional machine operands. In
+// fact, it's totally incorrect for preRA passes and appears to be wrong for
+// postRA passes as well.
+//
+// 2) getNumLDMAddresses is only used by the scheduling machine model and any
+// machine model that calls this should handle the unknown (zero size) case.
+//
+// Long term, we should require a target hook that verifies MachineMemOperand
+// sizes during MC lowering. That target hook should be local to MC lowering
+// because we can't ensure that it is aware of other MI forms. Doing this will
+// ensure that MachineMemOperands are correctly propagated through all passes.
+unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr *MI) const {
+ unsigned Size = 0;
+ for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
+ E = MI->memoperands_end(); I != E; ++I) {
+ Size += (*I)->getSize();
+ }
+ return Size / 4;
+}
+
unsigned
ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
const MachineInstr *MI) const {
@@ -2356,8 +2636,12 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
const MCInstrDesc &Desc = MI->getDesc();
unsigned Class = Desc.getSchedClass();
int ItinUOps = ItinData->getNumMicroOps(Class);
- if (ItinUOps >= 0)
+ if (ItinUOps >= 0) {
+ if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
+ return getNumMicroOpsSwiftLdSt(ItinData, MI);
+
return ItinUOps;
+ }
unsigned Opc = MI->getOpcode();
switch (Opc) {
@@ -2426,7 +2710,43 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
case ARM::t2STMIA_UPD:
case ARM::t2STMDB_UPD: {
unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isSwift()) {
+ // rdar://8402126
+ int UOps = 1 + NumRegs; // One for address computation, one for each ld / st.
+ switch (Opc) {
+ default: break;
+ case ARM::VLDMDIA_UPD:
+ case ARM::VLDMDDB_UPD:
+ case ARM::VLDMSIA_UPD:
+ case ARM::VLDMSDB_UPD:
+ case ARM::VSTMDIA_UPD:
+ case ARM::VSTMDDB_UPD:
+ case ARM::VSTMSIA_UPD:
+ case ARM::VSTMSDB_UPD:
+ case ARM::LDMIA_UPD:
+ case ARM::LDMDA_UPD:
+ case ARM::LDMDB_UPD:
+ case ARM::LDMIB_UPD:
+ case ARM::STMIA_UPD:
+ case ARM::STMDA_UPD:
+ case ARM::STMDB_UPD:
+ case ARM::STMIB_UPD:
+ case ARM::tLDMIA_UPD:
+ case ARM::tSTMIA_UPD:
+ case ARM::t2LDMIA_UPD:
+ case ARM::t2LDMDB_UPD:
+ case ARM::t2STMIA_UPD:
+ case ARM::t2STMDB_UPD:
+ ++UOps; // One for base register writeback.
+ break;
+ case ARM::LDMIA_RET:
+ case ARM::tPOP_RET:
+ case ARM::t2LDMIA_RET:
+ UOps += 2; // One for base reg wb, one for write to pc.
+ break;
+ }
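+ // For example, "pop {r4-r6, pc}" (tPOP_RET, four list registers) costs
+ // 1 + 4 + 2 = 7 uops: address computation, four loads, base register
+ // writeback, and the write to pc.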
+ return UOps;
+ } else if (Subtarget.isCortexA8()) {
if (NumRegs < 4)
return 2;
// 4 registers would be issued: 2, 2.
@@ -2435,7 +2755,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
if (NumRegs % 2)
++A8UOps;
return A8UOps;
- } else if (Subtarget.isCortexA9()) {
+ } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
int A9UOps = (NumRegs / 2);
// If there is an odd number of registers or if it's not 64-bit aligned,
// then it takes an extra AGU (Address Generation Unit) cycle.
@@ -2468,7 +2788,7 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
DefCycle = RegNo / 2 + 1;
if (RegNo % 2)
++DefCycle;
- } else if (Subtarget.isCortexA9()) {
+ } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
DefCycle = RegNo;
bool isSLoad = false;
@@ -2512,7 +2832,7 @@ ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
DefCycle = 1;
// Result latency is issue cycle + 2: E2.
DefCycle += 2;
- } else if (Subtarget.isCortexA9()) {
+ } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
DefCycle = (RegNo / 2);
// If there is an odd number of registers or if it's not 64-bit aligned,
// then it takes an extra AGU (Address Generation Unit) cycle.
@@ -2543,7 +2863,7 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
UseCycle = RegNo / 2 + 1;
if (RegNo % 2)
++UseCycle;
- } else if (Subtarget.isCortexA9()) {
+ } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
UseCycle = RegNo;
bool isSStore = false;
@@ -2584,7 +2904,7 @@ ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
UseCycle = 2;
// Read in E3.
UseCycle += 2;
- } else if (Subtarget.isCortexA9()) {
+ } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
UseCycle = (RegNo / 2);
// If there is an odd number of registers or if it's not 64-bit aligned,
// then it takes an extra AGU (Address Generation Unit) cycle.
@@ -2769,7 +3089,7 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
const MachineInstr *DefMI,
const MCInstrDesc *DefMCID, unsigned DefAlign) {
int Adjust = 0;
- if (Subtarget.isCortexA8() || Subtarget.isCortexA9()) {
+ if (Subtarget.isCortexA8() || Subtarget.isLikeA9()) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID->getOpcode()) {
@@ -2794,9 +3114,40 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
break;
}
}
+ } else if (Subtarget.isSwift()) {
+ // FIXME: Properly handle all of the latency adjustments for address
+ // writeback.
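+ // For example, an LDRrs whose address is [r1, r2, lsl #2] resolves two
+ // cycles earlier on Swift than the itinerary default, hence the -2
+ // adjustment below.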
+ switch (DefMCID->getOpcode()) {
+ default: break;
+ case ARM::LDRrs:
+ case ARM::LDRBrs: {
+ unsigned ShOpVal = DefMI->getOperand(3).getImm();
+ bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (!isSub &&
+ (ShImm == 0 ||
+ ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
+ ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
+ Adjust -= 2;
+ else if (!isSub &&
+ ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
+ --Adjust;
+ break;
+ }
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
+ case ARM::t2LDRSHs: {
+ // Thumb2 mode: lsl only.
+ unsigned ShAmt = DefMI->getOperand(3).getImm();
+ if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
+ Adjust -= 2;
+ break;
+ }
+ }
}
- if (DefAlign < 8 && Subtarget.isCortexA9()) {
+ if (DefAlign < 8 && Subtarget.isLikeA9()) {
switch (DefMCID->getOpcode()) {
default: break;
case ARM::VLD1q8:
@@ -2954,7 +3305,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
if (Reg == ARM::CPSR) {
if (DefMI->getOpcode() == ARM::FMSTAT) {
// fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
- return Subtarget.isCortexA9() ? 1 : 20;
+ return Subtarget.isLikeA9() ? 1 : 20;
}
// CPSR set and branch can be paired in the same cycle.
@@ -2970,7 +3321,8 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// instructions).
if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI->getParent()->getParent();
- if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ if (MF->getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize))
--Latency;
}
return Latency;
@@ -3020,7 +3372,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
if (!UseNode->isMachineOpcode()) {
int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
- if (Subtarget.isCortexA9())
+ if (Subtarget.isLikeA9() || Subtarget.isSwift())
return Latency <= 2 ? 1 : Latency - 1;
else
return Latency <= 3 ? 1 : Latency - 2;
@@ -3037,7 +3389,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
UseMCID, UseIdx, UseAlign);
if (Latency > 1 &&
- (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
+ (Subtarget.isCortexA8() || Subtarget.isLikeA9())) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID.getOpcode()) {
@@ -3064,9 +3416,36 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
break;
}
}
+ } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) {
+ // FIXME: Properly handle all of the latency adjustments for address
+ // writeback.
+ switch (DefMCID.getOpcode()) {
+ default: break;
+ case ARM::LDRrs:
+ case ARM::LDRBrs: {
+ unsigned ShOpVal =
+ cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (ShImm == 0 ||
+ ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
+ ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
+ Latency -= 2;
+ else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
+ --Latency;
+ break;
+ }
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
+ case ARM::t2LDRSHs: {
+ // Thumb2 mode: lsl 0-3 only.
+ Latency -= 2;
+ break;
+ }
+ }
}
- if (DefAlign < 8 && Subtarget.isCortexA9())
+ if (DefAlign < 8 && Subtarget.isLikeA9())
switch (DefMCID.getOpcode()) {
default: break;
case ARM::VLD1q8:
@@ -3190,18 +3569,6 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
return Latency;
}
-unsigned
-ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *DepMI) const {
- unsigned Reg = DefMI->getOperand(DefIdx).getReg();
- if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI))
- return 1;
-
- // If the second MI is predicated, then there is an implicit use dependency.
- return getInstrLatency(ItinData, DefMI);
-}
-
unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
@@ -3359,11 +3726,12 @@ ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
- // Cortex-A9 is particularly picky about mixing the two and wants these
+ // A9-like cores are particularly picky about mixing the two and want these
// converted.
- if (Subtarget.isCortexA9() && !isPredicated(MI) &&
+ if (Subtarget.isLikeA9() && !isPredicated(MI) &&
(MI->getOpcode() == ARM::VMOVRS ||
- MI->getOpcode() == ARM::VMOVSR))
+ MI->getOpcode() == ARM::VMOVSR ||
+ MI->getOpcode() == ARM::VMOVS))
return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
// No other instructions can be swizzled, so just determine their domain.
@@ -3383,13 +3751,70 @@ ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
return std::make_pair(ExeGeneric, 0);
}
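+// Map an S-register to its containing D-register and lane: the SPRs pair up
+// into DPRs, so e.g. S0 -> (D0, lane 0), S1 -> (D0, lane 1) and
+// S2 -> (D1, lane 0).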
+static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
+ unsigned SReg, unsigned &Lane) {
+ unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
+ Lane = 0;
+
+ if (DReg != ARM::NoRegister)
+ return DReg;
+
+ Lane = 1;
+ DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
+
+ assert(DReg && "S-register with no D super-register?");
+ return DReg;
+}
+
+/// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane,
+/// set ImplicitSReg to the register number that must be marked as an
+/// implicit-use, or to zero if no register needs to be marked as an
+/// implicit-use.
+///
+/// If the function cannot determine if an SPR should be marked implicit use or
+/// not, it returns false.
+///
+/// This function handles cases where an instruction is being modified from taking
+/// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict
+/// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other
+/// lane of the DPR).
+///
+/// If the other SPR is defined, an implicit-use of it should be added.
+/// Otherwise (including the case where the DPR itself is defined), it should
+/// not.
+///
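+/// For example (a sketch): converting a use of %S1 into a use of D0[1] adds
+/// a read of D0; if %S0 (i.e. D0[0]) was defined earlier and is still live,
+/// it must be marked as an implicit-use so that def is not considered dead.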
+static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
+ MachineInstr *MI,
+ unsigned DReg, unsigned Lane,
+ unsigned &ImplicitSReg) {
+ // If the DPR is defined or used already, the other SPR lane will be chained
+ // correctly, so there is nothing to be done.
+ if (MI->definesRegister(DReg, TRI) || MI->readsRegister(DReg, TRI)) {
+ ImplicitSReg = 0;
+ return true;
+ }
+
+ // Otherwise we need to go searching to see if the SPR is set explicitly.
+ ImplicitSReg = TRI->getSubReg(DReg,
+ (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
+ MachineBasicBlock::LivenessQueryResult LQR =
+ MI->getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
+
+ if (LQR == MachineBasicBlock::LQR_Live)
+ return true;
+ else if (LQR == MachineBasicBlock::LQR_Unknown)
+ return false;
+
+ // If the register is known not to be live, there is no need to add an
+ // implicit-use.
+ ImplicitSReg = 0;
+ return true;
+}
+
void
ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
unsigned DstReg, SrcReg, DReg;
unsigned Lane;
MachineInstrBuilder MIB(MI);
const TargetRegisterInfo *TRI = &getRegisterInfo();
- bool isKill;
switch (MI->getOpcode()) {
default:
llvm_unreachable("cannot handle opcode!");
@@ -3400,82 +3825,294 @@ ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
// Zap the predicate operands.
assert(!isPredicated(MI) && "Cannot predicate a VORRd");
- MI->RemoveOperand(3);
- MI->RemoveOperand(2);
- // Change to a VORRd which requires two identical use operands.
- MI->setDesc(get(ARM::VORRd));
+ // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
+ DstReg = MI->getOperand(0).getReg();
+ SrcReg = MI->getOperand(1).getReg();
- // Add the extra source operand and new predicates.
- // This will go before any implicit ops.
- AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1)));
+ for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
+ MI->RemoveOperand(i-1);
+
+ // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
+ MI->setDesc(get(ARM::VORRd));
+ AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
+ .addReg(SrcReg)
+ .addReg(SrcReg));
break;
case ARM::VMOVRS:
if (Domain != ExeNEON)
break;
assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
+ // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
DstReg = MI->getOperand(0).getReg();
SrcReg = MI->getOperand(1).getReg();
- DReg = TRI->getMatchingSuperReg(SrcReg, ARM::ssub_0, &ARM::DPRRegClass);
- Lane = 0;
- if (DReg == ARM::NoRegister) {
- DReg = TRI->getMatchingSuperReg(SrcReg, ARM::ssub_1, &ARM::DPRRegClass);
- Lane = 1;
- assert(DReg && "S-register with no D super-register?");
- }
+ for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
+ MI->RemoveOperand(i-1);
- MI->RemoveOperand(3);
- MI->RemoveOperand(2);
- MI->RemoveOperand(1);
+ DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
+ // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
+ // Note that DSrc has been widened and the other lane may be undef, which
+ // contaminates the entire register.
MI->setDesc(get(ARM::VGETLNi32));
- MIB.addReg(DReg);
- MIB.addImm(Lane);
+ AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
+ .addReg(DReg, RegState::Undef)
+ .addImm(Lane));
- MIB->getOperand(1).setIsUndef();
+ // The old source should be an implicit use; otherwise we might think it
+ // was dead before here.
MIB.addReg(SrcReg, RegState::Implicit);
-
- AddDefaultPred(MIB);
break;
- case ARM::VMOVSR:
+ case ARM::VMOVSR: {
if (Domain != ExeNEON)
break;
assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
+ // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
DstReg = MI->getOperand(0).getReg();
SrcReg = MI->getOperand(1).getReg();
- DReg = TRI->getMatchingSuperReg(DstReg, ARM::ssub_0, &ARM::DPRRegClass);
- Lane = 0;
- if (DReg == ARM::NoRegister) {
- DReg = TRI->getMatchingSuperReg(DstReg, ARM::ssub_1, &ARM::DPRRegClass);
- Lane = 1;
- assert(DReg && "S-register with no D super-register?");
- }
- isKill = MI->getOperand(0).isKill();
- MI->RemoveOperand(3);
- MI->RemoveOperand(2);
- MI->RemoveOperand(1);
- MI->RemoveOperand(0);
+ DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
+
+ unsigned ImplicitSReg;
+ if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
+ break;
+
+ for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
+ MI->RemoveOperand(i-1);
+ // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
+ // Again DDst may be undefined at the beginning of this instruction.
MI->setDesc(get(ARM::VSETLNi32));
- MIB.addReg(DReg);
- MIB.addReg(DReg);
- MIB.addReg(SrcReg);
- MIB.addImm(Lane);
+ MIB.addReg(DReg, RegState::Define)
+ .addReg(DReg, getUndefRegState(!MI->readsRegister(DReg, TRI)))
+ .addReg(SrcReg)
+ .addImm(Lane);
+ AddDefaultPred(MIB);
+
+ // The narrower destination must be marked as set to keep previous chains
+ // in place.
+ MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
+ if (ImplicitSReg != 0)
+ MIB.addReg(ImplicitSReg, RegState::Implicit);
+ break;
+ }
+ case ARM::VMOVS: {
+ if (Domain != ExeNEON)
+ break;
+
+ // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
+ DstReg = MI->getOperand(0).getReg();
+ SrcReg = MI->getOperand(1).getReg();
+
+ unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
+ DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
+ DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
- MIB->getOperand(1).setIsUndef();
+ unsigned ImplicitSReg;
+ if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
+ break;
- if (isKill)
- MIB->addRegisterKilled(DstReg, TRI, true);
- MIB->addRegisterDefined(DstReg, TRI);
+ for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
+ MI->RemoveOperand(i-1);
+
+ if (DSrc == DDst) {
+ // Destination can be:
+ // %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
+ MI->setDesc(get(ARM::VDUPLN32d));
+ MIB.addReg(DDst, RegState::Define)
+ .addReg(DDst, getUndefRegState(!MI->readsRegister(DDst, TRI)))
+ .addImm(SrcLane);
+ AddDefaultPred(MIB);
+
+ // Neither the source nor the destination is naturally represented any
+ // more, so add them in manually.
+ MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
+ MIB.addReg(SrcReg, RegState::Implicit);
+ if (ImplicitSReg != 0)
+ MIB.addReg(ImplicitSReg, RegState::Implicit);
+ break;
+ }
+ // In general there's no single instruction that can perform an S <-> S
+ // move in NEON space, but a pair of VEXT instructions *can* do the
+ // job. It turns out that the VEXTs needed will only use DSrc once, with
+ // the position based purely on the combination of lane-0 and lane-1
+ // involved. For example
+ // vmov s0, s2 -> vext.32 d0, d0, d1, #1 vext.32 d0, d0, d0, #1
+ // vmov s1, s3 -> vext.32 d0, d1, d0, #1 vext.32 d0, d0, d0, #1
+ // vmov s0, s3 -> vext.32 d0, d0, d0, #1 vext.32 d0, d1, d0, #1
+ // vmov s1, s2 -> vext.32 d0, d0, d0, #1 vext.32 d0, d0, d1, #1
+ //
+ // Pattern of the MachineInstrs is:
+ // %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (; implicits)
+ MachineInstrBuilder NewMIB;
+ NewMIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ get(ARM::VEXTd32), DDst);
+
+ // On the first instruction, both DSrc and DDst may be <undef> if present.
+ // Specifically when the original instruction didn't have them as an
+ // <imp-use>.
+ unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
+ bool CurUndef = !MI->readsRegister(CurReg, TRI);
+ NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
+
+ CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
+ CurUndef = !MI->readsRegister(CurReg, TRI);
+ NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
+
+ NewMIB.addImm(1);
+ AddDefaultPred(NewMIB);
+
+ if (SrcLane == DstLane)
+ NewMIB.addReg(SrcReg, RegState::Implicit);
+
+ MI->setDesc(get(ARM::VEXTd32));
+ MIB.addReg(DDst, RegState::Define);
+
+ // On the second instruction, DDst has definitely been defined above, so
+ // it is not <undef>. DSrc, if present, can be <undef> as above.
+ CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
+ CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
+ MIB.addReg(CurReg, getUndefRegState(CurUndef));
+
+ CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
+ CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
+ MIB.addReg(CurReg, getUndefRegState(CurUndef));
+
+ MIB.addImm(1);
AddDefaultPred(MIB);
+
+ if (SrcLane != DstLane)
+ MIB.addReg(SrcReg, RegState::Implicit);
+
+ // As before, the original destination is no longer represented, so add it
+ // implicitly.
+ MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
+ if (ImplicitSReg != 0)
+ MIB.addReg(ImplicitSReg, RegState::Implicit);
break;
+ }
+ }
+
+}
+
+//===----------------------------------------------------------------------===//
+// Partial register updates
+//===----------------------------------------------------------------------===//
+//
+// Swift renames NEON registers with 64-bit granularity. That means any
+// instruction writing an S-reg implicitly reads the containing D-reg. The
+// problem is mostly avoided by translating f32 operations to v2f32 operations
+// on D-registers, but f32 loads are still a problem.
+//
+// These instructions can load an f32 into a NEON register:
+//
+// VLDRS - Only writes S, partial D update.
+// VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
+// VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
+//
+// FCONSTD can be used as a dependency-breaking instruction.
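+//
+// For example (hypothetical code), on Swift
+//   vldr s0, [r0]      @ writes s0 but merges into the last def of d0
+// stalls waiting for the previous writer of d0. Emitting a cheap full-width
+// def first,
+//   vmov.f64 d0, #0.5  @ FCONSTD: defines all of d0
+//   vldr s0, [r0]
+// breaks the false dependency.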
+
+
+unsigned ARMBaseInstrInfo::
+getPartialRegUpdateClearance(const MachineInstr *MI,
+ unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ // Only Swift has partial register update problems.
+ if (!SwiftPartialUpdateClearance || !Subtarget.isSwift())
+ return 0;
+
+ assert(TRI && "Need TRI instance");
+
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ if (MO.readsReg())
+ return 0;
+ unsigned Reg = MO.getReg();
+ int UseOp = -1;
+
+ switch(MI->getOpcode()) {
+ // Normal instructions writing only an S-register.
+ case ARM::VLDRS:
+ case ARM::FCONSTS:
+ case ARM::VMOVSR:
+ // rdar://problem/8791586
+ case ARM::VMOVv8i8:
+ case ARM::VMOVv4i16:
+ case ARM::VMOVv2i32:
+ case ARM::VMOVv2f32:
+ case ARM::VMOVv1i64:
+ UseOp = MI->findRegisterUseOperandIdx(Reg, false, TRI);
+ break;
+
+ // Explicitly reads the dependency.
+ case ARM::VLD1LNd32:
+ UseOp = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ // If this instruction actually reads a value from Reg, there is no unwanted
+ // dependency.
+ if (UseOp != -1 && MI->getOperand(UseOp).readsReg())
+ return 0;
+
+ // We must be able to clobber the whole D-reg.
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ // Virtual register must be a foo:ssub_0<def,undef> operand.
+ if (!MO.getSubReg() || MI->readsVirtualRegister(Reg))
+ return 0;
+ } else if (ARM::SPRRegClass.contains(Reg)) {
+ // Physical register: MI must define the full D-reg.
+ unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
+ &ARM::DPRRegClass);
+ if (!DReg || !MI->definesRegister(DReg, TRI))
+ return 0;
}
+ // MI has an unwanted D-register dependency.
+ // Avoid defs in the previous N instructions.
+ return SwiftPartialUpdateClearance;
+}
+
+// Break a partial register dependency after getPartialRegUpdateClearance
+// returned non-zero.
+void ARMBaseInstrInfo::
+breakPartialRegDependency(MachineBasicBlock::iterator MI,
+ unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ assert(MI && OpNum < MI->getDesc().getNumDefs() && "OpNum is not a def");
+ assert(TRI && "Need TRI instance");
+
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ unsigned Reg = MO.getReg();
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
+ "Can't break virtual register dependencies.");
+ unsigned DReg = Reg;
+
+ // If MI defines an S-reg, find the corresponding D super-register.
+ if (ARM::SPRRegClass.contains(Reg)) {
+ DReg = ARM::D0 + (Reg - ARM::S0) / 2;
+ assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken");
+ }
+
+ assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
+ assert(MI->definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");
+
+ // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
+ // the full D-register by loading the same value to both lanes. The
+ // instruction is micro-coded with 2 uops, so don't do this until we can
+ // properly schedule micro-coded instructions. The dispatcher stalls cause
+ // regressions that are too large.
+
+ // Insert the dependency-breaking FCONSTD before MI.
+ // 96 is the encoding of 0.5, but the actual value doesn't matter here.
+ AddDefaultPred(BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ get(ARM::FCONSTD), DReg).addImm(96));
+ MI->addRegisterKilled(DReg, TRI, true);
}
bool ARMBaseInstrInfo::hasNOP() const {
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 92e5ee8..6f38e35 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -182,10 +182,13 @@ public:
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
unsigned NumCycles,
const BranchProbability
- &Probability) const {
+ &Probability) const {
return NumCycles == 1;
}
+ virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+ MachineBasicBlock &FMBB) const;
+
/// analyzeCompare - For a comparison instruction, return the source registers
in SrcReg and SrcReg2 if it has two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
@@ -226,15 +229,18 @@ public:
SDNode *DefNode, unsigned DefIdx,
SDNode *UseNode, unsigned UseIdx) const;
- virtual unsigned getOutputLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *DepMI) const;
-
/// VFP/NEON execution domains.
std::pair<uint16_t, uint16_t>
getExecutionDomain(const MachineInstr *MI) const;
void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;
+ unsigned getPartialRegUpdateClearance(const MachineInstr*, unsigned,
+ const TargetRegisterInfo*) const;
+ void breakPartialRegDependency(MachineBasicBlock::iterator, unsigned,
+ const TargetRegisterInfo *TRI) const;
+ /// Get the number of addresses loaded by an LDM or VLDM, or zero if unknown.
+ unsigned getNumLDMAddresses(const MachineInstr *MI) const;
+
private:
unsigned getInstBundleLength(const MachineInstr *MI) const;
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 9deb96e..e5b300f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -84,6 +84,11 @@ ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}
+const uint32_t*
+ARMBaseRegisterInfo::getNoPreservedMask() const {
+ return CSR_NoRegs_RegMask;
+}
+
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
@@ -106,148 +111,12 @@ getReservedRegs(const MachineFunction &MF) const {
for (unsigned i = 0; i != 16; ++i)
Reserved.set(ARM::D16 + i);
}
- return Reserved;
-}
-
-bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
- unsigned Reg) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- switch (Reg) {
- default: break;
- case ARM::SP:
- case ARM::PC:
- return true;
- case ARM::R6:
- if (hasBasePointer(MF))
- return true;
- break;
- case ARM::R7:
- case ARM::R11:
- if (FramePtr == Reg && TFI->hasFP(MF))
- return true;
- break;
- case ARM::R9:
- return STI.isR9Reserved();
- }
-
- return false;
-}
+ const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
+ for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
+ I != E; ++I)
+ for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI)
+ if (Reserved.test(*SI)) Reserved.set(*I);
-bool
-ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
- SmallVectorImpl<unsigned> &SubIndices,
- unsigned &NewSubIdx) const {
-
- unsigned Size = RC->getSize() * 8;
- if (Size < 6)
- return 0;
-
- NewSubIdx = 0; // Whole register.
- unsigned NumRegs = SubIndices.size();
- if (NumRegs == 8) {
- // 8 D registers -> 1 QQQQ register.
- return (Size == 512 &&
- SubIndices[0] == ARM::dsub_0 &&
- SubIndices[1] == ARM::dsub_1 &&
- SubIndices[2] == ARM::dsub_2 &&
- SubIndices[3] == ARM::dsub_3 &&
- SubIndices[4] == ARM::dsub_4 &&
- SubIndices[5] == ARM::dsub_5 &&
- SubIndices[6] == ARM::dsub_6 &&
- SubIndices[7] == ARM::dsub_7);
- } else if (NumRegs == 4) {
- if (SubIndices[0] == ARM::qsub_0) {
- // 4 Q registers -> 1 QQQQ register.
- return (Size == 512 &&
- SubIndices[1] == ARM::qsub_1 &&
- SubIndices[2] == ARM::qsub_2 &&
- SubIndices[3] == ARM::qsub_3);
- } else if (SubIndices[0] == ARM::dsub_0) {
- // 4 D registers -> 1 QQ register.
- if (Size >= 256 &&
- SubIndices[1] == ARM::dsub_1 &&
- SubIndices[2] == ARM::dsub_2 &&
- SubIndices[3] == ARM::dsub_3) {
- if (Size == 512)
- NewSubIdx = ARM::qqsub_0;
- return true;
- }
- } else if (SubIndices[0] == ARM::dsub_4) {
- // 4 D registers -> 1 QQ register (2nd).
- if (Size == 512 &&
- SubIndices[1] == ARM::dsub_5 &&
- SubIndices[2] == ARM::dsub_6 &&
- SubIndices[3] == ARM::dsub_7) {
- NewSubIdx = ARM::qqsub_1;
- return true;
- }
- } else if (SubIndices[0] == ARM::ssub_0) {
- // 4 S registers -> 1 Q register.
- if (Size >= 128 &&
- SubIndices[1] == ARM::ssub_1 &&
- SubIndices[2] == ARM::ssub_2 &&
- SubIndices[3] == ARM::ssub_3) {
- if (Size >= 256)
- NewSubIdx = ARM::qsub_0;
- return true;
- }
- }
- } else if (NumRegs == 2) {
- if (SubIndices[0] == ARM::qsub_0) {
- // 2 Q registers -> 1 QQ register.
- if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
- if (Size == 512)
- NewSubIdx = ARM::qqsub_0;
- return true;
- }
- } else if (SubIndices[0] == ARM::qsub_2) {
- // 2 Q registers -> 1 QQ register (2nd).
- if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
- NewSubIdx = ARM::qqsub_1;
- return true;
- }
- } else if (SubIndices[0] == ARM::dsub_0) {
- // 2 D registers -> 1 Q register.
- if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
- if (Size >= 256)
- NewSubIdx = ARM::qsub_0;
- return true;
- }
- } else if (SubIndices[0] == ARM::dsub_2) {
- // 2 D registers -> 1 Q register (2nd).
- if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
- NewSubIdx = ARM::qsub_1;
- return true;
- }
- } else if (SubIndices[0] == ARM::dsub_4) {
- // 2 D registers -> 1 Q register (3rd).
- if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
- NewSubIdx = ARM::qsub_2;
- return true;
- }
- } else if (SubIndices[0] == ARM::dsub_6) {
- // 2 D registers -> 1 Q register (3rd).
- if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
- NewSubIdx = ARM::qsub_3;
- return true;
- }
- } else if (SubIndices[0] == ARM::ssub_0) {
- // 2 S registers -> 1 D register.
- if (SubIndices[1] == ARM::ssub_1) {
- if (Size >= 128)
- NewSubIdx = ARM::dsub_0;
- return true;
- }
- } else if (SubIndices[0] == ARM::ssub_2) {
- // 2 S registers -> 1 D register (2nd).
- if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
- NewSubIdx = ARM::dsub_1;
- return true;
- }
- }
- }
- return false;
+ return Reserved;
}
const TargetRegisterClass*
@@ -263,6 +132,7 @@ ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
case ARM::QPRRegClassID:
case ARM::QQPRRegClassID:
case ARM::QQQQPRRegClassID:
+ case ARM::GPRPairRegClassID:
return Super;
}
Super = *I++;
@@ -476,7 +346,7 @@ ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
bool
ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
// CortexA9 has a Write-after-write hazard for NEON registers.
- if (!STI.isCortexA9())
+ if (!STI.isLikeA9())
return false;
switch (RC->getID()) {
@@ -561,8 +431,9 @@ needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
- bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
- F->hasFnAttr(Attribute::StackAlignment));
+ bool requiresRealignment =
+ ((MFI->getMaxAlignment() > StackAlign) ||
+ F->getFnAttributes().hasAttribute(Attributes::StackAlignment));
return requiresRealignment && canRealignStack(MF);
}
@@ -595,6 +466,7 @@ unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
const MachineFunction &MF) const {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
switch (Reg) {
default: break;
// Return 0 if either register of the pair is a special register.
@@ -603,10 +475,10 @@ unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
case ARM::R3: return ARM::R2;
case ARM::R5: return ARM::R4;
case ARM::R7:
- return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
+ return (MRI.isReserved(ARM::R7) || MRI.isReserved(ARM::R6))
? 0 : ARM::R6;
- case ARM::R9: return isReservedReg(MF, ARM::R9) ? 0 :ARM::R8;
- case ARM::R11: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;
+ case ARM::R9: return MRI.isReserved(ARM::R9) ? 0 :ARM::R8;
+ case ARM::R11: return MRI.isReserved(ARM::R11) ? 0 : ARM::R10;
case ARM::S1: return ARM::S0;
case ARM::S3: return ARM::S2;
@@ -648,6 +520,7 @@ unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
const MachineFunction &MF) const {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
switch (Reg) {
default: break;
// Return 0 if either register of the pair is a special register.
@@ -656,10 +529,10 @@ unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
case ARM::R2: return ARM::R3;
case ARM::R4: return ARM::R5;
case ARM::R6:
- return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
+ return (MRI.isReserved(ARM::R7) || MRI.isReserved(ARM::R6))
? 0 : ARM::R7;
- case ARM::R8: return isReservedReg(MF, ARM::R9) ? 0 :ARM::R9;
- case ARM::R10: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;
+ case ARM::R8: return MRI.isReserved(ARM::R9) ? 0 :ARM::R9;
+ case ARM::R10: return MRI.isReserved(ARM::R11) ? 0 : ARM::R11;
case ARM::S0: return ARM::S1;
case ARM::S2: return ARM::S3;
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
index da29f7e..e2bdd04 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -96,19 +96,10 @@ public:
/// Code Generation virtual methods...
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+ const uint32_t *getNoPreservedMask() const;
BitVector getReservedRegs(const MachineFunction &MF) const;
- /// canCombineSubRegIndices - Given a register class and a list of
- /// subregister indices, return true if it's possible to combine the
- /// subregister indices into one that corresponds to a larger
- /// subregister. Return the new subregister index by reference. Note the
- /// new index may be zero if the given subregisters can be combined to
- /// form the whole register.
- virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC,
- SmallVectorImpl<unsigned> &SubIndices,
- unsigned &NewSubIdx) const;
-
const TargetRegisterClass*
getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
const TargetRegisterClass*
@@ -170,8 +161,6 @@ public:
unsigned MIFlags = MachineInstr::NoFlags)const;
/// Code Generation virtual methods...
- virtual bool isReservedReg(const MachineFunction &MF, unsigned Reg) const;
-
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const;
virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
index bda1517..b378b96 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -190,6 +190,8 @@ def RetCC_ARM_AAPCS_VFP : CallingConv<[
// Callee-saved register lists.
//===----------------------------------------------------------------------===//
+def CSR_NoRegs : CalleeSavedRegs<(add)>;
+
def CSR_AAPCS : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6, R5, R4,
(sequence "D%u", 15, 8))>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
index e81b4cc..6adbf4f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
@@ -47,7 +47,7 @@ namespace {
class ARMCodeEmitter : public MachineFunctionPass {
ARMJITInfo *JTI;
const ARMBaseInstrInfo *II;
- const TargetData *TD;
+ const DataLayout *TD;
const ARMSubtarget *Subtarget;
TargetMachine &TM;
JITCodeEmitter &MCE;
@@ -67,7 +67,7 @@ namespace {
ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
: MachineFunctionPass(ID), JTI(0),
II((const ARMBaseInstrInfo *)tm.getInstrInfo()),
- TD(tm.getTargetData()), TM(tm),
+ TD(tm.getDataLayout()), TM(tm),
MCE(mce), MCPEs(0), MJTEs(0),
IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {}
@@ -376,7 +376,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
"JIT relocation model must be set to static or default!");
JTI = ((ARMBaseTargetMachine &)MF.getTarget()).getJITInfo();
II = (const ARMBaseInstrInfo *)MF.getTarget().getInstrInfo();
- TD = MF.getTarget().getTargetData();
+ TD = MF.getTarget().getDataLayout();
Subtarget = &TM.getSubtarget<ARMSubtarget>();
MCPEs = &MF.getConstantPool()->getConstants();
MJTEs = 0;
@@ -389,7 +389,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
do {
DEBUG(errs() << "JITTing function '"
- << MF.getFunction()->getName() << "'\n");
+ << MF.getName() << "'\n");
MCE.startFunction(MF);
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB) {
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index a953985..a57368f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -22,7 +22,7 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -528,7 +528,7 @@ ARMConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// identity mapping of CPI's to CPE's.
const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
- const TargetData &TD = *MF->getTarget().getTargetData();
+ const DataLayout &TD = *MF->getTarget().getDataLayout();
for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
assert(Size >= 4 && "Too small constant pool entry");
@@ -1388,10 +1388,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// If the original WaterList entry was "new water" on this iteration,
// propagate that to the new island. This is just keeping NewWaterList
// updated to match the WaterList, which will be updated below.
- if (NewWaterList.count(WaterBB)) {
- NewWaterList.erase(WaterBB);
+ if (NewWaterList.erase(WaterBB))
NewWaterList.insert(NewIsland);
- }
+
// The new CPE goes before the following block (NewMBB).
NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
index 6b98d44..ae531c4 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
@@ -102,8 +102,6 @@ public:
virtual void print(raw_ostream &O) const;
void print(raw_ostream *O) const { if (O) print(*O); }
void dump() const;
-
- static bool classof(const ARMConstantPoolValue *) { return true; }
};
inline raw_ostream &operator<<(raw_ostream &O, const ARMConstantPoolValue &V) {
@@ -158,7 +156,6 @@ public:
static bool classof(const ARMConstantPoolValue *APV) {
return APV->isGlobalValue() || APV->isBlockAddress() || APV->isLSDA();
}
- static bool classof(const ARMConstantPoolConstant *) { return true; }
};
/// ARMConstantPoolSymbol - ARM-specific constantpool values for external
@@ -192,7 +189,6 @@ public:
static bool classof(const ARMConstantPoolValue *ACPV) {
return ACPV->isExtSymbol();
}
- static bool classof(const ARMConstantPoolSymbol *) { return true; }
};
/// ARMConstantPoolMBB - ARM-specific constantpool value of a machine basic
@@ -225,7 +221,6 @@ public:
static bool classof(const ARMConstantPoolValue *ACPV) {
return ACPV->isMachineBasicBlock();
}
- static bool classof(const ARMConstantPoolMBB *) { return true; }
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp
deleted file mode 100644
index f671317..0000000
--- a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-//===-- ARMELFWriterInfo.cpp - ELF Writer Info for the ARM backend --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF writer information for the ARM backend.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARMELFWriterInfo.h"
-#include "ARMRelocations.h"
-#include "llvm/Function.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/ELF.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Implementation of the ARMELFWriterInfo class
-//===----------------------------------------------------------------------===//
-
-ARMELFWriterInfo::ARMELFWriterInfo(TargetMachine &TM)
- : TargetELFWriterInfo(TM.getTargetData()->getPointerSizeInBits() == 64,
- TM.getTargetData()->isLittleEndian()) {
-}
-
-ARMELFWriterInfo::~ARMELFWriterInfo() {}
-
-unsigned ARMELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
- switch (MachineRelTy) {
- case ARM::reloc_arm_absolute:
- case ARM::reloc_arm_relative:
- case ARM::reloc_arm_cp_entry:
- case ARM::reloc_arm_vfp_cp_entry:
- case ARM::reloc_arm_machine_cp_entry:
- case ARM::reloc_arm_jt_base:
- case ARM::reloc_arm_pic_jt:
- llvm_unreachable("unsupported ARM relocation type");
-
- case ARM::reloc_arm_branch: return ELF::R_ARM_CALL;
- case ARM::reloc_arm_movt: return ELF::R_ARM_MOVT_ABS;
- case ARM::reloc_arm_movw: return ELF::R_ARM_MOVW_ABS_NC;
- default:
- llvm_unreachable("unknown ARM relocation type");
- }
-}
-
-long int ARMELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier) const {
- llvm_unreachable("ARMELFWriterInfo::getDefaultAddendForRelTy() not "
- "implemented");
-}
-
-unsigned ARMELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
- llvm_unreachable("ARMELFWriterInfo::getRelocationTySize() not implemented");
-}
-
-bool ARMELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
- llvm_unreachable("ARMELFWriterInfo::isPCRelativeRel() not implemented");
-}
-
-unsigned ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() const {
- llvm_unreachable("ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not "
- "implemented");
-}
-
-long int ARMELFWriterInfo::computeRelocation(unsigned SymOffset,
- unsigned RelOffset,
- unsigned RelTy) const {
- llvm_unreachable("ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not "
- "implemented");
-}
diff --git a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h
deleted file mode 100644
index 6a84f8a..0000000
--- a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h
+++ /dev/null
@@ -1,59 +0,0 @@
-//===-- ARMELFWriterInfo.h - ELF Writer Info for ARM ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF writer information for the ARM backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARM_ELF_WRITER_INFO_H
-#define ARM_ELF_WRITER_INFO_H
-
-#include "llvm/Target/TargetELFWriterInfo.h"
-
-namespace llvm {
- class TargetMachine;
-
- class ARMELFWriterInfo : public TargetELFWriterInfo {
- public:
- ARMELFWriterInfo(TargetMachine &TM);
- virtual ~ARMELFWriterInfo();
-
- /// getRelocationType - Returns the target specific ELF Relocation type.
- /// 'MachineRelTy' contains the object code independent relocation type
- virtual unsigned getRelocationType(unsigned MachineRelTy) const;
-
- /// hasRelocationAddend - True if the target uses an addend in the
- /// ELF relocation entry.
- virtual bool hasRelocationAddend() const { return false; }
-
- /// getDefaultAddendForRelTy - Gets the default addend value for a
- /// relocation entry based on the target ELF relocation type.
- virtual long int getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier = 0) const;
-
- /// getRelTySize - Returns the size of relocatable field in bits
- virtual unsigned getRelocationTySize(unsigned RelTy) const;
-
- /// isPCRelativeRel - True if the relocation type is pc relative
- virtual bool isPCRelativeRel(unsigned RelTy) const;
-
- /// getJumpTableRelocationTy - Returns the machine relocation type used
- /// to reference a jumptable.
- virtual unsigned getAbsoluteLabelMachineRelTy() const;
-
- /// computeRelocation - Some relocatable fields could be relocated
- /// directly, avoiding the relocation symbol emission, compute the
- /// final relocation value for this symbol.
- virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
- unsigned RelTy) const;
- };
-
-} // end llvm namespace
-
-#endif // ARM_ELF_WRITER_INFO_H
diff --git a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 15bb32e..8c45e0b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -103,9 +103,9 @@ namespace {
bool IsLoad;
bool isUpdating;
bool hasWritebackOperand;
- NEONRegSpacing RegSpacing;
- unsigned char NumRegs; // D registers loaded or stored
- unsigned char RegElts; // elements per D register; used for lane ops
+ uint8_t RegSpacing; // A NEONRegSpacing value
+ uint8_t NumRegs; // D registers loaded or stored
+ uint8_t RegElts; // elements per D register; used for lane ops
// FIXME: Temporary flag to denote whether the real instruction takes
// a single register (like the encoding) or all of the registers in
// the list (like the asm syntax and the isel DAG). When all definitions
@@ -377,7 +377,7 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
assert(TableEntry && TableEntry->IsLoad && "NEONLdStTable lookup failed");
- NEONRegSpacing RegSpc = TableEntry->RegSpacing;
+ NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
unsigned NumRegs = TableEntry->NumRegs;
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
@@ -442,7 +442,7 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
assert(TableEntry && !TableEntry->IsLoad && "NEONLdStTable lookup failed");
- NEONRegSpacing RegSpc = TableEntry->RegSpacing;
+ NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
unsigned NumRegs = TableEntry->NumRegs;
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
@@ -493,7 +493,7 @@ void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
assert(TableEntry && "NEONLdStTable lookup failed");
- NEONRegSpacing RegSpc = TableEntry->RegSpacing;
+ NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
unsigned NumRegs = TableEntry->NumRegs;
unsigned RegElts = TableEntry->RegElts;
@@ -777,9 +777,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MI.eraseFromParent();
return true;
}
- case ARM::Int_eh_sjlj_dispatchsetup:
- case ARM::Int_eh_sjlj_dispatchsetup_nofp:
- case ARM::tInt_eh_sjlj_dispatchsetup: {
+ case ARM::Int_eh_sjlj_dispatchsetup: {
MachineFunction &MF = *MI.getParent()->getParent();
const ARMBaseInstrInfo *AII =
static_cast<const ARMBaseInstrInfo*>(TII);
diff --git a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
index bf9d16e..6611862 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -40,7 +40,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
@@ -100,51 +100,53 @@ class ARMFastISel : public FastISel {
}
// Code from FastISel.cpp.
- virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC);
- virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill);
- virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill);
- virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill,
- unsigned Op2, bool Op2IsKill);
- virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- uint64_t Imm);
- virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- const ConstantFP *FPImm);
- virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill,
- uint64_t Imm);
- virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- uint64_t Imm);
- virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- uint64_t Imm1, uint64_t Imm2);
-
- virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
- unsigned Op0, bool Op0IsKill,
- uint32_t Idx);
+ private:
+ unsigned FastEmitInst_(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC);
+ unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill);
+ unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill);
+ unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill,
+ unsigned Op2, bool Op2IsKill);
+ unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm);
+ unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ const ConstantFP *FPImm);
+ unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill,
+ uint64_t Imm);
+ unsigned FastEmitInst_i(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ uint64_t Imm);
+ unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ uint64_t Imm1, uint64_t Imm2);
+
+ unsigned FastEmitInst_extractsubreg(MVT RetVT,
+ unsigned Op0, bool Op0IsKill,
+ uint32_t Idx);
// Backend specific FastISel code.
+ private:
virtual bool TargetSelectInstruction(const Instruction *I);
virtual unsigned TargetMaterializeConstant(const Constant *C);
virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
const LoadInst *LI);
-
+ private:
#include "ARMGenFastISel.inc"
// Instruction selection routines.
@@ -192,6 +194,7 @@ class ARMFastISel : public FastISel {
unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
unsigned ARMSelectCallOp(bool UseReg);
+ unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT);
// Call handling routines.
private:
@@ -615,11 +618,11 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
if (VT != MVT::i32) return 0;
Reloc::Model RelocM = TM.getRelocationModel();
-
- // TODO: Need more magic for ARM PIC.
- if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;
-
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
+ const TargetRegisterClass *RC = isThumb2 ?
+ (const TargetRegisterClass*)&ARM::rGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
+ unsigned DestReg = createResultReg(RC);
// Use movw+movt when possible, it avoids constant pool entries.
// Darwin targets don't support movt with Reloc::Static, see
@@ -649,6 +652,9 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
Align = TD.getTypeAllocSize(GV->getType());
}
+ if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
+ return ARMLowerPICELF(GV, Align, VT);
+
// Grab index.
unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
(Subtarget->isThumb() ? 4 : 8);
@@ -666,17 +672,30 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
.addConstantPoolIndex(Idx);
if (RelocM == Reloc::PIC_)
MIB.addImm(Id);
+ AddOptionalDefs(MIB);
} else {
// The extra immediate is for addrmode2.
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
DestReg)
.addConstantPoolIndex(Idx)
.addImm(0);
+ AddOptionalDefs(MIB);
+
+ if (RelocM == Reloc::PIC_) {
+ unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
+ unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
+
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc), NewDestReg)
+ .addReg(DestReg)
+ .addImm(Id);
+ AddOptionalDefs(MIB);
+ return NewDestReg;
+ }
}
- AddOptionalDefs(MIB);
}
- if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
+ if (IsIndirect) {
MachineInstrBuilder MIB;
unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
if (isThumb2)
@@ -1009,6 +1028,9 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
RC = &ARM::GPRRegClass;
break;
case MVT::i16:
+ if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
+ return false;
+
if (isThumb2) {
if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
@@ -1021,6 +1043,9 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
RC = &ARM::GPRRegClass;
break;
case MVT::i32:
+ if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+ return false;
+
if (isThumb2) {
if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
Opc = ARM::t2LDRi8;
@@ -1127,6 +1152,9 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
}
break;
case MVT::i16:
+ if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
+ return false;
+
if (isThumb2) {
if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
StrOpc = ARM::t2STRHi8;
@@ -1138,6 +1166,9 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
}
break;
case MVT::i32:
+ if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+ return false;
+
if (isThumb2) {
if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
StrOpc = ARM::t2STRi8;
@@ -1360,6 +1391,11 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
.addReg(AddrReg));
+
+ const IndirectBrInst *IB = cast<IndirectBrInst>(I);
+ for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
+ FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);
+
return true;
}
@@ -2210,25 +2246,17 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(CallOpc));
- if (isThumb2) {
- // Explicitly adding the predicate here.
+ // BL / BLX don't take a predicate, but tBL / tBLX do.
+ if (isThumb2)
AddDefaultPred(MIB);
- if (EnableARMLongCalls)
- MIB.addReg(CalleeReg);
- else
- MIB.addExternalSymbol(TLI.getLibcallName(Call));
- } else {
- if (EnableARMLongCalls)
- MIB.addReg(CalleeReg);
- else
- MIB.addExternalSymbol(TLI.getLibcallName(Call));
+ if (EnableARMLongCalls)
+ MIB.addReg(CalleeReg);
+ else
+ MIB.addExternalSymbol(TLI.getLibcallName(Call));
- // Explicitly adding the predicate here.
- AddDefaultPred(MIB);
- }
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
- MIB.addReg(RegArgs[i]);
+ MIB.addReg(RegArgs[i], RegState::Implicit);
// Add a register mask with the call-preserved registers.
// Proper defs for return values will be added by setPhysRegsDeadExcept().
@@ -2300,16 +2328,16 @@ bool ARMFastISel::SelectCall(const Instruction *I,
ISD::ArgFlagsTy Flags;
unsigned AttrInd = i - CS.arg_begin() + 1;
- if (CS.paramHasAttr(AttrInd, Attribute::SExt))
+ if (CS.paramHasAttr(AttrInd, Attributes::SExt))
Flags.setSExt();
- if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
+ if (CS.paramHasAttr(AttrInd, Attributes::ZExt))
Flags.setZExt();
// FIXME: Only handle *easy* calls for now.
- if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
- CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
- CS.paramHasAttr(AttrInd, Attribute::Nest) ||
- CS.paramHasAttr(AttrInd, Attribute::ByVal))
+ if (CS.paramHasAttr(AttrInd, Attributes::InReg) ||
+ CS.paramHasAttr(AttrInd, Attributes::StructRet) ||
+ CS.paramHasAttr(AttrInd, Attributes::Nest) ||
+ CS.paramHasAttr(AttrInd, Attributes::ByVal))
return false;
Type *ArgTy = (*i)->getType();
@@ -2356,30 +2384,20 @@ bool ARMFastISel::SelectCall(const Instruction *I,
unsigned CallOpc = ARMSelectCallOp(UseReg);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(CallOpc));
- if(isThumb2) {
- // Explicitly adding the predicate here.
- AddDefaultPred(MIB);
- if (UseReg)
- MIB.addReg(CalleeReg);
- else if (!IntrMemName)
- MIB.addGlobalAddress(GV, 0, 0);
- else
- MIB.addExternalSymbol(IntrMemName, 0);
- } else {
- if (UseReg)
- MIB.addReg(CalleeReg);
- else if (!IntrMemName)
- MIB.addGlobalAddress(GV, 0, 0);
- else
- MIB.addExternalSymbol(IntrMemName, 0);
- // Explicitly adding the predicate here.
+ // ARM calls don't take a predicate, but tBL / tBLX do.
+ if(isThumb2)
AddDefaultPred(MIB);
- }
+ if (UseReg)
+ MIB.addReg(CalleeReg);
+ else if (!IntrMemName)
+ MIB.addGlobalAddress(GV, 0, 0);
+ else
+ MIB.addExternalSymbol(IntrMemName, 0);
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
- MIB.addReg(RegArgs[i]);
+ MIB.addReg(RegArgs[i], RegState::Implicit);
// Add a register mask with the call-preserved registers.
// Proper defs for return values will be added by setPhysRegsDeadExcept().
@@ -2648,7 +2666,7 @@ bool ARMFastISel::SelectShift(const Instruction *I,
unsigned Reg1 = getRegForValue(Src1Value);
if (Reg1 == 0) return false;
- unsigned Reg2;
+ unsigned Reg2 = 0;
if (Opc == ARM::MOVsr) {
Reg2 = getRegForValue(Src2Value);
if (Reg2 == 0) return false;
@@ -2790,6 +2808,47 @@ bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
return true;
}
+unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
+ unsigned Align, EVT VT) {
+ bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
+ ARMConstantPoolConstant *CPV =
+ ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
+ unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
+
+ unsigned Opc;
+ unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
+ // Load value.
+ if (isThumb2) {
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::t2LDRpci), DestReg1)
+ .addConstantPoolIndex(Idx));
+ Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
+ } else {
+ // The extra immediate is for addrmode2.
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(ARM::LDRcp), DestReg1)
+ .addConstantPoolIndex(Idx).addImm(0));
+ Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
+ }
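+ // With GOTOFF the symbol address is GlobalBaseReg plus the loaded offset,
+ // so an ADD suffices; going through the GOT instead requires a second load
+ // from GlobalBaseReg + offset, hence the LDR register-offset opcodes.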
+
+ unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
+ if (GlobalBaseReg == 0) {
+ GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT));
+ AFI->setGlobalBaseReg(GlobalBaseReg);
+ }
+
+ unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc), DestReg2)
+ .addReg(DestReg1)
+ .addReg(GlobalBaseReg);
+ if (!UseGOTOFF)
+ MIB.addImm(0);
+ AddOptionalDefs(MIB);
+
+ return DestReg2;
+}
+
namespace llvm {
FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) {
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index aee72d2..9392497 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -153,7 +153,8 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
int FramePtrSpillFI = 0;
int D8SpillFI = 0;
- // All calls are tail calls in GHC calling conv, and functions have no prologue/epilogue.
+ // All calls are tail calls in GHC calling conv, and functions have no
+ // prologue/epilogue.
if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
return;
@@ -360,7 +361,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
int NumBytes = (int)MFI->getStackSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
- // All calls are tail calls in GHC calling conv, and functions have no prologue/epilogue.
+ // All calls are tail calls in GHC calling conv, and functions have no
+ // prologue/epilogue.
if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
return;
@@ -1151,7 +1153,7 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
return;
// Naked functions don't spill callee-saved registers.
- if (MF.getFunction()->hasFnAttr(Attribute::Naked))
+ if (MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked))
return;
// We are planning to use NEON instructions vst1 / vld1.
@@ -1176,7 +1178,7 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
MachineRegisterInfo &MRI = MF.getRegInfo();
unsigned NumSpills = 0;
for (; NumSpills < 8; ++NumSpills)
- if (!MRI.isPhysRegOrOverlapUsed(ARM::D8 + NumSpills))
+ if (!MRI.isPhysRegUsed(ARM::D8 + NumSpills))
break;
// Don't do this for just one d-register. It's not worth it.
@@ -1209,6 +1211,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
*static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
// Spill R4 if Thumb2 function requires stack realignment - it will be used as
@@ -1218,12 +1221,12 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// FIXME: It will be better just to find spare register here.
if (AFI->isThumb2Function() &&
(MFI->hasVarSizedObjects() || RegInfo->needsStackRealignment(MF)))
- MF.getRegInfo().setPhysRegUsed(ARM::R4);
+ MRI.setPhysRegUsed(ARM::R4);
if (AFI->isThumb1OnlyFunction()) {
// Spill LR if Thumb1 function uses variable length argument lists.
if (AFI->getVarArgsRegSaveSize() > 0)
- MF.getRegInfo().setPhysRegUsed(ARM::LR);
+ MRI.setPhysRegUsed(ARM::LR);
// Spill R4 if Thumb1 epilogue has to restore SP from FP. We don't know
// for sure what the stack size will be, but for this, an estimate is good
@@ -1233,7 +1236,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// FIXME: It will be better just to find spare register here.
unsigned StackSize = estimateStackSize(MF);
if (MFI->hasVarSizedObjects() || StackSize > 508)
- MF.getRegInfo().setPhysRegUsed(ARM::R4);
+ MRI.setPhysRegUsed(ARM::R4);
}
// See if we can spill vector registers to aligned stack.
@@ -1241,7 +1244,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Spill the BasePtr if it's used.
if (RegInfo->hasBasePointer(MF))
- MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
+ MRI.setPhysRegUsed(RegInfo->getBaseRegister());
// Don't spill FP if the frame can be eliminated. This is determined
// by scanning the callee-save registers to see if any is used.
@@ -1249,7 +1252,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
bool Spilled = false;
- if (MF.getRegInfo().isPhysRegOrOverlapUsed(Reg)) {
+ if (MRI.isPhysRegUsed(Reg)) {
Spilled = true;
CanEliminateFrame = false;
}
@@ -1338,7 +1341,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled.
// Spill LR as well so we can fold BX_RET to the registers restore (LDM).
if (!LRSpilled && CS1Spilled) {
- MF.getRegInfo().setPhysRegUsed(ARM::LR);
+ MRI.setPhysRegUsed(ARM::LR);
NumGPRSpills++;
UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
@@ -1347,7 +1350,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
}
if (hasFP(MF)) {
- MF.getRegInfo().setPhysRegUsed(FramePtr);
+ MRI.setPhysRegUsed(FramePtr);
NumGPRSpills++;
}
@@ -1362,16 +1365,16 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Don't spill high register if the function is thumb1
if (!AFI->isThumb1OnlyFunction() ||
isARMLowRegister(Reg) || Reg == ARM::LR) {
- MF.getRegInfo().setPhysRegUsed(Reg);
- if (!RegInfo->isReservedReg(MF, Reg))
+ MRI.setPhysRegUsed(Reg);
+ if (!MRI.isReserved(Reg))
ExtraCSSpill = true;
break;
}
}
} else if (!UnspilledCS2GPRs.empty() && !AFI->isThumb1OnlyFunction()) {
unsigned Reg = UnspilledCS2GPRs.front();
- MF.getRegInfo().setPhysRegUsed(Reg);
- if (!RegInfo->isReservedReg(MF, Reg))
+ MRI.setPhysRegUsed(Reg);
+ if (!MRI.isReserved(Reg))
ExtraCSSpill = true;
}
}
@@ -1389,7 +1392,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
while (NumExtras && !UnspilledCS1GPRs.empty()) {
unsigned Reg = UnspilledCS1GPRs.back();
UnspilledCS1GPRs.pop_back();
- if (!RegInfo->isReservedReg(MF, Reg) &&
+ if (!MRI.isReserved(Reg) &&
(!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
Reg == ARM::LR)) {
Extras.push_back(Reg);
@@ -1401,7 +1404,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
while (NumExtras && !UnspilledCS2GPRs.empty()) {
unsigned Reg = UnspilledCS2GPRs.back();
UnspilledCS2GPRs.pop_back();
- if (!RegInfo->isReservedReg(MF, Reg)) {
+ if (!MRI.isReserved(Reg)) {
Extras.push_back(Reg);
NumExtras--;
}
@@ -1409,7 +1412,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
}
if (Extras.size() && NumExtras == 0) {
for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
- MF.getRegInfo().setPhysRegUsed(Extras[i]);
+ MRI.setPhysRegUsed(Extras[i]);
}
} else if (!AFI->isThumb1OnlyFunction()) {
// note: Thumb1 functions spill to R12, not the stack. Reserve a slot
@@ -1423,7 +1426,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
}
if (ForceLRSpill) {
- MF.getRegInfo().setPhysRegUsed(ARM::LR);
+ MRI.setPhysRegUsed(ARM::LR);
AFI->setLRIsSpilledForFarJump(true);
}
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
index a5fd15b..1240169 100644
--- a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -47,7 +47,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
// Skip over one non-VFP / NEON instruction.
if (!LastMI->isBarrier() &&
// On A9, AGU and NEON/FPU are muxed.
- !(STI.isCortexA9() && (LastMI->mayLoad() || LastMI->mayStore())) &&
+ !(STI.isLikeA9() && (LastMI->mayLoad() || LastMI->mayStore())) &&
(LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
MachineBasicBlock::iterator I = LastMI;
if (I != LastMI->getParent()->begin()) {
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index a3a6c31..efd6d2b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -239,7 +239,6 @@ private:
/// SelectCMOVOp - Select CMOV instructions for ARM.
SDNode *SelectCMOVOp(SDNode *N);
- SDNode *SelectConditionalOp(SDNode *N);
SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
ARMCC::CondCodes CCVal, SDValue CCR,
SDValue InFlag);
@@ -306,7 +305,7 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
}
/// \brief Check whether a particular node is a constant value representable as
-/// (N * Scale) where (N in [\arg RangeMin, \arg RangeMax).
+/// (N * Scale) where (N in [\p RangeMin, \p RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, int Scale,
@@ -337,7 +336,8 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
if (!CheckVMLxHazard)
return true;
- if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
+ if (!Subtarget->isCortexA8() && !Subtarget->isLikeA9() &&
+ !Subtarget->isSwift())
return true;
if (!N->hasOneUse())
@@ -375,12 +375,13 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
ARM_AM::ShiftOpc ShOpcVal,
unsigned ShAmt) {
- if (!Subtarget->isCortexA9())
+ if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
return true;
if (Shift.hasOneUse())
return true;
// R << 2 is free.
- return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
+ return ShOpcVal == ARM_AM::lsl &&
+ (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
}
bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
@@ -487,7 +488,7 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &Opc) {
if (N.getOpcode() == ISD::MUL &&
- (!Subtarget->isCortexA9() || N.hasOneUse())) {
+ ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
// X * [3,5,9] -> X + X * [2,4,8] etc.
int RHSC = (int)RHS->getZExtValue();
@@ -551,7 +552,8 @@ bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
// Try matching (R shl C) + (R).
if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
- !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
+ !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
+ N.getOperand(0).hasOneUse())) {
ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant, if not, we can't
@@ -585,7 +587,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
SDValue &Offset,
SDValue &Opc) {
if (N.getOpcode() == ISD::MUL &&
- (!Subtarget->isCortexA9() || N.hasOneUse())) {
+ (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
// X * [3,5,9] -> X + X * [2,4,8] etc.
int RHSC = (int)RHS->getZExtValue();
@@ -651,7 +653,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
}
}
- if (Subtarget->isCortexA9() && !N.hasOneUse()) {
+ if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
// Compute R +/- (R << N) and reuse it.
Base = N;
Offset = CurDAG->getRegister(0, MVT::i32);
@@ -689,7 +691,8 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
// Try matching (R shl C) + (R).
if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
- !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
+ !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
+ N.getOperand(0).hasOneUse())) {
ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant, if not, we can't
@@ -2363,121 +2366,6 @@ SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
}
-SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
- SDValue FalseVal = N->getOperand(0);
- SDValue TrueVal = N->getOperand(1);
- ARMCC::CondCodes CCVal =
- (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
- SDValue CCR = N->getOperand(3);
- assert(CCR.getOpcode() == ISD::Register);
- SDValue InFlag = N->getOperand(4);
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
-
- if (Subtarget->isThumb()) {
- SDValue CPTmp0;
- SDValue CPTmp1;
- if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
- unsigned Opc;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected node");
- case ARMISD::CAND: Opc = ARM::t2ANDCCrs; break;
- case ARMISD::COR: Opc = ARM::t2ORRCCrs; break;
- case ARMISD::CXOR: Opc = ARM::t2EORCCrs; break;
- }
- SDValue Ops[] = {
- FalseVal, FalseVal, CPTmp0, CPTmp1, CC, CCR, Reg0, InFlag
- };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
- }
-
- ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
- if (T) {
- unsigned TrueImm = T->getZExtValue();
- if (is_t2_so_imm(TrueImm)) {
- unsigned Opc;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected node");
- case ARMISD::CAND: Opc = ARM::t2ANDCCri; break;
- case ARMISD::COR: Opc = ARM::t2ORRCCri; break;
- case ARMISD::CXOR: Opc = ARM::t2EORCCri; break;
- }
- SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
- SDValue Ops[] = { FalseVal, FalseVal, True, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
- }
- }
-
- unsigned Opc;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected node");
- case ARMISD::CAND: Opc = ARM::t2ANDCCrr; break;
- case ARMISD::COR: Opc = ARM::t2ORRCCrr; break;
- case ARMISD::CXOR: Opc = ARM::t2EORCCrr; break;
- }
- SDValue Ops[] = { FalseVal, FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
- }
-
- SDValue CPTmp0;
- SDValue CPTmp1;
- SDValue CPTmp2;
- if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
- unsigned Opc;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected node");
- case ARMISD::CAND: Opc = ARM::ANDCCrsi; break;
- case ARMISD::COR: Opc = ARM::ORRCCrsi; break;
- case ARMISD::CXOR: Opc = ARM::EORCCrsi; break;
- }
- SDValue Ops[] = {
- FalseVal, FalseVal, CPTmp0, CPTmp2, CC, CCR, Reg0, InFlag
- };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
- }
-
- if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
- unsigned Opc;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected node");
- case ARMISD::CAND: Opc = ARM::ANDCCrsr; break;
- case ARMISD::COR: Opc = ARM::ORRCCrsr; break;
- case ARMISD::CXOR: Opc = ARM::EORCCrsr; break;
- }
- SDValue Ops[] = {
- FalseVal, FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, Reg0, InFlag
- };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 9);
- }
-
- ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
- if (T) {
- unsigned TrueImm = T->getZExtValue();
- if (is_so_imm(TrueImm)) {
- unsigned Opc;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected node");
- case ARMISD::CAND: Opc = ARM::ANDCCri; break;
- case ARMISD::COR: Opc = ARM::ORRCCri; break;
- case ARMISD::CXOR: Opc = ARM::EORCCri; break;
- }
- SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
- SDValue Ops[] = { FalseVal, FalseVal, True, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
- }
- }
-
- unsigned Opc;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected node");
- case ARMISD::CAND: Opc = ARM::ANDCCrr; break;
- case ARMISD::COR: Opc = ARM::ORRCCrr; break;
- case ARMISD::CXOR: Opc = ARM::EORCCrr; break;
- }
- SDValue Ops[] = { FalseVal, FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
-}
-
/// Target-specific DAG combining for ISD::XOR.
/// Target-independent combining lowers SELECT_CC nodes of the form
/// select_cc setg[ge] X, 0, X, -X
@@ -2753,6 +2641,38 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
dl, MVT::i32, MVT::i32, Ops, 5);
}
}
+ case ARMISD::UMLAL:{
+ if (Subtarget->isThumb()) {
+ SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
+ N->getOperand(3), getAL(CurDAG),
+ CurDAG->getRegister(0, MVT::i32)};
+ return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops, 6);
+ } else {
+ SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
+ N->getOperand(3), getAL(CurDAG),
+ CurDAG->getRegister(0, MVT::i32),
+ CurDAG->getRegister(0, MVT::i32) };
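+ // getAL plus the first zero register form the predicate operand; the
+ // trailing zero register is the optional cc_out ('s' bit) operand, which
+ // the Thumb2 form above does not take.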
+ return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
+ ARM::UMLAL : ARM::UMLALv5,
+ dl, MVT::i32, MVT::i32, Ops, 7);
+ }
+ }
+ case ARMISD::SMLAL:{
+ if (Subtarget->isThumb()) {
+ SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
+ N->getOperand(3), getAL(CurDAG),
+ CurDAG->getRegister(0, MVT::i32)};
+ return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops, 6);
+ } else {
+ SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
+ N->getOperand(3), getAL(CurDAG),
+ CurDAG->getRegister(0, MVT::i32),
+ CurDAG->getRegister(0, MVT::i32) };
+ return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
+ ARM::SMLAL : ARM::SMLALv5,
+ dl, MVT::i32, MVT::i32, Ops, 7);
+ }
+ }
case ISD::LOAD: {
SDNode *ResNode = 0;
if (Subtarget->isThumb() && Subtarget->hasThumb2())
@@ -2805,10 +2725,6 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::CMOV:
return SelectCMOVOp(N);
- case ARMISD::CAND:
- case ARMISD::COR:
- case ARMISD::CXOR:
- return SelectConditionalOp(N);
case ARMISD::VZIP: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 190ca07..ff99b04 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -122,6 +122,7 @@ void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
setOperationAction(ISD::SELECT, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
+ setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
if (VT.isInteger()) {
setOperationAction(ISD::SHL, VT, Custom);
@@ -514,6 +515,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
+ setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
// Neon does not support some operations on v1i64 and v2i64 types.
setOperationAction(ISD::MUL, MVT::v1i64, Expand);
@@ -566,6 +568,11 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
}
}
+ // ARM and Thumb2 support UMLAL/SMLAL.
+ if (!Subtarget->isThumb1Only())
+ setTargetDAGCombine(ISD::ADDC);
+
computeRegisterProperties();
// ARM does not have f32 extending load.
@@ -629,9 +636,9 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (!Subtarget->hasV6Ops())
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
- // These are expanded into libcalls.
- if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
- // v7M has a hardware divider
+ if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
+ !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
+ // These are expanded into libcalls if the CPU doesn't have a HW divider.
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
}
@@ -791,12 +798,9 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::SUB);
setTargetDAGCombine(ISD::MUL);
-
- if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON()) {
- setTargetDAGCombine(ISD::AND);
- setTargetDAGCombine(ISD::OR);
- setTargetDAGCombine(ISD::XOR);
- }
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::OR);
+ setTargetDAGCombine(ISD::XOR);
if (Subtarget->hasV6Ops())
setTargetDAGCombine(ISD::SRL);
@@ -821,7 +825,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
benefitFromCodePlacementOpt = true;
// Prefer likely predicted branches to selects on out-of-order cores.
- predictableSelectIsExpensive = Subtarget->isCortexA9();
+ predictableSelectIsExpensive = Subtarget->isLikeA9();
setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}
@@ -898,9 +902,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
case ARMISD::CMOV: return "ARMISD::CMOV";
- case ARMISD::CAND: return "ARMISD::CAND";
- case ARMISD::COR: return "ARMISD::COR";
- case ARMISD::CXOR: return "ARMISD::CXOR";
case ARMISD::RBIT: return "ARMISD::RBIT";
@@ -984,6 +985,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::VTBL2: return "ARMISD::VTBL2";
case ARMISD::VMULLs: return "ARMISD::VMULLs";
case ARMISD::VMULLu: return "ARMISD::VMULLu";
+ case ARMISD::UMLAL: return "ARMISD::UMLAL";
+ case ARMISD::SMLAL: return "ARMISD::SMLAL";
case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
case ARMISD::FMAX: return "ARMISD::FMAX";
case ARMISD::FMIN: return "ARMISD::FMIN";
@@ -1591,19 +1594,19 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// FIXME: handle tail calls differently.
unsigned CallOpc;
+ bool HasMinSizeAttr = MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::MinSize);
if (Subtarget->isThumb()) {
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
- else if (doesNotRet && isDirect && !isARMFunc &&
- Subtarget->hasRAS() && !Subtarget->isThumb1Only())
- // "mov lr, pc; b _foo" to avoid confusing the RSP
- CallOpc = ARMISD::CALL_NOLINK;
else
CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
} else {
- if (!isDirect && !Subtarget->hasV5TOps()) {
+ if (!isDirect && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
- } else if (doesNotRet && isDirect && Subtarget->hasRAS())
+ else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
+ // Emit regular call when code size is the priority
+ !HasMinSizeAttr)
// "mov lr, pc; b _foo" to avoid confusing the RSP
CallOpc = ARMISD::CALL_NOLINK;
else
@@ -1653,22 +1656,31 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void
-ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
+ARMTargetLowering::HandleByVal(
+ CCState *State, unsigned &size, unsigned Align) const {
unsigned reg = State->AllocateReg(GPRArgRegs, 4);
assert((State->getCallOrPrologue() == Prologue ||
State->getCallOrPrologue() == Call) &&
"unhandled ParmContext");
if ((!State->isFirstByValRegValid()) &&
(ARM::R0 <= reg) && (reg <= ARM::R3)) {
- State->setFirstByValReg(reg);
- // At a call site, a byval parameter that is split between
- // registers and memory needs its size truncated here. In a
- // function prologue, such byval parameters are reassembled in
- // memory, and are not truncated.
- if (State->getCallOrPrologue() == Call) {
- unsigned excess = 4 * (ARM::R4 - reg);
- assert(size >= excess && "expected larger existing stack allocation");
- size -= excess;
+ if (Subtarget->isAAPCS_ABI() && Align > 4) {
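+ // Per AAPCS, an argument with greater than 4-byte alignment must start
+ // in an even-numbered register; e.g. an 8-byte-aligned byval arriving
+ // when R1 is next skips R1 and starts in R2.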
+ unsigned AlignInRegs = Align / 4;
+ unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
+ for (unsigned i = 0; i < Waste; ++i)
+ reg = State->AllocateReg(GPRArgRegs, 4);
+ }
+ if (reg != 0) {
+ State->setFirstByValReg(reg);
+ // At a call site, a byval parameter that is split between
+ // registers and memory needs its size truncated here. In a
+ // function prologue, such byval parameters are reassembled in
+ // memory, and are not truncated.
+ if (State->getCallOrPrologue() == Call) {
+ unsigned excess = 4 * (ARM::R4 - reg);
+ assert(size >= excess && "expected larger existing stack allocation");
+ size -= excess;
+ }
}
}
// Confiscate any remaining parameter registers to preclude their
@@ -1801,6 +1813,14 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
}
}
+ // If Caller's vararg or byval argument has been split between registers and
+ // stack, do not perform tail call, since part of the argument is in caller's
+ // local frame.
+ const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
+ getInfo<ARMFunctionInfo>();
+ if (AFI_Caller->getVarArgsRegSaveSize())
+ return false;
+
// If the callee takes no arguments then go on to check the results of the
// call.
if (!Outs.empty()) {
@@ -2532,7 +2552,10 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
DebugLoc dl, SDValue &Chain,
- unsigned ArgOffset) const {
+ const Value *OrigArg,
+ unsigned OffsetFromOrigArg,
+ unsigned ArgOffset,
+ bool ForceMutable) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -2559,7 +2582,7 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
getPointerTy());
SmallVector<SDValue, 4> MemOps;
- for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
+ for (unsigned i = 0; firstRegToSaveIndex < 4; ++firstRegToSaveIndex, ++i) {
const TargetRegisterClass *RC;
if (AFI->isThumb1OnlyFunction())
RC = &ARM::tGPRRegClass;
@@ -2570,7 +2593,7 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
+ MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i),
false, false, 0);
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
@@ -2581,7 +2604,8 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
&MemOps[0], MemOps.size());
} else
// This will point to the next argument passed via stack.
- AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
+ AFI->setVarArgsFrameIndex(
+ MFI->CreateFixedObject(4, ArgOffset, !ForceMutable));
}
SDValue
@@ -2604,14 +2628,16 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
CCInfo.AnalyzeFormalArguments(Ins,
CCAssignFnForNode(CallConv, /* Return*/ false,
isVarArg));
-
+
SmallVector<SDValue, 16> ArgValues;
int lastInsIndex = -1;
-
SDValue ArgValue;
+ Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
+ unsigned CurArgIdx = 0;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
-
+ std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
+ CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
// Arguments stored in registers.
if (VA.isRegLoc()) {
EVT RegVT = VA.getLocVT();
@@ -2705,14 +2731,20 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// Since they could be overwritten by lowering of arguments in case of
// a tail call.
if (Flags.isByVal()) {
- unsigned VARegSize, VARegSaveSize;
- computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize);
- VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0);
- unsigned Bytes = Flags.getByValSize() - VARegSize;
- if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
- int FI = MFI->CreateFixedObject(Bytes,
- VA.getLocMemOffset(), false);
- InVals.push_back(DAG.getFrameIndex(FI, getPointerTy()));
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ if (!AFI->getVarArgsFrameIndex()) {
+ VarArgStyleRegisters(CCInfo, DAG,
+ dl, Chain, CurOrigArg,
+ Ins[VA.getValNo()].PartOffset,
+ VA.getLocMemOffset(),
+ true /*force mutable frames*/);
+ int VAFrameIndex = AFI->getVarArgsFrameIndex();
+ InVals.push_back(DAG.getFrameIndex(VAFrameIndex, getPointerTy()));
+ } else {
+ int FI = MFI->CreateFixedObject(Flags.getByValSize(),
+ VA.getLocMemOffset(), false);
+ InVals.push_back(DAG.getFrameIndex(FI, getPointerTy()));
+ }
} else {
int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
VA.getLocMemOffset(), true);
@@ -2730,7 +2762,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// varargs
if (isVarArg)
- VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset());
+ VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0, 0,
+ CCInfo.getNextStackOffset());
return Chain;
}
@@ -3890,6 +3923,36 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
return SDValue();
}
+// Check whether a VEXT instruction can handle the shuffle mask when the
+// vector sources of the shuffle are the same.
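+// For example, on a 4-element vector the mask <1, 2, 3, 0> extracts
+// successive elements starting at index 1 from (V1, V1), i.e. "vext #1".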
+static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ // Assume that the first shuffle index is not UNDEF. Fail if it is.
+ if (M[0] < 0)
+ return false;
+
+ Imm = M[0];
+
+ // If this is a VEXT shuffle, the immediate value is the index of the first
+ // element. The other shuffle indices must be the successive elements after
+ // the first one.
+ unsigned ExpectedElt = Imm;
+ for (unsigned i = 1; i < NumElts; ++i) {
+ // Increment the expected index. If it wraps around, just follow it
+ // back to index zero and keep going.
+ ++ExpectedElt;
+ if (ExpectedElt == NumElts)
+ ExpectedElt = 0;
+
+ if (M[i] < 0) continue; // ignore UNDEF indices
+ if (ExpectedElt != static_cast<unsigned>(M[i]))
+ return false;
+ }
+
+ return true;
+}
+
static bool isVEXTMask(ArrayRef<int> M, EVT VT,
bool &ReverseVEXT, unsigned &Imm) {
@@ -4157,10 +4220,21 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
}
// Scan through the operands to see if only one value is used.
+ //
+ // As an optimisation, even if more than one value is used it may be more
+ // profitable to splat one value and then change some lanes.
+ //
+ // Heuristically we decide to do this if the vector has a "dominant" value,
+ // defined as splatted to more than half of the lanes.
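+ //
+ // For example, <a, a, b, a> has dominant value 'a'; it is splatted with a
+ // VDUP and lane 2 is then overwritten via INSERT_VECTOR_ELT.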
unsigned NumElts = VT.getVectorNumElements();
bool isOnlyLowElement = true;
bool usesOnlyOneValue = true;
+ bool hasDominantValue = false;
bool isConstant = true;
+
+ // Map of the number of times a particular SDValue appears in the
+ // element list.
+ DenseMap<SDValue, unsigned> ValueCounts;
SDValue Value;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
@@ -4171,13 +4245,21 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
isConstant = false;
- if (!Value.getNode())
+ ValueCounts.insert(std::make_pair(V, 0));
+ unsigned &Count = ValueCounts[V];
+
+ // Is this value dominant? (takes up more than half of the lanes)
+ if (++Count > (NumElts / 2)) {
+ hasDominantValue = true;
Value = V;
- else if (V != Value)
- usesOnlyOneValue = false;
+ }
}
+ if (ValueCounts.size() != 1)
+ usesOnlyOneValue = false;
+ if (!Value.getNode() && ValueCounts.size() > 0)
+ Value = ValueCounts.begin()->first;
- if (!Value.getNode())
+ if (ValueCounts.size() == 0)
return DAG.getUNDEF(VT);
if (isOnlyLowElement)
@@ -4187,9 +4269,51 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
// Use VDUP for non-constant splats. For f32 constant splats, reduce to
// i32 and try again.
- if (usesOnlyOneValue && EltSize <= 32) {
- if (!isConstant)
- return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
+ if (hasDominantValue && EltSize <= 32) {
+ if (!isConstant) {
+ SDValue N;
+
+ // If we are VDUPing a value that comes directly from a vector, that will
+ // cause an unnecessary move to and from a GPR, where instead we could
+ // just use VDUPLANE.
+ if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+ // We need to create a new undef vector to use for the VDUPLANE if the
+ // size of the vector from which we get the value is different than the
+ // size of the vector that we need to create. We will insert the element
+ // such that the register coalescer will remove unnecessary copies.
+ if (VT != Value->getOperand(0).getValueType()) {
+ ConstantSDNode *constIndex;
+ constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1));
+ assert(constIndex && "The index is not a constant!");
+ unsigned index = constIndex->getAPIntValue().getLimitedValue() %
+ VT.getVectorNumElements();
+ N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
+ DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
+ Value, DAG.getConstant(index, MVT::i32)),
+ DAG.getConstant(index, MVT::i32));
+ } else {
+ N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
+ Value->getOperand(0), Value->getOperand(1));
+ }
+ }
+ else
+ N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
+
+ if (!usesOnlyOneValue) {
+ // The dominant value was splatted as 'N', but we now have to insert
+ // all differing elements.
+ for (unsigned I = 0; I < NumElts; ++I) {
+ if (Op.getOperand(I) == Value)
+ continue;
+ SmallVector<SDValue, 3> Ops;
+ Ops.push_back(N);
+ Ops.push_back(Op.getOperand(I));
+ Ops.push_back(DAG.getConstant(I, MVT::i32));
+ N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, &Ops[0], 3);
+ }
+ }
+ return N;
+ }
if (VT.getVectorElementType().isFloatingPoint()) {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i)
@@ -4201,9 +4325,11 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
if (Val.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
- SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
- if (Val.getNode())
- return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
+ if (usesOnlyOneValue) {
+ SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
+ if (isConstant && Val.getNode())
+ return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
+ }
}
// If all elements are constants and the case above didn't get hit, fall back
@@ -4586,6 +4712,12 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (isVREVMask(ShuffleMask, VT, 16))
return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
+ if (V2->getOpcode() == ISD::UNDEF &&
+ isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
+ return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
+ DAG.getConstant(Imm, MVT::i32));
+ }
+
// Check for Neon shuffles that modify both input vectors in place.
// If both results are used, i.e., if there are two shuffles with the same
// source operands and with masks corresponding to both results of one of
@@ -5421,7 +5553,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
const TargetRegisterClass *TRC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::rGPRRegClass :
(const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned scratch = MRI.createVirtualRegister(TRC);
unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
@@ -5532,7 +5664,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
const TargetRegisterClass *TRC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::rGPRRegClass :
(const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned scratch = MRI.createVirtualRegister(TRC);
unsigned scratch2 = MRI.createVirtualRegister(TRC);
@@ -5546,7 +5678,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
// ldrex dest, ptr
// (sign extend dest, if required)
// cmp dest, incr
- // cmov.cond scratch2, dest, incr
+ // cmov.cond scratch2, incr, dest
// strex scratch, scratch2, ptr
// cmp scratch, #0
// bne- loopMBB
@@ -5569,7 +5701,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
.addReg(oldval).addReg(incr));
BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2)
- .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR);
+ .addReg(incr).addReg(oldval).addImm(Cond).addReg(ARM::CPSR);
MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
if (strOpc == ARM::t2STREX)
@@ -5939,12 +6071,15 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
MachineMemOperand::MOLoad |
MachineMemOperand::MOVolatile, 4, 4);
- if (AFI->isThumb1OnlyFunction())
- BuildMI(DispatchBB, dl, TII->get(ARM::tInt_eh_sjlj_dispatchsetup));
- else if (!Subtarget->hasVFP2())
- BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup_nofp));
- else
- BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
+
+ const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
+ const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
+
+ // Add a register mask with no preserved registers. This results in all
+ // registers being marked as clobbered.
+ MIB.addRegMask(RI.getNoPreservedMask());
unsigned NumLPads = LPadList.size();
if (Subtarget->isThumb2()) {
@@ -6016,9 +6151,9 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
// MachineConstantPool wants an explicit alignment.
- unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+ unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
if (Align == 0)
- Align = getTargetData()->getTypeAllocSize(C->getType());
+ Align = getDataLayout()->getTypeAllocSize(C->getType());
unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
unsigned VReg1 = MRI->createVirtualRegister(TRC);
@@ -6105,9 +6240,9 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
// MachineConstantPool wants an explicit alignment.
- unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+ unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
if (Align == 0)
- Align = getTargetData()->getTypeAllocSize(C->getType());
+ Align = getDataLayout()->getTypeAllocSize(C->getType());
unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
unsigned VReg1 = MRI->createVirtualRegister(TRC);
@@ -6154,18 +6289,15 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
}
// Add the jump table entries as successors to the MBB.
- MachineBasicBlock *PrevMBB = 0;
+ SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
for (std::vector<MachineBasicBlock*>::iterator
I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
MachineBasicBlock *CurMBB = *I;
- if (PrevMBB != CurMBB)
+ if (SeenMBBs.insert(CurMBB))
DispContBB->addSuccessor(CurMBB);
- PrevMBB = CurMBB;
}
// N.B. the order the invoke BBs are processed in doesn't matter here.
- const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
- const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
const uint16_t *SavedRegs = RI.getCalleeSavedRegs(MF);
SmallVector<MachineBasicBlock*, 64> MBBLPads;
for (SmallPtrSet<MachineBasicBlock*, 64>::iterator
@@ -6279,7 +6411,8 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
UnitSize = 2;
} else {
// Check whether we can use NEON instructions.
- if (!MF->getFunction()->hasFnAttr(Attribute::NoImplicitFloat) &&
+ if (!MF->getFunction()->getFnAttributes().
+ hasAttribute(Attributes::NoImplicitFloat) &&
Subtarget->hasNEON()) {
if ((Align % 16 == 0) && SizeVal >= 16) {
ldrOpc = ARM::VLD1q32wb_fixed;
@@ -6364,7 +6497,8 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
} else {
AddDefaultPred(BuildMI(*BB, MI, dl,
TII->get(ldrOpc),scratch)
- .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1));
+ .addReg(srcOut, RegState::Define).addReg(srcIn)
+ .addReg(0).addImm(1));
AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
.addReg(scratch).addReg(destIn)
@@ -6427,9 +6561,9 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
// MachineConstantPool wants an explicit alignment.
- unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+ unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
if (Align == 0)
- Align = getTargetData()->getTypeAllocSize(C->getType());
+ Align = getDataLayout()->getTypeAllocSize(C->getType());
unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp))
@@ -6981,73 +7115,131 @@ static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
return AllOnes ? C->isAllOnesValue() : C->isNullValue();
}
+// Return true if N is conditionally 0 or all ones.
+// Detects these expressions where cc is an i1 value:
+//
+// (select cc 0, y) [AllOnes=0]
+// (select cc y, 0) [AllOnes=0]
+// (zext cc) [AllOnes=0]
+// (sext cc) [AllOnes=0/1]
+// (select cc -1, y) [AllOnes=1]
+// (select cc y, -1) [AllOnes=1]
+//
+// Invert is set when N is the null/all ones constant when CC is false.
+// OtherOp is set to the alternative value of N.
+static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
+ SDValue &CC, bool &Invert,
+ SDValue &OtherOp,
+ SelectionDAG &DAG) {
+ switch (N->getOpcode()) {
+ default: return false;
+ case ISD::SELECT: {
+ CC = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue N2 = N->getOperand(2);
+ if (isZeroOrAllOnes(N1, AllOnes)) {
+ Invert = false;
+ OtherOp = N2;
+ return true;
+ }
+ if (isZeroOrAllOnes(N2, AllOnes)) {
+ Invert = true;
+ OtherOp = N1;
+ return true;
+ }
+ return false;
+ }
+ case ISD::ZERO_EXTEND:
+ // (zext cc) can never be the all ones value.
+ if (AllOnes)
+ return false;
+ // Fall through.
+ case ISD::SIGN_EXTEND: {
+ EVT VT = N->getValueType(0);
+ CC = N->getOperand(0);
+ if (CC.getValueType() != MVT::i1)
+ return false;
+ Invert = !AllOnes;
+ if (AllOnes)
+ // When looking for an AllOnes constant, N is an sext, and the 'other'
+ // value is 0.
+ OtherOp = DAG.getConstant(0, VT);
+ else if (N->getOpcode() == ISD::ZERO_EXTEND)
+ // When looking for a 0 constant, N can be zext or sext.
+ OtherOp = DAG.getConstant(1, VT);
+ else
+ OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
+ return true;
+ }
+ }
+}
+
// Combine a constant select operand into its use:
//
-// (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
-// (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
+// (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
+// (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
+// (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1]
+// (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
+// (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
-// is null.
+// is null, or all ones when AllOnes is set.
+//
+// Also recognize sext/zext from i1:
+//
+// (add (zext cc), x) -> (select cc (add x, 1), x)
+// (add (sext cc), x) -> (select cc (add x, -1), x)
+//
+// These transformations eventually create predicated instructions.
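+// For example, (select cc, x, (add x, c)) can later be selected as a single
+// predicated add such as "addne rD, rX, #c".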
//
// @param N The node to transform.
// @param Slct The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI Context.
+// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ bool AllOnes = false) {
SelectionDAG &DAG = DCI.DAG;
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = N->getValueType(0);
- unsigned Opc = N->getOpcode();
- bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
- SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
- SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
- ISD::CondCode CC = ISD::SETCC_INVALID;
-
- if (isSlctCC) {
- CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
- } else {
- SDValue CCOp = Slct.getOperand(0);
- if (CCOp.getOpcode() == ISD::SETCC)
- CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
- }
-
- bool DoXform = false;
- bool InvCC = false;
- assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
- "Bad input!");
+ SDValue NonConstantVal;
+ SDValue CCOp;
+ bool SwapSelectOps;
+ if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
+ NonConstantVal, DAG))
+ return SDValue();
- if (isZeroOrAllOnes(LHS, false)) {
- DoXform = true;
- } else if (CC != ISD::SETCC_INVALID && isZeroOrAllOnes(RHS, false)) {
- std::swap(LHS, RHS);
- SDValue Op0 = Slct.getOperand(0);
- EVT OpVT = isSlctCC ? Op0.getValueType() : Op0.getOperand(0).getValueType();
- bool isInt = OpVT.isInteger();
- CC = ISD::getSetCCInverse(CC, isInt);
+ // Slct is now known to be the desired identity constant when CC is true.
+ SDValue TrueVal = OtherOp;
+ SDValue FalseVal = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT,
+ OtherOp, NonConstantVal);
+ // Unless SwapSelectOps says CC should be false.
+ if (SwapSelectOps)
+ std::swap(TrueVal, FalseVal);
- if (!TLI.isCondCodeLegal(CC, OpVT))
- return SDValue(); // Inverse operator isn't legal.
+ return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
+ CCOp, TrueVal, FalseVal);
+}
- DoXform = true;
- InvCC = true;
+// Attempt combineSelectAndUse on each operand of a commutative operator N.
+static
+SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ if (N0.getNode()->hasOneUse()) {
+ SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes);
+ if (Result.getNode())
+ return Result;
}
-
- if (!DoXform)
- return SDValue();
-
- SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
- if (isSlctCC)
- return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
- Slct.getOperand(0), Slct.getOperand(1), CC);
- SDValue CCOp = Slct.getOperand(0);
- if (InvCC)
- CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
- CCOp.getOperand(0), CCOp.getOperand(1), CC);
- return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
- CCOp, OtherOp, Result);
+ if (N1.getNode()->hasOneUse()) {
+ SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes);
+ if (Result.getNode())
+ return Result;
+ }
+ return SDValue();
}
// AddCombineToVPADDL - For pair-wise add on NEON, use the vpaddl instruction
@@ -7139,6 +7331,154 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp);
}
+static SDValue findMUL_LOHI(SDValue V) {
+ if (V->getOpcode() == ISD::UMUL_LOHI ||
+ V->getOpcode() == ISD::SMUL_LOHI)
+ return V;
+ return SDValue();
+}
+
+static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const ARMSubtarget *Subtarget) {
+
+ if (Subtarget->isThumb1Only()) return SDValue();
+
+ // Only perform the checks after legalize when the pattern is available.
+ if (DCI.isBeforeLegalize()) return SDValue();
+
+ // Look for multiply add opportunities.
+ // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
+ // each add node consumes a value from ISD::UMUL_LOHI and there is
+ // a glue link from the first add to the second add.
+ // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
+ // a S/UMLAL instruction.
+ // loAdd UMUL_LOHI
+ // \ / :lo \ :hi
+ // \ / \ [no multiline comment]
+ // ADDC | hiAdd
+ // \ :glue / /
+ // \ / /
+ // ADDE
+ //
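+ // In C terms this matches a widening multiply-accumulate such as
+ //   uint64_t mac(uint32_t a, uint32_t b, uint64_t acc) {
+ //     return acc + (uint64_t)a * (uint64_t)b;
+ //   }
+ // after the i64 values have been legalized into lo/hi i32 pairs.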
+ assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
+ SDValue AddcOp0 = AddcNode->getOperand(0);
+ SDValue AddcOp1 = AddcNode->getOperand(1);
+
+ // Check if the two operands are from the same mul_lohi node.
+ if (AddcOp0.getNode() == AddcOp1.getNode())
+ return SDValue();
+
+ assert(AddcNode->getNumValues() == 2 &&
+ AddcNode->getValueType(0) == MVT::i32 &&
+ AddcNode->getValueType(1) == MVT::Glue &&
+ "Expect ADDC with two result values: i32, glue");
+
+ // Check that the ADDC adds the low result of the S/UMUL_LOHI.
+ if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
+ AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
+ AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
+ AddcOp1->getOpcode() != ISD::SMUL_LOHI)
+ return SDValue();
+
+ // Look for the glued ADDE.
+ SDNode* AddeNode = AddcNode->getGluedUser();
+ if (AddeNode == NULL)
+ return SDValue();
+
+ // Make sure it is really an ADDE.
+ if (AddeNode->getOpcode() != ISD::ADDE)
+ return SDValue();
+
+ assert(AddeNode->getNumOperands() == 3 &&
+ AddeNode->getOperand(2).getValueType() == MVT::Glue &&
+ "ADDE node has the wrong inputs");
+
+ // Check for the triangle shape.
+ SDValue AddeOp0 = AddeNode->getOperand(0);
+ SDValue AddeOp1 = AddeNode->getOperand(1);
+
+ // Make sure that the ADDE operands are not coming from the same node.
+ if (AddeOp0.getNode() == AddeOp1.getNode())
+ return SDValue();
+
+ // Find the MUL_LOHI node walking up ADDE's operands.
+ bool IsLeftOperandMUL = false;
+ SDValue MULOp = findMUL_LOHI(AddeOp0);
+ if (MULOp == SDValue())
+ MULOp = findMUL_LOHI(AddeOp1);
+ else
+ IsLeftOperandMUL = true;
+ if (MULOp == SDValue())
+ return SDValue();
+
+ // Figure out the right opcode.
+ unsigned Opc = MULOp->getOpcode();
+ unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
+
+ // Figure out the high and low input values to the MLAL node.
+ SDValue* HiMul = &MULOp;
+ SDValue* HiAdd = NULL;
+ SDValue* LoMul = NULL;
+ SDValue* LowAdd = NULL;
+
+ if (IsLeftOperandMUL)
+ HiAdd = &AddeOp1;
+ else
+ HiAdd = &AddeOp0;
+
+ if (AddcOp0->getOpcode() == Opc) {
+ LoMul = &AddcOp0;
+ LowAdd = &AddcOp1;
+ }
+ if (AddcOp1->getOpcode() == Opc) {
+ LoMul = &AddcOp1;
+ LowAdd = &AddcOp0;
+ }
+
+ if (LoMul == NULL)
+ return SDValue();
+
+ if (LoMul->getNode() != HiMul->getNode())
+ return SDValue();
+
+ // Create the merged node.
+ SelectionDAG &DAG = DCI.DAG;
+
+ // Build operand list.
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(LoMul->getOperand(0));
+ Ops.push_back(LoMul->getOperand(1));
+ Ops.push_back(*LowAdd);
+ Ops.push_back(*HiAdd);
+
+ SDValue MLALNode = DAG.getNode(FinalOpc, AddcNode->getDebugLoc(),
+ DAG.getVTList(MVT::i32, MVT::i32),
+ &Ops[0], Ops.size());
+
+ // Replace the ADDs' nodes uses by the MLA node's values.
+ SDValue HiMLALResult(MLALNode.getNode(), 1);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
+
+ SDValue LoMLALResult(MLALNode.getNode(), 0);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
+
+ // Return original node to notify the driver to stop replacing.
+ SDValue resNode(AddcNode, 0);
+ return resNode;
+}
+
+/// PerformADDCCombine - Target-specific DAG combine transform from
+/// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL.
+static SDValue PerformADDCCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const ARMSubtarget *Subtarget) {
+ return AddCombineTo64bitMLAL(N, DCI, Subtarget);
+}
+
/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1. This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
@@ -7153,7 +7493,7 @@ static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
return Result;
// fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
- if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
+ if (N0.getNode()->hasOneUse()) {
SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
if (Result.getNode()) return Result;
}
@@ -7185,7 +7525,7 @@ static SDValue PerformSUBCombine(SDNode *N,
SDValue N1 = N->getOperand(1);
// fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
- if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
+ if (N1.getNode()->hasOneUse()) {
SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
if (Result.getNode()) return Result;
}
@@ -7313,41 +7653,6 @@ static SDValue PerformMULCombine(SDNode *N,
return SDValue();
}
-static bool isCMOVWithZeroOrAllOnesLHS(SDValue N, bool AllOnes) {
- return N.getOpcode() == ARMISD::CMOV && N.getNode()->hasOneUse() &&
- isZeroOrAllOnes(N.getOperand(0), AllOnes);
-}
-
-/// formConditionalOp - Combine an operation with a conditional move operand
-/// to form a conditional op. e.g. (or x, (cmov 0, y, cond)) => (or.cond x, y)
-/// (and x, (cmov -1, y, cond)) => (and.cond, x, y)
-static SDValue formConditionalOp(SDNode *N, SelectionDAG &DAG,
- bool Commutable) {
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
-
- bool isAND = N->getOpcode() == ISD::AND;
- bool isCand = isCMOVWithZeroOrAllOnesLHS(N1, isAND);
- if (!isCand && Commutable) {
- isCand = isCMOVWithZeroOrAllOnesLHS(N0, isAND);
- if (isCand)
- std::swap(N0, N1);
- }
- if (!isCand)
- return SDValue();
-
- unsigned Opc = 0;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected node");
- case ISD::AND: Opc = ARMISD::CAND; break;
- case ISD::OR: Opc = ARMISD::COR; break;
- case ISD::XOR: Opc = ARMISD::CXOR; break;
- }
- return DAG.getNode(Opc, N->getDebugLoc(), N->getValueType(0), N0,
- N1.getOperand(1), N1.getOperand(2), N1.getOperand(3),
- N1.getOperand(4));
-}
-
static SDValue PerformANDCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) {
@@ -7382,10 +7687,10 @@ static SDValue PerformANDCombine(SDNode *N,
}
if (!Subtarget->isThumb1Only()) {
- // (and x, (cmov -1, y, cond)) => (and.cond x, y)
- SDValue CAND = formConditionalOp(N, DAG, true);
- if (CAND.getNode())
- return CAND;
+ // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
+ SDValue Result = combineSelectAndUseCommutative(N, true, DCI);
+ if (Result.getNode())
+ return Result;
}
return SDValue();
@@ -7425,13 +7730,12 @@ static SDValue PerformORCombine(SDNode *N,
}
if (!Subtarget->isThumb1Only()) {
- // (or x, (cmov 0, y, cond)) => (or.cond x, y)
- SDValue COR = formConditionalOp(N, DAG, true);
- if (COR.getNode())
- return COR;
+ // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
+ SDValue Result = combineSelectAndUseCommutative(N, false, DCI);
+ if (Result.getNode())
+ return Result;
}
-
// The code below optimizes (or (and X, Y), Z).
// The AND operand needs to have a single user to make these optimizations
// profitable.
@@ -7593,10 +7897,10 @@ static SDValue PerformXORCombine(SDNode *N,
return SDValue();
if (!Subtarget->isThumb1Only()) {
- // (xor x, (cmov 0, y, cond)) => (xor.cond x, y)
- SDValue CXOR = formConditionalOp(N, DAG, true);
- if (CXOR.getNode())
- return CXOR;
+ // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
+ SDValue Result = combineSelectAndUseCommutative(N, false, DCI);
+ if (Result.getNode())
+ return Result;
}
return SDValue();
@@ -8746,6 +9050,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
switch (N->getOpcode()) {
default: break;
+ case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget);
case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget);
case ISD::SUB: return PerformSUBCombine(N, DCI);
case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
@@ -8807,8 +9112,8 @@ bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
}
bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
- if (!Subtarget->allowsUnalignedMem())
- return false;
+ // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
+ bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
switch (VT.getSimpleVT().SimpleTy) {
default:
@@ -8816,10 +9121,14 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
case MVT::i8:
case MVT::i16:
case MVT::i32:
- return true;
+ // Unaligned accesses can use (for example) LDRB, LDRH, LDR.
+ return AllowsUnaligned;
case MVT::f64:
- return Subtarget->hasNEON();
- // FIXME: VLD1 etc with standard alignment is legal.
+ case MVT::v2f64:
+ // For any little-endian target with NEON, we can support unaligned ld/st
+ // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
+ // A big-endian target may also explicitly support unaligned accesses.
+ return Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian());
}
}
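
A hedged usage sketch of the reworked hook: lowering code (memcpy expansion, legalization) queries it before emitting a wide access, so on a little-endian NEON target a 16-byte chunk can stay one vector op even at alignment 1. The function below is illustrative, not part of this diff:

    bool canUseWideUnalignedCopy(const TargetLowering &TLI) {
      // Legal unaligned v2f64 access => one vld1.8/vst1.8 pair per 16 bytes.
      return TLI.allowsUnalignedMemoryAccesses(MVT::v2f64);
    }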
@@ -8838,7 +9147,7 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
// See if we can use NEON instructions for this...
if (IsZeroVal &&
- !F->hasFnAttr(Attribute::NoImplicitFloat) &&
+ !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat) &&
Subtarget->hasNEON()) {
if (memOpAlign(SrcAlign, DstAlign, 16) && Size >= 16) {
return MVT::v4i32;
@@ -9632,7 +9941,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::arm_neon_vld4lane: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
// Conservatively set memVT to the entire set of vectors loaded.
- uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
+ uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
@@ -9657,7 +9966,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Type *ArgTy = I.getArgOperand(ArgI)->getType();
if (!ArgTy->isVectorTy())
break;
- NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
+ NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
}
Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
Info.ptrVal = I.getArgOperand(0);
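
A worked example of the memVT computation above (illustrative): vld4.32 returns four v4i32 vectors, so the aggregate allocation size is 64 bytes and the whole region is conservatively modeled as a single access.

    // vld4.32 -> { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }
    //   getTypeAllocSize = 4 * 16 = 64 bytes
    //   NumElts          = 64 / 8  = 8
    //   memVT            = v8i64   (one conservative 64-byte access)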
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
index 51d1205..4eb3b2c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -63,9 +63,6 @@ namespace llvm {
FMSTAT, // ARM fmstat instruction.
CMOV, // ARM conditional move instructions.
- CAND, // ARM conditional and instructions.
- COR, // ARM conditional or instructions.
- CXOR, // ARM conditional xor instructions.
BCC_i64,
@@ -176,6 +173,9 @@ namespace llvm {
VMULLs, // ...signed
VMULLu, // ...unsigned
+ UMLAL, // 64-bit Unsigned Accumulate Multiply
+ SMLAL, // 64-bit Signed Accumulate Multiply
+
// Operands of the standard BUILD_VECTOR node are not legalized, which
// is fine if BUILD_VECTORs are always lowered to shuffles or other
// operations, but for ARM some BUILD_VECTORs are legal as-is and their
@@ -260,6 +260,11 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
+ virtual bool isSelectSupported(SelectSupportKind Kind) const {
+ // ARM does not support selects of vector values with a scalar condition.
+ return (Kind != ScalarCondVectorVal);
+ }
+
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
virtual EVT getSetCCResultType(EVT VT) const;
@@ -461,7 +466,11 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc dl, SDValue &Chain, unsigned ArgOffset)
+ DebugLoc dl, SDValue &Chain,
+ const Value *OrigArg,
+ unsigned OffsetFromOrigArg,
+ unsigned ArgOffset,
+ bool ForceMutable = false)
const;
void computeRegArea(CCState &CCInfo, MachineFunction &MF,
@@ -472,7 +481,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
/// HandleByVal - Target-specific cleanup for ByVal support.
- virtual void HandleByVal(CCState *, unsigned &) const;
+ virtual void HandleByVal(CCState *, unsigned &, unsigned) const;
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
index c8966fb..67a6820 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -846,6 +846,23 @@ class AMiscA1I<bits<8> opcod, bits<4> opc7_4, dag oops, dag iops,
let Inst{3-0} = Rm;
}
+// Division instructions.
+class ADivA1I<bits<3> opcod, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
+ : I<oops, iops, AddrModeNone, 4, IndexModeNone, ArithMiscFrm, itin,
+ opc, asm, "", pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+ let Inst{27-23} = 0b01110;
+ let Inst{22-20} = opcod;
+ let Inst{19-16} = Rd;
+ let Inst{15-12} = 0b1111;
+ let Inst{11-8} = Rm;
+ let Inst{7-4} = 0b0001;
+ let Inst{3-0} = Rn;
+}
+
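
For reference, this matches the A1 encoding of SDIV/UDIV: Rd sits in bits 19-16 and Rn in bits 3-0 (reversed relative to most data-processing encodings), while the fixed 0b1111 in bits 15-12 and 0b0001 in bits 7-4 are the encoding's required constant fields.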
// PKH instructions
def PKHLSLAsmOperand : ImmAsmOperand {
let Name = "PKHLSLImm";
@@ -893,6 +910,10 @@ class ARMV5TPat<dag pattern, dag result> : Pat<pattern, result> {
class ARMV5TEPat<dag pattern, dag result> : Pat<pattern, result> {
list<Predicate> Predicates = [IsARM, HasV5TE];
}
+// ARMV5MOPat - Same as ARMV5TEPat with UseMulOps.
+class ARMV5MOPat<dag pattern, dag result> : Pat<pattern, result> {
+ list<Predicate> Predicates = [IsARM, HasV5TE, UseMulOps];
+}
class ARMV6Pat<dag pattern, dag result> : Pat<pattern, result> {
list<Predicate> Predicates = [IsARM, HasV6];
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index 31b0c41..a0b6f24 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -13,13 +13,17 @@
#include "ARMInstrInfo.h"
#include "ARM.h"
+#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
+#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
using namespace llvm;
@@ -84,3 +88,61 @@ unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
return 0;
}
+
+namespace {
+ /// ARMCGBR - Create Global Base Reg pass. This initializes the PIC
+ /// global base register for ARM ELF.
+ struct ARMCGBR : public MachineFunctionPass {
+ static char ID;
+ ARMCGBR() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF) {
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ if (AFI->getGlobalBaseReg() == 0)
+ return false;
+
+ const ARMTargetMachine *TM =
+ static_cast<const ARMTargetMachine *>(&MF.getTarget());
+ if (TM->getRelocationModel() != Reloc::PIC_)
+ return false;
+
+ LLVMContext* Context = &MF.getFunction()->getContext();
+ GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
+ GlobalValue::ExternalLinkage, 0,
+ "_GLOBAL_OFFSET_TABLE_");
+ unsigned Id = AFI->createPICLabelUId();
+ ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id);
+ unsigned Align = TM->getDataLayout()->getPrefTypeAlignment(GV->getType());
+ unsigned Idx = MF.getConstantPool()->getConstantPoolIndex(CPV, Align);
+
+ MachineBasicBlock &FirstMBB = MF.front();
+ MachineBasicBlock::iterator MBBI = FirstMBB.begin();
+ DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
+ unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
+ unsigned Opc = TM->getSubtarget<ARMSubtarget>().isThumb2() ?
+ ARM::t2LDRpci : ARM::LDRcp;
+ const TargetInstrInfo &TII = *TM->getInstrInfo();
+ MachineInstrBuilder MIB = BuildMI(FirstMBB, MBBI, DL,
+ TII.get(Opc), GlobalBaseReg)
+ .addConstantPoolIndex(Idx);
+ if (Opc == ARM::LDRcp)
+ MIB.addImm(0);
+ AddDefaultPred(MIB);
+
+ return true;
+ }
+
+ virtual const char *getPassName() const {
+ return "ARM PIC Global Base Reg Initialization";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+char ARMCGBR::ID = 0;
+FunctionPass*
+llvm::createARMGlobalBaseRegPass() { return new ARMCGBR(); }
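
A hedged sketch of how such a MachineFunctionPass is typically registered; the actual site is outside this diff, so the hook and ordering below are assumptions:

    bool ARMPassConfig::addInstSelector() {
      addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
      // Run after selection so AFI->getGlobalBaseReg() has been populated
      // (e.g. by fast-isel) before the _GLOBAL_OFFSET_TABLE_ load is
      // materialized; the pass bails out when the register was never requested.
      addPass(createARMGlobalBaseRegPass());
      return false;
    }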
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
index 992aba5..df2e55e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -83,6 +83,13 @@ def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
SDTCisInt<0>,
SDTCisVT<1, i32>,
SDTCisVT<4, i32>]>;
+
+def SDT_ARM64bitmlal : SDTypeProfile<2,4, [ SDTCisVT<0, i32>, SDTCisVT<1, i32>,
+ SDTCisVT<2, i32>, SDTCisVT<3, i32>,
+ SDTCisVT<4, i32>, SDTCisVT<5, i32> ] >;
+def ARMUmlal : SDNode<"ARMISD::UMLAL", SDT_ARM64bitmlal>;
+def ARMSmlal : SDNode<"ARMISD::SMLAL", SDT_ARM64bitmlal>;
+
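
The profile reads more easily with the operand roles spelled out: two i32 results (RdLo, RdHi) and four i32 operands (Rn, Rm, plus the low and high halves of the 64-bit accumulator). A hedged sketch of building such a node from a combine — the matching code is not shown in this diff, and the value names are placeholders:

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
    SDValue Ops[] = { Rn, Rm, AccLo, AccHi };    // hypothetical inputs
    SDValue MLAL = DAG.getNode(ARMISD::UMLAL, dl, VTs, Ops, 4);
    SDValue NewLo = MLAL.getValue(0);            // RdLo
    SDValue NewHi = MLAL.getValue(1);            // RdHi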
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
def ARMWrapperDYN : SDNode<"ARMISD::WrapperDYN", SDTIntUnaryOp>;
@@ -90,9 +97,10 @@ def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>;
def ARMcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_ARMCallSeqStart,
- [SDNPHasChain, SDNPOutGlue]>;
+ [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
def ARMcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_ARMCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+ [SDNPHasChain, SDNPSideEffect,
+ SDNPOptInGlue, SDNPOutGlue]>;
def ARMcopystructbyval : SDNode<"ARMISD::COPY_STRUCT_BYVAL" ,
SDT_ARMStructByVal,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue,
@@ -148,14 +156,16 @@ def ARMsube : SDNode<"ARMISD::SUBE", SDTBinaryArithWithFlagsInOut>;
def ARMthread_pointer: SDNode<"ARMISD::THREAD_POINTER", SDT_ARMThreadPointer>;
def ARMeh_sjlj_setjmp: SDNode<"ARMISD::EH_SJLJ_SETJMP",
- SDT_ARMEH_SJLJ_Setjmp, [SDNPHasChain]>;
+ SDT_ARMEH_SJLJ_Setjmp,
+ [SDNPHasChain, SDNPSideEffect]>;
def ARMeh_sjlj_longjmp: SDNode<"ARMISD::EH_SJLJ_LONGJMP",
- SDT_ARMEH_SJLJ_Longjmp, [SDNPHasChain]>;
+ SDT_ARMEH_SJLJ_Longjmp,
+ [SDNPHasChain, SDNPSideEffect]>;
def ARMMemBarrier : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIER,
- [SDNPHasChain]>;
+ [SDNPHasChain, SDNPSideEffect]>;
def ARMMemBarrierMCR : SDNode<"ARMISD::MEMBARRIER_MCR", SDT_ARMMEMBARRIER,
- [SDNPHasChain]>;
+ [SDNPHasChain, SDNPSideEffect]>;
def ARMPreload : SDNode<"ARMISD::PRELOAD", SDT_ARMPREFETCH,
[SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
@@ -197,6 +207,8 @@ def HasFP16 : Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<"FeatureFP16","half-float">;
def HasDivide : Predicate<"Subtarget->hasDivide()">,
AssemblerPredicate<"FeatureHWDiv", "divide">;
+def HasDivideInARM : Predicate<"Subtarget->hasDivideInARMMode()">,
+ AssemblerPredicate<"FeatureHWDivARM">;
def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">,
AssemblerPredicate<"FeatureT2XtPk",
"pack/extract">;
@@ -232,6 +244,7 @@ def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
def UseMovt : Predicate<"Subtarget->useMovt()">;
def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
+def UseMulOps : Predicate<"Subtarget->useMulOps()">;
// Prefer fused MAC for fp mul + add over fp VMLA / VMLS if they are available.
// But only select them if more precision in FP computation is allowed.
@@ -242,6 +255,20 @@ def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion =="
def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4() || "
"Subtarget->isTargetDarwin()">;
+// VGETLNi32 is microcoded on Swift - prefer VMOV.
+def HasFastVGETLNi32 : Predicate<"!Subtarget->isSwift()">;
+def HasSlowVGETLNi32 : Predicate<"Subtarget->isSwift()">;
+
+// VDUP.32 is microcoded on Swift - prefer VMOV.
+def HasFastVDUP32 : Predicate<"!Subtarget->isSwift()">;
+def HasSlowVDUP32 : Predicate<"Subtarget->isSwift()">;
+
+// Cortex-A9 prefers VMOVSR to VMOVDRR even when using NEON for scalar FP, as
+// this allows more effective execution domain optimization. See
+// setExecutionDomain().
+def UseVMOVSR : Predicate<"Subtarget->isCortexA9() || !Subtarget->useNEONForSinglePrecisionFP()">;
+def DontUseVMOVSR : Predicate<"!Subtarget->isCortexA9() && Subtarget->useNEONForSinglePrecisionFP()">;
+
def IsLE : Predicate<"TLI.isLittleEndian()">;
def IsBE : Predicate<"TLI.isBigEndian()">;
@@ -256,15 +283,13 @@ class RegConstraint<string C> {
// ARM specific transformation functions and pattern fragments.
//
-// imm_neg_XFORM - Return a imm value packed into the format described for
-// imm_neg defs below.
+// imm_neg_XFORM - Return the negation of an i32 immediate value.
def imm_neg_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
}]>;
-// so_imm_not_XFORM - Return a so_imm value packed into the format described for
-// so_imm_not def below.
-def so_imm_not_XFORM : SDNodeXForm<imm, [{
+// imm_not_XFORM - Return the complement of an i32 immediate value.
+def imm_not_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
}]>;
@@ -275,7 +300,7 @@ def imm16_31 : ImmLeaf<i32, [{
def so_imm_neg_asmoperand : AsmOperandClass { let Name = "ARMSOImmNeg"; }
def so_imm_neg : Operand<i32>, PatLeaf<(imm), [{
- int64_t Value = -(int)N->getZExtValue();
+ unsigned Value = -(unsigned)N->getZExtValue();
return Value && ARM_AM::getSOImmVal(Value) != -1;
}], imm_neg_XFORM> {
let ParserMatchClass = so_imm_neg_asmoperand;
@@ -287,7 +312,7 @@ def so_imm_neg : Operand<i32>, PatLeaf<(imm), [{
def so_imm_not_asmoperand : AsmOperandClass { let Name = "ARMSOImmNot"; }
def so_imm_not : Operand<i32>, PatLeaf<(imm), [{
return ARM_AM::getSOImmVal(~(uint32_t)N->getZExtValue()) != -1;
- }], so_imm_not_XFORM> {
+ }], imm_not_XFORM> {
let ParserMatchClass = so_imm_not_asmoperand;
}
@@ -1791,12 +1816,15 @@ def ADR : AI1<{0,?,?,0}, (outs GPR:$Rd), (ins adrlabel:$label),
let Inst{15-12} = Rd;
let Inst{11-0} = label{11-0};
}
+
+let hasSideEffects = 1 in {
def LEApcrel : ARMPseudoInst<(outs GPR:$Rd), (ins i32imm:$label, pred:$p),
4, IIC_iALUi, []>;
def LEApcrelJT : ARMPseudoInst<(outs GPR:$Rd),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
4, IIC_iALUi, []>;
+}
//===----------------------------------------------------------------------===//
// Control Flow Instructions.
@@ -3079,15 +3107,19 @@ def : ARMPat<(ARMaddc GPR:$src, so_imm_neg:$imm),
(SUBSri GPR:$src, so_imm_neg:$imm)>;
def : ARMPat<(add GPR:$src, imm0_65535_neg:$imm),
- (SUBrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>;
+ (SUBrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>,
+ Requires<[IsARM, HasV6T2]>;
def : ARMPat<(ARMaddc GPR:$src, imm0_65535_neg:$imm),
- (SUBSrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>;
+ (SUBSrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>,
+ Requires<[IsARM, HasV6T2]>;
// The with-carry-in form matches bitwise not instead of the negation.
// Effectively, the inverse interpretation of the carry flag already accounts
// for part of the negation.
def : ARMPat<(ARMadde GPR:$src, so_imm_not:$imm, CPSR),
(SBCri GPR:$src, so_imm_not:$imm)>;
+def : ARMPat<(ARMadde GPR:$src, imm0_65535_neg:$imm, CPSR),
+ (SBCrr GPR:$src, (MOVi16 (imm_not_XFORM imm:$imm)))>;
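
A short derivation of why the carry-in form matches bitwise not (all arithmetic mod 2^32): adde(x, ~imm, C) = x + ~imm + C = x + (2^32 - 1 - imm) + C = x - imm - (1 - C), and ARM's SBC computes exactly Rn - Op2 - (1 - C) under the inverted-borrow convention, so substituting ~imm is an identity, not an approximation.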
// Note: These are implemented in C++ code, because they have to generate
// ADD/SUBrs instructions, which use a complex pattern that a xform function
@@ -3399,6 +3431,18 @@ class AsMul1I64<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
+class AsMla1I64<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AsMul1I<opcod, oops, iops, itin, opc, asm, pattern> {
+ bits<4> RdLo;
+ bits<4> RdHi;
+ bits<4> Rm;
+ bits<4> Rn;
+ let Inst{19-16} = RdHi;
+ let Inst{15-12} = RdLo;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
+}
// FIXME: The v5 pseudos are only necessary for the additional Constraint
// property. Remove them when it's possible to add those properties
@@ -3419,13 +3463,13 @@ def MULv5: ARMPseudoExpand<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm,
4, IIC_iMUL32,
[(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))],
(MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>,
- Requires<[IsARM, NoV6]>;
+ Requires<[IsARM, NoV6, UseMulOps]>;
}
def MLA : AsMul1I32<0b0000001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
- Requires<[IsARM, HasV6]> {
+ Requires<[IsARM, HasV6, UseMulOps]> {
bits<4> Ra;
let Inst{15-12} = Ra;
}
@@ -3441,7 +3485,7 @@ def MLAv5: ARMPseudoExpand<(outs GPR:$Rd),
def MLS : AMul1I<0b0000011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "mls", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (sub GPR:$Ra, (mul GPR:$Rn, GPR:$Rm)))]>,
- Requires<[IsARM, HasV6T2]> {
+ Requires<[IsARM, HasV6T2, UseMulOps]> {
bits<4> Rd;
bits<4> Rm;
bits<4> Rn;
@@ -3481,14 +3525,14 @@ def UMULLv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
}
// Multiply + accumulate
-def SMLAL : AsMul1I64<0b0000111, (outs GPR:$RdLo, GPR:$RdHi),
- (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
+def SMLAL : AsMla1I64<0b0000111, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi), IIC_iMAC64,
"smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
- Requires<[IsARM, HasV6]>;
-def UMLAL : AsMul1I64<0b0000101, (outs GPR:$RdLo, GPR:$RdHi),
- (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">, Requires<[IsARM, HasV6]>;
+def UMLAL : AsMla1I64<0b0000101, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi), IIC_iMAC64,
"umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
- Requires<[IsARM, HasV6]>;
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">, Requires<[IsARM, HasV6]>;
def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
@@ -3504,17 +3548,22 @@ def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi),
let Inst{3-0} = Rn;
}
-let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in {
+let Constraints = "$RLo = $RdLo,$RHi = $RdHi" in {
def SMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
- (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, pred:$p, cc_out:$s),
4, IIC_iMAC64, [],
- (SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ (SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi,
+ pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def UMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
- (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, pred:$p, cc_out:$s),
4, IIC_iMAC64, [],
- (UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ (UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi,
+ pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
+}
+
+let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in {
def UMAALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, pred:$p),
4, IIC_iMAC64, [],
@@ -3542,7 +3591,7 @@ def SMMLA : AMul2Ia <0b0111010, 0b0001, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmla", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add (mulhs GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
- Requires<[IsARM, HasV6]>;
+ Requires<[IsARM, HasV6, UseMulOps]>;
def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
@@ -3552,7 +3601,7 @@ def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd),
def SMMLS : AMul2Ia <0b0111010, 0b1101, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmls", "\t$Rd, $Rn, $Rm, $Ra", []>,
- Requires<[IsARM, HasV6]>;
+ Requires<[IsARM, HasV6, UseMulOps]>;
def SMMLSR : AMul2Ia <0b0111010, 0b1111, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
@@ -3606,7 +3655,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
[(set GPRnopc:$Rd, (add GPR:$Ra,
(opnode (sext_inreg GPRnopc:$Rn, i16),
(sext_inreg GPRnopc:$Rm, i16))))]>,
- Requires<[IsARM, HasV5TE]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>;
def BT : AMulxyIa<0b0001000, 0b10, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
@@ -3614,7 +3663,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
[(set GPRnopc:$Rd,
(add GPR:$Ra, (opnode (sext_inreg GPRnopc:$Rn, i16),
(sra GPRnopc:$Rm, (i32 16)))))]>,
- Requires<[IsARM, HasV5TE]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>;
def TB : AMulxyIa<0b0001000, 0b01, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
@@ -3622,7 +3671,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
[(set GPRnopc:$Rd,
(add GPR:$Ra, (opnode (sra GPRnopc:$Rn, (i32 16)),
(sext_inreg GPRnopc:$Rm, i16))))]>,
- Requires<[IsARM, HasV5TE]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>;
def TT : AMulxyIa<0b0001000, 0b11, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
@@ -3630,7 +3679,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
[(set GPRnopc:$Rd,
(add GPR:$Ra, (opnode (sra GPRnopc:$Rn, (i32 16)),
(sra GPRnopc:$Rm, (i32 16)))))]>,
- Requires<[IsARM, HasV5TE]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>;
def WB : AMulxyIa<0b0001001, 0b00, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
@@ -3638,7 +3687,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
[(set GPRnopc:$Rd,
(add GPR:$Ra, (sra (opnode GPRnopc:$Rn,
(sext_inreg GPRnopc:$Rm, i16)), (i32 16))))]>,
- Requires<[IsARM, HasV5TE]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>;
def WT : AMulxyIa<0b0001001, 0b10, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
@@ -3646,7 +3695,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
[(set GPRnopc:$Rd,
(add GPR:$Ra, (sra (opnode GPRnopc:$Rn,
(sra GPRnopc:$Rm, (i32 16))), (i32 16))))]>,
- Requires<[IsARM, HasV5TE]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>;
}
}
@@ -3749,6 +3798,19 @@ defm SMUA : AI_sdml<0, "smua">;
defm SMUS : AI_sdml<1, "smus">;
//===----------------------------------------------------------------------===//
+// Division Instructions (ARMv7-A with virtualization extension)
+//
+def SDIV : ADivA1I<0b001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), IIC_iDIV,
+ "sdiv", "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (sdiv GPR:$Rn, GPR:$Rm))]>,
+ Requires<[IsARM, HasDivideInARM]>;
+
+def UDIV : ADivA1I<0b011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), IIC_iDIV,
+ "udiv", "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (udiv GPR:$Rn, GPR:$Rm))]>,
+ Requires<[IsARM, HasDivideInARM]>;
+
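
A hedged usage example: on a core reporting FeatureHWDivARM, plain C division selects these instructions directly instead of calling the EABI helpers.

    unsigned uquot(unsigned a, unsigned b) { return a / b; } // -> udiv
    int      squot(int a, int b)           { return a / b; } // -> sdiv
    // Without HasDivideInARM, ARM-mode code lowers both to the
    // __aeabi_uidiv / __aeabi_idiv libcalls.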
+//===----------------------------------------------------------------------===//
// Misc. Arithmetic Instructions.
//
@@ -3986,48 +4048,6 @@ def MVNCCi : ARMPseudoInst<(outs GPR:$Rd),
[/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd">;
-// Conditional instructions
-multiclass AsI1_bincc_irs<Instruction iri, Instruction irr, Instruction irsi,
- Instruction irsr,
- InstrItinClass iii, InstrItinClass iir,
- InstrItinClass iis> {
- def ri : ARMPseudoExpand<(outs GPR:$Rd),
- (ins GPR:$Rfalse, GPR:$Rn, so_imm:$imm,
- pred:$p, cc_out:$s),
- 4, iii, [],
- (iri GPR:$Rd, GPR:$Rn, so_imm:$imm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
- def rr : ARMPseudoExpand<(outs GPR:$Rd),
- (ins GPR:$Rfalse, GPR:$Rn, GPR:$Rm,
- pred:$p, cc_out:$s),
- 4, iir, [],
- (irr GPR:$Rd, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
- def rsi : ARMPseudoExpand<(outs GPR:$Rd),
- (ins GPR:$Rfalse, GPR:$Rn, so_reg_imm:$shift,
- pred:$p, cc_out:$s),
- 4, iis, [],
- (irsi GPR:$Rd, GPR:$Rn, so_reg_imm:$shift, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
- def rsr : ARMPseudoExpand<(outs GPRnopc:$Rd),
- (ins GPRnopc:$Rfalse, GPRnopc:$Rn, so_reg_reg:$shift,
- pred:$p, cc_out:$s),
- 4, iis, [],
- (irsr GPR:$Rd, GPR:$Rn, so_reg_reg:$shift, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
-}
-
-defm ANDCC : AsI1_bincc_irs<ANDri, ANDrr, ANDrsi, ANDrsr,
- IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
-defm ORRCC : AsI1_bincc_irs<ORRri, ORRrr, ORRrsi, ORRrsr,
- IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
-defm EORCC : AsI1_bincc_irs<EORri, EORrr, EORrsi, EORrsr,
- IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
-defm ADDCC : AsI1_bincc_irs<ADDri, ADDrr, ADDrsi, ADDrsr,
- IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
-defm SUBCC : AsI1_bincc_irs<SUBri, SUBrr, SUBrsi, SUBrsr,
- IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
-
} // neverHasSideEffects
@@ -4723,21 +4743,13 @@ def Int_eh_sjlj_longjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$scratch),
Requires<[IsARM, IsIOS]>;
}
-// eh.sjlj.dispatchsetup pseudo-instructions.
-// These pseudos are used for both ARM and Thumb2. Any differences are
-// handled when the pseudo is expanded (which happens before any passes
-// that need the instruction size).
-let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR,
- Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15 ],
- isBarrier = 1 in
+// eh.sjlj.dispatchsetup pseudo-instruction.
+// This pseudo is used for both ARM and Thumb. Any differences are handled when
+// the pseudo is expanded (which happens before any passes that need the
+// instruction size).
+let isBarrier = 1 in
def Int_eh_sjlj_dispatchsetup : PseudoInst<(outs), (ins), NoItinerary, []>;
-let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR ],
- isBarrier = 1 in
-def Int_eh_sjlj_dispatchsetup_nofp : PseudoInst<(outs), (ins), NoItinerary, []>;
-
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -4841,32 +4853,32 @@ def : ARMV5TEPat<(sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))),
def : ARMV5TEPat<(sra (mul GPR:$a, sext_16_node:$b), (i32 16)),
(SMULWB GPR:$a, GPR:$b)>;
-def : ARMV5TEPat<(add GPR:$acc,
+def : ARMV5MOPat<(add GPR:$acc,
(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16)))),
(SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
+def : ARMV5MOPat<(add GPR:$acc,
(mul sext_16_node:$a, sext_16_node:$b)),
(SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
+def : ARMV5MOPat<(add GPR:$acc,
(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra GPR:$b, (i32 16)))),
(SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
+def : ARMV5MOPat<(add GPR:$acc,
(mul sext_16_node:$a, (sra GPR:$b, (i32 16)))),
(SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
+def : ARMV5MOPat<(add GPR:$acc,
(mul (sra GPR:$a, (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16)))),
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
+def : ARMV5MOPat<(add GPR:$acc,
(mul (sra GPR:$a, (i32 16)), sext_16_node:$b)),
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
+def : ARMV5MOPat<(add GPR:$acc,
(sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))),
(i32 16))),
(SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
+def : ARMV5MOPat<(add GPR:$acc,
(sra (mul GPR:$a, sext_16_node:$b), (i32 16))),
(SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
index 048d340..3cf213c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -398,6 +398,20 @@ def VecListFourQWordIndexed : Operand<i32> {
let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
}
+def dword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() >= 8;
+}]>;
+def dword_alignedstore : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() >= 8;
+}]>;
+def word_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() == 4;
+}]>;
+def word_alignedstore : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() == 4;
+}]>;
def hword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() == 2;
}]>;
@@ -1980,7 +1994,7 @@ def VST1LNd8 : VST1LN<0b0000, {?,?,?,0}, "8", v8i8, truncstorei8,
def VST1LNd16 : VST1LN<0b0100, {?,?,0,?}, "16", v4i16, truncstorei16,
NEONvgetlaneu, addrmode6> {
let Inst{7-6} = lane{1-0};
- let Inst{4} = Rn{5};
+ let Inst{4} = Rn{4};
}
def VST1LNd32 : VST1LN<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt,
@@ -2023,7 +2037,7 @@ def VST1LNd8_UPD : VST1LNWB<0b0000, {?,?,?,0}, "8", v8i8, post_truncsti8,
def VST1LNd16_UPD : VST1LNWB<0b0100, {?,?,0,?}, "16", v4i16, post_truncsti16,
NEONvgetlaneu, addrmode6> {
let Inst{7-6} = lane{1-0};
- let Inst{4} = Rn{5};
+ let Inst{4} = Rn{4};
}
def VST1LNd32_UPD : VST1LNWB<0b1000, {?,0,?,?}, "32", v2i32, post_store,
extractelt, addrmode6oneL32> {
@@ -2273,6 +2287,25 @@ def : Pat<(f64 (non_word_alignedload addrmode6:$addr)),
def : Pat<(non_word_alignedstore (f64 DPR:$value), addrmode6:$addr),
(VST1d64 addrmode6:$addr, DPR:$value)>, Requires<[IsBE]>;
+// Use vld1/vst1 for Q and QQ. Also use them for unaligned v2f64
+// load / store if it's legal.
+def : Pat<(v2f64 (dword_alignedload addrmode6:$addr)),
+ (VLD1q64 addrmode6:$addr)>;
+def : Pat<(dword_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
+ (VST1q64 addrmode6:$addr, QPR:$value)>;
+def : Pat<(v2f64 (word_alignedload addrmode6:$addr)),
+ (VLD1q32 addrmode6:$addr)>;
+def : Pat<(word_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
+ (VST1q32 addrmode6:$addr, QPR:$value)>;
+def : Pat<(v2f64 (hword_alignedload addrmode6:$addr)),
+ (VLD1q16 addrmode6:$addr)>, Requires<[IsLE]>;
+def : Pat<(hword_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
+ (VST1q16 addrmode6:$addr, QPR:$value)>, Requires<[IsLE]>;
+def : Pat<(v2f64 (byte_alignedload addrmode6:$addr)),
+ (VLD1q8 addrmode6:$addr)>, Requires<[IsLE]>;
+def : Pat<(byte_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
+ (VST1q8 addrmode6:$addr, QPR:$value)>, Requires<[IsLE]>;
+
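
A hedged illustration of what the alignment-keyed patterns buy (assumes Clang/GCC vector extensions and a little-endian NEON target): vld1/vst1's element size is chosen to match the alignment the access actually has, and the byte/halfword forms are LE-only because vld1.8/vld1.16 would swap lanes on a big-endian target.

    typedef double v2f64 __attribute__((vector_size(16), aligned(8)));
    // Only 8-byte alignment is known here, yet the load can still be a
    // single vld1.64 via dword_alignedload instead of being expanded.
    v2f64 load8(const v2f64 *p) { return *p; }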
//===----------------------------------------------------------------------===//
// NEON pattern fragments
//===----------------------------------------------------------------------===//
@@ -4455,10 +4488,36 @@ def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
"vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
[(set DPR:$Vd,
(v2i32 (NEONvbsl DPR:$src1, DPR:$Vn, DPR:$Vm)))]>;
+def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 DPR:$src1),
+ (v8i8 DPR:$Vn), (v8i8 DPR:$Vm))),
+ (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 DPR:$src1),
+ (v4i16 DPR:$Vn), (v4i16 DPR:$Vm))),
+ (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 DPR:$src1),
+ (v2i32 DPR:$Vn), (v2i32 DPR:$Vm))),
+ (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 DPR:$src1),
+ (v2f32 DPR:$Vn), (v2f32 DPR:$Vm))),
+ (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 DPR:$src1),
+ (v1i64 DPR:$Vn), (v1i64 DPR:$Vm))),
+ (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasNEON]>;
def : Pat<(v2i32 (or (and DPR:$Vn, DPR:$Vd),
(and DPR:$Vm, (vnotd DPR:$Vd)))),
- (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>;
+ (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasNEON]>;
+
+def : Pat<(v1i64 (or (and DPR:$Vn, DPR:$Vd),
+ (and DPR:$Vm, (vnotd DPR:$Vd)))),
+ (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasNEON]>;
def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
(ins QPR:$src1, QPR:$Vn, QPR:$Vm),
@@ -4467,9 +4526,35 @@ def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
[(set QPR:$Vd,
(v4i32 (NEONvbsl QPR:$src1, QPR:$Vn, QPR:$Vm)))]>;
+def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 QPR:$src1),
+ (v16i8 QPR:$Vn), (v16i8 QPR:$Vm))),
+ (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 QPR:$src1),
+ (v8i16 QPR:$Vn), (v8i16 QPR:$Vm))),
+ (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 QPR:$src1),
+ (v4i32 QPR:$Vn), (v4i32 QPR:$Vm))),
+ (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 QPR:$src1),
+ (v4f32 QPR:$Vn), (v4f32 QPR:$Vm))),
+ (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 QPR:$src1),
+ (v2i64 QPR:$Vn), (v2i64 QPR:$Vm))),
+ (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasNEON]>;
+
def : Pat<(v4i32 (or (and QPR:$Vn, QPR:$Vd),
(and QPR:$Vm, (vnotq QPR:$Vd)))),
- (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>;
+ (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasNEON]>;
+def : Pat<(v2i64 (or (and QPR:$Vn, QPR:$Vd),
+ (and QPR:$Vm, (vnotq QPR:$Vd)))),
+ (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasNEON]>;
// VBIF : Vector Bitwise Insert if False
// like VBSL but with: "vbif $dst, $src3, $src1", "$src2 = $dst",
@@ -4983,7 +5068,8 @@ def VGETLNi32 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, 0b00,
(outs GPR:$R), (ins DPR:$V, VectorIndex32:$lane),
IIC_VMOVSI, "vmov", "32", "$R, $V$lane",
[(set GPR:$R, (extractelt (v2i32 DPR:$V),
- imm:$lane))]> {
+ imm:$lane))]>,
+ Requires<[HasNEON, HasFastVGETLNi32]> {
let Inst{21} = lane{0};
}
// def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
@@ -5006,7 +5092,16 @@ def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane),
def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
(VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src,
(DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane))>;
+ (SubReg_i32_lane imm:$lane))>,
+ Requires<[HasNEON, HasFastVGETLNi32]>;
+def : Pat<(extractelt (v2i32 DPR:$src), imm:$lane),
+ (COPY_TO_REGCLASS
+ (i32 (EXTRACT_SUBREG DPR:$src, (SSubReg_f32_reg imm:$lane))), GPR)>,
+ Requires<[HasNEON, HasSlowVGETLNi32]>;
+def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
+ (COPY_TO_REGCLASS
+ (i32 (EXTRACT_SUBREG QPR:$src, (SSubReg_f32_reg imm:$lane))), GPR)>,
+ Requires<[HasNEON, HasSlowVGETLNi32]>;
def : Pat<(extractelt (v2f32 DPR:$src1), imm:$src2),
(EXTRACT_SUBREG (v2f32 (COPY_TO_REGCLASS (v2f32 DPR:$src1),DPR_VFP2)),
(SSubReg_f32_reg imm:$src2))>;
@@ -5117,14 +5212,23 @@ class VDUPQ<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
def VDUP8d : VDUPD<0b11101100, 0b00, "8", v8i8>;
def VDUP16d : VDUPD<0b11101000, 0b01, "16", v4i16>;
-def VDUP32d : VDUPD<0b11101000, 0b00, "32", v2i32>;
+def VDUP32d : VDUPD<0b11101000, 0b00, "32", v2i32>,
+ Requires<[HasNEON, HasFastVDUP32]>;
def VDUP8q : VDUPQ<0b11101110, 0b00, "8", v16i8>;
def VDUP16q : VDUPQ<0b11101010, 0b01, "16", v8i16>;
def VDUP32q : VDUPQ<0b11101010, 0b00, "32", v4i32>;
-def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32d GPR:$R)>;
+// NEONvdup patterns for uarchs with fast VDUP.32.
+def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32d GPR:$R)>,
+ Requires<[HasNEON,HasFastVDUP32]>;
def : Pat<(v4f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32q GPR:$R)>;
+// NEONvdup patterns for uarchs with slow VDUP.32 - use VMOVDRR instead.
+def : Pat<(v2i32 (NEONvdup (i32 GPR:$R))), (VMOVDRR GPR:$R, GPR:$R)>,
+ Requires<[HasNEON,HasSlowVDUP32]>;
+def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VMOVDRR GPR:$R, GPR:$R)>,
+ Requires<[HasNEON,HasSlowVDUP32]>;
+
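
A hedged illustration of the Swift-specific choice (vector-extension syntax again): a 32-bit splat such as

    typedef int v2i32 __attribute__((vector_size(8)));
    v2i32 splat(int x) { return (v2i32){ x, x }; }

selects vdup.32 on most cores, but on Swift, where VDUP.32 is microcoded, the patterns above pick vmov d0, r0, r0 instead.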
// VDUP : Vector Duplicate Lane (from scalar to all elements)
class VDUPLND<bits<4> op19_16, string OpcodeStr, string Dt,
@@ -5561,6 +5665,11 @@ def : N2VSPat<arm_ftoui, VCVTf2ud>;
def : N2VSPat<arm_sitof, VCVTs2fd>;
def : N2VSPat<arm_uitof, VCVTu2fd>;
+// Prefer VMOVDRR for i32 -> f32 bitcasts; it can write all DPR registers.
+def : Pat<(f32 (bitconvert GPR:$a)),
+ (EXTRACT_SUBREG (VMOVDRR GPR:$a, GPR:$a), ssub_0)>,
+ Requires<[HasNEON, DontUseVMOVSR]>;
+
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
index 554f6d9..ae7a5c0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -223,6 +223,7 @@ def t_addrmode_sp : Operand<i32>,
def t_addrmode_pc : Operand<i32> {
let EncoderMethod = "getAddrModePCOpValue";
let DecoderMethod = "DecodeThumbAddrModePC";
+ let PrintMethod = "printThumbLdrLabelOperand";
}
//===----------------------------------------------------------------------===//
@@ -1200,6 +1201,7 @@ let neverHasSideEffects = 1, isReMaterializable = 1 in
def tLEApcrel : tPseudoInst<(outs tGPR:$Rd), (ins i32imm:$label, pred:$p),
2, IIC_iALUi, []>;
+let hasSideEffects = 1 in
def tLEApcrelJT : tPseudoInst<(outs tGPR:$Rd),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
2, IIC_iALUi, []>;
@@ -1245,10 +1247,6 @@ def tInt_eh_sjlj_longjmp : XI<(outs), (ins GPR:$src, GPR:$scratch),
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
Requires<[IsThumb, IsIOS]>;
-let Defs = [ R0, R1, R2, R3, R4, R5, R6, R7, R12, CPSR ],
- isBarrier = 1 in
-def tInt_eh_sjlj_dispatchsetup : PseudoInst<(outs), (ins), NoItinerary, []>;
-
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 8ecf009..002d64a 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -159,7 +159,7 @@ def t2addrmode_imm12 : Operand<i32>,
// t2ldrlabel := imm12
def t2ldrlabel : Operand<i32> {
let EncoderMethod = "getAddrModeImm12OpValue";
- let PrintMethod = "printT2LdrLabelOperand";
+ let PrintMethod = "printThumbLdrLabelOperand";
}
def t2ldr_pcrel_imm12_asmoperand : AsmOperandClass {let Name = "MemPCRelImm12";}
@@ -523,6 +523,23 @@ class T2MulLong<bits<3> opc22_20, bits<4> opc7_4,
let Inst{7-4} = opc7_4;
let Inst{3-0} = Rm;
}
+class T2MlaLong<bits<3> opc22_20, bits<4> opc7_4,
+ dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> RdLo;
+ bits<4> RdHi;
+ bits<4> Rn;
+ bits<4> Rm;
+
+ let Inst{31-23} = 0b111110111;
+ let Inst{22-20} = opc22_20;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = RdLo;
+ let Inst{11-8} = RdHi;
+ let Inst{7-4} = opc7_4;
+ let Inst{3-0} = Rm;
+}
/// T2I_bin_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a
@@ -757,33 +774,6 @@ multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
let Inst{24} = 1;
let Inst{23-21} = op23_21;
}
-
- // Predicated versions.
- def CCri : t2PseudoExpand<(outs GPRnopc:$Rd),
- (ins GPRnopc:$Rfalse, GPRnopc:$Rn, t2_so_imm:$imm,
- pred:$p, cc_out:$s), 4, IIC_iALUi, [],
- (!cast<Instruction>(NAME#ri) GPRnopc:$Rd,
- GPRnopc:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
- def CCri12 : t2PseudoExpand<(outs GPRnopc:$Rd),
- (ins GPRnopc:$Rfalse, GPR:$Rn, imm0_4095:$imm,
- pred:$p),
- 4, IIC_iALUi, [],
- (!cast<Instruction>(NAME#ri12) GPRnopc:$Rd,
- GPR:$Rn, imm0_4095:$imm, pred:$p)>,
- RegConstraint<"$Rfalse = $Rd">;
- def CCrr : t2PseudoExpand<(outs GPRnopc:$Rd),
- (ins GPRnopc:$Rfalse, GPRnopc:$Rn, rGPR:$Rm,
- pred:$p, cc_out:$s), 4, IIC_iALUr, [],
- (!cast<Instruction>(NAME#rr) GPRnopc:$Rd,
- GPRnopc:$Rn, rGPR:$Rm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
- def CCrs : t2PseudoExpand<(outs GPRnopc:$Rd),
- (ins GPRnopc:$Rfalse, GPRnopc:$Rn, t2_so_reg:$Rm,
- pred:$p, cc_out:$s), 4, IIC_iALUsi, [],
- (!cast<Instruction>(NAME#rs) GPRnopc:$Rd,
- GPRnopc:$Rn, t2_so_reg:$Rm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
}
/// T2I_adde_sube_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns
@@ -1200,6 +1190,7 @@ def t2ADR : T2PCOneRegImm<(outs rGPR:$Rd),
let neverHasSideEffects = 1, isReMaterializable = 1 in
def t2LEApcrel : t2PseudoInst<(outs rGPR:$Rd), (ins i32imm:$label, pred:$p),
4, IIC_iALUi, []>;
+let hasSideEffects = 1 in
def t2LEApcrelJT : t2PseudoInst<(outs rGPR:$Rd),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
4, IIC_iALUi,
@@ -1962,7 +1953,7 @@ def : T2Pat<(ARMadde rGPR:$src, imm0_255_not:$imm, CPSR),
def : T2Pat<(ARMadde rGPR:$src, t2_so_imm_not:$imm, CPSR),
(t2SBCri rGPR:$src, t2_so_imm_not:$imm)>;
def : T2Pat<(ARMadde rGPR:$src, imm0_65535_neg:$imm, CPSR),
- (t2SBCrr rGPR:$src, (t2MOVi16 (imm_neg_XFORM imm:$imm)))>;
+ (t2SBCrr rGPR:$src, (t2MOVi16 (imm_not_XFORM imm:$imm)))>;
// Select Bytes -- for disassembly only
@@ -2405,7 +2396,8 @@ def t2MUL: T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL32,
def t2MLA: T2FourReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
"mla", "\t$Rd, $Rn, $Rm, $Ra",
- [(set rGPR:$Rd, (add (mul rGPR:$Rn, rGPR:$Rm), rGPR:$Ra))]> {
+ [(set rGPR:$Rd, (add (mul rGPR:$Rn, rGPR:$Rm), rGPR:$Ra))]>,
+ Requires<[IsThumb2, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b000;
@@ -2415,7 +2407,8 @@ def t2MLA: T2FourReg<
def t2MLS: T2FourReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
"mls", "\t$Rd, $Rn, $Rm, $Ra",
- [(set rGPR:$Rd, (sub rGPR:$Ra, (mul rGPR:$Rn, rGPR:$Rm)))]> {
+ [(set rGPR:$Rd, (sub rGPR:$Ra, (mul rGPR:$Rn, rGPR:$Rm)))]>,
+ Requires<[IsThumb2, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b000;
@@ -2437,15 +2430,17 @@ def t2UMULL : T2MulLong<0b010, 0b0000,
} // isCommutable
// Multiply + accumulate
-def t2SMLAL : T2MulLong<0b100, 0b0000,
+def t2SMLAL : T2MlaLong<0b100, 0b0000,
(outs rGPR:$RdLo, rGPR:$RdHi),
- (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64,
- "smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>;
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi), IIC_iMAC64,
+ "smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">;
-def t2UMLAL : T2MulLong<0b110, 0b0000,
+def t2UMLAL : T2MlaLong<0b110, 0b0000,
(outs rGPR:$RdLo, rGPR:$RdHi),
- (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64,
- "umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>;
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi), IIC_iMAC64,
+ "umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">;
def t2UMAAL : T2MulLong<0b110, 0b0110,
(outs rGPR:$RdLo, rGPR:$RdHi),
@@ -2482,7 +2477,7 @@ def t2SMMLA : T2FourReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
"smmla", "\t$Rd, $Rn, $Rm, $Ra",
[(set rGPR:$Rd, (add (mulhs rGPR:$Rm, rGPR:$Rn), rGPR:$Ra))]>,
- Requires<[IsThumb2, HasThumb2DSP]> {
+ Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b101;
@@ -2503,7 +2498,7 @@ def t2SMMLS: T2FourReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
"smmls", "\t$Rd, $Rn, $Rm, $Ra",
[(set rGPR:$Rd, (sub rGPR:$Ra, (mulhs rGPR:$Rn, rGPR:$Rm)))]>,
- Requires<[IsThumb2, HasThumb2DSP]> {
+ Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b110;
@@ -2608,7 +2603,7 @@ multiclass T2I_smla<string opc, PatFrag opnode> {
[(set rGPR:$Rd, (add rGPR:$Ra,
(opnode (sext_inreg rGPR:$Rn, i16),
(sext_inreg rGPR:$Rm, i16))))]>,
- Requires<[IsThumb2, HasThumb2DSP]> {
+ Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
@@ -2621,7 +2616,7 @@ multiclass T2I_smla<string opc, PatFrag opnode> {
!strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sext_inreg rGPR:$Rn, i16),
(sra rGPR:$Rm, (i32 16)))))]>,
- Requires<[IsThumb2, HasThumb2DSP]> {
+ Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
@@ -2634,7 +2629,7 @@ multiclass T2I_smla<string opc, PatFrag opnode> {
!strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sra rGPR:$Rn, (i32 16)),
(sext_inreg rGPR:$Rm, i16))))]>,
- Requires<[IsThumb2, HasThumb2DSP]> {
+ Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
@@ -2647,7 +2642,7 @@ multiclass T2I_smla<string opc, PatFrag opnode> {
!strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sra rGPR:$Rn, (i32 16)),
(sra rGPR:$Rm, (i32 16)))))]>,
- Requires<[IsThumb2, HasThumb2DSP]> {
+ Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
@@ -2660,7 +2655,7 @@ multiclass T2I_smla<string opc, PatFrag opnode> {
!strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set rGPR:$Rd, (add rGPR:$Ra, (sra (opnode rGPR:$Rn,
(sext_inreg rGPR:$Rm, i16)), (i32 16))))]>,
- Requires<[IsThumb2, HasThumb2DSP]> {
+ Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b011;
@@ -2673,7 +2668,7 @@ multiclass T2I_smla<string opc, PatFrag opnode> {
!strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set rGPR:$Rd, (add rGPR:$Ra, (sra (opnode rGPR:$Rn,
(sra rGPR:$Rm, (i32 16))), (i32 16))))]>,
- Requires<[IsThumb2, HasThumb2DSP]> {
+ Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b011;
@@ -2767,7 +2762,7 @@ def t2SMLSLDX : T2FourReg_mac<1, 0b101, 0b1101, (outs rGPR:$Ra,rGPR:$Rd),
// Division Instructions.
// Signed and unsigned division on v7-M
//
-def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
+def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV,
"sdiv", "\t$Rd, $Rn, $Rm",
[(set rGPR:$Rd, (sdiv rGPR:$Rn, rGPR:$Rm))]>,
Requires<[HasDivide, IsThumb2]> {
@@ -2778,7 +2773,7 @@ def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
let Inst{7-4} = 0b1111;
}
-def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
+def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV,
"udiv", "\t$Rd, $Rn, $Rm",
[(set rGPR:$Rd, (udiv rGPR:$Rn, rGPR:$Rm))]>,
Requires<[HasDivide, IsThumb2]> {
@@ -3049,37 +3044,6 @@ def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$Rd),
RegConstraint<"$false = $Rd">;
} // isCodeGenOnly = 1
-multiclass T2I_bincc_irs<Instruction iri, Instruction irr, Instruction irs,
- InstrItinClass iii, InstrItinClass iir, InstrItinClass iis> {
- // shifted imm
- def ri : t2PseudoExpand<(outs rGPR:$Rd),
- (ins rGPR:$Rfalse, rGPR:$Rn, t2_so_imm:$imm,
- pred:$p, cc_out:$s),
- 4, iii, [],
- (iri rGPR:$Rd, rGPR:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
- // register
- def rr : t2PseudoExpand<(outs rGPR:$Rd),
- (ins rGPR:$Rfalse, rGPR:$Rn, rGPR:$Rm,
- pred:$p, cc_out:$s),
- 4, iir, [],
- (irr rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
- // shifted register
- def rs : t2PseudoExpand<(outs rGPR:$Rd),
- (ins rGPR:$Rfalse, rGPR:$Rn, t2_so_reg:$ShiftedRm,
- pred:$p, cc_out:$s),
- 4, iis, [],
- (irs rGPR:$Rd, rGPR:$Rn, t2_so_reg:$ShiftedRm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rfalse = $Rd">;
-} // T2I_bincc_irs
-
-defm t2ANDCC : T2I_bincc_irs<t2ANDri, t2ANDrr, t2ANDrs,
- IIC_iBITi, IIC_iBITr, IIC_iBITsi>;
-defm t2ORRCC : T2I_bincc_irs<t2ORRri, t2ORRrr, t2ORRrs,
- IIC_iBITi, IIC_iBITr, IIC_iBITsi>;
-defm t2EORCC : T2I_bincc_irs<t2EORri, t2EORrr, t2EORrs,
- IIC_iBITi, IIC_iBITr, IIC_iBITsi>;
} // neverHasSideEffects
//===----------------------------------------------------------------------===//
@@ -3281,11 +3245,11 @@ def t2B : T2I<(outs), (ins uncondbrtarget:$target), IIC_Br,
let Inst{15-14} = 0b10;
let Inst{12} = 1;
- bits<20> target;
+ bits<24> target;
let Inst{26} = target{19};
let Inst{11} = target{18};
let Inst{13} = target{17};
- let Inst{21-16} = target{16-11};
+ let Inst{25-16} = target{20-11};
let Inst{10-0} = target{10-0};
let DecoderMethod = "DecodeT2BInstruction";
}
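
The wider field matters for branch range: B.W's T4 encoding carries a 25-bit signed byte offset (about ±16 MB), with the imm10 field occupying Inst{25-16}. A bits<20> operand could populate only six of those ten bits via target{16-11}, mis-encoding branches beyond roughly ±1 MB; bits<24> with target{20-11} fills the whole field.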
@@ -3367,20 +3331,6 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
Requires<[IsThumb2, IsIOS]>;
}
-let isCall = 1, Defs = [LR], Uses = [SP] in {
- // mov lr, pc; b if callee is marked noreturn to avoid confusing the
- // return stack predictor.
- def t2BMOVPCB_CALL : tPseudoInst<(outs),
- (ins t_bltarget:$func),
- 6, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>,
- Requires<[IsThumb]>;
-}
-
-// Direct calls
-def : T2Pat<(ARMcall_nolink texternalsym:$func),
- (t2BMOVPCB_CALL texternalsym:$func)>,
- Requires<[IsThumb]>;
-
// IT block
let Defs = [ITSTATE] in
def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask),
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
index eb7eaa6..b5a896c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -450,11 +450,11 @@ def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
[/* For disassembly only; pattern left blank */]>;
-def : ARMPat<(f32_to_f16 SPR:$a),
- (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
+def : Pat<(f32_to_f16 SPR:$a),
+ (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
-def : ARMPat<(f16_to_f32 GPR:$a),
- (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
+def : Pat<(f16_to_f32 GPR:$a),
+ (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
@@ -523,10 +523,12 @@ def VMOVRS : AVConv2I<0b11100001, 0b1010,
let D = VFPNeonDomain;
}
+// Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
def VMOVSR : AVConv4I<0b11100000, 0b1010,
(outs SPR:$Sn), (ins GPR:$Rt),
IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
- [(set SPR:$Sn, (bitconvert GPR:$Rt))]> {
+ [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
+ Requires<[HasVFP2, UseVMOVSR]> {
// Instruction operands.
bits<5> Sn;
bits<4> Rt;
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
index 3f99cce..254d8f6 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
@@ -168,7 +168,7 @@ void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn,
intptr_t LazyPtr = getIndirectSymAddr(Fn);
if (!LazyPtr) {
// In PIC mode, the function stub is loading a lazy-ptr.
- LazyPtr= (intptr_t)emitGlobalValueIndirectSym((GlobalValue*)F, Fn, JCE);
+ LazyPtr= (intptr_t)emitGlobalValueIndirectSym((const GlobalValue*)F, Fn, JCE);
DEBUG(if (F)
errs() << "JIT: Indirect symbol emitted at [" << LazyPtr
<< "] for GV '" << F->getName() << "'\n";
diff --git a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 897ceb6..0185289 100644
--- a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -27,7 +27,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -1448,7 +1448,7 @@ namespace {
static char ID;
ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}
- const TargetData *TD;
+ const DataLayout *TD;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const ARMSubtarget *STI;
@@ -1478,7 +1478,7 @@ namespace {
}
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- TD = Fn.getTarget().getTargetData();
+ TD = Fn.getTarget().getDataLayout();
TII = Fn.getTarget().getInstrInfo();
TRI = Fn.getTarget().getRegisterInfo();
STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
diff --git a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
index f1c8fc8..c0ac04b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -108,6 +108,11 @@ class ARMFunctionInfo : public MachineFunctionInfo {
/// pass.
DenseMap<unsigned, unsigned> CPEClones;
+ /// GlobalBaseReg - keeps track of the virtual register initialized for
+ /// use as the global base register. This is used in some PIC
+ /// relocation models.
+ unsigned GlobalBaseReg;
+
public:
ARMFunctionInfo() :
isThumb(false),
@@ -119,7 +124,7 @@ public:
GPRCS1Frames(0), GPRCS2Frames(0), DPRCSFrames(0),
NumAlignedDPRCS2Regs(0),
JumpTableUId(0), PICLabelUId(0),
- VarArgsFrameIndex(0), HasITBlocks(false) {}
+ VarArgsFrameIndex(0), HasITBlocks(false), GlobalBaseReg(0) {}
explicit ARMFunctionInfo(MachineFunction &MF) :
isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
@@ -130,7 +135,7 @@ public:
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
GPRCS1Frames(32), GPRCS2Frames(32), DPRCSFrames(32),
JumpTableUId(0), PICLabelUId(0),
- VarArgsFrameIndex(0), HasITBlocks(false) {}
+ VarArgsFrameIndex(0), HasITBlocks(false), GlobalBaseReg(0) {}
bool isThumbFunction() const { return isThumb; }
bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
@@ -249,6 +254,9 @@ public:
bool hasITBlocks() const { return HasITBlocks; }
void setHasITBlocks(bool h) { HasITBlocks = h; }
+ unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
+ void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
+
void recordCPEClone(unsigned CPIdx, unsigned CPCloneIdx) {
if (!CPEClones.insert(std::make_pair(CPCloneIdx, CPIdx)).second)
assert(0 && "Duplicate entries!");
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
index 6f974fd..b0f576b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
@@ -49,6 +49,9 @@ def ssub_0 : SubRegIndex;
def ssub_1 : SubRegIndex;
def ssub_2 : SubRegIndex<[dsub_1, ssub_0]>;
def ssub_3 : SubRegIndex<[dsub_1, ssub_1]>;
+
+def gsub_0 : SubRegIndex;
+def gsub_1 : SubRegIndex;
// Let TableGen synthesize the remaining 12 ssub_* indices.
// We don't need to name them.
}
@@ -247,11 +250,16 @@ def CCR : RegisterClass<"ARM", [i32], 32, (add CPSR)> {
}
// Scalar single precision floating point register class.
-def SPR : RegisterClass<"ARM", [f32], 32, (sequence "S%u", 0, 31)>;
+// FIXME: Allocation order changed to s0, s2, s4, ... as a quick hack to
+// avoid partial-write dependencies on D registers (S registers are
+// renamed as portions of D registers).
+def SPR : RegisterClass<"ARM", [f32], 32, (add (decimate
+ (sequence "S%u", 0, 31), 2),
+ (sequence "S%u", 0, 31))>;
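
Since register-class set arithmetic keeps only the first occurrence of each register, the expression above yields the allocation order S0, S2, ..., S30, S1, S3, ..., S31: even S registers are tried first (each starts a fresh D register, avoiding the partial write), with the odd registers as a fallback so none become unallocatable.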
// Subset of SPR which can be used as a source of NEON scalars for 16-bit
// operations
-def SPR_8 : RegisterClass<"ARM", [f32], 32, (trunc SPR, 16)>;
+def SPR_8 : RegisterClass<"ARM", [f32], 32, (sequence "S%u", 0, 15)>;
// Scalar double precision floating point / generic 64-bit vector register
// class.
@@ -308,6 +316,17 @@ def DPair : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
let AltOrderSelect = [{ return 1; }];
}
+// Pseudo-registers representing even-odd pairs of GPRs from R0 to R13/SP.
+// These are needed by instructions (e.g. ldrexd/strexd) requiring even-odd GPRs.
+def Tuples2R : RegisterTuples<[gsub_0, gsub_1],
+ [(add R0, R2, R4, R6, R8, R10, R12),
+ (add R1, R3, R5, R7, R9, R11, SP)]>;
+
+// Register class representing a pair of even-odd GPRs.
+def GPRPair : RegisterClass<"ARM", [untyped], 64, (add Tuples2R)> {
+ let Size = 64; // 2 x 32 bits, we have no predefined type of that size.
+}
+
// Pseudo-registers representing 3 consecutive D registers.
def Tuples3D : RegisterTuples<[dsub_0, dsub_1, dsub_2],
[(shl DPR, 0),
diff --git a/contrib/llvm/lib/Target/ARM/ARMSchedule.td b/contrib/llvm/lib/Target/ARM/ARMSchedule.td
index 81d2fa3..02196d0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSchedule.td
+++ b/contrib/llvm/lib/Target/ARM/ARMSchedule.td
@@ -55,6 +55,7 @@ def IIC_iMUL32 : InstrItinClass;
def IIC_iMAC32 : InstrItinClass;
def IIC_iMUL64 : InstrItinClass;
def IIC_iMAC64 : InstrItinClass;
+def IIC_iDIV : InstrItinClass;
def IIC_iLoad_i : InstrItinClass;
def IIC_iLoad_r : InstrItinClass;
def IIC_iLoad_si : InstrItinClass;
@@ -261,3 +262,4 @@ def IIC_VTBX4 : InstrItinClass;
include "ARMScheduleV6.td"
include "ARMScheduleA8.td"
include "ARMScheduleA9.td"
+include "ARMScheduleSwift.td"
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
index 7bc590f..404634f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
@@ -1876,8 +1876,9 @@ def CortexA9Itineraries : ProcessorItineraries<
]>;
// ===---------------------------------------------------------------------===//
-// This following definitions describe the simple machine model which
-// will replace itineraries.
+// The following definitions describe the simpler per-operand machine model.
+// This works with MachineScheduler and will eventually replace itineraries.
+
// Cortex-A9 machine model for scheduling and other instruction cost heuristics.
def CortexA9Model : SchedMachineModel {
@@ -1891,5 +1892,595 @@ def CortexA9Model : SchedMachineModel {
let Itineraries = CortexA9Itineraries;
}
-// TODO: Add Cortex-A9 processor and scheduler resources.
+//===----------------------------------------------------------------------===//
+// Define each kind of processor resource and number available.
+
+def A9UnitALU : ProcResource<2>;
+def A9UnitMul : ProcResource<1> { let Super = A9UnitALU; }
+def A9UnitAGU : ProcResource<1>;
+def A9UnitLS : ProcResource<1>;
+def A9UnitFP : ProcResource<1> { let Buffered = 0; }
+def A9UnitB : ProcResource<1>;
+
+//===----------------------------------------------------------------------===//
+// Define scheduler read/write types with their resources and latency on A9.
+
+// Consume an issue slot, but no processor resources. This is useful when all
+// other writes associated with the operand have NumMicroOps = 0.
+def A9WriteIssue : SchedWriteRes<[]> { let Latency = 0; }
+
+// Write an integer register.
+def A9WriteI : SchedWriteRes<[A9UnitALU]>;
+// Write an integer register, with operand shifted by register.
+def A9WriteIsr : SchedWriteRes<[A9UnitALU]> { let Latency = 2; }
+
+// Basic ALU.
+def A9WriteA : SchedWriteRes<[A9UnitALU]>;
+// ALU with operand shifted by immediate.
+def A9WriteAsi : SchedWriteRes<[A9UnitALU]> { let Latency = 2; }
+// ALU with operand shifted by register.
+def A9WriteAsr : SchedWriteRes<[A9UnitALU]> { let Latency = 3; }
+
+// Multiplication
+def A9WriteM : SchedWriteRes<[A9UnitMul, A9UnitMul]> { let Latency = 4; }
+def A9WriteMHi : SchedWriteRes<[A9UnitMul]> { let Latency = 5;
+ let NumMicroOps = 0; }
+def A9WriteM16 : SchedWriteRes<[A9UnitMul]> { let Latency = 3; }
+def A9WriteM16Hi : SchedWriteRes<[A9UnitMul]> { let Latency = 4;
+ let NumMicroOps = 0; }
+
+// Floating-point
+// Only one FP or AGU instruction may issue per cycle. We model this
+// by having FP instructions consume the AGU resource.
+def A9WriteF : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 4; }
+def A9WriteFMov : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 1; }
+def A9WriteFMulS : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 5; }
+def A9WriteFMulD : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 6; }
+def A9WriteFMAS : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 8; }
+def A9WriteFMAD : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 9; }
+def A9WriteFDivS : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 15; }
+def A9WriteFDivD : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 25; }
+def A9WriteFSqrtS : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 17; }
+def A9WriteFSqrtD : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 32; }
+
+// NEON has an odd mix of latencies. Simply name the write types by latency.
+def A9WriteV1 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 1; }
+def A9WriteV2 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 2; }
+def A9WriteV3 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 3; }
+def A9WriteV4 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 4; }
+def A9WriteV5 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 5; }
+def A9WriteV6 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 6; }
+def A9WriteV7 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 7; }
+def A9WriteV9 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 9; }
+def A9WriteV10 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 10; }
+
+// Reserve A9UnitFP for 2 consecutive cycles.
+def A9Write2V4 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> {
+ let Latency = 4;
+ let ResourceCycles = [2];
+}
+def A9Write2V7 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> {
+ let Latency = 7;
+ let ResourceCycles = [2];
+}
+def A9Write2V9 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> {
+ let Latency = 9;
+ let ResourceCycles = [2];
+}
+
+// Branches don't have a def operand but still consume resources.
+def A9WriteB : SchedWriteRes<[A9UnitB]>;
+
+// Address generation.
+def A9WriteAdr : SchedWriteRes<[A9UnitAGU]> { let NumMicroOps = 0; }
+
+// Load Integer.
+def A9WriteL : SchedWriteRes<[A9UnitLS]> { let Latency = 3; }
+// Load the upper 32 bits using the same micro-op.
+def A9WriteLHi : SchedWriteRes<[]> { let Latency = 3;
+ let NumMicroOps = 0; }
+// Offset shifted by register.
+def A9WriteLsi : SchedWriteRes<[A9UnitLS]> { let Latency = 4; }
+// Load (and zero extend) a byte.
+def A9WriteLb : SchedWriteRes<[A9UnitLS]> { let Latency = 4; }
+def A9WriteLbsi : SchedWriteRes<[A9UnitLS]> { let Latency = 5; }
+
+// Load or Store Float, aligned.
+def A9WriteLSfp : SchedWriteRes<[A9UnitLS, A9UnitFP]> { let Latency = 1; }
+
+// Store Integer.
+def A9WriteS : SchedWriteRes<[A9UnitLS]>;
+
+//===----------------------------------------------------------------------===//
+// Define resources dynamically for load multiple variants.
+
+// Define helpers for extra latency without consuming resources.
+def A9WriteCycle1 : SchedWriteRes<[]> { let Latency = 1; let NumMicroOps = 0; }
+foreach NumCycles = 2-8 in {
+def A9WriteCycle#NumCycles : WriteSequence<[A9WriteCycle1], NumCycles>;
+} // foreach NumCycles
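+// For example, the NumCycles = 3 iteration is equivalent to writing:
+//   def A9WriteCycle3 : WriteSequence<[A9WriteCycle1], 3>;
+// i.e. three chained copies of A9WriteCycle1, contributing 3 cycles of
+// latency and zero micro-ops.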
+
+// Define TII for use in SchedVariant Predicates.
+def : PredicateProlog<[{
+ const ARMBaseInstrInfo *TII =
+ static_cast<const ARMBaseInstrInfo*>(SchedModel->getInstrInfo());
+ (void)TII;
+}]>;
+
+// Define address generation sequences and predicates for 8 flavors of LDMs.
+foreach NumAddr = 1-8 in {
+
+// Define A9WriteAdr1-8 as a sequence of A9WriteAdr with additive
+// latency for instructions that generate multiple loads or stores.
+def A9WriteAdr#NumAddr : WriteSequence<[A9WriteAdr], NumAddr>;
+
+// Define a predicate to select the LDM based on number of memory addresses.
+def A9LMAdr#NumAddr#Pred :
+ SchedPredicate<"TII->getNumLDMAddresses(MI) == "#NumAddr>;
+
+} // foreach NumAddr
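+// For example, the NumAddr = 2 iteration produces:
+//   def A9WriteAdr2 : WriteSequence<[A9WriteAdr], 2>;
+//   def A9LMAdr2Pred : SchedPredicate<"TII->getNumLDMAddresses(MI) == 2">;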
+
+// Fall-back for unknown LDMs.
+def A9LMUnknownPred : SchedPredicate<"TII->getNumLDMAddresses(MI) == 0">;
+
+// LDM/VLDM/VLDn address generation latency & resources.
+// Dynamically select the A9WriteAdrN sequence using a predicate.
+def A9WriteLMAdr : SchedWriteVariant<[
+ SchedVar<A9LMAdr1Pred, [A9WriteAdr1]>,
+ SchedVar<A9LMAdr2Pred, [A9WriteAdr2]>,
+ SchedVar<A9LMAdr3Pred, [A9WriteAdr3]>,
+ SchedVar<A9LMAdr4Pred, [A9WriteAdr4]>,
+ SchedVar<A9LMAdr5Pred, [A9WriteAdr5]>,
+ SchedVar<A9LMAdr6Pred, [A9WriteAdr6]>,
+ SchedVar<A9LMAdr7Pred, [A9WriteAdr7]>,
+ SchedVar<A9LMAdr8Pred, [A9WriteAdr8]>,
+ // For unknown LDM/VLDM/VSTM, assume 2 32-bit registers.
+ SchedVar<A9LMUnknownPred, [A9WriteAdr2]>]>;
+
+// Define LDM Resources.
+// These take no issue resource, so they can be combined with other
+// writes like A9WriteB.
+// A9WriteLMLo takes a single LS resource and 2 cycles.
+def A9WriteLMLo : SchedWriteRes<[A9UnitLS]> { let Latency = 2;
+ let NumMicroOps = 0; }
+// Assuming aligned access, the upper half of each pair is free with
+// the same latency.
+def A9WriteLMHi : SchedWriteRes<[]> { let Latency = 2;
+ let NumMicroOps = 0; }
+// Each A9WriteL#N variant adds N cycles of latency without consuming
+// additional resources.
+foreach NumAddr = 1-8 in {
+def A9WriteL#NumAddr : WriteSequence<
+ [A9WriteLMLo, !cast<SchedWrite>("A9WriteCycle"#NumAddr)]>;
+def A9WriteL#NumAddr#Hi : WriteSequence<
+ [A9WriteLMHi, !cast<SchedWrite>("A9WriteCycle"#NumAddr)]>;
+}
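+// For example, A9WriteL3 is [A9WriteLMLo, A9WriteCycle3]: latency 2 + 3 = 5
+// cycles for the third 64-bit access, while still occupying only the single
+// A9UnitLS slot (and zero micro-ops) from A9WriteLMLo.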
+
+//===----------------------------------------------------------------------===//
+// LDM: Load multiple into 32-bit integer registers.
+
+// A9WriteLM variants expand into a pair of writes for each 64-bit
+// value loaded. When the number of registers is odd, the last
+// A9WriteLnHi is naturally ignored because the instruction has no
+// following def operands. These variants take no issue resource, so
+// they may need to be part of a WriteSequence that includes A9WriteIssue.
+def A9WriteLM : SchedWriteVariant<[
+ SchedVar<A9LMAdr1Pred, [A9WriteL1, A9WriteL1Hi]>,
+ SchedVar<A9LMAdr2Pred, [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi]>,
+ SchedVar<A9LMAdr3Pred, [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi,
+ A9WriteL3, A9WriteL3Hi]>,
+ SchedVar<A9LMAdr4Pred, [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi,
+ A9WriteL3, A9WriteL3Hi,
+ A9WriteL4, A9WriteL4Hi]>,
+ SchedVar<A9LMAdr5Pred, [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi,
+ A9WriteL3, A9WriteL3Hi,
+ A9WriteL4, A9WriteL4Hi,
+ A9WriteL5, A9WriteL5Hi]>,
+ SchedVar<A9LMAdr6Pred, [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi,
+ A9WriteL3, A9WriteL3Hi,
+ A9WriteL4, A9WriteL4Hi,
+ A9WriteL5, A9WriteL5Hi,
+ A9WriteL6, A9WriteL6Hi]>,
+ SchedVar<A9LMAdr7Pred, [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi,
+ A9WriteL3, A9WriteL3Hi,
+ A9WriteL4, A9WriteL4Hi,
+ A9WriteL5, A9WriteL5Hi,
+ A9WriteL6, A9WriteL6Hi,
+ A9WriteL7, A9WriteL7Hi]>,
+ SchedVar<A9LMAdr8Pred, [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi,
+ A9WriteL3, A9WriteL3Hi,
+ A9WriteL4, A9WriteL4Hi,
+ A9WriteL5, A9WriteL5Hi,
+ A9WriteL6, A9WriteL6Hi,
+ A9WriteL7, A9WriteL7Hi,
+ A9WriteL8, A9WriteL8Hi]>,
+ // For unknown LDMs, define the maximum number of writes, but only
+ // make the first two consume resources.
+ SchedVar<A9LMUnknownPred, [A9WriteL1, A9WriteL1Hi,
+ A9WriteL2, A9WriteL2Hi,
+ A9WriteL3Hi, A9WriteL3Hi,
+ A9WriteL4Hi, A9WriteL4Hi,
+ A9WriteL5Hi, A9WriteL5Hi,
+ A9WriteL6Hi, A9WriteL6Hi,
+ A9WriteL7Hi, A9WriteL7Hi,
+ A9WriteL8Hi, A9WriteL8Hi]>]> {
+ let Variadic = 1;
+}
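+// For example, an LDM loading five registers generates three 64-bit
+// accesses, so A9LMAdr3Pred selects [A9WriteL1, A9WriteL1Hi, ...,
+// A9WriteL3, A9WriteL3Hi]; with an odd register count the final
+// A9WriteL3Hi finds no def operand and is dropped, as described above.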
+
+//===----------------------------------------------------------------------===//
+// VFP Load/Store Multiple Variants, and NEON VLDn/VSTn support.
+
+// A9WriteLfpOp is the same as A9WriteLSfp but takes no issue resources,
+// so it can be used in WriteSequences for single-issue instructions that
+// encapsulate multiple loads.
+def A9WriteLfpOp : SchedWriteRes<[A9UnitLS, A9UnitFP]> {
+ let Latency = 1;
+ let NumMicroOps = 0;
+}
+
+foreach NumAddr = 1-8 in {
+
+// Helper for A9WriteLfp1-8: A sequence of fp loads with no micro-ops.
+def A9WriteLfp#NumAddr#Seq : WriteSequence<[A9WriteLfpOp], NumAddr>;
+
+// A9WriteLfp1-8 definitions are statically expanded into a sequence of
+// A9WriteLfpOps with additive latency that takes a single issue slot.
+// Used directly to describe NEON VLDn.
+def A9WriteLfp#NumAddr : WriteSequence<
+ [A9WriteIssue, !cast<SchedWrite>("A9WriteLfp"#NumAddr#Seq)]>;
+
+// A9WriteLfp1-8Mov adds a cycle of latency and FP resource for
+// permuting loaded values.
+def A9WriteLfp#NumAddr#Mov : WriteSequence<
+ [A9WriteF, !cast<SchedWrite>("A9WriteLfp"#NumAddr#Seq)]>;
+
+} // foreach NumAddr
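+// For example, the NumAddr = 2 iteration is equivalent to writing:
+//   def A9WriteLfp2Seq : WriteSequence<[A9WriteLfpOp], 2>;
+//   def A9WriteLfp2    : WriteSequence<[A9WriteIssue, A9WriteLfp2Seq]>;
+//   def A9WriteLfp2Mov : WriteSequence<[A9WriteF, A9WriteLfp2Seq]>;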
+
+// Define VLDM/VSTM PreRA resources.
+// A9WriteLMfpPreRA is dynamically expanded into the correct
+// A9WriteLfp1-8 sequence based on a predicate. This supports the
+// preRA VLDM variants in which all 64-bit loads are written to the
+// same tuple of either single or double precision registers.
+def A9WriteLMfpPreRA : SchedWriteVariant<[
+ SchedVar<A9LMAdr1Pred, [A9WriteLfp1]>,
+ SchedVar<A9LMAdr2Pred, [A9WriteLfp2]>,
+ SchedVar<A9LMAdr3Pred, [A9WriteLfp3]>,
+ SchedVar<A9LMAdr4Pred, [A9WriteLfp4]>,
+ SchedVar<A9LMAdr5Pred, [A9WriteLfp5]>,
+ SchedVar<A9LMAdr6Pred, [A9WriteLfp6]>,
+ SchedVar<A9LMAdr7Pred, [A9WriteLfp7]>,
+ SchedVar<A9LMAdr8Pred, [A9WriteLfp8]>,
+ // For unknown VLDM/VSTM PreRA, assume 2xS registers.
+ SchedVar<A9LMUnknownPred, [A9WriteLfp2]>]>;
+
+// Define VLDM/VSTM PostRA Resources.
+// A9WriteLMfpLo takes an LS and FP resource and one issue slot but no latency.
+def A9WriteLMfpLo : SchedWriteRes<[A9UnitLS, A9UnitFP]> { let Latency = 0; }
+
+foreach NumAddr = 1-8 in {
+
+// Each A9WriteLMfp#N variant adds N cycles of latency without consuming
+// additional resources.
+def A9WriteLMfp#NumAddr : WriteSequence<
+ [A9WriteLMfpLo, !cast<SchedWrite>("A9WriteCycle"#NumAddr)]>;
+
+// Assuming aligned access, the upper half of each pair is free with
+// the same latency.
+def A9WriteLMfp#NumAddr#Hi : WriteSequence<
+ [A9WriteLMHi, !cast<SchedWrite>("A9WriteCycle"#NumAddr)]>;
+
+} // foreach NumAddr
+
+// VLDM PostRA Variants. These variants expand A9WriteLMfpPostRA into a
+// pair of writes for each 64-bit value loaded. When the number of
+// registers is odd, the last A9WriteLMfpNHi is naturally ignored because
+// the instruction has no following def operands.
+def A9WriteLMfpPostRA : SchedWriteVariant<[
+ SchedVar<A9LMAdr1Pred, [A9WriteLMfp1, A9WriteLMfp1Hi]>,
+ SchedVar<A9LMAdr2Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
+ A9WriteLMfp2, A9WriteLMfp2Hi]>,
+ SchedVar<A9LMAdr3Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
+ A9WriteLMfp2, A9WriteLMfp2Hi,
+ A9WriteLMfp3, A9WriteLMfp3Hi]>,
+ SchedVar<A9LMAdr4Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
+ A9WriteLMfp2, A9WriteLMfp2Hi,
+ A9WriteLMfp3, A9WriteLMfp3Hi,
+ A9WriteLMfp4, A9WriteLMfp4Hi]>,
+ SchedVar<A9LMAdr5Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
+ A9WriteLMfp2, A9WriteLMfp2Hi,
+ A9WriteLMfp3, A9WriteLMfp3Hi,
+ A9WriteLMfp4, A9WriteLMfp4Hi,
+ A9WriteLMfp5, A9WriteLMfp5Hi]>,
+ SchedVar<A9LMAdr6Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
+ A9WriteLMfp2, A9WriteLMfp2Hi,
+ A9WriteLMfp3, A9WriteLMfp3Hi,
+ A9WriteLMfp4, A9WriteLMfp4Hi,
+ A9WriteLMfp5, A9WriteLMfp5Hi,
+ A9WriteLMfp6, A9WriteLMfp6Hi]>,
+ SchedVar<A9LMAdr7Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
+ A9WriteLMfp2, A9WriteLMfp2Hi,
+ A9WriteLMfp3, A9WriteLMfp3Hi,
+ A9WriteLMfp4, A9WriteLMfp4Hi,
+ A9WriteLMfp5, A9WriteLMfp5Hi,
+ A9WriteLMfp6, A9WriteLMfp6Hi,
+ A9WriteLMfp7, A9WriteLMfp7Hi]>,
+ SchedVar<A9LMAdr8Pred, [A9WriteLMfp1, A9WriteLMfp1Hi,
+ A9WriteLMfp2, A9WriteLMfp2Hi,
+ A9WriteLMfp3, A9WriteLMfp3Hi,
+ A9WriteLMfp4, A9WriteLMfp4Hi,
+ A9WriteLMfp5, A9WriteLMfp5Hi,
+ A9WriteLMfp6, A9WriteLMfp6Hi,
+ A9WriteLMfp7, A9WriteLMfp7Hi,
+ A9WriteLMfp8, A9WriteLMfp8Hi]>,
+ // For unknown LDMs, define the maximum number of writes, but only
+ // make the first two consume resources.
+ SchedVar<A9LMUnknownPred, [A9WriteLMfp1, A9WriteLMfp1Hi,
+ A9WriteLMfp2, A9WriteLMfp2Hi,
+ A9WriteLMfp3Hi, A9WriteLMfp3Hi,
+ A9WriteLMfp4Hi, A9WriteLMfp4Hi,
+ A9WriteLMfp5Hi, A9WriteLMfp5Hi,
+ A9WriteLMfp6Hi, A9WriteLMfp6Hi,
+ A9WriteLMfp7Hi, A9WriteLMfp7Hi,
+ A9WriteLMfp8Hi, A9WriteLMfp8Hi]>]> {
+ let Variadic = 1;
+}
+
+// Distinguish between our multiple MI-level forms of the same
+// VLDM/VSTM instructions.
+def A9PreRA : SchedPredicate<
+ "TargetRegisterInfo::isVirtualRegister(MI->getOperand(0).getReg())">;
+def A9PostRA : SchedPredicate<
+ "TargetRegisterInfo::isPhysicalRegister(MI->getOperand(0).getReg())">;
+
+// VLDM represents all destination registers as a single register
+// tuple, unlike LDM. So the number of write operands is not variadic.
+def A9WriteLMfp : SchedWriteVariant<[
+ SchedVar<A9PreRA, [A9WriteLMfpPreRA]>,
+ SchedVar<A9PostRA, [A9WriteLMfpPostRA]>]>;
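+// For example, before register allocation a VLDM's destination tuple is a
+// virtual register, so A9PreRA selects A9WriteLMfpPreRA; once the operand
+// has been rewritten to a physical register, A9PostRA selects the variadic
+// A9WriteLMfpPostRA form instead.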
+
+//===----------------------------------------------------------------------===//
+// Resources for other (non LDM/VLDM) Variants.
+
+// These mov immediate writers are unconditionally expanded with
+// additive latency.
+def A9WriteI2 : WriteSequence<[A9WriteI, A9WriteI]>;
+def A9WriteI2pc : WriteSequence<[A9WriteI, A9WriteI, A9WriteA]>;
+def A9WriteI2ld : WriteSequence<[A9WriteI, A9WriteI, A9WriteL]>;
+
+// Some ALU operations can read loaded integer values one cycle early.
+def A9ReadA : SchedReadAdvance<1,
+ [A9WriteL, A9WriteLHi, A9WriteLsi, A9WriteLb, A9WriteLbsi,
+ A9WriteL1, A9WriteL2, A9WriteL3, A9WriteL4,
+ A9WriteL5, A9WriteL6, A9WriteL7, A9WriteL8,
+ A9WriteL1Hi, A9WriteL2Hi, A9WriteL3Hi, A9WriteL4Hi,
+ A9WriteL5Hi, A9WriteL6Hi, A9WriteL7Hi, A9WriteL8Hi]>;
+
+// Read types for operands that are unconditionally read in cycle N
+// after the instruction issues; this decreases the producer's latency by N-1.
+def A9Read2 : SchedReadAdvance<1>;
+def A9Read3 : SchedReadAdvance<2>;
+def A9Read4 : SchedReadAdvance<3>;
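+// For example, pairing A9WriteM (latency 4) with an A9Read2 consumer models
+// an effective producer-to-consumer latency of 4 - 1 = 3 cycles, since the
+// operand is not read until the consumer's second cycle.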
+
+//===----------------------------------------------------------------------===//
+// Map itinerary classes to scheduler read/write resources per operand.
+//
+// For ARM, we piggyback scheduler resources on the Itinerary classes
+// to avoid perturbing the existing instruction definitions.
+
+// This table follows the ARM Cortex-A9 Technical Reference Manuals,
+// mostly in order.
+let SchedModel = CortexA9Model in {
+
+def :ItinRW<[A9WriteI], [IIC_iMOVi,IIC_iMOVr,IIC_iMOVsi,
+ IIC_iMVNi,IIC_iMVNsi,
+ IIC_iCMOVi,IIC_iCMOVr,IIC_iCMOVsi]>;
+def :ItinRW<[A9WriteI,A9ReadA],[IIC_iMVNr]>;
+def :ItinRW<[A9WriteIsr], [IIC_iMOVsr,IIC_iMVNsr,IIC_iCMOVsr]>;
+
+def :ItinRW<[A9WriteI2], [IIC_iMOVix2,IIC_iCMOVix2]>;
+def :ItinRW<[A9WriteI2pc], [IIC_iMOVix2addpc]>;
+def :ItinRW<[A9WriteI2ld], [IIC_iMOVix2ld]>;
+
+def :ItinRW<[A9WriteA], [IIC_iBITi,IIC_iBITr,IIC_iUNAr,IIC_iTSTi,IIC_iTSTr]>;
+def :ItinRW<[A9WriteA, A9ReadA], [IIC_iALUi, IIC_iCMPi, IIC_iCMPsi]>;
+def :ItinRW<[A9WriteA, A9ReadA, A9ReadA],[IIC_iALUr,IIC_iCMPr]>;
+def :ItinRW<[A9WriteAsi], [IIC_iBITsi,IIC_iUNAsi,IIC_iEXTr,IIC_iTSTsi]>;
+def :ItinRW<[A9WriteAsi, A9ReadA], [IIC_iALUsi]>;
+def :ItinRW<[A9WriteAsi, ReadDefault, A9ReadA], [IIC_iALUsir]>; // RSB
+def :ItinRW<[A9WriteAsr], [IIC_iBITsr,IIC_iTSTsr,IIC_iEXTAr,IIC_iEXTAsr]>;
+def :ItinRW<[A9WriteAsr, A9ReadA], [IIC_iALUsr,IIC_iCMPsr]>;
+
+// A9WriteMHi is ignored for MUL32 (it has only one def operand).
+def :ItinRW<[A9WriteM, A9WriteMHi], [IIC_iMUL32,IIC_iMAC32,
+ IIC_iMUL64,IIC_iMAC64]>;
+// FIXME: SMLALxx needs itin classes
+def :ItinRW<[A9WriteM16, A9WriteM16Hi], [IIC_iMUL16,IIC_iMAC16]>;
+
+// TODO: For floating-point ops, we model the pipeline forwarding
+// latencies here. WAW latencies are sometimes longer.
+
+def :ItinRW<[A9WriteFMov], [IIC_fpSTAT, IIC_fpMOVIS, IIC_fpMOVID, IIC_fpMOVSI,
+ IIC_fpUNA32, IIC_fpUNA64,
+ IIC_fpCMP32, IIC_fpCMP64]>;
+def :ItinRW<[A9WriteFMov, A9WriteFMov], [IIC_fpMOVDI]>;
+def :ItinRW<[A9WriteF], [IIC_fpCVTSD, IIC_fpCVTDS, IIC_fpCVTSH, IIC_fpCVTHS,
+ IIC_fpCVTIS, IIC_fpCVTID, IIC_fpCVTSI, IIC_fpCVTDI,
+ IIC_fpALU32, IIC_fpALU64]>;
+def :ItinRW<[A9WriteFMulS], [IIC_fpMUL32]>;
+def :ItinRW<[A9WriteFMulD], [IIC_fpMUL64]>;
+def :ItinRW<[A9WriteFMAS], [IIC_fpMAC32]>;
+def :ItinRW<[A9WriteFMAD], [IIC_fpMAC64]>;
+def :ItinRW<[A9WriteFDivS], [IIC_fpDIV32]>;
+def :ItinRW<[A9WriteFDivD], [IIC_fpDIV64]>;
+def :ItinRW<[A9WriteFSqrtS], [IIC_fpSQRT32]>;
+def :ItinRW<[A9WriteFSqrtD], [IIC_fpSQRT64]>;
+
+def :ItinRW<[A9WriteB], [IIC_Br]>;
+
+// A9 PLD is processed in a dedicated unit.
+def :ItinRW<[], [IIC_Preload]>;
+
+// Note: We must assume that loads are aligned, since the machine
+// model cannot know this statically and A9 ignores alignment hints.
+
+// A9WriteAdr consumes the AGU regardless of address writeback, but its
+// latency is only relevant for users of an updated address.
+def :ItinRW<[A9WriteL, A9WriteAdr], [IIC_iLoad_i,IIC_iLoad_r,
+ IIC_iLoad_iu,IIC_iLoad_ru]>;
+def :ItinRW<[A9WriteLsi, A9WriteAdr], [IIC_iLoad_si,IIC_iLoad_siu]>;
+def :ItinRW<[A9WriteLb, A9WriteAdr2], [IIC_iLoad_bh_i,IIC_iLoad_bh_r,
+ IIC_iLoad_bh_iu,IIC_iLoad_bh_ru]>;
+def :ItinRW<[A9WriteLbsi, A9WriteAdr2], [IIC_iLoad_bh_si,IIC_iLoad_bh_siu]>;
+def :ItinRW<[A9WriteL, A9WriteLHi, A9WriteAdr], [IIC_iLoad_d_i,IIC_iLoad_d_r,
+ IIC_iLoad_d_ru]>;
+// Store either has no def operands, or the one def for address writeback.
+def :ItinRW<[A9WriteAdr, A9WriteS], [IIC_iStore_i, IIC_iStore_r,
+ IIC_iStore_iu, IIC_iStore_ru,
+ IIC_iStore_d_i, IIC_iStore_d_r,
+ IIC_iStore_d_ru]>;
+def :ItinRW<[A9WriteAdr2, A9WriteS], [IIC_iStore_si, IIC_iStore_siu,
+ IIC_iStore_bh_i, IIC_iStore_bh_r,
+ IIC_iStore_bh_iu, IIC_iStore_bh_ru]>;
+def :ItinRW<[A9WriteAdr3, A9WriteS], [IIC_iStore_bh_si, IIC_iStore_bh_siu]>;
+
+// A9WriteLM will be expanded into a separate write for each def
+// operand. Address generation consumes resources, but A9WriteLMAdr
+// is listed after all def operands, so has no effective latency.
+//
+// Note: A9WriteLM expands into an even number of writes. The
+// actual number of def operands may be less by one.
+def :ItinRW<[A9WriteLM, A9WriteLMAdr, A9WriteIssue], [IIC_iLoad_m, IIC_iPop]>;
+
+// Load multiple with address writeback has an extra def operand in
+// front of the loaded registers.
+//
+// Reuse the load-multiple variants for store-multiple because the
+// resources are identical. For stores, only the address writeback
+// has a def operand, so the WriteL latencies are unused.
+def :ItinRW<[A9WriteLMAdr, A9WriteLM, A9WriteIssue], [IIC_iLoad_mu,
+ IIC_iStore_m,
+ IIC_iStore_mu]>;
+def :ItinRW<[A9WriteLM, A9WriteLMAdr, A9WriteB], [IIC_iLoad_mBr, IIC_iPop_Br]>;
+def :ItinRW<[A9WriteL, A9WriteAdr, A9WriteA], [IIC_iLoadiALU]>;
+
+def :ItinRW<[A9WriteLSfp, A9WriteAdr], [IIC_fpLoad32, IIC_fpLoad64]>;
+
+def :ItinRW<[A9WriteLMfp, A9WriteLMAdr], [IIC_fpLoad_m]>;
+def :ItinRW<[A9WriteLMAdr, A9WriteLMfp], [IIC_fpLoad_mu]>;
+def :ItinRW<[A9WriteAdr, A9WriteLSfp], [IIC_fpStore32, IIC_fpStore64,
+ IIC_fpStore_m, IIC_fpStore_mu]>;
+
+// Note: Unlike VLDM, VLD1 expects the writeback operand after the
+// normal writes.
+def :ItinRW<[A9WriteLfp1, A9WriteAdr1], [IIC_VLD1, IIC_VLD1u,
+ IIC_VLD1x2, IIC_VLD1x2u]>;
+def :ItinRW<[A9WriteLfp2, A9WriteAdr2], [IIC_VLD1x3, IIC_VLD1x3u,
+ IIC_VLD1x4, IIC_VLD1x4u,
+ IIC_VLD4dup, IIC_VLD4dupu]>;
+def :ItinRW<[A9WriteLfp1Mov, A9WriteAdr1], [IIC_VLD1dup, IIC_VLD1dupu,
+ IIC_VLD2, IIC_VLD2u,
+ IIC_VLD2dup, IIC_VLD2dupu]>;
+def :ItinRW<[A9WriteLfp2Mov, A9WriteAdr1], [IIC_VLD1ln, IIC_VLD1lnu,
+ IIC_VLD2x2, IIC_VLD2x2u,
+ IIC_VLD2ln, IIC_VLD2lnu]>;
+def :ItinRW<[A9WriteLfp3Mov, A9WriteAdr3], [IIC_VLD3, IIC_VLD3u,
+ IIC_VLD3dup, IIC_VLD3dupu]>;
+def :ItinRW<[A9WriteLfp4Mov, A9WriteAdr4], [IIC_VLD4, IIC_VLD4u,
+ IIC_VLD4ln, IIC_VLD4lnu]>;
+def :ItinRW<[A9WriteLfp5Mov, A9WriteAdr5], [IIC_VLD3ln, IIC_VLD3lnu]>;
+
+// Vector stores use similar resources to vector loads, so use the
+// same write types. The address write must be first for stores with
+// address writeback.
+def :ItinRW<[A9WriteAdr1, A9WriteLfp1], [IIC_VST1, IIC_VST1u,
+ IIC_VST1x2, IIC_VST1x2u,
+ IIC_VST1ln, IIC_VST1lnu,
+ IIC_VST2, IIC_VST2u,
+ IIC_VST2x2, IIC_VST2x2u,
+ IIC_VST2ln, IIC_VST2lnu]>;
+def :ItinRW<[A9WriteAdr2, A9WriteLfp2], [IIC_VST1x3, IIC_VST1x3u,
+ IIC_VST1x4, IIC_VST1x4u,
+ IIC_VST3, IIC_VST3u,
+ IIC_VST3ln, IIC_VST3lnu,
+ IIC_VST4, IIC_VST4u,
+ IIC_VST4ln, IIC_VST4lnu]>;
+
+// NEON moves.
+def :ItinRW<[A9WriteV2], [IIC_VMOVSI, IIC_VMOVDI, IIC_VMOVD, IIC_VMOVQ]>;
+def :ItinRW<[A9WriteV1], [IIC_VMOV, IIC_VMOVIS, IIC_VMOVID]>;
+def :ItinRW<[A9WriteV3], [IIC_VMOVISL, IIC_VMOVN]>;
+
+// NEON integer arithmetic
+//
+// VADD/VAND/VORR/VEOR/VBIC/VORN/VBIT/VBIF/VBSL
+def :ItinRW<[A9WriteV3, A9Read2, A9Read2], [IIC_VBINiD, IIC_VBINiQ]>;
+// VSUB/VMVN/VCLSD/VCLZD/VCNTD
+def :ItinRW<[A9WriteV3, A9Read2], [IIC_VSUBiD, IIC_VSUBiQ, IIC_VCNTiD]>;
+// VADDL/VSUBL/VNEG are mapped later under IIC_VSHLi.
+// ...
+// VHADD/VRHADD/VQADD/VTST/VADH/VRADH
+def :ItinRW<[A9WriteV4, A9Read2, A9Read2], [IIC_VBINi4D, IIC_VBINi4Q]>;
+// VSBH/VRSBH/VHSUB/VQSUB/VABD/VCEQ/VCGE/VCGT/VMAX/VMIN/VPMAX/VPMIN/VABDL
+def :ItinRW<[A9WriteV4, A9Read2], [IIC_VSUBi4D, IIC_VSUBi4Q]>;
+// VQNEG/VQABS
+def :ItinRW<[A9WriteV4], [IIC_VQUNAiD, IIC_VQUNAiQ]>;
+// VABS
+def :ItinRW<[A9WriteV4, A9Read2], [IIC_VUNAiD, IIC_VUNAiQ]>;
+// VPADD/VPADDL are mapped later under IIC_VSHLi.
+// ...
+// VCLSQ/VCLZQ/VCNTQ take two cycles.
+def :ItinRW<[A9Write2V4, A9Read3], [IIC_VCNTiQ]>;
+// VMOVimm/VMVNimm/VORRimm/VBICimm
+def :ItinRW<[A9WriteV3], [IIC_VMOVImm]>;
+def :ItinRW<[A9WriteV6, A9Read3, A9Read2], [IIC_VABAD, IIC_VABAQ]>;
+def :ItinRW<[A9WriteV6, A9Read3], [IIC_VPALiD, IIC_VPALiQ]>;
+
+// NEON integer multiply
+//
+// Note: these don't quite match the timing docs, but they do match
+// the original A9 itinerary.
+def :ItinRW<[A9WriteV6, A9Read2, A9Read2], [IIC_VMULi16D]>;
+def :ItinRW<[A9WriteV7, A9Read2, A9Read2], [IIC_VMULi16Q]>;
+def :ItinRW<[A9Write2V7, A9Read2], [IIC_VMULi32D]>;
+def :ItinRW<[A9Write2V9, A9Read2], [IIC_VMULi32Q]>;
+def :ItinRW<[A9WriteV6, A9Read3, A9Read2, A9Read2], [IIC_VMACi16D]>;
+def :ItinRW<[A9WriteV7, A9Read3, A9Read2, A9Read2], [IIC_VMACi16Q]>;
+def :ItinRW<[A9Write2V7, A9Read3, A9Read2], [IIC_VMACi32D]>;
+def :ItinRW<[A9Write2V9, A9Read3, A9Read2], [IIC_VMACi32Q]>;
+
+// NEON integer shift
+// TODO: Q,Q,Q shifts should actually reserve FP for 2 cycles.
+def :ItinRW<[A9WriteV3], [IIC_VSHLiD, IIC_VSHLiQ]>;
+def :ItinRW<[A9WriteV4], [IIC_VSHLi4D, IIC_VSHLi4Q]>;
+
+// NEON permute
+def :ItinRW<[A9WriteV2], [IIC_VPERMD, IIC_VPERMQ, IIC_VEXTD]>;
+def :ItinRW<[A9WriteV3, A9WriteV4, ReadDefault, A9Read2],
+ [IIC_VPERMQ3, IIC_VEXTQ]>;
+def :ItinRW<[A9WriteV3, A9Read2], [IIC_VTB1]>;
+def :ItinRW<[A9WriteV3, A9Read2, A9Read2], [IIC_VTB2]>;
+def :ItinRW<[A9WriteV4, A9Read2, A9Read2, A9Read3], [IIC_VTB3]>;
+def :ItinRW<[A9WriteV4, A9Read2, A9Read2, A9Read3, A9Read3], [IIC_VTB4]>;
+def :ItinRW<[A9WriteV3, ReadDefault, A9Read2], [IIC_VTBX1]>;
+def :ItinRW<[A9WriteV3, ReadDefault, A9Read2, A9Read2], [IIC_VTBX2]>;
+def :ItinRW<[A9WriteV4, ReadDefault, A9Read2, A9Read2, A9Read3], [IIC_VTBX3]>;
+def :ItinRW<[A9WriteV4, ReadDefault, A9Read2, A9Read2, A9Read3, A9Read3],
+ [IIC_VTBX4]>;
+// NEON floating-point
+def :ItinRW<[A9WriteV5, A9Read2, A9Read2], [IIC_VBIND]>;
+def :ItinRW<[A9WriteV6, A9Read2, A9Read2], [IIC_VBINQ]>;
+def :ItinRW<[A9WriteV5, A9Read2], [IIC_VUNAD, IIC_VFMULD]>;
+def :ItinRW<[A9WriteV6, A9Read2], [IIC_VUNAQ, IIC_VFMULQ]>;
+def :ItinRW<[A9WriteV9, A9Read3, A9Read2], [IIC_VMACD, IIC_VFMACD]>;
+def :ItinRW<[A9WriteV10, A9Read3, A9Read2], [IIC_VMACQ, IIC_VFMACQ]>;
+def :ItinRW<[A9WriteV9, A9Read2, A9Read2], [IIC_VRECSD]>;
+def :ItinRW<[A9WriteV10, A9Read2, A9Read2], [IIC_VRECSQ]>;
+} // SchedModel = CortexA9Model
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td b/contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td
new file mode 100644
index 0000000..e9bc3e0
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td
@@ -0,0 +1,1085 @@
+//=- ARMScheduleSwift.td - Swift Scheduling Definitions -*- tablegen -*----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the itinerary class data for the Swift processor.
+//
+//===----------------------------------------------------------------------===//
+
+// ===---------------------------------------------------------------------===//
+// This section contains legacy support for itineraries. This is
+// required until SD and PostRA schedulers are replaced by MachineScheduler.
+
+def SW_DIS0 : FuncUnit;
+def SW_DIS1 : FuncUnit;
+def SW_DIS2 : FuncUnit;
+
+def SW_ALU0 : FuncUnit;
+def SW_ALU1 : FuncUnit;
+def SW_LS : FuncUnit;
+def SW_IDIV : FuncUnit;
+def SW_FDIV : FuncUnit;
+
+// FIXME: Need bypasses.
+// FIXME: Model the multiple stages of IIC_iMOVix2, IIC_iMOVix2addpc, and
+// IIC_iMOVix2ld better.
+// FIXME: Model the special immediate shifts that are not microcoded.
+// FIXME: Do we need to model the fact that uses of r15 in a micro-op force it
+// to issue on pipe 1?
+// FIXME: Model the pipelined behavior of CMP / TST instructions.
+// FIXME: Better model the microcode stages of multiply instructions, especially
+// conditional variants.
+// FIXME: Add preload instruction when it is documented.
+// FIXME: Model non-pipelined nature of FP div / sqrt unit.
+
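+// Each InstrItinData below pairs an itinerary class with the dispatch and
+// execution stages it occupies, followed by a list of operand cycles: one
+// entry per def operand (result latency), then one per use operand (read
+// cycle). Entries marked "dynamic uops" pass -1 as the micro-op count so
+// the number of micro-ops is resolved from the instruction dynamically.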
+def SwiftItineraries : ProcessorItineraries<
+ [SW_DIS0, SW_DIS1, SW_DIS2, SW_ALU0, SW_ALU1, SW_LS, SW_IDIV, SW_FDIV], [], [
+ //
+ // Move instructions, unconditional
+ InstrItinData<IIC_iMOVi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iMOVr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iMOVsi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iMOVsr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iMOVix2 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2]>,
+ InstrItinData<IIC_iMOVix2addpc,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [3]>,
+ InstrItinData<IIC_iMOVix2ld,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>,
+ InstrStage<1, [SW_LS]>],
+ [5]>,
+ //
+ // MVN instructions
+ InstrItinData<IIC_iMVNi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iMVNr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iMVNsi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iMVNsr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ //
+ // No operand cycles
+ InstrItinData<IIC_iALUx , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>]>,
+ //
+ // Binary Instructions that produce a result
+ InstrItinData<IIC_iALUi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iALUr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_iALUsi, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_iALUsir,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_iALUsr, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1, 1]>,
+ //
+ // Bitwise Instructions that produce a result
+ InstrItinData<IIC_iBITi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iBITr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_iBITsi, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_iBITsr, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1, 1]>,
+ //
+ // Unary Instructions that produce a result
+
+ // CLZ, RBIT, etc.
+ InstrItinData<IIC_iUNAr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+
+ // BFC, BFI, UBFX, SBFX
+ InstrItinData<IIC_iUNAsi, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1]>,
+
+ //
+ // Zero and sign extension instructions
+ InstrItinData<IIC_iEXTr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iEXTAr, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_iEXTAsr,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1, 1, 1]>,
+ //
+ // Compare instructions
+ InstrItinData<IIC_iCMPi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iCMPr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iCMPsi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<2, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iCMPsr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<2, [SW_ALU0, SW_ALU1]>],
+ [1, 1, 1]>,
+ //
+ // Test instructions
+ InstrItinData<IIC_iTSTi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iTSTr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iTSTsi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<2, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iTSTsr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<2, [SW_ALU0, SW_ALU1]>],
+ [1, 1, 1]>,
+ //
+ // Move instructions, conditional
+ // FIXME: Correctly model the extra input dep on the destination.
+ InstrItinData<IIC_iCMOVi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iCMOVr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iCMOVsi , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1, 1]>,
+ InstrItinData<IIC_iCMOVsr , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_iCMOVix2, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2]>,
+
+ // Integer multiply pipeline
+ //
+ InstrItinData<IIC_iMUL16 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_iMAC16 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [3, 1, 1, 1]>,
+ InstrItinData<IIC_iMUL32 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ InstrItinData<IIC_iMAC32 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1, 1]>,
+ InstrItinData<IIC_iMUL64 , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0], 1>,
+ InstrStage<1, [SW_ALU0], 3>,
+ InstrStage<1, [SW_ALU0]>],
+ [5, 5, 1, 1]>,
+ InstrItinData<IIC_iMAC64 , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0], 1>,
+ InstrStage<1, [SW_ALU0], 1>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 3>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [5, 6, 1, 1]>,
+ //
+ // Integer divide
+ InstrItinData<IIC_iDIV , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0], 0>,
+ InstrStage<14, [SW_IDIV]>],
+ [14, 1, 1]>,
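+  // Note: the InstrStage<14, [SW_IDIV]> above reserves the divider for 14
+  // cycles, so back-to-back divides serialize on SW_IDIV even though the
+  // dispatch slots and SW_ALU0 are occupied for only a single cycle.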
+
+ // Integer load pipeline
+  // FIXME: The timings are rough approximations.
+ //
+ // Immediate offset
+ InstrItinData<IIC_iLoad_i , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1]>,
+ InstrItinData<IIC_iLoad_bh_i, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1]>,
+ InstrItinData<IIC_iLoad_d_i , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_LS], 1>,
+ InstrStage<1, [SW_LS]>],
+ [3, 4, 1]>,
+ //
+ // Register offset
+ InstrItinData<IIC_iLoad_r , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_r, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_iLoad_d_r , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_LS], 1>,
+ InstrStage<1, [SW_LS], 3>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [3, 4, 1, 1]>,
+ //
+ // Scaled register offset
+ InstrItinData<IIC_iLoad_si , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 2>,
+ InstrStage<1, [SW_LS]>],
+ [5, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_si,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 2>,
+ InstrStage<1, [SW_LS]>],
+ [5, 1, 1]>,
+ //
+ // Immediate offset with update
+ InstrItinData<IIC_iLoad_iu , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_iu,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1, 1]>,
+ //
+ // Register offset with update
+ InstrItinData<IIC_iLoad_ru , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0], 1>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_ru,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0], 1>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1, 1, 1]>,
+ InstrItinData<IIC_iLoad_d_ru, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 0>,
+ InstrStage<1, [SW_LS], 3>,
+ InstrStage<1, [SW_LS], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [3, 4, 1, 1]>,
+ //
+ // Scaled register offset with update
+ InstrItinData<IIC_iLoad_siu , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 2>,
+ InstrStage<1, [SW_LS], 3>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [5, 3, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_siu,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 2>,
+ InstrStage<1, [SW_LS], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [5, 3, 1, 1]>,
+ //
+ // Load multiple, def is the 5th operand.
+ // FIXME: This assumes 3 to 4 registers.
+ InstrItinData<IIC_iLoad_m , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1, 1, 3], [], -1>, // dynamic uops
+
+ //
+ // Load multiple + update, defs are the 1st and 5th operands.
+ InstrItinData<IIC_iLoad_mu , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 0>,
+ InstrStage<1, [SW_LS], 3>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1, 1, 3], [], -1>, // dynamic uops
+ //
+ // Load multiple plus branch
+ InstrItinData<IIC_iLoad_mBr, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1, 1, 3], [], -1>, // dynamic uops
+ //
+ // Pop, def is the 3rd operand.
+ InstrItinData<IIC_iPop , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 3], [], -1>, // dynamic uops
+ //
+ // Pop + branch, def is the 3rd operand.
+ InstrItinData<IIC_iPop_Br, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 3], [], -1>, // dynamic uops
+
+ //
+ // iLoadi + iALUr for t2LDRpci_pic.
+ InstrItinData<IIC_iLoadiALU, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS], 3>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [4, 1]>,
+
+ // Integer store pipeline
+  //
+ // Immediate offset
+ InstrItinData<IIC_iStore_i , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1]>,
+ InstrItinData<IIC_iStore_bh_i,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1]>,
+ InstrItinData<IIC_iStore_d_i, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_LS], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1]>,
+ //
+ // Register offset
+ InstrItinData<IIC_iStore_r , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_r,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_iStore_d_r, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_LS], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1]>,
+ //
+ // Scaled register offset
+ InstrItinData<IIC_iStore_si , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 2>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_si,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 2>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1]>,
+ //
+ // Immediate offset with update
+ InstrItinData<IIC_iStore_iu , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_iu,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1]>,
+ //
+ // Register offset with update
+ InstrItinData<IIC_iStore_ru , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_ru,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1, 1]>,
+ InstrItinData<IIC_iStore_d_ru, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1, 1]>,
+ //
+ // Scaled register offset with update
+ InstrItinData<IIC_iStore_siu, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 2>,
+ InstrStage<1, [SW_LS], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>],
+ [3, 1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_siu, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 2>,
+ InstrStage<1, [SW_LS], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>],
+ [3, 1, 1, 1]>,
+ //
+ // Store multiple
+ InstrItinData<IIC_iStore_m , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS], 1>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS], 1>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [], [], -1>, // dynamic uops
+ //
+ // Store multiple + update
+ InstrItinData<IIC_iStore_mu, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS], 1>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS], 1>,
+ InstrStage<1, [SW_ALU0, SW_ALU1], 1>,
+ InstrStage<1, [SW_LS]>],
+ [2], [], -1>, // dynamic uops
+
+ //
+ // Preload
+ InstrItinData<IIC_Preload, [InstrStage<1, [SW_DIS0], 0>], [1, 1]>,
+
+ // Branch
+ //
+  // No delay slots, so the latency of a branch is unimportant.
+ InstrItinData<IIC_Br , [InstrStage<1, [SW_DIS0], 0>]>,
+
+ // FP Special Register to Integer Register File Move
+ InstrItinData<IIC_fpSTAT , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [1]>,
+ //
+ // Single-precision FP Unary
+ //
+ // Most floating-point moves get issued on ALU0.
+ InstrItinData<IIC_fpUNA32 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1]>,
+ //
+ // Double-precision FP Unary
+ InstrItinData<IIC_fpUNA64 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1]>,
+
+ //
+ // Single-precision FP Compare
+ InstrItinData<IIC_fpCMP32 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [1, 1]>,
+ //
+ // Double-precision FP Compare
+ InstrItinData<IIC_fpCMP64 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [1, 1]>,
+ //
+ // Single to Double FP Convert
+ InstrItinData<IIC_fpCVTSD , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1]>,
+ //
+ // Double to Single FP Convert
+ InstrItinData<IIC_fpCVTDS , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1]>,
+
+ //
+ // Single to Half FP Convert
+ InstrItinData<IIC_fpCVTSH , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU1], 4>,
+ InstrStage<1, [SW_ALU1]>],
+ [6, 1]>,
+ //
+ // Half to Single FP Convert
+ InstrItinData<IIC_fpCVTHS , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1]>,
+
+ //
+ // Single-Precision FP to Integer Convert
+ InstrItinData<IIC_fpCVTSI , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1]>,
+ //
+ // Double-Precision FP to Integer Convert
+ InstrItinData<IIC_fpCVTDI , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1]>,
+ //
+ // Integer to Single-Precision FP Convert
+ InstrItinData<IIC_fpCVTIS , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1]>,
+ //
+ // Integer to Double-Precision FP Convert
+ InstrItinData<IIC_fpCVTID , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1]>,
+ //
+ // Single-precision FP ALU
+ InstrItinData<IIC_fpALU32 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Double-precision FP ALU
+ InstrItinData<IIC_fpALU64 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Single-precision FP Multiply
+ InstrItinData<IIC_fpMUL32 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1]>,
+ //
+ // Double-precision FP Multiply
+ InstrItinData<IIC_fpMUL64 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [6, 1, 1]>,
+ //
+ // Single-precision FP MAC
+ InstrItinData<IIC_fpMAC32 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 1]>,
+ //
+ // Double-precision FP MAC
+ InstrItinData<IIC_fpMAC64 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [12, 1, 1]>,
+ //
+ // Single-precision Fused FP MAC
+ InstrItinData<IIC_fpFMAC32, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 1]>,
+ //
+ // Double-precision Fused FP MAC
+ InstrItinData<IIC_fpFMAC64, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [12, 1, 1]>,
+ //
+ // Single-precision FP DIV
+ InstrItinData<IIC_fpDIV32 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 0>,
+ InstrStage<15, [SW_FDIV]>],
+ [17, 1, 1]>,
+ //
+ // Double-precision FP DIV
+ InstrItinData<IIC_fpDIV64 , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 0>,
+ InstrStage<30, [SW_FDIV]>],
+ [32, 1, 1]>,
+ //
+ // Single-precision FP SQRT
+ InstrItinData<IIC_fpSQRT32, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 0>,
+ InstrStage<15, [SW_FDIV]>],
+ [17, 1]>,
+ //
+ // Double-precision FP SQRT
+ InstrItinData<IIC_fpSQRT64, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 0>,
+ InstrStage<30, [SW_FDIV]>],
+ [32, 1, 1]>,
+
+ //
+ // Integer to Single-precision Move
+ InstrItinData<IIC_fpMOVIS, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_LS], 4>,
+ InstrStage<1, [SW_ALU0]>],
+ [6, 1]>,
+ //
+ // Integer to Double-precision Move
+ InstrItinData<IIC_fpMOVID, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [4, 1]>,
+ //
+ // Single-precision to Integer Move
+ InstrItinData<IIC_fpMOVSI, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1]>,
+ //
+ // Double-precision to Integer Move
+ InstrItinData<IIC_fpMOVDI, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_LS], 3>,
+ InstrStage<1, [SW_LS]>],
+ [3, 4, 1]>,
+ //
+ // Single-precision FP Load
+ InstrItinData<IIC_fpLoad32, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [4, 1]>,
+ //
+ // Double-precision FP Load
+ InstrItinData<IIC_fpLoad64, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [4, 1]>,
+ //
+ // FP Load Multiple
+ // FIXME: Assumes a single Q register.
+ InstrItinData<IIC_fpLoad_m, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1, 4], [], -1>, // dynamic uops
+ //
+ // FP Load Multiple + update
+ // FIXME: Assumes a single Q register.
+ InstrItinData<IIC_fpLoad_mu,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_LS], 4>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1, 1, 4], [], -1>, // dynamic uops
+ //
+ // Single-precision FP Store
+ InstrItinData<IIC_fpStore32,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1]>,
+ //
+ // Double-precision FP Store
+ InstrItinData<IIC_fpStore64,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1]>,
+ //
+ // FP Store Multiple
+ // FIXME: Assumes a single Q register.
+ InstrItinData<IIC_fpStore_m,[InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [1, 1, 1], [], -1>, // dynamic uops
+ //
+ // FP Store Multiple + update
+ // FIXME: Assumes a single Q register.
+ InstrItinData<IIC_fpStore_mu,[InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_LS], 4>,
+ InstrStage<1, [SW_ALU0, SW_ALU1]>],
+ [2, 1, 1, 1], [], -1>, // dynamic uops
+ // NEON
+ //
+ // Double-register Integer Unary
+ InstrItinData<IIC_VUNAiD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1]>,
+ //
+ // Quad-register Integer Unary
+ InstrItinData<IIC_VUNAiQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1]>,
+ //
+ // Double-register Integer Q-Unary
+ InstrItinData<IIC_VQUNAiD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1]>,
+ //
+  // Quad-register Integer Q-Unary
+ InstrItinData<IIC_VQUNAiQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1]>,
+ //
+ // Double-register Integer Binary
+ InstrItinData<IIC_VBINiD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Quad-register Integer Binary
+ InstrItinData<IIC_VBINiQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Double-register Integer Subtract
+ InstrItinData<IIC_VSUBiD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Quad-register Integer Subtract
+ InstrItinData<IIC_VSUBiQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Double-register Integer Shift
+ InstrItinData<IIC_VSHLiD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Quad-register Integer Shift
+ InstrItinData<IIC_VSHLiQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Double-register Integer Shift (4 cycle)
+ InstrItinData<IIC_VSHLi4D, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ //
+ // Quad-register Integer Shift (4 cycle)
+ InstrItinData<IIC_VSHLi4Q, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ //
+ // Double-register Integer Binary (4 cycle)
+ InstrItinData<IIC_VBINi4D, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ //
+ // Quad-register Integer Binary (4 cycle)
+ InstrItinData<IIC_VBINi4Q, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ //
+ // Double-register Integer Subtract (4 cycle)
+ InstrItinData<IIC_VSUBi4D, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ //
+ // Quad-register Integer Subtract (4 cycle)
+ InstrItinData<IIC_VSUBi4Q, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+
+ //
+ // Double-register Integer Count
+ InstrItinData<IIC_VCNTiD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Quad-register Integer Count
+ InstrItinData<IIC_VCNTiQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1, 1]>,
+ //
+ // Double-register Absolute Difference and Accumulate
+ InstrItinData<IIC_VABAD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1, 1]>,
+ //
+ // Quad-register Absolute Difference and Accumulate
+ InstrItinData<IIC_VABAQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1, 1]>,
+ //
+ // Double-register Integer Pair Add Long
+ InstrItinData<IIC_VPALiD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ //
+ // Quad-register Integer Pair Add Long
+ InstrItinData<IIC_VPALiQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+
+ //
+ // Double-register Integer Multiply (.8, .16)
+ InstrItinData<IIC_VMULi16D, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1]>,
+ //
+ // Quad-register Integer Multiply (.8, .16)
+ InstrItinData<IIC_VMULi16Q, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1]>,
+
+ //
+ // Double-register Integer Multiply (.32)
+ InstrItinData<IIC_VMULi32D, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1]>,
+ //
+ // Quad-register Integer Multiply (.32)
+ InstrItinData<IIC_VMULi32Q, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1]>,
+ //
+ // Double-register Integer Multiply-Accumulate (.8, .16)
+ InstrItinData<IIC_VMACi16D, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1, 1]>,
+ //
+ // Double-register Integer Multiply-Accumulate (.32)
+ InstrItinData<IIC_VMACi32D, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1, 1]>,
+ //
+ // Quad-register Integer Multiply-Accumulate (.8, .16)
+ InstrItinData<IIC_VMACi16Q, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1, 1]>,
+ //
+ // Quad-register Integer Multiply-Accumulate (.32)
+ InstrItinData<IIC_VMACi32Q, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1, 1]>,
+
+ //
+ // Move
+ InstrItinData<IIC_VMOV, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1]>,
+ //
+ // Move Immediate
+ InstrItinData<IIC_VMOVImm, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2]>,
+ //
+ // Double-register Permute Move
+ InstrItinData<IIC_VMOVD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [2, 1]>,
+ //
+ // Quad-register Permute Move
+ InstrItinData<IIC_VMOVQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [2, 1]>,
+ //
+ // Integer to Single-precision Move
+ InstrItinData<IIC_VMOVIS , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_LS], 4>,
+ InstrStage<1, [SW_ALU0]>],
+ [6, 1]>,
+ //
+ // Integer to Double-precision Move
+ InstrItinData<IIC_VMOVID , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [4, 1, 1]>,
+ //
+ // Single-precision to Integer Move
+ InstrItinData<IIC_VMOVSI , [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_LS]>],
+ [3, 1]>,
+ //
+ // Double-precision to Integer Move
+ InstrItinData<IIC_VMOVDI , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_LS], 3>,
+ InstrStage<1, [SW_LS]>],
+ [3, 4, 1]>,
+ //
+ // Integer to Lane Move
+ // FIXME: I think this is correct, but it is not clear from the tuning guide.
+ InstrItinData<IIC_VMOVISL , [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_LS], 4>,
+ InstrStage<1, [SW_ALU0]>],
+ [6, 1]>,
+
+ //
+ // Vector narrow move
+ InstrItinData<IIC_VMOVN, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [2, 1]>,
+ //
+ // Double-register FP Unary
+  // FIXME: VRECPE / VRSQRTE have a longer latency than VABS, which is used
+  // here, and they issue on a different pipeline.
+ InstrItinData<IIC_VUNAD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1]>,
+ //
+ // Quad-register FP Unary
+  // FIXME: VRECPE / VRSQRTE have a longer latency than VABS, which is used
+  // here, and they issue on a different pipeline.
+ InstrItinData<IIC_VUNAQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [2, 1]>,
+ //
+ // Double-register FP Binary
+ // FIXME: We're using this itin for many instructions.
+ InstrItinData<IIC_VBIND, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+
+ //
+ // VPADD, etc.
+ InstrItinData<IIC_VPBIND, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ //
+ // Double-register FP VMUL
+ InstrItinData<IIC_VFMULD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1]>,
+ //
+ // Quad-register FP Binary
+ InstrItinData<IIC_VBINQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU0]>],
+ [4, 1, 1]>,
+ //
+ // Quad-register FP VMUL
+ InstrItinData<IIC_VFMULQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 1]>,
+ //
+  // Double-register FP Multiply-Accumulate
+ InstrItinData<IIC_VMACD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 1]>,
+ //
+ // Quad-register FP Multiply-Accumulate
+ InstrItinData<IIC_VMACQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 1]>,
+ //
+ // Double-register Fused FP Multiply-Accumulate
+ InstrItinData<IIC_VFMACD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 1]>,
+ //
+ // Quad-register Fused FP Multiply-Accumulate
+ InstrItinData<IIC_VFMACQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 1]>,
+ //
+ // Double-register Reciprocal Step
+ InstrItinData<IIC_VRECSD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 1]>,
+ //
+ // Quad-register Reciprocal Step
+ InstrItinData<IIC_VRECSQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 1]>,
+ //
+ // Double-register Permute
+ // FIXME: The latencies are unclear from the documentation.
+ InstrItinData<IIC_VPERMD, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [3, 4, 3, 4]>,
+ //
+ // Quad-register Permute
+ // FIXME: The latencies are unclear from the documentation.
+ InstrItinData<IIC_VPERMQ, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [3, 4, 3, 4]>,
+ //
+ // Quad-register Permute (3 cycle issue on A9)
+ InstrItinData<IIC_VPERMQ3, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [3, 4, 3, 4]>,
+
+ //
+ // Double-register VEXT
+ InstrItinData<IIC_VEXTD, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [2, 1, 1]>,
+ //
+ // Quad-register VEXT
+ InstrItinData<IIC_VEXTQ, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [2, 1, 1]>,
+ //
+ // VTB
+ InstrItinData<IIC_VTB1, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_VTB2, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 3, 3]>,
+ InstrItinData<IIC_VTB3, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [6, 1, 3, 5, 5]>,
+ InstrItinData<IIC_VTB4, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 3, 5, 7, 7]>,
+ //
+ // VTBX
+ InstrItinData<IIC_VTBX1, [InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_VTBX2, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [4, 1, 3, 3]>,
+ InstrItinData<IIC_VTBX3, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [6, 1, 3, 5, 5]>,
+ InstrItinData<IIC_VTBX4, [InstrStage<1, [SW_DIS0], 0>,
+ InstrStage<1, [SW_DIS1], 0>,
+ InstrStage<1, [SW_DIS2], 0>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1], 2>,
+ InstrStage<1, [SW_ALU1]>],
+ [8, 1, 3, 5, 7, 7]>
+]>;
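Each InstrItinData entry above pairs a list of pipeline stages with a list of per-operand cycles. Reading IIC_VMACi32Q as an example: the micro-op occupies any one of the three dispatch slots (SW_DIS0-2) for zero cycles, then spends one cycle on SW_ALU1, and the operand list [4, 1, 1, 1] says the result (operand 0) is ready four cycles after issue while the three sources are read in cycle 1. A minimal sketch of reading that back through the real InstrItineraryData API follows; the helper function itself is invented for illustration:

    // Returns the cycle at which an instruction's result becomes available.
    unsigned resultLatency(const llvm::InstrItineraryData &Itins,
                           unsigned SchedClass) {
      // Operand 0 is the definition; getOperandCycle returns -1 when the
      // itinerary carries no data for that operand.
      int Cycle = Itins.getOperandCycle(SchedClass, 0);
      return Cycle < 0 ? 1 : unsigned(Cycle);
    }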
+
+// ===---------------------------------------------------------------------===//
+// The following definitions describe the simple machine model which
+// will replace itineraries.
+
+// Swift machine model for scheduling and other instruction cost heuristics.
+def SwiftModel : SchedMachineModel {
+ let IssueWidth = 3; // 3 micro-ops are dispatched per cycle.
+ let MinLatency = 0; // Data dependencies are allowed within dispatch groups.
+ let LoadLatency = 3;
+
+ let Itineraries = SwiftItineraries;
+}
+
+// TODO: Add Swift processor and scheduler resources.
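The three let bindings are per-CPU scheduling knobs that generic code reads back from the generated machine model. A hedged sketch, assuming the MCSchedModel members generated from this definition keep the field names used here; the helper is illustrative, not part of the patch:

    // When does a load issued at IssueCycle produce its value on Swift?
    unsigned loadResultCycle(const llvm::MCSchedModel &SM,
                             unsigned IssueCycle) {
      // Swift dispatches up to IssueWidth (3) micro-ops per cycle and makes
      // a load's result available LoadLatency (3) cycles after issue.
      return IssueCycle + SM.LoadLatency;
    }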
diff --git a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index 31d5d38..b33b3c9 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -155,7 +155,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
TargetLowering::ArgListEntry Entry;
// First argument: data pointer
- Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
+ Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
Entry.Node = Dst;
Entry.Ty = IntPtrTy;
Args.push_back(Entry);
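The getTargetData() to getDataLayout() switch follows the LLVM-wide rename of TargetData to DataLayout; the layout queries themselves are unchanged. A minimal sketch of the renamed API as used here, with Ctx standing in for the surrounding *DAG.getContext():

    const llvm::DataLayout *DL = TLI.getDataLayout();
    llvm::Type *IntPtrTy = DL->getIntPtrType(Ctx); // integer as wide as a pointer
    uint64_t Bytes = DL->getTypeAllocSize(IntPtrTy); // 4 on 32-bit ARM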
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 4762854..bcc9db4 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -13,8 +13,9 @@
#include "ARMSubtarget.h"
#include "ARMBaseRegisterInfo.h"
+#include "ARMBaseInstrInfo.h"
#include "llvm/GlobalValue.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#define GET_SUBTARGETINFO_TARGET_DESC
@@ -31,6 +32,10 @@ static cl::opt<bool>
DarwinUseMOVT("arm-darwin-use-movt", cl::init(true), cl::Hidden);
static cl::opt<bool>
+UseFusedMulOps("arm-use-mulops",
+ cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
StrictAlign("arm-strict-align", cl::Hidden,
cl::desc("Disallow all unaligned memory accesses"));
@@ -49,6 +54,7 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
, HasVFPv4(false)
, HasNEON(false)
, UseNEONForSinglePrecisionFP(false)
+ , UseMulOps(UseFusedMulOps)
, SlowFPVMLx(false)
, HasVMLxForwarding(false)
, SlowFPBrcc(false)
@@ -63,6 +69,7 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
, HasFP16(false)
, HasD16(false)
, HasHardwareDivide(false)
+ , HasHardwareDivideInARM(false)
, HasT2ExtractPack(false)
, HasDataBarrier(false)
, Pref32BitThumb(false)
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
index b394061..8e6b650 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -30,7 +30,7 @@ class StringRef;
class ARMSubtarget : public ARMGenSubtargetInfo {
protected:
enum ARMProcFamilyEnum {
- Others, CortexA8, CortexA9
+ Others, CortexA8, CortexA9, CortexA15, Swift
};
/// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
@@ -57,6 +57,10 @@ protected:
/// determine if NEON should actually be used.
bool UseNEONForSinglePrecisionFP;
+ /// UseMulOps - True if non-microcoded fused integer multiply-add and
+ /// multiply-subtract instructions should be used.
+ bool UseMulOps;
+
/// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates
/// whether the FP VML[AS] instructions are slow (if so, don't use them).
bool SlowFPVMLx;
@@ -107,6 +111,9 @@ protected:
/// HasHardwareDivide - True if subtarget supports [su]div
bool HasHardwareDivide;
+ /// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode
+ bool HasHardwareDivideInARM;
+
/// HasT2ExtractPack - True if subtarget supports thumb2 extract/pack
/// instructions.
bool HasT2ExtractPack;
@@ -199,7 +206,10 @@ protected:
bool isCortexA8() const { return ARMProcFamily == CortexA8; }
bool isCortexA9() const { return ARMProcFamily == CortexA9; }
+ bool isCortexA15() const { return ARMProcFamily == CortexA15; }
+ bool isSwift() const { return ARMProcFamily == Swift; }
bool isCortexM3() const { return CPUString == "cortex-m3"; }
+ bool isLikeA9() const { return isCortexA9() || isCortexA15(); }
bool hasARMOps() const { return !NoARM; }
@@ -211,8 +221,10 @@ protected:
return hasNEON() && UseNEONForSinglePrecisionFP; }
bool hasDivide() const { return HasHardwareDivide; }
+ bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
bool hasT2ExtractPack() const { return HasT2ExtractPack; }
bool hasDataBarrier() const { return HasDataBarrier; }
+ bool useMulOps() const { return UseMulOps; }
bool useFPVMLx() const { return !SlowFPVMLx; }
bool hasVMLxForwarding() const { return HasVMLxForwarding; }
bool isFPBrccSlow() const { return SlowFPBrcc; }
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 171c9ad..b486d4f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -60,7 +60,7 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
InstrInfo(Subtarget),
- DataLayout(Subtarget.isAPCS_ABI() ?
+ DL(Subtarget.isAPCS_ABI() ?
std::string("e-p:32:32-f64:32:64-i64:32:64-"
"v128:32:128-v64:32:64-n32-S32") :
Subtarget.isAAPCS_ABI() ?
@@ -68,10 +68,10 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
"v128:64:128-v64:64:64-n32-S64") :
std::string("e-p:32:32-f64:64:64-i64:64:64-"
"v128:64:128-v64:64:64-n32-S32")),
- ELFWriterInfo(*this),
TLInfo(*this),
TSInfo(*this),
- FrameLowering(Subtarget) {
+ FrameLowering(Subtarget),
+ STTI(&TLInfo), VTTI(&TLInfo) {
if (!Subtarget.hasARMOps())
report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
"support ARM mode execution!");
@@ -88,7 +88,7 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
InstrInfo(Subtarget.hasThumb2()
? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
: ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
- DataLayout(Subtarget.isAPCS_ABI() ?
+ DL(Subtarget.isAPCS_ABI() ?
std::string("e-p:32:32-f64:32:64-i64:32:64-"
"i16:16:32-i8:8:32-i1:8:32-"
"v128:32:128-v64:32:64-a:0:32-n32-S32") :
@@ -99,12 +99,12 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
std::string("e-p:32:32-f64:64:64-i64:64:64-"
"i16:16:32-i8:8:32-i1:8:32-"
"v128:64:128-v64:64:64-a:0:32-n32-S32")),
- ELFWriterInfo(*this),
TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget.hasThumb2()
? new ARMFrameLowering(Subtarget)
- : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) {
+ : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)),
+ STTI(&TLInfo), VTTI(&TLInfo) {
}
namespace {
@@ -143,6 +143,11 @@ bool ARMPassConfig::addPreISel() {
bool ARMPassConfig::addInstSelector() {
addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
+
+ const ARMSubtarget *Subtarget = &getARMSubtarget();
+ if (Subtarget->isTargetELF() && !Subtarget->isThumb1Only() &&
+ TM->Options.EnableFastISel)
+ addPass(createARMGlobalBaseRegPass());
return false;
}
@@ -150,7 +155,7 @@ bool ARMPassConfig::addPreRegAlloc() {
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
addPass(createARMLoadStoreOptimizationPass(true));
- if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
+ if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isLikeA9())
addPass(createMLxExpansionPass());
return true;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
index abcdb24..ebdd5b4 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
@@ -15,7 +15,6 @@
#define ARMTARGETMACHINE_H
#include "ARMInstrInfo.h"
-#include "ARMELFWriterInfo.h"
#include "ARMFrameLowering.h"
#include "ARMJITInfo.h"
#include "ARMSubtarget.h"
@@ -25,7 +24,8 @@
#include "Thumb1FrameLowering.h"
#include "Thumb2InstrInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetTransformImpl.h"
+#include "llvm/DataLayout.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/ADT/OwningPtr.h"
@@ -62,11 +62,12 @@ public:
class ARMTargetMachine : public ARMBaseTargetMachine {
virtual void anchor();
ARMInstrInfo InstrInfo;
- const TargetData DataLayout; // Calculates type size & alignment
- ARMELFWriterInfo ELFWriterInfo;
+ const DataLayout DL; // Calculates type size & alignment
ARMTargetLowering TLInfo;
ARMSelectionDAGInfo TSInfo;
ARMFrameLowering FrameLowering;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
ARMTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
@@ -88,12 +89,14 @@ class ARMTargetMachine : public ARMBaseTargetMachine {
virtual const ARMFrameLowering *getFrameLowering() const {
return &FrameLowering;
}
-
- virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetData *getTargetData() const { return &DataLayout; }
- virtual const ARMELFWriterInfo *getELFWriterInfo() const {
- return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
}
+ virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ virtual const DataLayout *getDataLayout() const { return &DL; }
};
/// ThumbTargetMachine - Thumb target machine.
@@ -104,12 +107,13 @@ class ThumbTargetMachine : public ARMBaseTargetMachine {
virtual void anchor();
// Either Thumb1InstrInfo or Thumb2InstrInfo.
OwningPtr<ARMBaseInstrInfo> InstrInfo;
- const TargetData DataLayout; // Calculates type size & alignment
- ARMELFWriterInfo ELFWriterInfo;
+ const DataLayout DL; // Calculates type size & alignment
ARMTargetLowering TLInfo;
ARMSelectionDAGInfo TSInfo;
// Either Thumb1FrameLowering or ARMFrameLowering.
OwningPtr<ARMFrameLowering> FrameLowering;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
ThumbTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
@@ -138,10 +142,13 @@ public:
virtual const ARMFrameLowering *getFrameLowering() const {
return FrameLowering.get();
}
- virtual const TargetData *getTargetData() const { return &DataLayout; }
- virtual const ARMELFWriterInfo *getELFWriterInfo() const {
- return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
}
+ virtual const DataLayout *getDataLayout() const { return &DL; }
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 3a5957b..c61e3bd 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -181,49 +181,44 @@ class ARMAsmParser : public MCTargetAsmParser {
OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
// Asm Match Converter Methods
- bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
+ void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
+ void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
+ void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
+ void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
+ void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
+ void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
+ void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
+ void cvtStWriteBackRegAddrMode2(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
+ void cvtStWriteBackRegAddrMode3(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
+ void cvtLdExtTWriteBackImm(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
+ void cvtLdExtTWriteBackReg(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
+ void cvtStExtTWriteBackImm(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
+ void cvtStExtTWriteBackReg(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
+ void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
+ void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
+ void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
+ void cvtThumbMultiply(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
+ void cvtVLDwbFixed(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
+ void cvtVLDwbRegister(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
+ void cvtVSTwbFixed(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
+ void cvtVSTwbRegister(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
-
bool validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
bool processInstruction(MCInst &Inst,
@@ -258,15 +253,17 @@ public:
// Implementation of the MCTargetAsmParser interface:
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
- bool ParseInstruction(StringRef Name, SMLoc NameLoc,
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
bool ParseDirective(AsmToken DirectiveID);
unsigned checkTargetMatchPredicate(MCInst &Inst);
- bool MatchAndEmitInstruction(SMLoc IDLoc,
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out);
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm);
};
} // end anonymous namespace
@@ -486,7 +483,8 @@ public:
SMLoc getStartLoc() const { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const { return EndLoc; }
-
+ /// getLocRange - Get the range between the first and last token of this
+ /// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
ARMCC::CondCodes getCondCode() const {
@@ -862,7 +860,7 @@ public:
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
bool isToken() const { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
- bool isMemory() const { return Kind == k_Memory; }
+ bool isMem() const { return Kind == k_Memory; }
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
@@ -873,14 +871,14 @@ public:
return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false) const {
- if (!isMemory())
+ if (!isMem())
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
(alignOK || Memory.Alignment == 0);
}
bool isMemPCRelImm12() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base register must be PC.
if (Memory.BaseRegNum != ARM::PC)
@@ -894,7 +892,7 @@ public:
return isMemNoOffset(true);
}
bool isAddrMode2() const {
- if (!isMemory() || Memory.Alignment != 0) return false;
+ if (!isMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-4095, 4095].
@@ -916,7 +914,7 @@ public:
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMemory() || Memory.Alignment != 0) return false;
+ if (!isMem() || Memory.Alignment != 0) return false;
// No shifts are legal for AM3.
if (Memory.ShiftType != ARM_AM::no_shift) return false;
// Check for register offset.
@@ -946,7 +944,7 @@ public:
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMemory() || Memory.Alignment != 0) return false;
+ if (!isMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-1020, 1020] and a multiple of 4.
@@ -956,25 +954,25 @@ public:
Val == INT32_MIN;
}
bool isMemTBB() const {
- if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return true;
}
bool isMemTBH() const {
- if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
Memory.Alignment != 0 )
return false;
return true;
}
bool isMemRegOffset() const {
- if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
+ if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
return true;
}
bool isT2MemRegOffset() const {
- if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.Alignment != 0)
return false;
// Only lsl #{0, 1, 2, 3} allowed.
@@ -987,14 +985,14 @@ public:
bool isMemThumbRR() const {
// Thumb reg+reg addressing is simple. Just two registers, a base and
// an offset. No shifts, negations or any other complicating factors.
- if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return isARMLowRegister(Memory.BaseRegNum) &&
(!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
}
bool isMemThumbRIs4() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 ||
+ if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 124].
@@ -1003,7 +1001,7 @@ public:
return Val >= 0 && Val <= 124 && (Val % 4) == 0;
}
bool isMemThumbRIs2() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 ||
+ if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 62].
@@ -1012,7 +1010,7 @@ public:
return Val >= 0 && Val <= 62 && (Val % 2) == 0;
}
bool isMemThumbRIs1() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 ||
+ if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 31].
@@ -1021,7 +1019,7 @@ public:
return Val >= 0 && Val <= 31;
}
bool isMemThumbSPI() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 ||
+ if (!isMem() || Memory.OffsetRegNum != 0 ||
Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 1020].
@@ -1035,7 +1033,7 @@ public:
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [-1020, 1020].
if (!Memory.OffsetImm) return true;
@@ -1044,7 +1042,7 @@ public:
return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
}
bool isMemImm0_1020s4Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
@@ -1052,7 +1050,7 @@ public:
return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
}
bool isMemImm8Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
@@ -1062,7 +1060,7 @@ public:
return (Val == INT32_MIN) || (Val > -256 && Val < 256);
}
bool isMemPosImm8Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 255].
if (!Memory.OffsetImm) return true;
@@ -1070,7 +1068,7 @@ public:
return Val >= 0 && Val < 256;
}
bool isMemNegImm8Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
@@ -1080,7 +1078,7 @@ public:
return (Val == INT32_MIN) || (Val > -256 && Val < 0);
}
bool isMemUImm12Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 4095].
if (!Memory.OffsetImm) return true;
@@ -1094,7 +1092,7 @@ public:
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
@@ -3376,7 +3374,8 @@ ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
- assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+ if (!Tok.is(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
StringRef Mask = Tok.getString();
if (isMClass()) {
@@ -3880,8 +3879,8 @@ parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// cvtT2LdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtT2LdrdPre(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Rt, Rt2
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
@@ -3892,14 +3891,13 @@ cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtT2StrdPre(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateReg(0));
@@ -3910,14 +3908,13 @@ cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
@@ -3926,28 +3923,26 @@ cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdWriteBackRegAddrMode2(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
@@ -3956,14 +3951,13 @@ cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
@@ -3972,57 +3966,53 @@ cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStWriteBackRegAddrMode2(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStWriteBackRegAddrMode3(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdExtTWriteBackImm(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Rt
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
@@ -4034,14 +4024,13 @@ cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdExtTWriteBackReg(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Rt
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
@@ -4053,14 +4042,13 @@ cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStExtTWriteBackImm(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
@@ -4072,14 +4060,13 @@ cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStExtTWriteBackReg(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
@@ -4091,14 +4078,13 @@ cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdrdPre(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdrdPre(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Rt, Rt2
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
@@ -4109,14 +4095,13 @@ cvtLdrdPre(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStrdPre(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStrdPre(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
@@ -4127,40 +4112,27 @@ cvtStrdPre(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdWriteBackRegAddrMode3(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
-/// cvtThumbMultiple- Convert parsed operands to MCInst.
+/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtThumbMultiply(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // The second source operand must be the same register as the destination
- // operand.
- if (Operands.size() == 6 &&
- (((ARMOperand*)Operands[3])->getReg() !=
- ((ARMOperand*)Operands[5])->getReg()) &&
- (((ARMOperand*)Operands[3])->getReg() !=
- ((ARMOperand*)Operands[4])->getReg())) {
- Error(Operands[3]->getStartLoc(),
- "destination register must match source register");
- return false;
- }
((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
// If we have a three-operand form, make sure to set Rn to be the operand
@@ -4173,12 +4145,10 @@ cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
Inst.addOperand(Inst.getOperand(0));
((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
-
- return true;
}
-bool ARMAsmParser::
-cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtVLDwbFixed(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Vd
((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
@@ -4188,11 +4158,10 @@ cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
-bool ARMAsmParser::
-cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtVLDwbRegister(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Vd
((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
@@ -4204,11 +4173,10 @@ cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
-bool ARMAsmParser::
-cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtVSTwbFixed(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
@@ -4218,11 +4186,10 @@ cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
-bool ARMAsmParser::
-cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtVSTwbRegister(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
@@ -4234,7 +4201,6 @@ cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// Parse an ARM memory expression, return false if successful else return true
@@ -4471,6 +4437,12 @@ bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
return Error(Loc, "immediate shift value out of range");
+ // If <ShiftTy> #0, turn it into a no_shift.
+ if (Imm == 0)
+ St = ARM_AM::lsl;
+ // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
+ if (Imm == 32)
+ Imm = 0;
Amount = Imm;
}
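A standalone restatement of the two canonicalizations just added, with hand-picked cases; this helper is not part of the patch:

    static void canonicalizeShift(llvm::ARM_AM::ShiftOpc &St, int64_t &Imm) {
      if (Imm == 0)  // "<shift> #0" degenerates to lsl #0, i.e. no shift
        St = llvm::ARM_AM::lsl;
      if (Imm == 32) // lsr #32 and asr #32 are encoded with immediate 0
        Imm = 0;
    }
    // (asr, 32) becomes (asr, 0); (ror, 0) becomes (lsl, 0).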
@@ -4648,7 +4620,7 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
return true;
const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
- getContext());
+ getContext());
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
return false;
@@ -4983,7 +4955,8 @@ static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
/// Parse an arm instruction mnemonic followed by its operands.
-bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
+bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Apply mnemonic aliases before doing anything else, as the destination
// mnemnonic may include suffices and we want to handle them normally.
@@ -5377,6 +5350,25 @@ validateInstruction(MCInst &Inst,
"in register list");
break;
}
+ case ARM::tMUL: {
+ // The second source operand must be the same register as the destination
+ // operand.
+ //
+ // In this case, we must directly check the parsed operands because the
+ // cvtThumbMultiply() function is written in such a way that it guarantees
+ // this first statement is always true for the new Inst. Essentially, the
+ // destination is unconditionally copied into the second source operand
+ // without checking to see if it matches what we actually parsed.
+ if (Operands.size() == 6 &&
+ (((ARMOperand*)Operands[3])->getReg() !=
+ ((ARMOperand*)Operands[5])->getReg()) &&
+ (((ARMOperand*)Operands[3])->getReg() !=
+ ((ARMOperand*)Operands[4])->getReg())) {
+ return Error(Operands[3]->getStartLoc(),
+ "destination register must match source register");
+ }
+ break;
+ }
// Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
// so only issue a diagnostic for thumb1. The instructions will be
// switched to the t2 encodings in processInstruction() if necessary.
@@ -5678,6 +5670,20 @@ bool ARMAsmParser::
processInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
switch (Inst.getOpcode()) {
+ // Alias for alternate form of 'ADR Rd, #imm' instruction.
+ case ARM::ADDri: {
+ if (Inst.getOperand(1).getReg() != ARM::PC ||
+ Inst.getOperand(5).getReg() != 0)
+ return false;
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::ADR);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
// Aliases for alternate PC+imm syntax of LDR instructions.
case ARM::t2LDRpcrel:
Inst.setOpcode(ARM::t2LDRpci);
@@ -7471,13 +7477,14 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
static const char *getSubtargetFeatureName(unsigned Val);
bool ARMAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc,
+MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out) {
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
MCInst Inst;
- unsigned ErrorInfo;
unsigned MatchResult;
- MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
+ MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
+ MatchingInlineAsm);
switch (MatchResult) {
default: break;
case Match_Success:
@@ -7540,9 +7547,6 @@ MatchAndEmitInstruction(SMLoc IDLoc,
case Match_MnemonicFail:
return Error(IDLoc, "invalid instruction",
((ARMOperand*)Operands[0])->getLocRange());
- case Match_ConversionFail:
- // The converter function will have already emitted a diagnostic.
- return true;
case Match_RequiresNotITBlock:
return Error(IDLoc, "flag setting instruction only valid outside IT block");
case Match_RequiresITBlock:
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index c90751d..f00142d 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -525,8 +525,9 @@ static bool tryAddingSymbolicOperand(uint64_t Address, int32_t Value,
else
ReferenceType = LLVMDisassembler_ReferenceType_InOut_None;
const char *ReferenceName;
- const char *Name = SymbolLookUp(DisInfo, Value, &ReferenceType, Address,
- &ReferenceName);
+ uint64_t SymbolValue = 0x00000000ffffffffULL & Value;
+ const char *Name = SymbolLookUp(DisInfo, SymbolValue, &ReferenceType,
+ Address, &ReferenceName);
if (Name) {
SymbolicOp.AddSymbol.Name = Name;
SymbolicOp.AddSymbol.Present = true;
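The new mask guards against sign extension: Value is a signed 32-bit quantity, so widening a negative address directly to the uint64_t parameter of SymbolLookUp would yield a value no symbol table contains. Illustrative values, not from the patch:

    int32_t Value = (int32_t)0x80001000;                  // high-half address
    uint64_t Widened = (uint64_t)Value;                   // 0xffffffff80001000
    uint64_t SymbolValue = 0x00000000ffffffffULL & Value; // 0x0000000080001000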
@@ -1523,6 +1524,8 @@ DecodeAddrMode2IdxInstruction(MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail;
}
unsigned amt = fieldFromInstruction(Insn, 7, 5);
+ if (Opc == ARM_AM::ror && amt == 0)
+ Opc = ARM_AM::rrx;
unsigned imm = ARM_AM::getAM2Opc(Op, amt, Opc, idx_mode);
Inst.addOperand(MCOperand::CreateImm(imm));
@@ -1564,6 +1567,9 @@ static DecodeStatus DecodeSORegMemOperand(MCInst &Inst, unsigned Val,
break;
}
+ if (ShOp == ARM_AM::ror && imm == 0)
+ ShOp = ARM_AM::rrx;
+
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
@@ -2089,16 +2095,28 @@ static DecodeStatus DecodeAddrMode7Operand(MCInst &Inst, unsigned Val,
static DecodeStatus
DecodeT2BInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
- DecodeStatus S = MCDisassembler::Success;
- unsigned imm = (fieldFromInstruction(Insn, 0, 11) << 0) |
- (fieldFromInstruction(Insn, 11, 1) << 18) |
- (fieldFromInstruction(Insn, 13, 1) << 17) |
- (fieldFromInstruction(Insn, 16, 6) << 11) |
- (fieldFromInstruction(Insn, 26, 1) << 19);
- if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<20>(imm<<1) + 4,
+ DecodeStatus Status = MCDisassembler::Success;
+
+ // Note the J1 and J2 values are from the encoded instruction. So here
+ // change them to I1 and I2 values as documented:
+ // I1 = NOT(J1 EOR S);
+ // I2 = NOT(J2 EOR S);
+ // and build the imm32 with one trailing zero as documented:
+ // imm32 = SignExtend(S:I1:I2:imm10:imm11:'0', 32);
+ unsigned S = fieldFromInstruction(Insn, 26, 1);
+ unsigned J1 = fieldFromInstruction(Insn, 13, 1);
+ unsigned J2 = fieldFromInstruction(Insn, 11, 1);
+ unsigned I1 = !(J1 ^ S);
+ unsigned I2 = !(J2 ^ S);
+ unsigned imm10 = fieldFromInstruction(Insn, 16, 10);
+ unsigned imm11 = fieldFromInstruction(Insn, 0, 11);
+ unsigned tmp = (S << 23) | (I1 << 22) | (I2 << 21) | (imm10 << 11) | imm11;
+ int imm32 = SignExtend32<25>(tmp << 1);
+ if (!tryAddingSymbolicOperand(Address, Address + imm32 + 4,
true, 4, Inst, Decoder))
- Inst.addOperand(MCOperand::CreateImm(SignExtend32<20>(imm << 1)));
- return S;
+ Inst.addOperand(MCOperand::CreateImm(imm32));
+
+ return Status;
}
static DecodeStatus
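A hand-worked instance of the decode above, with values chosen for illustration (SignExtend32 comes from llvm/Support/MathExtras.h): a branch to its own address, i.e. imm32 = -4 relative to the PC base Address + 4, encodes S=1, J1=1, J2=1, imm10=0x3FF, imm11=0x7FE.

    unsigned S = 1, J1 = 1, J2 = 1, imm10 = 0x3FF, imm11 = 0x7FE;
    unsigned I1 = !(J1 ^ S); // NOT(1 XOR 1) = 1
    unsigned I2 = !(J2 ^ S); // 1
    unsigned tmp = (S << 23) | (I1 << 22) | (I2 << 21) | (imm10 << 11) | imm11;
    int imm32 = llvm::SignExtend32<25>(tmp << 1); // tmp = 0xfffffe, imm32 = -4
    // Branch target = Address + imm32 + 4 = Address.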
@@ -2701,6 +2719,8 @@ static DecodeStatus DecodeVLD1DupInstruction(MCInst &Inst, unsigned Insn,
unsigned align = fieldFromInstruction(Insn, 4, 1);
unsigned size = fieldFromInstruction(Insn, 6, 2);
+ if (size == 0 && align == 1)
+ return MCDisassembler::Fail;
align *= (1 << size);
switch (Inst.getOpcode()) {
@@ -2831,6 +2851,8 @@ static DecodeStatus DecodeVLD4DupInstruction(MCInst &Inst, unsigned Insn,
unsigned align = fieldFromInstruction(Insn, 4, 1);
if (size == 0x3) {
+ if (align == 0)
+ return MCDisassembler::Fail;
size = 4;
align = 16;
} else {
@@ -3170,7 +3192,7 @@ static DecodeStatus DecodeT2Imm8S4(MCInst &Inst, unsigned Val,
int imm = Val & 0xFF;
if (!(Val & 0x100)) imm *= -1;
- Inst.addOperand(MCOperand::CreateImm(imm << 2));
+ Inst.addOperand(MCOperand::CreateImm(imm * 4));
}
return MCDisassembler::Success;
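The switch from imm << 2 to imm * 4 is more than style: imm has already been negated when bit 8 of Val is clear, and left-shifting a negative signed integer is undefined behavior in C++, while multiplication is well defined.

    int imm = 0x42;         // decoded 8-bit magnitude
    imm *= -1;              // sign applied, as in the surrounding decoder
    int offset = imm * 4;   // -264, well defined
    // int bad = imm << 2;  // undefined behavior: left shift of negative value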
@@ -3710,8 +3732,16 @@ static DecodeStatus DecodeVLD1LN(MCInst &Inst, unsigned Insn,
if (fieldFromInstruction(Insn, 6, 1))
return MCDisassembler::Fail; // UNDEFINED
index = fieldFromInstruction(Insn, 7, 1);
- if (fieldFromInstruction(Insn, 4, 2) != 0)
- align = 4;
+
+ switch (fieldFromInstruction(Insn, 4, 2)) {
+ case 0:
+ align = 0; break;
+ case 3:
+ align = 4; break;
+ default:
+ return MCDisassembler::Fail;
+ }
+ break;
}
if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
@@ -3769,8 +3799,16 @@ static DecodeStatus DecodeVST1LN(MCInst &Inst, unsigned Insn,
if (fieldFromInstruction(Insn, 6, 1))
return MCDisassembler::Fail; // UNDEFINED
index = fieldFromInstruction(Insn, 7, 1);
- if (fieldFromInstruction(Insn, 4, 2) != 0)
- align = 4;
+
+ switch (fieldFromInstruction(Insn, 4, 2)) {
+ case 0:
+ align = 0; break;
+ case 3:
+ align = 4; break;
+ default:
+ return MCDisassembler::Fail;
+ }
+ break;
}
if (Rm != 0xF) { // Writeback
@@ -4090,8 +4128,15 @@ static DecodeStatus DecodeVLD4LN(MCInst &Inst, unsigned Insn,
inc = 2;
break;
case 2:
- if (fieldFromInstruction(Insn, 4, 2))
- align = 4 << fieldFromInstruction(Insn, 4, 2);
+ switch (fieldFromInstruction(Insn, 4, 2)) {
+ case 0:
+ align = 0; break;
+ case 3:
+ return MCDisassembler::Fail;
+ default:
+ align = 4 << fieldFromInstruction(Insn, 4, 2); break;
+ }
+
index = fieldFromInstruction(Insn, 7, 1);
if (fieldFromInstruction(Insn, 6, 1))
inc = 2;
@@ -4164,8 +4209,15 @@ static DecodeStatus DecodeVST4LN(MCInst &Inst, unsigned Insn,
inc = 2;
break;
case 2:
- if (fieldFromInstruction(Insn, 4, 2))
- align = 4 << fieldFromInstruction(Insn, 4, 2);
+ switch (fieldFromInstruction(Insn, 4, 2)) {
+ case 0:
+ align = 0; break;
+ case 3:
+ return MCDisassembler::Fail;
+ default:
+ align = 4 << fieldFromInstruction(Insn, 4, 2); break;
+ }
+
index = fieldFromInstruction(Insn, 7, 1);
if (fieldFromInstruction(Insn, 6, 1))
inc = 2;
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index 8b9109e..dcc41d9 100644
--- a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -29,11 +29,33 @@ using namespace llvm;
///
/// getSORegOffset returns an integer from 0-31, representing '32' as 0.
static unsigned translateShiftImm(unsigned imm) {
+ // lsr #32 and asr #32 exist, but should be encoded as a 0.
+ assert((imm & ~0x1f) == 0 && "Invalid shift encoding");
+
if (imm == 0)
return 32;
return imm;
}
+/// Prints the shift value with an immediate value.
+static void printRegImmShift(raw_ostream &O, ARM_AM::ShiftOpc ShOpc,
+ unsigned ShImm, bool UseMarkup) {
+ if (ShOpc == ARM_AM::no_shift || (ShOpc == ARM_AM::lsl && !ShImm))
+ return;
+ O << ", ";
+
+ assert (!(ShOpc == ARM_AM::ror && !ShImm) && "Cannot have ror #0");
+ O << getShiftOpcStr(ShOpc);
+
+ if (ShOpc != ARM_AM::rrx) {
+ O << " ";
+ if (UseMarkup)
+ O << "<imm:";
+ O << "#" << translateShiftImm(ShImm);
+ if (UseMarkup)
+ O << ">";
+ }
+}
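A sample of what the new helper prints in plain (non-markup) mode, through a hypothetical driver; raw_string_ostream is the real class, the call frames are illustrative:

    std::string Buf;
    llvm::raw_string_ostream OS(Buf);
    printRegImmShift(OS, llvm::ARM_AM::lsl, 0, false); // prints nothing
    printRegImmShift(OS, llvm::ARM_AM::asr, 0, false); // ", asr #32" (0 means 32)
    printRegImmShift(OS, llvm::ARM_AM::rrx, 0, false); // ", rrx", no immediate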
ARMInstPrinter::ARMInstPrinter(const MCAsmInfo &MAI,
const MCInstrInfo &MII,
@@ -45,7 +67,9 @@ ARMInstPrinter::ARMInstPrinter(const MCAsmInfo &MAI,
}
void ARMInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
- OS << getRegisterName(RegNo);
+ OS << markup("<reg:")
+ << getRegisterName(RegNo)
+ << markup(">");
}
void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
@@ -85,10 +109,13 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
printSBitModifierOperand(MI, 6, O);
printPredicateOperand(MI, 4, O);
- O << '\t' << getRegisterName(Dst.getReg())
- << ", " << getRegisterName(MO1.getReg());
+ O << '\t';
+ printRegName(O, Dst.getReg());
+ O << ", ";
+ printRegName(O, MO1.getReg());
- O << ", " << getRegisterName(MO2.getReg());
+ O << ", ";
+ printRegName(O, MO2.getReg());
assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
printAnnotation(O, Annot);
return;
@@ -104,15 +131,20 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
printSBitModifierOperand(MI, 5, O);
printPredicateOperand(MI, 3, O);
- O << '\t' << getRegisterName(Dst.getReg())
- << ", " << getRegisterName(MO1.getReg());
+ O << '\t';
+ printRegName(O, Dst.getReg());
+ O << ", ";
+ printRegName(O, MO1.getReg());
if (ARM_AM::getSORegShOp(MO2.getImm()) == ARM_AM::rrx) {
printAnnotation(O, Annot);
return;
}
- O << ", #" << translateShiftImm(ARM_AM::getSORegOffset(MO2.getImm()));
+ O << ", "
+ << markup("<imm:")
+ << "#" << translateShiftImm(ARM_AM::getSORegOffset(MO2.getImm()))
+ << markup(">");
printAnnotation(O, Annot);
return;
}
@@ -136,7 +168,9 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
MI->getOperand(3).getImm() == -4) {
O << '\t' << "push";
printPredicateOperand(MI, 4, O);
- O << "\t{" << getRegisterName(MI->getOperand(1).getReg()) << "}";
+ O << "\t{";
+ printRegName(O, MI->getOperand(1).getReg());
+ O << "}";
printAnnotation(O, Annot);
return;
}
@@ -159,7 +193,9 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
MI->getOperand(4).getImm() == 4) {
O << '\t' << "pop";
printPredicateOperand(MI, 5, O);
- O << "\t{" << getRegisterName(MI->getOperand(0).getReg()) << "}";
+ O << "\t{";
+ printRegName(O, MI->getOperand(0).getReg());
+ O << "}";
printAnnotation(O, Annot);
return;
}
@@ -198,7 +234,8 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
O << "\tldm";
printPredicateOperand(MI, 1, O);
- O << '\t' << getRegisterName(BaseReg);
+ O << '\t';
+ printRegName(O, BaseReg);
if (Writeback) O << "!";
O << ", ";
printRegisterList(MI, 3, O);
@@ -224,9 +261,11 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
unsigned Reg = Op.getReg();
- O << getRegisterName(Reg);
+ printRegName(O, Reg);
} else if (Op.isImm()) {
- O << '#' << Op.getImm();
+ O << markup("<imm:")
+ << '#' << Op.getImm()
+ << markup(">");
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
// If a symbolic branch target was added as a constant expression then print
@@ -244,13 +283,16 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
}
-void ARMInstPrinter::printT2LdrLabelOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
+void ARMInstPrinter::printThumbLdrLabelOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
if (MO1.isExpr())
O << *MO1.getExpr();
- else if (MO1.isImm())
- O << "[pc, #" << MO1.getImm() << "]";
+ else if (MO1.isImm()) {
+ O << markup("<mem:") << "[pc, "
+ << markup("<imm:") << "#" << MO1.getImm()
+ << markup(">]>", "]");
+ }
else
llvm_unreachable("Unknown LDR label operand?");
}
@@ -266,7 +308,7 @@ void ARMInstPrinter::printSORegRegOperand(const MCInst *MI, unsigned OpNum,
const MCOperand &MO2 = MI->getOperand(OpNum+1);
const MCOperand &MO3 = MI->getOperand(OpNum+2);
- O << getRegisterName(MO1.getReg());
+ printRegName(O, MO1.getReg());
// Print the shift opc.
ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO3.getImm());
@@ -274,7 +316,8 @@ void ARMInstPrinter::printSORegRegOperand(const MCInst *MI, unsigned OpNum,
if (ShOpc == ARM_AM::rrx)
return;
- O << ' ' << getRegisterName(MO2.getReg());
+ O << ' ';
+ printRegName(O, MO2.getReg());
assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
}
@@ -283,14 +326,11 @@ void ARMInstPrinter::printSORegImmOperand(const MCInst *MI, unsigned OpNum,
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
- O << getRegisterName(MO1.getReg());
+ printRegName(O, MO1.getReg());
// Print the shift opc.
- ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO2.getImm());
- O << ", " << ARM_AM::getShiftOpcStr(ShOpc);
- if (ShOpc == ARM_AM::rrx)
- return;
- O << " #" << translateShiftImm(ARM_AM::getSORegOffset(MO2.getImm()));
+ printRegImmShift(O, ARM_AM::getSORegShOp(MO2.getImm()),
+ ARM_AM::getSORegOffset(MO2.getImm()), UseMarkup);
}
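
Several of the following hunks replace the open-coded shift printing with printRegImmShift(). Its body is outside this excerpt; the signature is fixed by the call sites, and a plausible reconstruction from the code it replaces (shift opcode first, rrx takes no amount, amount wrapped in an <imm:> tag) is:

  static void printRegImmShift(raw_ostream &O, ARM_AM::ShiftOpc ShOpc,
                               unsigned ShImm, bool UseMarkup) {
    if (ShOpc == ARM_AM::no_shift || (ShOpc == ARM_AM::lsl && !ShImm))
      return;                          // nothing to print for a plain operand
    O << ", " << ARM_AM::getShiftOpcStr(ShOpc);
    if (ShOpc == ARM_AM::rrx)
      return;                          // rrx never carries a shift amount
    O << ' ';
    if (UseMarkup) O << "<imm:";
    O << '#' << translateShiftImm(ShImm);
    if (UseMarkup) O << '>';
  }
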
@@ -304,67 +344,51 @@ void ARMInstPrinter::printAM2PreOrOffsetIndexOp(const MCInst *MI, unsigned Op,
const MCOperand &MO2 = MI->getOperand(Op+1);
const MCOperand &MO3 = MI->getOperand(Op+2);
- O << "[" << getRegisterName(MO1.getReg());
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
if (!MO2.getReg()) {
- if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
- O << ", #"
+ if (ARM_AM::getAM2Offset(MO3.getImm())) { // Don't print +0.
+ O << ", "
+ << markup("<imm:")
+ << "#"
<< ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
- << ARM_AM::getAM2Offset(MO3.getImm());
- O << "]";
- return;
- }
-
- O << ", "
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
- << getRegisterName(MO2.getReg());
-
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO3.getImm()))
- << " #" << ShImm;
- O << "]";
-}
-
-void ARMInstPrinter::printAM2PostIndexOp(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- const MCOperand &MO1 = MI->getOperand(Op);
- const MCOperand &MO2 = MI->getOperand(Op+1);
- const MCOperand &MO3 = MI->getOperand(Op+2);
-
- O << "[" << getRegisterName(MO1.getReg()) << "], ";
-
- if (!MO2.getReg()) {
- unsigned ImmOffs = ARM_AM::getAM2Offset(MO3.getImm());
- O << '#'
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
- << ImmOffs;
+ << ARM_AM::getAM2Offset(MO3.getImm())
+ << markup(">");
+ }
+ O << "]" << markup(">");
return;
}
- O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
- << getRegisterName(MO2.getReg());
+ O << ", ";
+ O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()));
+ printRegName(O, MO2.getReg());
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO3.getImm()))
- << " #" << ShImm;
+ printRegImmShift(O, ARM_AM::getAM2ShiftOpc(MO3.getImm()),
+ ARM_AM::getAM2Offset(MO3.getImm()), UseMarkup);
+ O << "]" << markup(">");
}
void ARMInstPrinter::printAddrModeTBB(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(Op);
const MCOperand &MO2 = MI->getOperand(Op+1);
- O << "[" << getRegisterName(MO1.getReg()) << ", "
- << getRegisterName(MO2.getReg()) << "]";
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
+ O << ", ";
+ printRegName(O, MO2.getReg());
+ O << "]" << markup(">");
}
void ARMInstPrinter::printAddrModeTBH(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(Op);
const MCOperand &MO2 = MI->getOperand(Op+1);
- O << "[" << getRegisterName(MO1.getReg()) << ", "
- << getRegisterName(MO2.getReg()) << ", lsl #1]";
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
+ O << ", ";
+ printRegName(O, MO2.getReg());
+ O << ", lsl " << markup("<imm:") << "#1" << markup(">") << "]" << markup(">");
}
void ARMInstPrinter::printAddrMode2Operand(const MCInst *MI, unsigned Op,
@@ -376,13 +400,13 @@ void ARMInstPrinter::printAddrMode2Operand(const MCInst *MI, unsigned Op,
return;
}
+#ifndef NDEBUG
const MCOperand &MO3 = MI->getOperand(Op+2);
unsigned IdxMode = ARM_AM::getAM2IdxMode(MO3.getImm());
+ assert(IdxMode != ARMII::IndexModePost &&
+ "Should be pre or offset index op");
+#endif
- if (IdxMode == ARMII::IndexModePost) {
- printAM2PostIndexOp(MI, Op, O);
- return;
- }
printAM2PreOrOffsetIndexOp(MI, Op, O);
}
@@ -394,19 +418,18 @@ void ARMInstPrinter::printAddrMode2OffsetOperand(const MCInst *MI,
if (!MO1.getReg()) {
unsigned ImmOffs = ARM_AM::getAM2Offset(MO2.getImm());
- O << '#'
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
- << ImmOffs;
+ O << markup("<imm:")
+ << '#' << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
+ << ImmOffs
+ << markup(">");
return;
}
- O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
- << getRegisterName(MO1.getReg());
+ O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()));
+ printRegName(O, MO1.getReg());
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO2.getImm()))
- << " #" << ShImm;
+ printRegImmShift(O, ARM_AM::getAM2ShiftOpc(MO2.getImm()),
+ ARM_AM::getAM2Offset(MO2.getImm()), UseMarkup);
}
//===--------------------------------------------------------------------===//
@@ -419,18 +442,22 @@ void ARMInstPrinter::printAM3PostIndexOp(const MCInst *MI, unsigned Op,
const MCOperand &MO2 = MI->getOperand(Op+1);
const MCOperand &MO3 = MI->getOperand(Op+2);
- O << "[" << getRegisterName(MO1.getReg()) << "], ";
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
+ O << "], " << markup(">");
if (MO2.getReg()) {
- O << (char)ARM_AM::getAM3Op(MO3.getImm())
- << getRegisterName(MO2.getReg());
+ O << (char)ARM_AM::getAM3Op(MO3.getImm());
+ printRegName(O, MO2.getReg());
return;
}
unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm());
- O << '#'
+ O << markup("<imm:")
+ << '#'
<< ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO3.getImm()))
- << ImmOffs;
+ << ImmOffs
+ << markup(">");
}
void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op,
@@ -439,23 +466,29 @@ void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op,
const MCOperand &MO2 = MI->getOperand(Op+1);
const MCOperand &MO3 = MI->getOperand(Op+2);
- O << '[' << getRegisterName(MO1.getReg());
+ O << markup("<mem:") << '[';
+ printRegName(O, MO1.getReg());
if (MO2.getReg()) {
- O << ", " << getAddrOpcStr(ARM_AM::getAM3Op(MO3.getImm()))
- << getRegisterName(MO2.getReg()) << ']';
+ O << ", " << getAddrOpcStr(ARM_AM::getAM3Op(MO3.getImm()));
+ printRegName(O, MO2.getReg());
+ O << ']' << markup(">");
return;
}
- //If the op is sub we have to print the immediate even if it is 0
+  // If the op is sub, we have to print the immediate even if it is 0
unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm());
ARM_AM::AddrOpc op = ARM_AM::getAM3Op(MO3.getImm());
-
- if (ImmOffs || (op == ARM_AM::sub))
- O << ", #"
+
+ if (ImmOffs || (op == ARM_AM::sub)) {
+ O << ", "
+ << markup("<imm:")
+ << "#"
<< ARM_AM::getAddrOpcStr(op)
- << ImmOffs;
- O << ']';
+ << ImmOffs
+ << markup(">");
+ }
+ O << ']' << markup(">");
}
void ARMInstPrinter::printAddrMode3Operand(const MCInst *MI, unsigned Op,
@@ -483,15 +516,15 @@ void ARMInstPrinter::printAddrMode3OffsetOperand(const MCInst *MI,
const MCOperand &MO2 = MI->getOperand(OpNum+1);
if (MO1.getReg()) {
- O << getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm()))
- << getRegisterName(MO1.getReg());
+ O << getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm()));
+ printRegName(O, MO1.getReg());
return;
}
unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm());
- O << '#'
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm()))
- << ImmOffs;
+ O << markup("<imm:")
+ << '#' << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm())) << ImmOffs
+ << markup(">");
}
void ARMInstPrinter::printPostIdxImm8Operand(const MCInst *MI,
@@ -499,7 +532,9 @@ void ARMInstPrinter::printPostIdxImm8Operand(const MCInst *MI,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
unsigned Imm = MO.getImm();
- O << '#' << ((Imm & 256) ? "" : "-") << (Imm & 0xff);
+ O << markup("<imm:")
+ << '#' << ((Imm & 256) ? "" : "-") << (Imm & 0xff)
+ << markup(">");
}
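
A worked reading of the encoding tested above, taking bit 8 of the immediate as the add/sub flag and the low eight bits as the magnitude (an assumption drawn from the expression itself):

  unsigned Imm = 0x104;                       // bit 8 set: add
  const char *Sign = (Imm & 256) ? "" : "-";  // "" here
  unsigned Mag = Imm & 0xff;                  // 4 -> prints "#4"
  // Imm = 0x004 gives Sign = "-", so the operand prints as "#-4".
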
void ARMInstPrinter::printPostIdxRegOperand(const MCInst *MI, unsigned OpNum,
@@ -507,7 +542,8 @@ void ARMInstPrinter::printPostIdxRegOperand(const MCInst *MI, unsigned OpNum,
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
- O << (MO2.getImm() ? "" : "-") << getRegisterName(MO1.getReg());
+ O << (MO2.getImm() ? "" : "-");
+ printRegName(O, MO1.getReg());
}
void ARMInstPrinter::printPostIdxImm8s4Operand(const MCInst *MI,
@@ -515,7 +551,9 @@ void ARMInstPrinter::printPostIdxImm8s4Operand(const MCInst *MI,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
unsigned Imm = MO.getImm();
- O << '#' << ((Imm & 256) ? "" : "-") << ((Imm & 0xff) << 2);
+ O << markup("<imm:")
+ << '#' << ((Imm & 256) ? "" : "-") << ((Imm & 0xff) << 2)
+ << markup(">");
}
@@ -536,16 +574,20 @@ void ARMInstPrinter::printAddrMode5Operand(const MCInst *MI, unsigned OpNum,
return;
}
- O << "[" << getRegisterName(MO1.getReg());
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm());
unsigned Op = ARM_AM::getAM5Op(MO2.getImm());
if (ImmOffs || Op == ARM_AM::sub) {
- O << ", #"
+ O << ", "
+ << markup("<imm:")
+ << "#"
<< ARM_AM::getAddrOpcStr(ARM_AM::getAM5Op(MO2.getImm()))
- << ImmOffs * 4;
+ << ImmOffs * 4
+ << markup(">");
}
- O << "]";
+ O << "]" << markup(">");
}
void ARMInstPrinter::printAddrMode6Operand(const MCInst *MI, unsigned OpNum,
@@ -553,18 +595,21 @@ void ARMInstPrinter::printAddrMode6Operand(const MCInst *MI, unsigned OpNum,
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
- O << "[" << getRegisterName(MO1.getReg());
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
if (MO2.getImm()) {
// FIXME: Both darwin as and GNU as violate ARM docs here.
O << ", :" << (MO2.getImm() << 3);
}
- O << "]";
+ O << "]" << markup(">");
}
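
The << 3 above scales the operand's byte alignment up to the bit alignment that NEON address syntax spells after the colon; for example:

  // MO2.getImm() == 8   (8-byte aligned)  -> prints ", :64"
  // MO2.getImm() == 16  (16-byte aligned) -> prints ", :128"
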
void ARMInstPrinter::printAddrMode7Operand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
- O << "[" << getRegisterName(MO1.getReg()) << "]";
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
+ O << "]" << markup(">");
}
void ARMInstPrinter::printAddrMode6OffsetOperand(const MCInst *MI,
@@ -573,8 +618,10 @@ void ARMInstPrinter::printAddrMode6OffsetOperand(const MCInst *MI,
const MCOperand &MO = MI->getOperand(OpNum);
if (MO.getReg() == 0)
O << "!";
- else
- O << ", " << getRegisterName(MO.getReg());
+ else {
+ O << ", ";
+ printRegName(O, MO.getReg());
+ }
}
void ARMInstPrinter::printBitfieldInvMaskImmOperand(const MCInst *MI,
@@ -585,7 +632,9 @@ void ARMInstPrinter::printBitfieldInvMaskImmOperand(const MCInst *MI,
int32_t lsb = CountTrailingZeros_32(v);
int32_t width = (32 - CountLeadingZeros_32 (v)) - lsb;
assert(MO.isImm() && "Not a valid bf_inv_mask_imm value!");
- O << '#' << lsb << ", #" << width;
+ O << markup("<imm:") << '#' << lsb << markup(">")
+ << ", "
+ << markup("<imm:") << '#' << width << markup(">");
}
void ARMInstPrinter::printMemBOption(const MCInst *MI, unsigned OpNum,
@@ -599,10 +648,18 @@ void ARMInstPrinter::printShiftImmOperand(const MCInst *MI, unsigned OpNum,
unsigned ShiftOp = MI->getOperand(OpNum).getImm();
bool isASR = (ShiftOp & (1 << 5)) != 0;
unsigned Amt = ShiftOp & 0x1f;
- if (isASR)
- O << ", asr #" << (Amt == 0 ? 32 : Amt);
- else if (Amt)
- O << ", lsl #" << Amt;
+ if (isASR) {
+ O << ", asr "
+ << markup("<imm:")
+ << "#" << (Amt == 0 ? 32 : Amt)
+ << markup(">");
+ }
+ else if (Amt) {
+ O << ", lsl "
+ << markup("<imm:")
+ << "#" << Amt
+ << markup(">");
+ }
}
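
A quick decode of the shift-operand layout assumed above (bit 5 selects asr, the low five bits hold the amount, and an asr amount of 0 stands for 32):

  unsigned ShiftOp = 0x20;                 // bit 5 set, low bits zero
  bool isASR = (ShiftOp & (1 << 5)) != 0;  // true
  unsigned Amt = ShiftOp & 0x1f;           // 0 -> printed as ", asr #32"
  // ShiftOp = 0x07 prints ", lsl #7"; ShiftOp = 0x00 prints nothing.
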
void ARMInstPrinter::printPKHLSLShiftImm(const MCInst *MI, unsigned OpNum,
@@ -611,7 +668,7 @@ void ARMInstPrinter::printPKHLSLShiftImm(const MCInst *MI, unsigned OpNum,
if (Imm == 0)
return;
assert(Imm > 0 && Imm < 32 && "Invalid PKH shift immediate value!");
- O << ", lsl #" << Imm;
+ O << ", lsl " << markup("<imm:") << "#" << Imm << markup(">");
}
void ARMInstPrinter::printPKHASRShiftImm(const MCInst *MI, unsigned OpNum,
@@ -621,7 +678,7 @@ void ARMInstPrinter::printPKHASRShiftImm(const MCInst *MI, unsigned OpNum,
if (Imm == 0)
Imm = 32;
assert(Imm > 0 && Imm <= 32 && "Invalid PKH shift immediate value!");
- O << ", asr #" << Imm;
+ O << ", asr " << markup("<imm:") << "#" << Imm << markup(">");
}
void ARMInstPrinter::printRegisterList(const MCInst *MI, unsigned OpNum,
@@ -629,7 +686,7 @@ void ARMInstPrinter::printRegisterList(const MCInst *MI, unsigned OpNum,
O << "{";
for (unsigned i = OpNum, e = MI->getNumOperands(); i != e; ++i) {
if (i != OpNum) O << ", ";
- O << getRegisterName(MI->getOperand(i).getReg());
+ printRegName(O, MI->getOperand(i).getReg());
}
O << "}";
}
@@ -803,23 +860,29 @@ void ARMInstPrinter::printAdrLabelOperand(const MCInst *MI, unsigned OpNum,
int32_t OffImm = (int32_t)MO.getImm();
+ O << markup("<imm:");
if (OffImm == INT32_MIN)
O << "#-0";
else if (OffImm < 0)
O << "#-" << -OffImm;
else
O << "#" << OffImm;
+ O << markup(">");
}
void ARMInstPrinter::printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- O << "#" << MI->getOperand(OpNum).getImm() * 4;
+ O << markup("<imm:")
+ << "#" << MI->getOperand(OpNum).getImm() * 4
+ << markup(">");
}
void ARMInstPrinter::printThumbSRImm(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
unsigned Imm = MI->getOperand(OpNum).getImm();
- O << "#" << (Imm == 0 ? 32 : Imm);
+ O << markup("<imm:")
+ << "#" << (Imm == 0 ? 32 : Imm)
+ << markup(">");
}
void ARMInstPrinter::printThumbITMask(const MCInst *MI, unsigned OpNum,
@@ -849,10 +912,13 @@ void ARMInstPrinter::printThumbAddrModeRROperand(const MCInst *MI, unsigned Op,
return;
}
- O << "[" << getRegisterName(MO1.getReg());
- if (unsigned RegNum = MO2.getReg())
- O << ", " << getRegisterName(RegNum);
- O << "]";
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
+ if (unsigned RegNum = MO2.getReg()) {
+ O << ", ";
+ printRegName(O, RegNum);
+ }
+ O << "]" << markup(">");
}
void ARMInstPrinter::printThumbAddrModeImm5SOperand(const MCInst *MI,
@@ -867,10 +933,15 @@ void ARMInstPrinter::printThumbAddrModeImm5SOperand(const MCInst *MI,
return;
}
- O << "[" << getRegisterName(MO1.getReg());
- if (unsigned ImmOffs = MO2.getImm())
- O << ", #" << ImmOffs * Scale;
- O << "]";
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
+ if (unsigned ImmOffs = MO2.getImm()) {
+ O << ", "
+ << markup("<imm:")
+ << "#" << ImmOffs * Scale
+ << markup(">");
+ }
+ O << "]" << markup(">");
}
void ARMInstPrinter::printThumbAddrModeImm5S1Operand(const MCInst *MI,
@@ -906,14 +977,12 @@ void ARMInstPrinter::printT2SOOperand(const MCInst *MI, unsigned OpNum,
const MCOperand &MO2 = MI->getOperand(OpNum+1);
unsigned Reg = MO1.getReg();
- O << getRegisterName(Reg);
+ printRegName(O, Reg);
// Print the shift opc.
assert(MO2.isImm() && "Not a valid t2_so_reg value!");
- ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO2.getImm());
- O << ", " << ARM_AM::getShiftOpcStr(ShOpc);
- if (ShOpc != ARM_AM::rrx)
- O << " #" << translateShiftImm(ARM_AM::getSORegOffset(MO2.getImm()));
+ printRegImmShift(O, ARM_AM::getSORegShOp(MO2.getImm()),
+ ARM_AM::getSORegOffset(MO2.getImm()), UseMarkup);
}
void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
@@ -926,18 +995,27 @@ void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
return;
}
- O << "[" << getRegisterName(MO1.getReg());
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
int32_t OffImm = (int32_t)MO2.getImm();
bool isSub = OffImm < 0;
// Special value for #-0. All others are normal.
if (OffImm == INT32_MIN)
OffImm = 0;
- if (isSub)
- O << ", #-" << -OffImm;
- else if (OffImm > 0)
- O << ", #" << OffImm;
- O << "]";
+ if (isSub) {
+ O << ", "
+ << markup("<imm:")
+ << "#-" << -OffImm
+ << markup(">");
+ }
+ else if (OffImm > 0) {
+ O << ", "
+ << markup("<imm:")
+ << "#" << OffImm
+ << markup(">");
+ }
+ O << "]" << markup(">");
}
void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI,
@@ -946,17 +1024,24 @@ void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI,
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
- O << "[" << getRegisterName(MO1.getReg());
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
int32_t OffImm = (int32_t)MO2.getImm();
// Don't print +0.
+ if (OffImm != 0)
+ O << ", ";
+ if (OffImm != 0 && UseMarkup)
+ O << "<imm:";
if (OffImm == INT32_MIN)
- O << ", #-0";
+ O << "#-0";
else if (OffImm < 0)
- O << ", #-" << -OffImm;
+ O << "#-" << -OffImm;
else if (OffImm > 0)
- O << ", #" << OffImm;
- O << "]";
+ O << "#" << OffImm;
+ if (OffImm != 0 && UseMarkup)
+ O << ">";
+ O << "]" << markup(">");
}
void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI,
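
Taken together, the memory-operand hunks give output of the following shape when markup is enabled (a hand-constructed sample, not taken from the commit):

  // plain:      ldr  r0, [r1, #8]
  // marked up:  ldr  r0, <mem:[r1, <imm:#8>]>
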
@@ -970,20 +1055,27 @@ void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI,
return;
}
- O << "[" << getRegisterName(MO1.getReg());
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
int32_t OffImm = (int32_t)MO2.getImm();
assert(((OffImm & 0x3) == 0) && "Not a valid immediate!");
// Don't print +0.
+ if (OffImm != 0)
+ O << ", ";
+ if (OffImm != 0 && UseMarkup)
+ O << "<imm:";
if (OffImm == INT32_MIN)
- O << ", #-0";
+ O << "#-0";
else if (OffImm < 0)
- O << ", #-" << -OffImm;
+ O << "#-" << -OffImm;
else if (OffImm > 0)
- O << ", #" << OffImm;
- O << "]";
+ O << "#" << OffImm;
+ if (OffImm != 0 && UseMarkup)
+ O << ">";
+ O << "]" << markup(">");
}
void ARMInstPrinter::printT2AddrModeImm0_1020s4Operand(const MCInst *MI,
@@ -992,10 +1084,15 @@ void ARMInstPrinter::printT2AddrModeImm0_1020s4Operand(const MCInst *MI,
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
- O << "[" << getRegisterName(MO1.getReg());
- if (MO2.getImm())
- O << ", #" << MO2.getImm() * 4;
- O << "]";
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
+ if (MO2.getImm()) {
+ O << ", "
+ << markup("<imm:")
+ << "#" << MO2.getImm() * 4
+ << markup(">");
+ }
+ O << "]" << markup(">");
}
void ARMInstPrinter::printT2AddrModeImm8OffsetOperand(const MCInst *MI,
@@ -1003,11 +1100,12 @@ void ARMInstPrinter::printT2AddrModeImm8OffsetOperand(const MCInst *MI,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
int32_t OffImm = (int32_t)MO1.getImm();
- // Don't print +0.
+ O << ", " << markup("<imm:");
if (OffImm < 0)
- O << ", #-" << -OffImm;
+ O << "#-" << -OffImm;
else
- O << ", #" << OffImm;
+ O << "#" << OffImm;
+ O << markup(">");
}
void ARMInstPrinter::printT2AddrModeImm8s4OffsetOperand(const MCInst *MI,
@@ -1019,12 +1117,18 @@ void ARMInstPrinter::printT2AddrModeImm8s4OffsetOperand(const MCInst *MI,
assert(((OffImm & 0x3) == 0) && "Not a valid immediate!");
// Don't print +0.
+ if (OffImm != 0)
+ O << ", ";
+ if (OffImm != 0 && UseMarkup)
+ O << "<imm:";
if (OffImm == INT32_MIN)
- O << ", #-0";
+ O << "#-0";
else if (OffImm < 0)
- O << ", #-" << -OffImm;
+ O << "#-" << -OffImm;
else if (OffImm > 0)
- O << ", #" << OffImm;
+ O << "#" << OffImm;
+ if (OffImm != 0 && UseMarkup)
+ O << ">";
}
void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI,
@@ -1034,23 +1138,30 @@ void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI,
const MCOperand &MO2 = MI->getOperand(OpNum+1);
const MCOperand &MO3 = MI->getOperand(OpNum+2);
- O << "[" << getRegisterName(MO1.getReg());
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
assert(MO2.getReg() && "Invalid so_reg load / store address!");
- O << ", " << getRegisterName(MO2.getReg());
+ O << ", ";
+ printRegName(O, MO2.getReg());
unsigned ShAmt = MO3.getImm();
if (ShAmt) {
assert(ShAmt <= 3 && "Not a valid Thumb2 addressing mode!");
- O << ", lsl #" << ShAmt;
+ O << ", lsl "
+ << markup("<imm:")
+ << "#" << ShAmt
+ << markup(">");
}
- O << "]";
+ O << "]" << markup(">");
}
void ARMInstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
- O << '#' << ARM_AM::getFPImmFloat(MO.getImm());
+ O << markup("<imm:")
+ << '#' << ARM_AM::getFPImmFloat(MO.getImm())
+ << markup(">");
}
void ARMInstPrinter::printNEONModImmOperand(const MCInst *MI, unsigned OpNum,
@@ -1058,14 +1169,18 @@ void ARMInstPrinter::printNEONModImmOperand(const MCInst *MI, unsigned OpNum,
unsigned EncodedImm = MI->getOperand(OpNum).getImm();
unsigned EltBits;
uint64_t Val = ARM_AM::decodeNEONModImm(EncodedImm, EltBits);
- O << "#0x";
+ O << markup("<imm:")
+ << "#0x";
O.write_hex(Val);
+ O << markup(">");
}
void ARMInstPrinter::printImmPlusOneOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
unsigned Imm = MI->getOperand(OpNum).getImm();
- O << "#" << Imm + 1;
+ O << markup("<imm:")
+ << "#" << Imm + 1
+ << markup(">");
}
void ARMInstPrinter::printRotImmOperand(const MCInst *MI, unsigned OpNum,
@@ -1073,23 +1188,30 @@ void ARMInstPrinter::printRotImmOperand(const MCInst *MI, unsigned OpNum,
unsigned Imm = MI->getOperand(OpNum).getImm();
if (Imm == 0)
return;
- O << ", ror #";
+ O << ", ror "
+ << markup("<imm:")
+ << "#";
switch (Imm) {
default: assert (0 && "illegal ror immediate!");
case 1: O << "8"; break;
case 2: O << "16"; break;
case 3: O << "24"; break;
}
+ O << markup(">");
}
void ARMInstPrinter::printFBits16(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- O << "#" << 16 - MI->getOperand(OpNum).getImm();
+ O << markup("<imm:")
+ << "#" << 16 - MI->getOperand(OpNum).getImm()
+ << markup(">");
}
void ARMInstPrinter::printFBits32(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- O << "#" << 32 - MI->getOperand(OpNum).getImm();
+ O << markup("<imm:")
+ << "#" << 32 - MI->getOperand(OpNum).getImm()
+ << markup(">");
}
void ARMInstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
@@ -1099,7 +1221,9 @@ void ARMInstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
void ARMInstPrinter::printVectorListOne(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << "}";
}
void ARMInstPrinter::printVectorListTwo(const MCInst *MI, unsigned OpNum,
@@ -1107,7 +1231,11 @@ void ARMInstPrinter::printVectorListTwo(const MCInst *MI, unsigned OpNum,
unsigned Reg = MI->getOperand(OpNum).getReg();
unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0);
unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_1);
- O << "{" << getRegisterName(Reg0) << ", " << getRegisterName(Reg1) << "}";
+ O << "{";
+ printRegName(O, Reg0);
+ O << ", ";
+ printRegName(O, Reg1);
+ O << "}";
}
void ARMInstPrinter::printVectorListTwoSpaced(const MCInst *MI,
@@ -1116,7 +1244,11 @@ void ARMInstPrinter::printVectorListTwoSpaced(const MCInst *MI,
unsigned Reg = MI->getOperand(OpNum).getReg();
unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0);
unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_2);
- O << "{" << getRegisterName(Reg0) << ", " << getRegisterName(Reg1) << "}";
+ O << "{";
+ printRegName(O, Reg0);
+ O << ", ";
+ printRegName(O, Reg1);
+ O << "}";
}
void ARMInstPrinter::printVectorListThree(const MCInst *MI, unsigned OpNum,
@@ -1124,9 +1256,13 @@ void ARMInstPrinter::printVectorListThree(const MCInst *MI, unsigned OpNum,
// Normally, it's not safe to use register enum values directly with
// addition to get the next register, but for VFP registers, the
// sort order is guaranteed because they're all of the form D<n>.
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 1);
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 2);
+ O << "}";
}
void ARMInstPrinter::printVectorListFour(const MCInst *MI, unsigned OpNum,
@@ -1134,16 +1270,23 @@ void ARMInstPrinter::printVectorListFour(const MCInst *MI, unsigned OpNum,
// Normally, it's not safe to use register enum values directly with
// addition to get the next register, but for VFP registers, the
// sort order is guaranteed because they're all of the form D<n>.
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 3) << "}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 1);
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 2);
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 3);
+ O << "}";
}
void ARMInstPrinter::printVectorListOneAllLanes(const MCInst *MI,
unsigned OpNum,
raw_ostream &O) {
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[]}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << "[]}";
}
void ARMInstPrinter::printVectorListTwoAllLanes(const MCInst *MI,
@@ -1152,7 +1295,11 @@ void ARMInstPrinter::printVectorListTwoAllLanes(const MCInst *MI,
unsigned Reg = MI->getOperand(OpNum).getReg();
unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0);
unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_1);
- O << "{" << getRegisterName(Reg0) << "[], " << getRegisterName(Reg1) << "[]}";
+ O << "{";
+ printRegName(O, Reg0);
+ O << "[], ";
+ printRegName(O, Reg1);
+ O << "[]}";
}
void ARMInstPrinter::printVectorListThreeAllLanes(const MCInst *MI,
@@ -1161,9 +1308,13 @@ void ARMInstPrinter::printVectorListThreeAllLanes(const MCInst *MI,
// Normally, it's not safe to use register enum values directly with
// addition to get the next register, but for VFP registers, the
// sort order is guaranteed because they're all of the form D<n>.
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[]}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 1);
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 2);
+ O << "[]}";
}
void ARMInstPrinter::printVectorListFourAllLanes(const MCInst *MI,
@@ -1172,10 +1323,15 @@ void ARMInstPrinter::printVectorListFourAllLanes(const MCInst *MI,
// Normally, it's not safe to use register enum values directly with
// addition to get the next register, but for VFP registers, the
// sort order is guaranteed because they're all of the form D<n>.
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 3) << "[]}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 1);
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 2);
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 3);
+ O << "[]}";
}
void ARMInstPrinter::printVectorListTwoSpacedAllLanes(const MCInst *MI,
@@ -1184,7 +1340,11 @@ void ARMInstPrinter::printVectorListTwoSpacedAllLanes(const MCInst *MI,
unsigned Reg = MI->getOperand(OpNum).getReg();
unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0);
unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_2);
- O << "{" << getRegisterName(Reg0) << "[], " << getRegisterName(Reg1) << "[]}";
+ O << "{";
+ printRegName(O, Reg0);
+ O << "[], ";
+ printRegName(O, Reg1);
+ O << "[]}";
}
void ARMInstPrinter::printVectorListThreeSpacedAllLanes(const MCInst *MI,
@@ -1193,9 +1353,13 @@ void ARMInstPrinter::printVectorListThreeSpacedAllLanes(const MCInst *MI,
// Normally, it's not safe to use register enum values directly with
// addition to get the next register, but for VFP registers, the
// sort order is guaranteed because they're all of the form D<n>.
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "[]}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 2);
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 4);
+ O << "[]}";
}
void ARMInstPrinter::printVectorListFourSpacedAllLanes(const MCInst *MI,
@@ -1204,10 +1368,15 @@ void ARMInstPrinter::printVectorListFourSpacedAllLanes(const MCInst *MI,
// Normally, it's not safe to use register enum values directly with
// addition to get the next register, but for VFP registers, the
// sort order is guaranteed because they're all of the form D<n>.
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "[], "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 6) << "[]}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 2);
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 4);
+ O << "[], ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 6);
+ O << "[]}";
}
void ARMInstPrinter::printVectorListThreeSpaced(const MCInst *MI,
@@ -1216,9 +1385,13 @@ void ARMInstPrinter::printVectorListThreeSpaced(const MCInst *MI,
// Normally, it's not safe to use register enum values directly with
// addition to get the next register, but for VFP registers, the
// sort order is guaranteed because they're all of the form D<n>.
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 2);
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 4);
+ O << "}";
}
void ARMInstPrinter::printVectorListFourSpaced(const MCInst *MI,
@@ -1227,8 +1400,13 @@ void ARMInstPrinter::printVectorListFourSpaced(const MCInst *MI,
// Normally, it's not safe to use register enum values directly with
// addition to get the next register, but for VFP registers, the
// sort order is guaranteed because they're all of the form D<n>.
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << ", "
- << getRegisterName(MI->getOperand(OpNum).getReg() + 6) << "}";
+ O << "{";
+ printRegName(O, MI->getOperand(OpNum).getReg());
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 2);
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 4);
+ O << ", ";
+ printRegName(O, MI->getOperand(OpNum).getReg() + 6);
+ O << "}";
}
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
index 73d7bfd..b7bab5f 100644
--- a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
@@ -126,7 +126,8 @@ public:
void printRotImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printT2LdrLabelOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printThumbLdrLabelOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
void printFBits16(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printFBits32(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVectorIndex(const MCInst *MI, unsigned OpNum, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 68c47ac..1ba6ab0 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -593,7 +593,9 @@ public:
const object::mach::CPUSubtypeARM Subtype;
DarwinARMAsmBackend(const Target &T, const StringRef TT,
object::mach::CPUSubtypeARM st)
- : ARMAsmBackend(T, TT), Subtype(st) { }
+ : ARMAsmBackend(T, TT), Subtype(st) {
+ HasDataInCodeSupport = true;
+ }
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return createARMMachObjectWriter(OS, /*Is64Bit=*/false,
@@ -687,6 +689,15 @@ MCAsmBackend *llvm::createARMAsmBackend(const Target &T, StringRef TT, StringRef
else if (TheTriple.getArchName() == "armv6" ||
TheTriple.getArchName() == "thumbv6")
return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V6);
+ else if (TheTriple.getArchName() == "armv7f" ||
+ TheTriple.getArchName() == "thumbv7f")
+ return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7F);
+ else if (TheTriple.getArchName() == "armv7k" ||
+ TheTriple.getArchName() == "thumbv7k")
+ return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7K);
+ else if (TheTriple.getArchName() == "armv7s" ||
+ TheTriple.getArchName() == "thumbv7s")
+ return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7S);
return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7);
}
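
The growing arch-name chain in createARMAsmBackend could equally be written with llvm::StringSwitch from llvm/ADT/StringSwitch.h; a hedged alternative sketch covering only the subtypes visible in this hunk, not what the commit actually does:

  object::mach::CPUSubtypeARM ST =
      StringSwitch<object::mach::CPUSubtypeARM>(TheTriple.getArchName())
          .Cases("armv6",  "thumbv6",  object::mach::CSARM_V6)
          .Cases("armv7f", "thumbv7f", object::mach::CSARM_V7F)
          .Cases("armv7k", "thumbv7k", object::mach::CSARM_V7K)
          .Cases("armv7s", "thumbv7s", object::mach::CSARM_V7S)
          .Default(object::mach::CSARM_V7);
  return new DarwinARMAsmBackend(T, TT, ST);
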
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index 7d6acbc..99e4f71 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -194,6 +194,10 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case ARM::fixup_arm_uncondbranch:
Type = ELF::R_ARM_JUMP24;
break;
+ case ARM::fixup_t2_condbranch:
+ case ARM::fixup_t2_uncondbranch:
+ Type = ELF::R_ARM_THM_JUMP24;
+ break;
case ARM::fixup_arm_movt_hi16:
case ARM::fixup_arm_movt_hi16_pcrel:
Type = ELF::R_ARM_MOVT_PREL;
@@ -242,6 +246,9 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_ARM_TARGET1:
Type = ELF::R_ARM_TARGET1;
break;
+ case MCSymbolRefExpr::VK_ARM_TARGET2:
+ Type = ELF::R_ARM_TARGET2;
+ break;
}
break;
case ARM::fixup_arm_ldst_pcrel_12:
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
index d32805e..c1aab9c 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
@@ -50,7 +50,6 @@ ARMELFMCAsmInfo::ARMELFMCAsmInfo() {
Code32Directive = ".code\t32";
WeakRefDirective = "\t.weak\t";
- LCOMMDirectiveType = LCOMM::NoAlignment;
HasLEB128 = true;
SupportsDebugInformation = true;
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index 94f1082..d0e127a 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -35,8 +35,8 @@ STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created.");
namespace {
class ARMMCCodeEmitter : public MCCodeEmitter {
- ARMMCCodeEmitter(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT
- void operator=(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT
+ ARMMCCodeEmitter(const ARMMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ void operator=(const ARMMCCodeEmitter &) LLVM_DELETED_FUNCTION;
const MCInstrInfo &MCII;
const MCSubtargetInfo &STI;
const MCContext &CTX;
@@ -783,7 +783,7 @@ getT2Imm8s4OpValue(const MCInst &MI, unsigned OpIdx,
// Immediate is always encoded as positive. The 'U' bit controls add vs sub.
if (Imm8 < 0)
- Imm8 = -Imm8;
+ Imm8 = -(uint32_t)Imm8;
// Scaled by 4.
Imm8 /= 4;
@@ -934,6 +934,10 @@ getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm());
unsigned SBits = getShiftOp(ShOp);
+ // While "lsr #32" and "asr #32" exist, they are encoded with a 0 in the shift
+  // amount. However, it would be an easy mistake to make, so check here.
+ assert((ShImm & ~0x1f) == 0 && "Out of range shift amount");
+
// {16-13} = Rn
// {12} = isAdd
// {11-0} = shifter
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
index a727e08..b404e6c 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
@@ -28,7 +28,7 @@ private:
explicit ARMMCExpr(VariantKind _Kind, const MCExpr *_Expr)
: Kind(_Kind), Expr(_Expr) {}
-
+
public:
/// @name Construction
/// @{
@@ -67,9 +67,6 @@ public:
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
}
-
- static bool classof(const ARMMCExpr *) { return true; }
-
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index 5df84c8..00ffc94 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -71,6 +71,14 @@ std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
else
// Use CPU to figure out the exact features.
ARMArchFeature = "+v7";
+ } else if (Len >= Idx+2 && TT[Idx+1] == 's') {
+ if (NoCPU)
+ // v7s: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk
+ // Swift
+ ARMArchFeature = "+v7,+swift,+neon,+db,+t2dsp,+t2xtpk";
+ else
+ // Use CPU to figure out the exact features.
+ ARMArchFeature = "+v7";
} else {
// v7 CPUs have lots of different feature sets. If no CPU is specified,
// then assume v7a (e.g. cortex-a8) feature set. Otherwise, return
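
For illustration, a bare "armv7s" (or "thumbv7s") triple with no explicit CPU now resolves to

  // ARMArchFeature == "+v7,+swift,+neon,+db,+t2dsp,+t2xtpk"

while a triple that does name a CPU keeps the generic "+v7" and lets the CPU name drive feature selection.
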
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index a51e0fa..2154c93 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -41,6 +41,12 @@ class ARMMachObjectWriter : public MCMachObjectTargetWriter {
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue);
+ bool requiresExternRelocation(MachObjectWriter *Writer,
+ const MCAssembler &Asm,
+ const MCFragment &Fragment,
+ unsigned RelocType, const MCSymbolData *SD,
+ uint64_t FixedValue);
+
public:
ARMMachObjectWriter(bool Is64Bit, uint32_t CPUType,
uint32_t CPUSubtype)
@@ -305,6 +311,46 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
Writer->addRelocation(Fragment->getParent(), MRE);
}
+bool ARMMachObjectWriter::requiresExternRelocation(MachObjectWriter *Writer,
+ const MCAssembler &Asm,
+ const MCFragment &Fragment,
+ unsigned RelocType,
+ const MCSymbolData *SD,
+ uint64_t FixedValue) {
+ // Most cases can be identified purely from the symbol.
+ if (Writer->doesSymbolRequireExternRelocation(SD))
+ return true;
+ int64_t Value = (int64_t)FixedValue; // The displacement is signed.
+ int64_t Range;
+ switch (RelocType) {
+ default:
+ return false;
+ case macho::RIT_ARM_Branch24Bit:
+ // PC pre-adjustment of 8 for these instructions.
+ Value -= 8;
+ // ARM BL/BLX has a 25-bit offset.
+ Range = 0x1ffffff;
+ break;
+ case macho::RIT_ARM_ThumbBranch22Bit:
+ // PC pre-adjustment of 4 for these instructions.
+ Value -= 4;
+ // Thumb BL/BLX has a 24-bit offset.
+ Range = 0xffffff;
+ }
+ // BL/BLX also use external relocations when an internal relocation
+ // would result in the target being out of range. This gives the linker
+ // enough information to generate a branch island.
+ const MCSectionData &SymSD = Asm.getSectionData(
+ SD->getSymbol().getSection());
+ Value += Writer->getSectionAddress(&SymSD);
+ Value -= Writer->getSectionAddress(Fragment.getParent());
+ // If the resultant value would be out of range for an internal relocation,
+ // use an external instead.
+ if (Value > Range || Value < -(Range + 1))
+ return true;
+ return false;
+}
+
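
The final range test in requiresExternRelocation accepts displacements in [-(Range+1), Range]: with the constants above that is roughly +/-32 MiB for ARM BL/BLX and +/-16 MiB for Thumb BL/BLX, measured after the PC pre-adjustment. A hypothetical helper expressing the same check:

  static bool fitsBranchRange(int64_t Value, int64_t Range) {
    // Signed displacement, inclusive at both ends of the range.
    return Value <= Range && Value >= -(Range + 1);
  }
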
void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
const MCAssembler &Asm,
const MCAsmLayout &Layout,
@@ -373,7 +419,8 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
}
// Check whether we need an external or internal relocation.
- if (Writer->doesSymbolRequireExternRelocation(SD)) {
+ if (requiresExternRelocation(Writer, Asm, *Fragment, RelocType, SD,
+ FixedValue)) {
IsExtern = 1;
Index = SD->getIndex();
@@ -410,7 +457,7 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
if (Type == macho::RIT_ARM_Half) {
// The other-half value only gets populated for the movt and movw
// relocation entries.
- uint32_t Value = 0;;
+ uint32_t Value = 0;
switch ((unsigned)Fixup.getKind()) {
default: break;
case ARM::fixup_arm_movw_lo16:
diff --git a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
index ad60e32..70643bc 100644
--- a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -51,7 +51,8 @@ namespace {
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI;
- bool isA9;
+ bool isLikeA9;
+ bool isSwift;
unsigned MIIdx;
MachineInstr* LastMIs[4];
SmallPtrSet<MachineInstr*, 4> IgnoreStall;
@@ -60,6 +61,7 @@ namespace {
void pushStack(MachineInstr *MI);
MachineInstr *getAccDefMI(MachineInstr *MI) const;
unsigned getDefReg(MachineInstr *MI) const;
+ bool hasLoopHazard(MachineInstr *MI) const;
bool hasRAWHazard(unsigned Reg, MachineInstr *MI) const;
bool FindMLxHazard(MachineInstr *MI);
void ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
@@ -135,6 +137,50 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const {
return Reg;
}
+/// hasLoopHazard - Check whether an MLx instruction is chained to itself across
+/// a single-MBB loop.
+bool MLxExpansion::hasLoopHazard(MachineInstr *MI) const {
+ unsigned Reg = MI->getOperand(1).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ return false;
+
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineInstr *DefMI = MRI->getVRegDef(Reg);
+ while (true) {
+outer_continue:
+ if (DefMI->getParent() != MBB)
+ break;
+
+ if (DefMI->isPHI()) {
+ for (unsigned i = 1, e = DefMI->getNumOperands(); i < e; i += 2) {
+ if (DefMI->getOperand(i + 1).getMBB() == MBB) {
+ unsigned SrcReg = DefMI->getOperand(i).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ DefMI = MRI->getVRegDef(SrcReg);
+ goto outer_continue;
+ }
+ }
+ }
+ } else if (DefMI->isCopyLike()) {
+ Reg = DefMI->getOperand(1).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ DefMI = MRI->getVRegDef(Reg);
+ continue;
+ }
+ } else if (DefMI->isInsertSubreg()) {
+ Reg = DefMI->getOperand(2).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ DefMI = MRI->getVRegDef(Reg);
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return DefMI == MI;
+}
+
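
Hand-written pseudo machine IR showing the shape of hazard hasLoopHazard() walks for (illustrative only; opcode and block names are made up):

  // loop:
  //   %acc1 = PHI [ %init, %entry ], [ %acc2, %loop ]  ; accumulator
  //   %acc2 = VMLAS %acc1, %a, %b                      ; MLx fed by itself
  //   ...
  //   B %loop
  //
  // Starting from operand 1 of the VMLAS, the walk follows the PHI's
  // back-edge value to the VMLAS's own def, so DefMI == MI and the
  // function returns true.
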
bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const {
// FIXME: Detect integer instructions properly.
const MCInstrDesc &MCID = MI->getDesc();
@@ -149,6 +195,19 @@ bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const {
return false;
}
+static bool isFpMulInstruction(unsigned Opcode) {
+ switch (Opcode) {
+ case ARM::VMULS:
+ case ARM::VMULfd:
+ case ARM::VMULfq:
+ case ARM::VMULD:
+ case ARM::VMULslfd:
+ case ARM::VMULslfq:
+ return true;
+ default:
+ return false;
+ }
+}
bool MLxExpansion::FindMLxHazard(MachineInstr *MI) {
if (NumExpand >= ExpandLimit)
@@ -171,6 +230,12 @@ bool MLxExpansion::FindMLxHazard(MachineInstr *MI) {
return true;
}
+ // On Swift, we mostly care about hazards from multiplication instructions
+ // writing the accumulator and the pipelining of loop iterations by out-of-
+ // order execution.
+ if (isSwift)
+ return isFpMulInstruction(DefMI->getOpcode()) || hasLoopHazard(MI);
+
if (IgnoreStall.count(MI))
return false;
@@ -179,8 +244,8 @@ bool MLxExpansion::FindMLxHazard(MachineInstr *MI) {
// preserves the in-order retirement of the instructions.
// Look at the next few instructions, if *most* of them can cause hazards,
// then the scheduler can't *fix* this, we'd better break up the VMLA.
- unsigned Limit1 = isA9 ? 1 : 4;
- unsigned Limit2 = isA9 ? 1 : 4;
+ unsigned Limit1 = isLikeA9 ? 1 : 4;
+ unsigned Limit2 = isLikeA9 ? 1 : 4;
for (unsigned i = 1; i <= 4; ++i) {
int Idx = ((int)MIIdx - i + 4) % 4;
MachineInstr *NextMI = LastMIs[Idx];
@@ -316,7 +381,8 @@ bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) {
TRI = Fn.getTarget().getRegisterInfo();
MRI = &Fn.getRegInfo();
const ARMSubtarget *STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
- isA9 = STI->isCortexA9();
+ isLikeA9 = STI->isLikeA9() || STI->isSwift();
+ isSwift = STI->isSwift();
bool Modified = false;
for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp b/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
index 03d5a9a..3396e8b 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
@@ -130,8 +130,7 @@ namespace {
void
printS10ImmOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O)
{
- short value = (short) (((int) MI->getOperand(OpNo).getImm() << 16)
- >> 16);
+ short value = MI->getOperand(OpNo).getImm();
assert((value >= -(1 << 9) && value <= (1 << 9) - 1)
&& "Invalid s10 argument");
O << value;
@@ -140,8 +139,7 @@ namespace {
void
printU10ImmOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O)
{
- short value = (short) (((int) MI->getOperand(OpNo).getImm() << 16)
- >> 16);
+ short value = MI->getOperand(OpNo).getImm();
assert((value <= (1 << 10) - 1) && "Invalid u10 argument");
O << value;
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp
index fac806e1..f011995 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp
@@ -22,7 +22,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
index c27caea..5d50610 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
@@ -67,8 +67,8 @@ namespace {
//! ConstantSDNode predicate for signed 16-bit values
/*!
- \arg CN The constant SelectionDAG node holding the value
- \arg Imm The returned 16-bit value, if returning true
+ \param CN The constant SelectionDAG node holding the value
+ \param Imm The returned 16-bit value, if returning true
This predicate tests the value in \a CN to see whether it can be
represented as a 16-bit, sign-extended quantity. Returns true if
@@ -83,12 +83,10 @@ namespace {
return true;
} else if (vt == MVT::i32) {
int32_t i_val = (int32_t) CN->getZExtValue();
- short s_val = (short) i_val;
- return i_val == s_val;
+ return i_val == SignExtend32<16>(i_val);
} else {
int64_t i_val = (int64_t) CN->getZExtValue();
- short s_val = (short) i_val;
- return i_val == s_val;
+ return i_val == SignExtend64<16>(i_val);
}
}
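
SignExtend32/SignExtend64 are the templates from llvm/Support/MathExtras.h; the predicate accepts exactly the values that survive a round trip through a signed 16-bit truncation. Two sanity values:

  // SignExtend32<16>(0xFFFF8000) == 0xFFFF8000  // -32768 fits in s16
  // SignExtend32<16>(0x00008000) == 0xFFFF8000  // 32768 does not fit
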
@@ -99,9 +97,10 @@ namespace {
EVT vt = FPN->getValueType(0);
if (vt == MVT::f32) {
int val = FloatToBits(FPN->getValueAPF().convertToFloat());
- int sval = (int) ((val << 16) >> 16);
- Imm = (short) val;
- return val == sval;
+ if (val == SignExtend32<16>(val)) {
+ Imm = (short) val;
+ return true;
+ }
}
return false;
@@ -306,10 +305,10 @@ namespace {
}
/*!
- \arg Op The ISD instruction operand
- \arg N The address to be tested
- \arg Base The base address
- \arg Index The base address index
+ \param Op The ISD instruction operand
+ \param N The address to be tested
+ \param Base The base address
+ \param Index The base address index
*/
bool
SPUDAGToDAGISel::SelectAFormAddr(SDNode *Op, SDValue N, SDValue &Base,
@@ -376,10 +375,10 @@ SPUDAGToDAGISel::SelectDForm2Addr(SDNode *Op, SDValue N, SDValue &Disp,
}
/*!
- \arg Op The ISD instruction (ignored)
- \arg N The address to be tested
- \arg Base Base address register/pointer
- \arg Index Base address index
+ \param Op The ISD instruction (ignored)
+ \param N The address to be tested
+ \param Base Base address register/pointer
+ \param Index Base address index
Examine the input address by a base register plus a signed 10-bit
displacement, [r+I10] (D-form address).
@@ -542,10 +541,10 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
}
/*!
- \arg Op The ISD instruction operand
- \arg N The address operand
- \arg Base The base pointer operand
- \arg Index The offset/index operand
+ \param Op The ISD instruction operand
+ \param N The address operand
+ \param Base The base pointer operand
+ \param Index The offset/index operand
If the address \a N can be expressed as an A-form or D-form address, returns
false. Otherwise, creates two operands, Base and Index that will become the
@@ -570,7 +569,7 @@ SPUDAGToDAGISel::SelectXFormAddr(SDNode *Op, SDValue N, SDValue &Base,
Utility function to use with COPY_TO_REGCLASS instructions. Returns a SDValue
to be used as the last parameter of a
CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call
- \arg VT the value type for which we want a register class
+ \param VT the value type for which we want a register class
*/
SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
switch( VT.SimpleTy ) {
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h b/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h
index 7c4aa14..27d28b2 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h
@@ -80,9 +80,9 @@ namespace llvm {
return UseLargeMem;
}
- /// getTargetDataString - Return the pointer size and type alignment
+ /// getDataLayoutString - Return the pointer size and type alignment
/// properties of this subtarget.
- const char *getTargetDataString() const {
+ const char *getDataLayoutString() const {
return "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128"
"-i16:16:128-i8:8:128-i1:8:128-a:0:128-v64:64:128-v128:128:128"
"-s:128:128-n32:64";
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
index 54764f1..9183165 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
@@ -38,12 +38,13 @@ SPUTargetMachine::SPUTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS),
- DataLayout(Subtarget.getTargetDataString()),
+ DL(Subtarget.getDataLayoutString()),
InstrInfo(*this),
FrameLowering(Subtarget),
TLInfo(*this),
TSInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
+ InstrItins(Subtarget.getInstrItineraryData()),
+ STTI(&TLInfo), VTTI(&TLInfo) {
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
index 3e5d38c..7f53ea6 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
@@ -20,7 +20,8 @@
#include "SPUSelectionDAGInfo.h"
#include "SPUFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetTransformImpl.h"
+#include "llvm/DataLayout.h"
namespace llvm {
@@ -28,12 +29,14 @@ namespace llvm {
///
class SPUTargetMachine : public LLVMTargetMachine {
SPUSubtarget Subtarget;
- const TargetData DataLayout;
+ const DataLayout DL;
SPUInstrInfo InstrInfo;
SPUFrameLowering FrameLowering;
SPUTargetLowering TLInfo;
SPUSelectionDAGInfo TSInfo;
InstrItineraryData InstrItins;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
SPUTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -70,13 +73,19 @@ public:
return &InstrInfo.getRegisterInfo();
}
- virtual const TargetData *getTargetData() const {
- return &DataLayout;
+ virtual const DataLayout *getDataLayout() const {
+ return &DL;
}
virtual const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
index c8e757b..5c90990 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -285,14 +285,14 @@ void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
Out << "GlobalValue::LinkerPrivateLinkage"; break;
case GlobalValue::LinkerPrivateWeakLinkage:
Out << "GlobalValue::LinkerPrivateWeakLinkage"; break;
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
- Out << "GlobalValue::LinkerPrivateWeakDefAutoLinkage"; break;
case GlobalValue::AvailableExternallyLinkage:
Out << "GlobalValue::AvailableExternallyLinkage "; break;
case GlobalValue::LinkOnceAnyLinkage:
Out << "GlobalValue::LinkOnceAnyLinkage "; break;
case GlobalValue::LinkOnceODRLinkage:
Out << "GlobalValue::LinkOnceODRLinkage "; break;
+ case GlobalValue::LinkOnceODRAutoHideLinkage:
+ Out << "GlobalValue::LinkOnceODRAutoHideLinkage"; break;
case GlobalValue::WeakAnyLinkage:
Out << "GlobalValue::WeakAnyLinkage"; break;
case GlobalValue::WeakODRLinkage:
@@ -474,13 +474,15 @@ void CppWriter::printAttributes(const AttrListPtr &PAL,
Out << "AttributeWithIndex PAWI;"; nl(Out);
for (unsigned i = 0; i < PAL.getNumSlots(); ++i) {
unsigned index = PAL.getSlot(i).Index;
- Attributes attrs = PAL.getSlot(i).Attrs;
- Out << "PAWI.Index = " << index << "U; PAWI.Attrs = Attribute::None ";
-#define HANDLE_ATTR(X) \
- if (attrs & Attribute::X) \
- Out << " | Attribute::" #X; \
- attrs &= ~Attribute::X;
-
+ AttrBuilder attrs(PAL.getSlot(i).Attrs);
+ Out << "PAWI.Index = " << index << "U;\n";
+ Out << " {\n AttrBuilder B;\n";
+
+#define HANDLE_ATTR(X) \
+ if (attrs.hasAttribute(Attributes::X)) \
+ Out << " B.addAttribute(Attributes::" #X ");\n"; \
+ attrs.removeAttribute(Attributes::X);
+
HANDLE_ATTR(SExt);
HANDLE_ATTR(ZExt);
HANDLE_ATTR(NoReturn);
@@ -505,19 +507,18 @@ void CppWriter::printAttributes(const AttrListPtr &PAL,
HANDLE_ATTR(ReturnsTwice);
HANDLE_ATTR(UWTable);
HANDLE_ATTR(NonLazyBind);
+ HANDLE_ATTR(MinSize);
#undef HANDLE_ATTR
- if (attrs & Attribute::StackAlignment)
- Out << " | Attribute::constructStackAlignmentFromInt("
- << Attribute::getStackAlignmentFromAttrs(attrs)
- << ")";
- attrs &= ~Attribute::StackAlignment;
- assert(attrs == 0 && "Unhandled attribute!");
- Out << ";";
+ if (attrs.hasAttribute(Attributes::StackAlignment))
+ Out << " B.addStackAlignmentAttr(" << attrs.getStackAlignment() << ")\n";
+ attrs.removeAttribute(Attributes::StackAlignment);
+ assert(!attrs.hasAttributes() && "Unhandled attribute!");
+ Out << " PAWI.Attrs = Attributes::get(mod->getContext(), B);\n }";
nl(Out);
Out << "Attrs.push_back(PAWI);";
nl(Out);
}
- Out << name << "_PAL = AttrListPtr::get(Attrs);";
+ Out << name << "_PAL = AttrListPtr::get(mod->getContext(), Attrs);";
nl(Out);
out(); nl(Out);
Out << '}'; nl(Out);
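
Reconstructed by hand from the Out << statements above, one attribute slot in the emitted C++ now reads roughly as follows (whitespace approximate; note that the stack-alignment branch emits its B.addStackAlignmentAttr(...) line without a trailing semicolon, which looks like an oversight):

  // PAWI.Index = 1U;
  //  {
  //   AttrBuilder B;
  //   B.addAttribute(Attributes::SExt);
  //   PAWI.Attrs = Attributes::get(mod->getContext(), B);
  //  }
  // Attrs.push_back(PAWI);
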
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h b/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
index 9cbe798..30d765d 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
+++ b/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
@@ -15,7 +15,7 @@
#define CPPTARGETMACHINE_H
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
namespace llvm {
@@ -35,7 +35,7 @@ struct CPPTargetMachine : public TargetMachine {
AnalysisID StartAfter,
AnalysisID StopAfter);
- virtual const TargetData *getTargetData() const { return 0; }
+ virtual const DataLayout *getDataLayout() const { return 0; }
};
extern Target TheCppBackendTarget;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
index 5fa4740..c15bce6 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
@@ -46,7 +46,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp
index ba8e679..73f9d9a 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp
@@ -16,7 +16,7 @@
#include "HexagonCallingConvLower.h"
#include "Hexagon.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 703a128..1c891f1 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1350,6 +1350,8 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
} else {
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
}
+ // Increase jump tables cutover to 5, was 4.
+ setMinimumJumpTableEntries(5);
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
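
For context, setMinimumJumpTableEntries() controls how many reachable case destinations a switch needs before ISel emits a jump table. A hypothetical IR fragment illustrating the new cutover (block names are placeholders): with the minimum raised to 5, a four-case switch like this should now lower to a compare/branch tree on Hexagon rather than a table:

    switch i32 %x, label %otherwise [ i32 0, label %bb0
                                      i32 1, label %bb1
                                      i32 2, label %bb2
                                      i32 3, label %bb3 ]
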
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
index e472d49..a64c7a1 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -56,6 +56,16 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
bits<1> isPredicated = 0;
let TSFlags{6} = isPredicated;
+ // Dot new value store instructions.
+ bits<1> isNVStore = 0;
+ let TSFlags{8} = isNVStore;
+
+ // Fields used for relation models.
+ string BaseOpcode = "";
+ string CextOpcode = "";
+ string PredSense = "";
+ string PNewValue = "";
+ string InputType = ""; // Input is "imm" or "reg" type.
// *** The code above must match HexagonBaseInfo.h ***
}
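
Since the comment above requires these bit assignments to stay in sync with HexagonBaseInfo.h, the matching C++ accessor presumably looks like the sketch below (the function name and explicit shift are illustrative; the real constants live in HexagonBaseInfo.h):

    // Reads TSFlags{8}, declared by 'let TSFlags{8} = isNVStore' above.
    static bool isNewValueStore(const llvm::MachineInstr &MI) {
      const uint64_t F = MI.getDesc().TSFlags;
      return (F >> 8) & 1;
    }
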
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index c8f933d..8435440 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/MathExtras.h"
#define GET_INSTRINFO_CTOR
+#define GET_INSTRMAP_INFO
#include "HexagonGenInstrInfo.inc"
#include "HexagonGenDFAPacketizer.inc"
@@ -1915,6 +1916,15 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
int HexagonInstrInfo::
getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
+ enum Hexagon::PredSense inPredSense;
+ inPredSense = invertPredicate ? Hexagon::PredSense_false :
+ Hexagon::PredSense_true;
+ int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
+ if (CondOpcode >= 0) // Valid Conditional opcode/instruction
+ return CondOpcode;
+
+ // This switch case will be removed once all the instructions have been
+ // modified to use relation maps.
switch(Opc) {
case Hexagon::TFR:
return !invertPredicate ? Hexagon::TFR_cPt :
@@ -1934,24 +1944,6 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
case Hexagon::JMP_EQriPt_nv_V4:
return !invertPredicate ? Hexagon::JMP_EQriPt_nv_V4 :
Hexagon::JMP_EQriNotPt_nv_V4;
- case Hexagon::ADD_ri:
- return !invertPredicate ? Hexagon::ADD_ri_cPt :
- Hexagon::ADD_ri_cNotPt;
- case Hexagon::ADD_rr:
- return !invertPredicate ? Hexagon::ADD_rr_cPt :
- Hexagon::ADD_rr_cNotPt;
- case Hexagon::XOR_rr:
- return !invertPredicate ? Hexagon::XOR_rr_cPt :
- Hexagon::XOR_rr_cNotPt;
- case Hexagon::AND_rr:
- return !invertPredicate ? Hexagon::AND_rr_cPt :
- Hexagon::AND_rr_cNotPt;
- case Hexagon::OR_rr:
- return !invertPredicate ? Hexagon::OR_rr_cPt :
- Hexagon::OR_rr_cNotPt;
- case Hexagon::SUB_rr:
- return !invertPredicate ? Hexagon::SUB_rr_cPt :
- Hexagon::SUB_rr_cNotPt;
case Hexagon::COMBINE_rr:
return !invertPredicate ? Hexagon::COMBINE_rr_cPt :
Hexagon::COMBINE_rr_cNotPt;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
index c0c0df6..1d4a706 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -15,6 +15,18 @@ include "HexagonInstrFormats.td"
include "HexagonImmediates.td"
//===----------------------------------------------------------------------===//
+// Classes used for relation maps.
+//===----------------------------------------------------------------------===//
+// PredRel - Filter class used to relate non-predicated instructions with their
+// predicated forms.
+class PredRel;
+// PredNewRel - Filter class used to relate predicated instructions with their
+// predicate-new forms.
+class PredNewRel: PredRel;
+// ImmRegRel - Filter class used to relate instructions having reg-reg form
+// with their reg-imm counterparts.
+class ImmRegRel;
+//===----------------------------------------------------------------------===//
// Hexagon Instruction Predicate Definitions.
//===----------------------------------------------------------------------===//
def HasV2T : Predicate<"Subtarget.hasV2TOps()">;
@@ -148,37 +160,91 @@ multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> {
}
//===----------------------------------------------------------------------===//
-// ALU32/ALU +
+// ALU32/ALU (Instructions with register-register form)
//===----------------------------------------------------------------------===//
-// Add.
-let isCommutable = 1, isPredicable = 1 in
-def ADD_rr : ALU32_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = add($src1, $src2)",
- [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
+multiclass ALU32_Pbase<string mnemonic, bit isNot,
+ bit isPredNew> {
-let isPredicable = 1 in
-def ADD_ri : ALU32_ri<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s16Imm:$src2),
- "$dst = add($src1, #$src2)",
- [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1),
- s16ImmPred:$src2))]>;
+ let PNewValue = #!if(isPredNew, "new", "") in
+ def #NAME# : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs: $src3),
+ !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ",
+ ") $dst = ")#mnemonic#"($src2, $src3)",
+ []>;
+}
-// Logical operations.
-let isPredicable = 1 in
-def XOR_rr : ALU32_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = xor($src1, $src2)",
- [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
+multiclass ALU32_Pred<string mnemonic, bit PredNot> {
+ let PredSense = #!if(PredNot, "false", "true") in {
+ defm _c#NAME# : ALU32_Pbase<mnemonic, PredNot, 0>;
+ // Predicate new
+ defm _cdn#NAME# : ALU32_Pbase<mnemonic, PredNot, 1>;
+ }
+}
-let isCommutable = 1, isPredicable = 1 in
-def AND_rr : ALU32_rr<(outs IntRegs:$dst),
+let InputType = "reg" in
+multiclass ALU32_base<string mnemonic, string CextOp, SDNode OpNode> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_rr in {
+ let isPredicable = 1 in
+ def #NAME# : ALU32_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
- "$dst = and($src1, $src2)",
- [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
+ "$dst = "#mnemonic#"($src1, $src2)",
+ [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
+
+ let neverHasSideEffects = 1, isPredicated = 1 in {
+ defm Pt : ALU32_Pred<mnemonic, 0>;
+ defm NotPt : ALU32_Pred<mnemonic, 1>;
+ }
+ }
+}
+
+let isCommutable = 1 in {
+ defm ADD_rr : ALU32_base<"add", "ADD", add>, ImmRegRel, PredNewRel;
+ defm AND_rr : ALU32_base<"and", "AND", and>, ImmRegRel, PredNewRel;
+ defm XOR_rr : ALU32_base<"xor", "XOR", xor>, ImmRegRel, PredNewRel;
+ defm OR_rr : ALU32_base<"or", "OR", or>, ImmRegRel, PredNewRel;
+}
+
+defm SUB_rr : ALU32_base<"sub", "SUB", sub>, ImmRegRel, PredNewRel;
+
+//===----------------------------------------------------------------------===//
+// ALU32/ALU (ADD with register-immediate form)
+//===----------------------------------------------------------------------===//
+multiclass ALU32ri_Pbase<string mnemonic, bit isNot, bit isPredNew> {
+ let PNewValue = #!if(isPredNew, "new", "") in
+ def #NAME# : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, s8Imm: $src3),
+ !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ",
+ ") $dst = ")#mnemonic#"($src2, #$src3)",
+ []>;
+}
+
+multiclass ALU32ri_Pred<string mnemonic, bit PredNot> {
+ let PredSense = #!if(PredNot, "false", "true") in {
+ defm _c#NAME# : ALU32ri_Pbase<mnemonic, PredNot, 0>;
+ // Predicate new
+ defm _cdn#NAME# : ALU32ri_Pbase<mnemonic, PredNot, 1>;
+ }
+}
+
+let InputType = "imm" in
+multiclass ALU32ri_base<string mnemonic, string CextOp, SDNode OpNode> {
+ let CextOpcode = CextOp, BaseOpcode = CextOp#_ri in {
+ let isPredicable = 1 in
+ def #NAME# : ALU32_ri<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s16Imm:$src2),
+ "$dst = "#mnemonic#"($src1, #$src2)",
+ [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1),
+ (s16ImmPred:$src2)))]>;
+
+ let neverHasSideEffects = 1, isPredicated = 1 in {
+ defm Pt : ALU32ri_Pred<mnemonic, 0>;
+ defm NotPt : ALU32ri_Pred<mnemonic, 1>;
+ }
+ }
+}
+
+defm ADD_ri : ALU32ri_base<"add", "ADD", add>, ImmRegRel, PredNewRel;
def OR_ri : ALU32_ri<(outs IntRegs:$dst),
(ins IntRegs:$src1, s10Imm:$src2),
@@ -197,13 +263,6 @@ def AND_ri : ALU32_ri<(outs IntRegs:$dst),
[(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
s10ImmPred:$src2))]>;
-let isCommutable = 1, isPredicable = 1 in
-def OR_rr : ALU32_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = or($src1, $src2)",
- [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
// Negate.
def NEG : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
"$dst = neg($src1)",
@@ -214,14 +273,6 @@ def NOP : ALU32_rr<(outs), (ins),
"nop",
[]>;
-// Subtract.
-let isPredicable = 1 in
-def SUB_rr : ALU32_rr<(outs IntRegs:$dst),
- (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = sub($src1, $src2)",
- [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1),
- (i32 IntRegs:$src2)))]>;
-
// Rd32=sub(#s10,Rs32)
def SUB_ri : ALU32_ri<(outs IntRegs:$dst),
(ins s10Imm:$src1, IntRegs:$src2),
@@ -348,56 +399,6 @@ def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
// ALU32/PRED +
//===----------------------------------------------------------------------===//
-// Conditional add.
-let neverHasSideEffects = 1, isPredicated = 1 in
-def ADD_ri_cPt : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
- "if ($src1) $dst = add($src2, #$src3)",
- []>;
-
-let neverHasSideEffects = 1, isPredicated = 1 in
-def ADD_ri_cNotPt : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
- "if (!$src1) $dst = add($src2, #$src3)",
- []>;
-
-let neverHasSideEffects = 1, isPredicated = 1 in
-def ADD_ri_cdnPt : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
- "if ($src1.new) $dst = add($src2, #$src3)",
- []>;
-
-let neverHasSideEffects = 1, isPredicated = 1 in
-def ADD_ri_cdnNotPt : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
- "if (!$src1.new) $dst = add($src2, #$src3)",
- []>;
-
-let neverHasSideEffects = 1, isPredicated = 1 in
-def ADD_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1) $dst = add($src2, $src3)",
- []>;
-
-let neverHasSideEffects = 1, isPredicated = 1 in
-def ADD_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1) $dst = add($src2, $src3)",
- []>;
-
-let neverHasSideEffects = 1, isPredicated = 1 in
-def ADD_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1.new) $dst = add($src2, $src3)",
- []>;
-
-let neverHasSideEffects = 1, isPredicated = 1 in
-def ADD_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1.new) $dst = add($src2, $src3)",
- []>;
-
-
// Conditional combine.
let neverHasSideEffects = 1, isPredicated = 1 in
@@ -424,108 +425,6 @@ def COMBINE_rr_cdnNotPt : ALU32_rr<(outs DoubleRegs:$dst),
"if (!$src1.new) $dst = combine($src2, $src3)",
[]>;
-// Conditional logical operations.
-
-let isPredicated = 1 in
-def XOR_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1) $dst = xor($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def XOR_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1) $dst = xor($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def XOR_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1.new) $dst = xor($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def XOR_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1.new) $dst = xor($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def AND_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1) $dst = and($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def AND_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1) $dst = and($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def AND_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1.new) $dst = and($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def AND_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1.new) $dst = and($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def OR_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1) $dst = or($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def OR_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1) $dst = or($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def OR_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1.new) $dst = or($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def OR_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1.new) $dst = or($src2, $src3)",
- []>;
-
-
-// Conditional subtract.
-
-let isPredicated = 1 in
-def SUB_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1) $dst = sub($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def SUB_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1) $dst = sub($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def SUB_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if ($src1.new) $dst = sub($src2, $src3)",
- []>;
-
-let isPredicated = 1 in
-def SUB_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- "if (!$src1.new) $dst = sub($src2, $src3)",
- []>;
-
-
// Conditional transfer.
let neverHasSideEffects = 1, isPredicated = 1 in
def TFR_cPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2),
@@ -3546,4 +3445,31 @@ include "HexagonInstrInfoV5.td"
// V5 Instructions -
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// Generate mapping table to relate non-predicate instructions with their
+// predicated formats - true and false.
+//
+
+def getPredOpcode : InstrMapping {
+ let FilterClass = "PredRel";
+ // Instructions with the same BaseOpcode and isNVStore values form a row.
+ let RowFields = ["BaseOpcode", "isNVStore", "PNewValue"];
+ // Instructions with the same predicate sense form a column.
+ let ColFields = ["PredSense"];
+ // The key column holds the unpredicated instructions.
+ let KeyCol = [""];
+ // Value columns are PredSense=true and PredSense=false.
+ let ValueCols = [["true"], ["false"]];
+}
+//===----------------------------------------------------------------------===//
+// Generate mapping table to relate predicated instructions with their .new
+// format.
+//
+def getPredNewOpcode : InstrMapping {
+ let FilterClass = "PredNewRel";
+ let RowFields = ["BaseOpcode", "PredSense", "isNVStore"];
+ let ColFields = ["PNewValue"];
+ let KeyCol = [""];
+ let ValueCols = [["new"]];
+}
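
Both tables are consumed through query functions that TableGen's relation-map backend emits into HexagonGenInstrInfo.inc (pulled in by the GET_INSTRMAP_INFO define added to HexagonInstrInfo.cpp above). A hedged sketch of the expected usage; the getPredOpcode call mirrors the one in getMatchingCondBranchOpcode(), while the single-argument getPredNewOpcode signature is assumed from its single value column:

    // Non-predicated opcode -> its predicated form; -1 when no mapping exists.
    int PredOpc = Hexagon::getPredOpcode(Hexagon::ADD_rr,
                                         Hexagon::PredSense_true);
    if (PredOpc >= 0) {
      // Predicated form -> its .new variant, again -1 if absent.
      int PredNewOpc = Hexagon::getPredNewOpcode(PredOpc);
      (void)PredNewOpc;
    }
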
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
new file mode 100644
index 0000000..0e9ef48
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -0,0 +1,681 @@
+//===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MachineScheduler schedules machine instructions after phi elimination. It
+// preserves LiveIntervals so it can be invoked before register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "misched"
+
+#include "HexagonMachineScheduler.h"
+
+#include <queue>
+
+using namespace llvm;
+
+/// Platform specific modifications to DAG.
+void VLIWMachineScheduler::postprocessDAG() {
+ SUnit* LastSequentialCall = NULL;
+ // Currently we only catch the situation when compare gets scheduled
+ // before preceding call.
+ for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
+ // Remember the call.
+ if (SUnits[su].getInstr()->isCall())
+ LastSequentialCall = &(SUnits[su]);
+ // Look for a compare that defines a predicate.
+ else if (SUnits[su].getInstr()->isCompare() && LastSequentialCall)
+ SUnits[su].addPred(SDep(LastSequentialCall, SDep::Barrier));
+ }
+}
+
+/// Check if scheduling of this SU is possible
+/// in the current packet.
+/// It is _not_ precise (stateful); it is more like
+/// another heuristic. Many corner cases were determined
+/// empirically.
+bool VLIWResourceModel::isResourceAvailable(SUnit *SU) {
+ if (!SU || !SU->getInstr())
+ return false;
+
+ // First see if the pipeline could receive this instruction
+ // in the current cycle.
+ switch (SU->getInstr()->getOpcode()) {
+ default:
+ if (!ResourcesModel->canReserveResources(SU->getInstr()))
+ return false;
+ case TargetOpcode::EXTRACT_SUBREG:
+ case TargetOpcode::INSERT_SUBREG:
+ case TargetOpcode::SUBREG_TO_REG:
+ case TargetOpcode::REG_SEQUENCE:
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::COPY:
+ case TargetOpcode::INLINEASM:
+ break;
+ }
+
+ // Now check that there are no dependencies on instructions already
+ // in the packet.
+ for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
+ if (Packet[i]->Succs.size() == 0)
+ continue;
+ for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
+ E = Packet[i]->Succs.end(); I != E; ++I) {
+ // Since we do not add pseudos to packets, might as well
+ // ignore order dependencies.
+ if (I->isCtrl())
+ continue;
+
+ if (I->getSUnit() == SU)
+ return false;
+ }
+ }
+ return true;
+}
+
+/// Keep track of available resources.
+bool VLIWResourceModel::reserveResources(SUnit *SU) {
+ bool startNewCycle = false;
+ // Artificially reset state.
+ if (!SU) {
+ ResourcesModel->clearResources();
+ Packet.clear();
+ TotalPackets++;
+ return false;
+ }
+ // If this SU does not fit in the packet,
+ // start a new one.
+ if (!isResourceAvailable(SU)) {
+ ResourcesModel->clearResources();
+ Packet.clear();
+ TotalPackets++;
+ startNewCycle = true;
+ }
+
+ switch (SU->getInstr()->getOpcode()) {
+ default:
+ ResourcesModel->reserveResources(SU->getInstr());
+ break;
+ case TargetOpcode::EXTRACT_SUBREG:
+ case TargetOpcode::INSERT_SUBREG:
+ case TargetOpcode::SUBREG_TO_REG:
+ case TargetOpcode::REG_SEQUENCE:
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::COPY:
+ case TargetOpcode::INLINEASM:
+ break;
+ }
+ Packet.push_back(SU);
+
+#ifndef NDEBUG
+ DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
+ for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
+ DEBUG(dbgs() << "\t[" << i << "] SU(");
+ DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
+ DEBUG(Packet[i]->getInstr()->dump());
+ }
+#endif
+
+ // If packet is now full, reset the state so in the next cycle
+ // we start fresh.
+ if (Packet.size() >= SchedModel->getIssueWidth()) {
+ ResourcesModel->clearResources();
+ Packet.clear();
+ TotalPackets++;
+ startNewCycle = true;
+ }
+
+ return startNewCycle;
+}
+
+/// schedule - Called back from MachineScheduler::runOnMachineFunction
+/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
+/// only includes instructions that have DAG nodes, not scheduling boundaries.
+void VLIWMachineScheduler::schedule() {
+ DEBUG(dbgs()
+ << "********** MI Converging Scheduling VLIW BB#" << BB->getNumber()
+ << " " << BB->getName()
+ << " in_func " << BB->getParent()->getFunction()->getName()
+ << " at loop depth " << MLI.getLoopDepth(BB)
+ << " \n");
+
+ buildDAGWithRegPressure();
+
+ // Postprocess the DAG to add platform specific artificial dependencies.
+ postprocessDAG();
+
+ // To view Height/Depth correctly, they should be accessed at least once.
+ DEBUG(unsigned maxH = 0;
+ for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
+ if (SUnits[su].getHeight() > maxH)
+ maxH = SUnits[su].getHeight();
+ dbgs() << "Max Height " << maxH << "\n";);
+ DEBUG(unsigned maxD = 0;
+ for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
+ if (SUnits[su].getDepth() > maxD)
+ maxD = SUnits[su].getDepth();
+ dbgs() << "Max Depth " << maxD << "\n";);
+ DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
+ SUnits[su].dumpAll(this));
+
+ initQueues();
+
+ bool IsTopNode = false;
+ while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
+ if (!checkSchedLimit())
+ break;
+
+ scheduleMI(SU, IsTopNode);
+
+ updateQueues(SU, IsTopNode);
+ }
+ assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
+
+ placeDebugValues();
+}
+
+void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
+ DAG = static_cast<VLIWMachineScheduler*>(dag);
+ SchedModel = DAG->getSchedModel();
+ TRI = DAG->TRI;
+ Top.init(DAG, SchedModel);
+ Bot.init(DAG, SchedModel);
+
+ // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
+ // are disabled, then these HazardRecs will be disabled.
+ const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries();
+ const TargetMachine &TM = DAG->MF.getTarget();
+ Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+
+ Top.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());
+ Bot.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());
+
+ assert((!llvm::ForceTopDown || !llvm::ForceBottomUp) &&
+ "-misched-topdown incompatible with -misched-bottomup");
+}
+
+void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
+ if (SU->isScheduled)
+ return;
+
+ for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
+ unsigned MinLatency = I->getMinLatency();
+#ifndef NDEBUG
+ Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
+#endif
+ if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
+ SU->TopReadyCycle = PredReadyCycle + MinLatency;
+ }
+ Top.releaseNode(SU, SU->TopReadyCycle);
+}
+
+void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
+ if (SU->isScheduled)
+ return;
+
+ assert(SU->getInstr() && "Scheduled SUnit must have instr");
+
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
+ unsigned MinLatency = I->getMinLatency();
+#ifndef NDEBUG
+ Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
+#endif
+ if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
+ SU->BotReadyCycle = SuccReadyCycle + MinLatency;
+ }
+ Bot.releaseNode(SU, SU->BotReadyCycle);
+}
+
+/// Does this SU have a hazard within the current instruction group.
+///
+/// The scheduler supports two modes of hazard recognition. The first is the
+/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
+/// supports highly complicated in-order reservation tables
+/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
+///
+/// The second is a streamlined mechanism that checks for hazards based on
+/// simple counters that the scheduler itself maintains. It explicitly checks
+/// for instruction dispatch limitations, including the number of micro-ops that
+/// can dispatch per cycle.
+///
+/// TODO: Also check whether the SU must start a new group.
+bool ConvergingVLIWScheduler::SchedBoundary::checkHazard(SUnit *SU) {
+ if (HazardRec->isEnabled())
+ return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
+
+ unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
+ if (IssueCount + uops > SchedModel->getIssueWidth())
+ return true;
+
+ return false;
+}
+
+void ConvergingVLIWScheduler::SchedBoundary::releaseNode(SUnit *SU,
+ unsigned ReadyCycle) {
+ if (ReadyCycle < MinReadyCycle)
+ MinReadyCycle = ReadyCycle;
+
+ // Check for interlocks first. For the purpose of other heuristics, an
+ // instruction that cannot issue appears as if it's not in the ReadyQueue.
+ if (ReadyCycle > CurrCycle || checkHazard(SU))
+ Pending.push(SU);
+ else
+ Available.push(SU);
+}
+
+/// Move the boundary of scheduled code by one cycle.
+void ConvergingVLIWScheduler::SchedBoundary::bumpCycle() {
+ unsigned Width = SchedModel->getIssueWidth();
+ IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
+
+ assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
+ unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
+
+ if (!HazardRec->isEnabled()) {
+ // Bypass HazardRec virtual calls.
+ CurrCycle = NextCycle;
+ } else {
+ // Bypass getHazardType calls in case of long latency.
+ for (; CurrCycle != NextCycle; ++CurrCycle) {
+ if (isTop())
+ HazardRec->AdvanceCycle();
+ else
+ HazardRec->RecedeCycle();
+ }
+ }
+ CheckPending = true;
+
+ DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
+ << CurrCycle << '\n');
+}
+
+/// Move the boundary of scheduled code by one SUnit.
+void ConvergingVLIWScheduler::SchedBoundary::bumpNode(SUnit *SU) {
+ bool startNewCycle = false;
+
+ // Update the reservation table.
+ if (HazardRec->isEnabled()) {
+ if (!isTop() && SU->isCall) {
+ // Calls are scheduled with their preceding instructions. For bottom-up
+ // scheduling, clear the pipeline state before emitting.
+ HazardRec->Reset();
+ }
+ HazardRec->EmitInstruction(SU);
+ }
+
+ // Update DFA model.
+ startNewCycle = ResourceModel->reserveResources(SU);
+
+ // Check the instruction group dispatch limit.
+ // TODO: Check if this SU must end a dispatch group.
+ IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
+ if (startNewCycle) {
+ DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
+ bumpCycle();
+ }
+ else
+ DEBUG(dbgs() << "*** IssueCount " << IssueCount
+ << " at cycle " << CurrCycle << '\n');
+}
+
+/// Release pending ready nodes into the available queue. This makes them
+/// visible to heuristics.
+void ConvergingVLIWScheduler::SchedBoundary::releasePending() {
+ // If the available queue is empty, it is safe to reset MinReadyCycle.
+ if (Available.empty())
+ MinReadyCycle = UINT_MAX;
+
+ // Check to see if any of the pending instructions are ready to issue. If
+ // so, add them to the available queue.
+ for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
+ SUnit *SU = *(Pending.begin()+i);
+ unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
+
+ if (ReadyCycle < MinReadyCycle)
+ MinReadyCycle = ReadyCycle;
+
+ if (ReadyCycle > CurrCycle)
+ continue;
+
+ if (checkHazard(SU))
+ continue;
+
+ Available.push(SU);
+ Pending.remove(Pending.begin()+i);
+ --i; --e;
+ }
+ CheckPending = false;
+}
+
+/// Remove SU from the ready set for this boundary.
+void ConvergingVLIWScheduler::SchedBoundary::removeReady(SUnit *SU) {
+ if (Available.isInQueue(SU))
+ Available.remove(Available.find(SU));
+ else {
+ assert(Pending.isInQueue(SU) && "bad ready count");
+ Pending.remove(Pending.find(SU));
+ }
+}
+
+/// If this queue only has one ready candidate, return it. As a side effect,
+/// advance the cycle until at least one node is ready. If multiple instructions
+/// are ready, return NULL.
+SUnit *ConvergingVLIWScheduler::SchedBoundary::pickOnlyChoice() {
+ if (CheckPending)
+ releasePending();
+
+ for (unsigned i = 0; Available.empty(); ++i) {
+ assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
+ "permanent hazard"); (void)i;
+ ResourceModel->reserveResources(0);
+ bumpCycle();
+ releasePending();
+ }
+ if (Available.size() == 1)
+ return *Available.begin();
+ return NULL;
+}
+
+#ifndef NDEBUG
+void ConvergingVLIWScheduler::traceCandidate(const char *Label,
+ const ReadyQueue &Q,
+ SUnit *SU, PressureElement P) {
+ dbgs() << Label << " " << Q.getName() << " ";
+ if (P.isValid())
+ dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
+ << " ";
+ else
+ dbgs() << " ";
+ SU->dump(DAG);
+}
+#endif
+
+/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
+/// of SU, return it, otherwise return null.
+static SUnit *getSingleUnscheduledPred(SUnit *SU) {
+ SUnit *OnlyAvailablePred = 0;
+ for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ SUnit &Pred = *I->getSUnit();
+ if (!Pred.isScheduled) {
+ // We found an available, but not scheduled, predecessor. If it's the
+ // only one we have found, keep track of it... otherwise give up.
+ if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
+ return 0;
+ OnlyAvailablePred = &Pred;
+ }
+ }
+ return OnlyAvailablePred;
+}
+
+/// getSingleUnscheduledSucc - If there is exactly one unscheduled successor
+/// of SU, return it, otherwise return null.
+static SUnit *getSingleUnscheduledSucc(SUnit *SU) {
+ SUnit *OnlyAvailableSucc = 0;
+ for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ SUnit &Succ = *I->getSUnit();
+ if (!Succ.isScheduled) {
+ // We found an available, but not scheduled, successor. If it's the
+ // only one we have found, keep track of it... otherwise give up.
+ if (OnlyAvailableSucc && OnlyAvailableSucc != &Succ)
+ return 0;
+ OnlyAvailableSucc = &Succ;
+ }
+ }
+ return OnlyAvailableSucc;
+}
+
+// Constants used to denote relative importance of
+// heuristic components for cost computation.
+static const unsigned PriorityOne = 200;
+static const unsigned PriorityTwo = 100;
+static const unsigned PriorityThree = 50;
+static const unsigned PriorityFour = 20;
+static const unsigned ScaleTwo = 10;
+static const unsigned FactorOne = 2;
+
+/// Single point to compute overall scheduling cost.
+/// TODO: More heuristics will be used soon.
+int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
+ SchedCandidate &Candidate,
+ RegPressureDelta &Delta,
+ bool verbose) {
+ // Initial trivial priority.
+ int ResCount = 1;
+
+ // Do not waste time on a node that is already scheduled.
+ if (!SU || SU->isScheduled)
+ return ResCount;
+
+ // Forced priority is high.
+ if (SU->isScheduleHigh)
+ ResCount += PriorityOne;
+
+ // Critical path first.
+ if (Q.getID() == TopQID) {
+ ResCount += (SU->getHeight() * ScaleTwo);
+
+ // If resources are available for it, multiply the
+ // chance of scheduling.
+ if (Top.ResourceModel->isResourceAvailable(SU))
+ ResCount <<= FactorOne;
+ } else {
+ ResCount += (SU->getDepth() * ScaleTwo);
+
+ // If resources are available for it, multiply the
+ // chance of scheduling.
+ if (Bot.ResourceModel->isResourceAvailable(SU))
+ ResCount <<= FactorOne;
+ }
+
+ unsigned NumNodesBlocking = 0;
+ if (Q.getID() == TopQID) {
+ // How many SUs does it block from scheduling?
+ // Look at all of the successors of this node.
+ // Count the number of nodes that
+ // this node is the sole unscheduled node for.
+ for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I)
+ if (getSingleUnscheduledPred(I->getSUnit()) == SU)
+ ++NumNodesBlocking;
+ } else {
+ // How many unscheduled predecessors block this node?
+ for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I)
+ if (getSingleUnscheduledSucc(I->getSUnit()) == SU)
+ ++NumNodesBlocking;
+ }
+ ResCount += (NumNodesBlocking * ScaleTwo);
+
+ // Factor in reg pressure as a heuristic.
+ ResCount -= (Delta.Excess.UnitIncrease*PriorityThree);
+ ResCount -= (Delta.CriticalMax.UnitIncrease*PriorityThree);
+
+ DEBUG(if (verbose) dbgs() << " Total(" << ResCount << ")");
+
+ return ResCount;
+}
+
+/// Pick the best candidate from the top queue.
+///
+/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
+/// DAG building. To adjust for the current scheduling location we need to
+/// maintain the number of vreg uses remaining to be top-scheduled.
+ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
+pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate) {
+ DEBUG(Q.dump());
+
+ // getMaxPressureDelta temporarily modifies the tracker.
+ RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
+
+ // BestSU remains NULL if no top candidates beat the best existing candidate.
+ CandResult FoundCandidate = NoCand;
+ for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
+ RegPressureDelta RPDelta;
+ TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
+ DAG->getRegionCriticalPSets(),
+ DAG->getRegPressure().MaxSetPressure);
+
+ int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false);
+
+ // Initialize the candidate if needed.
+ if (!Candidate.SU) {
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ Candidate.SCost = CurrentCost;
+ FoundCandidate = NodeOrder;
+ continue;
+ }
+
+ // Best cost.
+ if (CurrentCost > Candidate.SCost) {
+ DEBUG(traceCandidate("CCAND", Q, *I));
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ Candidate.SCost = CurrentCost;
+ FoundCandidate = BestCost;
+ continue;
+ }
+
+ // Fall through to original instruction order.
+ // Only consider node order if Candidate was chosen from this Q.
+ if (FoundCandidate == NoCand)
+ continue;
+ }
+ return FoundCandidate;
+}
+
+/// Pick the best candidate node from either the top or bottom queue.
+SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
+ // Schedule as far as possible in the direction of no choice. This is most
+ // efficient, but also provides the best heuristics for CriticalPSets.
+ if (SUnit *SU = Bot.pickOnlyChoice()) {
+ IsTopNode = false;
+ return SU;
+ }
+ if (SUnit *SU = Top.pickOnlyChoice()) {
+ IsTopNode = true;
+ return SU;
+ }
+ SchedCandidate BotCand;
+ // Prefer bottom scheduling when heuristics are silent.
+ CandResult BotResult = pickNodeFromQueue(Bot.Available,
+ DAG->getBotRPTracker(), BotCand);
+ assert(BotResult != NoCand && "failed to find the first candidate");
+
+ // If either Q has a single candidate that provides the least increase in
+ // Excess pressure, we can immediately schedule from that Q.
+ //
+ // RegionCriticalPSets summarizes the pressure within the scheduled region and
+ // affects picking from either Q. If scheduling in one direction must
+ // increase pressure for one of the excess PSets, then schedule in that
+ // direction first to provide more freedom in the other direction.
+ if (BotResult == SingleExcess || BotResult == SingleCritical) {
+ IsTopNode = false;
+ return BotCand.SU;
+ }
+ // Check if the top Q has a better candidate.
+ SchedCandidate TopCand;
+ CandResult TopResult = pickNodeFromQueue(Top.Available,
+ DAG->getTopRPTracker(), TopCand);
+ assert(TopResult != NoCand && "failed to find the first candidate");
+
+ if (TopResult == SingleExcess || TopResult == SingleCritical) {
+ IsTopNode = true;
+ return TopCand.SU;
+ }
+ // If either Q has a single candidate that minimizes pressure above the
+ // original region's pressure pick it.
+ if (BotResult == SingleMax) {
+ IsTopNode = false;
+ return BotCand.SU;
+ }
+ if (TopResult == SingleMax) {
+ IsTopNode = true;
+ return TopCand.SU;
+ }
+ if (TopCand.SCost > BotCand.SCost) {
+ IsTopNode = true;
+ return TopCand.SU;
+ }
+ // Otherwise prefer the bottom candidate in node order.
+ IsTopNode = false;
+ return BotCand.SU;
+}
+
+/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
+SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
+ if (DAG->top() == DAG->bottom()) {
+ assert(Top.Available.empty() && Top.Pending.empty() &&
+ Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
+ return NULL;
+ }
+ SUnit *SU;
+ if (llvm::ForceTopDown) {
+ SU = Top.pickOnlyChoice();
+ if (!SU) {
+ SchedCandidate TopCand;
+ CandResult TopResult =
+ pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
+ assert(TopResult != NoCand && "failed to find the first candidate");
+ (void)TopResult;
+ SU = TopCand.SU;
+ }
+ IsTopNode = true;
+ } else if (llvm::ForceBottomUp) {
+ SU = Bot.pickOnlyChoice();
+ if (!SU) {
+ SchedCandidate BotCand;
+ CandResult BotResult =
+ pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
+ assert(BotResult != NoCand && "failed to find the first candidate");
+ (void)BotResult;
+ SU = BotCand.SU;
+ }
+ IsTopNode = false;
+ } else {
+ SU = pickNodeBidrectional(IsTopNode);
+ }
+ if (SU->isTopReady())
+ Top.removeReady(SU);
+ if (SU->isBottomReady())
+ Bot.removeReady(SU);
+
+ DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
+ << " Scheduling Instruction in cycle "
+ << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
+ SU->dump(DAG));
+ return SU;
+}
+
+/// Update the scheduler's state after scheduling a node. This is the same node
+/// that was just returned by pickNode(). However, VLIWMachineScheduler needs
+/// to update its state based on the current cycle before MachineSchedStrategy
+/// does.
+void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) {
+ if (IsTopNode) {
+ SU->TopReadyCycle = Top.CurrCycle;
+ Top.bumpNode(SU);
+ } else {
+ SU->BotReadyCycle = Bot.CurrCycle;
+ Bot.bumpNode(SU);
+ }
+}
+
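With the registry entry added in HexagonTargetMachine.cpp further below, the scheduler should also be selectable by name through the generic MachineScheduler plumbing; a hedged example invocation, assuming the stock -enable-misched/-misched options of this LLVM version:

    llc -march=hexagon -enable-misched -misched=hexagon input.ll -o input.s
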
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h
new file mode 100644
index 0000000..fe0242a
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h
@@ -0,0 +1,244 @@
+//===-- HexagonMachineScheduler.h - Custom Hexagon MI scheduler. ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Custom Hexagon MI scheduler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONMACHINESCHEDULER_H
+#define HEXAGONMACHINESCHEDULER_H
+
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineScheduler.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ResourcePriorityQueue.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/PriorityQueue.h"
+
+using namespace llvm;
+
+namespace llvm {
+//===----------------------------------------------------------------------===//
+// ConvergingVLIWScheduler - Implementation of the standard
+// MachineSchedStrategy.
+//===----------------------------------------------------------------------===//
+
+class VLIWResourceModel {
+ /// ResourcesModel - Represents VLIW state.
+ /// Not limited to VLIW targets per se, but assumes
+ /// the target defines a DFA.
+ DFAPacketizer *ResourcesModel;
+
+ const TargetSchedModel *SchedModel;
+
+ /// Local packet/bundle model. Purely
+ /// internal to the MI scheduler at this time.
+ std::vector<SUnit*> Packet;
+
+ /// Total packets created.
+ unsigned TotalPackets;
+
+public:
+VLIWResourceModel(const TargetMachine &TM, const TargetSchedModel *SM) :
+ SchedModel(SM), TotalPackets(0) {
+ ResourcesModel = TM.getInstrInfo()->CreateTargetScheduleState(&TM,NULL);
+
+ // This hard requirement could be relaxed,
+ // but for now do not let it proceed.
+ assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");
+
+ Packet.resize(SchedModel->getIssueWidth());
+ Packet.clear();
+ ResourcesModel->clearResources();
+ }
+
+ ~VLIWResourceModel() {
+ delete ResourcesModel;
+ }
+
+ void resetPacketState() {
+ Packet.clear();
+ }
+
+ void resetDFA() {
+ ResourcesModel->clearResources();
+ }
+
+ void reset() {
+ Packet.clear();
+ ResourcesModel->clearResources();
+ }
+
+ bool isResourceAvailable(SUnit *SU);
+ bool reserveResources(SUnit *SU);
+ unsigned getTotalPackets() const { return TotalPackets; }
+};
+
+/// Extend the standard ScheduleDAGMI to provide more context and override the
+/// top-level schedule() driver.
+class VLIWMachineScheduler : public ScheduleDAGMI {
+public:
+ VLIWMachineScheduler(MachineSchedContext *C, MachineSchedStrategy *S):
+ ScheduleDAGMI(C, S) {}
+
+ /// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
+ /// time to do some work.
+ virtual void schedule();
+ /// Perform platform specific DAG postprocessing.
+ void postprocessDAG();
+};
+
+/// ConvergingVLIWScheduler shrinks the unscheduled zone using heuristics
+/// to balance the schedule.
+class ConvergingVLIWScheduler : public MachineSchedStrategy {
+
+ /// Store the state used by ConvergingVLIWScheduler heuristics, required
+ /// for the lifetime of one invocation of pickNode().
+ struct SchedCandidate {
+ // The best SUnit candidate.
+ SUnit *SU;
+
+ // Register pressure values for the best candidate.
+ RegPressureDelta RPDelta;
+
+ // Best scheduling cost.
+ int SCost;
+
+ SchedCandidate(): SU(NULL), SCost(0) {}
+ };
+ /// Represent the type of SchedCandidate found within a single queue.
+ enum CandResult {
+ NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure,
+ BestCost};
+
+ /// Each scheduling boundary is associated with ready queues. It tracks the
+ /// current cycle in whichever direction it has moved, and maintains the state
+ /// of "hazards" and other interlocks at the current cycle.
+ struct SchedBoundary {
+ VLIWMachineScheduler *DAG;
+ const TargetSchedModel *SchedModel;
+
+ ReadyQueue Available;
+ ReadyQueue Pending;
+ bool CheckPending;
+
+ ScheduleHazardRecognizer *HazardRec;
+ VLIWResourceModel *ResourceModel;
+
+ unsigned CurrCycle;
+ unsigned IssueCount;
+
+ /// MinReadyCycle - Cycle of the soonest available instruction.
+ unsigned MinReadyCycle;
+
+ // Remember the greatest min operand latency.
+ unsigned MaxMinLatency;
+
+ /// Pending queues extend the ready queues with the same ID and the
+ /// PendingFlag set.
+ SchedBoundary(unsigned ID, const Twine &Name):
+ DAG(0), SchedModel(0), Available(ID, Name+".A"),
+ Pending(ID << ConvergingVLIWScheduler::LogMaxQID, Name+".P"),
+ CheckPending(false), HazardRec(0), ResourceModel(0),
+ CurrCycle(0), IssueCount(0),
+ MinReadyCycle(UINT_MAX), MaxMinLatency(0) {}
+
+ ~SchedBoundary() {
+ delete ResourceModel;
+ delete HazardRec;
+ }
+
+ void init(VLIWMachineScheduler *dag, const TargetSchedModel *smodel) {
+ DAG = dag;
+ SchedModel = smodel;
+ }
+
+ bool isTop() const {
+ return Available.getID() == ConvergingVLIWScheduler::TopQID;
+ }
+
+ bool checkHazard(SUnit *SU);
+
+ void releaseNode(SUnit *SU, unsigned ReadyCycle);
+
+ void bumpCycle();
+
+ void bumpNode(SUnit *SU);
+
+ void releasePending();
+
+ void removeReady(SUnit *SU);
+
+ SUnit *pickOnlyChoice();
+ };
+
+ VLIWMachineScheduler *DAG;
+ const TargetSchedModel *SchedModel;
+ const TargetRegisterInfo *TRI;
+
+ // State of the top and bottom scheduled instruction boundaries.
+ SchedBoundary Top;
+ SchedBoundary Bot;
+
+public:
+ /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
+ enum {
+ TopQID = 1,
+ BotQID = 2,
+ LogMaxQID = 2
+ };
+
+ ConvergingVLIWScheduler():
+ DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
+
+ virtual void initialize(ScheduleDAGMI *dag);
+
+ virtual SUnit *pickNode(bool &IsTopNode);
+
+ virtual void schedNode(SUnit *SU, bool IsTopNode);
+
+ virtual void releaseTopNode(SUnit *SU);
+
+ virtual void releaseBottomNode(SUnit *SU);
+
+ unsigned ReportPackets() {
+ return Top.ResourceModel->getTotalPackets() +
+ Bot.ResourceModel->getTotalPackets();
+ }
+
+protected:
+ SUnit *pickNodeBidrectional(bool &IsTopNode);
+
+ int SchedulingCost(ReadyQueue &Q,
+ SUnit *SU, SchedCandidate &Candidate,
+ RegPressureDelta &Delta, bool verbose);
+
+ CandResult pickNodeFromQueue(ReadyQueue &Q,
+ const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate);
+#ifndef NDEBUG
+ void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
+ PressureElement P = PressureElement());
+#endif
+};
+
+} // namespace llvm
+
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 7ece408..1e91c39 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -337,7 +337,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n"
<< "********** Function: "
- << MF.getFunction()->getName() << "\n");
+ << MF.getName() << "\n");
#if 0
// for now disable this, if we move NewValueJump before register
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
index 55cbc09..a295015 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -109,6 +109,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
DenseMap<unsigned, unsigned> PeepholeMap;
+ DenseMap<unsigned, std::pair<unsigned, unsigned> > PeepholeDoubleRegsMap;
if (DisableHexagonPeephole) return false;
@@ -117,6 +118,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
MBBb != MBBe; ++MBBb) {
MachineBasicBlock* MBB = MBBb;
PeepholeMap.clear();
+ PeepholeDoubleRegsMap.clear();
// Traverse the basic block.
for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
@@ -140,6 +142,24 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
}
}
+ // Look for this sequence below
+ // %vregDoubleReg1 = LSRd_ri %vregDoubleReg0, 32
+ // %vregIntReg = COPY %vregDoubleReg1:subreg_loreg.
+ // and convert into
+ // %vregIntReg = COPY %vregDoubleReg0:subreg_hireg.
+ if (MI->getOpcode() == Hexagon::LSRd_ri) {
+ assert(MI->getNumOperands() == 3);
+ MachineOperand &Dst = MI->getOperand(0);
+ MachineOperand &Src1 = MI->getOperand(1);
+ MachineOperand &Src2 = MI->getOperand(2);
+ if (Src2.getImm() != 32)
+ continue;
+ unsigned DstReg = Dst.getReg();
+ unsigned SrcReg = Src1.getReg();
+ PeepholeDoubleRegsMap[DstReg] =
+ std::make_pair(SrcReg, 1/*Hexagon::subreg_hireg*/);
+ }
+
// Look for P=NOT(P).
if (!DisablePNotP &&
(MI->getOpcode() == Hexagon::NOT_p)) {
@@ -178,6 +198,21 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// Change the 1st operand.
MI->RemoveOperand(1);
MI->addOperand(MachineOperand::CreateReg(PeepholeSrc, false));
+ } else {
+ DenseMap<unsigned, std::pair<unsigned, unsigned> >::iterator DI =
+ PeepholeDoubleRegsMap.find(SrcReg);
+ if (DI != PeepholeDoubleRegsMap.end()) {
+ std::pair<unsigned,unsigned> PeepholeSrc = DI->second;
+ MI->RemoveOperand(1);
+ MI->addOperand(MachineOperand::CreateReg(PeepholeSrc.first,
+ false /*isDef*/,
+ false /*isImp*/,
+ false /*isKill*/,
+ false /*isDead*/,
+ false /*isUndef*/,
+ false /*isEarlyClobber*/,
+ PeepholeSrc.second));
+ }
}
}
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index 2c23674..3742486 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -310,6 +310,58 @@ void HexagonRegisterInfo::getInitialFrameState(std::vector<MachineMove>
Moves.push_back(MachineMove(0, Dst, Src));
}
+// Get the weight in units of pressure for this register class.
+const RegClassWeight &
+HexagonRegisterInfo::getRegClassWeight(const TargetRegisterClass *RC) const {
+ // Each TargetRegisterClass has a per register weight, and weight
+ // limit which must be less than the limits of its pressure sets.
+ static const RegClassWeight RCWeightTable[] = {
+ {1, 32}, // IntRegs
+ {1, 8}, // CRRegs
+ {1, 4}, // PredRegs
+ {2, 16}, // DoubleRegs
+ {0, 0} };
+ return RCWeightTable[RC->getID()];
+}
+
+/// Get the number of dimensions of register pressure.
+unsigned HexagonRegisterInfo::getNumRegPressureSets() const {
+ return 4;
+}
+
+/// Get the name of this register unit pressure set.
+const char *HexagonRegisterInfo::getRegPressureSetName(unsigned Idx) const {
+ static const char *const RegPressureSetName[] = {
+ "IntRegsRegSet",
+ "CRRegsRegSet",
+ "PredRegsRegSet",
+ "DoubleRegsRegSet"
+ };
+ assert((Idx < 4) && "Index out of bounds");
+ return RegPressureSetName[Idx];
+}
+
+/// Get the register unit pressure limit for this dimension.
+/// This limit must be adjusted dynamically for reserved registers.
+unsigned HexagonRegisterInfo::getRegPressureSetLimit(unsigned Idx) const {
+ static const int RegPressureLimit [] = { 16, 4, 2, 8 };
+ assert((Idx < 4) && "Index out of bounds");
+ return RegPressureLimit[Idx];
+}
+
+const int*
+HexagonRegisterInfo::getRegClassPressureSets(const TargetRegisterClass *RC)
+ const {
+ static const int RCSetsTable[] = {
+ 0, -1, // IntRegs
+ 1, -1, // CRRegs
+ 2, -1, // PredRegs
+ 0, -1, // DoubleRegs
+ -1 };
+ static const unsigned RCSetStartTable[] = { 0, 2, 4, 6, 0 };
+ unsigned SetListStart = RCSetStartTable[RC->getID()];
+ return &RCSetsTable[SetListStart];
+}
unsigned HexagonRegisterInfo::getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
}
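
The -1 entries in RCSetsTable are sentinels: pressure-tracking code is expected to walk each class's list until it reaches one. A hedged sketch of that assumed consumption pattern (TRI and RC stand for any TargetRegisterInfo and register class in scope):

    // Print every pressure set this class contributes to, with its limit.
    for (const int *PS = TRI->getRegClassPressureSets(RC); *PS != -1; ++PS)
      dbgs() << TRI->getRegPressureSetName(*PS)
             << " (limit " << TRI->getRegPressureSetLimit(*PS) << ")\n";
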
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
index 85355ae..8820d13 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
@@ -87,6 +87,11 @@ struct HexagonRegisterInfo : public HexagonGenRegisterInfo {
// Exception handling queries.
unsigned getEHExceptionRegister() const;
unsigned getEHHandlerRegister() const;
+ const RegClassWeight &getRegClassWeight(const TargetRegisterClass *RC) const;
+ unsigned getNumRegPressureSets() const;
+ const char *getRegPressureSetName(unsigned Idx) const;
+ unsigned getRegPressureSetLimit(unsigned Idx) const;
+ const int* getRegClassPressureSets(const TargetRegisterClass *RC) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
index 2468f0b..4d93dd1 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
@@ -50,7 +50,7 @@ bool HexagonRemoveExtendArgs::runOnFunction(Function &F) {
unsigned Idx = 1;
for (Function::arg_iterator AI = F.arg_begin(), AE = F.arg_end(); AI != AE;
++AI, ++Idx) {
- if (F.paramHasAttr(Idx, Attribute::SExt)) {
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt)) {
Argument* Arg = AI;
if (!isa<PointerType>(Arg->getType())) {
for (Instruction::use_iterator UI = Arg->use_begin();
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
index d1076b8..b5ff69a 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
@@ -47,6 +47,7 @@ def HexagonModel : SchedMachineModel {
// Max issue per cycle == bundle width.
let IssueWidth = 4;
let Itineraries = HexagonItineraries;
+ let LoadLatency = 1;
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
index 9b41126..5668ae8 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -58,6 +58,7 @@ def HexagonModelV4 : SchedMachineModel {
// Max issue per cycle == bundle width.
let IssueWidth = 4;
let Itineraries = HexagonItinerariesV4;
+ let LoadLatency = 1;
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 5d087db..4bacb8f 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -40,28 +40,27 @@ EnableIEEERndNear(
HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
HexagonGenSubtargetInfo(TT, CPU, FS),
- HexagonArchVersion(V2),
CPUString(CPU.str()) {
- ParseSubtargetFeatures(CPU, FS);
- switch(HexagonArchVersion) {
- case HexagonSubtarget::V2:
- break;
- case HexagonSubtarget::V3:
- EnableV3 = true;
- break;
- case HexagonSubtarget::V4:
- break;
- case HexagonSubtarget::V5:
- break;
- default:
- // If the programmer has not specified a Hexagon version, default
- // to -mv4.
+ // If the programmer has not specified a Hexagon version, default to -mv4.
+ if (CPUString.empty())
CPUString = "hexagonv4";
- HexagonArchVersion = HexagonSubtarget::V4;
- break;
+
+ if (CPUString == "hexagonv2") {
+ HexagonArchVersion = V2;
+ } else if (CPUString == "hexagonv3") {
+ EnableV3 = true;
+ HexagonArchVersion = V3;
+ } else if (CPUString == "hexagonv4") {
+ HexagonArchVersion = V4;
+ } else if (CPUString == "hexagonv5") {
+ HexagonArchVersion = V5;
+ } else {
+ llvm_unreachable("Unrecognized Hexagon processor version");
}
+ ParseSubtargetFeatures(CPUString, FS);
+
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUString);
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index a7b291f..30866e9 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -14,6 +14,7 @@
#include "HexagonTargetMachine.h"
#include "Hexagon.h"
#include "HexagonISelLowering.h"
+#include "HexagonMachineScheduler.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/PassManager.h"
@@ -29,6 +30,11 @@ opt<bool> DisableHardwareLoops(
"disable-hexagon-hwloops", cl::Hidden,
cl::desc("Disable Hardware Loops for Hexagon target"));
+static cl::
+opt<bool> DisableHexagonMISched("disable-hexagon-misched",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Disable Hexagon MI Scheduling"));
+
/// HexagonTargetMachineModule - Note that this is used on hosts that
/// cannot link in a library unless there are references into the
/// library. In particular, it seems that it is not possible to get
@@ -42,6 +48,13 @@ extern "C" void LLVMInitializeHexagonTarget() {
RegisterTargetMachine<HexagonTargetMachine> X(TheHexagonTarget);
}
+static ScheduleDAGInstrs *createVLIWMachineSched(MachineSchedContext *C) {
+ return new VLIWMachineScheduler(C, new ConvergingVLIWScheduler());
+}
+
+static MachineSchedRegistry
+SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler",
+ createVLIWMachineSched);
/// HexagonTargetMachine ctor - Create an ILP32 architecture model.
///
@@ -55,13 +68,14 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
CodeModel::Model CM,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- DataLayout("e-p:32:32:32-"
+ DL("e-p:32:32:32-"
"i64:64:64-i32:32:32-i16:16:16-i1:32:32-"
"f64:64:64-f32:32:32-a0:0-n32") ,
Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget),
- InstrItins(&Subtarget.getInstrItineraryData()) {
+ InstrItins(&Subtarget.getInstrItineraryData()),
+ STTI(&TLInfo), VTTI(&TLInfo) {
setMCUseCFI(false);
}
@@ -74,7 +88,7 @@ bool HexagonTargetMachine::addPassesForOptimizations(PassManagerBase &PM) {
PM.add(createDeadCodeEliminationPass());
PM.add(createConstantPropagationPass());
PM.add(createLoopUnrollPass());
- PM.add(createLoopStrengthReducePass(getTargetLowering()));
+ PM.add(createLoopStrengthReducePass());
return true;
}
@@ -83,7 +97,13 @@ namespace {
class HexagonPassConfig : public TargetPassConfig {
public:
HexagonPassConfig(HexagonTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
+ : TargetPassConfig(TM, PM) {
+ // Enable MI scheduler.
+ if (!DisableHexagonMISched) {
+ enablePass(&MachineSchedulerID);
+ MachineSchedRegistry::setDefault(createVLIWMachineSched);
+ }
+ }
HexagonTargetMachine &getHexagonTargetMachine() const {
return getTM<HexagonTargetMachine>();
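Note: this is the standard MachineSchedRegistry pattern. The static registry entry publishes the factory under the name "hexagon" (selectable via the -misched= option once the MI scheduler runs), and the pass config both enables MachineSchedulerID and installs the factory as the default. A stripped-down sketch of the same hook for some other strategy, where MySchedStrategy is hypothetical:

    // Sketch: registering a custom MachineSchedStrategy with the registry.
    static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
      return new ScheduleDAGMI(C, new MySchedStrategy());
    }
    static MachineSchedRegistry
    MySchedRegistry("mysched", "Example custom MI scheduler", createMySched);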
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
index 0336965..7a4215c 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
@@ -20,20 +20,23 @@
#include "HexagonSelectionDAGInfo.h"
#include "HexagonFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
class Module;
class HexagonTargetMachine : public LLVMTargetMachine {
- const TargetData DataLayout; // Calculates type size & alignment.
+ const DataLayout DL; // Calculates type size & alignment.
HexagonSubtarget Subtarget;
HexagonInstrInfo InstrInfo;
HexagonTargetLowering TLInfo;
HexagonSelectionDAGInfo TSInfo;
HexagonFrameLowering FrameLowering;
const InstrItineraryData* InstrItins;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
HexagonTargetMachine(const Target &T, StringRef TT,StringRef CPU,
@@ -68,7 +71,15 @@ public:
return &TSInfo;
}
- virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
+
+ virtual const DataLayout *getDataLayout() const { return &DL; }
static unsigned getModuleMatchQuality(const Module &M);
// Pass Pipeline Configuration.
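Note: this header shows both halves of the mechanical rename running through this import: TargetData becomes DataLayout (llvm/DataLayout.h, getDataLayout()), and targets additionally expose scalar/vector target transform info objects for the new cost model. Typical queries against the renamed class, using methods that carry over unchanged:

    const DataLayout *DL = TM.getDataLayout();
    uint64_t Bytes = DL->getTypeAllocSize(Ty);     // allocation size, incl. padding
    unsigned Align = DL->getABITypeAlignment(Ty);  // ABI-required alignment
    bool LE = DL->isLittleEndian();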
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
index 32cc709..f4d7761 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
@@ -16,7 +16,7 @@
#include "HexagonTargetMachine.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/ELF.h"
@@ -73,7 +73,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
if (Kind.isBSS() || Kind.isDataNoRel() || Kind.isCommon()) {
Type *Ty = GV->getType()->getElementType();
- return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
+ return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
}
return false;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index a03ed03..3d5f685 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -3474,8 +3474,8 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// 1. Two loads unless they are volatile.
// 2. Two stores in V4 unless they are volatile.
else if ((DepType == SDep::Order) &&
- !I->hasVolatileMemoryRef() &&
- !J->hasVolatileMemoryRef()) {
+ !I->hasOrderedMemoryRef() &&
+ !J->hasOrderedMemoryRef()) {
if (QRI->Subtarget.hasV4TOps() &&
// hexagonv4 allows dual store.
MCIDI.mayStore() && MCIDJ.mayStore()) {
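Note: hasOrderedMemoryRef() is a strictly stronger predicate than the old hasVolatileMemoryRef(): it is true for volatile accesses, for atomic and otherwise-ordered accesses, and conservatively for instructions whose memory operands are unanalyzable. The packetizer therefore refuses to bundle any pair whose relative order is observable, not just volatile pairs. In sketch form:

    // Sketch: the bundling guard in terms of the new predicate.
    bool canBundleMemOps(const MachineInstr *I, const MachineInstr *J) {
      // True only if neither access has observable ordering (volatile,
      // atomic, or unknown memory operands).
      return !I->hasOrderedMemoryRef() && !J->hasOrderedMemoryRef();
    }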
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h b/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
index 9305c27..c607b5d 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
@@ -75,9 +75,9 @@ static bool CC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
const Type* ArgTy = LocVT.getTypeForEVT(State.getContext());
unsigned Alignment =
- State.getTarget().getTargetData()->getABITypeAlignment(ArgTy);
+ State.getTarget().getDataLayout()->getABITypeAlignment(ArgTy);
unsigned Size =
- State.getTarget().getTargetData()->getTypeSizeInBits(ArgTy) / 8;
+ State.getTarget().getDataLayout()->getTypeSizeInBits(ArgTy) / 8;
// If it's passed by value, then we need the size of the aggregate not of
// the pointer.
@@ -130,9 +130,9 @@ static bool RetCC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
const Type* ArgTy = LocVT.getTypeForEVT(State.getContext());
unsigned Alignment =
- State.getTarget().getTargetData()->getABITypeAlignment(ArgTy);
+ State.getTarget().getDataLayout()->getABITypeAlignment(ArgTy);
unsigned Size =
- State.getTarget().getTargetData()->getTypeSizeInBits(ArgTy) / 8;
+ State.getTarget().getDataLayout()->getTypeSizeInBits(ArgTy) / 8;
unsigned Offset3 = State.AllocateStack(Size, Alignment);
State.addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset3,
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
index d6e6c36..86f75d1 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
@@ -24,7 +24,7 @@ HexagonMCAsmInfo::HexagonMCAsmInfo(const Target &T, StringRef TT) {
HasLEB128 = true;
PrivateGlobalPrefix = ".L";
- LCOMMDirectiveType = LCOMM::ByteAlignment;
+ LCOMMDirectiveAlignmentType = LCOMM::ByteAlignment;
InlineAsmStart = "# InlineAsm Start";
InlineAsmEnd = "# InlineAsm End";
ZeroDirective = "\t.space\t";
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
index 38fb0e8..f7809ca 100644
--- a/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
@@ -44,9 +44,10 @@ class MBlazeAsmParser : public MCTargetAsmParser {
bool ParseDirectiveWord(unsigned Size, SMLoc L);
- bool MatchAndEmitInstruction(SMLoc IDLoc,
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out);
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm);
/// @name Auto-generated Match Functions
/// {
@@ -56,12 +57,12 @@ class MBlazeAsmParser : public MCTargetAsmParser {
/// }
-
public:
MBlazeAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
: MCTargetAsmParser(), Parser(_Parser) {}
- virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
+ virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
virtual bool ParseDirective(AsmToken DirectiveID);
@@ -313,14 +314,13 @@ static unsigned MatchRegisterName(StringRef Name);
/// }
//
bool MBlazeAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc,
+MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out) {
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
MCInst Inst;
- SMLoc ErrorLoc;
- unsigned ErrorInfo;
-
- switch (MatchInstructionImpl(Operands, Inst, ErrorInfo)) {
+ switch (MatchInstructionImpl(Operands, Inst, ErrorInfo,
+ MatchingInlineAsm)) {
default: break;
case Match_Success:
Out.EmitInstruction(Inst);
@@ -329,10 +329,8 @@ MatchAndEmitInstruction(SMLoc IDLoc,
return Error(IDLoc, "instruction use requires an option to be enabled");
case Match_MnemonicFail:
return Error(IDLoc, "unrecognized instruction mnemonic");
- case Match_ConversionFail:
- return Error(IDLoc, "unable to convert operands to instruction");
- case Match_InvalidOperand:
- ErrorLoc = IDLoc;
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0U) {
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
@@ -343,6 +341,7 @@ MatchAndEmitInstruction(SMLoc IDLoc,
return Error(ErrorLoc, "invalid operand for instruction");
}
+ }
llvm_unreachable("Implement any new match types added!");
}
@@ -479,7 +478,7 @@ ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// Parse an mblaze instruction mnemonic followed by its operands.
bool MBlazeAsmParser::
-ParseInstruction(StringRef Name, SMLoc NameLoc,
+ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// The first operands is the token for the instruction name
size_t dotLoc = Name.find('.');
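Note: this is the interface change rippling through every target asm parser in this import: MatchAndEmitInstruction gains an Opcode out-parameter, a caller-owned ErrorInfo slot, and a MatchingInlineAsm flag for the integrated inline-asm path, while ParseInstruction gains a ParseInstructionInfo argument; the Match_ConversionFail case moves into the generated matcher. How a caller consumes the widened signature, in sketch form:

    unsigned Opcode = 0, ErrorInfo = 0;
    if (TargetParser.MatchAndEmitInstruction(IDLoc, Opcode, Operands, Out,
                                             ErrorInfo,
                                             /*MatchingInlineAsm=*/false)) {
      // On failure, ErrorInfo != ~0U indexes the offending operand,
      // exactly as the Match_InvalidOperand handlers here interpret it.
    }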
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
index e9f340f..b679a31 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
@@ -34,7 +34,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
deleted file mode 100644
index e3c7236..0000000
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-//===-- MBlazeELFWriterInfo.cpp - ELF Writer Info for the MBlaze backend --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF writer information for the MBlaze backend.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MBlazeELFWriterInfo.h"
-#include "MBlazeRelocations.h"
-#include "llvm/Function.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Implementation of the MBlazeELFWriterInfo class
-//===----------------------------------------------------------------------===//
-
-MBlazeELFWriterInfo::MBlazeELFWriterInfo(TargetMachine &TM)
- : TargetELFWriterInfo(TM.getTargetData()->getPointerSizeInBits() == 64,
- TM.getTargetData()->isLittleEndian()) {
-}
-
-MBlazeELFWriterInfo::~MBlazeELFWriterInfo() {}
-
-unsigned MBlazeELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
- switch (MachineRelTy) {
- case MBlaze::reloc_pcrel_word:
- return ELF::R_MICROBLAZE_64_PCREL;
- case MBlaze::reloc_absolute_word:
- return ELF::R_MICROBLAZE_NONE;
- default:
- llvm_unreachable("unknown mblaze machine relocation type");
- }
-}
-
-long int MBlazeELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier) const {
- switch (RelTy) {
- case ELF::R_MICROBLAZE_32_PCREL:
- return Modifier - 4;
- case ELF::R_MICROBLAZE_32:
- return Modifier;
- default:
- llvm_unreachable("unknown mblaze relocation type");
- }
-}
-
-unsigned MBlazeELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
- // FIXME: Most of these sizes are guesses based on the name
- switch (RelTy) {
- case ELF::R_MICROBLAZE_32:
- case ELF::R_MICROBLAZE_32_PCREL:
- case ELF::R_MICROBLAZE_32_PCREL_LO:
- case ELF::R_MICROBLAZE_32_LO:
- case ELF::R_MICROBLAZE_SRO32:
- case ELF::R_MICROBLAZE_SRW32:
- case ELF::R_MICROBLAZE_32_SYM_OP_SYM:
- case ELF::R_MICROBLAZE_GOTOFF_32:
- return 32;
-
- case ELF::R_MICROBLAZE_64_PCREL:
- case ELF::R_MICROBLAZE_64:
- case ELF::R_MICROBLAZE_GOTPC_64:
- case ELF::R_MICROBLAZE_GOT_64:
- case ELF::R_MICROBLAZE_PLT_64:
- case ELF::R_MICROBLAZE_GOTOFF_64:
- return 64;
- }
-
- return 0;
-}
-
-bool MBlazeELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
- // FIXME: Most of these are guesses based on the name
- switch (RelTy) {
- case ELF::R_MICROBLAZE_32_PCREL:
- case ELF::R_MICROBLAZE_64_PCREL:
- case ELF::R_MICROBLAZE_32_PCREL_LO:
- case ELF::R_MICROBLAZE_GOTPC_64:
- return true;
- }
-
- return false;
-}
-
-unsigned MBlazeELFWriterInfo::getAbsoluteLabelMachineRelTy() const {
- return MBlaze::reloc_absolute_word;
-}
-
-long int MBlazeELFWriterInfo::computeRelocation(unsigned SymOffset,
- unsigned RelOffset,
- unsigned RelTy) const {
- assert((RelTy == ELF::R_MICROBLAZE_32_PCREL ||
- RelTy == ELF::R_MICROBLAZE_64_PCREL) &&
- "computeRelocation unknown for this relocation type");
- return SymOffset - (RelOffset + 4);
-}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h
deleted file mode 100644
index a314eb7..0000000
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h
+++ /dev/null
@@ -1,59 +0,0 @@
-//===-- MBlazeELFWriterInfo.h - ELF Writer Info for MBlaze ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF writer information for the MBlaze backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZE_ELF_WRITER_INFO_H
-#define MBLAZE_ELF_WRITER_INFO_H
-
-#include "llvm/Target/TargetELFWriterInfo.h"
-
-namespace llvm {
- class TargetMachine;
-
- class MBlazeELFWriterInfo : public TargetELFWriterInfo {
- public:
- MBlazeELFWriterInfo(TargetMachine &TM);
- virtual ~MBlazeELFWriterInfo();
-
- /// getRelocationType - Returns the target specific ELF Relocation type.
- /// 'MachineRelTy' contains the object code independent relocation type
- virtual unsigned getRelocationType(unsigned MachineRelTy) const;
-
- /// hasRelocationAddend - True if the target uses an addend in the
- /// ELF relocation entry.
- virtual bool hasRelocationAddend() const { return false; }
-
- /// getDefaultAddendForRelTy - Gets the default addend value for a
- /// relocation entry based on the target ELF relocation type.
- virtual long int getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier = 0) const;
-
- /// getRelTySize - Returns the size of relocatable field in bits
- virtual unsigned getRelocationTySize(unsigned RelTy) const;
-
- /// isPCRelativeRel - True if the relocation type is pc relative
- virtual bool isPCRelativeRel(unsigned RelTy) const;
-
- /// getJumpTableRelocationTy - Returns the machine relocation type used
- /// to reference a jumptable.
- virtual unsigned getAbsoluteLabelMachineRelTy() const;
-
- /// computeRelocation - Some relocatable fields could be relocated
- /// directly, avoiding the relocation symbol emission, compute the
- /// final relocation value for this symbol.
- virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
- unsigned RelTy) const;
- };
-
-} // end llvm namespace
-
-#endif // MBLAZE_ELF_WRITER_INFO_H
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp
index d2f14a5..9e467bf 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp
@@ -23,7 +23,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
index 91aaf94..1c2e3b2 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
@@ -83,7 +83,7 @@ bool MBlazeIntrinsicInfo::isOverloaded(unsigned IntrID) const {
#undef GET_INTRINSIC_OVERLOAD_TABLE
}
-/// This defines the "getAttributes(ID id)" method.
+/// This defines the "getAttributes(LLVMContext &C, ID id)" method.
#define GET_INTRINSIC_ATTRIBUTES
#include "MBlazeGenIntrinsics.inc"
#undef GET_INTRINSIC_ATTRIBUTES
@@ -104,7 +104,8 @@ Function *MBlazeIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
Type **Tys,
unsigned numTy) const {
assert(!isOverloaded(IntrID) && "MBlaze intrinsics are not overloaded");
- AttrListPtr AList = getAttributes((mblazeIntrinsic::ID) IntrID);
+ AttrListPtr AList = getAttributes(M->getContext(),
+ (mblazeIntrinsic::ID) IntrID);
return cast<Function>(M->getOrInsertFunction(getName(IntrID),
getType(M->getContext(), IntrID),
AList));
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
index 46f5207..daa76e8 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
@@ -140,7 +140,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
unsigned oi = i == 2 ? 1 : 2;
- DEBUG(dbgs() << "\nFunction : " << MF.getFunction()->getName() << "\n";
+ DEBUG(dbgs() << "\nFunction : " << MF.getName() << "\n";
dbgs() << "<--------->\n" << MI);
int FrameIndex = MI.getOperand(i).getIndex();
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
index 5f82f14..f180652 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
@@ -38,11 +38,12 @@ MBlazeTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS),
- DataLayout("E-p:32:32:32-i8:8:8-i16:16:16"),
+ DL("E-p:32:32:32-i8:8:8-i16:16:16"),
InstrInfo(*this),
FrameLowering(Subtarget),
- TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
+ TLInfo(*this), TSInfo(*this),
+ InstrItins(Subtarget.getInstrItineraryData()),
+ STTI(&TLInfo), VTTI(&TLInfo) {
}
namespace {
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
index 1647a21..a8df4e6 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
@@ -20,25 +20,26 @@
#include "MBlazeSelectionDAGInfo.h"
#include "MBlazeIntrinsicInfo.h"
#include "MBlazeFrameLowering.h"
-#include "MBlazeELFWriterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
class formatted_raw_ostream;
class MBlazeTargetMachine : public LLVMTargetMachine {
MBlazeSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
MBlazeInstrInfo InstrInfo;
MBlazeFrameLowering FrameLowering;
MBlazeTargetLowering TLInfo;
MBlazeSelectionDAGInfo TSInfo;
MBlazeIntrinsicInfo IntrinsicInfo;
- MBlazeELFWriterInfo ELFWriterInfo;
InstrItineraryData InstrItins;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
MBlazeTargetMachine(const Target &T, StringRef TT,
@@ -59,8 +60,8 @@ namespace llvm {
virtual const MBlazeSubtarget *getSubtargetImpl() const
{ return &Subtarget; }
- virtual const TargetData *getTargetData() const
- { return &DataLayout;}
+ virtual const DataLayout *getDataLayout() const
+ { return &DL;}
virtual const MBlazeRegisterInfo *getRegisterInfo() const
{ return &InstrInfo.getRegisterInfo(); }
@@ -74,9 +75,10 @@ namespace llvm {
const TargetIntrinsicInfo *getIntrinsicInfo() const
{ return &IntrinsicInfo; }
- virtual const MBlazeELFWriterInfo *getELFWriterInfo() const {
- return &ELFWriterInfo;
- }
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const
+ { return &STTI; }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const
+ { return &VTTI; }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
index f66ea30..899c74ee 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
@@ -13,7 +13,7 @@
#include "llvm/GlobalVariable.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
@@ -70,7 +70,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
return false;
Type *Ty = GV->getType()->getElementType();
- return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
+ return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
}
const MCSection *MBlazeTargetObjectFile::
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
index bfd11a0..2b71d9d 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
@@ -29,8 +29,8 @@ STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
namespace {
class MBlazeMCCodeEmitter : public MCCodeEmitter {
- MBlazeMCCodeEmitter(const MBlazeMCCodeEmitter &); // DO NOT IMPLEMENT
- void operator=(const MBlazeMCCodeEmitter &); // DO NOT IMPLEMENT
+ MBlazeMCCodeEmitter(const MBlazeMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ void operator=(const MBlazeMCCodeEmitter &) LLVM_DELETED_FUNCTION;
const MCInstrInfo &MCII;
public:
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
index 61d7f2b..2e170f1 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -20,7 +20,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
@@ -221,3 +221,17 @@ MSP430FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
return true;
}
+
+void
+MSP430FrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
+ const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ // Create a frame entry for the FPW register that must be saved.
+ if (TFI->hasFP(MF)) {
+ int FrameIdx = MF.getFrameInfo()->CreateFixedObject(2, -4, true);
+ (void)FrameIdx;
+ assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
+ "Slot for FPW register must be last in order to be found!");
+ }
+}
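Note: processFunctionBeforeFrameFinalized is a TargetFrameLowering virtual hook; this hunk and the MSP430RegisterInfo hunks below move the FPW spill-slot logic, unchanged, from the register-info class to the frame-lowering class that conceptually owns frame finalization. The base-class hook being overridden is a no-op by default:

    // Declared in llvm/Target/TargetFrameLowering.h:
    virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF) const {}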
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h
index b636827..cb02545 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h
@@ -46,6 +46,7 @@ public:
bool hasFP(const MachineFunction &MF) const;
bool hasReservedCallFrame(const MachineFunction &MF) const;
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 5430d43..5efc6a3 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -274,8 +274,8 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue N,
else if (AM.JT != -1)
Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i16, 0/*AM.SymbolFlags*/);
else if (AM.BlockAddr)
- Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
- true, 0/*AM.SymbolFlags*/);
+ Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, 0,
+ 0/*AM.SymbolFlags*/);
else
Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i16);
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index f8b7e14..fc677ae 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -61,7 +61,7 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
TargetLowering(tm, new TargetLoweringObjectFileELF()),
Subtarget(*tm.getSubtargetImpl()) {
- TD = getTargetData();
+ TD = getDataLayout();
// Set up the register classes.
addRegisterClass(MVT::i8, &MSP430::GR8RegClass);
@@ -655,7 +655,7 @@ SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
- SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true);
+ SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());
return DAG.getNode(MSP430ISD::Wrapper, dl, getPointerTy(), Result);
}
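Note: getTargetBlockAddress is new convenience sugar for the old getBlockAddress(..., /*isTarget=*/true) form, and also threads through an offset and target flags, as the four-argument call in the MSP430ISelDAGToDAG hunk above shows. Its shape on SelectionDAG as of this import:

    SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
                                  int64_t Offset = 0,
                                  unsigned char TargetFlags = 0);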
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
index d8ad02f..991304c 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
@@ -169,7 +169,7 @@ namespace llvm {
SelectionDAG &DAG) const;
const MSP430Subtarget &Subtarget;
- const TargetData *TD;
+ const DataLayout *TD;
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
index aed46a2..9ae238f 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -220,20 +220,6 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(i+1).ChangeToImmediate(Offset);
}
-void
-MSP430RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
- const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- // Create a frame entry for the FPW register that must be saved.
- if (TFI->hasFP(MF)) {
- int FrameIdx = MF.getFrameInfo()->CreateFixedObject(2, -4, true);
- (void)FrameIdx;
- assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
- "Slot for FPW register must be last in order to be found!");
- }
-}
-
unsigned MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
index 9ee0a03..64a43bc 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
@@ -49,8 +49,6 @@ public:
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
- void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
-
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const;
};
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
index 817001d..13e37b3 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -33,10 +33,10 @@ MSP430TargetMachine::MSP430TargetMachine(const Target &T,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS),
- // FIXME: Check TargetData string.
- DataLayout("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
+ // FIXME: Check DataLayout string.
+ DL("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameLowering(Subtarget) { }
+ FrameLowering(Subtarget), STTI(&TLInfo), VTTI(&TLInfo) { }
namespace {
/// MSP430 Code Generator Pass Configuration Options.
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
index f54146b..186172e 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
@@ -21,9 +21,10 @@
#include "MSP430SelectionDAGInfo.h"
#include "MSP430RegisterInfo.h"
#include "MSP430Subtarget.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
@@ -31,11 +32,13 @@ namespace llvm {
///
class MSP430TargetMachine : public LLVMTargetMachine {
MSP430Subtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
MSP430InstrInfo InstrInfo;
MSP430TargetLowering TLInfo;
MSP430SelectionDAGInfo TSInfo;
MSP430FrameLowering FrameLowering;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
MSP430TargetMachine(const Target &T, StringRef TT,
@@ -47,7 +50,7 @@ public:
return &FrameLowering;
}
virtual const MSP430InstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetData *getTargetData() const { return &DataLayout;}
+ virtual const DataLayout *getDataLayout() const { return &DL;}
virtual const MSP430Subtarget *getSubtargetImpl() const { return &Subtarget; }
virtual const TargetRegisterInfo *getRegisterInfo() const {
@@ -61,7 +64,12 @@ public:
virtual const MSP430SelectionDAGInfo* getSelectionDAGInfo() const {
return &TSInfo;
}
-
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
}; // MSP430TargetMachine.
diff --git a/contrib/llvm/lib/Target/Mangler.cpp b/contrib/llvm/lib/Target/Mangler.cpp
index 786a0c5..539a1f7 100644
--- a/contrib/llvm/lib/Target/Mangler.cpp
+++ b/contrib/llvm/lib/Target/Mangler.cpp
@@ -14,7 +14,7 @@
#include "llvm/Target/Mangler.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/raw_ostream.h"
@@ -44,7 +44,7 @@ static void MangleLetter(SmallVectorImpl<char> &OutName, unsigned char C) {
OutName.push_back('_');
}
-/// NameNeedsEscaping - Return true if the identifier \arg Str needs quotes
+/// NameNeedsEscaping - Return true if the identifier \p Str needs quotes
/// for this assembler.
static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo &MAI) {
assert(!Str.empty() && "Cannot create an empty MCSymbol");
@@ -157,7 +157,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
/// a suffix on their name indicating the number of words of arguments they
/// take.
static void AddFastCallStdCallSuffix(SmallVectorImpl<char> &OutName,
- const Function *F, const TargetData &TD) {
+ const Function *F, const DataLayout &TD) {
// Calculate arguments size total.
unsigned ArgWords = 0;
for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
@@ -183,8 +183,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
ManglerPrefixTy PrefixTy = Mangler::Default;
if (GV->hasPrivateLinkage() || isImplicitlyPrivate)
PrefixTy = Mangler::Private;
- else if (GV->hasLinkerPrivateLinkage() || GV->hasLinkerPrivateWeakLinkage() ||
- GV->hasLinkerPrivateWeakDefAutoLinkage())
+ else if (GV->hasLinkerPrivateLinkage() || GV->hasLinkerPrivateWeakLinkage())
PrefixTy = Mangler::LinkerPrivate;
// If this global has a name, handle it simply.
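Note: AddFastCallStdCallSuffix now takes the renamed DataLayout, and linker_private_weak_def_auto disappears as a distinct prefix case. The @N suffix itself is the total argument size with each argument rounded up to a 4-byte word; a sketch of the computation, with TD the DataLayout:

    // e.g. void @f(i32, double) under stdcall: 4 + 8 = 12  ->  "f@12"
    unsigned ArgWords = 0;
    for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
         AI != AE; ++AI)
      ArgWords += ((TD.getTypeAllocSize(AI->getType()) + 3) / 4) * 4;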
diff --git a/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 58b5590..67b5248 100644
--- a/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -8,53 +8,1316 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "MipsRegisterInfo.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
namespace {
+class MipsAssemblerOptions {
+public:
+ MipsAssemblerOptions():
+ aTReg(1), reorder(true), macro(true) {
+ }
+
+ unsigned getATRegNum() {return aTReg;}
+ bool setATReg(unsigned Reg);
+
+ bool isReorder() {return reorder;}
+ void setReorder() {reorder = true;}
+ void setNoreorder() {reorder = false;}
+
+ bool isMacro() {return macro;}
+ void setMacro() {macro = true;}
+ void setNomacro() {macro = false;}
+
+private:
+ unsigned aTReg;
+ bool reorder;
+ bool macro;
+};
+}
+
+namespace {
class MipsAsmParser : public MCTargetAsmParser {
- bool MatchAndEmitInstruction(SMLoc IDLoc,
+
+ enum FpFormatTy {
+ FP_FORMAT_NONE = -1,
+ FP_FORMAT_S,
+ FP_FORMAT_D,
+ FP_FORMAT_L,
+ FP_FORMAT_W
+ } FpFormat;
+
+ MCSubtargetInfo &STI;
+ MCAsmParser &Parser;
+ MipsAssemblerOptions Options;
+
+
+#define GET_ASSEMBLER_HEADER
+#include "MipsGenAsmMatcher.inc"
+
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out);
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm);
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
- bool ParseInstruction(StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+
+ bool parseMathOperation(StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands);
bool ParseDirective(AsmToken DirectiveID);
+ MipsAsmParser::OperandMatchResultTy
+ parseMemOperand(SmallVectorImpl<MCParsedAsmOperand*>&);
+
+ bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &,
+ StringRef Mnemonic);
+
+ int tryParseRegister(StringRef Mnemonic);
+
+ bool tryParseRegisterOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ StringRef Mnemonic);
+
+ bool needsExpansion(MCInst &Inst);
+
+ void expandInstruction(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+ void expandLoadImm(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+ void expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+ void expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+ bool reportParseError(StringRef ErrorMsg);
+
+ bool parseMemOffset(const MCExpr *&Res);
+ bool parseRelocOperand(const MCExpr *&Res);
+
+ bool parseDirectiveSet();
+
+ bool parseSetAtDirective();
+ bool parseSetNoAtDirective();
+ bool parseSetMacroDirective();
+ bool parseSetNoMacroDirective();
+ bool parseSetReorderDirective();
+ bool parseSetNoReorderDirective();
+
+ MCSymbolRefExpr::VariantKind getVariantKind(StringRef Symbol);
+
+ bool isMips64() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips64) != 0;
+ }
+
+ bool isFP64() const {
+ return (STI.getFeatureBits() & Mips::FeatureFP64Bit) != 0;
+ }
+
+ int matchRegisterName(StringRef Symbol);
+
+ int matchRegisterByNumber(unsigned RegNum, StringRef Mnemonic);
+
+ void setFpFormat(FpFormatTy Format) {
+ FpFormat = Format;
+ }
+
+ void setDefaultFpFormat();
+
+ void setFpFormat(StringRef Format);
+
+ FpFormatTy getFpFormat() {return FpFormat;}
+
+ bool requestsDoubleOperand(StringRef Mnemonic);
+
+ unsigned getReg(int RC,int RegNo);
+
+ unsigned getATReg();
public:
MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
- : MCTargetAsmParser() {
+ : MCTargetAsmParser(), STI(sti), Parser(parser) {
+ // Initialize the set of available features.
+ setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ }
+
+ MCAsmParser &getParser() const { return Parser; }
+ MCAsmLexer &getLexer() const { return Parser.getLexer(); }
+
+};
+}
+
+namespace {
+
+/// MipsOperand - Instances of this class represent a parsed Mips machine
+/// instruction.
+class MipsOperand : public MCParsedAsmOperand {
+
+ enum KindTy {
+ k_CondCode,
+ k_CoprocNum,
+ k_Immediate,
+ k_Memory,
+ k_PostIndexRegister,
+ k_Register,
+ k_Token
+ } Kind;
+
+ MipsOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+
+ union {
+ struct {
+ const char *Data;
+ unsigned Length;
+ } Tok;
+
+ struct {
+ unsigned RegNum;
+ } Reg;
+
+ struct {
+ const MCExpr *Val;
+ } Imm;
+
+ struct {
+ unsigned Base;
+ const MCExpr *Off;
+ } Mem;
+ };
+
+ SMLoc StartLoc, EndLoc;
+
+public:
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
+ }
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const{
+ // Add as immediate when possible. Null MCExpr = 0.
+ if (Expr == 0)
+ Inst.addOperand(MCOperand::CreateImm(0));
+ else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(Expr));
+ }
+
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCExpr *Expr = getImm();
+ addExpr(Inst,Expr);
+ }
+
+ void addMemOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+
+ Inst.addOperand(MCOperand::CreateReg(getMemBase()));
+
+ const MCExpr *Expr = getMemOff();
+ addExpr(Inst,Expr);
+ }
+
+ bool isReg() const { return Kind == k_Register; }
+ bool isImm() const { return Kind == k_Immediate; }
+ bool isToken() const { return Kind == k_Token; }
+ bool isMem() const { return Kind == k_Memory; }
+
+ StringRef getToken() const {
+ assert(Kind == k_Token && "Invalid access!");
+ return StringRef(Tok.Data, Tok.Length);
+ }
+
+ unsigned getReg() const {
+ assert((Kind == k_Register) && "Invalid access!");
+ return Reg.RegNum;
+ }
+
+ const MCExpr *getImm() const {
+ assert((Kind == k_Immediate) && "Invalid access!");
+ return Imm.Val;
+ }
+
+ unsigned getMemBase() const {
+ assert((Kind == k_Memory) && "Invalid access!");
+ return Mem.Base;
+ }
+
+ const MCExpr *getMemOff() const {
+ assert((Kind == k_Memory) && "Invalid access!");
+ return Mem.Off;
+ }
+
+ static MipsOperand *CreateToken(StringRef Str, SMLoc S) {
+ MipsOperand *Op = new MipsOperand(k_Token);
+ Op->Tok.Data = Str.data();
+ Op->Tok.Length = Str.size();
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
+ static MipsOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
+ MipsOperand *Op = new MipsOperand(k_Register);
+ Op->Reg.RegNum = RegNum;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static MipsOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
+ MipsOperand *Op = new MipsOperand(k_Immediate);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static MipsOperand *CreateMem(unsigned Base, const MCExpr *Off,
+ SMLoc S, SMLoc E) {
+ MipsOperand *Op = new MipsOperand(k_Memory);
+ Op->Mem.Base = Base;
+ Op->Mem.Off = Off;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ /// getStartLoc - Get the location of the first token of this operand.
+ SMLoc getStartLoc() const { return StartLoc; }
+ /// getEndLoc - Get the location of the last token of this operand.
+ SMLoc getEndLoc() const { return EndLoc; }
+
+ virtual void print(raw_ostream &OS) const {
+ llvm_unreachable("unimplemented!");
+ }
};
}
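Note: MipsOperand follows the stock MCParsedAsmOperand pattern: a kind tag over a union of token/register/immediate/memory payloads, static Create* factories, and add*Operands callbacks that the generated matcher invokes to build the final MCInst. In sketch form, parsing "lw $2, 8($sp)" yields an operand list along these lines (source locations elided):

    Operands.push_back(MipsOperand::CreateToken("lw", Loc));
    Operands.push_back(MipsOperand::CreateReg(Mips::V0, S, E));  // $2
    Operands.push_back(MipsOperand::CreateMem(Mips::SP,          // 8($sp)
        MCConstantExpr::Create(8, Ctx), S, E));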
+bool MipsAsmParser::needsExpansion(MCInst &Inst) {
+
+ switch(Inst.getOpcode()) {
+ case Mips::LoadImm32Reg:
+ case Mips::LoadAddr32Imm:
+ case Mips::LoadAddr32Reg:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions){
+ switch(Inst.getOpcode()) {
+ case Mips::LoadImm32Reg:
+ return expandLoadImm(Inst, IDLoc, Instructions);
+ case Mips::LoadAddr32Imm:
+ return expandLoadAddressImm(Inst,IDLoc,Instructions);
+ case Mips::LoadAddr32Reg:
+ return expandLoadAddressReg(Inst,IDLoc,Instructions);
+ }
+}
+
+void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions){
+ MCInst tmpInst;
+ const MCOperand &ImmOp = Inst.getOperand(1);
+ assert(ImmOp.isImm() && "expected immediate operand kind");
+ const MCOperand &RegOp = Inst.getOperand(0);
+ assert(RegOp.isReg() && "expected register operand kind");
+
+ int ImmValue = ImmOp.getImm();
+ tmpInst.setLoc(IDLoc);
+ if ( 0 <= ImmValue && ImmValue <= 65535) {
+ // for 0 <= j <= 65535.
+ // li d,j => ori d,$zero,j
+ tmpInst.setOpcode(isMips64() ? Mips::ORi64 : Mips::ORi);
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(
+ MCOperand::CreateReg(isMips64() ? Mips::ZERO_64 : Mips::ZERO));
+ tmpInst.addOperand(MCOperand::CreateImm(ImmValue));
+ Instructions.push_back(tmpInst);
+ } else if ( ImmValue < 0 && ImmValue >= -32768) {
+ // for -32768 <= j < 0.
+ // li d,j => addiu d,$zero,j
+ tmpInst.setOpcode(Mips::ADDiu); // TODO: no ADDiu64 in the .td files?
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(
+ MCOperand::CreateReg(isMips64() ? Mips::ZERO_64 : Mips::ZERO));
+ tmpInst.addOperand(MCOperand::CreateImm(ImmValue));
+ Instructions.push_back(tmpInst);
+ } else {
+ // for any other value of j that is representable as a 32-bit integer.
+ // li d,j => lui d,hi16(j)
+ // ori d,d,lo16(j)
+ tmpInst.setOpcode(isMips64() ? Mips::LUi64 : Mips::LUi);
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16));
+ Instructions.push_back(tmpInst);
+ tmpInst.clear();
+ tmpInst.setOpcode(isMips64() ? Mips::ORi64 : Mips::ORi);
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateImm(ImmValue & 0xffff));
+ tmpInst.setLoc(IDLoc);
+ Instructions.push_back(tmpInst);
+ }
+}
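Note: the three branches above mirror the classic MIPS li macro expansion. Worked examples of each tier:

    li $4, 100      =>  ori   $4, $zero, 100    (0 <= j <= 65535)
    li $4, -5       =>  addiu $4, $zero, -5     (-32768 <= j < 0)
    li $4, 0x12345  =>  lui   $4, 0x1           (hi16 = 0x12345 >> 16)
                        ori   $4, $4, 0x2345    (lo16 = 0x12345 & 0xffff)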
+
+void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions){
+ MCInst tmpInst;
+ const MCOperand &ImmOp = Inst.getOperand(2);
+ assert(ImmOp.isImm() && "expected immediate operand kind");
+ const MCOperand &SrcRegOp = Inst.getOperand(1);
+ assert(SrcRegOp.isReg() && "expected register operand kind");
+ const MCOperand &DstRegOp = Inst.getOperand(0);
+ assert(DstRegOp.isReg() && "expected register operand kind");
+ int ImmValue = ImmOp.getImm();
+ if ( -32768 <= ImmValue && ImmValue <= 65535) {
+ //for -32768 <= j <= 65535.
+ //la d,j(s) => addiu d,s,j
+ tmpInst.setOpcode(Mips::ADDiu); // TODO: no ADDiu64 in the .td files?
+ tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateReg(SrcRegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateImm(ImmValue));
+ Instructions.push_back(tmpInst);
+ } else {
+ //for any other value of j that is representable as a 32-bit integer.
+ //la d,j(s) => lui d,hi16(j)
+ // ori d,d,lo16(j)
+ // addu d,d,s
+ tmpInst.setOpcode(isMips64()?Mips::LUi64:Mips::LUi);
+ tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16));
+ Instructions.push_back(tmpInst);
+ tmpInst.clear();
+ tmpInst.setOpcode(isMips64()?Mips::ORi64:Mips::ORi);
+ tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateImm(ImmValue & 0xffff));
+ Instructions.push_back(tmpInst);
+ tmpInst.clear();
+ tmpInst.setOpcode(Mips::ADDu);
+ tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateReg(SrcRegOp.getReg()));
+ Instructions.push_back(tmpInst);
+ }
+}
+
+void MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions){
+ MCInst tmpInst;
+ const MCOperand &ImmOp = Inst.getOperand(1);
+ assert(ImmOp.isImm() && "expected immediate operand kind");
+ const MCOperand &RegOp = Inst.getOperand(0);
+ assert(RegOp.isReg() && "expected register operand kind");
+ int ImmValue = ImmOp.getImm();
+ if ( -32768 <= ImmValue && ImmValue <= 65535) {
+ //for -32768 <= j <= 65535.
+ //la d,j => addiu d,$zero,j
+ tmpInst.setOpcode(Mips::ADDiu);
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(
+ MCOperand::CreateReg(isMips64()?Mips::ZERO_64:Mips::ZERO));
+ tmpInst.addOperand(MCOperand::CreateImm(ImmValue));
+ Instructions.push_back(tmpInst);
+ } else {
+ //for any other value of j that is representable as a 32-bit integer.
+ //la d,j => lui d,hi16(j)
+ // ori d,d,lo16(j)
+ tmpInst.setOpcode(isMips64()?Mips::LUi64:Mips::LUi);
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16));
+ Instructions.push_back(tmpInst);
+ tmpInst.clear();
+ tmpInst.setOpcode(isMips64()?Mips::ORi64:Mips::ORi);
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(MCOperand::CreateImm(ImmValue & 0xffff));
+ Instructions.push_back(tmpInst);
+ }
+}
+
bool MipsAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc,
+MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out) {
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
+ MCInst Inst;
+ unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
+ MatchingInlineAsm);
+
+ switch (MatchResult) {
+ default: break;
+ case Match_Success: {
+ if (needsExpansion(Inst)) {
+ SmallVector<MCInst, 4> Instructions;
+ expandInstruction(Inst, IDLoc, Instructions);
+ for(unsigned i =0; i < Instructions.size(); i++){
+ Out.EmitInstruction(Instructions[i]);
+ }
+ } else {
+ Inst.setLoc(IDLoc);
+ Out.EmitInstruction(Inst);
+ }
+ return false;
+ }
+ case Match_MissingFeature:
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((MipsOperand*)Operands[ErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ }
+
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+ case Match_MnemonicFail:
+ return Error(IDLoc, "invalid instruction");
+ }
+ return true;
+}
+
+int MipsAsmParser::matchRegisterName(StringRef Name) {
+
+ int CC;
+ if (!isMips64())
+ CC = StringSwitch<unsigned>(Name)
+ .Case("zero", Mips::ZERO)
+ .Case("a0", Mips::A0)
+ .Case("a1", Mips::A1)
+ .Case("a2", Mips::A2)
+ .Case("a3", Mips::A3)
+ .Case("v0", Mips::V0)
+ .Case("v1", Mips::V1)
+ .Case("s0", Mips::S0)
+ .Case("s1", Mips::S1)
+ .Case("s2", Mips::S2)
+ .Case("s3", Mips::S3)
+ .Case("s4", Mips::S4)
+ .Case("s5", Mips::S5)
+ .Case("s6", Mips::S6)
+ .Case("s7", Mips::S7)
+ .Case("k0", Mips::K0)
+ .Case("k1", Mips::K1)
+ .Case("sp", Mips::SP)
+ .Case("fp", Mips::FP)
+ .Case("gp", Mips::GP)
+ .Case("ra", Mips::RA)
+ .Case("t0", Mips::T0)
+ .Case("t1", Mips::T1)
+ .Case("t2", Mips::T2)
+ .Case("t3", Mips::T3)
+ .Case("t4", Mips::T4)
+ .Case("t5", Mips::T5)
+ .Case("t6", Mips::T6)
+ .Case("t7", Mips::T7)
+ .Case("t8", Mips::T8)
+ .Case("t9", Mips::T9)
+ .Case("at", Mips::AT)
+ .Case("fcc0", Mips::FCC0)
+ .Default(-1);
+ else
+ CC = StringSwitch<unsigned>(Name)
+ .Case("zero", Mips::ZERO_64)
+ .Case("at", Mips::AT_64)
+ .Case("v0", Mips::V0_64)
+ .Case("v1", Mips::V1_64)
+ .Case("a0", Mips::A0_64)
+ .Case("a1", Mips::A1_64)
+ .Case("a2", Mips::A2_64)
+ .Case("a3", Mips::A3_64)
+ .Case("a4", Mips::T0_64)
+ .Case("a5", Mips::T1_64)
+ .Case("a6", Mips::T2_64)
+ .Case("a7", Mips::T3_64)
+ .Case("t4", Mips::T4_64)
+ .Case("t5", Mips::T5_64)
+ .Case("t6", Mips::T6_64)
+ .Case("t7", Mips::T7_64)
+ .Case("s0", Mips::S0_64)
+ .Case("s1", Mips::S1_64)
+ .Case("s2", Mips::S2_64)
+ .Case("s3", Mips::S3_64)
+ .Case("s4", Mips::S4_64)
+ .Case("s5", Mips::S5_64)
+ .Case("s6", Mips::S6_64)
+ .Case("s7", Mips::S7_64)
+ .Case("t8", Mips::T8_64)
+ .Case("t9", Mips::T9_64)
+ .Case("kt0", Mips::K0_64)
+ .Case("kt1", Mips::K1_64)
+ .Case("gp", Mips::GP_64)
+ .Case("sp", Mips::SP_64)
+ .Case("fp", Mips::FP_64)
+ .Case("s8", Mips::FP_64)
+ .Case("ra", Mips::RA_64)
+ .Default(-1);
+
+ if (CC != -1)
+ return CC;
+
+ if (Name[0] == 'f') {
+ StringRef NumString = Name.substr(1);
+ unsigned IntVal;
+ if( NumString.getAsInteger(10, IntVal))
+ return -1; // not integer
+ if (IntVal > 31)
+ return -1;
+
+ FpFormatTy Format = getFpFormat();
+
+ if (Format == FP_FORMAT_S || Format == FP_FORMAT_W)
+ return getReg(Mips::FGR32RegClassID, IntVal);
+ if (Format == FP_FORMAT_D) {
+ if(isFP64()) {
+ return getReg(Mips::FGR64RegClassID, IntVal);
+ }
+ // only even numbers available as register pairs
+ if (( IntVal > 31) || (IntVal%2 != 0))
+ return -1;
+ return getReg(Mips::AFGR64RegClassID, IntVal/2);
+ }
+ }
+
+ return -1;
+}
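Note: for the $fN names the mapping depends on the current FP format: single and word formats index FGR32 directly, doubles on a 64-bit FPU index FGR64, and doubles on a 32-bit FPU use even-numbered AFGR64 register pairs. Worked examples of that last path:

    "$f2" with format .d, FP32  =>  getReg(AFGR64RegClassID, 1)  // pair f2/f3
    "$f3" with format .d, FP32  =>  -1                           // odd: rejected
    "$f2" with format .s        =>  getReg(FGR32RegClassID, 2)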
+void MipsAsmParser::setDefaultFpFormat() {
+
+ if (isMips64() || isFP64())
+ FpFormat = FP_FORMAT_D;
+ else
+ FpFormat = FP_FORMAT_S;
+}
+
+bool MipsAsmParser::requestsDoubleOperand(StringRef Mnemonic){
+
+ bool IsDouble = StringSwitch<bool>(Mnemonic.lower())
+ .Case("ldxc1", true)
+ .Case("ldc1", true)
+ .Case("sdxc1", true)
+ .Case("sdc1", true)
+ .Default(false);
+
+ return IsDouble;
+}
+void MipsAsmParser::setFpFormat(StringRef Format) {
+
+ FpFormat = StringSwitch<FpFormatTy>(Format.lower())
+ .Case(".s", FP_FORMAT_S)
+ .Case(".d", FP_FORMAT_D)
+ .Case(".l", FP_FORMAT_L)
+ .Case(".w", FP_FORMAT_W)
+ .Default(FP_FORMAT_NONE);
+}
+
+bool MipsAssemblerOptions::setATReg(unsigned Reg) {
+ if (Reg > 31)
+ return false;
+
+ aTReg = Reg;
return true;
}
+unsigned MipsAsmParser::getATReg() {
+ unsigned Reg = Options.getATRegNum();
+ if (isMips64())
+ return getReg(Mips::CPU64RegsRegClassID,Reg);
+
+ return getReg(Mips::CPURegsRegClassID,Reg);
+}
+
+unsigned MipsAsmParser::getReg(int RC,int RegNo) {
+ return *(getContext().getRegisterInfo().getRegClass(RC).begin() + RegNo);
+}
+
+int MipsAsmParser::matchRegisterByNumber(unsigned RegNum, StringRef Mnemonic) {
+
+ if (Mnemonic.lower() == "rdhwr") {
+ // at the moment only hwreg29 is supported
+ if (RegNum != 29)
+ return -1;
+ return Mips::HWR29;
+ }
+
+ if (RegNum > 31)
+ return -1;
+
+ // MIPS64 registers are numbered 1 after the 32-bit equivalents
+ return getReg(Mips::CPURegsRegClassID, RegNum) + isMips64();
+}
+
+int MipsAsmParser::tryParseRegister(StringRef Mnemonic) {
+ const AsmToken &Tok = Parser.getTok();
+ int RegNum = -1;
+
+ if (Tok.is(AsmToken::Identifier)) {
+ std::string lowerCase = Tok.getString().lower();
+ RegNum = matchRegisterName(lowerCase);
+ } else if (Tok.is(AsmToken::Integer))
+ RegNum = matchRegisterByNumber(static_cast<unsigned>(Tok.getIntVal()),
+ Mnemonic.lower());
+ else
+ return RegNum; //error
+ // 64-bit div operations require Mips::ZERO instead of Mips::ZERO_64.
+ if (isMips64() && RegNum == Mips::ZERO_64) {
+ if (Mnemonic.find("ddiv") != StringRef::npos)
+ RegNum = Mips::ZERO;
+ }
+ return RegNum;
+}
+
bool MipsAsmParser::
-ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
+ tryParseRegisterOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ StringRef Mnemonic){
+
+ SMLoc S = Parser.getTok().getLoc();
+ int RegNo = -1;
+
+ // FIXME: we should make a more generic method for CCR
+ if ((Mnemonic == "cfc1" || Mnemonic == "ctc1")
+ && Operands.size() == 2 && Parser.getTok().is(AsmToken::Integer)){
+ RegNo = Parser.getTok().getIntVal(); // get the int value
+ // at the moment only fcc0 is supported
+ if (RegNo == 0)
+ RegNo = Mips::FCC0;
+ } else
+ RegNo = tryParseRegister(Mnemonic);
+ if (RegNo == -1)
+ return true;
+
+ Operands.push_back(MipsOperand::CreateReg(RegNo, S,
+ Parser.getTok().getLoc()));
+ Parser.Lex(); // Eat register token.
+ return false;
+}
+
+bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands,
+ StringRef Mnemonic) {
+ // Check if the current operand has a custom associated parser, if so, try to
+ // custom parse the operand, or fallback to the general approach.
+ OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
+ if (ResTy == MatchOperand_Success)
+ return false;
+ // If there wasn't a custom match, try the generic matcher below. Otherwise,
+ // there was a match, but an error occurred, in which case, just return that
+ // the operand parsing failed.
+ if (ResTy == MatchOperand_ParseFail)
+ return true;
+
+ switch (getLexer().getKind()) {
+ default:
+ Error(Parser.getTok().getLoc(), "unexpected token in operand");
+ return true;
+ case AsmToken::Dollar: {
+ // parse register
+ SMLoc S = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat dollar token.
+ // parse register operand
+ if (!tryParseRegisterOperand(Operands, Mnemonic)) {
+ if (getLexer().is(AsmToken::LParen)) {
+ // check if it is indexed addressing operand
+ Operands.push_back(MipsOperand::CreateToken("(", S));
+ Parser.Lex(); // eat parenthesis
+ if (getLexer().isNot(AsmToken::Dollar))
+ return true;
+
+ Parser.Lex(); // eat dollar
+ if (tryParseRegisterOperand(Operands, Mnemonic))
+ return true;
+
+ if (!getLexer().is(AsmToken::RParen))
+ return true;
+
+ S = Parser.getTok().getLoc();
+ Operands.push_back(MipsOperand::CreateToken(")", S));
+ Parser.Lex();
+ }
+ return false;
+ }
+ // maybe it is a symbol reference
+ StringRef Identifier;
+ if (Parser.ParseIdentifier(Identifier))
+ return true;
+
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ MCSymbol *Sym = getContext().GetOrCreateSymbol("$" + Identifier);
+
+ // Otherwise create a symbol ref.
+ const MCExpr *Res = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None,
+ getContext());
+
+ Operands.push_back(MipsOperand::CreateImm(Res, S, E));
+ return false;
+ }
+ case AsmToken::Identifier:
+ case AsmToken::LParen:
+ case AsmToken::Minus:
+ case AsmToken::Plus:
+ case AsmToken::Integer:
+ case AsmToken::String: {
+ // quoted label names
+ const MCExpr *IdVal;
+ SMLoc S = Parser.getTok().getLoc();
+ if (getParser().ParseExpression(IdVal))
+ return true;
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(MipsOperand::CreateImm(IdVal, S, E));
+ return false;
+ }
+ case AsmToken::Percent: {
+ // it is a symbol reference or constant expression
+ const MCExpr *IdVal;
+ SMLoc S = Parser.getTok().getLoc(); // start location of the operand
+ if (parseRelocOperand(IdVal))
+ return true;
+
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ Operands.push_back(MipsOperand::CreateImm(IdVal, S, E));
+ return false;
+ } // case AsmToken::Percent
+ } // switch(getLexer().getKind())
+ return true;
+}
+
+bool MipsAsmParser::parseRelocOperand(const MCExpr *&Res) {
+
+ Parser.Lex(); // eat % token
+ const AsmToken &Tok = Parser.getTok(); // get next token, operation
+ if (Tok.isNot(AsmToken::Identifier))
+ return true;
+
+ std::string Str = Tok.getIdentifier().str();
+
+ Parser.Lex(); // eat identifier
+ // now make expression from the rest of the operand
+ const MCExpr *IdVal;
+ SMLoc EndLoc;
+
+ if (getLexer().getKind() == AsmToken::LParen) {
+ while (1) {
+ Parser.Lex(); // eat '(' token
+ if (getLexer().getKind() == AsmToken::Percent) {
+ Parser.Lex(); // eat % token
+ const AsmToken &nextTok = Parser.getTok();
+ if (nextTok.isNot(AsmToken::Identifier))
+ return true;
+ Str += "(%";
+ Str += nextTok.getIdentifier();
+ Parser.Lex(); // eat identifier
+ if (getLexer().getKind() != AsmToken::LParen)
+ return true;
+ } else
+ break;
+ }
+ if (getParser().ParseParenExpression(IdVal,EndLoc))
+ return true;
+
+ while (getLexer().getKind() == AsmToken::RParen)
+ Parser.Lex(); // eat ')' token
+
+ } else
+ return true; // A parenthesis must follow the reloc operator.
+
+ // Check the type of the expression
+ if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(IdVal)) {
+ // It is a constant; evaluate the %lo or %hi value.
+ int Val = MCE->getValue();
+ if (Str == "lo") {
+ Val = Val & 0xffff;
+ } else if (Str == "hi") {
+ Val = (Val & 0xffff0000) >> 16;
+ }
+ Res = MCConstantExpr::Create(Val, getContext());
+ return false;
+ }
+
+ if (const MCSymbolRefExpr *MSRE = dyn_cast<MCSymbolRefExpr>(IdVal)) {
+ // It is a symbol; create a symbolic expression from it.
+ StringRef Symbol = MSRE->getSymbol().getName();
+ MCSymbolRefExpr::VariantKind VK = getVariantKind(Str);
+ Res = MCSymbolRefExpr::Create(Symbol,VK,getContext());
+ return false;
+ }
return true;
}
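
For reference, the constant folding above keeps the low 16 bits for %lo and uses a plain shift for %hi. A standalone sketch of both, plus the carry-adjusted %hi that assemblers conventionally use so that a lui/addiu pair rebuilds the original constant; the adjusted form is shown for contrast only and is not what this patch computes:

    #include <cassert>
    #include <cstdint>

    // %lo as folded above: the low 16 bits.
    static int32_t foldLo(int32_t Val) { return Val & 0xffff; }

    // %hi as folded above: a plain shift, no carry adjustment.
    static int32_t foldHiPlain(int32_t Val) { return (Val & 0xffff0000) >> 16; }

    // Conventional %hi: add 0x8000 first to compensate for %lo being
    // sign-extended when the pair is rematerialized.
    static int32_t foldHiAdjusted(int32_t Val) {
      return ((Val + 0x8000) >> 16) & 0xffff;
    }

    int main() {
      int32_t C = 0x12348abc;
      assert(foldLo(C) == 0x8abc);
      assert(foldHiPlain(C) == 0x1234);
      // 0x8abc sign-extends to -0x7544, so the adjusted %hi is one
      // higher: (0x1235 << 16) - 0x7544 == 0x12348abc.
      assert(foldHiAdjusted(C) == 0x1235);
    }
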
+bool MipsAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+ SMLoc &EndLoc) {
+
+ StartLoc = Parser.getTok().getLoc();
+ RegNo = tryParseRegister("");
+ EndLoc = Parser.getTok().getLoc();
+ return (RegNo == (unsigned)-1);
+}
+
+bool MipsAsmParser::parseMemOffset(const MCExpr *&Res) {
+
+ SMLoc S;
+
+ switch(getLexer().getKind()) {
+ default:
+ return true;
+ case AsmToken::Integer:
+ case AsmToken::Minus:
+ case AsmToken::Plus:
+ return (getParser().ParseExpression(Res));
+ case AsmToken::Percent:
+ return parseRelocOperand(Res);
+ case AsmToken::LParen:
+ return false; // No offset before '('; it defaults to 0.
+ }
+ return true;
+}
+
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
+ SmallVectorImpl<MCParsedAsmOperand*>&Operands) {
+
+ const MCExpr *IdVal = 0;
+ SMLoc S;
+ // first operand is the offset
+ S = Parser.getTok().getLoc();
+
+ if (parseMemOffset(IdVal))
+ return MatchOperand_ParseFail;
+
+ const AsmToken &Tok = Parser.getTok(); // get next token
+ if (Tok.isNot(AsmToken::LParen)) {
+ MipsOperand *Mnemonic = static_cast<MipsOperand*>(Operands[0]);
+ if (Mnemonic->getToken() == "la") {
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer()-1);
+ Operands.push_back(MipsOperand::CreateImm(IdVal, S, E));
+ return MatchOperand_Success;
+ }
+ Error(Parser.getTok().getLoc(), "'(' expected");
+ return MatchOperand_ParseFail;
+ }
+
+ Parser.Lex(); // Eat '(' token.
+
+ const AsmToken &Tok1 = Parser.getTok(); // get next token
+ if (Tok1.is(AsmToken::Dollar)) {
+ Parser.Lex(); // Eat '$' token.
+ if (tryParseRegisterOperand(Operands,"")) {
+ Error(Parser.getTok().getLoc(), "unexpected token in operand");
+ return MatchOperand_ParseFail;
+ }
+
+ } else {
+ Error(Parser.getTok().getLoc(), "unexpected token in operand");
+ return MatchOperand_ParseFail;
+ }
+
+ const AsmToken &Tok2 = Parser.getTok(); // get next token
+ if (Tok2.isNot(AsmToken::RParen)) {
+ Error(Parser.getTok().getLoc(), "')' expected");
+ return MatchOperand_ParseFail;
+ }
+
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ Parser.Lex(); // Eat ')' token.
+
+ if (IdVal == 0)
+ IdVal = MCConstantExpr::Create(0, getContext());
+
+ // Now replace the register operand with the memory operand.
+ MipsOperand* op = static_cast<MipsOperand*>(Operands.back());
+ int RegNo = op->getReg();
+ // remove register from operands
+ Operands.pop_back();
+ // and add memory operand
+ Operands.push_back(MipsOperand::CreateMem(RegNo, IdVal, S, E));
+ delete op;
+ return MatchOperand_Success;
+}
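
The tail of parseMemOperand pops the base-register operand it just parsed and pushes a single memory operand built from that register and the offset expression. A minimal sketch of the rewrite over a toy operand vector; the Operand struct is an assumption, the real code uses MipsOperand::CreateMem:

    #include <cassert>
    #include <vector>

    struct Operand { enum Kind { Reg, Mem } K; int Base; long Offset; };

    // [..., Reg(base)] becomes [..., Mem(base, offset)].
    static void replaceRegWithMem(std::vector<Operand> &Ops, long Offset) {
      assert(!Ops.empty() && Ops.back().K == Operand::Reg);
      int Base = Ops.back().Base;
      Ops.pop_back();                                       // drop register
      Ops.push_back(Operand{Operand::Mem, Base, Offset});   // add mem operand
    }

    int main() {
      // "lw $2, 16($sp)": dest register 2, base register 29 ($sp), offset 16.
      std::vector<Operand> Ops = {{Operand::Reg, 2, 0}, {Operand::Reg, 29, 0}};
      replaceRegWithMem(Ops, 16);
      assert(Ops.back().K == Operand::Mem && Ops.back().Base == 29 &&
             Ops.back().Offset == 16);
    }
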
+
+MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) {
+
+ MCSymbolRefExpr::VariantKind VK
+ = StringSwitch<MCSymbolRefExpr::VariantKind>(Symbol)
+ .Case("hi", MCSymbolRefExpr::VK_Mips_ABS_HI)
+ .Case("lo", MCSymbolRefExpr::VK_Mips_ABS_LO)
+ .Case("gp_rel", MCSymbolRefExpr::VK_Mips_GPREL)
+ .Case("call16", MCSymbolRefExpr::VK_Mips_GOT_CALL)
+ .Case("got", MCSymbolRefExpr::VK_Mips_GOT)
+ .Case("tlsgd", MCSymbolRefExpr::VK_Mips_TLSGD)
+ .Case("tlsldm", MCSymbolRefExpr::VK_Mips_TLSLDM)
+ .Case("dtprel_hi", MCSymbolRefExpr::VK_Mips_DTPREL_HI)
+ .Case("dtprel_lo", MCSymbolRefExpr::VK_Mips_DTPREL_LO)
+ .Case("gottprel", MCSymbolRefExpr::VK_Mips_GOTTPREL)
+ .Case("tprel_hi", MCSymbolRefExpr::VK_Mips_TPREL_HI)
+ .Case("tprel_lo", MCSymbolRefExpr::VK_Mips_TPREL_LO)
+ .Case("got_disp", MCSymbolRefExpr::VK_Mips_GOT_DISP)
+ .Case("got_page", MCSymbolRefExpr::VK_Mips_GOT_PAGE)
+ .Case("got_ofst", MCSymbolRefExpr::VK_Mips_GOT_OFST)
+ .Case("hi(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_HI)
+ .Case("lo(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_LO)
+ .Default(MCSymbolRefExpr::VK_None);
+
+ return VK;
+}
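
getVariantKind is a plain llvm::StringSwitch table from relocation spellings to VK_Mips_* kinds, defaulting to VK_None. The same idiom with a toy enum, for readers unfamiliar with the ADT; this compiles against the LLVM tree, the enum is illustrative:

    #include "llvm/ADT/StringSwitch.h"

    enum RelocKind { RK_None, RK_Hi, RK_Lo, RK_Got };

    // Chain .Case entries, end with .Default; the first match wins.
    static RelocKind classify(llvm::StringRef Name) {
      return llvm::StringSwitch<RelocKind>(Name)
          .Case("hi", RK_Hi)
          .Case("lo", RK_Lo)
          .Case("got", RK_Got)
          .Default(RK_None);        // e.g. classify("hi") == RK_Hi
    }
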
+
+static int ConvertCcString(StringRef CondString) {
+ int CC = StringSwitch<unsigned>(CondString)
+ .Case(".f", 0)
+ .Case(".un", 1)
+ .Case(".eq", 2)
+ .Case(".ueq", 3)
+ .Case(".olt", 4)
+ .Case(".ult", 5)
+ .Case(".ole", 6)
+ .Case(".ule", 7)
+ .Case(".sf", 8)
+ .Case(".ngle", 9)
+ .Case(".seq", 10)
+ .Case(".ngl", 11)
+ .Case(".lt", 12)
+ .Case(".nge", 13)
+ .Case(".le", 14)
+ .Case(".ngt", 15)
+ .Default(-1);
+
+ return CC;
+}
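
The integers returned above are the 4-bit condition-field encodings used by the c.cond.fmt floating-point compares, ".f" = 0 through ".ngt" = 15. A small self-check, assuming it sits in the same translation unit as ConvertCcString:

    #include <cassert>

    int main() {
      assert(ConvertCcString(".olt") == 4);    // ordered less-than
      assert(ConvertCcString(".le") == 14);    // less-than-or-equal
      assert(ConvertCcString(".bad") == -1);   // unknown suffix
    }
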
+
bool MipsAsmParser::
-ParseInstruction(StringRef Name, SMLoc NameLoc,
+parseMathOperation(StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // Split out the two format suffixes.
+ size_t Start = Name.find('.'), Next = Name.rfind('.');
+ StringRef Format1 = Name.slice(Start, Next);
+ // and add the first format to the operands
+ Operands.push_back(MipsOperand::CreateToken(Format1, NameLoc));
+ // now for the second format
+ StringRef Format2 = Name.slice(Next, StringRef::npos);
+ Operands.push_back(MipsOperand::CreateToken(Format2, NameLoc));
+
+ // set the format for the first register
+ setFpFormat(Format1);
+
+ // Read the remaining operands.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ // Read the first operand.
+ if (ParseOperand(Operands, Name)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+
+ if (getLexer().isNot(AsmToken::Comma)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+
+ }
+ Parser.Lex(); // Eat the comma.
+
+ // Set the format for the second register.
+ setFpFormat(Format2);
+
+ // Parse and remember the operand.
+ if (ParseOperand(Operands, Name)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+}
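
For a two-format mnemonic such as "trunc.w.d" (the caller has already pushed the "trunc" token), the slicing above takes Format1 from the first '.' up to the last '.' and Format2 from the last '.' to the end. The same split on std::string:

    #include <cassert>
    #include <string>

    int main() {
      std::string Name = "trunc.w.d";
      size_t Start = Name.find('.'), Next = Name.rfind('.');   // 5 and 7
      std::string Format1 = Name.substr(Start, Next - Start);  // ".w"
      std::string Format2 = Name.substr(Next);                 // ".d"
      assert(Format1 == ".w" && Format2 == ".d");
    }
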
+
+bool MipsAsmParser::
+ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // floating point instructions: should register be treated as double?
+ if (requestsDoubleOperand(Name)) {
+ setFpFormat(FP_FORMAT_D);
+ Operands.push_back(MipsOperand::CreateToken(Name, NameLoc));
+ }
+ else {
+ setDefaultFpFormat();
+ // Create the leading tokens for the mnemonic, split by '.' characters.
+ size_t Start = 0, Next = Name.find('.');
+ StringRef Mnemonic = Name.slice(Start, Next);
+
+ Operands.push_back(MipsOperand::CreateToken(Mnemonic, NameLoc));
+
+ if (Next != StringRef::npos) {
+ // there is a format token in mnemonic
+ // StringRef Rest = Name.slice(Next, StringRef::npos);
+ size_t Dot = Name.find('.', Next+1);
+ StringRef Format = Name.slice(Next, Dot);
+ if (Dot == StringRef::npos) // Only one '.' in the string; it is a format suffix.
+ Operands.push_back(MipsOperand::CreateToken(Format, NameLoc));
+ else {
+ if (Name.startswith("c.")){
+ // Floating-point compare: add a '.' token and an immediate
+ // representing the condition code.
+ Operands.push_back(MipsOperand::CreateToken(".", NameLoc));
+ int Cc = ConvertCcString(Format);
+ if (Cc == -1) {
+ return Error(NameLoc, "invalid condition code");
+ }
+ SMLoc E = SMLoc::getFromPointer(
+ Parser.getTok().getLoc().getPointer() -1 );
+ Operands.push_back(MipsOperand::CreateImm(
+ MCConstantExpr::Create(Cc, getContext()), NameLoc, E));
+ } else {
+ // trunc, ceil, floor ...
+ return parseMathOperation(Name, NameLoc, Operands);
+ }
+
+ // The rest is a format suffix.
+ Format = Name.slice(Dot, StringRef::npos);
+ Operands.push_back(MipsOperand::CreateToken(Format, NameLoc));
+ }
+
+ setFpFormat(Format);
+ }
+ }
+
+ // Read the remaining operands.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ // Read the first operand.
+ if (ParseOperand(Operands, Name)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+
+ while (getLexer().is(AsmToken::Comma) ) {
+ Parser.Lex(); // Eat the comma.
+
+ // Parse and remember the operand.
+ if (ParseOperand(Operands, Name)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+ }
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+}
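
Tracing the mnemonic handling above for "c.eq.d": the head "c" is pushed as the mnemonic, the ".eq" slice is turned into a "." token plus the condition-code immediate 2 via ConvertCcString, and the trailing ".d" is pushed as the format token that drives setFpFormat. The index math in isolation:

    #include <cassert>
    #include <string>

    int main() {
      std::string Name = "c.eq.d";
      size_t Next = Name.find('.');                       // 1
      std::string Mnemonic = Name.substr(0, Next);        // "c"
      size_t Dot = Name.find('.', Next + 1);              // 4
      std::string Cond = Name.substr(Next, Dot - Next);   // ".eq" -> cc 2
      std::string Fmt = Name.substr(Dot);                 // ".d"
      assert(Mnemonic == "c" && Cond == ".eq" && Fmt == ".d");
    }
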
+
+bool MipsAsmParser::reportParseError(StringRef ErrorMsg) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, ErrorMsg);
+}
+
+bool MipsAsmParser::parseSetNoAtDirective() {
+ // The line should look like:
+ // .set noat
+ // Set the at register to 0.
+ Options.setATReg(0);
+ // eat noat
+ Parser.Lex();
+ // if this is not the end of the statement, report error
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+}
+
+bool MipsAsmParser::parseSetAtDirective() {
+ // line can be
+ // .set at - defaults to $1
+ // or .set at=$reg
+ getParser().Lex();
+ if (getLexer().is(AsmToken::EndOfStatement)) {
+ Options.setATReg(1);
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+ } else if (getLexer().is(AsmToken::Equal)) {
+ getParser().Lex(); //eat '='
+ if (getLexer().isNot(AsmToken::Dollar)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ Parser.Lex(); // eat '$'
+ if (getLexer().isNot(AsmToken::Integer)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ const AsmToken &Reg = Parser.getTok();
+ if (!Options.setATReg(Reg.getIntVal())) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ getParser().Lex(); //eat reg
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+ } else {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+}
+
+bool MipsAsmParser::parseSetReorderDirective() {
+ Parser.Lex();
+ // if this is not the end of the statement, report error
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ Options.setReorder();
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+}
+
+bool MipsAsmParser::parseSetNoReorderDirective() {
+ Parser.Lex();
+ // if this is not the end of the statement, report error
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ Options.setNoreorder();
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+}
+
+bool MipsAsmParser::parseSetMacroDirective() {
+ Parser.Lex();
+ // if this is not the end of the statement, report error
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ Options.setMacro();
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+}
+
+bool MipsAsmParser::parseSetNoMacroDirective() {
+ Parser.Lex();
+ // if this is not the end of the statement, report error
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("`noreorder' must be set before `nomacro'");
+ return false;
+ }
+ if (Options.isReorder()) {
+ reportParseError("`noreorder' must be set before `nomacro'");
+ return false;
+ }
+ Options.setNomacro();
+ Parser.Lex(); // Consume the EndOfStatement
+ return false;
+}
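
The handlers above feed the parseDirectiveSet dispatcher that follows. A toy mirror of its option table; as in the real routine, anything unrecognized falls through (the real dispatcher then returns true):

    #include <cassert>
    #include <string>

    static bool isKnownSetOption(const std::string &Tok) {
      return Tok == "noat" || Tok == "at" || Tok == "reorder" ||
             Tok == "noreorder" || Tok == "macro" || Tok == "nomacro" ||
             Tok == "nomips16" || Tok == "nomicromips";
    }

    int main() {
      assert(isKnownSetOption("noreorder"));
      assert(!isKnownSetOption("mips3"));   // unhandled .set option
    }
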
+bool MipsAsmParser::parseDirectiveSet() {
+
+ // get next token
+ const AsmToken &Tok = Parser.getTok();
+
+ if (Tok.getString() == "noat") {
+ return parseSetNoAtDirective();
+ } else if (Tok.getString() == "at") {
+ return parseSetAtDirective();
+ } else if (Tok.getString() == "reorder") {
+ return parseSetReorderDirective();
+ } else if (Tok.getString() == "noreorder") {
+ return parseSetNoReorderDirective();
+ } else if (Tok.getString() == "macro") {
+ return parseSetMacroDirective();
+ } else if (Tok.getString() == "nomacro") {
+ return parseSetNoMacroDirective();
+ } else if (Tok.getString() == "nomips16") {
+ // ignore this directive for now
+ Parser.EatToEndOfStatement();
+ return false;
+ } else if (Tok.getString() == "nomicromips") {
+ // ignore this directive for now
+ Parser.EatToEndOfStatement();
+ return false;
+ }
return true;
}
-bool MipsAsmParser::
-ParseDirective(AsmToken DirectiveID) {
+bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
+
+ if (DirectiveID.getString() == ".ent") {
+ // ignore this directive for now
+ Parser.Lex();
+ return false;
+ }
+
+ if (DirectiveID.getString() == ".end") {
+ // ignore this directive for now
+ Parser.Lex();
+ return false;
+ }
+
+ if (DirectiveID.getString() == ".frame") {
+ // ignore this directive for now
+ Parser.EatToEndOfStatement();
+ return false;
+ }
+
+ if (DirectiveID.getString() == ".set") {
+ return parseDirectiveSet();
+ }
+
+ if (DirectiveID.getString() == ".fmask") {
+ // ignore this directive for now
+ Parser.EatToEndOfStatement();
+ return false;
+ }
+
+ if (DirectiveID.getString() == ".mask") {
+ // ignore this directive for now
+ Parser.EatToEndOfStatement();
+ return false;
+ }
+
+ if (DirectiveID.getString() == ".gpword") {
+ // ignore this directive for now
+ Parser.EatToEndOfStatement();
+ return false;
+ }
+
return true;
}
@@ -64,3 +1327,7 @@ extern "C" void LLVMInitializeMipsAsmParser() {
RegisterMCAsmParser<MipsAsmParser> A(TheMips64Target);
RegisterMCAsmParser<MipsAsmParser> B(TheMips64elTarget);
}
+
+#define GET_REGISTER_MATCHER
+#define GET_MATCHER_IMPLEMENTATION
+#include "MipsGenAsmMatcher.inc"
diff --git a/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index aa57472..82dbcc5 100644
--- a/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -108,6 +108,11 @@ static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -138,6 +143,11 @@ static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeACRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeBranchTarget(MCInst &Inst,
unsigned Offset,
uint64_t Address,
@@ -346,6 +356,13 @@ static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return DecodeCPURegsRegisterClass(Inst, RegNo, Address, Decoder);
+}
+
static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -463,6 +480,18 @@ static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeACRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo >= 4)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::ACRegsRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeBranchTarget(MCInst &Inst,
unsigned Offset,
uint64_t Address,
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index 790bed2..9a35bb6 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -92,7 +92,7 @@ public:
MCELFObjectTargetWriter::getOSABI(OSType), IsLittle, Is64Bit);
}
- /// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
+ /// ApplyFixup - Apply the \p Value for given \p Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
@@ -217,7 +217,7 @@ public:
///
/// \param Inst - The instruction to relax, which may be the same
/// as the output.
- /// \parm Res [output] - On return, the relaxed instruction.
+ /// \param [out] Res On return, the relaxed instruction.
void relaxInstruction(const MCInst &Inst, MCInst &Res) const {
}
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index 234455e..233214b 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -122,14 +122,16 @@ inline static unsigned getMipsRegisterNumbering(unsigned RegEnum)
{
switch (RegEnum) {
case Mips::ZERO: case Mips::ZERO_64: case Mips::F0: case Mips::D0_64:
- case Mips::D0:
+ case Mips::D0: case Mips::FCC0: case Mips::AC0:
return 0;
case Mips::AT: case Mips::AT_64: case Mips::F1: case Mips::D1_64:
+ case Mips::AC1:
return 1;
case Mips::V0: case Mips::V0_64: case Mips::F2: case Mips::D2_64:
- case Mips::D1:
+ case Mips::D1: case Mips::AC2:
return 2;
case Mips::V1: case Mips::V1_64: case Mips::F3: case Mips::D3_64:
+ case Mips::AC3:
return 3;
case Mips::A0: case Mips::A0_64: case Mips::F4: case Mips::D4_64:
case Mips::D2:
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp
new file mode 100644
index 0000000..15c4282
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp
@@ -0,0 +1,81 @@
+//===-- MipsDirectObjLower.cpp - Mips LLVM direct object lowering -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower Mips MCInst records that are normally
+// left to the assembler to lower such as large shifts.
+//
+//===----------------------------------------------------------------------===//
+#include "MipsInstrInfo.h"
+#include "MCTargetDesc/MipsDirectObjLower.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
+
+using namespace llvm;
+
+// If the D<shift> instruction has a shift amount that is greater
+// than 31 (checked in calling routine), lower it to a D<shift>32 instruction
+void Mips::LowerLargeShift(MCInst& Inst) {
+
+ assert(Inst.getNumOperands() == 3 && "Invalid no. of operands for shift!");
+ assert(Inst.getOperand(2).isImm());
+
+ int64_t Shift = Inst.getOperand(2).getImm();
+ if (Shift <= 31)
+ return; // Do nothing
+ Shift -= 32;
+
+ // Encode the remaining amount: sa - 32.
+ Inst.getOperand(2).setImm(Shift);
+
+ switch (Inst.getOpcode()) {
+ default:
+ // The caller's opcode check is out of sync with this switch.
+ llvm_unreachable("Unexpected shift instruction");
+ case Mips::DSLL:
+ Inst.setOpcode(Mips::DSLL32);
+ return;
+ case Mips::DSRL:
+ Inst.setOpcode(Mips::DSRL32);
+ return;
+ case Mips::DSRA:
+ Inst.setOpcode(Mips::DSRA32);
+ return;
+ }
+}
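
Concretely, "dsll $1, $2, 40" cannot be encoded directly because the sa field only holds 0-31, so the routine rewrites it to DSLL32 with sa = 40 - 32 = 8. A standalone mirror of that logic with toy opcode enums, independent of the LLVM headers:

    #include <cassert>
    #include <cstdint>

    enum ShiftOp { DSLL, DSRL, DSRA, DSLL32, DSRL32, DSRA32 };

    // Mirror of LowerLargeShift: sa in [32, 63] becomes the
    // 32-variant opcode with sa - 32.
    static void lowerLargeShift(ShiftOp &Op, int64_t &Sa) {
      if (Sa <= 31) return;                  // already encodable
      Sa -= 32;
      switch (Op) {
      case DSLL: Op = DSLL32; return;
      case DSRL: Op = DSRL32; return;
      case DSRA: Op = DSRA32; return;
      default:   assert(false && "not a large-shift candidate");
      }
    }

    int main() {
      ShiftOp Op = DSLL; int64_t Sa = 40;    // "dsll $1, $2, 40"
      lowerLargeShift(Op, Sa);
      assert(Op == DSLL32 && Sa == 8);       // "dsll32 $1, $2, 8"
    }
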
+
+// Pick a DEXT or DINS instruction variant based on the pos and size operands
+void Mips::LowerDextDins(MCInst& InstIn) {
+ int Opcode = InstIn.getOpcode();
+
+ if (Opcode == Mips::DEXT)
+ assert(InstIn.getNumOperands() == 4 &&
+ "Invalid no. of machine operands for DEXT!");
+ else // Only DEXT and DINS are possible
+ assert(InstIn.getNumOperands() == 5 &&
+ "Invalid no. of machine operands for DINS!");
+
+ assert(InstIn.getOperand(2).isImm());
+ int64_t pos = InstIn.getOperand(2).getImm();
+ assert(InstIn.getOperand(3).isImm());
+ int64_t size = InstIn.getOperand(3).getImm();
+
+ if (size <= 32) {
+ if (pos < 32) // DEXT/DINS, do nothing
+ return;
+ // DEXTU/DINSU
+ InstIn.getOperand(2).setImm(pos - 32);
+ InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTU : Mips::DINSU);
+ return;
+ }
+ // DEXTM/DINSM
+ assert(pos < 32 && "DEXT/DINS cannot have both size and pos > 32");
+ InstIn.getOperand(3).setImm(size - 32);
+ InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTM : Mips::DINSM);
+ return;
+}
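
The pos/size dispatch above reduces to a pure function over the two immediates. A standalone mirror, with the variant encoded as 0 = DEXT/DINS, 1 = DEXTU/DINSU (pos -= 32), 2 = DEXTM/DINSM (size -= 32); the numbering is illustrative:

    #include <cassert>

    static int selectDextDinsVariant(long &pos, long &size) {
      if (size <= 32) {
        if (pos < 32) return 0;  // base DEXT/DINS, operands unchanged
        pos -= 32;  return 1;    // DEXTU/DINSU
      }
      assert(pos < 32 && "field would extend past bit 63");
      size -= 32;   return 2;    // DEXTM/DINSM
    }

    int main() {
      long p = 36, s = 8;
      assert(selectDextDinsVariant(p, s) == 1 && p == 4);   // DEXTU
      p = 4; s = 40;
      assert(selectDextDinsVariant(p, s) == 2 && s == 8);   // DEXTM
    }
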
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h
new file mode 100644
index 0000000..8813cc9
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h
@@ -0,0 +1,28 @@
+//===-- MipsDirectObjLower.h - Mips LLVM direct object lowering *- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSDIRECTOBJLOWER_H
+#define MIPSDIRECTOBJLOWER_H
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+ class MCInst;
+ class MCStreamer;
+
+ namespace Mips {
+ /// MipsDirectObjLower - This namespace is used to lower an MCInst in
+ /// cases where the assembler usually finishes the lowering,
+ /// such as large shifts.
+ void LowerLargeShift(MCInst &Inst);
+ void LowerDextDins(MCInst &Inst);
+ }
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 8e84b3f..5d240fe 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -34,7 +34,8 @@ namespace {
class MipsELFObjectWriter : public MCELFObjectTargetWriter {
public:
- MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI, bool _isN64);
+ MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
+ bool _isN64, bool IsLittleEndian);
virtual ~MipsELFObjectWriter();
@@ -53,9 +54,9 @@ namespace {
}
MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
- bool _isN64)
+ bool _isN64, bool IsLittleEndian)
: MCELFObjectTargetWriter(_is64Bit, OSABI, ELF::EM_MIPS,
- /*HasRelocationAddend*/ false,
+ /*HasRelocationAddend*/ (_isN64) ? true : false,
/*IsN64*/ _isN64) {}
MipsELFObjectWriter::~MipsELFObjectWriter() {}
@@ -274,6 +275,7 @@ MCObjectWriter *llvm::createMipsELFObjectWriter(raw_ostream &OS,
bool IsLittleEndian,
bool Is64Bit) {
MCELFObjectTargetWriter *MOTW = new MipsELFObjectWriter(Is64Bit, OSABI,
- (Is64Bit) ? true : false);
+ (Is64Bit) ? true : false,
+ IsLittleEndian);
return createELFObjectWriter(MOTW, OS, IsLittleEndian);
}
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index 8dab62d..7fbdae0 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -13,6 +13,7 @@
//
#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MCTargetDesc/MipsDirectObjLower.h"
#include "MCTargetDesc/MipsFixupKinds.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/ADT/APFloat.h"
@@ -29,17 +30,14 @@ using namespace llvm;
namespace {
class MipsMCCodeEmitter : public MCCodeEmitter {
- MipsMCCodeEmitter(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT
- void operator=(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT
+ MipsMCCodeEmitter(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ void operator=(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION;
const MCInstrInfo &MCII;
- const MCSubtargetInfo &STI;
- MCContext &Ctx;
bool IsLittleEndian;
public:
- MipsMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx, bool IsLittle) :
- MCII(mcii), STI(sti) , Ctx(ctx), IsLittleEndian(IsLittle) {}
+ MipsMCCodeEmitter(const MCInstrInfo &mcii, bool IsLittle) :
+ MCII(mcii), IsLittleEndian(IsLittle) {}
~MipsMCCodeEmitter() {}
@@ -95,7 +93,7 @@ MCCodeEmitter *llvm::createMipsMCCodeEmitterEB(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx)
{
- return new MipsMCCodeEmitter(MCII, STI, Ctx, false);
+ return new MipsMCCodeEmitter(MCII, false);
}
MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
@@ -103,7 +101,7 @@ MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx)
{
- return new MipsMCCodeEmitter(MCII, STI, Ctx, true);
+ return new MipsMCCodeEmitter(MCII, true);
}
/// EncodeInstruction - Emit the instruction.
@@ -112,16 +110,35 @@ void MipsMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const
{
- uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
+
+ // Non-pseudo instructions that are changed for direct-object emission
+ // based only on operand values.
+ // If this list of instructions gets much longer, we will move
+ // the check into a function call. Until then, this is more efficient.
+ MCInst TmpInst = MI;
+ switch (MI.getOpcode()) {
+ // If the shift amount is >= 32, the instruction needs to be lowered further.
+ case Mips::DSLL:
+ case Mips::DSRL:
+ case Mips::DSRA:
+ Mips::LowerLargeShift(TmpInst);
+ break;
+ // The double extract/insert variant is chosen by the pos and size operands.
+ case Mips::DEXT:
+ case Mips::DINS:
+ Mips::LowerDextDins(TmpInst);
+ }
+
+ uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups);
// Check for unimplemented opcodes.
- // Unfortunately in MIPS both NOT and SLL will come in with Binary == 0
+ // Unfortunately in MIPS both NOP and SLL will come in with Binary == 0
// so we have to special check for them.
- unsigned Opcode = MI.getOpcode();
+ unsigned Opcode = TmpInst.getOpcode();
if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary)
llvm_unreachable("unimplemented opcode in EncodeInstruction()");
- const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
+ const MCInstrDesc &Desc = MCII.get(TmpInst.getOpcode());
uint64_t TSFlags = Desc.TSFlags;
// Pseudo instructions don't get encoded and shouldn't be here
@@ -129,8 +146,10 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
if ((TSFlags & MipsII::FormMask) == MipsII::Pseudo)
llvm_unreachable("Pseudo opcode found in EncodeInstruction()");
- // For now all instructions are 4 bytes
- int Size = 4; // FIXME: Have Desc.getSize() return the correct value!
+ // Get byte count of instruction
+ unsigned Size = Desc.getSize();
+ if (!Size)
+ llvm_unreachable("Desc.getSize() returns 0");
EmitInstruction(Binary, Size, OS);
}
@@ -143,7 +162,11 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(OpNo);
- assert(MO.isExpr() && "getBranchTargetOpValue expects only expressions");
+
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm()) return MO.getImm();
+ assert(MO.isExpr() &&
+ "getBranchTargetOpValue expects only expressions or immediates");
const MCExpr *Expr = MO.getExpr();
Fixups.push_back(MCFixup::Create(0, Expr,
@@ -159,7 +182,10 @@ getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(OpNo);
- assert(MO.isExpr() && "getJumpTargetOpValue expects only expressions");
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm()) return MO.getImm();
+ assert(MO.isExpr() &&
+ "getJumpTargetOpValue expects only expressions or an immediate");
const MCExpr *Expr = MO.getExpr();
Fixups.push_back(MCFixup::Create(0, Expr,
diff --git a/contrib/llvm/lib/Target/Mips/Mips.td b/contrib/llvm/lib/Target/Mips/Mips.td
index 90f7942..90c01d5 100644
--- a/contrib/llvm/lib/Target/Mips/Mips.td
+++ b/contrib/llvm/lib/Target/Mips/Mips.td
@@ -77,6 +77,10 @@ def FeatureMips64r2 : SubtargetFeature<"mips64r2", "MipsArchVersion",
def FeatureMips16 : SubtargetFeature<"mips16", "InMips16Mode", "true",
"Mips16 mode">;
+def FeatureDSP : SubtargetFeature<"dsp", "HasDSP", "true", "Mips DSP ASE">;
+def FeatureDSPR2 : SubtargetFeature<"dspr2", "HasDSPR2", "true",
+ "Mips DSP-R2 ASE", [FeatureDSP]>;
+
//===----------------------------------------------------------------------===//
// Mips processors supported.
//===----------------------------------------------------------------------===//
@@ -95,9 +99,20 @@ def MipsAsmWriter : AsmWriter {
bit isMCAsmWriter = 1;
}
+def MipsAsmParser : AsmParser {
+ let ShouldEmitMatchRegisterName = 0;
+}
+
+def MipsAsmParserVariant : AsmParserVariant {
+ int Variant = 0;
+
+ // Recognize hard coded registers.
+ string RegisterPrefix = "$";
+}
+
def Mips : Target {
let InstructionSet = MipsInstrInfo;
-
+ let AssemblyParsers = [MipsAsmParser];
let AssemblyWriters = [MipsAsmWriter];
+ let AssemblyParserVariants = [MipsAsmParserVariant];
}
-
diff --git a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
index 030042f..4e6b21f 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -20,7 +20,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
@@ -41,6 +41,11 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
// Adjust stack.
if (isInt<16>(-StackSize))
BuildMI(MBB, MBBI, dl, TII.get(Mips::SaveRaF16)).addImm(StackSize);
+
+ if (hasFP(MF))
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::MoveR3216), Mips::S0)
+ .addReg(Mips::SP);
+
}
void Mips16FrameLowering::emitEpilogue(MachineFunction &MF,
@@ -55,6 +60,10 @@ void Mips16FrameLowering::emitEpilogue(MachineFunction &MF,
if (!StackSize)
return;
+ if (hasFP(MF))
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::Move32R16), Mips::SP)
+ .addReg(Mips::S0);
+
// Adjust stack.
if (isInt<16>(StackSize))
// assumes stacksize multiple of 8
@@ -66,19 +75,58 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
- // FIXME: implement.
+ MachineFunction *MF = MBB.getParent();
+ MachineBasicBlock *EntryBlock = MF->begin();
+
+ //
+ // Registers RA, S0 and S1 are the callee-saved registers; they
+ // will be saved with the "save" instruction
+ // during emitPrologue.
+ //
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ // Add the callee-saved register as live-in. Do not add if the register is
+ // RA and return address is taken, because it has already been added in
+ // method MipsTargetLowering::LowerRETURNADDR.
+ // It's killed at the spill, unless the register is RA and return address
+ // is taken.
+ unsigned Reg = CSI[i].getReg();
+ bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA)
+ && MF->getFrameInfo()->isReturnAddressTaken();
+ if (!IsRAAndRetAddrIsTaken)
+ EntryBlock->addLiveIn(Reg);
+ }
+
+ return true;
+}
+
+bool Mips16FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ //
+ // Registers RA, S0 and S1 are the callee-saved registers; they will be
+ // restored by the "restore" instruction during emitEpilogue.
+ // We need to override this virtual function; otherwise llvm would try to
+ // restore the registers on its own from the stack.
+ //
+
return true;
}
bool
Mips16FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
- // FIXME: implement.
- return true;
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // Reserve the call frame if the maximum call frame size fits into the
+ // 15-bit immediate field and there are no variable-sized objects on the stack.
+ return isInt<15>(MFI->getMaxCallFrameSize()) && !MFI->hasVarSizedObjects();
}
void Mips16FrameLowering::
processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
+ MF.getRegInfo().setPhysRegUsed(Mips::RA);
+ MF.getRegInfo().setPhysRegUsed(Mips::S0);
+ MF.getRegInfo().setPhysRegUsed(Mips::S1);
}
const MipsFrameLowering *
diff --git a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h
index 25cc37b..01db71e 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h
@@ -32,6 +32,11 @@ public:
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const;
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
bool hasReservedCallFrame(const MachineFunction &MF) const;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index 2bc286b..619646b 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -25,7 +25,7 @@
using namespace llvm;
Mips16InstrInfo::Mips16InstrInfo(MipsTargetMachine &tm)
- : MipsInstrInfo(tm, /* FIXME: set mips16 unconditional br */ 0),
+ : MipsInstrInfo(tm, Mips::BimmX16),
RI(*tm.getSubtargetImpl(), *this) {}
const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
@@ -58,12 +58,22 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
- unsigned Opc = 0, ZeroReg = 0;
+ unsigned Opc = 0;
+
+ if (Mips::CPU16RegsRegClass.contains(DestReg) &&
+ Mips::CPURegsRegClass.contains(SrcReg))
+ Opc = Mips::MoveR3216;
+ else if (Mips::CPURegsRegClass.contains(DestReg) &&
+ Mips::CPU16RegsRegClass.contains(SrcReg))
+ Opc = Mips::Move32R16;
+ else if ((SrcReg == Mips::HI) &&
+ (Mips::CPU16RegsRegClass.contains(DestReg)))
+ Opc = Mips::Mfhi16, SrcReg = 0;
+
+ else if ((SrcReg == Mips::LO) &&
+ (Mips::CPU16RegsRegClass.contains(DestReg)))
+ Opc = Mips::Mflo16, SrcReg = 0;
- if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg.
- if (Mips::CPURegsRegClass.contains(SrcReg))
- Opc = Mips::Mov32R16;
- }
assert(Opc && "Cannot copy registers");
@@ -72,9 +82,6 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (DestReg)
MIB.addReg(DestReg, RegState::Define);
- if (ZeroReg)
- MIB.addReg(ZeroReg);
-
if (SrcReg)
MIB.addReg(SrcReg, getKillRegState(KillSrc));
}
@@ -84,7 +91,15 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(false && "Implement this function.");
+ DebugLoc DL;
+ if (I != MBB.end()) DL = I->getDebugLoc();
+ MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);
+ unsigned Opc = 0;
+ if (Mips::CPU16RegsRegClass.hasSubClassEq(RC))
+ Opc = Mips::SwRxSpImmX16;
+ assert(Opc && "Register class not handled!");
+ BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
}
void Mips16InstrInfo::
@@ -92,7 +107,16 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(false && "Implement this function.");
+ DebugLoc DL;
+ if (I != MBB.end()) DL = I->getDebugLoc();
+ MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
+ unsigned Opc = 0;
+
+ if (Mips::CPU16RegsRegClass.hasSubClassEq(RC))
+ Opc = Mips::LwRxSpImmX16;
+ assert(Opc && "Register class not handled!");
+ BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
}
bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
@@ -102,7 +126,7 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
default:
return false;
case Mips::RetRA16:
- ExpandRetRA16(MBB, MI, Mips::JrRa16);
+ ExpandRetRA16(MBB, MI, Mips::JrcRa16);
break;
}
@@ -113,12 +137,55 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
/// GetOppositeBranchOpc - Return the inverse of the specified
/// opcode, e.g. turning BEQ to BNE.
unsigned Mips16InstrInfo::GetOppositeBranchOpc(unsigned Opc) const {
+ switch (Opc) {
+ default: llvm_unreachable("Illegal opcode!");
+ case Mips::BeqzRxImmX16: return Mips::BnezRxImmX16;
+ case Mips::BnezRxImmX16: return Mips::BeqzRxImmX16;
+ case Mips::BteqzT8CmpX16: return Mips::BtnezT8CmpX16;
+ case Mips::BteqzT8SltX16: return Mips::BtnezT8SltX16;
+ case Mips::BteqzT8SltiX16: return Mips::BtnezT8SltiX16;
+ case Mips::BtnezX16: return Mips::BteqzX16;
+ case Mips::BtnezT8CmpiX16: return Mips::BteqzT8CmpiX16;
+ case Mips::BtnezT8SltuX16: return Mips::BteqzT8SltuX16;
+ case Mips::BtnezT8SltiuX16: return Mips::BteqzT8SltiuX16;
+ case Mips::BteqzX16: return Mips::BtnezX16;
+ case Mips::BteqzT8CmpiX16: return Mips::BtnezT8CmpiX16;
+ case Mips::BteqzT8SltuX16: return Mips::BtnezT8SltuX16;
+ case Mips::BteqzT8SltiuX16: return Mips::BtnezT8SltiuX16;
+ case Mips::BtnezT8CmpX16: return Mips::BteqzT8CmpX16;
+ case Mips::BtnezT8SltX16: return Mips::BteqzT8SltX16;
+ case Mips::BtnezT8SltiX16: return Mips::BteqzT8SltiX16;
+ }
assert(false && "Implement this function.");
return 0;
}
+/// Adjust SP by Amount bytes.
+void Mips16InstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
+ if (isInt<16>(Amount)) {
+ if (Amount < 0)
+ BuildMI(MBB, I, DL, get(Mips::SaveDecSpF16)). addImm(-Amount);
+ else if (Amount > 0)
+ BuildMI(MBB, I, DL, get(Mips::RestoreIncSpF16)).addImm(Amount);
+ }
+ else
+ // not implemented for large values yet
+ assert(false && "adjust stack pointer amount exceeded");
+}
+
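
So a prologue adjustment of -32 becomes "save 32" (SaveDecSpF16) and the matching epilogue adjustment of +32 becomes "restore 32" (RestoreIncSpF16); amounts outside the signed 16-bit range currently assert. The selection in isolation, with stand-in enum names for the two opcodes:

    #include <cassert>
    #include <cstdint>

    enum SpAdjustOp { SaveDecSp, RestoreIncSp };

    // Negative amounts shrink SP via "save", positive grow it via "restore".
    static SpAdjustOp pickSpOp(int64_t Amount, int64_t &Imm) {
      assert(Amount >= INT16_MIN && Amount <= INT16_MAX && Amount != 0);
      Imm = Amount < 0 ? -Amount : Amount;
      return Amount < 0 ? SaveDecSp : RestoreIncSp;
    }

    int main() {
      int64_t Imm;
      assert(pickSpOp(-32, Imm) == SaveDecSp && Imm == 32);
      assert(pickSpOp(+32, Imm) == RestoreIncSp && Imm == 32);
    }
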
unsigned Mips16InstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
- return 0;
+ return (Opc == Mips::BeqzRxImmX16 || Opc == Mips::BimmX16 ||
+ Opc == Mips::BnezRxImmX16 || Opc == Mips::BteqzX16 ||
+ Opc == Mips::BteqzT8CmpX16 || Opc == Mips::BteqzT8CmpiX16 ||
+ Opc == Mips::BteqzT8SltX16 || Opc == Mips::BteqzT8SltuX16 ||
+ Opc == Mips::BteqzT8SltiX16 || Opc == Mips::BteqzT8SltiuX16 ||
+ Opc == Mips::BtnezX16 || Opc == Mips::BtnezT8CmpX16 ||
+ Opc == Mips::BtnezT8CmpiX16 || Opc == Mips::BtnezT8SltX16 ||
+ Opc == Mips::BtnezT8SltuX16 || Opc == Mips::BtnezT8SltiX16 ||
+ Opc == Mips::BtnezT8SltiuX16 ) ? Opc : 0;
}
void Mips16InstrInfo::ExpandRetRA16(MachineBasicBlock &MBB,
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h
index 260c5b6..e06ccfe 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h
@@ -64,6 +64,10 @@ public:
virtual unsigned GetOppositeBranchOpc(unsigned Opc) const;
+ /// Adjust SP by Amount bytes.
+ void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+
private:
virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const;
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td
index 94cf984..5defc75 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td
@@ -10,21 +10,74 @@
// This file describes Mips16 instructions.
//
//===----------------------------------------------------------------------===//
+//
+//
+// Mips Address
+//
+def addr16 :
+ ComplexPattern<iPTR, 3, "SelectAddr16", [frameindex], [SDNPWantParent]>;
//
-// RRR-type instruction format
+// Address operand
+def mem16 : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops CPU16Regs, simm16, CPU16Regs);
+ let EncoderMethod = "getMemEncoding";
+}
+
+def mem16_ea : Operand<i32> {
+ let PrintMethod = "printMemOperandEA";
+ let MIOperandInfo = (ops CPU16Regs, simm16);
+ let EncoderMethod = "getMemEncoding";
+}
+
+//
+// Compare a register and immediate and place result in CC
+// Implicit use of T8
//
+// EXT-CCRR Instruction format
+//
+class FEXT_CCRXI16_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs CPU16Regs:$cc), (ins CPU16Regs:$rx, simm16:$imm),
+ !strconcat(asmstr, "\t$rx, $imm\n\tmove\t$cc, $$t8"), [], itin> {
+ let isCodeGenOnly=1;
+}
-class FRRR16_ins<bits<2> _f, string asmstr, InstrItinClass itin> :
- FRRR16<_f, (outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry),
- !strconcat(asmstr, "\t$rz, $rx, $ry"), [], itin>;
+//
+// EXT-I instruction format
+//
+class FEXT_I16_ins<bits<5> eop, string asmstr, InstrItinClass itin> :
+ FEXT_I16<eop, (outs), (ins brtarget:$imm16),
+ !strconcat(asmstr, "\t$imm16"),[], itin>;
//
-// I8_MOV32R instruction format (used only by MOV32R instruction)
+// EXT-I8 instruction format
//
-class FI8_MOV32R16_ins<string asmstr, InstrItinClass itin>:
- FI8_MOV32R16<(outs CPURegs:$r32), (ins CPU16Regs:$rz),
- !strconcat(asmstr, "\t$r32, $rz"), [], itin>;
+
+class FEXT_I816_ins_base<bits<3> _func, string asmstr,
+ string asmstr2, InstrItinClass itin>:
+ FEXT_I816<_func, (outs), (ins uimm16:$imm), !strconcat(asmstr, asmstr2),
+ [], itin>;
+
+class FEXT_I816_ins<bits<3> _func, string asmstr,
+ InstrItinClass itin>:
+ FEXT_I816_ins_base<_func, asmstr, "\t$imm", itin>;
+
+//
+// Assembler formats in alphabetical order.
+// Natural and pseudos are mixed together.
+//
+// Compare two registers and place result in CC
+// Implicit use of T8
+//
+// CC-RR Instruction format
+//
+class FCCRR16_ins<bits<5> f, string asmstr, InstrItinClass itin> :
+ FRR16<f, (outs CPU16Regs:$cc), (ins CPU16Regs:$rx, CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rx, $ry\n\tmove\t$cc, $$t8"), [], itin> {
+ let isCodeGenOnly=1;
+}
//
// EXT-RI instruction format
@@ -42,6 +95,10 @@ class FEXT_RI16_ins<bits<5> _op, string asmstr,
class FEXT_RI16_PC_ins<bits<5> _op, string asmstr, InstrItinClass itin>:
FEXT_RI16_ins_base<_op, asmstr, "\t$rx, $$pc, $imm", itin>;
+class FEXT_RI16_B_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs), (ins CPU16Regs:$rx, brtarget:$imm),
+ !strconcat(asmstr, "\t$rx, $imm"), [], itin>;
class FEXT_2RI16_ins<bits<5> _op, string asmstr,
InstrItinClass itin>:
@@ -51,6 +108,104 @@ class FEXT_2RI16_ins<bits<5> _op, string asmstr,
}
+// This has an explicit SP argument that we ignore, to work around a
+// problem in the compiler.
+class FEXT_RI16_SP_explicit_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPUSPReg:$ry, simm16:$imm),
+ !strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>;
+
+//
+// EXT-RRI instruction format
+//
+
+class FEXT_RRI16_mem_ins<bits<5> op, string asmstr, Operand MemOpnd,
+ InstrItinClass itin>:
+ FEXT_RRI16<op, (outs CPU16Regs:$ry), (ins MemOpnd:$addr),
+ !strconcat(asmstr, "\t$ry, $addr"), [], itin>;
+
+class FEXT_RRI16_mem2_ins<bits<5> op, string asmstr, Operand MemOpnd,
+ InstrItinClass itin>:
+ FEXT_RRI16<op, (outs ), (ins CPU16Regs:$ry, MemOpnd:$addr),
+ !strconcat(asmstr, "\t$ry, $addr"), [], itin>;
+
+//
+//
+// EXT-RRI-A instruction format
+//
+
+class FEXT_RRI_A16_mem_ins<bits<1> op, string asmstr, Operand MemOpnd,
+ InstrItinClass itin>:
+ FEXT_RRI_A16<op, (outs CPU16Regs:$ry), (ins MemOpnd:$addr),
+ !strconcat(asmstr, "\t$ry, $addr"), [], itin>;
+
+//
+// EXT-SHIFT instruction format
+//
+class FEXT_SHIFT16_ins<bits<2> _f, string asmstr, InstrItinClass itin>:
+ FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, shamt:$sa),
+ !strconcat(asmstr, "\t$rx, $ry, $sa"), [], itin>;
+
+//
+// EXT-T8I8
+//
+class FEXT_T8I816_ins<bits<3> _func, string asmstr, string asmstr2,
+ InstrItinClass itin>:
+ FEXT_I816<_func, (outs),
+ (ins CPU16Regs:$rx, CPU16Regs:$ry, brtarget:$imm),
+ !strconcat(asmstr2, !strconcat("\t$rx, $ry\n\t",
+ !strconcat(asmstr, "\t$imm"))),[], itin> {
+ let isCodeGenOnly=1;
+}
+
+//
+// EXT-T8I8I
+//
+class FEXT_T8I8I16_ins<bits<3> _func, string asmstr, string asmstr2,
+ InstrItinClass itin>:
+ FEXT_I816<_func, (outs),
+ (ins CPU16Regs:$rx, simm16:$imm, brtarget:$targ),
+ !strconcat(asmstr2, !strconcat("\t$rx, $imm\n\t",
+ !strconcat(asmstr, "\t$targ"))), [], itin> {
+ let isCodeGenOnly=1;
+}
+//
+
+
+//
+// I8_MOVR32 instruction format (used only by the MOVR32 instruction)
+//
+class FI8_MOVR3216_ins<string asmstr, InstrItinClass itin>:
+ FI8_MOVR3216<(outs CPU16Regs:$rz), (ins CPURegs:$r32),
+ !strconcat(asmstr, "\t$rz, $r32"), [], itin>;
+
+//
+// I8_MOV32R instruction format (used only by MOV32R instruction)
+//
+
+class FI8_MOV32R16_ins<string asmstr, InstrItinClass itin>:
+ FI8_MOV32R16<(outs CPURegs:$r32), (ins CPU16Regs:$rz),
+ !strconcat(asmstr, "\t$r32, $rz"), [], itin>;
+
+//
+// These are pseudo formats for multiply.
+// The first one could be changed to a non-pseudo now.
+//
+// MULT
+//
+class FMULT16_ins<string asmstr, InstrItinClass itin> :
+ MipsPseudo16<(outs), (ins CPU16Regs:$rx, CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rx, $ry"), []>;
+
+//
+// MULT-LO
+//
+class FMULT16_LO_ins<string asmstr, InstrItinClass itin> :
+ MipsPseudo16<(outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rx, $ry\n\tmflo\t$rz"), []> {
+ let isCodeGenOnly=1;
+}
+
//
// RR-type instruction format
//
@@ -60,6 +215,27 @@ class FRR16_ins<bits<5> f, string asmstr, InstrItinClass itin> :
!strconcat(asmstr, "\t$rx, $ry"), [], itin> {
}
+class FRRTR16_ins<bits<5> f, string asmstr, InstrItinClass itin> :
+ FRR16<f, (outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rx, $ry\n\tmove\t$rz, $$t8"), [], itin> ;
+
+//
+// Maybe refactor, but we need $zero as a dummy first parameter.
+//
+class FRR16_div_ins<bits<5> f, string asmstr, InstrItinClass itin> :
+ FRR16<f, (outs ), (ins CPU16Regs:$rx, CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$$zero, $rx, $ry"), [], itin> ;
+
+class FUnaryRR16_ins<bits<5> f, string asmstr, InstrItinClass itin> :
+ FRR16<f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rx, $ry"), [], itin> ;
+
+
+class FRR16_M_ins<bits<5> f, string asmstr,
+ InstrItinClass itin> :
+ FRR16<f, (outs CPU16Regs:$rx), (ins),
+ !strconcat(asmstr, "\t$rx"), [], itin>;
+
class FRxRxRy16_ins<bits<5> f, string asmstr,
InstrItinClass itin> :
FRR16<f, (outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry),
@@ -74,35 +250,109 @@ class FRR16_JALRC_RA_only_ins<bits<1> nd_, bits<1> l_,
FRR16_JALRC<nd_, l_, 1, (outs), (ins), !strconcat(asmstr, "\t $$ra"),
[], itin> ;
+
+class FRR16_JALRC_ins<bits<1> nd, bits<1> l, bits<1> ra,
+ string asmstr, InstrItinClass itin>:
+ FRR16_JALRC<nd, l, ra, (outs), (ins CPU16Regs:$rx),
+ !strconcat(asmstr, "\t $rx"), [], itin> ;
+
//
-// EXT-RRI instruction format
+// RRR-type instruction format
//
-class FEXT_RRI16_mem_ins<bits<5> op, string asmstr, Operand MemOpnd,
- InstrItinClass itin>:
- FEXT_RRI16<op, (outs CPU16Regs:$ry), (ins MemOpnd:$addr),
- !strconcat(asmstr, "\t$ry, $addr"), [], itin>;
+class FRRR16_ins<bits<2> _f, string asmstr, InstrItinClass itin> :
+ FRRR16<_f, (outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rz, $rx, $ry"), [], itin>;
-class FEXT_RRI16_mem2_ins<bits<5> op, string asmstr, Operand MemOpnd,
- InstrItinClass itin>:
- FEXT_RRI16<op, (outs ), (ins CPU16Regs:$ry, MemOpnd:$addr),
- !strconcat(asmstr, "\t$ry, $addr"), [], itin>;
+//
+// These Sel patterns support the generation of conditional move
+// pseudo instructions.
+//
+// The nomenclature uses the components making up the pseudo and may
+// be a bit counterintuitive when compared with the end result we seek.
+// For example, using a beqz in the example directly below results in the
+// conditional move being done if the tested register is not zero.
+// I considered it easier to check by keeping the pseudo consistent with
+// its components, but it could have been done differently.
+//
+// The simplest case is when we can test an operand directly and do the
+// conditional move based on a simple mips16 conditional
+// branch instruction.
+// for example:
+// if $op == beqz or bnez:
+//
+// $op1 $rt, .+4
+// move $rd, $rs
+//
+// if $op == beqz, then if $rt != 0, then the conditional assignment
+// $rd = $rs is done.
+// if $op == bnez, then if $rt == 0, then the conditional assignment
+// $rd = $rs is done.
//
-// EXT-SHIFT instruction format
+// So this pseudo class only has one operand, i.e. op
//
-class FEXT_SHIFT16_ins<bits<2> _f, string asmstr, InstrItinClass itin>:
- FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, shamt:$sa),
- !strconcat(asmstr, "\t$rx, $ry, $sa"), [], itin>;
+class Sel<bits<5> f1, string op, InstrItinClass itin>:
+ MipsInst16_32<(outs CPU16Regs:$rd_), (ins CPU16Regs:$rd, CPU16Regs:$rs,
+ CPU16Regs:$rt),
+ !strconcat(op, "\t$rt, .+4\n\t\n\tmove $rd, $rs"), [], itin,
+ Pseudo16> {
+ let isCodeGenOnly=1;
+ let Constraints = "$rd = $rd_";
+}
//
-// Address operand
-def mem16 : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPU16Regs, simm16);
- let EncoderMethod = "getMemEncoding";
+// The next two instruction classes allow for an op which tests
+// two operands, returns a value in register T8, and
+// then does a conditional branch based on the value of T8.
+//
+
+// op2 can be cmpi or slti/sltiu
+// op1 can be bteqz or btnez
+// the operands for op2 are a register and a signed constant
+//
+// $op2 $t, $imm ;test register $t against $imm, result goes to T8
+// $op1 .+4 ;op1 is a conditional branch
+// move $rd, $rs
+//
+//
+class SeliT<bits<5> f1, string op1, bits<5> f2, string op2,
+ InstrItinClass itin>:
+ MipsInst16_32<(outs CPU16Regs:$rd_), (ins CPU16Regs:$rd, CPU16Regs:$rs,
+ CPU16Regs:$rl, simm16:$imm),
+ !strconcat(op2,
+ !strconcat("\t$rl, $imm\n\t",
+ !strconcat(op1, "\t.+4\n\tmove $rd, $rs"))), [], itin,
+ Pseudo16> {
+ let isCodeGenOnly=1;
+ let Constraints = "$rd = $rd_";
+}
+
+//
+// op2 can be cmp or slt/sltu
+// op1 can be bteqz or btnez
+// the operands for op2 are two registers
+// op1 is a conditional branch
+//
+//
+// $op2 $rl, $rr ;test registers rl,rr
+// $op1 .+4 ;op1 is a conditional branch
+// move $rd, $rs
+//
+//
+class SelT<bits<5> f1, string op1, bits<5> f2, string op2,
+ InstrItinClass itin>:
+ MipsInst16_32<(outs CPU16Regs:$rd_), (ins CPU16Regs:$rd, CPU16Regs:$rs,
+ CPU16Regs:$rl, CPU16Regs:$rr),
+ !strconcat(op2,
+ !strconcat("\t$rl, $rr\n\t",
+ !strconcat(op1, "\t.+4\n\tmove $rd, $rs"))), [], itin,
+ Pseudo16> {
+ let isCodeGenOnly=1;
+ let Constraints = "$rd = $rd_";
}
+
//
// Some general instruction class info
//
@@ -115,6 +365,24 @@ class ArithLogic16Defs<bit isCom=0> {
bit neverHasSideEffects = 1;
}
+class branch16 {
+ bit isBranch = 1;
+ bit isTerminator = 1;
+ bit isBarrier = 1;
+}
+
+class cbranch16 {
+ bit isBranch = 1;
+ bit isTerminator = 1;
+}
+
+class MayLoad {
+ bit mayLoad = 1;
+}
+
+class MayStore {
+ bit mayStore = 1;
+}
//
// Format: ADDIU rx, immediate MIPS16e
@@ -126,6 +394,9 @@ def AddiuRxImmX16: FEXT_RI16_ins<0b01001, "addiu", IIAlu>;
def AddiuRxRxImmX16: FEXT_2RI16_ins<0b01001, "addiu", IIAlu>,
ArithLogic16Defs<0>;
+def AddiuRxRyOffMemX16:
+ FEXT_RRI_A16_mem_ins<0, "addiu", mem16_ea, IIAlu>;
+
//
// Format: ADDIU rx, pc, immediate MIPS16e
@@ -148,6 +419,87 @@ def AdduRxRyRz16: FRRR16_ins<01, "addu", IIAlu>, ArithLogic16Defs<1>;
def AndRxRxRy16: FRxRxRy16_ins<0b01100, "and", IIAlu>, ArithLogic16Defs<1>;
+
+//
+// Format: BEQZ rx, offset MIPS16e
+// Purpose: Branch on Equal to Zero (Extended)
+// To test a GPR then do a PC-relative conditional branch.
+//
+def BeqzRxImmX16: FEXT_RI16_B_ins<0b00100, "beqz", IIAlu>, cbranch16;
+
+// Format: B offset MIPS16e
+// Purpose: Unconditional Branch
+// To do an unconditional PC-relative branch.
+//
+def BimmX16: FEXT_I16_ins<0b00010, "b", IIAlu>, branch16;
+
+//
+// Format: BNEZ rx, offset MIPS16e
+// Purpose: Branch on Not Equal to Zero (Extended)
+// To test a GPR then do a PC-relative conditional branch.
+//
+def BnezRxImmX16: FEXT_RI16_B_ins<0b00101, "bnez", IIAlu>, cbranch16;
+
+//
+// Format: BTEQZ offset MIPS16e
+// Purpose: Branch on T Equal to Zero (Extended)
+// To test special register T then do a PC-relative conditional branch.
+//
+def BteqzX16: FEXT_I816_ins<0b000, "bteqz", IIAlu>, cbranch16;
+
+def BteqzT8CmpX16: FEXT_T8I816_ins<0b000, "bteqz", "cmp", IIAlu>, cbranch16;
+
+def BteqzT8CmpiX16: FEXT_T8I8I16_ins<0b000, "bteqz", "cmpi", IIAlu>,
+ cbranch16;
+
+def BteqzT8SltX16: FEXT_T8I816_ins<0b000, "bteqz", "slt", IIAlu>, cbranch16;
+
+def BteqzT8SltuX16: FEXT_T8I816_ins<0b000, "bteqz", "sltu", IIAlu>, cbranch16;
+
+def BteqzT8SltiX16: FEXT_T8I8I16_ins<0b000, "bteqz", "slti", IIAlu>, cbranch16;
+
+def BteqzT8SltiuX16: FEXT_T8I8I16_ins<0b000, "bteqz", "sltiu", IIAlu>,
+ cbranch16;
+
+//
+// Format: BTNEZ offset MIPS16e
+// Purpose: Branch on T Not Equal to Zero (Extended)
+// To test special register T then do a PC-relative conditional branch.
+//
+def BtnezX16: FEXT_I816_ins<0b001, "btnez", IIAlu> ,cbranch16;
+
+def BtnezT8CmpX16: FEXT_T8I816_ins<0b000, "btnez", "cmp", IIAlu>, cbranch16;
+
+def BtnezT8CmpiX16: FEXT_T8I8I16_ins<0b000, "btnez", "cmpi", IIAlu>, cbranch16;
+
+def BtnezT8SltX16: FEXT_T8I816_ins<0b000, "btnez", "slt", IIAlu>, cbranch16;
+
+def BtnezT8SltuX16: FEXT_T8I816_ins<0b000, "btnez", "sltu", IIAlu>, cbranch16;
+
+def BtnezT8SltiX16: FEXT_T8I8I16_ins<0b000, "btnez", "slti", IIAlu>, cbranch16;
+
+def BtnezT8SltiuX16: FEXT_T8I8I16_ins<0b000, "btnez", "sltiu", IIAlu>,
+ cbranch16;
+
+//
+// Format: DIV rx, ry MIPS16e
+// Purpose: Divide Word
+// To divide 32-bit signed integers.
+//
+def DivRxRy16: FRR16_div_ins<0b11010, "div", IIAlu> {
+ let Defs = [HI, LO];
+}
+
+//
+// Format: DIVU rx, ry MIPS16e
+// Purpose: Divide Unsigned Word
+// To divide 32-bit unsigned integers.
+//
+def DivuRxRy16: FRR16_div_ins<0b11011, "divu", IIAlu> {
+ let Defs = [HI, LO];
+}
+
+
//
// Format: JR ra MIPS16e
// Purpose: Jump Register Through Register ra
@@ -155,35 +507,56 @@ def AndRxRxRy16: FRxRxRy16_ins<0b01100, "and", IIAlu>, ArithLogic16Defs<1>;
// address register.
//
-def JrRa16: FRR16_JALRC_RA_only_ins<0, 0, "jr", IIAlu>;
+def JrRa16: FRR16_JALRC_RA_only_ins<0, 0, "jr", IIAlu> {
+ let isBranch = 1;
+ let isIndirectBranch = 1;
+ let hasDelaySlot = 1;
+ let isTerminator=1;
+ let isBarrier=1;
+}
+
+def JrcRa16: FRR16_JALRC_RA_only_ins<0, 0, "jrc", IIAlu> {
+ let isBranch = 1;
+ let isIndirectBranch = 1;
+ let isTerminator=1;
+ let isBarrier=1;
+}
+def JrcRx16: FRR16_JALRC_ins<1, 1, 0, "jrc", IIAlu> {
+ let isBranch = 1;
+ let isIndirectBranch = 1;
+ let isTerminator=1;
+ let isBarrier=1;
+}
//
// Format: LB ry, offset(rx) MIPS16e
// Purpose: Load Byte (Extended)
// To load a byte from memory as a signed value.
//
-def LbRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lb", mem16, IIAlu>;
+def LbRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lb", mem16, IILoad>, MayLoad;
//
// Format: LBU ry, offset(rx) MIPS16e
// Purpose: Load Byte Unsigned (Extended)
+// To load a byte from memory as an unsigned value.
//
-def LbuRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lbu", mem16, IIAlu>;
+def LbuRxRyOffMemX16:
+ FEXT_RRI16_mem_ins<0b10100, "lbu", mem16, IILoad>, MayLoad;
//
// Format: LH ry, offset(rx) MIPS16e
// Purpose: Load Halfword signed (Extended)
// To load a halfword from memory as a signed value.
//
-def LhRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lh", mem16, IIAlu>;
+def LhRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lh", mem16, IILoad>, MayLoad;
//
// Format: LHU ry, offset(rx) MIPS16e
// Purpose: Load Halfword unsigned (Extended)
// To load a halfword from memory as an unsigned value.
//
-def LhuRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lhu", mem16, IIAlu>;
+def LhuRxRyOffMemX16:
+ FEXT_RRI16_mem_ins<0b10100, "lhu", mem16, IILoad>, MayLoad;
//
// Format: LI rx, immediate MIPS16e
@@ -197,28 +570,98 @@ def LiRxImmX16: FEXT_RI16_ins<0b01101, "li", IIAlu>;
// Purpose: Load Word (Extended)
// To load a word from memory as a signed value.
//
-def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IIAlu>;
+def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IILoad>, MayLoad;
+
+// Format: LW rx, offset(sp) MIPS16e
+// Purpose: Load Word (SP-Relative, Extended)
+// To load an SP-relative word from memory as a signed value.
+//
+def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10110, "lw", IILoad>, MayLoad;
//
// Format: MOVE r32, rz MIPS16e
// Purpose: Move
// To move the contents of a GPR to a GPR.
//
-def Mov32R16: FI8_MOV32R16_ins<"move", IIAlu>;
+def Move32R16: FI8_MOV32R16_ins<"move", IIAlu>;
+
+//
+// Format: MOVE ry, r32 MIPS16e
+// Purpose: Move
+// To move the contents of a GPR to a GPR.
+//
+def MoveR3216: FI8_MOVR3216_ins<"move", IIAlu>;
+
+//
+// Format: MFHI rx MIPS16e
+// Purpose: Move From HI Register
+// To copy the special purpose HI register to a GPR.
+//
+def Mfhi16: FRR16_M_ins<0b10000, "mfhi", IIAlu> {
+ let Uses = [HI];
+ let neverHasSideEffects = 1;
+}
+
+//
+// Format: MFLO rx MIPS16e
+// Purpose: Move From LO Register
+// To copy the special purpose LO register to a GPR.
+//
+def Mflo16: FRR16_M_ins<0b10010, "mflo", IIAlu> {
+ let Uses = [LO];
+ let neverHasSideEffects = 1;
+}
+
+//
+// Pseudo Instruction for mult
+//
+def MultRxRy16: FMULT16_ins<"mult", IIAlu> {
+ let isCommutable = 1;
+ let neverHasSideEffects = 1;
+ let Defs = [HI, LO];
+}
+
+def MultuRxRy16: FMULT16_ins<"multu", IIAlu> {
+ let isCommutable = 1;
+ let neverHasSideEffects = 1;
+ let Defs = [HI, LO];
+}
+
+//
+// Format: MULT rx, ry MIPS16e
+// Purpose: Multiply Word
+// To multiply 32-bit signed integers.
+//
+def MultRxRyRz16: FMULT16_LO_ins<"mult", IIAlu> {
+ let isCommutable = 1;
+ let neverHasSideEffects = 1;
+ let Defs = [HI, LO];
+}
+
+//
+// Format: MULTU rx, ry MIPS16e
+// Purpose: Multiply Unsigned Word
+// To multiply 32-bit unsigned integers.
+//
+def MultuRxRyRz16: FMULT16_LO_ins<"multu", IIAlu> {
+ let isCommutable = 1;
+ let neverHasSideEffects = 1;
+ let Defs = [HI, LO];
+}
//
// Format: NEG rx, ry MIPS16e
// Purpose: Negate
// To negate an integer value.
//
-def NegRxRy16: FRR16_ins<0b11101, "neg", IIAlu>;
+def NegRxRy16: FUnaryRR16_ins<0b11101, "neg", IIAlu>;
//
// Format: NOT rx, ry MIPS16e
// Purpose: Not
// To complement an integer value
//
-def NotRxRy16: FRR16_ins<0b01111, "not", IIAlu>;
+def NotRxRy16: FUnaryRR16_ins<0b01111, "not", IIAlu>;
//
// Format: OR rx, ry MIPS16e
@@ -240,10 +683,22 @@ def OrRxRxRy16: FRxRxRy16_ins<0b01101, "or", IIAlu>, ArithLogic16Defs<1>;
// for direct object emitter, encoding needs to be adjusted for the
// frame size
//
-let ra=1, s=0,s0=0,s1=0 in
+let ra=1, s=0,s0=1,s1=1 in
def RestoreRaF16:
FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "restore \t$$ra, $frame_size", [], IILoad >;
+ "restore\t$$ra, $$s0, $$s1, $frame_size", [], IILoad >, MayLoad {
+ let isCodeGenOnly = 1;
+}
+
+// Use Restore to increment SP, since SP is not a Mips16 register; this
+// is an easy way to adjust SP without needing a scratch register.
+//
+let ra=0, s=0,s0=0,s1=0 in
+def RestoreIncSpF16:
+ FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
+ "restore\t$frame_size", [], IILoad >, MayLoad {
+ let isCodeGenOnly = 1;
+}
//
// Format: SAVE {ra,}{s0/s1/s0-1,}{framesize} (All arguments are optional)
@@ -252,24 +707,152 @@ def RestoreRaF16:
// To set up a stack frame on entry to a subroutine,
// saving return address and static registers, and adjusting stack
//
-let ra=1, s=1,s0=0,s1=0 in
+let ra=1, s=1,s0=1,s1=1 in
def SaveRaF16:
FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "save \t$$ra, $frame_size", [], IILoad >;
+ "save\t$$ra, $$s0, $$s1, $frame_size", [], IIStore >, MayStore {
+ let isCodeGenOnly = 1;
+}
//
+// Use Save to decrement the SP by a constant since SP is not
+// a Mips16 register.
+//
+let ra=0, s=0,s0=0,s1=0 in
+def SaveDecSpF16:
+ FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
+ "save\t$frame_size", [], IIStore >, MayStore {
+ let isCodeGenOnly = 1;
+}
+//
// Format: SB ry, offset(rx) MIPS16e
// Purpose: Store Byte (Extended)
// To store a byte to memory.
//
-def SbRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, IIAlu>;
+def SbRxRyOffMemX16:
+ FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, IIStore>, MayStore;
//
+// The Sel(T) instructions are pseudos
+// T means that they use T8 implicitly.
+//
+//
+// Format: SelBeqZ rd, rs, rt
+// Purpose: if rt==0, do nothing
+//          else rd = rs
+//
+def SelBeqZ: Sel<0b00100, "beqz", IIAlu>;
+
+//
+// Format: SelTBteqZCmp rd, rs, rl, rr
+// Purpose: b = Cmp rl, rr.
+// If b==0 then do nothing.
+// if b!=0 then rd = rs
+//
+def SelTBteqZCmp: SelT<0b000, "bteqz", 0b01010, "cmp", IIAlu>;
+
+//
+// Format: SelTBteqZCmpi rd, rs, rl, rr
+// Purpose: b = Cmpi rl, imm.
+// If b==0 then do nothing.
+// if b!=0 then rd = rs
+//
+def SelTBteqZCmpi: SeliT<0b000, "bteqz", 0b01110, "cmpi", IIAlu>;
+
+//
+// Format: SelTBteqZSlt rd, rs, rl, rr
+// Purpose: b = Slt rl, rr.
+// If b==0 then do nothing.
+// if b!=0 then rd = rs
+//
+def SelTBteqZSlt: SelT<0b000, "bteqz", 0b00010, "slt", IIAlu>;
+
+//
+// Format: SelTBteqZSlti rd, rs, rl, rr
+// Purpose: b = Slti rl, imm.
+// If b==0 then do nothing.
+// if b!=0 then rd = rs
+//
+def SelTBteqZSlti: SeliT<0b000, "bteqz", 0b01010, "slti", IIAlu>;
+
+//
+// Format: SelTBteqZSltu rd, rs, rl, rr
+// Purpose: b = Sltu rl, rr.
+// If b==0 then do nothing.
+// if b!=0 then rd = rs
+//
+def SelTBteqZSltu: SelT<0b000, "bteqz", 0b00011, "sltu", IIAlu>;
+
+//
+// Format: SelTBteqZSltiu rd, rs, rl, rr
+// Purpose: b = Sltiu rl, imm.
+// If b==0 then do nothing.
+// if b!=0 then rd = rs
+//
+def SelTBteqZSltiu: SeliT<0b000, "bteqz", 0b01011, "sltiu", IIAlu>;
+
+//
+// Format: SelBneZ rd, rs, rt
+// Purpose: if rt!=0, do nothing
+//          else rd = rs
+//
+def SelBneZ: Sel<0b00101, "bnez", IIAlu>;
+
+//
+// Format: SelTBtneZCmp rd, rs, rl, rr
+// Purpose: b = Cmp rl, rr.
+// If b!=0 then do nothing.
+// if b==0 then rd = rs
+//
+def SelTBtneZCmp: SelT<0b001, "btnez", 0b01010, "cmp", IIAlu>;
+
+//
+// Format: SelTBtneZCmpi rd, rs, rl, rr
+// Purpose: b = Cmpi rl, imm.
+// If b!=0 then do nothing.
+// if b==0 then rd = rs
+//
+def SelTBtneZCmpi: SeliT<0b000, "btnez", 0b01110, "cmpi", IIAlu>;
+
+//
+// Format: SelTBtneZSlt rd, rs, rl, rr
+// Purpose: b = Slt rl, rr.
+// If b!=0 then do nothing.
+// if b==0 then rd = rs
+//
+def SelTBtneZSlt: SelT<0b001, "btnez", 0b00010, "slt", IIAlu>;
+
+//
+// Format: SelTBtneZSlti rd, rs, rl, rr
+// Purpose: b = Slti rl, imm.
+// If b!=0 then do nothing.
+// if b==0 then rd = rs
+//
+def SelTBtneZSlti: SeliT<0b001, "btnez", 0b01010, "slti", IIAlu>;
+
+//
+// Format: SelTBtneZSltu rd, rs, rl, rr
+// Purpose: b = Sltu rl, rr.
+// If b!=0 then do nothing.
+// if b==0 then rd = rs
+//
+def SelTBtneZSltu: SelT<0b001, "btnez", 0b00011, "sltu", IIAlu>;
+
+//
+// Format: SelTBtneZSltiu rd, rs, rl, rr
+// Purpose: b = Sltiu rl, imm.
+// If b!=0 then do nothing.
+// if b==0 then rd = rs
+//
+def SelTBtneZSltiu: SeliT<0b001, "btnez", 0b01011, "sltiu", IIAlu>;
+//
+//
// Format: SH ry, offset(rx) MIPS16e
// Purpose: Store Halfword (Extended)
// To store a halfword to memory.
//
-def ShRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11001, "sh", mem16, IIAlu>;
+def ShRxRyOffMemX16:
+ FEXT_RRI16_mem2_ins<0b11001, "sh", mem16, IIStore>, MayStore;
//
// Format: SLL rx, ry, sa MIPS16e
@@ -285,8 +868,40 @@ def SllX16: FEXT_SHIFT16_ins<0b00, "sll", IIAlu>;
//
def SllvRxRy16 : FRxRxRy16_ins<0b00100, "sllv", IIAlu>;
+//
+// Format: SLTI rx, immediate MIPS16e
+// Purpose: Set on Less Than Immediate (Extended)
+// To record the result of a less-than comparison with a constant.
+//
+def SltiCCRxImmX16: FEXT_CCRXI16_ins<0b01010, "slti", IIAlu>;
//
+// Format: SLTIU rx, immediate MIPS16e
+// Purpose: Set on Less Than Immediate Unsigned (Extended)
+// To record the result of a less-than comparison with a constant.
+//
+def SltiuCCRxImmX16: FEXT_CCRXI16_ins<0b01011, "sltiu", IIAlu>;
+
+//
+// Format: SLT rx, ry MIPS16e
+// Purpose: Set on Less Than
+// To record the result of a less-than comparison.
+//
+def SltRxRy16: FRR16_ins<0b00010, "slt", IIAlu>;
+
+def SltCCRxRy16: FCCRR16_ins<0b00010, "slt", IIAlu>;
+
+// Format: SLTU rx, ry MIPS16e
+// Purpose: Set on Less Than Unsigned
+// To record the result of an unsigned less-than comparison.
+//
+def SltuRxRyRz16: FRRTR16_ins<0b00011, "sltu", IIAlu> {
+ let isCodeGenOnly=1;
+}
+
+
+def SltuCCRxRy16: FCCRR16_ins<0b00011, "sltu", IIAlu>;
+//
// Format: SRAV ry, rx MIPS16e
// Purpose: Shift Word Right Arithmetic Variable
// To execute an arithmetic right-shift of a word by a variable
@@ -333,9 +948,18 @@ def SubuRxRyRz16: FRRR16_ins<0b11, "subu", IIAlu>, ArithLogic16Defs<0>;
// Purpose: Store Word (Extended)
// To store a word to memory.
//
-def SwRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, IIAlu>;
+def SwRxRyOffMemX16:
+ FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, IIStore>, MayStore;
//
+// Format: SW rx, offset(sp) MIPS16e
+// Purpose: Store Word rx (SP-Relative)
+// To store an SP-relative word to memory.
+//
+def SwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b11010, "sw", IIStore>, MayStore;
+
+//
+//
// Format: XOR rx, ry MIPS16e
// Purpose: Xor
// To do a bitwise logical XOR.
@@ -361,6 +985,7 @@ class ArithLogic16_pat<SDNode OpNode, Instruction I> :
def: ArithLogic16_pat<add, AdduRxRyRz16>;
def: ArithLogic16_pat<and, AndRxRxRy16>;
+def: ArithLogic16_pat<mul, MultRxRyRz16>;
def: ArithLogic16_pat<or, OrRxRxRy16>;
def: ArithLogic16_pat<sub, SubuRxRyRz16>;
def: ArithLogic16_pat<xor, XorRxRxRy16>;
@@ -385,35 +1010,533 @@ def: shift_rotate_reg16_pat<sra, SravRxRy16>;
def: shift_rotate_reg16_pat<srl, SrlvRxRy16>;
class LoadM16_pat<PatFrag OpNode, Instruction I> :
- Mips16Pat<(OpNode addr:$addr), (I addr:$addr)>;
+ Mips16Pat<(OpNode addr16:$addr), (I addr16:$addr)>;
def: LoadM16_pat<sextloadi8, LbRxRyOffMemX16>;
def: LoadM16_pat<zextloadi8, LbuRxRyOffMemX16>;
-def: LoadM16_pat<sextloadi16_a, LhRxRyOffMemX16>;
-def: LoadM16_pat<zextloadi16_a, LhuRxRyOffMemX16>;
-def: LoadM16_pat<load_a, LwRxRyOffMemX16>;
+def: LoadM16_pat<sextloadi16, LhRxRyOffMemX16>;
+def: LoadM16_pat<zextloadi16, LhuRxRyOffMemX16>;
+def: LoadM16_pat<load, LwRxRyOffMemX16>;
class StoreM16_pat<PatFrag OpNode, Instruction I> :
- Mips16Pat<(OpNode CPU16Regs:$r, addr:$addr), (I CPU16Regs:$r, addr:$addr)>;
+ Mips16Pat<(OpNode CPU16Regs:$r, addr16:$addr),
+ (I CPU16Regs:$r, addr16:$addr)>;
def: StoreM16_pat<truncstorei8, SbRxRyOffMemX16>;
-def: StoreM16_pat<truncstorei16_a, ShRxRyOffMemX16>;
-def: StoreM16_pat<store_a, SwRxRyOffMemX16>;
+def: StoreM16_pat<truncstorei16, ShRxRyOffMemX16>;
+def: StoreM16_pat<store, SwRxRyOffMemX16>;
+
+// Unconditional branch
+class UncondBranch16_pat<SDNode OpNode, Instruction I>:
+ Mips16Pat<(OpNode bb:$imm16), (I bb:$imm16)> {
+ let Predicates = [RelocPIC, InMips16Mode];
+ }
+
+// Indirect branch
+def: Mips16Pat<
+ (brind CPU16Regs:$rs),
+ (JrcRx16 CPU16Regs:$rs)>;
// Jump and Link (Call)
-let isCall=1, hasDelaySlot=1 in
+let isCall=1, hasDelaySlot=0 in
def JumpLinkReg16:
FRR16_JALRC<0, 0, 0, (outs), (ins CPU16Regs:$rs),
- "jalr \t$rs", [(MipsJmpLink CPU16Regs:$rs)], IIBranch>;
+ "jalrc \t$rs", [(MipsJmpLink CPU16Regs:$rs)], IIBranch>;
// Mips16 pseudos
let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, hasCtrlDep=1,
hasExtraSrcRegAllocReq = 1 in
def RetRA16 : MipsPseudo16<(outs), (ins), "", [(MipsRet)]>;
+
+// setcc patterns
+
+class SetCC_R16<PatFrag cond_op, Instruction I>:
+ Mips16Pat<(cond_op CPU16Regs:$rx, CPU16Regs:$ry),
+ (I CPU16Regs:$rx, CPU16Regs:$ry)>;
+
+class SetCC_I16<PatFrag cond_op, PatLeaf imm_type, Instruction I>:
+ Mips16Pat<(cond_op CPU16Regs:$rx, imm_type:$imm16),
+ (I CPU16Regs:$rx, imm_type:$imm16)>;
+
+
+def: Mips16Pat<(i32 addr16:$addr),
+ (AddiuRxRyOffMemX16 addr16:$addr)>;
+
+
+// Large (>16 bit) immediate loads
+def : Mips16Pat<(i32 imm:$imm),
+ (OrRxRxRy16 (SllX16 (LiRxImmX16 (HI16 imm:$imm)), 16),
+ (LiRxImmX16 (LO16 imm:$imm)))>;
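+// As an illustration (register names are placeholders), a 32-bit constant
+// such as 0x12345678 roughly expands to:
+//   li  rx, 0x1234     # HI16 part
+//   sll rx, rx, 16
+//   li  ry, 0x5678     # LO16 part
+//   or  rx, rx, ry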
+
+// Carry MipsPatterns
+def : Mips16Pat<(subc CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (SubuRxRyRz16 CPU16Regs:$lhs, CPU16Regs:$rhs)>;
+def : Mips16Pat<(addc CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (AdduRxRyRz16 CPU16Regs:$lhs, CPU16Regs:$rhs)>;
+def : Mips16Pat<(addc CPU16Regs:$src, immSExt16:$imm),
+ (AddiuRxRxImmX16 CPU16Regs:$src, imm:$imm)>;
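+// MIPS has no carry flag, so the addc/subc carry-setting nodes map directly
+// onto the plain addu/subu/addiu instruction forms here.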
+
+//
+// Some conditional branch patterns are not generated by llvm at this time.
+// Some go unused for seemingly arbitrary reasons: e.g. a pattern is used for
+// signed comparisons while a different one is chosen for unsigned ones.
+// I am pushing upstream from the full mips16 port, where these patterns
+// seemed necessary earlier (the mips32 port has them as well), but now I
+// cannot create test cases that exercise them. While I sort this out I will
+// leave the extra patterns commented out; if I can be sure they are really
+// unused, I will delete the code. I don't want to check the code in
+// uncommented without a valid test case. In some cases the compiler
+// generates setcc patterns instead, and since I had implemented setcc first,
+// that may have masked the problem. The setcc variants are suboptimal for
+// mips16, so I may want to figure out how to enable the brcond patterns, or
+// else possibly new combinations of brcond and setcc.
+//
+//
+// bcond-seteq
+//
+def: Mips16Pat
+ <(brcond (i32 (seteq CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+ (BteqzT8CmpX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16)
+ >;
+
+
+def: Mips16Pat
+ <(brcond (i32 (seteq CPU16Regs:$rx, immZExt16:$imm)), bb:$targ16),
+ (BteqzT8CmpiX16 CPU16Regs:$rx, immSExt16:$imm, bb:$targ16)
+ >;
+
+def: Mips16Pat
+ <(brcond (i32 (seteq CPU16Regs:$rx, 0)), bb:$targ16),
+ (BeqzRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ >;
+
+//
+// bcond-setgt (do we really need both the setlt and setgt pairs?)
+//
+def: Mips16Pat
+ <(brcond (i32 (setgt CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+ (BtnezT8SltX16 CPU16Regs:$ry, CPU16Regs:$rx, bb:$imm16)
+ >;
+
+//
+// bcond-setge
+//
+def: Mips16Pat
+ <(brcond (i32 (setge CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+ (BteqzT8SltX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16)
+ >;
+
+//
+// Never generated: the compiler transforms a >= k into a > (k-1).
+def: Mips16Pat
+ <(brcond (i32 (setge CPU16Regs:$rx, immSExt16:$imm)), bb:$imm16),
+ (BteqzT8SltiX16 CPU16Regs:$rx, immSExt16:$imm, bb:$imm16)
+ >;
+
+//
+// bcond-setlt
+//
+def: Mips16Pat
+ <(brcond (i32 (setlt CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+ (BtnezT8SltX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16)
+ >;
+
+def: Mips16Pat
+ <(brcond (i32 (setlt CPU16Regs:$rx, immSExt16:$imm)), bb:$imm16),
+ (BtnezT8SltiX16 CPU16Regs:$rx, immSExt16:$imm, bb:$imm16)
+ >;
+
+//
+// bcond-setle
+//
+def: Mips16Pat
+ <(brcond (i32 (setle CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+ (BteqzT8SltX16 CPU16Regs:$ry, CPU16Regs:$rx, bb:$imm16)
+ >;
+
+//
+// bcond-setne
+//
+def: Mips16Pat
+ <(brcond (i32 (setne CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+ (BtnezT8CmpX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16)
+ >;
+
+def: Mips16Pat
+ <(brcond (i32 (setne CPU16Regs:$rx, immZExt16:$imm)), bb:$targ16),
+ (BtnezT8CmpiX16 CPU16Regs:$rx, immSExt16:$imm, bb:$targ16)
+ >;
+
+def: Mips16Pat
+ <(brcond (i32 (setne CPU16Regs:$rx, 0)), bb:$targ16),
+ (BnezRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ >;
+
+//
+// This needs to be here, though I forget which code generates it.
+//
+def: Mips16Pat
+ <(brcond CPU16Regs:$rx, bb:$targ16),
+ (BnezRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ >;
+
+//
+
+//
+// bcond-setugt
+//
+//def: Mips16Pat
+// <(brcond (i32 (setugt CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+// (BtnezT8SltuX16 CPU16Regs:$ry, CPU16Regs:$rx, bb:$imm16)
+// >;
+
+//
+// bcond-setuge
+//
+//def: Mips16Pat
+// <(brcond (i32 (setuge CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+// (BteqzT8SltuX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16)
+// >;
+
+
+//
+// bcond-setult
+//
+//def: Mips16Pat
+// <(brcond (i32 (setult CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16),
+// (BtnezT8SltuX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16)
+// >;
+
+def: UncondBranch16_pat<br, BimmX16>;
+
// Small immediates
+def: Mips16Pat<(i32 immSExt16:$in),
+ (AddiuRxRxImmX16 (Move32R16 ZERO), immSExt16:$in)>;
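+// i.e. a signed 16-bit constant is materialized by moving $zero into a
+// mips16 register and adding the immediate to it.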
+
def: Mips16Pat<(i32 immZExt16:$in), (LiRxImmX16 immZExt16:$in)>;
+//
+// MipsDivRem
+//
+def: Mips16Pat
+ <(MipsDivRem CPU16Regs:$rx, CPU16Regs:$ry),
+ (DivRxRy16 CPU16Regs:$rx, CPU16Regs:$ry)>;
+
+//
+// MipsDivRemU
+//
+def: Mips16Pat
+ <(MipsDivRemU CPU16Regs:$rx, CPU16Regs:$ry),
+ (DivuRxRy16 CPU16Regs:$rx, CPU16Regs:$ry)>;
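+// Both DIV and DIVU deposit the quotient in LO and the remainder in HI,
+// which are then read back with the Mflo16/Mfhi16 instructions above.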
+
+// signed a,b
+// x = (a>=b)?x:y
+//
+// if !(a < b) x = y
+//
+def : Mips16Pat<(select (i32 (setge CPU16Regs:$a, CPU16Regs:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBteqZSlt CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$a, CPU16Regs:$b)>;
+
+// signed a,b
+// x = (a>b)?x:y
+//
+// if (b < a) x = y
+//
+def : Mips16Pat<(select (i32 (setgt CPU16Regs:$a, CPU16Regs:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBtneZSlt CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$b, CPU16Regs:$a)>;
+
+// unsigned a,b
+// x = (a>=b)?x:y
+//
+// if !(a < b) x = y;
+//
+def : Mips16Pat<
+ (select (i32 (setuge CPU16Regs:$a, CPU16Regs:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBteqZSltu CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$a, CPU16Regs:$b)>;
+
+// unsigned a,b
+// x = (a>b)?x:y
+//
+// if (b < a) x = y
+//
+def : Mips16Pat<(select (i32 (setugt CPU16Regs:$a, CPU16Regs:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBtneZSltu CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$b, CPU16Regs:$a)>;
+
+// signed
+// x = (a >= k)?x:y
+// Due to an llvm optimization, I don't think this will ever be used;
+// it is transformed into x = (a > k-1)?x:y
+//
+//
+
+//def : Mips16Pat<
+// (select (i32 (setge CPU16Regs:$lhs, immSExt16:$rhs)),
+// CPU16Regs:$T, CPU16Regs:$F),
+// (SelTBteqZSlti CPU16Regs:$T, CPU16Regs:$F,
+// CPU16Regs:$lhs, immSExt16:$rhs)>;
+
+//def : Mips16Pat<
+// (select (i32 (setuge CPU16Regs:$lhs, immSExt16:$rhs)),
+// CPU16Regs:$T, CPU16Regs:$F),
+// (SelTBteqZSltiu CPU16Regs:$T, CPU16Regs:$F,
+// CPU16Regs:$lhs, immSExt16:$rhs)>;
+
+// signed
+// x = (a < k)?x:y
+//
+// if !(a < k) x = y;
+//
+def : Mips16Pat<
+ (select (i32 (setlt CPU16Regs:$a, immSExt16:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBtneZSlti CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$a, immSExt16:$b)>;
+
+
+//
+//
+// signed
+// x = (a <= b)? x : y
+//
+// if (b < a) x = y
+//
+def : Mips16Pat<(select (i32 (setle CPU16Regs:$a, CPU16Regs:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBteqZSlt CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$b, CPU16Regs:$a)>;
+
+//
+// unsigned
+// x = (a <= b)? x : y
+//
+// if (b < a) x = y
+//
+def : Mips16Pat<(select (i32 (setule CPU16Regs:$a, CPU16Regs:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBteqZSltu CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$b, CPU16Regs:$a)>;
+
+//
+// signed/unsigned
+// x = (a == b)? x : y
+//
+// if (a != b) x = y
+//
+def : Mips16Pat<(select (i32 (seteq CPU16Regs:$a, CPU16Regs:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBteqZCmp CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$b, CPU16Regs:$a)>;
+
+//
+// signed/unsigned
+// x = (a == 0)? x : y
+//
+// if (a != 0) x = y
+//
+def : Mips16Pat<(select (i32 (seteq CPU16Regs:$a, 0)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelBeqZ CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$a)>;
+
+
+//
+// signed/unsigned
+// x = (a == k)? x : y
+//
+// if (a != k) x = y
+//
+def : Mips16Pat<(select (i32 (seteq CPU16Regs:$a, immZExt16:$k)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBteqZCmpi CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$a, immZExt16:$k)>;
+
+
+//
+// signed/unsigned
+// x = (a != b)? x : y
+//
+// if (a == b) x = y
+//
+//
+def : Mips16Pat<(select (i32 (setne CPU16Regs:$a, CPU16Regs:$b)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBtneZCmp CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$b, CPU16Regs:$a)>;
+
+//
+// signed/unsigned
+// x = (a != 0)? x : y
+//
+// if (a == 0) x = y
+//
+def : Mips16Pat<(select (i32 (setne CPU16Regs:$a, 0)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelBneZ CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$a)>;
+
+// signed/unsigned
+// x = (a)? x : y
+//
+// if (!a) x = y
+//
+def : Mips16Pat<(select CPU16Regs:$a,
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelBneZ CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$a)>;
+
+
+//
+// signed/unsigned
+// x = (a != k)? x : y
+//
+// if (a == k) x = y
+//
+def : Mips16Pat<(select (i32 (setne CPU16Regs:$a, immZExt16:$k)),
+ CPU16Regs:$x, CPU16Regs:$y),
+ (SelTBtneZCmpi CPU16Regs:$x, CPU16Regs:$y,
+ CPU16Regs:$a, immZExt16:$k)>;
+
+//
+// When writing C code to test these setxx patterns, some will be
+// transformed into other forms, so we test the C code at both -O3 and -O0.
+//
+// seteq
+//
+def : Mips16Pat
+ <(seteq CPU16Regs:$lhs,CPU16Regs:$rhs),
+ (SltiuCCRxImmX16 (XorRxRxRy16 CPU16Regs:$lhs, CPU16Regs:$rhs), 1)>;
+
+def : Mips16Pat
+ <(seteq CPU16Regs:$lhs, 0),
+ (SltiuCCRxImmX16 CPU16Regs:$lhs, 1)>;
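+// i.e. (a == b) is computed as sltiu(a xor b, 1), and (a == 0) as
+// sltiu(a, 1): the value being tested is zero exactly when equality holds.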
+
+
+//
+// setge
+//
+
+def: Mips16Pat
+ <(setge CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (XorRxRxRy16 (SltCCRxRy16 CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (LiRxImmX16 1))>;
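+// i.e. (a >= b) is computed by inverting the slt result: (a < b) xor 1.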
+
+//
+// For constants, llvm transforms this to x > (k-1) and then reverses the
+// operands to use setlt (presumably after checking that k-1 does not
+// overflow), so the compiler does not use this pattern at the current time.
+//
+//def: Mips16Pat
+// <(setge CPU16Regs:$lhs, immSExt16:$rhs),
+// (XorRxRxRy16 (SltiCCRxImmX16 CPU16Regs:$lhs, immSExt16:$rhs),
+// (LiRxImmX16 1))>;
+
+// This catches the x >= -32768 case by transforming it to x > -32769
+//
+def: Mips16Pat
+ <(setgt CPU16Regs:$lhs, -32769),
+ (XorRxRxRy16 (SltiCCRxImmX16 CPU16Regs:$lhs, -32768),
+ (LiRxImmX16 1))>;
+
+//
+// setgt
+//
+//
+
+def: Mips16Pat
+ <(setgt CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (SltCCRxRy16 CPU16Regs:$rhs, CPU16Regs:$lhs)>;
+
+//
+// setle
+//
+def: Mips16Pat
+ <(setle CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (XorRxRxRy16 (SltCCRxRy16 CPU16Regs:$rhs, CPU16Regs:$lhs), (LiRxImmX16 1))>;
+
+//
+// setlt
+//
+def: SetCC_R16<setlt, SltCCRxRy16>;
+
+def: SetCC_I16<setlt, immSExt16, SltiCCRxImmX16>;
+
+//
+// setne
+//
+def : Mips16Pat
+ <(setne CPU16Regs:$lhs,CPU16Regs:$rhs),
+ (SltuCCRxRy16 (LiRxImmX16 0),
+ (XorRxRxRy16 CPU16Regs:$lhs, CPU16Regs:$rhs))>;
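+// i.e. (a != b) is computed as (0 <u (a xor b)): the xor result is nonzero
+// exactly when the operands differ.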
+
+
+//
+// setuge
+//
+def: Mips16Pat
+ <(setuge CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (XorRxRxRy16 (SltuCCRxRy16 CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (LiRxImmX16 1))>;
+
+// This pattern will never be used because the compiler transforms
+// x >= k into x > (k - 1) and then uses SLT.
+//
+//def: Mips16Pat
+// <(setuge CPU16Regs:$lhs, immZExt16:$rhs),
+// (XorRxRxRy16 (SltiuCCRxImmX16 CPU16Regs:$lhs, immZExt16:$rhs),
+// (LiRxImmX16 1))>;
+
+//
+// setugt
+//
+def: Mips16Pat
+ <(setugt CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (SltuCCRxRy16 CPU16Regs:$rhs, CPU16Regs:$lhs)>;
+
+//
+// setule
+//
+def: Mips16Pat
+ <(setule CPU16Regs:$lhs, CPU16Regs:$rhs),
+ (XorRxRxRy16 (SltuCCRxRy16 CPU16Regs:$rhs, CPU16Regs:$lhs), (LiRxImmX16 1))>;
+
+//
+// setult
+//
+def: SetCC_R16<setult, SltuCCRxRy16>;
+
+def: SetCC_I16<setult, immSExt16, SltiuCCRxImmX16>;
+
def: Mips16Pat<(add CPU16Regs:$hi, (MipsLo tglobaladdr:$lo)),
(AddiuRxRxImmX16 CPU16Regs:$hi, tglobaladdr:$lo)>;
+
+// hi/lo relocs
+
+def : Mips16Pat<(MipsHi tglobaltlsaddr:$in),
+ (SllX16 (LiRxImmX16 tglobaltlsaddr:$in), 16)>;
+
+// wrapper_pic
+class Wrapper16Pat<SDNode node, Instruction ADDiuOp, RegisterClass RC>:
+ Mips16Pat<(MipsWrapper RC:$gp, node:$in),
+ (ADDiuOp RC:$gp, node:$in)>;
+
+
+def : Wrapper16Pat<tglobaladdr, AddiuRxRxImmX16, CPU16Regs>;
+def : Wrapper16Pat<tglobaltlsaddr, AddiuRxRxImmX16, CPU16Regs>;
+
+def : Mips16Pat<(i32 (extloadi8 addr16:$src)),
+ (LbuRxRyOffMemX16 addr16:$src)>;
+def : Mips16Pat<(i32 (extloadi16 addr16:$src)),
+ (LhuRxRyOffMemX16 addr16:$src)>; \ No newline at end of file
diff --git a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
index c15d1bf..d7397a3 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "Mips16RegisterInfo.h"
+#include "Mips16InstrInfo.h"
#include "Mips.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsInstrInfo.h"
@@ -39,15 +40,27 @@
using namespace llvm;
Mips16RegisterInfo::Mips16RegisterInfo(const MipsSubtarget &ST,
- const TargetInstrInfo &TII)
- : MipsRegisterInfo(ST, TII) {}
+ const Mips16InstrInfo &I)
+ : MipsRegisterInfo(ST), TII(I) {}
// This function eliminates ADJCALLSTACKDOWN,
// ADJCALLSTACKUP pseudo instructions
void Mips16RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (!TFI->hasReservedCallFrame(MF)) {
+ int64_t Amount = I->getOperand(0).getImm();
+
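+    // The stack grows downward, so setting up the call frame
+    // (ADJCALLSTACKDOWN) means decrementing SP by the amount.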
+ if (I->getOpcode() == Mips::ADJCALLSTACKDOWN)
+ Amount = -Amount;
+
+ const Mips16InstrInfo *II = static_cast<const Mips16InstrInfo*>(&TII);
+
+ II->adjustStackPtr(Mips::SP, Amount, MBB, I);
+ }
+
MBB.erase(I);
}
@@ -55,57 +68,60 @@ void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
unsigned OpNo, int FrameIndex,
uint64_t StackSize,
int64_t SPOffset) const {
- MachineInstr &MI = *II;
- MachineFunction &MF = *MI.getParent()->getParent();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
-
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- int MinCSFI = 0;
- int MaxCSFI = -1;
-
- if (CSI.size()) {
- MinCSFI = CSI[0].getFrameIdx();
- MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
- }
-
- // The following stack frame objects are always
- // referenced relative to $sp:
- // 1. Outgoing arguments.
- // 2. Pointer to dynamically allocated stack space.
- // 3. Locations for callee-saved registers.
- // Everything else is referenced relative to whatever register
- // getFrameRegister() returns.
- unsigned FrameReg;
-
- if (MipsFI->isOutArgFI(FrameIndex) ||
- (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI))
- FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP;
+ MachineInstr &MI = *II;
+ MachineFunction &MF = *MI.getParent()->getParent();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ int MinCSFI = 0;
+ int MaxCSFI = -1;
+
+ if (CSI.size()) {
+ MinCSFI = CSI[0].getFrameIdx();
+ MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
+ }
+
+ // The following stack frame objects are always
+ // referenced relative to $sp:
+ // 1. Outgoing arguments.
+ // 2. Pointer to dynamically allocated stack space.
+ // 3. Locations for callee-saved registers.
+ // Everything else is referenced relative to whatever register
+ // getFrameRegister() returns.
+ unsigned FrameReg;
+
+ if (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI)
+ FrameReg = Mips::SP;
+ else {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ if (TFI->hasFP(MF)) {
+ FrameReg = Mips::S0;
+ }
+ else {
+ if ((MI.getNumOperands()> OpNo+2) && MI.getOperand(OpNo+2).isReg())
+ FrameReg = MI.getOperand(OpNo+2).getReg();
else
- FrameReg = getFrameRegister(MF);
-
- // Calculate final offset.
- // - There is no need to change the offset if the frame object
- // is one of the
- // following: an outgoing argument, pointer to a dynamically allocated
- // stack space or a $gp restore location,
- // - If the frame object is any of the following,
- // its offset must be adjusted
- // by adding the size of the stack:
- // incoming argument, callee-saved register location or local variable.
- int64_t Offset;
-
- if (MipsFI->isOutArgFI(FrameIndex))
- Offset = SPOffset;
- else
- Offset = SPOffset + (int64_t)StackSize;
-
- Offset += MI.getOperand(OpNo + 1).getImm();
-
- DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
-
- MI.getOperand(OpNo).ChangeToRegister(FrameReg, false);
- MI.getOperand(OpNo + 1).ChangeToImmediate(Offset);
+ FrameReg = Mips::SP;
+ }
+ }
+ // Calculate final offset.
+ // - There is no need to change the offset if the frame object
+ // is one of the
+ // following: an outgoing argument, pointer to a dynamically allocated
+ // stack space or a $gp restore location,
+ // - If the frame object is any of the following,
+ // its offset must be adjusted
+ // by adding the size of the stack:
+ // incoming argument, callee-saved register location or local variable.
+ int64_t Offset;
+ Offset = SPOffset + (int64_t)StackSize;
+ Offset += MI.getOperand(OpNo + 1).getImm();
+
+
+ DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
+
+ MI.getOperand(OpNo).ChangeToRegister(FrameReg, false);
+ MI.getOperand(OpNo + 1).ChangeToImmediate(Offset);
}
diff --git a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h
index 3f4b3a7..153def2 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h
@@ -17,11 +17,12 @@
#include "MipsRegisterInfo.h"
namespace llvm {
+class Mips16InstrInfo;
class Mips16RegisterInfo : public MipsRegisterInfo {
+ const Mips16InstrInfo &TII;
public:
- Mips16RegisterInfo(const MipsSubtarget &Subtarget,
- const TargetInstrInfo &TII);
+ Mips16RegisterInfo(const MipsSubtarget &Subtarget, const Mips16InstrInfo &TII);
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
diff --git a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
index 20fc178..a611168 100644
--- a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
@@ -83,8 +83,10 @@ let usesCustomInserter = 1, Predicates = [HasMips64, HasStandardEncoding],
//===----------------------------------------------------------------------===//
let DecoderNamespace = "Mips64" in {
/// Arithmetic Instructions (ALU Immediate)
-def DADDiu : ArithLogicI<0x19, "daddiu", add, simm16_64, immSExt16,
+def DADDi : ArithOverflowI<0x18, "daddi", add, simm16_64, immSExt16,
CPU64Regs>;
+def DADDiu : ArithLogicI<0x19, "daddiu", add, simm16_64, immSExt16,
+ CPU64Regs>, IsAsCheapAsAMove;
def DANDi : ArithLogicI<0x0c, "andi", and, uimm16_64, immZExt16, CPU64Regs>;
def SLTi64 : SetCC_I<0x0a, "slti", setlt, simm16_64, immSExt16, CPU64Regs>;
def SLTiu64 : SetCC_I<0x0b, "sltiu", setult, simm16_64, immSExt16, CPU64Regs>;
@@ -93,6 +95,7 @@ def XORi64 : ArithLogicI<0x0e, "xori", xor, uimm16_64, immZExt16, CPU64Regs>;
def LUi64 : LoadUpper<0x0f, "lui", CPU64Regs, uimm16_64>;
/// Arithmetic Instructions (3-Operand, R-Type)
+def DADD : ArithOverflowR<0x00, 0x2C, "dadd", IIAlu, CPU64Regs, 1>;
def DADDu : ArithLogicR<0x00, 0x2d, "daddu", add, IIAlu, CPU64Regs, 1>;
def DSUBu : ArithLogicR<0x00, 0x2f, "dsubu", sub, IIAlu, CPU64Regs>;
def SLT64 : SetCC_R<0x00, 0x2a, "slt", setlt, CPU64Regs>;
@@ -110,9 +113,9 @@ def DSLLV : shift_rotate_reg<0x14, 0x00, "dsllv", shl, CPU64Regs>;
def DSRLV : shift_rotate_reg<0x16, 0x00, "dsrlv", srl, CPU64Regs>;
def DSRAV : shift_rotate_reg<0x17, 0x00, "dsrav", sra, CPU64Regs>;
let Pattern = []<dag> in {
-def DSLL32 : shift_rotate_imm64<0x3c, 0x00, "dsll32", shl>;
-def DSRL32 : shift_rotate_imm64<0x3e, 0x00, "dsrl32", srl>;
-def DSRA32 : shift_rotate_imm64<0x3f, 0x00, "dsra32", sra>;
+ def DSLL32 : shift_rotate_imm64<0x3c, 0x00, "dsll32", shl>;
+ def DSRL32 : shift_rotate_imm64<0x3e, 0x00, "dsrl32", srl>;
+ def DSRA32 : shift_rotate_imm64<0x3f, 0x00, "dsra32", sra>;
}
}
// Rotate Instructions
@@ -127,24 +130,15 @@ let DecoderNamespace = "Mips64" in {
/// aligned
defm LB64 : LoadM64<0x20, "lb", sextloadi8>;
defm LBu64 : LoadM64<0x24, "lbu", zextloadi8>;
-defm LH64 : LoadM64<0x21, "lh", sextloadi16_a>;
-defm LHu64 : LoadM64<0x25, "lhu", zextloadi16_a>;
-defm LW64 : LoadM64<0x23, "lw", sextloadi32_a>;
-defm LWu64 : LoadM64<0x27, "lwu", zextloadi32_a>;
+defm LH64 : LoadM64<0x21, "lh", sextloadi16>;
+defm LHu64 : LoadM64<0x25, "lhu", zextloadi16>;
+defm LW64 : LoadM64<0x23, "lw", sextloadi32>;
+defm LWu64 : LoadM64<0x27, "lwu", zextloadi32>;
defm SB64 : StoreM64<0x28, "sb", truncstorei8>;
-defm SH64 : StoreM64<0x29, "sh", truncstorei16_a>;
-defm SW64 : StoreM64<0x2b, "sw", truncstorei32_a>;
-defm LD : LoadM64<0x37, "ld", load_a>;
-defm SD : StoreM64<0x3f, "sd", store_a>;
-
-/// unaligned
-defm ULH64 : LoadM64<0x21, "ulh", sextloadi16_u, 1>;
-defm ULHu64 : LoadM64<0x25, "ulhu", zextloadi16_u, 1>;
-defm ULW64 : LoadM64<0x23, "ulw", sextloadi32_u, 1>;
-defm USH64 : StoreM64<0x29, "ush", truncstorei16_u, 1>;
-defm USW64 : StoreM64<0x2b, "usw", truncstorei32_u, 1>;
-defm ULD : LoadM64<0x37, "uld", load_u, 1>;
-defm USD : StoreM64<0x3f, "usd", store_u, 1>;
+defm SH64 : StoreM64<0x29, "sh", truncstorei16>;
+defm SW64 : StoreM64<0x2b, "sw", truncstorei32>;
+defm LD : LoadM64<0x37, "ld", load>;
+defm SD : StoreM64<0x3f, "sd", store>;
/// load/store left/right
let isCodeGenOnly = 1 in {
@@ -183,6 +177,7 @@ def BLTZ64 : CBranchZero<0x01, 0, "bltz", setlt, CPU64Regs>;
}
let DecoderNamespace = "Mips64" in
def JALR64 : JumpLinkReg<0x00, 0x09, "jalr", CPU64Regs>;
+def TAILCALL64_R : JumpFR<CPU64Regs, MipsTailCall>, IsTailCall;
let DecoderNamespace = "Mips64" in {
/// Multiply and Divide Instructions.
@@ -217,7 +212,15 @@ let DecoderNamespace = "Mips64" in {
def RDHWR64 : ReadHardware<CPU64Regs, HWRegs64>;
def DEXT : ExtBase<3, "dext", CPU64Regs>;
+let Pattern = []<dag> in {
+ def DEXTU : ExtBase<2, "dextu", CPU64Regs>;
+ def DEXTM : ExtBase<1, "dextm", CPU64Regs>;
+}
def DINS : InsBase<7, "dins", CPU64Regs>;
+let Pattern = []<dag> in {
+ def DINSU : InsBase<6, "dinsu", CPU64Regs>;
+ def DINSM : InsBase<5, "dinsm", CPU64Regs>;
+}
let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
def DSLL64_32 : FR<0x00, 0x3c, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
@@ -236,21 +239,14 @@ let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
let Predicates = [NotN64, HasStandardEncoding] in {
def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
- def : MipsPat<(i64 (extloadi16_a addr:$src)), (LH64 addr:$src)>;
- def : MipsPat<(i64 (extloadi16_u addr:$src)), (ULH64 addr:$src)>;
- def : MipsPat<(i64 (extloadi32_a addr:$src)), (LW64 addr:$src)>;
- def : MipsPat<(i64 (extloadi32_u addr:$src)), (ULW64 addr:$src)>;
- def : MipsPat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64 addr:$a), 32), 32)>;
+ def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64 addr:$src)>;
+ def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64 addr:$src)>;
}
let Predicates = [IsN64, HasStandardEncoding] in {
def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>;
def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi16_a addr:$src)), (LH64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi16_u addr:$src)), (ULH64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi32_a addr:$src)), (LW64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi32_u addr:$src)), (ULW64_P8 addr:$src)>;
- def : MipsPat<(zextloadi32_u addr:$a),
- (DSRL (DSLL (ULW64_P8 addr:$a), 32), 32)>;
+ def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64_P8 addr:$src)>;
+ def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64_P8 addr:$src)>;
}
// hi/lo relocs
@@ -315,3 +311,38 @@ def : MipsPat<(i64 (sext_inreg CPU64Regs:$src, i32)),
// bswap MipsPattern
def : MipsPat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>;
+
+//===----------------------------------------------------------------------===//
+// Instruction aliases
+//===----------------------------------------------------------------------===//
+def : InstAlias<"move $dst,$src", (DADD CPU64Regs:$dst,CPU64Regs:$src,ZERO_64)>;
+
+/// Move between CPU and coprocessor registers
+let DecoderNamespace = "Mips64" in {
+def MFC0_3OP64 : MFC3OP<0x10, 0, (outs CPU64Regs:$rt),
+ (ins CPU64Regs:$rd, uimm16:$sel),"mfc0\t$rt, $rd, $sel">;
+def MTC0_3OP64 : MFC3OP<0x10, 4, (outs CPU64Regs:$rd, uimm16:$sel),
+ (ins CPU64Regs:$rt),"mtc0\t$rt, $rd, $sel">;
+def MFC2_3OP64 : MFC3OP<0x12, 0, (outs CPU64Regs:$rt),
+ (ins CPU64Regs:$rd, uimm16:$sel),"mfc2\t$rt, $rd, $sel">;
+def MTC2_3OP64 : MFC3OP<0x12, 4, (outs CPU64Regs:$rd, uimm16:$sel),
+ (ins CPU64Regs:$rt),"mtc2\t$rt, $rd, $sel">;
+def DMFC0_3OP64 : MFC3OP<0x10, 1, (outs CPU64Regs:$rt),
+ (ins CPU64Regs:$rd, uimm16:$sel),"dmfc0\t$rt, $rd, $sel">;
+def DMTC0_3OP64 : MFC3OP<0x10, 5, (outs CPU64Regs:$rd, uimm16:$sel),
+ (ins CPU64Regs:$rt),"dmtc0\t$rt, $rd, $sel">;
+def DMFC2_3OP64 : MFC3OP<0x12, 1, (outs CPU64Regs:$rt),
+ (ins CPU64Regs:$rd, uimm16:$sel),"dmfc2\t$rt, $rd, $sel">;
+def DMTC2_3OP64 : MFC3OP<0x12, 5, (outs CPU64Regs:$rd, uimm16:$sel),
+ (ins CPU64Regs:$rt),"dmtc2\t$rt, $rd, $sel">;
+}
+// Two operand (implicit 0 selector) versions:
+def : InstAlias<"mfc0 $rt, $rd", (MFC0_3OP64 CPU64Regs:$rt, CPU64Regs:$rd, 0)>;
+def : InstAlias<"mtc0 $rt, $rd", (MTC0_3OP64 CPU64Regs:$rd, 0, CPU64Regs:$rt)>;
+def : InstAlias<"mfc2 $rt, $rd", (MFC2_3OP64 CPU64Regs:$rt, CPU64Regs:$rd, 0)>;
+def : InstAlias<"mtc2 $rt, $rd", (MTC2_3OP64 CPU64Regs:$rd, 0, CPU64Regs:$rt)>;
+def : InstAlias<"dmfc0 $rt, $rd", (DMFC0_3OP64 CPU64Regs:$rt, CPU64Regs:$rd, 0)>;
+def : InstAlias<"dmtc0 $rt, $rd", (DMTC0_3OP64 CPU64Regs:$rd, 0, CPU64Regs:$rt)>;
+def : InstAlias<"dmfc2 $rt, $rd", (DMFC2_3OP64 CPU64Regs:$rt, CPU64Regs:$rd, 0)>;
+def : InstAlias<"dmtc2 $rt, $rd", (DMTC2_3OP64 CPU64Regs:$rd, 0, CPU64Regs:$rt)>;
+
diff --git a/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp
index dc8fbd0..99b163e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp
@@ -91,7 +91,7 @@ void MipsAnalyzeImmediate::ReplaceADDiuSLLWithLUi(InstSeq &Seq) {
// Sign-extend and shift operand of ADDiu and see if it still fits in 16-bit.
int64_t Imm = SignExtend64<16>(Seq[0].ImmOpnd);
- int64_t ShiftedImm = Imm << (Seq[1].ImmOpnd - 16);
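+  // Shift the sign-extended immediate as unsigned: left-shifting a negative
+  // signed value is undefined behavior in C++.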
+ int64_t ShiftedImm = (uint64_t)Imm << (Seq[1].ImmOpnd - 16);
if (!isInt<16>(ShiftedImm))
return;
diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 00ff754..bf2818d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -37,7 +37,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
@@ -49,6 +49,13 @@ bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
return true;
}
+bool MipsAsmPrinter::lowerOperand(const MachineOperand &MO, MCOperand &MCOp) {
+ MCOp = MCInstLowering.LowerOperand(MO);
+ return MCOp.isValid();
+}
+
+#include "MipsGenMCPseudoLowering.inc"
+
void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (MI->isDebugValue()) {
SmallString<128> Str;
@@ -58,24 +65,9 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
- // Direct object specific instruction lowering
- if (!OutStreamer.hasRawTextSupport())
- switch (MI->getOpcode()) {
- case Mips::DSLL:
- case Mips::DSRL:
- case Mips::DSRA:
- assert(MI->getNumOperands() == 3 &&
- "Invalid no. of machine operands for shift!");
- assert(MI->getOperand(2).isImm());
- int64_t Shift = MI->getOperand(2).getImm();
- if (Shift > 31) {
- MCInst TmpInst0;
- MCInstLowering.LowerLargeShift(MI, TmpInst0, Shift - 32);
- OutStreamer.EmitInstruction(TmpInst0);
- return;
- }
- break;
- }
+ // Do any auto-generated pseudo lowerings.
+ if (emitPseudoExpansionLowering(OutStreamer, MI))
+ return;
MachineBasicBlock::const_instr_iterator I = MI;
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
@@ -83,8 +75,9 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
do {
MCInst TmpInst0;
MCInstLowering.Lower(I++, TmpInst0);
+
OutStreamer.EmitInstruction(TmpInst0);
- } while ((I != E) && I->isInsideBundle());
+ } while ((I != E) && I->isInsideBundle()); // Delay slot check
}
//===----------------------------------------------------------------------===//
@@ -214,7 +207,7 @@ const char *MipsAsmPrinter::getCurrentABIString() const {
case MipsSubtarget::N32: return "abiN32";
case MipsSubtarget::N64: return "abi64";
case MipsSubtarget::EABI: return "eabi32"; // TODO: handle eabi64
- default: llvm_unreachable("Unknown Mips ABI");;
+ default: llvm_unreachable("Unknown Mips ABI");
}
}
@@ -246,8 +239,7 @@ void MipsAsmPrinter::EmitFunctionBodyStart() {
OutStreamer.EmitRawText(StringRef("\t.set\tnoreorder"));
OutStreamer.EmitRawText(StringRef("\t.set\tnomacro"));
- if (MipsFI->getEmitNOAT())
- OutStreamer.EmitRawText(StringRef("\t.set\tnoat"));
+ OutStreamer.EmitRawText(StringRef("\t.set\tnoat"));
}
}
@@ -258,9 +250,7 @@ void MipsAsmPrinter::EmitFunctionBodyEnd() {
// always be at the function end, and we can't emit and
// break with BB logic.
if (OutStreamer.hasRawTextSupport()) {
- if (MipsFI->getEmitNOAT())
- OutStreamer.EmitRawText(StringRef("\t.set\tat"));
-
+ OutStreamer.EmitRawText(StringRef("\t.set\tat"));
OutStreamer.EmitRawText(StringRef("\t.set\tmacro"));
OutStreamer.EmitRawText(StringRef("\t.set\treorder"));
OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
index 562bf9c..94d8bfa 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
@@ -32,6 +32,14 @@ class LLVM_LIBRARY_VISIBILITY MipsAsmPrinter : public AsmPrinter {
void EmitInstrWithMacroNoAT(const MachineInstr *MI);
+private:
+ // tblgen'erated function.
+ bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
+ const MachineInstr *MI);
+
+ // lowerOperand - Convert a MachineOperand into the equivalent MCOperand.
+ bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
+
public:
const MipsSubtarget *Subtarget;
diff --git a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
index 19213fa..78cf140 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
+++ b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
@@ -35,9 +35,6 @@ def RetCC_MipsO32 : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_MipsN : CallingConv<[
- // Handles byval parameters.
- CCIfByVal<CCCustom<"CC_Mips64Byval">>,
-
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
@@ -72,9 +69,6 @@ def CC_MipsN : CallingConv<[
// N32/64 variable arguments.
// All arguments are passed in integer registers.
def CC_MipsN_VarArg : CallingConv<[
- // Handles byval parameters.
- CCIfByVal<CCCustom<"CC_Mips64Byval">>,
-
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
@@ -211,12 +205,6 @@ def CC_Mips_FastCC : CallingConv<[
// Mips Calling Convention Dispatch
//===----------------------------------------------------------------------===//
-def CC_Mips : CallingConv<[
- CCIfSubtarget<"isABI_EABI()", CCDelegateTo<CC_MipsEABI>>,
- CCIfSubtarget<"isABI_N32()", CCDelegateTo<CC_MipsN>>,
- CCIfSubtarget<"isABI_N64()", CCDelegateTo<CC_MipsN>>
-]>;
-
def RetCC_Mips : CallingConv<[
CCIfSubtarget<"isABI_EABI()", CCDelegateTo<RetCC_MipsEABI>>,
CCIfSubtarget<"isABI_N32()", CCDelegateTo<RetCC_MipsN>>,
diff --git a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
index cb7022b..4bfccd8 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -30,7 +30,6 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
#include "llvm/PassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -48,7 +47,7 @@ namespace {
class MipsCodeEmitter : public MachineFunctionPass {
MipsJITInfo *JTI;
const MipsInstrInfo *II;
- const TargetData *TD;
+ const DataLayout *TD;
const MipsSubtarget *Subtarget;
TargetMachine &TM;
JITCodeEmitter &MCE;
@@ -67,7 +66,7 @@ class MipsCodeEmitter : public MachineFunctionPass {
MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) :
MachineFunctionPass(ID), JTI(0),
II((const MipsInstrInfo *) tm.getInstrInfo()),
- TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
+ TD(tm.getDataLayout()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
}
@@ -129,7 +128,7 @@ char MipsCodeEmitter::ID = 0;
bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
JTI = ((MipsTargetMachine&) MF.getTarget()).getJITInfo();
II = ((const MipsTargetMachine&) MF.getTarget()).getInstrInfo();
- TD = ((const MipsTargetMachine&) MF.getTarget()).getTargetData();
+ TD = ((const MipsTargetMachine&) MF.getTarget()).getDataLayout();
Subtarget = &TM.getSubtarget<MipsSubtarget> ();
MCPEs = &MF.getConstantPool()->getConstants();
MJTEs = 0;
@@ -139,7 +138,7 @@ bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
do {
DEBUG(errs() << "JITTing function '"
- << MF.getFunction()->getName() << "'\n");
+ << MF.getName() << "'\n");
MCE.startFunction(MF);
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
@@ -219,15 +218,9 @@ unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
return getMipsRegisterNumbering(MO.getReg());
else if (MO.isImm())
return static_cast<unsigned>(MO.getImm());
- else if (MO.isGlobal()) {
- if (MI.getOpcode() == Mips::ULW || MI.getOpcode() == Mips::USW ||
- MI.getOpcode() == Mips::ULH || MI.getOpcode() == Mips::ULHu)
- emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 4);
- else if (MI.getOpcode() == Mips::USH)
- emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 8);
- else
- emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true);
- } else if (MO.isSymbol())
+ else if (MO.isGlobal())
+ emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true);
+ else if (MO.isSymbol())
emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO));
else if (MO.isCPI())
emitConstPoolAddress(MO.getIndex(), getRelocation(MI, MO));
@@ -384,29 +377,8 @@ void MipsCodeEmitter::emitInstruction(const MachineInstr &MI) {
if ((MI.getDesc().TSFlags & MipsII::FormMask) == MipsII::Pseudo)
return;
-
- switch (MI.getOpcode()) {
- case Mips::USW:
- NumEmitted += emitUSW(MI);
- break;
- case Mips::ULW:
- NumEmitted += emitULW(MI);
- break;
- case Mips::ULH:
- NumEmitted += emitULH(MI);
- break;
- case Mips::ULHu:
- NumEmitted += emitULHu(MI);
- break;
- case Mips::USH:
- NumEmitted += emitUSH(MI);
- break;
-
- default:
- emitWordLE(getBinaryCodeForInstr(MI));
- ++NumEmitted; // Keep track of the # of mi's emitted
- break;
- }
+ emitWordLE(getBinaryCodeForInstr(MI));
+ ++NumEmitted; // Keep track of the # of mi's emitted
MCE.processDebugLoc(MI.getDebugLoc(), false);
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td b/contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td
new file mode 100644
index 0000000..8e01d06
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td
@@ -0,0 +1,309 @@
+//===- MipsDSPInstrFormats.td - Mips Instruction Formats ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def HasDSP : Predicate<"Subtarget.hasDSP()">,
+ AssemblerPredicate<"FeatureDSP">;
+def HasDSPR2 : Predicate<"Subtarget.hasDSPR2()">,
+ AssemblerPredicate<"FeatureDSPR2">;
+
+// Fields.
+class Field6<bits<6> val> {
+ bits<6> V = val;
+}
+
+def SPECIAL3_OPCODE : Field6<0b011111>;
+def REGIMM_OPCODE : Field6<0b000001>;
+
+class DSPInst : MipsInst<(outs), (ins), "", [], NoItinerary, FrmOther> {
+ let Predicates = [HasDSP];
+}
+
+class PseudoDSP<dag outs, dag ins, list<dag> pattern>:
+ MipsPseudo<outs, ins, "", pattern> {
+ let Predicates = [HasDSP];
+}
+
+// ADDU.QB sub-class format.
+class ADDU_QB_FMT<bits<5> op> : DSPInst {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b010000;
+}
+
+class RADDU_W_QB_FMT<bits<5> op> : DSPInst {
+ bits<5> rd;
+ bits<5> rs;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = 0;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b010000;
+}
+
+// CMPU.EQ.QB sub-class format.
+class CMP_EQ_QB_R2_FMT<bits<5> op> : DSPInst {
+ bits<5> rs;
+ bits<5> rt;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = 0;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b010001;
+}
+
+class CMP_EQ_QB_R3_FMT<bits<5> op> : DSPInst {
+ bits<5> rs;
+ bits<5> rt;
+ bits<5> rd;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b010001;
+}
+
+class PRECR_SRA_PH_W_FMT<bits<5> op> : DSPInst {
+ bits<5> rs;
+ bits<5> rt;
+ bits<5> sa;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = sa;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b010001;
+}
+
+// ABSQ_S.PH sub-class format.
+class ABSQ_S_PH_R2_FMT<bits<5> op> : DSPInst {
+ bits<5> rd;
+ bits<5> rt;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = 0;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b010010;
+}
+
+
+class REPL_FMT<bits<5> op> : DSPInst {
+ bits<5> rd;
+ bits<10> imm;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-16} = imm;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b010010;
+}
+
+// SHLL.QB sub-class format.
+class SHLL_QB_FMT<bits<5> op> : DSPInst {
+ bits<5> rd;
+ bits<5> rt;
+ bits<5> rs_sa;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs_sa;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b010011;
+}
+
+// LX sub-class format.
+class LX_FMT<bits<5> op> : DSPInst {
+ bits<5> rd;
+ bits<5> base;
+ bits<5> index;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = base;
+ let Inst{20-16} = index;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b001010;
+}
+
+// ADDUH.QB sub-class format.
+class ADDUH_QB_FMT<bits<5> op> : DSPInst {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b011000;
+}
+
+// APPEND sub-class format.
+class APPEND_FMT<bits<5> op> : DSPInst {
+ bits<5> rt;
+ bits<5> rs;
+ bits<5> sa;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = sa;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b110001;
+}
+
+// DPA.W.PH sub-class format.
+class DPA_W_PH_FMT<bits<5> op> : DSPInst {
+ bits<2> ac;
+ bits<5> rs;
+ bits<5> rt;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-13} = 0;
+ let Inst{12-11} = ac;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b110000;
+}
+
+// MULT sub-class format.
+class MULT_FMT<bits<6> opcode, bits<6> funct> : DSPInst {
+ bits<2> ac;
+ bits<5> rs;
+ bits<5> rt;
+
+ let Opcode = opcode;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-13} = 0;
+ let Inst{12-11} = ac;
+ let Inst{10-6} = 0;
+ let Inst{5-0} = funct;
+}
+
+// EXTR.W sub-class format (type 1).
+class EXTR_W_TY1_FMT<bits<5> op> : DSPInst {
+ bits<5> rt;
+ bits<2> ac;
+ bits<5> shift_rs;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = shift_rs;
+ let Inst{20-16} = rt;
+ let Inst{15-13} = 0;
+ let Inst{12-11} = ac;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b111000;
+}
+
+// SHILO sub-class format.
+class SHILO_R1_FMT<bits<5> op> : DSPInst {
+ bits<2> ac;
+ bits<6> shift;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-20} = shift;
+ let Inst{19-13} = 0;
+ let Inst{12-11} = ac;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b111000;
+}
+
+class SHILO_R2_FMT<bits<5> op> : DSPInst {
+ bits<2> ac;
+ bits<5> rs;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-13} = 0;
+ let Inst{12-11} = ac;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b111000;
+}
+
+class RDDSP_FMT<bits<5> op> : DSPInst {
+ bits<5> rd;
+ bits<10> mask;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-16} = mask;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b111000;
+}
+
+class WRDSP_FMT<bits<5> op> : DSPInst {
+ bits<5> rs;
+ bits<10> mask;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-11} = mask;
+ let Inst{10-6} = op;
+ let Inst{5-0} = 0b111000;
+}
+
+class BPOSGE32_FMT<bits<5> op> : DSPInst {
+ bits<16> offset;
+
+ let Opcode = REGIMM_OPCODE.V;
+
+ let Inst{25-21} = 0;
+ let Inst{20-16} = op;
+ let Inst{15-0} = offset;
+}
+
+// INSV sub-class format.
+class INSV_FMT<bits<6> op> : DSPInst {
+ bits<5> rt;
+ bits<5> rs;
+
+ let Opcode = SPECIAL3_OPCODE.V;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-6} = 0;
+ let Inst{5-0} = op;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsDSPInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsDSPInstrInfo.td
new file mode 100644
index 0000000..ef94028
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsDSPInstrInfo.td
@@ -0,0 +1,1319 @@
+//===- MipsDSPInstrInfo.td - DSP ASE instructions ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes Mips DSP ASE instructions.
+//
+//===----------------------------------------------------------------------===//
+
+// ImmLeaf
+def immZExt2 : ImmLeaf<i32, [{return isUInt<2>(Imm);}]>;
+def immZExt3 : ImmLeaf<i32, [{return isUInt<3>(Imm);}]>;
+def immZExt4 : ImmLeaf<i32, [{return isUInt<4>(Imm);}]>;
+def immZExt8 : ImmLeaf<i32, [{return isUInt<8>(Imm);}]>;
+def immZExt10 : ImmLeaf<i32, [{return isUInt<10>(Imm);}]>;
+def immSExt6 : ImmLeaf<i32, [{return isInt<6>(Imm);}]>;
+
+// Mips-specific dsp nodes
+def SDT_MipsExtr : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>]>;
+def SDT_MipsShilo : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def SDT_MipsDPA : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>]>;
+
+class MipsDSPBase<string Opc, SDTypeProfile Prof> :
+ SDNode<!strconcat("MipsISD::", Opc), Prof,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
+
+class MipsDSPSideEffectBase<string Opc, SDTypeProfile Prof> :
+ SDNode<!strconcat("MipsISD::", Opc), Prof,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPSideEffect]>;
+
+def MipsEXTP : MipsDSPSideEffectBase<"EXTP", SDT_MipsExtr>;
+def MipsEXTPDP : MipsDSPSideEffectBase<"EXTPDP", SDT_MipsExtr>;
+def MipsEXTR_S_H : MipsDSPSideEffectBase<"EXTR_S_H", SDT_MipsExtr>;
+def MipsEXTR_W : MipsDSPSideEffectBase<"EXTR_W", SDT_MipsExtr>;
+def MipsEXTR_R_W : MipsDSPSideEffectBase<"EXTR_R_W", SDT_MipsExtr>;
+def MipsEXTR_RS_W : MipsDSPSideEffectBase<"EXTR_RS_W", SDT_MipsExtr>;
+
+def MipsSHILO : MipsDSPBase<"SHILO", SDT_MipsShilo>;
+def MipsMTHLIP : MipsDSPBase<"MTHLIP", SDT_MipsShilo>;
+
+def MipsMULSAQ_S_W_PH : MipsDSPSideEffectBase<"MULSAQ_S_W_PH", SDT_MipsDPA>;
+def MipsMAQ_S_W_PHL : MipsDSPSideEffectBase<"MAQ_S_W_PHL", SDT_MipsDPA>;
+def MipsMAQ_S_W_PHR : MipsDSPSideEffectBase<"MAQ_S_W_PHR", SDT_MipsDPA>;
+def MipsMAQ_SA_W_PHL : MipsDSPSideEffectBase<"MAQ_SA_W_PHL", SDT_MipsDPA>;
+def MipsMAQ_SA_W_PHR : MipsDSPSideEffectBase<"MAQ_SA_W_PHR", SDT_MipsDPA>;
+
+def MipsDPAU_H_QBL : MipsDSPBase<"DPAU_H_QBL", SDT_MipsDPA>;
+def MipsDPAU_H_QBR : MipsDSPBase<"DPAU_H_QBR", SDT_MipsDPA>;
+def MipsDPSU_H_QBL : MipsDSPBase<"DPSU_H_QBL", SDT_MipsDPA>;
+def MipsDPSU_H_QBR : MipsDSPBase<"DPSU_H_QBR", SDT_MipsDPA>;
+def MipsDPAQ_S_W_PH : MipsDSPSideEffectBase<"DPAQ_S_W_PH", SDT_MipsDPA>;
+def MipsDPSQ_S_W_PH : MipsDSPSideEffectBase<"DPSQ_S_W_PH", SDT_MipsDPA>;
+def MipsDPAQ_SA_L_W : MipsDSPSideEffectBase<"DPAQ_SA_L_W", SDT_MipsDPA>;
+def MipsDPSQ_SA_L_W : MipsDSPSideEffectBase<"DPSQ_SA_L_W", SDT_MipsDPA>;
+
+def MipsDPA_W_PH : MipsDSPBase<"DPA_W_PH", SDT_MipsDPA>;
+def MipsDPS_W_PH : MipsDSPBase<"DPS_W_PH", SDT_MipsDPA>;
+def MipsDPAQX_S_W_PH : MipsDSPSideEffectBase<"DPAQX_S_W_PH", SDT_MipsDPA>;
+def MipsDPAQX_SA_W_PH : MipsDSPSideEffectBase<"DPAQX_SA_W_PH", SDT_MipsDPA>;
+def MipsDPAX_W_PH : MipsDSPBase<"DPAX_W_PH", SDT_MipsDPA>;
+def MipsDPSX_W_PH : MipsDSPBase<"DPSX_W_PH", SDT_MipsDPA>;
+def MipsDPSQX_S_W_PH : MipsDSPSideEffectBase<"DPSQX_S_W_PH", SDT_MipsDPA>;
+def MipsDPSQX_SA_W_PH : MipsDSPSideEffectBase<"DPSQX_SA_W_PH", SDT_MipsDPA>;
+def MipsMULSA_W_PH : MipsDSPBase<"MULSA_W_PH", SDT_MipsDPA>;
+
+def MipsMULT : MipsDSPBase<"MULT", SDT_MipsDPA>;
+def MipsMULTU : MipsDSPBase<"MULTU", SDT_MipsDPA>;
+def MipsMADD_DSP : MipsDSPBase<"MADD_DSP", SDT_MipsDPA>;
+def MipsMADDU_DSP : MipsDSPBase<"MADDU_DSP", SDT_MipsDPA>;
+def MipsMSUB_DSP : MipsDSPBase<"MSUB_DSP", SDT_MipsDPA>;
+def MipsMSUBU_DSP : MipsDSPBase<"MSUBU_DSP", SDT_MipsDPA>;
+
+// Flag mix-in classes. These override fields of the descriptions they are
+// mixed into: UseAC and UseDSPCtrl add implicit uses, and ClearDefs clears
+// the Defs list (dropping the implicit DSPCtrl def) for instructions that do
+// not modify it.
+class IsCommutable {
+ bit isCommutable = 1;
+}
+
+class UseAC {
+ list<Register> Uses = [AC0];
+}
+
+class UseDSPCtrl {
+ list<Register> Uses = [DSPCtrl];
+}
+
+class ClearDefs {
+ list<Register> Defs = [];
+}
+
+// Instruction encodings. Each *_ENC class instantiates a format class with
+// the function-field bits that select one instruction.
+class ADDU_QB_ENC : ADDU_QB_FMT<0b00000>;
+class ADDU_S_QB_ENC : ADDU_QB_FMT<0b00100>;
+class SUBU_QB_ENC : ADDU_QB_FMT<0b00001>;
+class SUBU_S_QB_ENC : ADDU_QB_FMT<0b00101>;
+class ADDQ_PH_ENC : ADDU_QB_FMT<0b01010>;
+class ADDQ_S_PH_ENC : ADDU_QB_FMT<0b01110>;
+class SUBQ_PH_ENC : ADDU_QB_FMT<0b01011>;
+class SUBQ_S_PH_ENC : ADDU_QB_FMT<0b01111>;
+class ADDQ_S_W_ENC : ADDU_QB_FMT<0b10110>;
+class SUBQ_S_W_ENC : ADDU_QB_FMT<0b10111>;
+class ADDSC_ENC : ADDU_QB_FMT<0b10000>;
+class ADDWC_ENC : ADDU_QB_FMT<0b10001>;
+class MODSUB_ENC : ADDU_QB_FMT<0b10010>;
+class RADDU_W_QB_ENC : RADDU_W_QB_FMT<0b10100>;
+class ABSQ_S_PH_ENC : ABSQ_S_PH_R2_FMT<0b01001>;
+class ABSQ_S_W_ENC : ABSQ_S_PH_R2_FMT<0b10001>;
+class PRECRQ_QB_PH_ENC : CMP_EQ_QB_R3_FMT<0b01100>;
+class PRECRQ_PH_W_ENC : CMP_EQ_QB_R3_FMT<0b10100>;
+class PRECRQ_RS_PH_W_ENC : CMP_EQ_QB_R3_FMT<0b10101>;
+class PRECRQU_S_QB_PH_ENC : CMP_EQ_QB_R3_FMT<0b01111>;
+class PRECEQ_W_PHL_ENC : ABSQ_S_PH_R2_FMT<0b01100>;
+class PRECEQ_W_PHR_ENC : ABSQ_S_PH_R2_FMT<0b01101>;
+class PRECEQU_PH_QBL_ENC : ABSQ_S_PH_R2_FMT<0b00100>;
+class PRECEQU_PH_QBR_ENC : ABSQ_S_PH_R2_FMT<0b00101>;
+class PRECEQU_PH_QBLA_ENC : ABSQ_S_PH_R2_FMT<0b00110>;
+class PRECEQU_PH_QBRA_ENC : ABSQ_S_PH_R2_FMT<0b00111>;
+class PRECEU_PH_QBL_ENC : ABSQ_S_PH_R2_FMT<0b11100>;
+class PRECEU_PH_QBR_ENC : ABSQ_S_PH_R2_FMT<0b11101>;
+class PRECEU_PH_QBLA_ENC : ABSQ_S_PH_R2_FMT<0b11110>;
+class PRECEU_PH_QBRA_ENC : ABSQ_S_PH_R2_FMT<0b11111>;
+class SHLL_QB_ENC : SHLL_QB_FMT<0b00000>;
+class SHLLV_QB_ENC : SHLL_QB_FMT<0b00010>;
+class SHRL_QB_ENC : SHLL_QB_FMT<0b00001>;
+class SHRLV_QB_ENC : SHLL_QB_FMT<0b00011>;
+class SHLL_PH_ENC : SHLL_QB_FMT<0b01000>;
+class SHLLV_PH_ENC : SHLL_QB_FMT<0b01010>;
+class SHLL_S_PH_ENC : SHLL_QB_FMT<0b01100>;
+class SHLLV_S_PH_ENC : SHLL_QB_FMT<0b01110>;
+class SHRA_PH_ENC : SHLL_QB_FMT<0b01001>;
+class SHRAV_PH_ENC : SHLL_QB_FMT<0b01011>;
+class SHRA_R_PH_ENC : SHLL_QB_FMT<0b01101>;
+class SHRAV_R_PH_ENC : SHLL_QB_FMT<0b01111>;
+class SHLL_S_W_ENC : SHLL_QB_FMT<0b10100>;
+class SHLLV_S_W_ENC : SHLL_QB_FMT<0b10110>;
+class SHRA_R_W_ENC : SHLL_QB_FMT<0b10101>;
+class SHRAV_R_W_ENC : SHLL_QB_FMT<0b10111>;
+class MULEU_S_PH_QBL_ENC : ADDU_QB_FMT<0b00110>;
+class MULEU_S_PH_QBR_ENC : ADDU_QB_FMT<0b00111>;
+class MULEQ_S_W_PHL_ENC : ADDU_QB_FMT<0b11100>;
+class MULEQ_S_W_PHR_ENC : ADDU_QB_FMT<0b11101>;
+class MULQ_RS_PH_ENC : ADDU_QB_FMT<0b11111>;
+class MULSAQ_S_W_PH_ENC : DPA_W_PH_FMT<0b00110>;
+class MAQ_S_W_PHL_ENC : DPA_W_PH_FMT<0b10100>;
+class MAQ_S_W_PHR_ENC : DPA_W_PH_FMT<0b10110>;
+class MAQ_SA_W_PHL_ENC : DPA_W_PH_FMT<0b10000>;
+class MAQ_SA_W_PHR_ENC : DPA_W_PH_FMT<0b10010>;
+class DPAU_H_QBL_ENC : DPA_W_PH_FMT<0b00011>;
+class DPAU_H_QBR_ENC : DPA_W_PH_FMT<0b00111>;
+class DPSU_H_QBL_ENC : DPA_W_PH_FMT<0b01011>;
+class DPSU_H_QBR_ENC : DPA_W_PH_FMT<0b01111>;
+class DPAQ_S_W_PH_ENC : DPA_W_PH_FMT<0b00100>;
+class DPSQ_S_W_PH_ENC : DPA_W_PH_FMT<0b00101>;
+class DPAQ_SA_L_W_ENC : DPA_W_PH_FMT<0b01100>;
+class DPSQ_SA_L_W_ENC : DPA_W_PH_FMT<0b01101>;
+class MULT_DSP_ENC : MULT_FMT<0b000000, 0b011000>;
+class MULTU_DSP_ENC : MULT_FMT<0b000000, 0b011001>;
+class MADD_DSP_ENC : MULT_FMT<0b011100, 0b000000>;
+class MADDU_DSP_ENC : MULT_FMT<0b011100, 0b000001>;
+class MSUB_DSP_ENC : MULT_FMT<0b011100, 0b000100>;
+class MSUBU_DSP_ENC : MULT_FMT<0b011100, 0b000101>;
+class CMPU_EQ_QB_ENC : CMP_EQ_QB_R2_FMT<0b00000>;
+class CMPU_LT_QB_ENC : CMP_EQ_QB_R2_FMT<0b00001>;
+class CMPU_LE_QB_ENC : CMP_EQ_QB_R2_FMT<0b00010>;
+class CMPGU_EQ_QB_ENC : CMP_EQ_QB_R3_FMT<0b00100>;
+class CMPGU_LT_QB_ENC : CMP_EQ_QB_R3_FMT<0b00101>;
+class CMPGU_LE_QB_ENC : CMP_EQ_QB_R3_FMT<0b00110>;
+class CMP_EQ_PH_ENC : CMP_EQ_QB_R2_FMT<0b01000>;
+class CMP_LT_PH_ENC : CMP_EQ_QB_R2_FMT<0b01001>;
+class CMP_LE_PH_ENC : CMP_EQ_QB_R2_FMT<0b01010>;
+class BITREV_ENC : ABSQ_S_PH_R2_FMT<0b11011>;
+class PACKRL_PH_ENC : CMP_EQ_QB_R3_FMT<0b01110>;
+class REPL_QB_ENC : REPL_FMT<0b00010>;
+class REPL_PH_ENC : REPL_FMT<0b01010>;
+class REPLV_QB_ENC : ABSQ_S_PH_R2_FMT<0b00011>;
+class REPLV_PH_ENC : ABSQ_S_PH_R2_FMT<0b01011>;
+class PICK_QB_ENC : CMP_EQ_QB_R3_FMT<0b00011>;
+class PICK_PH_ENC : CMP_EQ_QB_R3_FMT<0b01011>;
+class LWX_ENC : LX_FMT<0b00000>;
+class LHX_ENC : LX_FMT<0b00100>;
+class LBUX_ENC : LX_FMT<0b00110>;
+class BPOSGE32_ENC : BPOSGE32_FMT<0b11100>;
+class INSV_ENC : INSV_FMT<0b001100>;
+
+class EXTP_ENC : EXTR_W_TY1_FMT<0b00010>;
+class EXTPV_ENC : EXTR_W_TY1_FMT<0b00011>;
+class EXTPDP_ENC : EXTR_W_TY1_FMT<0b01010>;
+class EXTPDPV_ENC : EXTR_W_TY1_FMT<0b01011>;
+class EXTR_W_ENC : EXTR_W_TY1_FMT<0b00000>;
+class EXTRV_W_ENC : EXTR_W_TY1_FMT<0b00001>;
+class EXTR_R_W_ENC : EXTR_W_TY1_FMT<0b00100>;
+class EXTRV_R_W_ENC : EXTR_W_TY1_FMT<0b00101>;
+class EXTR_RS_W_ENC : EXTR_W_TY1_FMT<0b00110>;
+class EXTRV_RS_W_ENC : EXTR_W_TY1_FMT<0b00111>;
+class EXTR_S_H_ENC : EXTR_W_TY1_FMT<0b01110>;
+class EXTRV_S_H_ENC : EXTR_W_TY1_FMT<0b01111>;
+class SHILO_ENC : SHILO_R1_FMT<0b11010>;
+class SHILOV_ENC : SHILO_R2_FMT<0b11011>;
+class MTHLIP_ENC : SHILO_R2_FMT<0b11111>;
+
+class RDDSP_ENC : RDDSP_FMT<0b10010>;
+class WRDSP_ENC : WRDSP_FMT<0b10011>;
+class ADDU_PH_ENC : ADDU_QB_FMT<0b01000>;
+class ADDU_S_PH_ENC : ADDU_QB_FMT<0b01100>;
+class SUBU_PH_ENC : ADDU_QB_FMT<0b01001>;
+class SUBU_S_PH_ENC : ADDU_QB_FMT<0b01101>;
+class CMPGDU_EQ_QB_ENC : CMP_EQ_QB_R3_FMT<0b11000>;
+class CMPGDU_LT_QB_ENC : CMP_EQ_QB_R3_FMT<0b11001>;
+class CMPGDU_LE_QB_ENC : CMP_EQ_QB_R3_FMT<0b11010>;
+class ABSQ_S_QB_ENC : ABSQ_S_PH_R2_FMT<0b00001>;
+class ADDUH_QB_ENC : ADDUH_QB_FMT<0b00000>;
+class ADDUH_R_QB_ENC : ADDUH_QB_FMT<0b00010>;
+class SUBUH_QB_ENC : ADDUH_QB_FMT<0b00001>;
+class SUBUH_R_QB_ENC : ADDUH_QB_FMT<0b00011>;
+class ADDQH_PH_ENC : ADDUH_QB_FMT<0b01000>;
+class ADDQH_R_PH_ENC : ADDUH_QB_FMT<0b01010>;
+class SUBQH_PH_ENC : ADDUH_QB_FMT<0b01001>;
+class SUBQH_R_PH_ENC : ADDUH_QB_FMT<0b01011>;
+class ADDQH_W_ENC : ADDUH_QB_FMT<0b10000>;
+class ADDQH_R_W_ENC : ADDUH_QB_FMT<0b10010>;
+class SUBQH_W_ENC : ADDUH_QB_FMT<0b10001>;
+class SUBQH_R_W_ENC : ADDUH_QB_FMT<0b10011>;
+class MUL_PH_ENC : ADDUH_QB_FMT<0b01100>;
+class MUL_S_PH_ENC : ADDUH_QB_FMT<0b01110>;
+class MULQ_S_W_ENC : ADDUH_QB_FMT<0b10110>;
+class MULQ_RS_W_ENC : ADDUH_QB_FMT<0b10111>;
+class MULQ_S_PH_ENC : ADDU_QB_FMT<0b11110>;
+class DPA_W_PH_ENC : DPA_W_PH_FMT<0b00000>;
+class DPS_W_PH_ENC : DPA_W_PH_FMT<0b00001>;
+class DPAQX_S_W_PH_ENC : DPA_W_PH_FMT<0b11000>;
+class DPAQX_SA_W_PH_ENC : DPA_W_PH_FMT<0b11010>;
+class DPAX_W_PH_ENC : DPA_W_PH_FMT<0b01000>;
+class DPSX_W_PH_ENC : DPA_W_PH_FMT<0b01001>;
+class DPSQX_S_W_PH_ENC : DPA_W_PH_FMT<0b11001>;
+class DPSQX_SA_W_PH_ENC : DPA_W_PH_FMT<0b11011>;
+class MULSA_W_PH_ENC : DPA_W_PH_FMT<0b00010>;
+class PRECR_QB_PH_ENC : CMP_EQ_QB_R3_FMT<0b01101>;
+class PRECR_SRA_PH_W_ENC : PRECR_SRA_PH_W_FMT<0b11110>;
+class PRECR_SRA_R_PH_W_ENC : PRECR_SRA_PH_W_FMT<0b11111>;
+class SHRA_QB_ENC : SHLL_QB_FMT<0b00100>;
+class SHRAV_QB_ENC : SHLL_QB_FMT<0b00110>;
+class SHRA_R_QB_ENC : SHLL_QB_FMT<0b00101>;
+class SHRAV_R_QB_ENC : SHLL_QB_FMT<0b00111>;
+class SHRL_PH_ENC : SHLL_QB_FMT<0b11001>;
+class SHRLV_PH_ENC : SHLL_QB_FMT<0b11011>;
+class APPEND_ENC : APPEND_FMT<0b00000>;
+class BALIGN_ENC : APPEND_FMT<0b10000>;
+class PREPEND_ENC : APPEND_FMT<0b00001>;
+
+// Instruction description base classes. Each supplies the operand lists,
+// assembly string, selection pattern and itinerary shared by a family of
+// instructions.
+class ADDU_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin, RegisterClass RCD,
+ RegisterClass RCS, RegisterClass RCT = RCS> {
+ dag OutOperandList = (outs RCD:$rd);
+ dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
+ list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class RADDU_W_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin, RegisterClass RCD,
+ RegisterClass RCS = RCD> {
+ dag OutOperandList = (outs RCD:$rd);
+ dag InOperandList = (ins RCS:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs");
+ list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class CMP_EQ_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin, RegisterClass RCS,
+ RegisterClass RCT = RCS> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $rt");
+ list<dag> Pattern = [(OpNode RCS:$rs, RCT:$rt)];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class CMP_EQ_QB_R3_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin, RegisterClass RCD,
+ RegisterClass RCS, RegisterClass RCT = RCS> {
+ dag OutOperandList = (outs RCD:$rd);
+ dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
+ list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class PRECR_SRA_PH_W_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin, RegisterClass RCT,
+ RegisterClass RCS = RCT> {
+ dag OutOperandList = (outs RCT:$rt);
+ dag InOperandList = (ins RCS:$rs, shamt:$sa, RCS:$src);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
+ list<dag> Pattern = [(set RCT:$rt, (OpNode RCS:$src, RCS:$rs, immZExt5:$sa))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
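+  // The destination register is also an input, so tie $src to $rt.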
+ string Constraints = "$src = $rt";
+}
+
+class ABSQ_S_PH_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin, RegisterClass RCD,
+ RegisterClass RCT = RCD> {
+ dag OutOperandList = (outs RCD:$rd);
+ dag InOperandList = (ins RCT:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rt");
+ list<dag> Pattern = [(set RCD:$rd, (OpNode RCT:$rt))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class REPL_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ImmLeaf immPat, InstrItinClass itin, RegisterClass RC> {
+ dag OutOperandList = (outs RC:$rd);
+ dag InOperandList = (ins uimm16:$imm);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $imm");
+ list<dag> Pattern = [(set RC:$rd, (OpNode immPat:$imm))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class SHLL_QB_R3_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin, RegisterClass RC> {
+ dag OutOperandList = (outs RC:$rd);
+ dag InOperandList = (ins RC:$rt, CPURegs:$rs_sa);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa");
+ list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs_sa))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class SHLL_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ SDPatternOperator ImmPat, InstrItinClass itin,
+ RegisterClass RC> {
+ dag OutOperandList = (outs RC:$rd);
+ dag InOperandList = (ins RC:$rt, uimm16:$rs_sa);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa");
+ list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, ImmPat:$rs_sa))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class LX_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin> {
+ dag OutOperandList = (outs CPURegs:$rd);
+ dag InOperandList = (ins CPURegs:$base, CPURegs:$index);
+ string AsmString = !strconcat(instr_asm, "\t$rd, ${index}(${base})");
+ list<dag> Pattern = [(set CPURegs:$rd,
+ (OpNode CPURegs:$base, CPURegs:$index))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+ bit mayLoad = 1;
+}
+
+class ADDUH_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin, RegisterClass RCD,
+ RegisterClass RCS = RCD, RegisterClass RCT = RCD> {
+ dag OutOperandList = (outs RCD:$rd);
+ dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
+ list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class APPEND_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ SDPatternOperator ImmOp, InstrItinClass itin> {
+ dag OutOperandList = (outs CPURegs:$rt);
+ dag InOperandList = (ins CPURegs:$rs, shamt:$sa, CPURegs:$src);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
+ list<dag> Pattern = [(set CPURegs:$rt,
+ (OpNode CPURegs:$src, CPURegs:$rs, ImmOp:$sa))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
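+  // append/balign/prepend modify their destination, so tie $src to $rt.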
+ string Constraints = "$src = $rt";
+}
+
+class EXTR_W_TY1_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin> {
+ dag OutOperandList = (outs CPURegs:$rt);
+ dag InOperandList = (ins ACRegs:$ac, CPURegs:$shift_rs);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs");
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class EXTR_W_TY1_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin> {
+ dag OutOperandList = (outs CPURegs:$rt);
+ dag InOperandList = (ins ACRegs:$ac, uimm16:$shift_rs);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs");
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
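+// Pseudo-instruction bases. Selection matches on the pseudo; it is later
+// expanded to the real instruction with AC0 as the accumulator operand.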
+class SHILO_R1_PSEUDO_BASE<SDPatternOperator OpNode, InstrItinClass itin,
+ Instruction realinst> :
+ PseudoDSP<(outs), (ins simm16:$shift), [(OpNode immSExt6:$shift)]>,
+ PseudoInstExpansion<(realinst AC0, simm16:$shift)> {
+ list<Register> Defs = [DSPCtrl, AC0];
+ list<Register> Uses = [AC0];
+ InstrItinClass Itinerary = itin;
+}
+
+class SHILO_R1_DESC_BASE<string instr_asm> {
+ dag OutOperandList = (outs ACRegs:$ac);
+ dag InOperandList = (ins simm16:$shift);
+ string AsmString = !strconcat(instr_asm, "\t$ac, $shift");
+}
+
+class SHILO_R2_PSEUDO_BASE<SDPatternOperator OpNode, InstrItinClass itin,
+ Instruction realinst> :
+ PseudoDSP<(outs), (ins CPURegs:$rs), [(OpNode CPURegs:$rs)]>,
+ PseudoInstExpansion<(realinst AC0, CPURegs:$rs)> {
+ list<Register> Defs = [DSPCtrl, AC0];
+ list<Register> Uses = [AC0];
+ InstrItinClass Itinerary = itin;
+}
+
+class SHILO_R2_DESC_BASE<string instr_asm> {
+ dag OutOperandList = (outs ACRegs:$ac);
+ dag InOperandList = (ins CPURegs:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$ac, $rs");
+}
+
+class MTHLIP_DESC_BASE<string instr_asm> {
+ dag OutOperandList = (outs ACRegs:$ac);
+ dag InOperandList = (ins CPURegs:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $ac");
+}
+
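+// rddsp reads the DSP control fields selected by $mask and wrdsp writes them,
+// so the former uses DSPCtrl and the latter defines it.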
+class RDDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin> {
+ dag OutOperandList = (outs CPURegs:$rd);
+ dag InOperandList = (ins uimm16:$mask);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $mask");
+ list<dag> Pattern = [(set CPURegs:$rd, (OpNode immZExt10:$mask))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Uses = [DSPCtrl];
+}
+
+class WRDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins CPURegs:$rs, uimm16:$mask);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $mask");
+ list<dag> Pattern = [(OpNode CPURegs:$rs, immZExt10:$mask)];
+ InstrItinClass Itinerary = itin;
+ list<Register> Defs = [DSPCtrl];
+}
+
+class DPA_W_PH_PSEUDO_BASE<SDPatternOperator OpNode, InstrItinClass itin,
+ Instruction realinst> :
+ PseudoDSP<(outs), (ins CPURegs:$rs, CPURegs:$rt),
+ [(OpNode CPURegs:$rs, CPURegs:$rt)]>,
+ PseudoInstExpansion<(realinst AC0, CPURegs:$rs, CPURegs:$rt)> {
+ list<Register> Defs = [DSPCtrl, AC0];
+ list<Register> Uses = [AC0];
+ InstrItinClass Itinerary = itin;
+}
+
+class DPA_W_PH_DESC_BASE<string instr_asm> {
+ dag OutOperandList = (outs ACRegs:$ac);
+ dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt");
+}
+
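+// Plain multiplies overwrite the accumulator, so AC0 is defined but not used
+// here; the madd/msub pseudos add the implicit use back via UseAC.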
+class MULT_PSEUDO_BASE<SDPatternOperator OpNode, InstrItinClass itin,
+ Instruction realinst> :
+ PseudoDSP<(outs), (ins CPURegs:$rs, CPURegs:$rt),
+ [(OpNode CPURegs:$rs, CPURegs:$rt)]>,
+ PseudoInstExpansion<(realinst AC0, CPURegs:$rs, CPURegs:$rt)> {
+ list<Register> Defs = [DSPCtrl, AC0];
+ InstrItinClass Itinerary = itin;
+}
+
+class MULT_DESC_BASE<string instr_asm> {
+ dag OutOperandList = (outs ACRegs:$ac);
+ dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt");
+}
+
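+// Pseudo that materializes the result of bposge32 in a GPR; a custom inserter
+// expands it into the branch sequence.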
+class BPOSGE32_PSEUDO_DESC_BASE<SDPatternOperator OpNode, InstrItinClass itin> :
+ MipsPseudo<(outs CPURegs:$dst), (ins), "", [(set CPURegs:$dst, (OpNode))]> {
+ list<Register> Uses = [DSPCtrl];
+ bit usesCustomInserter = 1;
+}
+
+class BPOSGE32_DESC_BASE<string instr_asm, InstrItinClass itin> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins brtarget:$offset);
+ string AsmString = !strconcat(instr_asm, "\t$offset");
+ InstrItinClass Itinerary = itin;
+ list<Register> Uses = [DSPCtrl];
+ bit isBranch = 1;
+ bit isTerminator = 1;
+ bit hasDelaySlot = 1;
+}
+
+class INSV_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ InstrItinClass itin> {
+ dag OutOperandList = (outs CPURegs:$rt);
+ dag InOperandList = (ins CPURegs:$src, CPURegs:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $rs");
+ list<dag> Pattern = [(set CPURegs:$rt, (OpNode CPURegs:$src, CPURegs:$rs))];
+ InstrItinClass Itinerary = itin;
+ list<Register> Uses = [DSPCtrl];
+ string Constraints = "$src = $rt";
+}
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 1
+//===----------------------------------------------------------------------===//
+
+// Addition/subtraction
+class ADDU_QB_DESC : ADDU_QB_DESC_BASE<"addu.qb", int_mips_addu_qb, NoItinerary,
+ DSPRegs, DSPRegs>, IsCommutable;
+
+class ADDU_S_QB_DESC : ADDU_QB_DESC_BASE<"addu_s.qb", int_mips_addu_s_qb,
+ NoItinerary, DSPRegs, DSPRegs>,
+ IsCommutable;
+
+class SUBU_QB_DESC : ADDU_QB_DESC_BASE<"subu.qb", int_mips_subu_qb, NoItinerary,
+ DSPRegs, DSPRegs>;
+
+class SUBU_S_QB_DESC : ADDU_QB_DESC_BASE<"subu_s.qb", int_mips_subu_s_qb,
+ NoItinerary, DSPRegs, DSPRegs>;
+
+class ADDQ_PH_DESC : ADDU_QB_DESC_BASE<"addq.ph", int_mips_addq_ph, NoItinerary,
+ DSPRegs, DSPRegs>, IsCommutable;
+
+class ADDQ_S_PH_DESC : ADDU_QB_DESC_BASE<"addq_s.ph", int_mips_addq_s_ph,
+ NoItinerary, DSPRegs, DSPRegs>,
+ IsCommutable;
+
+class SUBQ_PH_DESC : ADDU_QB_DESC_BASE<"subq.ph", int_mips_subq_ph, NoItinerary,
+ DSPRegs, DSPRegs>;
+
+class SUBQ_S_PH_DESC : ADDU_QB_DESC_BASE<"subq_s.ph", int_mips_subq_s_ph,
+ NoItinerary, DSPRegs, DSPRegs>;
+
+class ADDQ_S_W_DESC : ADDU_QB_DESC_BASE<"addq_s.w", int_mips_addq_s_w,
+ NoItinerary, CPURegs, CPURegs>,
+ IsCommutable;
+
+class SUBQ_S_W_DESC : ADDU_QB_DESC_BASE<"subq_s.w", int_mips_subq_s_w,
+ NoItinerary, CPURegs, CPURegs>;
+
+class ADDSC_DESC : ADDU_QB_DESC_BASE<"addsc", int_mips_addsc, NoItinerary,
+ CPURegs, CPURegs>, IsCommutable;
+
+class ADDWC_DESC : ADDU_QB_DESC_BASE<"addwc", int_mips_addwc, NoItinerary,
+ CPURegs, CPURegs>,
+ IsCommutable, UseDSPCtrl;
+
+class MODSUB_DESC : ADDU_QB_DESC_BASE<"modsub", int_mips_modsub, NoItinerary,
+ CPURegs, CPURegs>, ClearDefs;
+
+class RADDU_W_QB_DESC : RADDU_W_QB_DESC_BASE<"raddu.w.qb", int_mips_raddu_w_qb,
+ NoItinerary, CPURegs, DSPRegs>,
+ ClearDefs;
+
+// Absolute value
+class ABSQ_S_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.ph", int_mips_absq_s_ph,
+ NoItinerary, DSPRegs>;
+
+class ABSQ_S_W_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.w", int_mips_absq_s_w,
+ NoItinerary, CPURegs>;
+
+// Precision reduce/expand
+class PRECRQ_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.qb.ph",
+ int_mips_precrq_qb_ph,
+ NoItinerary, DSPRegs, DSPRegs>,
+ ClearDefs;
+
+class PRECRQ_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.ph.w",
+ int_mips_precrq_ph_w,
+ NoItinerary, DSPRegs, CPURegs>,
+ ClearDefs;
+
+class PRECRQ_RS_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq_rs.ph.w",
+ int_mips_precrq_rs_ph_w,
+ NoItinerary, DSPRegs,
+ CPURegs>;
+
+class PRECRQU_S_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrqu_s.qb.ph",
+ int_mips_precrqu_s_qb_ph,
+ NoItinerary, DSPRegs,
+ DSPRegs>;
+
+class PRECEQ_W_PHL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phl",
+ int_mips_preceq_w_phl,
+ NoItinerary, CPURegs, DSPRegs>,
+ ClearDefs;
+
+class PRECEQ_W_PHR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phr",
+ int_mips_preceq_w_phr,
+ NoItinerary, CPURegs, DSPRegs>,
+ ClearDefs;
+
+class PRECEQU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbl",
+ int_mips_precequ_ph_qbl,
+ NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class PRECEQU_PH_QBR_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbr",
+ int_mips_precequ_ph_qbr,
+ NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class PRECEQU_PH_QBLA_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbla",
+ int_mips_precequ_ph_qbla,
+ NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class PRECEQU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbra",
+ int_mips_precequ_ph_qbra,
+ NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class PRECEU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbl",
+ int_mips_preceu_ph_qbl,
+ NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class PRECEU_PH_QBR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbr",
+ int_mips_preceu_ph_qbr,
+ NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class PRECEU_PH_QBLA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbla",
+ int_mips_preceu_ph_qbla,
+ NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class PRECEU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbra",
+ int_mips_preceu_ph_qbra,
+ NoItinerary, DSPRegs>,
+ ClearDefs;
+
+// Shift
+class SHLL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shll.qb", int_mips_shll_qb, immZExt3,
+ NoItinerary, DSPRegs>;
+
+class SHLLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shllv.qb", int_mips_shll_qb,
+ NoItinerary, DSPRegs>;
+
+class SHRL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shrl.qb", int_mips_shrl_qb, immZExt3,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHRLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.qb", int_mips_shrl_qb,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHLL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll.ph", int_mips_shll_ph, immZExt4,
+ NoItinerary, DSPRegs>;
+
+class SHLLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv.ph", int_mips_shll_ph,
+ NoItinerary, DSPRegs>;
+
+class SHLL_S_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.ph", int_mips_shll_s_ph,
+ immZExt4, NoItinerary, DSPRegs>;
+
+class SHLLV_S_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.ph", int_mips_shll_s_ph,
+ NoItinerary, DSPRegs>;
+
+class SHRA_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra.ph", int_mips_shra_ph, immZExt4,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHRAV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav.ph", int_mips_shra_ph,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHRA_R_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.ph", int_mips_shra_r_ph,
+ immZExt4, NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class SHRAV_R_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.ph", int_mips_shra_r_ph,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHLL_S_W_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.w", int_mips_shll_s_w,
+ immZExt5, NoItinerary, CPURegs>;
+
+class SHLLV_S_W_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.w", int_mips_shll_s_w,
+ NoItinerary, CPURegs>;
+
+class SHRA_R_W_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.w", int_mips_shra_r_w,
+ immZExt5, NoItinerary, CPURegs>,
+ ClearDefs;
+
+class SHRAV_R_W_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.w", int_mips_shra_r_w,
+ NoItinerary, CPURegs>;
+
+// Multiplication
+class MULEU_S_PH_QBL_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbl",
+ int_mips_muleu_s_ph_qbl,
+ NoItinerary, DSPRegs, DSPRegs>;
+
+class MULEU_S_PH_QBR_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbr",
+ int_mips_muleu_s_ph_qbr,
+ NoItinerary, DSPRegs, DSPRegs>;
+
+class MULEQ_S_W_PHL_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phl",
+ int_mips_muleq_s_w_phl,
+ NoItinerary, CPURegs, DSPRegs>,
+ IsCommutable;
+
+class MULEQ_S_W_PHR_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phr",
+ int_mips_muleq_s_w_phr,
+ NoItinerary, CPURegs, DSPRegs>,
+ IsCommutable;
+
+class MULQ_RS_PH_DESC : ADDU_QB_DESC_BASE<"mulq_rs.ph", int_mips_mulq_rs_ph,
+ NoItinerary, DSPRegs, DSPRegs>,
+ IsCommutable;
+
+class MULSAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsaq_s.w.ph">;
+
+class MAQ_S_W_PHL_DESC : DPA_W_PH_DESC_BASE<"maq_s.w.phl">;
+
+class MAQ_S_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_s.w.phr">;
+
+class MAQ_SA_W_PHL_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phl">;
+
+class MAQ_SA_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phr">;
+
+// Dot product with accumulate/subtract
+class DPAU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbl">;
+
+class DPAU_H_QBR_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbr">;
+
+class DPSU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpsu.h.qbl">;
+
+class DPSU_H_QBR_DESC : DPA_W_PH_DESC_BASE<"dpsu.h.qbr">;
+
+class DPAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaq_s.w.ph">;
+
+class DPSQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsq_s.w.ph">;
+
+class DPAQ_SA_L_W_DESC : DPA_W_PH_DESC_BASE<"dpaq_sa.l.w">;
+
+class DPSQ_SA_L_W_DESC : DPA_W_PH_DESC_BASE<"dpsq_sa.l.w">;
+
+class MULT_DSP_DESC : MULT_DESC_BASE<"mult">;
+
+class MULTU_DSP_DESC : MULT_DESC_BASE<"multu">;
+
+class MADD_DSP_DESC : MULT_DESC_BASE<"madd">;
+
+class MADDU_DSP_DESC : MULT_DESC_BASE<"maddu">;
+
+class MSUB_DSP_DESC : MULT_DESC_BASE<"msub">;
+
+class MSUBU_DSP_DESC : MULT_DESC_BASE<"msubu">;
+
+// Comparison
+class CMPU_EQ_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.eq.qb",
+ int_mips_cmpu_eq_qb, NoItinerary,
+ DSPRegs>, IsCommutable;
+
+class CMPU_LT_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.lt.qb",
+ int_mips_cmpu_lt_qb, NoItinerary,
+ DSPRegs>, IsCommutable;
+
+class CMPU_LE_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.le.qb",
+ int_mips_cmpu_le_qb, NoItinerary,
+ DSPRegs>, IsCommutable;
+
+class CMPGU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.eq.qb",
+ int_mips_cmpgu_eq_qb,
+ NoItinerary, CPURegs, DSPRegs>,
+ IsCommutable;
+
+class CMPGU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.lt.qb",
+ int_mips_cmpgu_lt_qb,
+ NoItinerary, CPURegs, DSPRegs>,
+ IsCommutable;
+
+class CMPGU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.le.qb",
+ int_mips_cmpgu_le_qb,
+ NoItinerary, CPURegs, DSPRegs>,
+ IsCommutable;
+
+class CMP_EQ_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.eq.ph", int_mips_cmp_eq_ph,
+ NoItinerary, DSPRegs>,
+ IsCommutable;
+
+class CMP_LT_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.lt.ph", int_mips_cmp_lt_ph,
+ NoItinerary, DSPRegs>,
+ IsCommutable;
+
+class CMP_LE_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.le.ph", int_mips_cmp_le_ph,
+ NoItinerary, DSPRegs>,
+ IsCommutable;
+
+// Misc
+class BITREV_DESC : ABSQ_S_PH_R2_DESC_BASE<"bitrev", int_mips_bitrev,
+ NoItinerary, CPURegs>, ClearDefs;
+
+class PACKRL_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"packrl.ph", int_mips_packrl_ph,
+ NoItinerary, DSPRegs, DSPRegs>,
+ ClearDefs;
+
+class REPL_QB_DESC : REPL_DESC_BASE<"repl.qb", int_mips_repl_qb, immZExt8,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class REPL_PH_DESC : REPL_DESC_BASE<"repl.ph", int_mips_repl_ph, immZExt10,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class REPLV_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.qb", int_mips_repl_qb,
+ NoItinerary, DSPRegs, CPURegs>,
+ ClearDefs;
+
+class REPLV_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.ph", int_mips_repl_ph,
+ NoItinerary, DSPRegs, CPURegs>,
+ ClearDefs;
+
+class PICK_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.qb", int_mips_pick_qb,
+ NoItinerary, DSPRegs, DSPRegs>,
+ ClearDefs, UseDSPCtrl;
+
+class PICK_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.ph", int_mips_pick_ph,
+ NoItinerary, DSPRegs, DSPRegs>,
+ ClearDefs, UseDSPCtrl;
+
+class LWX_DESC : LX_DESC_BASE<"lwx", int_mips_lwx, NoItinerary>, ClearDefs;
+
+class LHX_DESC : LX_DESC_BASE<"lhx", int_mips_lhx, NoItinerary>, ClearDefs;
+
+class LBUX_DESC : LX_DESC_BASE<"lbux", int_mips_lbux, NoItinerary>, ClearDefs;
+
+class BPOSGE32_DESC : BPOSGE32_DESC_BASE<"bposge32", NoItinerary>;
+
+// Extr
+class EXTP_DESC : EXTR_W_TY1_R1_DESC_BASE<"extp", MipsEXTP, NoItinerary>;
+
+class EXTPV_DESC : EXTR_W_TY1_R2_DESC_BASE<"extpv", MipsEXTP, NoItinerary>;
+
+class EXTPDP_DESC : EXTR_W_TY1_R1_DESC_BASE<"extpdp", MipsEXTPDP, NoItinerary>;
+
+class EXTPDPV_DESC : EXTR_W_TY1_R2_DESC_BASE<"extpdpv", MipsEXTPDP,
+ NoItinerary>;
+
+class EXTR_W_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr.w", MipsEXTR_W, NoItinerary>;
+
+class EXTRV_W_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv.w", MipsEXTR_W,
+ NoItinerary>;
+
+class EXTR_R_W_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr_r.w", MipsEXTR_R_W,
+ NoItinerary>;
+
+class EXTRV_R_W_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv_r.w", MipsEXTR_R_W,
+ NoItinerary>;
+
+class EXTR_RS_W_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr_rs.w", MipsEXTR_RS_W,
+ NoItinerary>;
+
+class EXTRV_RS_W_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv_rs.w", MipsEXTR_RS_W,
+ NoItinerary>;
+
+class EXTR_S_H_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr_s.h", MipsEXTR_S_H,
+ NoItinerary>;
+
+class EXTRV_S_H_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv_s.h", MipsEXTR_S_H,
+ NoItinerary>;
+
+class SHILO_DESC : SHILO_R1_DESC_BASE<"shilo">;
+
+class SHILOV_DESC : SHILO_R2_DESC_BASE<"shilov">;
+
+class MTHLIP_DESC : MTHLIP_DESC_BASE<"mthlip">;
+
+class RDDSP_DESC : RDDSP_DESC_BASE<"rddsp", int_mips_rddsp, NoItinerary>;
+
+class WRDSP_DESC : WRDSP_DESC_BASE<"wrdsp", int_mips_wrdsp, NoItinerary>;
+
+class INSV_DESC : INSV_DESC_BASE<"insv", int_mips_insv, NoItinerary>;
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 2
+//===----------------------------------------------------------------------===//
+
+// Addition/subtraction
+class ADDU_PH_DESC : ADDU_QB_DESC_BASE<"addu.ph", int_mips_addu_ph, NoItinerary,
+ DSPRegs, DSPRegs>, IsCommutable;
+
+class ADDU_S_PH_DESC : ADDU_QB_DESC_BASE<"addu_s.ph", int_mips_addu_s_ph,
+ NoItinerary, DSPRegs, DSPRegs>,
+ IsCommutable;
+
+class SUBU_PH_DESC : ADDU_QB_DESC_BASE<"subu.ph", int_mips_subu_ph, NoItinerary,
+ DSPRegs, DSPRegs>;
+
+class SUBU_S_PH_DESC : ADDU_QB_DESC_BASE<"subu_s.ph", int_mips_subu_s_ph,
+ NoItinerary, DSPRegs, DSPRegs>;
+
+class ADDUH_QB_DESC : ADDUH_QB_DESC_BASE<"adduh.qb", int_mips_adduh_qb,
+ NoItinerary, DSPRegs>,
+ ClearDefs, IsCommutable;
+
+class ADDUH_R_QB_DESC : ADDUH_QB_DESC_BASE<"adduh_r.qb", int_mips_adduh_r_qb,
+ NoItinerary, DSPRegs>,
+ ClearDefs, IsCommutable;
+
+class SUBUH_QB_DESC : ADDUH_QB_DESC_BASE<"subuh.qb", int_mips_subuh_qb,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SUBUH_R_QB_DESC : ADDUH_QB_DESC_BASE<"subuh_r.qb", int_mips_subuh_r_qb,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class ADDQH_PH_DESC : ADDUH_QB_DESC_BASE<"addqh.ph", int_mips_addqh_ph,
+ NoItinerary, DSPRegs>,
+ ClearDefs, IsCommutable;
+
+class ADDQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"addqh_r.ph", int_mips_addqh_r_ph,
+ NoItinerary, DSPRegs>,
+ ClearDefs, IsCommutable;
+
+class SUBQH_PH_DESC : ADDUH_QB_DESC_BASE<"subqh.ph", int_mips_subqh_ph,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SUBQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"subqh_r.ph", int_mips_subqh_r_ph,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class ADDQH_W_DESC : ADDUH_QB_DESC_BASE<"addqh.w", int_mips_addqh_w,
+ NoItinerary, CPURegs>,
+ ClearDefs, IsCommutable;
+
+class ADDQH_R_W_DESC : ADDUH_QB_DESC_BASE<"addqh_r.w", int_mips_addqh_r_w,
+ NoItinerary, CPURegs>,
+ ClearDefs, IsCommutable;
+
+class SUBQH_W_DESC : ADDUH_QB_DESC_BASE<"subqh.w", int_mips_subqh_w,
+ NoItinerary, CPURegs>, ClearDefs;
+
+class SUBQH_R_W_DESC : ADDUH_QB_DESC_BASE<"subqh_r.w", int_mips_subqh_r_w,
+ NoItinerary, CPURegs>, ClearDefs;
+
+// Comparison
+class CMPGDU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.eq.qb",
+ int_mips_cmpgdu_eq_qb,
+ NoItinerary, CPURegs, DSPRegs>,
+ IsCommutable;
+
+class CMPGDU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.lt.qb",
+ int_mips_cmpgdu_lt_qb,
+ NoItinerary, CPURegs, DSPRegs>,
+ IsCommutable;
+
+class CMPGDU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.le.qb",
+ int_mips_cmpgdu_le_qb,
+ NoItinerary, CPURegs, DSPRegs>,
+ IsCommutable;
+
+// Absolute
+class ABSQ_S_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.qb", int_mips_absq_s_qb,
+ NoItinerary, DSPRegs>;
+
+// Multiplication
+class MUL_PH_DESC : ADDUH_QB_DESC_BASE<"mul.ph", int_mips_mul_ph, NoItinerary,
+ DSPRegs>, IsCommutable;
+
+class MUL_S_PH_DESC : ADDUH_QB_DESC_BASE<"mul_s.ph", int_mips_mul_s_ph,
+ NoItinerary, DSPRegs>, IsCommutable;
+
+class MULQ_S_W_DESC : ADDUH_QB_DESC_BASE<"mulq_s.w", int_mips_mulq_s_w,
+ NoItinerary, CPURegs>, IsCommutable;
+
+class MULQ_RS_W_DESC : ADDUH_QB_DESC_BASE<"mulq_rs.w", int_mips_mulq_rs_w,
+ NoItinerary, CPURegs>, IsCommutable;
+
+class MULQ_S_PH_DESC : ADDU_QB_DESC_BASE<"mulq_s.ph", int_mips_mulq_s_ph,
+ NoItinerary, DSPRegs, DSPRegs>,
+ IsCommutable;
+
+// Dot product with accumulate/subtract
+class DPA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpa.w.ph">;
+
+class DPS_W_PH_DESC : DPA_W_PH_DESC_BASE<"dps.w.ph">;
+
+class DPAQX_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaqx_s.w.ph">;
+
+class DPAQX_SA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaqx_sa.w.ph">;
+
+class DPAX_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpax.w.ph">;
+
+class DPSX_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsx.w.ph">;
+
+class DPSQX_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsqx_s.w.ph">;
+
+class DPSQX_SA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsqx_sa.w.ph">;
+
+class MULSA_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsa.w.ph">;
+
+// Precision reduce/expand
+class PRECR_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precr.qb.ph",
+ int_mips_precr_qb_ph,
+ NoItinerary, DSPRegs, DSPRegs>;
+
+class PRECR_SRA_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra.ph.w",
+ int_mips_precr_sra_ph_w,
+ NoItinerary, DSPRegs,
+ CPURegs>, ClearDefs;
+
+class PRECR_SRA_R_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra_r.ph.w",
+ int_mips_precr_sra_r_ph_w,
+ NoItinerary, DSPRegs,
+ CPURegs>, ClearDefs;
+
+// Shift
+class SHRA_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra.qb", int_mips_shra_qb, immZExt3,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHRAV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav.qb", int_mips_shra_qb,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHRA_R_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.qb", int_mips_shra_r_qb,
+ immZExt3, NoItinerary, DSPRegs>,
+ ClearDefs;
+
+class SHRAV_R_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.qb", int_mips_shra_r_qb,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHRL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shrl.ph", int_mips_shrl_ph, immZExt4,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+class SHRLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.ph", int_mips_shrl_ph,
+ NoItinerary, DSPRegs>, ClearDefs;
+
+// Misc
+class APPEND_DESC : APPEND_DESC_BASE<"append", int_mips_append, immZExt5,
+ NoItinerary>, ClearDefs;
+
+class BALIGN_DESC : APPEND_DESC_BASE<"balign", int_mips_balign, immZExt2,
+ NoItinerary>, ClearDefs;
+
+class PREPEND_DESC : APPEND_DESC_BASE<"prepend", int_mips_prepend, immZExt5,
+ NoItinerary>, ClearDefs;
+
+// Pseudos.
+def BPOSGE32_PSEUDO : BPOSGE32_PSEUDO_DESC_BASE<int_mips_bposge32, NoItinerary>;
+
+// Instruction defs.
+// MIPS DSP Rev 1
+def ADDU_QB : ADDU_QB_ENC, ADDU_QB_DESC;
+def ADDU_S_QB : ADDU_S_QB_ENC, ADDU_S_QB_DESC;
+def SUBU_QB : SUBU_QB_ENC, SUBU_QB_DESC;
+def SUBU_S_QB : SUBU_S_QB_ENC, SUBU_S_QB_DESC;
+def ADDQ_PH : ADDQ_PH_ENC, ADDQ_PH_DESC;
+def ADDQ_S_PH : ADDQ_S_PH_ENC, ADDQ_S_PH_DESC;
+def SUBQ_PH : SUBQ_PH_ENC, SUBQ_PH_DESC;
+def SUBQ_S_PH : SUBQ_S_PH_ENC, SUBQ_S_PH_DESC;
+def ADDQ_S_W : ADDQ_S_W_ENC, ADDQ_S_W_DESC;
+def SUBQ_S_W : SUBQ_S_W_ENC, SUBQ_S_W_DESC;
+def ADDSC : ADDSC_ENC, ADDSC_DESC;
+def ADDWC : ADDWC_ENC, ADDWC_DESC;
+def MODSUB : MODSUB_ENC, MODSUB_DESC;
+def RADDU_W_QB : RADDU_W_QB_ENC, RADDU_W_QB_DESC;
+def ABSQ_S_PH : ABSQ_S_PH_ENC, ABSQ_S_PH_DESC;
+def ABSQ_S_W : ABSQ_S_W_ENC, ABSQ_S_W_DESC;
+def PRECRQ_QB_PH : PRECRQ_QB_PH_ENC, PRECRQ_QB_PH_DESC;
+def PRECRQ_PH_W : PRECRQ_PH_W_ENC, PRECRQ_PH_W_DESC;
+def PRECRQ_RS_PH_W : PRECRQ_RS_PH_W_ENC, PRECRQ_RS_PH_W_DESC;
+def PRECRQU_S_QB_PH : PRECRQU_S_QB_PH_ENC, PRECRQU_S_QB_PH_DESC;
+def PRECEQ_W_PHL : PRECEQ_W_PHL_ENC, PRECEQ_W_PHL_DESC;
+def PRECEQ_W_PHR : PRECEQ_W_PHR_ENC, PRECEQ_W_PHR_DESC;
+def PRECEQU_PH_QBL : PRECEQU_PH_QBL_ENC, PRECEQU_PH_QBL_DESC;
+def PRECEQU_PH_QBR : PRECEQU_PH_QBR_ENC, PRECEQU_PH_QBR_DESC;
+def PRECEQU_PH_QBLA : PRECEQU_PH_QBLA_ENC, PRECEQU_PH_QBLA_DESC;
+def PRECEQU_PH_QBRA : PRECEQU_PH_QBRA_ENC, PRECEQU_PH_QBRA_DESC;
+def PRECEU_PH_QBL : PRECEU_PH_QBL_ENC, PRECEU_PH_QBL_DESC;
+def PRECEU_PH_QBR : PRECEU_PH_QBR_ENC, PRECEU_PH_QBR_DESC;
+def PRECEU_PH_QBLA : PRECEU_PH_QBLA_ENC, PRECEU_PH_QBLA_DESC;
+def PRECEU_PH_QBRA : PRECEU_PH_QBRA_ENC, PRECEU_PH_QBRA_DESC;
+def SHLL_QB : SHLL_QB_ENC, SHLL_QB_DESC;
+def SHLLV_QB : SHLLV_QB_ENC, SHLLV_QB_DESC;
+def SHRL_QB : SHRL_QB_ENC, SHRL_QB_DESC;
+def SHRLV_QB : SHRLV_QB_ENC, SHRLV_QB_DESC;
+def SHLL_PH : SHLL_PH_ENC, SHLL_PH_DESC;
+def SHLLV_PH : SHLLV_PH_ENC, SHLLV_PH_DESC;
+def SHLL_S_PH : SHLL_S_PH_ENC, SHLL_S_PH_DESC;
+def SHLLV_S_PH : SHLLV_S_PH_ENC, SHLLV_S_PH_DESC;
+def SHRA_PH : SHRA_PH_ENC, SHRA_PH_DESC;
+def SHRAV_PH : SHRAV_PH_ENC, SHRAV_PH_DESC;
+def SHRA_R_PH : SHRA_R_PH_ENC, SHRA_R_PH_DESC;
+def SHRAV_R_PH : SHRAV_R_PH_ENC, SHRAV_R_PH_DESC;
+def SHLL_S_W : SHLL_S_W_ENC, SHLL_S_W_DESC;
+def SHLLV_S_W : SHLLV_S_W_ENC, SHLLV_S_W_DESC;
+def SHRA_R_W : SHRA_R_W_ENC, SHRA_R_W_DESC;
+def SHRAV_R_W : SHRAV_R_W_ENC, SHRAV_R_W_DESC;
+def MULEU_S_PH_QBL : MULEU_S_PH_QBL_ENC, MULEU_S_PH_QBL_DESC;
+def MULEU_S_PH_QBR : MULEU_S_PH_QBR_ENC, MULEU_S_PH_QBR_DESC;
+def MULEQ_S_W_PHL : MULEQ_S_W_PHL_ENC, MULEQ_S_W_PHL_DESC;
+def MULEQ_S_W_PHR : MULEQ_S_W_PHR_ENC, MULEQ_S_W_PHR_DESC;
+def MULQ_RS_PH : MULQ_RS_PH_ENC, MULQ_RS_PH_DESC;
+def MULSAQ_S_W_PH : MULSAQ_S_W_PH_ENC, MULSAQ_S_W_PH_DESC;
+def MAQ_S_W_PHL : MAQ_S_W_PHL_ENC, MAQ_S_W_PHL_DESC;
+def MAQ_S_W_PHR : MAQ_S_W_PHR_ENC, MAQ_S_W_PHR_DESC;
+def MAQ_SA_W_PHL : MAQ_SA_W_PHL_ENC, MAQ_SA_W_PHL_DESC;
+def MAQ_SA_W_PHR : MAQ_SA_W_PHR_ENC, MAQ_SA_W_PHR_DESC;
+def DPAU_H_QBL : DPAU_H_QBL_ENC, DPAU_H_QBL_DESC;
+def DPAU_H_QBR : DPAU_H_QBR_ENC, DPAU_H_QBR_DESC;
+def DPSU_H_QBL : DPSU_H_QBL_ENC, DPSU_H_QBL_DESC;
+def DPSU_H_QBR : DPSU_H_QBR_ENC, DPSU_H_QBR_DESC;
+def DPAQ_S_W_PH : DPAQ_S_W_PH_ENC, DPAQ_S_W_PH_DESC;
+def DPSQ_S_W_PH : DPSQ_S_W_PH_ENC, DPSQ_S_W_PH_DESC;
+def DPAQ_SA_L_W : DPAQ_SA_L_W_ENC, DPAQ_SA_L_W_DESC;
+def DPSQ_SA_L_W : DPSQ_SA_L_W_ENC, DPSQ_SA_L_W_DESC;
+def MULT_DSP : MULT_DSP_ENC, MULT_DSP_DESC;
+def MULTU_DSP : MULTU_DSP_ENC, MULTU_DSP_DESC;
+def MADD_DSP : MADD_DSP_ENC, MADD_DSP_DESC;
+def MADDU_DSP : MADDU_DSP_ENC, MADDU_DSP_DESC;
+def MSUB_DSP : MSUB_DSP_ENC, MSUB_DSP_DESC;
+def MSUBU_DSP : MSUBU_DSP_ENC, MSUBU_DSP_DESC;
+def CMPU_EQ_QB : CMPU_EQ_QB_ENC, CMPU_EQ_QB_DESC;
+def CMPU_LT_QB : CMPU_LT_QB_ENC, CMPU_LT_QB_DESC;
+def CMPU_LE_QB : CMPU_LE_QB_ENC, CMPU_LE_QB_DESC;
+def CMPGU_EQ_QB : CMPGU_EQ_QB_ENC, CMPGU_EQ_QB_DESC;
+def CMPGU_LT_QB : CMPGU_LT_QB_ENC, CMPGU_LT_QB_DESC;
+def CMPGU_LE_QB : CMPGU_LE_QB_ENC, CMPGU_LE_QB_DESC;
+def CMP_EQ_PH : CMP_EQ_PH_ENC, CMP_EQ_PH_DESC;
+def CMP_LT_PH : CMP_LT_PH_ENC, CMP_LT_PH_DESC;
+def CMP_LE_PH : CMP_LE_PH_ENC, CMP_LE_PH_DESC;
+def BITREV : BITREV_ENC, BITREV_DESC;
+def PACKRL_PH : PACKRL_PH_ENC, PACKRL_PH_DESC;
+def REPL_QB : REPL_QB_ENC, REPL_QB_DESC;
+def REPL_PH : REPL_PH_ENC, REPL_PH_DESC;
+def REPLV_QB : REPLV_QB_ENC, REPLV_QB_DESC;
+def REPLV_PH : REPLV_PH_ENC, REPLV_PH_DESC;
+def PICK_QB : PICK_QB_ENC, PICK_QB_DESC;
+def PICK_PH : PICK_PH_ENC, PICK_PH_DESC;
+def LWX : LWX_ENC, LWX_DESC;
+def LHX : LHX_ENC, LHX_DESC;
+def LBUX : LBUX_ENC, LBUX_DESC;
+def BPOSGE32 : BPOSGE32_ENC, BPOSGE32_DESC;
+def INSV : INSV_ENC, INSV_DESC;
+def EXTP : EXTP_ENC, EXTP_DESC;
+def EXTPV : EXTPV_ENC, EXTPV_DESC;
+def EXTPDP : EXTPDP_ENC, EXTPDP_DESC;
+def EXTPDPV : EXTPDPV_ENC, EXTPDPV_DESC;
+def EXTR_W : EXTR_W_ENC, EXTR_W_DESC;
+def EXTRV_W : EXTRV_W_ENC, EXTRV_W_DESC;
+def EXTR_R_W : EXTR_R_W_ENC, EXTR_R_W_DESC;
+def EXTRV_R_W : EXTRV_R_W_ENC, EXTRV_R_W_DESC;
+def EXTR_RS_W : EXTR_RS_W_ENC, EXTR_RS_W_DESC;
+def EXTRV_RS_W : EXTRV_RS_W_ENC, EXTRV_RS_W_DESC;
+def EXTR_S_H : EXTR_S_H_ENC, EXTR_S_H_DESC;
+def EXTRV_S_H : EXTRV_S_H_ENC, EXTRV_S_H_DESC;
+def SHILO : SHILO_ENC, SHILO_DESC;
+def SHILOV : SHILOV_ENC, SHILOV_DESC;
+def MTHLIP : MTHLIP_ENC, MTHLIP_DESC;
+def RDDSP : RDDSP_ENC, RDDSP_DESC;
+def WRDSP : WRDSP_ENC, WRDSP_DESC;
+
+// MIPS DSP Rev 2
+let Predicates = [HasDSPR2] in {
+
+def ADDU_PH : ADDU_PH_ENC, ADDU_PH_DESC;
+def ADDU_S_PH : ADDU_S_PH_ENC, ADDU_S_PH_DESC;
+def SUBU_PH : SUBU_PH_ENC, SUBU_PH_DESC;
+def SUBU_S_PH : SUBU_S_PH_ENC, SUBU_S_PH_DESC;
+def CMPGDU_EQ_QB : CMPGDU_EQ_QB_ENC, CMPGDU_EQ_QB_DESC;
+def CMPGDU_LT_QB : CMPGDU_LT_QB_ENC, CMPGDU_LT_QB_DESC;
+def CMPGDU_LE_QB : CMPGDU_LE_QB_ENC, CMPGDU_LE_QB_DESC;
+def ABSQ_S_QB : ABSQ_S_QB_ENC, ABSQ_S_QB_DESC;
+def ADDUH_QB : ADDUH_QB_ENC, ADDUH_QB_DESC;
+def ADDUH_R_QB : ADDUH_R_QB_ENC, ADDUH_R_QB_DESC;
+def SUBUH_QB : SUBUH_QB_ENC, SUBUH_QB_DESC;
+def SUBUH_R_QB : SUBUH_R_QB_ENC, SUBUH_R_QB_DESC;
+def ADDQH_PH : ADDQH_PH_ENC, ADDQH_PH_DESC;
+def ADDQH_R_PH : ADDQH_R_PH_ENC, ADDQH_R_PH_DESC;
+def SUBQH_PH : SUBQH_PH_ENC, SUBQH_PH_DESC;
+def SUBQH_R_PH : SUBQH_R_PH_ENC, SUBQH_R_PH_DESC;
+def ADDQH_W : ADDQH_W_ENC, ADDQH_W_DESC;
+def ADDQH_R_W : ADDQH_R_W_ENC, ADDQH_R_W_DESC;
+def SUBQH_W : SUBQH_W_ENC, SUBQH_W_DESC;
+def SUBQH_R_W : SUBQH_R_W_ENC, SUBQH_R_W_DESC;
+def MUL_PH : MUL_PH_ENC, MUL_PH_DESC;
+def MUL_S_PH : MUL_S_PH_ENC, MUL_S_PH_DESC;
+def MULQ_S_W : MULQ_S_W_ENC, MULQ_S_W_DESC;
+def MULQ_RS_W : MULQ_RS_W_ENC, MULQ_RS_W_DESC;
+def MULQ_S_PH : MULQ_S_PH_ENC, MULQ_S_PH_DESC;
+def DPA_W_PH : DPA_W_PH_ENC, DPA_W_PH_DESC;
+def DPS_W_PH : DPS_W_PH_ENC, DPS_W_PH_DESC;
+def DPAQX_S_W_PH : DPAQX_S_W_PH_ENC, DPAQX_S_W_PH_DESC;
+def DPAQX_SA_W_PH : DPAQX_SA_W_PH_ENC, DPAQX_SA_W_PH_DESC;
+def DPAX_W_PH : DPAX_W_PH_ENC, DPAX_W_PH_DESC;
+def DPSX_W_PH : DPSX_W_PH_ENC, DPSX_W_PH_DESC;
+def DPSQX_S_W_PH : DPSQX_S_W_PH_ENC, DPSQX_S_W_PH_DESC;
+def DPSQX_SA_W_PH : DPSQX_SA_W_PH_ENC, DPSQX_SA_W_PH_DESC;
+def MULSA_W_PH : MULSA_W_PH_ENC, MULSA_W_PH_DESC;
+def PRECR_QB_PH : PRECR_QB_PH_ENC, PRECR_QB_PH_DESC;
+def PRECR_SRA_PH_W : PRECR_SRA_PH_W_ENC, PRECR_SRA_PH_W_DESC;
+def PRECR_SRA_R_PH_W : PRECR_SRA_R_PH_W_ENC, PRECR_SRA_R_PH_W_DESC;
+def SHRA_QB : SHRA_QB_ENC, SHRA_QB_DESC;
+def SHRAV_QB : SHRAV_QB_ENC, SHRAV_QB_DESC;
+def SHRA_R_QB : SHRA_R_QB_ENC, SHRA_R_QB_DESC;
+def SHRAV_R_QB : SHRAV_R_QB_ENC, SHRAV_R_QB_DESC;
+def SHRL_PH : SHRL_PH_ENC, SHRL_PH_DESC;
+def SHRLV_PH : SHRLV_PH_ENC, SHRLV_PH_DESC;
+def APPEND : APPEND_ENC, APPEND_DESC;
+def BALIGN : BALIGN_ENC, BALIGN_DESC;
+def PREPEND : PREPEND_ENC, PREPEND_DESC;
+
+}
+
+// Pseudos.
+def MULSAQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMULSAQ_S_W_PH, NoItinerary,
+ MULSAQ_S_W_PH>;
+def MAQ_S_W_PHL_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMAQ_S_W_PHL, NoItinerary,
+ MAQ_S_W_PHL>;
+def MAQ_S_W_PHR_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMAQ_S_W_PHR, NoItinerary,
+ MAQ_S_W_PHR>;
+def MAQ_SA_W_PHL_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMAQ_SA_W_PHL, NoItinerary,
+ MAQ_SA_W_PHL>;
+def MAQ_SA_W_PHR_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMAQ_SA_W_PHR, NoItinerary,
+ MAQ_SA_W_PHR>;
+def DPAU_H_QBL_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAU_H_QBL, NoItinerary,
+ DPAU_H_QBL>;
+def DPAU_H_QBR_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAU_H_QBR, NoItinerary,
+ DPAU_H_QBR>;
+def DPSU_H_QBL_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSU_H_QBL, NoItinerary,
+ DPSU_H_QBL>;
+def DPSU_H_QBR_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSU_H_QBR, NoItinerary,
+ DPSU_H_QBR>;
+def DPAQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAQ_S_W_PH, NoItinerary,
+ DPAQ_S_W_PH>;
+def DPSQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSQ_S_W_PH, NoItinerary,
+ DPSQ_S_W_PH>;
+def DPAQ_SA_L_W_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAQ_SA_L_W, NoItinerary,
+ DPAQ_SA_L_W>;
+def DPSQ_SA_L_W_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSQ_SA_L_W, NoItinerary,
+ DPSQ_SA_L_W>;
+
+def MULT_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMULT, NoItinerary, MULT_DSP>,
+ IsCommutable;
+def MULTU_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMULTU, NoItinerary, MULTU_DSP>,
+ IsCommutable;
+def MADD_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMADD_DSP, NoItinerary, MADD_DSP>,
+ IsCommutable, UseAC;
+def MADDU_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMADDU_DSP, NoItinerary, MADDU_DSP>,
+ IsCommutable, UseAC;
+def MSUB_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMSUB_DSP, NoItinerary, MSUB_DSP>,
+ UseAC;
+def MSUBU_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMSUBU_DSP, NoItinerary, MSUBU_DSP>,
+ UseAC;
+
+def SHILO_PSEUDO : SHILO_R1_PSEUDO_BASE<MipsSHILO, NoItinerary, SHILO>;
+def SHILOV_PSEUDO : SHILO_R2_PSEUDO_BASE<MipsSHILO, NoItinerary, SHILOV>;
+def MTHLIP_PSEUDO : SHILO_R2_PSEUDO_BASE<MipsMTHLIP, NoItinerary, MTHLIP>;
+
+let Predicates = [HasDSPR2] in {
+
+def DPA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPA_W_PH, NoItinerary, DPA_W_PH>;
+def DPS_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPS_W_PH, NoItinerary, DPS_W_PH>;
+def DPAQX_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAQX_S_W_PH, NoItinerary,
+ DPAQX_S_W_PH>;
+def DPAQX_SA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAQX_SA_W_PH, NoItinerary,
+ DPAQX_SA_W_PH>;
+def DPAX_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAX_W_PH, NoItinerary,
+ DPAX_W_PH>;
+def DPSX_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSX_W_PH, NoItinerary,
+ DPSX_W_PH>;
+def DPSQX_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSQX_S_W_PH, NoItinerary,
+ DPSQX_S_W_PH>;
+def DPSQX_SA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSQX_SA_W_PH, NoItinerary,
+ DPSQX_SA_W_PH>;
+def MULSA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMULSA_W_PH, NoItinerary,
+ MULSA_W_PH>;
+
+}
+
+// Patterns.
+class DSPPat<dag pattern, dag result, Predicate pred = HasDSP> :
+ Pat<pattern, result>, Requires<[pred]>;
+
+class BitconvertPat<ValueType DstVT, ValueType SrcVT, RegisterClass DstRC,
+ RegisterClass SrcRC> :
+ DSPPat<(DstVT (bitconvert (SrcVT SrcRC:$src))),
+ (COPY_TO_REGCLASS SrcRC:$src, DstRC)>;
+
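+// Bitconverts between i32 and the 32-bit vector types are selected to plain
+// register-class copies; no instruction is emitted.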
+def : BitconvertPat<i32, v2i16, CPURegs, DSPRegs>;
+def : BitconvertPat<i32, v4i8, CPURegs, DSPRegs>;
+def : BitconvertPat<v2i16, i32, DSPRegs, CPURegs>;
+def : BitconvertPat<v4i8, i32, DSPRegs, CPURegs>;
+
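+// Vector loads and stores are selected to word loads/stores plus a
+// register-class copy.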
+def : DSPPat<(v2i16 (load addr:$a)),
+ (v2i16 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>;
+def : DSPPat<(v4i8 (load addr:$a)),
+ (v4i8 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>;
+def : DSPPat<(store (v2i16 DSPRegs:$val), addr:$a),
+ (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>;
+def : DSPPat<(store (v4i8 DSPRegs:$val), addr:$a),
+ (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>;
+
+// Extr patterns.
+class EXTR_W_TY1_R2_Pat<SDPatternOperator OpNode, Instruction Instr> :
+ DSPPat<(i32 (OpNode CPURegs:$rs)), (Instr AC0, CPURegs:$rs)>;
+
+class EXTR_W_TY1_R1_Pat<SDPatternOperator OpNode, Instruction Instr> :
+ DSPPat<(i32 (OpNode immZExt5:$shift)), (Instr AC0, immZExt5:$shift)>;
+
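+// Map each extract node to the instruction form reading AC0, for both the
+// immediate (R1) and register (R2) shift-amount variants.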
+def : EXTR_W_TY1_R1_Pat<MipsEXTP, EXTP>;
+def : EXTR_W_TY1_R2_Pat<MipsEXTP, EXTPV>;
+def : EXTR_W_TY1_R1_Pat<MipsEXTPDP, EXTPDP>;
+def : EXTR_W_TY1_R2_Pat<MipsEXTPDP, EXTPDPV>;
+def : EXTR_W_TY1_R1_Pat<MipsEXTR_W, EXTR_W>;
+def : EXTR_W_TY1_R2_Pat<MipsEXTR_W, EXTRV_W>;
+def : EXTR_W_TY1_R1_Pat<MipsEXTR_R_W, EXTR_R_W>;
+def : EXTR_W_TY1_R2_Pat<MipsEXTR_R_W, EXTRV_R_W>;
+def : EXTR_W_TY1_R1_Pat<MipsEXTR_RS_W, EXTR_RS_W>;
+def : EXTR_W_TY1_R2_Pat<MipsEXTR_RS_W, EXTRV_RS_W>;
+def : EXTR_W_TY1_R1_Pat<MipsEXTR_S_H, EXTR_S_H>;
+def : EXTR_W_TY1_R2_Pat<MipsEXTR_S_H, EXTRV_S_H>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index 2bba8a3..e3c8ed7 100644
--- a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -30,10 +30,11 @@ STATISTIC(FilledSlots, "Number of delay slots filled");
STATISTIC(UsefulSlots, "Number of delay slots filled with instructions that"
" are not NOP.");
-static cl::opt<bool> EnableDelaySlotFiller(
- "enable-mips-delay-filler",
+static cl::opt<bool> DisableDelaySlotFiller(
+ "disable-mips-delay-filler",
cl::init(false),
- cl::desc("Fill the Mips delay slots useful instructions."),
+ cl::desc("Disable the delay slot filler, which attempts to fill the Mips"
+             " delay slots with useful instructions."),
cl::Hidden);
// This option can be used to silence complaints by machine verifier passes.
@@ -114,7 +115,9 @@ runOnMachineBasicBlock(MachineBasicBlock &MBB) {
InstrIter D;
- if (EnableDelaySlotFiller && findDelayInstr(MBB, I, D)) {
+ // Delay slot filling is disabled at -O0.
+ if (!DisableDelaySlotFiller && (TM.getOptLevel() != CodeGenOpt::None) &&
+ findDelayInstr(MBB, I, D)) {
MBB.splice(llvm::next(I), &MBB, D);
++UsefulSlots;
} else
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
index 8c0474b..2cad2a6 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
@@ -23,7 +23,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
@@ -98,3 +98,37 @@ bool MipsFrameLowering::hasFP(const MachineFunction &MF) const {
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken();
}
+
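+// Conservatively estimate the frame size: fixed-sized objects, worst-case
+// callee-saved spills, remaining objects rounded up to the maximum alignment,
+// and the reserved call frame, all rounded to the stack alignment.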
+uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetRegisterInfo &TRI = *MF.getTarget().getRegisterInfo();
+
+ int64_t Offset = 0;
+
+ // Iterate over fixed sized objects.
+ for (int I = MFI->getObjectIndexBegin(); I != 0; ++I)
+ Offset = std::max(Offset, -MFI->getObjectOffset(I));
+
+ // Conservatively assume all callee-saved registers will be saved.
+ for (const uint16_t *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
+ unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
+ Offset = RoundUpToAlignment(Offset + Size, Size);
+ }
+
+ unsigned MaxAlign = MFI->getMaxAlignment();
+
+ // Check that MaxAlign is not zero if there is a stack object that is not a
+ // callee-saved spill.
+ assert(!MFI->getObjectIndexEnd() || MaxAlign);
+
+ // Iterate over other objects.
+ for (unsigned I = 0, E = MFI->getObjectIndexEnd(); I != E; ++I)
+ Offset = RoundUpToAlignment(Offset + MFI->getObjectSize(I), MaxAlign);
+
+ // Call frame.
+ if (MFI->adjustsStack() && hasReservedCallFrame(MF))
+ Offset = RoundUpToAlignment(Offset + MFI->getMaxCallFrameSize(),
+ std::max(MaxAlign, getStackAlignment()));
+
+ return RoundUpToAlignment(Offset, getStackAlignment());
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
index ed7b7fe..df52d92 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
@@ -34,6 +34,9 @@ public:
const MipsSubtarget &ST);
bool hasFP(const MachineFunction &MF) const;
+
+protected:
+ uint64_t estimateStackSize(const MachineFunction &MF) const;
};
/// Create MipsInstrInfo objects.
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 5a97c17..c5fca7f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -86,6 +86,10 @@ private:
SDNode *getGlobalBaseReg();
+ SDValue getMips16SPAliasReg();
+
+ void getMips16SPRefReg(SDNode *parent, SDValue &AliasReg);
+
std::pair<SDNode*, SDNode*> SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl,
EVT Ty, bool HasLo, bool HasHi);
@@ -94,6 +98,9 @@ private:
// Complex Pattern.
bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, SDValue &Offset);
+ bool SelectAddr16(SDNode *Parent, SDValue N, SDValue &Base, SDValue &Offset,
+ SDValue &Alias);
+
// getImm - Return a target constant with the specified value.
inline SDValue getImm(const SDNode *Node, unsigned Imm) {
return CurDAG->getTargetConstant(Imm, Node->getValueType(0));
@@ -102,6 +109,7 @@ private:
void ProcessFunctionAfterISel(MachineFunction &MF);
bool ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&);
void InitGlobalBaseReg(MachineFunction &MF);
+ void InitMips16SPAliasReg(MachineFunction &MF);
virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
@@ -220,6 +228,26 @@ void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) {
.addReg(Mips::V0).addReg(Mips::T9);
}
+// Insert instructions to initialize the Mips16 SP Alias register in the
+// first MBB of the function.
+//
+void MipsDAGToDAGISel::InitMips16SPAliasReg(MachineFunction &MF) {
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+
+ if (!MipsFI->mips16SPAliasRegSet())
+ return;
+
+ MachineBasicBlock &MBB = MF.front();
+ MachineBasicBlock::iterator I = MBB.begin();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
+ unsigned Mips16SPAliasReg = MipsFI->getMips16SPAliasReg();
+
+ BuildMI(MBB, I, DL, TII.get(Mips::MoveR3216), Mips16SPAliasReg)
+ .addReg(Mips::SP);
+}
+
bool MipsDAGToDAGISel::ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI,
const MachineInstr& MI) {
unsigned DstReg = 0, ZeroReg = 0;
@@ -260,6 +288,7 @@ bool MipsDAGToDAGISel::ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI,
void MipsDAGToDAGISel::ProcessFunctionAfterISel(MachineFunction &MF) {
InitGlobalBaseReg(MF);
+ InitMips16SPAliasReg(MF);
MachineRegisterInfo *MRI = &MF.getRegInfo();
@@ -284,6 +313,14 @@ SDNode *MipsDAGToDAGISel::getGlobalBaseReg() {
return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
+/// getMips16SPAliasReg - Return the register node for the Mips16-accessible
+/// alias of SP; the copy from SP is emitted by InitMips16SPAliasReg.
+SDValue MipsDAGToDAGISel::getMips16SPAliasReg() {
+ unsigned Mips16SPAliasReg =
+ MF->getInfo<MipsFunctionInfo>()->getMips16SPAliasReg();
+ return CurDAG->getRegister(Mips16SPAliasReg, TLI.getPointerTy());
+}
+
/// ComplexPattern used on MipsInstrInfo
/// Used on Mips Load/Store instructions
bool MipsDAGToDAGISel::
@@ -337,8 +374,9 @@ SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {
// Generate:
// lui $2, %hi($CPI1_0)
// lwc1 $f0, %lo($CPI1_0)($2)
- if (Addr.getOperand(1).getOpcode() == MipsISD::Lo) {
- SDValue LoVal = Addr.getOperand(1), Opnd0 = LoVal.getOperand(0);
+ if (Addr.getOperand(1).getOpcode() == MipsISD::Lo ||
+ Addr.getOperand(1).getOpcode() == MipsISD::GPRel) {
+ SDValue Opnd0 = Addr.getOperand(1).getOperand(0);
if (isa<ConstantPoolSDNode>(Opnd0) || isa<GlobalAddressSDNode>(Opnd0) ||
isa<JumpTableSDNode>(Opnd0)) {
Base = Addr.getOperand(0);
@@ -361,6 +399,115 @@ SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {
return true;
}
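+// Choose the base register for a Mips16 frame reference: byte and halfword
+// loads/stores go through the SP alias register (or FP when the function has
+// one); everything else addresses off SP directly.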
+void MipsDAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
+ SDValue AliasFPReg = CurDAG->getRegister(Mips::S0, TLI.getPointerTy());
+ if (Parent) {
+ switch (Parent->getOpcode()) {
+ case ISD::LOAD: {
+ LoadSDNode *SD = dyn_cast<LoadSDNode>(Parent);
+ switch (SD->getMemoryVT().getSizeInBits()) {
+ case 8:
+ case 16:
+ AliasReg = TM.getFrameLowering()->hasFP(*MF)?
+ AliasFPReg: getMips16SPAliasReg();
+ return;
+ }
+ break;
+ }
+ case ISD::STORE: {
+ StoreSDNode *SD = dyn_cast<StoreSDNode>(Parent);
+ switch (SD->getMemoryVT().getSizeInBits()) {
+ case 8:
+ case 16:
+ AliasReg = TM.getFrameLowering()->hasFP(*MF)?
+ AliasFPReg: getMips16SPAliasReg();
+ return;
+ }
+ break;
+ }
+ }
+ }
+  AliasReg = CurDAG->getRegister(Mips::SP, TLI.getPointerTy());
+}
+
+bool MipsDAGToDAGISel::SelectAddr16(
+ SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset,
+ SDValue &Alias) {
+ EVT ValTy = Addr.getValueType();
+
+ Alias = CurDAG->getTargetConstant(0, ValTy);
+
+  // If the address is a frame index, get the TargetFrameIndex.
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
+ Offset = CurDAG->getTargetConstant(0, ValTy);
+ getMips16SPRefReg(Parent, Alias);
+ return true;
+ }
+  // Addresses wrapped by MipsISD::Wrapper (e.g. global addresses in PIC code).
+ if (Addr.getOpcode() == MipsISD::Wrapper) {
+ Base = Addr.getOperand(0);
+ Offset = Addr.getOperand(1);
+ return true;
+ }
+ if (TM.getRelocationModel() != Reloc::PIC_) {
+ if ((Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress))
+ return false;
+ }
+ // Addresses of the form FI+const or FI|const
+ if (CurDAG->isBaseWithConstantOffset(Addr)) {
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
+ if (isInt<16>(CN->getSExtValue())) {
+
+ // If the first operand is a FI, get the TargetFI Node
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
+ (Addr.getOperand(0))) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
+ getMips16SPRefReg(Parent, Alias);
+ }
+ else
+ Base = Addr.getOperand(0);
+
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy);
+ return true;
+ }
+ }
+ // Operand is a result from an ADD.
+ if (Addr.getOpcode() == ISD::ADD) {
+ // When loading from constant pools, load the lower address part in
+ // the instruction itself. For example, instead of:
+ // lui $2, %hi($CPI1_0)
+ // addiu $2, $2, %lo($CPI1_0)
+ // lwc1 $f0, 0($2)
+ // Generate:
+ // lui $2, %hi($CPI1_0)
+ // lwc1 $f0, %lo($CPI1_0)($2)
+ if (Addr.getOperand(1).getOpcode() == MipsISD::Lo ||
+ Addr.getOperand(1).getOpcode() == MipsISD::GPRel) {
+ SDValue Opnd0 = Addr.getOperand(1).getOperand(0);
+ if (isa<ConstantPoolSDNode>(Opnd0) || isa<GlobalAddressSDNode>(Opnd0) ||
+ isa<JumpTableSDNode>(Opnd0)) {
+ Base = Addr.getOperand(0);
+ Offset = Opnd0;
+ return true;
+ }
+ }
+
+ // If an indexed floating point load/store can be emitted, return false.
+ const LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(Parent);
+
+ if (LS &&
+ (LS->getMemoryVT() == MVT::f32 || LS->getMemoryVT() == MVT::f64) &&
+ Subtarget.hasMips32r2Or64())
+ return false;
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, ValTy);
+ return true;
+}
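Note: the two routines above work around a Mips16 encoding restriction: the 16-bit
load/store formats only have SP-relative encodings for word accesses, so 8- and
16-bit accesses need a general-purpose base register. getMips16SPRefReg therefore
hands SelectAddr16 either the frame pointer (when one exists) or the register set
up by InitMips16SPAliasReg to alias SP. A minimal standalone sketch of that
decision (illustrative only; the enum and function names are invented for this
note, not LLVM API):

    #include <cstdio>

    enum class BaseReg { SP, FP, SPAlias };

    // Mirrors getMips16SPRefReg: only 8/16-bit accesses need the alias.
    static BaseReg chooseBase(unsigned MemSizeInBits, bool HasFP) {
      if (MemSizeInBits == 8 || MemSizeInBits == 16)
        return HasFP ? BaseReg::FP : BaseReg::SPAlias;
      return BaseReg::SP; // word accesses have SP-relative encodings
    }

    int main() {
      std::printf("%d %d %d\n", (int)chooseBase(8, false),
                  (int)chooseBase(16, true), (int)chooseBase(32, false));
    }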
+
/// Select multiply instructions.
std::pair<SDNode*, SDNode*>
MipsDAGToDAGISel::SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty,
@@ -371,14 +518,16 @@ MipsDAGToDAGISel::SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty,
SDValue InFlag = SDValue(Mul, 0);
if (HasLo) {
- Lo = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFLO : Mips::MFLO64, dl,
- Ty, MVT::Glue, InFlag);
+ unsigned Opcode = Subtarget.inMips16Mode() ? Mips::Mflo16 :
+ (Ty == MVT::i32 ? Mips::MFLO : Mips::MFLO64);
+ Lo = CurDAG->getMachineNode(Opcode, dl, Ty, MVT::Glue, InFlag);
InFlag = SDValue(Lo, 1);
}
- if (HasHi)
- Hi = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFHI : Mips::MFHI64, dl,
- Ty, InFlag);
-
+ if (HasHi) {
+ unsigned Opcode = Subtarget.inMips16Mode() ? Mips::Mfhi16 :
+ (Ty == MVT::i32 ? Mips::MFHI : Mips::MFHI64);
+ Hi = CurDAG->getMachineNode(Opcode, dl, Ty, InFlag);
+ }
return std::make_pair(Lo, Hi);
}
@@ -410,6 +559,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
case ISD::SUBE:
case ISD::ADDE: {
+ bool inMips16Mode = Subtarget.inMips16Mode();
SDValue InFlag = Node->getOperand(2), CmpLHS;
unsigned Opc = InFlag.getOpcode(); (void)Opc;
assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) ||
@@ -419,10 +569,16 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
unsigned MOp;
if (Opcode == ISD::ADDE) {
CmpLHS = InFlag.getValue(0);
- MOp = Mips::ADDu;
+ if (inMips16Mode)
+ MOp = Mips::AdduRxRyRz16;
+ else
+ MOp = Mips::ADDu;
} else {
CmpLHS = InFlag.getOperand(0);
- MOp = Mips::SUBu;
+ if (inMips16Mode)
+ MOp = Mips::SubuRxRyRz16;
+ else
+ MOp = Mips::SUBu;
}
SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) };
@@ -431,8 +587,11 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
SDValue RHS = Node->getOperand(1);
EVT VT = LHS.getValueType();
- SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, dl, VT, Ops, 2);
- SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, dl, VT,
+
+ unsigned Sltu_op = inMips16Mode? Mips::SltuRxRyRz16: Mips::SLTu;
+ SDNode *Carry = CurDAG->getMachineNode(Sltu_op, dl, VT, Ops, 2);
+ unsigned Addu_op = inMips16Mode? Mips::AdduRxRyRz16 : Mips::ADDu;
+ SDNode *AddCarry = CurDAG->getMachineNode(Addu_op, dl, VT,
SDValue(Carry,0), RHS);
return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue,
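The ADDE/SUBE expansion above recovers the carry bit with an unsigned
set-on-less-than; in Mips16 mode the same shape is emitted with the 16-bit
opcode variants. A standalone sketch of the arithmetic being modeled
(illustrative only, not DAG code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t A = 0x00000001ffffffffULL, B = 0x0000000200000005ULL;
      uint32_t ALo = (uint32_t)A, AHi = (uint32_t)(A >> 32);
      uint32_t BLo = (uint32_t)B, BHi = (uint32_t)(B >> 32);

      uint32_t SumLo = ALo + BLo;           // ADDu (the ADDC node)
      uint32_t Carry = SumLo < ALo;         // SLTu detects the carry-out
      uint32_t SumHi = AHi + (Carry + BHi); // ADDu(LHS, ADDu(Carry, RHS))

      std::printf("%08x%08x\n", SumHi, SumLo); // 0000000400000004
    }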
@@ -442,8 +601,13 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
/// Mul with two results
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI: {
- if (NodeTy == MVT::i32)
- MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT);
+ if (NodeTy == MVT::i32) {
+ if (Subtarget.inMips16Mode())
+ MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MultuRxRy16 :
+ Mips::MultRxRy16);
+ else
+ MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT);
+ }
else
MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::DMULTu : Mips::DMULT);
@@ -469,8 +633,13 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
}
case ISD::MULHS:
case ISD::MULHU: {
- if (NodeTy == MVT::i32)
- MultOpc = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT);
+ if (NodeTy == MVT::i32) {
+ if (Subtarget.inMips16Mode())
+ MultOpc = (Opcode == ISD::MULHU ?
+ Mips::MultuRxRy16 : Mips::MultRxRy16);
+ else
+ MultOpc = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT);
+ }
else
MultOpc = (Opcode == ISD::MULHU ? Mips::DMULTu : Mips::DMULT);
@@ -539,6 +708,15 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
return RegOpnd;
}
+#ifndef NDEBUG
+ case ISD::LOAD:
+ case ISD::STORE:
+ assert(cast<MemSDNode>(Node)->getMemoryVT().getSizeInBits() / 8 <=
+ cast<MemSDNode>(Node)->getAlignment() &&
+ "Unexpected unaligned loads/stores.");
+ break;
+#endif
+
case MipsISD::ThreadPointer: {
EVT PtrVT = TLI.getPointerTy();
unsigned RdhwrOpc, SrcReg, DestReg;
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
index c5207c6..e225b6c 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -25,6 +25,7 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -32,12 +33,33 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+STATISTIC(NumTailCalls, "Number of tail calls");
+
+static cl::opt<bool>
+EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
+ cl::desc("MIPS: Enable tail calls."), cl::init(false));
+
+static const uint16_t O32IntRegs[4] = {
+ Mips::A0, Mips::A1, Mips::A2, Mips::A3
+};
+
+static const uint16_t Mips64IntRegs[8] = {
+ Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
+ Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64
+};
+
+static const uint16_t Mips64DPRegs[8] = {
+ Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
+ Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
+};
+
// If I is a shifted mask, set the size (Size) and the first bit of the
// mask (Pos), and return true.
// For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
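A runnable sketch of the shifted-mask test described in that comment
(illustrative; it uses C++20 <bit> rather than LLVM's MathExtras helpers):

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    // True iff I is a single contiguous run of 1-bits; reports the run's
    // first bit (Pos) and length (Size).
    static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
      if (I == 0)
        return false;
      Pos = std::countr_zero(I);  // index of the first set bit
      Size = std::popcount(I);    // number of set bits
      return I == (((uint64_t)-1 >> (64 - Size)) << Pos);
    }

    int main() {
      uint64_t Pos, Size;
      if (isShiftedMask(0x003ff800, Pos, Size))
        std::printf("(Pos, Size) = (%llu, %llu)\n",
                    (unsigned long long)Pos, (unsigned long long)Size);
      // Prints: (Pos, Size) = (11, 11)
    }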
@@ -58,6 +80,7 @@ static SDValue GetGlobalReg(SelectionDAG &DAG, EVT Ty) {
const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
case MipsISD::JmpLink: return "MipsISD::JmpLink";
+ case MipsISD::TailCall: return "MipsISD::TailCall";
case MipsISD::Hi: return "MipsISD::Hi";
case MipsISD::Lo: return "MipsISD::Lo";
case MipsISD::GPRel: return "MipsISD::GPRel";
@@ -89,6 +112,20 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::LDR: return "MipsISD::LDR";
case MipsISD::SDL: return "MipsISD::SDL";
case MipsISD::SDR: return "MipsISD::SDR";
+ case MipsISD::EXTP: return "MipsISD::EXTP";
+ case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
+ case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
+ case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
+ case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
+ case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
+ case MipsISD::SHILO: return "MipsISD::SHILO";
+ case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
+ case MipsISD::MULT: return "MipsISD::MULT";
+ case MipsISD::MULTU: return "MipsISD::MULTU";
+ case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
+ case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
+ case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
+ case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
default: return NULL;
}
}
@@ -113,7 +150,22 @@ MipsTargetLowering(MipsTargetMachine &TM)
if (Subtarget->inMips16Mode()) {
addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);
- addRegisterClass(MVT::i32, &Mips::CPURARegRegClass);
+ }
+
+ if (Subtarget->hasDSP()) {
+ MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
+
+ for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
+ addRegisterClass(VecTys[i], &Mips::DSPRegsRegClass);
+
+ // Expand all builtin opcodes.
+ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
+ setOperationAction(Opc, VecTys[i], Expand);
+
+ setOperationAction(ISD::LOAD, VecTys[i], Legal);
+ setOperationAction(ISD::STORE, VecTys[i], Legal);
+ setOperationAction(ISD::BITCAST, VecTys[i], Legal);
+ }
}
if (!TM.Options.UseSoftFloat) {
@@ -160,10 +212,18 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
- setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
- setOperationAction(ISD::LOAD, MVT::i32, Custom);
- setOperationAction(ISD::STORE, MVT::i32, Custom);
+ if (Subtarget->inMips16Mode()) {
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
+ }
+ else {
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+ }
+ if (!Subtarget->inMips16Mode()) {
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::STORE, MVT::i32, Custom);
+ }
if (!TM.Options.NoNaNsFPMath) {
setOperationAction(ISD::FABS, MVT::f32, Custom);
@@ -187,6 +247,10 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
}
+ setOperationAction(ISD::ADD, MVT::i32, Custom);
+ if (HasMips64)
+ setOperationAction(ISD::ADD, MVT::i64, Custom);
+
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
@@ -254,6 +318,9 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
+
// Use the default for now
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
@@ -263,6 +330,21 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
+ if (Subtarget->inMips16Mode()) {
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
+ }
+
setInsertFencesForAtomic(true);
if (!Subtarget->hasSEInReg()) {
@@ -310,6 +392,9 @@ MipsTargetLowering(MipsTargetMachine &TM)
bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
+ if (Subtarget->inMips16Mode())
+ return false;
+
switch (SVT) {
case MVT::i64:
case MVT::i32:
@@ -785,6 +870,26 @@ SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
return SDValue();
}
+void
+MipsTargetLowering::LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const {
+ SDValue Res = LowerOperation(SDValue(N, 0), DAG);
+
+ for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
+ Results.push_back(Res.getValue(I));
+}
+
+void
+MipsTargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const {
+ SDValue Res = LowerOperation(SDValue(N, 0), DAG);
+
+ for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
+ Results.push_back(Res.getValue(I));
+}
+
SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
@@ -811,6 +916,9 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
case ISD::SRL_PARTS: return LowerShiftRightParts(Op, DAG, false);
case ISD::LOAD: return LowerLOAD(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
+ case ISD::ADD: return LowerADD(Op, DAG);
}
return SDValue();
}
@@ -919,6 +1027,70 @@ static MachineBasicBlock* ExpandCondMov(MachineInstr *MI, MachineBasicBlock *BB,
return BB;
}
*/
+
+MachineBasicBlock *
+MipsTargetLowering::EmitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
+ // $bb:
+ // bposge32_pseudo $vr0
+ // =>
+ // $bb:
+ // bposge32 $tbb
+ // $fbb:
+ // li $vr2, 0
+ // b $sink
+ // $tbb:
+ // li $vr1, 1
+ // $sink:
+ // $vr0 = phi($vr2, $fbb, $vr1, $tbb)
+
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetRegisterClass *RC = &Mips::CPURegsRegClass;
+ DebugLoc DL = MI->getDebugLoc();
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *Sink = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, FBB);
+ F->insert(It, TBB);
+ F->insert(It, Sink);
+
+ // Transfer the remainder of BB and its successor edges to Sink.
+ Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ Sink->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Add successors.
+ BB->addSuccessor(FBB);
+ BB->addSuccessor(TBB);
+ FBB->addSuccessor(Sink);
+ TBB->addSuccessor(Sink);
+
+ // Insert the real bposge32 instruction to $BB.
+ BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB);
+
+ // Fill $FBB.
+ unsigned VR2 = RegInfo.createVirtualRegister(RC);
+ BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2)
+ .addReg(Mips::ZERO).addImm(0);
+ BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
+
+ // Fill $TBB.
+ unsigned VR1 = RegInfo.createVirtualRegister(RC);
+ BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1)
+ .addReg(Mips::ZERO).addImm(1);
+
+ // Insert phi function to $Sink.
+ BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
+ MI->getOperand(0).getReg())
+ .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return Sink;
+}
+
MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
@@ -1027,6 +1199,8 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case Mips::ATOMIC_CMP_SWAP_I64:
case Mips::ATOMIC_CMP_SWAP_I64_P8:
return EmitAtomicCmpSwap(MI, BB, 8);
+ case Mips::BPOSGE32_PSEUDO:
+ return EmitBPOSGE32(MI, BB);
}
}
@@ -1571,15 +1745,16 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
SDVTList VTs = DAG.getVTList(MVT::i32);
- MipsTargetObjectFile &TLOF = (MipsTargetObjectFile&)getObjFileLowering();
+ const MipsTargetObjectFile &TLOF =
+ (const MipsTargetObjectFile&)getObjFileLowering();
// %gp_rel relocation
if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GPREL);
SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1);
- SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
- return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode);
+ SDValue GPReg = DAG.getRegister(Mips::GP, MVT::i32);
+ return DAG.getNode(ISD::ADD, dl, MVT::i32, GPReg, GPRelNode);
}
// %hi/%lo relocation
SDValue GAHi = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
@@ -1620,8 +1795,10 @@ SDValue MipsTargetLowering::LowerBlockAddress(SDValue Op,
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
// %hi/%lo relocation
- SDValue BAHi = DAG.getBlockAddress(BA, MVT::i32, true, MipsII::MO_ABS_HI);
- SDValue BALo = DAG.getBlockAddress(BA, MVT::i32, true, MipsII::MO_ABS_LO);
+ SDValue BAHi =
+ DAG.getTargetBlockAddress(BA, MVT::i32, 0, MipsII::MO_ABS_HI);
+ SDValue BALo =
+ DAG.getTargetBlockAddress(BA, MVT::i32, 0, MipsII::MO_ABS_LO);
SDValue Hi = DAG.getNode(MipsISD::Hi, dl, MVT::i32, BAHi);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, BALo);
return DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, Lo);
@@ -1630,10 +1807,10 @@ SDValue MipsTargetLowering::LowerBlockAddress(SDValue Op,
EVT ValTy = Op.getValueType();
unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
unsigned OFSTFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
- SDValue BAGOTOffset = DAG.getBlockAddress(BA, ValTy, true, GOTFlag);
+ SDValue BAGOTOffset = DAG.getTargetBlockAddress(BA, ValTy, 0, GOTFlag);
BAGOTOffset = DAG.getNode(MipsISD::Wrapper, dl, ValTy,
GetGlobalReg(DAG, ValTy), BAGOTOffset);
- SDValue BALOOffset = DAG.getBlockAddress(BA, ValTy, true, OFSTFlag);
+ SDValue BALOOffset = DAG.getTargetBlockAddress(BA, ValTy, 0, OFSTFlag);
SDValue Load = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), BAGOTOffset,
MachinePointerInfo(), false, false, false, 0);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, BALOOffset);
@@ -2224,6 +2401,172 @@ SDValue MipsTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
return CreateStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
}
+// This function expands mips intrinsic nodes which have 64-bit input operands
+// or output values.
+//
+// out64 = intrinsic-node in64
+// =>
+// lo = copy (extract-element (in64, 0))
+// hi = copy (extract-element (in64, 1))
+// mips-specific-node
+// v0 = copy lo
+// v1 = copy hi
+// out64 = merge-values (v0, v1)
+//
+static SDValue LowerDSPIntr(SDValue Op, SelectionDAG &DAG,
+ unsigned Opc, bool HasI64In, bool HasI64Out) {
+ DebugLoc DL = Op.getDebugLoc();
+ bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;
+ SDValue Chain = HasChainIn ? Op->getOperand(0) : DAG.getEntryNode();
+ SmallVector<SDValue, 3> Ops;
+
+ if (HasI64In) {
+ SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
+ Op->getOperand(1 + HasChainIn),
+ DAG.getConstant(0, MVT::i32));
+ SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
+ Op->getOperand(1 + HasChainIn),
+ DAG.getConstant(1, MVT::i32));
+
+ Chain = DAG.getCopyToReg(Chain, DL, Mips::LO, InLo, SDValue());
+ Chain = DAG.getCopyToReg(Chain, DL, Mips::HI, InHi, Chain.getValue(1));
+
+ Ops.push_back(Chain);
+ Ops.append(Op->op_begin() + HasChainIn + 2, Op->op_end());
+ Ops.push_back(Chain.getValue(1));
+ } else {
+ Ops.push_back(Chain);
+ Ops.append(Op->op_begin() + HasChainIn + 1, Op->op_end());
+ }
+
+ if (!HasI64Out)
+ return DAG.getNode(Opc, DL, Op->value_begin(), Op->getNumValues(),
+ Ops.begin(), Ops.size());
+
+ SDValue Intr = DAG.getNode(Opc, DL, DAG.getVTList(MVT::Other, MVT::Glue),
+ Ops.begin(), Ops.size());
+ SDValue OutLo = DAG.getCopyFromReg(Intr.getValue(0), DL, Mips::LO, MVT::i32,
+ Intr.getValue(1));
+ SDValue OutHi = DAG.getCopyFromReg(OutLo.getValue(1), DL, Mips::HI, MVT::i32,
+ OutLo.getValue(2));
+ SDValue Out = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, OutLo, OutHi);
+
+ if (!HasChainIn)
+ return Out;
+
+ SDValue Vals[] = { Out, OutHi.getValue(1) };
+ return DAG.getMergeValues(Vals, 2, DL);
+}
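The 64-bit splitting and re-merging that LowerDSPIntr performs on DAG nodes,
shown as plain integer arithmetic (illustrative only; the real code routes the
two halves through the LO/HI accumulator registers between the extract and the
merge):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t In64 = 0x1122334455667788ULL;
      uint32_t Lo = (uint32_t)In64;         // extract-element (in64, 0)
      uint32_t Hi = (uint32_t)(In64 >> 32); // extract-element (in64, 1)
      // ... the DSP node reads/writes the LO/HI halves here ...
      uint64_t Out64 = ((uint64_t)Hi << 32) | Lo; // merge-values (v0, v1)
      std::printf("%016llx\n", (unsigned long long)Out64);
    }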
+
+SDValue MipsTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
+ SelectionDAG &DAG) const {
+ switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
+ default:
+ return SDValue();
+ case Intrinsic::mips_shilo:
+ return LowerDSPIntr(Op, DAG, MipsISD::SHILO, true, true);
+ case Intrinsic::mips_dpau_h_qbl:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL, true, true);
+ case Intrinsic::mips_dpau_h_qbr:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR, true, true);
+ case Intrinsic::mips_dpsu_h_qbl:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL, true, true);
+ case Intrinsic::mips_dpsu_h_qbr:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR, true, true);
+ case Intrinsic::mips_dpa_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH, true, true);
+ case Intrinsic::mips_dps_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH, true, true);
+ case Intrinsic::mips_dpax_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH, true, true);
+ case Intrinsic::mips_dpsx_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH, true, true);
+ case Intrinsic::mips_mulsa_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH, true, true);
+ case Intrinsic::mips_mult:
+ return LowerDSPIntr(Op, DAG, MipsISD::MULT, false, true);
+ case Intrinsic::mips_multu:
+ return LowerDSPIntr(Op, DAG, MipsISD::MULTU, false, true);
+ case Intrinsic::mips_madd:
+ return LowerDSPIntr(Op, DAG, MipsISD::MADD_DSP, true, true);
+ case Intrinsic::mips_maddu:
+ return LowerDSPIntr(Op, DAG, MipsISD::MADDU_DSP, true, true);
+ case Intrinsic::mips_msub:
+ return LowerDSPIntr(Op, DAG, MipsISD::MSUB_DSP, true, true);
+ case Intrinsic::mips_msubu:
+ return LowerDSPIntr(Op, DAG, MipsISD::MSUBU_DSP, true, true);
+ }
+}
+
+SDValue MipsTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
+ SelectionDAG &DAG) const {
+ switch (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue()) {
+ default:
+ return SDValue();
+ case Intrinsic::mips_extp:
+ return LowerDSPIntr(Op, DAG, MipsISD::EXTP, true, false);
+ case Intrinsic::mips_extpdp:
+ return LowerDSPIntr(Op, DAG, MipsISD::EXTPDP, true, false);
+ case Intrinsic::mips_extr_w:
+ return LowerDSPIntr(Op, DAG, MipsISD::EXTR_W, true, false);
+ case Intrinsic::mips_extr_r_w:
+ return LowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W, true, false);
+ case Intrinsic::mips_extr_rs_w:
+ return LowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W, true, false);
+ case Intrinsic::mips_extr_s_h:
+ return LowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H, true, false);
+ case Intrinsic::mips_mthlip:
+ return LowerDSPIntr(Op, DAG, MipsISD::MTHLIP, true, true);
+ case Intrinsic::mips_mulsaq_s_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH, true, true);
+ case Intrinsic::mips_maq_s_w_phl:
+ return LowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL, true, true);
+ case Intrinsic::mips_maq_s_w_phr:
+ return LowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR, true, true);
+ case Intrinsic::mips_maq_sa_w_phl:
+ return LowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL, true, true);
+ case Intrinsic::mips_maq_sa_w_phr:
+ return LowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR, true, true);
+ case Intrinsic::mips_dpaq_s_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH, true, true);
+ case Intrinsic::mips_dpsq_s_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH, true, true);
+ case Intrinsic::mips_dpaq_sa_l_w:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W, true, true);
+ case Intrinsic::mips_dpsq_sa_l_w:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W, true, true);
+ case Intrinsic::mips_dpaqx_s_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH, true, true);
+ case Intrinsic::mips_dpaqx_sa_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH, true, true);
+ case Intrinsic::mips_dpsqx_s_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH, true, true);
+ case Intrinsic::mips_dpsqx_sa_w_ph:
+ return LowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH, true, true);
+ }
+}
+
+SDValue MipsTargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const {
+ if (Op->getOperand(0).getOpcode() != ISD::FRAMEADDR
+ || cast<ConstantSDNode>
+ (Op->getOperand(0).getOperand(0))->getZExtValue() != 0
+ || Op->getOperand(1).getOpcode() != ISD::FRAME_TO_ARGS_OFFSET)
+ return SDValue();
+
+ // The pattern
+ // (add (frameaddr 0), (frame_to_args_offset))
+ // results from lowering llvm.eh.dwarf.cfa intrinsic. Transform it to
+ // (add FrameObject, 0)
+ // where FrameObject is a fixed StackObject with offset 0 which points to
+ // the old stack pointer.
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ EVT ValTy = Op->getValueType(0);
+ int FI = MFI->CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
+ SDValue InArgsAddr = DAG.getFrameIndex(FI, ValTy);
+ return DAG.getNode(ISD::ADD, Op->getDebugLoc(), ValTy, InArgsAddr,
+ DAG.getConstant(0, ValTy));
+}
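For context, the pattern handled above typically originates from source like the
following sketch (it assumes the usual Clang/GCC mapping of __builtin_dwarf_cfa()
to the llvm.eh.dwarf.cfa intrinsic):

    // Returns the canonical frame address, i.e. the value the stack
    // pointer had on entry to this function.
    void *current_cfa() {
      return __builtin_dwarf_cfa();
    }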
+
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -2259,16 +2602,9 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
Mips::D6, Mips::D7
};
- // ByVal Args
- if (ArgFlags.isByVal()) {
- State.HandleByVal(ValNo, ValVT, LocVT, LocInfo,
- 1 /*MinSize*/, 4 /*MinAlign*/, ArgFlags);
- unsigned NextReg = (State.getNextStackOffset() + 3) / 4;
- for (unsigned r = State.getFirstUnallocated(IntRegs, IntRegsSize);
- r < std::min(IntRegsSize, NextReg); ++r)
- State.AllocateReg(IntRegs[r]);
- return false;
- }
+ // Do not process byval args here.
+ if (ArgFlags.isByVal())
+ return true;
// Promote i8 and i16
if (LocVT == MVT::i8 || LocVT == MVT::i16) {
@@ -2323,279 +2659,72 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
} else
llvm_unreachable("Cannot handle this ValVT.");
- unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
- unsigned Offset = State.AllocateStack(SizeInBytes, OrigAlign);
-
- if (!Reg)
+ if (!Reg) {
+ unsigned Offset = State.AllocateStack(ValVT.getSizeInBits() >> 3,
+ OrigAlign);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
- else
+ } else
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false; // CC must always match
-}
-
-static const uint16_t Mips64IntRegs[8] =
- {Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
- Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
-static const uint16_t Mips64DPRegs[8] =
- {Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
- Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64};
-
-static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
- unsigned Align = std::max(ArgFlags.getByValAlign(), (unsigned)8);
- unsigned Size = (ArgFlags.getByValSize() + 7) / 8 * 8;
- unsigned FirstIdx = State.getFirstUnallocated(Mips64IntRegs, 8);
-
- assert(Align <= 16 && "Cannot handle alignments larger than 16.");
-
- // If byval is 16-byte aligned, the first arg register must be even.
- if ((Align == 16) && (FirstIdx % 2)) {
- State.AllocateReg(Mips64IntRegs[FirstIdx], Mips64DPRegs[FirstIdx]);
- ++FirstIdx;
- }
-
- // Mark the registers allocated.
- for (unsigned I = FirstIdx; Size && (I < 8); Size -= 8, ++I)
- State.AllocateReg(Mips64IntRegs[I], Mips64DPRegs[I]);
-
- // Allocate space on caller's stack.
- unsigned Offset = State.AllocateStack(Size, Align);
-
- if (FirstIdx < 8)
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Mips64IntRegs[FirstIdx],
- LocVT, LocInfo));
- else
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
-
- return true;
+ return false;
}
#include "MipsGenCallingConv.inc"
-static void
-AnalyzeMips64CallOperands(CCState &CCInfo,
- const SmallVectorImpl<ISD::OutputArg> &Outs) {
- unsigned NumOps = Outs.size();
- for (unsigned i = 0; i != NumOps; ++i) {
- MVT ArgVT = Outs[i].VT;
- ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- bool R;
-
- if (Outs[i].IsFixed)
- R = CC_MipsN(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
- else
- R = CC_MipsN_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
-
- if (R) {
-#ifndef NDEBUG
- dbgs() << "Call operand #" << i << " has unhandled type "
- << EVT(ArgVT).getEVTString();
-#endif
- llvm_unreachable(0);
- }
- }
-}
-
//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
static const unsigned O32IntRegsSize = 4;
-static const uint16_t O32IntRegs[] = {
- Mips::A0, Mips::A1, Mips::A2, Mips::A3
-};
-
// Return next O32 integer argument register.
static unsigned getNextIntArgReg(unsigned Reg) {
assert((Reg == Mips::A0) || (Reg == Mips::A2));
return (Reg == Mips::A0) ? Mips::A1 : Mips::A3;
}
-// Write ByVal Arg to arg registers and stack.
-static void
-WriteByValArg(SDValue Chain, DebugLoc dl,
- SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass,
- SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
- MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
- const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
- MVT PtrType, bool isLittle) {
- unsigned LocMemOffset = VA.getLocMemOffset();
- unsigned Offset = 0;
- uint32_t RemainingSize = Flags.getByValSize();
- unsigned ByValAlign = Flags.getByValAlign();
-
- // Copy the first 4 words of byval arg to registers A0 - A3.
- // FIXME: Use a stricter alignment if it enables better optimization in passes
- // run later.
- for (; RemainingSize >= 4 && LocMemOffset < 4 * 4;
- Offset += 4, RemainingSize -= 4, LocMemOffset += 4) {
- SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
- DAG.getConstant(Offset, MVT::i32));
- SDValue LoadVal = DAG.getLoad(MVT::i32, dl, Chain, LoadPtr,
- MachinePointerInfo(), false, false, false,
- std::min(ByValAlign, (unsigned )4));
- MemOpChains.push_back(LoadVal.getValue(1));
- unsigned DstReg = O32IntRegs[LocMemOffset / 4];
- RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
- }
-
- if (RemainingSize == 0)
- return;
+/// IsEligibleForTailCallOptimization - Check whether the call is eligible
+/// for tail call optimization.
+bool MipsTargetLowering::
+IsEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
+ unsigned NextStackOffset,
+ const MipsFunctionInfo& FI) const {
+ if (!EnableMipsTailCalls)
+ return false;
- // If there still is a register available for argument passing, write the
- // remaining part of the structure to it using subword loads and shifts.
- if (LocMemOffset < 4 * 4) {
- assert(RemainingSize <= 3 && RemainingSize >= 1 &&
- "There must be one to three bytes remaining.");
- unsigned LoadSize = (RemainingSize == 3 ? 2 : RemainingSize);
- SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
- DAG.getConstant(Offset, MVT::i32));
- unsigned Alignment = std::min(ByValAlign, (unsigned )4);
- SDValue LoadVal = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
- LoadPtr, MachinePointerInfo(),
- MVT::getIntegerVT(LoadSize * 8), false,
- false, Alignment);
- MemOpChains.push_back(LoadVal.getValue(1));
-
- // If target is big endian, shift it to the most significant half-word or
- // byte.
- if (!isLittle)
- LoadVal = DAG.getNode(ISD::SHL, dl, MVT::i32, LoadVal,
- DAG.getConstant(32 - LoadSize * 8, MVT::i32));
-
- Offset += LoadSize;
- RemainingSize -= LoadSize;
-
- // Read second subword if necessary.
- if (RemainingSize != 0) {
- assert(RemainingSize == 1 && "There must be one byte remaining.");
- LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
- DAG.getConstant(Offset, MVT::i32));
- unsigned Alignment = std::min(ByValAlign, (unsigned )2);
- SDValue Subword = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
- LoadPtr, MachinePointerInfo(),
- MVT::i8, false, false, Alignment);
- MemOpChains.push_back(Subword.getValue(1));
- // Insert the loaded byte to LoadVal.
- // FIXME: Use INS if supported by target.
- unsigned ShiftAmt = isLittle ? 16 : 8;
- SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i32, Subword,
- DAG.getConstant(ShiftAmt, MVT::i32));
- LoadVal = DAG.getNode(ISD::OR, dl, MVT::i32, LoadVal, Shift);
- }
+ // No tail call optimization for mips16.
+ if (Subtarget->inMips16Mode())
+ return false;
- unsigned DstReg = O32IntRegs[LocMemOffset / 4];
- RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
- return;
- }
+ // Return false if either the callee or caller has a byval argument.
+ if (MipsCCInfo.hasByValArg() || FI.hasByvalArg())
+ return false;
- // Copy remaining part of byval arg using memcpy.
- SDValue Src = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
- DAG.getConstant(Offset, MVT::i32));
- SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr,
- DAG.getIntPtrConstant(LocMemOffset));
- Chain = DAG.getMemcpy(Chain, dl, Dst, Src,
- DAG.getConstant(RemainingSize, MVT::i32),
- std::min(ByValAlign, (unsigned)4),
- /*isVolatile=*/false, /*AlwaysInline=*/false,
- MachinePointerInfo(0), MachinePointerInfo(0));
- MemOpChains.push_back(Chain);
+ // Return true if the callee's argument area is no larger than the
+ // caller's.
+ return NextStackOffset <= FI.getIncomingArgSize();
}
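Tail calls stay off by default and are gated on the -enable-mips-tail-calls flag
defined earlier in this diff. The eligibility test condenses to the following
predicate (a restatement of the code above for readability; the struct and
function names are invented for this note):

    struct CallCtx {
      bool TailCallsEnabled;   // -enable-mips-tail-calls
      bool InMips16Mode;       // no tail-call optimization for mips16
      bool CalleeHasByVal, CallerHasByVal;
      unsigned CalleeArgArea;  // NextStackOffset
      unsigned CallerArgArea;  // FI.getIncomingArgSize()
    };

    static bool eligibleForTailCall(const CallCtx &C) {
      if (!C.TailCallsEnabled || C.InMips16Mode)
        return false;
      if (C.CalleeHasByVal || C.CallerHasByVal)
        return false;
      // The callee's argument area must fit within the caller's.
      return C.CalleeArgArea <= C.CallerArgArea;
    }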
-// Copy Mips64 byVal arg to registers and stack.
-void static
-PassByValArg64(SDValue Chain, DebugLoc dl,
- SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass,
- SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
- MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
- const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
- EVT PtrTy, bool isLittle) {
- unsigned ByValSize = Flags.getByValSize();
- unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8);
- bool IsRegLoc = VA.isRegLoc();
- unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
- unsigned LocMemOffset = 0;
- unsigned MemCpySize = ByValSize;
-
- if (!IsRegLoc)
- LocMemOffset = VA.getLocMemOffset();
- else {
- const uint16_t *Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8,
- VA.getLocReg());
- const uint16_t *RegEnd = Mips64IntRegs + 8;
-
- // Copy double words to registers.
- for (; (Reg != RegEnd) && (ByValSize >= Offset + 8); ++Reg, Offset += 8) {
- SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
- DAG.getConstant(Offset, PtrTy));
- SDValue LoadVal = DAG.getLoad(MVT::i64, dl, Chain, LoadPtr,
- MachinePointerInfo(), false, false, false,
- Alignment);
- MemOpChains.push_back(LoadVal.getValue(1));
- RegsToPass.push_back(std::make_pair(*Reg, LoadVal));
- }
-
- // Return if the struct has been fully copied.
- if (!(MemCpySize = ByValSize - Offset))
- return;
-
- // If there is an argument register available, copy the remainder of the
- // byval argument with sub-doubleword loads and shifts.
- if (Reg != RegEnd) {
- assert((ByValSize < Offset + 8) &&
- "Size of the remainder should be smaller than 8-byte.");
- SDValue Val;
- for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) {
- unsigned RemSize = ByValSize - Offset;
-
- if (RemSize < LoadSize)
- continue;
-
- SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
- DAG.getConstant(Offset, PtrTy));
- SDValue LoadVal =
- DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i64, Chain, LoadPtr,
- MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
- false, false, Alignment);
- MemOpChains.push_back(LoadVal.getValue(1));
-
- // Offset in number of bits from double word boundary.
- unsigned OffsetDW = (Offset % 8) * 8;
- unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
- SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i64, LoadVal,
- DAG.getConstant(Shamt, MVT::i32));
-
- Val = Val.getNode() ? DAG.getNode(ISD::OR, dl, MVT::i64, Val, Shift) :
- Shift;
- Offset += LoadSize;
- Alignment = std::min(Alignment, LoadSize);
- }
-
- RegsToPass.push_back(std::make_pair(*Reg, Val));
- return;
- }
+SDValue
+MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
+ SDValue Chain, SDValue Arg, DebugLoc DL,
+ bool IsTailCall, SelectionDAG &DAG) const {
+ if (!IsTailCall) {
+ SDValue PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
+ DAG.getIntPtrConstant(Offset));
+ return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo(), false,
+ false, 0);
}
- assert(MemCpySize && "MemCpySize must not be zero.");
-
- // Copy remainder of byval arg to it with memcpy.
- SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
- DAG.getConstant(Offset, PtrTy));
- SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr,
- DAG.getIntPtrConstant(LocMemOffset));
- Chain = DAG.getMemcpy(Chain, dl, Dst, Src,
- DAG.getConstant(MemCpySize, PtrTy), Alignment,
- /*isVolatile=*/false, /*AlwaysInline=*/false,
- MachinePointerInfo(0), MachinePointerInfo(0));
- MemOpChains.push_back(Chain);
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ int FI = MFI->CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
+ SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
+ return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(),
+ /*isVolatile=*/ true, false, 0);
}
/// LowerCall - function arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
-/// TODO: isTailCall.
SDValue
MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
@@ -2610,56 +2739,49 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CallingConv::ID CallConv = CLI.CallConv;
bool isVarArg = CLI.IsVarArg;
- // MIPs target does not yet support tail call optimization.
- isTailCall = false;
-
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
+ MipsCC MipsCCInfo(CallConv, isVarArg, IsO32, CCInfo);
- if (CallConv == CallingConv::Fast)
- CCInfo.AnalyzeCallOperands(Outs, CC_Mips_FastCC);
- else if (IsO32)
- CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
- else if (HasMips64)
- AnalyzeMips64CallOperands(CCInfo, Outs);
- else
- CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
+ MipsCCInfo.analyzeCallOperands(Outs);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NextStackOffset = CCInfo.getNextStackOffset();
- unsigned StackAlignment = TFL->getStackAlignment();
- NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
- // Update size of the maximum argument space.
- // For O32, a minimum of four words (16 bytes) of argument space is
- // allocated.
- if (IsO32 && (CallConv != CallingConv::Fast))
- NextStackOffset = std::max(NextStackOffset, (unsigned)16);
+ // Check if it's really possible to do a tail call.
+ if (isTailCall)
+ isTailCall =
+ IsEligibleForTailCallOptimization(MipsCCInfo, NextStackOffset,
+ *MF.getInfo<MipsFunctionInfo>());
+
+ if (isTailCall)
+ ++NumTailCalls;
// Chain is the output chain of the last Load/Store or CopyToReg node.
// ByValChain is the output chain of the last Memcpy node created for copying
// byval arguments to the stack.
+ unsigned StackAlignment = TFL->getStackAlignment();
+ NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
- Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal);
+
+ if (!isTailCall)
+ Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal);
SDValue StackPtr = DAG.getCopyFromReg(Chain, dl,
IsN64 ? Mips::SP_64 : Mips::SP,
getPointerTy());
- if (MipsFI->getMaxCallFrameSize() < NextStackOffset)
- MipsFI->setMaxCallFrameSize(NextStackOffset);
-
 // With EABI it is possible to have 16 args in registers.
SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
+ MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin();
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
@@ -2672,14 +2794,12 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (Flags.isByVal()) {
assert(Flags.getByValSize() &&
"ByVal args of size 0 should have been ignored by front-end.");
- if (IsO32)
- WriteByValArg(Chain, dl, RegsToPass, MemOpChains, StackPtr,
- MFI, DAG, Arg, VA, Flags, getPointerTy(),
- Subtarget->isLittle());
- else
- PassByValArg64(Chain, dl, RegsToPass, MemOpChains, StackPtr,
- MFI, DAG, Arg, VA, Flags, getPointerTy(),
- Subtarget->isLittle());
+ assert(ByValArg != MipsCCInfo.byval_end());
+ assert(!isTailCall &&
+ "Do not tail-call optimize if there is a byval argument.");
+ passByValArg(Chain, dl, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
+ MipsCCInfo, *ByValArg, Flags, Subtarget->isLittle());
+ ++ByValArg;
continue;
}
@@ -2729,10 +2849,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 // emit ISD::STORE which stores the
 // parameter value to a stack location.
- SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
- DAG.getIntPtrConstant(VA.getLocMemOffset()));
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(), false, false, 0));
+ MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
+ Chain, Arg, dl, isTailCall, DAG));
}
// Transform all store nodes into one single node because all store
@@ -2861,6 +2979,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (InFlag.getNode())
Ops.push_back(InFlag);
+ if (isTailCall)
+ return DAG.getNode(MipsISD::TailCall, dl, MVT::Other, &Ops[0], Ops.size());
+
Chain = DAG.getNode(MipsISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
@@ -2904,70 +3025,6 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//
-static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
- std::vector<SDValue> &OutChains,
- SelectionDAG &DAG, unsigned NumWords, SDValue FIN,
- const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
- const Argument *FuncArg) {
- unsigned LocMem = VA.getLocMemOffset();
- unsigned FirstWord = LocMem / 4;
-
- // copy register A0 - A3 to frame object
- for (unsigned i = 0; i < NumWords; ++i) {
- unsigned CurWord = FirstWord + i;
- if (CurWord >= O32IntRegsSize)
- break;
-
- unsigned SrcReg = O32IntRegs[CurWord];
- unsigned Reg = AddLiveIn(MF, SrcReg, &Mips::CPURegsRegClass);
- SDValue StorePtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN,
- DAG.getConstant(i * 4, MVT::i32));
- SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(Reg, MVT::i32),
- StorePtr, MachinePointerInfo(FuncArg, i * 4),
- false, false, 0);
- OutChains.push_back(Store);
- }
-}
-
-// Create frame object on stack and copy registers used for byval passing to it.
-static unsigned
-CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
- std::vector<SDValue> &OutChains, SelectionDAG &DAG,
- const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
- MachineFrameInfo *MFI, bool IsRegLoc,
- SmallVectorImpl<SDValue> &InVals, MipsFunctionInfo *MipsFI,
- EVT PtrTy, const Argument *FuncArg) {
- const uint16_t *Reg = Mips64IntRegs + 8;
- int FOOffset; // Frame object offset from virtual frame pointer.
-
- if (IsRegLoc) {
- Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8, VA.getLocReg());
- FOOffset = (Reg - Mips64IntRegs) * 8 - 8 * 8;
- }
- else
- FOOffset = VA.getLocMemOffset();
-
- // Create frame object.
- unsigned NumRegs = (Flags.getByValSize() + 7) / 8;
- unsigned LastFI = MFI->CreateFixedObject(NumRegs * 8, FOOffset, true);
- SDValue FIN = DAG.getFrameIndex(LastFI, PtrTy);
- InVals.push_back(FIN);
-
- // Copy arg registers.
- for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs);
- ++Reg, ++I) {
- unsigned VReg = AddLiveIn(MF, *Reg, &Mips::CPU64RegsRegClass);
- SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN,
- DAG.getConstant(I * 8, PtrTy));
- SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64),
- StorePtr, MachinePointerInfo(FuncArg, I * 8),
- false, false, 0);
- OutChains.push_back(Store);
- }
-
- return LastFI;
-}
-
/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments places on the stack.
SDValue
@@ -2991,20 +3048,21 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
+ MipsCC MipsCCInfo(CallConv, isVarArg, IsO32, CCInfo);
- if (CallConv == CallingConv::Fast)
- CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FastCC);
- else if (IsO32)
- CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32);
- else
- CCInfo.AnalyzeFormalArguments(Ins, CC_Mips);
+ MipsCCInfo.analyzeFormalArguments(Ins);
+ MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
+ MipsCCInfo.hasByValArg());
Function::const_arg_iterator FuncArg =
DAG.getMachineFunction().getFunction()->arg_begin();
- int LastFI = 0;// MipsFI->LastInArgFI is 0 at the entry of this function.
+ unsigned CurArgIdx = 0;
+ MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin();
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++FuncArg) {
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
+ std::advance(FuncArg, Ins[i].OrigArgIndex - CurArgIdx);
+ CurArgIdx = Ins[i].OrigArgIndex;
EVT ValVT = VA.getValVT();
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool IsRegLoc = VA.isRegLoc();
@@ -3012,18 +3070,10 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
if (Flags.isByVal()) {
assert(Flags.getByValSize() &&
"ByVal args of size 0 should have been ignored by front-end.");
- if (IsO32) {
- unsigned NumWords = (Flags.getByValSize() + 3) / 4;
- LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(),
- true);
- SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
- InVals.push_back(FIN);
- ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags,
- &*FuncArg);
- } else // N32/64
- LastFI = CopyMips64ByValRegs(MF, Chain, dl, OutChains, DAG, VA, Flags,
- MFI, IsRegLoc, InVals, MipsFI,
- getPointerTy(), &*FuncArg);
+ assert(ByValArg != MipsCCInfo.byval_end());
+ copyByValRegs(Chain, dl, OutChains, DAG, Flags, InVals, &*FuncArg,
+ MipsCCInfo, *ByValArg);
+ ++ByValArg;
continue;
}
@@ -3085,13 +3135,13 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
assert(VA.isMemLoc());
// The stack pointer offset is relative to the caller stack frame.
- LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
+ int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), true);
// Create load nodes to retrieve arguments from the stack
- SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
+ SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
InVals.push_back(DAG.getLoad(ValVT, dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(LastFI),
+ MachinePointerInfo::getFixedStack(FI),
false, false, false, 0));
}
}
@@ -3102,55 +3152,16 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
unsigned Reg = MipsFI->getSRetReturnReg();
if (!Reg) {
- Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
+ Reg = MF.getRegInfo().
+ createVirtualRegister(getRegClassFor(IsN64 ? MVT::i64 : MVT::i32));
MipsFI->setSRetReturnReg(Reg);
}
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
}
- if (isVarArg) {
- unsigned NumOfRegs = IsO32 ? 4 : 8;
- const uint16_t *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs;
- unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs);
- int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot.
- const TargetRegisterClass *RC = IsO32 ?
- (const TargetRegisterClass*)&Mips::CPURegsRegClass :
- (const TargetRegisterClass*)&Mips::CPU64RegsRegClass;
- unsigned RegSize = RC->getSize();
- int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize;
-
- // Offset of the first variable argument from stack pointer.
- int FirstVaArgOffset;
-
- if (IsO32 || (Idx == NumOfRegs)) {
- FirstVaArgOffset =
- (CCInfo.getNextStackOffset() + RegSize - 1) / RegSize * RegSize;
- } else
- FirstVaArgOffset = RegSlotOffset;
-
- // Record the frame index of the first variable argument
- // which is a value necessary to VASTART.
- LastFI = MFI->CreateFixedObject(RegSize, FirstVaArgOffset, true);
- MipsFI->setVarArgsFrameIndex(LastFI);
-
- // Copy the integer registers that have not been used for argument passing
- // to the argument register save area. For O32, the save area is allocated
- // in the caller's stack frame, while for N32/64, it is allocated in the
- // callee's stack frame.
- for (int StackOffset = RegSlotOffset;
- Idx < NumOfRegs; ++Idx, StackOffset += RegSize) {
- unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegs[Idx], RC);
- SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
- MVT::getIntegerVT(RegSize * 8));
- LastFI = MFI->CreateFixedObject(RegSize, StackOffset, true);
- SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
- OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
- MachinePointerInfo(), false, false, 0));
- }
- }
-
- MipsFI->setLastInArgFI(LastFI);
+ if (isVarArg)
+ writeVarArgRegs(OutChains, MipsCCInfo, Chain, dl, DAG);
// All stores are grouped in one node to allow the matching between
// the size of Ins and InVals. This only happens when on varg functions
@@ -3167,6 +3178,17 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
+bool
+MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
+ MachineFunction &MF, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
+ RVLocs, Context);
+ return CCInfo.CheckReturn(Outs, RetCC_Mips);
+}
+
SDValue
MipsTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
@@ -3219,9 +3241,11 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
+ unsigned V0 = IsN64 ? Mips::V0_64 : Mips::V0;
- Chain = DAG.getCopyToReg(Chain, dl, Mips::V0, Val, Flag);
+ Chain = DAG.getCopyToReg(Chain, dl, V0, Val, Flag);
Flag = Chain.getValue(1);
+ MF.getRegInfo().addLiveOut(V0);
}
// Return on Mips is always a "jr $ra"
@@ -3325,8 +3349,11 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
case 'y': // Same as 'r'. Exists for compatibility.
case 'r':
- if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
+ if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
+ if (Subtarget->inMips16Mode())
+ return std::make_pair(0U, &Mips::CPU16RegsRegClass);
return std::make_pair(0U, &Mips::CPURegsRegClass);
+ }
if (VT == MVT::i64 && !HasMips64)
return std::make_pair(0U, &Mips::CPURegsRegClass);
if (VT == MVT::i64 && HasMips64)
@@ -3485,3 +3512,316 @@ unsigned MipsTargetLowering::getJumpTableEncoding() const {
return TargetLowering::getJumpTableEncoding();
}
+
+MipsTargetLowering::MipsCC::MipsCC(CallingConv::ID CallConv, bool IsVarArg,
+ bool IsO32, CCState &Info) : CCInfo(Info) {
+ UseRegsForByval = true;
+
+ if (IsO32) {
+ RegSize = 4;
+ NumIntArgRegs = array_lengthof(O32IntRegs);
+ ReservedArgArea = 16;
+ IntArgRegs = ShadowRegs = O32IntRegs;
+ FixedFn = VarFn = CC_MipsO32;
+ } else {
+ RegSize = 8;
+ NumIntArgRegs = array_lengthof(Mips64IntRegs);
+ ReservedArgArea = 0;
+ IntArgRegs = Mips64IntRegs;
+ ShadowRegs = Mips64DPRegs;
+ FixedFn = CC_MipsN;
+ VarFn = CC_MipsN_VarArg;
+ }
+
+ if (CallConv == CallingConv::Fast) {
+ assert(!IsVarArg);
+ UseRegsForByval = false;
+ ReservedArgArea = 0;
+ FixedFn = VarFn = CC_Mips_FastCC;
+ }
+
+ // Pre-allocate reserved argument area.
+ CCInfo.AllocateStack(ReservedArgArea, 1);
+}
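A minimal model of the per-ABI parameters the constructor above selects (values
copied from the code; the helper is invented for this note):

    #include <cstdio>

    struct ABIParams { unsigned RegSize, NumIntArgRegs, ReservedArgArea; };

    static ABIParams paramsFor(bool IsO32, bool IsFastCC) {
      // O32: 4-byte regs, a0-a3, 16-byte reserved argument area.
      // N32/N64: 8-byte regs, a0-a3 plus t0-t3, no reserved area.
      ABIParams P = IsO32 ? ABIParams{4, 4, 16} : ABIParams{8, 8, 0};
      if (IsFastCC)
        P.ReservedArgArea = 0; // fastcc never reserves the home area
      return P;
    }

    int main() {
      ABIParams O32 = paramsFor(true, false);
      std::printf("O32: RegSize=%u ArgRegs=%u Reserved=%u\n",
                  O32.RegSize, O32.NumIntArgRegs, O32.ReservedArgArea);
    }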
+
+void MipsTargetLowering::MipsCC::
+analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Args) {
+ unsigned NumOpnds = Args.size();
+
+ for (unsigned I = 0; I != NumOpnds; ++I) {
+ MVT ArgVT = Args[I].VT;
+ ISD::ArgFlagsTy ArgFlags = Args[I].Flags;
+ bool R;
+
+ if (ArgFlags.isByVal()) {
+ handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags);
+ continue;
+ }
+
+ if (Args[I].IsFixed)
+ R = FixedFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+ else
+ R = VarFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+
+ if (R) {
+#ifndef NDEBUG
+ dbgs() << "Call operand #" << I << " has unhandled type "
+ << EVT(ArgVT).getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
+void MipsTargetLowering::MipsCC::
+analyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Args) {
+ unsigned NumArgs = Args.size();
+
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ MVT ArgVT = Args[I].VT;
+ ISD::ArgFlagsTy ArgFlags = Args[I].Flags;
+
+ if (ArgFlags.isByVal()) {
+ handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags);
+ continue;
+ }
+
+ if (!FixedFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo))
+ continue;
+
+#ifndef NDEBUG
+ dbgs() << "Formal Arg #" << I << " has unhandled type "
+ << EVT(ArgVT).getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+}
+
+void
+MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT,
+ MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags) {
+ assert(ArgFlags.getByValSize() && "Byval argument's size shouldn't be 0.");
+
+ struct ByValArgInfo ByVal;
+ unsigned ByValSize = RoundUpToAlignment(ArgFlags.getByValSize(), RegSize);
+ unsigned Align = std::min(std::max(ArgFlags.getByValAlign(), RegSize),
+ RegSize * 2);
+
+ if (UseRegsForByval)
+ allocateRegs(ByVal, ByValSize, Align);
+
+ // Allocate space on caller's stack.
+ ByVal.Address = CCInfo.AllocateStack(ByValSize - RegSize * ByVal.NumRegs,
+ Align);
+ CCInfo.addLoc(CCValAssign::getMem(ValNo, ValVT, ByVal.Address, LocVT,
+ LocInfo));
+ ByValArgs.push_back(ByVal);
+}
+
+void MipsTargetLowering::MipsCC::allocateRegs(ByValArgInfo &ByVal,
+ unsigned ByValSize,
+ unsigned Align) {
+ assert(!(ByValSize % RegSize) && !(Align % RegSize) &&
+ "Byval argument's size and alignment should be a multiple of"
+ "RegSize.");
+
+ ByVal.FirstIdx = CCInfo.getFirstUnallocated(IntArgRegs, NumIntArgRegs);
+
+ // If Align > RegSize, the first arg register must be even.
+ if ((Align > RegSize) && (ByVal.FirstIdx % 2)) {
+ CCInfo.AllocateReg(IntArgRegs[ByVal.FirstIdx], ShadowRegs[ByVal.FirstIdx]);
+ ++ByVal.FirstIdx;
+ }
+
+ // Mark the registers allocated.
+ for (unsigned I = ByVal.FirstIdx; ByValSize && (I < NumIntArgRegs);
+ ByValSize -= RegSize, ++I, ++ByVal.NumRegs)
+ CCInfo.AllocateReg(IntArgRegs[I], ShadowRegs[I]);
+}
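A standalone model of allocateRegs (illustrative; the struct and helper are
invented for this note): given a byval size already rounded up to a multiple of
RegSize and its clamped alignment, it computes which integer argument registers
the argument consumes, including the even-index skip for over-aligned byvals.

    #include <cstdio>

    struct ByValAlloc { unsigned FirstIdx, NumRegs; };

    static ByValAlloc allocRegs(unsigned FirstFree, unsigned NumIntArgRegs,
                                unsigned ByValSize, unsigned Align,
                                unsigned RegSize) {
      ByValAlloc A = {FirstFree, 0};
      // If Align > RegSize, the first register used must have an even index;
      // the skipped register is marked allocated but carries no data.
      if (Align > RegSize && (A.FirstIdx % 2))
        ++A.FirstIdx;
      for (unsigned I = A.FirstIdx; ByValSize && I < NumIntArgRegs;
           ByValSize -= RegSize, ++I)
        ++A.NumRegs;
      return A;
    }

    int main() {
      // A 24-byte, 16-byte-aligned byval on N64 (RegSize 8, 8 arg regs),
      // arriving when register index 1 is next free: it skips to index 2
      // and occupies indices 2..4 ($a2, $a3, $t0).
      ByValAlloc A = allocRegs(1, 8, 24, 16, 8);
      std::printf("FirstIdx=%u NumRegs=%u\n", A.FirstIdx, A.NumRegs);
    }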
+
+void MipsTargetLowering::
+copyByValRegs(SDValue Chain, DebugLoc DL, std::vector<SDValue> &OutChains,
+ SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
+ SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
+ const MipsCC &CC, const ByValArgInfo &ByVal) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ unsigned RegAreaSize = ByVal.NumRegs * CC.regSize();
+ unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
+ int FrameObjOffset;
+
+ if (RegAreaSize)
+ FrameObjOffset = (int)CC.reservedArgArea() -
+ (int)((CC.numIntArgRegs() - ByVal.FirstIdx) * CC.regSize());
+ else
+ FrameObjOffset = ByVal.Address;
+
+ // Create frame object.
+ EVT PtrTy = getPointerTy();
+ int FI = MFI->CreateFixedObject(FrameObjSize, FrameObjOffset, true);
+ SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
+ InVals.push_back(FIN);
+
+ if (!ByVal.NumRegs)
+ return;
+
+ // Copy arg registers.
+ EVT RegTy = MVT::getIntegerVT(CC.regSize() * 8);
+ const TargetRegisterClass *RC = getRegClassFor(RegTy);
+
+ for (unsigned I = 0; I < ByVal.NumRegs; ++I) {
+ unsigned ArgReg = CC.intArgRegs()[ByVal.FirstIdx + I];
+ unsigned VReg = AddLiveIn(MF, ArgReg, RC);
+ unsigned Offset = I * CC.regSize();
+ SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
+ StorePtr, MachinePointerInfo(FuncArg, Offset),
+ false, false, 0);
+ OutChains.push_back(Store);
+ }
+}
+
+// Copy byval arg to registers and stack.
+void MipsTargetLowering::
+passByValArg(SDValue Chain, DebugLoc DL,
+ SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass,
+ SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+ MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
+ const MipsCC &CC, const ByValArgInfo &ByVal,
+ const ISD::ArgFlagsTy &Flags, bool isLittle) const {
+ unsigned ByValSize = Flags.getByValSize();
+ unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
+ unsigned RegSize = CC.regSize();
+ unsigned Alignment = std::min(Flags.getByValAlign(), RegSize);
+ EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSize * 8);
+
+ if (ByVal.NumRegs) {
+ const uint16_t *ArgRegs = CC.intArgRegs();
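+ // True if the last register holds only part of a word; e.g., a 10-byte
+ // struct on O32 occupies three registers but only two full words.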
+ bool LeftoverBytes = (ByVal.NumRegs * RegSize > ByValSize);
+ unsigned I = 0;
+
+ // Copy words to registers.
+ for (; I < ByVal.NumRegs - LeftoverBytes; ++I, Offset += RegSize) {
+ SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
+ MachinePointerInfo(), false, false, false,
+ Alignment);
+ MemOpChains.push_back(LoadVal.getValue(1));
+ unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I];
+ RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
+ }
+
+ // Return if the struct has been fully copied.
+ if (ByValSize == Offset)
+ return;
+
+ // Copy the remainder of the byval argument with sub-word loads and shifts.
+ if (LeftoverBytes) {
+ assert((ByValSize > Offset) && (ByValSize < Offset + RegSize) &&
+ "Size of the remainder should be smaller than RegSize.");
+ SDValue Val;
+
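+ // Assemble the tail from progressively smaller zero-extended loads; e.g.,
+ // 3 leftover bytes on O32 are read as a halfword followed by a byte, each
+ // shifted into position (per endianness) and OR'ed into Val.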
+ for (unsigned LoadSize = RegSize / 2, TotalSizeLoaded = 0;
+ Offset < ByValSize; LoadSize /= 2) {
+ unsigned RemSize = ByValSize - Offset;
+
+ if (RemSize < LoadSize)
+ continue;
+
+ // Load subword.
+ SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue LoadVal =
+ DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr,
+ MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
+ false, false, Alignment);
+ MemOpChains.push_back(LoadVal.getValue(1));
+
+ // Shift the loaded value.
+ unsigned Shamt;
+
+ if (isLittle)
+ Shamt = TotalSizeLoaded;
+ else
+ Shamt = (RegSize - (TotalSizeLoaded + LoadSize)) * 8;
+
+ SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
+ DAG.getConstant(Shamt, MVT::i32));
+
+ if (Val.getNode())
+ Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
+ else
+ Val = Shift;
+
+ Offset += LoadSize;
+ TotalSizeLoaded += LoadSize;
+ Alignment = std::min(Alignment, LoadSize);
+ }
+
+ unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I];
+ RegsToPass.push_back(std::make_pair(ArgReg, Val));
+ return;
+ }
+ }
+
+ // Copy the remainder of the byval argument to the stack with memcpy.
+ unsigned MemCpySize = ByValSize - Offset;
+ SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
+ DAG.getIntPtrConstant(ByVal.Address));
+ Chain = DAG.getMemcpy(Chain, DL, Dst, Src,
+ DAG.getConstant(MemCpySize, PtrTy), Alignment,
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ MachinePointerInfo(0), MachinePointerInfo(0));
+ MemOpChains.push_back(Chain);
+}
+
+void
+MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
+ const MipsCC &CC, SDValue Chain,
+ DebugLoc DL, SelectionDAG &DAG) const {
+ unsigned NumRegs = CC.numIntArgRegs();
+ const uint16_t *ArgRegs = CC.intArgRegs();
+ const CCState &CCInfo = CC.getCCInfo();
+ unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumRegs);
+ unsigned RegSize = CC.regSize();
+ EVT RegTy = MVT::getIntegerVT(RegSize * 8);
+ const TargetRegisterClass *RC = getRegClassFor(RegTy);
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+
+ // Offset of the first variable argument from stack pointer.
+ int VaArgOffset;
+
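+ // If all argument registers were consumed by fixed arguments, varargs start
+ // at the first stack slot past them; otherwise they start inside the
+ // reserved argument area (e.g., at 16 - 4 * (NumRegs - Idx) on O32).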
+ if (NumRegs == Idx)
+ VaArgOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), RegSize);
+ else
+ VaArgOffset =
+ (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx));
+
+ // Record the frame index of the first variable argument,
+ // which is needed by VASTART.
+ int FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true);
+ MipsFI->setVarArgsFrameIndex(FI);
+
+ // Copy the integer registers that have not been used for argument passing
+ // to the argument register save area. For O32, the save area is allocated
+ // in the caller's stack frame, while for N32/64, it is allocated in the
+ // callee's stack frame.
+ for (unsigned I = Idx; I < NumRegs; ++I, VaArgOffset += RegSize) {
+ unsigned Reg = AddLiveIn(MF, ArgRegs[I], RC);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
+ FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true);
+ SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
+ SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
+ MachinePointerInfo(), false, false, 0);
+ cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(0);
+ OutChains.push_back(Store);
+ }
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
index 95ea8fa..43f97e8 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -17,6 +17,7 @@
#include "Mips.h"
#include "MipsSubtarget.h"
+#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
@@ -29,6 +30,9 @@ namespace llvm {
// Jump and link (call)
JmpLink,
+ // Tail call
+ TailCall,
+
// Get the Higher 16 bits from a 32-bit immediate
// No relation with Mips Hi register
Hi,
@@ -81,6 +85,47 @@ namespace llvm {
Ext,
Ins,
+ // EXTR.W intrinsic nodes.
+ EXTP,
+ EXTPDP,
+ EXTR_S_H,
+ EXTR_W,
+ EXTR_R_W,
+ EXTR_RS_W,
+ SHILO,
+ MTHLIP,
+
+ // DPA.W intrinsic nodes.
+ MULSAQ_S_W_PH,
+ MAQ_S_W_PHL,
+ MAQ_S_W_PHR,
+ MAQ_SA_W_PHL,
+ MAQ_SA_W_PHR,
+ DPAU_H_QBL,
+ DPAU_H_QBR,
+ DPSU_H_QBL,
+ DPSU_H_QBR,
+ DPAQ_S_W_PH,
+ DPSQ_S_W_PH,
+ DPAQ_SA_L_W,
+ DPSQ_SA_L_W,
+ DPA_W_PH,
+ DPS_W_PH,
+ DPAQX_S_W_PH,
+ DPAQX_SA_W_PH,
+ DPAX_W_PH,
+ DPSX_W_PH,
+ DPSQX_S_W_PH,
+ DPSQX_SA_W_PH,
+ MULSA_W_PH,
+
+ MULT,
+ MULTU,
+ MADD_DSP,
+ MADDU_DSP,
+ MSUB_DSP,
+ MSUBU_DSP,
+
// Load/Store Left/Right nodes.
LWL = ISD::FIRST_TARGET_MEMORY_OPCODE,
LWR,
@@ -96,6 +141,7 @@ namespace llvm {
//===--------------------------------------------------------------------===//
// TargetLowering Implementation
//===--------------------------------------------------------------------===//
+ class MipsFunctionInfo;
class MipsTargetLowering : public TargetLowering {
public:
@@ -105,9 +151,19 @@ namespace llvm {
virtual bool allowsUnalignedMemoryAccesses (EVT VT) const;
+ virtual void LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const;
+
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ /// ReplaceNodeResults - Replace the results of node with an illegal result
+ /// type with new values built out of custom code.
+ ///
+ virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
+ SelectionDAG &DAG) const;
+
/// getTargetNodeName - This method returns the name of a target specific
// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
@@ -117,6 +173,69 @@ namespace llvm {
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
private:
+
+ /// ByValArgInfo - Byval argument information.
+ struct ByValArgInfo {
+ unsigned FirstIdx; // Index of the first register used.
+ unsigned NumRegs; // Number of registers used for this argument.
+ unsigned Address; // Offset of the stack area used to pass this argument.
+
+ ByValArgInfo() : FirstIdx(0), NumRegs(0), Address(0) {}
+ };
+
+ /// MipsCC - This class provides methods used to analyze formal and call
+ /// arguments and inquire about calling convention information.
+ class MipsCC {
+ public:
+ MipsCC(CallingConv::ID CallConv, bool IsVarArg, bool IsO32,
+ CCState &Info);
+
+ void analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs);
+ void analyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins);
+ void handleByValArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags);
+
+ const CCState &getCCInfo() const { return CCInfo; }
+
+ /// hasByValArg - Returns true if function has byval arguments.
+ bool hasByValArg() const { return !ByValArgs.empty(); }
+
+ /// useRegsForByval - Returns true if the calling convention allows the
+ /// use of registers to pass byval arguments.
+ bool useRegsForByval() const { return UseRegsForByval; }
+
+ /// regSize - Size (in number of bytes) of integer registers.
+ unsigned regSize() const { return RegSize; }
+
+ /// numIntArgRegs - Number of integer registers available for calls.
+ unsigned numIntArgRegs() const { return NumIntArgRegs; }
+
+ /// reservedArgArea - The size of the area the caller reserves for
+ /// register arguments. This is 16 bytes if the ABI is O32.
+ unsigned reservedArgArea() const { return ReservedArgArea; }
+
+ /// intArgRegs - Pointer to array of integer registers.
+ const uint16_t *intArgRegs() const { return IntArgRegs; }
+
+ typedef SmallVector<ByValArgInfo, 2>::const_iterator byval_iterator;
+ byval_iterator byval_begin() const { return ByValArgs.begin(); }
+ byval_iterator byval_end() const { return ByValArgs.end(); }
+
+ private:
+ void allocateRegs(ByValArgInfo &ByVal, unsigned ByValSize,
+ unsigned Align);
+
+ CCState &CCInfo;
+ bool UseRegsForByval;
+ unsigned RegSize;
+ unsigned NumIntArgRegs;
+ unsigned ReservedArgArea;
+ const uint16_t *IntArgRegs, *ShadowRegs;
+ SmallVector<ByValArgInfo, 2> ByValArgs;
+ llvm::CCAssignFn *FixedFn, *VarFn;
+ };
+
// Subtarget Info
const MipsSubtarget *Subtarget;
@@ -151,6 +270,39 @@ namespace llvm {
bool IsSRA) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
+
+ /// IsEligibleForTailCallOptimization - Check whether the call is eligible
+ /// for tail call optimization.
+ bool IsEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
+ unsigned NextStackOffset,
+ const MipsFunctionInfo& FI) const;
+
+ /// copyByValRegs - Copy argument registers which were used to pass a byval
+ /// argument to the stack. Create a stack frame object for the byval
+ /// argument.
+ void copyByValRegs(SDValue Chain, DebugLoc DL,
+ std::vector<SDValue> &OutChains, SelectionDAG &DAG,
+ const ISD::ArgFlagsTy &Flags,
+ SmallVectorImpl<SDValue> &InVals,
+ const Argument *FuncArg,
+ const MipsCC &CC, const ByValArgInfo &ByVal) const;
+
+ /// passByValArg - Pass a byval argument in registers or on stack.
+ void passByValArg(SDValue Chain, DebugLoc DL,
+ SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass,
+ SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+ MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
+ const MipsCC &CC, const ByValArgInfo &ByVal,
+ const ISD::ArgFlagsTy &Flags, bool isLittle) const;
+
+ /// writeVarArgRegs - Write variable function arguments passed in registers
+ /// to the stack. Also create a stack frame object for the first variable
+ /// argument.
+ void writeVarArgRegs(std::vector<SDValue> &OutChains, const MipsCC &CC,
+ SDValue Chain, DebugLoc DL, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
@@ -159,10 +311,20 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
+ SDValue passArgOnStack(SDValue StackPtr, unsigned Offset, SDValue Chain,
+ SDValue Arg, DebugLoc DL, bool IsTailCall,
+ SelectionDAG &DAG) const;
+
virtual SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
+ virtual bool
+ CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const;
+
virtual SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
@@ -209,6 +371,8 @@ namespace llvm {
virtual unsigned getJumpTableEncoding() const;
+ MachineBasicBlock *EmitBPOSGE32(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned Size, unsigned BinOpcode, bool Nand = false) const;
MachineBasicBlock *EmitAtomicBinaryPartword(MachineInstr *MI,
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
index 3e78c45..33ee020 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
@@ -90,20 +90,20 @@ def fpimm0neg : PatLeaf<(fpimm), [{
let DecoderMethod = "DecodeFMem" in {
class FPLoad<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>:
FMem<op, (outs RC:$ft), (ins MemOpnd:$addr),
- !strconcat(opstr, "\t$ft, $addr"), [(set RC:$ft, (load_a addr:$addr))],
+ !strconcat(opstr, "\t$ft, $addr"), [(set RC:$ft, (load addr:$addr))],
IILoad>;
// FP store.
class FPStore<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>:
FMem<op, (outs), (ins RC:$ft, MemOpnd:$addr),
- !strconcat(opstr, "\t$ft, $addr"), [(store_a RC:$ft, addr:$addr)],
+ !strconcat(opstr, "\t$ft, $addr"), [(store RC:$ft, addr:$addr)],
IIStore>;
}
// FP indexed load.
class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC,
RegisterClass PRC, SDPatternOperator FOp = null_frag>:
FFMemIdx<funct, (outs DRC:$fd), (ins PRC:$base, PRC:$index),
- !strconcat(opstr, "\t$fd, $index($base)"),
+ !strconcat(opstr, "\t$fd, ${index}(${base})"),
[(set DRC:$fd, (FOp (add PRC:$base, PRC:$index)))]> {
let fs = 0;
}
@@ -112,7 +112,7 @@ class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC,
class FPIdxStore<bits<6> funct, string opstr, RegisterClass DRC,
RegisterClass PRC, SDPatternOperator FOp= null_frag>:
FFMemIdx<funct, (outs), (ins DRC:$fs, PRC:$base, PRC:$index),
- !strconcat(opstr, "\t$fs, $index($base)"),
+ !strconcat(opstr, "\t$fs, ${index}(${base})"),
[(FOp DRC:$fs, (add PRC:$base, PRC:$index))]> {
let fd = 0;
}
@@ -182,20 +182,21 @@ defm CEIL_W : FFR1_W_M<0xe, "ceil">;
defm CEIL_L : FFR1_L_M<0xa, "ceil">;
defm FLOOR_W : FFR1_W_M<0xf, "floor">;
defm FLOOR_L : FFR1_L_M<0xb, "floor">;
-defm CVT_W : FFR1_W_M<0x24, "cvt">;
+defm CVT_W : FFR1_W_M<0x24, "cvt">, NeverHasSideEffects;
//defm CVT_L : FFR1_L_M<0x25, "cvt">;
-def CVT_S_W : FFR1<0x20, 20, "cvt", "s.w", FGR32, FGR32>;
-def CVT_L_S : FFR1<0x25, 16, "cvt", "l.s", FGR64, FGR32>;
-def CVT_L_D64: FFR1<0x25, 17, "cvt", "l.d", FGR64, FGR64>;
+def CVT_S_W : FFR1<0x20, 20, "cvt", "s.w", FGR32, FGR32>, NeverHasSideEffects;
+def CVT_L_S : FFR1<0x25, 16, "cvt", "l.s", FGR64, FGR32>, NeverHasSideEffects;
+def CVT_L_D64: FFR1<0x25, 17, "cvt", "l.d", FGR64, FGR64>, NeverHasSideEffects;
-let Predicates = [NotFP64bit, HasStandardEncoding] in {
+let Predicates = [NotFP64bit, HasStandardEncoding], neverHasSideEffects = 1 in {
def CVT_S_D32 : FFR1<0x20, 17, "cvt", "s.d", FGR32, AFGR64>;
def CVT_D32_W : FFR1<0x21, 20, "cvt", "d.w", AFGR64, FGR32>;
def CVT_D32_S : FFR1<0x21, 16, "cvt", "d.s", AFGR64, FGR32>;
}
-let Predicates = [IsFP64bit, HasStandardEncoding], DecoderNamespace = "Mips64" in {
+let Predicates = [IsFP64bit, HasStandardEncoding], DecoderNamespace = "Mips64",
+ neverHasSideEffects = 1 in {
def CVT_S_D64 : FFR1<0x20, 17, "cvt", "s.d", FGR32, FGR64>;
def CVT_S_L : FFR1<0x20, 21, "cvt", "s.l", FGR32, FGR64>;
def CVT_D64_W : FFR1<0x21, 20, "cvt", "d.w", FGR64, FGR32>;
@@ -282,26 +283,26 @@ let Predicates = [NotN64, NotMips64, HasStandardEncoding] in {
// Indexed loads and stores.
let Predicates = [HasMips32r2Or64, HasStandardEncoding] in {
- def LWXC1 : FPIdxLoad<0x0, "lwxc1", FGR32, CPURegs, load_a>;
- def SWXC1 : FPIdxStore<0x8, "swxc1", FGR32, CPURegs, store_a>;
+ def LWXC1 : FPIdxLoad<0x0, "lwxc1", FGR32, CPURegs, load>;
+ def SWXC1 : FPIdxStore<0x8, "swxc1", FGR32, CPURegs, store>;
}
let Predicates = [HasMips32r2, NotMips64, HasStandardEncoding] in {
- def LDXC1 : FPIdxLoad<0x1, "ldxc1", AFGR64, CPURegs, load_a>;
- def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store_a>;
+ def LDXC1 : FPIdxLoad<0x1, "ldxc1", AFGR64, CPURegs, load>;
+ def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store>;
}
let Predicates = [HasMips64, NotN64, HasStandardEncoding], DecoderNamespace="Mips64" in {
- def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load_a>;
- def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, store_a>;
+ def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load>;
+ def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, store>;
}
// n64
let Predicates = [IsN64, HasStandardEncoding], isCodeGenOnly=1 in {
- def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load_a>;
- def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load_a>;
- def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store_a>;
- def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store_a>;
+ def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load>;
+ def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load>;
+ def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store>;
+ def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store>;
}
// Load/store doubleword indexed unaligned.
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
index 8feb853..1ecbdc2 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
@@ -92,6 +92,14 @@ class PseudoSE<dag outs, dag ins, string asmstr, list<dag> pattern>:
let Predicates = [HasStandardEncoding];
}
+// Pseudo-instructions for alternate assembly syntax (never used by codegen).
+// These are aliases that require C++ handling to convert to the target
+// instruction, while InstAliases can be handled directly by tblgen.
+class MipsAsmPseudoInst<dag outs, dag ins, string asmstr>:
+ MipsInst<outs, ins, asmstr, [], IIPseudo, Pseudo> {
+ let isPseudo = 1;
+ let Pattern = [];
+}
//===----------------------------------------------------------------------===//
// Format R instruction class in Mips : <|opcode|rs|rt|rd|shamt|funct|>
//===----------------------------------------------------------------------===//
@@ -163,6 +171,27 @@ class FJ<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
let Inst{25-0} = addr;
}
+//===----------------------------------------------------------------------===//
+// MFC instruction class in Mips : <|op|mf|rt|rd|00000000|sel|>
+//===----------------------------------------------------------------------===//
+class MFC3OP<bits<6> op, bits<5> _mfmt, dag outs, dag ins, string asmstr>:
+ InstSE<outs, ins, asmstr, [], NoItinerary, FrmFR>
+{
+ bits<5> mfmt;
+ bits<5> rt;
+ bits<5> rd;
+ bits<3> sel;
+
+ let Opcode = op;
+ let mfmt = _mfmt;
+
+ let Inst{25-21} = mfmt;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-3} = 0;
+ let Inst{2-0} = sel;
+}
+
//===----------------------------------------------------------------------===//
//
// FLOATING POINT INSTRUCTION FORMATS
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 50e3eb5..ca80d43 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -95,6 +95,7 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const
{
+
MachineBasicBlock::reverse_iterator I = MBB.rbegin(), REnd = MBB.rend();
// Skip all the debug instructions.
@@ -177,9 +178,14 @@ void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB,
const MCInstrDesc &MCID = get(Opc);
MachineInstrBuilder MIB = BuildMI(&MBB, DL, MCID);
- for (unsigned i = 1; i < Cond.size(); ++i)
- MIB.addReg(Cond[i].getReg());
-
+ for (unsigned i = 1; i < Cond.size(); ++i) {
+ if (Cond[i].isReg())
+ MIB.addReg(Cond[i].getReg());
+ else if (Cond[i].isImm())
+ MIB.addImm(Cond[i].getImm());
+ else
+ llvm_unreachable("Cannot copy operand");
+ }
MIB.addMBB(TBB);
}
@@ -262,46 +268,3 @@ unsigned MipsInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
}
}
}
-
-unsigned
-llvm::Mips::loadImmediate(int64_t Imm, bool IsN64, const TargetInstrInfo &TII,
- MachineBasicBlock& MBB,
- MachineBasicBlock::iterator II, DebugLoc DL,
- bool LastInstrIsADDiu,
- MipsAnalyzeImmediate::Inst *LastInst) {
- MipsAnalyzeImmediate AnalyzeImm;
- unsigned Size = IsN64 ? 64 : 32;
- unsigned LUi = IsN64 ? Mips::LUi64 : Mips::LUi;
- unsigned ZEROReg = IsN64 ? Mips::ZERO_64 : Mips::ZERO;
- unsigned ATReg = IsN64 ? Mips::AT_64 : Mips::AT;
-
- const MipsAnalyzeImmediate::InstSeq &Seq =
- AnalyzeImm.Analyze(Imm, Size, LastInstrIsADDiu);
- MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
-
- if (LastInst && (Seq.size() == 1)) {
- *LastInst = *Inst;
- return 0;
- }
-
- // The first instruction can be a LUi, which is different from other
- // instructions (ADDiu, ORI and SLL) in that it does not have a register
- // operand.
- if (Inst->Opc == LUi)
- BuildMI(MBB, II, DL, TII.get(LUi), ATReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
- else
- BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
-
- // Build the remaining instructions in Seq. Skip the last instruction if
- // LastInst is not 0.
- for (++Inst; Inst != Seq.end() - !!LastInst; ++Inst)
- BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
-
- if (LastInst)
- *LastInst = *Inst;
-
- return Seq.size() - !!LastInst;
-}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
index 7d56259..aca2bc7 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -88,18 +88,6 @@ private:
const SmallVectorImpl<MachineOperand>& Cond) const;
};
-namespace Mips {
- /// Emit a series of instructions to load an immediate. All instructions
- /// except for the last one are emitted. The function returns the number of
- /// MachineInstrs generated. The opcode-immediate pair of the last
- /// instruction is returned in LastInst, if it is not 0.
- unsigned
- loadImmediate(int64_t Imm, bool IsN64, const TargetInstrInfo &TII,
- MachineBasicBlock& MBB, MachineBasicBlock::iterator II,
- DebugLoc DL, bool LastInstrIsADDiu,
- MipsAnalyzeImmediate::Inst *LastInst);
-}
-
/// Create MipsInstrInfo objects.
const MipsInstrInfo *createMips16InstrInfo(MipsTargetMachine &TM);
const MipsInstrInfo *createMipsSEInstrInfo(MipsTargetMachine &TM);
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
index da15d4d..f16b5f9 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -52,6 +52,10 @@ def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink,
[SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
SDNPVariadic]>;
+// Tail call
+def MipsTailCall : SDNode<"MipsISD::TailCall", SDT_MipsJmpLink,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+
// Hi and Lo nodes are used to handle global addresses. Used on
// MipsISelLowering to lower stuff like GlobalAddress, ExternalSymbol
// static model. (nothing to do with Mips Registers Hi and Lo)
@@ -74,9 +78,10 @@ def MipsRet : SDNode<"MipsISD::Ret", SDTNone, [SDNPHasChain, SDNPOptInGlue]>;
// These are target-independent nodes, but have target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart,
- [SDNPHasChain, SDNPOutGlue]>;
+ [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+ [SDNPHasChain, SDNPSideEffect,
+ SDNPOptInGlue, SDNPOutGlue]>;
// MAdd*/MSub* nodes
def MipsMAdd : SDNode<"MipsISD::MAdd", SDT_MipsMAddMSub,
@@ -110,7 +115,7 @@ def MipsWrapper : SDNode<"MipsISD::Wrapper", SDTIntBinOp>;
def MipsDynAlloc : SDNode<"MipsISD::DynAlloc", SDT_MipsDynAlloc,
[SDNPHasChain, SDNPInGlue]>;
-def MipsSync : SDNode<"MipsISD::Sync", SDT_Sync, [SDNPHasChain]>;
+def MipsSync : SDNode<"MipsISD::Sync", SDT_Sync, [SDNPHasChain,SDNPSideEffect]>;
def MipsExt : SDNode<"MipsISD::Ext", SDT_Ext>;
def MipsIns : SDNode<"MipsISD::Ins", SDT_Ins>;
@@ -174,6 +179,35 @@ class MipsPat<dag pattern, dag result> : Pat<pattern, result> {
let Predicates = [HasStandardEncoding];
}
+class IsBranch {
+ bit isBranch = 1;
+}
+
+class IsReturn {
+ bit isReturn = 1;
+}
+
+class IsCall {
+ bit isCall = 1;
+}
+
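+// A tail call is a call that also terminates the function; marking it as a
+// terminator, return, and barrier keeps anything from being scheduled after
+// it in the block.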
+class IsTailCall {
+ bit isCall = 1;
+ bit isTerminator = 1;
+ bit isReturn = 1;
+ bit isBarrier = 1;
+ bit hasExtraSrcRegAllocReq = 1;
+ bit isCodeGenOnly = 1;
+}
+
+class IsAsCheapAsAMove {
+ bit isAsCheapAsAMove = 1;
+}
+
+class NeverHasSideEffects {
+ bit neverHasSideEffects = 1;
+}
+
//===----------------------------------------------------------------------===//
// Instruction format superclass
//===----------------------------------------------------------------------===//
@@ -208,17 +242,24 @@ def uimm16 : Operand<i32> {
let PrintMethod = "printUnsignedImm";
}
+def MipsMemAsmOperand : AsmOperandClass {
+ let Name = "Mem";
+ let ParserMethod = "parseMemOperand";
+}
+
// Address operand
def mem : Operand<i32> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops CPURegs, simm16);
let EncoderMethod = "getMemEncoding";
+ let ParserMatchClass = MipsMemAsmOperand;
}
def mem64 : Operand<i64> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops CPU64Regs, simm16_64);
let EncoderMethod = "getMemEncoding";
+ let ParserMatchClass = MipsMemAsmOperand;
}
def mem_ea : Operand<i32> {
@@ -285,57 +326,25 @@ def addr :
ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], [SDNPWantParent]>;
//===----------------------------------------------------------------------===//
-// Pattern fragment for load/store
+// Instructions specific format
//===----------------------------------------------------------------------===//
-class UnalignedLoad<PatFrag Node> :
- PatFrag<(ops node:$ptr), (Node node:$ptr), [{
- LoadSDNode *LD = cast<LoadSDNode>(N);
- return LD->getMemoryVT().getSizeInBits()/8 > LD->getAlignment();
-}]>;
-class AlignedLoad<PatFrag Node> :
- PatFrag<(ops node:$ptr), (Node node:$ptr), [{
- LoadSDNode *LD = cast<LoadSDNode>(N);
- return LD->getMemoryVT().getSizeInBits()/8 <= LD->getAlignment();
-}]>;
-
-class UnalignedStore<PatFrag Node> :
- PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
- StoreSDNode *SD = cast<StoreSDNode>(N);
- return SD->getMemoryVT().getSizeInBits()/8 > SD->getAlignment();
-}]>;
+/// Move Control Registers From/To CPU Registers
+def MFC0_3OP : MFC3OP<0x10, 0, (outs CPURegs:$rt),
+ (ins CPURegs:$rd, uimm16:$sel), "mfc0\t$rt, $rd, $sel">;
+def : InstAlias<"mfc0 $rt, $rd", (MFC0_3OP CPURegs:$rt, CPURegs:$rd, 0)>;
-class AlignedStore<PatFrag Node> :
- PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
- StoreSDNode *SD = cast<StoreSDNode>(N);
- return SD->getMemoryVT().getSizeInBits()/8 <= SD->getAlignment();
-}]>;
+def MTC0_3OP : MFC3OP<0x10, 4, (outs CPURegs:$rd, uimm16:$sel),
+ (ins CPURegs:$rt), "mtc0\t$rt, $rd, $sel">;
+def : InstAlias<"mtc0 $rt, $rd", (MTC0_3OP CPURegs:$rd, 0, CPURegs:$rt)>;
-// Load/Store PatFrags.
-def sextloadi16_a : AlignedLoad<sextloadi16>;
-def zextloadi16_a : AlignedLoad<zextloadi16>;
-def extloadi16_a : AlignedLoad<extloadi16>;
-def load_a : AlignedLoad<load>;
-def sextloadi32_a : AlignedLoad<sextloadi32>;
-def zextloadi32_a : AlignedLoad<zextloadi32>;
-def extloadi32_a : AlignedLoad<extloadi32>;
-def truncstorei16_a : AlignedStore<truncstorei16>;
-def store_a : AlignedStore<store>;
-def truncstorei32_a : AlignedStore<truncstorei32>;
-def sextloadi16_u : UnalignedLoad<sextloadi16>;
-def zextloadi16_u : UnalignedLoad<zextloadi16>;
-def extloadi16_u : UnalignedLoad<extloadi16>;
-def load_u : UnalignedLoad<load>;
-def sextloadi32_u : UnalignedLoad<sextloadi32>;
-def zextloadi32_u : UnalignedLoad<zextloadi32>;
-def extloadi32_u : UnalignedLoad<extloadi32>;
-def truncstorei16_u : UnalignedStore<truncstorei16>;
-def store_u : UnalignedStore<store>;
-def truncstorei32_u : UnalignedStore<truncstorei32>;
+def MFC2_3OP : MFC3OP<0x12, 0, (outs CPURegs:$rt),
+ (ins CPURegs:$rd, uimm16:$sel), "mfc2\t$rt, $rd, $sel">;
+def : InstAlias<"mfc2 $rt, $rd", (MFC2_3OP CPURegs:$rt, CPURegs:$rd, 0)>;
-//===----------------------------------------------------------------------===//
-// Instructions specific format
-//===----------------------------------------------------------------------===//
+def MTC2_3OP : MFC3OP<0x12, 4, (outs CPURegs:$rd, uimm16:$sel),
+ (ins CPURegs:$rt), "mtc2\t$rt, $rd, $sel">;
+def : InstAlias<"mtc2 $rt, $rd", (MTC2_3OP CPURegs:$rd, 0, CPURegs:$rt)>;
// Arithmetic and logical instructions with 3 register operands.
class ArithLogicR<bits<6> op, bits<6> func, string instr_asm, SDNode OpNode,
@@ -416,7 +425,7 @@ class shift_rotate_reg<bits<6> func, bits<5> isRotate, string instr_asm,
// Load Upper Imediate
class LoadUpper<bits<6> op, string instr_asm, RegisterClass RC, Operand Imm>:
FI<op, (outs RC:$rt), (ins Imm:$imm16),
- !strconcat(instr_asm, "\t$rt, $imm16"), [], IIAlu> {
+ !strconcat(instr_asm, "\t$rt, $imm16"), [], IIAlu>, IsAsCheapAsAMove {
let rs = 0;
let neverHasSideEffects = 1;
let isReMaterializable = 1;
@@ -597,14 +606,13 @@ class SetCC_I<bits<6> op, string instr_asm, PatFrag cond_op, Operand Od,
IIAlu>;
// Jump
-class JumpFJ<bits<6> op, string instr_asm>:
- FJ<op, (outs), (ins jmptarget:$target),
- !strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch> {
- let isBranch=1;
+class JumpFJ<bits<6> op, DAGOperand opnd, string instr_asm,
+ SDPatternOperator operator, SDPatternOperator targetoperator>:
+ FJ<op, (outs), (ins opnd:$target), !strconcat(instr_asm, "\t$target"),
+ [(operator targetoperator:$target)], IIBranch> {
let isTerminator=1;
let isBarrier=1;
let hasDelaySlot = 1;
- let Predicates = [RelocStatic, HasStandardEncoding];
let DecoderMethod = "DecodeJumpTarget";
let Defs = [AT];
}
@@ -625,21 +633,21 @@ class UncondBranch<bits<6> op, string instr_asm>:
// Base class for indirect branch and return instruction classes.
let isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
-class JumpFR<RegisterClass RC, list<dag> pattern>:
- FR<0, 0x8, (outs), (ins RC:$rs), "jr\t$rs", pattern, IIBranch> {
+class JumpFR<RegisterClass RC, SDPatternOperator operator = null_frag>:
+ FR<0, 0x8, (outs), (ins RC:$rs), "jr\t$rs", [(operator RC:$rs)], IIBranch> {
let rt = 0;
let rd = 0;
let shamt = 0;
}
// Indirect branch
-class IndirectBranch<RegisterClass RC>: JumpFR<RC, [(brind RC:$rs)]> {
+class IndirectBranch<RegisterClass RC>: JumpFR<RC, brind> {
let isBranch = 1;
let isIndirectBranch = 1;
}
// Return instruction
-class RetBase<RegisterClass RC>: JumpFR<RC, []> {
+class RetBase<RegisterClass RC>: JumpFR<RC> {
let isReturn = 1;
let isCodeGenOnly = 1;
let hasCtrlDep = 1;
@@ -905,12 +913,28 @@ let usesCustomInserter = 1 in {
// Instruction definition
//===----------------------------------------------------------------------===//
+class LoadImm32<string instr_asm, Operand Od, RegisterClass RC> :
+ MipsAsmPseudoInst<(outs RC:$rt), (ins Od:$imm32),
+ !strconcat(instr_asm, "\t$rt, $imm32")>;
+def LoadImm32Reg : LoadImm32<"li", shamt, CPURegs>;
+
+class LoadAddress<string instr_asm, Operand MemOpnd, RegisterClass RC> :
+ MipsAsmPseudoInst<(outs RC:$rt), (ins MemOpnd:$addr),
+ !strconcat(instr_asm, "\t$rt, $addr")>;
+def LoadAddr32Reg : LoadAddress<"la", mem, CPURegs>;
+
+class LoadAddressImm<string instr_asm, Operand Od, RegisterClass RC> :
+ MipsAsmPseudoInst<(outs RC:$rt), (ins Od:$imm32),
+ !strconcat(instr_asm, "\t$rt, $imm32")>;
+def LoadAddr32Imm : LoadAddressImm<"la", shamt, CPURegs>;
+
//===----------------------------------------------------------------------===//
// MipsI Instructions
//===----------------------------------------------------------------------===//
/// Arithmetic Instructions (ALU Immediate)
-def ADDiu : ArithLogicI<0x09, "addiu", add, simm16, immSExt16, CPURegs>;
+def ADDiu : ArithLogicI<0x09, "addiu", add, simm16, immSExt16, CPURegs>,
+ IsAsCheapAsAMove;
def ADDi : ArithOverflowI<0x08, "addi", add, simm16, immSExt16, CPURegs>;
def SLTi : SetCC_I<0x0a, "slti", setlt, simm16, immSExt16, CPURegs>;
def SLTiu : SetCC_I<0x0b, "sltiu", setult, simm16, immSExt16, CPURegs>;
@@ -949,19 +973,12 @@ let Predicates = [HasMips32r2, HasStandardEncoding] in {
/// aligned
defm LB : LoadM32<0x20, "lb", sextloadi8>;
defm LBu : LoadM32<0x24, "lbu", zextloadi8>;
-defm LH : LoadM32<0x21, "lh", sextloadi16_a>;
-defm LHu : LoadM32<0x25, "lhu", zextloadi16_a>;
-defm LW : LoadM32<0x23, "lw", load_a>;
+defm LH : LoadM32<0x21, "lh", sextloadi16>;
+defm LHu : LoadM32<0x25, "lhu", zextloadi16>;
+defm LW : LoadM32<0x23, "lw", load>;
defm SB : StoreM32<0x28, "sb", truncstorei8>;
-defm SH : StoreM32<0x29, "sh", truncstorei16_a>;
-defm SW : StoreM32<0x2b, "sw", store_a>;
-
-/// unaligned
-defm ULH : LoadM32<0x21, "ulh", sextloadi16_u, 1>;
-defm ULHu : LoadM32<0x25, "ulhu", zextloadi16_u, 1>;
-defm ULW : LoadM32<0x23, "ulw", load_u, 1>;
-defm USH : StoreM32<0x29, "ush", truncstorei16_u, 1>;
-defm USW : StoreM32<0x2b, "usw", store_u, 1>;
+defm SH : StoreM32<0x29, "sh", truncstorei16>;
+defm SW : StoreM32<0x2b, "sw", store>;
/// load/store left/right
defm LWL : LoadLeftRightM32<0x22, "lwl", MipsLWL>;
@@ -996,7 +1013,8 @@ def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>,
}
/// Jump and Branch Instructions
-def J : JumpFJ<0x02, "j">;
+def J : JumpFJ<0x02, jmptarget, "j", br, bb>,
+ Requires<[RelocStatic, HasStandardEncoding]>, IsBranch;
def JR : IndirectBranch<CPURegs>;
def B : UncondBranch<0x04, "b">;
def BEQ : CBranch<0x04, "beq", seteq, CPURegs>;
@@ -1014,6 +1032,8 @@ def JAL : JumpLink<0x03, "jal">;
def JALR : JumpLinkReg<0x00, 0x09, "jalr", CPURegs>;
def BGEZAL : BranchLink<"bgezal", 0x11, CPURegs>;
def BLTZAL : BranchLink<"bltzal", 0x10, CPURegs>;
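+// Tail-call pseudos: emitted as "j"/"jr" but carrying call semantics.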
+def TAILCALL : JumpFJ<0x02, calltarget, "j", MipsTailCall, imm>, IsTailCall;
+def TAILCALL_R : JumpFR<CPURegs, MipsTailCall>, IsTailCall;
def RET : RetBase<CPURegs>;
@@ -1072,6 +1092,26 @@ def EXT : ExtBase<0, "ext", CPURegs>;
def INS : InsBase<4, "ins", CPURegs>;
//===----------------------------------------------------------------------===//
+// Instruction aliases
+//===----------------------------------------------------------------------===//
+def : InstAlias<"move $dst,$src", (ADD CPURegs:$dst,CPURegs:$src,ZERO)>;
+def : InstAlias<"bal $offset", (BGEZAL RA,brtarget:$offset)>;
+def : InstAlias<"addu $rs,$rt,$imm",
+ (ADDiu CPURegs:$rs,CPURegs:$rt,simm16:$imm)>;
+def : InstAlias<"add $rs,$rt,$imm",
+ (ADDi CPURegs:$rs,CPURegs:$rt,simm16:$imm)>;
+def : InstAlias<"and $rs,$rt,$imm",
+ (ANDi CPURegs:$rs,CPURegs:$rt,simm16:$imm)>;
+def : InstAlias<"j $rs", (JR CPURegs:$rs)>;
+def : InstAlias<"not $rt,$rs", (NOR CPURegs:$rt,CPURegs:$rs,ZERO)>;
+def : InstAlias<"neg $rt,$rs", (SUB CPURegs:$rt,ZERO,CPURegs:$rs)>;
+def : InstAlias<"negu $rt,$rs", (SUBu CPURegs:$rt,ZERO,CPURegs:$rs)>;
+def : InstAlias<"slt $rs,$rt,$imm",
+ (SLTi CPURegs:$rs,CPURegs:$rt,simm16:$imm)>;
+def : InstAlias<"xor $rs,$rt,$imm",
+ (XORi CPURegs:$rs,CPURegs:$rt,simm16:$imm)>;
+
+//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
@@ -1103,6 +1143,11 @@ def : MipsPat<(MipsJmpLink (i32 texternalsym:$dst)),
//def : MipsPat<(MipsJmpLink CPURegs:$dst),
// (JALR CPURegs:$dst)>;
+// Tail call
+def : MipsPat<(MipsTailCall (iPTR tglobaladdr:$dst)),
+ (TAILCALL tglobaladdr:$dst)>;
+def : MipsPat<(MipsTailCall (iPTR texternalsym:$dst)),
+ (TAILCALL texternalsym:$dst)>;
// hi/lo relocs
def : MipsPat<(MipsHi tglobaladdr:$in), (LUi tglobaladdr:$in)>;
def : MipsPat<(MipsHi tblockaddress:$in), (LUi tblockaddress:$in)>;
@@ -1153,24 +1198,20 @@ def : MipsPat<(not CPURegs:$in),
let Predicates = [NotN64, HasStandardEncoding] in {
def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
- def : MipsPat<(i32 (extloadi16_a addr:$src)), (LHu addr:$src)>;
- def : MipsPat<(i32 (extloadi16_u addr:$src)), (ULHu addr:$src)>;
+ def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
}
let Predicates = [IsN64, HasStandardEncoding] in {
def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
- def : MipsPat<(i32 (extloadi16_a addr:$src)), (LHu_P8 addr:$src)>;
- def : MipsPat<(i32 (extloadi16_u addr:$src)), (ULHu_P8 addr:$src)>;
+ def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu_P8 addr:$src)>;
}
// peepholes
let Predicates = [NotN64, HasStandardEncoding] in {
- def : MipsPat<(store_a (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
- def : MipsPat<(store_u (i32 0), addr:$dst), (USW ZERO, addr:$dst)>;
+ def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
}
let Predicates = [IsN64, HasStandardEncoding] in {
- def : MipsPat<(store_a (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
- def : MipsPat<(store_u (i32 0), addr:$dst), (USW_P8 ZERO, addr:$dst)>;
+ def : MipsPat<(store (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
}
// brcond patterns
@@ -1265,3 +1306,8 @@ include "MipsCondMov.td"
include "Mips16InstrFormats.td"
include "Mips16InstrInfo.td"
+
+// DSP
+include "MipsDSPInstrFormats.td"
+include "MipsDSPInstrInfo.td"
+
diff --git a/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp b/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp
index f78203f..5d9f0cf 100644
--- a/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp
@@ -10,6 +10,10 @@
// This pass expands a branch or jump instruction into a long branch if its
// offset is too large to fit into its immediate field.
//
+// FIXME:
+// 1. Fix pc-region jump instructions which cross 256MB segment boundaries.
+// 2. If program has inline assembly statements whose size cannot be
+// determined accurately, load branch target addresses from the GOT.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "mips-long-branch"
@@ -48,7 +52,7 @@ namespace {
typedef MachineBasicBlock::reverse_iterator ReverseIter;
struct MBBInfo {
- uint64_t Size;
+ uint64_t Size, Address;
bool HasLongBranch;
MachineInstr *Br;
@@ -61,7 +65,10 @@ namespace {
static char ID;
MipsLongBranch(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm),
- TII(static_cast<const MipsInstrInfo*>(tm.getInstrInfo())) {}
+ TII(static_cast<const MipsInstrInfo*>(tm.getInstrInfo())),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_),
+ ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()),
+ LongBranchSeqSize(!IsPIC ? 2 : (ABI == MipsSubtarget::N64 ? 13 : 9)) {}
virtual const char *getPassName() const {
return "Mips Long Branch";
@@ -81,6 +88,9 @@ namespace {
const MipsInstrInfo *TII;
MachineFunction *MF;
SmallVector<MBBInfo, 16> MBBInfos;
+ bool IsPIC;
+ unsigned ABI;
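+ // Number of instructions in the expanded long-branch sequence: 2 when
+ // non-PIC (j + nop), 9 for PIC O32, and 13 for PIC N64.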
+ unsigned LongBranchSeqSize;
};
char MipsLongBranch::ID = 0;
@@ -230,12 +240,6 @@ void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
// Expand branch instructions to long branches.
void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
- I.HasLongBranch = true;
-
- bool IsPIC = TM.getRelocationModel() == Reloc::PIC_;
- unsigned ABI = TM.getSubtarget<MipsSubtarget>().getTargetABI();
- bool N64 = ABI == MipsSubtarget::N64;
-
MachineBasicBlock::iterator Pos;
MachineBasicBlock *MBB = I.Br->getParent(), *TgtMBB = getTargetMBB(*I.Br);
DebugLoc DL = I.Br->getDebugLoc();
@@ -248,101 +252,105 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
MBB->addSuccessor(LongBrMBB);
if (IsPIC) {
- // $longbr:
- // addiu $sp, $sp, -regsize * 2
- // sw $ra, 0($sp)
- // bal $baltgt
- // sw $a3, regsize($sp)
- // $baltgt:
- // lui $a3, %hi($baltgt)
- // lui $at, %hi($tgt)
- // addiu $a3, $a3, %lo($baltgt)
- // addiu $at, $at, %lo($tgt)
- // subu $at, $at, $a3
- // addu $at, $ra, $at
- //
- // if n64:
- // lui $a3, %highest($baltgt)
- // lui $ra, %highest($tgt)
- // addiu $a3, $a3, %higher($baltgt)
- // addiu $ra, $ra, %higher($tgt)
- // dsll $a3, $a3, 32
- // dsll $ra, $ra, 32
- // subu $at, $at, $a3
- // addu $at, $at, $ra
- //
- // lw $ra, 0($sp)
- // lw $a3, regsize($sp)
- // jr $at
- // addiu $sp, $sp, regsize * 2
- // $fallthrough:
- //
- MF->getInfo<MipsFunctionInfo>()->setEmitNOAT();
MachineBasicBlock *BalTgtMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(FallThroughMBB, BalTgtMBB);
LongBrMBB->addSuccessor(BalTgtMBB);
BalTgtMBB->addSuccessor(TgtMBB);
- int RegSize = N64 ? 8 : 4;
- unsigned AT = N64 ? Mips::AT_64 : Mips::AT;
- unsigned A3 = N64 ? Mips::A3_64 : Mips::A3;
- unsigned SP = N64 ? Mips::SP_64 : Mips::SP;
- unsigned RA = N64 ? Mips::RA_64 : Mips::RA;
- unsigned Load = N64 ? Mips::LD_P8 : Mips::LW;
- unsigned Store = N64 ? Mips::SD_P8 : Mips::SW;
- unsigned LUi = N64 ? Mips::LUi64 : Mips::LUi;
- unsigned ADDiu = N64 ? Mips::DADDiu : Mips::ADDiu;
- unsigned ADDu = N64 ? Mips::DADDu : Mips::ADDu;
- unsigned SUBu = N64 ? Mips::SUBu : Mips::SUBu;
- unsigned JR = N64 ? Mips::JR64 : Mips::JR;
-
- Pos = LongBrMBB->begin();
-
- BuildMI(*LongBrMBB, Pos, DL, TII->get(ADDiu), SP).addReg(SP)
- .addImm(-RegSize * 2);
- BuildMI(*LongBrMBB, Pos, DL, TII->get(Store)).addReg(RA).addReg(SP)
- .addImm(0);
- BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB);
- BuildMI(*LongBrMBB, Pos, DL, TII->get(Store)).addReg(A3).addReg(SP)
- .addImm(RegSize)->setIsInsideBundle();
-
- Pos = BalTgtMBB->begin();
-
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), A3)
- .addMBB(BalTgtMBB, MipsII::MO_ABS_HI);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), AT)
- .addMBB(TgtMBB, MipsII::MO_ABS_HI);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), A3).addReg(A3)
- .addMBB(BalTgtMBB, MipsII::MO_ABS_LO);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), AT).addReg(AT)
- .addMBB(TgtMBB, MipsII::MO_ABS_LO);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(SUBu), AT).addReg(AT).addReg(A3);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDu), AT).addReg(RA).addReg(AT);
-
- if (N64) {
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), A3)
- .addMBB(BalTgtMBB, MipsII::MO_HIGHEST);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), RA)
- .addMBB(TgtMBB, MipsII::MO_HIGHEST);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), A3).addReg(A3)
- .addMBB(BalTgtMBB, MipsII::MO_HIGHER);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), RA).addReg(RA)
- .addMBB(TgtMBB, MipsII::MO_HIGHER);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DSLL), A3).addReg(A3)
- .addImm(32);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DSLL), RA).addReg(RA)
- .addImm(32);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(SUBu), AT).addReg(AT).addReg(A3);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDu), AT).addReg(AT).addReg(RA);
- I.Size += 4 * 8;
+ int64_t TgtAddress = MBBInfos[TgtMBB->getNumber()].Address;
+ int64_t Offset = TgtAddress - (I.Address + I.Size - 20);
+ int64_t Lo = SignExtend64<16>(Offset & 0xffff);
+ int64_t Hi = SignExtend64<16>(((Offset + 0x8000) >> 16) & 0xffff);
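+ // Hi and Lo split the bal-relative offset so that (Hi << 16) + Lo equals
+ // Offset; adding 0x8000 before the shift compensates for Lo being
+ // sign-extended.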
+
+ if (ABI != MipsSubtarget::N64) {
+ // $longbr:
+ // addiu $sp, $sp, -8
+ // sw $ra, 0($sp)
+ // bal $baltgt
+ // lui $at, %hi($tgt - $baltgt)
+ // $baltgt:
+ // addiu $at, $at, %lo($tgt - $baltgt)
+ // addu $at, $ra, $at
+ // lw $ra, 0($sp)
+ // jr $at
+ // addiu $sp, $sp, 8
+ // $fallthrough:
+ //
+
+ Pos = LongBrMBB->begin();
+
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP)
+ .addReg(Mips::SP).addImm(-8);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::SW)).addReg(Mips::RA)
+ .addReg(Mips::SP).addImm(0);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LUi), Mips::AT).addImm(Hi)
+ ->setIsInsideBundle();
+
+ Pos = BalTgtMBB->begin();
+
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::AT)
+ .addReg(Mips::AT).addImm(Lo);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDu), Mips::AT)
+ .addReg(Mips::RA).addReg(Mips::AT);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LW), Mips::RA)
+ .addReg(Mips::SP).addImm(0);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JR)).addReg(Mips::AT);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP)
+ .addReg(Mips::SP).addImm(8)->setIsInsideBundle();
+ } else {
+ // $longbr:
+ // daddiu $sp, $sp, -16
+ // sd $ra, 0($sp)
+ // lui64 $at, %highest($tgt - $baltgt)
+ // daddiu $at, $at, %higher($tgt - $baltgt)
+ // dsll $at, $at, 16
+ // daddiu $at, $at, %hi($tgt - $baltgt)
+ // bal $baltgt
+ // dsll $at, $at, 16
+ // $baltgt:
+ // daddiu $at, $at, %lo($tgt - $baltgt)
+ // daddu $at, $ra, $at
+ // ld $ra, 0($sp)
+ // jr64 $at
+ // daddiu $sp, $sp, 16
+ // $fallthrough:
+ //
+
+ int64_t Higher = SignExtend64<16>(((Offset + 0x80008000) >> 32) & 0xffff);
+ int64_t Highest =
+ SignExtend64<16>(((Offset + 0x800080008000LL) >> 48) & 0xffff);
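+ // Higher and Highest apply the same sign-carry adjustment to the upper
+ // halves of the 64-bit offset, mirroring the %higher/%highest relocations.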
+
+ Pos = LongBrMBB->begin();
+
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::SP_64)
+ .addReg(Mips::SP_64).addImm(-16);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::SD)).addReg(Mips::RA_64)
+ .addReg(Mips::SP_64).addImm(0);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LUi64), Mips::AT_64)
+ .addImm(Highest);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64)
+ .addReg(Mips::AT_64).addImm(Higher);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DSLL), Mips::AT_64)
+ .addReg(Mips::AT_64).addImm(16);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64)
+ .addReg(Mips::AT_64).addImm(Hi);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DSLL), Mips::AT_64)
+ .addReg(Mips::AT_64).addImm(16)->setIsInsideBundle();
+
+ Pos = BalTgtMBB->begin();
+
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64)
+ .addReg(Mips::AT_64).addImm(Lo);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDu), Mips::AT_64)
+ .addReg(Mips::RA_64).addReg(Mips::AT_64);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LD), Mips::RA_64)
+ .addReg(Mips::SP_64).addImm(0);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JR64)).addReg(Mips::AT_64);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::SP_64)
+ .addReg(Mips::SP_64).addImm(16)->setIsInsideBundle();
}
-
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(Load), RA).addReg(SP).addImm(0);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(Load), A3).addReg(SP).addImm(RegSize);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(JR)).addReg(AT);
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), SP).addReg(SP)
- .addImm(RegSize * 2)->setIsInsideBundle();
- I.Size += 4 * 14;
} else {
// $longbr:
// j $tgt
@@ -353,7 +361,6 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
LongBrMBB->addSuccessor(TgtMBB);
BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::J)).addMBB(TgtMBB);
BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::NOP))->setIsInsideBundle();
- I.Size += 4 * 2;
}
if (I.Br->isUnconditionalBranch()) {
@@ -401,19 +408,34 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
if (!I->Br || I->HasLongBranch)
continue;
- if (!ForceLongBranch)
- // Check if offset fits into 16-bit immediate field of branches.
- if (isInt<16>(computeOffset(I->Br) / 4))
- continue;
+ // Check if offset fits into 16-bit immediate field of branches.
+ if (!ForceLongBranch && isInt<16>(computeOffset(I->Br) / 4))
+ continue;
- expandToLongBranch(*I);
+ I->HasLongBranch = true;
+ I->Size += LongBranchSeqSize * 4;
++LongBranches;
EverMadeChange = MadeChange = true;
}
}
- if (EverMadeChange)
- MF->RenumberBlocks();
+ if (!EverMadeChange)
+ return true;
+
+ // Compute basic block addresses.
+ if (TM.getRelocationModel() == Reloc::PIC_) {
+ uint64_t Address = 0;
+
+ for (I = MBBInfos.begin(); I != E; Address += I->Size, ++I)
+ I->Address = Address;
+ }
+
+ // Do the expansion.
+ for (I = MBBInfos.begin(); I != E; ++I)
+ if (I->HasLongBranch)
+ expandToLongBranch(*I);
+
+ MF->RenumberBlocks();
return true;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
index d4c5e6d..5fa6339 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
@@ -11,7 +11,6 @@
// MCInst records.
//
//===----------------------------------------------------------------------===//
-
#include "MipsMCInstLower.h"
#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
@@ -161,31 +160,3 @@ void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
}
}
-// If the D<shift> instruction has a shift amount that is greater
-// than 31 (checked in calling routine), lower it to a D<shift>32 instruction
-void MipsMCInstLower::LowerLargeShift(const MachineInstr *MI,
- MCInst& Inst,
- int64_t Shift) {
- // rt
- Inst.addOperand(LowerOperand(MI->getOperand(0)));
- // rd
- Inst.addOperand(LowerOperand(MI->getOperand(1)));
- // saminus32
- Inst.addOperand(MCOperand::CreateImm(Shift));
-
- switch (MI->getOpcode()) {
- default:
- // Calling function is not synchronized
- llvm_unreachable("Unexpected shift instruction");
- break;
- case Mips::DSLL:
- Inst.setOpcode(Mips::DSLL32);
- break;
- case Mips::DSRL:
- Inst.setOpcode(Mips::DSRL32);
- break;
- case Mips::DSRA:
- Inst.setOpcode(Mips::DSRA32);
- break;
- }
-}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
index 0abb996..c4a6016 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
@@ -33,12 +33,11 @@ public:
MipsMCInstLower(MipsAsmPrinter &asmprinter);
void Initialize(Mangler *mang, MCContext *C);
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
- void LowerLargeShift(const MachineInstr *MI, MCInst &Inst, int64_t Shift);
+ MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const;
private:
MCOperand LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy, unsigned Offset) const;
- MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const;
};
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
index 362173e..5ff19ab 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
@@ -43,4 +43,17 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
}
+bool MipsFunctionInfo::mips16SPAliasRegSet() const {
+ return Mips16SPAliasReg;
+}
+unsigned MipsFunctionInfo::getMips16SPAliasReg() {
+ // Return if it has already been initialized.
+ if (Mips16SPAliasReg)
+ return Mips16SPAliasReg;
+
+ const TargetRegisterClass *RC =
+ (const TargetRegisterClass *)&Mips::CPU16RegsRegClass;
+ return Mips16SPAliasReg = MF.getRegInfo().createVirtualRegister(RC);
+}
+
void MipsFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
index df3c4c0..bb45f92 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
@@ -39,55 +39,45 @@ class MipsFunctionInfo : public MachineFunctionInfo {
/// relocation models.
unsigned GlobalBaseReg;
+ /// Mips16SPAliasReg - keeps track of the virtual register initialized as
+ /// an alias for SP, used for loads/stores of halfwords/bytes from/to
+ /// the stack.
+ unsigned Mips16SPAliasReg;
+
/// VarArgsFrameIndex - FrameIndex for start of varargs area.
int VarArgsFrameIndex;
- // Range of frame object indices.
- // InArgFIRange: Range of indices of all frame objects created during call to
- // LowerFormalArguments.
- // OutArgFIRange: Range of indices of all frame objects created during call to
- // LowerCall except for the frame object for restoring $gp.
- std::pair<int, int> InArgFIRange, OutArgFIRange;
- unsigned MaxCallFrameSize;
+ /// True if function has a byval argument.
+ bool HasByvalArg;
- bool EmitNOAT;
+ /// Size of incoming argument area.
+ unsigned IncomingArgSize;
public:
MipsFunctionInfo(MachineFunction& MF)
- : MF(MF), SRetReturnReg(0), GlobalBaseReg(0),
- VarArgsFrameIndex(0), InArgFIRange(std::make_pair(-1, 0)),
- OutArgFIRange(std::make_pair(-1, 0)), MaxCallFrameSize(0), EmitNOAT(false)
+ : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), Mips16SPAliasReg(0),
+ VarArgsFrameIndex(0)
{}
- bool isInArgFI(int FI) const {
- return FI <= InArgFIRange.first && FI >= InArgFIRange.second;
- }
- void setLastInArgFI(int FI) { InArgFIRange.second = FI; }
-
- bool isOutArgFI(int FI) const {
- return FI <= OutArgFIRange.first && FI >= OutArgFIRange.second;
- }
- void extendOutArgFIRange(int FirstFI, int LastFI) {
- if (!OutArgFIRange.second)
- // this must be the first time this function was called.
- OutArgFIRange.first = FirstFI;
- OutArgFIRange.second = LastFI;
- }
-
unsigned getSRetReturnReg() const { return SRetReturnReg; }
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
bool globalBaseRegSet() const;
unsigned getGlobalBaseReg();
+ bool mips16SPAliasRegSet() const;
+ unsigned getMips16SPAliasReg();
+
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
- unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
- void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
+ bool hasByvalArg() const { return HasByvalArg; }
+ void setFormalArgInfo(unsigned Size, bool HasByval) {
+ IncomingArgSize = Size;
+ HasByvalArg = HasByval;
+ }
- bool getEmitNOAT() const { return EmitNOAT; }
- void setEmitNOAT() { EmitNOAT = true; }
+ unsigned getIncomingArgSize() const { return IncomingArgSize; }
};
} // end of namespace llvm
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index ae6ae3a..d8e0dd4 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -22,7 +22,6 @@
#include "llvm/Constants.h"
#include "llvm/DebugInfo.h"
#include "llvm/Type.h"
-#include "llvm/Function.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -43,9 +42,8 @@
using namespace llvm;
-MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST,
- const TargetInstrInfo &tii)
- : MipsGenRegisterInfo(Mips::RA), Subtarget(ST), TII(tii) {}
+MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST)
+ : MipsGenRegisterInfo(Mips::RA), Subtarget(ST) {}
unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; }
@@ -83,11 +81,11 @@ MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
static const uint16_t ReservedCPURegs[] = {
- Mips::ZERO, Mips::AT, Mips::K0, Mips::K1, Mips::SP
+ Mips::ZERO, Mips::K0, Mips::K1, Mips::SP
};
static const uint16_t ReservedCPU64Regs[] = {
- Mips::ZERO_64, Mips::AT_64, Mips::K0_64, Mips::K1_64, Mips::SP_64
+ Mips::ZERO_64, Mips::K0_64, Mips::K1_64, Mips::SP_64
};
BitVector Reserved(getNumRegs());
@@ -96,41 +94,49 @@ getReservedRegs(const MachineFunction &MF) const {
for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I)
Reserved.set(ReservedCPURegs[I]);
- if (Subtarget.hasMips64()) {
- for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I)
- Reserved.set(ReservedCPU64Regs[I]);
+ for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I)
+ Reserved.set(ReservedCPU64Regs[I]);
+ if (Subtarget.hasMips64()) {
// Reserve all registers in AFGR64.
for (RegIter Reg = Mips::AFGR64RegClass.begin(),
EReg = Mips::AFGR64RegClass.end(); Reg != EReg; ++Reg)
Reserved.set(*Reg);
} else {
- // Reserve all registers in CPU64Regs & FGR64.
- for (RegIter Reg = Mips::CPU64RegsRegClass.begin(),
- EReg = Mips::CPU64RegsRegClass.end(); Reg != EReg; ++Reg)
- Reserved.set(*Reg);
-
+ // Reserve all registers in FGR64.
for (RegIter Reg = Mips::FGR64RegClass.begin(),
EReg = Mips::FGR64RegClass.end(); Reg != EReg; ++Reg)
Reserved.set(*Reg);
}
-
// Reserve FP if this function should have a dedicated frame pointer register.
if (MF.getTarget().getFrameLowering()->hasFP(MF)) {
- Reserved.set(Mips::FP);
- Reserved.set(Mips::FP_64);
+ if (Subtarget.inMips16Mode())
+ Reserved.set(Mips::S0);
+ else {
+ Reserved.set(Mips::FP);
+ Reserved.set(Mips::FP_64);
+ }
}
// Reserve hardware registers.
Reserved.set(Mips::HWR29);
Reserved.set(Mips::HWR29_64);
+ // Reserve DSP control register.
+ Reserved.set(Mips::DSPCtrl);
+
// Reserve RA if in mips16 mode.
if (Subtarget.inMips16Mode()) {
Reserved.set(Mips::RA);
Reserved.set(Mips::RA_64);
}
+ // Reserve GP if small section is used.
+ if (Subtarget.useSmallSection()) {
+ Reserved.set(Mips::GP);
+ Reserved.set(Mips::GP_64);
+ }
+
return Reserved;
}
@@ -160,7 +166,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
"Instr doesn't have FrameIndex operand!");
}
- DEBUG(errs() << "\nFunction : " << MF.getFunction()->getName() << "\n";
+ DEBUG(errs() << "\nFunction : " << MF.getName() << "\n";
errs() << "<--------->\n" << MI);
int FrameIndex = MI.getOperand(i).getIndex();
@@ -179,8 +185,12 @@ getFrameRegister(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
bool IsN64 = Subtarget.isABI_N64();
- return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) :
- (IsN64 ? Mips::SP_64 : Mips::SP);
+ if (Subtarget.inMips16Mode())
+ return TFI->hasFP(MF) ? Mips::S0 : Mips::SP;
+ else
+ return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) :
+ (IsN64 ? Mips::SP_64 : Mips::SP);
+
}
unsigned MipsRegisterInfo::
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
index 9a05e94..78adf7f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
@@ -22,16 +22,14 @@
namespace llvm {
class MipsSubtarget;
-class TargetInstrInfo;
class Type;
class MipsRegisterInfo : public MipsGenRegisterInfo {
protected:
const MipsSubtarget &Subtarget;
- const TargetInstrInfo &TII;
public:
- MipsRegisterInfo(const MipsSubtarget &Subtarget, const TargetInstrInfo &tii);
+ MipsRegisterInfo(const MipsSubtarget &Subtarget);
/// getRegisterNumbering - Given the enum value for some register, e.g.
/// Mips::RA, return the number that it corresponds to (e.g. 31).
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
index b255e42..391c19e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
@@ -14,6 +14,8 @@ let Namespace = "Mips" in {
def sub_fpeven : SubRegIndex;
def sub_fpodd : SubRegIndex;
def sub_32 : SubRegIndex;
+def sub_lo : SubRegIndex;
+def sub_hi : SubRegIndex;
}
// We have banks of 32 registers each.
@@ -71,7 +73,7 @@ class HWR<bits<5> num, string n> : MipsReg<n> {
let Namespace = "Mips" in {
// General Purpose Registers
def ZERO : MipsGPRReg< 0, "zero">, DwarfRegNum<[0]>;
- def AT : MipsGPRReg< 1, "at">, DwarfRegNum<[1]>;
+ def AT : MipsGPRReg< 1, "1">, DwarfRegNum<[1]>;
def V0 : MipsGPRReg< 2, "2">, DwarfRegNum<[2]>;
def V1 : MipsGPRReg< 3, "3">, DwarfRegNum<[3]>;
def A0 : MipsGPRReg< 4, "4">, DwarfRegNum<[4]>;
@@ -105,7 +107,7 @@ let Namespace = "Mips" in {
// General Purpose 64-bit Registers
def ZERO_64 : Mips64GPRReg< 0, "zero", [ZERO]>, DwarfRegNum<[0]>;
- def AT_64 : Mips64GPRReg< 1, "at", [AT]>, DwarfRegNum<[1]>;
+ def AT_64 : Mips64GPRReg< 1, "1", [AT]>, DwarfRegNum<[1]>;
def V0_64 : Mips64GPRReg< 2, "2", [V0]>, DwarfRegNum<[2]>;
def V1_64 : Mips64GPRReg< 3, "3", [V1]>, DwarfRegNum<[3]>;
def A0_64 : Mips64GPRReg< 4, "4", [A0]>, DwarfRegNum<[4]>;
@@ -239,16 +241,29 @@ let Namespace = "Mips" in {
// fcc0 register
def FCC0 : Register<"fcc0">;
+ // PC register
+ def PC : Register<"pc">;
+
// Hardware register $29
def HWR29 : Register<"29">;
def HWR29_64 : Register<"29">;
+
+ // Accum registers
+ let SubRegIndices = [sub_lo, sub_hi] in
+ def AC0 : RegisterWithSubRegs<"ac0", [LO, HI]>;
+ def AC1 : Register<"ac1">;
+ def AC2 : Register<"ac2">;
+ def AC3 : Register<"ac3">;
+
+ def DSPCtrl : Register<"dspctrl">;
}
//===----------------------------------------------------------------------===//
// Register Classes
//===----------------------------------------------------------------------===//
-def CPURegs : RegisterClass<"Mips", [i32], 32, (add
+class CPURegsClass<list<ValueType> regTypes> :
+ RegisterClass<"Mips", regTypes, 32, (add
// Reserved
ZERO, AT,
// Return Values and Arguments
@@ -262,6 +277,9 @@ def CPURegs : RegisterClass<"Mips", [i32], 32, (add
// Reserved
K0, K1, GP, SP, FP, RA)>;
+def CPURegs : CPURegsClass<[i32]>;
+def DSPRegs : CPURegsClass<[v4i8, v2i16]>;
+
def CPU64Regs : RegisterClass<"Mips", [i64], 64, (add
// Reserved
ZERO_64, AT_64,
@@ -284,6 +302,7 @@ def CPU16Regs : RegisterClass<"Mips", [i32], 32, (add
def CPURAReg : RegisterClass<"Mips", [i32], 32, (add RA)>;
+def CPUSPReg : RegisterClass<"Mips", [i32], 32, (add SP)>;
// 64bit fp:
// * FGR64 - 32 64-bit registers
@@ -319,3 +338,5 @@ def HILO64 : RegisterClass<"Mips", [i64], 64, (add HI64, LO64)>;
def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>;
def HWRegs64 : RegisterClass<"Mips", [i64], 32, (add HWR29_64)>;
+// Accumulator Registers
+def ACRegs : RegisterClass<"Mips", [i64], 64, (sequence "AC%u", 0, 3)>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
index 1c59847..03f5176 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -22,7 +22,8 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
@@ -202,6 +203,19 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Mark $fp as used if function has dedicated frame pointer.
if (hasFP(MF))
MRI.setPhysRegUsed(FP);
+
+ // Set scavenging frame index if necessary.
+ uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
+ estimateStackSize(MF);
+
+ if (isInt<16>(MaxSPOffset))
+ return;
+
+ const TargetRegisterClass *RC = STI.isABI_N64() ?
+ &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
+ RC->getAlignment(), false);
+ RS->setScavengingFrameIndex(FI);
}
const MipsFrameLowering *
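The scavenging-slot guard above turns on whether the largest stack offset still fits the signed 16-bit immediate field of a Mips load/store. A minimal standalone model of llvm::isInt<16> (an assumption about its semantics, not the LLVM header itself):

    #include <cassert>
    #include <cstdint>

    // Does x fit in a signed N-bit immediate field?
    template <unsigned N> bool isIntN(int64_t x) {
      return x >= -(INT64_C(1) << (N - 1)) && x < (INT64_C(1) << (N - 1));
    }

    int main() {
      assert(isIntN<16>(32767));   // largest directly reachable offset
      assert(!isIntN<16>(32768));  // one past it: reserve an emergency slot
    }

When the check fails, processFunctionBeforeCalleeSavedScan reserves one stack object so the register scavenger always has a spill slot while frame indices are rewritten.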
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
index eeb1de3..fb0f9df 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -260,14 +260,55 @@ void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
if (isInt<16>(Amount))// addi sp, sp, amount
BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount);
else { // Expand immediate that doesn't fit in 16-bit.
- unsigned ATReg = STI.isABI_N64() ? Mips::AT_64 : Mips::AT;
-
- MBB.getParent()->getInfo<MipsFunctionInfo>()->setEmitNOAT();
- Mips::loadImmediate(Amount, STI.isABI_N64(), *this, MBB, I, DL, false, 0);
- BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(ATReg);
+ unsigned Reg = loadImmediate(Amount, MBB, I, DL, 0);
+ BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(Reg, RegState::Kill);
}
}
+/// This function generates the sequence of instructions needed to load the
+/// immediate Imm into a newly created virtual register, which is returned.
+unsigned
+MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II, DebugLoc DL,
+ unsigned *NewImm) const {
+ MipsAnalyzeImmediate AnalyzeImm;
+ const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
+ MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
+ unsigned Size = STI.isABI_N64() ? 64 : 32;
+ unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
+ unsigned ZEROReg = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
+ const TargetRegisterClass *RC = STI.isABI_N64() ?
+ &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ bool LastInstrIsADDiu = NewImm;
+
+ const MipsAnalyzeImmediate::InstSeq &Seq =
+ AnalyzeImm.Analyze(Imm, Size, LastInstrIsADDiu);
+ MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
+
+ assert(Seq.size() && (!LastInstrIsADDiu || (Seq.size() > 1)));
+
+ // The first instruction can be a LUi, which is different from other
+ // instructions (ADDiu, ORI and SLL) in that it does not have a register
+ // operand.
+ unsigned Reg = RegInfo.createVirtualRegister(RC);
+
+ if (Inst->Opc == LUi)
+ BuildMI(MBB, II, DL, get(LUi), Reg).addImm(SignExtend64<16>(Inst->ImmOpnd));
+ else
+ BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(ZEROReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+
+ // Build the remaining instructions in Seq.
+ for (++Inst; Inst != Seq.end() - LastInstrIsADDiu; ++Inst)
+ BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(Reg, RegState::Kill)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+
+ if (LastInstrIsADDiu)
+ *NewImm = Inst->ImmOpnd;
+
+ return Reg;
+}
+
unsigned MipsSEInstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
return (Opc == Mips::BEQ || Opc == Mips::BNE || Opc == Mips::BGTZ ||
Opc == Mips::BGEZ || Opc == Mips::BLTZ || Opc == Mips::BLEZ ||
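loadImmediate above replaces the old hard-coded use of $at: MipsAnalyzeImmediate breaks the value into a short LUi/ORi/ADDiu/SLL sequence written into a fresh virtual register. A hedged sketch of the simplest 32-bit split (the register name and value are illustrative only; the real pass uses a scavenged virtual register):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t Imm = 0x12345678;             // too wide for a 16-bit ADDiu
      uint16_t Hi = (uint32_t)Imm >> 16;    // LUi operand: high half
      uint16_t Lo = Imm & 0xffff;           // ORi operand: low half, zero-extended
      std::printf("lui  $1, 0x%04x\n", (unsigned)Hi);
      std::printf("ori  $1, $1, 0x%04x\n", (unsigned)Lo);
      return 0;
    }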
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h
index 346e74d..55b78b2 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h
@@ -15,7 +15,6 @@
#define MIPSSEINSTRUCTIONINFO_H
#include "MipsInstrInfo.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsSERegisterInfo.h"
namespace llvm {
@@ -70,6 +69,13 @@ public:
void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
+ /// Emit a series of instructions to load an immediate. If NewImm is a
+ /// non-NULL parameter, the last instruction is not emitted, but instead
+ /// its immediate operand is returned in NewImm.
+ unsigned loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II, DebugLoc DL,
+ unsigned *NewImm) const;
+
private:
virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const;
diff --git a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
index 043a1ef..56b9ba9 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -26,6 +26,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -40,8 +41,18 @@
using namespace llvm;
MipsSERegisterInfo::MipsSERegisterInfo(const MipsSubtarget &ST,
- const TargetInstrInfo &TII)
- : MipsRegisterInfo(ST, TII) {}
+ const MipsSEInstrInfo &I)
+ : MipsRegisterInfo(ST), TII(I) {}
+
+bool MipsSERegisterInfo::
+requiresRegisterScavenging(const MachineFunction &MF) const {
+ return true;
+}
+
+bool MipsSERegisterInfo::
+requiresFrameIndexScavenging(const MachineFunction &MF) const {
+ return true;
+}
// This function eliminates ADJCALLSTACKDOWN and
// ADJCALLSTACKUP pseudo instructions.
@@ -72,7 +83,6 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
MachineFrameInfo *MFI = MF.getFrameInfo();
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
int MinCSFI = 0;
@@ -91,8 +101,7 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
// getFrameRegister() returns.
unsigned FrameReg;
- if (MipsFI->isOutArgFI(FrameIndex) ||
- (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI))
+ if (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI)
FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP;
else
FrameReg = getFrameRegister(MF);
@@ -104,14 +113,11 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
// - If the frame object is any of the following, its offset must be adjusted
// by adding the size of the stack:
// incoming argument, callee-saved register location or local variable.
+ bool IsKill = false;
int64_t Offset;
- if (MipsFI->isOutArgFI(FrameIndex))
- Offset = SPOffset;
- else
- Offset = SPOffset + (int64_t)StackSize;
-
- Offset += MI.getOperand(OpNo + 1).getImm();
+ Offset = SPOffset + (int64_t)StackSize;
+ Offset += MI.getOperand(OpNo + 1).getImm();
DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
@@ -121,18 +127,17 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = II->getDebugLoc();
unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned ATReg = Subtarget.isABI_N64() ? Mips::AT_64 : Mips::AT;
- MipsAnalyzeImmediate::Inst LastInst(0, 0);
+ unsigned NewImm;
- MipsFI->setEmitNOAT();
- Mips::loadImmediate(Offset, Subtarget.isABI_N64(), TII, MBB, II, DL, true,
- &LastInst);
- BuildMI(MBB, II, DL, TII.get(ADDu), ATReg).addReg(FrameReg).addReg(ATReg);
+ unsigned Reg = TII.loadImmediate(Offset, MBB, II, DL, &NewImm);
+ BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(FrameReg)
+ .addReg(Reg, RegState::Kill);
- FrameReg = ATReg;
- Offset = SignExtend64<16>(LastInst.ImmOpnd);
+ FrameReg = Reg;
+ Offset = SignExtend64<16>(NewImm);
+ IsKill = true;
}
- MI.getOperand(OpNo).ChangeToRegister(FrameReg, false);
+ MI.getOperand(OpNo).ChangeToRegister(FrameReg, false, false, IsKill);
MI.getOperand(OpNo + 1).ChangeToImmediate(Offset);
}
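The NewImm fold-back in eliminateFI keeps the effective address intact: the scavenged register receives the offset minus the final ADDiu immediate, and that immediate reappears as the instruction's displacement. A small arithmetic check of the invariant (all values hypothetical):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t Offset = 0x12345678;                 // doesn't fit in 16 bits
      int16_t NewImm = (int16_t)(Offset & 0xffff); // last ADDiu operand
      int64_t RegVal = Offset - NewImm;            // materialized by loadImmediate
      int64_t FrameReg = 0x7fff0000;               // stand-in base register value
      assert(FrameReg + RegVal + NewImm == FrameReg + Offset);
    }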
diff --git a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h
index 4b17b33..7437bd3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h
@@ -18,11 +18,18 @@
#include "MipsRegisterInfo.h"
namespace llvm {
+class MipsSEInstrInfo;
class MipsSERegisterInfo : public MipsRegisterInfo {
+ const MipsSEInstrInfo &TII;
+
public:
MipsSERegisterInfo(const MipsSubtarget &Subtarget,
- const TargetInstrInfo &TII);
+ const MipsSEInstrInfo &TII);
+
+ bool requiresRegisterScavenging(const MachineFunction &MF) const;
+
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
index 11ff809..930af4d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
@@ -25,12 +25,14 @@ using namespace llvm;
void MipsSubtarget::anchor() { }
MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool little) :
+ const std::string &FS, bool little,
+ Reloc::Model RM) :
MipsGenSubtargetInfo(TT, CPU, FS),
MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little),
IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false),
IsLinux(true), HasSEInReg(false), HasCondMov(false), HasMulDivAdd(false),
- HasMinMax(false), HasSwap(false), HasBitCount(false), InMips16Mode(false)
+ HasMinMax(false), HasSwap(false), HasBitCount(false), InMips16Mode(false),
+ HasDSP(false), HasDSPR2(false), IsAndroid(false)
{
std::string CPUName = CPU;
if (CPUName.empty())
@@ -54,6 +56,9 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
// Is the target system Linux ?
if (TT.find("linux") == std::string::npos)
IsLinux = false;
+
+ // Set UseSmallSection.
+ UseSmallSection = !IsLinux && (RM == Reloc::Static);
}
bool
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
index ba15362..ff69237 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
@@ -65,6 +65,9 @@ protected:
// isLinux - Target system is Linux. If false, we consider ELFOS for now.
bool IsLinux;
+ // UseSmallSection - Small section is used.
+ bool UseSmallSection;
+
/// Features related to the presence of specific instructions.
// HasSEInReg - SEB and SEH (signext in register) instructions.
@@ -89,6 +92,9 @@ protected:
// InMips16 -- can process Mips16 instructions
bool InMips16Mode;
+ // HasDSP, HasDSPR2 -- supports DSP ASE.
+ bool HasDSP, HasDSPR2;
+
// IsAndroid -- target is android
bool IsAndroid;
@@ -109,7 +115,7 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
MipsSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool little);
+ const std::string &FS, bool little, Reloc::Model RM);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
@@ -131,8 +137,11 @@ public:
bool isNotSingleFloat() const { return !IsSingleFloat; }
bool hasVFPU() const { return HasVFPU; }
bool inMips16Mode() const { return InMips16Mode; }
+ bool hasDSP() const { return HasDSP; }
+ bool hasDSPR2() const { return HasDSPR2; }
bool isAndroid() const { return IsAndroid; }
bool isLinux() const { return IsLinux; }
+ bool useSmallSection() const { return UseSmallSection; }
bool hasStandardEncoding() const { return !inMips16Mode(); }
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 03a024a..983ee21 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -42,8 +42,8 @@ MipsTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL,
bool isLittle)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS, isLittle),
- DataLayout(isLittle ?
+ Subtarget(TT, CPU, FS, isLittle, RM),
+ DL(isLittle ?
(Subtarget.isABI_N64() ?
"e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" :
"e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32") :
@@ -52,7 +52,8 @@ MipsTargetMachine(const Target &T, StringRef TT,
"E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32")),
InstrInfo(MipsInstrInfo::create(*this)),
FrameLowering(MipsFrameLowering::create(*this, Subtarget)),
- TLInfo(*this), TSInfo(*this), JITInfo() {
+ TLInfo(*this), TSInfo(*this), JITInfo(),
+ STTI(&TLInfo), VTTI(&TLInfo) {
}
void MipsebTargetMachine::anchor() { }
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
index 21b49e6..b54f5ce 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
@@ -21,8 +21,9 @@
#include "MipsSelectionDAGInfo.h"
#include "MipsSubtarget.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
class formatted_raw_ostream;
@@ -30,12 +31,14 @@ class MipsRegisterInfo;
class MipsTargetMachine : public LLVMTargetMachine {
MipsSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
const MipsInstrInfo *InstrInfo;
const MipsFrameLowering *FrameLowering;
MipsTargetLowering TLInfo;
MipsSelectionDAGInfo TSInfo;
MipsJITInfo JITInfo;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
MipsTargetMachine(const Target &T, StringRef TT,
@@ -52,8 +55,8 @@ public:
{ return FrameLowering; }
virtual const MipsSubtarget *getSubtargetImpl() const
{ return &Subtarget; }
- virtual const TargetData *getTargetData() const
- { return &DataLayout;}
+ virtual const DataLayout *getDataLayout() const
+ { return &DL;}
virtual MipsJITInfo *getJITInfo()
{ return &JITInfo; }
@@ -69,6 +72,13 @@ public:
return &TSInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
+
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE);
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
index 04dc60a..881908b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
@@ -13,7 +13,7 @@
#include "llvm/GlobalVariable.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
@@ -26,6 +26,7 @@ SSThreshold("mips-ssection-threshold", cl::Hidden,
void MipsTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ InitializeELF(TM.Options.UseInitArray);
SmallDataSection =
getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
@@ -60,9 +61,10 @@ bool MipsTargetObjectFile::
IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
SectionKind Kind) const {
- // Only use small section for non linux targets.
const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
- if (Subtarget.isLinux())
+
+ // Return if small section is not available.
+ if (!Subtarget.useSmallSection())
return false;
// Only global variables, not functions.
@@ -80,7 +82,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
return false;
Type *Ty = GV->getType()->getElementType();
- return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
+ return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTX.td b/contrib/llvm/lib/Target/NVPTX/NVPTX.td
index ae7710e..7aee359 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTX.td
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTX.td
@@ -24,7 +24,30 @@ include "NVPTXInstrInfo.td"
// - Need at least one feature to avoid generating zero sized array by
// TableGen in NVPTXGenSubtarget.inc.
//===----------------------------------------------------------------------===//
-def FeatureDummy : SubtargetFeature<"dummy", "dummy", "true", "">;
+
+// SM Versions
+def SM10 : SubtargetFeature<"sm_10", "SmVersion", "10",
+ "Target SM 1.0">;
+def SM11 : SubtargetFeature<"sm_11", "SmVersion", "11",
+ "Target SM 1.1">;
+def SM12 : SubtargetFeature<"sm_12", "SmVersion", "12",
+ "Target SM 1.2">;
+def SM13 : SubtargetFeature<"sm_13", "SmVersion", "13",
+ "Target SM 1.3">;
+def SM20 : SubtargetFeature<"sm_20", "SmVersion", "20",
+ "Target SM 2.0">;
+def SM21 : SubtargetFeature<"sm_21", "SmVersion", "21",
+ "Target SM 2.1">;
+def SM30 : SubtargetFeature<"sm_30", "SmVersion", "30",
+ "Target SM 3.0">;
+def SM35 : SubtargetFeature<"sm_35", "SmVersion", "35",
+ "Target SM 3.5">;
+
+// PTX Versions
+def PTX30 : SubtargetFeature<"ptx30", "PTXVersion", "30",
+ "Use PTX version 3.0">;
+def PTX31 : SubtargetFeature<"ptx31", "PTXVersion", "31",
+ "Use PTX version 3.1">;
//===----------------------------------------------------------------------===//
// NVPTX supported processors.
@@ -33,7 +56,14 @@ def FeatureDummy : SubtargetFeature<"dummy", "dummy", "true", "">;
class Proc<string Name, list<SubtargetFeature> Features>
: Processor<Name, NoItineraries, Features>;
-def : Proc<"sm_10", [FeatureDummy]>;
+def : Proc<"sm_10", [SM10]>;
+def : Proc<"sm_11", [SM11]>;
+def : Proc<"sm_12", [SM12]>;
+def : Proc<"sm_13", [SM13]>;
+def : Proc<"sm_20", [SM20]>;
+def : Proc<"sm_21", [SM21]>;
+def : Proc<"sm_30", [SM30]>;
+def : Proc<"sm_35", [SM35]>;
def NVPTXInstrInfo : InstrInfo {
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h b/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
index 24b3bd5..c7cabf6 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
@@ -16,7 +16,7 @@
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
namespace llvm {
@@ -31,7 +31,7 @@ public:
NVPTXAllocaHoisting() : FunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<TargetData>();
+ AU.addRequired<DataLayout>();
AU.addPreserved<MachineFunctionAnalysis>();
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index f2b9616..0a885ce 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -68,7 +68,54 @@ static cl::opt<bool, true>InterleaveSrc("nvptx-emit-src",
cl::location(llvm::InterleaveSrcInPtx));
+namespace {
+/// DiscoverDependentGlobals - Return a set of GlobalVariables on which \p V
+/// depends.
+void DiscoverDependentGlobals(Value *V,
+ DenseSet<GlobalVariable*> &Globals) {
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ Globals.insert(GV);
+ else {
+ if (User *U = dyn_cast<User>(V)) {
+ for (unsigned i = 0, e = U->getNumOperands(); i != e; ++i) {
+ DiscoverDependentGlobals(U->getOperand(i), Globals);
+ }
+ }
+ }
+}
+/// VisitGlobalVariableForEmission - Add \p GV to the list of GlobalVariable
+/// instances to be emitted, but only after any dependents have been added
+/// first.
+void VisitGlobalVariableForEmission(GlobalVariable *GV,
+ SmallVectorImpl<GlobalVariable*> &Order,
+ DenseSet<GlobalVariable*> &Visited,
+ DenseSet<GlobalVariable*> &Visiting) {
+ // Have we already visited this one?
+ if (Visited.count(GV)) return;
+
+ // Do we have a circular dependency?
+ if (Visiting.count(GV))
+ report_fatal_error("Circular dependency found in global variable set");
+
+ // Start visiting this global
+ Visiting.insert(GV);
+
+ // Make sure we visit all dependents first
+ DenseSet<GlobalVariable*> Others;
+ for (unsigned i = 0, e = GV->getNumOperands(); i != e; ++i)
+ DiscoverDependentGlobals(GV->getOperand(i), Others);
+
+ for (DenseSet<GlobalVariable*>::iterator I = Others.begin(),
+ E = Others.end(); I != E; ++I)
+ VisitGlobalVariableForEmission(*I, Order, Visited, Visiting);
+
+ // Now we can visit ourself
+ Order.push_back(GV);
+ Visited.insert(GV);
+ Visiting.erase(GV);
+}
+}
// @TODO: This is a copy from AsmPrinter.cpp. The function is static, so we
// cannot just link to the existing version.
@@ -98,10 +145,10 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
switch (CE->getOpcode()) {
default:
// If the code isn't optimized, there may be outstanding folding
- // opportunities. Attempt to fold the expression using TargetData as a
+ // opportunities. Attempt to fold the expression using DataLayout as a
// last resort before giving up.
if (Constant *C =
- ConstantFoldConstantExpression(CE, AP.TM.getTargetData()))
+ ConstantFoldConstantExpression(CE, AP.TM.getDataLayout()))
if (C != CE)
return LowerConstant(C, AP);
@@ -115,7 +162,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
report_fatal_error(OS.str());
}
case Instruction::GetElementPtr: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Generate a symbolic expression for the byte address
const Constant *PtrVal = CE->getOperand(0);
SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
@@ -145,7 +192,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
return LowerConstant(CE->getOperand(0), AP);
case Instruction::IntToPtr: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Handle casts to pointers by changing them into casts to the appropriate
// integer type. This promotes constant folding and simplifies this code.
Constant *Op = CE->getOperand(0);
@@ -155,7 +202,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
}
case Instruction::PtrToInt: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Support only foldable casts to/from pointers that can be eliminated by
// changing the pointer to the appropriately sized integer type.
Constant *Op = CE->getOperand(0);
@@ -270,7 +317,7 @@ void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
void NVPTXAsmPrinter::printReturnValStr(const Function *F,
raw_ostream &O)
{
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
const TargetLowering *TLI = TM.getTargetLowering();
Type *Ty = F->getReturnType();
@@ -874,7 +921,7 @@ bool NVPTXAsmPrinter::doInitialization (Module &M) {
const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
.Initialize(OutContext, TM);
- Mang = new Mangler(OutContext, *TM.getTargetData());
+ Mang = new Mangler(OutContext, *TM.getDataLayout());
// Emit header before any dwarf directives are emitted below.
emitHeader(M, OS1);
@@ -893,10 +940,27 @@ bool NVPTXAsmPrinter::doInitialization (Module &M) {
emitDeclarations(M, OS2);
- // Print out module-level global variables here.
+ // As ptxas does not support forward references of globals, we need to first
+ // sort the list of module-level globals in def-use order. We visit each
+ // global variable in order, and ensure that we emit it *after* its dependent
+ // globals. We use a little extra memory maintaining both a set and a list to
+ // have fast searches while maintaining a strict ordering.
+ SmallVector<GlobalVariable*,8> Globals;
+ DenseSet<GlobalVariable*> GVVisited;
+ DenseSet<GlobalVariable*> GVVisiting;
+
+ // Visit each global variable, in order
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- printModuleLevelGV(I, OS2);
+ I != E; ++I)
+ VisitGlobalVariableForEmission(I, Globals, GVVisited, GVVisiting);
+
+ assert(GVVisited.size() == M.getGlobalList().size() &&
+ "Missed a global variable");
+ assert(GVVisiting.size() == 0 && "Did not fully process a global variable");
+
+ // Print out module-level global variables in proper order
+ for (unsigned i = 0, e = Globals.size(); i != e; ++i)
+ printModuleLevelGV(Globals[i], OS2);
OS2 << '\n';
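The ordering pass above is a depth-first topological sort with an explicit cycle check, mirroring VisitGlobalVariableForEmission. The same idea on a plain dependency graph, as a self-contained sketch (graph shape and types are assumptions, not the printer's API):

    #include <set>
    #include <stdexcept>
    #include <vector>

    using Graph = std::vector<std::vector<int>>;  // Deps[v] = nodes v refers to

    void visit(const Graph &Deps, int V, std::vector<int> &Order,
               std::set<int> &Visited, std::set<int> &Visiting) {
      if (Visited.count(V)) return;               // already emitted
      if (Visiting.count(V))                      // back edge: cycle
        throw std::runtime_error("circular dependency in global variable set");
      Visiting.insert(V);
      for (int D : Deps[V])                       // emit dependents first
        visit(Deps, D, Order, Visited, Visiting);
      Order.push_back(V);                         // now safe to emit V
      Visited.insert(V);
      Visiting.erase(V);
    }

    int main() {
      Graph Deps = {{1}, {2}, {}};                // 0 uses 1, 1 uses 2
      std::vector<int> Order;
      std::set<int> Visited, Visiting;
      visit(Deps, 0, Order, Visited, Visiting);   // Order becomes {2, 1, 0}
    }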
@@ -910,7 +974,8 @@ void NVPTXAsmPrinter::emitHeader (Module &M, raw_ostream &O) {
O << "//\n";
O << "\n";
- O << ".version 3.0\n";
+ unsigned PTXVersion = nvptxSubtarget.getPTXVersion();
+ O << ".version " << (PTXVersion / 10) << "." << (PTXVersion % 10) << "\n";
O << ".target ";
O << nvptxSubtarget.getTargetName();
@@ -1023,7 +1088,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O,
return;
}
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
// GlobalVariables are always constant pointers themselves.
const PointerType *PTy = GVar->getType();
@@ -1296,7 +1361,7 @@ std::string NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty,
void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar,
raw_ostream &O) {
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
// GlobalVariables are always constant pointers themselves.
const PointerType *PTy = GVar->getType();
@@ -1342,7 +1407,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar,
static unsigned int
-getOpenCLAlignment(const TargetData *TD,
+getOpenCLAlignment(const DataLayout *TD,
Type *Ty) {
if (Ty->isPrimitiveType() || Ty->isIntegerTy() || isa<PointerType>(Ty))
return TD->getPrefTypeAlignment(Ty);
@@ -1421,7 +1486,7 @@ void NVPTXAsmPrinter::printParamName(int paramIndex, raw_ostream &O) {
void NVPTXAsmPrinter::emitFunctionParamList(const Function *F,
raw_ostream &O) {
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
const AttrListPtr &PAL = F->getAttributes();
const TargetLowering *TLI = TM.getTargetLowering();
Function::const_arg_iterator I, E;
@@ -1456,7 +1521,8 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F,
continue;
}
- if (PAL.paramHasAttr(paramIndex+1, Attribute::ByVal) == false) {
+ if (PAL.getParamAttributes(paramIndex+1).
+ hasAttribute(Attributes::ByVal) == false) {
// Just a scalar
const PointerType *PTy = dyn_cast<PointerType>(Ty);
if (isKernelFunc) {
@@ -1524,6 +1590,9 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F,
// <a> = PAL.getparamalignment
// size = typeallocsize of element type
unsigned align = PAL.getParamAlignment(paramIndex+1);
+ if (align == 0)
+ align = TD->getABITypeAlignment(ETy);
+
unsigned sz = TD->getTypeAllocSize(ETy);
O << "\t.param .align " << align
<< " .b8 ";
@@ -1714,7 +1783,7 @@ void NVPTXAsmPrinter::printScalarConstant(Constant *CPV, raw_ostream &O) {
void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes,
AggBuffer *aggBuffer) {
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
if (isa<UndefValue>(CPV) || CPV->isNullValue()) {
int s = TD->getTypeAllocSize(CPV->getType());
@@ -1843,7 +1912,7 @@ void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes,
void NVPTXAsmPrinter::bufferAggregateConstant(Constant *CPV,
AggBuffer *aggBuffer) {
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
int Bytes;
// Old constants
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 6ea10ea..f1a99d7 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -174,10 +174,11 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// PTX does not support load / store predicate registers
- setOperationAction(ISD::LOAD, MVT::i1, Expand);
+ setOperationAction(ISD::LOAD, MVT::i1, Custom);
+ setOperationAction(ISD::STORE, MVT::i1, Custom);
+
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setOperationAction(ISD::STORE, MVT::i1, Expand);
setTruncStoreAction(MVT::i64, MVT::i1, Expand);
setTruncStoreAction(MVT::i32, MVT::i1, Expand);
setTruncStoreAction(MVT::i16, MVT::i1, Expand);
@@ -402,7 +403,7 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy,
if (isABI) {
unsigned align = Outs[i].Flags.getByValAlign();
- unsigned sz = getTargetData()->getTypeAllocSize(ETy);
+ unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
O << ".param .align " << align
<< " .b8 ";
O << "_";
@@ -655,11 +656,11 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else {
if (Func) { // direct call
if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment))
- retAlignment = getTargetData()->getABITypeAlignment(retTy);
+ retAlignment = getDataLayout()->getABITypeAlignment(retTy);
} else { // indirect call
const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction());
if (!llvm::getAlign(*CallI, 0, retAlignment))
- retAlignment = getTargetData()->getABITypeAlignment(retTy);
+ retAlignment = getDataLayout()->getABITypeAlignment(retTy);
}
SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue DeclareRetOps[] = { Chain, DAG.getConstant(retAlignment,
@@ -856,11 +857,64 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::EXTRACT_SUBVECTOR:
return Op;
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
+ case ISD::STORE: return LowerSTORE(Op, DAG);
+ case ISD::LOAD: return LowerLOAD(Op, DAG);
default:
llvm_unreachable("Custom lowering not defined for operation");
}
}
+
+// v = ld i1* addr
+// =>
+// v1 = ld i8* addr
+// v = trunc v1 to i1
+SDValue NVPTXTargetLowering::
+LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ LoadSDNode *LD = cast<LoadSDNode>(Node);
+ DebugLoc dl = Node->getDebugLoc();
+ assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
+ assert(Node->getValueType(0) == MVT::i1 &&
+ "Custom lowering for i1 load only");
+ SDValue newLD = DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(),
+ LD->getPointerInfo(),
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->isInvariant(),
+ LD->getAlignment());
+ SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
+ // The legalizer (the caller) is expecting two values from the legalized
+ // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
+ // in LegalizeDAG.cpp which also uses MergeValues.
+ SDValue Ops[] = {result, LD->getChain()};
+ return DAG.getMergeValues(Ops, 2, dl);
+}
+
+// st i1 v, addr
+// =>
+// v1 = zxt v to i8
+// st i8, addr
+SDValue NVPTXTargetLowering::
+LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ DebugLoc dl = Node->getDebugLoc();
+ StoreSDNode *ST = cast<StoreSDNode>(Node);
+ SDValue Tmp1 = ST->getChain();
+ SDValue Tmp2 = ST->getBasePtr();
+ SDValue Tmp3 = ST->getValue();
+ assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
+ unsigned Alignment = ST->getAlignment();
+ bool isVolatile = ST->isVolatile();
+ bool isNonTemporal = ST->isNonTemporal();
+ Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl,
+ MVT::i8, Tmp3);
+ SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
+ ST->getPointerInfo(), isVolatile,
+ isNonTemporal, Alignment);
+ return Result;
+}
+
+
SDValue
NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, int idx,
EVT v) const {
@@ -916,7 +970,7 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
- const TargetData *TD = getTargetData();
+ const DataLayout *TD = getDataLayout();
const Function *F = MF.getFunction();
const AttrListPtr &PAL = F->getAttributes();
@@ -965,7 +1019,7 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
// to newly created nodes. The SDNOdes for params have to
// appear in the same order as their order of appearance
// in the original function. "idx+1" holds that order.
- if (PAL.paramHasAttr(i+1, Attribute::ByVal) == false) {
+ if (PAL.getParamAttributes(i+1).hasAttribute(Attributes::ByVal) == false) {
// A plain scalar.
if (isABI || isKernel) {
// If ABI, load from the param symbol
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index 86246e6..94a177c 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -138,6 +138,9 @@ private:
SDValue getParamHelpSymbol(SelectionDAG &DAG, int idx);
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index 56b2372..9273931 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -21,7 +21,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Support/InstIterator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
@@ -110,7 +110,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
SmallVector<MemTransferInst *, 4> aggrMemcpys;
SmallVector<MemSetInst *, 4> aggrMemsets;
- TargetData *TD = &getAnalysis<TargetData>();
+ DataLayout *TD = &getAnalysis<DataLayout>();
LLVMContext &Context = F.getParent()->getContext();
//
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
index ac7f150..b150c69 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
@@ -17,7 +17,7 @@
#include "llvm/Pass.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
namespace llvm {
@@ -28,7 +28,7 @@ struct NVPTXLowerAggrCopies : public FunctionPass {
NVPTXLowerAggrCopies() : FunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<TargetData>();
+ AU.addRequired<DataLayout>();
AU.addPreserved<MachineFunctionAnalysis>();
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
index 6aadd43..7b62cce 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
@@ -34,16 +34,18 @@ DriverInterface(cl::desc("Choose driver interface:"),
NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool is64Bit)
-:NVPTXGenSubtargetInfo(TT, "", FS), // Don't pass CPU to subtarget,
- // because we don't register all
- // nvptx targets.
- Is64Bit(is64Bit) {
+: NVPTXGenSubtargetInfo(TT, CPU, FS),
+ Is64Bit(is64Bit),
+ PTXVersion(0),
+ SmVersion(10) {
drvInterface = DriverInterface;
// Provide the default CPU if none
std::string defCPU = "sm_10";
+ ParseSubtargetFeatures((CPU.empty() ? defCPU : CPU), FS);
+
// Get the TargetName from the FS if available
if (FS.empty() && CPU.empty())
TargetName = defCPU;
@@ -52,6 +54,12 @@ NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
else
llvm_unreachable("we are not using FeatureStr");
- // Set up the SmVersion
- SmVersion = atoi(TargetName.c_str()+3);
+ // We default to PTX 3.1, but we cannot just default to it in the initializer
+ // since the attribute parser checks if the given option is >= the default.
+ // So if we set ptx31 as the default, the ptx30 attribute would never match.
+ // Instead, we use 0 as the default and manually set 31 if the default is
+ // used.
+ if (PTXVersion == 0) {
+ PTXVersion = 31;
+ }
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
index 8f2a629..3cfd971 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -25,13 +25,17 @@
namespace llvm {
class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
-
- unsigned int SmVersion;
+
std::string TargetName;
NVPTX::DrvInterface drvInterface;
- bool dummy; // For the 'dummy' feature, see NVPTX.td
bool Is64Bit;
+ // PTX version x.y is represented as 10*x+y, e.g. 3.1 == 31
+ unsigned PTXVersion;
+
+ // SM version x.y is represented as 10*x+y, e.g. 3.1 == 31
+ unsigned int SmVersion;
+
public:
/// This constructor initializes the data members to match that
/// of the specified module.
@@ -69,6 +73,8 @@ public:
NVPTX::DrvInterface getDrvInterface() const { return drvInterface; }
std::string getTargetName() const { return TargetName; }
+ unsigned getPTXVersion() const { return PTXVersion; }
+
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
std::string getDataLayout() const {
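Both version fields use the 10*x+y packing described in the comments, and the asm printer's .version directive simply unpacks it. A tiny check of the encoding (values illustrative):

    #include <cassert>
    #include <cstdio>

    int main() {
      unsigned PTXVersion = 31;   // encodes PTX 3.1, the new default
      std::printf(".version %u.%u\n", PTXVersion / 10, PTXVersion % 10);
      assert(PTXVersion / 10 == 3 && PTXVersion % 10 == 1);
    }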
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 433f415..cbb4900 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -32,7 +32,7 @@
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
@@ -71,8 +71,9 @@ NVPTXTargetMachine::NVPTXTargetMachine(const Target &T,
bool is64bit)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS, is64bit),
- DataLayout(Subtarget.getDataLayout()),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit)
+ DL(Subtarget.getDataLayout()),
+ InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit),
+ STTI(&TLInfo), VTTI(&TLInfo)
/*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ {
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
index b3f9cac..11bc9d4 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -21,10 +21,11 @@
#include "NVPTXSubtarget.h"
#include "NVPTXFrameLowering.h"
#include "ManagedStringPool.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
@@ -32,7 +33,7 @@ namespace llvm {
///
class NVPTXTargetMachine : public LLVMTargetMachine {
NVPTXSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
NVPTXInstrInfo InstrInfo;
NVPTXTargetLowering TLInfo;
TargetSelectionDAGInfo TSInfo;
@@ -44,6 +45,9 @@ class NVPTXTargetMachine : public LLVMTargetMachine {
// Hold Strings that can be free'd all together with NVPTXTargetMachine
ManagedStringPool ManagedStrPool;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
+
//bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level,
// bool DisableVerify, MCContext *&OutCtx);
@@ -58,7 +62,7 @@ public:
return &FrameLowering;
}
virtual const NVPTXInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetData *getTargetData() const { return &DataLayout;}
+ virtual const DataLayout *getDataLayout() const { return &DL;}
virtual const NVPTXSubtarget *getSubtargetImpl() const { return &Subtarget;}
virtual const NVPTXRegisterInfo *getRegisterInfo() const {
@@ -72,6 +76,12 @@ public:
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
return &TSInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
//virtual bool addInstSelector(PassManagerBase &PM,
// CodeGenOpt::Level OptLevel);
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
index d175e3e..3d58306 100644
--- a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -136,21 +136,21 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
void PPCInstPrinter::printS5ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- char Value = MI->getOperand(OpNo).getImm();
- Value = (Value << (32-5)) >> (32-5);
+ int Value = MI->getOperand(OpNo).getImm();
+ Value = SignExtend32<5>(Value);
O << (int)Value;
}
void PPCInstPrinter::printU5ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- unsigned char Value = MI->getOperand(OpNo).getImm();
+ unsigned int Value = MI->getOperand(OpNo).getImm();
assert(Value <= 31 && "Invalid u5imm argument!");
O << (unsigned int)Value;
}
void PPCInstPrinter::printU6ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- unsigned char Value = MI->getOperand(OpNo).getImm();
+ unsigned int Value = MI->getOperand(OpNo).getImm();
assert(Value <= 63 && "Invalid u6imm argument!");
O << (unsigned int)Value;
}
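The printS5ImmOperand fix swaps a shift trick on a char, whose signedness is implementation-defined, for an int plus SignExtend32<5>. A standalone model of SignExtend32 as it is assumed to behave (a sketch of Support/MathExtras.h, not the header itself):

    #include <cassert>
    #include <cstdint>

    // Reinterpret the low B bits of X as a signed value.
    template <unsigned B> int32_t SignExtend32(uint32_t X) {
      return int32_t(X << (32 - B)) >> (32 - B);
    }

    int main() {
      assert(SignExtend32<5>(0x1f) == -1);  // all-ones 5-bit field reads as -1
      assert(SignExtend32<5>(0x0f) == 15);  // high bit clear: value unchanged
    }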
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index 1744738..87ecb13 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -29,9 +29,14 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case FK_Data_1:
case FK_Data_2:
case FK_Data_4:
+ case FK_Data_8:
+ case PPC::fixup_ppc_toc:
return Value;
+ case PPC::fixup_ppc_lo14:
+ case PPC::fixup_ppc_toc16_ds:
+ return (Value & 0xffff) << 2;
case PPC::fixup_ppc_brcond14:
- return Value & 0x3ffc;
+ return Value & 0xfffc;
case PPC::fixup_ppc_br24:
return Value & 0x3fffffc;
#if 0
@@ -41,6 +46,7 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case PPC::fixup_ppc_ha16:
return ((Value >> 16) + ((Value & 0x8000) ? 1 : 0)) & 0xffff;
case PPC::fixup_ppc_lo16:
+ case PPC::fixup_ppc_toc16:
return Value & 0xffff;
}
}
@@ -72,7 +78,10 @@ public:
{ "fixup_ppc_brcond14", 16, 14, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_ppc_lo16", 16, 16, 0 },
{ "fixup_ppc_ha16", 16, 16, 0 },
- { "fixup_ppc_lo14", 16, 14, 0 }
+ { "fixup_ppc_lo14", 16, 14, 0 },
+ { "fixup_ppc_toc", 0, 64, 0 },
+ { "fixup_ppc_toc16", 16, 16, 0 },
+ { "fixup_ppc_toc16_ds", 16, 14, 0 }
};
if (Kind < FirstTargetFixupKind)
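The ha16 case retained above is the standard @ha carry adjustment: the paired low half is consumed as a signed 16-bit immediate, so when bit 15 of the value is set the high half must be pre-incremented. A worked check (the address is made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Value = 0x12348000;  // bit 15 set, so lo16 goes negative
      uint32_t Ha = ((Value >> 16) + ((Value & 0x8000) ? 1 : 0)) & 0xffff;
      int32_t  Lo = (int16_t)(Value & 0xffff);   // -0x8000 after sign extension
      // lis Rt, Ha ; addi Rt, Rt, Lo reconstructs the original value:
      assert((Ha << 16) + Lo == Value);
    }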
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
index a197981..dc93f71 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
@@ -11,6 +11,8 @@
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
using namespace llvm;
@@ -21,9 +23,15 @@ namespace {
virtual ~PPCELFObjectWriter();
protected:
+ virtual unsigned getRelocTypeInner(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
bool IsPCRel, bool IsRelocWithSymbol,
int64_t Addend) const;
+ virtual const MCSymbol *undefinedExplicitRelSym(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
virtual void adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset);
};
}
@@ -36,11 +44,13 @@ PPCELFObjectWriter::PPCELFObjectWriter(bool Is64Bit, uint8_t OSABI)
PPCELFObjectWriter::~PPCELFObjectWriter() {
}
-unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
+unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const
+{
+ MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
+ MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+
// determine the type of the relocation
unsigned Type;
if (IsPCRel) {
@@ -61,17 +71,53 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
Type = ELF::R_PPC_ADDR24;
break;
case PPC::fixup_ppc_brcond14:
- Type = ELF::R_PPC_ADDR14_BRTAKEN; // XXX: or BRNTAKEN?_
+ Type = ELF::R_PPC_ADDR14; // XXX: or BRNTAKEN?_
break;
case PPC::fixup_ppc_ha16:
- Type = ELF::R_PPC_ADDR16_HA;
+ switch (Modifier) {
+ default: llvm_unreachable("Unsupported Modifier");
+ case MCSymbolRefExpr::VK_PPC_TPREL16_HA:
+ Type = ELF::R_PPC_TPREL16_HA;
+ break;
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_PPC_ADDR16_HA;
+ break;
+ }
break;
case PPC::fixup_ppc_lo16:
- Type = ELF::R_PPC_ADDR16_LO;
+ switch (Modifier) {
+ default: llvm_unreachable("Unsupported Modifier");
+ case MCSymbolRefExpr::VK_PPC_TPREL16_LO:
+ Type = ELF::R_PPC_TPREL16_LO;
+ break;
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_PPC_ADDR16_LO;
+ break;
+ }
break;
case PPC::fixup_ppc_lo14:
Type = ELF::R_PPC_ADDR14;
break;
+ case PPC::fixup_ppc_toc:
+ Type = ELF::R_PPC64_TOC;
+ break;
+ case PPC::fixup_ppc_toc16:
+ Type = ELF::R_PPC64_TOC16;
+ break;
+ case PPC::fixup_ppc_toc16_ds:
+ Type = ELF::R_PPC64_TOC16_DS;
+ break;
+ case FK_Data_8:
+ switch (Modifier) {
+ default: llvm_unreachable("Unsupported Modifier");
+ case MCSymbolRefExpr::VK_PPC_TOC:
+ Type = ELF::R_PPC64_TOC;
+ break;
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_PPC64_ADDR64;
+ break;
+ }
+ break;
case FK_Data_4:
Type = ELF::R_PPC_ADDR32;
break;
@@ -83,11 +129,41 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
return Type;
}
+unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) const {
+ return getRelocTypeInner(Target, Fixup, IsPCRel);
+}
+
+const MCSymbol *PPCELFObjectWriter::undefinedExplicitRelSym(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ assert(Target.getSymA() && "SymA cannot be 0");
+ const MCSymbol &Symbol = Target.getSymA()->getSymbol().AliasedSymbol();
+
+ unsigned RelocType = getRelocTypeInner(Target, Fixup, IsPCRel);
+
+ // The .opd creation emits a relocation against the symbol ".TOC.", which
+ // creates an R_PPC64_TOC relocation. However, the relocation symbol name
+ // in the final object should be NULL, since the symbol does not really
+ // exist; it is just the reference to the TOC base for the current
+ // object file.
+ bool EmitThisSym = RelocType != ELF::R_PPC64_TOC;
+
+ if (EmitThisSym && !Symbol.isTemporary())
+ return &Symbol;
+ return NULL;
+}
+
void PPCELFObjectWriter::
adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset) {
switch ((unsigned)Fixup.getKind()) {
case PPC::fixup_ppc_ha16:
case PPC::fixup_ppc_lo16:
+ case PPC::fixup_ppc_toc16:
+ case PPC::fixup_ppc_toc16_ds:
RelocOffset += 2;
break;
default:
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
index b3c889e..37b265e 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
@@ -34,6 +34,16 @@ enum Fixups {
/// fixup_ppc_lo14 - A 14-bit fixup corresponding to lo16(_foo) for instrs
/// like 'std'.
fixup_ppc_lo14,
+
+ /// fixup_ppc_toc - Insert value of TOC base (.TOC.).
+ fixup_ppc_toc,
+
+ /// fixup_ppc_toc16 - A 16-bit signed fixup relative to the TOC base.
+ fixup_ppc_toc16,
+
+ /// fixup_ppc_toc16_ds - A 14-bit signed fixup relative to the TOC base with
+ /// 2 implied zero bits.
+ fixup_ppc_toc16_ds,
// Marker
LastTargetFixupKind,
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
index 245b457..215aa40 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
@@ -59,12 +59,10 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) {
HasLEB128 = true; // Target asm supports leb128 directives (little-endian)
// Exceptions handling
- if (!is64Bit)
- ExceptionsType = ExceptionHandling::DwarfCFI;
+ ExceptionsType = ExceptionHandling::DwarfCFI;
ZeroDirective = "\t.space\t";
Data64bitsDirective = is64Bit ? "\t.quad\t" : 0;
- LCOMMDirectiveType = LCOMM::NoAlignment;
AssemblerDialect = 0; // Old-Style mnemonics.
}
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index f652422..2118302 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -15,7 +15,9 @@
#include "MCTargetDesc/PPCBaseInfo.h"
#include "MCTargetDesc/PPCFixupKinds.h"
#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
@@ -25,16 +27,28 @@ STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
namespace {
class PPCMCCodeEmitter : public MCCodeEmitter {
- PPCMCCodeEmitter(const PPCMCCodeEmitter &); // DO NOT IMPLEMENT
- void operator=(const PPCMCCodeEmitter &); // DO NOT IMPLEMENT
-
+ PPCMCCodeEmitter(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ void operator=(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+
+ const MCSubtargetInfo &STI;
+ Triple TT;
+
public:
PPCMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx) {
+ MCContext &ctx)
+ : STI(sti), TT(STI.getTargetTriple()) {
}
~PPCMCCodeEmitter() {}
+ bool is64BitMode() const {
+ return (STI.getFeatureBits() & PPC::Feature64Bit) != 0;
+ }
+
+ bool isSVR4ABI() const {
+ return TT.isMacOSX() == 0;
+ }
+
unsigned getDirectBrEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getCondBrEncoding(const MCInst &MI, unsigned OpNo,
@@ -61,11 +75,19 @@ public:
SmallVectorImpl<MCFixup> &Fixups) const;
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const {
- unsigned Bits = getBinaryCodeForInstr(MI, Fixups);
+ uint64_t Bits = getBinaryCodeForInstr(MI, Fixups);
+
+ // BL8_NOP_ELF and BLA8_NOP_ELF are both 8 bytes in size because of the
+ // following 'nop'.
+ unsigned Size = 4; // FIXME: Have Desc.getSize() return the correct value!
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == PPC::BL8_NOP_ELF || Opcode == PPC::BLA8_NOP_ELF)
+ Size = 8;
// Output the constant in big endian byte order.
- for (unsigned i = 0; i != 4; ++i) {
- OS << (char)(Bits >> 24);
+ int ShiftValue = (Size * 8) - 8;
+ for (unsigned i = 0; i != Size; ++i) {
+ OS << (char)(Bits >> ShiftValue);
Bits <<= 8;
}
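A standalone model of the emission loop above (an assumption that it mirrors the same shifting logic), showing a 4-byte instruction and the 8-byte call-plus-nop pair written most significant byte first:

#include <cstdint>
#include <cstdio>

void emitBigEndian(uint64_t Bits, unsigned Size) {
  int ShiftValue = (Size * 8) - 8;   // position of the top byte
  for (unsigned i = 0; i != Size; ++i) {
    std::printf("%02x ", unsigned(Bits >> ShiftValue) & 0xFF);
    Bits <<= 8;                      // promote the next byte to the top
  }
  std::printf("\n");
}

int main() {
  emitBigEndian(0x48000001u, 4);            // prints: 48 00 00 01 ('bl')
  emitBigEndian(0x4800000160000000ull, 8);  // 'bl' followed by 'nop'
}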
@@ -140,8 +162,12 @@ unsigned PPCMCCodeEmitter::getMemRIEncoding(const MCInst &MI, unsigned OpNo,
return (getMachineOpValue(MI, MO, Fixups) & 0xFFFF) | RegBits;
// Add a fixup for the displacement field.
- Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
- (MCFixupKind)PPC::fixup_ppc_lo16));
+ if (isSVR4ABI() && is64BitMode())
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_toc16));
+ else
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_lo16));
return RegBits;
}
@@ -158,8 +184,12 @@ unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
return (getMachineOpValue(MI, MO, Fixups) & 0x3FFF) | RegBits;
// Add a fixup for the branch target.
- Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
- (MCFixupKind)PPC::fixup_ppc_lo14));
+ if (isSVR4ABI() && is64BitMode())
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_toc16_ds));
+ else
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_lo14));
return RegBits;
}
@@ -168,7 +198,9 @@ unsigned PPCMCCodeEmitter::
get_crbitm_encoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(OpNo);
- assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MFOCRF) &&
+ assert((MI.getOpcode() == PPC::MTCRF ||
+ MI.getOpcode() == PPC::MFOCRF ||
+ MI.getOpcode() == PPC::MTCRF8) &&
(MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7));
return 0x80 >> getPPCRegisterNumbering(MO.getReg());
}
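The returned byte is a one-hot mask selecting the CR field. A sketch, under the assumption that CR0..CR7 carry register numbers 0..7:

#include <cstdio>

unsigned crFieldMask(unsigned CRNum) {  // CRNum in [0, 7]
  return 0x80u >> CRNum;                // CR0 -> 0x80, ..., CR7 -> 0x01
}

int main() {
  for (unsigned i = 0; i != 8; ++i)
    std::printf("CR%u -> 0x%02x\n", i, crFieldMask(i));
}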
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index 6568e82..4c2578d 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -70,7 +70,7 @@ static MCAsmInfo *createPPCMCAsmInfo(const Target &T, StringRef TT) {
// Initial state of the frame pointer is R1.
MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(PPC::R1, 0);
+ MachineLocation Src(isPPC64? PPC::X1 : PPC::R1, 0);
MAI->addInitialFrameState(0, Dst, Src);
return MAI;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.td b/contrib/llvm/lib/Target/PowerPC/PPC.td
index b7f1688..cb15dad 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.td
@@ -35,6 +35,10 @@ def Directive970 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_970", "">;
def Directive32 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_32", "">;
def Directive64 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_64", "">;
def DirectiveA2 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_A2", "">;
+def DirectiveE500mc : SubtargetFeature<"", "DarwinDirective",
+ "PPC::DIR_E500mc", "">;
+def DirectiveE5500 : SubtargetFeature<"", "DarwinDirective",
+ "PPC::DIR_E5500", "">;
def DirectivePwr6: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR6", "">;
def DirectivePwr7: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR7", "">;
@@ -94,6 +98,12 @@ def : Processor<"g5", G5Itineraries,
[Directive970, FeatureAltivec,
FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX,
Feature64Bit /*, Feature64BitRegs */]>;
+def : ProcessorModel<"e500mc", PPCE500mcModel,
+ [DirectiveE500mc, FeatureMFOCRF,
+ FeatureSTFIWX, FeatureBookE, FeatureISEL]>;
+def : ProcessorModel<"e5500", PPCE5500Model,
+ [DirectiveE5500, FeatureMFOCRF, Feature64Bit,
+ FeatureSTFIWX, FeatureBookE, FeatureISEL]>;
def : Processor<"a2", PPCA2Itineraries, [DirectiveA2, FeatureBookE,
FeatureMFOCRF, FeatureFSqrt,
FeatureSTFIWX, FeatureISEL,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index f76b89c..15d690b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -54,12 +54,13 @@
#include "llvm/Support/ELF.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/MapVector.h"
using namespace llvm;
namespace {
class PPCAsmPrinter : public AsmPrinter {
protected:
- DenseMap<MCSymbol*, MCSymbol*> TOC;
+ MapVector<MCSymbol*, MCSymbol*> TOC;
const PPCSubtarget &Subtarget;
uint64_t TOCLabelID;
public:
@@ -109,6 +110,8 @@ namespace {
bool doFinalization(Module &M);
virtual void EmitFunctionEntryLabel();
+
+ void EmitFunctionBodyEnd();
};
/// PPCDarwinAsmPrinter - PowerPC assembly printer, customized for Darwin/Mac
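The switch from DenseMap to MapVector matters because MapVector iterates in insertion order, making the .toc entries emitted in doFinalization deterministic. A rough sketch of the idea (not LLVM's actual implementation):

#include <cstddef>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct SimpleMapVector {
  std::map<std::string, std::size_t> Index;       // key -> position in Order
  std::vector<std::pair<std::string, int>> Order; // insertion-ordered pairs

  int &operator[](const std::string &K) {
    auto It = Index.find(K);
    if (It == Index.end()) {
      Index[K] = Order.size();        // record the insertion position
      Order.push_back({K, 0});
      return Order.back().second;
    }
    return Order[It->second].second;  // existing key: map-style lookup
  }
};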
@@ -282,8 +285,22 @@ bool PPCAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant,
const char *ExtraCode,
raw_ostream &O) {
- if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier.
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default: return true; // Unknown modifier.
+ case 'y': // A memory reference for an X-form instruction
+ {
+ const char *RegName = "r0";
+ if (!Subtarget.isDarwin()) RegName = stripRegisterPrefix(RegName);
+ O << RegName << ", ";
+ printOperand(MI, OpNo, O);
+ return false;
+ }
+ }
+ }
+
assert(MI->getOperand(OpNo).isReg());
O << "0(";
printOperand(MI, OpNo, O);
@@ -345,23 +362,37 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
OutStreamer.EmitLabel(PICBase);
return;
}
+ case PPC::LDtocJTI:
+ case PPC::LDtocCPT:
case PPC::LDtoc: {
// Transform %X3 = LDtoc <ga:@min1>, %X2
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());
-
+
// Change the opcode to LD, and the global address operand to be a
// reference to the TOC entry we will synthesize later.
TmpInst.setOpcode(PPC::LD);
const MachineOperand &MO = MI->getOperand(1);
- assert(MO.isGlobal());
-
- // Map symbol -> label of TOC entry.
- MCSymbol *&TOCEntry = TOC[Mang->getSymbol(MO.getGlobal())];
- if (TOCEntry == 0)
- TOCEntry = GetTempSymbol("C", TOCLabelID++);
-
+
+ // Map symbol -> label of TOC entry
+ assert(MO.isGlobal() || MO.isCPI() || MO.isJTI());
+ MCSymbol *MOSymbol = 0;
+ if (MO.isGlobal())
+ MOSymbol = Mang->getSymbol(MO.getGlobal());
+ else if (MO.isCPI())
+ MOSymbol = GetCPISymbol(MO.getIndex());
+ else if (MO.isJTI())
+ MOSymbol = GetJTISymbol(MO.getIndex());
+ MCSymbol *&TOCEntry = TOC[MOSymbol];
+ // To avoid a name clash, check whether the name already exists.
+ while (TOCEntry == 0) {
+ if (OutContext.LookupSymbol(Twine(MAI->getPrivateGlobalPrefix()) +
+ "C" + Twine(TOCLabelID++)) == 0) {
+ TOCEntry = GetTempSymbol("C", TOCLabelID);
+ }
+ }
+
const MCExpr *Exp =
- MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC,
+ MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC_ENTRY,
OutContext);
TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
OutStreamer.EmitInstruction(TmpInst);
@@ -404,11 +435,17 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
OutStreamer.EmitValueToAlignment(8);
MCSymbol *Symbol1 =
OutContext.GetOrCreateSymbol(".L." + Twine(CurrentFnSym->getName()));
- MCSymbol *Symbol2 = OutContext.GetOrCreateSymbol(StringRef(".TOC.@tocbase"));
+ // Generates an R_PPC64_ADDR64 (from FK_DATA_8) relocation for the function
+ // entry point.
OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol1, OutContext),
- Subtarget.isPPC64() ? 8 : 4/*size*/, 0/*addrspace*/);
- OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol2, OutContext),
- Subtarget.isPPC64() ? 8 : 4/*size*/, 0/*addrspace*/);
+ 8/*size*/, 0/*addrspace*/);
+ MCSymbol *Symbol2 = OutContext.GetOrCreateSymbol(StringRef(".TOC."));
+ // Generates an R_PPC64_TOC relocation for TOC base insertion.
+ OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol2,
+ MCSymbolRefExpr::VK_PPC_TOC, OutContext),
+ 8/*size*/, 0/*addrspace*/);
+ // Emit a null environment pointer.
+ OutStreamer.EmitIntValue(0, 8 /* size */, 0 /* addrspace */);
OutStreamer.SwitchSection(Current);
MCSymbol *RealFnSym = OutContext.GetOrCreateSymbol(
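The three doublewords emitted above form the classic 64-bit ELF function descriptor. A sketch of its layout (field names are my own):

#include <cstdint>

struct FunctionDescriptor {
  uint64_t EntryPoint; // .L.<name>: address of the actual code
  uint64_t TOCBase;    // .TOC.: resolved via the R_PPC64_TOC relocation
  uint64_t EnvPointer; // environment pointer, emitted as zero
};

static_assert(sizeof(FunctionDescriptor) == 24, "three doublewords");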
@@ -419,7 +456,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
bool isPPC64 = TD->getPointerSizeInBits() == 64;
@@ -429,18 +466,34 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
SectionKind::getReadOnly());
OutStreamer.SwitchSection(Section);
- // FIXME: This is nondeterminstic!
- for (DenseMap<MCSymbol*, MCSymbol*>::iterator I = TOC.begin(),
+ for (MapVector<MCSymbol*, MCSymbol*>::iterator I = TOC.begin(),
E = TOC.end(); I != E; ++I) {
OutStreamer.EmitLabel(I->second);
- OutStreamer.EmitRawText("\t.tc " + Twine(I->first->getName()) +
- "[TC]," + I->first->getName());
+ MCSymbol *S = OutContext.GetOrCreateSymbol(I->first->getName());
+ OutStreamer.EmitTCEntry(*S);
}
}
return AsmPrinter::doFinalization(M);
}
+/// EmitFunctionBodyEnd - Print the traceback table before the .size
+/// directive.
+///
+void PPCLinuxAsmPrinter::EmitFunctionBodyEnd() {
+ // Only the 64-bit target requires a traceback table. For now,
+ // we only emit the word of zeroes that GDB requires to find
+ // the end of the function, and zeroes for the eight-byte
+ // mandatory fields.
+ // FIXME: We should fill in the eight-byte mandatory fields as described in
+ // the PPC64 ELF ABI (this is a low-priority item because GDB does not
+ // currently make use of these fields).
+ if (Subtarget.isPPC64()) {
+ OutStreamer.EmitIntValue(0, 4/*size*/);
+ OutStreamer.EmitIntValue(0, 8/*size*/);
+ }
+}
+
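A sketch of the twelve zero bytes this emits at the end of each 64-bit function (the names are assumptions, not ABI terms):

#include <cstdint>

struct MinimalTracebackTable {
  uint32_t EndOfFunctionMarker; // zero word that GDB searches for
  uint64_t MandatoryFields;     // the eight mandatory bytes, zero for now
};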
void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
static const char *const CPUDirectives[] = {
"",
@@ -453,6 +506,8 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
"ppc750",
"ppc970",
"ppcA2",
+ "ppce500mc",
+ "ppce5500",
"power6",
"power7",
"ppc64"
@@ -508,7 +563,7 @@ static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) {
void PPCDarwinAsmPrinter::
EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
- bool isPPC64 = TM.getTargetData()->getPointerSizeInBits() == 64;
+ bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
const TargetLoweringObjectFileMachO &TLOFMacho =
static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering());
@@ -603,7 +658,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
- bool isPPC64 = TM.getTargetData()->getPointerSizeInBits() == 64;
+ bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
// Darwin/PPC always uses mach-o.
const TargetLoweringObjectFileMachO &TLOFMacho =
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td b/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
index b2b5364..3f87e88 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
@@ -12,12 +12,19 @@
//
//===----------------------------------------------------------------------===//
+/// CCIfSubtarget - Match if the current subtarget has a feature F.
+class CCIfSubtarget<string F, CCAction A>
+ : CCIf<!strconcat("State.getTarget().getSubtarget<PPCSubtarget>().", F), A>;
+
//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//
// Return-value convention for PowerPC
def RetCC_PPC : CallingConv<[
+ // On PPC64, integer return values are always promoted to i64
+ CCIfType<[i32], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
+
CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
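The new RetCC_PPC rule means a 32-bit result always occupies a full 64-bit register on PPC64; the matching caller-side AssertZext/AssertSext plus TRUNCATE appears later in this patch in LowerCallResult. A hedged scalar model:

#include <cstdint>
#include <cstdio>

// Assumed illustration: a signed i32 result travels sign-extended in X3.
uint64_t returnInX3SignExtended(int32_t V) {
  return static_cast<uint64_t>(static_cast<int64_t>(V)); // sext to 64 bits
}

int main() {
  uint64_t X3 = returnInX3SignExtended(-2);
  std::printf("X3 = 0x%016llx, caller sees %d\n",
              (unsigned long long)X3, (int32_t)X3); // caller truncates back
}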
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index c24afa9..caf7bf2 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -13,6 +13,7 @@
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
+#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -49,6 +50,11 @@ static const uint16_t VRRegNo[] = {
/// to manipulate the VRSAVE register, even though it uses vector registers.
/// This can happen when the only registers used are known to be live in or out
/// of the function. Remove all of the VRSAVE related code from the function.
+/// FIXME: The removal of the code results in a compile failure at -O0 when the
+/// function contains a function call, as the GPR containing original VRSAVE
+/// contents is spilled and reloaded around the call. Without the prolog code,
+/// the spill instruction refers to an undefined register. This code needs
+/// to account for all uses of that GPR.
static void RemoveVRSaveCode(MachineInstr *MI) {
MachineBasicBlock *Entry = MI->getParent();
MachineFunction *MF = Entry->getParent();
@@ -168,6 +174,11 @@ static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) {
MI->eraseFromParent();
}
+static bool spillsCR(const MachineFunction &MF) {
+ const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ return FuncInfo->isCRSpilled();
+}
+
/// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size.
void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const {
@@ -184,13 +195,22 @@ void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const {
// If we are a leaf function, and use up to 224 bytes of stack space,
// don't have a frame pointer, calls, or dynamic alloca then we do not need
- // to adjust the stack pointer (we fit in the Red Zone).
- bool DisableRedZone = MF.getFunction()->hasFnAttr(Attribute::NoRedZone);
- // FIXME SVR4 The 32-bit SVR4 ABI has no red zone.
+ // to adjust the stack pointer (we fit in the Red Zone). For 64-bit
+ // SVR4, we also require a stack frame if we need to spill the CR,
+ // since this spill area is addressed relative to the stack pointer.
+ bool DisableRedZone = MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::NoRedZone);
+ // FIXME SVR4 The 32-bit SVR4 ABI has no red zone. However, it can
+ // still generate stackless code if all local vars are reg-allocated.
+ // Try: (FrameSize <= 224
+ // || (FrameSize == 0 && Subtarget.isPPC32 && Subtarget.isSVR4ABI()))
if (!DisableRedZone &&
FrameSize <= 224 && // Fits in red zone.
!MFI->hasVarSizedObjects() && // No dynamic alloca.
!MFI->adjustsStack() && // No calls.
+ !(Subtarget.isPPC64() && // No 64-bit SVR4 CRsave.
+ Subtarget.isSVR4ABI()
+ && spillsCR(MF)) &&
(!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment.
// No need for frame
MFI->setStackSize(0);
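Restated as a standalone predicate (the boolean parameters stand in for the subtarget and frame-info queries above), the updated red-zone test is roughly:

bool fitsInRedZone(bool DisableRedZone, unsigned FrameSize,
                   bool HasVarSizedObjects, bool AdjustsStack,
                   bool IsPPC64, bool IsSVR4ABI, bool SpillsCR,
                   bool AlignOK) {
  return !DisableRedZone &&
         FrameSize <= 224 &&                     // fits in the red zone
         !HasVarSizedObjects &&                  // no dynamic alloca
         !AdjustsStack &&                        // no calls
         !(IsPPC64 && IsSVR4ABI && SpillsCR) &&  // CR save needs a frame
         AlignOK;                                // no special alignment
}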
@@ -241,7 +261,7 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
// Naked functions have no stack frame pushed, so we don't have a frame
// pointer.
- if (MF.getFunction()->hasFnAttr(Attribute::Naked))
+ if (MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked))
return false;
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
@@ -268,12 +288,13 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
// process it.
- for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) {
- if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) {
- HandleVRSaveUpdate(MBBI, TII);
- break;
+ if (!Subtarget.isSVR4ABI())
+ for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) {
+ if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) {
+ HandleVRSaveUpdate(MBBI, TII);
+ break;
+ }
}
- }
// Move MBBI back to the beginning of the function.
MBBI = MBB.begin();
@@ -488,7 +509,6 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
- int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
unsigned Reg = CSI[I].getReg();
if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
@@ -497,6 +517,25 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
if (PPC::CRBITRCRegClass.contains(Reg))
continue;
+ // For SVR4, don't emit a move for the CR spill slot if we haven't
+ // spilled CRs.
+ if (Subtarget.isSVR4ABI()
+ && (PPC::CR2 <= Reg && Reg <= PPC::CR4)
+ && !spillsCR(MF))
+ continue;
+
+ // For 64-bit SVR4 when we have spilled CRs, the spill location
+ // is SP+8, not a frame-relative slot.
+ if (Subtarget.isSVR4ABI()
+ && Subtarget.isPPC64()
+ && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
+ MachineLocation CSDst(PPC::X1, 8);
+ MachineLocation CSSrc(PPC::CR2);
+ Moves.push_back(MachineMove(Label, CSDst, CSSrc));
+ continue;
+ }
+
+ int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
Moves.push_back(MachineMove(Label, CSDst, CSSrc));
@@ -714,11 +753,6 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
}
}
-static bool spillsCR(const MachineFunction &MF) {
- const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- return FuncInfo->isCRSpilled();
-}
-
/// MustSaveLR - Return true if this function requires that we save the LR
/// register onto the stack in the prolog and restore it in the epilog of the
/// function.
@@ -808,7 +842,6 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
bool HasGPSaveArea = false;
bool HasG8SaveArea = false;
bool HasFPSaveArea = false;
- bool HasCRSaveArea = false;
bool HasVRSAVESaveArea = false;
bool HasVRSaveArea = false;
@@ -843,10 +876,9 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinFPR) {
MinFPR = Reg;
}
-// FIXME SVR4: Disable CR save area for now.
} else if (PPC::CRBITRCRegClass.contains(Reg) ||
PPC::CRRCRegClass.contains(Reg)) {
-// HasCRSaveArea = true;
+ ; // do nothing, as we already know whether CRs are spilled
} else if (PPC::VRSAVERCRegClass.contains(Reg)) {
HasVRSAVESaveArea = true;
} else if (PPC::VRRCRegClass.contains(Reg)) {
@@ -926,16 +958,21 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
}
}
- // The CR save area is below the general register save area.
- if (HasCRSaveArea) {
- // FIXME SVR4: Is it actually possible to have multiple elements in CSI
- // which have the CR/CRBIT register class?
+ // For 32-bit only, the CR save area is below the general register
+ // save area. For 64-bit SVR4, the CR save area is addressed relative
+ // to the stack pointer and hence does not need an adjustment here.
+ // Only CR2 (the first nonvolatile spilled) has an associated frame
+ // index so that we have a single uniform save area.
+ if (spillsCR(MF) && !(Subtarget.isPPC64() && Subtarget.isSVR4ABI())) {
// Adjust the frame index of the CR spill slot.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (PPC::CRBITRCRegClass.contains(Reg) ||
- PPC::CRRCRegClass.contains(Reg)) {
+ if ((Subtarget.isSVR4ABI() && Reg == PPC::CR2)
+ // Leave Darwin logic as-is.
+ || (!Subtarget.isSVR4ABI() &&
+ (PPC::CRBITRCRegClass.contains(Reg) ||
+ PPC::CRRCRegClass.contains(Reg)))) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
@@ -973,3 +1010,184 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
}
}
}
+
+bool
+PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+
+ // Currently, this function only handles SVR4 32- and 64-bit ABIs.
+ // Return false otherwise to maintain pre-existing behavior.
+ if (!Subtarget.isSVR4ABI())
+ return false;
+
+ MachineFunction *MF = MBB.getParent();
+ const PPCInstrInfo &TII =
+ *static_cast<const PPCInstrInfo*>(MF->getTarget().getInstrInfo());
+ DebugLoc DL;
+ bool CRSpilled = false;
+
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ // CR2 through CR4 are the nonvolatile CR fields.
+ bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;
+
+ if (CRSpilled && IsCRField)
+ continue;
+
+ // Add the callee-saved register as live-in; it's killed at the spill.
+ MBB.addLiveIn(Reg);
+
+ // Insert the spill to the stack frame.
+ if (IsCRField) {
+ CRSpilled = true;
+ // The first time we see a CR field, store the whole CR into the
+ // save slot via GPR12 (available in the prolog for 32- and 64-bit).
+ if (Subtarget.isPPC64()) {
+ // 64-bit: SP+8
+ MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::X12));
+ MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::STW))
+ .addReg(PPC::X12,
+ getKillRegState(true))
+ .addImm(8)
+ .addReg(PPC::X1));
+ } else {
+ // 32-bit: FP-relative. Note that we made sure CR2-CR4 all have
+ // the same frame index in PPCRegisterInfo::hasReservedSpillSlot.
+ MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12));
+ MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW))
+ .addReg(PPC::R12,
+ getKillRegState(true)),
+ CSI[i].getFrameIdx()));
+ }
+
+ // Record that we spill the CR in this function.
+ PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
+ FuncInfo->setSpillsCR();
+ } else {
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true,
+ CSI[i].getFrameIdx(), RC, TRI);
+ }
+ }
+ return true;
+}
+
+static void
+restoreCRs(bool isPPC64, bool CR2Spilled, bool CR3Spilled, bool CR4Spilled,
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI, unsigned CSIIndex) {
+
+ MachineFunction *MF = MBB.getParent();
+ const PPCInstrInfo &TII =
+ *static_cast<const PPCInstrInfo*>(MF->getTarget().getInstrInfo());
+ DebugLoc DL;
+ unsigned RestoreOp, MoveReg;
+
+ if (isPPC64) {
+ // 64-bit: SP+8
+ MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::LWZ), PPC::X12)
+ .addImm(8)
+ .addReg(PPC::X1));
+ RestoreOp = PPC::MTCRF8;
+ MoveReg = PPC::X12;
+ } else {
+ // 32-bit: FP-relative
+ MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ),
+ PPC::R12),
+ CSI[CSIIndex].getFrameIdx()));
+ RestoreOp = PPC::MTCRF;
+ MoveReg = PPC::R12;
+ }
+
+ if (CR2Spilled)
+ MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR2)
+ .addReg(MoveReg));
+
+ if (CR3Spilled)
+ MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR3)
+ .addReg(MoveReg));
+
+ if (CR4Spilled)
+ MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR4)
+ .addReg(MoveReg));
+}
+
+bool
+PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+
+ // Currently, this function only handles SVR4 32- and 64-bit ABIs.
+ // Return false otherwise to maintain pre-existing behavior.
+ if (!Subtarget.isSVR4ABI())
+ return false;
+
+ MachineFunction *MF = MBB.getParent();
+ const PPCInstrInfo &TII =
+ *static_cast<const PPCInstrInfo*>(MF->getTarget().getInstrInfo());
+ bool CR2Spilled = false;
+ bool CR3Spilled = false;
+ bool CR4Spilled = false;
+ unsigned CSIIndex = 0;
+
+ // Initialize insertion-point logic; we will be restoring in reverse
+ // order of spill.
+ MachineBasicBlock::iterator I = MI, BeforeI = I;
+ bool AtStart = I == MBB.begin();
+
+ if (!AtStart)
+ --BeforeI;
+
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+
+ if (Reg == PPC::CR2) {
+ CR2Spilled = true;
+ // The spill slot is associated only with CR2, which is the
+ // first of the nonvolatile CR fields spilled. Save its index here.
+ CSIIndex = i;
+ continue;
+ } else if (Reg == PPC::CR3) {
+ CR3Spilled = true;
+ continue;
+ } else if (Reg == PPC::CR4) {
+ CR4Spilled = true;
+ continue;
+ } else {
+ // When we first encounter a non-CR register after seeing at
+ // least one CR register, restore all spilled CRs together.
+ if ((CR2Spilled || CR3Spilled || CR4Spilled)
+ && !(PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
+ restoreCRs(Subtarget.isPPC64(), CR2Spilled, CR3Spilled, CR4Spilled,
+ MBB, I, CSI, CSIIndex);
+ CR2Spilled = CR3Spilled = CR4Spilled = false;
+ }
+
+ // Default behavior for non-CR saves.
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(),
+ RC, TRI);
+ assert(I != MBB.begin() &&
+ "loadRegFromStackSlot didn't insert any code!");
+ }
+
+ // Insert in reverse order.
+ if (AtStart)
+ I = MBB.begin();
+ else {
+ I = BeforeI;
+ ++I;
+ }
+ }
+
+ // If we haven't yet restored the CRs, do so now.
+ if (CR2Spilled || CR3Spilled || CR4Spilled)
+ restoreCRs(Subtarget.isPPC64(), CR2Spilled, CR3Spilled, CR4Spilled,
+ MBB, I, CSI, CSIIndex);
+
+ return true;
+}
+
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
index d708541..4d957b9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
@@ -45,6 +45,16 @@ public:
RegScavenger *RS = NULL) const;
void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
/// targetHandlesStackFrameRounding - Returns true if the target is
/// responsible for rounding up the stack frame (probably at emitPrologue
/// time).
@@ -170,23 +180,11 @@ public:
{PPC::R15, -68},
{PPC::R14, -72},
- // CR save area offset.
- // FIXME SVR4: Disable CR save area for now.
-// {PPC::CR2, -4},
-// {PPC::CR3, -4},
-// {PPC::CR4, -4},
-// {PPC::CR2LT, -4},
-// {PPC::CR2GT, -4},
-// {PPC::CR2EQ, -4},
-// {PPC::CR2UN, -4},
-// {PPC::CR3LT, -4},
-// {PPC::CR3GT, -4},
-// {PPC::CR3EQ, -4},
-// {PPC::CR3UN, -4},
-// {PPC::CR4LT, -4},
-// {PPC::CR4GT, -4},
-// {PPC::CR4EQ, -4},
-// {PPC::CR4UN, -4},
+ // CR save area offset. We map each of the nonvolatile CR fields
+ // to the slot for CR2, which is the first of the nonvolatile CR
+ // fields to be assigned, so that we only allocate one save slot.
+ // See PPCRegisterInfo::hasReservedSpillSlot() for more information.
+ {PPC::CR2, -4},
// VRSAVE save area offset.
{PPC::VRSAVE, -4},
@@ -228,27 +226,6 @@ public:
{PPC::F14, -144},
// General register save area offsets.
- // FIXME 64-bit SVR4: Are 32-bit registers actually allocated in 64-bit
- // mode?
- {PPC::R31, -4},
- {PPC::R30, -12},
- {PPC::R29, -20},
- {PPC::R28, -28},
- {PPC::R27, -36},
- {PPC::R26, -44},
- {PPC::R25, -52},
- {PPC::R24, -60},
- {PPC::R23, -68},
- {PPC::R22, -76},
- {PPC::R21, -84},
- {PPC::R20, -92},
- {PPC::R19, -100},
- {PPC::R18, -108},
- {PPC::R17, -116},
- {PPC::R16, -124},
- {PPC::R15, -132},
- {PPC::R14, -140},
-
{PPC::X31, -8},
{PPC::X30, -16},
{PPC::X29, -24},
@@ -268,24 +245,6 @@ public:
{PPC::X15, -136},
{PPC::X14, -144},
- // CR save area offset.
- // FIXME SVR4: Disable CR save area for now.
-// {PPC::CR2, -4},
-// {PPC::CR3, -4},
-// {PPC::CR4, -4},
-// {PPC::CR2LT, -4},
-// {PPC::CR2GT, -4},
-// {PPC::CR2EQ, -4},
-// {PPC::CR2UN, -4},
-// {PPC::CR3LT, -4},
-// {PPC::CR3GT, -4},
-// {PPC::CR3EQ, -4},
-// {PPC::CR3UN, -4},
-// {PPC::CR4LT, -4},
-// {PPC::CR4GT, -4},
-// {PPC::CR4EQ, -4},
-// {PPC::CR4UN, -4},
-
// VRSAVE save area offset.
{PPC::VRSAVE, -4},
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index a00f686..254fea6 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -53,7 +53,9 @@ namespace {
GlobalBaseReg = 0;
SelectionDAGISel::runOnMachineFunction(MF);
- InsertVRSaveCode(MF);
+ if (!PPCSubTarget.isSVR4ABI())
+ InsertVRSaveCode(MF);
+
return true;
}
@@ -621,6 +623,88 @@ static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert, int &Other) {
}
}
+// getVCmpInst: return the vector compare instruction for the specified
+// vector type and condition code. Since this is Altivec-specific code,
+// only the Altivec types (v16i8, v8i16, v4i32, and v4f32) are supported.
+static unsigned int getVCmpInst(MVT::SimpleValueType VecVT, ISD::CondCode CC) {
+ switch (CC) {
+ case ISD::SETEQ:
+ case ISD::SETUEQ:
+ case ISD::SETNE:
+ case ISD::SETUNE:
+ if (VecVT == MVT::v16i8)
+ return PPC::VCMPEQUB;
+ else if (VecVT == MVT::v8i16)
+ return PPC::VCMPEQUH;
+ else if (VecVT == MVT::v4i32)
+ return PPC::VCMPEQUW;
+ // v4f32 != v4f32 can be translated to an unordered not-equal
+ else if (VecVT == MVT::v4f32)
+ return PPC::VCMPEQFP;
+ break;
+ case ISD::SETLT:
+ case ISD::SETGT:
+ case ISD::SETLE:
+ case ISD::SETGE:
+ if (VecVT == MVT::v16i8)
+ return PPC::VCMPGTSB;
+ else if (VecVT == MVT::v8i16)
+ return PPC::VCMPGTSH;
+ else if (VecVT == MVT::v4i32)
+ return PPC::VCMPGTSW;
+ else if (VecVT == MVT::v4f32)
+ return PPC::VCMPGTFP;
+ break;
+ case ISD::SETULT:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ case ISD::SETULE:
+ if (VecVT == MVT::v16i8)
+ return PPC::VCMPGTUB;
+ else if (VecVT == MVT::v8i16)
+ return PPC::VCMPGTUH;
+ else if (VecVT == MVT::v4i32)
+ return PPC::VCMPGTUW;
+ break;
+ case ISD::SETOEQ:
+ if (VecVT == MVT::v4f32)
+ return PPC::VCMPEQFP;
+ break;
+ case ISD::SETOLT:
+ case ISD::SETOGT:
+ case ISD::SETOLE:
+ if (VecVT == MVT::v4f32)
+ return PPC::VCMPGTFP;
+ break;
+ case ISD::SETOGE:
+ if (VecVT == MVT::v4f32)
+ return PPC::VCMPGEFP;
+ break;
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid integer vector compare condition");
+}
+
+// getVCmpEQInst: return the equality compare instruction for the specified
+// vector type. Since this is Altivec-specific code, only the Altivec types
+// (v16i8, v8i16, v4i32, and v4f32) are supported.
+static unsigned int getVCmpEQInst(MVT::SimpleValueType VecVT) {
+ switch (VecVT) {
+ case MVT::v16i8:
+ return PPC::VCMPEQUB;
+ case MVT::v8i16:
+ return PPC::VCMPEQUH;
+ case MVT::v4i32:
+ return PPC::VCMPEQUW;
+ case MVT::v4f32:
+ return PPC::VCMPEQFP;
+ default:
+ llvm_unreachable("Invalid integer vector compare condition");
+ }
+}
+
+
SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
unsigned Imm;
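A per-lane scalar model (an assumption, using the 0/~0 lane masks Altivec compares produce) of how the selector below synthesizes predicates with no single instruction: GE as GT|EQ, LE as swapped-operand GT|EQ, and NE as NOR of the EQ compare with itself:

#include <cstdio>

unsigned gt(int a, int b) { return a > b ? ~0u : 0u; }   // vcmpgtsw lane
unsigned eq(int a, int b) { return a == b ? ~0u : 0u; }  // vcmpequw lane

unsigned ge(int a, int b) { return gt(a, b) | eq(a, b); }    // VOR
unsigned le(int a, int b) { return gt(b, a) | eq(a, b); }    // swapped GT
unsigned ne(int a, int b) { return ~(eq(a, b) | eq(a, b)); } // VNOR

int main() {
  std::printf("ge(3,3)=%d ne(1,2)=%d le(1,2)=%d\n",
              ge(3, 3) != 0, ne(1, 2) != 0, le(1, 2) != 0);
}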
@@ -701,10 +785,67 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
}
}
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ // Altivec vector compare instructions do not set any CR register by default,
+ // and vector compare operations return the same type as the operands.
+ if (LHS.getValueType().isVector()) {
+ EVT VecVT = LHS.getValueType();
+ MVT::SimpleValueType VT = VecVT.getSimpleVT().SimpleTy;
+ unsigned int VCmpInst = getVCmpInst(VT, CC);
+
+ switch (CC) {
+ case ISD::SETEQ:
+ case ISD::SETOEQ:
+ case ISD::SETUEQ:
+ return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS);
+ case ISD::SETNE:
+ case ISD::SETONE:
+ case ISD::SETUNE: {
+ SDValue VCmp(CurDAG->getMachineNode(VCmpInst, dl, VecVT, LHS, RHS), 0);
+ return CurDAG->SelectNodeTo(N, PPC::VNOR, VecVT, VCmp, VCmp);
+ }
+ case ISD::SETLT:
+ case ISD::SETOLT:
+ case ISD::SETULT:
+ return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, RHS, LHS);
+ case ISD::SETGT:
+ case ISD::SETOGT:
+ case ISD::SETUGT:
+ return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS);
+ case ISD::SETGE:
+ case ISD::SETOGE:
+ case ISD::SETUGE: {
+ // Small optimization: Altivec provides a 'Vector Compare Greater Than
+ // or Equal To' instruction (vcmpgefp), so in this case there is no
+ // need for extra logic for the equal compare.
+ if (VecVT.getSimpleVT().isFloatingPoint()) {
+ return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS);
+ } else {
+ SDValue VCmpGT(CurDAG->getMachineNode(VCmpInst, dl, VecVT, LHS, RHS), 0);
+ unsigned int VCmpEQInst = getVCmpEQInst(VT);
+ SDValue VCmpEQ(CurDAG->getMachineNode(VCmpEQInst, dl, VecVT, LHS, RHS), 0);
+ return CurDAG->SelectNodeTo(N, PPC::VOR, VecVT, VCmpGT, VCmpEQ);
+ }
+ }
+ case ISD::SETLE:
+ case ISD::SETOLE:
+ case ISD::SETULE: {
+ SDValue VCmpLE(CurDAG->getMachineNode(VCmpInst, dl, VecVT, RHS, LHS), 0);
+ unsigned int VCmpEQInst = getVCmpEQInst(VT);
+ SDValue VCmpEQ(CurDAG->getMachineNode(VCmpEQInst, dl, VecVT, LHS, RHS), 0);
+ return CurDAG->SelectNodeTo(N, PPC::VOR, VecVT, VCmpLE, VCmpEQ);
+ }
+ default:
+ llvm_unreachable("Invalid vector compare type: should be expanded by legalize");
+ }
+ }
+
bool Inv;
int OtherCondIdx;
unsigned Idx = getCRIdxForSetCC(CC, Inv, OtherCondIdx);
- SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl);
+ SDValue CCReg = SelectCC(LHS, RHS, CC, dl);
SDValue IntCR;
// Force the ccreg into CR7.
@@ -717,7 +858,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
if (PPCSubTarget.hasMFOCRF() && OtherCondIdx == -1)
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
CCReg), 0);
- else
+ else
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
CR7Reg, CCReg), 0);
@@ -975,6 +1116,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case ISD::AND: {
unsigned Imm, Imm2, SH, MB, ME;
+ uint64_t Imm64;
// If this is an and of a value rotated between 0 and 31 bits and then and'd
// with a mask, emit rlwinm
@@ -993,6 +1135,14 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
+ // If this is a 64-bit zero-extension mask, emit rldicl.
+ if (isInt64Immediate(N->getOperand(1).getNode(), Imm64) &&
+ isMask_64(Imm64)) {
+ SDValue Val = N->getOperand(0);
+ MB = 64 - CountTrailingOnes_64(Imm64);
+ SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB) };
+ return CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops, 3);
+ }
// AND X, 0 -> 0, not "rlwinm 32".
if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) {
ReplaceUses(SDValue(N, 0), N->getOperand(1));
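A worked example of the new rldicl selection above, with stand-in helpers for isMask_64 and CountTrailingOnes_64 (assumption: a "mask" here means a value of the form 0...01...1): for an AND with N trailing ones, the selector emits rldicl src, 0, 64-N.

#include <cstdint>
#include <cstdio>

bool isMask64(uint64_t V) { return V && ((V + 1) & V) == 0; }

unsigned countTrailingOnes64(uint64_t V) {
  unsigned N = 0;
  while (V & 1) { V >>= 1; ++N; }
  return N;
}

int main() {
  uint64_t Imm = 0xFFFFFFFFull;                   // zext from i32
  if (isMask64(Imm)) {
    unsigned MB = 64 - countTrailingOnes64(Imm);
    std::printf("rldicl dst, src, 0, %u\n", MB);  // MB = 32 here
  }
}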
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 61d44c5..adf78d5 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -361,6 +361,22 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
+
+ for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
+ MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j;
+ setTruncStoreAction(VT, InnerVT, Expand);
+ }
+ setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, Expand);
+ }
+
+ for (unsigned i = (unsigned)MVT::FIRST_FP_VECTOR_VALUETYPE;
+ i <= (unsigned)MVT::LAST_FP_VECTOR_VALUETYPE; ++i) {
+ MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
+ setOperationAction(ISD::FSQRT, VT, Expand);
}
// We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
@@ -373,6 +389,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
setOperationAction(ISD::STORE , MVT::v4i32, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
@@ -392,6 +412,14 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
+
+ // Altivec does not contain unordered floating-point compare instructions
+ setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand);
}
if (Subtarget->has64BitSupport()) {
@@ -449,6 +477,21 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setSchedulingPreference(Sched::Hybrid);
computeRegisterProperties();
+
+ // The Freescale cores do better with aggressive inlining of memcpy and
+ // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
+ if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc ||
+ Subtarget->getDarwinDirective() == PPC::DIR_E5500) {
+ maxStoresPerMemset = 32;
+ maxStoresPerMemsetOptSize = 16;
+ maxStoresPerMemcpy = 32;
+ maxStoresPerMemcpyOptSize = 8;
+ maxStoresPerMemmove = 32;
+ maxStoresPerMemmoveOptSize = 8;
+
+ setPrefFunctionAlignment(4);
+ benefitFromCodePlacementOpt = true;
+ }
}
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
@@ -517,11 +560,15 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
case PPCISD::MTFSF: return "PPCISD::MTFSF";
case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
+ case PPCISD::CR6SET: return "PPCISD::CR6SET";
+ case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
}
}
EVT PPCTargetLowering::getSetCCResultType(EVT VT) const {
- return MVT::i32;
+ if (!VT.isVector())
+ return MVT::i32;
+ return VT.changeVectorElementTypeToInteger();
}
//===----------------------------------------------------------------------===//
@@ -811,14 +858,13 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
}
// Properly sign extend the value.
- int ShAmt = (4-ByteSize)*8;
- int MaskVal = ((int)Value << ShAmt) >> ShAmt;
+ int MaskVal = SignExtend32(Value, ByteSize * 8);
// If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
if (MaskVal == 0) return SDValue();
// Finally, if this value fits in a 5 bit sext field, return it
- if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
+ if (SignExtend32<5>(MaskVal) == MaskVal)
return DAG.getTargetConstant(MaskVal, MVT::i32);
return SDValue();
}
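The SignExtend32 helper used above replaces the hand-written shift pair. A minimal model (assuming two's complement and an arithmetic right shift on int32_t, with 0 < B <= 32):

#include <cstdint>
#include <cstdio>

int32_t signExtend32(uint32_t X, unsigned B) {
  return (int32_t)(X << (32 - B)) >> (32 - B);
}

int main() {
  // A 5-bit field holding 0x1F sign-extends to -1, as in the vsplti check.
  std::printf("%d\n", signExtend32(0x1F, 5)); // prints -1
}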
@@ -1204,6 +1250,14 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
const Constant *C = CP->getConstVal();
+ // 64-bit SVR4 ABI code is always position-independent.
+ // The actual address of the constant pool entry is stored in the TOC.
+ if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
+ SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
+ return DAG.getNode(PPCISD::TOC_ENTRY, CP->getDebugLoc(), MVT::i64, GA,
+ DAG.getRegister(PPC::X2, MVT::i64));
+ }
+
unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
SDValue CPIHi =
@@ -1217,6 +1271,14 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+ // 64-bit SVR4 ABI code is always position-independent.
+ // The actual address of the jump table is stored in the TOC.
+ if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
+ SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
+ return DAG.getNode(PPCISD::TOC_ENTRY, JT->getDebugLoc(), MVT::i64, GA,
+ DAG.getRegister(PPC::X2, MVT::i64));
+ }
+
unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
@@ -1232,8 +1294,8 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
- SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag);
- SDValue TgtBALo = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOLoFlag);
+ SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
+ SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}
@@ -1441,7 +1503,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
MachinePointerInfo(),
MVT::i32, false, false, 0);
- return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
+ return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
false, false, false, 0);
}
@@ -1461,7 +1523,7 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = (PtrVT == MVT::i64);
Type *IntPtrTy =
- DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType(
+ DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
*DAG.getContext());
TargetLowering::ArgListTy Args;
@@ -1684,9 +1746,13 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
- if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
- return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins,
- dl, DAG, InVals);
+ if (PPCSubTarget.isSVR4ABI()) {
+ if (PPCSubTarget.isPPC64())
+ return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
+ dl, DAG, InVals);
+ else
+ return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
+ dl, DAG, InVals);
} else {
return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
dl, DAG, InVals);
@@ -1694,7 +1760,7 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain,
}
SDValue
-PPCTargetLowering::LowerFormalArguments_SVR4(
+PPCTargetLowering::LowerFormalArguments_32SVR4(
SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
@@ -1911,6 +1977,334 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
return Chain;
}
+// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
+// value to MVT::i64 and then truncate to the correct register size.
+SDValue
+PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
+ SelectionDAG &DAG, SDValue ArgVal,
+ DebugLoc dl) const {
+ if (Flags.isSExt())
+ ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
+ DAG.getValueType(ObjectVT));
+ else if (Flags.isZExt())
+ ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
+ DAG.getValueType(ObjectVT));
+
+ return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
+}
+
+// Set the size that is at least reserved in caller of this function. Tail
+// call optimized functions' reserved stack space needs to be aligned so that
+// taking the difference between two stack areas will result in an aligned
+// stack.
+void
+PPCTargetLowering::setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG,
+ unsigned nAltivecParamsAtEnd,
+ unsigned MinReservedArea,
+ bool isPPC64) const {
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ // Add the Altivec parameters at the end, if needed.
+ if (nAltivecParamsAtEnd) {
+ MinReservedArea = ((MinReservedArea+15)/16)*16;
+ MinReservedArea += 16*nAltivecParamsAtEnd;
+ }
+ MinReservedArea =
+ std::max(MinReservedArea,
+ PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
+ unsigned TargetAlign
+ = DAG.getMachineFunction().getTarget().getFrameLowering()->
+ getStackAlignment();
+ unsigned AlignMask = TargetAlign-1;
+ MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
+ FI->setMinReservedArea(MinReservedArea);
+}
+
+SDValue
+PPCTargetLowering::LowerFormalArguments_64SVR4(
+ SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg>
+ &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ // TODO: add description of PPC stack frame format, or at least some docs.
+ //
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+
+ EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ // Potential tail calls could cause overwriting of argument stack slots.
+ bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
+ (CallConv == CallingConv::Fast));
+ unsigned PtrByteSize = 8;
+
+ unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true);
+ // Area that is at least reserved in caller of this function.
+ unsigned MinReservedArea = ArgOffset;
+
+ static const uint16_t GPR[] = {
+ PPC::X3, PPC::X4, PPC::X5, PPC::X6,
+ PPC::X7, PPC::X8, PPC::X9, PPC::X10,
+ };
+
+ static const uint16_t *FPR = GetFPR();
+
+ static const uint16_t VR[] = {
+ PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
+ PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
+ };
+
+ const unsigned Num_GPR_Regs = array_lengthof(GPR);
+ const unsigned Num_FPR_Regs = 13;
+ const unsigned Num_VR_Regs = array_lengthof(VR);
+
+ unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+
+ // Add DAG nodes to load the arguments or copy them out of registers. On
+ // entry to a function on PPC, the arguments start after the linkage area,
+ // although the first ones are often in registers.
+
+ SmallVector<SDValue, 8> MemOps;
+ unsigned nAltivecParamsAtEnd = 0;
+ Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+ for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo, ++FuncArg) {
+ SDValue ArgVal;
+ bool needsLoad = false;
+ EVT ObjectVT = Ins[ArgNo].VT;
+ unsigned ObjSize = ObjectVT.getSizeInBits()/8;
+ unsigned ArgSize = ObjSize;
+ ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
+
+ unsigned CurArgOffset = ArgOffset;
+
+ // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
+ if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
+ ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
+ if (isVarArg) {
+ MinReservedArea = ((MinReservedArea+15)/16)*16;
+ MinReservedArea += CalculateStackSlotSize(ObjectVT,
+ Flags,
+ PtrByteSize);
+ } else
+ nAltivecParamsAtEnd++;
+ } else
+ // Calculate min reserved area.
+ MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
+ Flags,
+ PtrByteSize);
+
+ // FIXME the codegen can be much improved in some cases.
+ // We do not have to keep everything in memory.
+ if (Flags.isByVal()) {
+ // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple of registers.
+ ObjSize = Flags.getByValSize();
+ ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+ // Empty aggregate parameters do not take up registers. Examples:
+ // struct { } a;
+ // union { } b;
+ // int c[0];
+ // etc. However, we have to provide a place-holder in InVals, so
+ // pretend we have an 8-byte item at the current address for that
+ // purpose.
+ if (!ObjSize) {
+ int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
+ SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+ InVals.push_back(FIN);
+ continue;
+ }
+ // All aggregates smaller than 8 bytes must be passed right-justified.
+ if (ObjSize < PtrByteSize)
+ CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize);
+ // The value of the object is its address.
+ int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
+ SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+ InVals.push_back(FIN);
+
+ if (ObjSize < 8) {
+ if (GPR_idx != Num_GPR_Regs) {
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
+ SDValue Store;
+
+ if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
+ EVT ObjType = (ObjSize == 1 ? MVT::i8 :
+ (ObjSize == 2 ? MVT::i16 : MVT::i32));
+ Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(FuncArg, CurArgOffset),
+ ObjType, false, false, 0);
+ } else {
+ // For sizes that don't fit a truncating store (3, 5, 6, 7),
+ // store the whole register as-is to the parameter save area
+ // slot. The address of the parameter was already calculated
+ // above (InVals.push_back(FIN)) to be the right-justified
+ // offset within the slot. For this store, we need a new
+ // frame index that points at the beginning of the slot.
+ int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
+ SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+ Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(FuncArg, ArgOffset),
+ false, false, 0);
+ }
+
+ MemOps.push_back(Store);
+ ++GPR_idx;
+ }
+ // Whether we copied from a register or not, advance the offset
+ // into the parameter save area by a full doubleword.
+ ArgOffset += PtrByteSize;
+ continue;
+ }
+
+ for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
+ // Store whatever pieces of the object are in registers
+ // to memory. ArgOffset will be the address of the beginning
+ // of the object.
+ if (GPR_idx != Num_GPR_Regs) {
+ unsigned VReg;
+ VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
+ SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(FuncArg, ArgOffset),
+ false, false, 0);
+ MemOps.push_back(Store);
+ ++GPR_idx;
+ ArgOffset += PtrByteSize;
+ } else {
+ ArgOffset += ArgSize - j;
+ break;
+ }
+ }
+ continue;
+ }
+
+ switch (ObjectVT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unhandled argument type!");
+ case MVT::i32:
+ case MVT::i64:
+ if (GPR_idx != Num_GPR_Regs) {
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
+
+ if (ObjectVT == MVT::i32)
+ // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
+ // value to MVT::i64 and then truncate to the correct register size.
+ ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
+
+ ++GPR_idx;
+ } else {
+ needsLoad = true;
+ ArgSize = PtrByteSize;
+ }
+ ArgOffset += 8;
+ break;
+
+ case MVT::f32:
+ case MVT::f64:
+ // Every 8 bytes of argument space consumes one of the GPRs available for
+ // argument passing.
+ if (GPR_idx != Num_GPR_Regs) {
+ ++GPR_idx;
+ }
+ if (FPR_idx != Num_FPR_Regs) {
+ unsigned VReg;
+
+ if (ObjectVT == MVT::f32)
+ VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
+ else
+ VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
+
+ ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
+ ++FPR_idx;
+ } else {
+ needsLoad = true;
+ ArgSize = PtrByteSize;
+ }
+
+ ArgOffset += 8;
+ break;
+ case MVT::v4f32:
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v16i8:
+ // Note that vector arguments in registers don't reserve stack space,
+ // except in varargs functions.
+ if (VR_idx != Num_VR_Regs) {
+ unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
+ ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
+ if (isVarArg) {
+ while ((ArgOffset % 16) != 0) {
+ ArgOffset += PtrByteSize;
+ if (GPR_idx != Num_GPR_Regs)
+ GPR_idx++;
+ }
+ ArgOffset += 16;
+ GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
+ }
+ ++VR_idx;
+ } else {
+ // Vectors are aligned.
+ ArgOffset = ((ArgOffset+15)/16)*16;
+ CurArgOffset = ArgOffset;
+ ArgOffset += 16;
+ needsLoad = true;
+ }
+ break;
+ }
+
+ // We need to load the argument to a virtual register if we determined
+ // above that we ran out of physical registers of the appropriate type.
+ if (needsLoad) {
+ int FI = MFI->CreateFixedObject(ObjSize,
+ CurArgOffset + (ArgSize - ObjSize),
+ isImmutable);
+ SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+ ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
+ false, false, false, 0);
+ }
+
+ InVals.push_back(ArgVal);
+ }
+
+ // Set the size that is at least reserved in caller of this function. Tail
+ // call optimized functions' reserved stack space needs to be aligned so that
+ // taking the difference between two stack areas will result in an aligned
+ // stack.
+ setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true);
+
+ // If the function takes variable number of arguments, make a frame index for
+ // the start of the first vararg value... for expansion of llvm.va_start.
+ if (isVarArg) {
+ int Depth = ArgOffset;
+
+ FuncInfo->setVarArgsFrameIndex(
+ MFI->CreateFixedObject(PtrByteSize, Depth, true));
+ SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
+
+ // If this function is vararg, store any remaining integer argument regs
+ // to their spots on the stack so that they may be loaded by dereferencing the
+ // result of va_next.
+ for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(), false, false, 0);
+ MemOps.push_back(Store);
+ // Increment the address by PtrByteSize for the next argument to store
+ SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT);
+ FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
+ }
+ }
+
+ if (!MemOps.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl,
+ MVT::Other, &MemOps[0], MemOps.size());
+
+ return Chain;
+}
+
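An assumed illustration of the right-justification rule in the byval handling above: an aggregate smaller than 8 bytes sits at the end of its doubleword slot in the parameter save area.

#include <cstdio>

unsigned rightJustifiedOffset(unsigned SlotOffset, unsigned ObjSize,
                              unsigned PtrByteSize = 8) {
  return ObjSize < PtrByteSize ? SlotOffset + (PtrByteSize - ObjSize)
                               : SlotOffset;
}

int main() {
  // A 3-byte struct whose slot begins at offset 48 is addressed at 53.
  std::printf("%u\n", rightJustifiedOffset(48, 3)); // prints 53
}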
SDValue
PPCTargetLowering::LowerFormalArguments_Darwin(
SDValue Chain,
@@ -1987,10 +2381,12 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
default: llvm_unreachable("Unhandled argument type!");
case MVT::i32:
case MVT::f32:
- VecArgOffset += isPPC64 ? 8 : 4;
+ VecArgOffset += 4;
break;
case MVT::i64: // PPC64
case MVT::f64:
+ // FIXME: We are guaranteed to be !isPPC64 at this point.
+ // Does MVT::i64 apply?
VecArgOffset += 8;
break;
case MVT::v4f32:
@@ -2013,7 +2409,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
SmallVector<SDValue, 8> MemOps;
unsigned nAltivecParamsAtEnd = 0;
- for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
+ Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+ for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo, ++FuncArg) {
SDValue ArgVal;
bool needsLoad = false;
EVT ObjectVT = Ins[ArgNo].VT;
@@ -2061,10 +2458,11 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
else
VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
+ EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(),
- ObjSize==1 ? MVT::i8 : MVT::i16,
- false, false, 0);
+ MachinePointerInfo(FuncArg,
+ CurArgOffset),
+ ObjType, false, false, 0);
MemOps.push_back(Store);
++GPR_idx;
}
@@ -2075,8 +2473,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
}
for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
// Store whatever pieces of the object are in registers
- // to memory. ArgVal will be address of the beginning of
- // the object.
+ // to memory. ArgOffset will be the address of the beginning
+ // of the object.
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg;
if (isPPC64)
@@ -2087,7 +2485,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(),
+ MachinePointerInfo(FuncArg, ArgOffset),
false, false, 0);
MemOps.push_back(Store);
++GPR_idx;
@@ -2122,18 +2520,10 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
- if (ObjectVT == MVT::i32) {
+ if (ObjectVT == MVT::i32)
// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
- if (Flags.isSExt())
- ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
- DAG.getValueType(ObjectVT));
- else if (Flags.isZExt())
- ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
- DAG.getValueType(ObjectVT));
-
- ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
- }
+ ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
++GPR_idx;
} else {
@@ -2220,23 +2610,10 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
}
// Set the size that is at least reserved in caller of this function. Tail
- // call optimized function's reserved stack space needs to be aligned so that
+ // call optimized functions' reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
// stack.
- PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
- // Add the Altivec parameters at the end, if needed.
- if (nAltivecParamsAtEnd) {
- MinReservedArea = ((MinReservedArea+15)/16)*16;
- MinReservedArea += 16*nAltivecParamsAtEnd;
- }
- MinReservedArea =
- std::max(MinReservedArea,
- PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
- unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
- getStackAlignment();
- unsigned AlignMask = TargetAlign-1;
- MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
- FI->setMinReservedArea(MinReservedArea);
+ setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64);
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
@@ -2276,8 +2653,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
return Chain;
}
-/// CalculateParameterAndLinkageAreaSize - Get the size of the paramter plus
-/// linkage area for the Darwin ABI.
+/// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
+/// linkage area for the Darwin ABI, or the 64-bit SVR4 ABI.
static unsigned
CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
bool isPPC64,
@@ -2408,7 +2785,7 @@ static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
int Addr = C->getZExtValue();
if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
- (Addr << 6 >> 6) != Addr)
+ SignExtend32<26>(Addr) != Addr)
return 0; // Top 6 bits have to be sext of immediate.
return DAG.getConstant((int)C->getZExtValue() >> 2,
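The old `(Addr << 6 >> 6)` idiom left-shifts a signed value, which is undefined behavior for negative inputs; SignExtend32<26> expresses the same 26-bit sign extension safely. A minimal sketch of what that utility computes, assuming the MathExtras.h formulation (the arithmetic right shift of a negative value is implementation-defined before C++20, but two's complement on every relevant compiler):

#include <cstdint>

// Sign-extend the bottom B bits of X to a full 32-bit signed integer,
// shifting only the unsigned representation.
template <unsigned B> int32_t signExtend32(uint32_t X) {
  return int32_t(X << (32 - B)) >> (32 - B);
}
// signExtend32<26>(Addr) != Addr rejects any address whose top six bits
// are not copies of bit 25, which is the "sext of immediate" check above.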
@@ -2686,7 +3063,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// Thus for a call through a function pointer, the following actions need
// to be performed:
// 1. Save the TOC of the caller in the TOC save area of its stack
- // frame (this is done in LowerCall_Darwin()).
+ // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
// 2. Load the address of the function entry point from the function
// descriptor.
// 3. Load the TOC of the callee from the function descriptor into r2.
@@ -2776,6 +3153,15 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
return CallOpc;
}
+static bool isLocalCall(const SDValue &Callee) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ return !G->getGlobal()->isDeclaration() &&
+ !G->getGlobal()->isWeakForLinker();
+ return false;
+}
+
SDValue
PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
@@ -2791,12 +3177,32 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
- EVT VT = VA.getValVT();
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyFromReg(Chain, dl,
- VA.getLocReg(), VT, InFlag).getValue(1);
- InVals.push_back(Chain.getValue(0));
- InFlag = Chain.getValue(2);
+
+ SDValue Val = DAG.getCopyFromReg(Chain, dl,
+ VA.getLocReg(), VA.getLocVT(), InFlag);
+ Chain = Val.getValue(1);
+ InFlag = Val.getValue(2);
+
+ switch (VA.getLocInfo()) {
+ default: llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::AExt:
+ Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
+ break;
+ case CCValAssign::ZExt:
+ Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
+ DAG.getValueType(VA.getValVT()));
+ Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
+ break;
+ case CCValAssign::SExt:
+ Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
+ DAG.getValueType(VA.getValVT()));
+ Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
+ break;
+ }
+
+ InVals.push_back(Val);
}
return Chain;
@@ -2819,6 +3225,10 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
isTailCall, RegsToPass, Ops, NodeTys,
PPCSubTarget);
+ // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
+ if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64())
+ Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
+
// When performing tail call optimization the callee pops its arguments off
// the stack. Account for this here so these bytes can be pushed back on in
// PPCRegisterInfo::eliminateCallFramePseudoInstr.
@@ -2880,8 +3290,8 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
// from allocating it), resulting in an additional register being
// allocated and an unnecessary move instruction being generated.
needsTOCRestore = true;
- } else if (CallOpc == PPCISD::CALL_SVR4) {
- // Otherwise insert NOP.
+ } else if ((CallOpc == PPCISD::CALL_SVR4) && !isLocalCall(Callee)) {
+ // Otherwise insert NOP for non-local calls.
CallOpc = PPCISD::CALL_NOP_SVR4;
}
}
@@ -2923,10 +3333,16 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
Ins, DAG);
- if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64())
- return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, OutVals, Ins,
- dl, DAG, InVals);
+ if (PPCSubTarget.isSVR4ABI()) {
+ if (PPCSubTarget.isPPC64())
+ return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
+ isTailCall, Outs, OutVals, Ins,
+ dl, DAG, InVals);
+ else
+ return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
+ isTailCall, Outs, OutVals, Ins,
+ dl, DAG, InVals);
+ }
return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
isTailCall, Outs, OutVals, Ins,
@@ -2934,15 +3350,15 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
SDValue
-PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- // See PPCTargetLowering::LowerFormalArguments_SVR4() for a description
+PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
// of the 32-bit SVR4 ABI stack frame layout.
assert((CallConv == CallingConv::C ||
@@ -3116,12 +3532,406 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
- // Set CR6 to true if this is a vararg call with floating args passed in
+ // Build a sequence of copy-to-reg nodes chained together with token chain
+ // and flag operands which copy the outgoing args into the appropriate regs.
+ SDValue InFlag;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+ // Set CR bit 6 to true if this is a vararg call with floating args passed in
// registers.
if (isVarArg) {
- SDValue SetCR(DAG.getMachineNode(seenFloatArg ? PPC::CRSET : PPC::CRUNSET,
- dl, MVT::i32), 0);
- RegsToPass.push_back(std::make_pair(unsigned(PPC::CR1EQ), SetCR));
+ SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, InFlag };
+
+ Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
+ dl, VTs, Ops, InFlag.getNode() ? 2 : 1);
+
+ InFlag = Chain.getValue(1);
+ }
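For context: under the 32-bit SVR4 ABI a vararg callee must know, before conditionally saving the FP argument registers, whether any of them actually carry arguments, and the caller communicates that in CR bit 6; this is what the CR6SET/CR6UNSET glue above emits. A hedged illustration of calls that would take each path:

#include <cstdio>

void cr6Demo() {
  std::printf("%d\n", 42);    // no FP args in registers -> PPCISD::CR6UNSET
  std::printf("%f\n", 3.14);  // a double lands in an FPR -> PPCISD::CR6SET
}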
+
+ if (isTailCall)
+ PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
+ false, TailCallArguments);
+
+ return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
+ RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
+ Ins, InVals);
+}
+
+// Copy an argument into memory, being careful to do this outside the
+// call sequence for the call to which the argument belongs.
+SDValue
+PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
+ SDValue CallSeqStart,
+ ISD::ArgFlagsTy Flags,
+ SelectionDAG &DAG,
+ DebugLoc dl) const {
+ SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
+ CallSeqStart.getNode()->getOperand(0),
+ Flags, DAG, dl);
+ // The MEMCPY must go outside the CALLSEQ_START..END.
+ SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
+ CallSeqStart.getNode()->getOperand(1));
+ DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
+ NewCallSeqStart.getNode());
+ return NewCallSeqStart;
+}
+
+SDValue
+PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+
+ unsigned NumOps = Outs.size();
+
+ EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ unsigned PtrByteSize = 8;
+
+ MachineFunction &MF = DAG.getMachineFunction();
+
+  // Mark this function as potentially containing a function that contains a
+  // tail call. As a consequence, the frame pointer will be used for dynamic
+  // stack allocation and for restoring the caller's stack pointer in this
+  // function's epilogue. This is done because a tail-called function might
+  // overwrite the value in this function's (MF) stack pointer stack slot
+  // 0(SP).
+ if (getTargetMachine().Options.GuaranteedTailCallOpt &&
+ CallConv == CallingConv::Fast)
+ MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
+
+ unsigned nAltivecParamsAtEnd = 0;
+
+  // Count how many bytes are to be pushed on the stack, including the linkage
+  // area and the parameter passing area. We start with at least 48 bytes,
+  // which is reserved space for [SP][CR][LR][3 x unused].
+ // NOTE: For PPC64, nAltivecParamsAtEnd always remains zero as a result
+ // of this call.
+ unsigned NumBytes =
+ CalculateParameterAndLinkageAreaSize(DAG, true, isVarArg, CallConv,
+ Outs, OutVals, nAltivecParamsAtEnd);
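As a rough picture of those 48 bytes, here is a sketch assuming the classic 64-bit SVR4 linkage layout; the only offset this lowering relies on explicitly is the TOC save slot at 40, used for indirect calls further down:

#include <cstdint>

struct LinkageArea64SVR4 {        // offsets from the stack pointer
  uint64_t BackChain;             //  0: [SP] caller's stack pointer
  uint64_t CRSave;                //  8: [CR]
  uint64_t LRSave;                // 16: [LR]
  uint64_t Unused[2];             // 24, 32: the "3 x unused" words...
  uint64_t TOCSave;               // 40: ...one of which saves r2 (TOC)
};
static_assert(sizeof(LinkageArea64SVR4) == 48, "linkage area is 48 bytes");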
+
+ // Calculate by how many bytes the stack has to be adjusted in case of tail
+ // call optimization.
+ int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
+
+ // To protect arguments on the stack from being clobbered in a tail call,
+ // force all the loads to happen before doing any other lowering.
+ if (isTailCall)
+ Chain = DAG.getStackArgumentTokenFactor(Chain);
+
+ // Adjust the stack pointer for the new arguments...
+ // These operations are automatically eliminated by the prolog/epilog pass
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ SDValue CallSeqStart = Chain;
+
+  // Load the return address and frame pointer so they can be moved somewhere
+  // else later.
+ SDValue LROp, FPOp;
+ Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
+ dl);
+
+  // Set up a copy of the stack pointer for use in loading and storing any
+  // arguments that may not fit in the registers available for argument
+  // passing.
+ SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
+
+ // Figure out which arguments are going to go in registers, and which in
+  // memory. Also, if this is a vararg function, floating-point arguments
+  // must be stored to our stack, and loaded into integer regs as well, if
+  // any integer regs are available for argument passing.
+ unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true);
+ unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+
+ static const uint16_t GPR[] = {
+ PPC::X3, PPC::X4, PPC::X5, PPC::X6,
+ PPC::X7, PPC::X8, PPC::X9, PPC::X10,
+ };
+ static const uint16_t *FPR = GetFPR();
+
+ static const uint16_t VR[] = {
+ PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
+ PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
+ };
+ const unsigned NumGPRs = array_lengthof(GPR);
+ const unsigned NumFPRs = 13;
+ const unsigned NumVRs = array_lengthof(VR);
+
+ SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+ SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
+
+ SmallVector<SDValue, 8> MemOpChains;
+ for (unsigned i = 0; i != NumOps; ++i) {
+ SDValue Arg = OutVals[i];
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ // PtrOff will be used to store the current argument to the stack if a
+ // register cannot be found for it.
+ SDValue PtrOff;
+
+ PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
+
+ PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
+
+ // Promote integers to 64-bit values.
+ if (Arg.getValueType() == MVT::i32) {
+ // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
+ unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+ Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
+ }
+
+ // FIXME memcpy is used way more than necessary. Correctness first.
+ // Note: "by value" is code for passing a structure by value, not
+ // basic types.
+ if (Flags.isByVal()) {
+ // Note: Size includes alignment padding, so
+ // struct x { short a; char b; }
+ // will have Size = 4. With #pragma pack(1), it will have Size = 3.
+ // These are the proper values we need for right-justifying the
+ // aggregate in a parameter register.
+ unsigned Size = Flags.getByValSize();
+
+ // An empty aggregate parameter takes up no storage and no
+ // registers.
+ if (Size == 0)
+ continue;
+
+ // All aggregates smaller than 8 bytes must be passed right-justified.
+ if (Size==1 || Size==2 || Size==4) {
+ EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
+ if (GPR_idx != NumGPRs) {
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
+ MachinePointerInfo(), VT,
+ false, false, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+
+ ArgOffset += PtrByteSize;
+ continue;
+ }
+ }
+
+ if (GPR_idx == NumGPRs && Size < 8) {
+ SDValue Const = DAG.getConstant(PtrByteSize - Size,
+ PtrOff.getValueType());
+ SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
+ Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
+ CallSeqStart,
+ Flags, DAG, dl);
+ ArgOffset += PtrByteSize;
+ continue;
+ }
+ // Copy entire object into memory. There are cases where gcc-generated
+ // code assumes it is there, even if it could be put entirely into
+ // registers. (This is not what the doc says.)
+
+ // FIXME: The above statement is likely due to a misunderstanding of the
+ // documents. All arguments must be copied into the parameter area BY
+ // THE CALLEE in the event that the callee takes the address of any
+ // formal argument. That has not yet been implemented. However, it is
+ // reasonable to use the stack area as a staging area for the register
+ // load.
+
+ // Skip this for small aggregates, as we will use the same slot for a
+ // right-justified copy, below.
+ if (Size >= 8)
+ Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
+ CallSeqStart,
+ Flags, DAG, dl);
+
+ // When a register is available, pass a small aggregate right-justified.
+ if (Size < 8 && GPR_idx != NumGPRs) {
+ // The easiest way to get this right-justified in a register
+ // is to copy the structure into the rightmost portion of a
+ // local variable slot, then load the whole slot into the
+ // register.
+ // FIXME: The memcpy seems to produce pretty awful code for
+ // small aggregates, particularly for packed ones.
+ // FIXME: It would be preferable to use the slot in the
+ // parameter save area instead of a new local variable.
+ SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType());
+ SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
+ Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
+ CallSeqStart,
+ Flags, DAG, dl);
+
+ // Load the slot into the register.
+ SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff,
+ MachinePointerInfo(),
+ false, false, false, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+
+ // Done with this argument.
+ ArgOffset += PtrByteSize;
+ continue;
+ }
+
+ // For aggregates larger than PtrByteSize, copy the pieces of the
+ // object that fit into registers from the parameter save area.
+ for (unsigned j=0; j<Size; j+=PtrByteSize) {
+ SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
+ SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
+ if (GPR_idx != NumGPRs) {
+ SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
+ MachinePointerInfo(),
+ false, false, false, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+ ArgOffset += PtrByteSize;
+ } else {
+ ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
+ break;
+ }
+ }
+ continue;
+ }
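A runnable sketch of the right-justification this block performs, using the struct from the comment above; the memcpy destination mirrors AddPtr = PtrOff + (8 - Size). Illustrative only: the low-order placement in the register holds on big-endian PPC64, so running this on a little-endian host prints a different layout.

#include <cstdint>
#include <cstdio>
#include <cstring>

struct X { short a; char b; };    // Size = 4 with padding, as noted above

int main() {
  X Arg = {0x1122, 0x33};
  uint64_t Slot = 0;              // the 8-byte parameter doubleword
  // Copy into the rightmost bytes of the slot (offset 8 - Size)...
  std::memcpy(reinterpret_cast<char *>(&Slot) + (8 - sizeof(X)),
              &Arg, sizeof(X));
  // ...so loading the whole doubleword leaves the aggregate in the
  // low-order end of the register on a big-endian target.
  std::printf("%016llx\n", static_cast<unsigned long long>(Slot));
  return 0;
}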
+
+ switch (Arg.getValueType().getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unexpected ValueType for argument!");
+ case MVT::i32:
+ case MVT::i64:
+ if (GPR_idx != NumGPRs) {
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
+ } else {
+ LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
+ true, isTailCall, false, MemOpChains,
+ TailCallArguments, dl);
+ }
+ ArgOffset += PtrByteSize;
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ if (FPR_idx != NumFPRs) {
+ RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
+
+ if (isVarArg) {
+ // A single float or an aggregate containing only a single float
+ // must be passed right-justified in the stack doubleword, and
+ // in the GPR, if one is available.
+ SDValue StoreOff;
+ if (Arg.getValueType().getSimpleVT().SimpleTy == MVT::f32) {
+ SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
+ StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
+ } else
+ StoreOff = PtrOff;
+
+ SDValue Store = DAG.getStore(Chain, dl, Arg, StoreOff,
+ MachinePointerInfo(), false, false, 0);
+ MemOpChains.push_back(Store);
+
+ // Float varargs are always shadowed in available integer registers
+ if (GPR_idx != NumGPRs) {
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
+ MachinePointerInfo(), false, false,
+ false, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+ }
+ } else if (GPR_idx != NumGPRs)
+ // If we have any FPRs remaining, we may also have GPRs remaining.
+ ++GPR_idx;
+ } else {
+ // Single-precision floating-point values are mapped to the
+ // second (rightmost) word of the stack doubleword.
+ if (Arg.getValueType() == MVT::f32) {
+ SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
+ PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
+ }
+
+ LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
+ true, isTailCall, false, MemOpChains,
+ TailCallArguments, dl);
+ }
+ ArgOffset += 8;
+ break;
+ case MVT::v4f32:
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v16i8:
+ if (isVarArg) {
+      // These are passed aligned on the stack, or in the corresponding R
+      // registers when within range. The Darwin PPC ABI doc claims they also
+      // go in V registers; in fact gcc does this only for arguments that are
+      // prototyped, not for those that match the "...". We do it for all
+      // arguments; it seems to work.
+      while (ArgOffset % 16 != 0) {
+ ArgOffset += PtrByteSize;
+ if (GPR_idx != NumGPRs)
+ GPR_idx++;
+ }
+ // We could elide this store in the case where the object fits
+ // entirely in R registers. Maybe later.
+ PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
+ DAG.getConstant(ArgOffset, PtrVT));
+ SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(), false, false, 0);
+ MemOpChains.push_back(Store);
+ if (VR_idx != NumVRs) {
+ SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
+ MachinePointerInfo(),
+ false, false, false, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
+ }
+ ArgOffset += 16;
+ for (unsigned i=0; i<16; i+=PtrByteSize) {
+ if (GPR_idx == NumGPRs)
+ break;
+ SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
+ DAG.getConstant(i, PtrVT));
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
+ false, false, false, 0);
+ MemOpChains.push_back(Load.getValue(1));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+ }
+ break;
+ }
+
+ // Non-varargs Altivec params generally go in registers, but have
+ // stack space allocated at the end.
+ if (VR_idx != NumVRs) {
+ // Doesn't have GPR space allocated.
+ RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
+ } else {
+ LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
+ true, isTailCall, true, MemOpChains,
+ TailCallArguments, dl);
+ ArgOffset += 16;
+ }
+ break;
+ }
+ }
+
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ &MemOpChains[0], MemOpChains.size());
+
+ // Check if this is an indirect call (MTCTR/BCTRL).
+ // See PrepareCall() for more information about calls through function
+ // pointers in the 64-bit SVR4 ABI.
+ if (!isTailCall &&
+ !dyn_cast<GlobalAddressSDNode>(Callee) &&
+ !dyn_cast<ExternalSymbolSDNode>(Callee) &&
+ !isBLACompatibleAddress(Callee, DAG)) {
+ // Load r2 into a virtual register and store it to the TOC save area.
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
+ // TOC save area offset.
+ SDValue PtrOff = DAG.getIntPtrConstant(40);
+ SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
+ Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(),
+ false, false, 0);
+ // R12 must contain the address of an indirect callee. This does not
+ // mean the MTCTR instruction must use R12; it's easier to model this
+ // as an extra parameter, so do that.
+ RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
}
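A hedged model of what steps 1-3 from the PrepareCall comment operate on. The descriptor field layout below is an assumption (the classic 64-bit SVR4/ELFv1 convention), and the scratch register choice is illustrative; the offset-40 TOC save matches the store just above:

#include <cstdint>

struct FunctionDescriptor {   // assumed ELFv1 layout
  uint64_t EntryPoint;        // address of the function's code
  uint64_t TOC;               // callee's TOC base, loaded into r2
  uint64_t Environment;       // unused for C/C++
};
// Conceptual caller-side sequence for an indirect call (r12 = Callee,
// the descriptor address, per the extra parameter added above):
//   std r2, 40(r1)     ; 1. save caller's TOC (the store emitted above)
//   ld  r11, 0(r12)    ; 2. entry point from the descriptor
//   ld  r2,  8(r12)    ; 3. callee's TOC into r2
//   mtctr r11 ; bctrl  ; call through CTR
//   ld  r2, 40(r1)     ; restore the caller's TOC on return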
// Build a sequence of copy-to-reg nodes chained together with token chain
@@ -3134,8 +3944,8 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
}
if (isTailCall)
- PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
- false, TailCallArguments);
+ PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
+ FPOp, true, TailCallArguments);
return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
@@ -3152,7 +3962,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
- unsigned NumOps = Outs.size();
+ unsigned NumOps = Outs.size();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
@@ -3259,11 +4069,13 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
}
// FIXME memcpy is used way more than necessary. Correctness first.
+ // Note: "by value" is code for passing a structure by value, not
+ // basic types.
if (Flags.isByVal()) {
unsigned Size = Flags.getByValSize();
+ // Very small objects are passed right-justified. Everything else is
+ // passed left-justified.
if (Size==1 || Size==2) {
- // Very small objects are passed right-justified.
- // Everything else is passed left-justified.
EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
if (GPR_idx != NumGPRs) {
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
@@ -3274,17 +4086,12 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
ArgOffset += PtrByteSize;
} else {
- SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
+ SDValue Const = DAG.getConstant(PtrByteSize - Size,
+ PtrOff.getValueType());
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
- SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
- CallSeqStart.getNode()->getOperand(0),
- Flags, DAG, dl);
- // This must go outside the CALLSEQ_START..END.
- SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
- CallSeqStart.getNode()->getOperand(1));
- DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
- NewCallSeqStart.getNode());
- Chain = CallSeqStart = NewCallSeqStart;
+ Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
+ CallSeqStart,
+ Flags, DAG, dl);
ArgOffset += PtrByteSize;
}
continue;
@@ -3292,15 +4099,13 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Copy entire object into memory. There are cases where gcc-generated
// code assumes it is there, even if it could be put entirely into
// registers. (This is not what the doc says.)
- SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
- CallSeqStart.getNode()->getOperand(0),
- Flags, DAG, dl);
- // This must go outside the CALLSEQ_START..END.
- SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
- CallSeqStart.getNode()->getOperand(1));
- DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode());
- Chain = CallSeqStart = NewCallSeqStart;
- // And copy the pieces of it that fit into registers.
+ Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
+ CallSeqStart,
+ Flags, DAG, dl);
+
+ // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
+ // copy the pieces of the object that fit into registers from the
+ // parameter save area.
for (unsigned j=0; j<Size; j+=PtrByteSize) {
SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
@@ -3369,11 +4174,10 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
              !isPPC64) // PPC64 has 64-bit GPRs, obviously :)
++GPR_idx;
}
- } else {
+ } else
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
isPPC64, isTailCall, false, MemOpChains,
TailCallArguments, dl);
- }
if (isPPC64)
ArgOffset += 8;
else
@@ -3468,22 +4272,6 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
- // Check if this is an indirect call (MTCTR/BCTRL).
- // See PrepareCall() for more information about calls through function
- // pointers in the 64-bit SVR4 ABI.
- if (!isTailCall && isPPC64 && PPCSubTarget.isSVR4ABI() &&
- !dyn_cast<GlobalAddressSDNode>(Callee) &&
- !dyn_cast<ExternalSymbolSDNode>(Callee) &&
- !isBLACompatibleAddress(Callee, DAG)) {
- // Load r2 into a virtual register and store it to the TOC save area.
- SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
- // TOC save area offset.
- SDValue PtrOff = DAG.getIntPtrConstant(40);
- SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
- Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(),
- false, false, 0);
- }
-
// On Darwin, R12 must contain the address of an indirect callee. This does
// not mean the MTCTR instruction must use R12; it's easier to model this as
// an extra parameter, so do that.
@@ -3548,8 +4336,24 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- OutVals[i], Flag);
+
+ SDValue Arg = OutVals[i];
+
+ switch (VA.getLocInfo()) {
+ default: llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ }
+
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
Flag = Chain.getValue(1);
}
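The promotion switch above is what gives small return types their ABI-level extension guarantee. A hedged source-level sketch (hypothetical function; the attribute spelling is the GCC/clang extension):

// A signext-style i8 return travels sign-extended to full register
// width, matching the CCValAssign::SExt case above.
__attribute__((noinline)) signed char passthrough(signed char C) {
  return C;   // lowered with ISD::SIGN_EXTEND to the location type
}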
@@ -3781,7 +4585,52 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
return SDValue();
if (Op.getOperand(0).getValueType() == MVT::i64) {
- SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
+ SDValue SINT = Op.getOperand(0);
+ // When converting to single-precision, we actually need to convert
+ // to double-precision first and then round to single-precision.
+ // To avoid double-rounding effects during that operation, we have
+ // to prepare the input operand. Bits that might be truncated when
+ // converting to double-precision are replaced by a bit that won't
+ // be lost at this stage, but is below the single-precision rounding
+ // position.
+ //
+ // However, if -enable-unsafe-fp-math is in effect, accept double
+ // rounding to avoid the extra overhead.
+ if (Op.getValueType() == MVT::f32 &&
+ !DAG.getTarget().Options.UnsafeFPMath) {
+
+ // Twiddle input to make sure the low 11 bits are zero. (If this
+      // is the case, we are guaranteed the value will fit into the 53-bit
+      // mantissa of an IEEE double-precision value without rounding.)
+ // If any of those low 11 bits were not zero originally, make sure
+ // bit 12 (value 2048) is set instead, so that the final rounding
+ // to single-precision gets the correct result.
+ SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
+ SINT, DAG.getConstant(2047, MVT::i64));
+ Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
+ Round, DAG.getConstant(2047, MVT::i64));
+ Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
+ Round = DAG.getNode(ISD::AND, dl, MVT::i64,
+ Round, DAG.getConstant(-2048, MVT::i64));
+
+ // However, we cannot use that value unconditionally: if the magnitude
+ // of the input value is small, the bit-twiddling we did above might
+ // end up visibly changing the output. Fortunately, in that case, we
+ // don't need to twiddle bits since the original input will convert
+ // exactly to double-precision floating-point already. Therefore,
+ // construct a conditional to use the original value if the top 11
+ // bits are all sign-bit copies, and use the rounded value computed
+ // above otherwise.
+ SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
+ SINT, DAG.getConstant(53, MVT::i32));
+ Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
+ Cond, DAG.getConstant(1, MVT::i64));
+ Cond = DAG.getSetCC(dl, MVT::i32,
+ Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT);
+
+ SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
+ }
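A hedged scalar model of the twiddling above, using plain integer arithmetic on the same constants rather than the LLVM API. The low 11 bits collapse into a single sticky bit at value 2048, so the i64-to-f64 step is exact and only the final f64-to-f32 rounding decides the result; the guard condition mirrors the SRA/SETUGT test:

#include <cstdint>

float i64ToF32Precise(int64_t SInt) {
  // Replace any nonzero low 11 bits with a sticky bit (value 2048).
  int64_t Round = ((SInt & 2047) + 2047) | SInt;
  Round &= ~2047LL;
  // Use the twiddled value only when the top 11 bits are not all
  // sign-bit copies, i.e. the value may not convert to f64 exactly.
  int64_t Cond = SInt >> 53;
  if (static_cast<uint64_t>(Cond + 1) > 1)
    SInt = Round;
  return static_cast<float>(static_cast<double>(SInt));
}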
+ SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
if (Op.getValueType() == MVT::f32)
FP = DAG.getNode(ISD::FP_ROUND, dl,
@@ -4126,7 +4975,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
unsigned TypeShiftAmt = i & (SplatBitSize-1);
// vsplti + shl self.
- if (SextVal == (i << (int)TypeShiftAmt)) {
+ if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
@@ -4171,17 +5020,17 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
}
// t = vsplti c, result = vsldoi t, t, 1
- if (SextVal == ((i << 8) | (i < 0 ? 0xFF : 0))) {
+ if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
}
// t = vsplti c, result = vsldoi t, t, 2
- if (SextVal == ((i << 16) | (i < 0 ? 0xFFFF : 0))) {
+ if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
}
// t = vsplti c, result = vsldoi t, t, 3
- if (SextVal == ((i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
+ if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
}
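The casts added in these checks sidestep undefined behavior: left-shifting a negative int is undefined in C++, so the splat value is shifted as unsigned and the bit pattern converted back (implementation-defined before C++20, two's complement everywhere in practice). A small sketch:

// Shift the sign-extended splat value without UB; e.g. for I == -1 and
// a 24-bit shift this yields the int with bit pattern 0xff000000, which
// ORed with 0xFFFFFF reconstitutes -1, as the check above expects.
static int shiftSplat(int I, unsigned Amt) {
  return static_cast<int>(static_cast<unsigned>(I) << Amt);
}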
@@ -5630,6 +6479,14 @@ PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
case 'v':
case 'y':
return C_RegisterClass;
+ case 'Z':
+ // FIXME: While Z does indicate a memory constraint, it specifically
+ // indicates an r+r address (used in conjunction with the 'y' modifier
+ // in the replacement string). Currently, we're forcing the base
+ // register to be r0 in the asm printer (which is interpreted as zero)
+ // and forming the complete address in the second register. This is
+ // suboptimal.
+ return C_Memory;
}
}
return TargetLowering::getConstraintType(Constraint);
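For reference, a hedged example of how the 'Z' constraint pairs with the '%y' operand modifier in source-level inline asm on a PowerPC target; the byte-reversed load is chosen only as a common user of the indexed r+r form the FIXME describes:

#include <cstdint>

static uint32_t loadByteReversed(const uint32_t *P) {
  uint32_t V;
  asm("lwbrx %0, %y1" : "=r"(V) : "Z"(*P));
  return V;
}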
@@ -5672,6 +6529,9 @@ PPCTargetLowering::getSingleConstraintMatchWeight(
case 'y':
weight = CW_Register;
break;
+ case 'Z':
+ weight = CW_Memory;
+ break;
}
return weight;
}
@@ -5688,9 +6548,9 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return std::make_pair(0U, &PPC::G8RCRegClass);
return std::make_pair(0U, &PPC::GPRCRegClass);
case 'f':
- if (VT == MVT::f32)
+ if (VT == MVT::f32 || VT == MVT::i32)
return std::make_pair(0U, &PPC::F4RCRegClass);
- if (VT == MVT::f64)
+ if (VT == MVT::f64 || VT == MVT::i64)
return std::make_pair(0U, &PPC::F8RCRegClass);
break;
case 'v':
@@ -5870,7 +6730,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
MFI->hasVarSizedObjects()) &&
MFI->getStackSize() &&
- !MF.getFunction()->hasFnAttr(Attribute::Naked);
+ !MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::Naked);
unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
(is31 ? PPC::R31 : PPC::R1);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
index b0a013b..b3c7f9c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -174,6 +174,10 @@ namespace llvm {
/// operand #3 optional in flag
TC_RETURN,
+ /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
+ CR6SET,
+ CR6UNSET,
+
/// STD_32 - This is the STD instruction for use with "32-bit" registers.
STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,
@@ -463,20 +467,41 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG) const;
SDValue
+ extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG,
+ SDValue ArgVal, DebugLoc dl) const;
+
+ void
+ setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG,
+ unsigned nAltivecParamsAtEnd,
+ unsigned MinReservedArea, bool isPPC64) const;
+
+ SDValue
LowerFormalArguments_Darwin(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
- LowerFormalArguments_SVR4(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ LowerFormalArguments_64SVR4(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+ SDValue
+ LowerFormalArguments_32SVR4(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue
+ createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
+ SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
+ SelectionDAG &DAG, DebugLoc dl) const;
SDValue
- LowerCall_Darwin(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ LowerCall_Darwin(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv,
bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
@@ -484,13 +509,22 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
- LowerCall_SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ LowerCall_64SVR4(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv,
+ bool isVarArg, bool isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+ SDValue
+ LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
};
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index 39778a5..9711452 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -29,6 +29,9 @@ def symbolLo64 : Operand<i64> {
let PrintMethod = "printSymbolLo";
let EncoderMethod = "getLO16Encoding";
}
+def tocentry : Operand<iPTR> {
+ let MIOperandInfo = (ops i32imm:$imm);
+}
//===----------------------------------------------------------------------===//
// 64-bit transformation functions.
@@ -60,7 +63,7 @@ def HI48_64 : SDNodeXForm<imm, [{
//
let Defs = [LR8] in
- def MovePCtoLR8 : Pseudo<(outs), (ins), "", []>,
+ def MovePCtoLR8 : Pseudo<(outs), (ins), "#MovePCtoLR8", []>,
PPC970_Unit_BRU;
// Darwin ABI Calls.
@@ -138,31 +141,31 @@ def : Pat<(PPCnop),
let usesCustomInserter = 1 in {
let Defs = [CR0] in {
def ATOMIC_LOAD_ADD_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_ADD_I64",
[(set G8RC:$dst, (atomic_load_add_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_SUB_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_SUB_I64",
[(set G8RC:$dst, (atomic_load_sub_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_OR_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_OR_I64",
[(set G8RC:$dst, (atomic_load_or_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_XOR_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_XOR_I64",
[(set G8RC:$dst, (atomic_load_xor_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_AND_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
+    (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_AND_I64",
[(set G8RC:$dst, (atomic_load_and_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_NAND_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_NAND_I64",
[(set G8RC:$dst, (atomic_load_nand_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_CMP_SWAP_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new), "",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new), "#ATOMIC_CMP_SWAP_I64",
[(set G8RC:$dst,
(atomic_cmp_swap_64 xoaddr:$ptr, G8RC:$old, G8RC:$new))]>;
def ATOMIC_SWAP_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new), "",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new), "#ATOMIC_SWAP_I64",
[(set G8RC:$dst, (atomic_swap_64 xoaddr:$ptr, G8RC:$new))]>;
}
}
@@ -231,10 +234,10 @@ def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm),
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
let Defs = [CTR8], Uses = [CTR8] in {
- def BDZ8 : IForm_ext<16, 18, 0, 0, (outs), (ins condbrtarget:$dst),
- "bdz $dst", BrB, []>;
- def BDNZ8 : IForm_ext<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
- "bdnz $dst", BrB, []>;
+ def BDZ8 : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdz $dst">;
+ def BDNZ8 : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdnz $dst">;
}
}
@@ -244,7 +247,7 @@ def MTCRF8 : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins G8RC:$rS),
PPC970_MicroCode, PPC970_Unit_CRU;
def MFCR8pseud: XFXForm_3<31, 19, (outs G8RC:$rT), (ins crbitm:$FXM),
- "", SprMFCR>,
+ "#MFCR8pseud", SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
def MFCR8 : XFXForm_3<31, 19, (outs G8RC:$rT), (ins),
@@ -275,7 +278,7 @@ def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs G8RC:$rT), (ins),
// the POWER3.
let Defs = [X1], Uses = [X1] in
-def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi),"",
+def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi),"#DYNALLOC8",
[(set G8RC:$result,
(PPCdynalloc G8RC:$negsize, iaddr:$fpsi))]>;
@@ -296,12 +299,14 @@ def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs G8RC:$rT), (ins),
let PPC970_Unit = 1 in { // FXU Operations.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def LI8 : DForm_2_r0<14, (outs G8RC:$rD), (ins symbolLo64:$imm),
"li $rD, $imm", IntSimple,
[(set G8RC:$rD, immSExt16:$imm)]>;
def LIS8 : DForm_2_r0<15, (outs G8RC:$rD), (ins symbolHi64:$imm),
"lis $rD, $imm", IntSimple,
[(set G8RC:$rD, imm16ShiftedSExt:$imm)]>;
+}
// Logical ops.
def NAND8: XForm_6<31, 476, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
@@ -459,7 +464,7 @@ def EXTSW_32_64 : XForm_11<31, 986, (outs G8RC:$rA), (ins GPRC:$rS),
let Defs = [CARRY] in {
def SRADI : XSForm_1<31, 413, (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH),
- "sradi $rA, $rS, $SH", IntRotateD,
+ "sradi $rA, $rS, $SH", IntRotateDI,
[(set G8RC:$rA, (sra G8RC:$rS, (i32 imm:$SH)))]>, isPPC64;
}
def CNTLZD : XForm_11<31, 58, (outs G8RC:$rA), (ins G8RC:$rS),
@@ -482,23 +487,23 @@ def MULLD : XOForm_1<31, 233, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB),
let isCommutable = 1 in {
def RLDIMI : MDForm_1<30, 3,
(outs G8RC:$rA), (ins G8RC:$rSi, G8RC:$rS, u6imm:$SH, u6imm:$MB),
- "rldimi $rA, $rS, $SH, $MB", IntRotateD,
+ "rldimi $rA, $rS, $SH, $MB", IntRotateDI,
[]>, isPPC64, RegConstraint<"$rSi = $rA">,
NoEncode<"$rSi">;
}
// Rotate instructions.
def RLDCL : MDForm_1<30, 0,
- (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB, u6imm:$MB),
- "rldcl $rA, $rS, $rB, $MB", IntRotateD,
+ (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB, u6imm:$MBE),
+ "rldcl $rA, $rS, $rB, $MBE", IntRotateD,
[]>, isPPC64;
def RLDICL : MDForm_1<30, 0,
- (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$MB),
- "rldicl $rA, $rS, $SH, $MB", IntRotateD,
+ (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$MBE),
+ "rldicl $rA, $rS, $SH, $MBE", IntRotateDI,
[]>, isPPC64;
def RLDICR : MDForm_1<30, 1,
- (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$ME),
- "rldicr $rA, $rS, $SH, $ME", IntRotateD,
+ (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$MBE),
+ "rldicr $rA, $rS, $SH, $MBE", IntRotateDI,
[]>, isPPC64;
def RLWINM8 : MForm_2<21,
@@ -506,7 +511,7 @@ def RLWINM8 : MForm_2<21,
"rlwinm $rA, $rS, $SH, $MB, $ME", IntGeneral,
[]>;
-def ISEL8 : AForm_1<31, 15,
+def ISEL8 : AForm_4<31, 15,
(outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB, pred:$cond),
"isel $rT, $rA, $rB, $cond", IntGeneral,
[]>;
@@ -541,19 +546,19 @@ def LWAX : XForm_1<31, 341, (outs G8RC:$rD), (ins memrr:$src),
let mayLoad = 1 in
def LHAU8 : DForm_1a<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp,
ptr_rc:$rA),
- "lhau $rD, $disp($rA)", LdStLoad,
+ "lhau $rD, $disp($rA)", LdStLHAU,
[]>, RegConstraint<"$rA = $ea_result">,
NoEncode<"$ea_result">;
// NO LWAU!
def LHAUX8 : XForm_1<31, 375, (outs G8RC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lhaux $rD, $addr", LdStLoad,
+ "lhaux $rD, $addr", LdStLHAU,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
-def LWAUX : XForm_1<31, 375, (outs G8RC:$rD, ptr_rc:$ea_result),
+def LWAUX : XForm_1<31, 373, (outs G8RC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lwaux $rD, $addr", LdStLoad,
+ "lwaux $rD, $addr", LdStLHAU,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">, isPPC64;
}
@@ -584,31 +589,31 @@ def LWZX8 : XForm_1<31, 23, (outs G8RC:$rD), (ins memrr:$src),
// Update forms.
let mayLoad = 1 in {
def LBZU8 : DForm_1<35, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lbzu $rD, $addr", LdStLoad,
+ "lbzu $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHZU8 : DForm_1<41, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lhzu $rD, $addr", LdStLoad,
+ "lhzu $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LWZU8 : DForm_1<33, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lwzu $rD, $addr", LdStLoad,
+ "lwzu $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LBZUX8 : XForm_1<31, 119, (outs G8RC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lbzux $rD, $addr", LdStLoad,
+ "lbzux $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
-def LHZUX8 : XForm_1<31, 331, (outs G8RC:$rD, ptr_rc:$ea_result),
+def LHZUX8 : XForm_1<31, 311, (outs G8RC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lhzux $rD, $addr", LdStLoad,
+ "lhzux $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
def LWZUX8 : XForm_1<31, 55, (outs G8RC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lwzux $rD, $addr", LdStLoad,
+ "lwzux $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
}
@@ -621,18 +626,26 @@ def LD : DSForm_1<58, 0, (outs G8RC:$rD), (ins memrix:$src),
"ld $rD, $src", LdStLD,
[(set G8RC:$rD, (load ixaddr:$src))]>, isPPC64;
def LDtoc: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg),
- "",
+ "#LDtoc",
[(set G8RC:$rD,
(PPCtoc_entry tglobaladdr:$disp, G8RC:$reg))]>, isPPC64;
+def LDtocJTI: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg),
+ "#LDtocJTI",
+ [(set G8RC:$rD,
+ (PPCtoc_entry tjumptable:$disp, G8RC:$reg))]>, isPPC64;
+def LDtocCPT: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg),
+ "#LDtocCPT",
+ [(set G8RC:$rD,
+ (PPCtoc_entry tconstpool:$disp, G8RC:$reg))]>, isPPC64;
let hasSideEffects = 1 in {
-let RST = 2, DS_RA = 0 in // FIXME: Should be a pseudo.
-def LDinto_toc: DSForm_1<58, 0, (outs), (ins G8RC:$reg),
+let RST = 2, DS = 2 in
+def LDinto_toc: DSForm_1a<58, 0, (outs), (ins G8RC:$reg),
"ld 2, 8($reg)", LdStLD,
[(PPCload_toc G8RC:$reg)]>, isPPC64;
-let RST = 2, DS_RA = 0 in // FIXME: Should be a pseudo.
-def LDtoc_restore : DSForm_1<58, 0, (outs), (ins),
+let RST = 2, DS = 10, RA = 1 in
+def LDtoc_restore : DSForm_1a<58, 0, (outs), (ins),
"ld 2, 40(1)", LdStLD,
[(PPCtoc_restore)]>, isPPC64;
}
@@ -642,13 +655,13 @@ def LDX : XForm_1<31, 21, (outs G8RC:$rD), (ins memrr:$src),
let mayLoad = 1 in
def LDU : DSForm_1<58, 1, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrix:$addr),
- "ldu $rD, $addr", LdStLD,
+ "ldu $rD, $addr", LdStLDU,
[]>, RegConstraint<"$addr.reg = $ea_result">, isPPC64,
NoEncode<"$ea_result">;
def LDUX : XForm_1<31, 53, (outs G8RC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "ldux $rD, $addr", LdStLoad,
+ "ldux $rD, $addr", LdStLDU,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">, isPPC64;
}
@@ -693,16 +706,16 @@ def STDX : XForm_8<31, 149, (outs), (ins G8RC:$rS, memrr:$dst),
let PPC970_Unit = 2 in {
-def STBU8 : DForm_1a<38, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
+def STBU8 : DForm_1a<39, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stbu $rS, $ptroff($ptrreg)", LdStStore,
+ "stbu $rS, $ptroff($ptrreg)", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti8 G8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STHU8 : DForm_1a<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "sthu $rS, $ptroff($ptrreg)", LdStStore,
+ "sthu $rS, $ptroff($ptrreg)", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti16 G8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
@@ -710,7 +723,7 @@ def STHU8 : DForm_1a<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
def STWU8 : DForm_1a<37, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stwu $rS, $ptroff($ptrreg)", LdStStore,
+ "stwu $rS, $ptroff($ptrreg)", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti32 G8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
@@ -718,7 +731,7 @@ def STWU8 : DForm_1a<37, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
def STDU : DSForm_1a<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
s16immX4:$ptroff, ptr_rc:$ptrreg),
- "stdu $rS, $ptroff($ptrreg)", LdStSTD,
+ "stdu $rS, $ptroff($ptrreg)", LdStSTDU,
[(set ptr_rc:$ea_res, (pre_store G8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">,
@@ -727,7 +740,7 @@ def STDU : DSForm_1a<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
def STBUX8 : XForm_8<31, 247, (outs ptr_rc:$ea_res),
(ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "stbux $rS, $ptroff, $ptrreg", LdStStore,
+ "stbux $rS, $ptroff, $ptrreg", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti8 G8RC:$rS,
ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
@@ -736,7 +749,7 @@ def STBUX8 : XForm_8<31, 247, (outs ptr_rc:$ea_res),
def STHUX8 : XForm_8<31, 439, (outs ptr_rc:$ea_res),
(ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "sthux $rS, $ptroff, $ptrreg", LdStStore,
+ "sthux $rS, $ptroff, $ptrreg", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti16 G8RC:$rS,
ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
@@ -745,7 +758,7 @@ def STHUX8 : XForm_8<31, 439, (outs ptr_rc:$ea_res),
def STWUX8 : XForm_8<31, 183, (outs ptr_rc:$ea_res),
(ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "stwux $rS, $ptroff, $ptrreg", LdStStore,
+ "stwux $rS, $ptroff, $ptrreg", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti32 G8RC:$rS,
ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
@@ -754,7 +767,7 @@ def STWUX8 : XForm_8<31, 183, (outs ptr_rc:$ea_res),
def STDUX : XForm_8<31, 181, (outs ptr_rc:$ea_res),
(ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "stdux $rS, $ptroff, $ptrreg", LdStStore,
+ "stdux $rS, $ptroff, $ptrreg", LdStSTDU,
[(set ptr_rc:$ea_res,
(pre_store G8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index b0b8423..ba58c3e 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -340,6 +340,28 @@ def VCTUXS : VXForm_1<906, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
"vctuxs $vD, $vB, $UIMM", VecFP,
[(set VRRC:$vD,
(int_ppc_altivec_vctuxs VRRC:$vB, imm:$UIMM))]>;
+
+// Defines with the UIM field set to 0 for floating-point
+// to integer (fp_to_sint/fp_to_uint) conversions and integer
+// to floating-point (sint_to_fp/uint_to_fp) conversions.
+let VA = 0 in {
+def VCFSX_0 : VXForm_1<842, (outs VRRC:$vD), (ins VRRC:$vB),
+ "vcfsx $vD, $vB, 0", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vcfsx VRRC:$vB, 0))]>;
+def VCTUXS_0 : VXForm_1<906, (outs VRRC:$vD), (ins VRRC:$vB),
+ "vctuxs $vD, $vB, 0", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vctuxs VRRC:$vB, 0))]>;
+def VCFUX_0 : VXForm_1<778, (outs VRRC:$vD), (ins VRRC:$vB),
+ "vcfux $vD, $vB, 0", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vcfux VRRC:$vB, 0))]>;
+def VCTSXS_0 : VXForm_1<970, (outs VRRC:$vD), (ins VRRC:$vB),
+ "vctsxs $vD, $vB, 0", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vctsxs VRRC:$vB, 0))]>;
+}
def VEXPTEFP : VX2_Int<394, "vexptefp", int_ppc_altivec_vexptefp>;
def VLOGEFP : VX2_Int<458, "vlogefp", int_ppc_altivec_vlogefp>;
@@ -689,3 +711,13 @@ def : Pat<(v8i16 (sra (v8i16 VRRC:$vA), (v8i16 VRRC:$vB))),
(v8i16 (VSRAH VRRC:$vA, VRRC:$vB))>;
def : Pat<(v4i32 (sra (v4i32 VRRC:$vA), (v4i32 VRRC:$vB))),
(v4i32 (VSRAW VRRC:$vA, VRRC:$vB))>;
+
+// Float to integer and integer to float conversions
+def : Pat<(v4i32 (fp_to_sint (v4f32 VRRC:$vA))),
+ (VCTSXS_0 VRRC:$vA)>;
+def : Pat<(v4i32 (fp_to_uint (v4f32 VRRC:$vA))),
+ (VCTUXS_0 VRRC:$vA)>;
+def : Pat<(v4f32 (sint_to_fp (v4i32 VRRC:$vA))),
+ (VCFSX_0 VRRC:$vA)>;
+def : Pat<(v4f32 (uint_to_fp (v4i32 VRRC:$vA))),
+ (VCFUX_0 VRRC:$vA)>;
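These patterns let plain vector conversions select the AltiVec instructions directly with UIM = 0, i.e. no scale factor. A hedged source-level trigger, assuming a compiler that provides __builtin_convertvector (clang) and -maltivec:

typedef float v4f32 __attribute__((vector_size(16)));
typedef int   v4i32 __attribute__((vector_size(16)));

// Each lane conversion below can now select vctsxs/vcfsx with a zero
// UIM instead of unrolling or calling a library routine.
v4i32 toInt(v4f32 F)   { return __builtin_convertvector(F, v4i32); }
v4f32 toFloat(v4i32 I) { return __builtin_convertvector(I, v4f32); }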
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
index a41a027..c3c171c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
@@ -94,12 +94,6 @@ class IForm<bits<6> opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr,
let Inst{31} = lk;
}
-class IForm_ext<bits<6> opcode, bits<5> bo, bit aa, bit lk, dag OOL, dag IOL,
- string asmstr, InstrItinClass itin, list<dag> pattern>
- : IForm<opcode, aa, lk, OOL, IOL, asmstr, itin, pattern> {
- let LI{0-4} = bo;
-}
-
// 1.7.2 B-Form
class BForm<bits<6> opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr>
: I<opcode, OOL, IOL, asmstr, BrB> {
@@ -118,6 +112,13 @@ class BForm<bits<6> opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr>
let Inst{31} = lk;
}
+class BForm_1<bits<6> opcode, bits<5> bo, bit aa, bit lk, dag OOL, dag IOL,
+ string asmstr>
+ : BForm<opcode, aa, lk, OOL, IOL, asmstr> {
+ let BIBO{4-0} = bo;
+ let BIBO{6-5} = 0;
+ let CR = 0;
+}
// 1.7.4 D-Form
class DForm_base<bits<6> opcode, dag OOL, dag IOL, string asmstr,
@@ -625,9 +626,9 @@ class XFXForm_5<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin>
: I<opcode, OOL, IOL, asmstr, itin> {
bits<8> FXM;
- bits<5> ST;
+ bits<5> rS;
- let Inst{6-10} = ST;
+ let Inst{6-10} = rS;
let Inst{11} = 0;
let Inst{12-19} = FXM;
let Inst{20} = 0;
@@ -666,7 +667,7 @@ class XFLForm<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
string cstr, InstrItinClass itin, list<dag>pattern>
: I<opcode, OOL, IOL, asmstr, itin> {
bits<8> FM;
- bits<5> RT;
+ bits<5> rT;
bit RC = 0; // set by isDOT
let Pattern = pattern;
@@ -675,7 +676,7 @@ class XFLForm<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{6} = 0;
let Inst{7-14} = FM;
let Inst{15} = 0;
- let Inst{16-20} = RT;
+ let Inst{16-20} = rT;
let Inst{21-30} = xo;
let Inst{31} = RC;
}
@@ -758,6 +759,26 @@ class AForm_3<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, string asmstr,
let FRB = 0;
}
+class AForm_4<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> RT;
+ bits<5> RA;
+ bits<5> RB;
+ bits<7> BIBO; // 2 bits of BI and 5 bits of BO (must be 12).
+ bits<3> CR;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = RT;
+ let Inst{11-15} = RA;
+ let Inst{16-20} = RB;
+ let Inst{21-23} = CR;
+ let Inst{24-25} = BIBO{6-5};
+ let Inst{26-30} = xo;
+ let Inst{31} = 0;
+}
+
// 1.7.13 M-Form
class MForm_1<bits<6> opcode, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 47f09dc..d9d6844 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -54,7 +54,8 @@ ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer(
const TargetMachine *TM,
const ScheduleDAG *DAG) const {
unsigned Directive = TM->getSubtarget<PPCSubtarget>().getDarwinDirective();
- if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2) {
+ if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
+ Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
const InstrItineraryData *II = TM->getInstrItineraryData();
return new PPCScoreboardHazardRecognizer(II, DAG);
}
@@ -70,7 +71,8 @@ ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
unsigned Directive = TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
// Most subtargets use a PPC970 recognizer.
- if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2) {
+ if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
+ Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
const TargetInstrInfo *TII = TM.getInstrInfo();
assert(TII && "No InstrInfo?");
@@ -568,12 +570,15 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
// STVX VAL, 0, R0
//
// FIXME: We use R0 here, because it isn't available for RA.
- NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::ADDI), PPC::R0),
+ bool Is64Bit = TM.getSubtargetImpl()->isPPC64();
+ unsigned Instr = Is64Bit ? PPC::ADDI8 : PPC::ADDI;
+ unsigned GPR0 = Is64Bit ? PPC::X0 : PPC::R0;
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Instr), GPR0),
FrameIdx, 0, 0));
NewMIs.push_back(BuildMI(MF, DL, get(PPC::STVX))
.addReg(SrcReg, getKillRegState(isKill))
- .addReg(PPC::R0)
- .addReg(PPC::R0));
+ .addReg(GPR0)
+ .addReg(GPR0));
} else {
llvm_unreachable("Unknown regclass!");
}
@@ -705,10 +710,13 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
// Dest = LVX 0, R0
//
// FIXME: We use R0 here, because it isn't available for RA.
- NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::ADDI), PPC::R0),
+ bool Is64Bit = TM.getSubtargetImpl()->isPPC64();
+ unsigned Instr = Is64Bit ? PPC::ADDI8 : PPC::ADDI;
+ unsigned GPR0 = Is64Bit ? PPC::X0 : PPC::R0;
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Instr), GPR0),
FrameIdx, 0, 0));
- NewMIs.push_back(BuildMI(MF, DL, get(PPC::LVX),DestReg).addReg(PPC::R0)
- .addReg(PPC::R0));
+ NewMIs.push_back(BuildMI(MF, DL, get(PPC::LVX),DestReg).addReg(GPR0)
+ .addReg(GPR0));
} else {
llvm_unreachable("Unknown regclass!");
}
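
Both StoreRegToStackSlot and LoadRegFromStackSlot above apply the same fix: on a 64-bit subtarget the scratch register must be X0 (the 64-bit alias of R0) and the add-immediate must be the 64-bit ADDI8, since forming the effective address with the 32-bit ADDI/R0 pair would truncate 64-bit pointers. A condensed sketch of the shared selection, written as a hypothetical helper that the patch itself does not introduce (assumes the PPC backend headers):

// Returns {add-immediate opcode, scratch GPR} for the current subtarget.
static std::pair<unsigned, unsigned>
getScratchAddPair(const PPCSubtarget &Subtarget) {
  bool Is64Bit = Subtarget.isPPC64();
  return std::make_pair(Is64Bit ? unsigned(PPC::ADDI8) : unsigned(PPC::ADDI),
                        Is64Bit ? unsigned(PPC::X0) : unsigned(PPC::R0));
}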
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index f57f0c9..6ee045a 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -123,9 +123,11 @@ def PPCnop : SDNode<"PPCISD::NOP", SDT_PPCnop, [SDNPInGlue, SDNPOutGlue]>;
def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def PPCload_toc : SDNode<"PPCISD::LOAD_TOC", SDTypeProfile<0, 1, []>,
- [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
+ [SDNPHasChain, SDNPSideEffect,
+ SDNPInGlue, SDNPOutGlue]>;
def PPCtoc_restore : SDNode<"PPCISD::TOC_RESTORE", SDTypeProfile<0, 0, []>,
- [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
+ [SDNPHasChain, SDNPSideEffect,
+ SDNPInGlue, SDNPOutGlue]>;
def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def PPCbctrl_Darwin : SDNode<"PPCISD::BCTRL_Darwin", SDTNone,
@@ -153,6 +155,12 @@ def PPClbrx : SDNode<"PPCISD::LBRX", SDT_PPClbrx,
def PPCstbrx : SDNode<"PPCISD::STBRX", SDT_PPCstbrx,
[SDNPHasChain, SDNPMayStore]>;
+// Instructions to set/unset CR bit 6 for SVR4 vararg calls
+def PPCcr6set : SDNode<"PPCISD::CR6SET", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def PPCcr6unset : SDNode<"PPCISD::CR6UNSET", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+
// Instructions to support atomic operations
def PPClarx : SDNode<"PPCISD::LARX", SDT_PPClarx,
[SDNPHasChain, SDNPMayLoad]>;
@@ -330,9 +338,6 @@ def memrix : Operand<iPTR> { // memri where the imm is shifted 2 bits.
let MIOperandInfo = (ops i32imm:$imm, ptr_rc:$reg);
let EncoderMethod = "getMemRIXEncoding";
}
-def tocentry : Operand<iPTR> {
- let MIOperandInfo = (ops i32imm:$imm);
-}
// PowerPC Predicate operand. 20 = (0<<5)|20 = always, CR0 is a dummy reg
// that doesn't matter.
@@ -364,9 +369,9 @@ def IsBookE : Predicate<"PPCSubTarget.isBookE()">;
let hasCtrlDep = 1 in {
let Defs = [R1], Uses = [R1] in {
-def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt), "",
+def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt), "#ADJCALLSTACKDOWN $amt",
[(callseq_start timm:$amt)]>;
-def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2), "",
+def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2), "#ADJCALLSTACKUP $amt1 $amt2",
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
@@ -375,7 +380,7 @@ def UPDATE_VRSAVE : Pseudo<(outs GPRC:$rD), (ins GPRC:$rS),
}
let Defs = [R1], Uses = [R1] in
-def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "",
+def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "#DYNALLOC",
[(set GPRC:$result,
(PPCdynalloc GPRC:$negsize, iaddr:$fpsi))]>;
@@ -384,19 +389,19 @@ def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "",
let usesCustomInserter = 1, // Expanded after instruction selection.
PPC970_Single = 1 in {
def SELECT_CC_I4 : Pseudo<(outs GPRC:$dst), (ins CRRC:$cond, GPRC:$T, GPRC:$F,
- i32imm:$BROPC), "",
+ i32imm:$BROPC), "#SELECT_CC_I4",
[]>;
def SELECT_CC_I8 : Pseudo<(outs G8RC:$dst), (ins CRRC:$cond, G8RC:$T, G8RC:$F,
- i32imm:$BROPC), "",
+ i32imm:$BROPC), "#SELECT_CC_I8",
[]>;
def SELECT_CC_F4 : Pseudo<(outs F4RC:$dst), (ins CRRC:$cond, F4RC:$T, F4RC:$F,
- i32imm:$BROPC), "",
+ i32imm:$BROPC), "#SELECT_CC_F4",
[]>;
def SELECT_CC_F8 : Pseudo<(outs F8RC:$dst), (ins CRRC:$cond, F8RC:$T, F8RC:$F,
- i32imm:$BROPC), "",
+ i32imm:$BROPC), "#SELECT_CC_F8",
[]>;
def SELECT_CC_VRRC: Pseudo<(outs VRRC:$dst), (ins CRRC:$cond, VRRC:$T, VRRC:$F,
- i32imm:$BROPC), "",
+ i32imm:$BROPC), "#SELECT_CC_VRRC",
[]>;
}
@@ -404,16 +409,16 @@ let usesCustomInserter = 1, // Expanded after instruction selection.
// scavenge a register for it.
let mayStore = 1 in
def SPILL_CR : Pseudo<(outs), (ins CRRC:$cond, memri:$F),
- "", []>;
+ "#SPILL_CR", []>;
// RESTORE_CR - Indicate that we're restoring the CR register (previously
// spilled), so we'll need to scavenge a register for it.
let mayLoad = 1 in
def RESTORE_CR : Pseudo<(outs CRRC:$cond), (ins memri:$F),
- "", []>;
+ "#RESTORE_CR", []>;
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
- let isReturn = 1, Uses = [LR, RM] in
+ let isCodeGenOnly = 1, isReturn = 1, Uses = [LR, RM] in
def BLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$p),
"b${p:cc}lr ${p:reg}", BrB,
[(retflag)]>;
@@ -422,7 +427,7 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
}
let Defs = [LR] in
- def MovePCtoLR : Pseudo<(outs), (ins), "", []>,
+ def MovePCtoLR : Pseudo<(outs), (ins), "#MovePCtoLR", []>,
PPC970_Unit_BRU;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
@@ -434,16 +439,17 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
// BCC represents an arbitrary conditional branch on a predicate.
// FIXME: should be able to write a pattern for PPCcondbranch, but can't use
- // a two-value operand where a dag node expects two operands. :(
- def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst),
- "b${cond:cc} ${cond:reg}, $dst"
- /*[(PPCcondbranch CRRC:$crS, imm:$opc, bb:$dst)]*/>;
+ // a two-value operand where a dag node expects two operands. :(
+ let isCodeGenOnly = 1 in
+ def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst),
+ "b${cond:cc} ${cond:reg}, $dst"
+ /*[(PPCcondbranch CRRC:$crS, imm:$opc, bb:$dst)]*/>;
let Defs = [CTR], Uses = [CTR] in {
- def BDZ : IForm_ext<16, 18, 0, 0, (outs), (ins condbrtarget:$dst),
- "bdz $dst", BrB, []>;
- def BDNZ : IForm_ext<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
- "bdnz $dst", BrB, []>;
+ def BDZ : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdz $dst">;
+ def BDNZ : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdnz $dst">;
}
}
@@ -559,81 +565,81 @@ def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 1)),
let usesCustomInserter = 1 in {
let Defs = [CR0] in {
def ATOMIC_LOAD_ADD_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I8",
[(set GPRC:$dst, (atomic_load_add_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_SUB_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I8",
[(set GPRC:$dst, (atomic_load_sub_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_AND_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I8",
[(set GPRC:$dst, (atomic_load_and_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_OR_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I8",
[(set GPRC:$dst, (atomic_load_or_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_XOR_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "ATOMIC_LOAD_XOR_I8",
[(set GPRC:$dst, (atomic_load_xor_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_NAND_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I8",
[(set GPRC:$dst, (atomic_load_nand_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_ADD_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I16",
[(set GPRC:$dst, (atomic_load_add_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_SUB_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I16",
[(set GPRC:$dst, (atomic_load_sub_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_AND_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I16",
[(set GPRC:$dst, (atomic_load_and_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_OR_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I16",
[(set GPRC:$dst, (atomic_load_or_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_XOR_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_XOR_I16",
[(set GPRC:$dst, (atomic_load_xor_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_NAND_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I16",
[(set GPRC:$dst, (atomic_load_nand_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_ADD_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I32",
[(set GPRC:$dst, (atomic_load_add_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_SUB_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I32",
[(set GPRC:$dst, (atomic_load_sub_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_AND_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I32",
[(set GPRC:$dst, (atomic_load_and_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_OR_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I32",
[(set GPRC:$dst, (atomic_load_or_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_XOR_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_XOR_I32",
[(set GPRC:$dst, (atomic_load_xor_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_NAND_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I32",
[(set GPRC:$dst, (atomic_load_nand_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_CMP_SWAP_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I8",
[(set GPRC:$dst,
(atomic_cmp_swap_8 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>;
def ATOMIC_CMP_SWAP_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I16 $dst $ptr $old $new",
[(set GPRC:$dst,
(atomic_cmp_swap_16 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>;
def ATOMIC_CMP_SWAP_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I32 $dst $ptr $old $new",
[(set GPRC:$dst,
(atomic_cmp_swap_32 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>;
def ATOMIC_SWAP_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_i8",
[(set GPRC:$dst, (atomic_swap_8 xoaddr:$ptr, GPRC:$new))]>;
def ATOMIC_SWAP_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_I16",
[(set GPRC:$dst, (atomic_swap_16 xoaddr:$ptr, GPRC:$new))]>;
def ATOMIC_SWAP_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_I32",
[(set GPRC:$dst, (atomic_swap_32 xoaddr:$ptr, GPRC:$new))]>;
}
}
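
A note on the "#NAME" strings threaded through this hunk and the SELECT_CC/SPILL_CR changes above: these pseudos previously carried empty asm strings, so any that survived to a late MI dump or to the assembly printer appeared as blank lines. Giving each a placeholder beginning with '#' (the ELF PowerPC assembly comment character) makes such output attributable without affecting generated code, since the pseudos are expanded before real instructions are emitted.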
@@ -673,7 +679,7 @@ def LWZ : DForm_1<32, (outs GPRC:$rD), (ins memri:$src),
[(set GPRC:$rD, (load iaddr:$src))]>;
def LFS : DForm_1<48, (outs F4RC:$rD), (ins memri:$src),
- "lfs $rD, $src", LdStLFDU,
+ "lfs $rD, $src", LdStLFD,
[(set F4RC:$rD, (load iaddr:$src))]>;
def LFD : DForm_1<50, (outs F8RC:$rD), (ins memri:$src),
"lfd $rD, $src", LdStLFD,
@@ -683,32 +689,32 @@ def LFD : DForm_1<50, (outs F8RC:$rD), (ins memri:$src),
// Unindexed (r+i) Loads with Update (preinc).
let mayLoad = 1 in {
def LBZU : DForm_1<35, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lbzu $rD, $addr", LdStLoad,
+ "lbzu $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHAU : DForm_1<43, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lhau $rD, $addr", LdStLoad,
+ "lhau $rD, $addr", LdStLHAU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHZU : DForm_1<41, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lhzu $rD, $addr", LdStLoad,
+ "lhzu $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LWZU : DForm_1<33, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lwzu $rD, $addr", LdStLoad,
+ "lwzu $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LFSU : DForm_1<49, (outs F4RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lfs $rD, $addr", LdStLFDU,
+ "lfsu $rD, $addr", LdStLFDU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lfd $rD, $addr", LdStLFD,
+ "lfdu $rD, $addr", LdStLFDU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
@@ -716,37 +722,37 @@ def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
// Indexed (r+r) Loads with Update (preinc).
def LBZUX : XForm_1<31, 119, (outs GPRC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lbzux $rD, $addr", LdStLoad,
+ "lbzux $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
def LHAUX : XForm_1<31, 375, (outs GPRC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lhaux $rD, $addr", LdStLoad,
+ "lhaux $rD, $addr", LdStLHAU,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
-def LHZUX : XForm_1<31, 331, (outs GPRC:$rD, ptr_rc:$ea_result),
+def LHZUX : XForm_1<31, 311, (outs GPRC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lhzux $rD, $addr", LdStLoad,
+ "lhzux $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
def LWZUX : XForm_1<31, 55, (outs GPRC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lwzux $rD, $addr", LdStLoad,
+ "lwzux $rD, $addr", LdStLoadUpd,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
def LFSUX : XForm_1<31, 567, (outs F4RC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lfsux $rD, $addr", LdStLoad,
+ "lfsux $rD, $addr", LdStLFDU,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
def LFDUX : XForm_1<31, 631, (outs F8RC:$rD, ptr_rc:$ea_result),
(ins memrr:$addr),
- "lfdux $rD, $addr", LdStLoad,
+ "lfdux $rD, $addr", LdStLFDU,
[]>, RegConstraint<"$addr.offreg = $ea_result">,
NoEncode<"$ea_result">;
}
@@ -778,10 +784,10 @@ def LWBRX : XForm_1<31, 534, (outs GPRC:$rD), (ins memrr:$src),
[(set GPRC:$rD, (PPClbrx xoaddr:$src, i32))]>;
def LFSX : XForm_25<31, 535, (outs F4RC:$frD), (ins memrr:$src),
- "lfsx $frD, $src", LdStLFDU,
+ "lfsx $frD, $src", LdStLFD,
[(set F4RC:$frD, (load xaddr:$src))]>;
def LFDX : XForm_25<31, 599, (outs F8RC:$frD), (ins memrr:$src),
- "lfdx $frD, $src", LdStLFDU,
+ "lfdx $frD, $src", LdStLFD,
[(set F8RC:$frD, (load xaddr:$src))]>;
}
@@ -801,10 +807,10 @@ def STW : DForm_1<36, (outs), (ins GPRC:$rS, memri:$src),
"stw $rS, $src", LdStStore,
[(store GPRC:$rS, iaddr:$src)]>;
def STFS : DForm_1<52, (outs), (ins F4RC:$rS, memri:$dst),
- "stfs $rS, $dst", LdStUX,
+ "stfs $rS, $dst", LdStSTFD,
[(store F4RC:$rS, iaddr:$dst)]>;
def STFD : DForm_1<54, (outs), (ins F8RC:$rS, memri:$dst),
- "stfd $rS, $dst", LdStUX,
+ "stfd $rS, $dst", LdStSTFD,
[(store F8RC:$rS, iaddr:$dst)]>;
}
@@ -812,33 +818,33 @@ def STFD : DForm_1<54, (outs), (ins F8RC:$rS, memri:$dst),
let PPC970_Unit = 2 in {
def STBU : DForm_1a<39, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stbu $rS, $ptroff($ptrreg)", LdStStore,
+ "stbu $rS, $ptroff($ptrreg)", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti8 GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STHU : DForm_1a<45, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "sthu $rS, $ptroff($ptrreg)", LdStStore,
+ "sthu $rS, $ptroff($ptrreg)", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti16 GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STWU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stwu $rS, $ptroff($ptrreg)", LdStStore,
+ "stwu $rS, $ptroff($ptrreg)", LdStStoreUpd,
[(set ptr_rc:$ea_res, (pre_store GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STFSU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F4RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stfsu $rS, $ptroff($ptrreg)", LdStStore,
+ "stfsu $rS, $ptroff($ptrreg)", LdStSTFDU,
[(set ptr_rc:$ea_res, (pre_store F4RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STFDU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stfdu $rS, $ptroff($ptrreg)", LdStStore,
+ "stfdu $rS, $ptroff($ptrreg)", LdStSTFDU,
[(set ptr_rc:$ea_res, (pre_store F8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
@@ -863,7 +869,7 @@ def STWX : XForm_8<31, 151, (outs), (ins GPRC:$rS, memrr:$dst),
def STBUX : XForm_8<31, 247, (outs ptr_rc:$ea_res),
(ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "stbux $rS, $ptroff, $ptrreg", LdStStore,
+ "stbux $rS, $ptroff, $ptrreg", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti8 GPRC:$rS,
ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
@@ -872,7 +878,7 @@ def STBUX : XForm_8<31, 247, (outs ptr_rc:$ea_res),
def STHUX : XForm_8<31, 439, (outs ptr_rc:$ea_res),
(ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "sthux $rS, $ptroff, $ptrreg", LdStStore,
+ "sthux $rS, $ptroff, $ptrreg", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_truncsti16 GPRC:$rS,
ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
@@ -881,7 +887,7 @@ def STHUX : XForm_8<31, 439, (outs ptr_rc:$ea_res),
def STWUX : XForm_8<31, 183, (outs ptr_rc:$ea_res),
(ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "stwux $rS, $ptroff, $ptrreg", LdStStore,
+ "stwux $rS, $ptroff, $ptrreg", LdStStoreUpd,
[(set ptr_rc:$ea_res,
(pre_store GPRC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
@@ -889,7 +895,7 @@ def STWUX : XForm_8<31, 183, (outs ptr_rc:$ea_res),
def STFSUX : XForm_8<31, 695, (outs ptr_rc:$ea_res),
(ins F4RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "stfsux $rS, $ptroff, $ptrreg", LdStStore,
+ "stfsux $rS, $ptroff, $ptrreg", LdStSTFDU,
[(set ptr_rc:$ea_res,
(pre_store F4RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
@@ -897,7 +903,7 @@ def STFSUX : XForm_8<31, 695, (outs ptr_rc:$ea_res),
def STFDUX : XForm_8<31, 759, (outs ptr_rc:$ea_res),
(ins F8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
- "stfdux $rS, $ptroff, $ptrreg", LdStStore,
+ "stfdux $rS, $ptroff, $ptrreg", LdStSTFDU,
[(set ptr_rc:$ea_res,
(pre_store F8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
@@ -913,14 +919,14 @@ def STWBRX: XForm_8<31, 662, (outs), (ins GPRC:$rS, memrr:$dst),
PPC970_DGroup_Cracked;
def STFIWX: XForm_28<31, 983, (outs), (ins F8RC:$frS, memrr:$dst),
- "stfiwx $frS, $dst", LdStUX,
+ "stfiwx $frS, $dst", LdStSTFD,
[(PPCstfiwx F8RC:$frS, xoaddr:$dst)]>;
def STFSX : XForm_28<31, 663, (outs), (ins F4RC:$frS, memrr:$dst),
- "stfsx $frS, $dst", LdStUX,
+ "stfsx $frS, $dst", LdStSTFD,
[(store F4RC:$frS, xaddr:$dst)]>;
def STFDX : XForm_28<31, 727, (outs), (ins F8RC:$frS, memrr:$dst),
- "stfdx $frS, $dst", LdStUX,
+ "stfdx $frS, $dst", LdStSTFD,
[(store F8RC:$frS, xaddr:$dst)]>;
}
@@ -964,7 +970,7 @@ def SUBFIC : DForm_2< 8, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm),
[(set GPRC:$rD, (subc immSExt16:$imm, GPRC:$rA))]>;
}
-let isReMaterializable = 1 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def LI : DForm_2_r0<14, (outs GPRC:$rD), (ins symbolLo:$imm),
"li $rD, $imm", IntSimple,
[(set GPRC:$rD, immSExt16:$imm)]>;
@@ -1143,6 +1149,16 @@ def CRUNSET: XLForm_1_ext<19, 193, (outs CRBITRC:$dst), (ins),
"crxor $dst, $dst, $dst", BrCR,
[]>;
+let Defs = [CR1EQ], CRD = 6 in {
+def CR6SET : XLForm_1_ext<19, 289, (outs), (ins),
+ "creqv 6, 6, 6", BrCR,
+ [(PPCcr6set)]>;
+
+def CR6UNSET: XLForm_1_ext<19, 193, (outs), (ins),
+ "crxor 6, 6, 6", BrCR,
+ [(PPCcr6unset)]>;
+}
+
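
Background for CR6SET/CR6UNSET: in the 32-bit SVR4 ABI, the caller of a variadic function uses CR bit 6 to tell the callee whether any floating-point arguments were passed in registers, so the callee knows whether the FP argument registers must be stored. An illustrative caller-side sequence (not taken from this patch) would be `crxor 6, 6, 6` to clear CR6 when no FP arguments are in registers, followed by the `bl` to the variadic callee.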
// XFX-Form instructions. Instructions that deal with SPRs.
//
let Uses = [CTR] in {
@@ -1192,7 +1208,7 @@ def MTCRF : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins GPRC:$rS),
//
// FIXME: Make this a real Pseudo instruction when the JIT switches to MC.
def MFCRpseud: XFXForm_3<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM),
- "", SprMFCR>,
+ "#MFCRpseud", SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
def MFCR : XFXForm_3<31, 19, (outs GPRC:$rT), (ins),
@@ -1233,7 +1249,7 @@ let Uses = [RM] in {
PPC970_DGroup_Single, PPC970_Unit_FPU;
def FADDrtz: AForm_2<63, 21,
(outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB),
- "fadd $FRT, $FRA, $FRB", FPGeneral,
+ "fadd $FRT, $FRA, $FRB", FPAddSub,
[(set F8RC:$FRT, (PPCfaddrtz F8RC:$FRA, F8RC:$FRB))]>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
}
@@ -1364,7 +1380,7 @@ def FSELS : AForm_1<63, 23,
let Uses = [RM] in {
def FADD : AForm_2<63, 21,
(outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB),
- "fadd $FRT, $FRA, $FRB", FPGeneral,
+ "fadd $FRT, $FRA, $FRB", FPAddSub,
[(set F8RC:$FRT, (fadd F8RC:$FRA, F8RC:$FRB))]>;
def FADDS : AForm_2<59, 21,
(outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB),
@@ -1379,16 +1395,16 @@ let Uses = [RM] in {
"fdivs $FRT, $FRA, $FRB", FPDivS,
[(set F4RC:$FRT, (fdiv F4RC:$FRA, F4RC:$FRB))]>;
def FMUL : AForm_3<63, 25,
- (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB),
- "fmul $FRT, $FRA, $FRB", FPFused,
- [(set F8RC:$FRT, (fmul F8RC:$FRA, F8RC:$FRB))]>;
+ (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC),
+ "fmul $FRT, $FRA, $FRC", FPFused,
+ [(set F8RC:$FRT, (fmul F8RC:$FRA, F8RC:$FRC))]>;
def FMULS : AForm_3<59, 25,
- (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB),
- "fmuls $FRT, $FRA, $FRB", FPGeneral,
- [(set F4RC:$FRT, (fmul F4RC:$FRA, F4RC:$FRB))]>;
+ (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC),
+ "fmuls $FRT, $FRA, $FRC", FPGeneral,
+ [(set F4RC:$FRT, (fmul F4RC:$FRA, F4RC:$FRC))]>;
def FSUB : AForm_2<63, 20,
(outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB),
- "fsub $FRT, $FRA, $FRB", FPGeneral,
+ "fsub $FRT, $FRA, $FRB", FPAddSub,
[(set F8RC:$FRT, (fsub F8RC:$FRA, F8RC:$FRB))]>;
def FSUBS : AForm_2<59, 20,
(outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB),
@@ -1398,7 +1414,7 @@ let Uses = [RM] in {
}
let PPC970_Unit = 1 in { // FXU Operations.
- def ISEL : AForm_1<31, 15,
+ def ISEL : AForm_4<31, 15,
(outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB, pred:$cond),
"isel $rT, $rA, $rB, $cond", IntGeneral,
[]>;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index ab8bf1f..459c358 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -71,7 +71,7 @@ PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
: PPCGenRegisterInfo(ST.isPPC64() ? PPC::LR8 : PPC::LR,
ST.isPPC64() ? 0 : 1,
ST.isPPC64() ? 0 : 1),
- Subtarget(ST), TII(tii) {
+ Subtarget(ST), TII(tii), CRSpillFrameIdx(0) {
ImmToIdxMap[PPC::LD] = PPC::LDX; ImmToIdxMap[PPC::STD] = PPC::STDX;
ImmToIdxMap[PPC::LBZ] = PPC::LBZX; ImmToIdxMap[PPC::STB] = PPC::STBX;
ImmToIdxMap[PPC::LHZ] = PPC::LHZX; ImmToIdxMap[PPC::LHA] = PPC::LHAX;
@@ -111,10 +111,15 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return Subtarget.isPPC64() ? CSR_Darwin64_SaveList :
CSR_Darwin32_SaveList;
+ // For 32-bit SVR4, also initialize the frame index associated with
+ // the CR spill slot.
+ if (!Subtarget.isPPC64())
+ CRSpillFrameIdx = 0;
+
return Subtarget.isPPC64() ? CSR_SVR464_SaveList : CSR_SVR432_SaveList;
}
-const unsigned*
+const uint32_t*
PPCRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
if (Subtarget.isDarwinABI())
return Subtarget.isPPC64() ? CSR_Darwin64_RegMask :
@@ -477,6 +482,31 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
MBB.erase(II);
}
+bool
+PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
+ unsigned Reg, int &FrameIdx) const {
+
+ // For the nonvolatile condition registers (CR2, CR3, CR4) in an SVR4
+ // ABI, return true to prevent allocating an additional frame slot.
+ // For 64-bit, the CR save area is at SP+8; the value of FrameIdx = 0
+ // is arbitrary and will be subsequently ignored. For 32-bit, we must
+ // create exactly one stack slot and return its FrameIdx for all
+ // nonvolatiles.
+ if (Subtarget.isSVR4ABI() && PPC::CR2 <= Reg && Reg <= PPC::CR4) {
+ if (Subtarget.isPPC64()) {
+ FrameIdx = 0;
+ } else if (CRSpillFrameIdx) {
+ FrameIdx = CRSpillFrameIdx;
+ } else {
+ MachineFrameInfo *MFI = ((MachineFunction &)MF).getFrameInfo();
+ FrameIdx = MFI->CreateFixedObject((uint64_t)4, (int64_t)-4, true);
+ CRSpillFrameIdx = FrameIdx;
+ }
+ return true;
+ }
+ return false;
+}
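
The single shared slot for the 32-bit case is deliberate: mfcr and mtcrf move the condition register as one 32-bit unit, so CR2, CR3, and CR4 can be saved and restored together through one 4-byte frame object rather than through three separate slots.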
+
void
PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS) const {
@@ -566,7 +596,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// to Offset to get the correct offset.
// Naked functions have stack size 0, although getStackSize may not reflect that
// because we didn't call all the pieces that compute it for naked functions.
- if (!MF.getFunction()->hasFnAttr(Attribute::Naked))
+ if (!MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked))
Offset += MFI->getStackSize();
// If we can, encode the offset directly into the instruction. If this is a
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index 152c36d..a8fd796 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -30,6 +30,7 @@ class PPCRegisterInfo : public PPCGenRegisterInfo {
std::map<unsigned, unsigned> ImmToIdxMap;
const PPCSubtarget &Subtarget;
const TargetInstrInfo &TII;
+ mutable int CRSpillFrameIdx;
public:
PPCRegisterInfo(const PPCSubtarget &SubTarget, const TargetInstrInfo &tii);
@@ -43,7 +44,7 @@ public:
/// Code Generation virtual methods...
const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- const unsigned *getCallPreservedMask(CallingConv::ID CC) const;
+ const uint32_t *getCallPreservedMask(CallingConv::ID CC) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -65,6 +66,8 @@ public:
int SPAdj, RegScavenger *RS) const;
void lowerCRRestore(MachineBasicBlock::iterator II, unsigned FrameIndex,
int SPAdj, RegScavenger *RS) const;
+ bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
+ int &FrameIdx) const;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
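
The unsigned-to-uint32_t change in getCallPreservedMask matters because call-preserved masks are consumed as packed bit vectors: one bit per physical register, 32 registers per word. A sketch of how such a mask is queried, mirroring LLVM's regmask convention in which a set bit marks a register preserved across the call:

#include <cstdint>

static bool isPreservedAcrossCall(const uint32_t *RegMask, unsigned PhysReg) {
  // A set bit means the callee preserves this register.
  return (RegMask[PhysReg / 32] >> (PhysReg % 32)) & 1u;
}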
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td b/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
index 6a6ccb9..660c0c3 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
@@ -40,6 +40,7 @@ def IntMulHWU : InstrItinClass;
def IntMulLI : InstrItinClass;
def IntRFID : InstrItinClass;
def IntRotateD : InstrItinClass;
+def IntRotateDI : InstrItinClass;
def IntRotate : InstrItinClass;
def IntShift : InstrItinClass;
def IntTrapD : InstrItinClass;
@@ -52,15 +53,18 @@ def LdStDCBA : InstrItinClass;
def LdStDCBF : InstrItinClass;
def LdStDCBI : InstrItinClass;
def LdStLoad : InstrItinClass;
+def LdStLoadUpd : InstrItinClass;
def LdStStore : InstrItinClass;
+def LdStStoreUpd : InstrItinClass;
def LdStDSS : InstrItinClass;
def LdStICBI : InstrItinClass;
-def LdStUX : InstrItinClass;
def LdStLD : InstrItinClass;
+def LdStLDU : InstrItinClass;
def LdStLDARX : InstrItinClass;
def LdStLFD : InstrItinClass;
def LdStLFDU : InstrItinClass;
def LdStLHA : InstrItinClass;
+def LdStLHAU : InstrItinClass;
def LdStLMW : InstrItinClass;
def LdStLVecX : InstrItinClass;
def LdStLWA : InstrItinClass;
@@ -69,6 +73,9 @@ def LdStSLBIA : InstrItinClass;
def LdStSLBIE : InstrItinClass;
def LdStSTD : InstrItinClass;
def LdStSTDCX : InstrItinClass;
+def LdStSTDU : InstrItinClass;
+def LdStSTFD : InstrItinClass;
+def LdStSTFDU : InstrItinClass;
def LdStSTVEBX : InstrItinClass;
def LdStSTWCX : InstrItinClass;
def LdStSync : InstrItinClass;
@@ -86,6 +93,7 @@ def SprMTSRIN : InstrItinClass;
def SprRFI : InstrItinClass;
def SprSC : InstrItinClass;
def FPGeneral : InstrItinClass;
+def FPAddSub : InstrItinClass;
def FPCompare : InstrItinClass;
def FPDivD : InstrItinClass;
def FPDivS : InstrItinClass;
@@ -110,6 +118,8 @@ include "PPCScheduleG4.td"
include "PPCScheduleG4Plus.td"
include "PPCScheduleG5.td"
include "PPCScheduleA2.td"
+include "PPCScheduleE500mc.td"
+include "PPCScheduleE5500.td"
//===----------------------------------------------------------------------===//
// Instruction to itinerary class map - When add new opcodes to the supported
@@ -171,7 +181,7 @@ include "PPCScheduleA2.td"
// extsh IntSimple
// extsw IntSimple
// fabs FPGeneral
-// fadd FPGeneral
+// fadd FPAddSub
// fadds FPGeneral
// fcfid FPGeneral
// fcmpo FPCompare
@@ -201,35 +211,35 @@ include "PPCScheduleA2.td"
// fsel FPGeneral
// fsqrt FPSqrt
// fsqrts FPSqrt
-// fsub FPGeneral
+// fsub FPAddSub
// fsubs FPGeneral
// icbi LdStICBI
// isync SprISYNC
// lbz LdStLoad
-// lbzu LdStLoad
-// lbzux LdStUX
+// lbzu LdStLoadUpd
+// lbzux LdStLoadUpd
// lbzx LdStLoad
// ld LdStLD
// ldarx LdStLDARX
-// ldu LdStLD
-// ldux LdStLD
+// ldu LdStLDU
+// ldux LdStLDU
// ldx LdStLD
// lfd LdStLFD
// lfdu LdStLFDU
// lfdux LdStLFDU
-// lfdx LdStLFDU
-// lfs LdStLFDU
+// lfdx LdStLFD
+// lfs LdStLFD
// lfsu LdStLFDU
// lfsux LdStLFDU
-// lfsx LdStLFDU
+// lfsx LdStLFD
// lha LdStLHA
-// lhau LdStLHA
-// lhaux LdStLHA
+// lhau LdStLHAU
+// lhaux LdStLHAU
// lhax LdStLHA
// lhbrx LdStLoad
// lhz LdStLoad
-// lhzu LdStLoad
-// lhzux LdStUX
+// lhzu LdStLoadUpd
+// lhzux LdStLoadUpd
// lhzx LdStLoad
// lmw LdStLMW
// lswi LdStLMW
@@ -243,12 +253,12 @@ include "PPCScheduleA2.td"
// lvxl LdStLVecX
// lwa LdStLWA
// lwarx LdStLWARX
-// lwaux LdStLHA
+// lwaux LdStLHAU
// lwax LdStLHA
// lwbrx LdStLoad
// lwz LdStLoad
-// lwzu LdStLoad
-// lwzux LdStUX
+// lwzu LdStLoadUpd
+// lwzux LdStLoadUpd
// lwzx LdStLoad
// mcrf BrMCR
// mcrfs FPGeneral
@@ -292,10 +302,10 @@ include "PPCScheduleA2.td"
// rfid IntRFID
// rldcl IntRotateD
// rldcr IntRotateD
-// rldic IntRotateD
-// rldicl IntRotateD
-// rldicr IntRotateD
-// rldimi IntRotateD
+// rldic IntRotateDI
+// rldicl IntRotateDI
+// rldicr IntRotateDI
+// rldimi IntRotateDI
// rlwimi IntRotate
// rlwinm IntGeneral
// rlwnm IntGeneral
@@ -305,33 +315,33 @@ include "PPCScheduleA2.td"
// sld IntRotateD
// slw IntGeneral
// srad IntRotateD
-// sradi IntRotateD
+// sradi IntRotateDI
// sraw IntShift
// srawi IntShift
// srd IntRotateD
// srw IntGeneral
// stb LdStStore
-// stbu LdStStore
-// stbux LdStStore
+// stbu LdStStoreUpd
+// stbux LdStStoreUpd
// stbx LdStStore
// std LdStSTD
// stdcx. LdStSTDCX
-// stdu LdStSTD
-// stdux LdStSTD
+// stdu LdStSTDU
+// stdux LdStSTDU
// stdx LdStSTD
-// stfd LdStUX
-// stfdu LdStUX
-// stfdux LdStUX
-// stfdx LdStUX
-// stfiwx LdStUX
-// stfs LdStUX
-// stfsu LdStUX
-// stfsux LdStUX
-// stfsx LdStUX
+// stfd LdStSTFD
+// stfdu LdStSTFDU
+// stfdux LdStSTFDU
+// stfdx LdStSTFD
+// stfiwx LdStSTFD
+// stfs LdStSTFD
+// stfsu LdStSTFDU
+// stfsux LdStSTFDU
+// stfsx LdStSTFD
// sth LdStStore
// sthbrx LdStStore
-// sthu LdStStore
-// sthux LdStStore
+// sthu LdStStoreUpd
+// sthux LdStStoreUpd
// sthx LdStStore
// stmw LdStLMW
// stswi LdStLMW
@@ -344,8 +354,8 @@ include "PPCScheduleA2.td"
// stw LdStStore
// stwbrx LdStStore
// stwcx. LdStSTWCX
-// stwu LdStStore
-// stwux LdStStore
+// stwu LdStStoreUpd
+// stwux LdStStoreUpd
// stwx LdStStore
// subf IntGeneral
// subfc IntGeneral
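
The pattern behind this remapping: every update-form load or store (the u/ux variants) now has a dedicated *Upd itinerary class, and the floating-point loads and stores move to LdStLFD/LdStLFDU and LdStSTFD/LdStSTFDU, so in-order models such as the 440, A2, and the new e500mc/e5500 can account for the extra GPR write of the updated effective address (dispatched as 2 micro-ops on the e500mc/e5500 below).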
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td b/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
index cd0fb70..37b6eac 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
@@ -288,6 +288,15 @@ def PPC440Itineraries : ProcessorItineraries<
InstrStage<2, [LWB]>],
[9, 5],
[GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLoadUpd , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<2, [LWB]>],
+ [9, 5],
+ [GPR_Bypass, GPR_Bypass]>,
InstrItinData<LdStStore , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1, DISS2]>,
@@ -297,6 +306,15 @@ def PPC440Itineraries : ProcessorItineraries<
InstrStage<2, [LWB]>],
[8, 5],
[NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<2, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
InstrItinData<LdStICBI , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1, DISS2]>,
@@ -306,7 +324,7 @@ def PPC440Itineraries : ProcessorItineraries<
InstrStage<1, [LWB]>],
[8, 5],
[NoBypass, GPR_Bypass]>,
- InstrItinData<LdStUX , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrItinData<LdStSTFD , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1, DISS2]>,
InstrStage<1, [LRACC]>,
@@ -315,6 +333,15 @@ def PPC440Itineraries : ProcessorItineraries<
InstrStage<1, [LWB]>],
[8, 5, 5],
[NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStSTFDU , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5, 5],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
InstrItinData<LdStLFD , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1, DISS2]>,
@@ -342,6 +369,15 @@ def PPC440Itineraries : ProcessorItineraries<
InstrStage<1, [LWB]>],
[8, 5],
[NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLHAU , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
InstrItinData<LdStLMW , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1, DISS2]>,
@@ -371,6 +407,15 @@ def PPC440Itineraries : ProcessorItineraries<
InstrStage<2, [LWB]>],
[8, 5],
[NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTDU , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<2, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
InstrItinData<LdStSTDCX , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1]>,
@@ -537,6 +582,19 @@ def PPC440Itineraries : ProcessorItineraries<
InstrStage<1, [FWB]>],
[10, 4, 4],
[FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPAddSub , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [FRACC]>,
+ InstrStage<1, [FEXE1]>,
+ InstrStage<1, [FEXE2]>,
+ InstrStage<1, [FEXE3]>,
+ InstrStage<1, [FEXE4]>,
+ InstrStage<1, [FEXE5]>,
+ InstrStage<1, [FEXE6]>,
+ InstrStage<1, [FWB]>],
+ [10, 4, 4],
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
InstrItinData<FPCompare , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1, DISS2]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
index 4d4a5d0..ba63b5c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
@@ -181,6 +181,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[10, 7, 7],
[GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntRotateDI , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
InstrItinData<IntShift , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -302,7 +313,18 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[14, 7],
[GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLD , [InstrStage<4,
+ InstrItinData<LdStLoadUpd , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLDU , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
IU4_4, IU4_5, IU4_6, IU4_7]>,
@@ -324,6 +346,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[13, 7],
[GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [13, 7],
+ [GPR_Bypass, GPR_Bypass]>,
InstrItinData<LdStICBI , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -335,7 +368,7 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[14, 7],
[NoBypass, GPR_Bypass]>,
- InstrItinData<LdStUX , [InstrStage<4,
+ InstrItinData<LdStSTFD , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
IU4_4, IU4_5, IU4_6, IU4_7]>,
@@ -346,6 +379,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[14, 7, 7],
[NoBypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<LdStSTFDU , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7, 7],
+ [NoBypass, FPR_Bypass, FPR_Bypass]>,
InstrItinData<LdStLFD , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -379,6 +423,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[14, 7],
[NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLHAU , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7],
+ [NoBypass, GPR_Bypass]>,
InstrItinData<LdStLMW , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -412,6 +467,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[13, 7],
[GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStSTDU , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [13, 7],
+ [GPR_Bypass, GPR_Bypass]>,
InstrItinData<LdStSTDCX , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -593,6 +659,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
[15, 7, 7],
[FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPAddSub , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
+ InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
+ InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
+ InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
+ [15, 7, 7],
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
InstrItinData<FPCompare , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td
new file mode 100644
index 0000000..9bb779a
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td
@@ -0,0 +1,265 @@
+//===-- PPCScheduleE500mc.td - e500mc Scheduling Defs ------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the itinerary class data for the Freescale e500mc 32-bit
+// Power processor.
+//
+// All information is derived from the "e500mc Core Reference Manual",
+// Freescale Document Number E500MCRM, Rev. 1, 03/2012.
+//
+//===----------------------------------------------------------------------===//
+// Relevant functional units in the Freescale e500mc core:
+//
+// * Decode & Dispatch
+// Can dispatch up to 2 instructions per clock cycle to either the GPR Issue
+// queues (GIQx), FP Issue Queue (FIQ), or Branch issue queue (BIQ).
+def DIS0 : FuncUnit; // Dispatch stage - insn 1
+def DIS1 : FuncUnit; // Dispatch stage - insn 2
+
+// * Execute
+// 6 pipelined execution units: SFX0, SFX1, BU, FPU, LSU, CFX.
+// Some instructions can execute only in SFX0, not in SFX1.
+// The CFX has a bypass path, allowing non-divide instructions to execute
+// while a divide instruction is being executed.
+def SFX0 : FuncUnit; // Simple unit 0
+def SFX1 : FuncUnit; // Simple unit 1
+def BU : FuncUnit; // Branch unit
+def CFX_DivBypass
+ : FuncUnit; // CFX divide bypass path
+def CFX_0 : FuncUnit; // CFX pipeline
+def LSU_0 : FuncUnit; // LSU pipeline
+def FPU_0 : FuncUnit; // FPU pipeline
+
+def PPCE500mcItineraries : ProcessorItineraries<
+ [DIS0, DIS1, SFX0, SFX1, BU, CFX_DivBypass, CFX_0, LSU_0, FPU_0],
+ [CR_Bypass, GPR_Bypass, FPR_Bypass], [
+ InstrItinData<IntSimple , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntGeneral , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntCompare , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [5, 1, 1], // Latency = 1 or 2
+ [CR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntDivW , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0], 0>,
+ InstrStage<14, [CFX_DivBypass]>],
+ [17, 1, 1], // Latency=4..35, Repeat= 4..35
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMFFS , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<8, [FPU_0]>],
+ [11], // Latency = 8
+ [FPR_Bypass]>,
+ InstrItinData<IntMTFSB0 , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<8, [FPU_0]>],
+ [11, 1, 1], // Latency = 8
+ [NoBypass, NoBypass, NoBypass]>,
+ InstrItinData<IntMulHW , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0]>],
+ [7, 1, 1], // Latency = 4, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulHWU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0]>],
+ [7, 1, 1], // Latency = 4, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulLI , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0]>],
+ [7, 1, 1], // Latency = 4, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntRotate , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntShift , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntTrapW , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [SFX0]>],
+ [5, 1], // Latency = 2, Repeat rate = 2
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<BrB , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [BU]>],
+ [4, 1], // Latency = 1
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<BrCR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [BU]>],
+ [4, 1, 1], // Latency = 1
+ [CR_Bypass, CR_Bypass, CR_Bypass]>,
+ InstrItinData<BrMCR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [BU]>],
+ [4, 1], // Latency = 1
+ [CR_Bypass, CR_Bypass]>,
+ InstrItinData<BrMCRX , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [CR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBA , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBF , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBI , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLoad , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLoadUpd , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStStore , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [NoBypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStICBI , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTFD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1, 1], // Latency = 3
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStSTFDU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1, 1], // Latency = 3
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStLFD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 1, 1], // Latency = 4
+ [FPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLFDU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 1, 1], // Latency = 4
+ [FPR_Bypass, GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStLHA , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLHAU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLMW , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 1], // Latency = r+3
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLWARX , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<3, [LSU_0]>],
+ [6, 1, 1], // Latency = 3, Repeat rate = 3
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStSTWCX , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [6, 1], // Latency = 3
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSync , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>]>,
+ InstrItinData<SprMFSR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<4, [SFX0]>],
+ [7, 1],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprMTMSR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [SFX0, SFX1]>],
+ [5, 1], // Latency = 2, Repeat rate = 4
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprMTSR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0]>],
+ [5, 1],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprTLBSYNC , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0], 0>]>,
+ InstrItinData<SprMFCR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<5, [SFX0]>],
+ [8, 1],
+ [GPR_Bypass, CR_Bypass]>,
+ InstrItinData<SprMFMSR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<4, [SFX0]>],
+ [7, 1], // Latency = 4, Repeat rate = 4
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprMFSPR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [4, 1], // Latency = 1, Repeat rate = 1
+ [GPR_Bypass, CR_Bypass]>,
+ InstrItinData<SprMFTB , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<4, [SFX0]>],
+ [7, 1], // Latency = 4, Repeat rate = 4
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMTSPR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [4, 1], // Latency = 1, Repeat rate = 1
+ [CR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprMTSRIN , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0]>],
+ [4, 1],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<FPGeneral , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [FPU_0]>],
+ [11, 1, 1], // Latency = 8, Repeat rate = 2
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPAddSub , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<4, [FPU_0]>],
+ [13, 1, 1], // Latency = 10, Repeat rate = 4
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPCompare , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [FPU_0]>],
+ [11, 1, 1], // Latency = 8, Repeat rate = 2
+ [CR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPDivD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<68, [FPU_0]>],
+ [71, 1, 1], // Latency = 68, Repeat rate = 68
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPDivS , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<38, [FPU_0]>],
+ [41, 1, 1], // Latency = 38, Repeat rate = 38
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPFused , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<4, [FPU_0]>],
+ [13, 1, 1, 1], // Latency = 10, Repeat rate = 4
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPRes , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<38, [FPU_0]>],
+ [41, 1], // Latency = 38, Repeat rate = 38
+ [FPR_Bypass, FPR_Bypass]>
+]>;
+
+//===----------------------------------------------------------------------===//
+// e500mc machine model for scheduling and other instruction cost heuristics.
+
+def PPCE500mcModel : SchedMachineModel {
+ let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
+ let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
+ let LoadLatency = 5; // Optimistic load latency assuming bypass.
+ // This is overridden by OperandCycles if the
+ // Itineraries are queried instead.
+
+ let Itineraries = PPCE500mcItineraries;
+}
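
Elsewhere in the surrounding patch the new models are bound to CPU names through the subtarget definitions; schematically, a PPC.td entry would look like the sketch below, using LLVM's ProcessorModel helper with the real feature list elided rather than guessed:

// Sketch only: binds the e500mc scheduling model to the "e500mc" CPU name.
def : ProcessorModel<"e500mc", PPCE500mcModel, []>;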
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td
new file mode 100644
index 0000000..d7e11ac
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td
@@ -0,0 +1,309 @@
+//===-- PPCScheduleE5500.td - e5500 Scheduling Defs --------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the itinerary class data for the Freescale e5500 64-bit
+// Power processor.
+//
+// All information is derived from the "e5500 Core Reference Manual",
+// Freescale Document Number e5500RM, Rev. 1, 03/2012.
+//
+//===----------------------------------------------------------------------===//
+// Relevant functional units in the Freescale e5500 core
+// (These are the same as for the e500mc)
+//
+// * Decode & Dispatch
+// Can dispatch up to 2 instructions per clock cycle to either the GPR Issue
+// queues (GIQx), FP Issue Queue (FIQ), or Branch issue queue (BIQ).
+// def DIS0 : FuncUnit;
+// def DIS1 : FuncUnit;
+
+// * Execute
+// 6 pipelined execution units: SFX0, SFX1, BU, FPU, LSU, CFX.
+// The CFX has a bypass path, allowing non-divide instructions to execute
+// while a divide instruction is being executed.
+// def SFX0 : FuncUnit; // Simple unit 0
+// def SFX1 : FuncUnit; // Simple unit 1
+// def BU : FuncUnit; // Branch unit
+// def CFX_DivBypass
+// : FuncUnit; // CFX divide bypass path
+// def CFX_0 : FuncUnit; // CFX pipeline stage 0
+
+def CFX_1 : FuncUnit; // CFX pipeline stage 1
+
+// def LSU_0 : FuncUnit; // LSU pipeline
+// def FPU_0 : FuncUnit; // FPU pipeline
+
+
+def PPCE5500Itineraries : ProcessorItineraries<
+ [DIS0, DIS1, SFX0, SFX1, BU, CFX_DivBypass, CFX_0, CFX_1,
+ LSU_0, FPU_0],
+ [CR_Bypass, GPR_Bypass, FPR_Bypass], [
+ InstrItinData<IntSimple , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [5, 2, 2], // Latency = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntGeneral , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [5, 2, 2], // Latency = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntCompare , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [6, 2, 2], // Latency = 1 or 2
+ [CR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntDivD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0], 0>,
+ InstrStage<26, [CFX_DivBypass]>],
+ [30, 2, 2], // Latency= 4..26, Repeat rate= 4..26
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntDivW , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0], 0>,
+ InstrStage<16, [CFX_DivBypass]>],
+ [20, 2, 2], // Latency= 4..16, Repeat rate= 4..16
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMFFS , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [FPU_0]>],
+ [11], // Latency = 7, Repeat rate = 1
+ [FPR_Bypass]>,
+ InstrItinData<IntMTFSB0 , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<7, [FPU_0]>],
+ [11, 2, 2], // Latency = 7, Repeat rate = 7
+ [NoBypass, NoBypass, NoBypass]>,
+ InstrItinData<IntMulHD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0], 0>,
+ InstrStage<2, [CFX_1]>],
+ [9, 2, 2], // Latency = 4..7, Repeat rate = 2..4
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulHW , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0], 0>,
+ InstrStage<1, [CFX_1]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulHWU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0], 0>,
+ InstrStage<1, [CFX_1]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulLI , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0], 0>,
+ InstrStage<2, [CFX_1]>],
+ [8, 2, 2], // Latency = 4 or 5, Repeat = 2
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntRotate , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [5, 2, 2], // Latency = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntRotateD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [SFX0, SFX1]>],
+ [6, 2, 2], // Latency = 2, Repeat rate = 2
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntRotateDI , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [5, 2, 2], // Latency = 1, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntShift , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [SFX0, SFX1]>],
+ [6, 2, 2], // Latency = 2, Repeat rate = 2
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntTrapW , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [SFX0]>],
+ [6, 2], // Latency = 2, Repeat rate = 2
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<BrB , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [BU]>],
+ [5, 2], // Latency = 1
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<BrCR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [BU]>],
+ [5, 2, 2], // Latency = 1
+ [CR_Bypass, CR_Bypass, CR_Bypass]>,
+ InstrItinData<BrMCR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [BU]>],
+ [5, 2], // Latency = 1
+ [CR_Bypass, CR_Bypass]>,
+ InstrItinData<BrMCRX , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0]>],
+ [5, 2, 2], // Latency = 1
+ [CR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBA , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBF , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBI , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLoad , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLoadUpd , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStLD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLDARX , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<3, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 3
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLDU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStStore , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStICBI , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTFD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStSTFDU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStLFD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [FPR_Bypass, GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStLFDU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [FPR_Bypass, GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStLHA , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLHAU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [GPR_Bypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStLMW , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<4, [LSU_0]>],
+ [8, 2], // Latency = r+3, Repeat rate = r+3
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLWARX , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<3, [LSU_0]>],
+ [7, 2, 2], // Latency = 3, Repeat rate = 3
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStSTD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTDCX , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTDU , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<LdStSTWCX , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSync , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0]>]>,
+ InstrItinData<SprMTMSR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [CFX_0]>],
+ [6, 2], // Latency = 2, Repeat rate = 4
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprTLBSYNC , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [LSU_0], 0>]>,
+ InstrItinData<SprMFCR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<5, [CFX_0]>],
+ [9, 2], // Latency = 5, Repeat rate = 5
+ [GPR_Bypass, CR_Bypass]>,
+ InstrItinData<SprMFMSR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<4, [SFX0]>],
+ [8, 2], // Latency = 4, Repeat rate = 4
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprMFSPR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [CFX_0]>],
+ [5], // Latency = 1, Repeat rate = 1
+ [GPR_Bypass]>,
+ InstrItinData<SprMFTB , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<4, [CFX_0]>],
+ [8, 2], // Latency = 4, Repeat rate = 4
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMTSPR , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [SFX0, SFX1]>],
+ [5], // Latency = 1, Repeat rate = 1
+ [GPR_Bypass]>,
+ InstrItinData<FPGeneral , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [FPU_0]>],
+ [11, 2, 2], // Latency = 7, Repeat rate = 1
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPAddSub , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [FPU_0]>],
+ [11, 2, 2], // Latency = 7, Repeat rate = 1
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPCompare , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [FPU_0]>],
+ [11, 2, 2], // Latency = 7, Repeat rate = 1
+ [CR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPDivD , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<31, [FPU_0]>],
+ [39, 2, 2], // Latency = 35, Repeat rate = 31
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPDivS , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<16, [FPU_0]>],
+ [24, 2, 2], // Latency = 20, Repeat rate = 16
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPFused , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<1, [FPU_0]>],
+ [11, 2, 2, 2], // Latency = 7, Repeat rate = 1
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPRes , [InstrStage<1, [DIS0, DIS1], 0>,
+ InstrStage<2, [FPU_0]>],
+ [12, 2], // Latency = 8, Repeat rate = 2
+ [FPR_Bypass, FPR_Bypass]>
+]>;
+
+// ===---------------------------------------------------------------------===//
+// e5500 machine model for scheduling and other instruction cost heuristics.
+
+def PPCE5500Model : SchedMachineModel {
+ let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
+ let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
+ let LoadLatency = 6; // Optimistic load latency assuming bypass.
+ // This is overridden by OperandCycles if the
+ // Itineraries are queried instead.
+
+ let Itineraries = PPCE5500Itineraries;
+}
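
Reading the itinerary entries above: each InstrItinData pairs an itinerary class with the pipeline stages it occupies, a list of per-operand read/write cycles (the bracketed lists such as [5, 2, 2], which are larger than the commented latencies because they appear to include a fixed front-end offset), the bypass used for each operand, and optionally a micro-op count. A minimal sketch of how a consumer might query this data, assuming the standard InstrItineraryData accessors from llvm/MC/MCInstrItineraries.h (the helper name is made up):

    #include "llvm/MC/MCInstrItineraries.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Hypothetical helper: summarize one itinerary class from the tables above.
    static void summarizeItinClass(const InstrItineraryData &Itins,
                                   unsigned ItinClassIdx) {
      // Cycles the instruction occupies its functional units (stage view).
      unsigned StageLatency = Itins.getStageLatency(ItinClassIdx);
      // Cycle, relative to dispatch, at which operand 0 (the def) is written;
      // -1 means no operand-cycle information was recorded.
      int DefCycle = Itins.getOperandCycle(ItinClassIdx, 0);
      // Dispatch slots consumed, e.g. 2 for the load/store-with-update forms.
      unsigned MicroOps = Itins.getNumMicroOps(ItinClassIdx);
      outs() << "stages=" << StageLatency << " defCycle=" << DefCycle
             << " uops=" << MicroOps << "\n";
    }
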
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
index 61e89ed..72a0a39 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
@@ -34,12 +34,16 @@ def G3Itineraries : ProcessorItineraries<
InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStDCBI , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLoad , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStLoadUpd , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStStore , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<2, [SLU]>]>,
InstrItinData<LdStICBI , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStUX , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStSTFD , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStSTFDU , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLFD , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLFDU , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLHA , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStLHAU , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLMW , [InstrStage<34, [SLU]>]>,
InstrItinData<LdStLWARX , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStSTWCX , [InstrStage<8, [SLU]>]>,
@@ -58,6 +62,7 @@ def G3Itineraries : ProcessorItineraries<
InstrItinData<SprRFI , [InstrStage<2, [SRU]>]>,
InstrItinData<SprSC , [InstrStage<2, [SRU]>]>,
InstrItinData<FPGeneral , [InstrStage<1, [FPU1]>]>,
+ InstrItinData<FPAddSub , [InstrStage<1, [FPU1]>]>,
InstrItinData<FPCompare , [InstrStage<1, [FPU1]>]>,
InstrItinData<FPDivD , [InstrStage<31, [FPU1]>]>,
InstrItinData<FPDivS , [InstrStage<17, [FPU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
index e19ddfa..fc9120d 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
@@ -33,13 +33,17 @@ def G4Itineraries : ProcessorItineraries<
InstrItinData<LdStDCBF , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStDCBI , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLoad , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStLoadUpd , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStStore , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<2, [SLU]>]>,
InstrItinData<LdStDSS , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStICBI , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStUX , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStSTFD , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStSTFDU , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLFD , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLFDU , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLHA , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStLHAU , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLMW , [InstrStage<34, [SLU]>]>,
InstrItinData<LdStLVecX , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLWARX , [InstrStage<3, [SLU]>]>,
@@ -60,6 +64,7 @@ def G4Itineraries : ProcessorItineraries<
InstrItinData<SprRFI , [InstrStage<2, [SRU]>]>,
InstrItinData<SprSC , [InstrStage<2, [SRU]>]>,
InstrItinData<FPGeneral , [InstrStage<1, [FPU1]>]>,
+ InstrItinData<FPAddSub , [InstrStage<1, [FPU1]>]>,
InstrItinData<FPCompare , [InstrStage<1, [FPU1]>]>,
InstrItinData<FPDivD , [InstrStage<31, [FPU1]>]>,
InstrItinData<FPDivS , [InstrStage<17, [FPU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
index e7446cb..a4e82ce 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
@@ -36,19 +36,24 @@ def G4PlusItineraries : ProcessorItineraries<
InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStDCBI , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLoad , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStLoadUpd , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStStore , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<3, [SLU]>]>,
InstrItinData<LdStDSS , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStICBI , [InstrStage<3, [IU2]>]>,
- InstrItinData<LdStUX , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStSTFD , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStSTFDU , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLFD , [InstrStage<4, [SLU]>]>,
InstrItinData<LdStLFDU , [InstrStage<4, [SLU]>]>,
InstrItinData<LdStLHA , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStLHAU , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLMW , [InstrStage<37, [SLU]>]>,
InstrItinData<LdStLVecX , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLWA , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLWARX , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStSTD , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStSTDCX , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStSTDU , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStSTVEBX , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStSTWCX , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStSync , [InstrStage<35, [SLU]>]>,
@@ -66,6 +71,7 @@ def G4PlusItineraries : ProcessorItineraries<
InstrItinData<SprRFI , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<SprSC , [InstrStage<0, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<FPGeneral , [InstrStage<5, [FPU1]>]>,
+ InstrItinData<FPAddSub , [InstrStage<5, [FPU1]>]>,
InstrItinData<FPCompare , [InstrStage<5, [FPU1]>]>,
InstrItinData<FPDivD , [InstrStage<35, [FPU1]>]>,
InstrItinData<FPDivS , [InstrStage<21, [FPU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
index 1371499..7c02ea0 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
@@ -27,6 +27,7 @@ def G5Itineraries : ProcessorItineraries<
InstrItinData<IntMulLI , [InstrStage<4, [IU1, IU2]>]>,
InstrItinData<IntRFID , [InstrStage<1, [IU2]>]>,
InstrItinData<IntRotateD , [InstrStage<2, [IU1, IU2]>]>,
+ InstrItinData<IntRotateDI , [InstrStage<2, [IU1, IU2]>]>,
InstrItinData<IntRotate , [InstrStage<4, [IU1, IU2]>]>,
InstrItinData<IntShift , [InstrStage<2, [IU1, IU2]>]>,
InstrItinData<IntTrapD , [InstrStage<1, [IU1, IU2]>]>,
@@ -37,15 +38,20 @@ def G5Itineraries : ProcessorItineraries<
InstrItinData<BrMCRX , [InstrStage<3, [BPU]>]>,
InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLoad , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStLoadUpd , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStStore , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStStoreUpd, [InstrStage<3, [SLU]>]>,
InstrItinData<LdStDSS , [InstrStage<10, [SLU]>]>,
InstrItinData<LdStICBI , [InstrStage<40, [SLU]>]>,
- InstrItinData<LdStUX , [InstrStage<4, [SLU]>]>,
+ InstrItinData<LdStSTFD , [InstrStage<4, [SLU]>]>,
+ InstrItinData<LdStSTFDU , [InstrStage<4, [SLU]>]>,
InstrItinData<LdStLD , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStLDU , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLDARX , [InstrStage<11, [SLU]>]>,
InstrItinData<LdStLFD , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLFDU , [InstrStage<5, [SLU]>]>,
InstrItinData<LdStLHA , [InstrStage<5, [SLU]>]>,
+ InstrItinData<LdStLHAU , [InstrStage<5, [SLU]>]>,
InstrItinData<LdStLMW , [InstrStage<64, [SLU]>]>,
InstrItinData<LdStLVecX , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStLWA , [InstrStage<5, [SLU]>]>,
@@ -53,6 +59,7 @@ def G5Itineraries : ProcessorItineraries<
InstrItinData<LdStSLBIA , [InstrStage<40, [SLU]>]>, // needs work
InstrItinData<LdStSLBIE , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStSTD , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStSTDU , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStSTDCX , [InstrStage<11, [SLU]>]>,
InstrItinData<LdStSTVEBX , [InstrStage<5, [SLU]>]>,
InstrItinData<LdStSTWCX , [InstrStage<11, [SLU]>]>,
@@ -69,6 +76,7 @@ def G5Itineraries : ProcessorItineraries<
InstrItinData<SprMTSPR , [InstrStage<8, [IU2]>]>,
InstrItinData<SprSC , [InstrStage<1, [IU2]>]>,
InstrItinData<FPGeneral , [InstrStage<6, [FPU1, FPU2]>]>,
+ InstrItinData<FPAddSub , [InstrStage<6, [FPU1, FPU2]>]>,
InstrItinData<FPCompare , [InstrStage<8, [FPU1, FPU2]>]>,
InstrItinData<FPDivD , [InstrStage<33, [FPU1, FPU2]>]>,
InstrItinData<FPDivS , [InstrStage<33, [FPU1, FPU2]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
index bb193ac..9c8cb92 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -54,19 +54,26 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
CPUName = sys::getHostCPUName();
#endif
- // Parse features string.
- ParseSubtargetFeatures(CPUName, FS);
-
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUName);
+ // Make sure 64-bit features are available when the CPU name is generic.
+ std::string FullFS = FS;
+
// If we are generating code for ppc64, verify that options make sense.
if (is64Bit) {
Has64BitSupport = true;
// Silently force 64-bit register use on ppc64.
Use64BitRegs = true;
+ if (!FullFS.empty())
+ FullFS = "+64bit," + FullFS;
+ else
+ FullFS = "+64bit";
}
-
+
+ // Parse features string.
+ ParseSubtargetFeatures(CPUName, FullFS);
+
// If the user requested use of 64-bit regs, but the cpu selected doesn't
// support it, ignore.
if (use64BitRegs() && !has64BitSupport())
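
The reordering above is deliberate: itineraries are selected from the CPU name alone, while the feature string handed to ParseSubtargetFeatures is first augmented with +64bit so that a generic CPU still gains 64-bit support in 64-bit mode. A small sketch of the string composition (the FS value below is illustrative):

    #include <string>

    std::string buildFullFS(const std::string &FS, bool is64Bit) {
      std::string FullFS = FS;
      if (is64Bit)
        FullFS = FullFS.empty() ? std::string("+64bit") : "+64bit," + FullFS;
      return FullFS; // e.g. FS == "+altivec" yields "+64bit,+altivec"
    }
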
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
index 0207c83..b9e22f4 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
@@ -33,32 +33,34 @@ namespace PPC {
enum {
DIR_NONE,
DIR_32,
- DIR_440,
- DIR_601,
- DIR_602,
- DIR_603,
+ DIR_440,
+ DIR_601,
+ DIR_602,
+ DIR_603,
DIR_7400,
- DIR_750,
- DIR_970,
+ DIR_750,
+ DIR_970,
DIR_A2,
+ DIR_E500mc,
+ DIR_E5500,
DIR_PWR6,
DIR_PWR7,
- DIR_64
+ DIR_64
};
}
class GlobalValue;
class TargetMachine;
-
+
class PPCSubtarget : public PPCGenSubtargetInfo {
protected:
/// stackAlignment - The minimum alignment known to hold for the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned StackAlignment;
-
+
/// Selected instruction itineraries (one entry per itinerary class.)
InstrItineraryData InstrItins;
-
+
/// Which cpu directive was used.
unsigned DarwinDirective;
@@ -74,7 +76,7 @@ protected:
bool IsBookE;
bool HasLazyResolverStubs;
bool IsJITCodeModel;
-
+
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
@@ -84,11 +86,11 @@ public:
///
PPCSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool is64Bit);
-
- /// ParseSubtargetFeatures - Parses features string setting specified
+
+ /// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
-
+
/// SetJITMode - This is called to inform the subtarget info that we are
/// producing code for the JIT.
void SetJITMode();
@@ -97,20 +99,27 @@ public:
/// stack frame on entry to the function and which must be maintained by every
/// function for this subtarget.
unsigned getStackAlignment() const { return StackAlignment; }
-
+
/// getDarwinDirective - Returns the -m directive specified for the cpu.
///
unsigned getDarwinDirective() const { return DarwinDirective; }
-
- /// getInstrItins - Return the instruction itineraies based on subtarget
+
+ /// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
- /// getTargetDataString - Return the pointer size and type alignment
+ /// getDataLayoutString - Return the pointer size and type alignment
/// properties of this subtarget.
- const char *getTargetDataString() const {
+ const char *getDataLayoutString() const {
// Note, the alignment values for f64 and i64 on ppc64 in Darwin
// documentation are wrong; these are correct (i.e. "what gcc does").
+ if (isPPC64() && isSVR4ABI()) {
+ if (TargetTriple.getOS() == llvm::Triple::FreeBSD)
+ return "E-p:64:64-f64:64:64-i64:64:64-f128:64:64-v128:128:128-n32:64";
+ else
+ return "E-p:64:64-f64:64:64-i64:64:64-f128:128:128-v128:128:128-n32:64";
+ }
+
return isPPC64() ? "E-p:64:64-f64:64:64-i64:64:64-f128:64:128-n32:64"
: "E-p:32:32-f64:64:64-i64:64:64-f128:64:128-n32";
}
@@ -118,22 +127,22 @@ public:
/// isPPC64 - Return true if we are generating code for 64-bit pointer mode.
///
bool isPPC64() const { return IsPPC64; }
-
+
/// has64BitSupport - Return true if the selected CPU supports 64-bit
/// instructions, regardless of whether we are in 32-bit or 64-bit mode.
bool has64BitSupport() const { return Has64BitSupport; }
-
+
/// use64BitRegs - Return true if in 64-bit mode or if we should use 64-bit
/// registers in 32-bit mode when possible. This can only be true if
/// has64BitSupport() returns true.
bool use64BitRegs() const { return Use64BitRegs; }
-
+
/// hasLazyResolverStub - Return true if accesses to the specified global have
/// to go through a dyld lazy resolution stub. This means that an extra load
/// is required to get the address of the global.
- bool hasLazyResolverStub(const GlobalValue *GV,
+ bool hasLazyResolverStub(const GlobalValue *GV,
const TargetMachine &TM) const;
-
+
// isJITCodeModel - True if we're generating code for the JIT
bool isJITCodeModel() const { return IsJITCodeModel; }
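
For reference, the layout strings returned by getDataLayoutString() encode, left to right: byte order (E = big-endian), pointer size and alignment (p:64:64), per-type ABI/preferred alignments (f64:64:64, i64:64:64, f128:...), vector alignment (v128:128:128), and native integer widths (n32:64); the FreeBSD branch lowers f128 alignment to 64 bits. A short sketch of feeding such a string to DataLayout, assuming only the interface visible in this patch:

    #include "llvm/DataLayout.h"
    using namespace llvm;

    void inspectLayout() {
      DataLayout DL("E-p:64:64-f64:64:64-i64:64:64-f128:64:64-"
                    "v128:128:128-n32:64");
      bool BigEndian = DL.isBigEndian();        // leading 'E'
      unsigned PtrBytes = DL.getPointerSize(0); // p:64:64 -> 8 bytes in AS 0
      (void)BigEndian;
      (void)PtrBytes;
    }
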
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index 9805112..3fc977e 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -40,10 +40,11 @@ PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT,
bool is64Bit)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS, is64Bit),
- DataLayout(Subtarget.getTargetDataString()), InstrInfo(*this),
+ DL(Subtarget.getDataLayoutString()), InstrInfo(*this),
FrameLowering(Subtarget), JITInfo(*this, is64Bit),
TLInfo(*this), TSInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
+ InstrItins(Subtarget.getInstrItineraryData()),
+ STTI(&TLInfo), VTTI(&TLInfo) {
// The binutils for the BG/P are too old for CFI.
if (Subtarget.isBGP())
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
index 7da2b0c..c168433 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
@@ -21,7 +21,8 @@
#include "PPCISelLowering.h"
#include "PPCSelectionDAGInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetTransformImpl.h"
+#include "llvm/DataLayout.h"
namespace llvm {
@@ -29,13 +30,15 @@ namespace llvm {
///
class PPCTargetMachine : public LLVMTargetMachine {
PPCSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
PPCInstrInfo InstrInfo;
PPCFrameLowering FrameLowering;
PPCJITInfo JITInfo;
PPCTargetLowering TLInfo;
PPCSelectionDAGInfo TSInfo;
InstrItineraryData InstrItins;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
PPCTargetMachine(const Target &T, StringRef TT,
@@ -58,11 +61,17 @@ public:
return &InstrInfo.getRegisterInfo();
}
- virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const DataLayout *getDataLayout() const { return &DL; }
virtual const PPCSubtarget *getSubtargetImpl() const { return &Subtarget; }
virtual const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
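
The two new hooks expose per-target legality and cost queries to IR-level passes without linking them against codegen internals. A sketch of a scalar-side consumer, assuming only the interface shown in this patch (TM and Imm are placeholders):

    #include "llvm/Target/TargetMachine.h"
    #include "llvm/TargetTransformInfo.h"
    using namespace llvm;

    bool immFoldsIntoAdd(const TargetMachine &TM, int64_t Imm) {
      const ScalarTargetTransformInfo *STTI = TM.getScalarTargetTransformInfo();
      // Can this constant be materialized directly in an add on the target?
      return STTI && STTI->isLegalAddImmediate(Imm);
    }
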
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
index 1c5c89e..716c79f 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -20,7 +20,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 79f7ebd..8e5619e 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -637,7 +637,7 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
Type *ElementTy = Ty->getElementType();
- return getTargetData()->getTypeAllocSize(ElementTy);
+ return getDataLayout()->getTypeAllocSize(ElementTy);
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
index 15541ef..e64c140 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -129,7 +129,7 @@ def retflag : SDNode<"SPISD::RET_FLAG", SDT_SPRet,
[SDNPHasChain, SDNPOptInGlue]>;
def flushw : SDNode<"SPISD::FLUSHW", SDTNone,
- [SDNPHasChain]>;
+ [SDNPHasChain, SDNPSideEffect, SDNPMayStore]>;
def getPCX : Operand<i32> {
let PrintMethod = "printGetPCX";
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index 9ee12ed..45c9624 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -33,10 +33,10 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
bool is64bit)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS, is64bit),
- DataLayout(Subtarget.getDataLayout()),
+ DL(Subtarget.getDataLayout()),
InstrInfo(Subtarget),
TLInfo(*this), TSInfo(*this),
- FrameLowering(Subtarget) {
+ FrameLowering(Subtarget), STTI(&TLInfo), VTTI(&TLInfo) {
}
namespace {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
index b2cc624..0fbe2d7 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
@@ -20,18 +20,21 @@
#include "SparcSelectionDAGInfo.h"
#include "SparcSubtarget.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
class SparcTargetMachine : public LLVMTargetMachine {
SparcSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
SparcInstrInfo InstrInfo;
SparcTargetLowering TLInfo;
SparcSelectionDAGInfo TSInfo;
SparcFrameLowering FrameLowering;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
SparcTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -52,7 +55,13 @@ public:
virtual const SparcSelectionDAGInfo* getSelectionDAGInfo() const {
return &TSInfo;
}
- virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
+ virtual const DataLayout *getDataLayout() const { return &DL; }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
diff --git a/contrib/llvm/lib/Target/Target.cpp b/contrib/llvm/lib/Target/Target.cpp
index a2b83bc..393178a 100644
--- a/contrib/llvm/lib/Target/Target.cpp
+++ b/contrib/llvm/lib/Target/Target.cpp
@@ -16,7 +16,7 @@
#include "llvm-c/Initialization.h"
#include "llvm/InitializePasses.h"
#include "llvm/PassManager.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/LLVMContext.h"
#include <cstring>
@@ -24,8 +24,9 @@
using namespace llvm;
void llvm::initializeTarget(PassRegistry &Registry) {
- initializeTargetDataPass(Registry);
+ initializeDataLayoutPass(Registry);
initializeTargetLibraryInfoPass(Registry);
+ initializeTargetTransformInfoPass(Registry);
}
void LLVMInitializeTarget(LLVMPassRegistryRef R) {
@@ -33,11 +34,11 @@ void LLVMInitializeTarget(LLVMPassRegistryRef R) {
}
LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) {
- return wrap(new TargetData(StringRep));
+ return wrap(new DataLayout(StringRep));
}
void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) {
- unwrap(PM)->add(new TargetData(*unwrap(TD)));
+ unwrap(PM)->add(new DataLayout(*unwrap(TD)));
}
void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
@@ -55,13 +56,21 @@ LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef TD) {
}
unsigned LLVMPointerSize(LLVMTargetDataRef TD) {
- return unwrap(TD)->getPointerSize();
+ return unwrap(TD)->getPointerSize(0);
+}
+
+unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) {
+ return unwrap(TD)->getPointerSize(AS);
}
LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) {
return wrap(unwrap(TD)->getIntPtrType(getGlobalContext()));
}
+LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) {
+ return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), AS));
+}
+
unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
return unwrap(TD)->getTypeSizeInBits(unwrap(Ty));
}
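
The two new entry points expose the address-space-aware queries through the C API; passing AS == 0 reproduces the old single-address-space behavior. For example:

    #include "llvm-c/Target.h"

    unsigned pointerSizeInAS(LLVMTargetDataRef TD, unsigned AS) {
      // LLVMPointerSizeForAS(TD, 0) is equivalent to LLVMPointerSize(TD).
      return LLVMPointerSizeForAS(TD, AS);
    }
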
diff --git a/contrib/llvm/lib/Target/TargetELFWriterInfo.cpp b/contrib/llvm/lib/Target/TargetELFWriterInfo.cpp
deleted file mode 100644
index a661ee9..0000000
--- a/contrib/llvm/lib/Target/TargetELFWriterInfo.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//===-- lib/Target/TargetELFWriterInfo.cpp - ELF Writer Info --0-*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the TargetELFWriterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Function.h"
-#include "llvm/Target/TargetELFWriterInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-using namespace llvm;
-
-TargetELFWriterInfo::TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_) :
- is64Bit(is64Bit_), isLittleEndian(isLittleEndian_) {
-}
-
-TargetELFWriterInfo::~TargetELFWriterInfo() {}
-
diff --git a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
index 8e215a7..6d4eab1 100644
--- a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
@@ -24,6 +24,16 @@ void TargetLibraryInfo::anchor() { }
const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
{
+ "_ZdaPv",
+ "_ZdlPv",
+ "_Znaj",
+ "_ZnajRKSt9nothrow_t",
+ "_Znam",
+ "_ZnamRKSt9nothrow_t",
+ "_Znwj",
+ "_ZnwjRKSt9nothrow_t",
+ "_Znwm",
+ "_ZnwmRKSt9nothrow_t",
"__cxa_atexit",
"__cxa_guard_abort",
"__cxa_guard_acquire",
@@ -31,16 +41,29 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"__memcpy_chk",
"acos",
"acosf",
+ "acosh",
+ "acoshf",
+ "acoshl",
"acosl",
"asin",
"asinf",
+ "asinh",
+ "asinhf",
+ "asinhl",
"asinl",
"atan",
"atan2",
"atan2f",
"atan2l",
"atanf",
+ "atanh",
+ "atanhf",
+ "atanhl",
"atanl",
+ "calloc",
+ "cbrt",
+ "cbrtf",
+ "cbrtl",
"ceil",
"ceilf",
"ceill",
@@ -54,6 +77,9 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"coshl",
"cosl",
"exp",
+ "exp10",
+ "exp10f",
+ "exp10l",
"exp2",
"exp2f",
"exp2l",
@@ -74,6 +100,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"fmodl",
"fputc",
"fputs",
+ "free",
"fwrite",
"iprintf",
"log",
@@ -86,8 +113,12 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"log2",
"log2f",
"log2l",
+ "logb",
+ "logbf",
+ "logbl",
"logf",
"logl",
+ "malloc",
"memchr",
"memcmp",
"memcpy",
@@ -97,11 +128,14 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"nearbyint",
"nearbyintf",
"nearbyintl",
+ "posix_memalign",
"pow",
"powf",
"powl",
"putchar",
"puts",
+ "realloc",
+ "reallocf",
"rint",
"rintf",
"rintl",
@@ -118,14 +152,30 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"sqrt",
"sqrtf",
"sqrtl",
+ "stpcpy",
"strcat",
"strchr",
+ "strcmp",
"strcpy",
+ "strcspn",
+ "strdup",
"strlen",
"strncat",
"strncmp",
"strncpy",
+ "strndup",
"strnlen",
+ "strpbrk",
+ "strrchr",
+ "strspn",
+ "strstr",
+ "strtod",
+ "strtof",
+ "strtol",
+ "strtold",
+ "strtoll",
+ "strtoul",
+ "strtoull",
"tan",
"tanf",
"tanh",
@@ -134,7 +184,8 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"tanl",
"trunc",
"truncf",
- "truncl"
+ "truncl",
+ "valloc"
};
/// initialize - Initialize the set of available library functions based on the
@@ -205,6 +256,21 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::tanhl);
// Win32 only has C89 math
+ TLI.setUnavailable(LibFunc::acosh);
+ TLI.setUnavailable(LibFunc::acoshf);
+ TLI.setUnavailable(LibFunc::acoshl);
+ TLI.setUnavailable(LibFunc::asinh);
+ TLI.setUnavailable(LibFunc::asinhf);
+ TLI.setUnavailable(LibFunc::asinhl);
+ TLI.setUnavailable(LibFunc::atanh);
+ TLI.setUnavailable(LibFunc::atanhf);
+ TLI.setUnavailable(LibFunc::atanhl);
+ TLI.setUnavailable(LibFunc::cbrt);
+ TLI.setUnavailable(LibFunc::cbrtf);
+ TLI.setUnavailable(LibFunc::cbrtl);
+ TLI.setUnavailable(LibFunc::exp10);
+ TLI.setUnavailable(LibFunc::exp10f);
+ TLI.setUnavailable(LibFunc::exp10l);
TLI.setUnavailable(LibFunc::exp2);
TLI.setUnavailable(LibFunc::exp2f);
TLI.setUnavailable(LibFunc::exp2l);
@@ -217,6 +283,9 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::log1p);
TLI.setUnavailable(LibFunc::log1pf);
TLI.setUnavailable(LibFunc::log1pl);
+ TLI.setUnavailable(LibFunc::logb);
+ TLI.setUnavailable(LibFunc::logbf);
+ TLI.setUnavailable(LibFunc::logbl);
TLI.setUnavailable(LibFunc::nearbyint);
TLI.setUnavailable(LibFunc::nearbyintf);
TLI.setUnavailable(LibFunc::nearbyintl);
@@ -254,6 +323,10 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::tanf);
TLI.setUnavailable(LibFunc::tanhf);
}
+
+ // Win32 does *not* provide stpcpy. It is provided on POSIX systems:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/stpcpy.html
+ TLI.setUnavailable(LibFunc::stpcpy);
}
}
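
Passes consult these tables through TargetLibraryInfo rather than hard-coding names; for instance, a transform should only synthesize a call to stpcpy when the target provides it (false on Win32 after the setUnavailable call above). A minimal sketch:

    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    bool canFormStpcpy(const TargetLibraryInfo &TLI) {
      // Reflects both the StandardNames table and per-triple availability.
      return TLI.has(LibFunc::stpcpy);
    }
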
diff --git a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
index b74a0bd..9d7e2b8 100644
--- a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
+++ b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
@@ -22,7 +22,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Dwarf.h"
@@ -184,7 +184,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
// Otherwise, just drop it into a mergable constant section. If we have
// a section for this size, use it, otherwise use the arbitrary sized
// mergable section.
- switch (TM.getTargetData()->getTypeAllocSize(C->getType())) {
+ switch (TM.getDataLayout()->getTypeAllocSize(C->getType())) {
case 4: return SectionKind::getMergeableConst4();
case 8: return SectionKind::getMergeableConst8();
case 16: return SectionKind::getMergeableConst16();
diff --git a/contrib/llvm/lib/Target/TargetMachineC.cpp b/contrib/llvm/lib/Target/TargetMachineC.cpp
index d6bba8b..f69c2ab 100644
--- a/contrib/llvm/lib/Target/TargetMachineC.cpp
+++ b/contrib/llvm/lib/Target/TargetMachineC.cpp
@@ -14,7 +14,7 @@
#include "llvm-c/Core.h"
#include "llvm-c/Target.h"
#include "llvm-c/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -146,7 +146,7 @@ char* LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T) {
}
LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) {
- return wrap(unwrap(T)->getTargetData());
+ return wrap(unwrap(T)->getDataLayout());
}
LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
@@ -158,14 +158,14 @@ LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
std::string error;
- const TargetData* td = TM->getTargetData();
+ const DataLayout* td = TM->getDataLayout();
if (!td) {
- error = "No TargetData in TargetMachine";
+ error = "No DataLayout in TargetMachine";
*ErrorMessage = strdup(error.c_str());
return true;
}
- pass.add(new TargetData(*td));
+ pass.add(new DataLayout(*td));
TargetMachine::CodeGenFileType ft;
switch (codegen) {
@@ -184,7 +184,7 @@ LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
}
if (TM->addPassesToEmitFile(pass, destf, ft)) {
- error = "No TargetData in TargetMachine";
+ error = "No DataLayout in TargetMachine";
*ErrorMessage = strdup(error.c_str());
return true;
}
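
After the rename the C API keeps its historical names but traffics in DataLayout underneath, so existing bindings keep working. Typical usage, with TM and PM as placeholders:

    #include "llvm-c/Core.h"
    #include "llvm-c/Target.h"
    #include "llvm-c/TargetMachine.h"

    void addMachineDataLayout(LLVMTargetMachineRef TM, LLVMPassManagerRef PM) {
      // Fetch the machine's data layout and register a copy with the PM.
      LLVMTargetDataRef TD = LLVMGetTargetMachineData(TM);
      LLVMAddTargetData(TD, PM);
    }
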
diff --git a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
index 2395f2b..be8b582 100644
--- a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
@@ -20,8 +20,10 @@ using namespace llvm;
TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
regclass_iterator RCB, regclass_iterator RCE,
- const char *const *subregindexnames)
- : InfoDesc(ID), SubRegIndexNames(subregindexnames),
+ const char *const *SRINames,
+ const unsigned *SRILaneMasks)
+ : InfoDesc(ID), SubRegIndexNames(SRINames),
+ SubRegIndexLaneMasks(SRILaneMasks),
RegClassBegin(RCB), RegClassEnd(RCE) {
}
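
Lane masks give each sub-register index a bitmask so overlap between sub-registers reduces to a bitwise AND. A conceptual sketch, assuming an accessor getSubRegIndexLaneMask() over the table added above (the accessor itself is not part of this hunk):

    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    // Two sub-register indices can touch the same lanes iff their masks
    // intersect (assumed accessor; illustration only).
    bool lanesOverlap(const TargetRegisterInfo &TRI, unsigned A, unsigned B) {
      return (TRI.getSubRegIndexLaneMask(A) &
              TRI.getSubRegIndexLaneMask(B)) != 0;
    }
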
diff --git a/contrib/llvm/lib/Target/TargetTransformImpl.cpp b/contrib/llvm/lib/Target/TargetTransformImpl.cpp
new file mode 100644
index 0000000..b36e6f8
--- /dev/null
+++ b/contrib/llvm/lib/Target/TargetTransformImpl.cpp
@@ -0,0 +1,353 @@
+// llvm/Target/TargetTransformImpl.cpp - Target Loop Trans Info ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Target/TargetTransformImpl.h"
+#include "llvm/Target/TargetLowering.h"
+#include <utility>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+//
+// Calls used by scalar transformations.
+//
+//===----------------------------------------------------------------------===//
+
+bool ScalarTargetTransformImpl::isLegalAddImmediate(int64_t imm) const {
+ return TLI->isLegalAddImmediate(imm);
+}
+
+bool ScalarTargetTransformImpl::isLegalICmpImmediate(int64_t imm) const {
+ return TLI->isLegalICmpImmediate(imm);
+}
+
+bool ScalarTargetTransformImpl::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
+ return TLI->isLegalAddressingMode(AM, Ty);
+}
+
+bool ScalarTargetTransformImpl::isTruncateFree(Type *Ty1, Type *Ty2) const {
+ return TLI->isTruncateFree(Ty1, Ty2);
+}
+
+bool ScalarTargetTransformImpl::isTypeLegal(Type *Ty) const {
+ EVT T = TLI->getValueType(Ty);
+ return TLI->isTypeLegal(T);
+}
+
+unsigned ScalarTargetTransformImpl::getJumpBufAlignment() const {
+ return TLI->getJumpBufAlignment();
+}
+
+unsigned ScalarTargetTransformImpl::getJumpBufSize() const {
+ return TLI->getJumpBufSize();
+}
+
+bool ScalarTargetTransformImpl::shouldBuildLookupTables() const {
+ return TLI->supportJumpTables() &&
+ (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
+ TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Calls used by the vectorizers.
+//
+//===----------------------------------------------------------------------===//
+int VectorTargetTransformImpl::InstructionOpcodeToISD(unsigned Opcode) const {
+ enum InstructionOpcodes {
+#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
+#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
+#include "llvm/Instruction.def"
+ };
+ switch (static_cast<InstructionOpcodes>(Opcode)) {
+ case Ret: return 0;
+ case Br: return 0;
+ case Switch: return 0;
+ case IndirectBr: return 0;
+ case Invoke: return 0;
+ case Resume: return 0;
+ case Unreachable: return 0;
+ case Add: return ISD::ADD;
+ case FAdd: return ISD::FADD;
+ case Sub: return ISD::SUB;
+ case FSub: return ISD::FSUB;
+ case Mul: return ISD::MUL;
+ case FMul: return ISD::FMUL;
+ case UDiv: return ISD::UDIV;
+ case SDiv: return ISD::SDIV;
+ case FDiv: return ISD::FDIV;
+ case URem: return ISD::UREM;
+ case SRem: return ISD::SREM;
+ case FRem: return ISD::FREM;
+ case Shl: return ISD::SHL;
+ case LShr: return ISD::SRL;
+ case AShr: return ISD::SRA;
+ case And: return ISD::AND;
+ case Or: return ISD::OR;
+ case Xor: return ISD::XOR;
+ case Alloca: return 0;
+ case Load: return ISD::LOAD;
+ case Store: return ISD::STORE;
+ case GetElementPtr: return 0;
+ case Fence: return 0;
+ case AtomicCmpXchg: return 0;
+ case AtomicRMW: return 0;
+ case Trunc: return ISD::TRUNCATE;
+ case ZExt: return ISD::ZERO_EXTEND;
+ case SExt: return ISD::SIGN_EXTEND;
+ case FPToUI: return ISD::FP_TO_UINT;
+ case FPToSI: return ISD::FP_TO_SINT;
+ case UIToFP: return ISD::UINT_TO_FP;
+ case SIToFP: return ISD::SINT_TO_FP;
+ case FPTrunc: return ISD::FP_ROUND;
+ case FPExt: return ISD::FP_EXTEND;
+ case PtrToInt: return ISD::BITCAST;
+ case IntToPtr: return ISD::BITCAST;
+ case BitCast: return ISD::BITCAST;
+ case ICmp: return ISD::SETCC;
+ case FCmp: return ISD::SETCC;
+ case PHI: return 0;
+ case Call: return 0;
+ case Select: return ISD::SELECT;
+ case UserOp1: return 0;
+ case UserOp2: return 0;
+ case VAArg: return 0;
+ case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
+ case InsertElement: return ISD::INSERT_VECTOR_ELT;
+ case ShuffleVector: return ISD::VECTOR_SHUFFLE;
+ case ExtractValue: return ISD::MERGE_VALUES;
+ case InsertValue: return ISD::MERGE_VALUES;
+ case LandingPad: return 0;
+ }
+
+ llvm_unreachable("Unknown instruction type encountered!");
+}
+
+std::pair<unsigned, MVT>
+VectorTargetTransformImpl::getTypeLegalizationCost(Type *Ty) const {
+
+ LLVMContext &C = Ty->getContext();
+ EVT MTy = TLI->getValueType(Ty);
+
+ unsigned Cost = 1;
+ // We keep legalizing the type until we find a legal kind. We assume that
+ // the only operation that costs anything is the split. After splitting
+ // we need to handle two types.
+ while (true) {
+ TargetLowering::LegalizeKind LK = TLI->getTypeConversion(C, MTy);
+
+ if (LK.first == TargetLowering::TypeLegal)
+ return std::make_pair(Cost, MTy.getSimpleVT());
+
+ if (LK.first == TargetLowering::TypeSplitVector ||
+ LK.first == TargetLowering::TypeExpandInteger)
+ Cost *= 2;
+
+ // Keep legalizing the type.
+ MTy = LK.second;
+ }
+}
+
+unsigned
+VectorTargetTransformImpl::getScalarizationOverhead(Type *Ty,
+ bool Insert,
+ bool Extract) const {
+ assert(Ty->isVectorTy() && "Can only scalarize vectors");
+ unsigned Cost = 0;
+
+ for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+ if (Insert)
+ Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
+ if (Extract)
+ Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+ }
+
+ return Cost;
+}
+
+unsigned VectorTargetTransformImpl::getArithmeticInstrCost(unsigned Opcode,
+ Type *Ty) const {
+ // Check if any of the operands are vector operands.
+ int ISD = InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Ty);
+
+ if (!TLI->isOperationExpand(ISD, LT.second)) {
+ // The operation is legal. Assume it costs 1. Multiply
+ // by the type-legalization overhead.
+ return LT.first * 1;
+ }
+
+ // Else, assume that we need to scalarize this op.
+ if (Ty->isVectorTy()) {
+ unsigned Num = Ty->getVectorNumElements();
+ unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
+ // Return the cost of multiple scalar invocations plus the cost of inserting
+ // and extracting the values.
+ return getScalarizationOverhead(Ty, true, true) + Num * Cost;
+ }
+
+ // We don't know anything about this scalar instruction.
+ return 1;
+}
+
+unsigned VectorTargetTransformImpl::getBroadcastCost(Type *Tp) const {
+ return 1;
+}
+
+unsigned VectorTargetTransformImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) const {
+ int ISD = InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ std::pair<unsigned, MVT> SrcLT = getTypeLegalizationCost(Src);
+ std::pair<unsigned, MVT> DstLT = getTypeLegalizationCost(Dst);
+
+ // Handle scalar conversions.
+ if (!Src->isVectorTy() && !Dst->isVectorTy()) {
+
+ // Scalar bitcasts are usually free.
+ if (Opcode == Instruction::BitCast)
+ return 0;
+
+ if (Opcode == Instruction::Trunc &&
+ TLI->isTruncateFree(SrcLT.second, DstLT.second))
+ return 0;
+
+ if (Opcode == Instruction::ZExt &&
+ TLI->isZExtFree(SrcLT.second, DstLT.second))
+ return 0;
+
+ // Just check the op cost. If the operation is legal then assume it costs 1.
+ if (!TLI->isOperationExpand(ISD, DstLT.second))
+ return 1;
+
+ // Assume that illegal scalar instructions are expensive.
+ return 4;
+ }
+
+ // Check vector-to-vector casts.
+ if (Dst->isVectorTy() && Src->isVectorTy()) {
+
+ // If the cast is between same-sized registers, then the check is simple.
+ if (SrcLT.first == DstLT.first &&
+ SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
+
+ // Bitcasts between types that are legalized to the same type are free.
+ if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
+ return 0;
+
+ // Assume that Zext is done using AND.
+ if (Opcode == Instruction::ZExt)
+ return 1;
+
+ // Assume that sext is done using SHL and SRA.
+ if (Opcode == Instruction::SExt)
+ return 2;
+
+ // Just check the op cost. If the operation is legal then assume it costs
+ // 1 and multiply by the type-legalization overhead.
+ if (!TLI->isOperationExpand(ISD, DstLT.second))
+ return SrcLT.first * 1;
+ }
+
+ // If we are converting vectors and the operation is illegal, or
+ // if the vectors are legalized to different types, estimate the
+ // scalarization costs.
+ unsigned Num = Dst->getVectorNumElements();
+ unsigned Cost = getCastInstrCost(Opcode, Dst->getScalarType(),
+ Src->getScalarType());
+
+ // Return the cost of multiple scalar invocations plus the cost of
+ // inserting and extracting the values.
+ return getScalarizationOverhead(Dst, true, true) + Num * Cost;
+ }
+
+ // We already handled vector-to-vector and scalar-to-scalar conversions. This
+ // is where we handle bitcast between vectors and scalars. We need to assume
+ // that the conversion is scalarized in one way or another.
+ if (Opcode == Instruction::BitCast)
+ // Illegal bitcasts are done by storing and loading from a stack slot.
+ return (Src->isVectorTy()? getScalarizationOverhead(Src, false, true):0) +
+ (Dst->isVectorTy()? getScalarizationOverhead(Dst, true, false):0);
+
+ llvm_unreachable("Unhandled cast");
+ }
+
+unsigned VectorTargetTransformImpl::getCFInstrCost(unsigned Opcode) const {
+ return 1;
+}
+
+unsigned VectorTargetTransformImpl::getCmpSelInstrCost(unsigned Opcode,
+ Type *ValTy,
+ Type *CondTy) const {
+ int ISD = InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ // Selects on vectors are actually vector selects.
+ if (ISD == ISD::SELECT) {
+ assert(CondTy && "CondTy must exist");
+ if (CondTy->isVectorTy())
+ ISD = ISD::VSELECT;
+ }
+
+ std::pair<unsigned, MVT> LT = getTypeLegalizationCost(ValTy);
+
+ if (!TLI->isOperationExpand(ISD, LT.second)) {
+ // The operation is legal. Assume it costs 1. Multiply
+ // by the type-legalization overhead.
+ return LT.first * 1;
+ }
+
+ // Otherwise, assume that the operation is scalarized.
+ if (ValTy->isVectorTy()) {
+ unsigned Num = ValTy->getVectorNumElements();
+ if (CondTy)
+ CondTy = CondTy->getScalarType();
+ unsigned Cost = getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
+ CondTy);
+
+ // Return the cost of multiple scalar invocations plus the cost of inserting
+ // and extracting the values.
+ return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
+ }
+
+ // Unknown scalar opcode.
+ return 1;
+}
+
+unsigned VectorTargetTransformImpl::getVectorInstrCost(unsigned Opcode,
+ Type *Val,
+ unsigned Index) const {
+ return 1;
+}
+
+unsigned
+VectorTargetTransformImpl::getInstrCost(unsigned Opcode, Type *Ty1,
+ Type *Ty2) const {
+ return 1;
+}
+
+unsigned
+VectorTargetTransformImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) const {
+ std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Src);
+
+ // Assume that all loads of legal types cost 1.
+ return LT.first;
+}
+
+unsigned
+VectorTargetTransformImpl::getNumberOfParts(Type *Tp) const {
+ std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Tp);
+ return LT.first;
+}
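
A sketch of how the vectorizers are expected to reach this implementation, via the TargetMachine hook added earlier in this patch (the type and opcode below are arbitrary examples):

    #include "llvm/Target/TargetMachine.h"
    #include "llvm/TargetTransformInfo.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Instruction.h"
    using namespace llvm;

    unsigned costOfVectorFAdd(const TargetMachine &TM, LLVMContext &Ctx) {
      const VectorTargetTransformInfo *VTTI =
        TM.getVectorTargetTransformInfo();
      Type *V4F32 = VectorType::get(Type::getFloatTy(Ctx), 4);
      // A legal <4 x float> fadd costs the legalization factor; otherwise the
      // scalarization overhead plus four scalar fadds is returned.
      return VTTI ? VTTI->getArithmeticInstrCost(Instruction::FAdd, V4F32) : 1;
    }
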
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
index 2794e60..66ad353 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
@@ -18,19 +18,19 @@
using namespace llvm;
namespace {
-
+
class X86AsmLexer : public MCTargetAsmLexer {
const MCAsmInfo &AsmInfo;
-
+
bool tentativeIsValid;
AsmToken tentativeToken;
-
+
const AsmToken &lexTentative() {
tentativeToken = getLexer()->Lex();
tentativeIsValid = true;
return tentativeToken;
}
-
+
const AsmToken &lexDefinite() {
if (tentativeIsValid) {
tentativeIsValid = false;
@@ -38,7 +38,7 @@ class X86AsmLexer : public MCTargetAsmLexer {
}
return getLexer()->Lex();
}
-
+
AsmToken LexTokenATT();
AsmToken LexTokenIntel();
protected:
@@ -47,7 +47,7 @@ protected:
SetError(SMLoc(), "No MCAsmLexer installed");
return AsmToken(AsmToken::Error, "", 0);
}
-
+
switch (AsmInfo.getAssemblerDialect()) {
default:
SetError(SMLoc(), "Unhandled dialect");
@@ -71,33 +71,32 @@ public:
AsmToken X86AsmLexer::LexTokenATT() {
AsmToken lexedToken = lexDefinite();
-
+
switch (lexedToken.getKind()) {
default:
return lexedToken;
case AsmToken::Error:
SetError(Lexer->getErrLoc(), Lexer->getErr());
return lexedToken;
-
+
case AsmToken::Percent: {
const AsmToken &nextToken = lexTentative();
if (nextToken.getKind() != AsmToken::Identifier)
return lexedToken;
-
if (unsigned regID = MatchRegisterName(nextToken.getString())) {
lexDefinite();
-
+
// FIXME: This is completely wrong when there is a space or other
// punctuation between the % and the register name.
StringRef regStr(lexedToken.getString().data(),
- lexedToken.getString().size() +
+ lexedToken.getString().size() +
nextToken.getString().size());
-
- return AsmToken(AsmToken::Register, regStr,
+
+ return AsmToken(AsmToken::Register, regStr,
static_cast<int64_t>(regID));
}
-
+
// Match register name failed. If this is "db[0-7]", match it as an alias
// for dr[0-7].
if (nextToken.getString().size() == 3 &&
@@ -113,29 +112,29 @@ AsmToken X86AsmLexer::LexTokenATT() {
case '6': RegNo = X86::DR6; break;
case '7': RegNo = X86::DR7; break;
}
-
+
if (RegNo != -1) {
lexDefinite();
// FIXME: This is completely wrong when there is a space or other
// punctuation between the % and the register name.
StringRef regStr(lexedToken.getString().data(),
- lexedToken.getString().size() +
+ lexedToken.getString().size() +
nextToken.getString().size());
- return AsmToken(AsmToken::Register, regStr,
+ return AsmToken(AsmToken::Register, regStr,
static_cast<int64_t>(RegNo));
}
}
-
-
+
+
return lexedToken;
- }
+ }
}
}
AsmToken X86AsmLexer::LexTokenIntel() {
const AsmToken &lexedToken = lexDefinite();
-
+
switch(lexedToken.getKind()) {
default:
return lexedToken;
@@ -144,7 +143,7 @@ AsmToken X86AsmLexer::LexTokenIntel() {
return lexedToken;
case AsmToken::Identifier: {
unsigned regID = MatchRegisterName(lexedToken.getString().lower());
-
+
if (regID)
return AsmToken(AsmToken::Register,
lexedToken.getString(),
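
The tentativeToken machinery above implements one token of lookahead over the wrapped lexer: lexTentative() lexes and caches, lexDefinite() returns the cached token if one is pending and lexes otherwise. The same pattern in isolation (standalone sketch, C++03-style to match the surrounding code):

    // Illustration only: cache-one-token lookahead over any lexer with Lex().
    template <typename Token, typename Lexer>
    class LookaheadLexer {
      Lexer &L;
      Token Cached;
      bool CacheValid;
    public:
      explicit LookaheadLexer(Lexer &Lex) : L(Lex), CacheValid(false) {}
      const Token &peek() {   // like lexTentative(): always lexes and caches
        Cached = L.Lex();
        CacheValid = true;
        return Cached;
      }
      Token next() {          // like lexDefinite(): consume cached, else lex
        if (CacheValid) {
          CacheValid = false;
          return Cached;
        }
        return L.Lex();
      }
    };
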
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index fbbaa9500..ce446e7 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -11,12 +11,14 @@
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
@@ -33,13 +35,16 @@ struct X86Operand;
class X86AsmParser : public MCTargetAsmParser {
MCSubtargetInfo &STI;
MCAsmParser &Parser;
+ ParseInstructionInfo *InstInfo;
private:
MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
bool Error(SMLoc L, const Twine &Msg,
- ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>(),
+ bool MatchingInlineAsm = false) {
+ if (MatchingInlineAsm) return true;
return Parser.Error(L, Msg, Ranges);
}
@@ -51,23 +56,25 @@ private:
X86Operand *ParseOperand();
X86Operand *ParseATTOperand();
X86Operand *ParseIntelOperand();
- X86Operand *ParseIntelMemOperand();
+ X86Operand *ParseIntelOffsetOfOperator(SMLoc StartLoc);
+ X86Operand *ParseIntelTypeOperator(SMLoc StartLoc);
+ X86Operand *ParseIntelMemOperand(unsigned SegReg, SMLoc StartLoc);
X86Operand *ParseIntelBracExpression(unsigned SegReg, unsigned Size);
X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
+ bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr **NewDisp,
+ SmallString<64> &Err);
+
bool ParseDirectiveWord(unsigned Size, SMLoc L);
bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
bool processInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
- bool MatchAndEmitInstruction(SMLoc IDLoc,
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out);
-
- bool MatchInstruction(SMLoc IDLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- SmallVectorImpl<MCInst> &MCInsts);
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm);
/// isSrcOp - Returns true if operand is either (%rsi) or %ds:(%rsi)
/// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode.
@@ -96,14 +103,15 @@ private:
public:
X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
- : MCTargetAsmParser(), STI(sti), Parser(parser) {
+ : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) {
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
- virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
+ virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
virtual bool ParseDirective(AsmToken DirectiveID);
@@ -159,6 +167,7 @@ struct X86Operand : public MCParsedAsmOperand {
} Kind;
SMLoc StartLoc, EndLoc;
+ SMLoc OffsetOfLoc;
union {
struct {
@@ -172,6 +181,7 @@ struct X86Operand : public MCParsedAsmOperand {
struct {
const MCExpr *Val;
+ bool NeedAsmRewrite;
} Imm;
struct {
@@ -181,6 +191,7 @@ struct X86Operand : public MCParsedAsmOperand {
unsigned IndexReg;
unsigned Scale;
unsigned Size;
+ bool NeedSizeDir;
} Mem;
};
@@ -191,8 +202,11 @@ struct X86Operand : public MCParsedAsmOperand {
SMLoc getStartLoc() const { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const { return EndLoc; }
-
+ /// getLocRange - Get the range between the first and last token of this
+ /// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
+ /// getOffsetOfLoc - Get the location of the offset operator.
+ SMLoc getOffsetOfLoc() const { return OffsetOfLoc; }
virtual void print(raw_ostream &OS) const {}
@@ -216,6 +230,11 @@ struct X86Operand : public MCParsedAsmOperand {
return Imm.Val;
}
+ bool needAsmRewrite() const {
+ assert(Kind == Immediate && "Invalid access!");
+ return Imm.NeedAsmRewrite;
+ }
+
const MCExpr *getMemDisp() const {
assert(Kind == Memory && "Invalid access!");
return Mem.Disp;
@@ -312,6 +331,20 @@ struct X86Operand : public MCParsedAsmOperand {
return isImmSExti64i32Value(CE->getValue());
}
+ unsigned getMemSize() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.Size;
+ }
+
+ bool isOffsetOf() const {
+ return OffsetOfLoc.getPointer();
+ }
+
+ bool needSizeDirective() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.NeedSizeDir;
+ }
+
bool isMem() const { return Kind == Memory; }
bool isMem8() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 8);
@@ -437,21 +470,25 @@ struct X86Operand : public MCParsedAsmOperand {
return Res;
}
- static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc) {
+ static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
+ SMLoc OffsetOfLoc = SMLoc()) {
X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc);
Res->Reg.RegNo = RegNo;
+ Res->OffsetOfLoc = OffsetOfLoc;
return Res;
}
- static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc){
+ static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc,
+ bool NeedRewrite = true){
X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc);
Res->Imm.Val = Val;
+ Res->Imm.NeedAsmRewrite = NeedRewrite;
return Res;
}
/// Create an absolute memory operand.
- static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc,
- SMLoc EndLoc, unsigned Size = 0) {
+ static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
+ unsigned Size = 0, bool NeedSizeDir = false){
X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
Res->Mem.SegReg = 0;
Res->Mem.Disp = Disp;
@@ -459,6 +496,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.IndexReg = 0;
Res->Mem.Scale = 1;
Res->Mem.Size = Size;
+ Res->Mem.NeedSizeDir = NeedSizeDir;
return Res;
}
@@ -466,7 +504,7 @@ struct X86Operand : public MCParsedAsmOperand {
static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp,
unsigned BaseReg, unsigned IndexReg,
unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
- unsigned Size = 0) {
+ unsigned Size = 0, bool NeedSizeDir = false) {
// We should never just have a displacement, that should be parsed as an
// absolute memory operand.
assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
@@ -481,6 +519,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.IndexReg = IndexReg;
Res->Mem.Scale = Scale;
Res->Mem.Size = Size;
+ Res->Mem.NeedSizeDir = NeedSizeDir;
return Res;
}
};
@@ -510,12 +549,13 @@ bool X86AsmParser::isDstOp(X86Operand &Op) {
bool X86AsmParser::ParseRegister(unsigned &RegNo,
SMLoc &StartLoc, SMLoc &EndLoc) {
RegNo = 0;
- if (!isParsingIntelSyntax()) {
- const AsmToken &TokPercent = Parser.getTok();
- assert(TokPercent.is(AsmToken::Percent) && "Invalid token kind!");
- StartLoc = TokPercent.getLoc();
+ const AsmToken &PercentTok = Parser.getTok();
+ StartLoc = PercentTok.getLoc();
+
+ // If we encounter a %, ignore it. This code handles registers with and
+ // without the prefix; unprefixed registers can occur in CFI directives.
+ if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent))
Parser.Lex(); // Eat percent token.
- }
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) {
@@ -621,23 +661,25 @@ X86Operand *X86AsmParser::ParseOperand() {
/// getIntelMemOperandSize - Return the Intel memory operand size.
static unsigned getIntelMemOperandSize(StringRef OpStr) {
- unsigned Size = 0;
- if (OpStr == "BYTE") Size = 8;
- if (OpStr == "WORD") Size = 16;
- if (OpStr == "DWORD") Size = 32;
- if (OpStr == "QWORD") Size = 64;
- if (OpStr == "XWORD") Size = 80;
- if (OpStr == "XMMWORD") Size = 128;
- if (OpStr == "YMMWORD") Size = 256;
+ unsigned Size = StringSwitch<unsigned>(OpStr)
+ .Cases("BYTE", "byte", 8)
+ .Cases("WORD", "word", 16)
+ .Cases("DWORD", "dword", 32)
+ .Cases("QWORD", "qword", 64)
+ .Cases("XWORD", "xword", 80)
+ .Cases("XMMWORD", "xmmword", 128)
+ .Cases("YMMWORD", "ymmword", 256)
+ .Default(0);
return Size;
}
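
The StringSwitch above is a compact cascade over both spellings of each size keyword. As a rough standalone sketch of the same table (illustrative only; the function name and use of std::string here are ours, the real code stays on StringRef):

#include <cctype>
#include <string>
#include <unordered_map>

// Map an Intel size keyword, in either case, to its width in bits;
// return 0 for an unknown keyword, matching the .Default(0) above.
static unsigned intelMemOperandSizeBits(std::string Op) {
  for (char &C : Op)
    C = std::toupper(static_cast<unsigned char>(C));
  static const std::unordered_map<std::string, unsigned> Sizes = {
      {"BYTE", 8},   {"WORD", 16},     {"DWORD", 32},   {"QWORD", 64},
      {"XWORD", 80}, {"XMMWORD", 128}, {"YMMWORD", 256}};
  auto It = Sizes.find(Op);
  return It == Sizes.end() ? 0 : It->second;
}
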
-X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
+X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
unsigned Size) {
unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
- SMLoc Start = Parser.getTok().getLoc(), End;
+ const AsmToken &Tok = Parser.getTok();
+ SMLoc Start = Tok.getLoc(), End;
- const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
+ const MCExpr *Disp = MCConstantExpr::Create(0, getContext());
// Parse [ BaseReg + Scale*IndexReg + Disp ] or [ symbol ]
// Eat '['
@@ -653,15 +695,17 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
if (getLexer().isNot(AsmToken::RBrac))
return ErrorOperand(Start, "Expected ']' token!");
Parser.Lex();
+ End = Tok.getLoc();
return X86Operand::CreateMem(Disp, Start, End, Size);
}
} else if (getLexer().is(AsmToken::Integer)) {
- int64_t Val = Parser.getTok().getIntVal();
+ int64_t Val = Tok.getIntVal();
Parser.Lex();
- SMLoc Loc = Parser.getTok().getLoc();
+ SMLoc Loc = Tok.getLoc();
if (getLexer().is(AsmToken::RBrac)) {
// Handle '[' number ']'
Parser.Lex();
+ End = Tok.getLoc();
const MCExpr *Disp = MCConstantExpr::Create(Val, getContext());
if (SegReg)
return X86Operand::CreateMem(SegReg, Disp, 0, 0, Scale,
@@ -670,7 +714,7 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
} else if (getLexer().is(AsmToken::Star)) {
// Handle '[' Scale*IndexReg ']'
Parser.Lex();
- SMLoc IdxRegLoc = Parser.getTok().getLoc();
+ SMLoc IdxRegLoc = Tok.getLoc();
if (ParseRegister(IndexReg, IdxRegLoc, End))
return ErrorOperand(IdxRegLoc, "Expected register");
Scale = Val;
@@ -678,16 +722,27 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
return ErrorOperand(Loc, "Unexpected token");
}
- if (getLexer().is(AsmToken::Plus) || getLexer().is(AsmToken::Minus)) {
- bool isPlus = getLexer().is(AsmToken::Plus);
+ // Parse ][ as a plus.
+ bool ExpectRBrac = true;
+ if (getLexer().is(AsmToken::RBrac)) {
+ ExpectRBrac = false;
Parser.Lex();
- SMLoc PlusLoc = Parser.getTok().getLoc();
+ End = Tok.getLoc();
+ }
+
+ if (getLexer().is(AsmToken::Plus) || getLexer().is(AsmToken::Minus) ||
+ getLexer().is(AsmToken::LBrac)) {
+ ExpectRBrac = true;
+ bool isPlus = getLexer().is(AsmToken::Plus) ||
+ getLexer().is(AsmToken::LBrac);
+ Parser.Lex();
+ SMLoc PlusLoc = Tok.getLoc();
if (getLexer().is(AsmToken::Integer)) {
- int64_t Val = Parser.getTok().getIntVal();
+ int64_t Val = Tok.getIntVal();
Parser.Lex();
if (getLexer().is(AsmToken::Star)) {
Parser.Lex();
- SMLoc IdxRegLoc = Parser.getTok().getLoc();
+ SMLoc IdxRegLoc = Tok.getLoc();
if (ParseRegister(IndexReg, IdxRegLoc, End))
return ErrorOperand(IdxRegLoc, "Expected register");
Scale = Val;
@@ -698,21 +753,48 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
return ErrorOperand(PlusLoc, "unexpected token after +");
} else if (getLexer().is(AsmToken::Identifier)) {
// This could be an index register or a displacement expression.
- End = Parser.getTok().getLoc();
+ End = Tok.getLoc();
if (!IndexReg)
ParseRegister(IndexReg, Start, End);
else if (getParser().ParseExpression(Disp, End)) return 0;
}
}
+
+ // Parse ][ as a plus.
+ if (getLexer().is(AsmToken::RBrac)) {
+ ExpectRBrac = false;
+ Parser.Lex();
+ End = Tok.getLoc();
+ if (getLexer().is(AsmToken::LBrac)) {
+ ExpectRBrac = true;
+ Parser.Lex();
+ if (getParser().ParseExpression(Disp, End))
+ return 0;
+ }
+ } else if (ExpectRBrac) {
+ if (getParser().ParseExpression(Disp, End))
+ return 0;
+ }
- if (getLexer().isNot(AsmToken::RBrac))
- if (getParser().ParseExpression(Disp, End)) return 0;
+ if (ExpectRBrac) {
+ if (getLexer().isNot(AsmToken::RBrac))
+ return ErrorOperand(End, "expected ']' token!");
+ Parser.Lex();
+ End = Tok.getLoc();
+ }
- End = Parser.getTok().getLoc();
- if (getLexer().isNot(AsmToken::RBrac))
- return ErrorOperand(End, "expected ']' token!");
- Parser.Lex();
- End = Parser.getTok().getLoc();
+ // Parse the dot operator (e.g., [ebx].foo.bar).
+ if (Tok.getString().startswith(".")) {
+ SmallString<64> Err;
+ const MCExpr *NewDisp;
+ if (ParseIntelDotOperator(Disp, &NewDisp, Err))
+ return ErrorOperand(Tok.getLoc(), Err);
+
+ Parser.Lex(); // Eat the field.
+ Disp = NewDisp;
+ }
+
+ End = Tok.getLoc();
// handle [-42]
if (!BaseReg && !IndexReg)
@@ -723,15 +805,15 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
}
/// ParseIntelMemOperand - Parse an Intel-style memory operand.
-X86Operand *X86AsmParser::ParseIntelMemOperand() {
+X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, SMLoc Start) {
const AsmToken &Tok = Parser.getTok();
- SMLoc Start = Parser.getTok().getLoc(), End;
- unsigned SegReg = 0;
+ SMLoc End;
unsigned Size = getIntelMemOperandSize(Tok.getString());
if (Size) {
Parser.Lex();
- assert (Tok.getString() == "PTR" && "Unexpected token!");
+ assert ((Tok.getString() == "PTR" || Tok.getString() == "ptr") &&
+ "Unexpected token!");
Parser.Lex();
}
@@ -750,12 +832,164 @@ X86Operand *X86AsmParser::ParseIntelMemOperand() {
const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
if (getParser().ParseExpression(Disp, End)) return 0;
- return X86Operand::CreateMem(Disp, Start, End, Size);
+ End = Parser.getTok().getLoc();
+
+ bool NeedSizeDir = false;
+ if (!Size && isParsingInlineAsm()) {
+ if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Disp)) {
+ const MCSymbol &Sym = SymRef->getSymbol();
+ // FIXME: The SemaLookup will fail if the name is anything other than an
+ // identifier.
+ // FIXME: Pass a valid SMLoc.
+ SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Size);
+ NeedSizeDir = Size > 0;
+ }
+ }
+ if (!isParsingInlineAsm())
+ return X86Operand::CreateMem(Disp, Start, End, Size);
+ else
+ // When parsing inline assembly we set the base register to a non-zero value
+ // as we don't know the actual value at this time. This is necessary to
+ // get the matching correct in some cases.
+ return X86Operand::CreateMem(/*SegReg*/0, Disp, /*BaseReg*/1, /*IndexReg*/0,
+ /*Scale*/1, Start, End, Size, NeedSizeDir);
+}
+
+/// Parse the '.' operator.
+bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
+ const MCExpr **NewDisp,
+ SmallString<64> &Err) {
+ AsmToken Tok = Parser.getTok();
+ uint64_t OrigDispVal, DotDispVal;
+
+ // FIXME: Handle non-constant expressions.
+ if (const MCConstantExpr *OrigDisp = dyn_cast<MCConstantExpr>(Disp)) {
+ OrigDispVal = OrigDisp->getValue();
+ } else {
+ Err = "Non-constant offsets are not supported!";
+ return true;
+ }
+
+ // Drop the '.'.
+ StringRef DotDispStr = Tok.getString().drop_front(1);
+
+ // .Imm gets lexed as a real.
+ if (Tok.is(AsmToken::Real)) {
+ APInt DotDisp;
+ DotDispStr.getAsInteger(10, DotDisp);
+ DotDispVal = DotDisp.getZExtValue();
+ } else if (Tok.is(AsmToken::Identifier)) {
+ // We should only see an identifier when parsing the original inline asm.
+ // The front-end should rewrite this in terms of immediates.
+ assert (isParsingInlineAsm() && "Unexpected field name!");
+
+ unsigned DotDisp;
+ std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
+ if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second,
+ DotDisp)) {
+ Err = "Unable to lookup field reference!";
+ return true;
+ }
+ DotDispVal = DotDisp;
+ } else {
+ Err = "Unexpected token type!";
+ return true;
+ }
+
+ if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) {
+ SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data());
+ unsigned Len = DotDispStr.size();
+ unsigned Val = OrigDispVal + DotDispVal;
+ InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_DotOperator, Loc, Len,
+ Val));
+ }
+
+ *NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext());
+ return false;
+}
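
What ParseIntelDotOperator computes reduces to: the constant displacement parsed so far, plus the byte offset of the ".field" suffix, folded back into one constant. A minimal sketch under that reading; the offset table stands in for the front-end's LookupInlineAsmField callback and its entries are hypothetical:

#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>

// Fold a base displacement and a "base.member" field reference into the
// single constant that replaces the original MCConstantExpr.
static std::optional<int64_t> foldDotOperator(int64_t OrigDisp,
                                              const std::string &BaseMember) {
  static const std::unordered_map<std::string, int64_t> FieldOffsets = {
      {"foo.bar", 4}, {"pt.y", 8}};  // hypothetical struct layouts
  auto It = FieldOffsets.find(BaseMember);
  if (It == FieldOffsets.end())
    return std::nullopt;  // "Unable to lookup field reference!"
  return OrigDisp + It->second;  // value of the new constant displacement
}
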
+
+/// Parse the 'offset' operator. This operator is used to specify the
+/// location rather than the content of a variable.
+X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) {
+ SMLoc OffsetOfLoc = Start;
+ Parser.Lex(); // Eat offset.
+ Start = Parser.getTok().getLoc();
+ assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");
+
+ SMLoc End;
+ const MCExpr *Val;
+ if (getParser().ParseExpression(Val, End))
+ return ErrorOperand(Start, "Unable to parse expression!");
+
+ End = Parser.getTok().getLoc();
+
+ // Don't emit the offset operator.
+ InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7));
+
+ // The offset operator will have an 'r' constraint, thus we need to create a
+ // register operand to ensure proper matching. Just pick a GPR based on
+ // the size of a pointer.
+ unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
+ return X86Operand::CreateReg(RegNo, Start, End, OffsetOfLoc);
+}
+
+/// Parse the 'TYPE' operator. The TYPE operator returns the size of a C or
+/// C++ type or variable. If the variable is an array, TYPE returns the size of
+/// a single element of the array.
+X86Operand *X86AsmParser::ParseIntelTypeOperator(SMLoc Start) {
+ SMLoc TypeLoc = Start;
+ Parser.Lex(); // Eat the TYPE operator.
+ Start = Parser.getTok().getLoc();
+ assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");
+
+ SMLoc End;
+ const MCExpr *Val;
+ if (getParser().ParseExpression(Val, End))
+ return 0;
+
+ End = Parser.getTok().getLoc();
+
+ unsigned Size = 0;
+ if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Val)) {
+ const MCSymbol &Sym = SymRef->getSymbol();
+ // FIXME: The SemaLookup will fail if the name is anything other than an
+ // identifier.
+ // FIXME: Pass a valid SMLoc.
+ if (!SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Size))
+ return ErrorOperand(Start, "Unable to lookup TYPE of expr!");
+
+ Size /= 8; // Size is reported in bits, but we want bytes here.
+ }
+
+ // Rewrite the type operator and the C or C++ type or variable in terms of an
+ // immediate. E.g. TYPE foo -> $$4
+ unsigned Len = End.getPointer() - TypeLoc.getPointer();
+ InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, Size));
+
+ const MCExpr *Imm = MCConstantExpr::Create(Size, getContext());
+ return X86Operand::CreateImm(Imm, Start, End, /*NeedAsmRewrite*/false);
}
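
The net effect of ParseIntelTypeOperator: resolve the variable's size (the callback reports bits), convert to bytes, and rewrite the whole "TYPE foo" text as a plain immediate such as "$$4". A hedged sketch of that lookup-and-convert step; the table stands in for LookupInlineAsmIdentifier and its entries are hypothetical:

#include <optional>
#include <string>
#include <unordered_map>

// Return the element size of a named variable in bytes, or nothing when
// the lookup fails ("Unable to lookup TYPE of expr!").
static std::optional<unsigned> typeOperatorBytes(const std::string &Name) {
  static const std::unordered_map<std::string, unsigned> SizeInBits = {
      {"foo", 32}, {"arr", 64}};  // hypothetical variables
  auto It = SizeInBits.find(Name);
  if (It == SizeInBits.end())
    return std::nullopt;
  return It->second / 8;  // bits -> bytes, as the parser does
}
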
X86Operand *X86AsmParser::ParseIntelOperand() {
SMLoc Start = Parser.getTok().getLoc(), End;
+ // Offset operator.
+ StringRef AsmTokStr = Parser.getTok().getString();
+ if ((AsmTokStr == "offset" || AsmTokStr == "OFFSET") &&
+ isParsingInlineAsm())
+ return ParseIntelOffsetOfOperator(Start);
+
+ // Type directive.
+ if ((AsmTokStr == "type" || AsmTokStr == "TYPE") &&
+ isParsingInlineAsm())
+ return ParseIntelTypeOperator(Start);
+
+ // Unsupported directives.
+ if (isParsingIntelSyntax() &&
+ (AsmTokStr == "size" || AsmTokStr == "SIZE" ||
+ AsmTokStr == "length" || AsmTokStr == "LENGTH"))
+ return ErrorOperand(Start, "Unsupported directive!");
+
// immediate.
if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) ||
getLexer().is(AsmToken::Minus)) {
@@ -769,12 +1003,17 @@ X86Operand *X86AsmParser::ParseIntelOperand() {
// register
unsigned RegNo = 0;
if (!ParseRegister(RegNo, Start, End)) {
- End = Parser.getTok().getLoc();
- return X86Operand::CreateReg(RegNo, Start, End);
+ // If this is a segment register followed by a ':', then this is the start
+ // of a memory reference, otherwise this is a normal register reference.
+ if (getLexer().isNot(AsmToken::Colon))
+ return X86Operand::CreateReg(RegNo, Start, Parser.getTok().getLoc());
+
+ getParser().Lex(); // Eat the colon.
+ return ParseIntelMemOperand(RegNo, Start);
}
// mem operand
- return ParseIntelMemOperand();
+ return ParseIntelMemOperand(0, Start);
}
X86Operand *X86AsmParser::ParseATTOperand() {
@@ -972,8 +1211,9 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
}
bool X86AsmParser::
-ParseInstruction(StringRef Name, SMLoc NameLoc,
+ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ InstInfo = &Info;
StringRef PatchedName = Name;
// FIXME: Hack to recognize setneb as setne.
@@ -1509,28 +1749,18 @@ processInstruction(MCInst &Inst,
}
bool X86AsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc,
+MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out) {
- SmallVector<MCInst, 2> Insts;
- bool Error = MatchInstruction(IDLoc, Operands, Insts);
- if (!Error)
- for (unsigned i = 0, e = Insts.size(); i != e; ++i)
- Out.EmitInstruction(Insts[i]);
- return Error;
-}
-
-bool X86AsmParser::
-MatchInstruction(SMLoc IDLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- SmallVectorImpl<MCInst> &MCInsts) {
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
assert(!Operands.empty() && "Unexpected empty operand list!");
X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
assert(Op->isToken() && "Leading operand should always be a mnemonic!");
+ ArrayRef<SMRange> EmptyRanges = ArrayRef<SMRange>();
// First, handle aliases that expand to multiple instructions.
// FIXME: This should be replaced with a real .td file alias mechanism.
- // Also, MatchInstructionImpl should do actually *do* the EmitInstruction
+ // Also, MatchInstructionImpl should actually *do* the EmitInstruction
// call.
if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" ||
Op->getToken() == "fstsww" || Op->getToken() == "fstcww" ||
@@ -1539,7 +1769,8 @@ MatchInstruction(SMLoc IDLoc,
MCInst Inst;
Inst.setOpcode(X86::WAIT);
Inst.setLoc(IDLoc);
- MCInsts.push_back(Inst);
+ if (!MatchingInlineAsm)
+ Out.EmitInstruction(Inst);
const char *Repl =
StringSwitch<const char*>(Op->getToken())
@@ -1558,28 +1789,30 @@ MatchInstruction(SMLoc IDLoc,
}
bool WasOriginallyInvalidOperand = false;
- unsigned OrigErrorInfo;
MCInst Inst;
// First, try a direct match.
- switch (MatchInstructionImpl(Operands, Inst, OrigErrorInfo,
+ switch (MatchInstructionImpl(Operands, Inst,
+ ErrorInfo, MatchingInlineAsm,
isParsingIntelSyntax())) {
default: break;
case Match_Success:
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the
// individual transformations can chain off each other.
- while (processInstruction(Inst, Operands))
- ;
+ if (!MatchingInlineAsm)
+ while (processInstruction(Inst, Operands))
+ ;
Inst.setLoc(IDLoc);
- MCInsts.push_back(Inst);
+ if (!MatchingInlineAsm)
+ Out.EmitInstruction(Inst);
+ Opcode = Inst.getOpcode();
return false;
case Match_MissingFeature:
- Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled",
+ EmptyRanges, MatchingInlineAsm);
return true;
- case Match_ConversionFail:
- return Error(IDLoc, "unable to convert operands to instruction");
case Match_InvalidOperand:
WasOriginallyInvalidOperand = true;
break;
@@ -1612,13 +1845,17 @@ MatchInstruction(SMLoc IDLoc,
unsigned ErrorInfoIgnore;
unsigned Match1, Match2, Match3, Match4;
- Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
+ Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
+ isParsingIntelSyntax());
Tmp[Base.size()] = Suffixes[1];
- Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
+ Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
+ isParsingIntelSyntax());
Tmp[Base.size()] = Suffixes[2];
- Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
+ Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
+ isParsingIntelSyntax());
Tmp[Base.size()] = Suffixes[3];
- Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
+ Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
+ isParsingIntelSyntax());
// Restore the old token.
Op->setTokenValue(Base);
@@ -1631,7 +1868,9 @@ MatchInstruction(SMLoc IDLoc,
(Match3 == Match_Success) + (Match4 == Match_Success);
if (NumSuccessfulMatches == 1) {
Inst.setLoc(IDLoc);
- MCInsts.push_back(Inst);
+ if (!MatchingInlineAsm)
+ Out.EmitInstruction(Inst);
+ Opcode = Inst.getOpcode();
return false;
}
@@ -1658,7 +1897,7 @@ MatchInstruction(SMLoc IDLoc,
OS << "'" << Base << MatchChars[i] << "'";
}
OS << ")";
- Error(IDLoc, OS.str());
+ Error(IDLoc, OS.str(), EmptyRanges, MatchingInlineAsm);
return true;
}
@@ -1669,31 +1908,36 @@ MatchInstruction(SMLoc IDLoc,
if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) &&
(Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) {
if (!WasOriginallyInvalidOperand) {
+ ArrayRef<SMRange> Ranges = MatchingInlineAsm ? EmptyRanges :
+ Op->getLocRange();
return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
- Op->getLocRange());
+ Ranges, MatchingInlineAsm);
}
// Recover location info for the operand if we know which was the problem.
- if (OrigErrorInfo != ~0U) {
- if (OrigErrorInfo >= Operands.size())
- return Error(IDLoc, "too few operands for instruction");
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction",
+ EmptyRanges, MatchingInlineAsm);
- X86Operand *Operand = (X86Operand*)Operands[OrigErrorInfo];
+ X86Operand *Operand = (X86Operand*)Operands[ErrorInfo];
if (Operand->getStartLoc().isValid()) {
SMRange OperandRange = Operand->getLocRange();
return Error(Operand->getStartLoc(), "invalid operand for instruction",
- OperandRange);
+ OperandRange, MatchingInlineAsm);
}
}
- return Error(IDLoc, "invalid operand for instruction");
+ return Error(IDLoc, "invalid operand for instruction", EmptyRanges,
+ MatchingInlineAsm);
}
// If one instruction matched with a missing feature, report this as a
// missing feature.
if ((Match1 == Match_MissingFeature) + (Match2 == Match_MissingFeature) +
(Match3 == Match_MissingFeature) + (Match4 == Match_MissingFeature) == 1){
- Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled",
+ EmptyRanges, MatchingInlineAsm);
return true;
}
@@ -1701,12 +1945,14 @@ MatchInstruction(SMLoc IDLoc,
// operand failure.
if ((Match1 == Match_InvalidOperand) + (Match2 == Match_InvalidOperand) +
(Match3 == Match_InvalidOperand) + (Match4 == Match_InvalidOperand) == 1){
- Error(IDLoc, "invalid operand for instruction");
+ Error(IDLoc, "invalid operand for instruction", EmptyRanges,
+ MatchingInlineAsm);
return true;
}
// If all of these were an outright failure, report it in a useless way.
- Error(IDLoc, "unknown use of instruction mnemonic without a size suffix");
+ Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
+ EmptyRanges, MatchingInlineAsm);
return true;
}
@@ -1717,7 +1963,10 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
return ParseDirectiveWord(2, DirectiveID.getLoc());
else if (IDVal.startswith(".code"))
return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
- else if (IDVal.startswith(".intel_syntax")) {
+ else if (IDVal.startswith(".att_syntax")) {
+ getParser().setAssemblerDialect(0);
+ return false;
+ } else if (IDVal.startswith(".intel_syntax")) {
getParser().setAssemblerDialect(1);
if (getLexer().isNot(AsmToken::EndOfStatement)) {
if(Parser.getTok().getString() == "noprefix") {
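
A side note on the bracket-expression handling added above: the parser treats "][" as a plus, so MASM forms like "[sym][off]" parse as "[sym+off]". A toy text-level version of that normalization (the real parser works on AsmTokens, not raw strings):

#include <cstddef>
#include <string>

// Rewrite every "][" pair in an operand string as "+", e.g.
// "[ebx][esi*4]" becomes "[ebx+esi*4]".
static std::string coalesceBrackets(const std::string &Operand) {
  std::string Out;
  for (std::size_t I = 0; I < Operand.size(); ++I) {
    if (Operand[I] == ']' && I + 1 < Operand.size() && Operand[I + 1] == '[') {
      Out += '+';
      ++I;  // swallow the '[' as well
    } else {
      Out += Operand[I];
    }
  }
  return Out;
}
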
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 5039887..f136927 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -44,7 +44,7 @@ void x86DisassemblerDebug(const char *file,
dbgs() << file << ":" << line << ": " << s;
}
-const char *x86DisassemblerGetInstrName(unsigned Opcode, void *mii) {
+const char *x86DisassemblerGetInstrName(unsigned Opcode, const void *mii) {
const MCInstrInfo *MII = static_cast<const MCInstrInfo *>(mii);
return MII->getName(Opcode);
}
@@ -95,8 +95,8 @@ const EDInstInfo *X86GenericDisassembler::getEDInfo() const {
/// be a pointer to a MemoryObject.
/// @param byte - A pointer to the byte to be read.
/// @param address - The address to be read.
-static int regionReader(void* arg, uint8_t* byte, uint64_t address) {
- MemoryObject* region = static_cast<MemoryObject*>(arg);
+static int regionReader(const void* arg, uint8_t* byte, uint64_t address) {
+ const MemoryObject* region = static_cast<const MemoryObject*>(arg);
return region->readByte(address, byte);
}
@@ -135,10 +135,10 @@ X86GenericDisassembler::getInstruction(MCInst &instr,
int ret = decodeInstruction(&internalInstr,
regionReader,
- (void*)&region,
+ (const void*)&region,
loggerFn,
(void*)&vStream,
- (void*)MII,
+ (const void*)MII,
address,
fMode);
@@ -379,6 +379,8 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
}
switch (type) {
+ case TYPE_XMM32:
+ case TYPE_XMM64:
case TYPE_XMM128:
mcInst.addOperand(MCOperand::CreateReg(X86::XMM0 + (immediate >> 4)));
return;
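
The const-qualification change runs through the whole reader path: the disassembler only reads bytes, so the opaque region argument can itself be const. A minimal stand-in for regionReader over a flat buffer; ByteRegion is our own illustrative type, not MemoryObject:

#include <cstdint>

struct ByteRegion {
  const uint8_t *Data;
  uint64_t Size;
};

// Matches the new byteReader_t shape: const opaque argument, -1 on a
// failed read, 0 on success (mirroring MemoryObject::readByte).
static int readByteAt(const void *Arg, uint8_t *Byte, uint64_t Address) {
  const ByteRegion *Region = static_cast<const ByteRegion *>(Arg);
  if (Address >= Region->Size)
    return -1;
  *Byte = Region->Data[Address];
  return 0;
}
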
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
index 0dbfa26..981701f 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
@@ -78,7 +78,7 @@
uint16_t operands;
#define INSTRUCTION_IDS \
- unsigned instructionIDs;
+ uint16_t instructionIDs;
#include "X86DisassemblerDecoderCommon.h"
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
index 0c92912..85d8a99 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
@@ -138,6 +138,10 @@ static InstrUID decode(OpcodeType type,
if (modFromModRM(modRM) == 0x3)
return modRMTable[dec->instructionIDs+((modRM & 0x38) >> 3)+8];
return modRMTable[dec->instructionIDs+((modRM & 0x38) >> 3)];
+ case MODRM_SPLITMISC:
+ if (modFromModRM(modRM) == 0x3)
+ return modRMTable[dec->instructionIDs+(modRM & 0x3f)+8];
+ return modRMTable[dec->instructionIDs+((modRM & 0x38) >> 3)];
case MODRM_FULL:
return modRMTable[dec->instructionIDs+modRM];
}
@@ -200,7 +204,7 @@ static void unconsumeByte(struct InternalInstruction* insn) {
insn->readerCursor + offset); \
if (ret) \
return ret; \
- combined = combined | ((type)byte << ((type)offset * 8)); \
+ combined = combined | ((uint64_t)byte << (offset * 8)); \
} \
*ptr = combined; \
insn->readerCursor += sizeof(type); \
@@ -690,7 +694,7 @@ static int getIDWithAttrMask(uint16_t* instructionID,
* @param orig - The instruction that is not 16-bit
* @param equiv - The instruction that is 16-bit
*/
-static BOOL is16BitEquvalent(const char* orig, const char* equiv) {
+static BOOL is16BitEquivalent(const char* orig, const char* equiv) {
off_t i;
for (i = 0;; i++) {
@@ -719,7 +723,7 @@ static BOOL is16BitEquvalent(const char* orig, const char* equiv) {
* @return - 0 if the ModR/M could be read when needed or was not needed;
* nonzero otherwise.
*/
-static int getID(struct InternalInstruction* insn, void *miiArg) {
+static int getID(struct InternalInstruction* insn, const void *miiArg) {
uint8_t attrMask;
uint16_t instructionID;
@@ -856,7 +860,7 @@ static int getID(struct InternalInstruction* insn, void *miiArg) {
specWithOpSizeName =
x86DisassemblerGetInstrName(instructionIDWithOpsize, miiArg);
- if (is16BitEquvalent(specName, specWithOpSizeName)) {
+ if (is16BitEquivalent(specName, specWithOpSizeName)) {
insn->instructionID = instructionIDWithOpsize;
insn->spec = specifierForUID(instructionIDWithOpsize);
} else {
@@ -1621,10 +1625,10 @@ static int readOperands(struct InternalInstruction* insn) {
*/
int decodeInstruction(struct InternalInstruction* insn,
byteReader_t reader,
- void* readerArg,
+ const void* readerArg,
dlog_t logger,
void* loggerArg,
- void* miiArg,
+ const void* miiArg,
uint64_t startLoc,
DisassemblerMode mode) {
memset(insn, 0, sizeof(struct InternalInstruction));
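
The new MODRM_SPLITMISC case in decode() distinguishes register forms (mod == 0b11), where all 64 remaining ModR/M values can each select their own instruction, from memory forms, where only the reg field (bits 5:3) matters. A sketch of just that lookup, assuming the same flat modRMTable layout the decoder uses:

#include <cstdint>

using InstrUID = uint16_t;

static InstrUID decodeSplitMisc(const InstrUID *ModRMTable, uint16_t Base,
                                uint8_t ModRM) {
  if ((ModRM >> 6) == 0x3)
    return ModRMTable[Base + (ModRM & 0x3f) + 8];   // register form
  return ModRMTable[Base + ((ModRM & 0x38) >> 3)];  // memory form
}
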
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index 797703f..407ead3 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -24,7 +24,7 @@ extern "C" {
uint16_t operands;
#define INSTRUCTION_IDS \
- unsigned instructionIDs;
+ uint16_t instructionIDs;
#include "X86DisassemblerDecoderCommon.h"
@@ -403,7 +403,7 @@ typedef uint8_t BOOL;
* be read from.
* @return - -1 if the byte cannot be read for any reason; 0 otherwise.
*/
-typedef int (*byteReader_t)(void* arg, uint8_t* byte, uint64_t address);
+typedef int (*byteReader_t)(const void* arg, uint8_t* byte, uint64_t address);
/*
* dlog_t - Type for the logging function that the consumer can provide to
@@ -422,7 +422,7 @@ struct InternalInstruction {
/* Reader interface (C) */
byteReader_t reader;
/* Opaque value passed to the reader */
- void* readerArg;
+ const void* readerArg;
/* The address of the next byte to read via the reader */
uint64_t readerCursor;
@@ -561,10 +561,10 @@ struct InternalInstruction {
*/
int decodeInstruction(struct InternalInstruction* insn,
byteReader_t reader,
- void* readerArg,
+ const void* readerArg,
dlog_t logger,
void* loggerArg,
- void* miiArg,
+ const void* miiArg,
uint64_t startLoc,
DisassemblerMode mode);
@@ -579,7 +579,7 @@ void x86DisassemblerDebug(const char *file,
unsigned line,
const char *s);
-const char *x86DisassemblerGetInstrName(unsigned Opcode, void *mii);
+const char *x86DisassemblerGetInstrName(unsigned Opcode, const void *mii);
#ifdef __cplusplus
}
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index b0a0e1e..23dfe4b 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -160,6 +160,10 @@ typedef uint16_t InstrUID;
* MODRM_SPLITRM - If the ModR/M byte is between 0x00 and 0xbf, the opcode
* corresponds to one instruction; otherwise, it corresponds to
* a different instruction.
+ * MODRM_SPLITMISC- If the ModR/M byte is between 0x00 and 0xbf, the ModR/M
+ * byte divided by 8 is used to select the instruction; otherwise, each
+ * value of the ModR/M byte could correspond to a different
+ * instruction.
* MODRM_SPLITREG - ModR/M byte divided by 8 is used to select instruction. This
* corresponds to instructions that use the reg field as the opcode.
* MODRM_FULL - Potentially, each value of the ModR/M byte could correspond
@@ -169,6 +173,7 @@ typedef uint16_t InstrUID;
#define MODRMTYPES \
ENUM_ENTRY(MODRM_ONEENTRY) \
ENUM_ENTRY(MODRM_SPLITRM) \
+ ENUM_ENTRY(MODRM_SPLITMISC) \
ENUM_ENTRY(MODRM_SPLITREG) \
ENUM_ENTRY(MODRM_FULL)
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index 5118e4c..a4bd114 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -15,6 +15,7 @@
#define DEBUG_TYPE "asm-printer"
#include "X86ATTInstPrinter.h"
#include "X86InstComments.h"
+#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -33,11 +34,19 @@ using namespace llvm;
void X86ATTInstPrinter::printRegName(raw_ostream &OS,
unsigned RegNo) const {
- OS << '%' << getRegisterName(RegNo);
+ OS << markup("<reg:")
+ << '%' << getRegisterName(RegNo)
+ << markup(">");
}
void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
StringRef Annot) {
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ uint64_t TSFlags = Desc.TSFlags;
+
+ if (TSFlags & X86II::LOCK)
+ OS << "\tlock\n";
+
// Try to print any aliases first.
if (!printAliasInstr(MI, OS))
printInstruction(MI, OS);
@@ -52,7 +61,8 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
raw_ostream &O) {
- switch (MI->getOperand(Op).getImm()) {
+ int64_t Imm = MI->getOperand(Op).getImm() & 0xf;
+ switch (Imm) {
default: llvm_unreachable("Invalid ssecc argument!");
case 0: O << "eq"; break;
case 1: O << "lt"; break;
@@ -70,6 +80,30 @@ void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
case 0xd: O << "ge"; break;
case 0xe: O << "gt"; break;
case 0xf: O << "true"; break;
+ }
+}
+
+void X86ATTInstPrinter::printAVXCC(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(Op).getImm() & 0x1f;
+ switch (Imm) {
+ default: llvm_unreachable("Invalid avxcc argument!");
+ case 0: O << "eq"; break;
+ case 1: O << "lt"; break;
+ case 2: O << "le"; break;
+ case 3: O << "unord"; break;
+ case 4: O << "neq"; break;
+ case 5: O << "nlt"; break;
+ case 6: O << "nle"; break;
+ case 7: O << "ord"; break;
+ case 8: O << "eq_uq"; break;
+ case 9: O << "nge"; break;
+ case 0xa: O << "ngt"; break;
+ case 0xb: O << "false"; break;
+ case 0xc: O << "neq_oq"; break;
+ case 0xd: O << "ge"; break;
+ case 0xe: O << "gt"; break;
+ case 0xf: O << "true"; break;
case 0x10: O << "eq_os"; break;
case 0x11: O << "lt_oq"; break;
case 0x12: O << "le_oq"; break;
@@ -89,12 +123,12 @@ void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
}
}
-/// print_pcrel_imm - This is used to print an immediate value that ends up
+/// printPCRelImm - This is used to print an immediate value that ends up
/// being encoded as a pc-relative value (e.g. for jumps and calls). These
/// print slightly differently than normal immediates. For example, a $ is not
/// emitted.
-void X86ATTInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
+void X86ATTInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isImm())
O << Op.getImm();
@@ -119,17 +153,21 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
- O << '%' << getRegisterName(Op.getReg());
+ printRegName(O, Op.getReg());
} else if (Op.isImm()) {
// Print X86 immediates as signed values.
- O << '$' << (int64_t)Op.getImm();
+ O << markup("<imm:")
+ << '$' << (int64_t)Op.getImm()
+ << markup(">");
if (CommentStream && (Op.getImm() > 255 || Op.getImm() < -256))
*CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Op.getImm());
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
- O << '$' << *Op.getExpr();
+ O << markup("<imm:")
+ << '$' << *Op.getExpr()
+ << markup(">");
}
}
@@ -140,6 +178,8 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
const MCOperand &DispSpec = MI->getOperand(Op+3);
const MCOperand &SegReg = MI->getOperand(Op+4);
+ O << markup("<mem:");
+
// If this has a segment register, print it.
if (SegReg.getReg()) {
printOperand(MI, Op+4, O);
@@ -164,9 +204,15 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
O << ',';
printOperand(MI, Op+2, O);
unsigned ScaleVal = MI->getOperand(Op+1).getImm();
- if (ScaleVal != 1)
- O << ',' << ScaleVal;
+ if (ScaleVal != 1) {
+ O << ','
+ << markup("<imm:")
+ << ScaleVal
+ << markup(">");
+ }
}
O << ')';
}
+
+ O << markup(">");
}
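
The markup() calls threaded through the ATT printer wrap registers, immediates, and memory references in tags like "<reg:...>" and "<imm:...>" that downstream tools can colorize or tokenize; when markup is disabled the delimiters collapse to empty strings. A rough sketch of that behavior (MarkupSink is our own illustrative class, not the MCInstPrinter interface):

#include <sstream>
#include <string>

class MarkupSink {
  std::ostringstream OS;
  bool UseMarkup;

public:
  explicit MarkupSink(bool Enable) : UseMarkup(Enable) {}
  // Emit the tag text only when markup output is enabled.
  const char *markup(const char *Tag) const { return UseMarkup ? Tag : ""; }
  void printRegName(const std::string &Reg) {
    OS << markup("<reg:") << '%' << Reg << markup(">");
  }
  std::string str() const { return OS.str(); }
};

// With markup enabled, printRegName("rax") accumulates "<reg:%rax>";
// with it disabled, the same call yields plain "%rax".
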
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
index 2e00bff..8e09183 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
@@ -40,7 +40,8 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &OS);
- void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
+ void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &OS);
+ void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
index 4ea662c..d67aec7 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
@@ -1,4 +1,4 @@
-//===-- X86IntelInstPrinter.cpp - AT&T assembly instruction printing ------===//
+//===-- X86IntelInstPrinter.cpp - Intel assembly instruction printing -----===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file includes code for rendering MCInst instances as AT&T-style
+// This file includes code for rendering MCInst instances as Intel-style
// assembly.
//
//===----------------------------------------------------------------------===//
@@ -15,6 +15,7 @@
#define DEBUG_TYPE "asm-printer"
#include "X86IntelInstPrinter.h"
#include "X86InstComments.h"
+#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCExpr.h"
@@ -32,6 +33,12 @@ void X86IntelInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
StringRef Annot) {
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ uint64_t TSFlags = Desc.TSFlags;
+
+ if (TSFlags & X86II::LOCK)
+ OS << "\tlock\n";
+
printInstruction(MI, OS);
// Next always print the annotation.
@@ -44,7 +51,8 @@ void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
raw_ostream &O) {
- switch (MI->getOperand(Op).getImm()) {
+ int64_t Imm = MI->getOperand(Op).getImm() & 0xf;
+ switch (Imm) {
default: llvm_unreachable("Invalid ssecc argument!");
case 0: O << "eq"; break;
case 1: O << "lt"; break;
@@ -62,6 +70,30 @@ void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
case 0xd: O << "ge"; break;
case 0xe: O << "gt"; break;
case 0xf: O << "true"; break;
+ }
+}
+
+void X86IntelInstPrinter::printAVXCC(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(Op).getImm() & 0x1f;
+ switch (Imm) {
+ default: llvm_unreachable("Invalid avxcc argument!");
+ case 0: O << "eq"; break;
+ case 1: O << "lt"; break;
+ case 2: O << "le"; break;
+ case 3: O << "unord"; break;
+ case 4: O << "neq"; break;
+ case 5: O << "nlt"; break;
+ case 6: O << "nle"; break;
+ case 7: O << "ord"; break;
+ case 8: O << "eq_uq"; break;
+ case 9: O << "nge"; break;
+ case 0xa: O << "ngt"; break;
+ case 0xb: O << "false"; break;
+ case 0xc: O << "neq_oq"; break;
+ case 0xd: O << "ge"; break;
+ case 0xe: O << "gt"; break;
+ case 0xf: O << "true"; break;
case 0x10: O << "eq_os"; break;
case 0x11: O << "lt_oq"; break;
case 0x12: O << "le_oq"; break;
@@ -78,14 +110,13 @@ void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
case 0x1d: O << "ge_oq"; break;
case 0x1e: O << "gt_oq"; break;
case 0x1f: O << "true_us"; break;
-
}
}
-/// print_pcrel_imm - This is used to print an immediate value that ends up
+/// printPCRelImm - This is used to print an immediate value that ends up
/// being encoded as a pc-relative value.
-void X86IntelInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
+void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isImm())
O << Op.getImm();
@@ -153,8 +184,7 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
printOperand(MI, Op+2, O);
NeedPlus = true;
}
-
-
+
if (!DispSpec.isImm()) {
if (NeedPlus) O << " + ";
assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
index 4f5938d..bb769eb 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This class prints an X86 MCInst to intel style .s file syntax.
+// This class prints an X86 MCInst to Intel style .s file syntax.
//
//===----------------------------------------------------------------------===//
@@ -37,7 +37,8 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O);
- void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &O);
+ void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "OPAQUE PTR ";
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index c4b75a6..467edad 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -279,9 +279,9 @@ void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
Res.setOpcode(RelaxedOp);
}
-/// writeNopData - Write optimal nops to the output file for the \arg Count
+/// writeNopData - Write optimal nops to the output file for the \p Count
/// bytes. This returns the number of bytes written. It may return 0 if
-/// the \arg Count is more than the maximum optimal nops.
+/// the \p Count is more than the maximum optimal nops.
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
static const uint8_t Nops[10][10] = {
// nop
@@ -354,7 +354,7 @@ public:
: ELFX86AsmBackend(T, OSABI, CPU) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createX86ELFObjectWriter(OS, /*Is64Bit*/ false, OSABI);
+ return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
}
};
@@ -364,7 +364,7 @@ public:
: ELFX86AsmBackend(T, OSABI, CPU) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createX86ELFObjectWriter(OS, /*Is64Bit*/ true, OSABI);
+ return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64);
}
};
@@ -455,7 +455,7 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, StringRef TT, String
if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
return new DarwinX86_32AsmBackend(T, CPU);
- if (TheTriple.isOSWindows())
+ if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
return new WindowsX86AsmBackend(T, false, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
@@ -468,7 +468,7 @@ MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT, String
if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
return new DarwinX86_64AsmBackend(T, CPU);
- if (TheTriple.isOSWindows())
+ if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
return new WindowsX86AsmBackend(T, true, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
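
The triple checks above share one dispatch rule: an explicit ELF environment (e.g. a *-win32-elf triple) now wins over the Windows default of COFF, and ELF remains the fallback container. A sketch of that decision under those assumptions (the enum and boolean flags are illustrative, not the Triple API):

enum class ObjFormat { MachO, COFF, ELF };

static ObjFormat pickObjectFormat(bool IsOSDarwin, bool IsMachOEnv,
                                  bool IsOSWindows, bool IsELFEnv) {
  if (IsOSDarwin || IsMachOEnv)
    return ObjFormat::MachO;
  if (IsOSWindows && !IsELFEnv)
    return ObjFormat::COFF;  // only without an explicit ELF environment
  return ObjFormat::ELF;     // the default container
}
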
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index db597fb..7ea1961 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -276,9 +276,9 @@ namespace X86II {
MRM_C1 = 33, MRM_C2 = 34, MRM_C3 = 35, MRM_C4 = 36,
MRM_C8 = 37, MRM_C9 = 38, MRM_E8 = 39, MRM_F0 = 40,
MRM_F8 = 41, MRM_F9 = 42, MRM_D0 = 45, MRM_D1 = 46,
- MRM_D4 = 47, MRM_D8 = 48, MRM_D9 = 49, MRM_DA = 50,
- MRM_DB = 51, MRM_DC = 52, MRM_DD = 53, MRM_DE = 54,
- MRM_DF = 55,
+ MRM_D4 = 47, MRM_D5 = 48, MRM_D8 = 49, MRM_D9 = 50,
+ MRM_DA = 51, MRM_DB = 52, MRM_DC = 53, MRM_DD = 54,
+ MRM_DE = 55, MRM_DF = 56,
/// RawFrmImm8 - This is used for the ENTER instruction, which has two
/// immediates, the first of which is a 16-bit immediate (specified by
@@ -580,11 +580,11 @@ namespace X86II {
case X86II::MRM_E8: case X86II::MRM_F0:
case X86II::MRM_F8: case X86II::MRM_F9:
case X86II::MRM_D0: case X86II::MRM_D1:
- case X86II::MRM_D4: case X86II::MRM_D8:
- case X86II::MRM_D9: case X86II::MRM_DA:
- case X86II::MRM_DB: case X86II::MRM_DC:
- case X86II::MRM_DD: case X86II::MRM_DE:
- case X86II::MRM_DF:
+ case X86II::MRM_D4: case X86II::MRM_D5:
+ case X86II::MRM_D8: case X86II::MRM_D9:
+ case X86II::MRM_DA: case X86II::MRM_DB:
+ case X86II::MRM_DC: case X86II::MRM_DD:
+ case X86II::MRM_DE: case X86II::MRM_DF:
return -1;
}
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index 5a42a80..de80dd8 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
namespace {
class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
- X86ELFObjectWriter(bool is64Bit, uint8_t OSABI);
+ X86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine);
virtual ~X86ELFObjectWriter();
protected:
@@ -30,10 +30,11 @@ namespace {
};
}
-X86ELFObjectWriter::X86ELFObjectWriter(bool Is64Bit, uint8_t OSABI)
- : MCELFObjectTargetWriter(Is64Bit, OSABI,
- Is64Bit ? ELF::EM_X86_64 : ELF::EM_386,
- /*HasRelocationAddend*/ Is64Bit) {}
+X86ELFObjectWriter::X86ELFObjectWriter(bool IsELF64, uint8_t OSABI,
+ uint16_t EMachine)
+ : MCELFObjectTargetWriter(IsELF64, OSABI, EMachine,
+ // Only i386 uses Rel instead of RelA.
+ /*HasRelocationAddend*/ EMachine != ELF::EM_386) {}
X86ELFObjectWriter::~X86ELFObjectWriter()
{}
@@ -48,7 +49,7 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
unsigned Type;
- if (is64Bit()) {
+ if (getEMachine() == ELF::EM_X86_64) {
if (IsPCRel) {
switch ((unsigned)Fixup.getKind()) {
default: llvm_unreachable("invalid fixup kind!");
@@ -130,7 +131,7 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
case FK_Data_1: Type = ELF::R_X86_64_8; break;
}
}
- } else {
+ } else if (getEMachine() == ELF::EM_386) {
if (IsPCRel) {
switch ((unsigned)Fixup.getKind()) {
default: llvm_unreachable("invalid fixup kind!");
@@ -210,15 +211,17 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
case FK_Data_1: Type = ELF::R_386_8; break;
}
}
- }
+ } else
+ llvm_unreachable("Unsupported ELF machine type.");
return Type;
}
MCObjectWriter *llvm::createX86ELFObjectWriter(raw_ostream &OS,
- bool Is64Bit,
- uint8_t OSABI) {
+ bool IsELF64,
+ uint8_t OSABI,
+ uint16_t EMachine) {
MCELFObjectTargetWriter *MOTW =
- new X86ELFObjectWriter(Is64Bit, OSABI);
+ new X86ELFObjectWriter(IsELF64, OSABI, EMachine);
return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true);
}
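
The writer is now keyed on e_machine rather than pointer width, and the addend rule follows: only EM_386 keeps REL relocations with implicit addends, while EM_X86_64 uses RELA. A one-function sketch of that predicate, using the standard ELF machine constant:

#include <cstdint>

// Mirror of the HasRelocationAddend choice above: i386 alone stores the
// addend in place; every other supported machine uses explicit addends.
static bool hasRelocationAddend(uint16_t EMachine) {
  const uint16_t EM_386 = 3;  // standard ELF e_machine value for i386
  return EMachine != EM_386;
}
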
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index b0acd7d..16488eb 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -34,6 +34,10 @@ AsmWriterFlavor("x86-asm-syntax", cl::init(ATT),
clEnumValN(Intel, "intel", "Emit Intel-style assembly"),
clEnumValEnd));
+static cl::opt<bool>
+MarkedJTDataRegions("mark-data-regions", cl::init(false),
+ cl::desc("Mark code section jump table data regions."),
+ cl::Hidden);
void X86MCAsmInfoDarwin::anchor() { }
@@ -59,6 +63,7 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
SupportsDebugInformation = true;
DwarfUsesInlineInfoSection = true;
+ UseDataRegionDirectives = MarkedJTDataRegions;
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 4a38324..122204a 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -16,6 +16,7 @@
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -28,8 +29,8 @@ using namespace llvm;
namespace {
class X86MCCodeEmitter : public MCCodeEmitter {
- X86MCCodeEmitter(const X86MCCodeEmitter &); // DO NOT IMPLEMENT
- void operator=(const X86MCCodeEmitter &); // DO NOT IMPLEMENT
+ X86MCCodeEmitter(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ void operator=(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
const MCInstrInfo &MCII;
const MCSubtargetInfo &STI;
MCContext &Ctx;
@@ -51,8 +52,8 @@ public:
return (STI.getFeatureBits() & X86::Mode64Bit) == 0;
}
- static unsigned GetX86RegNum(const MCOperand &MO) {
- return X86_MC::getX86RegNum(MO.getReg());
+ unsigned GetX86RegNum(const MCOperand &MO) const {
+ return Ctx.getRegisterInfo().getEncodingValue(MO.getReg()) & 0x7;
}
// On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
@@ -64,8 +65,8 @@ public:
// VEX.VVVV => XMM9 => ~9
//
// See table 4-35 of Intel AVX Programming Reference for details.
- static unsigned char getVEXRegisterEncoding(const MCInst &MI,
- unsigned OpNum) {
+ unsigned char getVEXRegisterEncoding(const MCInst &MI,
+ unsigned OpNum) const {
unsigned SrcReg = MI.getOperand(OpNum).getReg();
unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
if (X86II::isX86_64ExtendedReg(SrcReg))
@@ -560,15 +561,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
}
- // Set the vector length to 256-bit if YMM0-YMM15 is used
- for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
- if (!MI.getOperand(i).isReg())
- continue;
- unsigned SrcReg = MI.getOperand(i).getReg();
- if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15)
- VEX_L = 1;
- }
-
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = 0;
@@ -1129,13 +1121,13 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM_C3: case X86II::MRM_C4:
case X86II::MRM_C8: case X86II::MRM_C9:
case X86II::MRM_D0: case X86II::MRM_D1:
- case X86II::MRM_D4: case X86II::MRM_D8:
- case X86II::MRM_D9: case X86II::MRM_DA:
- case X86II::MRM_DB: case X86II::MRM_DC:
- case X86II::MRM_DD: case X86II::MRM_DE:
- case X86II::MRM_DF: case X86II::MRM_E8:
- case X86II::MRM_F0: case X86II::MRM_F8:
- case X86II::MRM_F9:
+ case X86II::MRM_D4: case X86II::MRM_D5:
+ case X86II::MRM_D8: case X86II::MRM_D9:
+ case X86II::MRM_DA: case X86II::MRM_DB:
+ case X86II::MRM_DC: case X86II::MRM_DD:
+ case X86II::MRM_DE: case X86II::MRM_DF:
+ case X86II::MRM_E8: case X86II::MRM_F0:
+ case X86II::MRM_F8: case X86II::MRM_F9:
EmitByte(BaseOpcode, CurByte, OS);
unsigned char MRM;
@@ -1150,6 +1142,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM_D0: MRM = 0xD0; break;
case X86II::MRM_D1: MRM = 0xD1; break;
case X86II::MRM_D4: MRM = 0xD4; break;
+ case X86II::MRM_D5: MRM = 0xD5; break;
case X86II::MRM_D8: MRM = 0xD8; break;
case X86II::MRM_D9: MRM = 0xD9; break;
case X86II::MRM_DA: MRM = 0xDA; break;
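
Behind the GetX86RegNum change earlier in this file: the ModR/M register number now comes from the target's encoding table, where the field is simply the low three bits of the encoding value and bit 3 is carried by a REX or VEX prefix instead. A sketch of those two extractions, assuming a hypothetical encoding value such as 9 for R9 or XMM9:

// Bits 2:0 of the encoding value land in the ModR/M byte.
static unsigned modRMRegNum(unsigned EncodingValue) {
  return EncodingValue & 0x7;
}

// Bit 3 selects the extended register bank and becomes REX.R/X/B
// (or the inverted VEX equivalent).
static bool needsExtensionBit(unsigned EncodingValue) {
  return (EncodingValue & 0x8) != 0;
}
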
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 3482363..287c9f1 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -209,117 +209,10 @@ unsigned X86_MC::getDwarfRegFlavour(StringRef TT, bool isEH) {
return DWARFFlavour::X86_32_Generic;
}
-/// getX86RegNum - This function maps LLVM register identifiers to their X86
-/// specific numbering, which is used in various places encoding instructions.
-unsigned X86_MC::getX86RegNum(unsigned RegNo) {
- switch(RegNo) {
- case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
- case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
- case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
- case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
- case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
- return N86::ESP;
- case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
- return N86::EBP;
- case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
- return N86::ESI;
- case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
- return N86::EDI;
-
- case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B:
- return N86::EAX;
- case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B:
- return N86::ECX;
- case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
- return N86::EDX;
- case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
- return N86::EBX;
- case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
- return N86::ESP;
- case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
- return N86::EBP;
- case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
- return N86::ESI;
- case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
- return N86::EDI;
-
- case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
- case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
- return RegNo-X86::ST0;
-
- case X86::XMM0: case X86::XMM8:
- case X86::YMM0: case X86::YMM8: case X86::MM0:
- return 0;
- case X86::XMM1: case X86::XMM9:
- case X86::YMM1: case X86::YMM9: case X86::MM1:
- return 1;
- case X86::XMM2: case X86::XMM10:
- case X86::YMM2: case X86::YMM10: case X86::MM2:
- return 2;
- case X86::XMM3: case X86::XMM11:
- case X86::YMM3: case X86::YMM11: case X86::MM3:
- return 3;
- case X86::XMM4: case X86::XMM12:
- case X86::YMM4: case X86::YMM12: case X86::MM4:
- return 4;
- case X86::XMM5: case X86::XMM13:
- case X86::YMM5: case X86::YMM13: case X86::MM5:
- return 5;
- case X86::XMM6: case X86::XMM14:
- case X86::YMM6: case X86::YMM14: case X86::MM6:
- return 6;
- case X86::XMM7: case X86::XMM15:
- case X86::YMM7: case X86::YMM15: case X86::MM7:
- return 7;
-
- case X86::ES: return 0;
- case X86::CS: return 1;
- case X86::SS: return 2;
- case X86::DS: return 3;
- case X86::FS: return 4;
- case X86::GS: return 5;
-
- case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
- case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
- case X86::CR2: case X86::CR10: case X86::DR2: return 2;
- case X86::CR3: case X86::CR11: case X86::DR3: return 3;
- case X86::CR4: case X86::CR12: case X86::DR4: return 4;
- case X86::CR5: case X86::CR13: case X86::DR5: return 5;
- case X86::CR6: case X86::CR14: case X86::DR6: return 6;
- case X86::CR7: case X86::CR15: case X86::DR7: return 7;
-
- // Pseudo index registers are equivalent to a "none"
- // scaled index (See Intel Manual 2A, table 2-3)
- case X86::EIZ:
- case X86::RIZ:
- return 4;
-
- default:
- assert((int(RegNo) > 0) && "Unknown physical register!");
- return 0;
- }
-}
-
void X86_MC::InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI) {
// FIXME: TableGen these.
for (unsigned Reg = X86::NoRegister+1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
- int SEH = X86_MC::getX86RegNum(Reg);
- switch (Reg) {
- case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B:
- case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B:
- case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
- case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
- case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
- case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
- case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
- case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
- case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
- case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
- case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
- case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
- SEH += 8;
- break;
- }
+ unsigned SEH = MRI->getEncodingValue(Reg);
MRI->mapLLVMRegToSEHReg(Reg, SEH);
}
}
@@ -379,11 +272,15 @@ static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) {
MAI = new X86_64MCAsmInfoDarwin(TheTriple);
else
MAI = new X86MCAsmInfoDarwin(TheTriple);
+ } else if (TheTriple.getEnvironment() == Triple::ELF) {
+ // Force the use of an ELF container.
+ MAI = new X86ELFMCAsmInfo(TheTriple);
} else if (TheTriple.getOS() == Triple::Win32) {
MAI = new X86MCAsmInfoMicrosoft(TheTriple);
} else if (TheTriple.getOS() == Triple::MinGW32 || TheTriple.getOS() == Triple::Cygwin) {
MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
} else {
+ // The default is ELF.
MAI = new X86ELFMCAsmInfo(TheTriple);
}
@@ -465,7 +362,7 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
return createMachOStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll);
- if (TheTriple.isOSWindows())
+ if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
return createWinCOFFStreamer(Ctx, MAB, *_Emitter, _OS, RelaxAll);
return createELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
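The mapping above no longer needs a hand-maintained register switch: TableGen already records each register's hardware encoding, and MCRegisterInfo::getEncodingValue exposes it. As a rough sketch (the helper name below is illustrative, not part of the patch), the classic ModRM register number that the deleted getX86RegNum computed is simply the low three bits of that encoding:

    #include "llvm/MC/MCRegisterInfo.h"

    // Sketch: the TableGen'erated encoding value carries the hardware register
    // number; masking to three bits yields the old ModRM-style numbering.
    static unsigned getModRMRegNum(const llvm::MCRegisterInfo &MRI, unsigned Reg) {
      return MRI.getEncodingValue(Reg) & 0x7;
    }

This matches the private getX86RegNum helper the code emitter introduces further down in this patch.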
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index 4b0cace..981aa1a 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -64,8 +64,6 @@ namespace X86_MC {
unsigned getDwarfRegFlavour(StringRef TT, bool isEH);
- unsigned getX86RegNum(unsigned RegNo);
-
void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI);
/// createX86MCSubtargetInfo - Create a X86 MCSubtargetInfo instance.
@@ -91,8 +89,9 @@ MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS,
/// createX86ELFObjectWriter - Construct an X86 ELF object writer.
MCObjectWriter *createX86ELFObjectWriter(raw_ostream &OS,
- bool Is64Bit,
- uint8_t OSABI);
+ bool IsELF64,
+ uint8_t OSABI,
+ uint16_t EMachine);
/// createX86WinCOFFObjectWriter - Construct an X86 Win COFF object writer.
MCObjectWriter *createX86WinCOFFObjectWriter(raw_ostream &OS, bool Is64Bit);
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index f0f1982..7ff058e 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -11,11 +11,13 @@
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCValue.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Object/MachOFormat.h"
using namespace llvm;
@@ -23,7 +25,7 @@ using namespace llvm::object;
namespace {
class X86MachObjectWriter : public MCMachObjectTargetWriter {
- void RecordScatteredRelocation(MachObjectWriter *Writer,
+ bool RecordScatteredRelocation(MachObjectWriter *Writer,
const MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
@@ -335,7 +337,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
Writer->addRelocation(Fragment->getParent(), MRE);
}
-void X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
+bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
const MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
@@ -381,6 +383,19 @@ void X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
// Relocations are written out in reverse order, so the PAIR comes first.
if (Type == macho::RIT_Difference ||
Type == macho::RIT_Generic_LocalDifference) {
+ // If the offset is too large to fit in a scattered relocation,
+ // we're hosed. It's an unfortunate limitation of the MachO format.
+ if (FixupOffset > 0xffffff) {
+ char Buffer[32];
+ format("0x%x", FixupOffset).print(Buffer, sizeof(Buffer));
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ Twine("Section too large, can't encode "
+ "r_address (") + Buffer +
+ ") into 24 bits of scattered "
+ "relocation entry.");
+ llvm_unreachable("fatal error returned?!");
+ }

+
macho::RelocationEntry MRE;
MRE.Word0 = ((0 << 0) |
(macho::RIT_Pair << 24) |
@@ -389,6 +404,16 @@ void X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
macho::RF_Scattered);
MRE.Word1 = Value2;
Writer->addRelocation(Fragment->getParent(), MRE);
+ } else {
+    // If the offset is more than 24 bits, it won't fit in a scattered
+ // relocation offset field, so we fall back to using a non-scattered
+ // relocation. This is a bit risky, as if the offset reaches out of
+ // the block and the linker is doing scattered loading on this
+ // symbol, things can go badly.
+ //
+ // Required for 'as' compatibility.
+ if (FixupOffset > 0xffffff)
+ return false;
}
macho::RelocationEntry MRE;
@@ -399,6 +424,7 @@ void X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
macho::RF_Scattered);
MRE.Word1 = Value;
Writer->addRelocation(Fragment->getParent(), MRE);
+ return true;
}
void X86MachObjectWriter::RecordTLVPRelocation(MachObjectWriter *Writer,
@@ -469,9 +495,11 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
// If this is a difference or a defined symbol plus an offset, then we need a
// scattered relocation entry. Differences always require scattered
// relocations.
- if (Target.getSymB())
- return RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
- Target, Log2Size, FixedValue);
+ if (Target.getSymB()) {
+ RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
+ Target, Log2Size, FixedValue);
+ return;
+ }
// Get the symbol data, if any.
MCSymbolData *SD = 0;
@@ -483,9 +511,13 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
uint32_t Offset = Target.getConstant();
if (IsPCRel)
Offset += 1 << Log2Size;
- if (Offset && SD && !Writer->doesSymbolRequireExternRelocation(SD))
- return RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
- Target, Log2Size, FixedValue);
+  // Try to record the scattered relocation if needed. Fall back to a
+  // non-scattered relocation if necessary (see the comments in
+  // RecordScatteredRelocation() for details).
+ if (Offset && SD && !Writer->doesSymbolRequireExternRelocation(SD) &&
+ RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
+ Target, Log2Size, FixedValue))
+ return;
// See <reloc.h>.
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
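The new 24-bit checks above exist because a Mach-O scattered relocation stores its r_address in a 24-bit field, so any fixup at offset 2^24 (16 MiB) or beyond is unencodable; difference relocations must abort, while the plain case can fall back to a non-scattered entry. A standalone restatement of the bound (the helper is illustrative only):

    #include <cstdint>

    // True if FixupOffset fits the 24-bit r_address field of a Mach-O
    // scattered relocation entry; the largest encodable value is 0xffffff.
    static bool fitsInScatteredRAddress(uint64_t FixupOffset) {
      return FixupOffset <= 0xffffff;
    }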
diff --git a/contrib/llvm/lib/Target/X86/X86.td b/contrib/llvm/lib/Target/X86/X86.td
index 1249781..8ad0bc0 100644
--- a/contrib/llvm/lib/Target/X86/X86.td
+++ b/contrib/llvm/lib/Target/X86/X86.td
@@ -118,8 +118,13 @@ def FeatureBMI : SubtargetFeature<"bmi", "HasBMI", "true",
"Support BMI instructions">;
def FeatureBMI2 : SubtargetFeature<"bmi2", "HasBMI2", "true",
"Support BMI2 instructions">;
+def FeatureRTM : SubtargetFeature<"rtm", "HasRTM", "true",
+ "Support RTM instructions">;
def FeatureLeaForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
"Use LEA for adjusting the stack pointer">;
+def FeatureSlowDivide : SubtargetFeature<"idiv-to-divb",
+ "HasSlowDivide", "true",
+ "Use small divide for positive values less than 256">;
//===----------------------------------------------------------------------===//
// X86 processors supported.
@@ -159,8 +164,9 @@ def : Proc<"core2", [FeatureSSSE3, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
def : Proc<"penryn", [FeatureSSE41, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
-def : AtomProc<"atom", [ProcIntelAtom, FeatureSSE3, FeatureCMPXCHG16B,
- FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP]>;
+def : AtomProc<"atom", [ProcIntelAtom, FeatureSSSE3, FeatureCMPXCHG16B,
+ FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP,
+ FeatureSlowDivide]>;
// "Arrandale" along with corei3 and corei5
def : Proc<"corei7", [FeatureSSE42, FeatureCMPXCHG16B,
FeatureSlowBTMem, FeatureFastUAMem,
@@ -188,7 +194,8 @@ def : Proc<"core-avx2", [FeatureAVX2, FeatureCMPXCHG16B, FeaturePOPCNT,
FeatureAES, FeaturePCLMUL, FeatureRDRAND,
FeatureF16C, FeatureFSGSBase,
FeatureMOVBE, FeatureLZCNT, FeatureBMI,
- FeatureBMI2, FeatureFMA]>;
+ FeatureBMI2, FeatureFMA,
+ FeatureRTM]>;
def : Proc<"k6", [FeatureMMX]>;
def : Proc<"k6-2", [Feature3DNow]>;
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
index db71e27..fdd7125 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#include "X86AsmPrinter.h"
-#include "X86MCInstLower.h"
#include "X86.h"
#include "X86COFFMachineModuleInfo.h"
#include "X86MachineFunctionInfo.h"
@@ -206,10 +205,10 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
}
}
-/// print_pcrel_imm - This is used to print an immediate value that ends up
+/// printPCRelImm - This is used to print an immediate value that ends up
/// being encoded as a pc-relative value. These print slightly differently, for
/// example, a $ is not emitted.
-void X86AsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo,
+void X86AsmPrinter::printPCRelImm(const MachineInstr *MI, unsigned OpNo,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
@@ -233,15 +232,17 @@ void X86AsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo,
void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O, const char *Modifier) {
+ raw_ostream &O, const char *Modifier,
+ unsigned AsmVariant) {
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
default: llvm_unreachable("unknown operand type!");
case MachineOperand::MO_Register: {
- O << '%';
+    // FIXME: Enumerate AsmVariant so we can remove this magic number.
+ if (AsmVariant == 0) O << '%';
unsigned Reg = MO.getReg();
if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
- EVT VT = (strcmp(Modifier+6,"64") == 0) ?
+ MVT::SimpleValueType VT = (strcmp(Modifier+6,"64") == 0) ?
MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 :
((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8));
Reg = getX86SubSuperRegister(Reg, VT);
@@ -265,46 +266,6 @@ void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
}
}
-void X86AsmPrinter::printSSECC(const MachineInstr *MI, unsigned Op,
- raw_ostream &O) {
- unsigned char value = MI->getOperand(Op).getImm();
- switch (value) {
- default: llvm_unreachable("Invalid ssecc argument!");
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
- case 8: O << "eq_uq"; break;
- case 9: O << "nge"; break;
- case 0xa: O << "ngt"; break;
- case 0xb: O << "false"; break;
- case 0xc: O << "neq_oq"; break;
- case 0xd: O << "ge"; break;
- case 0xe: O << "gt"; break;
- case 0xf: O << "true"; break;
- case 0x10: O << "eq_os"; break;
- case 0x11: O << "lt_oq"; break;
- case 0x12: O << "le_oq"; break;
- case 0x13: O << "unord_s"; break;
- case 0x14: O << "neq_us"; break;
- case 0x15: O << "nlt_uq"; break;
- case 0x16: O << "nle_uq"; break;
- case 0x17: O << "ord_s"; break;
- case 0x18: O << "eq_us"; break;
- case 0x19: O << "nge_uq"; break;
- case 0x1a: O << "ngt_uq"; break;
- case 0x1b: O << "false_os"; break;
- case 0x1c: O << "neq_os"; break;
- case 0x1d: O << "ge_oq"; break;
- case 0x1e: O << "gt_oq"; break;
- case 0x1f: O << "true_us"; break;
- }
-}
-
void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
raw_ostream &O, const char *Modifier) {
const MachineOperand &BaseReg = MI->getOperand(Op);
@@ -363,10 +324,51 @@ void X86AsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
printLeaMemReference(MI, Op, O, Modifier);
}
-void X86AsmPrinter::printPICLabel(const MachineInstr *MI, unsigned Op,
- raw_ostream &O) {
- O << *MF->getPICBaseSymbol() << '\n';
- O << *MF->getPICBaseSymbol() << ':';
+void X86AsmPrinter::printIntelMemReference(const MachineInstr *MI, unsigned Op,
+ raw_ostream &O, const char *Modifier,
+ unsigned AsmVariant){
+ const MachineOperand &BaseReg = MI->getOperand(Op);
+ unsigned ScaleVal = MI->getOperand(Op+1).getImm();
+ const MachineOperand &IndexReg = MI->getOperand(Op+2);
+ const MachineOperand &DispSpec = MI->getOperand(Op+3);
+ const MachineOperand &SegReg = MI->getOperand(Op+4);
+
+ // If this has a segment register, print it.
+ if (SegReg.getReg()) {
+ printOperand(MI, Op+4, O, Modifier, AsmVariant);
+ O << ':';
+ }
+
+ O << '[';
+
+ bool NeedPlus = false;
+ if (BaseReg.getReg()) {
+ printOperand(MI, Op, O, Modifier, AsmVariant);
+ NeedPlus = true;
+ }
+
+ if (IndexReg.getReg()) {
+ if (NeedPlus) O << " + ";
+ if (ScaleVal != 1)
+ O << ScaleVal << '*';
+ printOperand(MI, Op+2, O, Modifier, AsmVariant);
+ NeedPlus = true;
+ }
+
+  assert(DispSpec.isImm() && "Displacement is not an immediate!");
+ int64_t DispVal = DispSpec.getImm();
+ if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg())) {
+ if (NeedPlus) {
+ if (DispVal > 0)
+ O << " + ";
+ else {
+ O << " - ";
+ DispVal = -DispVal;
+ }
+ }
+ O << DispVal;
+ }
+ O << ']';
}
bool X86AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
@@ -457,7 +459,7 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
return false;
case 'P': // This is the operand of a call, treat specially.
- print_pcrel_imm(MI, OpNo, O);
+ printPCRelImm(MI, OpNo, O);
return false;
case 'n': // Negate the immediate or print a '-' before the operand.
@@ -471,7 +473,7 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
}
}
- printOperand(MI, OpNo, O);
+ printOperand(MI, OpNo, O, /*Modifier*/ 0, AsmVariant);
return false;
}
@@ -479,6 +481,11 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
unsigned OpNo, unsigned AsmVariant,
const char *ExtraCode,
raw_ostream &O) {
+ if (AsmVariant) {
+ printIntelMemReference(MI, OpNo, O);
+ return false;
+ }
+
if (ExtraCode && ExtraCode[0]) {
if (ExtraCode[1] != 0) return true; // Unknown modifier.
@@ -680,7 +687,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
if (!Stubs.empty()) {
OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.EmitLabel(Stubs[i].first);
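printIntelMemReference, added above, renders memory operands in Intel syntax as segment:[base + scale*index +/- disp]. A simplified stand-in for its formatting logic, handy for checking expected output (register names are plain strings here; this is not the printer itself):

    #include <cstdint>
    #include <sstream>
    #include <string>

    // e.g. formatIntelMem("rbp", 4, "rcx", -8) yields "[rbp + 4*rcx - 8]".
    static std::string formatIntelMem(const std::string &Base, unsigned Scale,
                                      const std::string &Index, int64_t Disp) {
      std::ostringstream OS;
      OS << '[';
      bool NeedPlus = false;
      if (!Base.empty()) { OS << Base; NeedPlus = true; }
      if (!Index.empty()) {
        if (NeedPlus) OS << " + ";
        if (Scale != 1) OS << Scale << '*';
        OS << Index;
        NeedPlus = true;
      }
      if (Disp || (Base.empty() && Index.empty())) {
        if (NeedPlus) {
          OS << (Disp > 0 ? " + " : " - ");
          OS << (Disp > 0 ? Disp : -Disp);
        } else {
          OS << Disp;
        }
      }
      OS << ']';
      return OS.str();
    }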
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
index 35386cd..61eb14e 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -34,47 +34,48 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
Subtarget = &TM.getSubtarget<X86Subtarget>();
}
- virtual const char *getPassName() const {
+ virtual const char *getPassName() const LLVM_OVERRIDE {
return "X86 AT&T-Style Assembly Printer";
}
const X86Subtarget &getSubtarget() const { return *Subtarget; }
- virtual void EmitStartOfAsmFile(Module &M);
+ virtual void EmitStartOfAsmFile(Module &M) LLVM_OVERRIDE;
- virtual void EmitEndOfAsmFile(Module &M);
+ virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE;
- virtual void EmitInstruction(const MachineInstr *MI);
+ virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE;
void printSymbolOperand(const MachineOperand &MO, raw_ostream &O);
// These methods are used by the tablegen'erated instruction printer.
void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O,
- const char *Modifier = 0);
- void print_pcrel_imm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
+ const char *Modifier = 0, unsigned AsmVariant = 0);
+ void printPCRelImm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
- bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS);
- bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS);
-
- void printMachineInstruction(const MachineInstr *MI);
- void printSSECC(const MachineInstr *MI, unsigned Op, raw_ostream &O);
+ virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) LLVM_OVERRIDE;
+ virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) LLVM_OVERRIDE;
+
void printMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O,
const char *Modifier=NULL);
void printLeaMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O,
const char *Modifier=NULL);
- void printPICLabel(const MachineInstr *MI, unsigned Op, raw_ostream &O);
+ void printIntelMemReference(const MachineInstr *MI, unsigned Op,
+ raw_ostream &O, const char *Modifier=NULL,
+ unsigned AsmVariant = 1);
- bool runOnMachineFunction(MachineFunction &F);
+ virtual bool runOnMachineFunction(MachineFunction &F) LLVM_OVERRIDE;
void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
- MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
+ virtual MachineLocation
+ getDebugValueLocation(const MachineInstr *MI) const LLVM_OVERRIDE;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
index 471eb31..a5a8dc1 100644
--- a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
@@ -20,7 +20,7 @@
namespace llvm {
class X86MachineFunctionInfo;
- class TargetData;
+ class DataLayout;
/// X86COFFMachineModuleInfo - This is a MachineModuleInfoImpl implementation
/// for X86 COFF targets.
diff --git a/contrib/llvm/lib/Target/X86/X86CallingConv.td b/contrib/llvm/lib/Target/X86/X86CallingConv.td
index a6d2709..6786756 100644
--- a/contrib/llvm/lib/Target/X86/X86CallingConv.td
+++ b/contrib/llvm/lib/Target/X86/X86CallingConv.td
@@ -88,6 +88,21 @@ def RetCC_X86_32_Fast : CallingConv<[
CCDelegateTo<RetCC_X86Common>
]>;
+// Intel_OCL_BI return-value convention.
+def RetCC_Intel_OCL_BI : CallingConv<[
+  // Vector types are returned in XMM0, XMM1, XMM2, and XMM3.
+ CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
+ CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
+
+ // 256-bit FP vectors
+ // No more than 4 registers
+ CCIfType<[v8f32, v4f64, v8i32, v4i64],
+ CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
+
+ // i32, i64 in the standard way
+ CCDelegateTo<RetCC_X86Common>
+]>;
+
// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
// The X86-64 calling convention always returns FP values in XMM0.
@@ -128,6 +143,10 @@ def RetCC_X86_64 : CallingConv<[
// This is the return-value convention used for the entire X86 backend.
def RetCC_X86 : CallingConv<[
+
+ // Check if this is the Intel OpenCL built-ins calling convention
+ CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,
+
CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
CCDelegateTo<RetCC_X86_32>
]>;
@@ -235,6 +254,29 @@ def CC_X86_Win64_C : CallingConv<[
CCIfType<[f80], CCAssignToStack<0, 0>>
]>;
+// X86-64 Intel OpenCL built-ins calling convention.
+def CC_Intel_OCL_BI : CallingConv<[
+ CCIfType<[i32], CCIfSubtarget<"isTargetWin32()", CCAssignToStack<4, 4>>>,
+
+ CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
+ CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8, R9 ]>>>,
+
+ CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX]>>,
+ CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX]>>,
+
+ // The SSE vector arguments are passed in XMM registers.
+ CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
+ CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+
+ // The 256-bit vector arguments are passed in YMM registers.
+ CCIfType<[v8f32, v4f64, v8i32, v4i64],
+ CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,
+
+ CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
+ CCDelegateTo<CC_X86_64_C>
+]>;
+
+
def CC_X86_64_GHC : CallingConv<[
// Promote i8/i16/i32 arguments to i64.
CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
@@ -324,7 +366,7 @@ def CC_X86_32_FastCall : CallingConv<[
CCIfNest<CCAssignToReg<[EAX]>>,
// The first 2 integer arguments are passed in ECX/EDX
- CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,
+ CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,
// Otherwise, same as everything else.
CCDelegateTo<CC_X86_32_Common>
@@ -408,6 +450,7 @@ def CC_X86_64 : CallingConv<[
// This is the argument convention used for the entire X86 backend.
def CC_X86 : CallingConv<[
+ CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
CCDelegateTo<CC_X86_32>
]>;
@@ -426,3 +469,17 @@ def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;
def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
(sequence "XMM%u", 6, 15))>;
+
+
+// Standard C + YMM6-15
+def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
+ R13, R14, R15,
+ (sequence "YMM%u", 6, 15))>;
+
+// Standard C + XMM8-15
+def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64,
+ (sequence "XMM%u", 8, 15))>;
+
+// Standard C + YMM8-15
+def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64,
+ (sequence "YMM%u", 8, 15))>;
diff --git a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
index d705049..44db563 100644
--- a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
@@ -26,7 +26,6 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/Function.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
@@ -43,7 +42,7 @@ namespace {
template<class CodeEmitter>
class Emitter : public MachineFunctionPass {
const X86InstrInfo *II;
- const TargetData *TD;
+ const DataLayout *TD;
X86TargetMachine &TM;
CodeEmitter &MCE;
MachineModuleInfo *MMI;
@@ -57,7 +56,7 @@ namespace {
MCE(mce), PICBaseOffset(0), Is64BitMode(false),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
Emitter(X86TargetMachine &tm, CodeEmitter &mce,
- const X86InstrInfo &ii, const TargetData &td, bool is64)
+ const X86InstrInfo &ii, const DataLayout &td, bool is64)
: MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm),
MCE(mce), PICBaseOffset(0), Is64BitMode(is64),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
@@ -110,6 +109,14 @@ namespace {
void emitMemModRMByte(const MachineInstr &MI,
unsigned Op, unsigned RegOpcodeField,
intptr_t PCAdj = 0);
+
+ unsigned getX86RegNum(unsigned RegNo) const {
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ return TRI->getEncodingValue(RegNo) & 0x7;
+ }
+
+ unsigned char getVEXRegisterEncoding(const MachineInstr &MI,
+ unsigned OpNum) const;
};
template<class CodeEmitter>
@@ -129,13 +136,12 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
MCE.setModuleInfo(MMI);
II = TM.getInstrInfo();
- TD = TM.getTargetData();
+ TD = TM.getDataLayout();
Is64BitMode = TM.getSubtarget<X86Subtarget>().is64Bit();
IsPIC = TM.getRelocationModel() == Reloc::PIC_;
do {
- DEBUG(dbgs() << "JITTing function '"
- << MF.getFunction()->getName() << "'\n");
+ DEBUG(dbgs() << "JITTing function '" << MF.getName() << "'\n");
MCE.startFunction(MF);
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB) {
@@ -365,7 +371,7 @@ inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
template<class CodeEmitter>
void Emitter<CodeEmitter>::emitRegModRMByte(unsigned ModRMReg,
unsigned RegOpcodeFld){
- MCE.emitByte(ModRMByte(3, RegOpcodeFld, X86_MC::getX86RegNum(ModRMReg)));
+ MCE.emitByte(ModRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)));
}
template<class CodeEmitter>
@@ -503,7 +509,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
// 2-7) and absolute references.
unsigned BaseRegNo = -1U;
if (BaseReg != 0 && BaseReg != X86::RIP)
- BaseRegNo = X86_MC::getX86RegNum(BaseReg);
+ BaseRegNo = getX86RegNum(BaseReg);
if (// The SIB byte must be used if there is an index register.
IndexReg.getReg() == 0 &&
@@ -579,15 +585,15 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
// Manual 2A, table 2-7. The displacement has already been output.
unsigned IndexRegNo;
if (IndexReg.getReg())
- IndexRegNo = X86_MC::getX86RegNum(IndexReg.getReg());
+ IndexRegNo = getX86RegNum(IndexReg.getReg());
else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
IndexRegNo = 4;
emitSIBByte(SS, IndexRegNo, 5);
} else {
- unsigned BaseRegNo = X86_MC::getX86RegNum(BaseReg);
+ unsigned BaseRegNo = getX86RegNum(BaseReg);
unsigned IndexRegNo;
if (IndexReg.getReg())
- IndexRegNo = X86_MC::getX86RegNum(IndexReg.getReg());
+ IndexRegNo = getX86RegNum(IndexReg.getReg());
else
IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
emitSIBByte(SS, IndexRegNo, BaseRegNo);
@@ -758,10 +764,12 @@ void Emitter<CodeEmitter>::emitOpcodePrefix(uint64_t TSFlags,
// VEX.VVVV => XMM9 => ~9
//
// See table 4-35 of Intel AVX Programming Reference for details.
-static unsigned char getVEXRegisterEncoding(const MachineInstr &MI,
- unsigned OpNum) {
+template<class CodeEmitter>
+unsigned char
+Emitter<CodeEmitter>::getVEXRegisterEncoding(const MachineInstr &MI,
+ unsigned OpNum) const {
unsigned SrcReg = MI.getOperand(OpNum).getReg();
- unsigned SrcRegNum = X86_MC::getX86RegNum(MI.getOperand(OpNum).getReg());
+ unsigned SrcRegNum = getX86RegNum(MI.getOperand(OpNum).getReg());
if (X86II::isX86_64ExtendedReg(SrcReg))
SrcRegNum |= 8;
@@ -923,17 +931,6 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
}
- // Set the vector length to 256-bit if YMM0-YMM15 is used
- for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
- if (!MI.getOperand(i).isReg())
- continue;
- if (MI.getOperand(i).isImplicit())
- continue;
- unsigned SrcReg = MI.getOperand(i).getReg();
- if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15)
- VEX_L = 1;
- }
-
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
unsigned NumOps = Desc->getNumOperands();
unsigned CurOp = 0;
@@ -1248,7 +1245,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
case X86II::AddRegFrm: {
MCE.emitByte(BaseOpcode +
- X86_MC::getX86RegNum(MI.getOperand(CurOp++).getReg()));
+ getX86RegNum(MI.getOperand(CurOp++).getReg()));
if (CurOp == NumOps)
break;
@@ -1283,7 +1280,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
case X86II::MRMDestReg: {
MCE.emitByte(BaseOpcode);
emitRegModRMByte(MI.getOperand(CurOp).getReg(),
- X86_MC::getX86RegNum(MI.getOperand(CurOp+1).getReg()));
+ getX86RegNum(MI.getOperand(CurOp+1).getReg()));
CurOp += 2;
break;
}
@@ -1294,7 +1291,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
SrcRegNum++;
emitMemModRMByte(MI, CurOp,
- X86_MC::getX86RegNum(MI.getOperand(SrcRegNum).getReg()));
+ getX86RegNum(MI.getOperand(SrcRegNum).getReg()));
CurOp = SrcRegNum + 1;
break;
}
@@ -1310,7 +1307,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
++SrcRegNum;
emitRegModRMByte(MI.getOperand(SrcRegNum).getReg(),
- X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg()));
+ getX86RegNum(MI.getOperand(CurOp).getReg()));
// 2 operands skipped with HasMemOp4, compensate accordingly
CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
if (HasVEX_4VOp3)
@@ -1332,7 +1329,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ?
X86II::getSizeOfImm(Desc->TSFlags) : 0;
emitMemModRMByte(MI, FirstMemOp,
- X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg()),PCAdj);
+ getX86RegNum(MI.getOperand(CurOp).getReg()),PCAdj);
CurOp += AddrOperands + 1;
if (HasVEX_4VOp3)
++CurOp;
@@ -1422,7 +1419,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
MCE.emitByte(BaseOpcode);
// Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
emitRegModRMByte(MI.getOperand(CurOp).getReg(),
- X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg()));
+ getX86RegNum(MI.getOperand(CurOp).getReg()));
++CurOp;
break;
@@ -1455,7 +1452,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
const MachineOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
: CurOp);
++CurOp;
- unsigned RegNum = X86_MC::getX86RegNum(MO.getReg()) << 4;
+ unsigned RegNum = getX86RegNum(MO.getReg()) << 4;
if (X86II::isX86_64ExtendedReg(MO.getReg()))
RegNum |= 1 << 7;
// If there is an additional 5th operand it must be an immediate, which
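getVEXRegisterEncoding above returns the 4-bit register number (bit 3 set for the x86-64 extended registers); the VEX.vvvv field then stores that number's one's complement, which is what the XMM9 => ~9 comment refers to. The final encoding step in isolation (illustrative helper, not part of the patch):

    // VEX.vvvv holds the inverted 4-bit register number, so XMM9 (0b1001)
    // is encoded as ~9 & 0xF = 0b0110; an unused vvvv encodes as 0b1111.
    static unsigned char encodeVEXvvvv(unsigned SrcRegNum) {
      return static_cast<unsigned char>(~SrcRegNum & 0xF);
    }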
diff --git a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp b/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
deleted file mode 100644
index c1a49a7..0000000
--- a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
+++ /dev/null
@@ -1,147 +0,0 @@
-//===-- X86ELFWriterInfo.cpp - ELF Writer Info for the X86 backend --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF writer information for the X86 backend.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86ELFWriterInfo.h"
-#include "X86Relocations.h"
-#include "llvm/Function.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Implementation of the X86ELFWriterInfo class
-//===----------------------------------------------------------------------===//
-
-X86ELFWriterInfo::X86ELFWriterInfo(bool is64Bit_, bool isLittleEndian_)
- : TargetELFWriterInfo(is64Bit_, isLittleEndian_) {
- EMachine = is64Bit ? EM_X86_64 : EM_386;
- }
-
-X86ELFWriterInfo::~X86ELFWriterInfo() {}
-
-unsigned X86ELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
- if (is64Bit) {
- switch(MachineRelTy) {
- case X86::reloc_pcrel_word:
- return ELF::R_X86_64_PC32;
- case X86::reloc_absolute_word:
- return ELF::R_X86_64_32;
- case X86::reloc_absolute_word_sext:
- return ELF::R_X86_64_32S;
- case X86::reloc_absolute_dword:
- return ELF::R_X86_64_64;
- case X86::reloc_picrel_word:
- default:
- llvm_unreachable("unknown x86_64 machine relocation type");
- }
- } else {
- switch(MachineRelTy) {
- case X86::reloc_pcrel_word:
- return ELF::R_386_PC32;
- case X86::reloc_absolute_word:
- return ELF::R_386_32;
- case X86::reloc_absolute_word_sext:
- case X86::reloc_absolute_dword:
- case X86::reloc_picrel_word:
- default:
- llvm_unreachable("unknown x86 machine relocation type");
- }
- }
-}
-
-long int X86ELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier) const {
- if (is64Bit) {
- switch(RelTy) {
- case ELF::R_X86_64_PC32: return Modifier - 4;
- case ELF::R_X86_64_32:
- case ELF::R_X86_64_32S:
- case ELF::R_X86_64_64:
- return Modifier;
- default:
- llvm_unreachable("unknown x86_64 relocation type");
- }
- } else {
- switch(RelTy) {
- case ELF::R_386_PC32: return Modifier - 4;
- case ELF::R_386_32: return Modifier;
- default:
- llvm_unreachable("unknown x86 relocation type");
- }
- }
-}
-
-unsigned X86ELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
- if (is64Bit) {
- switch(RelTy) {
- case ELF::R_X86_64_PC32:
- case ELF::R_X86_64_32:
- case ELF::R_X86_64_32S:
- return 32;
- case ELF::R_X86_64_64:
- return 64;
- default:
- llvm_unreachable("unknown x86_64 relocation type");
- }
- } else {
- switch(RelTy) {
- case ELF::R_386_PC32:
- case ELF::R_386_32:
- return 32;
- default:
- llvm_unreachable("unknown x86 relocation type");
- }
- }
-}
-
-bool X86ELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
- if (is64Bit) {
- switch(RelTy) {
- case ELF::R_X86_64_PC32:
- return true;
- case ELF::R_X86_64_32:
- case ELF::R_X86_64_32S:
- case ELF::R_X86_64_64:
- return false;
- default:
- llvm_unreachable("unknown x86_64 relocation type");
- }
- } else {
- switch(RelTy) {
- case ELF::R_386_PC32:
- return true;
- case ELF::R_386_32:
- return false;
- default:
- llvm_unreachable("unknown x86 relocation type");
- }
- }
-}
-
-unsigned X86ELFWriterInfo::getAbsoluteLabelMachineRelTy() const {
- return is64Bit ?
- X86::reloc_absolute_dword : X86::reloc_absolute_word;
-}
-
-long int X86ELFWriterInfo::computeRelocation(unsigned SymOffset,
- unsigned RelOffset,
- unsigned RelTy) const {
-
- if (RelTy == ELF::R_X86_64_PC32 || RelTy == ELF::R_386_PC32)
- return SymOffset - (RelOffset + 4);
-
- llvm_unreachable("computeRelocation unknown for this relocation type");
-}
diff --git a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h b/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h
deleted file mode 100644
index a45b5bb..0000000
--- a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h
+++ /dev/null
@@ -1,59 +0,0 @@
-//===-- X86ELFWriterInfo.h - ELF Writer Info for X86 ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF writer information for the X86 backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86_ELF_WRITER_INFO_H
-#define X86_ELF_WRITER_INFO_H
-
-#include "llvm/Target/TargetELFWriterInfo.h"
-
-namespace llvm {
-
- class X86ELFWriterInfo : public TargetELFWriterInfo {
-
- public:
- X86ELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
- virtual ~X86ELFWriterInfo();
-
- /// getRelocationType - Returns the target specific ELF Relocation type.
- /// 'MachineRelTy' contains the object code independent relocation type
- virtual unsigned getRelocationType(unsigned MachineRelTy) const;
-
- /// hasRelocationAddend - True if the target uses an addend in the
- /// ELF relocation entry.
- virtual bool hasRelocationAddend() const { return is64Bit ? true : false; }
-
- /// getDefaultAddendForRelTy - Gets the default addend value for a
- /// relocation entry based on the target ELF relocation type.
- virtual long int getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier = 0) const;
-
- /// getRelTySize - Returns the size of relocatable field in bits
- virtual unsigned getRelocationTySize(unsigned RelTy) const;
-
- /// isPCRelativeRel - True if the relocation type is pc relative
- virtual bool isPCRelativeRel(unsigned RelTy) const;
-
- /// getJumpTableRelocationTy - Returns the machine relocation type used
- /// to reference a jumptable.
- virtual unsigned getAbsoluteLabelMachineRelTy() const;
-
- /// computeRelocation - Some relocatable fields could be relocated
- /// directly, avoiding the relocation symbol emission, compute the
- /// final relocation value for this symbol.
- virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
- unsigned RelTy) const;
- };
-
-} // end llvm namespace
-
-#endif // X86_ELF_WRITER_INFO_H
diff --git a/contrib/llvm/lib/Target/X86/X86FastISel.cpp b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
index e5952aa..d4627c7 100644
--- a/contrib/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
@@ -45,9 +45,9 @@ class X86FastISel : public FastISel {
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
- /// StackPtr - Register used as the stack pointer.
+ /// RegInfo - X86 register info.
///
- unsigned StackPtr;
+ const X86RegisterInfo *RegInfo;
/// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
/// floating point ops.
@@ -61,9 +61,9 @@ public:
const TargetLibraryInfo *libInfo)
: FastISel(funcInfo, libInfo) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
- StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
+ RegInfo = static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
}
virtual bool TargetSelectInstruction(const Instruction *I);
@@ -710,6 +710,8 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
bool X86FastISel::X86SelectRet(const Instruction *I) {
const ReturnInst *Ret = cast<ReturnInst>(I);
const Function &F = *I->getParent()->getParent();
+ const X86MachineFunctionInfo *X86MFInfo =
+ FuncInfo.MF->getInfo<X86MachineFunctionInfo>();
if (!FuncInfo.CanLowerReturn)
return false;
@@ -724,8 +726,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
return false;
// Don't handle popping bytes on return for now.
- if (FuncInfo.MF->getInfo<X86MachineFunctionInfo>()
- ->getBytesToPopOnReturn() != 0)
+ if (X86MFInfo->getBytesToPopOnReturn() != 0)
return 0;
// fastcc with -tailcallopt is intended to provide a guaranteed
@@ -809,6 +810,19 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
MRI.addLiveOut(VA.getLocReg());
}
+ // The x86-64 ABI for returning structs by value requires that we copy
+ // the sret argument into %rax for the return. We saved the argument into
+ // a virtual register in the entry block, so now we copy the value out
+ // and into %rax.
+ if (Subtarget->is64Bit() && F.hasStructRetAttr()) {
+ unsigned Reg = X86MFInfo->getSRetReturnReg();
+ assert(Reg &&
+ "SRetReturnReg should have been set in LowerFormalArguments()!");
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ X86::RAX).addReg(Reg);
+ MRI.addLiveOut(X86::RAX);
+ }
+
// Now emit the RET.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
return true;
@@ -1527,9 +1541,9 @@ static unsigned computeBytesPoppedByCallee(const X86Subtarget &Subtarget,
CallingConv::ID CC = CS.getCallingConv();
if (CC == CallingConv::Fast || CC == CallingConv::GHC)
return 0;
- if (!CS.paramHasAttr(1, Attribute::StructRet))
+ if (!CS.paramHasAttr(1, Attributes::StructRet))
return 0;
- if (CS.paramHasAttr(1, Attribute::InReg))
+ if (CS.paramHasAttr(1, Attributes::InReg))
return 0;
return 4;
}
@@ -1608,12 +1622,12 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
Value *ArgVal = *i;
ISD::ArgFlagsTy Flags;
unsigned AttrInd = i - CS.arg_begin() + 1;
- if (CS.paramHasAttr(AttrInd, Attribute::SExt))
+ if (CS.paramHasAttr(AttrInd, Attributes::SExt))
Flags.setSExt();
- if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
+ if (CS.paramHasAttr(AttrInd, Attributes::ZExt))
Flags.setZExt();
- if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) {
+ if (CS.paramHasAttr(AttrInd, Attributes::ByVal)) {
PointerType *Ty = cast<PointerType>(ArgVal->getType());
Type *ElementTy = Ty->getElementType();
unsigned FrameSize = TD.getTypeAllocSize(ElementTy);
@@ -1627,9 +1641,9 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
return false;
}
- if (CS.paramHasAttr(AttrInd, Attribute::InReg))
+ if (CS.paramHasAttr(AttrInd, Attributes::InReg))
Flags.setInReg();
- if (CS.paramHasAttr(AttrInd, Attribute::Nest))
+ if (CS.paramHasAttr(AttrInd, Attributes::Nest))
Flags.setNest();
// If this is an i1/i8/i16 argument, promote to i32 to avoid an extra
@@ -1771,7 +1785,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
} else {
unsigned LocMemOffset = VA.getLocMemOffset();
X86AddressMode AM;
- AM.Base.Reg = StackPtr;
+ AM.Base.Reg = RegInfo->getStackRegister();
AM.Disp = LocMemOffset;
const Value *ArgVal = ArgVals[VA.getValNo()];
ISD::ArgFlagsTy Flags = ArgFlags[VA.getValNo()];
@@ -1897,11 +1911,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT.getSimpleVT();
MyFlags.Used = !CS.getInstruction()->use_empty();
- if (CS.paramHasAttr(0, Attribute::SExt))
+ if (CS.paramHasAttr(0, Attributes::SExt))
MyFlags.Flags.setSExt();
- if (CS.paramHasAttr(0, Attribute::ZExt))
+ if (CS.paramHasAttr(0, Attributes::ZExt))
MyFlags.Flags.setZExt();
- if (CS.paramHasAttr(0, Attribute::InReg))
+ if (CS.paramHasAttr(0, Attributes::InReg))
MyFlags.Flags.setInReg();
Ins.push_back(MyFlags);
}
@@ -2014,13 +2028,17 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
MVT VT;
if (!isTypeLegal(C->getType(), VT))
- return false;
+ return 0;
+
+ // Can't handle alternate code models yet.
+ if (TM.getCodeModel() != CodeModel::Small)
+ return 0;
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
switch (VT.SimpleTy) {
- default: return false;
+ default: return 0;
case MVT::i8:
Opc = X86::MOV8rm;
RC = &X86::GR8RegClass;
@@ -2058,7 +2076,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
break;
case MVT::f80:
// No f80 support yet.
- return false;
+ return 0;
}
// Materialize addresses with LEA instructions.
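The sret block added to X86SelectRet implements the x86-64 ABI rule that a function taking a hidden struct-return pointer must also return that pointer in %rax, so callers can rely on it after the call. At the source level the pattern is simply returning a large struct by value (the hidden pointer is compiler-inserted; nothing below appears in the patch):

    // On x86-64, a struct too big for registers is returned through a hidden
    // sret pointer argument, and the callee hands that pointer back in %rax.
    struct Big { long a, b, c, d; };

    Big makeBig() {              // effectively: void makeBig(Big *sret)
      return Big{1, 2, 3, 4};    // writes through sret; sret comes back in %rax
    }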
diff --git a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
index 955c75a..791f598 100644
--- a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -171,6 +171,7 @@ namespace {
// Shuffle live registers to match the expectations of successor blocks.
void finishBlockStack();
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void dumpStack() const {
dbgs() << "Stack contents:";
for (unsigned i = 0; i != StackTop; ++i) {
@@ -181,6 +182,7 @@ namespace {
dbgs() << ", ST" << i << " in FP" << unsigned(PendingST[i]);
dbgs() << "\n";
}
+#endif
/// getSlot - Return the stack slot number a particular register number is
/// in.
@@ -575,8 +577,8 @@ namespace {
friend bool operator<(const TableEntry &TE, unsigned V) {
return TE.from < V;
}
- friend bool LLVM_ATTRIBUTE_USED operator<(unsigned V,
- const TableEntry &TE) {
+ friend bool LLVM_ATTRIBUTE_UNUSED operator<(unsigned V,
+ const TableEntry &TE) {
return V < TE.from;
}
};
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
index 2238688..369589d 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -25,7 +25,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/SmallSet.h"
@@ -313,11 +313,11 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
if (CSI.empty()) return;
std::vector<MachineMove> &Moves = MMI.getFrameMoves();
- const TargetData *TD = TM.getTargetData();
+ const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
bool HasFP = hasFP(MF);
// Calculate amount of bytes used for return address storing.
- int stackGrowth = -TD->getPointerSize();
+ int stackGrowth = -RegInfo->getSlotSize();
// FIXME: This is dirty hack. The code itself is pretty mess right now.
// It should be rewritten from scratch and generalized sometimes.
@@ -674,7 +674,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// function, and use up to 128 bytes of stack space, don't have a frame
// pointer, calls, or dynamic alloca then we do not need to adjust the
// stack pointer (we fit in the Red Zone).
- if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
+ if (Is64Bit && !Fn->getFnAttributes().hasAttribute(Attributes::NoRedZone) &&
!RegInfo->needsStackRealignment(MF) &&
!MFI->hasVarSizedObjects() && // No dynamic alloca.
!MFI->adjustsStack() && // No calls.
@@ -715,9 +715,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// ELSE => DW_CFA_offset_extended
std::vector<MachineMove> &Moves = MMI.getFrameMoves();
- const TargetData *TD = MF.getTarget().getTargetData();
uint64_t NumBytes = 0;
- int stackGrowth = -TD->getPointerSize();
+ int stackGrowth = -SlotSize;
if (HasFP) {
// Calculate required stack adjustment.
@@ -836,8 +835,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
MI->getOperand(3).setIsDead();
}
- DL = MBB.findDebugLoc(MBBI);
-
// If there is an SUB32ri of ESP immediately before this instruction, merge
// the two. This can be the case when tail call elimination is enabled and
// the callee has more arguments then the caller.
diff --git a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 27195b4..99f5574 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -100,6 +100,7 @@ namespace {
Base_Reg = Reg;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void dump() {
dbgs() << "X86ISelAddressMode " << this << '\n';
dbgs() << "Base_Reg ";
@@ -133,6 +134,7 @@ namespace {
dbgs() << "nul";
dbgs() << " JT" << JT << " Align" << Align << '\n';
}
+#endif
};
}
@@ -189,7 +191,6 @@ namespace {
SDNode *Select(SDNode *N);
SDNode *SelectGather(SDNode *N, unsigned Opc);
SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
- SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);
bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
@@ -244,13 +245,15 @@ namespace {
else if (AM.CP)
Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
AM.Align, AM.Disp, AM.SymbolFlags);
- else if (AM.ES)
+ else if (AM.ES) {
+ assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
- else if (AM.JT != -1)
+ } else if (AM.JT != -1) {
+ assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
- else if (AM.BlockAddr)
- Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
- true, AM.SymbolFlags);
+ } else if (AM.BlockAddr)
+ Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
+ AM.SymbolFlags);
else
Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
@@ -359,7 +362,7 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
/// MoveBelowCallOrigChain - Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
- SDValue Call, SDValue OrigChain) {
+ SDValue Call, SDValue OrigChain) {
SmallVector<SDValue, 8> Ops;
SDValue Chain = OrigChain.getOperand(0);
if (Chain.getNode() == Load.getNode())
@@ -383,11 +386,13 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
Load.getOperand(1), Load.getOperand(2));
+
+ unsigned NumOps = Call.getNode()->getNumOperands();
Ops.clear();
Ops.push_back(SDValue(Load.getNode(), 1));
- for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
+ for (unsigned i = 1, e = NumOps; i != e; ++i)
Ops.push_back(Call.getOperand(i));
- CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps);
}
/// isCalleeLoad - Return true if call address is a load and it can be
@@ -396,6 +401,10 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
+ // The transformation is somewhat dangerous if the call's chain was glued to
+ // the call. After MoveBelowOrigChain the load is moved between the call and
+  // the chain; this can create a cycle if the load is not folded. So it is
+ // *really* important that we are sure the load will be folded.
if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
return false;
LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
@@ -425,7 +434,8 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptForSize is used in pattern predicates that isel is matching.
- OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+ OptForSize = MF->getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize);
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ) {
@@ -433,7 +443,10 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
if (OptLevel != CodeGenOpt::None &&
(N->getOpcode() == X86ISD::CALL ||
- N->getOpcode() == X86ISD::TC_RETURN)) {
+ (N->getOpcode() == X86ISD::TC_RETURN &&
+          // Only do this if the load can be folded into TC_RETURN.
+ (Subtarget->is64Bit() ||
+ getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
/// Also try moving call address load from outside callseq_start to just
/// before the call to allow it to be folded.
///
@@ -652,10 +665,16 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
} else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
AM.JT = J->getIndex();
AM.SymbolFlags = J->getTargetFlags();
- } else {
- AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
- AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
- }
+ } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
+ X86ISelAddressMode Backup = AM;
+ AM.BlockAddr = BA->getBlockAddress();
+ AM.SymbolFlags = BA->getTargetFlags();
+ if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
+ AM = Backup;
+ return true;
+ }
+ } else
+ llvm_unreachable("Unhandled symbol reference node.");
if (N.getOpcode() == X86ISD::WrapperRIP)
AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
@@ -684,10 +703,12 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
} else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
AM.JT = J->getIndex();
AM.SymbolFlags = J->getTargetFlags();
- } else {
- AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
- AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
- }
+ } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
+ AM.BlockAddr = BA->getBlockAddress();
+ AM.Disp += BA->getOffset();
+ AM.SymbolFlags = BA->getTargetFlags();
+ } else
+ llvm_unreachable("Unhandled symbol reference node.");
return false;
}
@@ -1011,7 +1032,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
AM.IndexReg = ShVal.getNode()->getOperand(0);
ConstantSDNode *AddVal =
cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
- uint64_t Disp = AddVal->getSExtValue() << Val;
+ uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
if (!FoldOffsetIntoAddress(Disp, AM))
return false;
}
@@ -1281,7 +1302,9 @@ bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
// that are not a MemSDNode, and thus don't have proper addrspace info.
Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
- Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
+ Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
+ Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
+ Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
unsigned AddrSpace =
cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
// AddrSpace 256 -> GS, 257 -> FS.
@@ -1468,6 +1491,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
SDValue In1 = Node->getOperand(1);
SDValue In2L = Node->getOperand(2);
SDValue In2H = Node->getOperand(3);
+
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
return NULL;
@@ -1481,159 +1505,13 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
return ResNode;
}
-// FIXME: Figure out some way to unify this with the 'or' and other code
-// below.
-SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
- if (Node->hasAnyUseOfValue(0))
- return 0;
-
- // Optimize common patterns for __sync_add_and_fetch and
- // __sync_sub_and_fetch where the result is not used. This allows us
- // to use "lock" version of add, sub, inc, dec instructions.
- // FIXME: Do not use special instructions but instead add the "lock"
- // prefix to the target node somehow. The extra information will then be
- // transferred to machine instruction and it denotes the prefix.
- SDValue Chain = Node->getOperand(0);
- SDValue Ptr = Node->getOperand(1);
- SDValue Val = Node->getOperand(2);
- SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
- if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
- return 0;
-
- bool isInc = false, isDec = false, isSub = false, isCN = false;
- ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
- if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
- isCN = true;
- int64_t CNVal = CN->getSExtValue();
- if (CNVal == 1)
- isInc = true;
- else if (CNVal == -1)
- isDec = true;
- else if (CNVal >= 0)
- Val = CurDAG->getTargetConstant(CNVal, NVT);
- else {
- isSub = true;
- Val = CurDAG->getTargetConstant(-CNVal, NVT);
- }
- } else if (Val.hasOneUse() &&
- Val.getOpcode() == ISD::SUB &&
- X86::isZeroNode(Val.getOperand(0))) {
- isSub = true;
- Val = Val.getOperand(1);
- }
-
- DebugLoc dl = Node->getDebugLoc();
- unsigned Opc = 0;
- switch (NVT.getSimpleVT().SimpleTy) {
- default: return 0;
- case MVT::i8:
- if (isInc)
- Opc = X86::LOCK_INC8m;
- else if (isDec)
- Opc = X86::LOCK_DEC8m;
- else if (isSub) {
- if (isCN)
- Opc = X86::LOCK_SUB8mi;
- else
- Opc = X86::LOCK_SUB8mr;
- } else {
- if (isCN)
- Opc = X86::LOCK_ADD8mi;
- else
- Opc = X86::LOCK_ADD8mr;
- }
- break;
- case MVT::i16:
- if (isInc)
- Opc = X86::LOCK_INC16m;
- else if (isDec)
- Opc = X86::LOCK_DEC16m;
- else if (isSub) {
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = X86::LOCK_SUB16mi8;
- else
- Opc = X86::LOCK_SUB16mi;
- } else
- Opc = X86::LOCK_SUB16mr;
- } else {
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = X86::LOCK_ADD16mi8;
- else
- Opc = X86::LOCK_ADD16mi;
- } else
- Opc = X86::LOCK_ADD16mr;
- }
- break;
- case MVT::i32:
- if (isInc)
- Opc = X86::LOCK_INC32m;
- else if (isDec)
- Opc = X86::LOCK_DEC32m;
- else if (isSub) {
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = X86::LOCK_SUB32mi8;
- else
- Opc = X86::LOCK_SUB32mi;
- } else
- Opc = X86::LOCK_SUB32mr;
- } else {
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = X86::LOCK_ADD32mi8;
- else
- Opc = X86::LOCK_ADD32mi;
- } else
- Opc = X86::LOCK_ADD32mr;
- }
- break;
- case MVT::i64:
- if (isInc)
- Opc = X86::LOCK_INC64m;
- else if (isDec)
- Opc = X86::LOCK_DEC64m;
- else if (isSub) {
- Opc = X86::LOCK_SUB64mr;
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = X86::LOCK_SUB64mi8;
- else if (i64immSExt32(Val.getNode()))
- Opc = X86::LOCK_SUB64mi32;
- }
- } else {
- Opc = X86::LOCK_ADD64mr;
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = X86::LOCK_ADD64mi8;
- else if (i64immSExt32(Val.getNode()))
- Opc = X86::LOCK_ADD64mi32;
- }
- }
- break;
- }
-
- SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, NVT), 0);
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
- if (isInc || isDec) {
- SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
- SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
- cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
- SDValue RetVals[] = { Undef, Ret };
- return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
- } else {
- SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
- SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
- cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
- SDValue RetVals[] = { Undef, Ret };
- return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
- }
-}
-
+/// Atomic opcode table
+///
enum AtomicOpc {
+ ADD,
+ SUB,
+ INC,
+ DEC,
OR,
AND,
XOR,
@@ -1657,6 +1535,58 @@ enum AtomicSz {
static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
{
+ X86::LOCK_ADD8mi,
+ X86::LOCK_ADD8mr,
+ X86::LOCK_ADD16mi8,
+ X86::LOCK_ADD16mi,
+ X86::LOCK_ADD16mr,
+ X86::LOCK_ADD32mi8,
+ X86::LOCK_ADD32mi,
+ X86::LOCK_ADD32mr,
+ X86::LOCK_ADD64mi8,
+ X86::LOCK_ADD64mi32,
+ X86::LOCK_ADD64mr,
+ },
+ {
+ X86::LOCK_SUB8mi,
+ X86::LOCK_SUB8mr,
+ X86::LOCK_SUB16mi8,
+ X86::LOCK_SUB16mi,
+ X86::LOCK_SUB16mr,
+ X86::LOCK_SUB32mi8,
+ X86::LOCK_SUB32mi,
+ X86::LOCK_SUB32mr,
+ X86::LOCK_SUB64mi8,
+ X86::LOCK_SUB64mi32,
+ X86::LOCK_SUB64mr,
+ },
+ {
+ 0,
+ X86::LOCK_INC8m,
+ 0,
+ 0,
+ X86::LOCK_INC16m,
+ 0,
+ 0,
+ X86::LOCK_INC32m,
+ 0,
+ 0,
+ X86::LOCK_INC64m,
+ },
+ {
+ 0,
+ X86::LOCK_DEC8m,
+ 0,
+ 0,
+ X86::LOCK_DEC16m,
+ 0,
+ 0,
+ X86::LOCK_DEC32m,
+ 0,
+ 0,
+ X86::LOCK_DEC64m,
+ },
+ {
X86::LOCK_OR8mi,
X86::LOCK_OR8mr,
X86::LOCK_OR16mi8,
@@ -1667,7 +1597,7 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
X86::LOCK_OR32mr,
X86::LOCK_OR64mi8,
X86::LOCK_OR64mi32,
- X86::LOCK_OR64mr
+ X86::LOCK_OR64mr,
},
{
X86::LOCK_AND8mi,
@@ -1680,7 +1610,7 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
X86::LOCK_AND32mr,
X86::LOCK_AND64mi8,
X86::LOCK_AND64mi32,
- X86::LOCK_AND64mr
+ X86::LOCK_AND64mr,
},
{
X86::LOCK_XOR8mi,
@@ -1693,18 +1623,74 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
X86::LOCK_XOR32mr,
X86::LOCK_XOR64mi8,
X86::LOCK_XOR64mi32,
- X86::LOCK_XOR64mr
+ X86::LOCK_XOR64mr,
}
};
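
For orientation: the table is indexed first by AtomicOpc and then by AtomicSz (whose definition is elided by the hunk header above). Judging from the ADD and SUB rows, the size axis appears to run {ConstantI8, I8, SextConstantI16, ConstantI16, I16, SextConstantI32, ConstantI32, I32, SextConstantI64, ConstantI64, I64}, and a zero entry means no locked encoding exists for that combination, which is why the unary INC/DEC rows only populate the plain memory-form slots. A minimal lookup sketch under those assumptions:

#include <cassert>

static unsigned pickLockedOpcode(AtomicOpc Op, AtomicSz Sz) {
  unsigned Opc = AtomicOpcTbl[Op][Sz];
  assert(Opc != 0 && "no locked encoding for this op/size combination");
  return Opc;
}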
+// Return the target constant operand for atomic-load-op and do simple
+// translations, such as from atomic-load-add to lock-sub. The return value is
+// one of the following 3 cases:
+// + target-constant, the operand can be encoded as a target constant.
+// + empty, the operand is no longer needed once the new op is selected.
+// + non-empty, otherwise.
+static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
+ DebugLoc dl,
+ enum AtomicOpc &Op, EVT NVT,
+ SDValue Val) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
+ int64_t CNVal = CN->getSExtValue();
+ // Quit if not 32-bit imm.
+ if ((int32_t)CNVal != CNVal)
+ return Val;
+ // For atomic-load-add, we could do some optimizations.
+ if (Op == ADD) {
+ // Translate to INC/DEC if ADD by 1 or -1.
+ if ((CNVal == 1) || (CNVal == -1)) {
+ Op = (CNVal == 1) ? INC : DEC;
+ // No more constant operand after being translated into INC/DEC.
+ return SDValue();
+ }
+ // Translate to SUB if ADD by negative value.
+ if (CNVal < 0) {
+ Op = SUB;
+ CNVal = -CNVal;
+ }
+ }
+ return CurDAG->getTargetConstant(CNVal, NVT);
+ }
+
+  // If the value operand has a single use, try to optimize it.
+ if (Op == ADD && Val.hasOneUse()) {
+ // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
+ if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
+ Op = SUB;
+ return Val.getOperand(1);
+ }
+    // A special case for i16, which needs truncating because, in most cases,
+    // it has been promoted to i32. We will translate
+    // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x)).
+ if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
+ Val.getOperand(0).getOpcode() == ISD::SUB &&
+ X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
+ Op = SUB;
+ Val = Val.getOperand(0);
+ return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
+ Val.getOperand(1));
+ }
+ }
+
+ return Val;
+}
+
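
As a standalone restatement of the constant-folding rules above (a sketch, not LLVM code; std::optional stands in for the empty SDValue()):

#include <cstdint>
#include <optional>

enum class AtomOp { Add, Sub, Inc, Dec };

// ADD by +/-1 becomes the unary INC/DEC (no operand left); ADD by any other
// negative constant becomes SUB of the negated value.
std::optional<int64_t> foldAddConstant(AtomOp &Op, int64_t C) {
  if ((int32_t)C != C)        // not a 32-bit immediate: keep the operand
    return C;
  if (Op == AtomOp::Add) {
    if (C == 1 || C == -1) {
      Op = (C == 1) ? AtomOp::Inc : AtomOp::Dec;
      return std::nullopt;    // no constant operand remains
    }
    if (C < 0) {
      Op = AtomOp::Sub;
      C = -C;
    }
  }
  return C;
}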
SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
if (Node->hasAnyUseOfValue(0))
return 0;
+ DebugLoc dl = Node->getDebugLoc();
+
// Optimize common patterns for __sync_or_and_fetch and similar arith
// operations where the result is not used. This allows us to use the "lock"
// version of the arithmetic instruction.
- // FIXME: Same as for 'add' and 'sub', try to merge those down here.
SDValue Chain = Node->getOperand(0);
SDValue Ptr = Node->getOperand(1);
SDValue Val = Node->getOperand(2);
@@ -1715,6 +1701,8 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
// Which index into the table.
enum AtomicOpc Op;
switch (Node->getOpcode()) {
+ default:
+ return 0;
case ISD::ATOMIC_LOAD_OR:
Op = OR;
break;
@@ -1724,16 +1712,14 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
case ISD::ATOMIC_LOAD_XOR:
Op = XOR;
break;
- default:
- return 0;
- }
-
- bool isCN = false;
- ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
- if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
- isCN = true;
- Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
+ case ISD::ATOMIC_LOAD_ADD:
+ Op = ADD;
+ break;
}
+
+ Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val);
+ bool isUnOp = !Val.getNode();
+ bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);
unsigned Opc = 0;
switch (NVT.getSimpleVT().SimpleTy) {
@@ -1775,13 +1761,20 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
assert(Opc != 0 && "Invalid arith lock transform!");
- DebugLoc dl = Node->getDebugLoc();
+ SDValue Ret;
SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
dl, NVT), 0);
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
- SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
- SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
+ if (isUnOp) {
+ SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
+ Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops,
+ array_lengthof(Ops)), 0);
+ } else {
+ SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
+ Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops,
+ array_lengthof(Ops)), 0);
+ }
cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
SDValue RetVals[] = { Undef, Ret };
return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
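
The source-level pattern this path targets, as a sketch (the exact instructions chosen depend on target options):

#include <atomic>

void bump(std::atomic<int> &Counter) {
  Counter.fetch_add(1); // result unused: selectable as "lock incl (mem)"
  Counter.fetch_or(4);  // result unused: selectable as "lock orl $4, (mem)"
}

Because neither fetched value is used, the selector can emit the flag-setting locked RMW instruction directly instead of an xadd or a cmpxchg loop.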
@@ -2059,6 +2052,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case X86ISD::ATOMSUB64_DAG:
case X86ISD::ATOMNAND64_DAG:
case X86ISD::ATOMAND64_DAG:
+ case X86ISD::ATOMMAX64_DAG:
+ case X86ISD::ATOMMIN64_DAG:
+ case X86ISD::ATOMUMAX64_DAG:
+ case X86ISD::ATOMUMIN64_DAG:
case X86ISD::ATOMSWAP64_DAG: {
unsigned Opc;
switch (Opcode) {
@@ -2069,6 +2066,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case X86ISD::ATOMSUB64_DAG: Opc = X86::ATOMSUB6432; break;
case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
case X86ISD::ATOMAND64_DAG: Opc = X86::ATOMAND6432; break;
+ case X86ISD::ATOMMAX64_DAG: Opc = X86::ATOMMAX6432; break;
+ case X86ISD::ATOMMIN64_DAG: Opc = X86::ATOMMIN6432; break;
+ case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break;
+ case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break;
case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
}
SDNode *RetVal = SelectAtomic64(Node, Opc);
@@ -2077,15 +2078,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
break;
}
- case ISD::ATOMIC_LOAD_ADD: {
- SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
- if (RetVal)
- return RetVal;
- break;
- }
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR: {
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_ADD: {
SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
if (RetVal)
return RetVal;
@@ -2116,7 +2112,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// Make sure that we don't change the operation by removing bits.
  // This only matters for OR and XOR; AND is unaffected.
- if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
+ uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
+ if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
break;
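
Both forms of the guard reject exactly the values whose low ShlVal bits are set; a sketch of the equivalence (for ShlVal < 64):

#include <cstdint>

bool losesBitsShiftForm(uint64_t Val, unsigned ShlVal) {
  return ((Val >> ShlVal) << ShlVal) != Val;  // old formulation
}

bool losesBitsMaskForm(uint64_t Val, unsigned ShlVal) {
  return (Val & ((1ULL << ShlVal) - 1)) != 0; // new formulation
}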
unsigned ShlOp, Op;
@@ -2199,13 +2196,16 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue N1 = Node->getOperand(1);
bool isSigned = Opcode == ISD::SMUL_LOHI;
+ bool hasBMI2 = Subtarget->hasBMI2();
if (!isSigned) {
switch (NVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
- case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
- case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
+ case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
+ MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
+ case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
+ MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
}
} else {
switch (NVT.getSimpleVT().SimpleTy) {
@@ -2217,13 +2217,31 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
}
- unsigned LoReg, HiReg;
- switch (NVT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unsupported VT!");
- case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
- case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
- case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
- case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
+ unsigned SrcReg, LoReg, HiReg;
+ switch (Opc) {
+ default: llvm_unreachable("Unknown MUL opcode!");
+ case X86::IMUL8r:
+ case X86::MUL8r:
+ SrcReg = LoReg = X86::AL; HiReg = X86::AH;
+ break;
+ case X86::IMUL16r:
+ case X86::MUL16r:
+ SrcReg = LoReg = X86::AX; HiReg = X86::DX;
+ break;
+ case X86::IMUL32r:
+ case X86::MUL32r:
+ SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
+ break;
+ case X86::IMUL64r:
+ case X86::MUL64r:
+ SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
+ break;
+ case X86::MULX32rr:
+ SrcReg = X86::EDX; LoReg = HiReg = 0;
+ break;
+ case X86::MULX64rr:
+ SrcReg = X86::RDX; LoReg = HiReg = 0;
+ break;
}
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
@@ -2235,22 +2253,47 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
std::swap(N0, N1);
}
- SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
+ SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
N0, SDValue()).getValue(1);
+ SDValue ResHi, ResLo;
if (foldedLoad) {
+ SDValue Chain;
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
InFlag };
- SDNode *CNode =
- CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
- array_lengthof(Ops));
- InFlag = SDValue(CNode, 1);
+ if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
+ SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
+ SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops,
+ array_lengthof(Ops));
+ ResHi = SDValue(CNode, 0);
+ ResLo = SDValue(CNode, 1);
+ Chain = SDValue(CNode, 2);
+ InFlag = SDValue(CNode, 3);
+ } else {
+ SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
+ SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops,
+ array_lengthof(Ops));
+ Chain = SDValue(CNode, 0);
+ InFlag = SDValue(CNode, 1);
+ }
// Update the chain.
- ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
+ ReplaceUses(N1.getValue(1), Chain);
} else {
- SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
- InFlag = SDValue(CNode, 0);
+ SDValue Ops[] = { N1, InFlag };
+ if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
+ SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
+ SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops,
+ array_lengthof(Ops));
+ ResHi = SDValue(CNode, 0);
+ ResLo = SDValue(CNode, 1);
+ InFlag = SDValue(CNode, 2);
+ } else {
+ SDVTList VTs = CurDAG->getVTList(MVT::Glue);
+ SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops,
+ array_lengthof(Ops));
+ InFlag = SDValue(CNode, 0);
+ }
}
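
For reference, the BMI2 MULX form selected above takes one source implicitly in EDX/RDX (hence SrcReg) and writes both product halves to explicit destinations without touching EFLAGS; the legacy MUL keeps its results in the fixed AX/DX register pairs. What MULX computes, as a 32-bit sketch:

#include <cstdint>

// hi:lo = a * b, flag-free; 'a' models the implicit EDX source operand.
void mulx32(uint32_t a, uint32_t b, uint32_t &hi, uint32_t &lo) {
  uint64_t p = (uint64_t)a * b;
  hi = (uint32_t)(p >> 32);
  lo = (uint32_t)p;
}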
// Prevent use of AH in a REX instruction by referencing AX instead.
@@ -2275,19 +2318,25 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Copy the low half of the result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- LoReg, NVT, InFlag);
- InFlag = Result.getValue(2);
- ReplaceUses(SDValue(Node, 0), Result);
- DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
+ if (ResLo.getNode() == 0) {
+ assert(LoReg && "Register for low half is not defined!");
+ ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
+ InFlag);
+ InFlag = ResLo.getValue(2);
+ }
+ ReplaceUses(SDValue(Node, 0), ResLo);
+ DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
}
// Copy the high half of the result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- HiReg, NVT, InFlag);
- InFlag = Result.getValue(2);
- ReplaceUses(SDValue(Node, 1), Result);
- DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
+ if (ResHi.getNode() == 0) {
+ assert(HiReg && "Register for high half is not defined!");
+ ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
+ InFlag);
+ InFlag = ResHi.getValue(2);
+ }
+ ReplaceUses(SDValue(Node, 1), ResHi);
+ DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
}
return NULL;
@@ -2488,7 +2537,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
MVT::i8, Reg);
// Emit a testb.
- return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
+ SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
+ Subreg, Imm);
+      // Replace SUB|CMP with TEST; since SUB has two outputs while TEST has
+      // only one, do not call ReplaceAllUsesWith.
+ ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
+ SDValue(NewNode, 0));
+ return NULL;
}
// For example, "testl %eax, $2048" to "testb %ah, $8".
@@ -2519,8 +2574,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
// target GR8_NOREX registers, so make sure the register class is
// forced.
- return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32,
- Subreg, ShiftedImm);
+ SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
+ MVT::i32, Subreg, ShiftedImm);
+        // Replace SUB|CMP with TEST; since SUB has two outputs while TEST has
+        // only one, do not call ReplaceAllUsesWith.
+ ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
+ SDValue(NewNode, 0));
+ return NULL;
}
// For example, "testl %eax, $32776" to "testw %ax, $32776".
@@ -2536,7 +2596,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
MVT::i16, Reg);
// Emit a testw.
- return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
+ SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
+ Subreg, Imm);
+        // Replace SUB|CMP with TEST; since SUB has two outputs while TEST has
+        // only one, do not call ReplaceAllUsesWith.
+ ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
+ SDValue(NewNode, 0));
+ return NULL;
}
// For example, "testq %rax, $268468232" to "testl %eax, $268468232".
@@ -2552,7 +2618,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
MVT::i32, Reg);
// Emit a testl.
- return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
+ SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
+ Subreg, Imm);
+        // Replace SUB|CMP with TEST; since SUB has two outputs while TEST has
+        // only one, do not call ReplaceAllUsesWith.
+ ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
+ SDValue(NewNode, 0));
+ return NULL;
}
}
break;
@@ -2607,85 +2679,6 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
return Result;
}
-
- // FIXME: Custom handling because TableGen doesn't support multiple implicit
- // defs in an instruction pattern
- case X86ISD::PCMPESTRI: {
- SDValue N0 = Node->getOperand(0);
- SDValue N1 = Node->getOperand(1);
- SDValue N2 = Node->getOperand(2);
- SDValue N3 = Node->getOperand(3);
- SDValue N4 = Node->getOperand(4);
-
- // Make sure last argument is a constant
- ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N4);
- if (!Cst)
- break;
-
- uint64_t Imm = Cst->getZExtValue();
-
- SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
- X86::EAX, N1, SDValue()).getValue(1);
- InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
- N3, InFlag).getValue(1);
-
- SDValue Ops[] = { N0, N2, getI8Imm(Imm), InFlag };
- unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr :
- X86::PCMPESTRIrr;
- InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
- array_lengthof(Ops)), 0);
-
- if (!SDValue(Node, 0).use_empty()) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- X86::ECX, NVT, InFlag);
- InFlag = Result.getValue(2);
- ReplaceUses(SDValue(Node, 0), Result);
- }
- if (!SDValue(Node, 1).use_empty()) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- X86::EFLAGS, NVT, InFlag);
- InFlag = Result.getValue(2);
- ReplaceUses(SDValue(Node, 1), Result);
- }
-
- return NULL;
- }
-
- // FIXME: Custom handling because TableGen doesn't support multiple implicit
- // defs in an instruction pattern
- case X86ISD::PCMPISTRI: {
- SDValue N0 = Node->getOperand(0);
- SDValue N1 = Node->getOperand(1);
- SDValue N2 = Node->getOperand(2);
-
- // Make sure last argument is a constant
- ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N2);
- if (!Cst)
- break;
-
- uint64_t Imm = Cst->getZExtValue();
-
- SDValue Ops[] = { N0, N1, getI8Imm(Imm) };
- unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr :
- X86::PCMPISTRIrr;
- SDValue InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
- array_lengthof(Ops)), 0);
-
- if (!SDValue(Node, 0).use_empty()) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- X86::ECX, NVT, InFlag);
- InFlag = Result.getValue(2);
- ReplaceUses(SDValue(Node, 0), Result);
- }
- if (!SDValue(Node, 1).use_empty()) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- X86::EFLAGS, NVT, InFlag);
- InFlag = Result.getValue(2);
- ReplaceUses(SDValue(Node, 1), Result);
- }
-
- return NULL;
- }
}
SDNode *ResNode = SelectCode(Node);
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
index 4af12e4..b35fb51 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -85,7 +85,7 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
* ElemsPerChunk);
- SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
+ SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
VecIdx);
@@ -118,7 +118,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
* ElemsPerChunk);
- SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
+ SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
VecIdx);
}
@@ -158,10 +158,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
- X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
RegInfo = TM.getRegisterInfo();
- TD = getTargetData();
+ TD = getDataLayout();
// Set up the TargetLowering object.
static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
@@ -180,7 +179,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setSchedulingPreference(Sched::ILP);
else
setSchedulingPreference(Sched::RegPressure);
- setStackPointerRegisterToSaveRestore(X86StackPtr);
+ setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
+
+  // Bypass i32 division with i8 division on Atom when compiling at O2 or higher
+ if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default)
+ addBypassSlowDiv(32, 8);
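
What the bypass buys on Atom, as a sketch (the actual rewrite is inserted by the generic slow-division bypass in CodeGen, not shown in this diff; the exact shape of the check is an assumption):

#include <cstdint>

uint32_t div32(uint32_t a, uint32_t b) {
  if (((a | b) & 0xffffff00u) == 0)  // both operands fit in 8 bits
    return (uint8_t)a / (uint8_t)b;  // cheap 8-bit divide path
  return a / b;                      // full-width divide
}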
if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
// Setup Windows compiler runtime calls.
@@ -453,6 +456,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SETCC , MVT::i64 , Custom);
}
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
+  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
+  // SjLj exception handling but rather a lightweight setjmp/longjmp
+  // replacement to support continuations, user-level threading, and so on.
+  // As a result, no other SjLj exception interfaces are implemented, so
+  // please don't build your own exception handling based on them.
+ // LLVM/Clang supports zero-cost DWARF exception handling.
+ setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
+ setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
// Darwin ABI issue.
setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
@@ -510,6 +521,10 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
}
if (Subtarget->hasCmpxchg16b()) {
@@ -541,6 +556,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
setOperationAction(ISD::TRAP, MVT::Other, Legal);
+ setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
@@ -737,6 +753,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FFLOOR, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
@@ -826,6 +843,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
+ setOperationAction(ISD::FABS, MVT::v4f32, Custom);
setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
@@ -859,6 +877,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
+ setOperationAction(ISD::FABS, MVT::v2f64, Custom);
setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
@@ -927,6 +946,18 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
+
+ setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
+    // As there is no 64-bit GPR available, we need to build a special custom
+    // sequence to convert from v2i32 to v2f32.
+ if (!Subtarget->is64Bit())
+ setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
+
+ setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
+ setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
}
if (Subtarget->hasSSE41()) {
@@ -941,6 +972,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FRINT, MVT::f64, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
+
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
@@ -1018,19 +1052,33 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
+ setOperationAction(ISD::FABS, MVT::v8f32, Custom);
setOperationAction(ISD::FADD, MVT::v4f64, Legal);
setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
+ setOperationAction(ISD::FABS, MVT::v4f64, Custom);
+
+ setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
+
+ setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal);
+
setOperationAction(ISD::SRL, MVT::v16i16, Custom);
setOperationAction(ISD::SRL, MVT::v32i8, Custom);
@@ -1054,7 +1102,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);
- if (Subtarget->hasFMA()) {
+ if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
setOperationAction(ISD::FMA, MVT::v8f32, Custom);
setOperationAction(ISD::FMA, MVT::v4f64, Custom);
setOperationAction(ISD::FMA, MVT::v4f32, Custom);
@@ -1219,10 +1267,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setTargetDAGCombine(ISD::ANY_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND);
setTargetDAGCombine(ISD::TRUNCATE);
- setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::SETCC);
- setTargetDAGCombine(ISD::FP_TO_SINT);
if (Subtarget->is64Bit())
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::XOR);
@@ -1320,7 +1366,7 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
// cases like PR2962. This should be removed when PR2962 is fixed.
const Function *F = MF.getFunction();
if (IsZeroVal &&
- !F->hasFnAttr(Attribute::NoImplicitFloat)) {
+ !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat)) {
if (Size >= 16 &&
(Subtarget->isUnalignedMemAccessFast() ||
((DstAlign == 0 || DstAlign >= 16) &&
@@ -1988,7 +2034,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
TotalNumIntRegs);
- bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat);
+ bool NoImplicitFloatOps = Fn->getFnAttributes().
+ hasAttribute(Attributes::NoImplicitFloat);
assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat &&
@@ -2136,16 +2183,14 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
/// optimization is performed and it is required (FPDiff!=0).
static SDValue
EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
- SDValue Chain, SDValue RetAddrFrIdx,
- bool Is64Bit, int FPDiff, DebugLoc dl) {
+ SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT,
+ unsigned SlotSize, int FPDiff, DebugLoc dl) {
// Store the return address to the appropriate stack slot.
if (!FPDiff) return Chain;
// Calculate the new stack slot for the return address.
- int SlotSize = Is64Bit ? 8 : 4;
int NewReturnAddrFI =
MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false);
- EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
- SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
+ SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
MachinePointerInfo::getFixedStack(NewReturnAddrFI),
false, false, 0);
@@ -2180,7 +2225,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, SR != NotStructReturn,
- MF.getFunction()->hasStructRetAttr(),
+ MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
@@ -2220,14 +2265,15 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
int FPDiff = 0;
if (isTailCall && !IsSibcall) {
// Lower arguments at fp - stackoffset + fpdiff.
- unsigned NumBytesCallerPushed =
- MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
+ X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
+ unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
+
FPDiff = NumBytesCallerPushed - NumBytes;
// Set the delta of movement of the returnaddr stackslot.
// But only set if delta is greater than previous delta.
- if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
- MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
+ if (FPDiff < X86Info->getTCReturnAddrDelta())
+ X86Info->setTCReturnAddrDelta(FPDiff);
}
if (!IsSibcall)
@@ -2304,7 +2350,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
} else if (!IsSibcall && (!isTailCall || isByVal)) {
assert(VA.isMemLoc());
if (StackPtr.getNode() == 0)
- StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
+ StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
+ getPointerTy());
MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
dl, DAG, VA, Flags));
}
@@ -2392,7 +2439,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Copy relative to framepointer.
SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
if (StackPtr.getNode() == 0)
- StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
+ StackPtr = DAG.getCopyFromReg(Chain, dl,
+ RegInfo->getStackRegister(),
getPointerTy());
Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
@@ -2414,7 +2462,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
&MemOpChains2[0], MemOpChains2.size());
// Store the return address to the appropriate stack slot.
- Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
+ Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
+ getPointerTy(), RegInfo->getSlotSize(),
FPDiff, dl);
}
@@ -2464,7 +2513,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
OpFlags = X86II::MO_DARWIN_STUB;
} else if (Subtarget->isPICStyleRIPRel() &&
isa<Function>(GV) &&
- cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) {
+ cast<Function>(GV)->getFnAttributes().
+ hasAttribute(Attributes::NonLazyBind)) {
// If the function is marked as non-lazy, generate an indirect call
// which loads from the GOT directly. This avoids runtime overhead
// at the cost of eager binding (and one extra byte of encoding).
@@ -2625,7 +2675,7 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
int64_t Offset = StackSize;
- uint64_t SlotSize = TD->getPointerSize();
+ unsigned SlotSize = RegInfo->getSlotSize();
if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
// Number smaller than 12 so just add the difference.
Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
@@ -2700,6 +2750,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
bool isVarArg,
bool isCalleeStructRet,
bool isCallerStructRet,
+ Type *RetTy,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -2711,6 +2762,13 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// If -tailcallopt is specified, make fastcc functions tail-callable.
const MachineFunction &MF = DAG.getMachineFunction();
const Function *CallerF = DAG.getMachineFunction().getFunction();
+
+ // If the function return type is x86_fp80 and the callee return type is not,
+ // then the FP_EXTEND of the call result is not a nop. It's not safe to
+ // perform a tailcall optimization here.
+ if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
+ return false;
+
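
A sketch of the shape this check rejects:

double callee();                          // callee returns f64
long double caller() { return callee(); } // caller returns x86_fp80: the
                                          // FP_EXTEND of the result must run
                                          // after the call, so no tail call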
CallingConv::ID CallerCC = CallerF->getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
@@ -2834,7 +2892,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
const X86InstrInfo *TII =
- ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
+ ((const X86TargetMachine&)getTargetMachine()).getInstrInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
@@ -2985,7 +3043,7 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
if (ReturnAddrIndex == 0) {
// Set up a frame object for the return address.
- uint64_t SlotSize = TD->getPointerSize();
+ unsigned SlotSize = RegInfo->getSlotSize();
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
false);
FuncInfo->setRAIndex(ReturnAddrIndex);
@@ -3508,25 +3566,26 @@ SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
MatchOddMask = false;
}
- static const int CompactionMaskEven[] = {0, 2, -1, -1, 4, 6, -1, -1};
- static const int CompactionMaskOdd [] = {1, 3, -1, -1, 5, 7, -1, -1};
- const int *CompactionMask;
- if (MatchEvenMask)
- CompactionMask = CompactionMaskEven;
- else if (MatchOddMask)
- CompactionMask = CompactionMaskOdd;
- else
+ if (!MatchEvenMask && !MatchOddMask)
return SDValue();
SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
- SDValue Op0 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(0),
- UndefNode, CompactionMask);
- SDValue Op1 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(1),
- UndefNode, CompactionMask);
- static const int UnpackMask[] = {0, 8, 1, 9, 4, 12, 5, 13};
- return DAG.getVectorShuffle(VT, dl, Op0, Op1, UnpackMask);
+ SDValue Op0 = SVOp->getOperand(0);
+ SDValue Op1 = SVOp->getOperand(1);
+
+ if (MatchEvenMask) {
+ // Shift the second operand right to 32 bits.
+ static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
+ Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
+ } else {
+ // Shift the first operand left to 32 bits.
+ static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
+ Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
+ }
+ static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
+ return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
}
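
A standalone check of the even-mask composition above, assuming (from the definitions elided above this hunk) that the even pattern being matched is the interleaved mask {0, 8, 2, 10, 4, 12, 6, 14}:

#include <array>
#include <cassert>

int main() {
  const std::array<int, 8> ShiftRight = {-1, 0, -1, 2, -1, 4, -1, 6};
  const std::array<int, 8> Blend      = {0, 9, 2, 11, 4, 13, 6, 15};
  const std::array<int, 8> Expected   = {0, 8, 2, 10, 4, 12, 6, 14};
  for (int i = 0; i != 8; ++i) {
    // Blend elements 0..7 come from Op0, 8..15 from the shifted Op1.
    int Composed = Blend[i] < 8 ? Blend[i] : ShiftRight[Blend[i] - 8] + 8;
    assert(Composed == Expected[i]);
  }
  return 0;
}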
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
@@ -4577,7 +4636,6 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
MVT ShufVT = V.getValueType().getSimpleVT();
unsigned NumElems = ShufVT.getVectorNumElements();
SmallVector<int, 16> ShuffleMask;
- SDValue ImmN;
bool IsUnary;
if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
@@ -4979,6 +5037,18 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
LDBase->getAlignment(),
false/*isVolatile*/, true/*ReadMem*/,
false/*WriteMem*/);
+
+ // Make sure the newly-created LOAD is in the same position as LDBase in
+ // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
+ // update uses of LDBase's output chain to use the TokenFactor.
+ if (LDBase->hasAnyUseOfValue(1)) {
+ SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
+ DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
+ SDValue(ResNode.getNode(), 1));
+ }
+
return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
}
return SDValue();
@@ -4992,7 +5062,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
SDValue
-X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
+X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
if (!Subtarget->hasAVX())
return SDValue();
@@ -5116,80 +5186,78 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
return SDValue();
}
-// LowerVectorFpExtend - Recognize the scalarized FP_EXTEND from v2f32 to v2f64
-// and convert it into X86ISD::VFPEXT due to the current ISD::FP_EXTEND has the
-// constraint of matching input/output vector elements.
SDValue
-X86TargetLowering::LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
- SDNode *N = Op.getNode();
+X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
- unsigned NumElts = Op.getNumOperands();
- // Check supported types and sub-targets.
- //
- // Only v2f32 -> v2f64 needs special handling.
- if (VT != MVT::v2f64 || !Subtarget->hasSSE2())
+ // Skip if insert_vec_elt is not supported.
+ if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
return SDValue();
- SDValue VecIn;
- EVT VecInVT;
- SmallVector<int, 8> Mask;
- EVT SrcVT = MVT::Other;
+ DebugLoc DL = Op.getDebugLoc();
+ unsigned NumElems = Op.getNumOperands();
+
+ SDValue VecIn1;
+ SDValue VecIn2;
+ SmallVector<unsigned, 4> InsertIndices;
+ SmallVector<int, 8> Mask(NumElems, -1);
- // Check the patterns could be translated into X86vfpext.
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue In = N->getOperand(i);
- unsigned Opcode = In.getOpcode();
+ for (unsigned i = 0; i != NumElems; ++i) {
+ unsigned Opc = Op.getOperand(i).getOpcode();
- // Skip if the element is undefined.
- if (Opcode == ISD::UNDEF) {
- Mask.push_back(-1);
+ if (Opc == ISD::UNDEF)
continue;
- }
- // Quit if one of the elements is not defined from 'fpext'.
- if (Opcode != ISD::FP_EXTEND)
- return SDValue();
+ if (Opc != ISD::EXTRACT_VECTOR_ELT) {
+ // Quit if more than 1 elements need inserting.
+ if (InsertIndices.size() > 1)
+ return SDValue();
+
+ InsertIndices.push_back(i);
+ continue;
+ }
- // Check how the source of 'fpext' is defined.
- SDValue L2In = In.getOperand(0);
- EVT L2InVT = L2In.getValueType();
+ SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
+ SDValue ExtIdx = Op.getOperand(i).getOperand(1);
- // Check the original type
- if (SrcVT == MVT::Other)
- SrcVT = L2InVT;
- else if (SrcVT != L2InVT) // Quit if non-homogenous typed.
+ // Quit if extracted from vector of different type.
+ if (ExtractedFromVec.getValueType() != VT)
return SDValue();
- // Check whether the value being 'fpext'ed is extracted from the same
- // source.
- Opcode = L2In.getOpcode();
-
- // Quit if it's not extracted with a constant index.
- if (Opcode != ISD::EXTRACT_VECTOR_ELT ||
- !isa<ConstantSDNode>(L2In.getOperand(1)))
+ // Quit if non-constant index.
+ if (!isa<ConstantSDNode>(ExtIdx))
return SDValue();
- SDValue ExtractedFromVec = L2In.getOperand(0);
+ if (VecIn1.getNode() == 0)
+ VecIn1 = ExtractedFromVec;
+ else if (VecIn1 != ExtractedFromVec) {
+ if (VecIn2.getNode() == 0)
+ VecIn2 = ExtractedFromVec;
+ else if (VecIn2 != ExtractedFromVec)
+ // Quit if more than 2 vectors to shuffle
+ return SDValue();
+ }
- if (VecIn.getNode() == 0) {
- VecIn = ExtractedFromVec;
- VecInVT = ExtractedFromVec.getValueType();
- } else if (VecIn != ExtractedFromVec) // Quit if built from more than 1 vec.
- return SDValue();
+ unsigned Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
- Mask.push_back(cast<ConstantSDNode>(L2In.getOperand(1))->getZExtValue());
+ if (ExtractedFromVec == VecIn1)
+ Mask[i] = Idx;
+ else if (ExtractedFromVec == VecIn2)
+ Mask[i] = Idx + NumElems;
}
- // Fill the remaining mask as undef.
- for (unsigned i = NumElts; i < VecInVT.getVectorNumElements(); ++i)
- Mask.push_back(-1);
+ if (VecIn1.getNode() == 0)
+ return SDValue();
+
+ VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
+ SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
+ for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
+ unsigned Idx = InsertIndices[i];
+ NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
+ DAG.getIntPtrConstant(Idx));
+ }
- return DAG.getNode(X86ISD::VFPEXT, DL, VT,
- DAG.getVectorShuffle(VecInVT, DL,
- VecIn, DAG.getUNDEF(VecInVT),
- &Mask[0]));
+ return NV;
}
SDValue
@@ -5224,10 +5292,6 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (Broadcast.getNode())
return Broadcast;
- SDValue FpExt = LowerVectorFpExtend(Op, DAG);
- if (FpExt.getNode())
- return FpExt;
-
unsigned EVTBits = ExtVT.getSizeInBits();
unsigned NumZero = 0;
@@ -5472,6 +5536,11 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (LD.getNode())
return LD;
+ // Check for a build vector from mostly shuffle plus few inserting.
+ SDValue Sh = buildFromShuffleMostly(Op, DAG);
+ if (Sh.getNode())
+ return Sh;
+
// For SSE 4.1, use insertps to put the high elements into the low element.
if (getSubtarget()->hasSSE41()) {
SDValue Result;
@@ -5538,8 +5607,7 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}
-SDValue
-X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 2);
// 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors
@@ -5548,9 +5616,9 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
}
// Try to lower a shuffle node into a simple blend instruction.
-static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
- const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
+static SDValue
+LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
+ const X86Subtarget *Subtarget, SelectionDAG &DAG) {
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
DebugLoc dl = SVOp->getDebugLoc();
@@ -5620,9 +5688,9 @@ static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
// 2. [ssse3] 1 x pshufb
// 3. [ssse3] 2 x pshufb + 1 x por
// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
-SDValue
-X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
- SelectionDAG &DAG) const {
+static SDValue
+LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
@@ -5879,8 +5947,6 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
DebugLoc dl = SVOp->getDebugLoc();
ArrayRef<int> MaskVals = SVOp->getMask();
- bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
-
// If we have SSSE3, case 1 is generated when all result bytes come from
// one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
// present, fall back to case 3.
@@ -5904,7 +5970,11 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v16i8, &pshufbMask[0], 16));
- if (V2IsUndef)
+
+ // As PSHUFB will zero elements with negative indices, it's safe to ignore
+ // the 2nd operand if it's undefined or zero.
+ if (V2.getOpcode() == ISD::UNDEF ||
+ ISD::isBuildVectorAllZeros(V2.getNode()))
return V1;
// Calculate the shuffle mask for the second input, shuffle it, and
@@ -5990,6 +6060,51 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}
+// v32i8 shuffles - Translate to VPSHUFB if possible.
+static
+SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ EVT VT = SVOp->getValueType(0);
+ SDValue V1 = SVOp->getOperand(0);
+ SDValue V2 = SVOp->getOperand(1);
+ DebugLoc dl = SVOp->getDebugLoc();
+ SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
+
+ bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
+ bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
+ bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
+
+  // VPSHUFB may be generated if
+  // (1) one of the input vectors is undefined or a zeroinitializer
+  //     (the mask value 0x80 puts 0 in the corresponding slot of the vector),
+  // and (2) the mask indices don't cross the 128-bit lane boundary.
+ if (VT != MVT::v32i8 || !Subtarget->hasAVX2() ||
+ (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
+ return SDValue();
+
+ if (V1IsAllZero && !V2IsAllZero) {
+ CommuteVectorShuffleMask(MaskVals, 32);
+ V1 = V2;
+ }
+ SmallVector<SDValue, 32> pshufbMask;
+ for (unsigned i = 0; i != 32; i++) {
+ int EltIdx = MaskVals[i];
+ if (EltIdx < 0 || EltIdx >= 32)
+ EltIdx = 0x80;
+ else {
+ if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16))
+ // Cross lane is not allowed.
+ return SDValue();
+ EltIdx &= 0xf;
+ }
+ pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
+ }
+ return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1,
+ DAG.getNode(ISD::BUILD_VECTOR, dl,
+ MVT::v32i8, &pshufbMask[0], 32));
+}
+
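
For context, one 128-bit lane of (V)PSHUFB behaves as sketched below, matching the 0x80 convention used when building pshufbMask above:

#include <array>
#include <cstdint>

std::array<uint8_t, 16> pshufbLane(const std::array<uint8_t, 16> &Src,
                                   const std::array<uint8_t, 16> &Mask) {
  std::array<uint8_t, 16> Out;
  for (int i = 0; i != 16; ++i)
    Out[i] = (Mask[i] & 0x80) ? 0                      // bit 7 set: zero slot
                              : Src[Mask[i] & 0x0f];   // in-lane select
  return Out;
}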
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
@@ -6324,17 +6439,17 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
}
static bool MayFoldVectorLoad(SDValue V) {
- if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
+ while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0);
+
if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
V = V.getOperand(0);
if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
// BUILD_VECTOR (load), undef
V = V.getOperand(0);
- if (MayFoldLoad(V))
- return true;
- return false;
+
+ return MayFoldLoad(V);
}
// FIXME: the version above should always be used. Since there's
@@ -6457,6 +6572,81 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
getShuffleSHUFImmediate(SVOp), DAG);
}
+// Reduce a vector shuffle to zext.
+SDValue
+X86TargetLowering::lowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const {
+ // PMOVZX is only available from SSE41.
+ if (!Subtarget->hasSSE41())
+ return SDValue();
+
+ EVT VT = Op.getValueType();
+
+  // Only AVX2 supports 256-bit vector integer extension.
+ if (!Subtarget->hasAVX2() && VT.is256BitVector())
+ return SDValue();
+
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ unsigned NumElems = VT.getVectorNumElements();
+
+  // Extension is a unary operation, and the element type of the source
+  // vector cannot be i64 or larger.
+ if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
+ VT.getVectorElementType() == MVT::i64)
+ return SDValue();
+
+ // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
+ unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
+ while ((1U << Shift) < NumElems) {
+ if (SVOp->getMaskElt(1U << Shift) == 1)
+ break;
+ Shift += 1;
+ // The maximal ratio is 8, i.e. from i8 to i64.
+ if (Shift > 3)
+ return SDValue();
+ }
+
+ // Check the shuffle mask.
+ unsigned Mask = (1U << Shift) - 1;
+ for (unsigned i = 0; i != NumElems; ++i) {
+ int EltIdx = SVOp->getMaskElt(i);
+ if ((i & Mask) != 0 && EltIdx != -1)
+ return SDValue();
+ if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
+ return SDValue();
+ }
+
+ unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
+ EVT NeVT = EVT::getIntegerVT(*DAG.getContext(), NBits);
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), NeVT, NumElems >> Shift);
+
+ if (!isTypeLegal(NVT))
+ return SDValue();
+
+  // Simplify the operand before it is fed into the shuffle.
+ unsigned SignificantBits = NVT.getSizeInBits() >> Shift;
+ if (V1.getOpcode() == ISD::BITCAST &&
+ V1.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
+ V1.getOperand(0).getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ V1.getOperand(0)
+ .getOperand(0).getValueType().getSizeInBits() == SignificantBits) {
+ // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
+ SDValue V = V1.getOperand(0).getOperand(0).getOperand(0);
+ ConstantSDNode *CIdx =
+ dyn_cast<ConstantSDNode>(V1.getOperand(0).getOperand(0).getOperand(1));
+    // If it's foldable, i.e. a normal load with a single use, we will let code
+    // selection fold it. Otherwise, we will shorten the conversion sequence.
+ if (CIdx && CIdx->getZExtValue() == 0 &&
+ (!ISD::isNormalLoad(V.getNode()) || !V.hasOneUse()))
+ V1 = DAG.getNode(ISD::BITCAST, DL, V1.getValueType(), V);
+ }
+
+ return DAG.getNode(ISD::BITCAST, DL, VT,
+ DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
+}
+
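
The ratio search and mask check above, restated as a standalone sketch:

#include <vector>

// Returns the shift (log2 of the expansion ratio) on a match, -1 otherwise.
int matchIntExtendMask(const std::vector<int> &M) {
  unsigned N = (unsigned)M.size();
  unsigned Shift = 1;                    // start from ratio 2
  while ((1u << Shift) < N) {
    if (M[1u << Shift] == 1)
      break;
    if (++Shift > 3)                     // maximal ratio is 8 (i8 -> i64)
      return -1;
  }
  unsigned Low = (1u << Shift) - 1;
  for (unsigned i = 0; i != N; ++i) {
    if ((i & Low) != 0 && M[i] != -1)
      return -1;                         // gap slots must be undef
    if ((i & Low) == 0 && M[i] != (int)(i >> Shift))
      return -1;                         // anchor slots take element i/ratio
  }
  return (int)Shift;
}
// e.g. {0, -1, 1, -1, 2, -1, 3, -1} matches with Shift == 1: a v8i16
// shuffle that is really (v4i32 (zext V1)) reinterpreted as v8i16.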
SDValue
X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
@@ -6487,6 +6677,11 @@ X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
return PromoteSplat(SVOp, DAG);
}
+ // Check integer expanding shuffles.
+ SDValue NewOp = lowerVectorIntExtend(Op, DAG);
+ if (NewOp.getNode())
+ return NewOp;
+
// If the shuffle can be profitably rewritten as a narrower shuffle, then
// do it!
if (VT == MVT::v8i16 || VT == MVT::v16i8 ||
@@ -6536,7 +6731,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
bool HasAVX = Subtarget->hasAVX();
bool HasAVX2 = Subtarget->hasAVX2();
MachineFunction &MF = DAG.getMachineFunction();
- bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+ bool OptForSize = MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize);
assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
@@ -6805,7 +7001,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
// Handle v8i16 specifically since SSE can do byte extraction and insertion.
if (VT == MVT::v8i16) {
- SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG);
+ SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
if (NewOp.getNode())
return NewOp;
}
@@ -6816,6 +7012,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return NewOp;
}
+ if (VT == MVT::v32i8) {
+ SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
+ if (NewOp.getNode())
+ return NewOp;
+ }
+
// Handle all 128-bit wide vectors with 4 elements, and match them with
// several different shuffle types.
if (NumElems == 4 && VT.is128BitVector())
@@ -6839,9 +7041,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
if (VT.getSizeInBits() == 8) {
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
- Op.getOperand(0), Op.getOperand(1));
+ Op.getOperand(0), Op.getOperand(1));
SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
- DAG.getValueType(VT));
+ DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
}
@@ -6856,9 +7058,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
Op.getOperand(0)),
Op.getOperand(1)));
SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
- Op.getOperand(0), Op.getOperand(1));
+ Op.getOperand(0), Op.getOperand(1));
SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
- DAG.getValueType(VT));
+ DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
}
@@ -6942,9 +7144,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// Transform it so it match pextrw which produces a 32-bit result.
EVT EltVT = MVT::i32;
SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
- Op.getOperand(0), Op.getOperand(1));
+ Op.getOperand(0), Op.getOperand(1));
SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
- DAG.getValueType(VT));
+ DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
}
@@ -7087,8 +7289,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
-SDValue
-X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
LLVMContext *Context = DAG.getContext();
DebugLoc dl = Op.getDebugLoc();
EVT OpVT = Op.getValueType();
@@ -7120,8 +7321,8 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
// a simple subregister reference or explicit instructions to grab
// upper bits of a vector.
-SDValue
-X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
if (Subtarget->hasAVX()) {
DebugLoc dl = Op.getNode()->getDebugLoc();
SDValue Vec = Op.getNode()->getOperand(0);
@@ -7140,8 +7341,8 @@ X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
-SDValue
-X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
if (Subtarget->hasAVX()) {
DebugLoc dl = Op.getNode()->getDebugLoc();
SDValue Vec = Op.getNode()->getOperand(0);
@@ -7284,9 +7485,10 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
Subtarget->ClassifyBlockAddressReference();
CodeModel::Model M = getTargetMachine().getCodeModel();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
+ int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
DebugLoc dl = Op.getDebugLoc();
- SDValue Result = DAG.getBlockAddress(BA, getPointerTy(),
- /*isTarget=*/true, OpFlags);
+ SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
+ OpFlags);
if (Subtarget->isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
@@ -7395,8 +7597,8 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
SDValue InFlag;
DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
- DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc(), PtrVT), InFlag);
+ DAG.getNode(X86ISD::GlobalBaseReg,
+ DebugLoc(), PtrVT), InFlag);
InFlag = Chain.getValue(1);
return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
@@ -7897,11 +8099,29 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
return Sub;
}
+SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue N0 = Op.getOperand(0);
+ EVT SVT = N0.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+
+ assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 ||
+ SVT == MVT::v8i8 || SVT == MVT::v8i16) &&
+ "Custom UINT_TO_FP is not supported!");
+
+  EVT NVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
+                             SVT.getVectorNumElements());
+ return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
+ DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
+}
+
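
Why the rewrite is sound, as a scalar sketch: after zero extension the value is non-negative and well inside i32 range, so the signed conversion yields the same result as the unsigned one.

#include <cstdint>

float u16ToF32(uint16_t X) {
  int32_t Widened = (int32_t)(uint32_t)X; // ZERO_EXTEND: always >= 0
  return (float)Widened;                  // SINT_TO_FP, exact here
}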
SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
SDValue N0 = Op.getOperand(0);
DebugLoc dl = Op.getDebugLoc();
+ if (Op.getValueType().isVector())
+ return lowerUINT_TO_FP_vec(Op, DAG);
+
// Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
// optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
// the optimization here.
@@ -8075,10 +8295,66 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsReplace) co
}
}
+SDValue X86TargetLowering::lowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ EVT VT = Op.getValueType();
+ SDValue In = Op.getOperand(0);
+ EVT SVT = In.getValueType();
+
+ if (!VT.is256BitVector() || !SVT.is128BitVector() ||
+ VT.getVectorNumElements() != SVT.getVectorNumElements())
+ return SDValue();
+
+ assert(Subtarget->hasAVX() && "256-bit vector is observed without AVX!");
+
+ // AVX2 has better support of integer extending.
+ if (Subtarget->hasAVX2())
+ return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
+
+ SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In);
+ static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1};
+ SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32,
+ DAG.getVectorShuffle(MVT::v8i16, DL, In, DAG.getUNDEF(MVT::v8i16), &Mask[0]));
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi);
+}
+
+SDValue X86TargetLowering::lowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ EVT VT = Op.getValueType();
+ EVT SVT = Op.getOperand(0).getValueType();
+
+ if (!VT.is128BitVector() || !SVT.is256BitVector() ||
+ VT.getVectorNumElements() != SVT.getVectorNumElements())
+ return SDValue();
+
+ assert(Subtarget->hasAVX() && "256-bit vector is observed without AVX!");
+
+ unsigned NumElems = VT.getVectorNumElements();
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
+ NumElems * 2);
+
+ SDValue In = Op.getOperand(0);
+ SmallVector<int, 16> MaskVec(NumElems * 2, -1);
+ // Prepare truncation shuffle mask
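+ // (Illustrative, for v8i32 -> v8i16: bitcast the input to v16i16 and pick
+ // the even-indexed, i.e. low, halves of each element:
+ //   mask = <0, 2, 4, 6, 8, 10, 12, 14, u, u, u, u, u, u, u, u>.)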
+ for (unsigned i = 0; i != NumElems; ++i)
+ MaskVec[i] = i * 2;
+ SDValue V = DAG.getVectorShuffle(NVT, DL,
+ DAG.getNode(ISD::BITCAST, DL, NVT, In),
+ DAG.getUNDEF(NVT), &MaskVec[0]);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
+ DAG.getIntPtrConstant(0));
+}
+
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
SelectionDAG &DAG) const {
- if (Op.getValueType().isVector())
+ if (Op.getValueType().isVector()) {
+ if (Op.getValueType() == MVT::v8i16)
+ return DAG.getNode(ISD::TRUNCATE, Op.getDebugLoc(), Op.getValueType(),
+ DAG.getNode(ISD::FP_TO_SINT, Op.getDebugLoc(),
+ MVT::v8i32, Op.getOperand(0)));
return SDValue();
+ }
std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
/*IsSigned=*/ true, /*IsReplace=*/ false);
@@ -8113,26 +8389,49 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
return FIST;
}
-SDValue X86TargetLowering::LowerFABS(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue X86TargetLowering::lowerFP_EXTEND(SDValue Op,
+ SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ EVT VT = Op.getValueType();
+ SDValue In = Op.getOperand(0);
+ EVT SVT = In.getValueType();
+
+ assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
+
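+ // Illustrative: widen v2f32 to v4f32 by concatenating with undef, then
+ // VFPEXT converts the low two lanes to produce the v2f64 result.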
+ return DAG.getNode(X86ISD::VFPEXT, DL, VT,
+ DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
+ In, DAG.getUNDEF(SVT)));
+}
+
+SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const {
LLVMContext *Context = DAG.getContext();
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
EVT EltVT = VT;
- if (VT.isVector())
+ unsigned NumElts = VT == MVT::f64 ? 2 : 4;
+ if (VT.isVector()) {
EltVT = VT.getVectorElementType();
- Constant *C;
- if (EltVT == MVT::f64) {
- C = ConstantVector::getSplat(2,
- ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
- } else {
- C = ConstantVector::getSplat(4,
- ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))));
+ NumElts = VT.getVectorNumElements();
}
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
+ Constant *C;
+ if (EltVT == MVT::f64)
+ C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))));
+ else
+ C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))));
+ C = ConstantVector::getSplat(NumElts, C);
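+ // fabs is implemented as a bitwise AND with a mask that clears only the
+ // sign bit, e.g. for f32 (illustrative): bits(x) & 0x7FFFFFFF.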
+ SDValue CPIdx = DAG.getConstantPool(C, getPointerTy());
+ unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
- false, false, false, 16);
+ false, false, false, Alignment);
+ if (VT.isVector()) {
+ MVT ANDVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
+ return DAG.getNode(ISD::BITCAST, dl, VT,
+ DAG.getNode(ISD::AND, dl, ANDVT,
+ DAG.getNode(ISD::BITCAST, dl, ANDVT,
+ Op.getOperand(0)),
+ DAG.getNode(ISD::BITCAST, dl, ANDVT, Mask)));
+ }
return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
}
@@ -8152,10 +8451,11 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
else
C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)));
C = ConstantVector::getSplat(NumElts, C);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
+ SDValue CPIdx = DAG.getConstantPool(C, getPointerTy());
+ unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
- false, false, false, 16);
+ false, false, false, Alignment);
if (VT.isVector()) {
MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
return DAG.getNode(ISD::BITCAST, dl, VT,
@@ -8241,7 +8541,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
}
-SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue N0 = Op.getOperand(0);
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
@@ -8252,6 +8552,98 @@ SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
}
+// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able.
+//
+SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const {
+ assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
+
+ if (!Subtarget->hasSSE41())
+ return SDValue();
+
+ if (!Op->hasOneUse())
+ return SDValue();
+
+ SDNode *N = Op.getNode();
+ DebugLoc DL = N->getDebugLoc();
+
+ SmallVector<SDValue, 8> Opnds;
+ DenseMap<SDValue, unsigned> VecInMap;
+ EVT VT = MVT::Other;
+
+ // Recognize the special case where a vector is cast into a wide integer to
+ // test for all zeros.
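+ // e.g. (illustrative IR):
+ //   %e0 = extractelement <4 x i32> %v, i32 0
+ //   ...
+ //   %r = or i32 (or i32 %e0, %e1), (or i32 %e2, %e3)
+ //   %c = icmp eq i32 %r, 0        ; lowered to PTEST %v, %v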
+ Opnds.push_back(N->getOperand(0));
+ Opnds.push_back(N->getOperand(1));
+
+ for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
+ SmallVector<SDValue, 8>::const_iterator I = Opnds.begin() + Slot;
+ // BFS traverse all OR'd operands.
+ if (I->getOpcode() == ISD::OR) {
+ Opnds.push_back(I->getOperand(0));
+ Opnds.push_back(I->getOperand(1));
+ // Re-evaluate the number of nodes to be traversed.
+ e += 2; // 2 more nodes (LHS and RHS) are pushed.
+ continue;
+ }
+
+ // Quit if this is not an EXTRACT_VECTOR_ELT.
+ if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+
+ // Quit if the index is not a constant.
+ SDValue Idx = I->getOperand(1);
+ if (!isa<ConstantSDNode>(Idx))
+ return SDValue();
+
+ SDValue ExtractedFromVec = I->getOperand(0);
+ DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
+ if (M == VecInMap.end()) {
+ VT = ExtractedFromVec.getValueType();
+ // Quit if not 128/256-bit vector.
+ if (!VT.is128BitVector() && !VT.is256BitVector())
+ return SDValue();
+ // Quit if not the same type.
+ if (VecInMap.begin() != VecInMap.end() &&
+ VT != VecInMap.begin()->first.getValueType())
+ return SDValue();
+ M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
+ }
+ M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
+ }
+
+ assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ "Not extracted from 128-/256-bit vector.");
+
+ unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
+ SmallVector<SDValue, 8> VecIns;
+
+ for (DenseMap<SDValue, unsigned>::const_iterator
+ I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
+ // Quit if not all elements are used.
+ if (I->second != FullMask)
+ return SDValue();
+ VecIns.push_back(I->first);
+ }
+
+ EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
+
+ // Cast all vectors into TestVT for PTEST.
+ for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
+ VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
+
+ // If more than one full vector is evaluated, OR them together before PTEST.
+ for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
+ // Each iteration will OR 2 nodes and append the result until there is only
+ // 1 node left, i.e. the final OR'd value of all vectors.
+ SDValue LHS = VecIns[Slot];
+ SDValue RHS = VecIns[Slot + 1];
+ VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
+ }
+
+ return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
+ VecIns.back(), VecIns.back());
+}
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
@@ -8285,7 +8677,33 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
unsigned Opcode = 0;
unsigned NumOperands = 0;
- switch (Op.getNode()->getOpcode()) {
+
+ // Truncate operations may prevent the merge of the SETCC instruction
+ // and the arithmetic instruction before it. Attempt to truncate the operands
+ // of the arithmetic instruction and use a reduced bit-width instruction.
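+ // For example (illustrative):
+ //   (setcc (trunc (and i64 %a, %b)), 0)
+ //     ==> (setcc (X86ISD::AND (trunc %a), (trunc %b)), 0)
+ // so the narrower AND can set EFLAGS directly.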
+ bool NeedTruncation = false;
+ SDValue ArithOp = Op;
+ if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
+ SDValue Arith = Op->getOperand(0);
+ // Both the trunc and the arithmetic op need to have one user each.
+ if (Arith->hasOneUse())
+ switch (Arith.getOpcode()) {
+ default: break;
+ case ISD::ADD:
+ case ISD::SUB:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR: {
+ NeedTruncation = true;
+ ArithOp = Arith;
+ }
+ }
+ }
+
+ // NOTICE: In the code below we use ArithOp to hold the arithmetic operation,
+ // which may be the result of a cast (truncation). We use the variable 'Op',
+ // the original non-cast value, when we check for possible users.
+ switch (ArithOp.getOpcode()) {
case ISD::ADD:
// Due to an isel shortcoming, be conservative if this add is likely to be
// selected as part of a load-modify-store instruction. When the root node
@@ -8305,7 +8723,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
goto default_case;
if (ConstantSDNode *C =
- dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
+ dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
// An add of one will be selected as an INC.
if (C->getAPIntValue() == 1) {
Opcode = X86ISD::INC;
@@ -8341,7 +8759,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
if (User->getOpcode() != ISD::BRCOND &&
User->getOpcode() != ISD::SETCC &&
- (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
+ !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) {
NonFlagUse = true;
break;
}
@@ -8362,14 +8780,20 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
goto default_case;
// Otherwise use a regular EFLAGS-setting instruction.
- switch (Op.getNode()->getOpcode()) {
+ switch (ArithOp.getOpcode()) {
default: llvm_unreachable("unexpected operator!");
- case ISD::SUB:
- Opcode = X86ISD::SUB;
- break;
- case ISD::OR: Opcode = X86ISD::OR; break;
+ case ISD::SUB: Opcode = X86ISD::SUB; break;
case ISD::XOR: Opcode = X86ISD::XOR; break;
case ISD::AND: Opcode = X86ISD::AND; break;
+ case ISD::OR: {
+ if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
+ SDValue EFLAGS = LowerVectorAllZeroTest(Op, DAG);
+ if (EFLAGS.getNode())
+ return EFLAGS;
+ }
+ Opcode = X86ISD::OR;
+ break;
+ }
}
NumOperands = 2;
@@ -8387,19 +8811,40 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
break;
}
+ // If we found that truncation is beneficial, perform the truncation and
+ // update 'Op'.
+ if (NeedTruncation) {
+ EVT VT = Op.getValueType();
+ SDValue WideVal = Op->getOperand(0);
+ EVT WideVT = WideVal.getValueType();
+ unsigned ConvertedOp = 0;
+ // Use a target machine opcode to prevent further DAGCombine
+ // optimizations that may separate the arithmetic operations
+ // from the setcc node.
+ switch (WideVal.getOpcode()) {
+ default: break;
+ case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
+ case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
+ case ISD::AND: ConvertedOp = X86ISD::AND; break;
+ case ISD::OR: ConvertedOp = X86ISD::OR; break;
+ case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
+ }
+
+ if (ConvertedOp) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
+ SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
+ SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
+ Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
+ }
+ }
+ }
+
if (Opcode == 0)
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, Op.getValueType()));
- if (Opcode == X86ISD::CMP) {
- SDValue New = DAG.getNode(Opcode, dl, MVT::i32, Op.getOperand(0),
- Op.getOperand(1));
- // We can't replace usage of SUB with CMP.
- // The SUB node will be removed later because there is no use of it.
- return SDValue(New.getNode(), 0);
- }
-
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SmallVector<SDValue, 4> Ops;
for (unsigned i = 0; i != NumOperands; ++i)
@@ -8958,6 +9403,21 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
}
+ // X86 doesn't have an i8 cmov. If both operands are the result of a
+ // truncate, widen the cmov and push the truncate through. This avoids
+ // introducing a new branch during isel and doesn't add any extensions.
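+ // e.g. (illustrative):
+ //   select cc, (trunc i32 %a to i8), (trunc i32 %b to i8)
+ //     ==> trunc (i32 (X86ISD::CMOV %b, %a, cc)) to i8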
+ if (Op.getValueType() == MVT::i8 &&
+ Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
+ SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
+ if (T1.getValueType() == T2.getValueType() &&
+ // Blacklist CopyFromReg to avoid partial register stalls.
+ T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode() != ISD::CopyFromReg) {
+ SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
+ SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
+ return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
+ }
+ }
+
// X86ISD::CMOV means set the result (which is operand 1) to the RHS if
// condition is true.
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
@@ -9312,7 +9772,8 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
Flag = Chain.getValue(1);
- Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);
+ Chain = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
+ SPTy).getValue(1);
SDValue Ops1[2] = { Chain.getValue(0), Chain };
return DAG.getMergeValues(Ops1, 2, dl);
@@ -9395,7 +9856,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
EVT ArgVT = Op.getNode()->getValueType(0);
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
- uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy);
+ uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
uint8_t ArgMode;
// Decide which area this value should be read from.
@@ -9415,7 +9876,8 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!getTargetMachine().Options.UseSoftFloat &&
!(DAG.getMachineFunction()
- .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) &&
+ .getFunction()->getFnAttributes()
+ .hasAttribute(Attributes::NoImplicitFloat)) &&
Subtarget->hasSSE1());
}
@@ -9446,7 +9908,8 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
false, false, false, 0);
}
-SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
// X86-64 va_list is a struct { i32, i32, i8*, i8* }.
assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
SDValue Chain = Op.getOperand(0);
@@ -9507,8 +9970,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT,
return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}
-SDValue
-X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (IntNo) {
@@ -9896,62 +10358,6 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
Op.getOperand(1), Op.getOperand(2), DAG);
}
- // Fix vector shift instructions where the last operand is a non-immediate
- // i32 value.
- case Intrinsic::x86_mmx_pslli_w:
- case Intrinsic::x86_mmx_pslli_d:
- case Intrinsic::x86_mmx_pslli_q:
- case Intrinsic::x86_mmx_psrli_w:
- case Intrinsic::x86_mmx_psrli_d:
- case Intrinsic::x86_mmx_psrli_q:
- case Intrinsic::x86_mmx_psrai_w:
- case Intrinsic::x86_mmx_psrai_d: {
- SDValue ShAmt = Op.getOperand(2);
- if (isa<ConstantSDNode>(ShAmt))
- return SDValue();
-
- unsigned NewIntNo;
- switch (IntNo) {
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
- case Intrinsic::x86_mmx_pslli_w:
- NewIntNo = Intrinsic::x86_mmx_psll_w;
- break;
- case Intrinsic::x86_mmx_pslli_d:
- NewIntNo = Intrinsic::x86_mmx_psll_d;
- break;
- case Intrinsic::x86_mmx_pslli_q:
- NewIntNo = Intrinsic::x86_mmx_psll_q;
- break;
- case Intrinsic::x86_mmx_psrli_w:
- NewIntNo = Intrinsic::x86_mmx_psrl_w;
- break;
- case Intrinsic::x86_mmx_psrli_d:
- NewIntNo = Intrinsic::x86_mmx_psrl_d;
- break;
- case Intrinsic::x86_mmx_psrli_q:
- NewIntNo = Intrinsic::x86_mmx_psrl_q;
- break;
- case Intrinsic::x86_mmx_psrai_w:
- NewIntNo = Intrinsic::x86_mmx_psra_w;
- break;
- case Intrinsic::x86_mmx_psrai_d:
- NewIntNo = Intrinsic::x86_mmx_psra_d;
- break;
- }
-
- // The vector shift intrinsics with scalars uses 32b shift amounts but
- // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits
- // to be zero.
- ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, ShAmt,
- DAG.getConstant(0, MVT::i32));
-// FIXME this must be lowered to get rid of the invalid type.
-
- EVT VT = Op.getValueType();
- ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(NewIntNo, MVT::i32),
- Op.getOperand(1), ShAmt);
- }
case Intrinsic::x86_sse42_pcmpistria128:
case Intrinsic::x86_sse42_pcmpestria128:
case Intrinsic::x86_sse42_pcmpistric128:
@@ -10030,11 +10436,78 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
}
+ case Intrinsic::x86_fma_vfmadd_ps:
+ case Intrinsic::x86_fma_vfmadd_pd:
+ case Intrinsic::x86_fma_vfmsub_ps:
+ case Intrinsic::x86_fma_vfmsub_pd:
+ case Intrinsic::x86_fma_vfnmadd_ps:
+ case Intrinsic::x86_fma_vfnmadd_pd:
+ case Intrinsic::x86_fma_vfnmsub_ps:
+ case Intrinsic::x86_fma_vfnmsub_pd:
+ case Intrinsic::x86_fma_vfmaddsub_ps:
+ case Intrinsic::x86_fma_vfmaddsub_pd:
+ case Intrinsic::x86_fma_vfmsubadd_ps:
+ case Intrinsic::x86_fma_vfmsubadd_pd:
+ case Intrinsic::x86_fma_vfmadd_ps_256:
+ case Intrinsic::x86_fma_vfmadd_pd_256:
+ case Intrinsic::x86_fma_vfmsub_ps_256:
+ case Intrinsic::x86_fma_vfmsub_pd_256:
+ case Intrinsic::x86_fma_vfnmadd_ps_256:
+ case Intrinsic::x86_fma_vfnmadd_pd_256:
+ case Intrinsic::x86_fma_vfnmsub_ps_256:
+ case Intrinsic::x86_fma_vfnmsub_pd_256:
+ case Intrinsic::x86_fma_vfmaddsub_ps_256:
+ case Intrinsic::x86_fma_vfmaddsub_pd_256:
+ case Intrinsic::x86_fma_vfmsubadd_ps_256:
+ case Intrinsic::x86_fma_vfmsubadd_pd_256: {
+ unsigned Opc;
+ switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ case Intrinsic::x86_fma_vfmadd_ps:
+ case Intrinsic::x86_fma_vfmadd_pd:
+ case Intrinsic::x86_fma_vfmadd_ps_256:
+ case Intrinsic::x86_fma_vfmadd_pd_256:
+ Opc = X86ISD::FMADD;
+ break;
+ case Intrinsic::x86_fma_vfmsub_ps:
+ case Intrinsic::x86_fma_vfmsub_pd:
+ case Intrinsic::x86_fma_vfmsub_ps_256:
+ case Intrinsic::x86_fma_vfmsub_pd_256:
+ Opc = X86ISD::FMSUB;
+ break;
+ case Intrinsic::x86_fma_vfnmadd_ps:
+ case Intrinsic::x86_fma_vfnmadd_pd:
+ case Intrinsic::x86_fma_vfnmadd_ps_256:
+ case Intrinsic::x86_fma_vfnmadd_pd_256:
+ Opc = X86ISD::FNMADD;
+ break;
+ case Intrinsic::x86_fma_vfnmsub_ps:
+ case Intrinsic::x86_fma_vfnmsub_pd:
+ case Intrinsic::x86_fma_vfnmsub_ps_256:
+ case Intrinsic::x86_fma_vfnmsub_pd_256:
+ Opc = X86ISD::FNMSUB;
+ break;
+ case Intrinsic::x86_fma_vfmaddsub_ps:
+ case Intrinsic::x86_fma_vfmaddsub_pd:
+ case Intrinsic::x86_fma_vfmaddsub_ps_256:
+ case Intrinsic::x86_fma_vfmaddsub_pd_256:
+ Opc = X86ISD::FMADDSUB;
+ break;
+ case Intrinsic::x86_fma_vfmsubadd_ps:
+ case Intrinsic::x86_fma_vfmsubadd_pd:
+ case Intrinsic::x86_fma_vfmsubadd_ps_256:
+ case Intrinsic::x86_fma_vfmsubadd_pd_256:
+ Opc = X86ISD::FMSUBADD;
+ break;
+ }
+
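+ // (Per-lane semantics, for reference: FMADD = a*b+c, FMSUB = a*b-c,
+ // FNMADD = -(a*b)+c, FNMSUB = -(a*b)-c; FMADDSUB subtracts in even lanes
+ // and adds in odd lanes, FMSUBADD does the reverse.)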
+ return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
+ Op.getOperand(2), Op.getOperand(3));
+ }
}
}
-SDValue
-X86TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
switch (IntNo) {
@@ -10072,21 +10545,21 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
DebugLoc dl = Op.getDebugLoc();
+ EVT PtrVT = getPointerTy();
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
- DAG.getConstant(TD->getPointerSize(),
- Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
- return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- DAG.getNode(ISD::ADD, dl, getPointerTy(),
+ DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
+ return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
+ DAG.getNode(ISD::ADD, dl, PtrVT,
FrameAddr, Offset),
MachinePointerInfo(), false, false, false, 0);
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
- return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
+ return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}
@@ -10108,7 +10581,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
- return DAG.getIntPtrConstant(2*TD->getPointerSize());
+ return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
@@ -10123,7 +10596,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);
SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame,
- DAG.getIntPtrConstant(TD->getPointerSize()));
+ DAG.getIntPtrConstant(RegInfo->getSlotSize()));
StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset);
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
false, false, 0);
@@ -10134,8 +10607,22 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
}
-SDValue X86TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
+ SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
+ DAG.getVTList(MVT::i32, MVT::Other),
+ Op.getOperand(0), Op.getOperand(1));
+}
+
+SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
+ SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
+ Op.getOperand(0), Op.getOperand(1));
+}
+
+static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
return Op.getOperand(0);
}
@@ -10148,6 +10635,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
DebugLoc dl = Op.getDebugLoc();
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
+ const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo();
if (Subtarget->is64Bit()) {
SDValue OutChains[6];
@@ -10156,8 +10644,8 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
- const unsigned char N86R10 = X86_MC::getX86RegNum(X86::R10);
- const unsigned char N86R11 = X86_MC::getX86RegNum(X86::R11);
+ const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
+ const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
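+ // getEncodingValue returns the full hardware encoding; masking with 0x7
+ // keeps the three bits that go in the ModRM byte, while the fourth bit
+ // (set for R8-R15) is supplied separately by the REX prefix below.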
const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
@@ -10230,7 +10718,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
for (FunctionType::param_iterator I = FTy->param_begin(),
E = FTy->param_end(); I != E; ++I, ++Idx)
- if (Attrs.paramHasAttr(Idx, Attribute::InReg))
+ if (Attrs.getParamAttributes(Idx).hasAttribute(Attributes::InReg))
// FIXME: should only count parameters that are lowered to integers.
InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
@@ -10259,7 +10747,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
// This is storing the opcode for MOV32ri.
const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
- const unsigned char N86Reg = X86_MC::getX86RegNum(NestReg);
+ const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
OutChains[0] = DAG.getStore(Root, dl,
DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
Trmp, MachinePointerInfo(TrmpAddr),
@@ -10358,7 +10846,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
-SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
@@ -10392,8 +10880,7 @@ SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
return Op;
}
-SDValue X86TargetLowering::LowerCTLZ_ZERO_UNDEF(SDValue Op,
- SelectionDAG &DAG) const {
+static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
@@ -10418,7 +10905,7 @@ SDValue X86TargetLowering::LowerCTLZ_ZERO_UNDEF(SDValue Op,
return Op;
}
-SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
unsigned NumBits = VT.getSizeInBits();
DebugLoc dl = Op.getDebugLoc();
@@ -10467,21 +10954,22 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
-SDValue X86TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
assert(Op.getValueType().is256BitVector() &&
Op.getValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntArith(Op, DAG);
}
-SDValue X86TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
assert(Op.getValueType().is256BitVector() &&
Op.getValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntArith(Op, DAG);
}
-SDValue X86TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
EVT VT = Op.getValueType();
// Decompose 256-bit ops into smaller 128-bit ops.
@@ -10756,7 +11244,7 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
-SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
// Lower the "add/sub/mul with overflow" instruction into a regular ins plus
// a "setcc" instruction that checks the overflow flag. The "brcond" lowering
// looks for this combo and may remove the "setcc" instruction if the "setcc"
@@ -10871,7 +11359,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);;
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
}
// fall through
case MVT::v4i32:
@@ -10884,7 +11372,8 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
}
-SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
+static SDValue LowerMEMBARRIER(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
// Go ahead and emit the fence on x86-64 even if we asked for no-sse2.
@@ -10929,8 +11418,8 @@ SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
}
-SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op,
- SelectionDAG &DAG) const {
+static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
@@ -10968,7 +11457,8 @@ SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op,
}
-SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
EVT T = Op.getValueType();
DebugLoc DL = Op.getDebugLoc();
unsigned Reg = 0;
@@ -10999,8 +11489,8 @@ SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
return cpOut;
}
-SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
- SelectionDAG &DAG) const {
+static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
assert(Subtarget->is64Bit() && "Result not type legalized?");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue TheChain = Op.getOperand(0);
@@ -11018,8 +11508,7 @@ SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
return DAG.getMergeValues(Ops, 2, dl);
}
-SDValue X86TargetLowering::LowerBITCAST(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
EVT SrcVT = Op.getOperand(0).getValueType();
EVT DstVT = Op.getValueType();
assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
@@ -11039,7 +11528,7 @@ SDValue X86TargetLowering::LowerBITCAST(SDValue Op,
return SDValue();
}
-SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();
EVT T = Node->getValueType(0);
@@ -11112,9 +11601,9 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
- case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG);
- case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op,DAG);
- case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
+ case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, Subtarget, DAG);
+ case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
+ case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG);
case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
@@ -11122,8 +11611,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
- case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
- case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
+ case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
@@ -11135,8 +11624,11 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
+ case ISD::TRUNCATE: return lowerTRUNCATE(Op, DAG);
+ case ISD::ZERO_EXTEND: return lowerZERO_EXTEND(Op, DAG);
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
+ case ISD::FP_EXTEND: return lowerFP_EXTEND(Op, DAG);
case ISD::FABS: return LowerFABS(Op, DAG);
case ISD::FNEG: return LowerFNEG(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
@@ -11147,7 +11639,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::VAARG: return LowerVAARG(Op, DAG);
- case ISD::VACOPY: return LowerVACOPY(Op, DAG);
+ case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
@@ -11156,13 +11648,15 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
+ case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
+ case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
case ISD::CTLZ: return LowerCTLZ(Op, DAG);
case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
case ISD::CTTZ: return LowerCTTZ(Op, DAG);
- case ISD::MUL: return LowerMUL(Op, DAG);
+ case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
case ISD::SRA:
case ISD::SRL:
case ISD::SHL: return LowerShift(Op, DAG);
@@ -11172,7 +11666,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::USUBO:
case ISD::SMULO:
case ISD::UMULO: return LowerXALUO(Op, DAG);
- case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
+ case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
case ISD::BITCAST: return LowerBITCAST(Op, DAG);
case ISD::ADDC:
case ISD::ADDE:
@@ -11265,6 +11759,27 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
}
return;
}
+ case ISD::UINT_TO_FP: {
+ if (N->getOperand(0).getValueType() != MVT::v2i32 &&
+ N->getValueType(0) != MVT::v2f32)
+ return;
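+ // Use the standard double-precision "magic number" trick (illustrative):
+ // 0x4330000000000000 is the bit pattern of 2^52, so for any 32-bit x,
+ // bitcast<double>(bits(2^52) | zext(x)) == 2^52 + x exactly, and
+ // subtracting the bias leaves the desired value:
+ //   (double)x == bitcast<double>(0x4330000000000000 | x) - 2^52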
+ SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
+ N->getOperand(0));
+ SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
+ MVT::f64);
+ SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
+ SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
+ Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
+ SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
+ Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
+ return;
+ }
+ case ISD::FP_ROUND: {
+ SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
+ Results.push_back(V);
+ return;
+ }
case ISD::READCYCLECOUNTER: {
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue TheChain = N->getOperand(0);
@@ -11332,6 +11847,10 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_SWAP: {
unsigned Opc;
switch (N->getOpcode()) {
@@ -11354,6 +11873,18 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::ATOMIC_LOAD_XOR:
Opc = X86ISD::ATOMXOR64_DAG;
break;
+ case ISD::ATOMIC_LOAD_MAX:
+ Opc = X86ISD::ATOMMAX64_DAG;
+ break;
+ case ISD::ATOMIC_LOAD_MIN:
+ Opc = X86ISD::ATOMMIN64_DAG;
+ break;
+ case ISD::ATOMIC_LOAD_UMAX:
+ Opc = X86ISD::ATOMUMAX64_DAG;
+ break;
+ case ISD::ATOMIC_LOAD_UMIN:
+ Opc = X86ISD::ATOMUMIN64_DAG;
+ break;
case ISD::ATOMIC_SWAP:
Opc = X86ISD::ATOMSWAP64_DAG;
break;
@@ -11420,11 +11951,15 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FHSUB: return "X86ISD::FHSUB";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
+ case X86ISD::FMAXC: return "X86ISD::FMAXC";
+ case X86ISD::FMINC: return "X86ISD::FMINC";
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
+ case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
+ case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
@@ -11440,7 +11975,10 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
+ case X86ISD::VZEXT: return "X86ISD::VZEXT";
+ case X86ISD::VSEXT: return "X86ISD::VSEXT";
case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
+ case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
case X86ISD::VSHL: return "X86ISD::VSHL";
@@ -11507,6 +12045,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
+ case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
+ case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
}
}
@@ -11655,430 +12195,724 @@ X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
// X86 Scheduler Hooks
//===----------------------------------------------------------------------===//
-// private utility function
-MachineBasicBlock *
-X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
- MachineBasicBlock *MBB,
- unsigned regOpc,
- unsigned immOpc,
- unsigned LoadOpc,
- unsigned CXchgOpc,
- unsigned notOpc,
- unsigned EAXreg,
- const TargetRegisterClass *RC,
- bool Invert) const {
- // For the atomic bitwise operator, we generate
- // thisMBB:
- // newMBB:
- // ld t1 = [bitinstr.addr]
- // op t2 = t1, [bitinstr.val]
- // not t3 = t2 (if Invert)
- // mov EAX = t1
- // lcs dest = [bitinstr.addr], t3 [EAX is implicit]
- // bz newMBB
- // fallthrough -->nextMBB
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- MachineFunction::iterator MBBIter = MBB;
- ++MBBIter;
+/// Utility function to emit xbegin specifying the start of an RTM region.
+static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
+ const TargetInstrInfo *TII) {
+ DebugLoc DL = MI->getDebugLoc();
+
+ const BasicBlock *BB = MBB->getBasicBlock();
+ MachineFunction::iterator I = MBB;
+ ++I;
+
+ // For the v = xbegin(), we generate
+ //
+ // thisMBB:
+ // xbegin sinkMBB
+ //
+ // mainMBB:
+ // eax = -1
+ //
+ // sinkMBB:
+ // v = eax
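+ //
+ // Roughly equivalent C (illustrative): v = _xbegin() yields -1
+ // (_XBEGIN_STARTED) when the transaction starts; if it aborts, control
+ // transfers to the XBEGIN target with the abort status in EAX.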
- /// First build the CFG
- MachineFunction *F = MBB->getParent();
MachineBasicBlock *thisMBB = MBB;
- MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
- F->insert(MBBIter, newMBB);
- F->insert(MBBIter, nextMBB);
-
- // Transfer the remainder of thisMBB and its successor edges to nextMBB.
- nextMBB->splice(nextMBB->begin(), thisMBB,
- llvm::next(MachineBasicBlock::iterator(bInstr)),
- thisMBB->end());
- nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
-
- // Update thisMBB to fall through to newMBB
- thisMBB->addSuccessor(newMBB);
-
- // newMBB jumps to itself and fall through to nextMBB
- newMBB->addSuccessor(nextMBB);
- newMBB->addSuccessor(newMBB);
-
- // Insert instructions into newMBB based on incoming instruction
- assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
- "unexpected number of operands");
- DebugLoc dl = bInstr->getDebugLoc();
- MachineOperand& destOper = bInstr->getOperand(0);
- MachineOperand* argOpers[2 + X86::AddrNumOperands];
- int numArgs = bInstr->getNumOperands() - 1;
- for (int i=0; i < numArgs; ++i)
- argOpers[i] = &bInstr->getOperand(i+1);
-
- // x86 address has 4 operands: base, index, scale, and displacement
- int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3]
- int valArgIndx = lastAddrIndx + 1;
-
- unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
- MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1);
- for (int i=0; i <= lastAddrIndx; ++i)
- (*MIB).addOperand(*argOpers[i]);
-
- unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
- assert((argOpers[valArgIndx]->isReg() ||
- argOpers[valArgIndx]->isImm()) &&
- "invalid operand");
- if (argOpers[valArgIndx]->isReg())
- MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2);
- else
- MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2);
- MIB.addReg(t1);
- (*MIB).addOperand(*argOpers[valArgIndx]);
+ MachineFunction *MF = MBB->getParent();
+ MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
+ MF->insert(I, mainMBB);
+ MF->insert(I, sinkMBB);
- unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
- if (Invert) {
- MIB = BuildMI(newMBB, dl, TII->get(notOpc), t3).addReg(t2);
- }
- else
- t3 = t2;
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ // thisMBB:
+ // xbegin sinkMBB
+ // # fallthrough to mainMBB
+ // # on abort, branch to sinkMBB
+ BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
+ thisMBB->addSuccessor(mainMBB);
+ thisMBB->addSuccessor(sinkMBB);
+
+ // mainMBB:
+ // EAX = -1
+ BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
+ mainMBB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ // EAX is live into the sinkMBB
+ sinkMBB->addLiveIn(X86::EAX);
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
+ .addReg(X86::EAX);
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg);
- MIB.addReg(t1);
+ MI->eraseFromParent();
+ return sinkMBB;
+}
- MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
- for (int i=0; i <= lastAddrIndx; ++i)
- (*MIB).addOperand(*argOpers[i]);
- MIB.addReg(t3);
- assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
- (*MIB).setMemRefs(bInstr->memoperands_begin(),
- bInstr->memoperands_end());
+// Get CMPXCHG opcode for the specified data type.
+static unsigned getCmpXChgOpcode(EVT VT) {
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::i8: return X86::LCMPXCHG8;
+ case MVT::i16: return X86::LCMPXCHG16;
+ case MVT::i32: return X86::LCMPXCHG32;
+ case MVT::i64: return X86::LCMPXCHG64;
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid operand size!");
+}
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
- MIB.addReg(EAXreg);
+// Get LOAD opcode for the specified data type.
+static unsigned getLoadOpcode(EVT VT) {
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::i8: return X86::MOV8rm;
+ case MVT::i16: return X86::MOV16rm;
+ case MVT::i32: return X86::MOV32rm;
+ case MVT::i64: return X86::MOV64rm;
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid operand size!");
+}
- // insert branch
- BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
+// Get the opcode of the non-atomic counterpart of the given atomic instruction.
+static unsigned getNonAtomicOpcode(unsigned Opc) {
+ switch (Opc) {
+ case X86::ATOMAND8: return X86::AND8rr;
+ case X86::ATOMAND16: return X86::AND16rr;
+ case X86::ATOMAND32: return X86::AND32rr;
+ case X86::ATOMAND64: return X86::AND64rr;
+ case X86::ATOMOR8: return X86::OR8rr;
+ case X86::ATOMOR16: return X86::OR16rr;
+ case X86::ATOMOR32: return X86::OR32rr;
+ case X86::ATOMOR64: return X86::OR64rr;
+ case X86::ATOMXOR8: return X86::XOR8rr;
+ case X86::ATOMXOR16: return X86::XOR16rr;
+ case X86::ATOMXOR32: return X86::XOR32rr;
+ case X86::ATOMXOR64: return X86::XOR64rr;
+ }
+ llvm_unreachable("Unhandled atomic-load-op opcode!");
+}
+
+// Get the opcode of the non-atomic counterpart of the given atomic
+// instruction, along with the extra opcode it needs (e.g. NOT for NAND,
+// CMP for MIN/MAX).
+static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc,
+ unsigned &ExtraOpc) {
+ switch (Opc) {
+ case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr;
+ case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr;
+ case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr;
+ case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr;
+ case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr;
+ case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr;
+ case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr;
+ case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr;
+ case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr;
+ case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr;
+ case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr;
+ case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr;
+ case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr;
+ case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr;
+ case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr;
+ case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr;
+ case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr;
+ case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr;
+ case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr;
+ case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr;
+ }
+ llvm_unreachable("Unhandled atomic-load-op opcode!");
+}
+
+// Get the opcode of the non-atomic counterpart of the given atomic
+// instruction for a 64-bit data type on a 32-bit target.
+static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) {
+ switch (Opc) {
+ case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr;
+ case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr;
+ case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr;
+ case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr;
+ case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr;
+ case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr;
+ case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr;
+ case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr;
+ case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr;
+ case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr;
+ }
+ llvm_unreachable("Unhandled atomic-load-op opcode!");
+}
+
+// Get the opcode of the non-atomic counterpart of the given atomic
+// instruction for a 64-bit data type on a 32-bit target, along with the
+// extra opcode it needs.
+static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc,
+ unsigned &HiOpc,
+ unsigned &ExtraOpc) {
+ switch (Opc) {
+ case X86::ATOMNAND6432:
+ ExtraOpc = X86::NOT32r;
+ HiOpc = X86::AND32rr;
+ return X86::AND32rr;
+ }
+ llvm_unreachable("Unhandled atomic-load-op opcode!");
+}
- bInstr->eraseFromParent(); // The pseudo instruction is gone now.
- return nextMBB;
+// Get pseudo CMOV opcode from the specified data type.
+static unsigned getPseudoCMOVOpc(EVT VT) {
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::i8: return X86::CMOV_GR8;
+ case MVT::i16: return X86::CMOV_GR16;
+ case MVT::i32: return X86::CMOV_GR32;
+ default:
+ break;
+ }
+ llvm_unreachable("Unknown CMOV opcode!");
}
-// private utility function: 64 bit atomics on 32 bit host.
+// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions.
+// They will be translated into a spin-loop or compare-exchange loop from
+//
+// ...
+// dst = atomic-fetch-op MI.addr, MI.val
+// ...
+//
+// to
+//
+// ...
+// EAX = LOAD MI.addr
+// loop:
+// t1 = OP MI.val, EAX
+// LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined]
+// JNE loop
+// sink:
+// dst = EAX
+// ...
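+//
+// Roughly equivalent C (illustrative):
+//
+//   T old = *addr;
+//   T desired;
+//   do {
+//     desired = op(val, old);  // EAX (old) is reloaded by LCMPXCHG on failure
+//   } while (!cmpxchg(addr, &old, desired));
+//   dst = old;                 // atomic fetch-op returns the previous value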
MachineBasicBlock *
-X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
- MachineBasicBlock *MBB,
- unsigned regOpcL,
- unsigned regOpcH,
- unsigned immOpcL,
- unsigned immOpcH,
- bool Invert) const {
- // For the atomic bitwise operator, we generate
- // thisMBB (instructions are in pairs, except cmpxchg8b)
- // ld t1,t2 = [bitinstr.addr]
- // newMBB:
- // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
- // op t5, t6 <- out1, out2, [bitinstr.val]
- // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val])
- // neg t7, t8 < t5, t6 (if Invert)
- // mov ECX, EBX <- t5, t6
- // mov EAX, EDX <- t1, t2
- // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit]
- // mov t3, t4 <- EAX, EDX
- // bz newMBB
- // result in out1, out2
- // fallthrough -->nextMBB
-
- const TargetRegisterClass *RC = &X86::GR32RegClass;
- const unsigned LoadOpc = X86::MOV32rm;
- const unsigned NotOpc = X86::NOT32r;
+X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- MachineFunction::iterator MBBIter = MBB;
- ++MBBIter;
+ DebugLoc DL = MI->getDebugLoc();
+
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+
+ const BasicBlock *BB = MBB->getBasicBlock();
+ MachineFunction::iterator I = MBB;
+ ++I;
+
+ assert(MI->getNumOperands() <= X86::AddrNumOperands + 2 &&
+ "Unexpected number of operands");
+
+ assert(MI->hasOneMemOperand() &&
+ "Expected atomic-load-op to have one memoperand");
+
+ // Memory Reference
+ MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+
+ unsigned DstReg, SrcReg;
+ unsigned MemOpndSlot;
+
+ unsigned CurOp = 0;
+
+ DstReg = MI->getOperand(CurOp++).getReg();
+ MemOpndSlot = CurOp;
+ CurOp += X86::AddrNumOperands;
+ SrcReg = MI->getOperand(CurOp++).getReg();
+
+ const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
+ MVT::SimpleValueType VT = *RC->vt_begin();
+ unsigned AccPhyReg = getX86SubSuperRegister(X86::EAX, VT);
+
+ unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT);
+ unsigned LOADOpc = getLoadOpcode(VT);
+
+ // For the atomic load-arith operator, we generate
+ //
+ // thisMBB:
+ // EAX = LOAD [MI.addr]
+ // mainMBB:
+ // t1 = OP MI.val, EAX
+ // LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined]
+ // JNE mainMBB
+ // sinkMBB:
- /// First build the CFG
- MachineFunction *F = MBB->getParent();
MachineBasicBlock *thisMBB = MBB;
- MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
- F->insert(MBBIter, newMBB);
- F->insert(MBBIter, nextMBB);
-
- // Transfer the remainder of thisMBB and its successor edges to nextMBB.
- nextMBB->splice(nextMBB->begin(), thisMBB,
- llvm::next(MachineBasicBlock::iterator(bInstr)),
- thisMBB->end());
- nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
-
- // Update thisMBB to fall through to newMBB
- thisMBB->addSuccessor(newMBB);
-
- // newMBB jumps to itself and fall through to nextMBB
- newMBB->addSuccessor(nextMBB);
- newMBB->addSuccessor(newMBB);
-
- DebugLoc dl = bInstr->getDebugLoc();
- // Insert instructions into newMBB based on incoming instruction
- // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
- assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
- "unexpected number of operands");
- MachineOperand& dest1Oper = bInstr->getOperand(0);
- MachineOperand& dest2Oper = bInstr->getOperand(1);
- MachineOperand* argOpers[2 + X86::AddrNumOperands];
- for (int i=0; i < 2 + X86::AddrNumOperands; ++i) {
- argOpers[i] = &bInstr->getOperand(i+2);
-
- // We use some of the operands multiple times, so conservatively just
- // clear any kill flags that might be present.
- if (argOpers[i]->isReg() && argOpers[i]->isUse())
- argOpers[i]->setIsKill(false);
- }
-
- // x86 address has 5 operands: base, index, scale, displacement, and segment.
- int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3]
-
- unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
- MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
- for (int i=0; i <= lastAddrIndx; ++i)
- (*MIB).addOperand(*argOpers[i]);
- unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
- MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
- // add 4 to displacement.
- for (int i=0; i <= lastAddrIndx-2; ++i)
- (*MIB).addOperand(*argOpers[i]);
- MachineOperand newOp3 = *(argOpers[3]);
- if (newOp3.isImm())
- newOp3.setImm(newOp3.getImm()+4);
- else
- newOp3.setOffset(newOp3.getOffset()+4);
- (*MIB).addOperand(newOp3);
- (*MIB).addOperand(*argOpers[lastAddrIndx]);
-
- // t3/4 are defined later, at the bottom of the loop
- unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
- unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
- BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg())
- .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
- BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg())
- .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);
-
- // The subsequent operations should be using the destination registers of
- // the PHI instructions.
- t1 = dest1Oper.getReg();
- t2 = dest2Oper.getReg();
-
- int valArgIndx = lastAddrIndx + 1;
- assert((argOpers[valArgIndx]->isReg() ||
- argOpers[valArgIndx]->isImm()) &&
- "invalid operand");
- unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
- unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
- if (argOpers[valArgIndx]->isReg())
- MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5);
- else
- MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
- if (regOpcL != X86::MOV32rr)
- MIB.addReg(t1);
- (*MIB).addOperand(*argOpers[valArgIndx]);
- assert(argOpers[valArgIndx + 1]->isReg() ==
- argOpers[valArgIndx]->isReg());
- assert(argOpers[valArgIndx + 1]->isImm() ==
- argOpers[valArgIndx]->isImm());
- if (argOpers[valArgIndx + 1]->isReg())
- MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6);
- else
- MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
- if (regOpcH != X86::MOV32rr)
- MIB.addReg(t2);
- (*MIB).addOperand(*argOpers[valArgIndx + 1]);
-
- unsigned t7, t8;
- if (Invert) {
- t7 = F->getRegInfo().createVirtualRegister(RC);
- t8 = F->getRegInfo().createVirtualRegister(RC);
- MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t7).addReg(t5);
- MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t8).addReg(t6);
- } else {
- t7 = t5;
- t8 = t6;
+ MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
+ MF->insert(I, mainMBB);
+ MF->insert(I, sinkMBB);
+
+ MachineInstrBuilder MIB;
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ // thisMBB:
+ MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), AccPhyReg);
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.addOperand(MI->getOperand(MemOpndSlot + i));
+ MIB.setMemRefs(MMOBegin, MMOEnd);
+
+ thisMBB->addSuccessor(mainMBB);
+
+ // mainMBB:
+ MachineBasicBlock *origMainMBB = mainMBB;
+ mainMBB->addLiveIn(AccPhyReg);
+
+ // Copy AccPhyReg as it is used more than once.
+ unsigned AccReg = MRI.createVirtualRegister(RC);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccReg)
+ .addReg(AccPhyReg);
+
+ unsigned t1 = MRI.createVirtualRegister(RC);
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unhandled atomic-load-op opcode!");
+ case X86::ATOMAND8:
+ case X86::ATOMAND16:
+ case X86::ATOMAND32:
+ case X86::ATOMAND64:
+ case X86::ATOMOR8:
+ case X86::ATOMOR16:
+ case X86::ATOMOR32:
+ case X86::ATOMOR64:
+ case X86::ATOMXOR8:
+ case X86::ATOMXOR16:
+ case X86::ATOMXOR32:
+ case X86::ATOMXOR64: {
+ unsigned ARITHOpc = getNonAtomicOpcode(Opc);
+ BuildMI(mainMBB, DL, TII->get(ARITHOpc), t1).addReg(SrcReg)
+ .addReg(AccReg);
+ break;
+ }
+ case X86::ATOMNAND8:
+ case X86::ATOMNAND16:
+ case X86::ATOMNAND32:
+ case X86::ATOMNAND64: {
+ unsigned t2 = MRI.createVirtualRegister(RC);
+ unsigned NOTOpc;
+ unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc);
+ BuildMI(mainMBB, DL, TII->get(ANDOpc), t2).addReg(SrcReg)
+ .addReg(AccReg);
+ BuildMI(mainMBB, DL, TII->get(NOTOpc), t1).addReg(t2);
+ break;
+ }
+ case X86::ATOMMAX8:
+ case X86::ATOMMAX16:
+ case X86::ATOMMAX32:
+ case X86::ATOMMAX64:
+ case X86::ATOMMIN8:
+ case X86::ATOMMIN16:
+ case X86::ATOMMIN32:
+ case X86::ATOMMIN64:
+ case X86::ATOMUMAX8:
+ case X86::ATOMUMAX16:
+ case X86::ATOMUMAX32:
+ case X86::ATOMUMAX64:
+ case X86::ATOMUMIN8:
+ case X86::ATOMUMIN16:
+ case X86::ATOMUMIN32:
+ case X86::ATOMUMIN64: {
+ unsigned CMPOpc;
+ unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc);
+
+ BuildMI(mainMBB, DL, TII->get(CMPOpc))
+ .addReg(SrcReg)
+ .addReg(AccReg);
+
+ if (Subtarget->hasCMov()) {
+ if (VT != MVT::i8) {
+ // Native support
+ BuildMI(mainMBB, DL, TII->get(CMOVOpc), t1)
+ .addReg(SrcReg)
+ .addReg(AccReg);
+ } else {
+ // Promote i8 to i32 to use CMOV32
+ const TargetRegisterClass *RC32 = getRegClassFor(MVT::i32);
+ unsigned SrcReg32 = MRI.createVirtualRegister(RC32);
+ unsigned AccReg32 = MRI.createVirtualRegister(RC32);
+ unsigned t2 = MRI.createVirtualRegister(RC32);
+
+ unsigned Undef = MRI.createVirtualRegister(RC32);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Undef);
+
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32)
+ .addReg(Undef)
+ .addReg(SrcReg)
+ .addImm(X86::sub_8bit);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32)
+ .addReg(Undef)
+ .addReg(AccReg)
+ .addImm(X86::sub_8bit);
+
+ BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2)
+ .addReg(SrcReg32)
+ .addReg(AccReg32);
+
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t1)
+ .addReg(t2, 0, X86::sub_8bit);
+ }
+ } else {
+      // Use a pseudo select and lower it afterwards.
+ assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
+ "Invalid atomic-load-op transformation!");
+ unsigned SelOpc = getPseudoCMOVOpc(VT);
+ X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc);
+ assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!");
+ MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t1)
+ .addReg(SrcReg).addReg(AccReg)
+ .addImm(CC);
+ mainMBB = EmitLoweredSelect(MIB, mainMBB);
+ }
+ break;
+ }
}
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
- MIB.addReg(t1);
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX);
- MIB.addReg(t2);
+ // Copy AccPhyReg back from virtual register.
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccPhyReg)
+ .addReg(AccReg);
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX);
- MIB.addReg(t7);
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX);
- MIB.addReg(t8);
+ MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc));
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.addOperand(MI->getOperand(MemOpndSlot + i));
+ MIB.addReg(t1);
+ MIB.setMemRefs(MMOBegin, MMOEnd);
- MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
- for (int i=0; i <= lastAddrIndx; ++i)
- (*MIB).addOperand(*argOpers[i]);
+ BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB);
- assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
- (*MIB).setMemRefs(bInstr->memoperands_begin(),
- bInstr->memoperands_end());
+ mainMBB->addSuccessor(origMainMBB);
+ mainMBB->addSuccessor(sinkMBB);
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3);
- MIB.addReg(X86::EAX);
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4);
- MIB.addReg(X86::EDX);
+ // sinkMBB:
+ sinkMBB->addLiveIn(AccPhyReg);
- // insert branch
- BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(TargetOpcode::COPY), DstReg)
+ .addReg(AccPhyReg);
- bInstr->eraseFromParent(); // The pseudo instruction is gone now.
- return nextMBB;
+ MI->eraseFromParent();
+ return sinkMBB;
}
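
As a concrete illustration of the min/max arm above, which has no single atomic x86 instruction, the following hand-written C++ analogue (a sketch; the function and comments are illustrative, not part of this patch) shows the compare, conditional move, and compare-exchange retry that the emitted loop performs:

    #include <atomic>

    // Analogue of the atomic-max lowering: compare, conditionally select,
    // then retry with a compare-exchange until no other thread intervened.
    int fetch_max(std::atomic<int> &v, int x) {
      int old = v.load();
      // Per iteration, roughly: CMP x, old ; CMOVG t, x ; LOCK CMPXCHG [&v], t
      while (!v.compare_exchange_weak(old, x > old ? x : old)) {
        // on failure, compare_exchange_weak reloads 'old', as the loop does
      }
      return old;
    }
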
-// private utility function
+// EmitAtomicLoadArith6432 - emit the code sequence for 64-bit pseudo atomic
+// instructions on a 32-bit host. They will be translated into a
+// compare-exchange spin loop, from
+//
+// ...
+// dst = atomic-fetch-op MI.addr, MI.val
+// ...
+//
+// to
+//
+// ...
+// EAX = LOAD [MI.addr + 0]
+// EDX = LOAD [MI.addr + 4]
+// loop:
+// EBX = OP MI.val.lo, EAX
+// ECX = OP MI.val.hi, EDX
+// LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined]
+// JNE loop
+// sink:
+// dst = EDX:EAX
+// ...
MachineBasicBlock *
-X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
- MachineBasicBlock *MBB,
- unsigned cmovOpc) const {
- // For the atomic min/max operator, we generate
- // thisMBB:
- // newMBB:
- // ld t1 = [min/max.addr]
- // mov t2 = [min/max.val]
- // cmp t1, t2
- // cmov[cond] t2 = t1
- // mov EAX = t1
- // lcs dest = [bitinstr.addr], t2 [EAX is implicit]
- // bz newMBB
- // fallthrough -->nextMBB
- //
+X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- MachineFunction::iterator MBBIter = MBB;
- ++MBBIter;
+ DebugLoc DL = MI->getDebugLoc();
+
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+
+ const BasicBlock *BB = MBB->getBasicBlock();
+ MachineFunction::iterator I = MBB;
+ ++I;
+
+ assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 &&
+ "Unexpected number of operands");
+
+ assert(MI->hasOneMemOperand() &&
+ "Expected atomic-load-op6432 to have one memoperand");
+
+ // Memory Reference
+ MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+
+ unsigned DstLoReg, DstHiReg;
+ unsigned SrcLoReg, SrcHiReg;
+ unsigned MemOpndSlot;
+
+ unsigned CurOp = 0;
+
+ DstLoReg = MI->getOperand(CurOp++).getReg();
+ DstHiReg = MI->getOperand(CurOp++).getReg();
+ MemOpndSlot = CurOp;
+ CurOp += X86::AddrNumOperands;
+ SrcLoReg = MI->getOperand(CurOp++).getReg();
+ SrcHiReg = MI->getOperand(CurOp++).getReg();
+
+ const TargetRegisterClass *RC = &X86::GR32RegClass;
+ const TargetRegisterClass *RC8 = &X86::GR8RegClass;
+
+ unsigned LCMPXCHGOpc = X86::LCMPXCHG8B;
+ unsigned LOADOpc = X86::MOV32rm;
+
+ // For the atomic load-arith operator, we generate
+ //
+ // thisMBB:
+ // EAX = LOAD [MI.addr + 0]
+ // EDX = LOAD [MI.addr + 4]
+ // mainMBB:
+ // EBX = OP MI.vallo, EAX
+ // ECX = OP MI.valhi, EDX
+ // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined]
+ // JNE mainMBB
+ // sinkMBB:
- /// First build the CFG
- MachineFunction *F = MBB->getParent();
MachineBasicBlock *thisMBB = MBB;
- MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
- F->insert(MBBIter, newMBB);
- F->insert(MBBIter, nextMBB);
-
- // Transfer the remainder of thisMBB and its successor edges to nextMBB.
- nextMBB->splice(nextMBB->begin(), thisMBB,
- llvm::next(MachineBasicBlock::iterator(mInstr)),
- thisMBB->end());
- nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
-
- // Update thisMBB to fall through to newMBB
- thisMBB->addSuccessor(newMBB);
-
- // newMBB jumps to newMBB and fall through to nextMBB
- newMBB->addSuccessor(nextMBB);
- newMBB->addSuccessor(newMBB);
-
- DebugLoc dl = mInstr->getDebugLoc();
- // Insert instructions into newMBB based on incoming instruction
- assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
- "unexpected number of operands");
- MachineOperand& destOper = mInstr->getOperand(0);
- MachineOperand* argOpers[2 + X86::AddrNumOperands];
- int numArgs = mInstr->getNumOperands() - 1;
- for (int i=0; i < numArgs; ++i)
- argOpers[i] = &mInstr->getOperand(i+1);
-
- // x86 address has 4 operands: base, index, scale, and displacement
- int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3]
- int valArgIndx = lastAddrIndx + 1;
-
- unsigned t1 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
- MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
- for (int i=0; i <= lastAddrIndx; ++i)
- (*MIB).addOperand(*argOpers[i]);
-
- // We only support register and immediate values
- assert((argOpers[valArgIndx]->isReg() ||
- argOpers[valArgIndx]->isImm()) &&
- "invalid operand");
-
- unsigned t2 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
- if (argOpers[valArgIndx]->isReg())
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
- else
- MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
- (*MIB).addOperand(*argOpers[valArgIndx]);
+ MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
+ MF->insert(I, mainMBB);
+ MF->insert(I, sinkMBB);
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
- MIB.addReg(t1);
+ MachineInstrBuilder MIB;
- MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
- MIB.addReg(t1);
- MIB.addReg(t2);
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ // thisMBB:
+ // Lo
+ MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EAX);
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.addOperand(MI->getOperand(MemOpndSlot + i));
+ MIB.setMemRefs(MMOBegin, MMOEnd);
+ // Hi
+ MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EDX);
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
+ if (i == X86::AddrDisp)
+ MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32)
+ else
+ MIB.addOperand(MI->getOperand(MemOpndSlot + i));
+ }
+ MIB.setMemRefs(MMOBegin, MMOEnd);
- // Generate movc
- unsigned t3 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
- MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3);
- MIB.addReg(t2);
- MIB.addReg(t1);
+ thisMBB->addSuccessor(mainMBB);
+
+ // mainMBB:
+ MachineBasicBlock *origMainMBB = mainMBB;
+ mainMBB->addLiveIn(X86::EAX);
+ mainMBB->addLiveIn(X86::EDX);
+
+ // Copy EDX:EAX as they are used more than once.
+ unsigned LoReg = MRI.createVirtualRegister(RC);
+ unsigned HiReg = MRI.createVirtualRegister(RC);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), LoReg).addReg(X86::EAX);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), HiReg).addReg(X86::EDX);
+
+ unsigned t1L = MRI.createVirtualRegister(RC);
+ unsigned t1H = MRI.createVirtualRegister(RC);
+
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unhandled atomic-load-op6432 opcode!");
+ case X86::ATOMAND6432:
+ case X86::ATOMOR6432:
+ case X86::ATOMXOR6432:
+ case X86::ATOMADD6432:
+ case X86::ATOMSUB6432: {
+ unsigned HiOpc;
+ unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc);
+ BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(LoReg).addReg(SrcLoReg);
+ BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(HiReg).addReg(SrcHiReg);
+ break;
+ }
+ case X86::ATOMNAND6432: {
+ unsigned HiOpc, NOTOpc;
+ unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc);
+ unsigned t2L = MRI.createVirtualRegister(RC);
+ unsigned t2H = MRI.createVirtualRegister(RC);
+ BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg).addReg(LoReg);
+ BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg).addReg(HiReg);
+ BuildMI(mainMBB, DL, TII->get(NOTOpc), t1L).addReg(t2L);
+ BuildMI(mainMBB, DL, TII->get(NOTOpc), t1H).addReg(t2H);
+ break;
+ }
+ case X86::ATOMMAX6432:
+ case X86::ATOMMIN6432:
+ case X86::ATOMUMAX6432:
+ case X86::ATOMUMIN6432: {
+ unsigned HiOpc;
+ unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc);
+ unsigned cL = MRI.createVirtualRegister(RC8);
+ unsigned cH = MRI.createVirtualRegister(RC8);
+ unsigned cL32 = MRI.createVirtualRegister(RC);
+ unsigned cH32 = MRI.createVirtualRegister(RC);
+ unsigned cc = MRI.createVirtualRegister(RC);
+ // cl := cmp src_lo, lo
+ BuildMI(mainMBB, DL, TII->get(X86::CMP32rr))
+ .addReg(SrcLoReg).addReg(LoReg);
+ BuildMI(mainMBB, DL, TII->get(LoOpc), cL);
+ BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL);
+ // ch := cmp src_hi, hi
+ BuildMI(mainMBB, DL, TII->get(X86::CMP32rr))
+ .addReg(SrcHiReg).addReg(HiReg);
+ BuildMI(mainMBB, DL, TII->get(HiOpc), cH);
+ BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH);
+ // cc := (src_hi == hi) ? cl : ch
+ if (Subtarget->hasCMov()) {
+ BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc)
+ .addReg(cH32).addReg(cL32);
+ } else {
+ MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc)
+ .addReg(cH32).addReg(cL32)
+ .addImm(X86::COND_E);
+ mainMBB = EmitLoweredSelect(MIB, mainMBB);
+ }
+ BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc);
+ if (Subtarget->hasCMov()) {
+ BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1L)
+ .addReg(SrcLoReg).addReg(LoReg);
+ BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1H)
+ .addReg(SrcHiReg).addReg(HiReg);
+ } else {
+ MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1L)
+ .addReg(SrcLoReg).addReg(LoReg)
+ .addImm(X86::COND_NE);
+ mainMBB = EmitLoweredSelect(MIB, mainMBB);
+ MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1H)
+ .addReg(SrcHiReg).addReg(HiReg)
+ .addImm(X86::COND_NE);
+ mainMBB = EmitLoweredSelect(MIB, mainMBB);
+ }
+ break;
+ }
+ case X86::ATOMSWAP6432: {
+ unsigned HiOpc;
+ unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc);
+ BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(SrcLoReg);
+ BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(SrcHiReg);
+ break;
+ }
+ }
+
+ // Copy EDX:EAX back from HiReg:LoReg
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(LoReg);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(HiReg);
+ // Copy ECX:EBX from t1H:t1L
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t1L);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t1H);
+
+ MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc));
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.addOperand(MI->getOperand(MemOpndSlot + i));
+ MIB.setMemRefs(MMOBegin, MMOEnd);
- // Cmp and exchange if none has modified the memory location
- MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32));
- for (int i=0; i <= lastAddrIndx; ++i)
- (*MIB).addOperand(*argOpers[i]);
- MIB.addReg(t3);
- assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
- (*MIB).setMemRefs(mInstr->memoperands_begin(),
- mInstr->memoperands_end());
+ BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB);
- MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
- MIB.addReg(X86::EAX);
+ mainMBB->addSuccessor(origMainMBB);
+ mainMBB->addSuccessor(sinkMBB);
- // insert branch
- BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
+ // sinkMBB:
+ sinkMBB->addLiveIn(X86::EAX);
+ sinkMBB->addLiveIn(X86::EDX);
+
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(TargetOpcode::COPY), DstLoReg)
+ .addReg(X86::EAX);
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(TargetOpcode::COPY), DstHiReg)
+ .addReg(X86::EDX);
- mInstr->eraseFromParent(); // The pseudo instruction is gone now.
- return nextMBB;
+ MI->eraseFromParent();
+ return sinkMBB;
}
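
For context, a minimal sketch of source that reaches this 6432 expansion when compiled for a 32-bit x86 target; the comments map the operation onto the block structure above and are assumed, not generated output:

    #include <atomic>

    // With -m32, a 64-bit atomic RMW cannot be a single ALU op and is
    // expanded into the CMPXCHG8B loop built by EmitAtomicLoadArith6432.
    long long fetch_add64(std::atomic<long long> &v) {
      // thisMBB:  EAX = LOAD [&v + 0]       EDX = LOAD [&v + 4]
      // mainMBB:  EBX = ADD EAX, 1          ECX = ADC EDX, 0
      //           LOCK CMPXCHG8B [&v]       ; stores ECX:EBX if EDX:EAX match
      //           JNE mainMBB
      // sinkMBB:  result = EDX:EAX
      return v.fetch_add(1);
    }
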
// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
-MachineBasicBlock *
-X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned numArgs, bool memArg) const {
- assert(Subtarget->hasSSE42() &&
- "Target must have SSE4.2 or AVX features enabled");
+static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
+ const TargetInstrInfo *TII) {
+ unsigned Opc;
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("illegal opcode!");
+ case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
+ case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
+ case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
+ case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
+ case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
+ case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
+ case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
+ case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
+ }
DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
+
+ unsigned NumArgs = MI->getNumOperands();
+ for (unsigned i = 1; i < NumArgs; ++i) {
+ MachineOperand &Op = MI->getOperand(i);
+ if (!(Op.isReg() && Op.isImplicit()))
+ MIB.addOperand(Op);
+ }
+ if (MI->hasOneMemOperand())
+ MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ BuildMI(*BB, MI, dl,
+ TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
+ .addReg(X86::XMM0);
+
+ MI->eraseFromParent();
+ return BB;
+}
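
These pseudos come from the SSE4.2 string-compare intrinsics. A sketch of user code that exercises the mask-returning form handled here (the flag combination is illustrative):

    #include <nmmintrin.h>  // SSE4.2 intrinsics; compile with -msse4.2

    // PCMPISTRM leaves its mask in XMM0, which is why the expansion above
    // finishes with a COPY from XMM0 into the result register.
    __m128i equal_byte_mask(__m128i a, __m128i b) {
      return _mm_cmpistrm(a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH |
                                _SIDD_BIT_MASK);
    }
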
+
+// FIXME: Custom handling because TableGen doesn't support multiple implicit
+// defs in an instruction pattern
+static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
+ const TargetInstrInfo *TII) {
unsigned Opc;
- if (!Subtarget->hasAVX()) {
- if (memArg)
- Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
- else
- Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr;
- } else {
- if (memArg)
- Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm;
- else
- Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr;
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("illegal opcode!");
+ case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
+ case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
+ case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
+ case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
+ case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
+ case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
+ case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
+ case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
}
+ DebugLoc dl = MI->getDebugLoc();
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
- for (unsigned i = 0; i < numArgs; ++i) {
- MachineOperand &Op = MI->getOperand(i+1);
+
+  unsigned NumArgs = MI->getNumOperands(); // operand 0 is the result, skipped below
+ for (unsigned i = 1; i < NumArgs; ++i) {
+ MachineOperand &Op = MI->getOperand(i);
if (!(Op.isReg() && Op.isImplicit()))
MIB.addOperand(Op);
}
+ if (MI->hasOneMemOperand())
+ MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
BuildMI(*BB, MI, dl,
TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::XMM0);
+ .addReg(X86::ECX);
MI->eraseFromParent();
return BB;
}
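
The index-returning companion reaches EmitPCMPSTRI and leaves its result in ECX, matching the final COPY above; again a sketch:

    #include <nmmintrin.h>

    // PCMPISTRI returns the index of the first matching byte in ECX.
    int first_equal_byte(__m128i a, __m128i b) {
      return _mm_cmpistri(a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH);
    }
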
-MachineBasicBlock *
-X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
+static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
+ const TargetInstrInfo *TII,
+ const X86Subtarget *Subtarget) {
DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
// Address into RAX/EAX, other two args into ECX, EDX.
unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
@@ -12767,6 +13601,203 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
}
MachineBasicBlock *
+X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+
+ const BasicBlock *BB = MBB->getBasicBlock();
+ MachineFunction::iterator I = MBB;
+ ++I;
+
+ // Memory Reference
+ MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+
+ unsigned DstReg;
+ unsigned MemOpndSlot = 0;
+
+ unsigned CurOp = 0;
+
+ DstReg = MI->getOperand(CurOp++).getReg();
+ const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
+ assert(RC->hasType(MVT::i32) && "Invalid destination!");
+ unsigned mainDstReg = MRI.createVirtualRegister(RC);
+ unsigned restoreDstReg = MRI.createVirtualRegister(RC);
+
+ MemOpndSlot = CurOp;
+
+ MVT PVT = getPointerTy();
+ assert((PVT == MVT::i64 || PVT == MVT::i32) &&
+ "Invalid Pointer Size!");
+
+ // For v = setjmp(buf), we generate
+ //
+ // thisMBB:
+ // buf[LabelOffset] = restoreMBB
+ // SjLjSetup restoreMBB
+ //
+ // mainMBB:
+ // v_main = 0
+ //
+ // sinkMBB:
+ // v = phi(main, restore)
+ //
+ // restoreMBB:
+ // v_restore = 1
+
+ MachineBasicBlock *thisMBB = MBB;
+ MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
+ MF->insert(I, mainMBB);
+ MF->insert(I, sinkMBB);
+ MF->push_back(restoreMBB);
+
+ MachineInstrBuilder MIB;
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ // thisMBB:
+ unsigned PtrStoreOpc = 0;
+ unsigned LabelReg = 0;
+ const int64_t LabelOffset = 1 * PVT.getStoreSize();
+ Reloc::Model RM = getTargetMachine().getRelocationModel();
+ bool UseImmLabel = (getTargetMachine().getCodeModel() == CodeModel::Small) &&
+ (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
+
+ // Prepare IP either in reg or imm.
+ if (!UseImmLabel) {
+ PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
+ const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
+ LabelReg = MRI.createVirtualRegister(PtrRC);
+ if (Subtarget->is64Bit()) {
+ MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addMBB(restoreMBB)
+ .addReg(0);
+ } else {
+ const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
+ MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
+ .addReg(XII->getGlobalBaseReg(MF))
+ .addImm(0)
+ .addReg(0)
+ .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
+ .addReg(0);
+ }
+ } else
+ PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
+ // Store IP
+ MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
+ if (i == X86::AddrDisp)
+ MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
+ else
+ MIB.addOperand(MI->getOperand(MemOpndSlot + i));
+ }
+ if (!UseImmLabel)
+ MIB.addReg(LabelReg);
+ else
+ MIB.addMBB(restoreMBB);
+ MIB.setMemRefs(MMOBegin, MMOEnd);
+ // Setup
+ MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
+ .addMBB(restoreMBB);
+ MIB.addRegMask(RegInfo->getNoPreservedMask());
+ thisMBB->addSuccessor(mainMBB);
+ thisMBB->addSuccessor(restoreMBB);
+
+ // mainMBB:
+ // EAX = 0
+ BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
+ mainMBB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(X86::PHI), DstReg)
+ .addReg(mainDstReg).addMBB(mainMBB)
+ .addReg(restoreDstReg).addMBB(restoreMBB);
+
+ // restoreMBB:
+ BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
+ BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB);
+ restoreMBB->addSuccessor(sinkMBB);
+
+ MI->eraseFromParent();
+ return sinkMBB;
+}
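
The fixed offsets used here and in the longjmp emitter below imply a three-slot jump buffer; the struct is a hypothetical rendering of that layout (field names invented for illustration):

    // Each slot is PVT.getStoreSize() bytes: 4 on i386, 8 on x86-64.
    struct SjLjBuffer {
      void *FramePointer;  // slot 0: reloaded first by emitEHSjLjLongJmp
      void *RestoreIP;     // slot 1: LabelOffset = 1 * PVT.getStoreSize()
      void *StackPointer;  // slot 2: SPOffset   = 2 * PVT.getStoreSize()
    };
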
+
+MachineBasicBlock *
+X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+
+ // Memory Reference
+ MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+
+ MVT PVT = getPointerTy();
+ assert((PVT == MVT::i64 || PVT == MVT::i32) &&
+ "Invalid Pointer Size!");
+
+ const TargetRegisterClass *RC =
+ (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
+ unsigned Tmp = MRI.createVirtualRegister(RC);
+  // FP is only written here, never read, so it can be treated as a GPR.
+ unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
+ unsigned SP = RegInfo->getStackRegister();
+
+ MachineInstrBuilder MIB;
+
+ const int64_t LabelOffset = 1 * PVT.getStoreSize();
+ const int64_t SPOffset = 2 * PVT.getStoreSize();
+
+ unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
+ unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
+
+ // Reload FP
+ MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.addOperand(MI->getOperand(i));
+ MIB.setMemRefs(MMOBegin, MMOEnd);
+ // Reload IP
+ MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
+ if (i == X86::AddrDisp)
+ MIB.addDisp(MI->getOperand(i), LabelOffset);
+ else
+ MIB.addOperand(MI->getOperand(i));
+ }
+ MIB.setMemRefs(MMOBegin, MMOEnd);
+ // Reload SP
+ MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
+ for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
+ if (i == X86::AddrDisp)
+ MIB.addDisp(MI->getOperand(i), SPOffset);
+ else
+ MIB.addOperand(MI->getOperand(i));
+ }
+ MIB.setMemRefs(MMOBegin, MMOEnd);
+ // Jump
+ BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
+
+ MI->eraseFromParent();
+ return MBB;
+}
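
A minimal sketch of code that drives both emitters, assuming the GCC-style builtins that are lowered to the EH_SjLj_SetJmp/EH_SjLj_LongJmp pseudos:

    static void *buf[5];  // __builtin_setjmp requires a five-word buffer

    int demo() {
      if (__builtin_setjmp(buf) == 0) {  // mainMBB path: v_main = 0
        __builtin_longjmp(buf, 1);       // reloads FP, IP, SP, then jumps
      }
      return 1;                          // restoreMBB path: v_restore = 1
    }
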
+
+MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
switch (MI->getOpcode()) {
@@ -12895,198 +13926,101 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::PCMPESTRM128REG:
case X86::VPCMPESTRM128REG:
case X86::PCMPESTRM128MEM:
- case X86::VPCMPESTRM128MEM: {
- unsigned NumArgs;
- bool MemArg;
- switch (MI->getOpcode()) {
- default: llvm_unreachable("illegal opcode!");
- case X86::PCMPISTRM128REG:
- case X86::VPCMPISTRM128REG:
- NumArgs = 3; MemArg = false; break;
- case X86::PCMPISTRM128MEM:
- case X86::VPCMPISTRM128MEM:
- NumArgs = 3; MemArg = true; break;
- case X86::PCMPESTRM128REG:
- case X86::VPCMPESTRM128REG:
- NumArgs = 5; MemArg = false; break;
- case X86::PCMPESTRM128MEM:
- case X86::VPCMPESTRM128MEM:
- NumArgs = 5; MemArg = true; break;
- }
- return EmitPCMP(MI, BB, NumArgs, MemArg);
- }
-
- // Thread synchronization.
+ case X86::VPCMPESTRM128MEM:
+ assert(Subtarget->hasSSE42() &&
+ "Target must have SSE4.2 or AVX features enabled");
+ return EmitPCMPSTRM(MI, BB, getTargetMachine().getInstrInfo());
+
+ // String/text processing lowering.
+ case X86::PCMPISTRIREG:
+ case X86::VPCMPISTRIREG:
+ case X86::PCMPISTRIMEM:
+ case X86::VPCMPISTRIMEM:
+ case X86::PCMPESTRIREG:
+ case X86::VPCMPESTRIREG:
+ case X86::PCMPESTRIMEM:
+ case X86::VPCMPESTRIMEM:
+ assert(Subtarget->hasSSE42() &&
+ "Target must have SSE4.2 or AVX features enabled");
+ return EmitPCMPSTRI(MI, BB, getTargetMachine().getInstrInfo());
+
+ // Thread synchronization.
case X86::MONITOR:
- return EmitMonitor(MI, BB);
+ return EmitMonitor(MI, BB, getTargetMachine().getInstrInfo(), Subtarget);
- // Atomic Lowering.
- case X86::ATOMAND32:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
- X86::AND32ri, X86::MOV32rm,
- X86::LCMPXCHG32,
- X86::NOT32r, X86::EAX,
- &X86::GR32RegClass);
- case X86::ATOMOR32:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
- X86::OR32ri, X86::MOV32rm,
- X86::LCMPXCHG32,
- X86::NOT32r, X86::EAX,
- &X86::GR32RegClass);
- case X86::ATOMXOR32:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
- X86::XOR32ri, X86::MOV32rm,
- X86::LCMPXCHG32,
- X86::NOT32r, X86::EAX,
- &X86::GR32RegClass);
- case X86::ATOMNAND32:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
- X86::AND32ri, X86::MOV32rm,
- X86::LCMPXCHG32,
- X86::NOT32r, X86::EAX,
- &X86::GR32RegClass, true);
- case X86::ATOMMIN32:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
- case X86::ATOMMAX32:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr);
- case X86::ATOMUMIN32:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr);
- case X86::ATOMUMAX32:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr);
+ // xbegin
+ case X86::XBEGIN:
+ return EmitXBegin(MI, BB, getTargetMachine().getInstrInfo());
+ // Atomic Lowering.
+ case X86::ATOMAND8:
case X86::ATOMAND16:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
- X86::AND16ri, X86::MOV16rm,
- X86::LCMPXCHG16,
- X86::NOT16r, X86::AX,
- &X86::GR16RegClass);
+ case X86::ATOMAND32:
+ case X86::ATOMAND64:
+ // Fall through
+ case X86::ATOMOR8:
case X86::ATOMOR16:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
- X86::OR16ri, X86::MOV16rm,
- X86::LCMPXCHG16,
- X86::NOT16r, X86::AX,
- &X86::GR16RegClass);
+ case X86::ATOMOR32:
+ case X86::ATOMOR64:
+ // Fall through
case X86::ATOMXOR16:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
- X86::XOR16ri, X86::MOV16rm,
- X86::LCMPXCHG16,
- X86::NOT16r, X86::AX,
- &X86::GR16RegClass);
- case X86::ATOMNAND16:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
- X86::AND16ri, X86::MOV16rm,
- X86::LCMPXCHG16,
- X86::NOT16r, X86::AX,
- &X86::GR16RegClass, true);
- case X86::ATOMMIN16:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr);
- case X86::ATOMMAX16:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr);
- case X86::ATOMUMIN16:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr);
- case X86::ATOMUMAX16:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr);
-
- case X86::ATOMAND8:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
- X86::AND8ri, X86::MOV8rm,
- X86::LCMPXCHG8,
- X86::NOT8r, X86::AL,
- &X86::GR8RegClass);
- case X86::ATOMOR8:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
- X86::OR8ri, X86::MOV8rm,
- X86::LCMPXCHG8,
- X86::NOT8r, X86::AL,
- &X86::GR8RegClass);
case X86::ATOMXOR8:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
- X86::XOR8ri, X86::MOV8rm,
- X86::LCMPXCHG8,
- X86::NOT8r, X86::AL,
- &X86::GR8RegClass);
- case X86::ATOMNAND8:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
- X86::AND8ri, X86::MOV8rm,
- X86::LCMPXCHG8,
- X86::NOT8r, X86::AL,
- &X86::GR8RegClass, true);
- // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
- // This group is for 64-bit host.
- case X86::ATOMAND64:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
- X86::AND64ri32, X86::MOV64rm,
- X86::LCMPXCHG64,
- X86::NOT64r, X86::RAX,
- &X86::GR64RegClass);
- case X86::ATOMOR64:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
- X86::OR64ri32, X86::MOV64rm,
- X86::LCMPXCHG64,
- X86::NOT64r, X86::RAX,
- &X86::GR64RegClass);
+ case X86::ATOMXOR32:
case X86::ATOMXOR64:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
- X86::XOR64ri32, X86::MOV64rm,
- X86::LCMPXCHG64,
- X86::NOT64r, X86::RAX,
- &X86::GR64RegClass);
+ // Fall through
+ case X86::ATOMNAND8:
+ case X86::ATOMNAND16:
+ case X86::ATOMNAND32:
case X86::ATOMNAND64:
- return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
- X86::AND64ri32, X86::MOV64rm,
- X86::LCMPXCHG64,
- X86::NOT64r, X86::RAX,
- &X86::GR64RegClass, true);
- case X86::ATOMMIN64:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
+ // Fall through
+ case X86::ATOMMAX8:
+ case X86::ATOMMAX16:
+ case X86::ATOMMAX32:
case X86::ATOMMAX64:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
- case X86::ATOMUMIN64:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
+ // Fall through
+ case X86::ATOMMIN8:
+ case X86::ATOMMIN16:
+ case X86::ATOMMIN32:
+ case X86::ATOMMIN64:
+ // Fall through
+ case X86::ATOMUMAX8:
+ case X86::ATOMUMAX16:
+ case X86::ATOMUMAX32:
case X86::ATOMUMAX64:
- return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);
+ // Fall through
+ case X86::ATOMUMIN8:
+ case X86::ATOMUMIN16:
+ case X86::ATOMUMIN32:
+ case X86::ATOMUMIN64:
+ return EmitAtomicLoadArith(MI, BB);
// This group does 64-bit operations on a 32-bit host.
case X86::ATOMAND6432:
- return EmitAtomicBit6432WithCustomInserter(MI, BB,
- X86::AND32rr, X86::AND32rr,
- X86::AND32ri, X86::AND32ri,
- false);
case X86::ATOMOR6432:
- return EmitAtomicBit6432WithCustomInserter(MI, BB,
- X86::OR32rr, X86::OR32rr,
- X86::OR32ri, X86::OR32ri,
- false);
case X86::ATOMXOR6432:
- return EmitAtomicBit6432WithCustomInserter(MI, BB,
- X86::XOR32rr, X86::XOR32rr,
- X86::XOR32ri, X86::XOR32ri,
- false);
case X86::ATOMNAND6432:
- return EmitAtomicBit6432WithCustomInserter(MI, BB,
- X86::AND32rr, X86::AND32rr,
- X86::AND32ri, X86::AND32ri,
- true);
case X86::ATOMADD6432:
- return EmitAtomicBit6432WithCustomInserter(MI, BB,
- X86::ADD32rr, X86::ADC32rr,
- X86::ADD32ri, X86::ADC32ri,
- false);
case X86::ATOMSUB6432:
- return EmitAtomicBit6432WithCustomInserter(MI, BB,
- X86::SUB32rr, X86::SBB32rr,
- X86::SUB32ri, X86::SBB32ri,
- false);
+ case X86::ATOMMAX6432:
+ case X86::ATOMMIN6432:
+ case X86::ATOMUMAX6432:
+ case X86::ATOMUMIN6432:
case X86::ATOMSWAP6432:
- return EmitAtomicBit6432WithCustomInserter(MI, BB,
- X86::MOV32rr, X86::MOV32rr,
- X86::MOV32ri, X86::MOV32ri,
- false);
+ return EmitAtomicLoadArith6432(MI, BB);
+
case X86::VASTART_SAVE_XMM_REGS:
return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
case X86::VAARG_64:
return EmitVAARG64WithCustomInserter(MI, BB);
+
+ case X86::EH_SjLj_SetJmp32:
+ case X86::EH_SjLj_SetJmp64:
+ return emitEHSjLjSetJmp(MI, BB);
+
+ case X86::EH_SjLj_LongJmp32:
+ case X86::EH_SjLj_LongJmp64:
+ return emitEHSjLjLongJmp(MI, BB);
}
}
@@ -13333,12 +14267,12 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
}
-/// DCI, PerformTruncateCombine - Converts truncate operation to
+/// PerformTruncateCombine - Converts truncate operation to
/// a sequence of vector shuffle operations.
 /// It is possible when we truncate a 256-bit vector to a 128-bit vector.
-
-SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
- DAGCombinerInfo &DCI) const {
+static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
if (!DCI.isBeforeLegalizeOps())
return SDValue();
@@ -13530,7 +14464,7 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
// alignment is valid.
unsigned Align = LN0->getAlignment();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- unsigned NewAlign = TLI.getTargetData()->
+ unsigned NewAlign = TLI.getDataLayout()->
getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
@@ -13561,6 +14495,14 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
return NewOp;
SDValue InputVector = N->getOperand(0);
+  // Detect a conversion from MMX to i32 where the bitcast from x86mmx to
+  // v2i32 has a single use.
+ if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST &&
+ InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx &&
+ InputVector.hasOneUse() && N->getValueType(0) == MVT::i32)
+ return DAG.getNode(X86ISD::MMX_MOVD2W, InputVector.getDebugLoc(),
+ N->getValueType(0),
+ InputVector.getNode()->getOperand(0));
// Only operate on vectors of 4 elements, where the alternative shuffling
// gets to be more expensive.
@@ -13961,7 +14903,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
//
// where Op could be BRCOND or CMOV.
//
-static SDValue BoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
+static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
// Quit if not CMP and SUB with its value result used.
if (Cmp.getOpcode() != X86ISD::CMP &&
(Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
@@ -13997,40 +14939,55 @@ static SDValue BoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
if (SetCC.getOpcode() == ISD::ZERO_EXTEND)
SetCC = SetCC.getOperand(0);
- // Quit if not SETCC.
- // FIXME: So far we only handle the boolean value generated from SETCC. If
- // there is other ways to generate boolean values, we need handle them here
- // as well.
- if (SetCC.getOpcode() != X86ISD::SETCC)
- return SDValue();
-
- // Set the condition code or opposite one if necessary.
- CC = X86::CondCode(SetCC.getConstantOperandVal(0));
- if (needOppositeCond)
- CC = X86::GetOppositeBranchCondition(CC);
-
- return SetCC.getOperand(1);
-}
-
-static bool IsValidFCMOVCondition(X86::CondCode CC) {
- switch (CC) {
- default:
- return false;
- case X86::COND_B:
- case X86::COND_BE:
- case X86::COND_E:
- case X86::COND_P:
- case X86::COND_AE:
- case X86::COND_A:
- case X86::COND_NE:
- case X86::COND_NP:
- return true;
+ switch (SetCC.getOpcode()) {
+ case X86ISD::SETCC:
+ // Set the condition code or opposite one if necessary.
+ CC = X86::CondCode(SetCC.getConstantOperandVal(0));
+ if (needOppositeCond)
+ CC = X86::GetOppositeBranchCondition(CC);
+ return SetCC.getOperand(1);
+ case X86ISD::CMOV: {
+    // Check whether the false/true values are the canonical constants 0 and 1.
+ ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
+ ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
+ // Quit if true value is not a constant.
+ if (!TVal)
+ return SDValue();
+ // Quit if false value is not a constant.
+ if (!FVal) {
+      // A special case for RDRAND, which produces 0 when the false condition is found.
+ SDValue Op = SetCC.getOperand(0);
+ if (Op.getOpcode() != X86ISD::RDRAND)
+ return SDValue();
+ }
+ // Quit if false value is not the constant 0 or 1.
+ bool FValIsFalse = true;
+ if (FVal && FVal->getZExtValue() != 0) {
+ if (FVal->getZExtValue() != 1)
+ return SDValue();
+      // If FVal is 1, the opposite condition is needed.
+ needOppositeCond = !needOppositeCond;
+ FValIsFalse = false;
+ }
+ // Quit if TVal is not the constant opposite of FVal.
+ if (FValIsFalse && TVal->getZExtValue() != 1)
+ return SDValue();
+ if (!FValIsFalse && TVal->getZExtValue() != 0)
+ return SDValue();
+ CC = X86::CondCode(SetCC.getConstantOperandVal(2));
+ if (needOppositeCond)
+ CC = X86::GetOppositeBranchCondition(CC);
+ return SetCC.getOperand(3);
+ }
}
+
+ return SDValue();
}
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
DebugLoc DL = N->getDebugLoc();
// If the flag operand isn't dead, don't touch this CMOV.
@@ -14055,10 +15012,10 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
SDValue Flags;
- Flags = BoolTestSetCCCombine(Cond, CC);
+ Flags = checkBoolTestSetCCCombine(Cond, CC);
if (Flags.getNode() &&
// Extra check as FCMOV only supports a subset of X86 cond.
- (FalseOp.getValueType() != MVT::f80 || IsValidFCMOVCondition(CC))) {
+ (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
SDValue Ops[] = { FalseOp, TrueOp,
DAG.getConstant(CC, MVT::i8), Flags };
return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(),
@@ -14075,6 +15032,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
CC = X86::GetOppositeBranchCondition(CC);
std::swap(TrueC, FalseC);
+ std::swap(TrueOp, FalseOp);
}
// Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
@@ -14157,6 +15115,46 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
}
}
}
+
+ // Handle these cases:
+ // (select (x != c), e, c) -> select (x != c), e, x),
+ // (select (x == c), c, e) -> select (x == c), x, e)
+ // where the c is an integer constant, and the "select" is the combination
+ // of CMOV and CMP.
+ //
+  // The rationale for this change is that a conditional move from a constant
+  // needs two instructions, whereas a conditional move from a register needs
+  // only one.
+  //
+  // CAVEAT: Replacing a constant with a symbolic value may obscure some
+  // instruction-combining opportunities, so this optimization should be
+  // postponed as late as possible.
+ //
+ if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
+    // The DCI.xxxx checks above postpone the optimization as late as possible.
+
+ ConstantSDNode *CmpAgainst = 0;
+ if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
+ (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
+ dyn_cast<ConstantSDNode>(Cond.getOperand(0)) == 0) {
+
+ if (CC == X86::COND_NE &&
+ CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
+ CC = X86::GetOppositeBranchCondition(CC);
+ std::swap(TrueOp, FalseOp);
+ }
+
+ if (CC == X86::COND_E &&
+ CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
+ SDValue Ops[] = { FalseOp, Cond.getOperand(0),
+ DAG.getConstant(CC, MVT::i8), Cond };
+      return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops,
+ array_lengthof(Ops));
+ }
+ }
+ }
+
return SDValue();
}
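
A scalar sketch of the source pattern this block rewrites (the constant 42 is arbitrary):

    // Before the combine, 'x == 42 ? 42 : y' needs a MOV to materialize 42
    // for the CMOV. After rewriting the constant arm to 'x', the CMOV reads
    // a register that already holds 42 whenever the condition is true.
    int select_const(int x, int y) {
      return x == 42 ? 42 : y;  // becomes: x == 42 ? x : y
    }
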
@@ -14813,11 +15811,11 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
ISD::LoadExtType Ext = Ld->getExtensionType();
// If this is a vector EXT Load then attempt to optimize it using a
- // shuffle. We need SSE4 for the shuffles.
+  // shuffle. We need SSSE3 for the shuffles.
// TODO: It is possible to support ZExt by zeroing the undef values
// during the shuffle phase or after the shuffle.
if (RegVT.isVector() && RegVT.isInteger() &&
- Ext == ISD::EXTLOAD && Subtarget->hasSSE41()) {
+ Ext == ISD::EXTLOAD && Subtarget->hasSSSE3()) {
assert(MemVT != RegVT && "Cannot extend to the same type");
assert(MemVT.isVector() && "Must load a vector from memory");
@@ -15043,7 +16041,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
const Function *F = DAG.getMachineFunction().getFunction();
- bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
+ bool NoImplicitFloatOps = F->getFnAttributes().
+ hasAttribute(Attributes::NoImplicitFloat);
bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
&& Subtarget->hasSSE2();
if ((VT.isVector() ||
@@ -15315,6 +16314,29 @@ static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
+/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and
+/// X86ISD::FMAX nodes.
+static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
+ assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
+
+  // Only perform this optimization when unsafe FP math is enabled.
+ if (!DAG.getTarget().Options.UnsafeFPMath)
+ return SDValue();
+
+  // If we run in unsafe-math mode, convert the FMIN and FMAX nodes into
+  // FMINC and FMAXC, which are commutative operations.
+ unsigned NewOp = 0;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("unknown opcode");
+ case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
+ case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
+ }
+
+ return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0),
+ N->getOperand(0), N->getOperand(1));
+}
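
The reason FMIN/FMAX cannot be commuted by default is the operand-order semantics of the underlying instructions; a scalar sketch:

    // MINSS/MAXSS return the second operand when an input is NaN or when the
    // inputs are +0.0 and -0.0, so swapping operands can change the result.
    // Under unsafe FP math those cases may be ignored, and the combine above
    // retags the nodes as the commutative FMINC/FMAXC.
    float fmin_demo(float a, float b) {
      return a < b ? a : b;  // lowers to MINSS; operand order is significant
    }
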
+
/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
// FAND(0.0, x) -> 0.0
@@ -15420,8 +16442,13 @@ static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0);
+ // Let legalize expand this if it isn't a legal type yet.
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+ return SDValue();
+
EVT ScalarVT = VT.getScalarType();
- if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget->hasFMA())
+ if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
+ (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
return SDValue();
SDValue A = N->getOperand(0);
@@ -15443,9 +16470,10 @@ static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
unsigned Opcode;
if (!NegMul)
- Opcode = (!NegC)? X86ISD::FMADD : X86ISD::FMSUB;
+ Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
else
- Opcode = (!NegC)? X86ISD::FNMADD : X86ISD::FNMSUB;
+ Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
+
return DAG.getNode(Opcode, dl, VT, A, B, C);
}
@@ -15542,24 +16570,51 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
+// Helper function for PerformSETCCCombine. It materializes "setb reg" as
+// "sbb reg,reg", since the latter can be extended without a zext and
+// produces an all-ones value, which is more useful than 0/1 in some cases.
+static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) {
+ return DAG.getNode(ISD::AND, DL, MVT::i8,
+ DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
+ DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
+ DAG.getConstant(1, MVT::i8));
+}
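
The machine sequence this builds, shown as comments on an equivalent scalar function (the assembly is the expected shape, not verified output):

    unsigned below(unsigned a, unsigned b) {
      // cmp  a, b      ; CF = 1 when a < b
      // sbb  eax, eax  ; eax = CF ? 0xFFFFFFFF : 0   (the SETCC_CARRY node)
      // and  eax, 1    ; eax = CF ? 1 : 0            (the AND added above)
      return a < b;
    }
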
+
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
-static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
DebugLoc DL = N->getDebugLoc();
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
SDValue EFLAGS = N->getOperand(1);
+ if (CC == X86::COND_A) {
+ // Try to convert COND_A into COND_B in an attempt to facilitate
+ // materializing "setb reg".
+ //
+ // Do not flip "e > c", where "c" is a constant, because Cmp instruction
+ // cannot take an immediate as its first operand.
+ //
+ if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
+ EFLAGS.getValueType().isInteger() &&
+ !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
+ SDValue NewSub = DAG.getNode(X86ISD::SUB, EFLAGS.getDebugLoc(),
+ EFLAGS.getNode()->getVTList(),
+ EFLAGS.getOperand(1), EFLAGS.getOperand(0));
+ SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
+ return MaterializeSETB(DL, NewEFLAGS, DAG);
+ }
+ }
+
// Materialize "setb reg" as "sbb reg,reg", since it can be extended without
// a zext and produces an all-ones bit which is more useful than 0/1 in some
// cases.
if (CC == X86::COND_B)
- return DAG.getNode(ISD::AND, DL, MVT::i8,
- DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
- DAG.getConstant(CC, MVT::i8), EFLAGS),
- DAG.getConstant(1, MVT::i8));
+ return MaterializeSETB(DL, EFLAGS, DAG);
SDValue Flags;
- Flags = BoolTestSetCCCombine(EFLAGS, CC);
+ Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
if (Flags.getNode()) {
SDValue Cond = DAG.getConstant(CC, MVT::i8);
return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
@@ -15581,7 +16636,7 @@ static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
SDValue Flags;
- Flags = BoolTestSetCCCombine(EFLAGS, CC);
+ Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
if (Flags.getNode()) {
SDValue Cond = DAG.getConstant(CC, MVT::i8);
return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
@@ -15591,23 +16646,6 @@ static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG) {
- SDValue Op0 = N->getOperand(0);
- EVT InVT = Op0->getValueType(0);
-
- // UINT_TO_FP(v4i8) -> SINT_TO_FP(ZEXT(v4i8 to v4i32))
- if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
- DebugLoc dl = N->getDebugLoc();
- MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
- SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
- // Notice that we use SINT_TO_FP because we know that the high bits
- // are zero and SINT_TO_FP is better supported by the hardware.
- return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
- }
-
- return SDValue();
-}
-
static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
const X86TargetLowering *XTLI) {
SDValue Op0 = N->getOperand(0);
@@ -15639,20 +16677,6 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformFP_TO_SINTCombine(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
-
- // v4i8 = FP_TO_SINT() -> v4i8 = TRUNCATE (V4i32 = FP_TO_SINT()
- if (VT == MVT::v8i8 || VT == MVT::v4i8) {
- DebugLoc dl = N->getDebugLoc();
- MVT DstVT = VT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
- SDValue I = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, N->getOperand(0));
- return DAG.getNode(ISD::TRUNCATE, dl, VT, I);
- }
-
- return SDValue();
-}
-
// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
X86TargetLowering::DAGCombinerInfo &DCI) {
@@ -15767,6 +16791,21 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
return OptimizeConditionalInDecrement(N, DAG);
}
+/// performVZEXTCombine - Performs combines on X86ISD::VZEXT nodes.
+static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
+  // (vzext (bitcast (vzext x))) -> (vzext x)
+ SDValue In = N->getOperand(0);
+ while (In.getOpcode() == ISD::BITCAST)
+ In = In.getOperand(0);
+
+ if (In.getOpcode() != X86ISD::VZEXT)
+ return SDValue();
+
+  return DAG.getNode(X86ISD::VZEXT, N->getDebugLoc(), N->getValueType(0),
+                     In.getOperand(0));
+}
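
A scalar analogue of the fold, for intuition; the vector form additionally looks through intervening bitcasts:

    unsigned widen(unsigned char x) {
      unsigned short t = x;  // inner zero-extension
      return t;              // (zext (zext x)) folds to a single (zext x)
    }
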
+
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -15776,7 +16815,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
case ISD::VSELECT:
case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget);
- case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
+ case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
@@ -15789,23 +16828,24 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
- case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG);
case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
- case ISD::FP_TO_SINT: return PerformFP_TO_SINTCombine(N, DAG);
case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
case X86ISD::FXOR:
case X86ISD::FOR: return PerformFORCombine(N, DAG);
+ case X86ISD::FMIN:
+ case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
case X86ISD::FAND: return PerformFANDCombine(N, DAG);
case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
- case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI);
+    case ISD::TRUNCATE:       return PerformTruncateCombine(N, DAG, DCI, Subtarget);
case ISD::SETCC: return PerformISDSETCCCombine(N, DAG);
- case X86ISD::SETCC: return PerformSETCCCombine(N, DAG);
+ case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
+ case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
case X86ISD::PALIGN:
case X86ISD::UNPCKH:
@@ -16233,7 +17273,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
return;
case 'K':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- if ((int8_t)C->getSExtValue() == C->getSExtValue()) {
+ if (isInt<8>(C->getSExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
break;
}
@@ -16558,3 +17598,207 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return Res;
}
+
+//===----------------------------------------------------------------------===//
+//
+// X86 cost model.
+//
+//===----------------------------------------------------------------------===//
+
+struct X86CostTblEntry {
+ int ISD;
+ MVT Type;
+ unsigned Cost;
+};
+
+static int
+FindInTable(const X86CostTblEntry *Tbl, unsigned len, int ISD, MVT Ty) {
+ for (unsigned int i = 0; i < len; ++i)
+ if (Tbl[i].ISD == ISD && Tbl[i].Type == Ty)
+ return i;
+
+ // Could not find an entry.
+ return -1;
+}
+
+struct X86TypeConversionCostTblEntry {
+ int ISD;
+ MVT Dst;
+ MVT Src;
+ unsigned Cost;
+};
+
+static int
+FindInConvertTable(const X86TypeConversionCostTblEntry *Tbl, unsigned len,
+ int ISD, MVT Dst, MVT Src) {
+ for (unsigned int i = 0; i < len; ++i)
+ if (Tbl[i].ISD == ISD && Tbl[i].Src == Src && Tbl[i].Dst == Dst)
+ return i;
+
+ // Could not find an entry.
+ return -1;
+}
+
+unsigned
+X86VectorTargetTransformInfo::getArithmeticInstrCost(unsigned Opcode,
+ Type *Ty) const {
+ // Legalize the type.
+ std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Ty);
+
+ int ISD = InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ const X86Subtarget &ST = TLI->getTargetMachine().getSubtarget<X86Subtarget>();
+
+ static const X86CostTblEntry AVX1CostTable[] = {
+ // We don't have to scalarize unsupported ops. We can issue two half-sized
+ // operations and we only need to extract the upper YMM half.
+ // Two ops + 1 extract + 1 insert = 4.
+ { ISD::MUL, MVT::v8i32, 4 },
+ { ISD::SUB, MVT::v8i32, 4 },
+ { ISD::ADD, MVT::v8i32, 4 },
+ { ISD::MUL, MVT::v4i64, 4 },
+ { ISD::SUB, MVT::v4i64, 4 },
+ { ISD::ADD, MVT::v4i64, 4 },
+ };
+
+ // Look for AVX1 lowering tricks.
+ if (ST.hasAVX()) {
+ int Idx = FindInTable(AVX1CostTable, array_lengthof(AVX1CostTable), ISD,
+ LT.second);
+ if (Idx != -1)
+ return LT.first * AVX1CostTable[Idx].Cost;
+ }
+ // Fallback to the default implementation.
+ return VectorTargetTransformImpl::getArithmeticInstrCost(Opcode, Ty);
+}
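
A standalone re-creation of the lookup with one worked number, under the assumption that legalization splits v16i32 into two v8i32 operations, so LT.first == 2:

    #include <cassert>

    struct CostEntry { int ISD; int Type; unsigned Cost; };

    static int findInTable(const CostEntry *Tbl, unsigned Len, int ISD, int Ty) {
      for (unsigned i = 0; i < Len; ++i)
        if (Tbl[i].ISD == ISD && Tbl[i].Type == Ty)
          return (int)i;
      return -1;  // not found: the caller falls back to the default cost
    }

    int main() {
      enum { ADD = 1, V8I32 = 2 };  // stand-ins for the real ISD/MVT values
      const CostEntry Table[] = { { ADD, V8I32, 4 } };
      unsigned LTFirst = 2;         // v16i32 legalizes to two v8i32 ops
      int Idx = findInTable(Table, 1, ADD, V8I32);
      assert(Idx != -1);
      return (int)(LTFirst * Table[Idx].Cost);  // 2 * 4 = 8
    }
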
+
+unsigned
+X86VectorTargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const {
+ assert(Val->isVectorTy() && "This must be a vector type");
+
+ if (Index != -1U) {
+ // Legalize the type.
+ std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Val);
+
+ // This type is legalized to a scalar type.
+ if (!LT.second.isVector())
+ return 0;
+
+ // The type may be split. Normalize the index to the new type.
+ unsigned Width = LT.second.getVectorNumElements();
+ Index = Index % Width;
+
+    // Floating-point scalars are already located at index #0.
+ if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
+ return 0;
+ }
+
+ return VectorTargetTransformImpl::getVectorInstrCost(Opcode, Val, Index);
+}
+
+unsigned X86VectorTargetTransformInfo::getCmpSelInstrCost(unsigned Opcode,
+ Type *ValTy,
+ Type *CondTy) const {
+ // Legalize the type.
+ std::pair<unsigned, MVT> LT = getTypeLegalizationCost(ValTy);
+
+ MVT MTy = LT.second;
+
+ int ISD = InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ const X86Subtarget &ST =
+ TLI->getTargetMachine().getSubtarget<X86Subtarget>();
+
+ static const X86CostTblEntry SSE42CostTbl[] = {
+ { ISD::SETCC, MVT::v2f64, 1 },
+ { ISD::SETCC, MVT::v4f32, 1 },
+ { ISD::SETCC, MVT::v2i64, 1 },
+ { ISD::SETCC, MVT::v4i32, 1 },
+ { ISD::SETCC, MVT::v8i16, 1 },
+ { ISD::SETCC, MVT::v16i8, 1 },
+ };
+
+ static const X86CostTblEntry AVX1CostTbl[] = {
+ { ISD::SETCC, MVT::v4f64, 1 },
+ { ISD::SETCC, MVT::v8f32, 1 },
+ // AVX1 does not support 8-wide integer compare.
+ { ISD::SETCC, MVT::v4i64, 4 },
+ { ISD::SETCC, MVT::v8i32, 4 },
+ { ISD::SETCC, MVT::v16i16, 4 },
+ { ISD::SETCC, MVT::v32i8, 4 },
+ };
+
+ static const X86CostTblEntry AVX2CostTbl[] = {
+ { ISD::SETCC, MVT::v4i64, 1 },
+ { ISD::SETCC, MVT::v8i32, 1 },
+ { ISD::SETCC, MVT::v16i16, 1 },
+ { ISD::SETCC, MVT::v32i8, 1 },
+ };
+
+ if (ST.hasSSE42()) {
+ int Idx = FindInTable(SSE42CostTbl, array_lengthof(SSE42CostTbl), ISD, MTy);
+ if (Idx != -1)
+ return LT.first * SSE42CostTbl[Idx].Cost;
+ }
+
+ if (ST.hasAVX()) {
+ int Idx = FindInTable(AVX1CostTbl, array_lengthof(AVX1CostTbl), ISD, MTy);
+ if (Idx != -1)
+ return LT.first * AVX1CostTbl[Idx].Cost;
+ }
+
+ if (ST.hasAVX2()) {
+ int Idx = FindInTable(AVX2CostTbl, array_lengthof(AVX2CostTbl), ISD, MTy);
+ if (Idx != -1)
+ return LT.first * AVX2CostTbl[Idx].Cost;
+ }
+
+ return VectorTargetTransformImpl::getCmpSelInstrCost(Opcode, ValTy, CondTy);
+}
+
+unsigned X86VectorTargetTransformInfo::getCastInstrCost(unsigned Opcode,
+ Type *Dst,
+ Type *Src) const {
+ int ISD = InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ EVT SrcTy = TLI->getValueType(Src);
+ EVT DstTy = TLI->getValueType(Dst);
+
+ if (!SrcTy.isSimple() || !DstTy.isSimple())
+ return VectorTargetTransformImpl::getCastInstrCost(Opcode, Dst, Src);
+
+ const X86Subtarget &ST = TLI->getTargetMachine().getSubtarget<X86Subtarget>();
+
+ static const X86TypeConversionCostTblEntry AVXConversionTbl[] = {
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
+ { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 1 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 1 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 1 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 1 },
+ { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 1 },
+ { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 6 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 9 },
+ { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 },
+ };
+
+ if (ST.hasAVX()) {
+ int Idx = FindInConvertTable(AVXConversionTbl,
+ array_lengthof(AVXConversionTbl),
+ ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT());
+ if (Idx != -1)
+ return AVXConversionTbl[Idx].Cost;
+ }
+
+ return VectorTargetTransformImpl::getCastInstrCost(Opcode, Dst, Src);
+}
+
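
The cost functions added above all follow one pattern: legalize the IR type to a machine value type, then probe per-subtarget static tables keyed by (ISD opcode, MVT). A rough standalone analogue of that lookup, with simplified stand-in types rather than the real FindInTable/MVT/X86CostTblEntry definitions (all names here are illustrative):

// Sketch of a linear cost-table scan in the style of FindInTable above.
#include <cstddef>
#include <iostream>

enum class ISDOp { SETCC };
enum class SimpleVT { v4i32, v8i32, v4f64 };

struct CostTblEntry {
  ISDOp ISD;
  SimpleVT Type;
  unsigned Cost;
};

// Return the index of the matching (opcode, type) row, or -1 if the
// table has no entry, mirroring the -1 convention used above.
int FindInTable(const CostTblEntry *Tbl, std::size_t Len,
                ISDOp ISD, SimpleVT Ty) {
  for (std::size_t I = 0; I != Len; ++I)
    if (Tbl[I].ISD == ISD && Tbl[I].Type == Ty)
      return static_cast<int>(I);
  return -1;
}

int main() {
  static const CostTblEntry AVX1Tbl[] = {
    {ISDOp::SETCC, SimpleVT::v4f64, 1},
    {ISDOp::SETCC, SimpleVT::v8i32, 4}, // 8-wide int compare is split on AVX1
  };
  // sizeof trick plays the role of array_lengthof in the patch.
  int Idx = FindInTable(AVX1Tbl, sizeof(AVX1Tbl) / sizeof(AVX1Tbl[0]),
                        ISDOp::SETCC, SimpleVT::v8i32);
  if (Idx != -1)
    std::cout << "cost = " << AVX1Tbl[Idx].Cost << "\n"; // prints 4
  return 0;
}
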
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
index 896d067..465c603 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
@@ -19,6 +19,7 @@
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
@@ -142,6 +143,10 @@ namespace llvm {
/// mnemonic, so do I; blame Intel.
MOVDQ2Q,
+ /// MMX_MOVD2W - Copies a 32-bit value from the low word of an MMX
+ /// vector to a GPR.
+ MMX_MOVD2W,
+
/// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
/// i32, corresponds to X86::PEXTRB.
PEXTRB,
@@ -195,6 +200,9 @@ namespace llvm {
///
FMAX, FMIN,
+ /// FMAXC, FMINC - Commutative FMIN and FMAX.
+ FMAXC, FMINC,
+
/// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
/// approximation. Note that these typically require refinement
/// in order to obtain suitable precision.
@@ -214,6 +222,12 @@ namespace llvm {
// EH_RETURN - Exception Handling helpers.
EH_RETURN,
+ // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
+ EH_SJLJ_SETJMP,
+
+ // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
+ EH_SJLJ_LONGJMP,
+
/// TC_RETURN - Tail call return.
/// operand #0 chain
/// operand #1 callee (register or absolute)
@@ -227,9 +241,18 @@ namespace llvm {
// VSEXT_MOVL - Vector move low and sign extend.
VSEXT_MOVL,
+ // VZEXT - Vector integer zero-extend.
+ VZEXT,
+
+ // VSEXT - Vector integer signed-extend.
+ VSEXT,
+
// VFPEXT - Vector FP extend.
VFPEXT,
+ // VFPROUND - Vector FP round.
+ VFPROUND,
+
// VSHL, VSRL - 128-bit vector logical left / right shift
VSHLDQ, VSRLDQ,
@@ -345,6 +368,10 @@ namespace llvm {
ATOMXOR64_DAG,
ATOMAND64_DAG,
ATOMNAND64_DAG,
+ ATOMMAX64_DAG,
+ ATOMMIN64_DAG,
+ ATOMUMAX64_DAG,
+ ATOMUMIN64_DAG,
ATOMSWAP64_DAG,
// LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
@@ -458,10 +485,6 @@ namespace llvm {
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const;
- /// getStackPtrReg - Return the stack pointer register we are using: either
- /// ESP or RSP.
- unsigned getStackPtrReg() const { return X86StackPtr; }
-
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest are at
@@ -694,10 +717,7 @@ namespace llvm {
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
const X86RegisterInfo *RegInfo;
- const TargetData *TD;
-
- /// X86StackPtr - X86 physical register used as stack ptr.
- unsigned X86StackPtr;
+ const DataLayout *TD;
/// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
/// floating point ops.
@@ -741,6 +761,7 @@ namespace llvm {
bool isVarArg,
bool isCalleeStructRet,
bool isCallerStructRet,
+ Type *RetTy,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -760,15 +781,11 @@ namespace llvm {
SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
SelectionDAG &DAG) const;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
@@ -782,12 +799,15 @@ namespace llvm {
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerToBT(SDValue And, ISD::CondCode CC,
DebugLoc dl, SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
@@ -799,39 +819,26 @@ namespace llvm {
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
- SDValue PerformTruncateCombine(SDNode* N, SelectionDAG &DAG, DAGCombinerInfo &DCI) const;
- // Utility functions to help LowerVECTOR_SHUFFLE
- SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const;
+ // Utility functions to help LowerVECTOR_SHUFFLE & LowerBUILD_VECTOR
+ SDValue LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const;
SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;
+ SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const;
+ SDValue lowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
@@ -864,51 +871,17 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const;
- /// Utility function to emit string processing sse4.2 instructions
- /// that return in xmm0.
- /// This takes the instruction to expand, the associated machine basic
- /// block, the number of args, and whether or not the second arg is
- /// in memory or not.
- MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
- unsigned argNum, bool inMem) const;
-
- /// Utility functions to emit monitor and mwait instructions. These
- /// need to make sure that the arguments to the intrinsic are in the
- /// correct registers.
- MachineBasicBlock *EmitMonitor(MachineInstr *MI,
- MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;
-
- /// Utility function to emit atomic bitwise operations (and, or, xor).
- /// It takes the bitwise instruction to expand, the associated machine basic
- /// block, and the associated X86 opcodes for reg/reg and reg/imm.
- MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
- MachineInstr *BInstr,
- MachineBasicBlock *BB,
- unsigned regOpc,
- unsigned immOpc,
- unsigned loadOpc,
- unsigned cxchgOpc,
- unsigned notOpc,
- unsigned EAXreg,
- const TargetRegisterClass *RC,
- bool Invert = false) const;
-
- MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
- MachineInstr *BInstr,
- MachineBasicBlock *BB,
- unsigned regOpcL,
- unsigned regOpcH,
- unsigned immOpcL,
- unsigned immOpcH,
- bool Invert = false) const;
-
- /// Utility function to emit atomic min and max. It takes the min/max
- /// instruction to expand, the associated basic block, and the associated
- /// cmov opcode for moving the min or max value.
- MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
- MachineBasicBlock *BB,
- unsigned cmovOpc) const;
+ /// Utility function to emit atomic-load-arith operations (and, or, xor,
+ /// nand, max, min, umax, umin). It takes the corresponding instruction to
+ /// expand, the associated machine basic block, and the associated X86
+ /// opcodes for reg/reg.
+ MachineBasicBlock *EmitAtomicLoadArith(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
+
+ /// Utility function to emit atomic-load-arith operations (and, or, xor,
+ /// nand, add, sub, swap) for 64-bit operands on 32-bit target.
+ MachineBasicBlock *EmitAtomicLoadArith6432(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
// Utility function to emit the low-level va_arg code for X86-64.
MachineBasicBlock *EmitVAARG64WithCustomInserter(
@@ -936,6 +909,12 @@ namespace llvm {
MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
+
+ MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
@@ -953,6 +932,23 @@ namespace llvm {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
}
+
+ class X86VectorTargetTransformInfo : public VectorTargetTransformImpl {
+ public:
+ explicit X86VectorTargetTransformInfo(const TargetLowering *TL) :
+ VectorTargetTransformImpl(TL) {}
+
+ virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;
+
+ virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const;
+
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) const;
+
+ virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) const;
+ };
}
#endif // X86ISELLOWERING_H
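
For context on the EmitAtomicLoadArith hook declared above: x86 has no single instruction for atomic max/min, so such operations conceptually expand to a compare-exchange retry loop. A hedged sketch of that idea at the C++ level, using std::atomic instead of the custom-inserter machinery (a conceptual model only, not the patch's lowering):

// Conceptual model of an atomic-load-max expansion: retry with
// compare_exchange until the stored value is observed unchanged.
#include <algorithm>
#include <atomic>
#include <iostream>

int atomic_load_max(std::atomic<int> &Mem, int Val) {
  int Old = Mem.load();
  while (!Mem.compare_exchange_weak(Old, std::max(Old, Val))) {
    // Old was refreshed by compare_exchange_weak; recompute and retry.
  }
  return Old; // the prior memory value, like the pseudo's $dst
}

int main() {
  std::atomic<int> M{10};
  std::cout << atomic_load_max(M, 42) << " -> " << M.load() << "\n"; // 10 -> 42
  return 0;
}
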
diff --git a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
index d78264f..9e6f279 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -165,6 +165,33 @@ def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
}
+let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
+ usesCustomInserter = 1 in {
+ def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
+ "#EH_SJLJ_SETJMP32",
+ [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
+ Requires<[In32BitMode]>;
+ def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
+ "#EH_SJLJ_SETJMP64",
+ [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
+ Requires<[In64BitMode]>;
+ let isTerminator = 1 in {
+ def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
+ "#EH_SJLJ_LONGJMP32",
+ [(X86eh_sjlj_longjmp addr:$buf)]>,
+ Requires<[In32BitMode]>;
+ def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
+ "#EH_SJLJ_LONGJMP64",
+ [(X86eh_sjlj_longjmp addr:$buf)]>,
+ Requires<[In64BitMode]>;
+ }
+}
+
+let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
+ def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
+ "#EH_SjLj_Setup\t$dst", []>;
+}
+
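
The EH_SjLj pseudos above model setjmp/longjmp-style unwinding. As a reminder of the underlying C semantics this lowering builds on (plain ISO C++ below, nothing X86-specific):

// setjmp returns 0 on the initial call and the longjmp value when the
// saved context is resumed -- the two-return shape the pseudos model.
#include <csetjmp>
#include <iostream>

static std::jmp_buf Buf;

[[noreturn]] static void fail() {
  std::longjmp(Buf, 1); // resume at the setjmp site, returning 1
}

int main() {
  if (setjmp(Buf) == 0) {
    std::cout << "first pass\n";
    fail();
  } else {
    std::cout << "resumed via longjmp\n";
  }
  return 0;
}
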
//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//
@@ -230,25 +257,18 @@ def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
IIC_ALU_NONMEM>;
// Use sbb to materialize carry bit.
-let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
+let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
-// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
-// X86CodeEmitter.
-def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
- [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
- IIC_ALU_NONMEM>;
-def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
- [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
- IIC_ALU_NONMEM>,
- OpSize;
-def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
- [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
- IIC_ALU_NONMEM>;
-def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
- [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
- IIC_ALU_NONMEM>;
+def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
+ [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
+ [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
+ [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
+ [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // isCodeGenOnly
@@ -489,130 +509,74 @@ def CMOV_RFP80 : I<0, Pseudo,
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//
-// Atomic exchange, and, or, xor
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomInserter = 1 in {
-
-def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
- "#ATOMAND8 PSEUDO!",
- [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
-def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
- "#ATOMOR8 PSEUDO!",
- [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
-def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
- "#ATOMXOR8 PSEUDO!",
- [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
-def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
- "#ATOMNAND8 PSEUDO!",
- [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;
-
-def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMAND16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
-def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMOR16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
-def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMXOR16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
-def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMNAND16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
-def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
- "#ATOMMIN16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
-def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMMAX16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
-def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMUMIN16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
-def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMUMAX16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;
-
-
-def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMAND32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
-def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMOR32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
-def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMXOR32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
-def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMNAND32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
-def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
- "#ATOMMIN32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
-def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMMAX32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
-def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMUMIN32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
-def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMUMAX32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;
-
-
-
-def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMAND64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
-def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMOR64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
-def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMXOR64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
-def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMNAND64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
-def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
- "#ATOMMIN64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
-def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMMAX64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
-def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMUMIN64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
-def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMUMAX64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
+// Pseudo atomic instructions
+
+multiclass PSEUDO_ATOMIC_LOAD_BINOP<string mnemonic> {
+ let usesCustomInserter = 1, mayLoad = 1, mayStore = 1 in {
+ def #NAME#8 : I<0, Pseudo, (outs GR8:$dst),
+ (ins i8mem:$ptr, GR8:$val),
+ !strconcat(mnemonic, "8 PSEUDO!"), []>;
+ def #NAME#16 : I<0, Pseudo,(outs GR16:$dst),
+ (ins i16mem:$ptr, GR16:$val),
+ !strconcat(mnemonic, "16 PSEUDO!"), []>;
+ def #NAME#32 : I<0, Pseudo, (outs GR32:$dst),
+ (ins i32mem:$ptr, GR32:$val),
+ !strconcat(mnemonic, "32 PSEUDO!"), []>;
+ def #NAME#64 : I<0, Pseudo, (outs GR64:$dst),
+ (ins i64mem:$ptr, GR64:$val),
+ !strconcat(mnemonic, "64 PSEUDO!"), []>;
+ }
+}
+
+multiclass PSEUDO_ATOMIC_LOAD_BINOP_PATS<string name, string frag> {
+ def : Pat<(!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val),
+ (!cast<Instruction>(name # "8") addr:$ptr, GR8:$val)>;
+ def : Pat<(!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val),
+ (!cast<Instruction>(name # "16") addr:$ptr, GR16:$val)>;
+ def : Pat<(!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val),
+ (!cast<Instruction>(name # "32") addr:$ptr, GR32:$val)>;
+ def : Pat<(!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val),
+ (!cast<Instruction>(name # "64") addr:$ptr, GR64:$val)>;
}
-let Constraints = "$val1 = $dst1, $val2 = $dst2",
- Defs = [EFLAGS, EAX, EBX, ECX, EDX],
- Uses = [EAX, EBX, ECX, EDX],
- mayLoad = 1, mayStore = 1,
- usesCustomInserter = 1 in {
-def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMAND6432 PSEUDO!", []>;
-def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMOR6432 PSEUDO!", []>;
-def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMXOR6432 PSEUDO!", []>;
-def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMNAND6432 PSEUDO!", []>;
-def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMADD6432 PSEUDO!", []>;
-def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMSUB6432 PSEUDO!", []>;
-def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMSWAP6432 PSEUDO!", []>;
+// Atomic exchange, and, or, xor
+defm ATOMAND : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMAND">;
+defm ATOMOR : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMOR">;
+defm ATOMXOR : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMXOR">;
+defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMNAND">;
+defm ATOMMAX : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMAX">;
+defm ATOMMIN : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMIN">;
+defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMAX">;
+defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMIN">;
+
+defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMAND", "atomic_load_and">;
+defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMOR", "atomic_load_or">;
+defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMXOR", "atomic_load_xor">;
+defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMNAND", "atomic_load_nand">;
+defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMAX", "atomic_load_max">;
+defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMIN", "atomic_load_min">;
+defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMAX", "atomic_load_umax">;
+defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMIN", "atomic_load_umin">;
+
+multiclass PSEUDO_ATOMIC_LOAD_BINOP6432<string mnemonic> {
+ let usesCustomInserter = 1, mayLoad = 1, mayStore = 1 in
+ def #NAME#6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+ (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+ !strconcat(mnemonic, "6432 PSEUDO!"), []>;
}
+defm ATOMAND : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMAND">;
+defm ATOMOR : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMOR">;
+defm ATOMXOR : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMXOR">;
+defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMNAND">;
+defm ATOMADD : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMADD">;
+defm ATOMSUB : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSUB">;
+defm ATOMMAX : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMAX">;
+defm ATOMMIN : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMIN">;
+defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMAX">;
+defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMIN">;
+defm ATOMSWAP : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSWAP">;
+
//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//
@@ -624,7 +588,6 @@ def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
- "lock\n\t"
"or{l}\t{$zero, $dst|$dst, $zero}",
[], IIC_ALU_MEM>, Requires<[In32BitMode]>, LOCK;
@@ -644,72 +607,72 @@ let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {
def #NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
- !strconcat("lock\n\t", mnemonic, "{b}\t",
+ !strconcat(mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_NONMEM>, LOCK;
def #NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- !strconcat("lock\n\t", mnemonic, "{w}\t",
+ !strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_NONMEM>, OpSize, LOCK;
def #NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- !strconcat("lock\n\t", mnemonic, "{l}\t",
+ !strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_NONMEM>, LOCK;
def #NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- !strconcat("lock\n\t", mnemonic, "{q}\t",
+ !strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_NONMEM>, LOCK;
def #NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
- !strconcat("lock\n\t", mnemonic, "{b}\t",
+ !strconcat(mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_MEM>, LOCK;
def #NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
- !strconcat("lock\n\t", mnemonic, "{w}\t",
+ !strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [], IIC_ALU_MEM>, OpSize, LOCK;
def #NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
- !strconcat("lock\n\t", mnemonic, "{l}\t",
+ !strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_MEM>, LOCK;
def #NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
- !strconcat("lock\n\t", mnemonic, "{q}\t",
+ !strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_MEM>, LOCK;
def #NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
- !strconcat("lock\n\t", mnemonic, "{w}\t",
+ !strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [], IIC_ALU_MEM>, OpSize, LOCK;
def #NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
- !strconcat("lock\n\t", mnemonic, "{l}\t",
+ !strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_MEM>, LOCK;
def #NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
- !strconcat("lock\n\t", mnemonic, "{q}\t",
+ !strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
[], IIC_ALU_MEM>, LOCK;
@@ -724,107 +687,117 @@ defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
// Optimized codegen when the non-memory output is not used.
+multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
+ string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {
-def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
- "lock\n\t"
- "inc{b}\t$dst", [], IIC_UNARY_MEM>, LOCK;
-def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
- "lock\n\t"
- "inc{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK;
-def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
- "lock\n\t"
- "inc{l}\t$dst", [], IIC_UNARY_MEM>, LOCK;
-def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
- "lock\n\t"
- "inc{q}\t$dst", [], IIC_UNARY_MEM>, LOCK;
-
-def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
- "lock\n\t"
- "dec{b}\t$dst", [], IIC_UNARY_MEM>, LOCK;
-def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
- "lock\n\t"
- "dec{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK;
-def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
- "lock\n\t"
- "dec{l}\t$dst", [], IIC_UNARY_MEM>, LOCK;
-def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
- "lock\n\t"
- "dec{q}\t$dst", [], IIC_UNARY_MEM>, LOCK;
+def #NAME#8m : I<Opc8, Form, (outs), (ins i8mem :$dst),
+ !strconcat(mnemonic, "{b}\t$dst"),
+ [], IIC_UNARY_MEM>, LOCK;
+def #NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
+ !strconcat(mnemonic, "{w}\t$dst"),
+ [], IIC_UNARY_MEM>, OpSize, LOCK;
+def #NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
+ !strconcat(mnemonic, "{l}\t$dst"),
+ [], IIC_UNARY_MEM>, LOCK;
+def #NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
+ !strconcat(mnemonic, "{q}\t$dst"),
+ [], IIC_UNARY_MEM>, LOCK;
+}
}
-// Atomic compare and swap.
-let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
- isCodeGenOnly = 1 in
-def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
- "lock\n\t"
- "cmpxchg8b\t$ptr",
- [(X86cas8 addr:$ptr)], IIC_CMPX_LOCK_8B>, TB, LOCK;
+defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
+defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;
-let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
- isCodeGenOnly = 1 in
-def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
- "lock\n\t"
- "cmpxchg16b\t$ptr",
- [(X86cas16 addr:$ptr)], IIC_CMPX_LOCK_16B>, TB, LOCK,
- Requires<[HasCmpxchg16b]>;
-
-let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in {
-def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
- "lock\n\t"
- "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR8:$swap, 1)], IIC_CMPX_LOCK_8>, TB, LOCK;
+// Atomic compare and swap.
+multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
+ SDPatternOperator frag, X86MemOperand x86memop,
+ InstrItinClass itin> {
+let isCodeGenOnly = 1 in {
+ def #NAME# : I<Opc, Form, (outs), (ins x86memop:$ptr),
+ !strconcat(mnemonic, "\t$ptr"),
+ [(frag addr:$ptr)], itin>, TB, LOCK;
+}
}
-let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in {
-def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
- "lock\n\t"
- "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR16:$swap, 2)], IIC_CMPX_LOCK>, TB, OpSize, LOCK;
+multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
+ string mnemonic, SDPatternOperator frag,
+ InstrItinClass itin8, InstrItinClass itin> {
+let isCodeGenOnly = 1 in {
+ let Defs = [AL, EFLAGS], Uses = [AL] in
+ def #NAME#8 : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
+ !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
+ [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
+ let Defs = [AX, EFLAGS], Uses = [AX] in
+ def #NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
+ !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
+ [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize, LOCK;
+ let Defs = [EAX, EFLAGS], Uses = [EAX] in
+ def #NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
+ !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
+ [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, LOCK;
+ let Defs = [RAX, EFLAGS], Uses = [RAX] in
+ def #NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
+ !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
+ [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
+}
}
-let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in {
-def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
- "lock\n\t"
- "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR32:$swap, 4)], IIC_CMPX_LOCK>, TB, LOCK;
+let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in {
+defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
+ X86cas8, i64mem,
+ IIC_CMPX_LOCK_8B>;
}
-let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in {
-def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
- "lock\n\t"
- "cmpxchg{q}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR64:$swap, 8)], IIC_CMPX_LOCK>, TB, LOCK;
+let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
+ Predicates = [HasCmpxchg16b] in {
+defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
+ X86cas16, i128mem,
+ IIC_CMPX_LOCK_16B>, REX_W;
}
+defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
+ X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;
+
// Atomic exchange and add
-let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
-def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
- "lock\n\t"
- "xadd{b}\t{$val, $ptr|$ptr, $val}",
- [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))],
- IIC_XADD_LOCK_MEM8>,
- TB, LOCK;
-def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
- "lock\n\t"
- "xadd{w}\t{$val, $ptr|$ptr, $val}",
- [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))],
- IIC_XADD_LOCK_MEM>,
- TB, OpSize, LOCK;
-def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
- "lock\n\t"
- "xadd{l}\t{$val, $ptr|$ptr, $val}",
- [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))],
- IIC_XADD_LOCK_MEM>,
- TB, LOCK;
-def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
- "lock\n\t"
- "xadd{q}\t{$val, $ptr|$ptr, $val}",
- [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))],
- IIC_XADD_LOCK_MEM>,
- TB, LOCK;
+multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
+ string frag,
+ InstrItinClass itin8, InstrItinClass itin> {
+ let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
+ def #NAME#8 : I<opc8, MRMSrcMem, (outs GR8:$dst),
+ (ins GR8:$val, i8mem:$ptr),
+ !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
+ [(set GR8:$dst,
+ (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
+ itin8>;
+ def #NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$val, i16mem:$ptr),
+ !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR16:$dst,
+ (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
+ itin>, OpSize;
+ def #NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$val, i32mem:$ptr),
+ !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR32:$dst,
+ (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
+ itin>;
+ def #NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$val, i64mem:$ptr),
+ !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR64:$dst,
+ (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
+ itin>;
+ }
}
+defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
+ IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
+ TB, LOCK;
+
def ACQUIRE_MOV8rm : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
"#ACQUIRE_MOV PSEUDO!",
[(set GR8:$dst, (atomic_load_8 addr:$src))]>;
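
The LXADD family defined just above wraps lock xadd, whose semantics are an atomic fetch-and-add that returns the prior memory value. In portable C++ terms (an illustration of the contract, not of this TableGen code):

// std::atomic::fetch_add maps to lock xadd on x86; it returns the value
// that was in memory before the addition, like LXADD's $dst output.
#include <atomic>
#include <iostream>

int main() {
  std::atomic<int> Counter{5};
  int Prev = Counter.fetch_add(3);
  std::cout << Prev << " -> " << Counter.load() << "\n"; // 5 -> 8
  return 0;
}
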
@@ -1024,7 +997,24 @@ def : Pat<(X86call (i64 tglobaladdr:$dst)),
def : Pat<(X86call (i64 texternalsym:$dst)),
(CALL64pcrel32 texternalsym:$dst)>;
-// tailcall stuff
+// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
+// can never use callee-saved registers. That is the purpose of the GR64_TC
+// register classes.
+//
+// The only volatile register that is never used by the calling convention is
+// %r11. This happens when calling a vararg function with 6 arguments.
+//
+// Match an X86tcret that uses less than 7 volatile registers.
+def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
+ (X86tcret node:$ptr, node:$off), [{
+ // X86tcret args: (*chain, ptr, imm, regs..., glue)
+ unsigned NumRegs = 0;
+ for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
+ if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
+ return false;
+ return true;
+}]>;
+
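
A standalone restatement of the X86tcret_6regs predicate above: scan the operand list and reject the match once more than six register operands have been seen. The operand model below is invented for illustration; the real PatFrag walks SDNode operands and tests isa<RegisterSDNode>:

// Hypothetical operand model: true marks a register operand.
#include <iostream>
#include <vector>

bool usesAtMostSixRegs(const std::vector<bool> &IsRegOperand) {
  unsigned NumRegs = 0;
  for (bool IsReg : IsRegOperand)
    if (IsReg && ++NumRegs > 6)
      return false; // a 7th register operand disqualifies the match
  return true;
}

int main() {
  std::vector<bool> Six(6, true), Seven(7, true);
  std::cout << usesAtMostSixRegs(Six) << " "
            << usesAtMostSixRegs(Seven) << "\n"; // prints "1 0"
  return 0;
}
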
def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
(TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
Requires<[In32BitMode]>;
@@ -1048,7 +1038,9 @@ def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
(TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
Requires<[In64BitMode]>;
-def : Pat<(X86tcret (load addr:$dst), imm:$off),
+// Don't fold loads into X86tcret requiring more than 6 regs.
+// There wouldn't be enough scratch registers for base+index.
+def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
(TCRETURNmi64 addr:$dst, imm:$off)>,
Requires<[In64BitMode]>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrControl.td b/contrib/llvm/lib/Target/X86/X86InstrControl.td
index b0c27c8..bfe9541 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrControl.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrControl.td
@@ -16,15 +16,18 @@
//
// Return instructions.
+//
+// The X86retflag return instructions are variadic because we may add ST0 and
+// ST1 arguments when returning values on the x87 stack.
let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, FPForm = SpecialFP in {
- def RET : I <0xC3, RawFrm, (outs), (ins),
+ def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
"ret",
[(X86retflag 0)], IIC_RET>;
def RETW : I <0xC3, RawFrm, (outs), (ins),
"ret{w}",
[], IIC_RET>, OpSize;
- def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
+ def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
"ret\t$amt",
[(X86retflag timm:$amt)], IIC_RET_IMM>;
def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFMA.td b/contrib/llvm/lib/Target/X86/X86InstrFMA.td
index 265b4bb..959d91a 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFMA.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFMA.td
@@ -16,243 +16,180 @@
//===----------------------------------------------------------------------===//
let Constraints = "$src1 = $dst" in {
-multiclass fma3p_rm<bits<8> opc, string OpcodeStr> {
-let neverHasSideEffects = 1 in {
- def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, VR128:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
- let mayLoad = 1 in
- def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, f128mem:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
- def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, VR256:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
- let mayLoad = 1 in
- def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, f256mem:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
-} // neverHasSideEffects = 1
-}
-
-// Intrinsic for 213 pattern
-multiclass fma3p_rm_int<bits<8> opc, string OpcodeStr,
- PatFrag MemFrag128, PatFrag MemFrag256,
- Intrinsic Int128, Intrinsic Int256, SDNode Op213,
- ValueType OpVT128, ValueType OpVT256> {
- def r_Int : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, VR128:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst, (Int128 VR128:$src2, VR128:$src1,
- VR128:$src3))]>;
-
+multiclass fma3p_rm<bits<8> opc, string OpcodeStr,
+ PatFrag MemFrag128, PatFrag MemFrag256,
+ ValueType OpVT128, ValueType OpVT256,
+ SDPatternOperator Op = null_frag> {
+ let isCommutable = 1 in
def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst, (OpVT128 (Op213 VR128:$src2,
+ [(set VR128:$dst, (OpVT128 (Op VR128:$src2,
VR128:$src1, VR128:$src3)))]>;
- def m_Int : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, f128mem:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst, (Int128 VR128:$src2, VR128:$src1,
- (MemFrag128 addr:$src3)))]>;
-
+ let mayLoad = 1 in
def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, f128mem:$src3),
- !strconcat(OpcodeStr,
+ !strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst, (OpVT128 (Op213 VR128:$src2, VR128:$src1,
+ [(set VR128:$dst, (OpVT128 (Op VR128:$src2, VR128:$src1,
(MemFrag128 addr:$src3))))]>;
-
- def rY_Int : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, VR256:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR256:$dst, (Int256 VR256:$src2, VR256:$src1,
- VR256:$src3))]>;
-
+ let isCommutable = 1 in
def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, VR256:$src3),
- !strconcat(OpcodeStr,
+ !strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR256:$dst, (OpVT256 (Op213 VR256:$src2, VR256:$src1,
- VR256:$src3)))]>;
-
- def mY_Int : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, f256mem:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR256:$dst, (Int256 VR256:$src2, VR256:$src1,
- (MemFrag256 addr:$src3)))]>;
+ [(set VR256:$dst, (OpVT256 (Op VR256:$src2, VR256:$src1,
+ VR256:$src3)))]>, VEX_L;
+ let mayLoad = 1 in
def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, f256mem:$src3),
- !strconcat(OpcodeStr,
+ !strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR256:$dst,
- (OpVT256 (Op213 VR256:$src2, VR256:$src1,
- (MemFrag256 addr:$src3))))]>;
+ (OpVT256 (Op VR256:$src2, VR256:$src1,
+ (MemFrag256 addr:$src3))))]>, VEX_L;
}
} // Constraints = "$src1 = $dst"
multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
string OpcodeStr, string PackTy,
PatFrag MemFrag128, PatFrag MemFrag256,
- Intrinsic Int128, Intrinsic Int256, SDNode Op,
- ValueType OpTy128, ValueType OpTy256> {
- defm r213 : fma3p_rm_int <opc213, !strconcat(OpcodeStr,
- !strconcat("213", PackTy)), MemFrag128, MemFrag256,
- Int128, Int256, Op, OpTy128, OpTy256>;
- defm r132 : fma3p_rm <opc132,
- !strconcat(OpcodeStr, !strconcat("132", PackTy))>;
- defm r231 : fma3p_rm <opc231,
- !strconcat(OpcodeStr, !strconcat("231", PackTy))>;
+ SDNode Op, ValueType OpTy128, ValueType OpTy256> {
+ defm r213 : fma3p_rm<opc213,
+ !strconcat(OpcodeStr, !strconcat("213", PackTy)),
+ MemFrag128, MemFrag256, OpTy128, OpTy256, Op>;
+let neverHasSideEffects = 1 in {
+ defm r132 : fma3p_rm<opc132,
+ !strconcat(OpcodeStr, !strconcat("132", PackTy)),
+ MemFrag128, MemFrag256, OpTy128, OpTy256>;
+ defm r231 : fma3p_rm<opc231,
+ !strconcat(OpcodeStr, !strconcat("231", PackTy)),
+ MemFrag128, MemFrag256, OpTy128, OpTy256>;
+} // neverHasSideEffects = 1
}
// Fused Multiply-Add
let ExeDomain = SSEPackedSingle in {
defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", memopv4f32,
- memopv8f32, int_x86_fma_vfmadd_ps,
- int_x86_fma_vfmadd_ps_256, X86Fmadd,
- v4f32, v8f32>;
+ memopv8f32, X86Fmadd, v4f32, v8f32>;
defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", memopv4f32,
- memopv8f32, int_x86_fma_vfmsub_ps,
- int_x86_fma_vfmsub_ps_256, X86Fmsub,
- v4f32, v8f32>;
+ memopv8f32, X86Fmsub, v4f32, v8f32>;
defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps",
- memopv4f32, memopv8f32,
- int_x86_fma_vfmaddsub_ps,
- int_x86_fma_vfmaddsub_ps_256, X86Fmaddsub,
+ memopv4f32, memopv8f32, X86Fmaddsub,
v4f32, v8f32>;
defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps",
- memopv4f32, memopv8f32,
- int_x86_fma_vfmsubadd_ps,
- int_x86_fma_vfmaddsub_ps_256, X86Fmsubadd,
+ memopv4f32, memopv8f32, X86Fmsubadd,
v4f32, v8f32>;
}
let ExeDomain = SSEPackedDouble in {
defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", memopv2f64,
- memopv4f64, int_x86_fma_vfmadd_pd,
- int_x86_fma_vfmadd_pd_256, X86Fmadd, v2f64,
- v4f64>, VEX_W;
+ memopv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", memopv2f64,
- memopv4f64, int_x86_fma_vfmsub_pd,
- int_x86_fma_vfmsub_pd_256, X86Fmsub, v2f64,
- v4f64>, VEX_W;
+ memopv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd",
- memopv2f64, memopv4f64,
- int_x86_fma_vfmaddsub_pd,
- int_x86_fma_vfmaddsub_pd_256, X86Fmaddsub,
+ memopv2f64, memopv4f64, X86Fmaddsub,
v2f64, v4f64>, VEX_W;
defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd",
- memopv2f64, memopv4f64,
- int_x86_fma_vfmsubadd_pd,
- int_x86_fma_vfmsubadd_pd_256, X86Fmsubadd,
+ memopv2f64, memopv4f64, X86Fmsubadd,
v2f64, v4f64>, VEX_W;
}
// Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", memopv4f32,
- memopv8f32, int_x86_fma_vfnmadd_ps,
- int_x86_fma_vfnmadd_ps_256, X86Fnmadd, v4f32,
- v8f32>;
+ memopv8f32, X86Fnmadd, v4f32, v8f32>;
defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", memopv4f32,
- memopv8f32, int_x86_fma_vfnmsub_ps,
- int_x86_fma_vfnmsub_ps_256, X86Fnmsub, v4f32,
- v8f32>;
+ memopv8f32, X86Fnmsub, v4f32, v8f32>;
}
let ExeDomain = SSEPackedDouble in {
defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", memopv2f64,
- memopv4f64, int_x86_fma_vfnmadd_pd,
- int_x86_fma_vfnmadd_pd_256, X86Fnmadd, v2f64,
- v4f64>, VEX_W;
+ memopv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd",
- memopv2f64,
- memopv4f64, int_x86_fma_vfnmsub_pd,
- int_x86_fma_vfnmsub_pd_256, X86Fnmsub, v2f64,
+ memopv2f64, memopv4f64, X86Fnmsub, v2f64,
v4f64>, VEX_W;
}
let Constraints = "$src1 = $dst" in {
multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
- RegisterClass RC> {
-let neverHasSideEffects = 1 in {
- def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, RC:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
+ RegisterClass RC, ValueType OpVT, PatFrag mem_frag,
+ SDPatternOperator OpNode = null_frag> {
+ let isCommutable = 1 in
+ def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
let mayLoad = 1 in
- def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, x86memop:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
-} // neverHasSideEffects = 1
+ def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, x86memop:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src2, RC:$src1,
+ (mem_frag addr:$src3))))]>;
}
multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr, Operand memop,
- ComplexPattern mem_cpat, Intrinsic IntId,
- RegisterClass RC, SDNode OpNode, ValueType OpVT> {
+ ComplexPattern mem_cpat, Intrinsic IntId,
+ RegisterClass RC> {
+ let isCommutable = 1 in
def r_Int : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst, (IntId VR128:$src2, VR128:$src1,
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR128:$dst, (IntId VR128:$src2, VR128:$src1,
VR128:$src3))]>;
def m_Int : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, memop:$src3),
- !strconcat(OpcodeStr,
+ !strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR128:$dst,
(IntId VR128:$src2, VR128:$src1, mem_cpat:$src3))]>;
- def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, RC:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set RC:$dst,
- (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
- let mayLoad = 1 in
- def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, memop:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
}
} // Constraints = "$src1 = $dst"
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
- string OpStr, Intrinsic IntF32, Intrinsic IntF64,
- SDNode OpNode> {
- defm SSr132 : fma3s_rm<opc132, !strconcat(OpStr, "132ss"), f32mem, FR32>;
- defm SSr231 : fma3s_rm<opc231, !strconcat(OpStr, "231ss"), f32mem, FR32>;
- defm SDr132 : fma3s_rm<opc132, !strconcat(OpStr, "132sd"), f64mem, FR64>,
- VEX_W;
- defm SDr231 : fma3s_rm<opc231, !strconcat(OpStr, "231sd"), f64mem, FR64>,
- VEX_W;
- defm SSr213 : fma3s_rm_int <opc213, !strconcat(OpStr, "213ss"), ssmem,
- sse_load_f32, IntF32, FR32, OpNode, f32>;
- defm SDr213 : fma3s_rm_int <opc213, !strconcat(OpStr, "213sd"), sdmem,
- sse_load_f64, IntF64, FR64, OpNode, f64>, VEX_W;
+ string OpStr, string PackTy, Intrinsic Int,
+ SDNode OpNode, RegisterClass RC, ValueType OpVT,
+ X86MemOperand x86memop, Operand memop, PatFrag mem_frag,
+ ComplexPattern mem_cpat> {
+let neverHasSideEffects = 1 in {
+ defm r132 : fma3s_rm<opc132, !strconcat(OpStr, !strconcat("132", PackTy)),
+ x86memop, RC, OpVT, mem_frag>;
+ defm r231 : fma3s_rm<opc231, !strconcat(OpStr, !strconcat("231", PackTy)),
+ x86memop, RC, OpVT, mem_frag>;
+}
+
+defm r213 : fma3s_rm<opc213, !strconcat(OpStr, !strconcat("213", PackTy)),
+ x86memop, RC, OpVT, mem_frag, OpNode>,
+ fma3s_rm_int<opc213, !strconcat(OpStr, !strconcat("213", PackTy)),
+ memop, mem_cpat, Int, RC>;
+}
+
+multiclass fma3s<bits<8> opc132, bits<8> opc213, bits<8> opc231,
+ string OpStr, Intrinsic IntF32, Intrinsic IntF64,
+ SDNode OpNode> {
+ defm SS : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", IntF32, OpNode,
+ FR32, f32, f32mem, ssmem, loadf32, sse_load_f32>;
+ defm SD : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", IntF64, OpNode,
+ FR64, f64, f64mem, sdmem, loadf64, sse_load_f64>, VEX_W;
}
-defm VFMADD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", int_x86_fma_vfmadd_ss,
- int_x86_fma_vfmadd_sd, X86Fmadd>, VEX_LIG;
-defm VFMSUB : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", int_x86_fma_vfmsub_ss,
- int_x86_fma_vfmsub_sd, X86Fmsub>, VEX_LIG;
+defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", int_x86_fma_vfmadd_ss,
+ int_x86_fma_vfmadd_sd, X86Fmadd>, VEX_LIG;
+defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", int_x86_fma_vfmsub_ss,
+ int_x86_fma_vfmsub_sd, X86Fmsub>, VEX_LIG;
-defm VFNMADD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", int_x86_fma_vfnmadd_ss,
- int_x86_fma_vfnmadd_sd, X86Fnmadd>, VEX_LIG;
-defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss,
- int_x86_fma_vfnmsub_sd, X86Fnmsub>, VEX_LIG;
+defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", int_x86_fma_vfnmadd_ss,
+ int_x86_fma_vfnmadd_sd, X86Fnmadd>, VEX_LIG;
+defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss,
+ int_x86_fma_vfnmsub_sd, X86Fnmsub>, VEX_LIG;
//===----------------------------------------------------------------------===//
@@ -260,73 +197,102 @@ defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss,
//===----------------------------------------------------------------------===//
-multiclass fma4s<bits<8> opc, string OpcodeStr, Operand memop,
- ComplexPattern mem_cpat, Intrinsic Int> {
- def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, VR128:$src3),
+multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop, ValueType OpVT, SDNode OpNode,
+ PatFrag mem_frag> {
+ let isCommutable = 1 in
+ def rr : FMA4<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst,
- (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, MemOp4;
- def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, memop:$src3),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, MemOp4;
+ def rm : FMA4<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, x86memop:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst,
- (Int VR128:$src1, VR128:$src2, mem_cpat:$src3))]>, VEX_W, MemOp4;
- def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, memop:$src2, VR128:$src3),
+ [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
+ (mem_frag addr:$src3)))]>, VEX_W, MemOp4;
+ def mr : FMA4<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst,
- (Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>;
+ [(set RC:$dst,
+ (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>;
// For disassembler
let isCodeGenOnly = 1 in
- def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ def rr_REV : FMA4<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
}
-multiclass fma4p<bits<8> opc, string OpcodeStr,
- Intrinsic Int128, Intrinsic Int256,
+multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
+ ComplexPattern mem_cpat, Intrinsic Int> {
+ let isCommutable = 1 in
+ def rr_Int : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, MemOp4;
+ def rm_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, memop:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst, (Int VR128:$src1, VR128:$src2,
+ mem_cpat:$src3))]>, VEX_W, MemOp4;
+ def mr_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, memop:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>;
+}
+
+multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType OpVT128, ValueType OpVT256,
PatFrag ld_frag128, PatFrag ld_frag256> {
+ let isCommutable = 1 in
def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (Int128 VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, MemOp4;
+ (OpVT128 (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>,
+ VEX_W, MemOp4;
def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, f128mem:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst, (Int128 VR128:$src1, VR128:$src2,
+ [(set VR128:$dst, (OpNode VR128:$src1, VR128:$src2,
(ld_frag128 addr:$src3)))]>, VEX_W, MemOp4;
def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (Int128 VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>;
+ (OpNode VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>;
+ let isCommutable = 1 in
def rrY : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, VR256:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, VR256:$src2, VR256:$src3))]>, VEX_W, MemOp4;
+ (OpVT256 (OpNode VR256:$src1, VR256:$src2, VR256:$src3)))]>,
+ VEX_W, MemOp4, VEX_L;
def rmY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, f256mem:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR256:$dst, (Int256 VR256:$src1, VR256:$src2,
- (ld_frag256 addr:$src3)))]>, VEX_W, MemOp4;
+ [(set VR256:$dst, (OpNode VR256:$src1, VR256:$src2,
+ (ld_frag256 addr:$src3)))]>, VEX_W, MemOp4, VEX_L;
def mrY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, VR256:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR256:$dst,
- (Int256 VR256:$src1, (ld_frag256 addr:$src2), VR256:$src3))]>;
+ [(set VR256:$dst, (OpNode VR256:$src1,
+ (ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L;
// For disassembler
let isCodeGenOnly = 1 in {
def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
@@ -336,51 +302,65 @@ let isCodeGenOnly = 1 in {
def rrY_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, VR256:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
+ VEX_L;
} // isCodeGenOnly = 1
}
let Predicates = [HasFMA4] in {
-defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", ssmem, sse_load_f32,
- int_x86_fma_vfmadd_ss>;
-defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", sdmem, sse_load_f64,
- int_x86_fma_vfmadd_sd>;
-defm VFMADDPS4 : fma4p<0x68, "vfmaddps", int_x86_fma_vfmadd_ps,
- int_x86_fma_vfmadd_ps_256, memopv4f32, memopv8f32>;
-defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", int_x86_fma_vfmadd_pd,
- int_x86_fma_vfmadd_pd_256, memopv2f64, memopv4f64>;
-defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", ssmem, sse_load_f32,
- int_x86_fma_vfmsub_ss>;
-defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", sdmem, sse_load_f64,
- int_x86_fma_vfmsub_sd>;
-defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", int_x86_fma_vfmsub_ps,
- int_x86_fma_vfmsub_ps_256, memopv4f32, memopv8f32>;
-defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", int_x86_fma_vfmsub_pd,
- int_x86_fma_vfmsub_pd_256, memopv2f64, memopv4f64>;
-defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", ssmem, sse_load_f32,
- int_x86_fma_vfnmadd_ss>;
-defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", sdmem, sse_load_f64,
- int_x86_fma_vfnmadd_sd>;
-defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", int_x86_fma_vfnmadd_ps,
- int_x86_fma_vfnmadd_ps_256, memopv4f32, memopv8f32>;
-defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", int_x86_fma_vfnmadd_pd,
- int_x86_fma_vfnmadd_pd_256, memopv2f64, memopv4f64>;
-defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", ssmem, sse_load_f32,
- int_x86_fma_vfnmsub_ss>;
-defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", sdmem, sse_load_f64,
- int_x86_fma_vfnmsub_sd>;
-defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", int_x86_fma_vfnmsub_ps,
- int_x86_fma_vfnmsub_ps_256, memopv4f32, memopv8f32>;
-defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", int_x86_fma_vfnmsub_pd,
- int_x86_fma_vfnmsub_pd_256, memopv2f64, memopv4f64>;
-defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", int_x86_fma_vfmaddsub_ps,
- int_x86_fma_vfmaddsub_ps_256, memopv4f32, memopv8f32>;
-defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", int_x86_fma_vfmaddsub_pd,
- int_x86_fma_vfmaddsub_pd_256, memopv2f64, memopv4f64>;
-defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", int_x86_fma_vfmsubadd_ps,
- int_x86_fma_vfmsubadd_ps_256, memopv4f32, memopv8f32>;
-defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", int_x86_fma_vfmsubadd_pd,
- int_x86_fma_vfmsubadd_pd_256, memopv2f64, memopv4f64>;
+defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", FR32, f32mem, f32, X86Fmadd, loadf32>,
+ fma4s_int<0x6A, "vfmaddss", ssmem, sse_load_f32,
+ int_x86_fma_vfmadd_ss>;
+defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", FR64, f64mem, f64, X86Fmadd, loadf64>,
+ fma4s_int<0x6B, "vfmaddsd", sdmem, sse_load_f64,
+ int_x86_fma_vfmadd_sd>;
+defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86Fmsub, loadf32>,
+ fma4s_int<0x6E, "vfmsubss", ssmem, sse_load_f32,
+ int_x86_fma_vfmsub_ss>;
+defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86Fmsub, loadf64>,
+ fma4s_int<0x6F, "vfmsubsd", sdmem, sse_load_f64,
+ int_x86_fma_vfmsub_sd>;
+defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32,
+ X86Fnmadd, loadf32>,
+ fma4s_int<0x7A, "vfnmaddss", ssmem, sse_load_f32,
+ int_x86_fma_vfnmadd_ss>;
+defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64,
+ X86Fnmadd, loadf64>,
+ fma4s_int<0x7B, "vfnmaddsd", sdmem, sse_load_f64,
+ int_x86_fma_vfnmadd_sd>;
+defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32,
+ X86Fnmsub, loadf32>,
+ fma4s_int<0x7E, "vfnmsubss", ssmem, sse_load_f32,
+ int_x86_fma_vfnmsub_ss>;
+defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64,
+ X86Fnmsub, loadf64>,
+ fma4s_int<0x7F, "vfnmsubsd", sdmem, sse_load_f64,
+ int_x86_fma_vfnmsub_sd>;
+
+defm VFMADDPS4 : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
+ memopv4f32, memopv8f32>;
+defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
+ memopv2f64, memopv4f64>;
+defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32,
+ memopv4f32, memopv8f32>;
+defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
+ memopv2f64, memopv4f64>;
+defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
+ memopv4f32, memopv8f32>;
+defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
+ memopv2f64, memopv4f64>;
+defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
+ memopv4f32, memopv8f32>;
+defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
+ memopv2f64, memopv4f64>;
+defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32,
+ memopv4f32, memopv8f32>;
+defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64,
+ memopv2f64, memopv4f64>;
+defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32,
+ memopv4f32, memopv8f32>;
+defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64,
+ memopv2f64, memopv4f64>;
} // HasFMA4
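// A rough illustration of what one packed instantiation expands to, inferred
// from the fma4p body above:
//   defm VFMADDPS4 : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
//                          memopv4f32, memopv8f32>;
// yields VFMADDPS4rr/rm/mr for the 128-bit forms plus VFMADDPS4rrY/rmY/mrY
// for the 256-bit (VEX_L) forms, each matching (X86Fmadd ...) dag patterns
// directly rather than going through target intrinsics.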
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFormats.td b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
index 81b4f81..268e9fc 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
@@ -44,14 +44,15 @@ def RawFrmImm16 : Format<44>;
def MRM_D0 : Format<45>;
def MRM_D1 : Format<46>;
def MRM_D4 : Format<47>;
-def MRM_D8 : Format<48>;
-def MRM_D9 : Format<49>;
-def MRM_DA : Format<50>;
-def MRM_DB : Format<51>;
-def MRM_DC : Format<52>;
-def MRM_DD : Format<53>;
-def MRM_DE : Format<54>;
-def MRM_DF : Format<55>;
+def MRM_D5 : Format<48>;
+def MRM_D8 : Format<49>;
+def MRM_D9 : Format<50>;
+def MRM_DA : Format<51>;
+def MRM_DB : Format<52>;
+def MRM_DC : Format<53>;
+def MRM_DD : Format<54>;
+def MRM_DE : Format<55>;
+def MRM_DF : Format<56>;
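// Note that inserting MRM_D5 shifts every later Format encoding up by one
// (MRM_D8 moves from Format<48> to Format<49>, and so on). These values are
// only meaningful to the encoder/decoder, so any table keyed on them, such
// as the disassembler's format handling, presumably has to be regenerated in
// lockstep with this change.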
// ImmType - This specifies the immediate type used by an instruction. This is
// part of the ad-hoc solution used to emit machine instruction encodings by our
@@ -287,12 +288,14 @@ class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
let CodeSize = 3;
}
+def __xs : XS;
+
// SI - SSE 1 & 2 scalar instructions
class SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin> {
let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
- !if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2]));
+ !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2]));
// AVX instructions have a 'v' prefix in the mnemonic
let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
@@ -303,7 +306,7 @@ class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin> {
let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
- !if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2]));
+ !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2]));
// AVX instructions have a 'v' prefix in the mnemonic
let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
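// The __xs helper above lets the prefix test be written symbolically:
// __xs.Prefix evaluates to the value the XS prefix class carries (the 12
// that used to be hard-coded), so the !eq comparison keeps working even if
// the prefix numbering changes. The same idiom could be extended, e.g. a
// hypothetical "def __xd : XD;" for !eq(Prefix, __xd.Prefix) checks.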
@@ -314,18 +317,25 @@ class PI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
InstrItinClass itin, Domain d>
: I<o, F, outs, ins, asm, pattern, itin, d> {
let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
- !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]));
+ !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1]));
// AVX instructions have a 'v' prefix in the mnemonic
let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
}
+// MMXPI - SSE 1 & 2 packed instructions with MMX operands
+class MMXPI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
+ InstrItinClass itin, Domain d>
+ : I<o, F, outs, ins, asm, pattern, itin, d> {
+ let Predicates = !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]);
+}
+
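// Sketch of the Has*/Use* predicate split these templates rely on; the
// definitions live in X86InstrInfo.td and are assumed here, roughly:
//   def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
//   def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
// i.e. legacy SSE encodings are selected only when no AVX form is available,
// while the MMX templates keep plain Has* predicates because MMX
// instructions have no VEX-encoded equivalents to prefer.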
// PIi8 - SSE 1 & 2 packed instructions with immediate
class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin, Domain d>
: Ii8<o, F, outs, ins, asm, pattern, itin, d> {
let Predicates = !if(hasVEX_4VPrefix /* VEX */, [HasAVX],
- !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]));
+ !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1]));
// AVX instructions have a 'v' prefix in the mnemonic
let AsmString = !if(hasVEX_4VPrefix, !strconcat("v", asm), asm);
@@ -341,18 +351,18 @@ class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class SSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
- : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasSSE1]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE1]>;
class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
- : Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasSSE1]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE1]>;
class PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
- Requires<[HasSSE1]>;
+ Requires<[UseSSE1]>;
class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
- Requires<[HasSSE1]>;
+ Requires<[UseSSE1]>;
class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XS,
@@ -372,27 +382,31 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
// VSDI - SSE2 instructions with XD prefix in AVX form.
// VPDI - SSE2 instructions with TB and OpSize prefixes in AVX form.
+// MMXSDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix as well as
+// MMX operands.
+// MMXS2SIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix as well as
+// MMX operands.
class SDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
- : I<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[UseSSE2]>;
class SDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
- : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[UseSSE2]>;
class S2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
- : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasSSE2]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE2]>;
class S2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
- : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE2]>;
+ : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[UseSSE2]>;
class PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
- Requires<[HasSSE2]>;
+ Requires<[UseSSE2]>;
class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
- Requires<[HasSSE2]>;
+ Requires<[UseSSE2]>;
class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XD,
@@ -405,6 +419,12 @@ class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>, TB,
OpSize, Requires<[HasAVX]>;
+class MMXSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>;
+class MMXS2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE2]>;
// SSE3 Instruction Templates:
//
@@ -415,21 +435,23 @@ class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, XS,
- Requires<[HasSSE3]>;
+ Requires<[UseSSE3]>;
class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, XD,
- Requires<[HasSSE3]>;
+ Requires<[UseSSE3]>;
class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
- Requires<[HasSSE3]>;
+ Requires<[UseSSE3]>;
// SSSE3 Instruction Templates:
//
// SS38I - SSSE3 instructions with T8 prefix.
// SS3AI - SSSE3 instructions with TA prefix.
+// MMXSS38I - SSSE3 instructions with T8 prefix and MMX operands.
+// MMXSS3AI - SSSE3 instructions with TA prefix and MMX operands.
//
// Note: SSSE3 instructions have 64-bit and 128-bit versions. The 64-bit version
// uses the MMX registers. The 64-bit versions are grouped with the MMX
@@ -438,10 +460,18 @@ class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
- Requires<[HasSSSE3]>;
+ Requires<[UseSSSE3]>;
class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ Requires<[UseSSSE3]>;
+class MMXSS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
+ Requires<[HasSSSE3]>;
+class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
Requires<[HasSSSE3]>;
// SSE4.1 Instruction Templates:
@@ -452,11 +482,11 @@ class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
class SS48I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
- Requires<[HasSSE41]>;
+ Requires<[UseSSE41]>;
class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
- Requires<[HasSSE41]>;
+ Requires<[UseSSE41]>;
// SSE4.2 Instruction Templates:
//
@@ -464,9 +494,10 @@ class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class SS428I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
- Requires<[HasSSE42]>;
+ Requires<[UseSSE42]>;
// SS42FI - SSE 4.2 instructions with T8XD prefix.
+// NOTE: 'HasSSE42' is kept here since SS42FI is only used for CRC32 insns.
class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin>, T8XD, Requires<[HasSSE42]>;
@@ -475,7 +506,7 @@ class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm,
class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
- Requires<[HasSSE42]>;
+ Requires<[UseSSE42]>;
// AVX Instruction Templates:
// Instructions introduced in AVX (no SSE equivalent forms)
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 1db68c8..73ba001 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -29,6 +29,13 @@ def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
+
+// Commutative and Associative FMIN and FMAX.
+def X86fminc : SDNode<"X86ISD::FMINC", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def X86fmaxc : SDNode<"X86ISD::FMAXC", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+
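// Presumably these commutative variants exist because the hardware MINPS/
// MAXPS semantics are not commutative: when an operand is NaN (or the
// operands are +0.0/-0.0) the second source is returned, so plain X86fmin/
// X86fmax cannot be reassociated. FMINC/FMAXC model the relaxed form that
// can, for use when NaN and signed-zero ordering is known not to matter.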
def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
[SDNPCommutative, SDNPAssociative]>;
def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
@@ -73,18 +80,30 @@ def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzmovly : SDNode<"X86ISD::VZEXT_MOVL",
- SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisOpSmallerThanOp<1, 0> ]>>;
def X86vsmovl : SDNode<"X86ISD::VSEXT_MOVL",
- SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>;
+ SDTypeProfile<1, 1,
+ [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>;
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def X86vzext : SDNode<"X86ISD::VZEXT",
+ SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisInt<0>, SDTCisInt<1>]>>;
+
+def X86vsext : SDNode<"X86ISD::VSEXT",
+ SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisInt<0>, SDTCisInt<1>]>>;
+
def X86vfpext : SDNode<"X86ISD::VFPEXT",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisFP<0>, SDTCisFP<1>]>>;
+def X86vfpround: SDNode<"X86ISD::VFPROUND",
+ SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisFP<0>, SDTCisFP<1>]>>;
def X86vshldq : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>;
def X86vshrdq : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>;
@@ -175,8 +194,8 @@ def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>;
def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFma>;
def X86Fmsub : SDNode<"X86ISD::FMSUB", SDTFma>;
def X86Fnmsub : SDNode<"X86ISD::FNMSUB", SDTFma>;
-def X86Fmaddsub : SDNode<"X86ISD::FMSUBADD", SDTFma>;
-def X86Fmsubadd : SDNode<"X86ISD::FMADDSUB", SDTFma>;
+def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFma>;
+def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFma>;
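// The two defs above were previously cross-wired: X86Fmaddsub pointed at
// X86ISD::FMSUBADD and vice versa. After this swap, a pattern written as
// (X86Fmaddsub a, b, c) really selects an FMADDSUB opcode, which the FMA4
// VFMADDSUBPS4/VFMSUBADDPS4 definitions added in this patch depend on.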
def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
@@ -232,6 +251,10 @@ def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
+// 128-/256-bit extload pattern fragments
+def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
+def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;
+
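// These fragments describe a float vector load widened in place to double
// lanes. A plausible consumer pattern (hypothetical here, shown only to
// illustrate the intended use with cvtps2pd-style instructions):
//   def : Pat<(v2f64 (extloadv2f32 addr:$src)), (VCVTPS2PDrm addr:$src)>;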
// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
(store node:$val, node:$ptr), [{
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
index cca04e5..5a99ff0 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -561,6 +561,16 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VSQRTPSYr_Int, X86::VSQRTPSYm_Int, TB_ALIGN_32 },
{ X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE },
{ X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE },
+
+ // BMI/BMI2 foldable instructions
+ { X86::RORX32ri, X86::RORX32mi, 0 },
+ { X86::RORX64ri, X86::RORX64mi, 0 },
+ { X86::SARX32rr, X86::SARX32rm, 0 },
+ { X86::SARX64rr, X86::SARX64rm, 0 },
+ { X86::SHRX32rr, X86::SHRX32rm, 0 },
+ { X86::SHRX64rr, X86::SHRX64rm, 0 },
+ { X86::SHLX32rr, X86::SHLX32rm, 0 },
+ { X86::SHLX64rr, X86::SHLX64rm, 0 },
};
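// Each row above is { register-form opcode, memory-form opcode, TB_* flags },
// where a 0 flag field means no alignment requirement. As a rough example of
// what the new BMI/BMI2 rows enable: when the shifted source of a SARX32rr
// has been spilled, the folder can rewrite the instruction as SARX32rm and
// read the value straight from its stack slot instead of reloading it first.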
for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
@@ -1110,6 +1120,44 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VPUNPCKLWDYrr, X86::VPUNPCKLWDYrm, TB_ALIGN_32 },
{ X86::VPXORYrr, X86::VPXORYrm, TB_ALIGN_32 },
// FIXME: add AVX 256-bit foldable instructions
+
+ // FMA4 foldable patterns
+ { X86::VFMADDSS4rr, X86::VFMADDSS4mr, 0 },
+ { X86::VFMADDSD4rr, X86::VFMADDSD4mr, 0 },
+ { X86::VFMADDPS4rr, X86::VFMADDPS4mr, TB_ALIGN_16 },
+ { X86::VFMADDPD4rr, X86::VFMADDPD4mr, TB_ALIGN_16 },
+ { X86::VFMADDPS4rrY, X86::VFMADDPS4mrY, TB_ALIGN_32 },
+ { X86::VFMADDPD4rrY, X86::VFMADDPD4mrY, TB_ALIGN_32 },
+ { X86::VFNMADDSS4rr, X86::VFNMADDSS4mr, 0 },
+ { X86::VFNMADDSD4rr, X86::VFNMADDSD4mr, 0 },
+ { X86::VFNMADDPS4rr, X86::VFNMADDPS4mr, TB_ALIGN_16 },
+ { X86::VFNMADDPD4rr, X86::VFNMADDPD4mr, TB_ALIGN_16 },
+ { X86::VFNMADDPS4rrY, X86::VFNMADDPS4mrY, TB_ALIGN_32 },
+ { X86::VFNMADDPD4rrY, X86::VFNMADDPD4mrY, TB_ALIGN_32 },
+ { X86::VFMSUBSS4rr, X86::VFMSUBSS4mr, 0 },
+ { X86::VFMSUBSD4rr, X86::VFMSUBSD4mr, 0 },
+ { X86::VFMSUBPS4rr, X86::VFMSUBPS4mr, TB_ALIGN_16 },
+ { X86::VFMSUBPD4rr, X86::VFMSUBPD4mr, TB_ALIGN_16 },
+ { X86::VFMSUBPS4rrY, X86::VFMSUBPS4mrY, TB_ALIGN_32 },
+ { X86::VFMSUBPD4rrY, X86::VFMSUBPD4mrY, TB_ALIGN_32 },
+ { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4mr, 0 },
+ { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4mr, 0 },
+ { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4mr, TB_ALIGN_16 },
+ { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4mr, TB_ALIGN_16 },
+ { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4mrY, TB_ALIGN_32 },
+ { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4mrY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4mr, TB_ALIGN_16 },
+ { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4mr, TB_ALIGN_16 },
+ { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4mrY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4mrY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4mr, TB_ALIGN_16 },
+ { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4mr, TB_ALIGN_16 },
+ { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4mrY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4mrY, TB_ALIGN_32 },
+
+ // BMI/BMI2 foldable instructions
+ { X86::MULX32rr, X86::MULX32rm, 0 },
+ { X86::MULX64rr, X86::MULX64rm, 0 },
};
for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
@@ -1145,10 +1193,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VFMADDPDr132rY, X86::VFMADDPDr132mY, TB_ALIGN_32 },
{ X86::VFMADDPSr213rY, X86::VFMADDPSr213mY, TB_ALIGN_32 },
{ X86::VFMADDPDr213rY, X86::VFMADDPDr213mY, TB_ALIGN_32 },
- { X86::VFMADDPSr213r_Int, X86::VFMADDPSr213m_Int, TB_ALIGN_16 },
- { X86::VFMADDPDr213r_Int, X86::VFMADDPDr213m_Int, TB_ALIGN_16 },
- { X86::VFMADDPSr213rY_Int, X86::VFMADDPSr213mY_Int, TB_ALIGN_32 },
- { X86::VFMADDPDr213rY_Int, X86::VFMADDPDr213mY_Int, TB_ALIGN_32 },
{ X86::VFNMADDSSr231r, X86::VFNMADDSSr231m, 0 },
{ X86::VFNMADDSDr231r, X86::VFNMADDSDr231m, 0 },
@@ -1171,10 +1215,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_32 },
{ X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_32 },
{ X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_32 },
- { X86::VFNMADDPSr213r_Int, X86::VFNMADDPSr213m_Int, TB_ALIGN_16 },
- { X86::VFNMADDPDr213r_Int, X86::VFNMADDPDr213m_Int, TB_ALIGN_16 },
- { X86::VFNMADDPSr213rY_Int, X86::VFNMADDPSr213mY_Int, TB_ALIGN_32 },
- { X86::VFNMADDPDr213rY_Int, X86::VFNMADDPDr213mY_Int, TB_ALIGN_32 },
{ X86::VFMSUBSSr231r, X86::VFMSUBSSr231m, 0 },
{ X86::VFMSUBSDr231r, X86::VFMSUBSDr231m, 0 },
@@ -1197,10 +1237,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VFMSUBPDr132rY, X86::VFMSUBPDr132mY, TB_ALIGN_32 },
{ X86::VFMSUBPSr213rY, X86::VFMSUBPSr213mY, TB_ALIGN_32 },
{ X86::VFMSUBPDr213rY, X86::VFMSUBPDr213mY, TB_ALIGN_32 },
- { X86::VFMSUBPSr213r_Int, X86::VFMSUBPSr213m_Int, TB_ALIGN_16 },
- { X86::VFMSUBPDr213r_Int, X86::VFMSUBPDr213m_Int, TB_ALIGN_16 },
- { X86::VFMSUBPSr213rY_Int, X86::VFMSUBPSr213mY_Int, TB_ALIGN_32 },
- { X86::VFMSUBPDr213rY_Int, X86::VFMSUBPDr213mY_Int, TB_ALIGN_32 },
{ X86::VFNMSUBSSr231r, X86::VFNMSUBSSr231m, 0 },
{ X86::VFNMSUBSDr231r, X86::VFNMSUBSDr231m, 0 },
@@ -1223,10 +1259,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr132mY, TB_ALIGN_32 },
{ X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr213mY, TB_ALIGN_32 },
{ X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr213mY, TB_ALIGN_32 },
- { X86::VFNMSUBPSr213r_Int, X86::VFNMSUBPSr213m_Int, TB_ALIGN_16 },
- { X86::VFNMSUBPDr213r_Int, X86::VFNMSUBPDr213m_Int, TB_ALIGN_16 },
- { X86::VFNMSUBPSr213rY_Int, X86::VFNMSUBPSr213mY_Int, TB_ALIGN_32 },
- { X86::VFNMSUBPDr213rY_Int, X86::VFNMSUBPDr213mY_Int, TB_ALIGN_32 },
{ X86::VFMADDSUBPSr231r, X86::VFMADDSUBPSr231m, TB_ALIGN_16 },
{ X86::VFMADDSUBPDr231r, X86::VFMADDSUBPDr231m, TB_ALIGN_16 },
@@ -1240,10 +1272,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr132mY, TB_ALIGN_32 },
{ X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr213mY, TB_ALIGN_32 },
{ X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr213mY, TB_ALIGN_32 },
- { X86::VFMADDSUBPSr213r_Int, X86::VFMADDSUBPSr213m_Int, TB_ALIGN_16 },
- { X86::VFMADDSUBPDr213r_Int, X86::VFMADDSUBPDr213m_Int, TB_ALIGN_16 },
- { X86::VFMADDSUBPSr213rY_Int, X86::VFMADDSUBPSr213mY_Int, TB_ALIGN_32 },
- { X86::VFMADDSUBPDr213rY_Int, X86::VFMADDSUBPDr213mY_Int, TB_ALIGN_32 },
{ X86::VFMSUBADDPSr231r, X86::VFMSUBADDPSr231m, TB_ALIGN_16 },
{ X86::VFMSUBADDPDr231r, X86::VFMSUBADDPDr231m, TB_ALIGN_16 },
@@ -1257,10 +1285,40 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr132mY, TB_ALIGN_32 },
{ X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr213mY, TB_ALIGN_32 },
{ X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr213mY, TB_ALIGN_32 },
- { X86::VFMSUBADDPSr213r_Int, X86::VFMSUBADDPSr213m_Int, TB_ALIGN_16 },
- { X86::VFMSUBADDPDr213r_Int, X86::VFMSUBADDPDr213m_Int, TB_ALIGN_16 },
- { X86::VFMSUBADDPSr213rY_Int, X86::VFMSUBADDPSr213mY_Int, TB_ALIGN_32 },
- { X86::VFMSUBADDPDr213rY_Int, X86::VFMSUBADDPDr213mY_Int, TB_ALIGN_32 },
+
+ // FMA4 foldable patterns
+ { X86::VFMADDSS4rr, X86::VFMADDSS4rm, 0 },
+ { X86::VFMADDSD4rr, X86::VFMADDSD4rm, 0 },
+ { X86::VFMADDPS4rr, X86::VFMADDPS4rm, TB_ALIGN_16 },
+ { X86::VFMADDPD4rr, X86::VFMADDPD4rm, TB_ALIGN_16 },
+ { X86::VFMADDPS4rrY, X86::VFMADDPS4rmY, TB_ALIGN_32 },
+ { X86::VFMADDPD4rrY, X86::VFMADDPD4rmY, TB_ALIGN_32 },
+ { X86::VFNMADDSS4rr, X86::VFNMADDSS4rm, 0 },
+ { X86::VFNMADDSD4rr, X86::VFNMADDSD4rm, 0 },
+ { X86::VFNMADDPS4rr, X86::VFNMADDPS4rm, TB_ALIGN_16 },
+ { X86::VFNMADDPD4rr, X86::VFNMADDPD4rm, TB_ALIGN_16 },
+ { X86::VFNMADDPS4rrY, X86::VFNMADDPS4rmY, TB_ALIGN_32 },
+ { X86::VFNMADDPD4rrY, X86::VFNMADDPD4rmY, TB_ALIGN_32 },
+ { X86::VFMSUBSS4rr, X86::VFMSUBSS4rm, 0 },
+ { X86::VFMSUBSD4rr, X86::VFMSUBSD4rm, 0 },
+ { X86::VFMSUBPS4rr, X86::VFMSUBPS4rm, TB_ALIGN_16 },
+ { X86::VFMSUBPD4rr, X86::VFMSUBPD4rm, TB_ALIGN_16 },
+ { X86::VFMSUBPS4rrY, X86::VFMSUBPS4rmY, TB_ALIGN_32 },
+ { X86::VFMSUBPD4rrY, X86::VFMSUBPD4rmY, TB_ALIGN_32 },
+ { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4rm, 0 },
+ { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4rm, 0 },
+ { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4rm, TB_ALIGN_16 },
+ { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4rm, TB_ALIGN_16 },
+ { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4rmY, TB_ALIGN_32 },
+ { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4rmY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4rm, TB_ALIGN_16 },
+ { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4rm, TB_ALIGN_16 },
+ { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4rmY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4rmY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4rm, TB_ALIGN_16 },
+ { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4rm, TB_ALIGN_16 },
+ { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4rmY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4rmY, TB_ALIGN_32 },
};
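// These FMA4 rows belong to OpTbl3, which folds a memory operand into source
// operand 3; that matches the TableGen above, where the rm forms take memory
// in $src3. The mirror-image mr forms, with memory in $src2, were added to
// OpTbl2 earlier in this patch. For example, a spilled $src3 of VFMADDPS4rr
// becomes a VFMADDPS4rm with a 16-byte-aligned memory operand (TB_ALIGN_16).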
for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) {
@@ -1318,8 +1376,7 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
switch (MI.getOpcode()) {
- default:
- llvm_unreachable(0);
+ default: llvm_unreachable("Unreachable!");
case X86::MOVSX16rr8:
case X86::MOVZX16rr8:
case X86::MOVSX32rr8:
@@ -1483,69 +1540,69 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
AliasAnalysis *AA) const {
switch (MI->getOpcode()) {
default: break;
- case X86::MOV8rm:
- case X86::MOV16rm:
- case X86::MOV32rm:
- case X86::MOV64rm:
- case X86::LD_Fp64m:
- case X86::MOVSSrm:
- case X86::MOVSDrm:
- case X86::MOVAPSrm:
- case X86::MOVUPSrm:
- case X86::MOVAPDrm:
- case X86::MOVDQArm:
- case X86::VMOVSSrm:
- case X86::VMOVSDrm:
- case X86::VMOVAPSrm:
- case X86::VMOVUPSrm:
- case X86::VMOVAPDrm:
- case X86::VMOVDQArm:
- case X86::VMOVAPSYrm:
- case X86::VMOVUPSYrm:
- case X86::VMOVAPDYrm:
- case X86::VMOVDQAYrm:
- case X86::MMX_MOVD64rm:
- case X86::MMX_MOVQ64rm:
- case X86::FsVMOVAPSrm:
- case X86::FsVMOVAPDrm:
- case X86::FsMOVAPSrm:
- case X86::FsMOVAPDrm: {
- // Loads from constant pools are trivially rematerializable.
- if (MI->getOperand(1).isReg() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
- MI->isInvariantLoad(AA)) {
- unsigned BaseReg = MI->getOperand(1).getReg();
- if (BaseReg == 0 || BaseReg == X86::RIP)
- return true;
- // Allow re-materialization of PIC load.
- if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
- return false;
- const MachineFunction &MF = *MI->getParent()->getParent();
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- return regIsPICBase(BaseReg, MRI);
- }
- return false;
+ case X86::MOV8rm:
+ case X86::MOV16rm:
+ case X86::MOV32rm:
+ case X86::MOV64rm:
+ case X86::LD_Fp64m:
+ case X86::MOVSSrm:
+ case X86::MOVSDrm:
+ case X86::MOVAPSrm:
+ case X86::MOVUPSrm:
+ case X86::MOVAPDrm:
+ case X86::MOVDQArm:
+ case X86::VMOVSSrm:
+ case X86::VMOVSDrm:
+ case X86::VMOVAPSrm:
+ case X86::VMOVUPSrm:
+ case X86::VMOVAPDrm:
+ case X86::VMOVDQArm:
+ case X86::VMOVAPSYrm:
+ case X86::VMOVUPSYrm:
+ case X86::VMOVAPDYrm:
+ case X86::VMOVDQAYrm:
+ case X86::MMX_MOVD64rm:
+ case X86::MMX_MOVQ64rm:
+ case X86::FsVMOVAPSrm:
+ case X86::FsVMOVAPDrm:
+ case X86::FsMOVAPSrm:
+ case X86::FsMOVAPDrm: {
+ // Loads from constant pools are trivially rematerializable.
+ if (MI->getOperand(1).isReg() &&
+ MI->getOperand(2).isImm() &&
+ MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
+ MI->isInvariantLoad(AA)) {
+ unsigned BaseReg = MI->getOperand(1).getReg();
+ if (BaseReg == 0 || BaseReg == X86::RIP)
+ return true;
+ // Allow re-materialization of PIC load.
+ if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
+ return false;
+ const MachineFunction &MF = *MI->getParent()->getParent();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ return regIsPICBase(BaseReg, MRI);
}
+ return false;
+ }
- case X86::LEA32r:
- case X86::LEA64r: {
- if (MI->getOperand(2).isImm() &&
- MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
- !MI->getOperand(4).isReg()) {
- // lea fi#, lea GV, etc. are all rematerializable.
- if (!MI->getOperand(1).isReg())
- return true;
- unsigned BaseReg = MI->getOperand(1).getReg();
- if (BaseReg == 0)
- return true;
- // Allow re-materialization of lea PICBase + x.
- const MachineFunction &MF = *MI->getParent()->getParent();
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- return regIsPICBase(BaseReg, MRI);
- }
- return false;
- }
+ case X86::LEA32r:
+ case X86::LEA64r: {
+ if (MI->getOperand(2).isImm() &&
+ MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
+ !MI->getOperand(4).isReg()) {
+ // lea fi#, lea GV, etc. are all rematerializable.
+ if (!MI->getOperand(1).isReg())
+ return true;
+ unsigned BaseReg = MI->getOperand(1).getReg();
+ if (BaseReg == 0)
+ return true;
+ // Allow re-materialization of lea PICBase + x.
+ const MachineFunction &MF = *MI->getParent()->getParent();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ return regIsPICBase(BaseReg, MRI);
+ }
+ return false;
+ }
}
// All other instructions marked M_REMATERIALIZABLE are always trivially
@@ -1654,7 +1711,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
case X86::MOV64r0: {
if (!isSafeToClobberEFLAGS(MBB, I)) {
switch (Opc) {
- default: break;
+ default: llvm_unreachable("Unreachable!");
case X86::MOV8r0: Opc = X86::MOV8ri; break;
case X86::MOV16r0: Opc = X86::MOV16ri; break;
case X86::MOV32r0: Opc = X86::MOV32ri; break;
@@ -1727,8 +1784,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
get(Opc), leaOutReg);
switch (MIOpc) {
- default:
- llvm_unreachable(0);
+ default: llvm_unreachable("Unreachable!");
case X86::SHL16ri: {
unsigned ShAmt = MI->getOperand(2).getImm();
MIB.addReg(0).addImm(1 << ShAmt)
@@ -1812,10 +1868,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineInstr *MI = MBBI;
MachineFunction &MF = *MI->getParent()->getParent();
// All instructions input are two-addr instructions. Get the known operands.
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isKill = MI->getOperand(1).isKill();
+ const MachineOperand &Dest = MI->getOperand(0);
+ const MachineOperand &Src = MI->getOperand(1);
MachineInstr *NewMI = NULL;
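// Keeping Dest and Src as MachineOperands, rather than unpacking the
// register plus isKill/isDead flags up front, lets the builders below use
// addOperand(), which carries the kill, dead and undef flags over
// implicitly; the old code had to re-apply them by hand through
// getDeadRegState()/getKillRegState().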
// FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
@@ -1833,11 +1887,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned B = MI->getOperand(1).getReg();
unsigned C = MI->getOperand(2).getReg();
if (B != C) return 0;
- unsigned A = MI->getOperand(0).getReg();
unsigned M = MI->getOperand(3).getImm();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
- .addReg(A, RegState::Define | getDeadRegState(isDead))
- .addReg(B, getKillRegState(isKill)).addImm(M);
+ .addOperand(Dest).addOperand(Src).addImm(M);
break;
}
case X86::SHUFPDrri: {
@@ -1847,15 +1899,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned B = MI->getOperand(1).getReg();
unsigned C = MI->getOperand(2).getReg();
if (B != C) return 0;
- unsigned A = MI->getOperand(0).getReg();
unsigned M = MI->getOperand(3).getImm();
// Convert to PSHUFD mask.
M = ((M & 1) << 1) | ((M & 1) << 3) | ((M & 2) << 4) | ((M & 2) << 6)| 0x44;
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
- .addReg(A, RegState::Define | getDeadRegState(isDead))
- .addReg(B, getKillRegState(isKill)).addImm(M);
+ .addOperand(Dest).addOperand(Src).addImm(M);
break;
}
case X86::SHL64ri: {
@@ -1866,15 +1916,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (ShAmt == 0 || ShAmt >= 4) return 0;
// LEA can't handle RSP.
- if (TargetRegisterInfo::isVirtualRegister(Src) &&
- !MF.getRegInfo().constrainRegClass(Src, &X86::GR64_NOSPRegClass))
+ if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
+ !MF.getRegInfo().constrainRegClass(Src.getReg(),
+ &X86::GR64_NOSPRegClass))
return 0;
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
- .addReg(Dest, RegState::Define | getDeadRegState(isDead))
- .addReg(0).addImm(1 << ShAmt)
- .addReg(Src, getKillRegState(isKill))
- .addImm(0).addReg(0);
+ .addOperand(Dest)
+ .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
break;
}
case X86::SHL32ri: {
@@ -1885,15 +1934,15 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (ShAmt == 0 || ShAmt >= 4) return 0;
// LEA can't handle ESP.
- if (TargetRegisterInfo::isVirtualRegister(Src) &&
- !MF.getRegInfo().constrainRegClass(Src, &X86::GR32_NOSPRegClass))
+ if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
+ !MF.getRegInfo().constrainRegClass(Src.getReg(),
+ &X86::GR32_NOSPRegClass))
return 0;
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addReg(Dest, RegState::Define | getDeadRegState(isDead))
- .addReg(0).addImm(1 << ShAmt)
- .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0);
+ .addOperand(Dest)
+ .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
break;
}
case X86::SHL16ri: {
@@ -1906,10 +1955,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addReg(Dest, RegState::Define | getDeadRegState(isDead))
- .addReg(0).addImm(1 << ShAmt)
- .addReg(Src, getKillRegState(isKill))
- .addImm(0).addReg(0);
+ .addOperand(Dest)
+ .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
break;
}
default: {
@@ -1932,14 +1979,12 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
(const TargetRegisterClass*)&X86::GR32_NOSPRegClass;
// LEA can't handle RSP.
- if (TargetRegisterInfo::isVirtualRegister(Src) &&
- !MF.getRegInfo().constrainRegClass(Src, RC))
+ if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
+ !MF.getRegInfo().constrainRegClass(Src.getReg(), RC))
return 0;
- NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, 1);
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest).addOperand(Src), 1);
break;
}
case X86::INC16r:
@@ -1947,10 +1992,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
- NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, 1);
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest).addOperand(Src), 1);
break;
case X86::DEC64r:
case X86::DEC32r:
@@ -1962,14 +2005,12 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
(const TargetRegisterClass*)&X86::GR64_NOSPRegClass :
(const TargetRegisterClass*)&X86::GR32_NOSPRegClass;
// LEA can't handle RSP.
- if (TargetRegisterInfo::isVirtualRegister(Src) &&
- !MF.getRegInfo().constrainRegClass(Src, RC))
+ if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
+ !MF.getRegInfo().constrainRegClass(Src.getReg(), RC))
return 0;
- NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, -1);
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest).addOperand(Src), -1);
break;
}
case X86::DEC16r:
@@ -1977,10 +2018,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
- NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, -1);
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest).addOperand(Src), -1);
break;
case X86::ADD64rr:
case X86::ADD64rr_DB:
@@ -2007,9 +2046,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
return 0;
NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, Src2, isKill2);
+ .addOperand(Dest),
+ Src.getReg(), Src.isKill(), Src2, isKill2);
// Preserve undefness of the operands.
bool isUndef = MI->getOperand(1).isUndef();
@@ -2029,9 +2067,15 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, Src2, isKill2);
+ .addOperand(Dest),
+ Src.getReg(), Src.isKill(), Src2, isKill2);
+
+ // Preserve undefness of the operands.
+ bool isUndef = MI->getOperand(1).isUndef();
+ bool isUndef2 = MI->getOperand(2).isUndef();
+ NewMI->getOperand(1).setIsUndef(isUndef);
+ NewMI->getOperand(3).setIsUndef(isUndef2);
+
if (LV && isKill2)
LV->replaceKillInstruction(Src2, MI, NewMI);
break;
@@ -2041,10 +2085,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD64ri32_DB:
case X86::ADD64ri8_DB:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, MI->getOperand(2).getImm());
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
+ .addOperand(Dest).addOperand(Src),
+ MI->getOperand(2).getImm());
break;
case X86::ADD32ri:
case X86::ADD32ri8:
@@ -2052,10 +2095,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD32ri8_DB: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, MI->getOperand(2).getImm());
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addOperand(Dest).addOperand(Src),
+ MI->getOperand(2).getImm());
break;
}
case X86::ADD16ri:
@@ -2065,10 +2107,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addReg(Dest, RegState::Define |
- getDeadRegState(isDead)),
- Src, isKill, MI->getOperand(2).getImm());
+ NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest).addOperand(Src),
+ MI->getOperand(2).getImm());
break;
}
}
@@ -2077,10 +2118,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (!NewMI) return 0;
if (LV) { // Update live variables
- if (isKill)
- LV->replaceKillInstruction(Src, MI, NewMI);
- if (isDead)
- LV->replaceKillInstruction(Dest, MI, NewMI);
+ if (Src.isKill())
+ LV->replaceKillInstruction(Src.getReg(), MI, NewMI);
+ if (Dest.isDead())
+ LV->replaceKillInstruction(Dest.getReg(), MI, NewMI);
}
MFI->insert(MBBI, NewMI); // Insert the new inst
@@ -2120,57 +2161,25 @@ X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
MI->getOperand(3).setImm(Size-Amt);
return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
}
- case X86::CMOVB16rr:
- case X86::CMOVB32rr:
- case X86::CMOVB64rr:
- case X86::CMOVAE16rr:
- case X86::CMOVAE32rr:
- case X86::CMOVAE64rr:
- case X86::CMOVE16rr:
- case X86::CMOVE32rr:
- case X86::CMOVE64rr:
- case X86::CMOVNE16rr:
- case X86::CMOVNE32rr:
- case X86::CMOVNE64rr:
- case X86::CMOVBE16rr:
- case X86::CMOVBE32rr:
- case X86::CMOVBE64rr:
- case X86::CMOVA16rr:
- case X86::CMOVA32rr:
- case X86::CMOVA64rr:
- case X86::CMOVL16rr:
- case X86::CMOVL32rr:
- case X86::CMOVL64rr:
- case X86::CMOVGE16rr:
- case X86::CMOVGE32rr:
- case X86::CMOVGE64rr:
- case X86::CMOVLE16rr:
- case X86::CMOVLE32rr:
- case X86::CMOVLE64rr:
- case X86::CMOVG16rr:
- case X86::CMOVG32rr:
- case X86::CMOVG64rr:
- case X86::CMOVS16rr:
- case X86::CMOVS32rr:
- case X86::CMOVS64rr:
- case X86::CMOVNS16rr:
- case X86::CMOVNS32rr:
- case X86::CMOVNS64rr:
- case X86::CMOVP16rr:
- case X86::CMOVP32rr:
- case X86::CMOVP64rr:
- case X86::CMOVNP16rr:
- case X86::CMOVNP32rr:
- case X86::CMOVNP64rr:
- case X86::CMOVO16rr:
- case X86::CMOVO32rr:
- case X86::CMOVO64rr:
- case X86::CMOVNO16rr:
- case X86::CMOVNO32rr:
- case X86::CMOVNO64rr: {
- unsigned Opc = 0;
+ case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr:
+ case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
+ case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr:
+ case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr:
+ case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr:
+ case X86::CMOVA16rr: case X86::CMOVA32rr: case X86::CMOVA64rr:
+ case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr:
+ case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr:
+ case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr:
+ case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr:
+ case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr:
+ case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr:
+ case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr:
+ case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr:
+ case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr:
+ case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: {
+ unsigned Opc;
switch (MI->getOpcode()) {
- default: break;
+ default: llvm_unreachable("Unreachable!");
case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break;
@@ -2279,7 +2288,7 @@ static X86::CondCode getCondFromSETOpc(unsigned Opc) {
}
/// getCondFromCmovOpc - return condition code of a CMov opcode.
-static X86::CondCode getCondFromCMovOpc(unsigned Opc) {
+X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) {
switch (Opc) {
default: return X86::COND_INVALID;
case X86::CMOVA16rm: case X86::CMOVA16rr: case X86::CMOVA32rm:
@@ -2402,7 +2411,7 @@ static X86::CondCode getSwappedCondition(X86::CondCode CC) {
/// whether it has memory operand.
static unsigned getSETFromCond(X86::CondCode CC,
bool HasMemoryOperand) {
- static const unsigned Opc[16][2] = {
+ static const uint16_t Opc[16][2] = {
{ X86::SETAr, X86::SETAm },
{ X86::SETAEr, X86::SETAEm },
{ X86::SETBr, X86::SETBm },
@@ -2429,7 +2438,7 @@ static unsigned getSETFromCond(X86::CondCode CC,
/// register size in bytes, and operand type.
static unsigned getCMovFromCond(X86::CondCode CC, unsigned RegBytes,
bool HasMemoryOperand) {
- static const unsigned Opc[32][3] = {
+ static const uint16_t Opc[32][3] = {
{ X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr },
{ X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr },
{ X86::CMOVB16rr, X86::CMOVB32rr, X86::CMOVB64rr },
@@ -2762,19 +2771,18 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
// SrcReg(GR64) -> DestReg(VR64)
if (X86::GR64RegClass.contains(DestReg)) {
- if (X86::VR128RegClass.contains(SrcReg)) {
+ if (X86::VR128RegClass.contains(SrcReg))
// Copy from a VR128 register to a GR64 register.
return HasAVX ? X86::VMOVPQIto64rr : X86::MOVPQIto64rr;
- } else if (X86::VR64RegClass.contains(SrcReg)) {
+ if (X86::VR64RegClass.contains(SrcReg))
// Copy from a VR64 register to a GR64 register.
return X86::MOVSDto64rr;
- }
} else if (X86::GR64RegClass.contains(SrcReg)) {
// Copy from a GR64 register to a VR128 register.
if (X86::VR128RegClass.contains(DestReg))
return HasAVX ? X86::VMOV64toPQIrr : X86::MOV64toPQIrr;
// Copy from a GR64 register to a VR64 register.
- else if (X86::VR64RegClass.contains(DestReg))
+ if (X86::VR64RegClass.contains(DestReg))
return X86::MOV64toSDrr;
}
@@ -2782,12 +2790,12 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
// SrcReg(GR32) -> DestReg(FR32)
if (X86::GR32RegClass.contains(DestReg) && X86::FR32RegClass.contains(SrcReg))
- // Copy from a FR32 register to a GR32 register.
- return HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr;
+ // Copy from a FR32 register to a GR32 register.
+ return HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr;
if (X86::FR32RegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg))
- // Copy from a GR32 register to a FR32 register.
- return HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr;
+ // Copy from a GR32 register to a FR32 register.
+ return HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr;
return 0;
}
@@ -2798,7 +2806,7 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
bool KillSrc) const {
// First deal with the normal symmetric copies.
bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
- unsigned Opc = 0;
+ unsigned Opc;
if (X86::GR64RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV64rr;
else if (X86::GR32RegClass.contains(DestReg, SrcReg))
@@ -2837,7 +2845,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
BuildMI(MBB, MI, DL, get(X86::PUSHF64));
BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
return;
- } else if (X86::GR32RegClass.contains(DestReg)) {
+ }
+ if (X86::GR32RegClass.contains(DestReg)) {
BuildMI(MBB, MI, DL, get(X86::PUSHF32));
BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
return;
@@ -2849,7 +2858,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addReg(SrcReg, getKillRegState(KillSrc));
BuildMI(MBB, MI, DL, get(X86::POPF64));
return;
- } else if (X86::GR32RegClass.contains(SrcReg)) {
+ }
+ if (X86::GR32RegClass.contains(SrcReg)) {
BuildMI(MBB, MI, DL, get(X86::PUSH32r))
.addReg(SrcReg, getKillRegState(KillSrc));
BuildMI(MBB, MI, DL, get(X86::POPF32));
@@ -3139,11 +3149,19 @@ inline static bool isDefConvertible(MachineInstr *MI) {
case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
+ case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
+ case X86::DEC64m: case X86::DEC32m: case X86::DEC16m: case X86::DEC8m:
+ case X86::DEC64_32r: case X86::DEC64_16r:
+ case X86::DEC64_32m: case X86::DEC64_16m:
case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
+ case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
+ case X86::INC64m: case X86::INC32m: case X86::INC16m: case X86::INC8m:
+ case X86::INC64_32r: case X86::INC64_16r:
+ case X86::INC64_32m: case X86::INC64_16m:
case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
@@ -3193,7 +3211,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
return false;
// There is no use of the destination register, we can replace SUB with CMP.
switch (CmpInstr->getOpcode()) {
- default: llvm_unreachable(0);
+ default: llvm_unreachable("Unreachable!");
case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
case X86::SUB16rm: NewOpcode = X86::CMP16rm; break;
@@ -3318,7 +3336,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
if (OldCC != X86::COND_INVALID)
OpcIsSET = true;
else
- OldCC = getCondFromCMovOpc(Instr.getOpcode());
+ OldCC = X86::getCondFromCMovOpc(Instr.getOpcode());
}
if (OldCC == X86::COND_INVALID) return false;
}
@@ -3383,12 +3401,14 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
Sub->getParent()->insert(MachineBasicBlock::iterator(Sub), Movr0Inst);
}
- // Make sure Sub instruction defines EFLAGS.
+ // Make sure Sub instruction defines EFLAGS and mark the def live.
+ unsigned LastOperand = Sub->getNumOperands() - 1;
assert(Sub->getNumOperands() >= 2 &&
- Sub->getOperand(Sub->getNumOperands()-1).isReg() &&
- Sub->getOperand(Sub->getNumOperands()-1).getReg() == X86::EFLAGS &&
+ Sub->getOperand(LastOperand).isReg() &&
+ Sub->getOperand(LastOperand).getReg() == X86::EFLAGS &&
"EFLAGS should be the last operand of SUB, ADD, OR, XOR, AND");
- Sub->getOperand(Sub->getNumOperands()-1).setIsDef(true);
+ Sub->getOperand(LastOperand).setIsDef(true);
+ Sub->getOperand(LastOperand).setIsDead(false);
CmpInstr->eraseFromParent();
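// Clearing the dead marker matters here: while the CMP existed, the EFLAGS
// def on the SUB was dead. Once the CMP is erased and its users read the
// SUB's flags instead, a stale dead flag would misinform liveness analysis.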
// Modify the condition code of instructions in OpsToUpdate.
@@ -3497,10 +3517,25 @@ static bool Expand2AddrUndef(MachineInstr *MI, const MCInstrDesc &Desc) {
bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
switch (MI->getOpcode()) {
+ case X86::SETB_C8r:
+ return Expand2AddrUndef(MI, get(X86::SBB8rr));
+ case X86::SETB_C16r:
+ return Expand2AddrUndef(MI, get(X86::SBB16rr));
+ case X86::SETB_C32r:
+ return Expand2AddrUndef(MI, get(X86::SBB32rr));
+ case X86::SETB_C64r:
+ return Expand2AddrUndef(MI, get(X86::SBB64rr));
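// The SETB_C pseudos expand to a subtract-with-borrow of a register from
// itself with undef inputs; e.g. SETB_C32r becomes an "sbb %reg, %reg"
// style instruction whose result is 0 when CF is clear and all-ones when CF
// is set, which is exactly the materialize-carry idiom the pseudo models.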
case X86::V_SET0:
case X86::FsFLD0SS:
case X86::FsFLD0SD:
return Expand2AddrUndef(MI, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
+ case X86::AVX_SET0:
+ assert(HasAVX && "AVX not supported");
+ return Expand2AddrUndef(MI, get(X86::VXORPSYrr));
+ case X86::V_SETALLONES:
+ return Expand2AddrUndef(MI, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
+ case X86::AVX2_SETALLONES:
+ return Expand2AddrUndef(MI, get(X86::VPCMPEQDYrr));
case X86::TEST8ri_NOREX:
MI->setDesc(get(X86::TEST8ri));
return true;
@@ -3614,14 +3649,16 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
} else if (i == 0) { // If operand 0
- if (MI->getOpcode() == X86::MOV64r0)
- NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
- else if (MI->getOpcode() == X86::MOV32r0)
- NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
- else if (MI->getOpcode() == X86::MOV16r0)
- NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
- else if (MI->getOpcode() == X86::MOV8r0)
- NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
+ unsigned Opc = 0;
+ switch (MI->getOpcode()) {
+ default: break;
+ case X86::MOV64r0: Opc = X86::MOV64mi32; break;
+ case X86::MOV32r0: Opc = X86::MOV32mi; break;
+ case X86::MOV16r0: Opc = X86::MOV16mi; break;
+ case X86::MOV8r0: Opc = X86::MOV8mi; break;
+ }
+ if (Opc)
+ NewMI = MakeM0Inst(*this, Opc, MOs, MI);
if (NewMI)
return NewMI;
@@ -3799,7 +3836,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
+ if (!MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return 0;
@@ -3840,7 +3878,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
+ if (!MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return 0;
@@ -3850,15 +3889,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
Alignment = (*LoadMI->memoperands_begin())->getAlignment();
else
switch (LoadMI->getOpcode()) {
- case X86::AVX_SET0PSY:
- case X86::AVX_SET0PDY:
case X86::AVX2_SETALLONES:
- case X86::AVX2_SET0:
+ case X86::AVX_SET0:
Alignment = 32;
break;
case X86::V_SET0:
case X86::V_SETALLONES:
- case X86::AVX_SETALLONES:
Alignment = 16;
break;
case X86::FsFLD0SD:
@@ -3894,11 +3930,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
switch (LoadMI->getOpcode()) {
case X86::V_SET0:
case X86::V_SETALLONES:
- case X86::AVX_SET0PSY:
- case X86::AVX_SET0PDY:
- case X86::AVX_SETALLONES:
case X86::AVX2_SETALLONES:
- case X86::AVX2_SET0:
+ case X86::AVX_SET0:
case X86::FsFLD0SD:
case X86::FsFLD0SS: {
// Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
@@ -3930,15 +3963,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
Ty = Type::getFloatTy(MF.getFunction()->getContext());
else if (Opc == X86::FsFLD0SD)
Ty = Type::getDoubleTy(MF.getFunction()->getContext());
- else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY)
- Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8);
- else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX2_SET0)
+ else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
else
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
- bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX_SETALLONES ||
- Opc == X86::AVX2_SETALLONES);
+ bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES);
const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
Constant::getNullValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
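// A sketch of the constant-pool fold above, under simplified assumptions:
// instead of rematerializing a zero/all-ones register, the value is loaded
// from the constant pool to ease register pressure, so the code picks the
// lane type, the fill value, and the alignment the load will require
// (16 bytes for 128-bit vectors, 32 bytes for 256-bit ones).
#include <cstdint>
#include <vector>

struct PoolConstant {
  std::vector<uint32_t> Lanes; // the vector constant as 32-bit lanes
  unsigned Alignment;          // in bytes
};

PoolConstant makeSetConstant(unsigned NumLanes, bool AllOnes) {
  PoolConstant C;
  C.Lanes.assign(NumLanes, AllOnes ? 0xFFFFFFFFu : 0u);
  C.Alignment = NumLanes * 4;  // 4 lanes -> 16, 8 lanes -> 32
  return C;
}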
@@ -4013,6 +4043,8 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
OpcodeTablePtr = &RegOp2MemOpTable1;
} else if (OpNum == 2) {
OpcodeTablePtr = &RegOp2MemOpTable2;
+ } else if (OpNum == 3) {
+ OpcodeTablePtr = &RegOp2MemOpTable3;
}
if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
@@ -4102,7 +4134,6 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
getUndefRegState(MO.isUndef()));
}
// Change CMP32ri r, 0 back to TEST32rr r, r, etc.
- unsigned NewOpc = 0;
switch (DataMI->getOpcode()) {
default: break;
case X86::CMP64ri32:
@@ -4115,8 +4146,9 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
MachineOperand &MO0 = DataMI->getOperand(0);
MachineOperand &MO1 = DataMI->getOperand(1);
if (MO1.getImm() == 0) {
+ unsigned NewOpc;
switch (DataMI->getOpcode()) {
- default: break;
+ default: llvm_unreachable("Unreachable!");
case X86::CMP64ri8:
case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
case X86::CMP32ri8:
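// A sketch of the unfold cleanup above, with illustrative enums: once the
// memory operand has been pulled back out, "cmp reg, 0" is re-canonicalized
// to "test reg, reg", which is shorter and sets the same flags. The default
// case is unreachable because the caller only gets here for CMP opcodes.
#include <cassert>

enum Op { CMP64ri8, CMP64ri32, CMP32ri8, CMP32ri, TEST64rr, TEST32rr };

Op testForCmpZero(Op O) {
  switch (O) {
  case CMP64ri8:
  case CMP64ri32: return TEST64rr;
  case CMP32ri8:
  case CMP32ri:   return TEST32rr;
  default: assert(false && "not a cmp-with-immediate"); return O;
  }
}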
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.h b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
index b6f69af..260f054 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
@@ -61,6 +61,9 @@ namespace X86 {
// Turn condition code into conditional branch opcode.
unsigned GetCondBranchFromCond(CondCode CC);
+ // Turn CMov opcode into condition code.
+ CondCode getCondFromCMovOpc(unsigned Opc);
+
/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
CondCode GetOppositeBranchCondition(X86::CondCode CC);
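// A toy version of the helper declared above (the real mapping covers every
// CMOVcc opcode; only the shape is shown): it is the inverse direction of
// GetCondBranchFromCond, recovering the condition code from a conditional
// move so later passes can reason about CMOVs generically.
enum CondCode { COND_E, COND_NE, COND_INVALID };
enum CMovOp { CMOVE32rr, CMOVNE32rr, NOT_A_CMOV };

CondCode getCondFromCMovOpc(CMovOp O) {
  switch (O) {
  case CMOVE32rr:  return COND_E;
  case CMOVNE32rr: return COND_NE;
  default:         return COND_INVALID; // not a CMOV
  }
}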
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.td b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
index d293156..650fa95 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
@@ -114,7 +114,7 @@ def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
- [SDNPHasChain]>;
+ [SDNPHasChain,SDNPSideEffect]>;
def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
[SDNPHasChain]>;
def X86SFence : SDNode<"X86ISD::SFENCE", SDT_X86MEMBARRIER,
@@ -216,6 +216,14 @@ def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
[SDNPHasChain]>;
+def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
+ SDTypeProfile<1, 1, [SDTCisInt<0>,
+ SDTCisPtrTy<1>]>,
+ [SDNPHasChain, SDNPSideEffect]>;
+def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
+ SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
+ [SDNPHasChain, SDNPSideEffect]>;
+
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -397,7 +405,7 @@ def i64mem_TC : Operand<i64> {
let OperandType = "OPERAND_PCREL",
ParserMatchClass = X86AbsMemAsmOperand,
- PrintMethod = "print_pcrel_imm" in {
+ PrintMethod = "printPCRelImm" in {
def i32imm_pcrel : Operand<i32>;
def i16imm_pcrel : Operand<i16>;
@@ -418,7 +426,7 @@ def SSECC : Operand<i8> {
}
def AVXCC : Operand<i8> {
- let PrintMethod = "printSSECC";
+ let PrintMethod = "printAVXCC";
let OperandType = "OPERAND_IMMEDIATE";
}
@@ -499,7 +507,7 @@ def i64i32imm : Operand<i64> {
// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
- let PrintMethod = "print_pcrel_imm";
+ let PrintMethod = "printPCRelImm";
let ParserMatchClass = X86AbsMemAsmOperand;
let OperandType = "OPERAND_PCREL";
}
@@ -552,14 +560,21 @@ def HasMMX : Predicate<"Subtarget->hasMMX()">;
def Has3DNow : Predicate<"Subtarget->has3DNow()">;
def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
+def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
+def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
+def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
+def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
+def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
+def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
def HasAVX : Predicate<"Subtarget->hasAVX()">;
def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
+def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES : Predicate<"Subtarget->hasAES()">;
@@ -574,6 +589,7 @@ def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
def HasBMI : Predicate<"Subtarget->hasBMI()">;
def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
+def HasRTM : Predicate<"Subtarget->hasRTM()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
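// The UseSSEn predicates above encode one rule: once AVX is available, the
// VEX-encoded patterns should win, so legacy-SSE patterns disable themselves
// rather than compete at isel time. The same rule as a sketch over an
// assumed feature-bit struct (not the real Subtarget API):
struct Features { bool SSE2, AVX; };

bool hasSSE2(const Features &F) { return F.SSE2; }           // HasSSE2
bool useSSE2(const Features &F) { return F.SSE2 && !F.AVX; } // UseSSE2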
@@ -1259,28 +1275,46 @@ def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
// Atomic support
//
-
// Atomic swap. These are just normal xchg instructions. But since a memory
// operand is referenced, the atomicity is ensured.
+multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag,
+ InstrItinClass itin> {
+ let Constraints = "$val = $dst" in {
+ def #NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
+ (ins GR8:$val, i8mem:$ptr),
+ !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR8:$dst,
+ (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
+ itin>;
+ def #NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$val, i16mem:$ptr),
+ !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR16:$dst,
+ (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
+ itin>, OpSize;
+ def #NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$val, i32mem:$ptr),
+ !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR32:$dst,
+ (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
+ itin>;
+ def #NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$val, i64mem:$ptr),
+ !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR64:$dst,
+ (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
+ itin>;
+ }
+}
+
+defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap", IIC_XCHG_MEM>;
+
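// Context for the ATOMIC_SWAP multiclass above: xchg with a memory operand
// is implicitly locked on x86, so no LOCK prefix is needed for atomicity.
// A quick way to observe the lowering (a sketch, not part of the patch):
// std::atomic<T>::exchange on x86-64 typically compiles to a single xchg.
#include <atomic>

int swap_in(std::atomic<int> &slot, int v) {
  return slot.exchange(v); // usually lowers to: xchg [slot], reg
}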
+// Swap between registers.
let Constraints = "$val = $dst" in {
-def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
- "xchg{b}\t{$val, $ptr|$ptr, $val}",
- [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))],
- IIC_XCHG_MEM>;
-def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst),(ins GR16:$val, i16mem:$ptr),
- "xchg{w}\t{$val, $ptr|$ptr, $val}",
- [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))],
- IIC_XCHG_MEM>,
- OpSize;
-def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst),(ins GR32:$val, i32mem:$ptr),
- "xchg{l}\t{$val, $ptr|$ptr, $val}",
- [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))],
- IIC_XCHG_MEM>;
-def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),(ins GR64:$val,i64mem:$ptr),
- "xchg{q}\t{$val, $ptr|$ptr, $val}",
- [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))],
- IIC_XCHG_MEM>;
-
def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src),
"xchg{b}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src),
@@ -1291,6 +1325,7 @@ def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
"xchg{q}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
}
+// Swap between EAX and other registers.
def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src),
"xchg{w}\t{$src, %ax|AX, $src}", [], IIC_XCHG_REG>, OpSize;
def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src),
@@ -1672,6 +1707,8 @@ include "X86Instr3DNow.td"
include "X86InstrVMX.td"
include "X86InstrSVM.td"
+include "X86InstrTSX.td"
+
// System instructions.
include "X86InstrSystem.td"
diff --git a/contrib/llvm/lib/Target/X86/X86InstrMMX.td b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
index c8f40bb..127af6f 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
@@ -118,11 +118,11 @@ let Constraints = "$src1 = $dst" in {
/// Unary MMX instructions requiring SSSE3.
multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr,
Intrinsic IntId64, OpndItins itins> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
+ def rr64 : MMXSS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst, (IntId64 VR64:$src))], itins.rr>;
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
+ def rm64 : MMXSS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst,
(IntId64 (bitconvert (memopmmx addr:$src))))],
@@ -134,11 +134,11 @@ let ImmT = NoImm, Constraints = "$src1 = $dst" in {
multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
Intrinsic IntId64, OpndItins itins> {
let isCommutable = 0 in
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
+ def rr64 : MMXSS38I<opc, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))], itins.rr>;
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
+ def rm64 : MMXSS38I<opc, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst,
@@ -149,11 +149,11 @@ multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
/// PALIGN MMX instructions (require SSSE3).
multiclass ssse3_palign_mm<string asm, Intrinsic IntId> {
- def R64irr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
+ def R64irr : MMXSS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR64:$dst, (IntId VR64:$src1, VR64:$src2, (i8 imm:$src3)))]>;
- def R64irm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
+ def R64irm : MMXSS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR64:$dst, (IntId VR64:$src1,
@@ -163,12 +163,10 @@ multiclass ssse3_palign_mm<string asm, Intrinsic IntId> {
multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
string asm, OpndItins itins, Domain d> {
- def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
- [(set DstRC:$dst, (Int SrcRC:$src))],
- itins.rr, d>;
- def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
- [(set DstRC:$dst, (Int (ld_frag addr:$src)))],
- itins.rm, d>;
+ def irr : MMXPI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr, d>;
+ def irm : MMXPI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (Int (ld_frag addr:$src)))], itins.rm, d>;
}
multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
@@ -209,8 +207,14 @@ def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
let mayStore = 1 in
def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
"movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_MM_RM>;
-def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs), (ins GR32:$dst, VR64:$src),
- "movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_REG_MM>;
+
+// Low word of MMX to GPR.
+def MMX_X86movd2w : SDNode<"X86ISD::MMX_MOVD2W", SDTypeProfile<1, 1,
+ [SDTCisVT<0, i32>, SDTCisVT<1, x86mmx>]>>;
+def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR64:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst,
+ (MMX_X86movd2w (x86mmx VR64:$src)))], IIC_MMX_MOV_REG_MM>;
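// The new MMX_X86movd2w node above models "movd r32, mm": copy the low 32
// bits of an MMX register into a GPR. A user-level sketch of the same
// operation via intrinsics, assuming an x86 target with MMX support:
#include <mmintrin.h>

int low_word(__m64 v) {
  int r = _mm_cvtsi64_si32(v); // movd %mm0, %eax
  _mm_empty();                 // emms: leave MMX state before returning
  return r;
}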
let neverHasSideEffects = 1 in
def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
@@ -243,29 +247,30 @@ def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
[(store (x86mmx VR64:$src), addr:$dst)],
IIC_MMX_MOVQ_RM>;
-def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
- (ins VR128:$src), "movdq2q\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst,
- (x86mmx (bitconvert
- (i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))))))],
- IIC_MMX_MOVQ_RR>;
-
-def MMX_MOVQ2DQrr : S2SIi8<0xD6, MRMSrcReg, (outs VR128:$dst),
- (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v2i64 (scalar_to_vector
- (i64 (bitconvert (x86mmx VR64:$src))))))],
- IIC_MMX_MOVQ_RR>;
+def MMX_MOVDQ2Qrr : MMXSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
+ (ins VR128:$src), "movdq2q\t{$src, $dst|$dst, $src}",
+ [(set VR64:$dst,
+ (x86mmx (bitconvert
+ (i64 (vector_extract (v2i64 VR128:$src),
+ (iPTR 0))))))],
+ IIC_MMX_MOVQ_RR>;
+
+def MMX_MOVQ2DQrr : MMXS2SIi8<0xD6, MRMSrcReg, (outs VR128:$dst),
+ (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64
+ (scalar_to_vector
+ (i64 (bitconvert (x86mmx VR64:$src))))))],
+ IIC_MMX_MOVQ_RR>;
let neverHasSideEffects = 1 in
-def MMX_MOVQ2FR64rr: S2SIi8<0xD6, MRMSrcReg, (outs FR64:$dst),
- (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", [],
- IIC_MMX_MOVQ_RR>;
+def MMX_MOVQ2FR64rr: MMXS2SIi8<0xD6, MRMSrcReg, (outs FR64:$dst),
+ (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}",
+ [], IIC_MMX_MOVQ_RR>;
-def MMX_MOVFR642Qrr: SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
- (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}", [],
- IIC_MMX_MOVQ_RR>;
+def MMX_MOVFR642Qrr: MMXSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
+ (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}",
+ [], IIC_MMX_MOVQ_RR>;
def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movntq\t{$src, $dst|$dst, $src}",
@@ -577,6 +582,7 @@ def MMX_MASKMOVQ64: MMXI64<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
IIC_MMX_MASKMOV>;
// 64-bit bit convert.
+let Predicates = [HasSSE2] in {
def : Pat<(x86mmx (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(i64 (bitconvert (x86mmx VR64:$src))),
@@ -585,5 +591,6 @@ def : Pat<(f64 (bitconvert (x86mmx VR64:$src))),
(MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(x86mmx (bitconvert (f64 FR64:$src))),
(MMX_MOVFR642Qrr FR64:$src)>;
+}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
index 20dc81e..6f48d7e 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
@@ -251,35 +251,37 @@ def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
-def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
+def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
(v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
-def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
+def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
(v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
-def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
+def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
(v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
-def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
+def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
(v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
-def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))),
+def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
(v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
-def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))),
+def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
(v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
-def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
+let AddedComplexity = 25 in { // to give priority over vinsertf128rm
+def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
(INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
-def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
+def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
(INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
-def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
+def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
(INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
-def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
+def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
(INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
-def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
+def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
(INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
-def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
+def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
(INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
+}
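// The patterns above state that reading the low 128 bits of a 256-bit
// register (or inserting into the low half of an undef 256-bit value) is
// just a subregister rename, not an instruction. The same fact at the
// intrinsics level: the cast below should compile to no code at all
// (built with AVX enabled).
#include <immintrin.h>

__m128 low_half(__m256 v) {
  return _mm256_castps256_ps128(v); // xmm0 already is the low half of ymm0
}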
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
@@ -362,7 +364,7 @@ let Predicates = [HasAVX] in {
def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
}
-// Alias instructions that map fld0 to pxor for sse.
+// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
isPseudo = 1 in {
@@ -382,11 +384,11 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isPseudo = 1, neverHasSideEffects = 1 in {
-def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "", []>;
+ isPseudo = 1 in {
+def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}
-def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
@@ -394,35 +396,29 @@ def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
-// The same as done above but for AVX. The 256-bit ISA does not support PI,
+// The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
// and doesn't need it because on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
-// FIXME: Change encoding to pseudo! This is blocked right now by the x86
-// JIT implementatioan, it does not expand the instructions below like
-// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1 in {
-let Predicates = [HasAVX] in {
-def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
- [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
-def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
- [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
-}
-let Predicates = [HasAVX2], neverHasSideEffects = 1 in
-def AVX2_SET0 : PDI<0xef, MRMInitReg, (outs VR256:$dst), (ins), "",
- []>, VEX_4V;
+ isPseudo = 1, Predicates = [HasAVX] in {
+def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
+ [(set VR256:$dst, (v8f32 immAllZerosV))]>;
}
-let Predicates = [HasAVX2], AddedComplexity = 5 in {
- def : Pat<(v4i64 immAllZerosV), (AVX2_SET0)>;
- def : Pat<(v8i32 immAllZerosV), (AVX2_SET0)>;
- def : Pat<(v16i16 immAllZerosV), (AVX2_SET0)>;
- def : Pat<(v32i8 immAllZerosV), (AVX2_SET0)>;
+let Predicates = [HasAVX] in
+ def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;
+
+let Predicates = [HasAVX2] in {
+ def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
+ def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
+ def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
+ def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
}
-// AVX has no support for 256-bit integer instructions, but since the 128-bit
+// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
+let Predicates = [HasAVX1Only] in {
def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
(SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
@@ -438,22 +434,17 @@ def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
(SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
+}
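// Why the SUBREG_TO_REG patterns above are safe: any VEX-encoded instruction
// that writes an xmm register zeroes bits 255:128 of the containing ymm
// register, so a 128-bit vxorps already yields a correct 256-bit zero on
// AVX1, which has no 256-bit integer xor. Sketch at the intrinsics level
// (note the cast intrinsic formally leaves the upper half unspecified; it is
// the architectural zeroing rule that makes the pattern above correct):
#include <immintrin.h>

__m256i zero256_avx1(void) {
  __m128i lo = _mm_setzero_si128();  // vpxor xmm0, xmm0, xmm0
  return _mm256_castsi128_si256(lo); // upper 128 bits are zero on hardware
}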
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
-// FIXME: Change encoding to pseudo! This is blocked right now by the x86
-// JIT implementation, it does not expand the instructions below like
-// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1, ExeDomain = SSEPackedInt in {
- let Predicates = [HasAVX] in
- def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
- def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v4i32 immAllOnesV))]>;
+ isPseudo = 1 in {
+ def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4i32 immAllOnesV))]>;
let Predicates = [HasAVX2] in
- def AVX2_SETALLONES : PDI<0x76, MRMInitReg, (outs VR256:$dst), (ins), "",
- [(set VR256:$dst, (v8i32 immAllOnesV))]>, VEX_4V;
+ def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
+ [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}
@@ -605,27 +596,27 @@ let Predicates = [HasAVX] in {
// Represent the same patterns above but in the form they appear for
// 256-bit types
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
- (v4i32 (scalar_to_vector (loadi32 addr:$src))), (i32 0)))),
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
- (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
+ (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
- (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
+ (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
}
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
- (v4f32 (scalar_to_vector FR32:$src)), (i32 0)))),
+ (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
(SUBREG_TO_REG (i32 0),
(v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
- (v2f64 (scalar_to_vector FR64:$src)), (i32 0)))),
+ (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
(SUBREG_TO_REG (i64 0),
(v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
- (v2i64 (scalar_to_vector (loadi64 addr:$src))), (i32 0)))),
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;
// Move low f64 and clear high bits.
@@ -704,7 +695,7 @@ let Predicates = [HasAVX] in {
(VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
-let Predicates = [HasSSE1] in {
+let Predicates = [UseSSE1] in {
let AddedComplexity = 15 in {
// Move scalar to XMM zero-extended, zeroing a VR128 then do a
// MOVSS to the lower bits.
@@ -738,7 +729,7 @@ let Predicates = [HasSSE1] in {
(MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
let AddedComplexity = 15 in {
// Move scalar to XMM zero-extended, zeroing a VR128 then do a
// MOVSD to the lower bits.
@@ -822,16 +813,16 @@ defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
"movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
- TB, VEX;
+ TB, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
"movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
- TB, OpSize, VEX;
+ TB, OpSize, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
"movups", SSEPackedSingle, SSE_MOVU_ITINS>,
- TB, VEX;
+ TB, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
"movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
- TB, OpSize, VEX;
+ TB, OpSize, VEX, VEX_L;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
"movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
TB;
@@ -864,19 +855,19 @@ def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>, VEX;
+ IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movapd\t{$src, $dst|$dst, $src}",
[(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>, VEX;
+ IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movups\t{$src, $dst|$dst, $src}",
[(store (v8f32 VR256:$src), addr:$dst)],
- IIC_SSE_MOVU_P_MR>, VEX;
+ IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movupd\t{$src, $dst|$dst, $src}",
[(store (v4f64 VR256:$src), addr:$dst)],
- IIC_SSE_MOVU_P_MR>, VEX;
+ IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
// For disassembler
let isCodeGenOnly = 1 in {
@@ -899,33 +890,33 @@ let isCodeGenOnly = 1 in {
def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movaps\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX;
+ IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movapd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX;
+ IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movups\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, VEX;
+ IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movupd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, VEX;
+ IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}
let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86vzmovl
- (insert_subvector undef, (v4i32 VR128:$src), (i32 0)))),
+ (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl
- (insert_subvector undef, (v2i64 VR128:$src), (i32 0)))),
+ (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl
- (insert_subvector undef, (v4f32 VR128:$src), (i32 0)))),
+ (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl
- (insert_subvector undef, (v2f64 VR128:$src), (i32 0)))),
+ (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
}
@@ -975,10 +966,10 @@ let Predicates = [HasAVX] in {
(VMOVUPDmr addr:$dst, VR128:$src)>;
}
-let Predicates = [HasSSE1] in
+let Predicates = [UseSSE1] in
def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
(MOVUPSmr addr:$dst, VR128:$src)>;
-let Predicates = [HasSSE2] in
+let Predicates = [UseSSE2] in
def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
(MOVUPDmr addr:$dst, VR128:$src)>;
@@ -1028,12 +1019,52 @@ let Predicates = [HasAVX] in {
(VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(store (v32i8 VR256:$src), addr:$dst),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
+
+  // Special patterns for storing subvector extracts of the lower 128 bits.
+  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr.
+ def : Pat<(alignedstore (v2f64 (extract_subvector
+ (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4f32 (extract_subvector
+ (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v2i64 (extract_subvector
+ (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4i32 (extract_subvector
+ (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v8i16 (extract_subvector
+ (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v16i8 (extract_subvector
+ (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+
+ def : Pat<(store (v2f64 (extract_subvector
+ (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(store (v4f32 (extract_subvector
+ (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(store (v2i64 (extract_subvector
+ (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(store (v4i32 (extract_subvector
+ (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(store (v8i16 (extract_subvector
+ (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+ def : Pat<(store (v16i8 (extract_subvector
+ (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
}
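// The store patterns above sidestep vextractf128 for the low half: the low
// 128 bits of a ymm register are directly addressable as an xmm register,
// so a plain vmovaps/vmovups of the subregister is cheaper. An equivalent
// intrinsics-level sketch for the unaligned case:
#include <immintrin.h>

void store_low(__m128i *p, __m256i v) {
  _mm_storeu_si128(p, _mm256_castsi256_si128(v)); // vmovdqu, no extract
}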
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
-let Predicates = [HasSSE1] in {
+let Predicates = [UseSSE1] in {
def : Pat<(alignedloadv2i64 addr:$src),
(MOVAPSrm addr:$src)>;
def : Pat<(loadv2i64 addr:$src),
@@ -1180,7 +1211,7 @@ let Predicates = [HasAVX] in {
(VMOVLPDmr addr:$src1, VR128:$src2)>;
}
-let Predicates = [HasSSE1] in {
+let Predicates = [UseSSE1] in {
// (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
(iPTR 0))), addr:$src1),
@@ -1205,7 +1236,7 @@ let Predicates = [HasSSE1] in {
(MOVLPSmr addr:$src1, VR128:$src2)>;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
(MOVLPDrm VR128:$src1, addr:$src2)>;
@@ -1271,7 +1302,7 @@ let Predicates = [HasAVX] in {
(VMOVHPSrm VR128:$src1, addr:$src2)>;
// FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
- // is during lowering, where it's not possible to recognize the load fold
+ // is during lowering, where it's not possible to recognize the load fold
// because it has two uses through a bitcast. One use disappears at isel time
// and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpckl VR128:$src1,
@@ -1279,7 +1310,7 @@ let Predicates = [HasAVX] in {
(VMOVHPDrm VR128:$src1, addr:$src2)>;
}
-let Predicates = [HasSSE1] in {
+let Predicates = [UseSSE1] in {
// MOVHPS patterns
def : Pat<(X86Movlhps VR128:$src1,
(bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
@@ -1289,9 +1320,9 @@ let Predicates = [HasSSE1] in {
(MOVHPSrm VR128:$src1, addr:$src2)>;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
// FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
- // is during lowering, where it's not possible to recognize the load fold
+ // is during lowering, where it's not possible to recognize the load fold
// because it has two uses through a bitcast. One use disappears at isel time
// and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpckl VR128:$src1,
@@ -1346,7 +1377,7 @@ let Predicates = [HasAVX] in {
(VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}
-let Predicates = [HasSSE1] in {
+let Predicates = [UseSSE1] in {
// MOVLHPS patterns
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
(MOVLHPSrr VR128:$src1, VR128:$src2)>;
@@ -1456,7 +1487,7 @@ def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
(VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src)>;
-let Predicates = [HasAVX], AddedComplexity = 1 in {
+let Predicates = [HasAVX] in {
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
(VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
@@ -1628,12 +1659,12 @@ defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
- TB, VEX, Requires<[HasAVX]>;
+ TB, VEX, VEX_L, Requires<[HasAVX]>;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
- TB, Requires<[HasSSE2]>;
+ TB, Requires<[UseSSE2]>;
/// SSE 2 Only
@@ -1663,7 +1694,7 @@ def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
[(set FR32:$dst, (fround (loadf64 addr:$src)))],
IIC_SSE_CVT_Scalar_RM>,
XD,
- Requires<[HasSSE2, OptForSize]>;
+ Requires<[UseSSE2, OptForSize]>;
def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
@@ -1684,13 +1715,13 @@ def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
"cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XD, Requires<[HasSSE2]>;
+ IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>;
def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
"cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsd2ss
VR128:$src1, sse_load_f64:$src2))],
- IIC_SSE_CVT_Scalar_RM>, XD, Requires<[HasSSE2]>;
+ IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>;
}
// Convert scalar single to scalar double
@@ -1709,30 +1740,28 @@ def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>;
}
-let AddedComplexity = 1 in { // give AVX priority
- def : Pat<(f64 (fextend FR32:$src)),
- (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>;
- def : Pat<(fextend (loadf32 addr:$src)),
- (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>;
+def : Pat<(f64 (fextend FR32:$src)),
+ (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>;
+def : Pat<(fextend (loadf32 addr:$src)),
+ (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>;
- def : Pat<(extloadf32 addr:$src),
- (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
- Requires<[HasAVX, OptForSize]>;
- def : Pat<(extloadf32 addr:$src),
- (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
- Requires<[HasAVX, OptForSpeed]>;
-} // AddedComplexity = 1
+def : Pat<(extloadf32 addr:$src),
+ (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[HasAVX, OptForSize]>;
+def : Pat<(extloadf32 addr:$src),
+ (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
+ Requires<[HasAVX, OptForSpeed]>;
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (fextend FR32:$src))],
IIC_SSE_CVT_Scalar_RR>, XS,
- Requires<[HasSSE2]>;
+ Requires<[UseSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (extloadf32 addr:$src))],
IIC_SSE_CVT_Scalar_RM>, XS,
- Requires<[HasSSE2, OptForSize]>;
+ Requires<[UseSSE2, OptForSize]>;
// extload f32 -> f64. This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
@@ -1740,9 +1769,9 @@ def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
def : Pat<(fextend (loadf32 addr:$src)),
- (CVTSS2SDrm addr:$src)>, Requires<[HasSSE2]>;
+ (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(extloadf32 addr:$src),
- (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[HasSSE2, OptForSpeed]>;
+ (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
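// The two patterns above choose between folding the load into cvtss2sd
// (smaller code, for OptForSize) and loading with movss first (for speed;
// movss writes the whole register, which plausibly avoids the partial
// update that the load-folded conversion would incur). The C source that
// exercises either pattern is just a widening load:
double widen(const float *p) {
  return (double)*p; // cvtss2sd, with the load folded or separate
}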
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
@@ -1762,13 +1791,13 @@ def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XS, Requires<[HasSSE2]>;
+ IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
- IIC_SSE_CVT_Scalar_RM>, XS, Requires<[HasSSE2]>;
+ IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>;
}
// Convert packed single/double fp to doubleword
@@ -1785,12 +1814,12 @@ def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvt_ps2dq_256 VR256:$src))],
- IIC_SSE_CVT_PS_RR>, VEX;
+ IIC_SSE_CVT_PS_RR>, VEX, VEX_L;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX;
+ IIC_SSE_CVT_PS_RM>, VEX, VEX_L;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
@@ -1824,7 +1853,7 @@ def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX;
+ (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L;
def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -1860,12 +1889,12 @@ def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
- IIC_SSE_CVT_PS_RR>, VEX;
+ IIC_SSE_CVT_PS_RR>, VEX, VEX_L;
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
(memopv8f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX;
+ IIC_SSE_CVT_PS_RM>, VEX, VEX_L;
def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
@@ -1904,7 +1933,7 @@ let Predicates = [HasAVX] in {
(VCVTTPS2DQYrm addr:$src)>;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
(CVTDQ2PSrr VR128:$src)>;
def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
@@ -1945,7 +1974,7 @@ def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
- IIC_SSE_CVT_PD_RR>, VEX;
+ IIC_SSE_CVT_PD_RR>, VEX, VEX_L;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -1978,31 +2007,31 @@ def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
IIC_SSE_CVT_PD_RR>, TB, VEX;
-let neverHasSideEffects = 1, mayLoad = 1 in
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RM>, TB, VEX;
+ "vcvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
+ IIC_SSE_CVT_PD_RM>, TB, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
- IIC_SSE_CVT_PD_RR>, TB, VEX;
+ IIC_SSE_CVT_PD_RR>, TB, VEX, VEX_L;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, TB, VEX;
+ IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
IIC_SSE_CVT_PD_RR>, TB;
-let neverHasSideEffects = 1, mayLoad = 1 in
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "cvtps2pd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RM>, TB;
+ "cvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
+ IIC_SSE_CVT_PD_RM>, TB;
}
// Convert Packed DW Integers to Packed Double FP
@@ -2019,11 +2048,11 @@ def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvtdq2_pd_256
- (bitconvert (memopv2i64 addr:$src))))]>, VEX;
+ (bitconvert (memopv2i64 addr:$src))))]>, VEX, VEX_L;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX;
+ (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L;
}
let neverHasSideEffects = 1, mayLoad = 1 in
@@ -2066,7 +2095,7 @@ def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
- IIC_SSE_CVT_PD_RR>, VEX;
+ IIC_SSE_CVT_PD_RR>, VEX, VEX_L;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -2096,6 +2125,10 @@ let Predicates = [HasAVX] in {
(VCVTDQ2PSYrm addr:$src)>;
// Match fround and fextend for 128/256-bit conversions
+ def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
+ (VCVTPD2PSrr VR128:$src)>;
+ def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
+ (VCVTPD2PSXrm addr:$src)>;
def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
(VCVTPD2PSYrr VR256:$src)>;
def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
@@ -2105,12 +2138,17 @@ let Predicates = [HasAVX] in {
(VCVTPS2PDrr VR128:$src)>;
def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
(VCVTPS2PDYrr VR128:$src)>;
- def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
+ def : Pat<(v4f64 (extloadv4f32 addr:$src)),
(VCVTPS2PDYrm addr:$src)>;
}
-let Predicates = [HasSSE2] in {
- // Match fextend for 128 conversions
+let Predicates = [UseSSE2] in {
+ // Match fround and fextend for 128 conversions
+ def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
+ (CVTPD2PSrr VR128:$src)>;
+ def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
+ (CVTPD2PSrm addr:$src)>;
+
def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
(CVTPS2PDrr VR128:$src)>;
}
@@ -2121,7 +2159,7 @@ let Predicates = [HasSSE2] in {
// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
- Operand CC, SDNode OpNode, ValueType VT,
+ Operand CC, SDNode OpNode, ValueType VT,
PatFrag ld_frag, string asm, string asm_alt,
OpndItins itins> {
def rr : SIi8<0xC2, MRMSrcReg,
@@ -2267,7 +2305,7 @@ let Defs = [EFLAGS] in {
// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
- Operand CC, Intrinsic Int, string asm,
+ Operand CC, Intrinsic Int, string asm,
string asm_alt, Domain d> {
def rri : PIi8<0xC2, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
@@ -2300,11 +2338,11 @@ defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedSingle>, TB, VEX_4V;
+ SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
+ SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
@@ -2336,14 +2374,14 @@ def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
(VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}
-let Predicates = [HasSSE1] in {
+let Predicates = [UseSSE1] in {
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
(CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
(CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
(CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
@@ -2374,13 +2412,13 @@ defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
memopv4f32, SSEPackedSingle>, TB, VEX_4V;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- memopv8f32, SSEPackedSingle>, TB, VEX_4V;
+ memopv8f32, SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
- memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
+ memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
@@ -2420,7 +2458,7 @@ let Predicates = [HasAVX] in {
(VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
}
-let Predicates = [HasSSE1] in {
+let Predicates = [UseSSE1] in {
def : Pat<(v4i32 (X86Shufp VR128:$src1,
(bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
(SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
@@ -2428,7 +2466,7 @@ let Predicates = [HasSSE1] in {
(SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
// Generic SHUFPD patterns
def : Pat<(v2i64 (X86Shufp VR128:$src1,
(memopv2i64 addr:$src2), (i8 imm:$imm))),
@@ -2474,16 +2512,16 @@ defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, memopv8f32,
VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V;
+ SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, memopv4f64,
VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
+ SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, memopv8f32,
VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V;
+ SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, memopv4f64,
VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
+ SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
@@ -2500,7 +2538,27 @@ let Constraints = "$src1 = $dst" in {
SSEPackedDouble>, TB, OpSize;
} // Constraints = "$src1 = $dst"
-let Predicates = [HasAVX], AddedComplexity = 1 in {
+let Predicates = [HasAVX1Only] in {
+ def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
+ (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
+ (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
+ def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
+ (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
+ (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
+
+ def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))),
+ (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
+ (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
+ def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))),
+ (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
+ (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
+}
+
+let Predicates = [HasAVX] in {
// FIXME: Instead of X86Movddup, there should be a X86Unpckl here, the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
@@ -2509,7 +2567,7 @@ let Predicates = [HasAVX], AddedComplexity = 1 in {
(VUNPCKLPDrr VR128:$src, VR128:$src)>;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
// FIXME: Instead of X86Movddup, there should be a X86Unpckl here, the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
@@ -2540,10 +2598,11 @@ let Predicates = [HasAVX] in {
"movmskpd", SSEPackedDouble>, TB,
OpSize, VEX;
defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
- "movmskps", SSEPackedSingle>, TB, VEX;
+ "movmskps", SSEPackedSingle>, TB,
+ VEX, VEX_L;
defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
"movmskpd", SSEPackedDouble>, TB,
- OpSize, VEX;
+ OpSize, VEX, VEX_L;
def : Pat<(i32 (X86fgetsign FR32:$src)),
(VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>;
@@ -2564,11 +2623,11 @@ let Predicates = [HasAVX] in {
OpSize, VEX;
def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
"movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
- SSEPackedSingle>, TB, VEX;
+ SSEPackedSingle>, TB, VEX, VEX_L;
def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
"movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
SSEPackedDouble>, TB,
- OpSize, VEX;
+ OpSize, VEX, VEX_L;
}
defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
@@ -2578,16 +2637,16 @@ defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
def : Pat<(i32 (X86fgetsign FR32:$src)),
(MOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>,
- Requires<[HasSSE1]>;
+ Requires<[UseSSE1]>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
(MOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>,
- Requires<[HasSSE1]>;
+ Requires<[UseSSE1]>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
(MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>,
- Requires<[HasSSE2]>;
+ Requires<[UseSSE2]>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
(MOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>,
- Requires<[HasSSE2]>;
+ Requires<[UseSSE2]>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
@@ -2646,13 +2705,13 @@ defm PANDN : PDI_binop_rm<0xDF, "pandn", X86andnp, v2i64, VR128, memopv2i64,
let Predicates = [HasAVX2] in {
defm VPANDY : PDI_binop_rm<0xDB, "vpand", and, v4i64, VR256, memopv4i64,
- i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+ i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPORY : PDI_binop_rm<0xEB, "vpor", or, v4i64, VR256, memopv4i64,
- i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+ i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPXORY : PDI_binop_rm<0xEF, "vpxor", xor, v4i64, VR256, memopv4i64,
- i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+ i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPANDNY : PDI_binop_rm<0xDF, "vpandn", X86andnp, v4i64, VR256, memopv4i64,
- i256mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V;
+ i256mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V, VEX_L;
}
//===----------------------------------------------------------------------===//
@@ -2683,14 +2742,12 @@ multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
}
// Alias bitwise logical operations using SSE logical ops on packed FP values.
-let mayLoad = 0 in {
- defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
- SSE_BIT_ITINS_P>;
- defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for,
- SSE_BIT_ITINS_P>;
- defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
- SSE_BIT_ITINS_P>;
-}
+defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
+ SSE_BIT_ITINS_P>;
+defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for,
+ SSE_BIT_ITINS_P>;
+defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
+ SSE_BIT_ITINS_P>;
let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef,
@@ -2740,7 +2797,7 @@ multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "ps"), f256mem,
[(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
[(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
- (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;
+ (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V, VEX_L;
defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f256mem,
@@ -2748,7 +2805,7 @@ multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
(bc_v4i64 (v4f64 VR256:$src2))))],
[(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
(memopv4i64 addr:$src2)))], 0>,
- TB, OpSize, VEX_4V;
+ TB, OpSize, VEX_4V, VEX_L;
}
// AVX 256-bit packed logical ops forms
@@ -2794,27 +2851,23 @@ multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
SizeItins itins,
bit Is2Addr = 1> {
- let mayLoad = 0 in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
v4f32, f128mem, memopv4f32, SSEPackedSingle, itins.s, Is2Addr>,
TB;
defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
v2f64, f128mem, memopv2f64, SSEPackedDouble, itins.d, Is2Addr>,
TB, OpSize;
- }
}
multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
SDNode OpNode,
SizeItins itins> {
- let mayLoad = 0 in {
- defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
+ defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
v8f32, f256mem, memopv8f32, SSEPackedSingle, itins.s, 0>,
- TB;
- defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
+ TB, VEX_L;
+ defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
v4f64, f256mem, memopv4f64, SSEPackedDouble, itins.d, 0>,
- TB, OpSize;
- }
+ TB, OpSize, VEX_L;
}
multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
@@ -2846,11 +2899,11 @@ multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr,
SizeItins itins> {
defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
!strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
- SSEPackedSingle, itins.s, 0>, TB;
+ SSEPackedSingle, itins.s, 0>, TB, VEX_L;
defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
!strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
- SSEPackedDouble, itins.d, 0>, TB, OpSize;
+ SSEPackedDouble, itins.d, 0>, TB, OpSize, VEX_L;
}
// Binary Arithmetic instructions
@@ -2872,7 +2925,8 @@ let isCommutable = 0 in {
basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S, 0>,
VEX_4V, VEX_LIG;
defm VSUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P, 0>,
- basic_sse12_fp_binop_p_y<0x5C, "sub", fsub, SSE_ALU_ITINS_P>, VEX_4V;
+ basic_sse12_fp_binop_p_y<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
+ VEX_4V;
defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S, 0>,
basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S, 0>,
VEX_4V, VEX_LIG;
@@ -2923,6 +2977,23 @@ let Constraints = "$src1 = $dst" in {
}
}
+let isCodeGenOnly = 1 in {
+ defm VMAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S, 0>,
+ VEX_4V, VEX_LIG;
+ defm VMAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_y<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>, VEX_4V;
+ defm VMINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S, 0>,
+ VEX_4V, VEX_LIG;
+ defm VMINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_y<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>, VEX_4V;
+ let Constraints = "$src1 = $dst" in {
+ defm MAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>;
+ defm MINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>;
+ }
+}
+
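X86fmaxc/X86fminc (the "c" is for commutable) reuse the MAX/MIN encodings, hence isCodeGenOnly; separate nodes are needed because plain maxss/minss are not commutative once NaNs are involved, so the commutable forms can only be selected when operand order is known not to matter (e.g. under fast-math). A short C++ illustration of the asymmetry, assuming SSE1 and <xmmintrin.h>:

#include <cmath>
#include <xmmintrin.h>

// maxss returns its *second* operand whenever either input is a NaN,
// so the plain instruction is not commutative:
float maxss_nan_demo() {
  __m128 qnan = _mm_set_ss(std::nanf(""));
  __m128 one  = _mm_set_ss(1.0f);
  float a = _mm_cvtss_f32(_mm_max_ss(qnan, one)); // 1.0f
  float b = _mm_cvtss_f32(_mm_max_ss(one, qnan)); // NaN
  return a + b;  // NaN: the two operand orders give different results
}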
/// Unop Arithmetic
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
@@ -2960,7 +3031,7 @@ multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
- Requires<[HasSSE1, OptForSize]>;
+ Requires<[UseSSE1, OptForSize]>;
def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (F32Int VR128:$src))], itins.rr>;
@@ -2974,7 +3045,7 @@ multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
- let mayLoad = 1 in
+ let mayLoad = 1 in {
def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
@@ -2982,6 +3053,7 @@ multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
(ins VR128:$src1, ssmem:$src2),
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ }
}
/// sse1_fp_unop_p - SSE1 unops in packed form.
@@ -3001,11 +3073,11 @@ multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
- itins.rr>;
+ itins.rr>, VEX_L;
def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))],
- itins.rm>;
+ itins.rm>, VEX_L;
}
/// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
@@ -3027,11 +3099,11 @@ multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (V4F32Int VR256:$src))],
- itins.rr>;
+ itins.rr>, VEX_L;
def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))],
- itins.rm>;
+ itins.rm>, VEX_L;
}
/// sse2_fp_unop_s - SSE2 unops in scalar form.
@@ -3044,7 +3116,7 @@ multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
[(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
- Requires<[HasSSE2, OptForSize]>;
+ Requires<[UseSSE2, OptForSize]>;
def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (F64Int VR128:$src))], itins.rr>;
@@ -3054,20 +3126,20 @@ multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
}
/// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
+let hasSideEffects = 0 in
multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
- let neverHasSideEffects = 1 in {
def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
- let mayLoad = 1 in
+ let mayLoad = 1 in {
def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
- }
def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, sdmem:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ }
}
/// sse2_fp_unop_p - SSE2 unops in vector forms.
@@ -3087,11 +3159,11 @@ multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
- itins.rr>;
+ itins.rr>, VEX_L;
def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))],
- itins.rm>;
+ itins.rm>, VEX_L;
}
/// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
@@ -3113,11 +3185,11 @@ multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (V2F64Int VR256:$src))],
- itins.rr>;
+ itins.rr>, VEX_L;
def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))],
- itins.rm>;
+ itins.rm>, VEX_L;
}
let Predicates = [HasAVX] in {
@@ -3158,7 +3230,6 @@ let Predicates = [HasAVX] in {
SSE_RCPP>, VEX;
}
-let AddedComplexity = 1 in {
def : Pat<(f32 (fsqrt FR32:$src)),
(VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(f32 (fsqrt (load addr:$src))),
@@ -3181,9 +3252,8 @@ def : Pat<(f32 (X86frcp FR32:$src)),
def : Pat<(f32 (X86frcp (load addr:$src))),
(VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
Requires<[HasAVX, OptForSize]>;
-}
-let Predicates = [HasAVX], AddedComplexity = 1 in {
+let Predicates = [HasAVX] in {
def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
(COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS VR128:$src, FR32)),
@@ -3223,17 +3293,52 @@ defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss,
sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd, SSE_SQRTS>;
+/// sse1_fp_unop_rw - SSE1 unops where vector form has a read-write operand.
+multiclass sse1_fp_unop_rw<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ Intrinsic F32Int, OpndItins itins> {
+ def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set FR32:$dst, (OpNode FR32:$src))]>;
+ // For scalar unary operations, fold a load into the operation
+ // only in OptForSize mode. It eliminates an instruction, but it also
+ // eliminates a whole-register clobber (the load), so it introduces a
+ // partial register update condition.
+ def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
+ Requires<[UseSSE1, OptForSize]>;
+ let Constraints = "$src1 = $dst" in {
+ def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
+ [], itins.rr>;
+ def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, ssmem:$src2),
+ !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
+ [], itins.rm>;
+ }
+}
+
// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
-defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss,
- SSE_SQRTS>,
+defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss,
+ SSE_SQRTS>,
sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTS>,
sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
SSE_SQRTS>;
-defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss,
- SSE_RCPS>,
+let Predicates = [UseSSE1] in {
+ def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
+ (RSQRTSSr_Int VR128:$src, VR128:$src)>;
+}
+
+defm RCP : sse1_fp_unop_rw<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss,
+ SSE_RCPS>,
sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPS>,
sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps, SSE_RCPS>;
+let Predicates = [UseSSE1] in {
+ def : Pat<(int_x86_sse_rcp_ss VR128:$src),
+ (RCPSSr_Int VR128:$src, VR128:$src)>;
+}
// There is no f64 version of the reciprocal approximation instructions.
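As the comment above notes, rsqrtss/rcpss deliver only about 12 bits of precision. The usual refinement is one Newton-Raphson step; a minimal sketch for rsqrt, assuming SSE1 and <xmmintrin.h>:

#include <xmmintrin.h>

__m128 rsqrt_nr(__m128 a) {
  __m128 x = _mm_rsqrt_ps(a);                 // rsqrtps: ~12-bit estimate
  // One Newton-Raphson step: x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
  __m128 half_a = _mm_mul_ps(_mm_set1_ps(0.5f), a);
  __m128 x2     = _mm_mul_ps(x, x);
  return _mm_mul_ps(x, _mm_sub_ps(_mm_set1_ps(1.5f),
                                  _mm_mul_ps(half_a, x2)));
}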
@@ -3271,20 +3376,20 @@ let AddedComplexity = 400 in { // Prefer non-temporal versions
"movntps\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v8f32 VR256:$src),
addr:$dst)],
- IIC_SSE_MOVNT>, VEX;
+ IIC_SSE_MOVNT>, VEX, VEX_L;
def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
(ins f256mem:$dst, VR256:$src),
"movntpd\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4f64 VR256:$src),
addr:$dst)],
- IIC_SSE_MOVNT>, VEX;
+ IIC_SSE_MOVNT>, VEX, VEX_L;
let ExeDomain = SSEPackedInt in
def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
(ins f256mem:$dst, VR256:$src),
"movntdq\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4i64 VR256:$src),
addr:$dst)],
- IIC_SSE_MOVNT>, VEX;
+ IIC_SSE_MOVNT>, VEX, VEX_L;
}
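The movnt* stores preferred here bypass the cache hierarchy and require aligned addresses (hence the alignednontemporalstore patterns); they are normally followed by an sfence. A sketch assuming AVX, <immintrin.h>, a 32-byte-aligned destination, and tail handling omitted:

#include <cstddef>
#include <immintrin.h>

void stream_copy(float *dst, const float *src, std::size_t n) {
  for (std::size_t i = 0; i + 8 <= n; i += 8)             // dst assumed 32B-aligned
    _mm256_stream_ps(dst + i, _mm256_loadu_ps(src + i));  // vmovntps ymm (VEX_L)
  _mm_sfence();  // order the streaming stores before later stores
}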
let AddedComplexity = 400 in { // Prefer non-temporal versions
@@ -3304,7 +3409,7 @@ def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
IIC_SSE_MOVNT>;
def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
- (MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
+ (MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[UseSSE2]>;
// There is no AVX form for instructions below this point
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
@@ -3393,14 +3498,14 @@ def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
VEX;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
- VEX;
+ VEX, VEX_L;
}
def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
VEX;
def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
- VEX;
+ VEX, VEX_L;
// For Disassembler
let isCodeGenOnly = 1 in {
@@ -3410,16 +3515,14 @@ def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
VEX;
def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
"movdqa\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>,
- VEX;
+ IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}", [],
IIC_SSE_MOVU_P_RR>,
VEX;
def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
"movdqu\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>,
- VEX;
+ IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}
let canFoldAsLoad = 1, mayLoad = 1 in {
@@ -3428,14 +3531,14 @@ def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
VEX;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
- VEX;
+ VEX, VEX_L;
let Predicates = [HasAVX] in {
def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
XS, VEX;
def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
- XS, VEX;
+ XS, VEX, VEX_L;
}
}
@@ -3447,14 +3550,14 @@ def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
(ins i256mem:$dst, VR256:$src),
"movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
- VEX;
+ VEX, VEX_L;
let Predicates = [HasAVX] in {
def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
XS, VEX;
def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
- XS, VEX;
+ XS, VEX, VEX_L;
}
}
@@ -3464,7 +3567,7 @@ def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVU_P_RR>, XS, Requires<[HasSSE2]>;
+ [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
// For Disassembler
let isCodeGenOnly = 1 in {
@@ -3474,7 +3577,7 @@ def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVU_P_RR>, XS, Requires<[HasSSE2]>;
+ [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
}
let canFoldAsLoad = 1, mayLoad = 1 in {
@@ -3486,7 +3589,7 @@ def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
IIC_SSE_MOVU_P_RM>,
- XS, Requires<[HasSSE2]>;
+ XS, Requires<[UseSSE2]>;
}
let mayStore = 1 in {
@@ -3498,7 +3601,7 @@ def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(store (v2i64 VR128:$src), addr:$dst)*/],
IIC_SSE_MOVU_P_MR>,
- XS, Requires<[HasSSE2]>;
+ XS, Requires<[UseSSE2]>;
}
// Intrinsic forms of MOVDQU load and store
@@ -3512,7 +3615,7 @@ def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)],
IIC_SSE_MOVU_P_MR>,
- XS, Requires<[HasSSE2]>;
+ XS, Requires<[UseSSE2]>;
} // ExeDomain = SSEPackedInt
@@ -3690,82 +3793,82 @@ defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw,
let Predicates = [HasAVX2] in {
defm VPADDBY : PDI_binop_rm<0xFC, "vpaddb", add, v32i8, VR256, memopv4i64,
- i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPADDWY : PDI_binop_rm<0xFD, "vpaddw", add, v16i16, VR256, memopv4i64,
- i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPADDDY : PDI_binop_rm<0xFE, "vpaddd", add, v8i32, VR256, memopv4i64,
- i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPADDQY : PDI_binop_rm<0xD4, "vpaddq", add, v4i64, VR256, memopv4i64,
- i256mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V;
+ i256mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPMULLWY : PDI_binop_rm<0xD5, "vpmullw", mul, v16i16, VR256, memopv4i64,
- i256mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+ i256mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPSUBBY : PDI_binop_rm<0xF8, "vpsubb", sub, v32i8, VR256, memopv4i64,
- i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPSUBWY : PDI_binop_rm<0xF9, "vpsubw", sub, v16i16, VR256, memopv4i64,
- i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPSUBDY : PDI_binop_rm<0xFA, "vpsubd", sub, v8i32, VR256, memopv4i64,
- i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPSUBQY : PDI_binop_rm<0xFB, "vpsubq", sub, v4i64, VR256, memopv4i64,
- i256mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V;
+ i256mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
VR256, memopv4i64, i256mem,
- SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
// Intrinsic forms
defm VPSUBSBY : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_avx2_psubs_b,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPSUBSWY : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_avx2_psubs_w,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPSUBUSBY : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_avx2_psubus_b,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPSUBUSWY : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_avx2_psubus_w,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPADDSBY : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_avx2_padds_b,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPADDSWY : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_avx2_padds_w,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPADDUSBY : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_avx2_paddus_b,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPADDUSWY : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_avx2_paddus_w,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPMULHUWY : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_avx2_pmulhu_w,
VR256, memopv4i64, i256mem,
- SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPMULHWY : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_avx2_pmulh_w,
VR256, memopv4i64, i256mem,
- SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPMADDWDY : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_avx2_pmadd_wd,
VR256, memopv4i64, i256mem,
- SSE_PMADD, 1, 0>, VEX_4V;
+ SSE_PMADD, 1, 0>, VEX_4V, VEX_L;
defm VPAVGBY : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_avx2_pavg_b,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPAVGWY : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_avx2_pavg_w,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPMINUBY : PDI_binop_rm_int<0xDA, "vpminub", int_x86_avx2_pminu_b,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPMINSWY : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_avx2_pmins_w,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPMAXUBY : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_avx2_pmaxu_b,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPMAXSWY : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_avx2_pmaxs_w,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPSADBWY : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_avx2_psad_bw,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -3901,30 +4004,30 @@ let ExeDomain = SSEPackedInt in {
let Predicates = [HasAVX2] in {
defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
VR256, v16i16, v8i16, bc_v8i16,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
VR256, v8i32, v4i32, bc_v4i32,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
VR256, v4i64, v2i64, bc_v2i64,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
VR256, v16i16, v8i16, bc_v8i16,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
VR256, v8i32, v4i32, bc_v4i32,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
VR256, v4i64, v2i64, bc_v2i64,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
VR256, v16i16, v8i16, bc_v8i16,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
VR256, v8i32, v4i32, bc_v4i32,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
let ExeDomain = SSEPackedInt in {
// 256-bit logical shifts.
@@ -3933,13 +4036,13 @@ let ExeDomain = SSEPackedInt in {
"vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR256:$dst,
(int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>,
- VEX_4V;
+ VEX_4V, VEX_L;
def VPSRLDQYri : PDIi8<0x73, MRM3r,
(outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
"vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR256:$dst,
(int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>,
- VEX_4V;
+ VEX_4V, VEX_L;
// PSRADQYri doesn't exist in SSE[1-3].
}
} // Predicates = [HasAVX2]
@@ -4010,7 +4113,7 @@ let Predicates = [HasAVX2] in {
(VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
(PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
@@ -4053,22 +4156,22 @@ let Predicates = [HasAVX] in {
let Predicates = [HasAVX2] in {
defm VPCMPEQBY : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v32i8,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPCMPEQWY : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v16i16,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPCMPEQDY : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v8i32,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPCMPGTBY : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v32i8,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPCMPGTWY : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v16i16,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPCMPGTDY : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v8i32,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -4111,13 +4214,13 @@ defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
let Predicates = [HasAVX2] in {
defm VPACKSSWBY : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_avx2_packsswb,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPACKSSDWY : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_avx2_packssdw,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
defm VPACKUSWBY : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_avx2_packuswb,
VR256, memopv4i64, i256mem,
- SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -4187,12 +4290,15 @@ let Predicates = [HasAVX] in {
}
let Predicates = [HasAVX2] in {
- defm VPSHUFD : sse2_pshuffle_y<"vpshufd", v8i32, X86PShufd>, TB, OpSize, VEX;
- defm VPSHUFHW : sse2_pshuffle_y<"vpshufhw", v16i16, X86PShufhw>, XS, VEX;
- defm VPSHUFLW : sse2_pshuffle_y<"vpshuflw", v16i16, X86PShuflw>, XD, VEX;
+ defm VPSHUFD : sse2_pshuffle_y<"vpshufd", v8i32, X86PShufd>,
+                            TB, OpSize, VEX, VEX_L;
+ defm VPSHUFHW : sse2_pshuffle_y<"vpshufhw", v16i16, X86PShufhw>,
+                            XS, VEX, VEX_L;
+ defm VPSHUFLW : sse2_pshuffle_y<"vpshuflw", v16i16, X86PShuflw>,
+                            XD, VEX, VEX_L;
}
-let Predicates = [HasSSE2] in {
+let Predicates = [UseSSE2] in {
let AddedComplexity = 5 in
defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, X86PShufd>, TB, OpSize;
@@ -4268,22 +4374,22 @@ let Predicates = [HasAVX] in {
let Predicates = [HasAVX2] in {
defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
- bc_v32i8>, VEX_4V;
+ bc_v32i8>, VEX_4V, VEX_L;
defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
- bc_v16i16>, VEX_4V;
+ bc_v16i16>, VEX_4V, VEX_L;
defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
- bc_v8i32>, VEX_4V;
+ bc_v8i32>, VEX_4V, VEX_L;
defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
- bc_v4i64>, VEX_4V;
+ bc_v4i64>, VEX_4V, VEX_L;
defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
- bc_v32i8>, VEX_4V;
+ bc_v32i8>, VEX_4V, VEX_L;
defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
- bc_v16i16>, VEX_4V;
+ bc_v16i16>, VEX_4V, VEX_L;
defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
- bc_v8i32>, VEX_4V;
+ bc_v8i32>, VEX_4V, VEX_L;
defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
- bc_v4i64>, VEX_4V;
+ bc_v4i64>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -4307,28 +4413,6 @@ let Constraints = "$src1 = $dst" in {
}
} // ExeDomain = SSEPackedInt
-// Patterns for using AVX1 instructions with integer vectors
-// Here to give AVX2 priority
-let Predicates = [HasAVX] in {
- def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
- (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
- (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
- (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
- (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
-
- def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))),
- (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
- (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))),
- (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
- (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
-}
-
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Extract and Insert
//===---------------------------------------------------------------------===//
@@ -4377,7 +4461,7 @@ let Predicates = [HasAVX] in {
}
let Constraints = "$src1 = $dst" in
- defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
+ defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[UseSSE2]>;
} // ExeDomain = SSEPackedInt
@@ -4397,9 +4481,9 @@ def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX;
+ [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX, VEX_L;
def VPMOVMSKBYr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
- "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
+ "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
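pmovmskb packs the sign bit of every byte into a scalar bitmask, which is the usual way to turn a byte compare into an index. A sketch assuming SSE2, <emmintrin.h>, and a GCC/Clang builtin for the bit scan:

#include <emmintrin.h>

int first_byte_eq(__m128i haystack, char needle) {
  __m128i eq = _mm_cmpeq_epi8(haystack, _mm_set1_epi8(needle)); // pcmpeqb
  int mask = _mm_movemask_epi8(eq);                             // pmovmskb
  return mask ? __builtin_ctz(mask) : -1;  // __builtin_ctz: GCC/Clang only
}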
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
@@ -4538,7 +4622,7 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
// Move Packed Doubleword Int first element to Doubleword Int
//
def VMOVPQIto64rr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "vmov{d|q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
(iPTR 0)))],
IIC_SSE_MOVD_ToGP>,
@@ -4654,14 +4738,14 @@ let Predicates = [HasAVX] in {
}
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
- (v4i32 (scalar_to_vector GR32:$src)),(i32 0)))),
+ (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
- (v2i64 (scalar_to_vector GR64:$src)),(i32 0)))),
+ (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
(SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
}
-let Predicates = [HasSSE2], AddedComplexity = 20 in {
+let Predicates = [UseSSE2], AddedComplexity = 20 in {
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
(MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
@@ -4701,7 +4785,7 @@ def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
[(set VR128:$dst,
(v2i64 (scalar_to_vector (loadi64 addr:$src))))],
IIC_SSE_MOVDQ>, XS,
- Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
+ Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int
@@ -4744,7 +4828,7 @@ def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
(v2i64 (X86vzmovl (v2i64 (scalar_to_vector
(loadi64 addr:$src))))))],
IIC_SSE_MOVDQ>,
- XS, Requires<[HasSSE2]>;
+ XS, Requires<[UseSSE2]>;
let Predicates = [HasAVX], AddedComplexity = 20 in {
def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
@@ -4755,7 +4839,7 @@ let Predicates = [HasAVX], AddedComplexity = 20 in {
(VMOVZQI2PQIrm addr:$src)>;
}
-let Predicates = [HasSSE2], AddedComplexity = 20 in {
+let Predicates = [UseSSE2], AddedComplexity = 20 in {
def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
(MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
@@ -4785,7 +4869,7 @@ def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
IIC_SSE_MOVQ_RR>,
- XS, Requires<[HasSSE2]>;
+ XS, Requires<[UseSSE2]>;
let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
@@ -4800,7 +4884,7 @@ def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
[(set VR128:$dst, (v2i64 (X86vzmovl
(loadv2i64 addr:$src))))],
IIC_SSE_MOVDQ>,
- XS, Requires<[HasSSE2]>;
+ XS, Requires<[UseSSE2]>;
}
let AddedComplexity = 20 in {
@@ -4810,7 +4894,7 @@ let AddedComplexity = 20 in {
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
(VMOVZPQILo2PQIrr VR128:$src)>;
}
- let Predicates = [HasSSE2] in {
+ let Predicates = [UseSSE2] in {
def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
(MOVZPQILo2PQIrm addr:$src)>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
@@ -4862,9 +4946,9 @@ let Predicates = [HasAVX] in {
defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
v4f32, VR128, memopv4f32, f128mem>, VEX;
defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
- v8f32, VR256, memopv8f32, f256mem>, VEX;
+ v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L;
defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
- v8f32, VR256, memopv8f32, f256mem>, VEX;
+ v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
memopv4f32, f128mem>;
@@ -4890,7 +4974,7 @@ let Predicates = [HasAVX] in {
(VMOVSLDUPYrm addr:$src)>;
}
-let Predicates = [HasSSE3] in {
+let Predicates = [UseSSE3] in {
def : Pat<(v4i32 (X86Movshdup VR128:$src)),
(MOVSHDUPrr VR128:$src)>;
def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
@@ -4932,7 +5016,7 @@ def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
let Predicates = [HasAVX] in {
defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
- defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
+ defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
}
defm MOVDDUP : sse3_replicate_dfp<"movddup">;
@@ -4959,7 +5043,7 @@ let Predicates = [HasAVX] in {
(VMOVDDUPYrr VR256:$src)>;
}
-let Predicates = [HasSSE3] in {
+let Predicates = [UseSSE3] in {
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
(MOVDDUPrm addr:$src)>;
def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
@@ -4981,7 +5065,8 @@ let Predicates = [HasAVX] in {
[(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vlddqu\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
+ [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
+ VEX, VEX_L;
}
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"lddqu\t{$src, $dst|$dst, $src}",
@@ -5014,16 +5099,16 @@ let Predicates = [HasAVX] in {
defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
f128mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
- f256mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
+ f256mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
f128mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
- f256mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
+ f256mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V, VEX_L;
}
}
-let Constraints = "$src1 = $dst", Predicates = [HasSSE3] in {
+let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
let ExeDomain = SSEPackedSingle in
defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
f128mem, SSE_ALU_F32P>, TB, XD;
@@ -5075,9 +5160,9 @@ let Predicates = [HasAVX] in {
defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
X86fhsub, 0>, VEX_4V;
defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
- X86fhadd, 0>, VEX_4V;
+ X86fhadd, 0>, VEX_4V, VEX_L;
defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
- X86fhsub, 0>, VEX_4V;
+ X86fhsub, 0>, VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
@@ -5085,9 +5170,9 @@ let Predicates = [HasAVX] in {
defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
X86fhsub, 0>, VEX_4V;
defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
- X86fhadd, 0>, VEX_4V;
+ X86fhadd, 0>, VEX_4V, VEX_L;
defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
- X86fhsub, 0>, VEX_4V;
+ X86fhsub, 0>, VEX_4V, VEX_L;
}
}
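addsubps and the movsldup/movshdup replicators above exist largely for complex arithmetic; the classic complex-multiply kernel uses all three. A sketch assuming SSE3 and <pmmintrin.h>, with {re,im} pairs packed in each vector:

#include <pmmintrin.h>

// (a+bi)*(c+di) for two complex pairs per __m128:
__m128 cmul(__m128 ab, __m128 cd) {
  __m128 re = _mm_moveldup_ps(ab);            // {a,a,...}   movsldup
  __m128 im = _mm_movehdup_ps(ab);            // {b,b,...}   movshdup
  __m128 sw = _mm_shuffle_ps(cd, cd, 0xB1);   // {d,c,...}
  return _mm_addsub_ps(_mm_mul_ps(re, cd),    // addsubps:
                       _mm_mul_ps(im, sw));   // {ac-bd, ad+bc, ...}
}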
@@ -5153,11 +5238,11 @@ let Predicates = [HasAVX] in {
let Predicates = [HasAVX2] in {
defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
- int_x86_avx2_pabs_b>, VEX;
+ int_x86_avx2_pabs_b>, VEX, VEX_L;
defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
- int_x86_avx2_pabs_w>, VEX;
+ int_x86_avx2_pabs_w>, VEX, VEX_L;
defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
- int_x86_avx2_pabs_d>, VEX;
+ int_x86_avx2_pabs_d>, VEX, VEX_L;
}
defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb",
@@ -5296,37 +5381,37 @@ let ImmT = NoImm, Predicates = [HasAVX2] in {
let isCommutable = 0 in {
defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
memopv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V;
+ SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
memopv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V;
+ SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
memopv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V;
+ SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
memopv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V;
+ SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
memopv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V;
+ SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
memopv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V;
+ SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
memopv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V;
+ SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
memopv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V;
+ SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
- int_x86_avx2_phadd_sw>, VEX_4V;
+ int_x86_avx2_phadd_sw>, VEX_4V, VEX_L;
defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
- int_x86_avx2_phsub_sw>, VEX_4V;
+ int_x86_avx2_phsub_sw>, VEX_4V, VEX_L;
defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
- int_x86_avx2_pmadd_ub_sw>, VEX_4V;
+ int_x86_avx2_pmadd_ub_sw>, VEX_4V, VEX_L;
}
defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
- int_x86_avx2_pmul_hr_sw>, VEX_4V;
+ int_x86_avx2_pmul_hr_sw>, VEX_4V, VEX_L;
}
// None of these have i8 immediate fields.
@@ -5405,8 +5490,8 @@ multiclass ssse3_palign_y<string asm, bit Is2Addr = 1> {
let Predicates = [HasAVX] in
defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
let Predicates = [HasAVX2] in
- defm VPALIGN : ssse3_palign_y<"vpalignr", 0>, VEX_4V;
-let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
+ defm VPALIGN : ssse3_palign_y<"vpalignr", 0>, VEX_4V, VEX_L;
+let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
defm PALIGN : ssse3_palign<"palignr">;
let Predicates = [HasAVX2] in {
@@ -5431,7 +5516,7 @@ def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
(VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}
-let Predicates = [HasSSSE3] in {
+let Predicates = [UseSSSE3] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
(PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
@@ -5512,17 +5597,17 @@ defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
let Predicates = [HasAVX2] in {
defm VPMOVSXBW : SS41I_binop_rm_int16_y<0x20, "vpmovsxbw",
- int_x86_avx2_pmovsxbw>, VEX;
+ int_x86_avx2_pmovsxbw>, VEX, VEX_L;
defm VPMOVSXWD : SS41I_binop_rm_int16_y<0x23, "vpmovsxwd",
- int_x86_avx2_pmovsxwd>, VEX;
+ int_x86_avx2_pmovsxwd>, VEX, VEX_L;
defm VPMOVSXDQ : SS41I_binop_rm_int16_y<0x25, "vpmovsxdq",
- int_x86_avx2_pmovsxdq>, VEX;
+ int_x86_avx2_pmovsxdq>, VEX, VEX_L;
defm VPMOVZXBW : SS41I_binop_rm_int16_y<0x30, "vpmovzxbw",
- int_x86_avx2_pmovzxbw>, VEX;
+ int_x86_avx2_pmovzxbw>, VEX, VEX_L;
defm VPMOVZXWD : SS41I_binop_rm_int16_y<0x33, "vpmovzxwd",
- int_x86_avx2_pmovzxwd>, VEX;
+ int_x86_avx2_pmovzxwd>, VEX, VEX_L;
defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq",
- int_x86_avx2_pmovzxdq>, VEX;
+ int_x86_avx2_pmovzxdq>, VEX, VEX_L;
}
defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
@@ -5538,64 +5623,88 @@ let Predicates = [HasAVX] in {
(VPMOVSXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
(VPMOVSXBWrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))),
+ (VPMOVSXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
(VPMOVSXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
(VPMOVSXWDrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))),
+ (VPMOVSXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
(VPMOVSXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
(VPMOVSXDQrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))),
+ (VPMOVSXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
(VPMOVZXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
(VPMOVZXBWrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))),
+ (VPMOVZXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
(VPMOVZXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
(VPMOVZXWDrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))),
+ (VPMOVZXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
(VPMOVZXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
(VPMOVZXDQrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))),
+ (VPMOVZXDQrm addr:$src)>;
}
-let Predicates = [HasSSE41] in {
+let Predicates = [UseSSE41] in {
// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
(PMOVSXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
(PMOVSXBWrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))),
+ (PMOVSXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
(PMOVSXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
(PMOVSXWDrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))),
+ (PMOVSXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
(PMOVSXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
(PMOVSXDQrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))),
+ (PMOVSXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
(PMOVZXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
(PMOVZXBWrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))),
+ (PMOVZXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
(PMOVZXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
(PMOVZXWDrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))),
+ (PMOVZXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
(PMOVZXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
(PMOVZXDQrm addr:$src)>;
+ def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))),
+ (PMOVZXDQrm addr:$src)>;
}
let Predicates = [HasAVX2] in {
@@ -5615,7 +5724,7 @@ let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
}
-let Predicates = [HasSSE41] in {
+let Predicates = [UseSSE41] in {
def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
}
@@ -5659,13 +5768,13 @@ defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
let Predicates = [HasAVX2] in {
defm VPMOVSXBD : SS41I_binop_rm_int8_y<0x21, "vpmovsxbd",
- int_x86_avx2_pmovsxbd>, VEX;
+ int_x86_avx2_pmovsxbd>, VEX, VEX_L;
defm VPMOVSXWQ : SS41I_binop_rm_int8_y<0x24, "vpmovsxwq",
- int_x86_avx2_pmovsxwq>, VEX;
+ int_x86_avx2_pmovsxwq>, VEX, VEX_L;
defm VPMOVZXBD : SS41I_binop_rm_int8_y<0x31, "vpmovzxbd",
- int_x86_avx2_pmovzxbd>, VEX;
+ int_x86_avx2_pmovzxbd>, VEX, VEX_L;
defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq",
- int_x86_avx2_pmovzxwq>, VEX;
+ int_x86_avx2_pmovzxwq>, VEX, VEX_L;
}
defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
@@ -5686,7 +5795,7 @@ let Predicates = [HasAVX] in {
(VPMOVZXWQrm addr:$src)>;
}
-let Predicates = [HasSSE41] in {
+let Predicates = [UseSSE41] in {
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
(PMOVSXBDrm addr:$src)>;
@@ -5734,9 +5843,9 @@ defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
}
let Predicates = [HasAVX2] in {
defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq",
- int_x86_avx2_pmovsxbq>, VEX;
+ int_x86_avx2_pmovsxbq>, VEX, VEX_L;
defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq",
- int_x86_avx2_pmovzxbq>, VEX;
+ int_x86_avx2_pmovzxbq>, VEX, VEX_L;
}
defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
@@ -5754,7 +5863,7 @@ let Predicates = [HasAVX] in {
(VPMOVZXBQrm addr:$src)>;
}
-let Predicates = [HasSSE41] in {
+let Predicates = [UseSSE41] in {
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
(bitconvert (v4i32 (X86vzmovl
@@ -5767,6 +5876,100 @@ let Predicates = [HasSSE41] in {
(PMOVZXBQrm addr:$src)>;
}
+let Predicates = [HasAVX2] in {
+ def : Pat<(v16i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWYrr VR128:$src)>;
+ def : Pat<(v8i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDYrr VR128:$src)>;
+ def : Pat<(v4i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQYrr VR128:$src)>;
+
+ def : Pat<(v8i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDYrr VR128:$src)>;
+ def : Pat<(v4i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQYrr VR128:$src)>;
+
+ def : Pat<(v4i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQYrr VR128:$src)>;
+
+ def : Pat<(v16i16 (X86vzext (v32i8 VR256:$src))),
+ (VPMOVZXBWYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+ def : Pat<(v8i32 (X86vzext (v32i8 VR256:$src))),
+ (VPMOVZXBDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+ def : Pat<(v4i64 (X86vzext (v32i8 VR256:$src))),
+ (VPMOVZXBQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+
+ def : Pat<(v8i32 (X86vzext (v16i16 VR256:$src))),
+ (VPMOVZXWDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+ def : Pat<(v4i64 (X86vzext (v16i16 VR256:$src))),
+ (VPMOVZXWQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+
+ def : Pat<(v4i64 (X86vzext (v8i32 VR256:$src))),
+ (VPMOVZXDQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+}
+
+let Predicates = [HasAVX] in {
+ def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWrr VR128:$src)>;
+ def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDrr VR128:$src)>;
+ def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQrr VR128:$src)>;
+
+ def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDrr VR128:$src)>;
+ def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQrr VR128:$src)>;
+
+ def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQrr VR128:$src)>;
+
+ def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
+ (VPMOVZXBWrm addr:$src)>;
+ def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
+ (VPMOVZXBWrm addr:$src)>;
+ def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
+ (VPMOVZXBDrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))),
+ (VPMOVZXBQrm addr:$src)>;
+
+ def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
+ (VPMOVZXWDrm addr:$src)>;
+ def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
+ (VPMOVZXWDrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
+ (VPMOVZXWQrm addr:$src)>;
+
+ def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
+ (VPMOVZXDQrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
+ (VPMOVZXDQrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))),
+ (VPMOVZXDQrm addr:$src)>;
+}
+
+let Predicates = [UseSSE41] in {
+ def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (PMOVZXBWrr VR128:$src)>;
+ def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (PMOVZXBDrr VR128:$src)>;
+ def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (PMOVZXBQrr VR128:$src)>;
+
+ def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (PMOVZXWDrr VR128:$src)>;
+ def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (PMOVZXWQrr VR128:$src)>;
+
+ def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (PMOVZXDQrr VR128:$src)>;
+
+ def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
+ (PMOVZXBWrm addr:$src)>;
+ def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
+ (PMOVZXBWrm addr:$src)>;
+ def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
+ (PMOVZXBDrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))),
+ (PMOVZXBQrm addr:$src)>;
+
+ def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
+ (PMOVZXWDrm addr:$src)>;
+ def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
+ (PMOVZXWDrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
+ (PMOVZXWQrm addr:$src)>;
+
+ def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
+ (PMOVZXDQrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
+ (PMOVZXDQrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))),
+ (PMOVZXDQrm addr:$src)>;
+}
+
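The X86vzext patterns above select the pmovzx* family for IR-level zero-extends, with the rm forms folding the narrow load. At the intrinsics level the same instructions look like this, assuming SSE4.1/AVX2 and <immintrin.h>:

#include <immintrin.h>

__m128i widen8to16_sse41(__m128i v) {
  return _mm_cvtepu8_epi16(v);       // pmovzxbw: low 8 bytes -> 8 x u16
}

__m256i widen8to16_avx2(__m128i v) {
  return _mm256_cvtepu8_epi16(v);    // vpmovzxbw ymm (VEX_L): 16 bytes -> 16 x u16
}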
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//
@@ -5900,7 +6103,7 @@ def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
imm:$src2))),
addr:$dst),
(EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
- Requires<[HasSSE41]>;
+ Requires<[UseSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
@@ -6147,7 +6350,7 @@ let Predicates = [HasAVX] in {
defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
memopv8f32, memopv4f64,
int_x86_avx_round_ps_256,
- int_x86_avx_round_pd_256>, VEX;
+ int_x86_avx_round_pd_256>, VEX, VEX_L;
defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
int_x86_sse41_round_ss,
int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
@@ -6172,6 +6375,15 @@ let Predicates = [HasAVX] in {
(VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
def : Pat<(f64 (ftrunc FR64:$src)),
(VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
+
+ def : Pat<(v4f32 (ffloor VR128:$src)),
+ (VROUNDPSr VR128:$src, (i32 0x1))>;
+ def : Pat<(v2f64 (ffloor VR128:$src)),
+ (VROUNDPDr VR128:$src, (i32 0x1))>;
+ def : Pat<(v8f32 (ffloor VR256:$src)),
+ (VROUNDYPSr VR256:$src, (i32 0x1))>;
+ def : Pat<(v4f64 (ffloor VR256:$src)),
+ (VROUNDYPDr VR256:$src, (i32 0x1))>;
}
defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
@@ -6181,26 +6393,33 @@ let Constraints = "$src1 = $dst" in
defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
-def : Pat<(ffloor FR32:$src),
- (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
-def : Pat<(f64 (ffloor FR64:$src)),
- (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
-def : Pat<(f32 (fnearbyint FR32:$src)),
- (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
-def : Pat<(f64 (fnearbyint FR64:$src)),
- (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
-def : Pat<(f32 (fceil FR32:$src)),
- (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
-def : Pat<(f64 (fceil FR64:$src)),
- (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
-def : Pat<(f32 (frint FR32:$src)),
- (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
-def : Pat<(f64 (frint FR64:$src)),
- (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
-def : Pat<(f32 (ftrunc FR32:$src)),
- (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
-def : Pat<(f64 (ftrunc FR64:$src)),
- (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
+let Predicates = [UseSSE41] in {
+ def : Pat<(ffloor FR32:$src),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
+ def : Pat<(f64 (ffloor FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
+ def : Pat<(f32 (fnearbyint FR32:$src)),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
+ def : Pat<(f64 (fnearbyint FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
+ def : Pat<(f32 (fceil FR32:$src)),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
+ def : Pat<(f64 (fceil FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
+ def : Pat<(f32 (frint FR32:$src)),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
+ def : Pat<(f64 (frint FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
+ def : Pat<(f32 (ftrunc FR32:$src)),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
+ def : Pat<(f64 (ftrunc FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
+
+ def : Pat<(v4f32 (ffloor VR128:$src)),
+ (ROUNDPSr VR128:$src, (i32 0x1))>;
+ def : Pat<(v2f64 (ffloor VR128:$src)),
+ (ROUNDPDr VR128:$src, (i32 0x1))>;
+}
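The round* immediates used in these patterns decode as: bits 1:0 select the mode (01 down, 10 up, 11 truncate), bit 2 defers to MXCSR's current rounding mode, and bit 3 suppresses the precision (inexact) exception; hence 0x1 = ffloor, 0x2 = fceil, 0x3 = ftrunc, 0x4 = frint, and 0xC = fnearbyint above. The <smmintrin.h> macros spell the same values:

#include <smmintrin.h>

__m128 floor4(__m128 v)  { return _mm_round_ps(v, _MM_FROUND_FLOOR);     } // imm 0x1
__m128 trunc4(__m128 v)  { return _mm_round_ps(v, _MM_FROUND_TO_ZERO);   } // imm 0x3
__m128 nearby4(__m128 v) { return _mm_round_ps(v, _MM_FROUND_NEARBYINT); } // imm 0xC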
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
@@ -6221,11 +6440,11 @@ def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
- OpSize, VEX;
+ OpSize, VEX, VEX_L;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
- OpSize, VEX;
+ OpSize, VEX, VEX_L;
}
let Defs = [EFLAGS] in {
@@ -6254,11 +6473,13 @@ multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
let Defs = [EFLAGS], Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
-defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
+defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>,
+ VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
-defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
+defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>,
+ VEX_L;
}
}
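ptest and vtestps/vtestpd write only EFLAGS (hence Defs = [EFLAGS] with empty outs): for ptest with arguments (a, b), ZF = ((a & b) == 0) and CF = ((~a & b) == 0). A branch-free all-zeros check using the SSE4.1 intrinsic that reads ZF, assuming <smmintrin.h>:

#include <smmintrin.h>

bool all_zero(__m128i v) {
  return _mm_testz_si128(v, v) != 0;   // ptest v,v ; ZF == 1 iff v == 0
}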
@@ -6338,7 +6559,7 @@ multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
(bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
}
-/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
+/// SS41I_binop_rm_int_y - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
Intrinsic IntId256> {
let isCommutable = 1 in
@@ -6381,25 +6602,25 @@ let Predicates = [HasAVX] in {
let Predicates = [HasAVX2] in {
let isCommutable = 0 in
defm VPACKUSDW : SS41I_binop_rm_int_y<0x2B, "vpackusdw",
- int_x86_avx2_packusdw>, VEX_4V;
+ int_x86_avx2_packusdw>, VEX_4V, VEX_L;
defm VPMINSB : SS41I_binop_rm_int_y<0x38, "vpminsb",
- int_x86_avx2_pmins_b>, VEX_4V;
+ int_x86_avx2_pmins_b>, VEX_4V, VEX_L;
defm VPMINSD : SS41I_binop_rm_int_y<0x39, "vpminsd",
- int_x86_avx2_pmins_d>, VEX_4V;
+ int_x86_avx2_pmins_d>, VEX_4V, VEX_L;
defm VPMINUD : SS41I_binop_rm_int_y<0x3B, "vpminud",
- int_x86_avx2_pminu_d>, VEX_4V;
+ int_x86_avx2_pminu_d>, VEX_4V, VEX_L;
defm VPMINUW : SS41I_binop_rm_int_y<0x3A, "vpminuw",
- int_x86_avx2_pminu_w>, VEX_4V;
+ int_x86_avx2_pminu_w>, VEX_4V, VEX_L;
defm VPMAXSB : SS41I_binop_rm_int_y<0x3C, "vpmaxsb",
- int_x86_avx2_pmaxs_b>, VEX_4V;
+ int_x86_avx2_pmaxs_b>, VEX_4V, VEX_L;
defm VPMAXSD : SS41I_binop_rm_int_y<0x3D, "vpmaxsd",
- int_x86_avx2_pmaxs_d>, VEX_4V;
+ int_x86_avx2_pmaxs_d>, VEX_4V, VEX_L;
defm VPMAXUD : SS41I_binop_rm_int_y<0x3F, "vpmaxud",
- int_x86_avx2_pmaxu_d>, VEX_4V;
+ int_x86_avx2_pmaxu_d>, VEX_4V, VEX_L;
defm VPMAXUW : SS41I_binop_rm_int_y<0x3E, "vpmaxuw",
- int_x86_avx2_pmaxu_w>, VEX_4V;
+ int_x86_avx2_pmaxu_w>, VEX_4V, VEX_L;
defm VPMULDQ : SS41I_binop_rm_int_y<0x28, "vpmuldq",
- int_x86_avx2_pmul_dq>, VEX_4V;
+ int_x86_avx2_pmul_dq>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -6445,9 +6666,9 @@ let Predicates = [HasAVX] in {
}
let Predicates = [HasAVX2] in {
defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
- memopv4i64, i256mem, 0>, VEX_4V;
+ memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
- memopv4i64, i256mem, 0>, VEX_4V;
+ memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -6490,13 +6711,15 @@ let Predicates = [HasAVX] in {
defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
VR128, memopv4f32, f128mem, 0>, VEX_4V;
defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
- int_x86_avx_blend_ps_256, VR256, memopv8f32, f256mem, 0>, VEX_4V;
+ int_x86_avx_blend_ps_256, VR256, memopv8f32,
+ f256mem, 0>, VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
VR128, memopv2f64, f128mem, 0>, VEX_4V;
defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
- int_x86_avx_blend_pd_256, VR256, memopv4f64, f256mem, 0>, VEX_4V;
+ int_x86_avx_blend_pd_256, VR256, memopv4f64,
+ f256mem, 0>, VEX_4V, VEX_L;
}
defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
VR128, memopv2i64, i128mem, 0>, VEX_4V;
@@ -6511,15 +6734,15 @@ let Predicates = [HasAVX] in {
VR128, memopv2f64, f128mem, 0>, VEX_4V;
let ExeDomain = SSEPackedSingle in
defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
- VR256, memopv8f32, i256mem, 0>, VEX_4V;
+ VR256, memopv8f32, i256mem, 0>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
let isCommutable = 0 in {
defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
- VR256, memopv4i64, i256mem, 0>, VEX_4V;
+ VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
- VR256, memopv4i64, i256mem, 0>, VEX_4V;
+ VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
}
}
@@ -6570,13 +6793,13 @@ let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
memopv2f64, int_x86_sse41_blendvpd>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
- memopv4f64, int_x86_avx_blendv_pd_256>;
+ memopv4f64, int_x86_avx_blendv_pd_256>, VEX_L;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
memopv4f32, int_x86_sse41_blendvps>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
- memopv8f32, int_x86_avx_blendv_ps_256>;
+ memopv8f32, int_x86_avx_blendv_ps_256>, VEX_L;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
memopv2i64, int_x86_sse41_pblendvb>;
@@ -6584,7 +6807,7 @@ defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
let Predicates = [HasAVX2] in {
defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
- memopv4i64, int_x86_avx2_pblendvb>;
+ memopv4i64, int_x86_avx2_pblendvb>, VEX_L;
}
let Predicates = [HasAVX] in {
@@ -6687,7 +6910,7 @@ def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
(PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
-let Predicates = [HasSSE41] in {
+let Predicates = [UseSSE41] in {
def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
(v16i8 VR128:$src2))),
(PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
@@ -6725,7 +6948,7 @@ let Predicates = [HasAVX2] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
- OpSize, VEX;
+ OpSize, VEX, VEX_L;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movntdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
@@ -6761,7 +6984,7 @@ let Predicates = [HasAVX] in
let Predicates = [HasAVX2] in
defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
- memopv4i64, i256mem, 0>, VEX_4V;
+ memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in
defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
@@ -6779,34 +7002,31 @@ multiclass pseudo_pcmpistrm<string asm> {
imm:$src3))]>;
def MEM : PseudoI<(outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
- VR128:$src1, (load addr:$src2), imm:$src3))]>;
+ [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
+ (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
}
let Defs = [EFLAGS], usesCustomInserter = 1 in {
- let AddedComplexity = 1 in
- defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
- defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
+ defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
+ defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[UseSSE42]>;
}
-let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1, Predicates = [HasAVX] in {
- def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
+multiclass pcmpistrm_SS42AI<string asm> {
+ def rr : SS42AI<0x62, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
+ []>, OpSize;
let mayLoad = 1 in
- def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
+ def rm : SS42AI<0x62, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
+ []>, OpSize;
}
let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in {
- def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
- let mayLoad = 1 in
- def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
+ let Predicates = [HasAVX] in
+ defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
+ defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm">;
}
// Packed Compare Explicit Length Strings, Return Mask
@@ -6817,74 +7037,103 @@ multiclass pseudo_pcmpestrm<string asm> {
VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
def MEM : PseudoI<(outs VR128:$dst),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
- VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
+ [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
+ (bc_v16i8 (memopv2i64 addr:$src3)), EDX, imm:$src5))]>;
}
let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
- let AddedComplexity = 1 in
- defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
- defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
+ defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
+ defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[UseSSE42]>;
}
-let Predicates = [HasAVX],
- Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
- def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
- "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
+multiclass SS42AI_pcmpestrm<string asm> {
+ def rr : SS42AI<0x60, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
+ []>, OpSize;
let mayLoad = 1 in
- def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
+ def rm : SS42AI<0x60, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
+ []>, OpSize;
}
let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
- def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
- "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
- let mayLoad = 1 in
- def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
+ let Predicates = [HasAVX] in
+ defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
+ defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
}
// Packed Compare Implicit Length Strings, Return Index
-let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in {
- multiclass SS42AI_pcmpistri<string asm> {
- def rr : SS42AI<0x63, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
- []>, OpSize;
- let mayLoad = 1 in
- def rm : SS42AI<0x63, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
- []>, OpSize;
- }
+multiclass pseudo_pcmpistri<string asm> {
+ def REG : PseudoI<(outs GR32:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ [(set GR32:$dst, EFLAGS,
+ (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
+ def MEM : PseudoI<(outs GR32:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
+ (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
}
-let Predicates = [HasAVX] in
-defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
-defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
+let Defs = [EFLAGS], usesCustomInserter = 1 in {
+ defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI">, Requires<[HasAVX]>;
+ defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI">, Requires<[UseSSE42]>;
+}
+
+multiclass SS42AI_pcmpistri<string asm> {
+ def rr : SS42AI<0x63, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
+ []>, OpSize;
+ let mayLoad = 1 in
+ def rm : SS42AI<0x63, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
+ []>, OpSize;
+}
+
+let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in {
+ let Predicates = [HasAVX] in
+ defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
+ defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
+}
// Packed Compare Explicit Length Strings, Return Index
-let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
- multiclass SS42AI_pcmpestri<string asm> {
- def rr : SS42AI<0x61, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
- !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
- []>, OpSize;
- let mayLoad = 1 in
- def rm : SS42AI<0x61, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
- []>, OpSize;
- }
+multiclass pseudo_pcmpestri<string asm> {
+ def REG : PseudoI<(outs GR32:$dst),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ [(set GR32:$dst, EFLAGS,
+ (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
+ def MEM : PseudoI<(outs GR32:$dst),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ [(set GR32:$dst, EFLAGS,
+ (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (memopv2i64 addr:$src3)), EDX,
+ imm:$src5))]>;
}
-let Predicates = [HasAVX] in
-defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
-defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
+let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
+ defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI">, Requires<[HasAVX]>;
+ defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI">, Requires<[UseSSE42]>;
+}
+
+multiclass SS42AI_pcmpestri<string asm> {
+ def rr : SS42AI<0x61, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
+ []>, OpSize;
+ let mayLoad = 1 in
+ def rm : SS42AI<0x61, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
+ []>, OpSize;
+}
+
+let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
+ let Predicates = [HasAVX] in
+ defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
+ defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
+}
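
The pcmpistri/pcmpestri definitions are now split into a pseudo (expanded by the custom inserter, which models the GR32 result plus EFLAGS) and a thin SS42AI encoding multiclass shared by the SSE and VEX forms. A hedged C++ sketch of what the implicit-length compare computes, via the SSE4.2 intrinsic (assumes an SSE4.2 target; flag names are from nmmintrin.h):

    #include <nmmintrin.h>

    // Index of the first byte of `block` that occurs anywhere in `set`;
    // pcmpistri leaves 16 in ECX when nothing matches.
    int first_from_set(__m128i set, __m128i block) {
      return _mm_cmpistri(set, block,
                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                          _SIDD_LEAST_SIGNIFICANT);
    }
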
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
@@ -7175,27 +7424,27 @@ let ExeDomain = SSEPackedSingle in {
def VBROADCASTSSrm : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
int_x86_avx_vbroadcast_ss>;
def VBROADCASTSSYrm : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
- int_x86_avx_vbroadcast_ss_256>;
+ int_x86_avx_vbroadcast_ss_256>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
- int_x86_avx_vbroadcast_sd_256>;
+ int_x86_avx_vbroadcast_sd_256>, VEX_L;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
- int_x86_avx_vbroadcastf128_pd_256>;
+ int_x86_avx_vbroadcastf128_pd_256>, VEX_L;
let ExeDomain = SSEPackedSingle in {
def VBROADCASTSSrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
int_x86_avx2_vbroadcast_ss_ps>;
def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
- int_x86_avx2_vbroadcast_ss_ps_256>;
+ int_x86_avx2_vbroadcast_ss_ps_256>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
- int_x86_avx2_vbroadcast_sd_pd_256>;
+ int_x86_avx2_vbroadcast_sd_pd_256>, VEX_L;
let Predicates = [HasAVX2] in
def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
- int_x86_avx2_vbroadcasti128>;
+ int_x86_avx2_vbroadcasti128>, VEX_L;
let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
@@ -7209,50 +7458,69 @@ let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR128:$src2, i8imm:$src3),
"vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V;
+ []>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f128mem:$src2, i8imm:$src3),
"vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V;
+ []>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX] in {
def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
+
+def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (memopv4f32 addr:$src2),
+ (iPTR imm)),
+ (VINSERTF128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (memopv2f64 addr:$src2),
+ (iPTR imm)),
+ (VINSERTF128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+}
+
+let Predicates = [HasAVX1Only] in {
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
- (i32 imm)),
+def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
+ (iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
- (i32 imm)),
+def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
+ (bc_v4i32 (memopv2i64 addr:$src2)),
+ (iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
- (i32 imm)),
+def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
+ (bc_v16i8 (memopv2i64 addr:$src2)),
+ (iPTR imm)),
+ (VINSERTF128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
+ (bc_v8i16 (memopv2i64 addr:$src2)),
+ (iPTR imm)),
(VINSERTF128rm VR256:$src1, addr:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
}
@@ -7264,64 +7532,69 @@ let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
(ins VR256:$src1, i8imm:$src2),
"vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, VEX;
+ []>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
(ins f128mem:$dst, VR256:$src1, i8imm:$src2),
"vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, VEX;
-}
-
-// Extract and store.
-let Predicates = [HasAVX] in {
- def : Pat<(alignedstore (int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2), addr:$dst),
- (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
- def : Pat<(alignedstore (int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2), addr:$dst),
- (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
- def : Pat<(alignedstore (int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2), addr:$dst),
- (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
-
- def : Pat<(int_x86_sse_storeu_ps addr:$dst, (int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2)),
- (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
- def : Pat<(int_x86_sse2_storeu_pd addr:$dst, (int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2)),
- (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
- def : Pat<(int_x86_sse2_storeu_dq addr:$dst, (bc_v16i8 (int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2))),
- (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+ []>, VEX, VEX_L;
}
// AVX1 patterns
let Predicates = [HasAVX] in {
-def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
- (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
-def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
- (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
-def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
- (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
-
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v4f32 (VEXTRACTF128rr
(v8f32 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v2f64 (VEXTRACTF128rr
(v4f64 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+
+def : Pat<(alignedstore (v4f32 (vextractf128_extract:$ext (v8f32 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v2f64 (vextractf128_extract:$ext (v4f64 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+}
+
+let Predicates = [HasAVX1Only] in {
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v2i64 (VEXTRACTF128rr
- (v4i64 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v4i64 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v4i32 (VEXTRACTF128rr
- (v8i32 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v8i32 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v8i16 (VEXTRACTF128rr
- (v16i16 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v16i16 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v16i8 (VEXTRACTF128rr
- (v32i8 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+ (v32i8 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+
+def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
}
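
These patterns now key the 128-bit subvector insert/extract on iPTR immediates, and the integer-typed variants move under HasAVX1Only since AVX2 supplies VINSERTI128/VEXTRACTI128 for them. A short sketch of the float forms through the AVX intrinsics (assumes an AVX target):

    #include <immintrin.h>

    // vinsertf128 / vextractf128: build a 256-bit vector from two halves,
    // then pull the upper half back out.
    __m256 join(__m128 lo, __m128 hi) {
      return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 1);
    }
    __m128 upper(__m256 v) { return _mm256_extractf128_ps(v, 1); }
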
//===----------------------------------------------------------------------===//
@@ -7339,7 +7612,7 @@ multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
(ins VR256:$src1, f256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
- VEX_4V;
+ VEX_4V, VEX_L;
def mr : AVX8I<opc_mr, MRMDestMem, (outs),
(ins f128mem:$dst, VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -7347,7 +7620,7 @@ multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
(ins f256mem:$dst, VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
+ [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedSingle in
@@ -7395,13 +7668,13 @@ let ExeDomain = SSEPackedSingle in {
defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
memopv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
- memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>;
+ memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
memopv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
- memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>;
+ memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
}
let Predicates = [HasAVX] in {
@@ -7429,38 +7702,38 @@ def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, i8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
- (i8 imm:$src3))))]>, VEX_4V;
+ (i8 imm:$src3))))]>, VEX_4V, VEX_L;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, i8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv8f32 addr:$src2),
- (i8 imm:$src3)))]>, VEX_4V;
+ (i8 imm:$src3)))]>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX] in {
+def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
+def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
+ (memopv4f64 addr:$src2), (i8 imm:$imm))),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
+}
+
+let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
-def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
-def : Pat<(v8f32 (X86VPerm2x128 VR256:$src1,
- (memopv8f32 addr:$src2), (i8 imm:$imm))),
- (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
(bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
(VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
(memopv4i64 addr:$src2), (i8 imm:$imm))),
(VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
-def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
- (memopv4f64 addr:$src2), (i8 imm:$imm))),
- (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
(bc_v32i8 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
(VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
@@ -7511,9 +7784,9 @@ multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
let Predicates = [HasAVX, HasF16C] in {
defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
- defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>;
+ defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
- defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>;
+ defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;
}
//===----------------------------------------------------------------------===//
@@ -7545,7 +7818,7 @@ let isCommutable = 0 in {
defm VPBLENDD : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
VR128, memopv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
- VR256, memopv4i64, i256mem>;
+ VR256, memopv4i64, i256mem>, VEX_L;
}
//===----------------------------------------------------------------------===//
@@ -7564,11 +7837,12 @@ multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
(Int128 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (Int256 VR128:$src))]>, VEX;
+ [(set VR256:$dst, (Int256 VR128:$src))]>, VEX, VEX_L;
def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst,
- (Int256 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
+ (Int256 (scalar_to_vector (ld_frag addr:$src))))]>,
+ VEX, VEX_L;
}
defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
@@ -7647,19 +7921,22 @@ let Predicates = [HasAVX2] in {
}
// AVX1 broadcast patterns
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
(VBROADCASTSSYrm addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
(VBROADCASTSDYrm addr:$src)>;
+def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
+ (VBROADCASTSSrm addr:$src)>;
+}
+
+let Predicates = [HasAVX] in {
def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
(VBROADCASTSSYrm addr:$src)>;
def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
(VBROADCASTSDYrm addr:$src)>;
def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
(VBROADCASTSSrm addr:$src)>;
-def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
- (VBROADCASTSSrm addr:$src)>;
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
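
AVX1 has no integer broadcast instruction, so the integer load-splat patterns above move under HasAVX1Only and reuse VBROADCASTSS/VBROADCASTSD, while the float patterns stay under HasAVX. A minimal sketch (assumes an AVX target):

    #include <immintrin.h>

    // Splat one float from memory to all eight lanes; selects vbroadcastss.
    // On AVX1 a 32-bit integer splat from memory reuses the same instruction.
    __m256 splat8(const float *p) { return _mm256_broadcast_ss(p); }
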
@@ -7700,7 +7977,8 @@ multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>, VEX_4V;
+ (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
+ VEX_4V, VEX_L;
def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr,
@@ -7708,7 +7986,7 @@ multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
[(set VR256:$dst,
(OpVT (X86VPermv VR256:$src1,
(bitconvert (mem_frag addr:$src2)))))]>,
- VEX_4V;
+ VEX_4V, VEX_L;
}
defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, v8i32>;
@@ -7722,14 +8000,15 @@ multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>, VEX;
+ (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
+ VEX, VEX_L;
def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
(ins i256mem:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(OpVT (X86VPermi (mem_frag addr:$src1),
- (i8 imm:$src2))))]>, VEX;
+ (i8 imm:$src2))))]>, VEX, VEX_L;
}
defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, v4i64>, VEX_W;
@@ -7739,20 +8018,18 @@ defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, v4f64>, VEX_W;
//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Floating-Point Values in 128-bit chunks
//
-let AddedComplexity = 1 in {
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, i8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
- (i8 imm:$src3))))]>, VEX_4V;
+ (i8 imm:$src3))))]>, VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, i8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv4i64 addr:$src2),
- (i8 imm:$src3)))]>, VEX_4V;
-}
+ (i8 imm:$src3)))]>, VEX_4V, VEX_L;
-let Predicates = [HasAVX2], AddedComplexity = 1 in {
+let Predicates = [HasAVX2] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
@@ -7779,31 +8056,51 @@ let neverHasSideEffects = 1 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR128:$src2, i8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V;
+ []>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i128mem:$src2, i8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V;
+ []>, VEX_4V, VEX_L;
}
-let Predicates = [HasAVX2], AddedComplexity = 1 in {
+let Predicates = [HasAVX2] in {
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTI128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTI128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTI128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
- (i32 imm)),
+ (iPTR imm)),
(VINSERTI128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
+
+def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
+ (iPTR imm)),
+ (VINSERTI128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
+ (bc_v4i32 (memopv2i64 addr:$src2)),
+ (iPTR imm)),
+ (VINSERTI128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
+ (bc_v16i8 (memopv2i64 addr:$src2)),
+ (iPTR imm)),
+ (VINSERTI128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
+ (bc_v8i16 (memopv2i64 addr:$src2)),
+ (iPTR imm)),
+ (VINSERTI128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
@@ -7814,29 +8111,47 @@ def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
"vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
- VEX;
+ VEX, VEX_L;
let neverHasSideEffects = 1, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
(ins i128mem:$dst, VR256:$src1, i8imm:$src2),
- "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, VEX;
+ "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ VEX, VEX_L;
-let Predicates = [HasAVX2], AddedComplexity = 1 in {
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+let Predicates = [HasAVX2] in {
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v2i64 (VEXTRACTI128rr
(v4i64 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v4i32 (VEXTRACTI128rr
(v8i32 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v8i16 (VEXTRACTI128rr
(v16i16 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
(v16i8 (VEXTRACTI128rr
(v32i8 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+
+def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTI128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTI128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTI128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
+def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1),
+ (iPTR imm))), addr:$dst),
+ (VEXTRACTI128mr addr:$dst, VR256:$src1,
+ (EXTRACT_get_vextractf128_imm VR128:$ext))>;
}
//===----------------------------------------------------------------------===//
@@ -7852,7 +8167,8 @@ multiclass avx2_pmovmask<string OpcodeStr,
def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>, VEX_4V;
+ [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
+ VEX_4V, VEX_L;
def mr : AVX28I<0x8e, MRMDestMem, (outs),
(ins i128mem:$dst, VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -7860,7 +8176,7 @@ multiclass avx2_pmovmask<string OpcodeStr,
def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
(ins i256mem:$dst, VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
+ [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}
defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
@@ -7898,14 +8214,14 @@ multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
- VEX_4V;
+ VEX_4V, VEX_L;
def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(vt256 (OpNode VR256:$src1,
(vt256 (bitconvert (memopv4i64 addr:$src2))))))]>,
- VEX_4V;
+ VEX_4V, VEX_L;
}
defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td b/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
index bdeb63f..893488c 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -839,6 +839,16 @@ def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
} // Defs = [EFLAGS]
+def ROT32L2R_imm8 : SDNodeXForm<imm, [{
+ // Convert a ROTL shamt to a ROTR shamt on 32-bit integer.
+ return getI8Imm(32 - N->getZExtValue());
+}]>;
+
+def ROT64L2R_imm8 : SDNodeXForm<imm, [{
+ // Convert a ROTL shamt to a ROTR shamt on 64-bit integer.
+ return getI8Imm(64 - N->getZExtValue());
+}]>;
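
These SDNodeXForms rely on the identity rotl(x, s) == rotr(x, W - s), which lets RORX, a rotate-right-only instruction, also cover rotate-left by immediate. A sketch of the identity in C++ (assumes 0 < s < 32, as the matched immediates are):

    #include <cstdint>

    static inline uint32_t rotl32(uint32_t x, unsigned s) {
      return (x << s) | (x >> (32 - s));      // 0 < s < 32
    }
    static inline uint32_t rotr32(uint32_t x, unsigned s) {
      return (x >> s) | (x << (32 - s));      // 0 < s < 32
    }
    // rotl32(x, s) == rotr32(x, 32 - s) for any such s.
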
+
multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> {
let neverHasSideEffects = 1 in {
def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, i8imm:$src2),
@@ -873,4 +883,72 @@ let Predicates = [HasBMI2] in {
defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, VEX_W;
defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8, OpSize;
defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8, OpSize, VEX_W;
+
+ // Prefer RORX which is non-destructive and doesn't update EFLAGS.
+ let AddedComplexity = 10 in {
+ def : Pat<(rotl GR32:$src, (i8 imm:$shamt)),
+ (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>;
+ def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
+ (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>;
+ }
+
+ def : Pat<(rotl (loadi32 addr:$src), (i8 imm:$shamt)),
+ (RORX32mi addr:$src, (ROT32L2R_imm8 imm:$shamt))>;
+ def : Pat<(rotl (loadi64 addr:$src), (i8 imm:$shamt)),
+ (RORX64mi addr:$src, (ROT64L2R_imm8 imm:$shamt))>;
+
+ // Prefer SARX/SHRX/SHLX over SAR/SHR/SHL with variable shift BUT not
+ // immediate shift, i.e. the following code is considered better
+ //
+ // mov %edi, %esi
+ // shl $imm, %esi
+ // ... %edi, ...
+ //
+ // than
+ //
+ // movb $imm, %sil
+ // shlx %sil, %edi, %esi
+ // ... %edi, ...
+ //
+ let AddedComplexity = 1 in {
+ def : Pat<(sra GR32:$src1, GR8:$src2),
+ (SARX32rr GR32:$src1,
+ (INSERT_SUBREG
+ (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+ def : Pat<(sra GR64:$src1, GR8:$src2),
+ (SARX64rr GR64:$src1,
+ (INSERT_SUBREG
+ (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+
+ def : Pat<(srl GR32:$src1, GR8:$src2),
+ (SHRX32rr GR32:$src1,
+ (INSERT_SUBREG
+ (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+ def : Pat<(srl GR64:$src1, GR8:$src2),
+ (SHRX64rr GR64:$src1,
+ (INSERT_SUBREG
+ (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+
+ def : Pat<(shl GR32:$src1, GR8:$src2),
+ (SHLX32rr GR32:$src1,
+ (INSERT_SUBREG
+ (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+ def : Pat<(shl GR64:$src1, GR8:$src2),
+ (SHLX64rr GR64:$src1,
+ (INSERT_SUBREG
+ (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+ }
+
+ // Patterns on SARXrm/SHRXrm/SHLXrm are explicitly omitted to favor
+ //
+ // mov (%ecx), %esi
+ // shl $imm, %esi
+ //
+ // over
+ //
+ // movb $imm, %al
+ // shlx %al, (%ecx), %esi
+ //
+ // As SARXrr/SHRXrr/SHLXrr are favored for variable shifts, the peephole
+ // optimization will fold them into SARXrm/SHRXrm/SHLXrm if possible.
}
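
A hypothetical source-level illustration of the trade-off the comments above describe (assumes a BMI2 target, e.g. -mbmi2):

    // A variable shift can select SHLX: no CL copy, EFLAGS untouched.
    unsigned shl_var(unsigned v, unsigned n) { return v << (n & 31); }
    // An immediate shift such as `v << 5` still selects plain SHL.
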
diff --git a/contrib/llvm/lib/Target/X86/X86InstrTSX.td b/contrib/llvm/lib/Target/X86/X86InstrTSX.td
new file mode 100644
index 0000000..ad55058
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrTSX.td
@@ -0,0 +1,32 @@
+//===-- X86InstrTSX.td - TSX Instruction Set Extension -----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the instructions that make up the Intel TSX instruction
+// set.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// TSX instructions
+
+let usesCustomInserter = 1 in
+def XBEGIN : I<0, Pseudo, (outs GR32:$dst), (ins),
+ "# XBEGIN", [(set GR32:$dst, (int_x86_xbegin))]>,
+ Requires<[HasRTM]>;
+
+let isBranch = 1, isTerminator = 1, Defs = [EAX] in
+def XBEGIN_4 : Ii32PCRel<0xc7, MRM_F8, (outs), (ins brtarget:$dst),
+ "xbegin\t$dst", []>;
+
+def XEND : I<0x01, MRM_D5, (outs), (ins),
+ "xend", [(int_x86_xend)]>, TB, Requires<[HasRTM]>;
+
+def XABORT : Ii8<0xc6, MRM_F8, (outs), (ins i8imm:$imm),
+ "xabort\t$imm",
+ [(int_x86_xabort imm:$imm)]>, Requires<[HasRTM]>;
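
XBEGIN is modeled as a custom-inserted pseudo so the abort status lands in a GR32 (the hardware instruction writes EAX on abort). A hedged sketch of the usual RTM usage through the compiler intrinsics (assumes an RTM target, e.g. -mrtm):

    #include <immintrin.h>

    long counter;

    void increment() {
      if (_xbegin() == _XBEGIN_STARTED) {     // xbegin: enter transaction
        ++counter;                            // speculative update
        _xend();                              // xend: commit
      } else {                                // aborted: status was in EAX
        __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST); // fallback
      }
    }
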
diff --git a/contrib/llvm/lib/Target/X86/X86InstrXOP.td b/contrib/llvm/lib/Target/X86/X86InstrXOP.td
index 8ec2c68..2aa08fa 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrXOP.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrXOP.td
@@ -75,10 +75,10 @@ multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop> {
def rrY : IXOP<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (Int VR256:$src))]>, VEX;
+ [(set VR256:$dst, (Int VR256:$src))]>, VEX, VEX_L;
def rmY : IXOP<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
+ [(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX, VEX_L;
}
let isAsmParserOnly = 1 in {
@@ -238,7 +238,7 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR256:$dst, (Int VR256:$src1, VR256:$src2, VR256:$src3))]>,
- VEX_4V, VEX_I8IMM;
+ VEX_4V, VEX_I8IMM, VEX_L;
def rmY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, i256mem:$src3),
!strconcat(OpcodeStr,
@@ -246,7 +246,7 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR256:$dst,
(Int VR256:$src1, VR256:$src2,
(bitconvert (memopv4i64 addr:$src3))))]>,
- VEX_4V, VEX_I8IMM, VEX_W, MemOp4;
+ VEX_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
def mrY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, VR256:$src3),
!strconcat(OpcodeStr,
@@ -254,7 +254,7 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR256:$dst,
(Int VR256:$src1, (bitconvert (memopv4i64 addr:$src2)),
VR256:$src3))]>,
- VEX_4V, VEX_I8IMM;
+ VEX_4V, VEX_I8IMM, VEX_L;
}
let isAsmParserOnly = 1 in {
@@ -287,20 +287,21 @@ multiclass xop5op<bits<8> opc, string OpcodeStr, Intrinsic Int128,
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, VR256:$src2, VR256:$src3, imm:$src4))]>;
+ (Int256 VR256:$src1, VR256:$src2, VR256:$src3, imm:$src4))]>, VEX_L;
def rmY : IXOP5<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, f256mem:$src3, i8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
(Int256 VR256:$src1, VR256:$src2, (ld_256 addr:$src3), imm:$src4))]>,
- VEX_W, MemOp4;
+ VEX_W, MemOp4, VEX_L;
def mrY : IXOP5<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, VR256:$src3, i8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, (ld_256 addr:$src2), VR256:$src3, imm:$src4))]>;
+ (Int256 VR256:$src1, (ld_256 addr:$src2), VR256:$src3, imm:$src4))]>,
+ VEX_L;
}
defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", int_x86_xop_vpermil2pd,
diff --git a/contrib/llvm/lib/Target/X86/X86JITInfo.cpp b/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
index 0168d12..764aa5d 100644
--- a/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
@@ -532,6 +532,15 @@ uintptr_t X86JITInfo::getPICJumpTableEntry(uintptr_t BB, uintptr_t Entry) {
#endif
}
+template<typename T> static void addUnaligned(void *Pos, T Delta) {
+ T Value;
+ std::memcpy(reinterpret_cast<char*>(&Value), reinterpret_cast<char*>(Pos),
+ sizeof(T));
+ Value += Delta;
+ std::memcpy(reinterpret_cast<char*>(Pos), reinterpret_cast<char*>(&Value),
+ sizeof(T));
+}
+
/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
@@ -545,24 +554,24 @@ void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
// PC relative relocation, add the relocated value to the value already in
// memory, after we adjust it for where the PC is.
ResultPtr = ResultPtr -(intptr_t)RelocPos - 4 - MR->getConstantVal();
- *((unsigned*)RelocPos) += (unsigned)ResultPtr;
+ addUnaligned<unsigned>(RelocPos, ResultPtr);
break;
}
case X86::reloc_picrel_word: {
// PIC base relative relocation, add the relocated value to the value
// already in memory, after we adjust it for where the PIC base is.
ResultPtr = ResultPtr - ((intptr_t)Function + MR->getConstantVal());
- *((unsigned*)RelocPos) += (unsigned)ResultPtr;
+ addUnaligned<unsigned>(RelocPos, ResultPtr);
break;
}
case X86::reloc_absolute_word:
case X86::reloc_absolute_word_sext:
// Absolute relocation, just add the relocated value to the value already
// in memory.
- *((unsigned*)RelocPos) += (unsigned)ResultPtr;
+ addUnaligned<unsigned>(RelocPos, ResultPtr);
break;
case X86::reloc_absolute_dword:
- *((intptr_t*)RelocPos) += ResultPtr;
+ addUnaligned<intptr_t>(RelocPos, ResultPtr);
break;
}
}
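
Relocation targets inside emitted code are byte-aligned at best, so the old `*(unsigned*)RelocPos += ...` stores were misaligned, type-punning accesses; the memcpy-based addUnaligned above is the well-defined form of the same read-modify-write. A hypothetical usage sketch (Buf and the odd offset are illustrative only):

    #include <cstring>

    template <typename T> static void addUnaligned(void *Pos, T Delta) {
      T Value;
      std::memcpy(&Value, Pos, sizeof(T));   // read possibly-unaligned bytes
      Value += Delta;
      std::memcpy(Pos, &Value, sizeof(T));   // write them back
    }

    unsigned char Buf[16] = {};
    void patch() { addUnaligned<unsigned>(Buf + 3, 0x1000); } // odd offset is fine
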
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
index 9c0ce4e..cfd68f7 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#include "X86MCInstLower.h"
#include "X86AsmPrinter.h"
#include "X86COFFMachineModuleInfo.h"
#include "InstPrinter/X86ATTInstPrinter.h"
@@ -29,6 +28,31 @@
#include "llvm/ADT/SmallString.h"
using namespace llvm;
+namespace {
+
+/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
+class X86MCInstLower {
+ MCContext &Ctx;
+ Mangler *Mang;
+ const MachineFunction &MF;
+ const TargetMachine &TM;
+ const MCAsmInfo &MAI;
+ X86AsmPrinter &AsmPrinter;
+public:
+ X86MCInstLower(Mangler *mang, const MachineFunction &MF,
+ X86AsmPrinter &asmprinter);
+
+ void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+
+ MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
+ MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
+
+private:
+ MachineModuleInfoMachO &getMachOMMI() const;
+};
+
+} // end anonymous namespace
+
X86MCInstLower::X86MCInstLower(Mangler *mang, const MachineFunction &mf,
X86AsmPrinter &asmprinter)
: Ctx(mf.getContext()), Mang(mang), MF(mf), TM(mf.getTarget()),
@@ -43,15 +67,11 @@ MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::
GetSymbolFromOperand(const MachineOperand &MO) const {
- assert((MO.isGlobal() || MO.isSymbol()) && "Isn't a symbol reference");
+ assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference");
SmallString<128> Name;
- if (!MO.isGlobal()) {
- assert(MO.isSymbol());
- Name += MAI.getGlobalPrefix();
- Name += MO.getSymbolName();
- } else {
+ if (MO.isGlobal()) {
const GlobalValue *GV = MO.getGlobal();
bool isImplicitlyPrivate = false;
if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
@@ -61,6 +81,11 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
isImplicitlyPrivate = true;
Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
+ } else if (MO.isSymbol()) {
+ Name += MAI.getGlobalPrefix();
+ Name += MO.getSymbolName();
+ } else if (MO.isMBB()) {
+ Name += MO.getMBB()->getSymbol()->getName();
}
// If the target flags on the operand changes the name of the symbol, do that
@@ -191,7 +216,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
if (Expr == 0)
Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);
- if (!MO.isJTI() && MO.getOffset())
+ if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
Expr = MCBinaryExpr::CreateAdd(Expr,
MCConstantExpr::Create(MO.getOffset(), Ctx),
Ctx);
@@ -324,9 +349,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
MCOp = MCOperand::CreateImm(MO.getImm());
break;
case MachineOperand::MO_MachineBasicBlock:
- MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
- MO.getMBB()->getSymbol(), Ctx));
- break;
case MachineOperand::MO_GlobalAddress:
case MachineOperand::MO_ExternalSymbol:
MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
@@ -371,18 +393,8 @@ ReSimplify:
case X86::MOVZX64rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
case X86::MOVZX64rr16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break;
case X86::MOVZX64rm16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break;
- case X86::SETB_C8r: LowerUnaryToTwoAddr(OutMI, X86::SBB8rr); break;
- case X86::SETB_C16r: LowerUnaryToTwoAddr(OutMI, X86::SBB16rr); break;
- case X86::SETB_C32r: LowerUnaryToTwoAddr(OutMI, X86::SBB32rr); break;
- case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
- case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break;
- case X86::AVX_SET0PSY: LowerUnaryToTwoAddr(OutMI, X86::VXORPSYrr); break;
- case X86::AVX_SET0PDY: LowerUnaryToTwoAddr(OutMI, X86::VXORPDYrr); break;
- case X86::AVX_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::VPCMPEQDrr); break;
- case X86::AVX2_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::VPCMPEQDYrr);break;
- case X86::AVX2_SET0: LowerUnaryToTwoAddr(OutMI, X86::VPXORYrr); break;
case X86::MOV16r0:
LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV16r0 -> MOV32r0
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.h b/contrib/llvm/lib/Target/X86/X86MCInstLower.h
deleted file mode 100644
index b4d4cfd..0000000
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===-- X86MCInstLower.h - Lower MachineInstr to MCInst ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86_MCINSTLOWER_H
-#define X86_MCINSTLOWER_H
-
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
- class MCAsmInfo;
- class MCContext;
- class MCInst;
- class MCOperand;
- class MCSymbol;
- class MachineInstr;
- class MachineFunction;
- class MachineModuleInfoMachO;
- class MachineOperand;
- class Mangler;
- class TargetMachine;
- class X86AsmPrinter;
-
-/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst.
-class LLVM_LIBRARY_VISIBILITY X86MCInstLower {
- MCContext &Ctx;
- Mangler *Mang;
- const MachineFunction &MF;
- const TargetMachine &TM;
- const MCAsmInfo &MAI;
- X86AsmPrinter &AsmPrinter;
-public:
- X86MCInstLower(Mangler *mang, const MachineFunction &MF,
- X86AsmPrinter &asmprinter);
-
- void Lower(const MachineInstr *MI, MCInst &OutMI) const;
-
- MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
- MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
-
-private:
- MachineModuleInfoMachO &getMachOMMI() const;
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 877b8f6..73ac747 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -106,23 +106,7 @@ X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
- int reg = X86_MC::getX86RegNum(i);
- switch (i) {
- case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B:
- case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B:
- case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
- case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
- case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
- case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
- case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
- case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
- case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
- case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
- case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
- case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
- reg += 8;
- }
- return reg;
+ return getEncodingValue(i);
}
const TargetRegisterClass *
@@ -245,15 +229,26 @@ const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
bool callsEHReturn = false;
bool ghcCall = false;
+ bool oclBiCall = false;
+ bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
if (MF) {
callsEHReturn = MF->getMMI().callsEHReturn();
const Function *F = MF->getFunction();
ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
+ oclBiCall = (F ? F->getCallingConv() == CallingConv::Intel_OCL_BI : false);
}
if (ghcCall)
return CSR_NoRegs_SaveList;
+ if (oclBiCall) {
+ if (HasAVX && IsWin64)
+ return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
+ if (HasAVX && Is64Bit)
+ return CSR_64_Intel_OCL_BI_AVX_SaveList;
+ if (!HasAVX && !IsWin64 && Is64Bit)
+ return CSR_64_Intel_OCL_BI_SaveList;
+ }
if (Is64Bit) {
if (IsWin64)
return CSR_Win64_SaveList;
@@ -268,6 +263,16 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
+
+ if (CC == CallingConv::Intel_OCL_BI) {
+ if (IsWin64 && HasAVX)
+ return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
+ if (Is64Bit && HasAVX)
+ return CSR_64_Intel_OCL_BI_AVX_RegMask;
+ if (!HasAVX && !IsWin64 && Is64Bit)
+ return CSR_64_Intel_OCL_BI_RegMask;
+ }
if (CC == CallingConv::GHC)
return CSR_NoRegs_RegMask;
if (!Is64Bit)
@@ -277,6 +282,11 @@ X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
return CSR_64_RegMask;
}
+const uint32_t*
+X86RegisterInfo::getNoPreservedMask() const {
+ return CSR_NoRegs_RegMask;
+}
+
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
@@ -398,8 +408,9 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
- bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
- F->hasFnAttr(Attribute::StackAlignment));
+ bool requiresRealignment =
+ ((MFI->getMaxAlignment() > StackAlign) ||
+ F->getFnAttributes().hasAttribute(Attributes::StackAlignment));
// If we've requested that we force align the stack do so now.
if (ForceStackAlign)
@@ -522,7 +533,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, RegScavenger *RS) const{
+ int SPAdj, RegScavenger *RS) const {
assert(SPAdj == 0 && "Unexpected");
unsigned i = 0;
@@ -590,9 +601,10 @@ unsigned X86RegisterInfo::getEHHandlerRegister() const {
}
namespace llvm {
-unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
- switch (VT.getSimpleVT().SimpleTy) {
- default: return Reg;
+unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
+ bool High) {
+ switch (VT) {
+ default: llvm_unreachable("Unexpected VT");
case MVT::i8:
if (High) {
switch (Reg) {
@@ -608,7 +620,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
}
} else {
switch (Reg) {
- default: return 0;
+ default: llvm_unreachable("Unexpected register");
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::AL;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
@@ -645,7 +657,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
}
case MVT::i16:
switch (Reg) {
- default: return Reg;
+ default: llvm_unreachable("Unexpected register");
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::AX;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
@@ -681,7 +693,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
}
case MVT::i32:
switch (Reg) {
- default: return Reg;
+ default: llvm_unreachable("Unexpected register");
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::EAX;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
@@ -733,7 +745,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
}
}
switch (Reg) {
- default: return Reg;
+ default: llvm_unreachable("Unexpected register");
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::RAX;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
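
The hunks above change how X86RegisterInfo answers per-convention queries: getSEHRegNum collapses to a tablegen-driven getEncodingValue() lookup, and getCalleeSavedRegs/getCallPreservedMask learn the Intel_OCL_BI convention with AVX- and Win64-specific save lists. The sketch below models only the selection ladder; the CC_* and pickSaveList names are placeholders, not LLVM symbols, and the EH-return distinction is elided.

#include <cstdio>

enum CallConv { CC_C, CC_GHC, CC_IntelOCLBI };

// Mirrors the order of the checks in getCalleeSavedRegs: GHC first,
// then the Intel OpenCL built-in variants, then the generic lists.
static const char *pickSaveList(CallConv CC, bool HasAVX, bool Is64Bit,
                                bool IsWin64) {
  if (CC == CC_GHC)
    return "CSR_NoRegs";                 // GHC preserves nothing
  if (CC == CC_IntelOCLBI) {
    if (HasAVX && IsWin64) return "CSR_Win64_Intel_OCL_BI_AVX";
    if (HasAVX && Is64Bit) return "CSR_64_Intel_OCL_BI_AVX";
    if (!HasAVX && !IsWin64 && Is64Bit) return "CSR_64_Intel_OCL_BI";
    // otherwise fall through to the default lists
  }
  if (Is64Bit)
    return IsWin64 ? "CSR_Win64" : "CSR_64";
  return "CSR_32";
}

int main() {
  std::printf("%s\n", pickSaveList(CC_IntelOCLBI, /*AVX*/true,
                                   /*64-bit*/true, /*Win64*/false));
}
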
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
index 1bc32cb..7932ede 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -58,10 +58,6 @@ private:
public:
X86RegisterInfo(X86TargetMachine &tm, const TargetInstrInfo &tii);
- /// getX86RegNum - Returns the native X86 register number for the given LLVM
- /// register identifier.
- static unsigned getX86RegNum(unsigned RegNo);
-
// FIXME: This should be tablegen'd like getDwarfRegNum is
int getSEHRegNum(unsigned i) const;
@@ -104,6 +100,7 @@ public:
/// callee-save registers on this target.
const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+ const uint32_t *getNoPreservedMask() const;
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses and
@@ -141,8 +138,8 @@ public:
// getX86SubSuperRegister - X86 utility function. It returns the sub or super
// register of a specific X86 register.
-// e.g. getX86SubSuperRegister(X86::EAX, EVT::i16) return X86:AX
-unsigned getX86SubSuperRegister(unsigned, EVT, bool High=false);
+// e.g. getX86SubSuperRegister(X86::EAX, MVT::i16) returns X86::AX
+unsigned getX86SubSuperRegister(unsigned, MVT::SimpleValueType, bool High=false);
} // End llvm namespace
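
With the signature change above, getX86SubSuperRegister now takes an MVT::SimpleValueType and hits llvm_unreachable on registers or widths it does not know, where it previously returned its input (or 0) silently. A toy version for a single register family, with made-up enum names:

#include <cassert>
#include <cstdio>

enum Reg   { AL, AH, AX, EAX, RAX };
enum Width { W8, W8Hi, W16, W32, W64 };

// Only the accumulator family is modeled; the real function covers every
// GPR family and now asserts on anything unexpected instead of passing
// it through.
static Reg subSuperReg(Width W) {
  switch (W) {
  case W8:   return AL;
  case W8Hi: return AH;
  case W16:  return AX;
  case W32:  return EAX;
  case W64:  return RAX;
  }
  assert(false && "Unexpected width");
  return RAX;
}

int main() { std::printf("%d\n", (int)subSuperReg(W16)); }
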
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
index edc7184..be6282a 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -13,258 +13,264 @@
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Register definitions...
-//
-let Namespace = "X86" in {
+class X86Reg<string n, bits<16> Enc, list<Register> subregs = []> : Register<n> {
+ let Namespace = "X86";
+ let HWEncoding = Enc;
+ let SubRegs = subregs;
+}
- // Subregister indices.
+// Subregister indices.
+let Namespace = "X86" in {
def sub_8bit : SubRegIndex;
def sub_8bit_hi : SubRegIndex;
def sub_16bit : SubRegIndex;
def sub_32bit : SubRegIndex;
- def sub_xmm : SubRegIndex;
-
-
- // In the register alias definitions below, we define which registers alias
- // which others. We only specify which registers the small registers alias,
- // because the register file generator is smart enough to figure out that
- // AL aliases AX if we tell it that AX aliased AL (for example).
-
- // Dwarf numbering is different for 32-bit and 64-bit, and there are
- // variations by target as well. Currently the first entry is for X86-64,
- // second - for EH on X86-32/Darwin and third is 'generic' one (X86-32/Linux
- // and debug information on X86-32/Darwin)
-
- // 8-bit registers
- // Low registers
- def AL : Register<"al">;
- def DL : Register<"dl">;
- def CL : Register<"cl">;
- def BL : Register<"bl">;
-
- // X86-64 only, requires REX.
- let CostPerUse = 1 in {
- def SIL : Register<"sil">;
- def DIL : Register<"dil">;
- def BPL : Register<"bpl">;
- def SPL : Register<"spl">;
- def R8B : Register<"r8b">;
- def R9B : Register<"r9b">;
- def R10B : Register<"r10b">;
- def R11B : Register<"r11b">;
- def R12B : Register<"r12b">;
- def R13B : Register<"r13b">;
- def R14B : Register<"r14b">;
- def R15B : Register<"r15b">;
- }
-
- // High registers. On x86-64, these cannot be used in any instruction
- // with a REX prefix.
- def AH : Register<"ah">;
- def DH : Register<"dh">;
- def CH : Register<"ch">;
- def BH : Register<"bh">;
-
- // 16-bit registers
- let SubRegIndices = [sub_8bit, sub_8bit_hi], CoveredBySubRegs = 1 in {
- def AX : RegisterWithSubRegs<"ax", [AL,AH]>;
- def DX : RegisterWithSubRegs<"dx", [DL,DH]>;
- def CX : RegisterWithSubRegs<"cx", [CL,CH]>;
- def BX : RegisterWithSubRegs<"bx", [BL,BH]>;
- }
- let SubRegIndices = [sub_8bit] in {
- def SI : RegisterWithSubRegs<"si", [SIL]>;
- def DI : RegisterWithSubRegs<"di", [DIL]>;
- def BP : RegisterWithSubRegs<"bp", [BPL]>;
- def SP : RegisterWithSubRegs<"sp", [SPL]>;
- }
- def IP : Register<"ip">;
-
- // X86-64 only, requires REX.
- let SubRegIndices = [sub_8bit], CostPerUse = 1 in {
- def R8W : RegisterWithSubRegs<"r8w", [R8B]>;
- def R9W : RegisterWithSubRegs<"r9w", [R9B]>;
- def R10W : RegisterWithSubRegs<"r10w", [R10B]>;
- def R11W : RegisterWithSubRegs<"r11w", [R11B]>;
- def R12W : RegisterWithSubRegs<"r12w", [R12B]>;
- def R13W : RegisterWithSubRegs<"r13w", [R13B]>;
- def R14W : RegisterWithSubRegs<"r14w", [R14B]>;
- def R15W : RegisterWithSubRegs<"r15w", [R15B]>;
- }
- // 32-bit registers
- let SubRegIndices = [sub_16bit] in {
- def EAX : RegisterWithSubRegs<"eax", [AX]>, DwarfRegNum<[-2, 0, 0]>;
- def EDX : RegisterWithSubRegs<"edx", [DX]>, DwarfRegNum<[-2, 2, 2]>;
- def ECX : RegisterWithSubRegs<"ecx", [CX]>, DwarfRegNum<[-2, 1, 1]>;
- def EBX : RegisterWithSubRegs<"ebx", [BX]>, DwarfRegNum<[-2, 3, 3]>;
- def ESI : RegisterWithSubRegs<"esi", [SI]>, DwarfRegNum<[-2, 6, 6]>;
- def EDI : RegisterWithSubRegs<"edi", [DI]>, DwarfRegNum<[-2, 7, 7]>;
- def EBP : RegisterWithSubRegs<"ebp", [BP]>, DwarfRegNum<[-2, 4, 5]>;
- def ESP : RegisterWithSubRegs<"esp", [SP]>, DwarfRegNum<[-2, 5, 4]>;
- def EIP : RegisterWithSubRegs<"eip", [IP]>, DwarfRegNum<[-2, 8, 8]>;
-
- // X86-64 only, requires REX
- let CostPerUse = 1 in {
- def R8D : RegisterWithSubRegs<"r8d", [R8W]>;
- def R9D : RegisterWithSubRegs<"r9d", [R9W]>;
- def R10D : RegisterWithSubRegs<"r10d", [R10W]>;
- def R11D : RegisterWithSubRegs<"r11d", [R11W]>;
- def R12D : RegisterWithSubRegs<"r12d", [R12W]>;
- def R13D : RegisterWithSubRegs<"r13d", [R13W]>;
- def R14D : RegisterWithSubRegs<"r14d", [R14W]>;
- def R15D : RegisterWithSubRegs<"r15d", [R15W]>;
- }}
-
- // 64-bit registers, X86-64 only
- let SubRegIndices = [sub_32bit] in {
- def RAX : RegisterWithSubRegs<"rax", [EAX]>, DwarfRegNum<[0, -2, -2]>;
- def RDX : RegisterWithSubRegs<"rdx", [EDX]>, DwarfRegNum<[1, -2, -2]>;
- def RCX : RegisterWithSubRegs<"rcx", [ECX]>, DwarfRegNum<[2, -2, -2]>;
- def RBX : RegisterWithSubRegs<"rbx", [EBX]>, DwarfRegNum<[3, -2, -2]>;
- def RSI : RegisterWithSubRegs<"rsi", [ESI]>, DwarfRegNum<[4, -2, -2]>;
- def RDI : RegisterWithSubRegs<"rdi", [EDI]>, DwarfRegNum<[5, -2, -2]>;
- def RBP : RegisterWithSubRegs<"rbp", [EBP]>, DwarfRegNum<[6, -2, -2]>;
- def RSP : RegisterWithSubRegs<"rsp", [ESP]>, DwarfRegNum<[7, -2, -2]>;
-
- // These also require REX.
- let CostPerUse = 1 in {
- def R8 : RegisterWithSubRegs<"r8", [R8D]>, DwarfRegNum<[8, -2, -2]>;
- def R9 : RegisterWithSubRegs<"r9", [R9D]>, DwarfRegNum<[9, -2, -2]>;
- def R10 : RegisterWithSubRegs<"r10", [R10D]>, DwarfRegNum<[10, -2, -2]>;
- def R11 : RegisterWithSubRegs<"r11", [R11D]>, DwarfRegNum<[11, -2, -2]>;
- def R12 : RegisterWithSubRegs<"r12", [R12D]>, DwarfRegNum<[12, -2, -2]>;
- def R13 : RegisterWithSubRegs<"r13", [R13D]>, DwarfRegNum<[13, -2, -2]>;
- def R14 : RegisterWithSubRegs<"r14", [R14D]>, DwarfRegNum<[14, -2, -2]>;
- def R15 : RegisterWithSubRegs<"r15", [R15D]>, DwarfRegNum<[15, -2, -2]>;
- def RIP : RegisterWithSubRegs<"rip", [EIP]>, DwarfRegNum<[16, -2, -2]>;
- }}
-
- // MMX Registers. These are actually aliased to ST0 .. ST7
- def MM0 : Register<"mm0">, DwarfRegNum<[41, 29, 29]>;
- def MM1 : Register<"mm1">, DwarfRegNum<[42, 30, 30]>;
- def MM2 : Register<"mm2">, DwarfRegNum<[43, 31, 31]>;
- def MM3 : Register<"mm3">, DwarfRegNum<[44, 32, 32]>;
- def MM4 : Register<"mm4">, DwarfRegNum<[45, 33, 33]>;
- def MM5 : Register<"mm5">, DwarfRegNum<[46, 34, 34]>;
- def MM6 : Register<"mm6">, DwarfRegNum<[47, 35, 35]>;
- def MM7 : Register<"mm7">, DwarfRegNum<[48, 36, 36]>;
-
- // Pseudo Floating Point registers
- def FP0 : Register<"fp0">;
- def FP1 : Register<"fp1">;
- def FP2 : Register<"fp2">;
- def FP3 : Register<"fp3">;
- def FP4 : Register<"fp4">;
- def FP5 : Register<"fp5">;
- def FP6 : Register<"fp6">;
-
- // XMM Registers, used by the various SSE instruction set extensions.
- def XMM0: Register<"xmm0">, DwarfRegNum<[17, 21, 21]>;
- def XMM1: Register<"xmm1">, DwarfRegNum<[18, 22, 22]>;
- def XMM2: Register<"xmm2">, DwarfRegNum<[19, 23, 23]>;
- def XMM3: Register<"xmm3">, DwarfRegNum<[20, 24, 24]>;
- def XMM4: Register<"xmm4">, DwarfRegNum<[21, 25, 25]>;
- def XMM5: Register<"xmm5">, DwarfRegNum<[22, 26, 26]>;
- def XMM6: Register<"xmm6">, DwarfRegNum<[23, 27, 27]>;
- def XMM7: Register<"xmm7">, DwarfRegNum<[24, 28, 28]>;
-
- // X86-64 only
- let CostPerUse = 1 in {
- def XMM8: Register<"xmm8">, DwarfRegNum<[25, -2, -2]>;
- def XMM9: Register<"xmm9">, DwarfRegNum<[26, -2, -2]>;
- def XMM10: Register<"xmm10">, DwarfRegNum<[27, -2, -2]>;
- def XMM11: Register<"xmm11">, DwarfRegNum<[28, -2, -2]>;
- def XMM12: Register<"xmm12">, DwarfRegNum<[29, -2, -2]>;
- def XMM13: Register<"xmm13">, DwarfRegNum<[30, -2, -2]>;
- def XMM14: Register<"xmm14">, DwarfRegNum<[31, -2, -2]>;
- def XMM15: Register<"xmm15">, DwarfRegNum<[32, -2, -2]>;
- } // CostPerUse
-
- // YMM Registers, used by AVX instructions
- let SubRegIndices = [sub_xmm] in {
- def YMM0: RegisterWithSubRegs<"ymm0", [XMM0]>, DwarfRegAlias<XMM0>;
- def YMM1: RegisterWithSubRegs<"ymm1", [XMM1]>, DwarfRegAlias<XMM1>;
- def YMM2: RegisterWithSubRegs<"ymm2", [XMM2]>, DwarfRegAlias<XMM2>;
- def YMM3: RegisterWithSubRegs<"ymm3", [XMM3]>, DwarfRegAlias<XMM3>;
- def YMM4: RegisterWithSubRegs<"ymm4", [XMM4]>, DwarfRegAlias<XMM4>;
- def YMM5: RegisterWithSubRegs<"ymm5", [XMM5]>, DwarfRegAlias<XMM5>;
- def YMM6: RegisterWithSubRegs<"ymm6", [XMM6]>, DwarfRegAlias<XMM6>;
- def YMM7: RegisterWithSubRegs<"ymm7", [XMM7]>, DwarfRegAlias<XMM7>;
- def YMM8: RegisterWithSubRegs<"ymm8", [XMM8]>, DwarfRegAlias<XMM8>;
- def YMM9: RegisterWithSubRegs<"ymm9", [XMM9]>, DwarfRegAlias<XMM9>;
- def YMM10: RegisterWithSubRegs<"ymm10", [XMM10]>, DwarfRegAlias<XMM10>;
- def YMM11: RegisterWithSubRegs<"ymm11", [XMM11]>, DwarfRegAlias<XMM11>;
- def YMM12: RegisterWithSubRegs<"ymm12", [XMM12]>, DwarfRegAlias<XMM12>;
- def YMM13: RegisterWithSubRegs<"ymm13", [XMM13]>, DwarfRegAlias<XMM13>;
- def YMM14: RegisterWithSubRegs<"ymm14", [XMM14]>, DwarfRegAlias<XMM14>;
- def YMM15: RegisterWithSubRegs<"ymm15", [XMM15]>, DwarfRegAlias<XMM15>;
- }
-
- class STRegister<string Name, list<Register> A> : Register<Name> {
- let Aliases = A;
- }
-
- // Floating point stack registers. These don't map one-to-one to the FP
- // pseudo registers, but we still mark them as aliasing FP registers. That
- // way both kinds can be live without exceeding the stack depth. ST registers
- // are only live around inline assembly.
- def ST0 : STRegister<"st(0)", []>, DwarfRegNum<[33, 12, 11]>;
- def ST1 : STRegister<"st(1)", [FP6]>, DwarfRegNum<[34, 13, 12]>;
- def ST2 : STRegister<"st(2)", [FP5]>, DwarfRegNum<[35, 14, 13]>;
- def ST3 : STRegister<"st(3)", [FP4]>, DwarfRegNum<[36, 15, 14]>;
- def ST4 : STRegister<"st(4)", [FP3]>, DwarfRegNum<[37, 16, 15]>;
- def ST5 : STRegister<"st(5)", [FP2]>, DwarfRegNum<[38, 17, 16]>;
- def ST6 : STRegister<"st(6)", [FP1]>, DwarfRegNum<[39, 18, 17]>;
- def ST7 : STRegister<"st(7)", [FP0]>, DwarfRegNum<[40, 19, 18]>;
-
- // Floating-point status word
- def FPSW : Register<"fpsw">;
-
- // Status flags register
- def EFLAGS : Register<"flags">;
-
- // Segment registers
- def CS : Register<"cs">;
- def DS : Register<"ds">;
- def SS : Register<"ss">;
- def ES : Register<"es">;
- def FS : Register<"fs">;
- def GS : Register<"gs">;
-
- // Debug registers
- def DR0 : Register<"dr0">;
- def DR1 : Register<"dr1">;
- def DR2 : Register<"dr2">;
- def DR3 : Register<"dr3">;
- def DR4 : Register<"dr4">;
- def DR5 : Register<"dr5">;
- def DR6 : Register<"dr6">;
- def DR7 : Register<"dr7">;
-
- // Control registers
- def CR0 : Register<"cr0">;
- def CR1 : Register<"cr1">;
- def CR2 : Register<"cr2">;
- def CR3 : Register<"cr3">;
- def CR4 : Register<"cr4">;
- def CR5 : Register<"cr5">;
- def CR6 : Register<"cr6">;
- def CR7 : Register<"cr7">;
- def CR8 : Register<"cr8">;
- def CR9 : Register<"cr9">;
- def CR10 : Register<"cr10">;
- def CR11 : Register<"cr11">;
- def CR12 : Register<"cr12">;
- def CR13 : Register<"cr13">;
- def CR14 : Register<"cr14">;
- def CR15 : Register<"cr15">;
-
- // Pseudo index registers
- def EIZ : Register<"eiz">;
- def RIZ : Register<"riz">;
+ def sub_xmm : SubRegIndex;
}
+//===----------------------------------------------------------------------===//
+// Register definitions...
+//
+
+// In the register alias definitions below, we define which registers alias
+// which others. We only specify which registers the small registers alias,
+// because the register file generator is smart enough to figure out that
+// AL aliases AX if we tell it that AX aliased AL (for example).
+
+// Dwarf numbering is different for 32-bit and 64-bit, and there are
+// variations by target as well. Currently the first entry is for X86-64,
+// second - for EH on X86-32/Darwin and third is 'generic' one (X86-32/Linux
+// and debug information on X86-32/Darwin)
+
+// 8-bit registers
+// Low registers
+def AL : X86Reg<"al", 0>;
+def DL : X86Reg<"dl", 2>;
+def CL : X86Reg<"cl", 1>;
+def BL : X86Reg<"bl", 3>;
+
+// High registers. On x86-64, these cannot be used in any instruction
+// with a REX prefix.
+def AH : X86Reg<"ah", 4>;
+def DH : X86Reg<"dh", 6>;
+def CH : X86Reg<"ch", 5>;
+def BH : X86Reg<"bh", 7>;
+
+// X86-64 only, requires REX.
+let CostPerUse = 1 in {
+def SIL : X86Reg<"sil", 6>;
+def DIL : X86Reg<"dil", 7>;
+def BPL : X86Reg<"bpl", 5>;
+def SPL : X86Reg<"spl", 4>;
+def R8B : X86Reg<"r8b", 8>;
+def R9B : X86Reg<"r9b", 9>;
+def R10B : X86Reg<"r10b", 10>;
+def R11B : X86Reg<"r11b", 11>;
+def R12B : X86Reg<"r12b", 12>;
+def R13B : X86Reg<"r13b", 13>;
+def R14B : X86Reg<"r14b", 14>;
+def R15B : X86Reg<"r15b", 15>;
+}
+
+// 16-bit registers
+let SubRegIndices = [sub_8bit, sub_8bit_hi], CoveredBySubRegs = 1 in {
+def AX : X86Reg<"ax", 0, [AL,AH]>;
+def DX : X86Reg<"dx", 2, [DL,DH]>;
+def CX : X86Reg<"cx", 1, [CL,CH]>;
+def BX : X86Reg<"bx", 3, [BL,BH]>;
+}
+let SubRegIndices = [sub_8bit] in {
+def SI : X86Reg<"si", 6, [SIL]>;
+def DI : X86Reg<"di", 7, [DIL]>;
+def BP : X86Reg<"bp", 5, [BPL]>;
+def SP : X86Reg<"sp", 4, [SPL]>;
+}
+def IP : X86Reg<"ip", 0>;
+
+// X86-64 only, requires REX.
+let SubRegIndices = [sub_8bit], CostPerUse = 1 in {
+def R8W : X86Reg<"r8w", 8, [R8B]>;
+def R9W : X86Reg<"r9w", 9, [R9B]>;
+def R10W : X86Reg<"r10w", 10, [R10B]>;
+def R11W : X86Reg<"r11w", 11, [R11B]>;
+def R12W : X86Reg<"r12w", 12, [R12B]>;
+def R13W : X86Reg<"r13w", 13, [R13B]>;
+def R14W : X86Reg<"r14w", 14, [R14B]>;
+def R15W : X86Reg<"r15w", 15, [R15B]>;
+}
+
+// 32-bit registers
+let SubRegIndices = [sub_16bit] in {
+def EAX : X86Reg<"eax", 0, [AX]>, DwarfRegNum<[-2, 0, 0]>;
+def EDX : X86Reg<"edx", 2, [DX]>, DwarfRegNum<[-2, 2, 2]>;
+def ECX : X86Reg<"ecx", 1, [CX]>, DwarfRegNum<[-2, 1, 1]>;
+def EBX : X86Reg<"ebx", 3, [BX]>, DwarfRegNum<[-2, 3, 3]>;
+def ESI : X86Reg<"esi", 6, [SI]>, DwarfRegNum<[-2, 6, 6]>;
+def EDI : X86Reg<"edi", 7, [DI]>, DwarfRegNum<[-2, 7, 7]>;
+def EBP : X86Reg<"ebp", 5, [BP]>, DwarfRegNum<[-2, 4, 5]>;
+def ESP : X86Reg<"esp", 4, [SP]>, DwarfRegNum<[-2, 5, 4]>;
+def EIP : X86Reg<"eip", 0, [IP]>, DwarfRegNum<[-2, 8, 8]>;
+
+// X86-64 only, requires REX
+let CostPerUse = 1 in {
+def R8D : X86Reg<"r8d", 8, [R8W]>;
+def R9D : X86Reg<"r9d", 9, [R9W]>;
+def R10D : X86Reg<"r10d", 10, [R10W]>;
+def R11D : X86Reg<"r11d", 11, [R11W]>;
+def R12D : X86Reg<"r12d", 12, [R12W]>;
+def R13D : X86Reg<"r13d", 13, [R13W]>;
+def R14D : X86Reg<"r14d", 14, [R14W]>;
+def R15D : X86Reg<"r15d", 15, [R15W]>;
+}}
+
+// 64-bit registers, X86-64 only
+let SubRegIndices = [sub_32bit] in {
+def RAX : X86Reg<"rax", 0, [EAX]>, DwarfRegNum<[0, -2, -2]>;
+def RDX : X86Reg<"rdx", 2, [EDX]>, DwarfRegNum<[1, -2, -2]>;
+def RCX : X86Reg<"rcx", 1, [ECX]>, DwarfRegNum<[2, -2, -2]>;
+def RBX : X86Reg<"rbx", 3, [EBX]>, DwarfRegNum<[3, -2, -2]>;
+def RSI : X86Reg<"rsi", 6, [ESI]>, DwarfRegNum<[4, -2, -2]>;
+def RDI : X86Reg<"rdi", 7, [EDI]>, DwarfRegNum<[5, -2, -2]>;
+def RBP : X86Reg<"rbp", 5, [EBP]>, DwarfRegNum<[6, -2, -2]>;
+def RSP : X86Reg<"rsp", 4, [ESP]>, DwarfRegNum<[7, -2, -2]>;
+
+// These also require REX.
+let CostPerUse = 1 in {
+def R8 : X86Reg<"r8", 8, [R8D]>, DwarfRegNum<[ 8, -2, -2]>;
+def R9 : X86Reg<"r9", 9, [R9D]>, DwarfRegNum<[ 9, -2, -2]>;
+def R10 : X86Reg<"r10", 10, [R10D]>, DwarfRegNum<[10, -2, -2]>;
+def R11 : X86Reg<"r11", 11, [R11D]>, DwarfRegNum<[11, -2, -2]>;
+def R12 : X86Reg<"r12", 12, [R12D]>, DwarfRegNum<[12, -2, -2]>;
+def R13 : X86Reg<"r13", 13, [R13D]>, DwarfRegNum<[13, -2, -2]>;
+def R14 : X86Reg<"r14", 14, [R14D]>, DwarfRegNum<[14, -2, -2]>;
+def R15 : X86Reg<"r15", 15, [R15D]>, DwarfRegNum<[15, -2, -2]>;
+def RIP : X86Reg<"rip", 0, [EIP]>, DwarfRegNum<[16, -2, -2]>;
+}}
+
+// MMX Registers. These are actually aliased to ST0 .. ST7
+def MM0 : X86Reg<"mm0", 0>, DwarfRegNum<[41, 29, 29]>;
+def MM1 : X86Reg<"mm1", 1>, DwarfRegNum<[42, 30, 30]>;
+def MM2 : X86Reg<"mm2", 2>, DwarfRegNum<[43, 31, 31]>;
+def MM3 : X86Reg<"mm3", 3>, DwarfRegNum<[44, 32, 32]>;
+def MM4 : X86Reg<"mm4", 4>, DwarfRegNum<[45, 33, 33]>;
+def MM5 : X86Reg<"mm5", 5>, DwarfRegNum<[46, 34, 34]>;
+def MM6 : X86Reg<"mm6", 6>, DwarfRegNum<[47, 35, 35]>;
+def MM7 : X86Reg<"mm7", 7>, DwarfRegNum<[48, 36, 36]>;
+
+// Pseudo Floating Point registers
+def FP0 : X86Reg<"fp0", 0>;
+def FP1 : X86Reg<"fp1", 0>;
+def FP2 : X86Reg<"fp2", 0>;
+def FP3 : X86Reg<"fp3", 0>;
+def FP4 : X86Reg<"fp4", 0>;
+def FP5 : X86Reg<"fp5", 0>;
+def FP6 : X86Reg<"fp6", 0>;
+
+// XMM Registers, used by the various SSE instruction set extensions.
+def XMM0: X86Reg<"xmm0", 0>, DwarfRegNum<[17, 21, 21]>;
+def XMM1: X86Reg<"xmm1", 1>, DwarfRegNum<[18, 22, 22]>;
+def XMM2: X86Reg<"xmm2", 2>, DwarfRegNum<[19, 23, 23]>;
+def XMM3: X86Reg<"xmm3", 3>, DwarfRegNum<[20, 24, 24]>;
+def XMM4: X86Reg<"xmm4", 4>, DwarfRegNum<[21, 25, 25]>;
+def XMM5: X86Reg<"xmm5", 5>, DwarfRegNum<[22, 26, 26]>;
+def XMM6: X86Reg<"xmm6", 6>, DwarfRegNum<[23, 27, 27]>;
+def XMM7: X86Reg<"xmm7", 7>, DwarfRegNum<[24, 28, 28]>;
+
+// X86-64 only
+let CostPerUse = 1 in {
+def XMM8: X86Reg<"xmm8", 8>, DwarfRegNum<[25, -2, -2]>;
+def XMM9: X86Reg<"xmm9", 9>, DwarfRegNum<[26, -2, -2]>;
+def XMM10: X86Reg<"xmm10", 10>, DwarfRegNum<[27, -2, -2]>;
+def XMM11: X86Reg<"xmm11", 11>, DwarfRegNum<[28, -2, -2]>;
+def XMM12: X86Reg<"xmm12", 12>, DwarfRegNum<[29, -2, -2]>;
+def XMM13: X86Reg<"xmm13", 13>, DwarfRegNum<[30, -2, -2]>;
+def XMM14: X86Reg<"xmm14", 14>, DwarfRegNum<[31, -2, -2]>;
+def XMM15: X86Reg<"xmm15", 15>, DwarfRegNum<[32, -2, -2]>;
+} // CostPerUse
+
+// YMM Registers, used by AVX instructions
+let SubRegIndices = [sub_xmm] in {
+def YMM0: X86Reg<"ymm0", 0, [XMM0]>, DwarfRegAlias<XMM0>;
+def YMM1: X86Reg<"ymm1", 1, [XMM1]>, DwarfRegAlias<XMM1>;
+def YMM2: X86Reg<"ymm2", 2, [XMM2]>, DwarfRegAlias<XMM2>;
+def YMM3: X86Reg<"ymm3", 3, [XMM3]>, DwarfRegAlias<XMM3>;
+def YMM4: X86Reg<"ymm4", 4, [XMM4]>, DwarfRegAlias<XMM4>;
+def YMM5: X86Reg<"ymm5", 5, [XMM5]>, DwarfRegAlias<XMM5>;
+def YMM6: X86Reg<"ymm6", 6, [XMM6]>, DwarfRegAlias<XMM6>;
+def YMM7: X86Reg<"ymm7", 7, [XMM7]>, DwarfRegAlias<XMM7>;
+def YMM8: X86Reg<"ymm8", 8, [XMM8]>, DwarfRegAlias<XMM8>;
+def YMM9: X86Reg<"ymm9", 9, [XMM9]>, DwarfRegAlias<XMM9>;
+def YMM10: X86Reg<"ymm10", 10, [XMM10]>, DwarfRegAlias<XMM10>;
+def YMM11: X86Reg<"ymm11", 11, [XMM11]>, DwarfRegAlias<XMM11>;
+def YMM12: X86Reg<"ymm12", 12, [XMM12]>, DwarfRegAlias<XMM12>;
+def YMM13: X86Reg<"ymm13", 13, [XMM13]>, DwarfRegAlias<XMM13>;
+def YMM14: X86Reg<"ymm14", 14, [XMM14]>, DwarfRegAlias<XMM14>;
+def YMM15: X86Reg<"ymm15", 15, [XMM15]>, DwarfRegAlias<XMM15>;
+}
+
+class STRegister<string n, bits<16> Enc, list<Register> A> : X86Reg<n, Enc> {
+ let Aliases = A;
+}
+
+// Floating point stack registers. These don't map one-to-one to the FP
+// pseudo registers, but we still mark them as aliasing FP registers. That
+// way both kinds can be live without exceeding the stack depth. ST registers
+// are only live around inline assembly.
+def ST0 : STRegister<"st(0)", 0, []>, DwarfRegNum<[33, 12, 11]>;
+def ST1 : STRegister<"st(1)", 1, [FP6]>, DwarfRegNum<[34, 13, 12]>;
+def ST2 : STRegister<"st(2)", 2, [FP5]>, DwarfRegNum<[35, 14, 13]>;
+def ST3 : STRegister<"st(3)", 3, [FP4]>, DwarfRegNum<[36, 15, 14]>;
+def ST4 : STRegister<"st(4)", 4, [FP3]>, DwarfRegNum<[37, 16, 15]>;
+def ST5 : STRegister<"st(5)", 5, [FP2]>, DwarfRegNum<[38, 17, 16]>;
+def ST6 : STRegister<"st(6)", 6, [FP1]>, DwarfRegNum<[39, 18, 17]>;
+def ST7 : STRegister<"st(7)", 7, [FP0]>, DwarfRegNum<[40, 19, 18]>;
+
+// Floating-point status word
+def FPSW : X86Reg<"fpsw", 0>;
+
+// Status flags register
+def EFLAGS : X86Reg<"flags", 0>;
+
+// Segment registers
+def CS : X86Reg<"cs", 1>;
+def DS : X86Reg<"ds", 3>;
+def SS : X86Reg<"ss", 2>;
+def ES : X86Reg<"es", 0>;
+def FS : X86Reg<"fs", 4>;
+def GS : X86Reg<"gs", 5>;
+
+// Debug registers
+def DR0 : X86Reg<"dr0", 0>;
+def DR1 : X86Reg<"dr1", 1>;
+def DR2 : X86Reg<"dr2", 2>;
+def DR3 : X86Reg<"dr3", 3>;
+def DR4 : X86Reg<"dr4", 4>;
+def DR5 : X86Reg<"dr5", 5>;
+def DR6 : X86Reg<"dr6", 6>;
+def DR7 : X86Reg<"dr7", 7>;
+
+// Control registers
+def CR0 : X86Reg<"cr0", 0>;
+def CR1 : X86Reg<"cr1", 1>;
+def CR2 : X86Reg<"cr2", 2>;
+def CR3 : X86Reg<"cr3", 3>;
+def CR4 : X86Reg<"cr4", 4>;
+def CR5 : X86Reg<"cr5", 5>;
+def CR6 : X86Reg<"cr6", 6>;
+def CR7 : X86Reg<"cr7", 7>;
+def CR8 : X86Reg<"cr8", 8>;
+def CR9 : X86Reg<"cr9", 9>;
+def CR10 : X86Reg<"cr10", 10>;
+def CR11 : X86Reg<"cr11", 11>;
+def CR12 : X86Reg<"cr12", 12>;
+def CR13 : X86Reg<"cr13", 13>;
+def CR14 : X86Reg<"cr14", 14>;
+def CR15 : X86Reg<"cr15", 15>;
+
+// Pseudo index registers
+def EIZ : X86Reg<"eiz", 4>;
+def RIZ : X86Reg<"riz", 4>;
+
//===----------------------------------------------------------------------===//
// Register Class Definitions... now that we have all of the pieces, define the
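
The .td rewrite above threads a hardware encoding through every register definition via the new X86Reg class, which is what lets getSEHRegNum become a plain getEncodingValue() lookup: the REX-extended registers simply carry encodings 8-15. A hand-written C++ excerpt of the resulting table for the 64-bit GPRs (an illustration, not tablegen output):

#include <cstdint>
#include <cstdio>

// Rough model of what the new X86Reg tablegen class records per register.
struct RegInfo {
  const char *Name;
  uint16_t HWEncoding;   // what getEncodingValue() now returns
};

static const RegInfo Regs[] = {
  {"rax", 0},  {"rcx", 1},  {"rdx", 2},  {"rbx", 3},
  {"rsp", 4},  {"rbp", 5},  {"rsi", 6},  {"rdi", 7},
  {"r8",  8},  {"r9",  9},  {"r10", 10}, {"r11", 11},
  {"r12", 12}, {"r13", 13}, {"r14", 14}, {"r15", 15},
};

int main() {
  // The old getSEHRegNum special-cased r8-r15 to add 8; with explicit
  // encodings the SEH number is just the stored value.
  for (const RegInfo &R : Regs)
    std::printf("%-4s -> %u\n", R.Name, R.HWEncoding);
}
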
diff --git a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index 00edcbc..723e50c 100644
--- a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -54,7 +54,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
if (const char *bzeroEntry = V &&
V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
EVT IntPtr = TLI.getPointerTy();
- Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+ Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst;
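
For context, the surrounding EmitTargetCodeForMemset logic calls a target-provided bzero entry point when the stored value is a known zero, since bzero takes no value operand. A plain-C++ rendering of that decision; bzero is assumed to exist here (it does on most POSIX systems, not on Windows), and HaveBZero stands in for Subtarget->getBZeroEntry() returning non-null.

#include <cstddef>
#include <cstring>
#include <strings.h>   // bzero; POSIX only

static void emitMemset(void *Dst, int Val, std::size_t Len, bool HaveBZero) {
  if (Val == 0 && HaveBZero) {
    bzero(Dst, Len);             // two arguments: no value operand needed
    return;
  }
  std::memset(Dst, Val, Len);
}
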
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
index c2db11a..d1ed680 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -163,17 +163,6 @@ bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const {
return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
}
-/// getSpecialAddressLatency - For targets where it is beneficial to
-/// backschedule instructions that compute addresses, return a value
-/// indicating the number of scheduling cycles of backscheduling that
-/// should be attempted.
-unsigned X86Subtarget::getSpecialAddressLatency() const {
- // For x86 out-of-order targets, back-schedule address computations so
- // that loads and stores aren't blocked.
- // This value was chosen arbitrarily.
- return 200;
-}
-
void X86Subtarget::AutoDetectSubtargetFeatures() {
unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
unsigned MaxLevel;
@@ -313,6 +302,10 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
HasBMI2 = true;
ToggleFeature(X86::FeatureBMI2);
}
+ if (IsIntel && ((EBX >> 11) & 0x1)) {
+ HasRTM = true;
+ ToggleFeature(X86::FeatureRTM);
+ }
}
}
}
@@ -341,11 +334,13 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
, HasLZCNT(false)
, HasBMI(false)
, HasBMI2(false)
+ , HasRTM(false)
, IsBTMemSlow(false)
, IsUAMemFast(false)
, HasVectorUAMem(false)
, HasCmpxchg16b(false)
, UseLeaForSP(false)
+ , HasSlowDivide(false)
, PostRAScheduler(false)
, stackAlignment(4)
// FIXME: this is a known good value for Yonah. How about others?
@@ -400,6 +395,10 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
}
}
+ // CPUName may have been set by the CPU detection code. Make sure the
+ // new MCSchedModel is used.
+ InitMCProcessorInfo(CPUName, FS);
+
if (X86ProcFamily == IntelAtom)
PostRAScheduler = true;
@@ -416,8 +415,8 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
assert((!In64BitMode || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- // Stack alignment is 16 bytes on Darwin, Linux and Solaris (both 32 and 64
- // bit) and for all 64-bit targets.
+ // Stack alignment is 16 bytes on Darwin, Linux and Solaris (both
+ // 32 and 64 bit) and for all 64-bit targets.
if (StackAlignOverride)
stackAlignment = StackAlignOverride;
else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
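
The new RTM probe above reads CPUID leaf 7, subleaf 0, and tests bit 11 of EBX, the same pattern as the neighboring BMI/BMI2 checks. A standalone equivalent for GCC/Clang on x86 (assuming your toolchain's cpuid.h provides __get_cpuid_count; older ones may only have __cpuid_count):

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
  if (!__get_cpuid_count(7, 0, &EAX, &EBX, &ECX, &EDX))
    return 1;                          // leaf 7 not supported
  bool HasRTM = (EBX >> 11) & 0x1;     // same bit test as the patch
  std::printf("RTM: %s\n", HasRTM ? "yes" : "no");
}
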
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.h b/contrib/llvm/lib/Target/X86/X86Subtarget.h
index 6841c5b..8bf4cc7 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.h
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.h
@@ -118,6 +118,9 @@ protected:
/// HasBMI2 - Processor has BMI2 instructions.
bool HasBMI2;
+ /// HasRTM - Processor has RTM instructions.
+ bool HasRTM;
+
/// IsBTMemSlow - True if BT (bit test) of memory instructions are slow.
bool IsBTMemSlow;
@@ -136,6 +139,10 @@ protected:
/// the stack pointer. This is an optimization for Intel Atom processors.
bool UseLeaForSP;
+ /// HasSlowDivide - True if smaller divides are significantly faster than
+ /// full divides and should be used when possible.
+ bool HasSlowDivide;
+
/// PostRAScheduler - True if using post-register-allocation scheduler.
bool PostRAScheduler;
@@ -205,7 +212,8 @@ public:
bool hasAES() const { return HasAES; }
bool hasPCLMUL() const { return HasPCLMUL; }
bool hasFMA() const { return HasFMA; }
- bool hasFMA4() const { return HasFMA4; }
+ // FIXME: Favor FMA when both are enabled. Is this the right thing to do?
+ bool hasFMA4() const { return HasFMA4 && !HasFMA; }
bool hasXOP() const { return HasXOP; }
bool hasMOVBE() const { return HasMOVBE; }
bool hasRDRAND() const { return HasRDRAND; }
@@ -214,11 +222,13 @@ public:
bool hasLZCNT() const { return HasLZCNT; }
bool hasBMI() const { return HasBMI; }
bool hasBMI2() const { return HasBMI2; }
+ bool hasRTM() const { return HasRTM; }
bool isBTMemSlow() const { return IsBTMemSlow; }
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool hasVectorUAMem() const { return HasVectorUAMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
bool useLeaForSP() const { return UseLeaForSP; }
+ bool hasSlowDivide() const { return HasSlowDivide; }
bool isAtom() const { return X86ProcFamily == IntelAtom; }
@@ -231,10 +241,10 @@ public:
bool isTargetSolaris() const {
return TargetTriple.getOS() == Triple::Solaris;
}
-
- // ELF is a reasonably sane default and the only other X86 targets we
- // support are Darwin and Windows. Just use "not those".
- bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
+ bool isTargetELF() const {
+ return (TargetTriple.getEnvironment() == Triple::ELF ||
+ TargetTriple.isOSBinFormatELF());
+ }
bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
bool isTargetNaCl() const {
return TargetTriple.getOS() == Triple::NativeClient;
@@ -245,7 +255,10 @@ public:
bool isTargetMingw() const { return TargetTriple.getOS() == Triple::MinGW32; }
bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
- bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
+ bool isTargetCOFF() const {
+ return (TargetTriple.getEnvironment() != Triple::ELF &&
+ TargetTriple.isOSBinFormatCOFF());
+ }
bool isTargetEnvMacho() const { return TargetTriple.isEnvironmentMachO(); }
bool isTargetWin64() const {
@@ -296,12 +309,6 @@ public:
/// returns null.
const char *getBZeroEntry() const;
- /// getSpecialAddressLatency - For targets where it is beneficial to
- /// backschedule instructions that compute addresses, return a value
- /// indicating the number of scheduling cycles of backscheduling that
- /// should be attempted.
- unsigned getSpecialAddressLatency() const;
-
/// enablePostRAScheduler - run for Atom optimization.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode& Mode,
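
Note the hasFMA4 predicate in the hunk above: rather than adding selection logic elsewhere, the subtarget simply reports FMA4 as absent whenever FMA3 is present, so at most one of the two encodings is ever selected. The pattern in isolation:

struct FMAFeatures {
  bool HasFMA  = false;   // FMA3
  bool HasFMA4 = false;
  bool hasFMA()  const { return HasFMA; }
  // FMA3 wins when both CPU features are set (see the FIXME in the patch).
  bool hasFMA4() const { return HasFMA4 && !HasFMA; }
};

The FIXME acknowledges this is a policy choice in the predicate, not a hardware constraint.
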
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
index b7ba568..158f9dc 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -36,7 +36,7 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
: X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false),
- DataLayout(getSubtargetImpl()->isTargetDarwin() ?
+ DL(getSubtargetImpl()->isTargetDarwin() ?
"e-p:32:32-f64:32:64-i64:32:64-f80:128:128-f128:128:128-"
"n8:16:32-S128" :
(getSubtargetImpl()->isTargetCygMing() ||
@@ -48,7 +48,8 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT,
InstrInfo(*this),
TSInfo(*this),
TLInfo(*this),
- JITInfo(*this) {
+ JITInfo(*this),
+ STTI(&TLInfo), VTTI(&TLInfo) {
}
void X86_64TargetMachine::anchor() { }
@@ -59,12 +60,13 @@ X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
: X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true),
- DataLayout("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-"
+ DL("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-"
"n8:16:32:64-S128"),
InstrInfo(*this),
TSInfo(*this),
TLInfo(*this),
- JITInfo(*this) {
+ JITInfo(*this),
+ STTI(&TLInfo), VTTI(&TLInfo){
}
/// X86TargetMachine ctor - Create an X86 target.
@@ -78,7 +80,6 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT,
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS, Options.StackAlignmentOverride, is64Bit),
FrameLowering(*this, Subtarget),
- ELFWriterInfo(is64Bit, true),
InstrItins(Subtarget.getInstrItineraryData()){
// Determine the PICStyle based on the target selected.
if (getRelocationModel() == Reloc::Static) {
@@ -113,6 +114,12 @@ UseVZeroUpper("x86-use-vzeroupper",
cl::desc("Minimize AVX to SSE transition penalty"),
cl::init(true));
+// Temporary option to control early if-conversion for x86 while adding machine
+// models.
+static cl::opt<bool>
+X86EarlyIfConv("x86-early-ifcvt",
+ cl::desc("Enable early if-conversion on X86"));
+
//===----------------------------------------------------------------------===//
// Pass Pipeline Configuration
//===----------------------------------------------------------------------===//
@@ -142,7 +149,7 @@ public:
TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) {
X86PassConfig *PC = new X86PassConfig(this, PM);
- if (Subtarget.hasCMov())
+ if (X86EarlyIfConv && Subtarget.hasCMov())
PC->enablePass(&EarlyIfConverterID);
return PC;
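
The new x86-early-ifcvt flag follows the usual LLVM cl::opt pattern: an off-by-default boolean ANDed with the hardware predicate before the pass is enabled. A minimal sketch of that shape (hasCMov here is a stub for Subtarget.hasCMov(), and the sketch needs LLVM headers to build):

#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EarlyIfConv("x86-early-ifcvt",
    cl::desc("Enable early if-conversion on X86"), cl::init(false));

static bool hasCMov() { return true; }   // stub for Subtarget.hasCMov()

bool shouldEnableEarlyIfConversion() {
  // Both the command-line switch and the CPU capability must agree.
  return EarlyIfConv && hasCMov();
}
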
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.h b/contrib/llvm/lib/Target/X86/X86TargetMachine.h
index 8e935af..12311a1 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.h
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.h
@@ -15,7 +15,6 @@
#define X86TARGETMACHINE_H
#include "X86.h"
-#include "X86ELFWriterInfo.h"
#include "X86InstrInfo.h"
#include "X86ISelLowering.h"
#include "X86FrameLowering.h"
@@ -23,8 +22,9 @@
#include "X86SelectionDAGInfo.h"
#include "X86Subtarget.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
@@ -33,7 +33,6 @@ class StringRef;
class X86TargetMachine : public LLVMTargetMachine {
X86Subtarget Subtarget;
X86FrameLowering FrameLowering;
- X86ELFWriterInfo ELFWriterInfo;
InstrItineraryData InstrItins;
public:
@@ -62,9 +61,6 @@ public:
virtual const X86RegisterInfo *getRegisterInfo() const {
return &getInstrInfo()->getRegisterInfo();
}
- virtual const X86ELFWriterInfo *getELFWriterInfo() const {
- return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
- }
virtual const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
}
@@ -80,17 +76,19 @@ public:
///
class X86_32TargetMachine : public X86TargetMachine {
virtual void anchor();
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
X86InstrInfo InstrInfo;
X86SelectionDAGInfo TSInfo;
X86TargetLowering TLInfo;
X86JITInfo JITInfo;
+ ScalarTargetTransformImpl STTI;
+ X86VectorTargetTransformInfo VTTI;
public:
X86_32TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
- virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const DataLayout *getDataLayout() const { return &DL; }
virtual const X86TargetLowering *getTargetLowering() const {
return &TLInfo;
}
@@ -103,23 +101,31 @@ public:
virtual X86JITInfo *getJITInfo() {
return &JITInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
};
/// X86_64TargetMachine - X86 64-bit target machine.
///
class X86_64TargetMachine : public X86TargetMachine {
virtual void anchor();
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
X86InstrInfo InstrInfo;
X86SelectionDAGInfo TSInfo;
X86TargetLowering TLInfo;
X86JITInfo JITInfo;
+ ScalarTargetTransformImpl STTI;
+ X86VectorTargetTransformInfo VTTI;
public:
X86_64TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
- virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const DataLayout *getDataLayout() const { return &DL; }
virtual const X86TargetLowering *getTargetLowering() const {
return &TLInfo;
}
@@ -132,6 +138,12 @@ public:
virtual X86JITInfo *getJITInfo() {
return &JITInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
};
} // End llvm namespace
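
The TargetData-to-DataLayout rename recurs through the rest of this patch; the shape is always the same, a per-target-machine object built from a layout string and exposed through a const accessor. Sketched with stand-in types (neither class below is the LLVM one):

#include <string>

struct DataLayoutSketch {         // stand-in for llvm::DataLayout
  std::string Desc;               // e.g. "e-p:64:64-...", uninterpreted here
  unsigned PointerBits;
};

class TargetMachineSketch {
  DataLayoutSketch DL;
public:
  explicit TargetMachineSketch(bool Is64Bit)
      : DL{Is64Bit ? "e-p:64:64" : "e-p:32:32", Is64Bit ? 64u : 32u} {}
  const DataLayoutSketch *getDataLayout() const { return &DL; }
};
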
diff --git a/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp b/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 80b75dc..c4a5887 100644
--- a/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -42,7 +42,6 @@ namespace {
private:
const TargetInstrInfo *TII; // Machine instruction info.
- MachineBasicBlock *MBB; // Current basic block
// Any YMM register live-in to this function?
bool FnHasLiveInYmm;
@@ -84,7 +83,7 @@ namespace {
// 2) All states must be clean for the result to be clean
// 3) If none above and one unknown, the result state is also unknown
//
- unsigned computeState(unsigned PrevState, unsigned CurState) {
+ static unsigned computeState(unsigned PrevState, unsigned CurState) {
if (PrevState == ST_INIT)
return CurState;
@@ -122,7 +121,7 @@ static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) {
}
static bool hasYmmReg(MachineInstr *MI) {
- for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
@@ -148,7 +147,7 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
const TargetRegisterClass *RC = &X86::VR256RegClass;
for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
i != e; i++) {
- if (MRI.isPhysRegUsed(*i)) {
+ if (!MRI.reg_nodbg_empty(*i)) {
YMMUsed = true;
break;
}
@@ -189,7 +188,6 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
MachineBasicBlock &BB) {
bool Changed = false;
unsigned BBNum = BB.getNumber();
- MBB = &BB;
// Don't process already solved BBs
if (BBSolved[BBNum])
@@ -207,7 +205,7 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
// The entry MBB for the function may set the initial state to dirty if
// the function receives any YMM incoming arguments
- if (MBB == MF.begin()) {
+ if (&BB == MF.begin()) {
EntryState = ST_CLEAN;
if (FnHasLiveInYmm)
EntryState = ST_DIRTY;
@@ -253,7 +251,7 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
// When unknown, only compute the information within the block to have
// it available in the exit if possible, but don't change the block.
if (EntryState != ST_UNKNOWN) {
- BuildMI(*MBB, I, dl, TII->get(X86::VZEROUPPER));
+ BuildMI(BB, I, dl, TII->get(X86::VZEROUPPER));
++NumVZU;
}
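
The computeState hunk above only makes the merge function static; the merge rules themselves, documented in the pass's comment (one dirty input makes the result dirty, all inputs must be clean for a clean result, anything else is unknown), are worth seeing whole. Reconstructed from that comment, with ST_INIT as the identity element:

enum State { ST_INIT, ST_CLEAN, ST_DIRTY, ST_UNKNOWN };

static unsigned computeState(unsigned Prev, unsigned Cur) {
  if (Prev == ST_INIT)                     // first block seen
    return Cur;
  if (Prev == ST_DIRTY || Cur == ST_DIRTY)
    return ST_DIRTY;                       // one dirty state taints the result
  if (Prev == ST_CLEAN && Cur == ST_CLEAN)
    return ST_CLEAN;                       // all inputs must be clean
  return ST_UNKNOWN;                       // otherwise undecidable locally
}
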
diff --git a/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
index c76866f..caae562 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -31,7 +31,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -112,7 +112,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
EmitSpecialLLVMGlobal(GV))
return;
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(GV, Mang,TM));
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index a4e5647..e18d973 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -23,7 +23,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/ErrorHandling.h"
@@ -98,12 +98,13 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
bool FP = hasFP(MF);
- bool Nested = MF.getFunction()->
- getAttributes().hasAttrSomewhere(Attribute::Nest);
+ const AttrListPtr &PAL = MF.getFunction()->getAttributes();
- if (Nested) {
- loadFromStack(MBB, MBBI, XCore::R11, 0, dl, TII);
- }
+ for (unsigned I = 0, E = PAL.getNumAttrs(); I != E; ++I)
+ if (PAL.getAttributesAtIndex(I).hasAttribute(Attributes::Nest)) {
+ loadFromStack(MBB, MBBI, XCore::R11, 0, dl, TII);
+ break;
+ }
// Work out frame sizes.
int FrameSize = MFI->getStackSize();
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 8643ffc..9e7816e 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -285,7 +285,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
llvm_unreachable(0);
}
SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
unsigned Size = TD->getTypeAllocSize(Ty);
SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
DAG.getConstant(Size, MVT::i32));
@@ -298,7 +298,7 @@ LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
DebugLoc DL = Op.getDebugLoc();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
- SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true);
+ SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());
return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}
@@ -405,7 +405,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
return SDValue();
- unsigned ABIAlignment = getTargetData()->
+ unsigned ABIAlignment = getDataLayout()->
getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
// Leave aligned load alone.
if (LD->getAlignment() >= ABIAlignment)
@@ -477,7 +477,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
}
// Lower to a call to __misaligned_load(BasePtr).
- Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+ Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -507,7 +507,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
return SDValue();
}
- unsigned ABIAlignment = getTargetData()->
+ unsigned ABIAlignment = getDataLayout()->
getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
// Leave aligned store alone.
if (ST->getAlignment() >= ABIAlignment) {
@@ -536,7 +536,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
}
// Lower to a call to __misaligned_store(BasePtr, Value).
- Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+ Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -1499,7 +1499,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
if (StoreBits % 8) {
break;
}
- unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
+ unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
unsigned Alignment = ST->getAlignment();
if (Alignment >= ABIAlignment) {
@@ -1570,7 +1570,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
if (Ty->getTypeID() == Type::VoidTyID)
return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
unsigned Size = TD->getTypeAllocSize(Ty);
if (AM.BaseGV) {
return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
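
Several hunks above lower misaligned XCore loads and stores to calls such as __misaligned_load(BasePtr). What such a helper must do, rendered as portable C++ (the memcpy idiom compiles to byte accesses where the hardware lacks unaligned word access; the real XCore routines live in its runtime library, and the names below are illustrative):

#include <cstdint>
#include <cstring>

extern "C" uint32_t misaligned_load_sketch(const void *P) {
  uint32_t V;
  std::memcpy(&V, P, sizeof V);   // no alignment assumption on P
  return V;
}

extern "C" void misaligned_store_sketch(void *P, uint32_t V) {
  std::memcpy(P, &V, sizeof V);
}
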
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
index ae646a2..3e7666b 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
@@ -33,7 +33,7 @@ def XCoreBranchLink : SDNode<"XCoreISD::BL",SDT_XCoreBranchLink,
SDNPVariadic]>;
def XCoreRetsp : SDNode<"XCoreISD::RETSP", SDTBrind,
- [SDNPHasChain, SDNPOptInGlue]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPMayLoad]>;
def SDT_XCoreBR_JT : SDTypeProfile<0, 2,
[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
@@ -58,7 +58,7 @@ def cprelwrapper : SDNode<"XCoreISD::CPRelativeWrapper", SDT_XCoreAddress,
def SDT_XCoreStwsp : SDTypeProfile<0, 2, [SDTCisInt<1>]>;
def XCoreStwsp : SDNode<"XCoreISD::STWSP", SDT_XCoreStwsp,
- [SDNPHasChain]>;
+ [SDNPHasChain, SDNPMayStore]>;
// These are target-independent nodes, but have target-specific formats.
def SDT_XCoreCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index cdd0a08..be5855a 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -176,7 +176,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
#ifndef NDEBUG
DEBUG(errs() << "\nFunction : "
- << MF.getFunction()->getName() << "\n");
+ << MF.getName() << "\n");
DEBUG(errs() << "<--------->\n");
DEBUG(MI.print(errs()));
DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n");
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index 11ec86b..d5a932c 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -27,12 +27,12 @@ XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS),
- DataLayout("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-"
+ DL("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-"
"i16:16:32-i32:32:32-i64:32:32-n32"),
InstrInfo(),
FrameLowering(Subtarget),
TLInfo(*this),
- TSInfo(*this) {
+ TSInfo(*this), STTI(&TLInfo), VTTI(&TLInfo) {
}
namespace {
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
index 2546681..c60c6a3 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
@@ -20,17 +20,20 @@
#include "XCoreISelLowering.h"
#include "XCoreSelectionDAGInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetTransformImpl.h"
+#include "llvm/DataLayout.h"
namespace llvm {
class XCoreTargetMachine : public LLVMTargetMachine {
XCoreSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
+ const DataLayout DL; // Calculates type size & alignment
XCoreInstrInfo InstrInfo;
XCoreFrameLowering FrameLowering;
XCoreTargetLowering TLInfo;
XCoreSelectionDAGInfo TSInfo;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
XCoreTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -53,7 +56,13 @@ public:
virtual const TargetRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
- virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
+ virtual const DataLayout *getDataLayout() const { return &DL; }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index b94dd69..be48b20 100644
--- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -153,7 +153,8 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
SmallPtrSet<Argument*, 8> ArgsToPromote;
SmallPtrSet<Argument*, 8> ByValArgsToTransform;
for (unsigned i = 0; i != PointerArgs.size(); ++i) {
- bool isByVal = F->paramHasAttr(PointerArgs[i].second+1, Attribute::ByVal);
+ bool isByVal=F->getParamAttributes(PointerArgs[i].second+1).
+ hasAttribute(Attributes::ByVal);
Argument *PtrArg = PointerArgs[i].first;
Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
@@ -517,8 +518,10 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
const AttrListPtr &PAL = F->getAttributes();
// Add any return attributes.
- if (Attributes attrs = PAL.getRetAttributes())
- AttributesVec.push_back(AttributeWithIndex::get(0, attrs));
+ Attributes attrs = PAL.getRetAttributes();
+ if (attrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ attrs));
// First, determine the new argument list
unsigned ArgIndex = 1;
@@ -534,7 +537,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
} else if (!ArgsToPromote.count(I)) {
// Unchanged argument
Params.push_back(I->getType());
- if (Attributes attrs = PAL.getParamAttributes(ArgIndex))
+ Attributes attrs = PAL.getParamAttributes(ArgIndex);
+ if (attrs.hasAttributes())
AttributesVec.push_back(AttributeWithIndex::get(Params.size(), attrs));
} else if (I->use_empty()) {
// Dead argument (which are always marked as promotable)
@@ -587,19 +591,13 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
}
// Add any function attributes.
- if (Attributes attrs = PAL.getFnAttributes())
- AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));
+ attrs = PAL.getFnAttributes();
+ if (attrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ attrs));
Type *RetTy = FTy->getReturnType();
- // Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
- // have zero fixed arguments.
- bool ExtraArgHack = false;
- if (Params.empty() && FTy->isVarArg()) {
- ExtraArgHack = true;
- Params.push_back(Type::getInt32Ty(F->getContext()));
- }
-
// Construct the new function type using the new arguments.
FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());
@@ -613,7 +611,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Recompute the parameter attributes list based on the new arguments for
// the function.
- NF->setAttributes(AttrListPtr::get(AttributesVec));
+ NF->setAttributes(AttrListPtr::get(F->getContext(), AttributesVec));
AttributesVec.clear();
F->getParent()->getFunctionList().insert(F, NF);
@@ -641,8 +639,10 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
const AttrListPtr &CallPAL = CS.getAttributes();
// Add any return attributes.
- if (Attributes attrs = CallPAL.getRetAttributes())
- AttributesVec.push_back(AttributeWithIndex::get(0, attrs));
+ Attributes attrs = CallPAL.getRetAttributes();
+ if (attrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ attrs));
// Loop over the operands, inserting GEP and loads in the caller as
// appropriate.
@@ -653,7 +653,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
Args.push_back(*AI); // Unmodified argument
- if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
+ Attributes Attrs = CallPAL.getParamAttributes(ArgIndex);
+ if (Attrs.hasAttributes())
AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
} else if (ByValArgsToTransform.count(I)) {
@@ -711,30 +712,32 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
}
}
- if (ExtraArgHack)
- Args.push_back(Constant::getNullValue(Type::getInt32Ty(F->getContext())));
-
// Push any varargs arguments on the list.
for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
Args.push_back(*AI);
- if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
+ Attributes Attrs = CallPAL.getParamAttributes(ArgIndex);
+ if (Attrs.hasAttributes())
AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
}
// Add any function attributes.
- if (Attributes attrs = CallPAL.getFnAttributes())
- AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));
+ attrs = CallPAL.getFnAttributes();
+ if (attrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ attrs));
Instruction *New;
if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
Args, "", Call);
cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
- cast<InvokeInst>(New)->setAttributes(AttrListPtr::get(AttributesVec));
+ cast<InvokeInst>(New)->setAttributes(AttrListPtr::get(II->getContext(),
+ AttributesVec));
} else {
New = CallInst::Create(NF, Args, "", Call);
cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
- cast<CallInst>(New)->setAttributes(AttrListPtr::get(AttributesVec));
+ cast<CallInst>(New)->setAttributes(AttrListPtr::get(New->getContext(),
+ AttributesVec));
if (cast<CallInst>(Call)->isTailCall())
cast<CallInst>(New)->setTailCall();
}
@@ -870,16 +873,9 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
}
// Increment I2 past all of the arguments added for this promoted pointer.
- for (unsigned i = 0, e = ArgIndices.size(); i != e; ++i)
- ++I2;
+ std::advance(I2, ArgIndices.size());
}
- // Notify the alias analysis implementation that we inserted a new argument.
- if (ExtraArgHack)
- AA.copyValue(Constant::getNullValue(Type::getInt32Ty(F->getContext())),
- NF->arg_begin());
-
-
// Tell the alias analysis that the old function is about to disappear.
AA.replaceWithNewValue(F, NF);
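
The attribute-list rebuilding above replaces the bare indices 0 and ~0 with the named AttrListPtr::ReturnIndex and AttrListPtr::FunctionIndex sentinels, and guards each push with hasAttributes(). The indexing scheme in isolation (a sketch; Bits is a stand-in for the real Attributes type):

#include <cstdint>
#include <vector>

enum : uint32_t { ReturnIndex = 0U, FunctionIndex = ~0U };
// Parameter attributes occupy the 1-based slots in between.

struct AttrWithIndex { uint32_t Index; uint64_t Bits; };

static void addIfPresent(std::vector<AttrWithIndex> &Vec,
                         uint32_t Index, uint64_t Bits) {
  if (Bits)                       // mirrors attrs.hasAttributes()
    Vec.push_back({Index, Bits});
}
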
diff --git a/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp b/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp
new file mode 100644
index 0000000..2e32240
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp
@@ -0,0 +1,47 @@
+//===- BarrierNoopPass.cpp - A barrier pass for the pass manager ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// NOTE: DO NOT USE THIS IF AVOIDABLE
+//
+// This pass is a nonce pass intended to allow manipulation of the implicitly
+// nesting pass manager. For example, it can be used to cause a CGSCC pass
+// manager to be closed prior to running a new collection of function passes.
+//
+// FIXME: This is a huge HACK. This should be removed when the pass manager's
+// nesting is made explicit instead of implicit.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Pass.h"
+#include "llvm/Transforms/IPO.h"
+using namespace llvm;
+
+namespace {
+/// \brief A nonce module pass used to place a barrier in a pass manager.
+///
+/// There is no mechanism for ending a CGSCC pass manager once one is started.
+/// This prevents extension points from having clear deterministic ordering
+/// when they are phrased as non-module passes.
+class BarrierNoop : public ModulePass {
+public:
+ static char ID; // Pass identification.
+
+ BarrierNoop() : ModulePass(ID) {
+ initializeBarrierNoopPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnModule(Module &M) { return false; }
+};
+}
+
+ModulePass *llvm::createBarrierNoopPass() { return new BarrierNoop(); }
+
+char BarrierNoop::ID = 0;
+INITIALIZE_PASS(BarrierNoop, "barrier", "A No-Op Barrier Pass",
+ false, false)
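A sketch of how the barrier is meant to be used; the PassManagerBuilder hunk later in this patch does exactly this, and the extension pass here is hypothetical:

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"
    using namespace llvm;

    void addExtensionsAfterInliner(PassManagerBase &MPM, Pass *Inliner,
                                   Pass *HypotheticalExtension) {
      MPM.add(Inliner);                 // implicitly opens a CGSCC pass manager
      MPM.add(createBarrierNoopPass()); // forces that manager to close
      MPM.add(HypotheticalExtension);   // now scheduled as a module-level pass
    }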
diff --git a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index d8fae8a..e2f0126 100644
--- a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -23,7 +23,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -50,7 +50,7 @@ namespace {
// alignment to a concrete value.
unsigned getAlignment(GlobalVariable *GV) const;
- const TargetData *TD;
+ const DataLayout *TD;
};
}
@@ -98,7 +98,7 @@ unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
}
bool ConstantMerge::runOnModule(Module &M) {
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
@@ -107,7 +107,7 @@ bool ConstantMerge::runOnModule(Module &M) {
// Map unique <constants, has-unknown-alignment> pairs to globals. We don't
// want to merge globals of unknown alignment with those of explicit
- // alignment. If we have TargetData, we always know the alignment.
+ // alignment. If we have DataLayout, we always know the alignment.
DenseMap<PointerIntPair<Constant*, 1, bool>, GlobalVariable*> CMap;
// Replacements - This vector contains a list of replacements to perform.
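DataLayout keeps TargetData's optional-analysis contract: getAnalysisIfAvailable may return null, in which case ConstantMerge must treat implicit alignments as unknown. A minimal sketch of that pattern (the pass shell here is hypothetical):

    #include "llvm/DataLayout.h"
    #include "llvm/Module.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct UsesLayout : ModulePass {
      static char ID;
      UsesLayout() : ModulePass(ID) {}
      virtual bool runOnModule(Module &M) {
        // Null when no target description was registered with the manager.
        if (const DataLayout *TD = getAnalysisIfAvailable<DataLayout>()) {
          (void)TD; // alignment queries are exact from here on
        } else {
          // Globals without an explicit alignment must be treated as
          // having unknown alignment.
        }
        return false;
      }
    };
    }
    char UsesLayout::ID = 0;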
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index fd23a93..4cfd0b2 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -21,7 +21,9 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/CallingConv.h"
#include "llvm/Constant.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/DIBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
@@ -30,6 +32,7 @@
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
@@ -121,6 +124,15 @@ namespace {
typedef SmallVector<RetOrArg, 5> UseVector;
+ // Map each LLVM function to its corresponding debug info metadata. If
+ // the function is replaced with another one, we patch the pointer to
+ // the LLVM function in the metadata.
+ // Since code generation for the module is finished (and DIBuilder is
+ // finalized), we assume the subprogram descriptors won't change; they
+ // are only kept in the map for a short time anyway.
+ typedef DenseMap<Function*, DISubprogram> FunctionDIMap;
+ FunctionDIMap FunctionDIs;
+
protected:
// DAH uses this to specify a different ID.
explicit DAE(char &ID) : ModulePass(ID) {}
@@ -141,6 +153,7 @@ namespace {
unsigned RetValNum = 0);
Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
+ void CollectFunctionDIs(Module &M);
void SurveyFunction(const Function &F);
void MarkValue(const RetOrArg &RA, Liveness L,
const UseVector &MaybeLiveUses);
@@ -180,6 +193,33 @@ INITIALIZE_PASS(DAH, "deadarghaX0r",
ModulePass *llvm::createDeadArgEliminationPass() { return new DAE(); }
ModulePass *llvm::createDeadArgHackingPass() { return new DAH(); }
+/// CollectFunctionDIs - Map each function in the module to its debug info
+/// descriptor.
+void DAE::CollectFunctionDIs(Module &M) {
+ FunctionDIs.clear();
+
+ for (Module::named_metadata_iterator I = M.named_metadata_begin(),
+ E = M.named_metadata_end(); I != E; ++I) {
+ NamedMDNode &NMD = *I;
+ for (unsigned MDIndex = 0, MDNum = NMD.getNumOperands();
+ MDIndex < MDNum; ++MDIndex) {
+ MDNode *Node = NMD.getOperand(MDIndex);
+ if (!DIDescriptor(Node).isCompileUnit())
+ continue;
+ DICompileUnit CU(Node);
+ const DIArray &SPs = CU.getSubprograms();
+ for (unsigned SPIndex = 0, SPNum = SPs.getNumElements();
+ SPIndex < SPNum; ++SPIndex) {
+ DISubprogram SP(SPs.getElement(SPIndex));
+ if (!SP.Verify())
+ continue;
+ if (Function *F = SP.getFunction())
+ FunctionDIs[F] = SP;
+ }
+ }
+ }
+}
+
/// DeleteDeadVarargs - If this is a function that takes a ... list, and if
/// llvm.vastart is never called, the varargs list is dead for the function.
bool DAE::DeleteDeadVarargs(Function &Fn) {
@@ -236,9 +276,11 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
SmallVector<AttributeWithIndex, 8> AttributesVec;
for (unsigned i = 0; PAL.getSlot(i).Index <= NumArgs; ++i)
AttributesVec.push_back(PAL.getSlot(i));
- if (Attributes FnAttrs = PAL.getFnAttributes())
- AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
- PAL = AttrListPtr::get(AttributesVec);
+ Attributes FnAttrs = PAL.getFnAttributes();
+ if (FnAttrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ FnAttrs));
+ PAL = AttrListPtr::get(Fn.getContext(), AttributesVec);
}
Instruction *New;
@@ -284,6 +326,11 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
I2->takeName(I);
}
+ // Patch the pointer to LLVM function in debug info descriptor.
+ FunctionDIMap::iterator DI = FunctionDIs.find(&Fn);
+ if (DI != FunctionDIs.end())
+ DI->second.replaceFunction(NF);
+
// Finally, nuke the old function.
Fn.eraseFromParent();
return true;
@@ -717,13 +764,17 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// here. Currently, this should not be possible, but special handling might be
// required when new return value attributes are added.
if (NRetTy->isVoidTy())
- RAttrs &= ~Attribute::typeIncompatible(NRetTy);
+ RAttrs =
+ Attributes::get(NRetTy->getContext(), AttrBuilder(RAttrs).
+ removeAttributes(Attributes::typeIncompatible(NRetTy)));
else
- assert((RAttrs & Attribute::typeIncompatible(NRetTy)) == 0
- && "Return attributes no longer compatible?");
+ assert(!AttrBuilder(RAttrs).
+ hasAttributes(Attributes::typeIncompatible(NRetTy)) &&
+ "Return attributes no longer compatible?");
- if (RAttrs)
- AttributesVec.push_back(AttributeWithIndex::get(0, RAttrs));
+ if (RAttrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ RAttrs));
// Remember which arguments are still alive.
SmallVector<bool, 10> ArgAlive(FTy->getNumParams(), false);
@@ -740,7 +791,8 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// Get the original parameter attributes (skipping the first one, that is
// for the return value).
- if (Attributes Attrs = PAL.getParamAttributes(i + 1))
+ Attributes Attrs = PAL.getParamAttributes(i + 1);
+ if (Attrs.hasAttributes())
AttributesVec.push_back(AttributeWithIndex::get(Params.size(), Attrs));
} else {
++NumArgumentsEliminated;
@@ -749,11 +801,12 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
}
}
- if (FnAttrs != Attribute::None)
- AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
+ if (FnAttrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ FnAttrs));
// Reconstruct the AttributesList based on the vector we constructed.
- AttrListPtr NewPAL = AttrListPtr::get(AttributesVec);
+ AttrListPtr NewPAL = AttrListPtr::get(F->getContext(), AttributesVec);
// Create the new function type based on the recomputed parameters.
FunctionType *NFTy = FunctionType::get(NRetTy, Params, FTy->isVarArg());
@@ -786,9 +839,12 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
Attributes RAttrs = CallPAL.getRetAttributes();
Attributes FnAttrs = CallPAL.getFnAttributes();
// Adjust in case the function was changed to return void.
- RAttrs &= ~Attribute::typeIncompatible(NF->getReturnType());
- if (RAttrs)
- AttributesVec.push_back(AttributeWithIndex::get(0, RAttrs));
+ RAttrs =
+ Attributes::get(NF->getContext(), AttrBuilder(RAttrs).
+ removeAttributes(Attributes::typeIncompatible(NF->getReturnType())));
+ if (RAttrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ RAttrs));
// Declare these outside of the loops, so we can reuse them for the second
// loop, which loops over the varargs.
@@ -800,22 +856,25 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
if (ArgAlive[i]) {
Args.push_back(*I);
// Get original parameter attributes, but skip return attributes.
- if (Attributes Attrs = CallPAL.getParamAttributes(i + 1))
+ Attributes Attrs = CallPAL.getParamAttributes(i + 1);
+ if (Attrs.hasAttributes())
AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
}
// Push any varargs arguments on the list. Don't forget their attributes.
for (CallSite::arg_iterator E = CS.arg_end(); I != E; ++I, ++i) {
Args.push_back(*I);
- if (Attributes Attrs = CallPAL.getParamAttributes(i + 1))
+ Attributes Attrs = CallPAL.getParamAttributes(i + 1);
+ if (Attrs.hasAttributes())
AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
}
- if (FnAttrs != Attribute::None)
- AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
+ if (FnAttrs.hasAttributes())
+ AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ FnAttrs));
// Reconstruct the AttributesList based on the vector we constructed.
- AttrListPtr NewCallPAL = AttrListPtr::get(AttributesVec);
+ AttrListPtr NewCallPAL = AttrListPtr::get(F->getContext(), AttributesVec);
Instruction *New;
if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
@@ -952,6 +1011,11 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
BB->getInstList().erase(RI);
}
+ // Patch the pointer to LLVM function in debug info descriptor.
+ FunctionDIMap::iterator DI = FunctionDIs.find(F);
+ if (DI != FunctionDIs.end())
+ DI->second.replaceFunction(NF);
+
// Now that the old function is dead, delete it.
F->eraseFromParent();
@@ -961,6 +1025,9 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
bool DAE::runOnModule(Module &M) {
bool Changed = false;
+ // Collect debug info descriptors for functions.
+ CollectFunctionDIs(M);
+
// First pass: Do a simple check to see if any functions can have their "..."
// removed. We can do this if they never call va_start. This loop cannot be
// fused with the next loop, because deleting a function invalidates
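Condensing the two DAE additions: the map is built once per module, then patched whenever a function is cloned away. makeReplacement below is a hypothetical stand-in for the rewrite that DeleteDeadVarargs/RemoveDeadStuffFromFunction perform; the map type and replaceFunction come from this patch:

    // Sketch; FunctionDIs is the DenseMap<Function*, DISubprogram> above,
    // already populated by CollectFunctionDIs(M).
    bool rewriteWithDebugInfo(Function &Fn,
                              DenseMap<Function*, DISubprogram> &FunctionDIs) {
      Function *NF = makeReplacement(Fn); // hypothetical clone, new signature
      // ... splice the body, rewrite call sites, NF->takeName(&Fn) ...
      DenseMap<Function*, DISubprogram>::iterator DI = FunctionDIs.find(&Fn);
      if (DI != FunctionDIs.end())
        DI->second.replaceFunction(NF); // keep !llvm.dbg.cu pointing at NF
      Fn.eraseFromParent();             // descriptor would otherwise dangle
      return true;
    }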
diff --git a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
index 4c7f0ed..6716deb 100644
--- a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
@@ -51,32 +51,75 @@ namespace {
// Visit the GlobalVariables.
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
- if (deleteStuff == (bool)Named.count(I) && !I->isDeclaration()) {
- I->setInitializer(0);
- } else {
+ bool Delete =
+ deleteStuff == (bool)Named.count(I) && !I->isDeclaration();
+ if (!Delete) {
if (I->hasAvailableExternallyLinkage())
continue;
if (I->getName() == "llvm.global_ctors")
continue;
}
- if (I->hasLocalLinkage())
+ bool Local = I->hasLocalLinkage();
+ if (Local)
I->setVisibility(GlobalValue::HiddenVisibility);
- I->setLinkage(GlobalValue::ExternalLinkage);
+
+ if (Local || Delete)
+ I->setLinkage(GlobalValue::ExternalLinkage);
+
+ if (Delete)
+ I->setInitializer(0);
}
// Visit the Functions.
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
- if (deleteStuff == (bool)Named.count(I) && !I->isDeclaration()) {
- I->deleteBody();
- } else {
+ bool Delete =
+ deleteStuff == (bool)Named.count(I) && !I->isDeclaration();
+ if (!Delete) {
if (I->hasAvailableExternallyLinkage())
continue;
}
- if (I->hasLocalLinkage())
+ bool Local = I->hasLocalLinkage();
+ if (Local)
I->setVisibility(GlobalValue::HiddenVisibility);
- I->setLinkage(GlobalValue::ExternalLinkage);
+
+ if (Local || Delete)
+ I->setLinkage(GlobalValue::ExternalLinkage);
+
+ if (Delete)
+ I->deleteBody();
+ }
+
+ // Visit the Aliases.
+ for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
+ I != E;) {
+ Module::alias_iterator CurI = I;
+ ++I;
+
+ if (CurI->hasLocalLinkage()) {
+ CurI->setVisibility(GlobalValue::HiddenVisibility);
+ CurI->setLinkage(GlobalValue::ExternalLinkage);
+ }
+
+ if (deleteStuff == (bool)Named.count(CurI)) {
+ Type *Ty = CurI->getType()->getElementType();
+
+ CurI->removeFromParent();
+ llvm::Value *Declaration;
+ if (FunctionType *FTy = dyn_cast<FunctionType>(Ty)) {
+ Declaration = Function::Create(FTy, GlobalValue::ExternalLinkage,
+ CurI->getName(), &M);
+
+ } else {
+ Declaration =
+ new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
+ 0, CurI->getName());
+
+ }
+ CurI->replaceAllUsesWith(Declaration);
+ delete CurI;
+ }
}
return true;
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index f3f6228..18409f7 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -28,9 +28,9 @@
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/UniqueVector.h"
#include "llvm/Support/InstIterator.h"
using namespace llvm;
@@ -212,10 +212,17 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
MadeChange = true;
// Clear out any existing attributes.
- F->removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone);
+ AttrBuilder B;
+ B.addAttribute(Attributes::ReadOnly)
+ .addAttribute(Attributes::ReadNone);
+ F->removeAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(F->getContext(), B));
// Add in the new attribute.
- F->addAttribute(~0, ReadsMemory? Attribute::ReadOnly : Attribute::ReadNone);
+ B.clear();
+ B.addAttribute(ReadsMemory ? Attributes::ReadOnly : Attributes::ReadNone);
+ F->addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(F->getContext(), B));
if (ReadsMemory)
++NumReadOnly;
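The AttrBuilder idiom above replaces the old bitwise-OR style. Before/after, condensed from this hunk:

    // Before: enum values OR'd together, ~0 as the function index.
    //   F->removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone);
    //   F->addAttribute(~0, ReadsMemory ? Attribute::ReadOnly
    //                                   : Attribute::ReadNone);

    // After: accumulate in an AttrBuilder, then materialize per-context.
    AttrBuilder B;
    B.addAttribute(Attributes::ReadOnly).addAttribute(Attributes::ReadNone);
    F->removeAttribute(AttrListPtr::FunctionIndex,
                       Attributes::get(F->getContext(), B)); // clear both
    B.clear();
    B.addAttribute(ReadsMemory ? Attributes::ReadOnly : Attributes::ReadNone);
    F->addAttribute(AttrListPtr::FunctionIndex,
                    Attributes::get(F->getContext(), B));    // set exactly one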
@@ -276,8 +283,6 @@ namespace {
void tooManyUses() { Captured = true; }
- bool shouldExplore(Use *U) { return true; }
-
bool captured(Use *U) {
CallSite CS(U->getUser());
if (!CS.getInstruction()) { Captured = true; return true; }
@@ -352,6 +357,9 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
ArgumentGraph AG;
+ AttrBuilder B;
+ B.addAttribute(Attributes::NoCapture);
+
// Check each function in turn, determining which pointer arguments are not
// captured.
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
@@ -373,7 +381,7 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end();
A != E; ++A) {
if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
- A->addAttr(Attribute::NoCapture);
+ A->addAttr(Attributes::get(F->getContext(), B));
++NumNoCapture;
Changed = true;
}
@@ -388,7 +396,7 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
if (!Tracker.Captured) {
if (Tracker.Uses.empty()) {
// If it's trivially not captured, mark it nocapture now.
- A->addAttr(Attribute::NoCapture);
+ A->addAttr(Attributes::get(F->getContext(), B));
++NumNoCapture;
Changed = true;
} else {
@@ -421,7 +429,9 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
// eg. "void f(int* x) { if (...) f(x); }"
if (ArgumentSCC[0]->Uses.size() == 1 &&
ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
- ArgumentSCC[0]->Definition->addAttr(Attribute::NoCapture);
+ ArgumentSCC[0]->
+ Definition->
+ addAttr(Attributes::get(ArgumentSCC[0]->Definition->getContext(), B));
++NumNoCapture;
Changed = true;
}
@@ -463,7 +473,7 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
Argument *A = ArgumentSCC[i]->Definition;
- A->addAttr(Attribute::NoCapture);
+ A->addAttr(Attributes::get(A->getContext(), B));
++NumNoCapture;
Changed = true;
}
@@ -476,13 +486,13 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
/// or a pointer that doesn't alias any other pointer visible to the caller.
bool FunctionAttrs::IsFunctionMallocLike(Function *F,
SmallPtrSet<Function*, 8> &SCCNodes) const {
- UniqueVector<Value *> FlowsToReturn;
+ SmallSetVector<Value *, 8> FlowsToReturn;
for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I)
if (ReturnInst *Ret = dyn_cast<ReturnInst>(I->getTerminator()))
FlowsToReturn.insert(Ret->getReturnValue());
for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
- Value *RetVal = FlowsToReturn[i+1]; // UniqueVector[0] is reserved.
+ Value *RetVal = FlowsToReturn[i];
if (Constant *C = dyn_cast<Constant>(RetVal)) {
if (!C->isNullValue() && !isa<UndefValue>(C))
@@ -520,7 +530,7 @@ bool FunctionAttrs::IsFunctionMallocLike(Function *F,
case Instruction::Call:
case Instruction::Invoke: {
CallSite CS(RVI);
- if (CS.paramHasAttr(0, Attribute::NoAlias))
+ if (CS.paramHasAttr(0, Attributes::NoAlias))
break;
if (CS.getCalledFunction() &&
SCCNodes.count(CS.getCalledFunction()))
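The UniqueVector-to-SmallSetVector switch in IsFunctionMallocLike also fixes the indexing: UniqueVector reserves slot 0, hence the old FlowsToReturn[i+1], while SmallSetVector is zero-based and still deduplicates on insert, which is all this worklist needs. The generic pattern, as a sketch (the operand expansion step is illustrative):

    #include "llvm/ADT/SetVector.h"
    #include "llvm/User.h"
    using namespace llvm;

    static void walkReachable(Value *Root) {
      // Index-based worklist that is safe to grow while iterating.
      SmallSetVector<Value *, 8> Worklist;
      Worklist.insert(Root);
      for (unsigned i = 0; i != Worklist.size(); ++i) {
        Value *V = Worklist[i];                  // zero-based access
        if (User *U = dyn_cast<User>(V))
          for (unsigned j = 0, e = U->getNumOperands(); j != e; ++j)
            Worklist.insert(U->getOperand(j));   // duplicates are ignored
      }
    }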
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 6d950d2..591278f 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -25,7 +25,7 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
@@ -83,7 +83,7 @@ namespace {
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
- TargetData *TD;
+ DataLayout *TD;
TargetLibraryInfo *TLI;
};
}
@@ -225,6 +225,7 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
// Don't hack on volatile stores.
if (SI->isVolatile()) return true;
+
GS.Ordering = StrongerOrdering(GS.Ordering, SI->getOrdering());
// If this is a direct store to the global (i.e., the global is a scalar
@@ -234,6 +235,14 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(
SI->getOperand(1))) {
Value *StoredVal = SI->getOperand(0);
+
+ if (Constant *C = dyn_cast<Constant>(StoredVal)) {
+ if (C->isThreadDependent()) {
+ // The stored value changes between threads; don't track it.
+ return true;
+ }
+ }
+
if (StoredVal == GV->getInitializer()) {
if (GS.StoredType < GlobalStatus::isInitializerStored)
GS.StoredType = GlobalStatus::isInitializerStored;
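The new isThreadDependent() guard matters once constants can reference thread-local storage: the "same" constant then denotes a different address in every thread, so AnalyzeGlobal must not record it as a single stored value. An illustrative case (hypothetical globals, written as a comment):

    // @tls = thread_local global i32 0
    // @p   = global i32* null
    // ...
    //   store i32* @tls, i32** @p  ; @tls is a different address per thread
    // The stored constant @tls satisfies isThreadDependent(), so the
    // analysis gives up on @p instead of treating @tls as "the" value.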
@@ -346,7 +355,7 @@ static bool isLeakCheckerRoot(GlobalVariable *GV) {
/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
-static bool IsSafeComputationToRemove(Value *V) {
+static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
do {
if (isa<Constant>(V))
return true;
@@ -355,7 +364,7 @@ static bool IsSafeComputationToRemove(Value *V) {
if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
isa<GlobalValue>(V))
return false;
- if (isAllocationFn(V))
+ if (isAllocationFn(V, TLI))
return true;
Instruction *I = cast<Instruction>(V);
@@ -376,7 +385,8 @@ static bool IsSafeComputationToRemove(Value *V) {
/// of the global and clean up any that obviously don't assign the global a
/// value that isn't dynamically allocated.
///
-static bool CleanupPointerRootUsers(GlobalVariable *GV) {
+static bool CleanupPointerRootUsers(GlobalVariable *GV,
+ const TargetLibraryInfo *TLI) {
// A brief explanation of leak checkers. The goal is to find bugs where
// pointers are forgotten, causing an accumulating growth in memory
// usage over time. The common strategy for leak checkers is to whitelist the
@@ -432,18 +442,18 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV) {
C->destroyConstant();
// This could have invalidated UI, start over from scratch.
Dead.clear();
- CleanupPointerRootUsers(GV);
+ CleanupPointerRootUsers(GV, TLI);
return true;
}
}
}
for (int i = 0, e = Dead.size(); i != e; ++i) {
- if (IsSafeComputationToRemove(Dead[i].first)) {
+ if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
Dead[i].second->eraseFromParent();
Instruction *I = Dead[i].first;
do {
- if (isAllocationFn(I))
+ if (isAllocationFn(I, TLI))
break;
Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
if (!J)
@@ -463,7 +473,7 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV) {
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
- TargetData *TD, TargetLibraryInfo *TLI) {
+ DataLayout *TD, TargetLibraryInfo *TLI) {
bool Changed = false;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
User *U = *UI++;
@@ -655,7 +665,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
/// behavior of the program in a more fine-grained way. We have determined that
/// this transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
-static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
+static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// Make sure this global only has simple uses that we can SRA.
if (!GlobalUsersSafeToSRA(GV))
return 0;
@@ -931,7 +941,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
- TargetData *TD,
+ DataLayout *TD,
TargetLibraryInfo *TLI) {
bool Changed = false;
@@ -961,7 +971,9 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
// If we get here, we could have other crazy uses that are transitively
// loaded.
assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
- isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser)) &&
+ isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
+ isa<BitCastInst>(GlobalUser) ||
+ isa<GetElementPtrInst>(GlobalUser)) &&
"Only expect load and stores!");
}
}
@@ -975,7 +987,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
// nor is the global.
if (AllNonStoreUsesGone) {
if (isLeakCheckerRoot(GV)) {
- Changed |= CleanupPointerRootUsers(GV);
+ Changed |= CleanupPointerRootUsers(GV, TLI);
} else {
Changed = true;
CleanupConstantGlobalUsers(GV, 0, TD, TLI);
@@ -993,7 +1005,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V,
- TargetData *TD, TargetLibraryInfo *TLI) {
+ DataLayout *TD, TargetLibraryInfo *TLI) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
@@ -1016,7 +1028,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
CallInst *CI,
Type *AllocTy,
ConstantInt *NElements,
- TargetData *TD,
+ DataLayout *TD,
TargetLibraryInfo *TLI) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
@@ -1465,9 +1477,10 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
- Value *NElems, TargetData *TD) {
+ Value *NElems, DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
- Type *MAT = getMallocAllocatedType(CI);
+ Type *MAT = getMallocAllocatedType(CI, TLI);
StructType *STy = cast<StructType>(MAT);
// There is guaranteed to be at least one use of the malloc (storing
@@ -1656,7 +1669,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
Type *AllocTy,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- TargetData *TD,
+ DataLayout *TD,
TargetLibraryInfo *TLI) {
if (!TD)
return false;
@@ -1688,7 +1701,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size.
- Value *NElems = getMallocArraySize(CI, TD, true);
+ Value *NElems = getMallocArraySize(CI, TD, TLI, true);
if (!NElems)
return false;
@@ -1725,7 +1738,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
- if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
+ if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
@@ -1742,7 +1755,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = cast<CallInst>(Malloc);
}
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD);
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true),
+ TD, TLI);
return true;
}
@@ -1754,7 +1768,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- TargetData *TD, TargetLibraryInfo *TLI) {
+ DataLayout *TD, TargetLibraryInfo *TLI) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1771,8 +1785,8 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
// Optimize away any trapping uses of the loaded value.
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
return true;
- } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
- Type *MallocType = getMallocAllocatedType(CI);
+ } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
+ Type *MallocType = getMallocAllocatedType(CI, TLI);
if (MallocType &&
TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
TD, TLI))
@@ -1964,7 +1978,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
bool Changed;
if (isLeakCheckerRoot(GV)) {
// Delete any constant stores to the global.
- Changed = CleanupPointerRootUsers(GV);
+ Changed = CleanupPointerRootUsers(GV, TLI);
} else {
// Delete any stores we can find to the global. We may not be able to
// make it completely dead though.
@@ -1997,7 +2011,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
++NumMarked;
return true;
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {
- if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
+ if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>())
if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
GVI = FirstNewGV; // Don't skip the newly produced globals!
return true;
@@ -2056,25 +2070,26 @@ static void ChangeCalleesToFastCall(Function *F) {
}
}
-static AttrListPtr StripNest(const AttrListPtr &Attrs) {
+static AttrListPtr StripNest(LLVMContext &C, const AttrListPtr &Attrs) {
for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
- if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0)
+ if (!Attrs.getSlot(i).Attrs.hasAttribute(Attributes::Nest))
continue;
// There can be only one.
- return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest);
+ return Attrs.removeAttr(C, Attrs.getSlot(i).Index,
+ Attributes::get(C, Attributes::Nest));
}
return Attrs;
}
static void RemoveNestAttribute(Function *F) {
- F->setAttributes(StripNest(F->getAttributes()));
+ F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
if (isa<BlockAddress>(*UI))
continue;
CallSite User(cast<Instruction>(*UI));
- User.setAttributes(StripNest(User.getAttributes()));
+ User.setAttributes(StripNest(F->getContext(), User.getAttributes()));
}
}
@@ -2103,7 +2118,7 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
Changed = true;
}
- if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
+ if (F->getAttributes().hasAttrSomewhere(Attributes::Nest) &&
!F->hasAddressTaken()) {
// The function is not used by a trampoline intrinsic, so it is safe
// to remove the 'nest' attribute.
@@ -2251,7 +2266,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const TargetData *TD);
+ const DataLayout *TD);
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
@@ -2264,7 +2279,7 @@ isSimpleEnoughValueToCommit(Constant *C,
/// time.
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const TargetData *TD) {
+ const DataLayout *TD) {
// Simple integer, undef, constant aggregate zero, global addresses, etc are
// all supported.
if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
@@ -2319,7 +2334,7 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const TargetData *TD) {
+ const DataLayout *TD) {
// If we already checked this constant, we win.
if (!SimpleConstants.insert(C)) return true;
// Check the constant.
@@ -2450,7 +2465,7 @@ namespace {
/// Once an evaluation call fails, the evaluation object should not be reused.
class Evaluator {
public:
- Evaluator(const TargetData *TD, const TargetLibraryInfo *TLI)
+ Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI)
: TD(TD), TLI(TLI) {
ValueStack.push_back(new DenseMap<Value*, Constant*>);
}
@@ -2531,7 +2546,7 @@ private:
/// simple enough to live in a static initializer of a global.
SmallPtrSet<Constant*, 8> SimpleConstants;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
};
@@ -2869,7 +2884,7 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const TargetData *TD,
+static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// Call the function.
Evaluator Eval(TD, TLI);
@@ -3110,7 +3125,7 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// Try to find the llvm.globalctors list.
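Most of the GlobalOpt changes in this file thread TargetLibraryInfo into the MemoryBuiltins helpers, so allocation- and free-call detection respects the target's library configuration (e.g. -fno-builtin) rather than matching by name unconditionally. The calling pattern, per the signatures in this diff (fragment; assumes a pass that requires TargetLibraryInfo):

    const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
    if (isAllocationFn(V, TLI)) {
      // V calls a recognized allocator for this target's library,
      // not merely something named "malloc".
    }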
diff --git a/contrib/llvm/lib/Transforms/IPO/IPO.cpp b/contrib/llvm/lib/Transforms/IPO/IPO.cpp
index 6233922..5d563d8 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPO.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPO.cpp
@@ -1,4 +1,4 @@
-//===-- Scalar.cpp --------------------------------------------------------===//
+//===-- IPO.cpp -----------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -95,7 +95,10 @@ void LLVMAddIPSCCPPass(LLVMPassManagerRef PM) {
}
void LLVMAddInternalizePass(LLVMPassManagerRef PM, unsigned AllButMain) {
- unwrap(PM)->add(createInternalizePass(AllButMain != 0));
+ std::vector<const char *> Export;
+ if (AllButMain)
+ Export.push_back("main");
+ unwrap(PM)->add(createInternalizePass(Export));
}
void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM) {
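The C binding keeps its signature: a nonzero AllButMain is now emulated by exporting "main". A source-compatibility sketch (assumes an initialized pass manager ref):

    #include "llvm-c/Transforms/IPO.h"

    void configurePM(LLVMPassManagerRef PM) {
      /* 1 previously meant "internalize all but main"; it still does,
         now implemented by passing {"main"} as the export list. */
      LLVMAddInternalizePass(PM, 1);
    }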
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
index 664ddf6..b1c36c1 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -23,7 +23,7 @@
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
@@ -65,7 +65,7 @@ Pass *llvm::createAlwaysInlinerPass(bool InsertLifetime) {
/// \brief Minimal filter to detect invalid constructs for inlining.
static bool isInlineViable(Function &F) {
- bool ReturnsTwice = F.hasFnAttr(Attribute::ReturnsTwice);
+ bool ReturnsTwice =F.getFnAttributes().hasAttribute(Attributes::ReturnsTwice);
for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
// Disallow inlining of functions which contain an indirect branch.
if (isa<IndirectBrInst>(BI->getTerminator()))
@@ -114,7 +114,7 @@ InlineCost AlwaysInliner::getInlineCost(CallSite CS) {
if (Callee->isDeclaration()) return InlineCost::getNever();
// Return never for anything not marked as always inline.
- if (!Callee->hasFnAttr(Attribute::AlwaysInline))
+ if (!Callee->getFnAttributes().hasAttribute(Attributes::AlwaysInline))
return InlineCost::getNever();
// Do some minimal analysis to preclude non-viable functions.
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
index 50038d8..bf0b1f9 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
@@ -22,7 +22,7 @@
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
@@ -62,7 +62,7 @@ Pass *llvm::createFunctionInliningPass(int Threshold) {
// doInitialization - Initializes the vector of functions that have been
// annotated with the noinline attribute.
bool SimpleInliner::doInitialization(CallGraph &CG) {
- CA.setTargetData(getAnalysisIfAvailable<TargetData>());
+ CA.setDataLayout(getAnalysisIfAvailable<DataLayout>());
return false;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index 712888a..abcb25f 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -19,7 +19,8 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -92,11 +93,11 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// If the inlined function had a higher stack protection level than the
// calling function, then bump up the caller's stack protection level.
- if (Callee->hasFnAttr(Attribute::StackProtectReq))
- Caller->addFnAttr(Attribute::StackProtectReq);
- else if (Callee->hasFnAttr(Attribute::StackProtect) &&
- !Caller->hasFnAttr(Attribute::StackProtectReq))
- Caller->addFnAttr(Attribute::StackProtect);
+ if (Callee->getFnAttributes().hasAttribute(Attributes::StackProtectReq))
+ Caller->addFnAttr(Attributes::StackProtectReq);
+ else if (Callee->getFnAttributes().hasAttribute(Attributes::StackProtect) &&
+ !Caller->getFnAttributes().hasAttribute(Attributes::StackProtectReq))
+ Caller->addFnAttr(Attributes::StackProtect);
// Look at all of the allocas that we inlined through this call site. If we
// have already inlined other allocas through other calls into this function,
@@ -208,14 +209,15 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
// would decrease the threshold.
Function *Caller = CS.getCaller();
bool OptSize = Caller && !Caller->isDeclaration() &&
- Caller->hasFnAttr(Attribute::OptimizeForSize);
- if (!(InlineLimit.getNumOccurrences() > 0) && OptSize && OptSizeThreshold < thres)
+ Caller->getFnAttributes().hasAttribute(Attributes::OptimizeForSize);
+ if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
+ OptSizeThreshold < thres)
thres = OptSizeThreshold;
// Listen to the inlinehint attribute when it would increase the threshold.
Function *Callee = CS.getCalledFunction();
bool InlineHint = Callee && !Callee->isDeclaration() &&
- Callee->hasFnAttr(Attribute::InlineHint);
+ Callee->getFnAttributes().hasAttribute(Attributes::InlineHint);
if (InlineHint && HintThreshold > thres)
thres = HintThreshold;
@@ -338,7 +340,8 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallGraph &CG = getAnalysis<CallGraph>();
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
SmallPtrSet<Function*, 8> SCCFunctions;
DEBUG(dbgs() << "Inliner visiting SCC:");
@@ -417,7 +420,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// just delete the call instead of trying to inline it, regardless of
// size. This happens because IPSCCP propagates the result out of the
// call and then we're left with the dead call.
- if (isInstructionTriviallyDead(CS.getInstruction())) {
+ if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
DEBUG(dbgs() << " -> Deleting dead call: "
<< *CS.getInstruction() << "\n");
// Update the call graph by deleting the edge from Callee to Caller.
@@ -530,7 +533,8 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
// Handle the case when this function is called and we only want to care
// about always-inline functions. This is a bit of a hack to share code
// between here and the InlineAlways pass.
- if (AlwaysInlineOnly && !F->hasFnAttr(Attribute::AlwaysInline))
+ if (AlwaysInlineOnly &&
+ !F->getFnAttributes().hasAttribute(Attributes::AlwaysInline))
continue;
// If the only remaining users of the function are dead constants, remove
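A worked trace of the threshold logic above, using the usual defaults of this era (225 base, OptSizeThreshold 75, HintThreshold 325; treat the numbers as illustrative):

    // Caller marked OptimizeForSize, no explicit -inline-threshold:
    //   thres = 225; 75 < 225            -> thres = 75
    // Callee additionally marked InlineHint:
    //   HintThreshold (325) > 75         -> thres = 325
    // With -inline-threshold=500 given explicitly:
    //   InlineLimit.getNumOccurrences() > 0, so the OptimizeForSize
    //   reduction is skipped and thres stays at the user's 500.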
diff --git a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
index fb5869e..aa629cc 100644
--- a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
@@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass loops over all of the functions in the input module, looking for a
-// main function. If a main function is found, all other functions and all
-// global variables with initializers are marked as internal.
+// This pass loops over all of the functions and variables in the input module.
+// If the function or variable is not in the list of external names given to
+// the pass, it is marked as internal.
//
//===----------------------------------------------------------------------===//
@@ -45,12 +45,9 @@ APIList("internalize-public-api-list", cl::value_desc("list"),
namespace {
class InternalizePass : public ModulePass {
std::set<std::string> ExternalNames;
- /// If no api symbols were specified and a main function is defined,
- /// assume the main function is the only API
- bool AllButMain;
public:
static char ID; // Pass identification, replacement for typeid
- explicit InternalizePass(bool AllButMain = true);
+ explicit InternalizePass();
explicit InternalizePass(const std::vector <const char *>& exportList);
void LoadFile(const char *Filename);
virtual bool runOnModule(Module &M);
@@ -66,8 +63,8 @@ char InternalizePass::ID = 0;
INITIALIZE_PASS(InternalizePass, "internalize",
"Internalize Global Symbols", false, false)
-InternalizePass::InternalizePass(bool AllButMain)
- : ModulePass(ID), AllButMain(AllButMain){
+InternalizePass::InternalizePass()
+ : ModulePass(ID) {
initializeInternalizePassPass(*PassRegistry::getPassRegistry());
if (!APIFile.empty()) // If a filename is specified, use it.
LoadFile(APIFile.c_str());
@@ -76,7 +73,7 @@ InternalizePass::InternalizePass(bool AllButMain)
}
InternalizePass::InternalizePass(const std::vector<const char *>&exportList)
- : ModulePass(ID), AllButMain(false){
: ModulePass(ID) {
initializeInternalizePassPass(*PassRegistry::getPassRegistry());
for(std::vector<const char *>::const_iterator itr = exportList.begin();
itr != exportList.end(); itr++) {
@@ -103,23 +100,6 @@ void InternalizePass::LoadFile(const char *Filename) {
bool InternalizePass::runOnModule(Module &M) {
CallGraph *CG = getAnalysisIfAvailable<CallGraph>();
CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0;
-
- if (ExternalNames.empty()) {
- // Return if we're not in 'all but main' mode and have no external api
- if (!AllButMain)
- return false;
- // If no list or file of symbols was specified, check to see if there is a
- // "main" symbol defined in the module. If so, use it, otherwise do not
- // internalize the module, it must be a library or something.
- //
- Function *MainFunc = M.getFunction("main");
- if (MainFunc == 0 || MainFunc->isDeclaration())
- return false; // No main found, must be a library...
-
- // Preserve main, internalize all else.
- ExternalNames.insert(MainFunc->getName());
- }
-
bool Changed = false;
// Never internalize functions which code-gen might insert.
@@ -189,8 +169,8 @@ bool InternalizePass::runOnModule(Module &M) {
return Changed;
}
-ModulePass *llvm::createInternalizePass(bool AllButMain) {
- return new InternalizePass(AllButMain);
+ModulePass *llvm::createInternalizePass() {
+ return new InternalizePass();
}
ModulePass *llvm::createInternalizePass(const std::vector <const char *> &el) {
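Call sites now spell the old default out explicitly. Two representative uses (PM is assumed to be a populated PassManagerBase):

    // Old AllButMain == true: keep "main" visible, internalize the rest.
    std::vector<const char *> Export;
    Export.push_back("main");
    PM.add(createInternalizePass(Export));

    // Export nothing; rely only on -internalize-public-api-file/-list.
    PM.add(createInternalizePass());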
diff --git a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 9f70f66..44283dd 100644
--- a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -63,7 +63,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <vector>
using namespace llvm;
@@ -92,19 +92,19 @@ static unsigned profileFunction(const Function *F) {
namespace {
/// ComparableFunction - A struct that pairs together functions with a
-/// TargetData so that we can keep them together as elements in the DenseSet.
+/// DataLayout so that we can keep them together as elements in the DenseSet.
class ComparableFunction {
public:
static const ComparableFunction EmptyKey;
static const ComparableFunction TombstoneKey;
- static TargetData * const LookupOnly;
+ static DataLayout * const LookupOnly;
- ComparableFunction(Function *Func, TargetData *TD)
+ ComparableFunction(Function *Func, DataLayout *TD)
: Func(Func), Hash(profileFunction(Func)), TD(TD) {}
Function *getFunc() const { return Func; }
unsigned getHash() const { return Hash; }
- TargetData *getTD() const { return TD; }
+ DataLayout *getTD() const { return TD; }
// Drops AssertingVH reference to the function. Outside of debug mode, this
// does nothing.
@@ -120,13 +120,13 @@ private:
AssertingVH<Function> Func;
unsigned Hash;
- TargetData *TD;
+ DataLayout *TD;
};
const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
const ComparableFunction ComparableFunction::TombstoneKey =
ComparableFunction(1);
-TargetData *const ComparableFunction::LookupOnly = (TargetData*)(-1);
+DataLayout *const ComparableFunction::LookupOnly = (DataLayout*)(-1);
}
@@ -150,12 +150,12 @@ namespace llvm {
namespace {
/// FunctionComparator - Compares two functions to determine whether or not
-/// they will generate machine code with the same behaviour. TargetData is
+/// they will generate machine code with the same behaviour. DataLayout is
/// used if available. The comparator always fails conservatively (erring on the
/// side of claiming that two functions are different).
class FunctionComparator {
public:
- FunctionComparator(const TargetData *TD, const Function *F1,
+ FunctionComparator(const DataLayout *TD, const Function *F1,
const Function *F2)
: F1(F1), F2(F2), TD(TD) {}
@@ -190,7 +190,7 @@ private:
// The two functions undergoing comparison.
const Function *F1, *F2;
- const TargetData *TD;
+ const DataLayout *TD;
DenseMap<const Value *, const Value *> id_map;
DenseSet<const Value *> seen_values;
@@ -591,8 +591,8 @@ private:
/// to modify it.
FnSetType FnSet;
- /// TargetData for more accurate GEP comparisons. May be NULL.
- TargetData *TD;
+ /// DataLayout for more accurate GEP comparisons. May be NULL.
+ DataLayout *TD;
/// Whether or not the target supports global aliases.
bool HasGlobalAliases;
@@ -609,7 +609,7 @@ ModulePass *llvm::createMergeFunctionsPass() {
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
diff --git a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index 43b4ab5..05253fc 100644
--- a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -33,13 +33,21 @@
using namespace llvm;
static cl::opt<bool>
-RunVectorization("vectorize", cl::desc("Run vectorization passes"));
+RunLoopVectorization("vectorize-loops",
+ cl::desc("Run the Loop vectorization passes"));
+
+static cl::opt<bool>
+RunBBVectorization("vectorize", cl::desc("Run the BB vectorization passes"));
static cl::opt<bool>
UseGVNAfterVectorization("use-gvn-after-vectorization",
cl::init(false), cl::Hidden,
cl::desc("Run GVN instead of Early CSE after vectorization passes"));
+static cl::opt<bool> UseNewSROA("use-new-sroa",
+ cl::init(true), cl::Hidden,
+ cl::desc("Enable the new, experimental SROA pass"));
+
PassManagerBuilder::PassManagerBuilder() {
OptLevel = 2;
SizeLevel = 0;
@@ -48,7 +56,8 @@ PassManagerBuilder::PassManagerBuilder() {
DisableSimplifyLibCalls = false;
DisableUnitAtATime = false;
DisableUnrollLoops = false;
- Vectorize = RunVectorization;
+ Vectorize = RunBBVectorization;
+ LoopVectorize = RunLoopVectorization;
}
PassManagerBuilder::~PassManagerBuilder() {
@@ -100,7 +109,10 @@ void PassManagerBuilder::populateFunctionPassManager(FunctionPassManager &FPM) {
addInitialAliasAnalysisPasses(FPM);
FPM.add(createCFGSimplificationPass());
- FPM.add(createScalarReplAggregatesPass());
+ if (UseNewSROA)
+ FPM.add(createSROAPass());
+ else
+ FPM.add(createScalarReplAggregatesPass());
FPM.add(createEarlyCSEPass());
FPM.add(createLowerExpectIntrinsicPass());
}
@@ -112,6 +124,14 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(Inliner);
Inliner = 0;
}
+
+ // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
+ // pass manager, but we don't want to add extensions into that pass manager.
+ // To prevent this, we must insert a no-op module pass to reset the pass
+ // manager to get the same behavior as EP_OptimizerLast in non-O0 builds.
+ if (!GlobalExtensions->empty() || !Extensions.empty())
+ MPM.add(createBarrierNoopPass());
+
addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
return;
}
@@ -147,7 +167,10 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
// Start of function pass.
// Break up aggregate allocas, using SSAUpdater.
- MPM.add(createScalarReplAggregatesPass(-1, false));
+ if (UseNewSROA)
+ MPM.add(createSROAPass(/*RequiresDomTree*/ false));
+ else
+ MPM.add(createScalarReplAggregatesPass(-1, false));
MPM.add(createEarlyCSEPass()); // Catch trivial redundancies
if (!DisableSimplifyLibCalls)
MPM.add(createSimplifyLibCallsPass()); // Library Call Optimizations
@@ -166,6 +189,12 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createIndVarSimplifyPass()); // Canonicalize indvars
MPM.add(createLoopIdiomPass()); // Recognize idioms like memset.
MPM.add(createLoopDeletionPass()); // Delete dead loops
+
+ if (LoopVectorize) {
+ MPM.add(createLoopVectorizePass());
+ MPM.add(createLICMPass());
+ }
+
if (!DisableUnrollLoops)
MPM.add(createLoopUnrollPass()); // Unroll small loops
addExtensionsToPM(EP_LoopOptimizerEnd, MPM);
@@ -201,13 +230,12 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
// FIXME: We shouldn't bother with this anymore.
MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes
- // GlobalOpt already deletes dead functions and globals, at -O3 try a
+ // GlobalOpt already deletes dead functions and globals, at -O2 try a
// late pass of GlobalDCE. It is capable of deleting dead cycles.
- if (OptLevel > 2)
+ if (OptLevel > 1) {
MPM.add(createGlobalDCEPass()); // Remove dead fns and globals.
-
- if (OptLevel > 1)
MPM.add(createConstantMergePass()); // Merge dup global constants
+ }
}
addExtensionsToPM(EP_OptimizerLast, MPM);
}
@@ -222,8 +250,11 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
// Now that composite has been compiled, scan through the module, looking
// for a main function. If main is defined, mark all other functions
// internal.
- if (Internalize)
- PM.add(createInternalizePass(true));
+ if (Internalize) {
+ std::vector<const char*> E;
+ E.push_back("main");
+ PM.add(createInternalizePass(E));
+ }
// Propagate constants at call sites into the functions they call. This
// opens opportunities for globalopt (and inlining) by substituting function
@@ -265,7 +296,10 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
PM.add(createInstructionCombiningPass());
PM.add(createJumpThreadingPass());
// Break up allocas
- PM.add(createScalarReplAggregatesPass());
+ if (UseNewSROA)
+ PM.add(createSROAPass());
+ else
+ PM.add(createScalarReplAggregatesPass());
// Run a few AA driven optimizations here and now, to cleanup the code.
PM.add(createFunctionAttrsPass()); // Add nocapture.
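How a driver opts in to the new loop vectorizer through PassManagerBuilder (a sketch; it assumes the LoopVectorize member introduced by this patch is public, like Vectorize):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO/PassManagerBuilder.h"
    using namespace llvm;

    void buildO2Pipeline(PassManager &MPM) {
      PassManagerBuilder PMB;
      PMB.OptLevel = 2;
      PMB.LoopVectorize = true;  // equivalent to -vectorize-loops
      PMB.Vectorize = false;     // the BB vectorizer ("-vectorize") stays off
      PMB.populateModulePassManager(MPM);
    }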
diff --git a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
index c8cc8fd..fb4ecbf 100644
--- a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -137,16 +137,18 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
// If the SCC doesn't unwind or doesn't throw, note this fact.
if (!SCCMightUnwind || !SCCMightReturn)
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
- Attributes NewAttributes = Attribute::None;
+ AttrBuilder NewAttributes;
if (!SCCMightUnwind)
- NewAttributes |= Attribute::NoUnwind;
+ NewAttributes.addAttribute(Attributes::NoUnwind);
if (!SCCMightReturn)
- NewAttributes |= Attribute::NoReturn;
+ NewAttributes.addAttribute(Attributes::NoReturn);
Function *F = (*I)->getFunction();
const AttrListPtr &PAL = F->getAttributes();
- const AttrListPtr &NPAL = PAL.addAttr(~0, NewAttributes);
+ const AttrListPtr &NPAL = PAL.addAttr(F->getContext(), ~0,
+ Attributes::get(F->getContext(),
+ NewAttributes));
if (PAL != NPAL) {
MadeChange = true;
F->setAttributes(NPAL);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
index 0d5ef90..7467eca 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
@@ -18,10 +18,11 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/TargetFolder.h"
+#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
namespace llvm {
class CallSite;
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
class DbgDeclareInst;
class MemIntrinsic;
@@ -71,9 +72,10 @@ public:
class LLVM_LIBRARY_VISIBILITY InstCombiner
: public FunctionPass,
public InstVisitor<InstCombiner, Instruction*> {
- TargetData *TD;
+ DataLayout *TD;
TargetLibraryInfo *TLI;
bool MadeIRChange;
+ LibCallSimplifier *Simplifier;
public:
/// Worklist - All of the instructions that need to be simplified.
InstCombineWorklist Worklist;
@@ -95,7 +97,7 @@ public:
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- TargetData *getTargetData() const { return TD; }
+ DataLayout *getDataLayout() const { return TD; }
TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
@@ -218,7 +220,7 @@ private:
Type *Ty);
Instruction *visitCallSite(CallSite CS);
- Instruction *tryOptimizeCall(CallInst *CI, const TargetData *TD);
+ Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *TD);
bool transformConstExprCastCall(CallSite CS);
Instruction *transformCallThroughTrampoline(CallSite CS,
IntrinsicInst *Tramp);
@@ -365,6 +367,10 @@ private:
Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);
+
+ /// Descale - Return a value X such that Val = X * Scale, or null if none. If
+ /// the multiplication is known not to overflow then NoSignedWrap is set.
+ Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 99b62f8..d8257e6 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -13,7 +13,7 @@
#include "InstCombine.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index cbe1ca4..48f2704 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -13,7 +13,7 @@
#include "InstCombine.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -29,6 +29,26 @@ static Type *getPromotedType(Type *Ty) {
return Ty;
}
+/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
+/// single scalar element, like {{{type}}} or [1 x type], return that type.
+static Type *reduceToSingleValueType(Type *T) {
+ while (!T->isSingleValueType()) {
+ if (StructType *STy = dyn_cast<StructType>(T)) {
+ if (STy->getNumElements() == 1)
+ T = STy->getElementType(0);
+ else
+ break;
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
+ if (ATy->getNumElements() == 1)
+ T = ATy->getElementType();
+ else
+ break;
+ } else
+ break;
+ }
+
+ return T;
+}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
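What the new helper peels, on hypothetical types:

    // reduceToSingleValueType examples (illustrative):
    //   {{{double}}}     -> double          (nested one-member structs)
    //   [1 x [1 x i32]]  -> i32             (one-element arrays)
    //   {i32, i32}       -> {i32, i32}      (stops: two members)
    //   [2 x double]     -> [2 x double]    (stops: two elements)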
@@ -74,35 +94,37 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// dest address will be promotable. See if we can find a better type than the
// integer datatype.
Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
+ MDNode *CopyMD = 0;
if (StrippedDest != MI->getArgOperand(0)) {
Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
// The SrcETy might be something like {{{double}}} or [1 x double]. Rip
// down through these levels if so.
- while (!SrcETy->isSingleValueType()) {
- if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
- if (STy->getNumElements() == 1)
- SrcETy = STy->getElementType(0);
- else
- break;
- } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
- if (ATy->getNumElements() == 1)
- SrcETy = ATy->getElementType();
- else
- break;
- } else
- break;
- }
+ SrcETy = reduceToSingleValueType(SrcETy);
if (SrcETy->isSingleValueType()) {
NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
+
+ // If the memcpy has metadata describing the members, see if we can
+ // get the TBAA tag describing our copy.
+ if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
+ if (M->getNumOperands() == 3 &&
+ M->getOperand(0) &&
+ isa<ConstantInt>(M->getOperand(0)) &&
+ cast<ConstantInt>(M->getOperand(0))->isNullValue() &&
+ M->getOperand(1) &&
+ isa<ConstantInt>(M->getOperand(1)) &&
+ cast<ConstantInt>(M->getOperand(1))->getValue() == Size &&
+ M->getOperand(2) &&
+ isa<MDNode>(M->getOperand(2)))
+ CopyMD = cast<MDNode>(M->getOperand(2));
+ }
}
}
}
-
// If the memcpy/memmove provides better alignment info than we can
// infer, use it.
SrcAlign = std::max(SrcAlign, CopyAlign);
@@ -112,8 +134,12 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
L->setAlignment(SrcAlign);
+ if (CopyMD)
+ L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
S->setAlignment(DstAlign);
+ if (CopyMD)
+ S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
// Set the size of the copy to 0, it will be deleted on the next iteration.
MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
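For orientation, the metadata being matched is a three-operand !tbaa.struct entry describing a copy that covers the whole object; roughly (my sketch, old-style metadata syntax, tag node contents invented):

// Before: whole-object copy tagged with a single {offset, size, tag} triple.
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 8, i32 8,
//                                        i1 false), !tbaa.struct !1
//   !0 = metadata !{metadata !"double", metadata !"root"}  ; a TBAA tag node
//   !1 = metadata !{i64 0, i64 8, metadata !0}             ; offset 0, size 8, tag
//
// After the transform: an ordinary load/store pair carrying the member's tag.
//   %v = load double* %srcd, align 8, !tbaa !0
//   store double %v, double* %dstd, align 8, !tbaa !0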
@@ -168,7 +194,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
- if (isFreeCall(&CI))
+ if (isFreeCall(&CI, TLI))
return visitFree(CI);
// If the caller function is nounwind, mark the call as nounwind, even if the
@@ -243,7 +269,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
default: break;
case Intrinsic::objectsize: {
uint64_t Size;
- if (getObjectSize(II->getArgOperand(0), Size, TD))
+ if (getObjectSize(II->getArgOperand(0), Size, TD, TLI))
return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
return 0;
}
@@ -731,7 +757,7 @@ Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
const CastInst * const CI,
- const TargetData * const TD,
+ const DataLayout * const TD,
const int ix) {
if (!CI->isLosslessCast())
return false;
@@ -752,49 +778,17 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
return true;
}
-namespace {
-class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
- InstCombiner *IC;
-protected:
- void replaceCall(Value *With) {
- NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
- }
- bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
- if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
- return true;
- if (ConstantInt *SizeCI =
- dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
- if (SizeCI->isAllOnesValue())
- return true;
- if (isString) {
- uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
- // If the length is 0 we don't know how long it is and so we can't
- // remove the check.
- if (Len == 0) return false;
- return SizeCI->getZExtValue() >= Len;
- }
- if (ConstantInt *Arg = dyn_cast<ConstantInt>(
- CI->getArgOperand(SizeArgOp)))
- return SizeCI->getZExtValue() >= Arg->getZExtValue();
- }
- return false;
- }
-public:
- InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
- Instruction *NewInstruction;
-};
-} // end anonymous namespace
-
// Try to fold some different type of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
-Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
+Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *TD) {
if (CI->getCalledFunction() == 0) return 0;
- InstCombineFortifiedLibCalls Simplifier(this);
- Simplifier.fold(CI, TD, TLI);
- return Simplifier.NewInstruction;
+ if (Value *With = Simplifier->optimizeCall(CI))
+ return ReplaceInstUsesWith(*CI, With);
+
+ return 0;
}
static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
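The size test the deleted isFoldable implemented (now LibCallSimplifier's job) boils down to comparing the _chk object-size operand against the copy length; restated in plain C++ (my sketch, names invented):

#include <cassert>
#include <cstdint>

// A __memcpy_chk(dst, src, n, objsize) may fold to plain memcpy when the
// object-size operand can never be exceeded: either it is the "unknown"
// all-ones sentinel that __builtin_object_size produces when it cannot tell,
// or it is a constant at least as large as the constant copy length.
static bool foldableChk(uint64_t ObjSize, uint64_t Len) {
  if (ObjSize == UINT64_MAX)
    return true;                // size unknown: the check is a no-op
  return ObjSize >= Len;        // copy provably fits in the object
}

int main() {
  assert(foldableChk(UINT64_MAX, 123));
  assert(foldableChk(64, 16));
  assert(!foldableChk(8, 16));  // would trap at runtime; keep the check
  return 0;
}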
@@ -877,7 +871,7 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
- if (isAllocLikeFn(CS.getInstruction()))
+ if (isAllocLikeFn(CS.getInstruction(), TLI))
return visitAllocSite(*CS.getInstruction());
bool Changed = false;
@@ -961,7 +955,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
Changed = true;
}
- // Try to optimize the call if possible, we require TargetData for most of
+ // Try to optimize the call if possible; we require DataLayout for most of
// this. None of these calls are seen as possibly dead so go ahead and
// delete the instruction now.
if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
@@ -1013,8 +1007,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
return false; // Cannot transform this return value.
if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
- Attributes RAttrs = CallerPAL.getRetAttributes();
- if (RAttrs & Attribute::typeIncompatible(NewRetTy))
+ AttrBuilder RAttrs = CallerPAL.getRetAttributes();
+ if (RAttrs.hasAttributes(Attributes::typeIncompatible(NewRetTy)))
return false; // Attribute not compatible with transformed value.
}
@@ -1044,12 +1038,13 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
return false; // Cannot transform this parameter value.
Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
- if (Attrs & Attribute::typeIncompatible(ParamTy))
+ if (AttrBuilder(Attrs).
+ hasAttributes(Attributes::typeIncompatible(ParamTy)))
return false; // Attribute not compatible with transformed value.
// If the parameter is passed as a byval argument, then we have to have a
// sized type and the sized type has to have the same size as the old type.
- if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
+ if (ParamTy != ActTy && Attrs.hasAttribute(Attributes::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
return false;
@@ -1101,7 +1096,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
break;
Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
- if (PAttrs & Attribute::VarArgsIncompatible)
+ if (PAttrs.hasIncompatibleWithVarArgsAttrs())
return false;
}
@@ -1114,15 +1109,17 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
attrVec.reserve(NumCommonArgs);
// Get any return attributes.
- Attributes RAttrs = CallerPAL.getRetAttributes();
+ AttrBuilder RAttrs = CallerPAL.getRetAttributes();
// If the return value is not being used, the type may not be compatible
// with the existing attributes. Wipe out any problematic attributes.
- RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
+ RAttrs.removeAttributes(Attributes::typeIncompatible(NewRetTy));
// Add the new return attributes.
- if (RAttrs)
- attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
+ if (RAttrs.hasAttributes())
+ attrVec.push_back(
+ AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ Attributes::get(FT->getContext(), RAttrs)));
AI = CS.arg_begin();
for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
@@ -1136,7 +1133,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
}
// Add any parameter attributes.
- if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
+ Attributes PAttrs = CallerPAL.getParamAttributes(i + 1);
+ if (PAttrs.hasAttributes())
attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
}
@@ -1164,19 +1162,23 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
}
// Add any parameter attributes.
- if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
+ Attributes PAttrs = CallerPAL.getParamAttributes(i + 1);
+ if (PAttrs.hasAttributes())
attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
}
}
}
- if (Attributes FnAttrs = CallerPAL.getFnAttributes())
- attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
+ Attributes FnAttrs = CallerPAL.getFnAttributes();
+ if (FnAttrs.hasAttributes())
+ attrVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ FnAttrs));
if (NewRetTy->isVoidTy())
Caller->setName(""); // Void type should not have a name.
- const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec);
+ const AttrListPtr &NewCallerPAL = AttrListPtr::get(Callee->getContext(),
+ attrVec);
Instruction *NC;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
@@ -1240,8 +1242,9 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// If the call already has the 'nest' attribute somewhere then give up -
// otherwise 'nest' would occur twice after splicing in the chain.
- if (Attrs.hasAttrSomewhere(Attribute::Nest))
- return 0;
+ for (unsigned I = 0, E = Attrs.getNumAttrs(); I != E; ++I)
+ if (Attrs.getAttributesAtIndex(I).hasAttribute(Attributes::Nest))
+ return 0;
assert(Tramp &&
"transformCallThroughTrampoline called with incorrect CallSite.");
@@ -1254,12 +1257,12 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
if (!NestAttrs.isEmpty()) {
unsigned NestIdx = 1;
Type *NestTy = 0;
- Attributes NestAttr = Attribute::None;
+ Attributes NestAttr;
// Look for a parameter marked with the 'nest' attribute.
for (FunctionType::param_iterator I = NestFTy->param_begin(),
E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
- if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
+ if (NestAttrs.getParamAttributes(NestIdx).hasAttribute(Attributes::Nest)){
// Record the parameter type and any other attributes.
NestTy = *I;
NestAttr = NestAttrs.getParamAttributes(NestIdx);
@@ -1278,8 +1281,10 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// mean appending it. Likewise for attributes.
// Add any result attributes.
- if (Attributes Attr = Attrs.getRetAttributes())
- NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
+ Attributes Attr = Attrs.getRetAttributes();
+ if (Attr.hasAttributes())
+ NewAttrs.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ Attr));
{
unsigned Idx = 1;
@@ -1299,7 +1304,8 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// Add the original argument and attributes.
NewArgs.push_back(*I);
- if (Attributes Attr = Attrs.getParamAttributes(Idx))
+ Attr = Attrs.getParamAttributes(Idx);
+ if (Attr.hasAttributes())
NewAttrs.push_back
(AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));
@@ -1308,8 +1314,10 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
}
// Add any function attributes.
- if (Attributes Attr = Attrs.getFnAttributes())
- NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
+ Attr = Attrs.getFnAttributes();
+ if (Attr.hasAttributes())
+ NewAttrs.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ Attr));
// The trampoline may have been bitcast to a bogus type (FTy).
// Handle this by synthesizing a new function type, equal to FTy
@@ -1348,7 +1356,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
NestF->getType() == PointerType::getUnqual(NewFTy) ?
NestF : ConstantExpr::getBitCast(NestF,
PointerType::getUnqual(NewFTy));
- const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs);
+ const AttrListPtr &NewPAL = AttrListPtr::get(FTy->getContext(), NewAttrs);
Instruction *NewCaller;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 555b442..bb59db8 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -13,7 +13,7 @@
#include "InstCombine.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
@@ -78,7 +78,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
/// try to eliminate the cast by moving the type information into the alloc.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst &AI) {
- // This requires TargetData to get the alloca alignment and size information.
+ // This requires DataLayout to get the alloca alignment and size information.
if (!TD) return 0;
PointerType *PTy = cast<PointerType>(CI.getType());
@@ -229,7 +229,7 @@ isEliminableCastPair(
const CastInst *CI, ///< The first cast instruction
unsigned opcode, ///< The opcode of the second cast instruction
Type *DstTy, ///< The target type for the second cast instruction
- TargetData *TD ///< The target data for pointer size
+ DataLayout *TD ///< The target data for pointer size
) {
Type *SrcTy = CI->getOperand(0)->getType(); // A from above
@@ -238,17 +238,20 @@ isEliminableCastPair(
// Get the opcodes of the two Cast instructions
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
Instruction::CastOps secondOp = Instruction::CastOps(opcode);
-
+ Type *SrcIntPtrTy = TD && SrcTy->isPtrOrPtrVectorTy() ?
+ TD->getIntPtrType(SrcTy) : 0;
+ Type *MidIntPtrTy = TD && MidTy->isPtrOrPtrVectorTy() ?
+ TD->getIntPtrType(MidTy) : 0;
+ Type *DstIntPtrTy = TD && DstTy->isPtrOrPtrVectorTy() ?
+ TD->getIntPtrType(DstTy) : 0;
unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
- DstTy,
- TD ? TD->getIntPtrType(CI->getContext()) : 0);
-
+ DstTy, SrcIntPtrTy, MidIntPtrTy,
+ DstIntPtrTy);
+
// We don't want to form an inttoptr or ptrtoint that converts to an integer
// type that differs from the pointer size.
- if ((Res == Instruction::IntToPtr &&
- (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
- (Res == Instruction::PtrToInt &&
- (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
+ if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
+ (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
Res = 0;
return Instruction::CastOps(Res);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bdd310e..7c3f8fe 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -16,7 +16,8 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
@@ -473,7 +474,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
- TargetData &TD = *IC.getTargetData();
+ DataLayout &TD = *IC.getDataLayout();
gep_type_iterator GTI = gep_type_begin(GEP);
// Check to see if this gep only has a single variable index. If so, and if
@@ -2355,8 +2356,25 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// Try not to increase register pressure.
BO0->hasOneUse() && BO1->hasOneUse()) {
// Determine Y and Z in the form icmp (X+Y), (X+Z).
- Value *Y = (A == C || A == D) ? B : A;
- Value *Z = (C == A || C == B) ? D : C;
+ Value *Y, *Z;
+ if (A == C) {
+ // C + B == C + D -> B == D
+ Y = B;
+ Z = D;
+ } else if (A == D) {
+ // D + B == C + D -> B == C
+ Y = B;
+ Z = C;
+ } else if (B == C) {
+ // A + C == C + D -> A == D
+ Y = A;
+ Z = D;
+ } else {
+ assert(B == D);
+ // A + D == C + D -> A == C
+ Y = A;
+ Z = C;
+ }
return new ICmpInst(Pred, Y, Z);
}
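The expanded case analysis matters when an operand is shared in a position the old one-liner did not anticipate; my reconstruction of a failure mode the rewrite avoids (the commit itself does not spell out the motivating case):

#include <cassert>

int main() {
  // icmp eq (A + B), (C + D) with A == D and B == C: the two sums are equal
  // for any values, but the old selection
  //   Y = (A == C || A == D) ? B : A;  Z = (C == A || C == B) ? D : C;
  // picks Y = B and Z = D, which need not be equal.
  int A = 1, B = 2, C = 2, D = 1;
  int YOld = (A == C || A == D) ? B : A;      // 2
  int ZOld = (C == A || C == B) ? D : C;      // 1
  assert(A + B == C + D && YOld != ZOld);     // old rewrite changes the answer
  // The patched selection cancels the operand that is actually shared.
  int Y, Z;
  if (A == C)      { Y = B; Z = D; }
  else if (A == D) { Y = B; Z = C; }
  else if (B == C) { Y = A; Z = D; }
  else             { Y = A; Z = C; }
  assert((Y == Z) == (A + B == C + D));
  return 0;
}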
@@ -2894,10 +2912,6 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
if (!RHSF)
break;
- // We can't convert a PPC double double.
- if (RHSF->getType()->isPPC_FP128Ty())
- break;
-
const fltSemantics *Sem;
// FIXME: This shouldn't be here.
if (LHSExt->getSrcTy()->isHalfTy())
@@ -2910,6 +2924,8 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
Sem = &APFloat::IEEEquad;
else if (LHSExt->getSrcTy()->isX86_FP80Ty())
Sem = &APFloat::x87DoubleExtended;
+ else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
+ Sem = &APFloat::PPCDoubleDouble;
else
break;
@@ -2985,6 +3001,44 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
return Res;
}
break;
+ case Instruction::Call: {
+ CallInst *CI = cast<CallInst>(LHSI);
+ LibFunc::Func Func;
+ // Various optimization for fabs compared with zero.
+ if (RHSC->isNullValue() && CI->getCalledFunction() &&
+ TLI->getLibFunc(CI->getCalledFunction()->getName(), Func) &&
+ TLI->has(Func)) {
+ if (Func == LibFunc::fabs || Func == LibFunc::fabsf ||
+ Func == LibFunc::fabsl) {
+ switch (I.getPredicate()) {
+ default: break;
+ // fabs(x) < 0 --> false
+ case FCmpInst::FCMP_OLT:
+ return ReplaceInstUsesWith(I, Builder->getFalse());
+ // fabs(x) > 0 --> x != 0
+ case FCmpInst::FCMP_OGT:
+ return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0),
+ RHSC);
+ // fabs(x) <= 0 --> x == 0
+ case FCmpInst::FCMP_OLE:
+ return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0),
+ RHSC);
+ // fabs(x) >= 0 --> !isnan(x)
+ case FCmpInst::FCMP_OGE:
+ return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0),
+ RHSC);
+ // fabs(x) == 0 --> x == 0
+ // fabs(x) != 0 --> x != 0
+ case FCmpInst::FCMP_OEQ:
+ case FCmpInst::FCMP_UEQ:
+ case FCmpInst::FCMP_ONE:
+ case FCmpInst::FCMP_UNE:
+ return new FCmpInst(I.getPredicate(), CI->getArgOperand(0),
+ RHSC);
+ }
+ }
+ }
+ }
}
}
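A host-side restatement of the fabs fold table (my sketch; note that C++'s != is an unordered comparison while the FCMP_OGT/FCMP_OLE folds above produce ordered ones, so NaN is checked separately):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const double Vals[] = { 0.0, -0.0, 1.5, -1.5 };
  for (unsigned i = 0; i != sizeof(Vals) / sizeof(Vals[0]); ++i) {
    const double X = Vals[i];
    assert(!(std::fabs(X) < 0.0));               // fabs(x) <  0 --> false
    assert((std::fabs(X) > 0.0) == (X != 0.0));  // fabs(x) >  0 --> x != 0
    assert((std::fabs(X) <= 0.0) == (X == 0.0)); // fabs(x) <= 0 --> x == 0
  }
  const double NaN = std::numeric_limits<double>::quiet_NaN();
  assert((std::fabs(NaN) >= 0.0) == !std::isnan(NaN)); // fabs(x) >= 0 --> !isnan(x)
  return 0;
}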
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index c485844..4d106fc 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -14,13 +14,161 @@
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
-STATISTIC(NumDeadStore, "Number of dead stores eliminated");
+STATISTIC(NumDeadStore, "Number of dead stores eliminated");
+STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
+
+/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
+/// some part of a constant global variable. This intentionally only accepts
+/// constant expressions because we can't rewrite arbitrary instructions.
+static bool pointsToConstantGlobal(Value *V) {
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ return GV->isConstant();
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == Instruction::BitCast ||
+ CE->getOpcode() == Instruction::GetElementPtr)
+ return pointsToConstantGlobal(CE->getOperand(0));
+ return false;
+}
+
+/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
+/// pointer to an alloca. Ignore any reads of the pointer; return false if we
+/// see any stores or other unknown uses. If we see pointer arithmetic, keep
+/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
+/// the uses. If we see a memcpy/memmove that targets an unoffset pointer to
+/// the alloca, and if the source pointer is a pointer to a constant global, we
+/// can optimize this.
+static bool
+isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
+ SmallVectorImpl<Instruction *> &ToDelete,
+ bool IsOffset = false) {
+ // We track lifetime intrinsics as we encounter them. If we decide to go
+ // ahead and replace the value with the global, this lets the caller quickly
+ // eliminate the markers.
+
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
+ User *U = cast<Instruction>(*UI);
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+ // Simple (non-volatile, non-atomic) loads are always ok; anything else is not.
+ if (!LI->isSimple()) return false;
+ continue;
+ }
+
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
+ // If uses of the bitcast are ok, we are ok.
+ if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, ToDelete, IsOffset))
+ return false;
+ continue;
+ }
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
+ // A GEP with all-zero indices doesn't offset the pointer; any other GEP does.
+ if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy, ToDelete,
+ IsOffset || !GEP->hasAllZeroIndices()))
+ return false;
+ continue;
+ }
+
+ if (CallSite CS = U) {
+ // If this is the function being called then we treat it like a load and
+ // ignore it.
+ if (CS.isCallee(UI))
+ continue;
+
+ // If this is a readonly/readnone call site, then we know it is just a
+ // load (but one that potentially returns the value itself), so we can
+ // ignore it if we know that the value isn't captured.
+ unsigned ArgNo = CS.getArgumentNo(UI);
+ if (CS.onlyReadsMemory() &&
+ (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
+ continue;
+
+ // If this is being passed as a byval argument, the caller is making a
+ // copy, so it is only a read of the alloca.
+ if (CS.isByValArgument(ArgNo))
+ continue;
+ }
+
+ // Lifetime intrinsics can be handled by the caller.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ assert(II->use_empty() && "Lifetime markers have no result to use!");
+ ToDelete.push_back(II);
+ continue;
+ }
+ }
+
+ // If this isn't our memcpy/memmove, reject it as something we can't
+ // handle.
+ MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
+ if (MI == 0)
+ return false;
+
+ // If the transfer is using the alloca as a source of the transfer, then
+ // ignore it since it is a load (unless the transfer is volatile).
+ if (UI.getOperandNo() == 1) {
+ if (MI->isVolatile()) return false;
+ continue;
+ }
+
+ // If we already have seen a copy, reject the second one.
+ if (TheCopy) return false;
+
+ // If the pointer has been offset from the start of the alloca, we can't
+ // safely handle this.
+ if (IsOffset) return false;
+
+ // If the memintrinsic isn't using the alloca as the dest, reject it.
+ if (UI.getOperandNo() != 0) return false;
+
+ // If the source of the memcpy/move is not a constant global, reject it.
+ if (!pointsToConstantGlobal(MI->getSource()))
+ return false;
+
+ // Otherwise, the transform is safe. Remember the copy instruction.
+ TheCopy = MI;
+ }
+ return true;
+}
+
+/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
+/// modified by a copy from a constant global. If we can prove this, we can
+/// replace any uses of the alloca with uses of the global directly.
+static MemTransferInst *
+isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
+ SmallVectorImpl<Instruction *> &ToDelete) {
+ MemTransferInst *TheCopy = 0;
+ if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
+ return TheCopy;
+ return 0;
+}
+
+/// getPointeeAlignment - Compute the minimum alignment of the value pointed
+/// to by the given pointer.
+static unsigned getPointeeAlignment(Value *V, const DataLayout &TD) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == Instruction::BitCast ||
+ (CE->getOpcode() == Instruction::GetElementPtr &&
+ cast<GEPOperator>(CE)->hasAllZeroIndices()))
+ return getPointeeAlignment(CE->getOperand(0), TD);
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ if (!GV->isDeclaration())
+ return TD.getPreferredAlignment(GV);
+
+ if (PointerType *PT = dyn_cast<PointerType>(V->getType()))
+ if (PT->getElementType()->isSized())
+ return TD.getABITypeAlignment(PT->getElementType());
+
+ return 0;
+}
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
@@ -99,12 +247,16 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
return &AI;
}
+ // If the alignment of the entry block alloca is 0 (unspecified),
+ // assign it the preferred alignment.
+ if (EntryAI->getAlignment() == 0)
+ EntryAI->setAlignment(
+ TD->getPrefTypeAlignment(EntryAI->getAllocatedType()));
// Replace this zero-sized alloca with the one at the start of the entry
// block after ensuring that the address will be aligned enough for both
// types.
- unsigned MaxAlign =
- std::max(TD->getPrefTypeAlignment(EntryAI->getAllocatedType()),
- TD->getPrefTypeAlignment(AI.getAllocatedType()));
+ unsigned MaxAlign = std::max(EntryAI->getAlignment(),
+ AI.getAlignment());
EntryAI->setAlignment(MaxAlign);
if (AI.getType() != EntryAI->getType())
return new BitCastInst(EntryAI, AI.getType());
@@ -113,6 +265,31 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
}
}
+ if (TD) {
+ // Check to see if this allocation is only modified by a memcpy/memmove from
+ // a constant global whose alignment is equal to or exceeds that of the
+ // allocation. If this is the case, we can change all users to use
+ // the constant global instead. This is commonly produced by the CFE by
+ // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
+ // is only subsequently read.
+ SmallVector<Instruction *, 4> ToDelete;
+ if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
+ if (AI.getAlignment() <= getPointeeAlignment(Copy->getSource(), *TD)) {
+ DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
+ DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
+ for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
+ EraseInstFromFunction(*ToDelete[i]);
+ Constant *TheSrc = cast<Constant>(Copy->getSource());
+ Instruction *NewI
+ = ReplaceInstUsesWith(AI, ConstantExpr::getBitCast(TheSrc,
+ AI.getType()));
+ EraseInstFromFunction(*Copy);
+ ++NumGlobalCopies;
+ return NewI;
+ }
+ }
+ }
+
// At last, use the generic allocation site handler to aggressively remove
// unused allocas.
return visitAllocSite(AI);
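Written out, the source pattern named in the comment above looks like the following (my example); clang emits the initializer as a private constant global plus a memcpy into the alloca, and since the array is only read, this change lets every use point at the global directly:

// 'lut' is lowered to an alloca filled by a memcpy from a constant global;
// the new code forwards the reads to that global and drops the stack copy.
int lookup(unsigned i) {
  const int lut[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  return lut[i & 7u];
}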
@@ -121,7 +298,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
- const TargetData *TD) {
+ const DataLayout *TD) {
User *CI = cast<User>(LI.getOperand(0));
Value *CastOp = CI->getOperand(0);
@@ -151,14 +328,14 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
SrcPTy = SrcTy->getElementType();
}
- if (IC.getTargetData() &&
+ if (IC.getDataLayout() &&
(SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
SrcPTy->isVectorTy()) &&
// Do not allow turning this into a load of an integer, which is then
// casted to a pointer, this pessimizes pointer analysis a lot.
(SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
- IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
- IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
+ IC.getDataLayout()->getTypeSizeInBits(SrcPTy) ==
+ IC.getDataLayout()->getTypeSizeInBits(DestPTy)) {
// Okay, we are casting from one integer or pointer type to another of
// the same size. Instead of casting the pointer before the load, cast
@@ -336,11 +513,11 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
// If the pointers point into different address spaces or if they point to
// values with different sizes, we can't do the transformation.
- if (!IC.getTargetData() ||
+ if (!IC.getDataLayout() ||
SrcTy->getAddressSpace() !=
cast<PointerType>(CI->getType())->getAddressSpace() ||
- IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
- IC.getTargetData()->getTypeSizeInBits(DestPTy))
+ IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
+ IC.getDataLayout()->getTypeSizeInBits(DestPTy))
return 0;
// Okay, we are casting from one integer or pointer type to another of
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 35a0bbb..cefe45e 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -37,7 +37,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) {
if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(PowerOf2), m_Value(A))),
m_Value(B))) &&
// The "1" can be any value known to be a power of 2.
- isPowerOfTwo(PowerOf2, IC.getTargetData())) {
+ isPowerOfTwo(PowerOf2, IC.getDataLayout())) {
A = IC.Builder->CreateSub(A, B);
return IC.Builder->CreateShl(PowerOf2, A);
}
@@ -46,7 +46,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) {
// inexact. Similarly for <<.
if (BinaryOperator *I = dyn_cast<BinaryOperator>(V))
if (I->isLogicalShift() &&
- isPowerOfTwo(I->getOperand(0), IC.getTargetData())) {
+ isPowerOfTwo(I->getOperand(0), IC.getDataLayout())) {
// We know that this is an exact/nuw shift and that the input is a
// non-zero context as well.
if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC)) {
@@ -462,12 +462,23 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
}
}
+ // (x lshr C1) udiv C2 --> x udiv (C2 << C1)
+ if (ConstantInt *C2 = dyn_cast<ConstantInt>(Op1)) {
+ Value *X;
+ ConstantInt *C1;
+ if (match(Op0, m_LShr(m_Value(X), m_ConstantInt(C1)))) {
+ APInt NC = C2->getValue().shl(C1->getLimitedValue(C1->getBitWidth()-1));
+ return BinaryOperator::CreateUDiv(X, Builder->getInt(NC));
+ }
+ }
+
// X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
{ const APInt *CI; Value *N;
if (match(Op1, m_Shl(m_Power2(CI), m_Value(N))) ||
match(Op1, m_ZExt(m_Shl(m_Power2(CI), m_Value(N))))) {
if (*CI != 1)
- N = Builder->CreateAdd(N, ConstantInt::get(I.getType(),CI->logBase2()));
+ N = Builder->CreateAdd(N,
+ ConstantInt::get(N->getType(), CI->logBase2()));
if (ZExtInst *Z = dyn_cast<ZExtInst>(Op1))
N = Builder->CreateZExt(N, Z->getDestTy());
if (I.isExact())
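The new lshr/udiv fold is nested floor division: for unsigned x, (x >> c1) / c2 equals x / (c2 << c1) whenever c2 << c1 does not overflow. A quick self-contained check (my sketch):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C1 = 3, C2 = 5;             // (x >> 3) / 5  ==  x / 40
  for (uint32_t X = 0; X < 1000000; X += 7)
    assert((X >> C1) / C2 == X / (C2 << C1));
  return 0;
}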
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 664546c..de9c77e 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -13,7 +13,7 @@
#include "InstCombine.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 291e800..a2d4c88 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -287,7 +287,7 @@ Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
/// SimplifyWithOpReplaced - See if V simplifies when its operand Op is
/// replaced with RepOp.
static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// Trivial replacement.
if (V == Op)
@@ -333,6 +333,10 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
// All operands were constants, fold it.
if (ConstOps.size() == I->getNumOperands()) {
+ if (CmpInst *C = dyn_cast<CmpInst>(I))
+ return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
+ ConstOps[1], TD, TLI);
+
if (LoadInst *LI = dyn_cast<LoadInst>(I))
if (!LI->isVolatile())
return ConstantFoldLoadFromConstPtr(ConstOps[0], TD);
@@ -903,7 +907,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return &SI;
}
- if (VectorType* VecTy = dyn_cast<VectorType>(SI.getType())) {
+ if (VectorType *VecTy = dyn_cast<VectorType>(SI.getType())) {
unsigned VWidth = VecTy->getNumElements();
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
@@ -912,6 +916,28 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return ReplaceInstUsesWith(SI, V);
return &SI;
}
+
+ if (ConstantVector *CV = dyn_cast<ConstantVector>(CondVal)) {
+ // Form a shufflevector instruction.
+ SmallVector<Constant *, 8> Mask(VWidth);
+ Type *Int32Ty = Type::getInt32Ty(CV->getContext());
+ for (unsigned i = 0; i != VWidth; ++i) {
+ Constant *Elem = cast<Constant>(CV->getOperand(i));
+ if (ConstantInt *E = dyn_cast<ConstantInt>(Elem))
+ Mask[i] = ConstantInt::get(Int32Ty, i + (E->isZero() ? VWidth : 0));
+ else if (isa<UndefValue>(Elem))
+ Mask[i] = UndefValue::get(Int32Ty);
+ else
+ return 0;
+ }
+ Constant *MaskVal = ConstantVector::get(Mask);
+ Value *V = Builder->CreateShuffleVector(TrueVal, FalseVal, MaskVal);
+ return ReplaceInstUsesWith(SI, V);
+ }
+
+ if (isa<ConstantAggregateZero>(CondVal)) {
+ return ReplaceInstUsesWith(SI, FalseVal);
+ }
}
return 0;
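Lane i of the constructed mask selects TrueVal (index i) when the condition element is non-zero and FalseVal (index VWidth + i) when it is zero, so select <4 x i1> <1,0,1,0>, %T, %F becomes shufflevector %T, %F, <0,5,2,7>. The index arithmetic, isolated (my sketch):

#include <cassert>

int main() {
  const unsigned VWidth = 4;
  const bool Cond[VWidth] = { true, false, true, false };
  unsigned Mask[VWidth];
  for (unsigned i = 0; i != VWidth; ++i)
    Mask[i] = i + (Cond[i] ? 0 : VWidth); // mirrors i + (E->isZero() ? VWidth : 0)
  const unsigned Expected[VWidth] = { 0, 5, 2, 7 };
  for (unsigned i = 0; i != VWidth; ++i)
    assert(Mask[i] == Expected[i]);
  return 0;
}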
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 4bb2403..57021f1 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -190,7 +190,7 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
V = IC.Builder->CreateLShr(C, NumBits);
// If we got a constantexpr back, try to simplify it with TD info.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- V = ConstantFoldConstantExpression(CE, IC.getTargetData(),
+ V = ConstantFoldConstantExpression(CE, IC.getDataLayout(),
IC.getTargetLibraryInfo());
return V;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 54be8ed..602b203 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -14,7 +14,7 @@
#include "InstCombine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/IntrinsicInst.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index cf60f0f..dd7ea14 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -636,8 +636,11 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// If LHS's width is changed, shift the mask value accordingly.
// If newRHS == NULL, i.e. LHSOp0 == RHSOp0, we want to remap any
- // references to RHSOp0 to LHSOp0, so we don't need to shift the mask.
- if (eltMask >= 0 && newRHS != NULL)
+ // references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
+ // If newRHS == newLHS, we want to remap any references from newRHS to
+ // newLHS so that we can properly identify splats that may occur due to
+ // obfuscation across the two vectors.
+ if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS)
eltMask += newLHSWidth;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
index 99a02fc..ea654ae 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
@@ -26,8 +26,8 @@ class LLVM_LIBRARY_VISIBILITY InstCombineWorklist {
SmallVector<Instruction*, 256> Worklist;
DenseMap<Instruction*, unsigned> WorklistMap;
- void operator=(const InstCombineWorklist&RHS); // DO NOT IMPLEMENT
- InstCombineWorklist(const InstCombineWorklist&); // DO NOT IMPLEMENT
+ void operator=(const InstCombineWorklist&RHS) LLVM_DELETED_FUNCTION;
+ InstCombineWorklist(const InstCombineWorklist&) LLVM_DELETED_FUNCTION;
public:
InstCombineWorklist() {}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 68ecd51..9a46f25 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -40,7 +40,7 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
@@ -88,7 +88,7 @@ void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
Value *InstCombiner::EmitGEPOffset(User *GEP) {
- return llvm::EmitGEPOffset(Builder, *getTargetData(), GEP);
+ return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
}
/// ShouldChangeType - Return true if it is desirable to convert a computation
@@ -805,6 +805,244 @@ static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
return true;
}
+/// Descale - Return a value X such that Val = X * Scale, or null if none. If
+/// the multiplication is known not to overflow then NoSignedWrap is set.
+Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
+ assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
+ assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
+ Scale.getBitWidth() && "Scale not compatible with value!");
+
+ // If Val is zero or Scale is one then Val = Val * Scale.
+ if (match(Val, m_Zero()) || Scale == 1) {
+ NoSignedWrap = true;
+ return Val;
+ }
+
+ // If Scale is zero then it does not divide Val.
+ if (Scale.isMinValue())
+ return 0;
+
+ // Look through chains of multiplications, searching for a constant that is
+ // divisible by Scale. For example, descaling X*(Y*(Z*4)) by a factor of 4
+ // will find the constant factor 4 and produce X*(Y*Z). Descaling X*(Y*8) by
+ // a factor of 4 will produce X*(Y*2). The principle of operation is to bore
+ // down from Val:
+ //
+ // Val = M1 * X || Analysis starts here and works down
+ // M1 = M2 * Y || Doesn't descend into terms with more
+ // M2 = Z * 4 \/ than one use
+ //
+ // Then to modify a term at the bottom:
+ //
+ // Val = M1 * X
+ // M1 = Z * Y || Replaced M2 with Z
+ //
+ // Then to work back up correcting nsw flags.
+
+ // Op - the term we are currently analyzing. Starts at Val then drills down.
+ // Replaced with its descaled value before exiting from the drill down loop.
+ Value *Op = Val;
+
+ // Parent - initially null, but after drilling down notes where Op came from.
+ // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
+ // 0'th operand of Val.
+ std::pair<Instruction*, unsigned> Parent;
+
+ // RequireNoSignedWrap - Set if the transform requires a descaling at deeper
+ // levels that doesn't overflow.
+ bool RequireNoSignedWrap = false;
+
+ // logScale - log base 2 of the scale. Negative if not a power of 2.
+ int32_t logScale = Scale.exactLogBase2();
+
+ for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
+
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // If Op is a constant divisible by Scale then descale to the quotient.
+ APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
+ APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
+ if (!Remainder.isMinValue())
+ // Not divisible by Scale.
+ return 0;
+ // Replace with the quotient in the parent.
+ Op = ConstantInt::get(CI->getType(), Quotient);
+ NoSignedWrap = true;
+ break;
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
+
+ if (BO->getOpcode() == Instruction::Mul) {
+ // Multiplication.
+ NoSignedWrap = BO->hasNoSignedWrap();
+ if (RequireNoSignedWrap && !NoSignedWrap)
+ return 0;
+
+ // There are three cases for multiplication: multiplication by exactly
+ // the scale, multiplication by a constant different to the scale, and
+ // multiplication by something else.
+ Value *LHS = BO->getOperand(0);
+ Value *RHS = BO->getOperand(1);
+
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ // Multiplication by a constant.
+ if (CI->getValue() == Scale) {
+ // Multiplication by exactly the scale, replace the multiplication
+ // by its left-hand side in the parent.
+ Op = LHS;
+ break;
+ }
+
+ // Otherwise drill down into the constant.
+ if (!Op->hasOneUse())
+ return 0;
+
+ Parent = std::make_pair(BO, 1);
+ continue;
+ }
+
+ // Multiplication by something else. Drill down into the left-hand side
+ // since that's where the reassociate pass puts the good stuff.
+ if (!Op->hasOneUse())
+ return 0;
+
+ Parent = std::make_pair(BO, 0);
+ continue;
+ }
+
+ if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
+ isa<ConstantInt>(BO->getOperand(1))) {
+ // Multiplication by a power of 2.
+ NoSignedWrap = BO->hasNoSignedWrap();
+ if (RequireNoSignedWrap && !NoSignedWrap)
+ return 0;
+
+ Value *LHS = BO->getOperand(0);
+ int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
+ getLimitedValue(Scale.getBitWidth());
+ // Op = LHS << Amt.
+
+ if (Amt == logScale) {
+ // Multiplication by exactly the scale, replace the multiplication
+ // by its left-hand side in the parent.
+ Op = LHS;
+ break;
+ }
+ if (Amt < logScale || !Op->hasOneUse())
+ return 0;
+
+ // Multiplication by more than the scale. Reduce the multiplying amount
+ // by the scale in the parent.
+ Parent = std::make_pair(BO, 1);
+ Op = ConstantInt::get(BO->getType(), Amt - logScale);
+ break;
+ }
+ }
+
+ if (!Op->hasOneUse())
+ return 0;
+
+ if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
+ if (Cast->getOpcode() == Instruction::SExt) {
+ // Op is sign-extended from a smaller type, descale in the smaller type.
+ unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
+ APInt SmallScale = Scale.trunc(SmallSize);
+ // Suppose Op = sext X, and we descale X as Y * SmallScale. We want to
+ // descale Op as (sext Y) * Scale. In order to have
+ // sext (Y * SmallScale) = (sext Y) * Scale
+ // two conditions must hold: SmallScale must sign-extend to
+ // Scale and the multiplication Y * SmallScale should not overflow.
+ if (SmallScale.sext(Scale.getBitWidth()) != Scale)
+ // SmallScale does not sign-extend to Scale.
+ return 0;
+ assert(SmallScale.exactLogBase2() == logScale);
+ // Require that Y * SmallScale must not overflow.
+ RequireNoSignedWrap = true;
+
+ // Drill down through the cast.
+ Parent = std::make_pair(Cast, 0);
+ Scale = SmallScale;
+ continue;
+ }
+
+ if (Cast->getOpcode() == Instruction::Trunc) {
+ // Op is truncated from a larger type, descale in the larger type.
+ // Suppose Op = trunc X, and we descale X as Y * sext Scale. Then
+ // trunc (Y * sext Scale) = (trunc Y) * Scale
+ // always holds. However (trunc Y) * Scale may overflow even if
+ // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
+ // from this point up in the expression (see later).
+ if (RequireNoSignedWrap)
+ return 0;
+
+ // Drill down through the cast.
+ unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
+ Parent = std::make_pair(Cast, 0);
+ Scale = Scale.sext(LargeSize);
+ if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
+ logScale = -1;
+ assert(Scale.exactLogBase2() == logScale);
+ continue;
+ }
+ }
+
+ // Unsupported expression, bail out.
+ return 0;
+ }
+
+ // We know that we can successfully descale, so from here on we can safely
+ // modify the IR. Op holds the descaled version of the deepest term in the
+ // expression. NoSignedWrap is 'true' if multiplying Op by Scale is known
+ // not to overflow.
+
+ if (!Parent.first)
+ // The expression only had one term.
+ return Op;
+
+ // Rewrite the parent using the descaled version of its operand.
+ assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
+ assert(Op != Parent.first->getOperand(Parent.second) &&
+ "Descaling was a no-op?");
+ Parent.first->setOperand(Parent.second, Op);
+ Worklist.Add(Parent.first);
+
+ // Now work back up the expression correcting nsw flags. The logic is based
+ // on the following observation: if X * Y is known not to overflow as a signed
+ // multiplication, and Y is replaced by a value Z with smaller absolute value,
+ // then X * Z will not overflow as a signed multiplication either. As we work
+ // our way up, having NoSignedWrap 'true' means that the descaled value at the
+ // current level has strictly smaller absolute value than the original.
+ Instruction *Ancestor = Parent.first;
+ do {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
+ // If the multiplication wasn't nsw then we can't say anything about the
+ // value of the descaled multiplication, and we have to clear nsw flags
+ // from this point on up.
+ bool OpNoSignedWrap = BO->hasNoSignedWrap();
+ NoSignedWrap &= OpNoSignedWrap;
+ if (NoSignedWrap != OpNoSignedWrap) {
+ BO->setHasNoSignedWrap(NoSignedWrap);
+ Worklist.Add(Ancestor);
+ }
+ } else if (Ancestor->getOpcode() == Instruction::Trunc) {
+ // The fact that the descaled input to the trunc has smaller absolute
+ // value than the original input doesn't tell us anything useful about
+ // the absolute values of the truncations.
+ NoSignedWrap = false;
+ }
+ assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
+ "Failed to keep proper track of nsw flags while drilling down?");
+
+ if (Ancestor == Val)
+ // Got to the top, all done!
+ return Val;
+
+ // Move up one level in the expression.
+ assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
+ Ancestor = Ancestor->use_back();
+ } while (1);
+}
+
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
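Descale in one concrete case (my arithmetic restatement): descaling Val = X*(Y*8) by Scale = 4 rewrites the inner constant, yielding X*(Y*2), and the identity Val == descaled * Scale holds even under 2^n wraparound:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X = 0; X != 64; ++X)
    for (uint32_t Y = 0; Y != 64; ++Y) {
      uint32_t Val = X * (Y * 8u);      // the original expression
      uint32_t Descaled = X * (Y * 2u); // what Descale would return for Scale=4
      assert(Val == Descaled * 4u);     // holds modulo 2^32 as well
    }
  return 0;
}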
@@ -817,7 +1055,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// by multiples of a zero size type with zero.
if (TD) {
bool MadeChange = false;
- Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
+ Type *IntPtrTy = TD->getIntPtrType(GEP.getPointerOperandType());
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
@@ -836,7 +1074,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
Type *IndexTy = (*I)->getType();
- if (IndexTy != IntPtrTy && !IndexTy->isVectorTy()) {
+ if (IndexTy != IntPtrTy) {
// If we are using a wider index than needed for this platform, shrink
// it to what we need. If narrower, sign-extend it to what we need.
// This explicit cast can make subsequent optimizations more obvious.
@@ -855,7 +1093,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
return 0;
- // Note that if our source is a gep chain itself that we wait for that
+ // Note that if our source is a gep chain itself then we wait for that
// chain to be resolved before we perform this transformation. This
// avoids us creating a TON of code in some cases.
if (GEPOperator *SrcGEP =
@@ -987,63 +1225,74 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
// Transform things like:
+ // %V = mul i64 %N, 4
+ // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
+ // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
+ if (TD && ResElTy->isSized() && SrcElTy->isSized()) {
+ // Check that changing the type amounts to dividing the index by a scale
+ // factor.
+ uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
+ uint64_t SrcSize = TD->getTypeAllocSize(SrcElTy);
+ if (ResSize && SrcSize % ResSize == 0) {
+ Value *Idx = GEP.getOperand(1);
+ unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
+ uint64_t Scale = SrcSize / ResSize;
+
+ // Earlier transforms ensure that the index has type IntPtrType, which
+ // considerably simplifies the logic by eliminating implicit casts.
+ assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
+ "Index not cast to pointer width?");
+
+ bool NSW;
+ if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
+ // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
+ // If the multiplication NewIdx * Scale may overflow then the new
+ // GEP may not be "inbounds".
+ Value *NewGEP = GEP.isInBounds() && NSW ?
+ Builder->CreateInBoundsGEP(StrippedPtr, NewIdx, GEP.getName()) :
+ Builder->CreateGEP(StrippedPtr, NewIdx, GEP.getName());
+ // The NewGEP must be pointer typed, so must the old one -> BitCast
+ return new BitCastInst(NewGEP, GEP.getType());
+ }
+ }
+ }
+
+ // Similarly, transform things like:
// getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
-
- if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
+ if (TD && ResElTy->isSized() && SrcElTy->isSized() &&
+ SrcElTy->isArrayTy()) {
+ // Check that changing to the array element type amounts to dividing the
+ // index by a scale factor.
+ uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
uint64_t ArrayEltSize =
- TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
-
- // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
- // allow either a mul, shift, or constant here.
- Value *NewIdx = 0;
- ConstantInt *Scale = 0;
- if (ArrayEltSize == 1) {
- NewIdx = GEP.getOperand(1);
- Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
- } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
- NewIdx = ConstantInt::get(CI->getType(), 1);
- Scale = CI;
- } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
- if (Inst->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(Inst->getOperand(1))) {
- ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
- uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
- Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
- 1ULL << ShAmtVal);
- NewIdx = Inst->getOperand(0);
- } else if (Inst->getOpcode() == Instruction::Mul &&
- isa<ConstantInt>(Inst->getOperand(1))) {
- Scale = cast<ConstantInt>(Inst->getOperand(1));
- NewIdx = Inst->getOperand(0);
+ TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
+ if (ResSize && ArrayEltSize % ResSize == 0) {
+ Value *Idx = GEP.getOperand(1);
+ unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
+ uint64_t Scale = ArrayEltSize / ResSize;
+
+ // Earlier transforms ensure that the index has type IntPtrType, which
+ // considerably simplifies the logic by eliminating implicit casts.
+ assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
+ "Index not cast to pointer width?");
+
+ bool NSW;
+ if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
+ // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
+ // If the multiplication NewIdx * Scale may overflow then the new
+ // GEP may not be "inbounds".
+ Value *Off[2];
+ Off[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
+ Off[1] = NewIdx;
+ Value *NewGEP = GEP.isInBounds() && NSW ?
+ Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) :
+ Builder->CreateGEP(StrippedPtr, Off, GEP.getName());
+ // The NewGEP must be pointer typed, so must the old one -> BitCast
+ return new BitCastInst(NewGEP, GEP.getType());
}
}
-
- // If the index will be to exactly the right offset with the scale taken
- // out, perform the transformation. Note, we don't know whether Scale is
- // signed or not. We'll use unsigned version of division/modulo
- // operation after making sure Scale doesn't have the sign bit set.
- if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
- Scale->getZExtValue() % ArrayEltSize == 0) {
- Scale = ConstantInt::get(Scale->getType(),
- Scale->getZExtValue() / ArrayEltSize);
- if (Scale->getZExtValue() != 1) {
- Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
- false /*ZExt*/);
- NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
- }
-
- // Insert the new GEP instruction.
- Value *Idx[2];
- Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
- Idx[1] = NewIdx;
- Value *NewGEP = GEP.isInBounds() ?
- Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()):
- Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
- // The NewGEP must be pointer typed, so must the old one -> BitCast
- return new BitCastInst(NewGEP, GEP.getType());
- }
}
}
}
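In pointer terms, the generalized transform rewrites a byte-addressed GEP through a bitcast into an element-addressed GEP with a descaled index; the host-side analogue (my sketch):

#include <cassert>
#include <cstddef>

int main() {
  int Arr[16] = { 0 };
  int *P = Arr;
  // getelementptr i8* (bitcast i32* %p to i8*), %n * 4
  //   ==>  bitcast (getelementptr i32* %p, %n)
  for (std::size_t N = 0; N != 16; ++N) {
    char *ByteGEP = reinterpret_cast<char *>(P) + N * sizeof(int);
    int *ElemGEP = P + N;
    assert(static_cast<void *>(ByteGEP) == static_cast<void *>(ElemGEP));
  }
  return 0;
}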
@@ -1068,7 +1317,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the bitcast is of an allocation, and the allocation will be
// converted to match the type of the cast, don't touch this.
if (isa<AllocaInst>(BCI->getOperand(0)) ||
- isAllocationFn(BCI->getOperand(0))) {
+ isAllocationFn(BCI->getOperand(0), TLI)) {
// See if the bitcast simplifies, if so, don't nuke this GEP yet.
if (Instruction *I = visitBitCast(*BCI)) {
if (I != BCI) {
@@ -1107,7 +1356,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
static bool
-isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users) {
+isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
+ const TargetLibraryInfo *TLI) {
SmallVector<Instruction*, 4> Worklist;
Worklist.push_back(AI);
@@ -1163,7 +1413,7 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users) {
}
}
- if (isFreeCall(I)) {
+ if (isFreeCall(I, TLI)) {
Users.push_back(I);
continue;
}
@@ -1188,7 +1438,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
// to null and free calls, delete the calls and replace the comparisons with
// true or false as appropriate.
SmallVector<WeakVH, 64> Users;
- if (isAllocSiteRemovable(&MI, Users)) {
+ if (isAllocSiteRemovable(&MI, Users, TLI)) {
for (unsigned i = 0, e = Users.size(); i != e; ++i) {
Instruction *I = cast_or_null<Instruction>(&*Users[i]);
if (!I) continue;
@@ -1853,7 +2103,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
static bool AddReachableCodeToWorklist(BasicBlock *BB,
SmallPtrSet<BasicBlock*, 64> &Visited,
InstCombiner &IC,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
bool MadeIRChange = false;
SmallVector<BasicBlock*, 256> Worklist;
@@ -1872,7 +2122,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
Instruction *Inst = BBI++;
// DCE instruction if trivially dead.
- if (isInstructionTriviallyDead(Inst)) {
+ if (isInstructionTriviallyDead(Inst, TLI)) {
++NumDeadInst;
DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
Inst->eraseFromParent();
@@ -2002,7 +2252,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
if (I == 0) continue; // skip null values.
// Check to see if we can DCE the instruction.
- if (isInstructionTriviallyDead(I)) {
+ if (isInstructionTriviallyDead(I, TLI)) {
DEBUG(errs() << "IC: DCE: " << *I << '\n');
EraseInstFromFunction(*I);
++NumDeadInst;
@@ -2102,7 +2352,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// If the instruction was modified, it's possible that it is now dead.
// if so, remove it.
- if (isInstructionTriviallyDead(I)) {
+ if (isInstructionTriviallyDead(I, TLI)) {
EraseInstFromFunction(*I);
} else {
Worklist.Add(I);
@@ -2117,9 +2367,27 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
return MadeIRChange;
}
+namespace {
+class InstCombinerLibCallSimplifier : public LibCallSimplifier {
+ InstCombiner *IC;
+public:
+ InstCombinerLibCallSimplifier(const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
+ InstCombiner *IC)
+ : LibCallSimplifier(TD, TLI) {
+ this->IC = IC;
+ }
+
+ /// replaceAllUsesWith - override so that instruction replacement
+ /// can be defined in terms of the instruction combiner framework.
+ virtual void replaceAllUsesWith(Instruction *I, Value *With) const {
+ IC->ReplaceInstUsesWith(*I, With);
+ }
+};
+}
bool InstCombiner::runOnFunction(Function &F) {
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
/// Builder - This is an IRBuilder that automatically inserts new
@@ -2129,6 +2397,9 @@ bool InstCombiner::runOnFunction(Function &F) {
InstCombineIRInserter(Worklist));
Builder = &TheBuilder;
+ InstCombinerLibCallSimplifier TheSimplifier(TD, TLI, this);
+ Simplifier = &TheSimplifier;
+
bool EverMadeChange = false;
// Lower dbg.declare intrinsics otherwise their value may be clobbered
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 17b83ce..b7be462 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -15,7 +15,7 @@
#define DEBUG_TYPE "asan"
-#include "FunctionBlackList.h"
+#include "BlackList.h"
#include "llvm/Function.h"
#include "llvm/IRBuilder.h"
#include "llvm/InlineAsm.h"
@@ -35,7 +35,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -61,6 +61,8 @@ static const int kAsanCtorAndCtorPriority = 1;
static const char *kAsanReportErrorTemplate = "__asan_report_";
static const char *kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *kAsanUnregisterGlobalsName = "__asan_unregister_globals";
+static const char *kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
+static const char *kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *kAsanInitName = "__asan_init";
static const char *kAsanHandleNoReturnName = "__asan_handle_no_return";
static const char *kAsanMappingOffsetName = "__asan_mapping_offset";
@@ -106,6 +108,8 @@ static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInitializers("asan-initialization-order",
+ cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false));
static cl::opt<bool> ClMemIntrin("asan-memintrin",
cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true));
// This flag may need to be replaced with -fasan-blacklist.
@@ -144,41 +148,33 @@ static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"),
cl::Hidden, cl::init(-1));
namespace {
-
-/// An object of this type is created while instrumenting every function.
-struct AsanFunctionContext {
- AsanFunctionContext(Function &Function) : F(Function) { }
-
- Function &F;
-};
-
/// AddressSanitizer: instrument the code in module to find memory bugs.
-struct AddressSanitizer : public ModulePass {
+struct AddressSanitizer : public FunctionPass {
AddressSanitizer();
virtual const char *getPassName() const;
- void instrumentMop(AsanFunctionContext &AFC, Instruction *I);
- void instrumentAddress(AsanFunctionContext &AFC,
- Instruction *OrigIns, IRBuilder<> &IRB,
+ void instrumentMop(Instruction *I);
+ void instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB,
Value *Addr, uint32_t TypeSize, bool IsWrite);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex);
- bool instrumentMemIntrinsic(AsanFunctionContext &AFC, MemIntrinsic *MI);
- void instrumentMemIntrinsicParam(AsanFunctionContext &AFC,
- Instruction *OrigIns, Value *Addr,
+ bool instrumentMemIntrinsic(MemIntrinsic *MI);
+ void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr,
Value *Size,
Instruction *InsertBefore, bool IsWrite);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
- bool handleFunction(Module &M, Function &F);
+ bool runOnFunction(Function &F);
+ void createInitializerPoisonCalls(Module &M,
+ Value *FirstAddr, Value *LastAddr);
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
- bool poisonStackInFunction(Module &M, Function &F);
- virtual bool runOnModule(Module &M);
+ bool poisonStackInFunction(Function &F);
+ virtual bool doInitialization(Module &M);
+ virtual bool doFinalization(Module &M);
bool insertGlobalRedzones(Module &M);
static char ID; // Pass identification, replacement for typeid
private:
-
uint64_t getAllocaSizeInBytes(AllocaInst *AI) {
Type *Ty = AI->getAllocatedType();
uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
@@ -194,12 +190,15 @@ struct AddressSanitizer : public ModulePass {
}
Function *checkInterfaceFunction(Constant *FuncOrBitcast);
+ bool ShouldInstrumentGlobal(GlobalVariable *G);
void PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB,
Value *ShadowBase, bool DoPoison);
bool LooksLikeCodeInBug11395(Instruction *I);
+ void FindDynamicInitializers(Module &M);
+ bool HasDynamicInitializer(GlobalVariable *G);
LLVMContext *C;
- TargetData *TD;
+ DataLayout *TD;
uint64_t MappingOffset;
int MappingScale;
size_t RedzoneSize;
@@ -208,11 +207,15 @@ struct AddressSanitizer : public ModulePass {
Type *IntptrPtrTy;
Function *AsanCtorFunction;
Function *AsanInitFunction;
+ Function *AsanStackMallocFunc, *AsanStackFreeFunc;
+ Function *AsanHandleNoReturnFunc;
Instruction *CtorInsertBefore;
- OwningPtr<FunctionBlackList> BL;
+ OwningPtr<BlackList> BL;
// This array is indexed by AccessIsWrite and log2(AccessSize).
Function *AsanErrorCallback[2][kNumberOfAccessSizes];
InlineAsm *EmptyAsm;
+ SmallSet<GlobalValue*, 32> DynamicallyInitializedGlobals;
+ SmallSet<GlobalValue*, 32> GlobalsCreatedByAsan;
};
} // namespace
@@ -221,8 +224,8 @@ char AddressSanitizer::ID = 0;
INITIALIZE_PASS(AddressSanitizer, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
false, false)
-AddressSanitizer::AddressSanitizer() : ModulePass(ID) { }
-ModulePass *llvm::createAddressSanitizerPass() {
+AddressSanitizer::AddressSanitizer() : FunctionPass(ID) { }
+FunctionPass *llvm::createAddressSanitizerPass() {
return new AddressSanitizer();
}
@@ -243,38 +246,6 @@ static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) {
GlobalValue::PrivateLinkage, StrConst, "");
}
-// Split the basic block and insert an if-then code.
-// Before:
-// Head
-// Cmp
-// Tail
-// After:
-// Head
-// if (Cmp)
-// ThenBlock
-// Tail
-//
-// ThenBlock block is created and its terminator is returned.
-// If Unreachable, ThenBlock is terminated with UnreachableInst, otherwise
-// it is terminated with BranchInst to Tail.
-static TerminatorInst *splitBlockAndInsertIfThen(Value *Cmp, bool Unreachable) {
- Instruction *SplitBefore = cast<Instruction>(Cmp)->getNextNode();
- BasicBlock *Head = SplitBefore->getParent();
- BasicBlock *Tail = Head->splitBasicBlock(SplitBefore);
- TerminatorInst *HeadOldTerm = Head->getTerminator();
- LLVMContext &C = Head->getParent()->getParent()->getContext();
- BasicBlock *ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail);
- TerminatorInst *CheckTerm;
- if (Unreachable)
- CheckTerm = new UnreachableInst(C, ThenBlock);
- else
- CheckTerm = BranchInst::Create(Tail, ThenBlock);
- BranchInst *HeadNewTerm =
- BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cmp);
- ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
- return CheckTerm;
-}
-
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
// Shadow >> scale
Shadow = IRB.CreateLShr(Shadow, MappingScale);
@@ -286,12 +257,12 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
}
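
[editor's note] The shadow mapping implemented by memToShadow is the usual ASan affine map, Shadow = (Mem >> Scale) + Offset. A standalone sketch in plain C++, where kScale and kOffset stand in for the pass's MappingScale/MappingOffset (the concrete values are illustrative assumptions, not mandated):

    #include <cstdint>

    // Every (1 << kScale) application bytes map to one shadow byte.
    static const int      kScale  = 3;           // MappingScale: 8-byte granules
    static const uint64_t kOffset = 1ULL << 29;  // assumed 32-bit shadow offset

    uint64_t memToShadowModel(uint64_t Mem) {
      return (Mem >> kScale) + kOffset;
    }
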
void AddressSanitizer::instrumentMemIntrinsicParam(
- AsanFunctionContext &AFC, Instruction *OrigIns,
+ Instruction *OrigIns,
Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) {
// Check the first byte.
{
IRBuilder<> IRB(InsertBefore);
- instrumentAddress(AFC, OrigIns, IRB, Addr, 8, IsWrite);
+ instrumentAddress(OrigIns, IRB, Addr, 8, IsWrite);
}
// Check the last byte.
{
@@ -301,13 +272,12 @@ void AddressSanitizer::instrumentMemIntrinsicParam(
SizeMinusOne = IRB.CreateIntCast(SizeMinusOne, IntptrTy, false);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
Value *AddrPlusSizeMinisOne = IRB.CreateAdd(AddrLong, SizeMinusOne);
- instrumentAddress(AFC, OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite);
+ instrumentAddress(OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite);
}
}
// Instrument memset/memmove/memcpy
-bool AddressSanitizer::instrumentMemIntrinsic(AsanFunctionContext &AFC,
- MemIntrinsic *MI) {
+bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
Value *Dst = MI->getDest();
MemTransferInst *MemTran = dyn_cast<MemTransferInst>(MI);
Value *Src = MemTran ? MemTran->getSource() : 0;
@@ -323,12 +293,12 @@ bool AddressSanitizer::instrumentMemIntrinsic(AsanFunctionContext &AFC,
Value *Cmp = IRB.CreateICmpNE(Length,
Constant::getNullValue(Length->getType()));
- InsertBefore = splitBlockAndInsertIfThen(Cmp, false);
+ InsertBefore = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
}
- instrumentMemIntrinsicParam(AFC, MI, Dst, Length, InsertBefore, true);
+ instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true);
if (Src)
- instrumentMemIntrinsicParam(AFC, MI, Src, Length, InsertBefore, false);
+ instrumentMemIntrinsicParam(MI, Src, Length, InsertBefore, false);
return true;
}
@@ -358,14 +328,50 @@ static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) {
return NULL;
}
-void AddressSanitizer::instrumentMop(AsanFunctionContext &AFC, Instruction *I) {
- bool IsWrite;
+void AddressSanitizer::FindDynamicInitializers(Module& M) {
+ // Clang generates metadata identifying all dynamically initialized globals.
+ NamedMDNode *DynamicGlobals =
+ M.getNamedMetadata("llvm.asan.dynamically_initialized_globals");
+ if (!DynamicGlobals)
+ return;
+ for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) {
+ MDNode *MDN = DynamicGlobals->getOperand(i);
+ assert(MDN->getNumOperands() == 1);
+ Value *VG = MDN->getOperand(0);
+ // The optimizer may optimize away a global entirely, in which case we
+ // cannot instrument access to it.
+ if (!VG)
+ continue;
+
+ GlobalVariable *G = cast<GlobalVariable>(VG);
+ DynamicallyInitializedGlobals.insert(G);
+ }
+}
+// Returns true if a global variable is initialized dynamically in this TU.
+bool AddressSanitizer::HasDynamicInitializer(GlobalVariable *G) {
+ return DynamicallyInitializedGlobals.count(G);
+}
+
+void AddressSanitizer::instrumentMop(Instruction *I) {
+ bool IsWrite = false;
Value *Addr = isInterestingMemoryAccess(I, &IsWrite);
assert(Addr);
- if (ClOpt && ClOptGlobals && isa<GlobalVariable>(Addr)) {
- // We are accessing a global scalar variable. Nothing to catch here.
- return;
+ if (ClOpt && ClOptGlobals) {
+ if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
+ // If initialization order checking is disabled, a simple access to a
+ // dynamically initialized global is always valid.
+ if (!ClInitializers)
+ return;
+ // If a global variable does not have dynamic initialization we don't
+ // have to instrument it. However, if a global has external linkage, we
+ // assume it has dynamic initialization, as it may have an initializer
+ // in a different TU.
+ if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
+ !HasDynamicInitializer(G))
+ return;
+ }
}
+
Type *OrigPtrTy = Addr->getType();
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
@@ -379,7 +385,7 @@ void AddressSanitizer::instrumentMop(AsanFunctionContext &AFC, Instruction *I) {
}
IRBuilder<> IRB(I);
- instrumentAddress(AFC, I, IRB, Addr, TypeSize, IsWrite);
+ instrumentAddress(I, IRB, Addr, TypeSize, IsWrite);
}
// Validate the result of Module::getOrInsertFunction called for an interface
@@ -424,8 +430,7 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
-void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC,
- Instruction *OrigIns,
+void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
IRBuilder<> &IRB, Value *Addr,
uint32_t TypeSize, bool IsWrite) {
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
@@ -444,17 +449,19 @@ void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC,
TerminatorInst *CrashTerm = 0;
if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
- TerminatorInst *CheckTerm = splitBlockAndInsertIfThen(Cmp, false);
+ TerminatorInst *CheckTerm =
+ SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
BasicBlock *NextBB = CheckTerm->getSuccessor(0);
IRB.SetInsertPoint(CheckTerm);
Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
- BasicBlock *CrashBlock = BasicBlock::Create(*C, "", &AFC.F, NextBB);
+ BasicBlock *CrashBlock =
+ BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
CrashTerm = new UnreachableInst(*C, CrashBlock);
BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
ReplaceInstWithInst(CheckTerm, NewTerm);
} else {
- CrashTerm = splitBlockAndInsertIfThen(Cmp, true);
+ CrashTerm = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), true);
}
Instruction *Crash =
@@ -462,68 +469,108 @@ void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC,
Crash->setDebugLoc(OrigIns->getDebugLoc());
}
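
[editor's note] The fast/slow path that instrumentAddress and createSlowPathCmp emit can be modeled in plain C++: load one shadow byte, pass if it is zero, otherwise check whether the last accessed byte within the granule falls past the number of addressable bytes the shadow records. A sketch under the same assumed kScale/kOffset as above, not the pass's literal output:

    #include <cstdint>

    // Model of the inlined check for an AccessSize-byte access at Addr.
    bool accessIsPoisoned(uintptr_t Addr, unsigned AccessSize) {
      const int      kScale  = 3;
      const uint64_t kOffset = 1ULL << 29;          // assumed shadow offset
      int8_t ShadowValue = *(int8_t *)((Addr >> kScale) + kOffset);
      if (ShadowValue == 0)
        return false;                               // whole granule addressable
      // Slow path: only the first ShadowValue bytes of the granule are valid.
      uintptr_t LastAccessedByte = (Addr & ((1 << kScale) - 1)) + AccessSize - 1;
      return (int64_t)LastAccessedByte >= ShadowValue;
    }
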
+void AddressSanitizer::createInitializerPoisonCalls(Module &M,
+ Value *FirstAddr,
+ Value *LastAddr) {
+ // We do all of our poisoning and unpoisoning within _GLOBAL__I_a.
+ Function *GlobalInit = M.getFunction("_GLOBAL__I_a");
+ // If that function is not present, this TU contains no globals, or they have
+ // all been optimized away.
+ if (!GlobalInit)
+ return;
+
+ // Set up the arguments to our poison/unpoison functions.
+ IRBuilder<> IRB(GlobalInit->begin()->getFirstInsertionPt());
+
+ // Declare our poisoning and unpoisoning functions.
+ Function *AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
+ Function *AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL));
+ AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
+
+ // Add a call to poison all external globals before the given function starts.
+ IRB.CreateCall2(AsanPoisonGlobals, FirstAddr, LastAddr);
+
+ // Add calls to unpoison all globals before each return instruction.
+ for (Function::iterator I = GlobalInit->begin(), E = GlobalInit->end();
+ I != E; ++I) {
+ if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator())) {
+ CallInst::Create(AsanUnpoisonGlobals, "", RI);
+ }
+ }
+}
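
[editor's note] Conceptually, the instrumented _GLOBAL__I_a behaves like the following sketch; the __asan_* prototypes are the runtime entry points declared just above, and run_initializers stands in for the TU's existing initializer body:

    #include <cstdint>

    extern "C" void __asan_before_dynamic_init(uintptr_t First, uintptr_t Last);
    extern "C" void __asan_after_dynamic_init();

    static void run_initializers() { /* the TU's real dynamic initializers */ }

    // Shape of the instrumented _GLOBAL__I_a (a sketch, not literal output).
    void global_init_instrumented(uintptr_t FirstAddr, uintptr_t LastAddr) {
      __asan_before_dynamic_init(FirstAddr, LastAddr); // poison globals up front
      run_initializers();                              // original ctor body
      __asan_after_dynamic_init();                     // unpoison before return
    }
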
+
+bool AddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) {
+ Type *Ty = cast<PointerType>(G->getType())->getElementType();
+ DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
+
+ if (BL->isIn(*G)) return false;
+ if (!Ty->isSized()) return false;
+ if (!G->hasInitializer()) return false;
+ if (GlobalsCreatedByAsan.count(G)) return false; // Our own global.
+ // Touch only those globals that will not be defined in other modules.
+ // Don't handle ODR type linkages since other modules may be built w/o asan.
+ if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
+ G->getLinkage() != GlobalVariable::PrivateLinkage &&
+ G->getLinkage() != GlobalVariable::InternalLinkage)
+ return false;
+ // Two problems with thread-locals:
+ // - The address of the main thread's copy can't be computed at link-time.
+ // - Need to poison all copies, not just the main thread's one.
+ if (G->isThreadLocal())
+ return false;
+ // For now, just ignore this global if the alignment is large.
+ if (G->getAlignment() > RedzoneSize) return false;
+
+ // Ignore all the globals with the names starting with "\01L_OBJC_".
+ // Many of those are put into the .cstring section. The linker compresses
+ // that section by removing the spare \0s after the string terminator, so
+ // our redzones get broken.
+ if ((G->getName().find("\01L_OBJC_") == 0) ||
+ (G->getName().find("\01l_OBJC_") == 0)) {
+ DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G);
+ return false;
+ }
+
+ if (G->hasSection()) {
+ StringRef Section(G->getSection());
+ // Ignore the globals from the __OBJC section. The ObjC runtime assumes
+ // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
+ // them.
+ if ((Section.find("__OBJC,") == 0) ||
+ (Section.find("__DATA, __objc_") == 0)) {
+ DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G);
+ return false;
+ }
+ // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
+ // Constant CFString instances are compiled in the following way:
+ // -- the string buffer is emitted into
+ // __TEXT,__cstring,cstring_literals
+ // -- the constant NSConstantString structure referencing that buffer
+ // is placed into __DATA,__cfstring
+ // Therefore there's no point in placing redzones into __DATA,__cfstring.
+ // Moreover, it causes the linker to crash on OS X 10.7
+ if (Section.find("__DATA,__cfstring") == 0) {
+ DEBUG(dbgs() << "Ignoring CFString: " << *G);
+ return false;
+ }
+ }
+
+ return true;
+}
+
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
bool AddressSanitizer::insertGlobalRedzones(Module &M) {
SmallVector<GlobalVariable *, 16> GlobalsToChange;
- for (Module::GlobalListType::iterator G = M.getGlobalList().begin(),
- E = M.getGlobalList().end(); G != E; ++G) {
- Type *Ty = cast<PointerType>(G->getType())->getElementType();
- DEBUG(dbgs() << "GLOBAL: " << *G);
-
- if (!Ty->isSized()) continue;
- if (!G->hasInitializer()) continue;
- // Touch only those globals that will not be defined in other modules.
- // Don't handle ODR type linkages since other modules may be built w/o asan.
- if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
- G->getLinkage() != GlobalVariable::PrivateLinkage &&
- G->getLinkage() != GlobalVariable::InternalLinkage)
- continue;
- // Two problems with thread-locals:
- // - The address of the main thread's copy can't be computed at link-time.
- // - Need to poison all copies, not just the main thread's one.
- if (G->isThreadLocal())
- continue;
- // For now, just ignore this Alloca if the alignment is large.
- if (G->getAlignment() > RedzoneSize) continue;
-
- // Ignore all the globals with the names starting with "\01L_OBJC_".
- // Many of those are put into the .cstring section. The linker compresses
- // that section by removing the spare \0s after the string terminator, so
- // our redzones get broken.
- if ((G->getName().find("\01L_OBJC_") == 0) ||
- (G->getName().find("\01l_OBJC_") == 0)) {
- DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G);
- continue;
- }
-
- if (G->hasSection()) {
- StringRef Section(G->getSection());
- // Ignore the globals from the __OBJC section. The ObjC runtime assumes
- // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
- // them.
- if ((Section.find("__OBJC,") == 0) ||
- (Section.find("__DATA, __objc_") == 0)) {
- DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G);
- continue;
- }
- // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
- // Constant CFString instances are compiled in the following way:
- // -- the string buffer is emitted into
- // __TEXT,__cstring,cstring_literals
- // -- the constant NSConstantString structure referencing that buffer
- // is placed into __DATA,__cfstring
- // Therefore there's no point in placing redzones into __DATA,__cfstring.
- // Moreover, it causes the linker to crash on OS X 10.7
- if (Section.find("__DATA,__cfstring") == 0) {
- DEBUG(dbgs() << "Ignoring CFString: " << *G);
- continue;
- }
- }
-
- GlobalsToChange.push_back(G);
+ for (Module::GlobalListType::iterator G = M.global_begin(),
+ E = M.global_end(); G != E; ++G) {
+ if (ShouldInstrumentGlobal(G))
+ GlobalsToChange.push_back(G);
}
size_t n = GlobalsToChange.size();
@@ -534,13 +581,22 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
// size_t size;
// size_t size_with_redzone;
// const char *name;
+ // size_t has_dynamic_init;
// We initialize an array of such structures and pass it to a run-time call.
StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy,
- IntptrTy, IntptrTy, NULL);
- SmallVector<Constant *, 16> Initializers(n);
+ IntptrTy, IntptrTy,
+ IntptrTy, NULL);
+ SmallVector<Constant *, 16> Initializers(n), DynamicInit;
IRBuilder<> IRB(CtorInsertBefore);
+ if (ClInitializers)
+ FindDynamicInitializers(M);
+
+ // The addresses of the first and last dynamically initialized globals in
+ // this TU. Used in initialization order checking.
+ Value *FirstDynamic = 0, *LastDynamic = 0;
+
for (size_t i = 0; i < n; i++) {
GlobalVariable *G = GlobalsToChange[i];
PointerType *PtrTy = cast<PointerType>(G->getType());
@@ -549,6 +605,10 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
uint64_t RightRedzoneSize = RedzoneSize +
(RedzoneSize - (SizeInBytes % RedzoneSize));
Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
+ // Determine whether this global should be poisoned in initialization.
+ bool GlobalHasDynamicInitializer = HasDynamicInitializer(G);
+ // Don't check initialization order if this global is blacklisted.
+ GlobalHasDynamicInitializer &= !BL->isInInit(*G);
StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL);
Constant *NewInitializer = ConstantStruct::get(
@@ -583,8 +643,17 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
ConstantInt::get(IntptrTy, SizeInBytes),
ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
ConstantExpr::getPointerCast(Name, IntptrTy),
+ ConstantInt::get(IntptrTy, GlobalHasDynamicInitializer),
NULL);
- DEBUG(dbgs() << "NEW GLOBAL:\n" << *NewGlobal);
+
+ // Record the first and last dynamically initialized globals in this TU.
+ if (ClInitializers && GlobalHasDynamicInitializer) {
+ LastDynamic = ConstantExpr::getPointerCast(NewGlobal, IntptrTy);
+ if (FirstDynamic == 0)
+ FirstDynamic = LastDynamic;
+ }
+
+ DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
}
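
[editor's note] The RightRedzoneSize computed at the top of this loop always rounds the variable up past the next RedzoneSize boundary, so even an exactly aligned object still gets at least one full redzone. In isolation:

    #include <cstdint>

    // At least RedzoneSize bytes, plus enough extra that
    // (SizeInBytes + result) is a multiple of RedzoneSize.
    uint64_t rightRedzoneSize(uint64_t SizeInBytes, uint64_t RedzoneSize) {
      return RedzoneSize + (RedzoneSize - (SizeInBytes % RedzoneSize));
    }
    // e.g. SizeInBytes=4,  RedzoneSize=32 -> 60  (4 + 60 = 64)
    //      SizeInBytes=32, RedzoneSize=32 -> 64  (32 + 64 = 96)
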
ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
@@ -592,8 +661,13 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
M, ArrayOfGlobalStructTy, false, GlobalVariable::PrivateLinkage,
ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
+ // Create calls for poisoning before initializers run and unpoisoning after.
+ if (ClInitializers && FirstDynamic && LastDynamic)
+ createInitializerPoisonCalls(M, FirstDynamic, LastDynamic);
+
Function *AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ kAsanRegisterGlobalsName, IRB.getVoidTy(),
+ IntptrTy, IntptrTy, NULL));
AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
IRB.CreateCall2(AsanRegisterGlobals,
@@ -623,12 +697,13 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
}
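
[editor's note] Each Initializers[i] entry built in insertGlobalRedzones matches, field for field, a descriptor consumed by __asan_register_globals. Expressed as a C++ struct (a sketch; the field names are assumptions, the pass emits a struct of five pointer-sized integers):

    #include <cstdint>

    struct GlobalDescriptor {
      uintptr_t beg;               // address of the replaced global
      uintptr_t size;              // size of the original variable
      uintptr_t size_with_redzone; // size including the trailing redzone
      uintptr_t name;              // pointer to the variable's name string
      uintptr_t has_dynamic_init;  // nonzero if checked for init order
    };
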
// virtual
-bool AddressSanitizer::runOnModule(Module &M) {
+bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
+
if (!TD)
return false;
- BL.reset(new FunctionBlackList(ClBlackListFile));
+ BL.reset(new BlackList(ClBlackListFile));
C = &(M.getContext());
LongSize = TD->getPointerSizeInBits();
@@ -656,17 +731,27 @@ bool AddressSanitizer::runOnModule(Module &M) {
std::string FunctionName = std::string(kAsanReportErrorTemplate) +
(AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
// If we are merging crash callbacks, they have two parameters.
- AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = cast<Function>(
- M.getOrInsertFunction(FunctionName, IRB.getVoidTy(), IntptrTy, NULL));
+ AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
+ checkInterfaceFunction(M.getOrInsertFunction(
+ FunctionName, IRB.getVoidTy(), IntptrTy, NULL));
}
}
+
+ AsanStackMallocFunc = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL));
+ AsanStackFreeFunc = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanStackFreeName, IRB.getVoidTy(),
+ IntptrTy, IntptrTy, IntptrTy, NULL));
+ AsanHandleNoReturnFunc = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
+
// We insert an empty inline asm after __asan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
StringRef(""), StringRef(""),
/*hasSideEffects=*/true);
llvm::Triple targetTriple(M.getTargetTriple());
- bool isAndroid = targetTriple.getEnvironment() == llvm::Triple::ANDROIDEABI;
+ bool isAndroid = targetTriple.getEnvironment() == llvm::Triple::Android;
MappingOffset = isAndroid ? kDefaultShadowOffsetAndroid :
(LongSize == 32 ? kDefaultShadowOffset32 : kDefaultShadowOffset64);
@@ -686,10 +771,6 @@ bool AddressSanitizer::runOnModule(Module &M) {
// For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
RedzoneSize = std::max(32, (int)(1 << MappingScale));
- bool Res = false;
-
- if (ClGlobals)
- Res |= insertGlobalRedzones(M);
if (ClMappingOffsetLog >= 0) {
// Tell the run-time the current values of mapping offset and scale.
@@ -709,17 +790,20 @@ bool AddressSanitizer::runOnModule(Module &M) {
IRB.CreateLoad(asan_mapping_scale, true);
}
-
- for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
- if (F->isDeclaration()) continue;
- Res |= handleFunction(M, *F);
- }
-
appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority);
- return Res;
+ return true;
+}
+
+bool AddressSanitizer::doFinalization(Module &M) {
+ // We transform the globals at the very end so that the optimization analysis
+ // works on the original globals.
+ if (ClGlobals)
+ return insertGlobalRedzones(M);
+ return false;
}
+
bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
// For each NSObject descendant having a +load method, this method is invoked
// by the ObjC runtime before any of the static constructors is called.
@@ -736,19 +820,22 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
return false;
}
-bool AddressSanitizer::handleFunction(Module &M, Function &F) {
+bool AddressSanitizer::runOnFunction(Function &F) {
if (BL->isIn(F)) return false;
if (&F == AsanCtorFunction) return false;
+ DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
// If needed, insert __asan_init before checking for AddressSafety attr.
maybeInsertAsanInitAtFunctionEntry(F);
- if (!F.hasFnAttr(Attribute::AddressSafety)) return false;
+ if (!F.getFnAttributes().hasAttribute(Attributes::AddressSafety))
+ return false;
if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
return false;
- // We want to instrument every address only once per basic block
- // (unless there are calls between uses).
+
+ // We want to instrument every address only once per basic block (unless there
+ // are calls between uses).
SmallSet<Value*, 16> TempsToInstrument;
SmallVector<Instruction*, 16> ToInstrument;
SmallVector<Instruction*, 8> NoReturnCalls;
@@ -786,8 +873,6 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) {
}
}
- AsanFunctionContext AFC(F);
-
// Instrument.
int NumInstrumented = 0;
for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
@@ -795,25 +880,23 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) {
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
if (isInterestingMemoryAccess(Inst, &IsWrite))
- instrumentMop(AFC, Inst);
+ instrumentMop(Inst);
else
- instrumentMemIntrinsic(AFC, cast<MemIntrinsic>(Inst));
+ instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
}
NumInstrumented++;
}
- DEBUG(dbgs() << F);
-
- bool ChangedStack = poisonStackInFunction(M, F);
+ bool ChangedStack = poisonStackInFunction(F);
// We must unpoison the stack before every NoReturn call (throw, _exit, etc).
// See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) {
Instruction *CI = NoReturnCalls[i];
IRBuilder<> IRB(CI);
- IRB.CreateCall(M.getOrInsertFunction(kAsanHandleNoReturnName,
- IRB.getVoidTy(), NULL));
+ IRB.CreateCall(AsanHandleNoReturnFunc);
}
+ DEBUG(dbgs() << "ASAN done instrumenting:\n" << F << "\n");
return NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
}
@@ -926,7 +1009,7 @@ bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
-bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) {
+bool AddressSanitizer::poisonStackInFunction(Function &F) {
if (!ClStack) return false;
SmallVector<AllocaInst*, 16> AllocaVec;
SmallVector<Instruction*, 8> RetVec;
@@ -976,8 +1059,6 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) {
Value *LocalStackBase = OrigStackBase;
if (DoStackMalloc) {
- Value *AsanStackMallocFunc = M.getOrInsertFunction(
- kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL);
LocalStackBase = IRB.CreateCall2(AsanStackMallocFunc,
ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
}
@@ -1012,22 +1093,16 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) {
Value *BasePlus1 = IRB.CreateAdd(LocalStackBase,
ConstantInt::get(IntptrTy, LongSize/8));
BasePlus1 = IRB.CreateIntToPtr(BasePlus1, IntptrPtrTy);
- Value *Description = IRB.CreatePointerCast(
- createPrivateGlobalForString(M, StackDescription.str()),
- IntptrTy);
+ GlobalVariable *StackDescriptionGlobal =
+ createPrivateGlobalForString(*F.getParent(), StackDescription.str());
+ GlobalsCreatedByAsan.insert(StackDescriptionGlobal);
+ Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
IRB.CreateStore(Description, BasePlus1);
// Poison the stack redzones at the entry.
Value *ShadowBase = memToShadow(LocalStackBase, IRB);
PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRB, ShadowBase, true);
- Value *AsanStackFreeFunc = NULL;
- if (DoStackMalloc) {
- AsanStackFreeFunc = M.getOrInsertFunction(
- kAsanStackFreeName, IRB.getVoidTy(),
- IntptrTy, IntptrTy, IntptrTy, NULL);
- }
-
// Unpoison the stack before all ret instructions.
for (size_t i = 0, n = RetVec.size(); i < n; i++) {
Instruction *Ret = RetVec[i];
@@ -1046,6 +1121,10 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) {
}
}
+ // We are done. Remove the old unused alloca instructions.
+ for (size_t i = 0, n = AllocaVec.size(); i < n; i++)
+ AllocaVec[i]->eraseFromParent();
+
if (ClDebugStack) {
DEBUG(dbgs() << F);
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp b/contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp
new file mode 100644
index 0000000..ef34b8a
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BlackList.cpp
@@ -0,0 +1,105 @@
+//===-- BlackList.cpp - blacklist for sanitizers --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class for instrumentation passes (like AddressSanitizer
+// or ThreadSanitizer) to avoid instrumenting some functions or global
+// variables based on a user-supplied blacklist.
+//
+//===----------------------------------------------------------------------===//
+
+#include <utility>
+#include <string>
+
+#include "BlackList.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+
+namespace llvm {
+
+BlackList::BlackList(const StringRef Path) {
+ // Validate and open blacklist file.
+ if (!Path.size()) return;
+ OwningPtr<MemoryBuffer> File;
+ if (error_code EC = MemoryBuffer::getFile(Path, File)) {
+ report_fatal_error("Can't open blacklist file: " + Path + ": " +
+ EC.message());
+ }
+
+ // Iterate through each line in the blacklist file.
+ SmallVector<StringRef, 16> Lines;
+ SplitString(File.take()->getBuffer(), Lines, "\n\r");
+ StringMap<std::string> Regexps;
+ for (SmallVector<StringRef, 16>::iterator I = Lines.begin(), E = Lines.end();
+ I != E; ++I) {
+ // Ignore empty lines and lines starting with "#"
+ if (I->empty() || I->startswith("#"))
+ continue;
+ // Get our prefix and unparsed regexp.
+ std::pair<StringRef, StringRef> SplitLine = I->split(":");
+ StringRef Prefix = SplitLine.first;
+ std::string Regexp = SplitLine.second;
+
+ // Replace * with .*
+ for (size_t pos = 0; (pos = Regexp.find("*", pos)) != std::string::npos;
+ pos += strlen(".*")) {
+ Regexp.replace(pos, strlen("*"), ".*");
+ }
+
+ // Check that the regexp is valid.
+ Regex CheckRE(Regexp);
+ std::string Error;
+ if (!CheckRE.isValid(Error)) {
+ report_fatal_error("malformed blacklist regex: " + SplitLine.second +
+ ": " + Error);
+ }
+
+ // Add this regexp into the proper group by its prefix.
+ if (Regexps[Prefix].size())
+ Regexps[Prefix] += "|";
+ Regexps[Prefix] += Regexp;
+ }
+
+ // Iterate through each of the prefixes, and create Regexs for them.
+ for (StringMap<std::string>::iterator I = Regexps.begin(), E = Regexps.end();
+ I != E; ++I) {
+ Entries[I->getKey()] = new Regex(I->getValue());
+ }
+}
+
+bool BlackList::isIn(const Function &F) {
+ return isIn(*F.getParent()) || inSection("fun", F.getName());
+}
+
+bool BlackList::isIn(const GlobalVariable &G) {
+ return isIn(*G.getParent()) || inSection("global", G.getName());
+}
+
+bool BlackList::isIn(const Module &M) {
+ return inSection("src", M.getModuleIdentifier());
+}
+
+bool BlackList::isInInit(const GlobalVariable &G) {
+ return isIn(*G.getParent()) || inSection("global-init", G.getName());
+}
+
+bool BlackList::inSection(const StringRef Section,
+ const StringRef Query) {
+ Regex *FunctionRegex = Entries[Section];
+ return FunctionRegex ? FunctionRegex->match(Query) : false;
+}
+
+} // namespace llvm
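
[editor's note] The wildcard handling above is purely textual: each * becomes .*, and all patterns sharing a prefix are OR-ed into one alternation before a single Regex is built per section. A sketch of the same transformation using only the standard library (a hypothetical helper, not part of the pass):

    #include <string>

    // Turn a blacklist wildcard like "*_ZN4base6subtle*" into the regexp
    // ".*_ZN4base6subtle.*", exactly as the loop above does.
    std::string wildcardToRegexp(std::string Pattern) {
      for (std::string::size_type Pos = 0;
           (Pos = Pattern.find('*', Pos)) != std::string::npos; Pos += 2)
        Pattern.replace(Pos, 1, ".*");
      return Pattern;
    }
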
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BlackList.h b/contrib/llvm/lib/Transforms/Instrumentation/BlackList.h
new file mode 100644
index 0000000..f3c05a5
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BlackList.h
@@ -0,0 +1,57 @@
+//===-- BlackList.h - blacklist for sanitizers ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class for instrumentation passes (like AddressSanitizer
+// or ThreadSanitizer) to avoid instrumenting some functions or global
+// variables based on a user-supplied blacklist.
+//
+// The blacklist disables instrumentation of various functions and global
+// variables. Each line contains a prefix, followed by a wild card expression.
+// Empty lines and lines starting with "#" are ignored.
+// ---
+// # Blacklisted items:
+// fun:*_ZN4base6subtle*
+// global:*global_with_bad_access_or_initialization*
+// global-init:*global_with_initialization_issues*
+// src:file_with_tricky_code.cc
+// ---
+// Note that the wild card is in fact an llvm::Regex, but * is automatically
+// replaced with .*
+// This is similar to the "ignore" feature of ThreadSanitizer.
+// http://code.google.com/p/data-race-test/wiki/ThreadSanitizerIgnores
+//
+//===----------------------------------------------------------------------===//
+//
+
+#include "llvm/ADT/StringMap.h"
+
+namespace llvm {
+class Function;
+class GlobalVariable;
+class Module;
+class Regex;
+class StringRef;
+
+class BlackList {
+ public:
+ BlackList(const StringRef Path);
+ // Returns whether either this function or its source file is blacklisted.
+ bool isIn(const Function &F);
+ // Returns whether either this global or its source file is blacklisted.
+ bool isIn(const GlobalVariable &G);
+ // Returns whether this module is blacklisted by filename.
+ bool isIn(const Module &M);
+ // Returns whether a global should be excluded from initialization checking.
+ bool isInInit(const GlobalVariable &G);
+ private:
+ StringMap<Regex*> Entries;
+
+ bool inSection(const StringRef Section, const StringRef Query);
+};
+
+} // namespace llvm
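
[editor's note] Typical client code is a one-line early-out per entity, as the AddressSanitizer and ThreadSanitizer passes do. A minimal sketch, assuming the caller holds a BlackList constructed from the user's file:

    #include "BlackList.h"
    #include "llvm/Function.h"

    // True if the function itself ("fun:...") or its whole source
    // file ("src:...") is blacklisted.
    bool shouldSkip(llvm::BlackList &BL, const llvm::Function &F) {
      return BL.isIn(F);
    }
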
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 09e0f14..7810b1b 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -23,7 +23,8 @@
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/TargetFolder.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Instrumentation.h"
using namespace llvm;
@@ -47,11 +48,13 @@ namespace {
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<TargetData>();
+ AU.addRequired<DataLayout>();
+ AU.addRequired<TargetLibraryInfo>();
}
private:
- const TargetData *TD;
+ const DataLayout *TD;
+ const TargetLibraryInfo *TLI;
ObjectSizeOffsetEvaluator *ObjSizeEval;
BuilderTy *Builder;
Instruction *Inst;
@@ -140,7 +143,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
Value *Offset = SizeOffset.second;
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
- IntegerType *IntTy = TD->getIntPtrType(Inst->getContext());
+ Type *IntTy = TD->getIntPtrType(Ptr->getType());
Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
// three checks are required to ensure safety:
@@ -165,12 +168,13 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
}
bool BoundsChecking::runOnFunction(Function &F) {
- TD = &getAnalysis<TargetData>();
+ TD = &getAnalysis<DataLayout>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
TrapBB = 0;
BuilderTy TheBuilder(F.getContext(), TargetFolder(TD));
Builder = &TheBuilder;
- ObjectSizeOffsetEvaluator TheObjSizeEval(TD, F.getContext());
+ ObjectSizeOffsetEvaluator TheObjSizeEval(TD, TLI, F.getContext());
ObjSizeEval = &TheObjSizeEval;
// check HANDLE_MEMORY_INST in include/llvm/Instruction.def for memory
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp b/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp
deleted file mode 100644
index 188ea4d..0000000
--- a/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-//===-- FunctionBlackList.cpp - blacklist of functions --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is a utility class for instrumentation passes (like AddressSanitizer
-// or ThreadSanitizer) to avoid instrumenting some functions based on
-// user-supplied blacklist.
-//
-//===----------------------------------------------------------------------===//
-
-#include "FunctionBlackList.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Function.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Regex.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
-
-namespace llvm {
-
-FunctionBlackList::FunctionBlackList(const std::string &Path) {
- Functions = NULL;
- const char *kFunPrefix = "fun:";
- if (!Path.size()) return;
- std::string Fun;
-
- OwningPtr<MemoryBuffer> File;
- if (error_code EC = MemoryBuffer::getFile(Path.c_str(), File)) {
- report_fatal_error("Can't open blacklist file " + Path + ": " +
- EC.message());
- }
- MemoryBuffer *Buff = File.take();
- const char *Data = Buff->getBufferStart();
- size_t DataLen = Buff->getBufferSize();
- SmallVector<StringRef, 16> Lines;
- SplitString(StringRef(Data, DataLen), Lines, "\n\r");
- for (size_t i = 0, numLines = Lines.size(); i < numLines; i++) {
- if (Lines[i].startswith(kFunPrefix)) {
- std::string ThisFunc = Lines[i].substr(strlen(kFunPrefix));
- std::string ThisFuncRE;
- // add ThisFunc replacing * with .*
- for (size_t j = 0, n = ThisFunc.size(); j < n; j++) {
- if (ThisFunc[j] == '*')
- ThisFuncRE += '.';
- ThisFuncRE += ThisFunc[j];
- }
- // Check that the regexp is valid.
- Regex CheckRE(ThisFuncRE);
- std::string Error;
- if (!CheckRE.isValid(Error))
- report_fatal_error("malformed blacklist regex: " + ThisFunc +
- ": " + Error);
- // Append to the final regexp.
- if (Fun.size())
- Fun += "|";
- Fun += ThisFuncRE;
- }
- }
- if (Fun.size()) {
- Functions = new Regex(Fun);
- }
-}
-
-bool FunctionBlackList::isIn(const Function &F) {
- if (Functions) {
- bool Res = Functions->match(F.getName());
- return Res;
- }
- return false;
-}
-
-} // namespace llvm
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h b/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h
deleted file mode 100644
index c1239b9..0000000
--- a/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h
+++ /dev/null
@@ -1,37 +0,0 @@
-//===-- FunctionBlackList.cpp - blacklist of functions ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//===----------------------------------------------------------------------===//
-//
-// This is a utility class for instrumentation passes (like AddressSanitizer
-// or ThreadSanitizer) to avoid instrumenting some functions based on
-// user-supplied blacklist.
-//
-//===----------------------------------------------------------------------===//
-//
-
-#include <string>
-
-namespace llvm {
-class Function;
-class Regex;
-
-// Blacklisted functions are not instrumented.
-// The blacklist file contains one or more lines like this:
-// ---
-// fun:FunctionWildCard
-// ---
-// This is similar to the "ignore" feature of ThreadSanitizer.
-// http://code.google.com/p/data-race-test/wiki/ThreadSanitizerIgnores
-class FunctionBlackList {
- public:
- FunctionBlackList(const std::string &Path);
- bool isIn(const Function &F);
- private:
- Regex *Functions;
-};
-
-} // namespace llvm
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 264a6a6..e9192e5 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -88,11 +88,11 @@ namespace {
// Add the function to write out all our counters to the global destructor
// list.
- void insertCounterWriteout(SmallVector<std::pair<GlobalVariable *,
- MDNode *>, 8> &);
+ void insertCounterWriteout(ArrayRef<std::pair<GlobalVariable*, MDNode*> >);
void insertIndirectCounterIncrement();
+ void insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> >);
- std::string mangleName(DICompileUnit CU, std::string NewStem);
+ std::string mangleName(DICompileUnit CU, const char *NewStem);
bool EmitNotes;
bool EmitData;
@@ -329,7 +329,7 @@ namespace {
};
}
-std::string GCOVProfiler::mangleName(DICompileUnit CU, std::string NewStem) {
+std::string GCOVProfiler::mangleName(DICompileUnit CU, const char *NewStem) {
if (NamedMDNode *GCov = M->getNamedMetadata("llvm.gcov")) {
for (int i = 0, e = GCov->getNumOperands(); i != e; ++i) {
MDNode *N = GCov->getOperand(i);
@@ -519,6 +519,7 @@ bool GCOVProfiler::emitProfileArcs() {
}
insertCounterWriteout(CountersBySP);
+ insertFlush(CountersBySP);
}
if (InsertIndCounterIncrCode)
@@ -630,14 +631,15 @@ GlobalVariable *GCOVProfiler::getEdgeStateValue() {
}
void GCOVProfiler::insertCounterWriteout(
- SmallVector<std::pair<GlobalVariable *, MDNode *>, 8> &CountersBySP) {
- FunctionType *WriteoutFTy =
- FunctionType::get(Type::getVoidTy(*Ctx), false);
- Function *WriteoutF = Function::Create(WriteoutFTy,
- GlobalValue::InternalLinkage,
- "__llvm_gcov_writeout", M);
+ ArrayRef<std::pair<GlobalVariable *, MDNode *> > CountersBySP) {
+ FunctionType *WriteoutFTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
+ Function *WriteoutF = M->getFunction("__llvm_gcov_writeout");
+ if (!WriteoutF)
+ WriteoutF = Function::Create(WriteoutFTy, GlobalValue::InternalLinkage,
+ "__llvm_gcov_writeout", M);
WriteoutF->setUnnamedAddr(true);
- BasicBlock *BB = BasicBlock::Create(*Ctx, "", WriteoutF);
+
+ BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", WriteoutF);
IRBuilder<> Builder(BB);
Constant *StartFile = getStartFileFunc();
@@ -648,11 +650,11 @@ void GCOVProfiler::insertCounterWriteout(
NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu");
if (CU_Nodes) {
for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
- DICompileUnit compile_unit(CU_Nodes->getOperand(i));
- std::string FilenameGcda = mangleName(compile_unit, "gcda");
+ DICompileUnit CU(CU_Nodes->getOperand(i));
+ std::string FilenameGcda = mangleName(CU, "gcda");
Builder.CreateCall(StartFile,
Builder.CreateGlobalStringPtr(FilenameGcda));
- for (SmallVector<std::pair<GlobalVariable *, MDNode *>, 8>::iterator
+ for (ArrayRef<std::pair<GlobalVariable *, MDNode *> >::iterator
I = CountersBySP.begin(), E = CountersBySP.end();
I != E; ++I) {
DISubprogram SP(I->second);
@@ -680,7 +682,7 @@ void GCOVProfiler::insertCounterWriteout(
"__llvm_gcov_init", M);
F->setUnnamedAddr(true);
F->setLinkage(GlobalValue::InternalLinkage);
- F->addFnAttr(Attribute::NoInline);
+ F->addFnAttr(Attributes::NoInline);
BB = BasicBlock::Create(*Ctx, "entry", F);
Builder.SetInsertPoint(BB);
@@ -699,7 +701,7 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
cast<Function>(GCOVProfiler::getIncrementIndirectCounterFunc());
Fn->setUnnamedAddr(true);
Fn->setLinkage(GlobalValue::InternalLinkage);
- Fn->addFnAttr(Attribute::NoInline);
+ Fn->addFnAttr(Attributes::NoInline);
Type *Int32Ty = Type::getInt32Ty(*Ctx);
Type *Int64Ty = Type::getInt64Ty(*Ctx);
@@ -745,3 +747,42 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
Builder.SetInsertPoint(Exit);
Builder.CreateRetVoid();
}
+
+void GCOVProfiler::
+insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> > CountersBySP) {
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
+ Function *FlushF = M->getFunction("__gcov_flush");
+ if (!FlushF)
+ FlushF = Function::Create(FTy, GlobalValue::InternalLinkage,
+ "__gcov_flush", M);
+ else
+ FlushF->setLinkage(GlobalValue::InternalLinkage);
+ FlushF->setUnnamedAddr(true);
+
+ BasicBlock *Entry = BasicBlock::Create(*Ctx, "entry", FlushF);
+
+ // Write out the current counters.
+ Constant *WriteoutF = M->getFunction("__llvm_gcov_writeout");
+ assert(WriteoutF && "Need to create the writeout function first!");
+
+ IRBuilder<> Builder(Entry);
+ Builder.CreateCall(WriteoutF);
+
+ // Zero out the counters.
+ for (ArrayRef<std::pair<GlobalVariable *, MDNode *> >::iterator
+ I = CountersBySP.begin(), E = CountersBySP.end();
+ I != E; ++I) {
+ GlobalVariable *GV = I->first;
+ Constant *Null = Constant::getNullValue(GV->getType()->getElementType());
+ Builder.CreateStore(Null, GV);
+ }
+
+ Type *RetTy = FlushF->getReturnType();
+ if (RetTy == Type::getVoidTy(*Ctx))
+ Builder.CreateRetVoid();
+ else if (RetTy->isIntegerTy())
+ // Used if __gcov_flush was implicitly declared.
+ Builder.CreateRet(ConstantInt::get(RetTy, 0));
+ else
+ report_fatal_error("invalid return type for __gcov_flush");
+}
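
[editor's note] The generated __gcov_flush is therefore equivalent to the following C++ sketch; the Counters array stands in for the per-function gcov counter globals, and __llvm_gcov_writeout (emitted by insertCounterWriteout, internal linkage in the real IR) is shown as an extern for illustration:

    #include <cstring>

    extern "C" void __llvm_gcov_writeout();

    static long long Counters[16];  // stand-in for one counter global

    extern "C" void __gcov_flush() {
      __llvm_gcov_writeout();                      // dump counters to .gcda
      std::memset(Counters, 0, sizeof(Counters));  // then zero every counter
    }
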
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h b/contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h
index f76c77e..a4bb5a6 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h
+++ b/contrib/llvm/lib/Transforms/Instrumentation/MaximumSpanningTree.h
@@ -26,30 +26,6 @@ namespace llvm {
/// The type parameter T determines the type of the nodes of the graph.
template <typename T>
class MaximumSpanningTree {
-
- // A comparing class for comparing weighted edges.
- template <typename CT>
- struct EdgeWeightCompare {
- bool operator()(typename MaximumSpanningTree<CT>::EdgeWeight X,
- typename MaximumSpanningTree<CT>::EdgeWeight Y) const {
- if (X.second > Y.second) return true;
- if (X.second < Y.second) return false;
- if (const BasicBlock *BBX = dyn_cast<BasicBlock>(X.first.first)) {
- if (const BasicBlock *BBY = dyn_cast<BasicBlock>(Y.first.first)) {
- if (BBX->size() > BBY->size()) return true;
- if (BBX->size() < BBY->size()) return false;
- }
- }
- if (const BasicBlock *BBX = dyn_cast<BasicBlock>(X.first.second)) {
- if (const BasicBlock *BBY = dyn_cast<BasicBlock>(Y.first.second)) {
- if (BBX->size() > BBY->size()) return true;
- if (BBX->size() < BBY->size()) return false;
- }
- }
- return false;
- }
- };
-
public:
typedef std::pair<const T*, const T*> Edge;
typedef std::pair<Edge, double> EdgeWeight;
@@ -59,6 +35,33 @@ namespace llvm {
MaxSpanTree MST;
+ private:
+ // A comparing class for comparing weighted edges.
+ struct EdgeWeightCompare {
+ static size_t getBlockSize(const T *X) {
+ const BasicBlock *BB = dyn_cast_or_null<BasicBlock>(X);
+ return BB ? BB->size() : 0;
+ }
+
+ bool operator()(EdgeWeight X, EdgeWeight Y) const {
+ if (X.second > Y.second) return true;
+ if (X.second < Y.second) return false;
+
+ // Equal edge weights: break ties by comparing block sizes.
+ size_t XSizeA = getBlockSize(X.first.first);
+ size_t YSizeA = getBlockSize(Y.first.first);
+ if (XSizeA > YSizeA) return true;
+ if (XSizeA < YSizeA) return false;
+
+ size_t XSizeB = getBlockSize(X.first.second);
+ size_t YSizeB = getBlockSize(Y.first.second);
+ if (XSizeB > YSizeB) return true;
+ if (XSizeB < YSizeB) return false;
+
+ return false;
+ }
+ };
+
public:
static char ID; // Class identification, replacement for typeinfo
@@ -66,7 +69,7 @@ namespace llvm {
/// spanning tree.
MaximumSpanningTree(EdgeWeights &EdgeVector) {
- std::stable_sort(EdgeVector.begin(), EdgeVector.end(), EdgeWeightCompare<T>());
+ std::stable_sort(EdgeVector.begin(), EdgeVector.end(), EdgeWeightCompare());
// Create spanning tree, Forest contains a special data structure
// that makes checking if two nodes are already in a common (sub-)tree
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index dc0fa71..9e10fc4 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -21,7 +21,7 @@
#define DEBUG_TYPE "tsan"
-#include "FunctionBlackList.h"
+#include "BlackList.h"
#include "llvm/Function.h"
#include "llvm/IRBuilder.h"
#include "llvm/Intrinsics.h"
@@ -38,7 +38,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
@@ -47,10 +47,19 @@ using namespace llvm;
static cl::opt<std::string> ClBlackListFile("tsan-blacklist",
cl::desc("Blacklist file"), cl::Hidden);
+static cl::opt<bool> ClInstrumentMemoryAccesses(
+ "tsan-instrument-memory-accesses", cl::init(true),
+ cl::desc("Instrument memory accesses"), cl::Hidden);
+static cl::opt<bool> ClInstrumentFuncEntryExit(
+ "tsan-instrument-func-entry-exit", cl::init(true),
+ cl::desc("Instrument function entry and exit"), cl::Hidden);
+static cl::opt<bool> ClInstrumentAtomics(
+ "tsan-instrument-atomics", cl::init(true),
+ cl::desc("Instrument atomics"), cl::Hidden);
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
-STATISTIC(NumOmittedReadsBeforeWrite,
+STATISTIC(NumOmittedReadsBeforeWrite,
"Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
@@ -76,8 +85,8 @@ struct ThreadSanitizer : public FunctionPass {
bool addrPointsToConstantData(Value *Addr);
int getMemoryAccessFuncIndex(Value *Addr);
- TargetData *TD;
- OwningPtr<FunctionBlackList> BL;
+ DataLayout *TD;
+ OwningPtr<BlackList> BL;
IntegerType *OrdTy;
// Callbacks to run-time library are computed in doInitialization.
Function *TsanFuncEntry;
@@ -88,6 +97,10 @@ struct ThreadSanitizer : public FunctionPass {
Function *TsanWrite[kNumberOfAccessSizes];
Function *TsanAtomicLoad[kNumberOfAccessSizes];
Function *TsanAtomicStore[kNumberOfAccessSizes];
+ Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
+ Function *TsanAtomicCAS[kNumberOfAccessSizes];
+ Function *TsanAtomicThreadFence;
+ Function *TsanAtomicSignalFence;
Function *TsanVptrUpdate;
};
} // namespace
@@ -118,10 +131,10 @@ static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
}
bool ThreadSanitizer::doInitialization(Module &M) {
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
if (!TD)
return false;
- BL.reset(new FunctionBlackList(ClBlackListFile));
+ BL.reset(new BlackList(ClBlackListFile));
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
@@ -158,10 +171,42 @@ bool ThreadSanitizer::doInitialization(Module &M) {
TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
NULL));
+
+ for (int op = AtomicRMWInst::FIRST_BINOP;
+ op <= AtomicRMWInst::LAST_BINOP; ++op) {
+ TsanAtomicRMW[op][i] = NULL;
+ const char *NamePart = NULL;
+ if (op == AtomicRMWInst::Xchg)
+ NamePart = "_exchange";
+ else if (op == AtomicRMWInst::Add)
+ NamePart = "_fetch_add";
+ else if (op == AtomicRMWInst::Sub)
+ NamePart = "_fetch_sub";
+ else if (op == AtomicRMWInst::And)
+ NamePart = "_fetch_and";
+ else if (op == AtomicRMWInst::Or)
+ NamePart = "_fetch_or";
+ else if (op == AtomicRMWInst::Xor)
+ NamePart = "_fetch_xor";
+ else
+ continue;
+ SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
+ TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
+ RMWName, Ty, PtrTy, Ty, OrdTy, NULL));
+ }
+
+ SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
+ "_compare_exchange_val");
+ TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, NULL));
}
TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
"__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), NULL));
+ TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
+ "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
+ TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
+ "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
return true;
}
@@ -186,7 +231,7 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
NumOmittedReadsFromConstantGlobals++;
return true;
}
- } else if(LoadInst *L = dyn_cast<LoadInst>(Addr)) {
+ } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
if (isVtableAccess(L)) {
// Reads from a vtable pointer can not race with any writes.
NumOmittedReadsFromVtable++;
@@ -244,8 +289,8 @@ static bool isAtomic(Instruction *I) {
return true;
if (isa<AtomicCmpXchgInst>(I))
return true;
- if (FenceInst *FI = dyn_cast<FenceInst>(I))
- return FI->getSynchScope() == CrossThread;
+ if (isa<FenceInst>(I))
+ return true;
return false;
}
@@ -284,17 +329,19 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
// (e.g. variables that do not escape, etc).
// Instrument memory accesses.
- for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
- Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
- }
+ if (ClInstrumentMemoryAccesses)
+ for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
+ Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
+ }
// Instrument atomic memory accesses.
- for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
- Res |= instrumentAtomic(AtomicAccesses[i]);
- }
+ if (ClInstrumentAtomics)
+ for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
+ Res |= instrumentAtomic(AtomicAccesses[i]);
+ }
// Instrument function entry/exit points if there were instrumented accesses.
- if (Res || HasCalls) {
+ if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
Value *ReturnAddress = IRB.CreateCall(
Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
@@ -343,12 +390,12 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
switch (ord) {
case NotAtomic: assert(false);
case Unordered: // Fall-through.
- case Monotonic: v = 1 << 0; break;
- // case Consume: v = 1 << 1; break; // Not specified yet.
- case Acquire: v = 1 << 2; break;
- case Release: v = 1 << 3; break;
- case AcquireRelease: v = 1 << 4; break;
- case SequentiallyConsistent: v = 1 << 5; break;
+ case Monotonic: v = 0; break;
+ // case Consume: v = 1; break; // Not specified yet.
+ case Acquire: v = 2; break;
+ case Release: v = 3; break;
+ case AcquireRelease: v = 4; break;
+ case SequentiallyConsistent: v = 5; break;
}
return IRB->getInt32(v);
}
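
[editor's note] The new encoding drops the old bitmask values (1 << n) in favor of the plain integers the tsan runtime expects, which line up with the C++11 memory orders. As an enum (a sketch of the assumed runtime-side constants):

    enum tsan_memory_order {
      tsan_memory_order_relaxed = 0,  // Unordered / Monotonic
      tsan_memory_order_consume = 1,  // reserved; not emitted yet
      tsan_memory_order_acquire = 2,
      tsan_memory_order_release = 3,
      tsan_memory_order_acq_rel = 4,
      tsan_memory_order_seq_cst = 5
    };
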
@@ -385,12 +432,44 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
ArrayRef<Value*>(Args));
ReplaceInstWithInst(I, C);
- } else if (isa<AtomicRMWInst>(I)) {
- // FIXME: Not yet supported.
- } else if (isa<AtomicCmpXchgInst>(I)) {
- // FIXME: Not yet supported.
- } else if (isa<FenceInst>(I)) {
- // FIXME: Not yet supported.
+ } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
+ Value *Addr = RMWI->getPointerOperand();
+ int Idx = getMemoryAccessFuncIndex(Addr);
+ if (Idx < 0)
+ return false;
+ Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
+ if (F == NULL)
+ return false;
+ const size_t ByteSize = 1 << Idx;
+ const size_t BitSize = ByteSize * 8;
+ Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
+ Type *PtrTy = Ty->getPointerTo();
+ Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+ IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
+ createOrdering(&IRB, RMWI->getOrdering())};
+ CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
+ ReplaceInstWithInst(I, C);
+ } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
+ Value *Addr = CASI->getPointerOperand();
+ int Idx = getMemoryAccessFuncIndex(Addr);
+ if (Idx < 0)
+ return false;
+ const size_t ByteSize = 1 << Idx;
+ const size_t BitSize = ByteSize * 8;
+ Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
+ Type *PtrTy = Ty->getPointerTo();
+ Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+ IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
+ IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
+ createOrdering(&IRB, CASI->getOrdering())};
+ CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
+ ReplaceInstWithInst(I, C);
+ } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
+ Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
+ Function *F = FI->getSynchScope() == SingleThread ?
+ TsanAtomicSignalFence : TsanAtomicThreadFence;
+ CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
+ ReplaceInstWithInst(I, C);
}
return true;
}
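
Note: with this hunk the remaining atomic instructions are rewritten into calls: atomicrmw becomes __tsan_atomicN_fetch_<op>, cmpxchg becomes __tsan_atomicN_compare_exchange_val, and fence becomes a thread or signal fence depending on its synchronization scope. A hedged sketch of the implied runtime ABI (the authoritative prototypes live in compiler-rt and may differ in detail; 32-bit shown, other widths follow the pattern):

    typedef int          morder;  // ordering value built by createOrdering()
    typedef unsigned int a32;

    a32  __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
    a32  __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 cmp,
                                              a32 xchg, morder mo);
    void __tsan_atomic_thread_fence(morder mo);
    void __tsan_atomic_signal_fence(morder mo);
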
diff --git a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index a3c426a..123ed0f 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -27,6 +27,7 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/DominatorInternals.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Assembly/Writer.h"
@@ -37,12 +38,13 @@
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
+#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -146,9 +148,18 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
TLInfo = &getAnalysis<TargetLibraryInfo>();
DT = getAnalysisIfAvailable<DominatorTree>();
PFI = getAnalysisIfAvailable<ProfileInfo>();
- OptSize = F.hasFnAttr(Attribute::OptimizeForSize);
+ OptSize = F.getFnAttributes().hasAttribute(Attributes::OptimizeForSize);
+
+ /// This optimization identifies DIV instructions that can be
+ /// profitably bypassed and carried out with a shorter, faster divide.
+ if (TLI && TLI->isSlowDivBypassed()) {
+ const DenseMap<unsigned int, unsigned int> &BypassWidths =
+ TLI->getBypassSlowDivWidths();
+ for (Function::iterator I = F.begin(); I != F.end(); I++)
+ EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
+ }
- // First pass, eliminate blocks that contain only PHI nodes and an
+ // Eliminate blocks that contain only PHI nodes and an
// unconditional branch.
EverMadeChange |= EliminateMostlyEmptyBlocks(F);
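
Note: bypassSlowDivision guards an expensive full-width division with a cheap operand-width test. A source-level sketch of the control flow it creates (illustrative only; the pass works on IR, and the widths come from getBypassSlowDivWidths()):

    // Use the narrow divide when both operands fit in 32 bits.
    static unsigned long long divBypass(unsigned long long a,
                                        unsigned long long b) {
      if (((a | b) >> 32) == 0)                    // high halves both empty?
        return (unsigned int)a / (unsigned int)b;  // short, fast divide
      return a / b;                                // full 64-bit divide
    }
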
@@ -160,7 +171,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
bool MadeChange = true;
while (MadeChange) {
MadeChange = false;
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
+ for (Function::iterator I = F.begin(); I != F.end(); ) {
BasicBlock *BB = I++;
MadeChange |= OptimizeBlock(*BB);
}
@@ -215,11 +226,13 @@ bool CodeGenPrepare::EliminateFallThrough(Function &F) {
// edge, just collapse it.
BasicBlock *SinglePred = BB->getSinglePredecessor();
- if (!SinglePred || SinglePred == BB) continue;
+ // Don't merge if BB's address is taken.
+ if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
if (Term && !Term->isConditional()) {
Changed = true;
+    DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
// Remember if SinglePred was the entry block of the function.
// If so, we will need to move BB back to the entry position.
bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
@@ -230,7 +243,6 @@ bool CodeGenPrepare::EliminateFallThrough(Function &F) {
// We have erased a block. Update the iterator.
I = BB;
- DEBUG(dbgs() << "Merged:\n"<< *SinglePred << "\n\n\n");
}
}
return Changed;
@@ -610,7 +622,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// happens.
WeakVH IterHandle(CurInstIterator);
- replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0,
+ replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
TLInfo, ModifiedDT ? 0 : DT);
// If the iterator instruction was recursively deleted, start over at the
@@ -634,8 +646,8 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// From here on out we're working with named functions.
if (CI->getCalledFunction() == 0) return false;
- // We'll need TargetData from here on out.
- const TargetData *TD = TLI ? TLI->getTargetData() : 0;
+ // We'll need DataLayout from here on out.
+ const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
if (!TD) return false;
// Lower all default uses of _chk calls. This is very similar
@@ -649,6 +661,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
+/// @code
/// bb0:
/// %tmp0 = tail call i32 @f0()
/// br label %return
@@ -661,9 +674,11 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
/// return:
/// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
/// ret i32 %retval
+/// @endcode
///
/// =>
///
+/// @code
/// bb0:
/// %tmp0 = tail call i32 @f0()
/// ret i32 %tmp0
@@ -673,7 +688,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
/// bb2:
/// %tmp2 = tail call i32 @f2()
/// ret i32 %tmp2
-///
+/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
if (!TLI)
return false;
@@ -699,7 +714,8 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
// See llvm::isInTailCallPosition().
const Function *F = BB->getParent();
Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
- if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ if (CallerRetAttr.hasAttribute(Attributes::ZExt) ||
+ CallerRetAttr.hasAttribute(Attributes::SExt))
return false;
// Make sure there are no instructions between the PHI and return, or that the
@@ -757,7 +773,10 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
// Conservatively require the attributes of the call to match those of the
// return. Ignore noalias because it doesn't affect the call sequence.
Attributes CalleeRetAttr = CS.getAttributes().getRetAttributes();
- if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
+ if (AttrBuilder(CalleeRetAttr).
+ removeAttribute(Attributes::NoAlias) !=
+ AttrBuilder(CallerRetAttr).
+ removeAttribute(Attributes::NoAlias))
continue;
// Make sure the call instruction is followed by an unconditional branch to
@@ -774,7 +793,7 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
}
// If we eliminated all predecessors of the block, delete the block now.
- if (Changed && pred_begin(BB) == pred_end(BB))
+ if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
BB->eraseFromParent();
return Changed;
@@ -914,7 +933,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst);
Type *IntPtrTy =
- TLI->getTargetData()->getIntPtrType(AccessTy->getContext());
+ TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());
Value *Result = 0;
@@ -988,7 +1007,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
WeakVH IterHandle(CurInstIterator);
BasicBlock *BB = CurInstIterator->getParent();
- RecursivelyDeleteTriviallyDeadInstructions(Repl);
+ RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
if (IterHandle != CurInstIterator) {
// If the iterator instruction was recursively deleted, start over at the
@@ -1174,17 +1193,32 @@ static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
}
+/// If we have a SelectInst that will likely profit from branch prediction,
+/// turn it into a branch.
bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
- // If we have a SelectInst that will likely profit from branch prediction,
- // turn it into a branch.
- if (DisableSelectToBranch || OptSize || !TLI ||
- !TLI->isPredictableSelectExpensive())
- return false;
+ bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
- if (!SI->getCondition()->getType()->isIntegerTy(1) ||
- !isFormingBranchFromSelectProfitable(SI))
+  // Can we convert the 'select' into a branch (control flow)?
+ if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
return false;
+ TargetLowering::SelectSupportKind SelectKind;
+ if (VectorCond)
+ SelectKind = TargetLowering::VectorMaskSelect;
+ else if (SI->getType()->isVectorTy())
+ SelectKind = TargetLowering::ScalarCondVectorVal;
+ else
+ SelectKind = TargetLowering::ScalarValSelect;
+
+  // Do we have efficient codegen support for this kind of 'select'?
+ if (TLI->isSelectSupported(SelectKind)) {
+ // We have efficient codegen support for the select instruction.
+ // Check if it is profitable to keep this 'select'.
+ if (!TLI->isPredictableSelectExpensive() ||
+ !isFormingBranchFromSelectProfitable(SI))
+ return false;
+ }
+
ModifiedDT = true;
// First, we split the block containing the select into 2 blocks.
@@ -1302,7 +1336,7 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
bool MadeChange = false;
CurInstIterator = BB.begin();
- for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; )
+ while (CurInstIterator != BB.end())
MadeChange |= OptimizeInst(CurInstIterator++);
return MadeChange;
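
Note: OptimizeSelectInst now consults the target first: a select is turned into a branch only when the target lacks efficient select support, or when selects are expensive and the condition is likely to predict well. A source-level sketch of the two shapes involved:

    int selForm(int c, int a, int b) { return c ? a : b; }  // cmov-like

    int brForm(int c, int a, int b) {   // what splitting the block produces
      int x;
      if (c) x = a; else x = b;         // wins when c is well predicted
      return x;
    }
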
diff --git a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
index 5430f62..369720b 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
@@ -24,7 +24,7 @@
#include "llvm/Constant.h"
#include "llvm/Instruction.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/ADT/Statistic.h"
@@ -67,7 +67,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.insert(&*i);
}
bool Changed = false;
- TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
while (!WorkList.empty()) {
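
Note: this is the mechanical rename that runs through nearly every file in the commit: TargetData became DataLayout and moved out of Target/. The replacement pattern, taken from the hunks themselves (not compilable outside an LLVM tree of this vintage):

    #include "llvm/DataLayout.h"   // was "llvm/Target/TargetData.h"
    // ...inside a pass:
    // DataLayout *TD = getAnalysisIfAvailable<DataLayout>();  // was TargetData
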
diff --git a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 9b0aadb..3ec6f3d 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -235,6 +235,11 @@ bool CorrelatedValuePropagation::processSwitch(SwitchInst *SI) {
// This case never fires - remove it.
CI.getCaseSuccessor()->removePredecessor(BB);
SI->removeCase(CI); // Does not invalidate the iterator.
+
+ // The condition can be modified by removePredecessor's PHI simplification
+ // logic.
+ Cond = SI->getCondition();
+
++NumDeadCases;
Changed = true;
} else if (State == LazyValueInfo::True) {
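
Note: removePredecessor can simplify PHI nodes in the successor, and when the cached switch condition was computed through such a PHI the old handle goes stale. The hazard in miniature, using the identifiers from this hunk (a sketch, not new code):

    Value *Cond = SI->getCondition();              // handle cached earlier
    CI.getCaseSuccessor()->removePredecessor(BB);  // may fold PHIs away
    Cond = SI->getCondition();                     // re-read the fresh value
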
diff --git a/contrib/llvm/lib/Transforms/Scalar/DCE.cpp b/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
index 8dbcc23..a2e074f 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
@@ -22,6 +22,7 @@
#include "llvm/Instruction.h"
#include "llvm/Pass.h"
#include "llvm/Support/InstIterator.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
@@ -38,10 +39,11 @@ namespace {
initializeDeadInstEliminationPass(*PassRegistry::getPassRegistry());
}
virtual bool runOnBasicBlock(BasicBlock &BB) {
+ TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
bool Changed = false;
for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) {
Instruction *Inst = DI++;
- if (isInstructionTriviallyDead(Inst)) {
+ if (isInstructionTriviallyDead(Inst, TLI)) {
Inst->eraseFromParent();
Changed = true;
++DIEEliminated;
@@ -87,6 +89,8 @@ char DCE::ID = 0;
INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false)
bool DCE::runOnFunction(Function &F) {
+ TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
+
// Start out with all of the instructions in the worklist...
std::vector<Instruction*> WorkList;
for (inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i)
@@ -101,7 +105,7 @@ bool DCE::runOnFunction(Function &F) {
Instruction *I = WorkList.back();
WorkList.pop_back();
- if (isInstructionTriviallyDead(I)) { // If the instruction is dead.
+ if (isInstructionTriviallyDead(I, TLI)) { // If the instruction is dead.
// Loop over all of the values that the instruction uses, if there are
// instructions being used, add them to the worklist, because they might
// go dead after this one is removed.
@@ -114,13 +118,8 @@ bool DCE::runOnFunction(Function &F) {
I->eraseFromParent();
// Remove the instruction from the worklist if it still exists in it.
- for (std::vector<Instruction*>::iterator WI = WorkList.begin();
- WI != WorkList.end(); ) {
- if (*WI == I)
- WI = WorkList.erase(WI);
- else
- ++WI;
- }
+ WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), I),
+ WorkList.end());
MadeChange = true;
++DCEEliminated;
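
Note: the hand-written erase loop is replaced by the standard erase-remove idiom, which compacts the vector in a single linear pass. A standalone equivalent:

    #include <algorithm>
    #include <vector>

    // std::remove shifts the survivors forward; erase trims the dead tail.
    static void removeAll(std::vector<int> &V, int X) {
      V.erase(std::remove(V.begin(), V.end(), X), V.end());
    }
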
diff --git a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 8b1283f..736cc05 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -29,7 +29,8 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SetVector.h"
@@ -45,6 +46,7 @@ namespace {
AliasAnalysis *AA;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
+ const TargetLibraryInfo *TLI;
static char ID; // Pass identification, replacement for typeid
DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
@@ -55,6 +57,7 @@ namespace {
AA = &getAnalysis<AliasAnalysis>();
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
+ TLI = AA->getTargetLibraryInfo();
bool Changed = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
@@ -106,6 +109,7 @@ FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
///
static void DeleteDeadInstruction(Instruction *I,
MemoryDependenceAnalysis &MD,
+ const TargetLibraryInfo *TLI,
SmallSetVector<Value*, 16> *ValueSet = 0) {
SmallVector<Instruction*, 32> NowDeadInsts;
@@ -130,7 +134,7 @@ static void DeleteDeadInstruction(Instruction *I,
if (!Op->use_empty()) continue;
if (Instruction *OpI = dyn_cast<Instruction>(Op))
- if (isInstructionTriviallyDead(OpI))
+ if (isInstructionTriviallyDead(OpI, TLI))
NowDeadInsts.push_back(OpI);
}
@@ -143,7 +147,7 @@ static void DeleteDeadInstruction(Instruction *I,
/// hasMemoryWrite - Does this instruction write some memory? This only returns
/// true for things that we can analyze with other helpers below.
-static bool hasMemoryWrite(Instruction *I) {
+static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
if (isa<StoreInst>(I))
return true;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
@@ -158,6 +162,26 @@ static bool hasMemoryWrite(Instruction *I) {
return true;
}
}
+ if (CallSite CS = I) {
+ if (Function *F = CS.getCalledFunction()) {
+ if (TLI && TLI->has(LibFunc::strcpy) &&
+ F->getName() == TLI->getName(LibFunc::strcpy)) {
+ return true;
+ }
+ if (TLI && TLI->has(LibFunc::strncpy) &&
+ F->getName() == TLI->getName(LibFunc::strncpy)) {
+ return true;
+ }
+ if (TLI && TLI->has(LibFunc::strcat) &&
+ F->getName() == TLI->getName(LibFunc::strcat)) {
+ return true;
+ }
+ if (TLI && TLI->has(LibFunc::strncat) &&
+ F->getName() == TLI->getName(LibFunc::strncat)) {
+ return true;
+ }
+ }
+ }
return false;
}
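
Note: the four string routines are treated as memory writes only when TargetLibraryInfo confirms the target actually provides that function under that name. The repeated test could be factored as follows (a sketch built from the calls visible above):

    static bool callsNamedLibFunc(const TargetLibraryInfo *TLI,
                                  const Function *F, LibFunc::Func LF) {
      return TLI && TLI->has(LF) && F->getName() == TLI->getName(LF);
    }
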
@@ -175,7 +199,7 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// memset/memcpy, which writes more than an i8.
- if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
+ if (Loc.Size == AliasAnalysis::UnknownSize && AA.getDataLayout() == 0)
return AliasAnalysis::Location();
return Loc;
}
@@ -189,7 +213,7 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// init.trampoline, which writes more than an i8.
- if (AA.getTargetData() == 0) return AliasAnalysis::Location();
+ if (AA.getDataLayout() == 0) return AliasAnalysis::Location();
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
@@ -205,7 +229,8 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
/// instruction if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
- assert(hasMemoryWrite(Inst) && "Unknown instruction case");
+ assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
+ "Unknown instruction case");
// The only instructions that both read and write are the mem transfer
// instructions (memcpy/memmove).
@@ -222,23 +247,29 @@ static bool isRemovable(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->isUnordered();
- IntrinsicInst *II = cast<IntrinsicInst>(I);
- switch (II->getIntrinsicID()) {
- default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
- case Intrinsic::lifetime_end:
- // Never remove dead lifetime_end's, e.g. because it is followed by a
- // free.
- return false;
- case Intrinsic::init_trampoline:
- // Always safe to remove init_trampoline.
- return true;
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
+ case Intrinsic::lifetime_end:
+ // Never remove dead lifetime_end's, e.g. because it is followed by a
+ // free.
+ return false;
+ case Intrinsic::init_trampoline:
+ // Always safe to remove init_trampoline.
+ return true;
- case Intrinsic::memset:
- case Intrinsic::memmove:
- case Intrinsic::memcpy:
- // Don't remove volatile memory intrinsics.
- return !cast<MemIntrinsic>(II)->isVolatile();
+ case Intrinsic::memset:
+ case Intrinsic::memmove:
+ case Intrinsic::memcpy:
+ // Don't remove volatile memory intrinsics.
+ return !cast<MemIntrinsic>(II)->isVolatile();
+ }
}
+
+ if (CallSite CS = I)
+ return CS.getInstruction()->use_empty();
+
+ return false;
}
@@ -249,14 +280,19 @@ static bool isShortenable(Instruction *I) {
if (isa<StoreInst>(I))
return false;
- IntrinsicInst *II = cast<IntrinsicInst>(I);
- switch (II->getIntrinsicID()) {
- default: return false;
- case Intrinsic::memset:
- case Intrinsic::memcpy:
- // Do shorten memory intrinsics.
- return true;
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ // Do shorten memory intrinsics.
+ return true;
+ }
}
+
+  // Don't shorten library calls for now.
+
+ return false;
}
/// getStoredPointerOperand - Return the pointer that is being written to.
@@ -266,17 +302,23 @@ static Value *getStoredPointerOperand(Instruction *I) {
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
return MI->getDest();
- IntrinsicInst *II = cast<IntrinsicInst>(I);
- switch (II->getIntrinsicID()) {
- default: llvm_unreachable("Unexpected intrinsic!");
- case Intrinsic::init_trampoline:
- return II->getArgOperand(0);
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default: llvm_unreachable("Unexpected intrinsic!");
+ case Intrinsic::init_trampoline:
+ return II->getArgOperand(0);
+ }
}
+
+ CallSite CS = I;
+  // All the supported functions so far happen to take the destination as
+  // their first argument.
+ return CS.getArgument(0);
}
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
uint64_t Size;
- if (getObjectSize(V, Size, AA.getTargetData()))
+ if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -309,10 +351,10 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// comparison.
if (Later.Size == AliasAnalysis::UnknownSize ||
Earlier.Size == AliasAnalysis::UnknownSize) {
- // If we have no TargetData information around, then the size of the store
+ // If we have no DataLayout information around, then the size of the store
// is inferrable from the pointee type. If they are the same type, then
// we know that the store is safe.
- if (AA.getTargetData() == 0 &&
+ if (AA.getDataLayout() == 0 &&
Later.Ptr->getType() == Earlier.Ptr->getType())
return OverwriteComplete;
@@ -328,13 +370,13 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// larger than the earlier one.
if (Later.Size == AliasAnalysis::UnknownSize ||
Earlier.Size == AliasAnalysis::UnknownSize ||
- AA.getTargetData() == 0)
+ AA.getDataLayout() == 0)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval argument). If so, then it clearly overwrites any
// other store to the same object.
- const TargetData &TD = *AA.getTargetData();
+ const DataLayout &TD = *AA.getDataLayout();
const Value *UO1 = GetUnderlyingObject(P1, &TD),
*UO2 = GetUnderlyingObject(P2, &TD);
@@ -454,13 +496,13 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
Instruction *Inst = BBI++;
// Handle 'free' calls specially.
- if (CallInst *F = isFreeCall(Inst)) {
+ if (CallInst *F = isFreeCall(Inst, TLI)) {
MadeChange |= HandleFree(F);
continue;
}
// If we find something that writes memory, get its memory dependence.
- if (!hasMemoryWrite(Inst))
+ if (!hasMemoryWrite(Inst, TLI))
continue;
MemDepResult InstDep = MD->getDependency(Inst);
@@ -483,7 +525,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// in case we need it.
WeakVH NextInst(BBI);
- DeleteDeadInstruction(SI, *MD);
+ DeleteDeadInstruction(SI, *MD, TLI);
if (NextInst == 0) // Next instruction deleted.
BBI = BB.begin();
@@ -530,7 +572,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
<< *DepWrite << "\n KILLER: " << *Inst << '\n');
// Delete the store and now-dead instructions that feed it.
- DeleteDeadInstruction(DepWrite, *MD);
+ DeleteDeadInstruction(DepWrite, *MD, TLI);
++NumFastStores;
MadeChange = true;
@@ -627,7 +669,7 @@ bool DSE::HandleFree(CallInst *F) {
MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
while (Dep.isDef() || Dep.isClobber()) {
Instruction *Dependency = Dep.getInst();
- if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
+ if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
break;
Value *DepPointer =
@@ -640,7 +682,7 @@ bool DSE::HandleFree(CallInst *F) {
Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
// DCE instructions only used to calculate that store
- DeleteDeadInstruction(Dependency, *MD);
+ DeleteDeadInstruction(Dependency, *MD, TLI);
++NumFastStores;
MadeChange = true;
@@ -659,6 +701,22 @@ bool DSE::HandleFree(CallInst *F) {
return MadeChange;
}
+namespace {
+ struct CouldRef {
+ typedef Value *argument_type;
+ const CallSite CS;
+ AliasAnalysis *AA;
+
+ bool operator()(Value *I) {
+ // See if the call site touches the value.
+ AliasAnalysis::ModRefResult A =
+ AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
+
+ return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
+ }
+ };
+}
+
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block. Ex:
/// %A = alloca i32
@@ -680,7 +738,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Okay, so these are dead heap objects, but if the pointer never escapes
// then it's leaked by this function anyways.
- else if (isAllocLikeFn(I) && !PointerMayBeCaptured(I, true, true))
+ else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
DeadStackObjects.insert(I);
}
@@ -696,7 +754,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
--BBI;
// If we find a store, check to see if it points into a dead stack value.
- if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
+ if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<Value *, 4> Pointers;
GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
@@ -724,7 +782,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
dbgs() << '\n');
// DCE instructions only used to calculate that store.
- DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
+ DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
++NumFastStores;
MadeChange = true;
continue;
@@ -732,9 +790,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
}
// Remove any dead non-memory-mutating instructions.
- if (isInstructionTriviallyDead(BBI)) {
+ if (isInstructionTriviallyDead(BBI, TLI)) {
Instruction *Inst = BBI++;
- DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
+ DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
++NumFastOther;
MadeChange = true;
continue;
@@ -750,7 +808,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (CallSite CS = cast<Value>(BBI)) {
// Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition.
- if (isAllocLikeFn(BBI))
+ if (isAllocLikeFn(BBI, TLI))
DeadStackObjects.remove(BBI);
// If this call does not access memory, it can't be loading any of our
@@ -760,20 +818,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// If the call might load from any of our allocas, then any store above
// the call is live.
- SmallVector<Value*, 8> LiveAllocas;
- for (SmallSetVector<Value*, 16>::iterator I = DeadStackObjects.begin(),
- E = DeadStackObjects.end(); I != E; ++I) {
- // See if the call site touches it.
- AliasAnalysis::ModRefResult A =
- AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));
-
- if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
- LiveAllocas.push_back(*I);
- }
-
- for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
- E = LiveAllocas.end(); I != E; ++I)
- DeadStackObjects.remove(*I);
+ CouldRef Pred = { CS, AA };
+ DeadStackObjects.remove_if(Pred);
// If all of the allocas were clobbered by the call then we're not going
// to find anything else to process.
@@ -816,6 +862,20 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
return MadeChange;
}
+namespace {
+ struct CouldAlias {
+ typedef Value *argument_type;
+ const AliasAnalysis::Location &LoadedLoc;
+ AliasAnalysis *AA;
+
+ bool operator()(Value *I) {
+ // See if the loaded location could alias the stack location.
+ AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
+ return !AA->isNoAlias(StackLoc, LoadedLoc);
+ }
+ };
+}
+
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
@@ -834,16 +894,7 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
return;
}
- SmallVector<Value*, 16> NowLive;
- for (SmallSetVector<Value*, 16>::iterator I = DeadStackObjects.begin(),
- E = DeadStackObjects.end(); I != E; ++I) {
- // See if the loaded location could alias the stack location.
- AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
- if (!AA->isNoAlias(StackLoc, LoadedLoc))
- NowLive.push_back(*I);
- }
-
- for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
- I != E; ++I)
- DeadStackObjects.remove(*I);
+ // Remove objects that could alias LoadedLoc.
+ CouldAlias Pred = { LoadedLoc, AA };
+ DeadStackObjects.remove_if(Pred);
}
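
Note: CouldRef and CouldAlias are pre-C++11 stand-ins for lambdas: a struct with operator() handed to SetVector::remove_if, replacing the old collect-then-remove double loops. A standalone analogue (argument_type mirrors the structs above; plain std::remove_if does not require it):

    #include <algorithm>
    #include <vector>

    struct IsNegative {               // functor in place of a lambda
      typedef int argument_type;
      bool operator()(int V) const { return V < 0; }
    };

    static void dropNegatives(std::vector<int> &V) {
      V.erase(std::remove_if(V.begin(), V.end(), IsNegative()), V.end());
    }
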
diff --git a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 9759549..101009d 100644
--- a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -18,11 +18,12 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
#include <deque>
@@ -90,35 +91,56 @@ template<> struct DenseMapInfo<SimpleValue> {
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
Instruction *Inst = Val.Inst;
-
// Hash in all of the operands as pointers.
- unsigned Res = 0;
- for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
- Res ^= getHash(Inst->getOperand(i)) << (i & 0xF);
+ if (BinaryOperator* BinOp = dyn_cast<BinaryOperator>(Inst)) {
+ Value *LHS = BinOp->getOperand(0);
+ Value *RHS = BinOp->getOperand(1);
+ if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
+ std::swap(LHS, RHS);
+
+ if (isa<OverflowingBinaryOperator>(BinOp)) {
+ // Hash the overflow behavior
+ unsigned Overflow =
+ BinOp->hasNoSignedWrap() * OverflowingBinaryOperator::NoSignedWrap |
+ BinOp->hasNoUnsignedWrap() * OverflowingBinaryOperator::NoUnsignedWrap;
+ return hash_combine(BinOp->getOpcode(), Overflow, LHS, RHS);
+ }
- if (CastInst *CI = dyn_cast<CastInst>(Inst))
- Res ^= getHash(CI->getType());
- else if (CmpInst *CI = dyn_cast<CmpInst>(Inst))
- Res ^= CI->getPredicate();
- else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst)) {
- for (ExtractValueInst::idx_iterator I = EVI->idx_begin(),
- E = EVI->idx_end(); I != E; ++I)
- Res ^= *I;
- } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst)) {
- for (InsertValueInst::idx_iterator I = IVI->idx_begin(),
- E = IVI->idx_end(); I != E; ++I)
- Res ^= *I;
- } else {
- // nothing extra to hash in.
- assert((isa<CallInst>(Inst) ||
- isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
- isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
- isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst)) &&
- "Invalid/unknown instruction");
+ return hash_combine(BinOp->getOpcode(), LHS, RHS);
}
+ if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
+ Value *LHS = CI->getOperand(0);
+ Value *RHS = CI->getOperand(1);
+ CmpInst::Predicate Pred = CI->getPredicate();
+ if (Inst->getOperand(0) > Inst->getOperand(1)) {
+ std::swap(LHS, RHS);
+ Pred = CI->getSwappedPredicate();
+ }
+ return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
+ }
+
+ if (CastInst *CI = dyn_cast<CastInst>(Inst))
+ return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));
+
+ if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
+ return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
+ hash_combine_range(EVI->idx_begin(), EVI->idx_end()));
+
+ if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
+ return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
+ IVI->getOperand(1),
+ hash_combine_range(IVI->idx_begin(), IVI->idx_end()));
+
+ assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
+ isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
+ isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
+ isa<ShuffleVectorInst>(Inst)) && "Invalid/unknown instruction");
+
// Mix in the opcode.
- return (Res << 1) ^ Inst->getOpcode();
+ return hash_combine(Inst->getOpcode(),
+ hash_combine_range(Inst->value_op_begin(),
+ Inst->value_op_end()));
}
bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
@@ -128,7 +150,41 @@ bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
return LHSI == RHSI;
if (LHSI->getOpcode() != RHSI->getOpcode()) return false;
- return LHSI->isIdenticalTo(RHSI);
+ if (LHSI->isIdenticalTo(RHSI)) return true;
+
+  // If we're not strictly identical, we may still match modulo commutativity.
+ if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
+ if (!LHSBinOp->isCommutative())
+ return false;
+
+ assert(isa<BinaryOperator>(RHSI)
+ && "same opcode, but different instruction type?");
+ BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);
+
+ // Check overflow attributes
+ if (isa<OverflowingBinaryOperator>(LHSBinOp)) {
+ assert(isa<OverflowingBinaryOperator>(RHSBinOp)
+ && "same opcode, but different operator type?");
+ if (LHSBinOp->hasNoUnsignedWrap() != RHSBinOp->hasNoUnsignedWrap() ||
+ LHSBinOp->hasNoSignedWrap() != RHSBinOp->hasNoSignedWrap())
+ return false;
+ }
+
+ // Commuted equality
+ return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
+ LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
+ }
+ if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
+ assert(isa<CmpInst>(RHSI)
+ && "same opcode, but different instruction type?");
+ CmpInst *RHSCmp = cast<CmpInst>(RHSI);
+ // Commuted equality
+ return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
+ LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
+ LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
+ }
+
+ return false;
}
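
Note: hashing and equality now canonicalize commutative operands (ordering the pair by pointer value), so a+b and b+a land in the same bucket and compare equal; compares additionally match modulo the swapped predicate. The canonicalize-before-hash idea in isolation (a sketch; the pass uses hash_combine rather than this toy mix):

    #include <algorithm>
    #include <stdint.h>

    static uintptr_t hashPair(unsigned Opcode, const void *L, const void *R) {
      if (L > R)
        std::swap(L, R);              // canonical operand order, as above
      uintptr_t H = Opcode;
      H = H * 131u + reinterpret_cast<uintptr_t>(L);
      H = H * 131u + reinterpret_cast<uintptr_t>(R);
      return H;
    }
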
//===----------------------------------------------------------------------===//
@@ -216,7 +272,7 @@ namespace {
/// cases.
class EarlyCSE : public FunctionPass {
public:
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
DominatorTree *DT;
typedef RecyclingAllocator<BumpPtrAllocator,
@@ -274,7 +330,8 @@ private:
CallScope(*availableCalls) {}
private:
- NodeScope(const NodeScope&); // DO NOT IMPLEMENT
+ NodeScope(const NodeScope&) LLVM_DELETED_FUNCTION;
+ void operator=(const NodeScope&) LLVM_DELETED_FUNCTION;
ScopedHTType::ScopeTy Scope;
LoadHTType::ScopeTy LoadScope;
@@ -313,7 +370,8 @@ private:
void process() { Processed = true; }
private:
- StackNode(const StackNode&); // DO NOT IMPLEMENT
+ StackNode(const StackNode&) LLVM_DELETED_FUNCTION;
+ void operator=(const StackNode&) LLVM_DELETED_FUNCTION;
// Members.
unsigned CurrentGeneration;
@@ -374,7 +432,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
Instruction *Inst = I++;
// Dead instructions should just be removed.
- if (isInstructionTriviallyDead(Inst)) {
+ if (isInstructionTriviallyDead(Inst, TLI)) {
DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
Inst->eraseFromParent();
Changed = true;
@@ -506,7 +564,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
bool EarlyCSE::runOnFunction(Function &F) {
std::deque<StackNode *> nodesToProcess;
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
index 4822fd0..f003e06 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -41,7 +41,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PatternMatch.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -271,16 +271,16 @@ void ValueTable::add(Value *V, uint32_t num) {
valueNumbering.insert(std::make_pair(V, num));
}
-uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
+uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
if (AA->doesNotAccessMemory(C)) {
Expression exp = create_expression(C);
- uint32_t& e = expressionNumbering[exp];
+ uint32_t &e = expressionNumbering[exp];
if (!e) e = nextValueNumber++;
valueNumbering[C] = e;
return e;
} else if (AA->onlyReadsMemory(C)) {
Expression exp = create_expression(C);
- uint32_t& e = expressionNumbering[exp];
+ uint32_t &e = expressionNumbering[exp];
if (!e) {
e = nextValueNumber++;
valueNumbering[C] = e;
@@ -413,7 +413,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) {
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
- case Instruction::Or :
+ case Instruction::Or:
case Instruction::Xor:
case Instruction::ICmp:
case Instruction::FCmp:
@@ -503,7 +503,7 @@ namespace {
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
ValueTable VN;
@@ -535,7 +535,7 @@ namespace {
InstrsToErase.push_back(I);
}
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
@@ -632,6 +632,7 @@ INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
errs() << "{\n";
for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
@@ -641,6 +642,7 @@ void GVN::dump(DenseMap<uint32_t, Value*>& d) {
}
errs() << "}\n";
}
+#endif
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
@@ -728,7 +730,7 @@ SpeculationFailure:
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
Type *LoadTy,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the loaded or stored value is an first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
@@ -744,7 +746,6 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
return true;
}
-
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace and
@@ -754,7 +755,7 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
Type *LoadedTy,
Instruction *InsertPt,
- const TargetData &TD) {
+ const DataLayout &TD) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
return 0;
@@ -767,24 +768,25 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// If the store and reload are the same size, we can always reuse it.
if (StoreSize == LoadSize) {
// Pointer to Pointer -> use bitcast.
- if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy())
+ if (StoredValTy->getScalarType()->isPointerTy() &&
+ LoadedTy->getScalarType()->isPointerTy())
return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
// Convert source pointers to integers, which can be bitcast.
- if (StoredValTy->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
+ if (StoredValTy->getScalarType()->isPointerTy()) {
+ StoredValTy = TD.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
Type *TypeToCastTo = LoadedTy;
- if (TypeToCastTo->isPointerTy())
- TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
+ if (TypeToCastTo->getScalarType()->isPointerTy())
+ TypeToCastTo = TD.getIntPtrType(TypeToCastTo);
if (StoredValTy != TypeToCastTo)
StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
// Cast to pointer if the load needs a pointer type.
- if (LoadedTy->isPointerTy())
+ if (LoadedTy->getScalarType()->isPointerTy())
StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
return StoredVal;
@@ -796,8 +798,8 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");
// Convert source pointers to integers, which can be manipulated.
- if (StoredValTy->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
+ if (StoredValTy->getScalarType()->isPointerTy()) {
+ StoredValTy = TD.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
@@ -822,7 +824,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
return StoredVal;
// If the result is a pointer, inttoptr.
- if (LoadedTy->isPointerTy())
+ if (LoadedTy->getScalarType()->isPointerTy())
return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
// Otherwise, bitcast.
@@ -840,7 +842,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
Value *WritePtr,
uint64_t WriteSizeInBits,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy())
@@ -913,7 +915,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
StoreInst *DepSI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// Cannot handle reading from store of first-class aggregate yet.
if (DepSI->getValueOperand()->getType()->isStructTy() ||
DepSI->getValueOperand()->getType()->isArrayTy())
@@ -929,7 +931,7 @@ static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
- LoadInst *DepLI, const TargetData &TD){
+ LoadInst *DepLI, const DataLayout &TD){
// Cannot handle reading from store of first-class aggregate yet.
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
return -1;
@@ -957,7 +959,7 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
MemIntrinsic *MI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the mem operation is a non-constant size, we can't handle it.
ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
if (SizeCst == 0) return -1;
@@ -1007,7 +1009,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Type *LoadTy,
- Instruction *InsertPt, const TargetData &TD){
+ Instruction *InsertPt, const DataLayout &TD){
LLVMContext &Ctx = SrcVal->getType()->getContext();
uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
@@ -1017,8 +1019,9 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
- if (SrcVal->getType()->isPointerTy())
- SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx));
+ if (SrcVal->getType()->getScalarType()->isPointerTy())
+ SrcVal = Builder.CreatePtrToInt(SrcVal,
+ TD.getIntPtrType(SrcVal->getType()));
if (!SrcVal->getType()->isIntegerTy())
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
@@ -1046,7 +1049,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
- const TargetData &TD = *gvn.getTargetData();
+ const DataLayout &TD = *gvn.getDataLayout();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
// widen SrcVal out to a larger load.
unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
@@ -1105,7 +1108,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
- const TargetData &TD){
+ const DataLayout &TD){
LLVMContext &Ctx = LoadTy->getContext();
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
@@ -1229,7 +1232,7 @@ struct AvailableValueInBlock {
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
- const TargetData *TD = gvn.getTargetData();
+ const DataLayout *TD = gvn.getDataLayout();
assert(TD && "Need target data to handle type mismatch case");
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
*TD);
@@ -1251,7 +1254,7 @@ struct AvailableValueInBlock {
<< *Res << '\n' << "\n\n\n");
}
} else {
- const TargetData *TD = gvn.getTargetData();
+ const DataLayout *TD = gvn.getDataLayout();
assert(TD && "Need target data to handle type mismatch case");
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
LoadTy, BB->getTerminator(), *TD);
@@ -1299,7 +1302,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
// If new PHI nodes were created, notify alias analysis.
- if (V->getType()->isPointerTy()) {
+ if (V->getType()->getScalarType()->isPointerTy()) {
AliasAnalysis *AA = gvn.getAliasAnalysis();
for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
@@ -1436,7 +1439,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef.
- if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst) ||
+ if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
// Loading immediately after lifetime begin -> undef.
isLifetimeStart(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
@@ -1496,7 +1499,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
if (isa<PHINode>(V))
V->takeName(LI);
- if (V->getType()->isPointerTy())
+ if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
++NumGVNLoad;
@@ -1728,7 +1731,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
- if (V->getType()->isPointerTy())
+ if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
++NumPRELoad;
@@ -1855,7 +1858,7 @@ bool GVN::processLoad(LoadInst *L) {
// Replace the load!
L->replaceAllUsesWith(AvailVal);
- if (AvailVal->getType()->isPointerTy())
+ if (AvailVal->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(AvailVal);
markInstructionForDeletion(L);
++NumGVNLoad;
@@ -1912,7 +1915,7 @@ bool GVN::processLoad(LoadInst *L) {
// Remove it!
L->replaceAllUsesWith(StoredVal);
- if (StoredVal->getType()->isPointerTy())
+ if (StoredVal->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(StoredVal);
markInstructionForDeletion(L);
++NumGVNLoad;
@@ -1941,7 +1944,7 @@ bool GVN::processLoad(LoadInst *L) {
// Remove it!
patchAndReplaceAllUsesWith(AvailableVal, L);
- if (DepLI->getType()->isPointerTy())
+ if (DepLI->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(DepLI);
markInstructionForDeletion(L);
++NumGVNLoad;
@@ -1951,7 +1954,7 @@ bool GVN::processLoad(LoadInst *L) {
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
- if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
markInstructionForDeletion(L);
++NumGVNLoad;
@@ -2182,7 +2185,7 @@ bool GVN::processInstruction(Instruction *I) {
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
I->replaceAllUsesWith(V);
- if (MD && V->getType()->isPointerTy())
+ if (MD && V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(I);
++NumGVNSimpl;
@@ -2231,12 +2234,20 @@ bool GVN::processInstruction(Instruction *I) {
Value *SwitchCond = SI->getCondition();
BasicBlock *Parent = SI->getParent();
bool Changed = false;
+
+  // Count how many switch edges lead to each successor.
+ SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
+ for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
+ ++SwitchEdges[SI->getSuccessor(i)];
+
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
BasicBlock *Dst = i.getCaseSuccessor();
- BasicBlockEdge E(Parent, Dst);
- if (E.isSingleEdge())
+ // If there is only a single edge, propagate the case value into it.
+ if (SwitchEdges.lookup(Dst) == 1) {
+ BasicBlockEdge E(Parent, Dst);
Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E);
+ }
}
return Changed;
}
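
Note: propagateEquality is only sound along a single CFG edge, and several cases may share one destination, making Parent->Dst a multi-edge; counting successor edges once up front replaces a per-case isSingleEdge() scan. The unsound case in source form:

    int f(int x) {
      switch (x) {
      case 1:                  // two switch edges reach the same block,
      case 2: return x + 10;   // so x == 1 cannot be assumed inside it
      default: return 0;
      }
    }
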
@@ -2274,7 +2285,7 @@ bool GVN::processInstruction(Instruction *I) {
// Remove it!
patchAndReplaceAllUsesWith(repl, I);
- if (MD && repl->getType()->isPointerTy())
+ if (MD && repl->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(repl);
markInstructionForDeletion(I);
return true;
@@ -2285,7 +2296,7 @@ bool GVN::runOnFunction(Function& F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
@@ -2522,7 +2533,7 @@ bool GVN::performPRE(Function &F) {
addToLeaderTable(ValNo, Phi, CurrentBlock);
Phi->setDebugLoc(CurInst->getDebugLoc());
CurInst->replaceAllUsesWith(Phi);
- if (Phi->getType()->isPointerTy()) {
+ if (Phi->getType()->getScalarType()->isPointerTy()) {
// Because we have added a PHI-use of the pointer value, it has now
// "escaped" from alias analysis' perspective. We need to inform
// AA of this.
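
Note: the isPointerTy() -> getScalarType()->isPointerTy() substitution that recurs throughout this file extends the pointer checks to vectors of pointers. The pattern in isolation (same API as the hunks above):

    // True for i32* and <4 x i32*>, false for i32: getScalarType() returns a
    // vector's element type and the type itself otherwise.
    static bool isPtrOrPtrVector(Type *Ty) {
      return Ty->getScalarType()->isPointerTy();
    }
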
diff --git a/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp b/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
index b36a3cb..6301aad 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
@@ -62,7 +62,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/ADT/Statistic.h"
@@ -98,9 +98,9 @@ namespace {
}
struct GlobalCmp {
- const TargetData *TD;
+ const DataLayout *TD;
- GlobalCmp(const TargetData *td) : TD(td) { }
+ GlobalCmp(const DataLayout *td) : TD(td) { }
bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
@@ -119,7 +119,7 @@ INITIALIZE_PASS(GlobalMerge, "global-merge",
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst) const {
- const TargetData *TD = TLI->getTargetData();
+ const DataLayout *TD = TLI->getDataLayout();
// FIXME: Infer the maximum possible offset depending on the actual users
// (these max offsets are different for the users inside Thumb or ARM
@@ -170,7 +170,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
bool GlobalMerge::doInitialization(Module &M) {
SmallVector<GlobalVariable*, 16> Globals, ConstGlobals, BSSGlobals;
- const TargetData *TD = TLI->getTargetData();
+ const DataLayout *TD = TLI->getDataLayout();
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
bool Changed = false;
diff --git a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 37f8bdf..310fd61 100644
--- a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -43,7 +43,8 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
@@ -67,7 +68,8 @@ namespace {
LoopInfo *LI;
ScalarEvolution *SE;
DominatorTree *DT;
- TargetData *TD;
+ DataLayout *TD;
+ TargetLibraryInfo *TLI;
SmallVector<WeakVH, 16> DeadInsts;
bool Changed;
@@ -218,8 +220,6 @@ static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
/// ConvertToSInt - Convert APF to an integer, if possible.
static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) {
bool isExact = false;
- if (&APF.getSemantics() == &APFloat::PPCDoubleDouble)
- return false;
// See if we can convert this to an int64_t
uint64_t UIntVal;
if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero,
@@ -414,11 +414,11 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
// new comparison.
NewCompare->takeName(Compare);
Compare->replaceAllUsesWith(NewCompare);
- RecursivelyDeleteTriviallyDeadInstructions(Compare);
+ RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI);
// Delete the old floating point increment.
Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
- RecursivelyDeleteTriviallyDeadInstructions(Incr);
+ RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI);
// If the FP induction variable still has uses, this is because something else
// in the loop uses its value. In order to canonicalize the induction
@@ -431,7 +431,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
PN->getParent()->getFirstInsertionPt());
PN->replaceAllUsesWith(Conv);
- RecursivelyDeleteTriviallyDeadInstructions(PN);
+ RecursivelyDeleteTriviallyDeadInstructions(PN, TLI);
}
Changed = true;
}
@@ -549,15 +549,17 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
PN->setIncomingValue(i, ExitVal);
- // If this instruction is dead now, delete it.
- RecursivelyDeleteTriviallyDeadInstructions(Inst);
+    // If this instruction is now dead, queue it for deletion; deleting it
+    // here would invalidate our iterators.
+ if (isInstructionTriviallyDead(Inst, TLI))
+ DeadInsts.push_back(Inst);
if (NumPreds == 1) {
// Completely replace a single-pred PHI. This is safe, because the
// NewVal won't be variant in the loop, so we don't need an LCSSA phi
// node anymore.
PN->replaceAllUsesWith(ExitVal);
- RecursivelyDeleteTriviallyDeadInstructions(PN);
+ PN->eraseFromParent();
}
}
if (NumPreds != 1) {
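
Note: recursively deleting Inst inside the PHI walk could erase values the loop still references, so victims are queued on DeadInsts and reaped once iteration is over; the drain loop appears later in this same diff:

    while (!DeadInsts.empty())
      if (Instruction *Inst =
            dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
        RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
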
@@ -595,13 +597,13 @@ namespace {
class WideIVVisitor : public IVVisitor {
ScalarEvolution *SE;
- const TargetData *TD;
+ const DataLayout *TD;
public:
WideIVInfo WI;
WideIVVisitor(PHINode *NarrowIV, ScalarEvolution *SCEV,
- const TargetData *TData) :
+ const DataLayout *TData) :
SE(SCEV), TD(TData) { WI.NarrowIV = NarrowIV; }
// Implement the interface used by simplifyUsersOfIV.
@@ -1259,8 +1261,13 @@ static bool needsLFTR(Loop *L, DominatorTree *DT) {
if (!Phi)
return true;
+  // Do LFTR if the PHI node is defined in the loop but is *not* a counter.
+ int Idx = Phi->getBasicBlockIndex(L->getLoopLatch());
+ if (Idx < 0)
+ return true;
+
// Do LFTR if the exit condition's IV is *not* a simple counter.
- Value *IncV = Phi->getIncomingValueForBlock(L->getLoopLatch());
+ Value *IncV = Phi->getIncomingValue(Idx);
return Phi != getLoopPhiForCounter(IncV, L, DT);
}
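
getIncomingValueForBlock asserts when the PHI has no entry for the given block; probing with getBasicBlockIndex first turns the missing-edge case into a graceful "do LFTR" answer instead of a crash. The same guard as a standalone sketch (helper name hypothetical):

    // Hypothetical helper: the PHI's incoming value from the loop latch,
    // or null when the PHI has no edge from the latch at all.
    static llvm::Value *incomingFromLatch(llvm::PHINode *Phi, llvm::Loop *L) {
      int Idx = Phi->getBasicBlockIndex(L->getLoopLatch());
      if (Idx < 0)
        return 0;
      return Phi->getIncomingValue(Idx);
    }
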
@@ -1339,7 +1346,7 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
/// could at least handle constant BECounts.
static PHINode *
FindLoopCounter(Loop *L, const SCEV *BECount,
- ScalarEvolution *SE, DominatorTree *DT, const TargetData *TD) {
+ ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) {
uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
Value *Cond =
@@ -1696,7 +1703,8 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
+ TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
DeadInsts.clear();
Changed = false;
@@ -1763,7 +1771,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
while (!DeadInsts.empty())
if (Instruction *Inst =
dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
- RecursivelyDeleteTriviallyDeadInstructions(Inst);
+ RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
// The Rewriter may not be used from this point on.
@@ -1772,7 +1780,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
SinkUnusedInvariants(L);
// Clean up dead instructions.
- Changed |= DeleteDeadPHIs(L->getHeader());
+ Changed |= DeleteDeadPHIs(L->getHeader(), TLI);
// Check a post-condition.
assert(L->isLCSSAForm(*DT) &&
"Indvars did not leave the loop in lcssa form!");
diff --git a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index dd42c59..e7ffa09 100644
--- a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -23,7 +23,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -75,7 +75,7 @@ namespace {
/// revectored to the false side of the second if.
///
class JumpThreading : public FunctionPass {
- TargetData *TD;
+ DataLayout *TD;
TargetLibraryInfo *TLI;
LazyValueInfo *LVI;
#ifdef NDEBUG
@@ -147,7 +147,7 @@ FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }
///
bool JumpThreading::runOnFunction(Function &F) {
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
LVI = &getAnalysis<LazyValueInfo>();
@@ -1455,7 +1455,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
// At this point, the IR is fully up to date and consistent. Do a quick scan
// over the new instructions and zap any that are constants or dead. This
// frequently happens because of phi translation.
- SimplifyInstructionsInBlock(NewBB, TD);
+ SimplifyInstructionsInBlock(NewBB, TD, TLI);
// Threaded an edge!
++NumThreads;
diff --git a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
index 0192e92..4818437 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -46,7 +46,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
@@ -100,7 +100,7 @@ namespace {
LoopInfo *LI; // Current LoopInfo
DominatorTree *DT; // Dominator Tree for the current Loop.
- TargetData *TD; // TargetData for constant folding.
+ DataLayout *TD; // DataLayout for constant folding.
TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
// State that is updated as we process loops.
@@ -108,6 +108,9 @@ namespace {
BasicBlock *Preheader; // The preheader block of the current loop...
Loop *CurLoop; // The current loop we are working on...
AliasSetTracker *CurAST; // AliasSet information for the current loop...
+ bool MayThrow; // The current loop contains an instruction which
+ // may throw, thus preventing code motion of
+ // instructions with side effects.
DenseMap<Loop*, AliasSetTracker*> LoopToAliasSetMap;
/// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
@@ -204,7 +207,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
CurAST = new AliasSetTracker(*AA);
@@ -240,6 +243,15 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
CurAST->add(*BB); // Incorporate the specified basic block
}
+ MayThrow = false;
+ // TODO: We've already searched for instructions which may throw in subloops.
+ // We may want to reuse this information.
+ for (Loop::block_iterator BB = L->block_begin(), BBE = L->block_end();
+ (BB != BBE) && !MayThrow ; ++BB)
+ for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end();
+ (I != E) && !MayThrow; ++I)
+ MayThrow |= I->mayThrow();
+
// We want to visit all of the instructions in this loop... that are not parts
// of our subloops (they have already had their invariants hoisted out of
// their loop, into this loop, so there is no need to process the BODIES of
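
The new MayThrow computation is a plain double loop: visit every instruction in every block of the loop and stop at the first one that may throw. One throwing instruction is enough, since it means later instructions are no longer guaranteed to execute. The same scan factored into a helper (hypothetical name, same iterator APIs as the hunk):

    static bool loopContainsThrow(const llvm::Loop *L) {
      for (llvm::Loop::block_iterator BB = L->block_begin(),
                                      BBE = L->block_end(); BB != BBE; ++BB)
        for (llvm::BasicBlock::const_iterator I = (*BB)->begin(),
                                              E = (*BB)->end(); I != E; ++I)
          if (I->mayThrow())
            return true;   // one throwing instruction taints the whole loop
      return false;
    }
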
@@ -307,7 +319,7 @@ void LICM::SinkRegion(DomTreeNode *N) {
// If the instruction is dead, we would try to sink it because it isn't used
// in the loop, instead, just delete it.
- if (isInstructionTriviallyDead(&I)) {
+ if (isInstructionTriviallyDead(&I, TLI)) {
DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
++II;
CurAST->deleteValue(&I);
@@ -418,17 +430,22 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
if (!FoundMod) return true;
}
- // FIXME: This should use mod/ref information to see if we can hoist or sink
- // the call.
+ // FIXME: This should use mod/ref information to see if we can hoist or
+ // sink the call.
return false;
}
- // Otherwise these instructions are hoistable/sinkable
- return isa<BinaryOperator>(I) || isa<CastInst>(I) ||
- isa<SelectInst>(I) || isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
- isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
- isa<ShuffleVectorInst>(I);
+ // Only these instructions are hoistable/sinkable.
+ bool HoistableKind = (isa<BinaryOperator>(I) || isa<CastInst>(I) ||
+ isa<SelectInst>(I) || isa<GetElementPtrInst>(I) ||
+ isa<CmpInst>(I) || isa<InsertElementInst>(I) ||
+ isa<ExtractElementInst>(I) ||
+ isa<ShuffleVectorInst>(I));
+ if (!HoistableKind)
+ return false;
+
+ return isSafeToExecuteUnconditionally(I);
}
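
Behaviorally this rewrite is stricter than the code it replaces: the old version answered true for any instruction of a whitelisted kind, while the new one also demands isSafeToExecuteUnconditionally, so an otherwise-hoistable instruction that might trap or fault is now rejected. The two-stage gate, condensed (whitelist abbreviated; isSafeToExecuteUnconditionally is the LICM member used above):

    // Stage 1: opcode must be one LICM knows how to move.
    if (!(llvm::isa<llvm::BinaryOperator>(I) || llvm::isa<llvm::CastInst>(I) ||
          llvm::isa<llvm::SelectInst>(I) || llvm::isa<llvm::CmpInst>(I)))
      return false;
    // Stage 2: it must also be safe to execute unconditionally.
    return isSafeToExecuteUnconditionally(I);
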
/// isNotUsedInLoop - Return true if the only users of this instruction are
@@ -604,6 +621,12 @@ bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
}
bool LICM::isGuaranteedToExecute(Instruction &Inst) {
+
+ // Somewhere in this loop there is an instruction which may throw and make us
+ // exit the loop.
+ if (MayThrow)
+ return false;
+
// Otherwise we have to check to make sure that the instruction dominates all
// of the exit blocks. If it doesn't, then there is a path out of the loop
// which does not execute this instruction, so we can't hoist it.
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index ac1082c..a44e798 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -54,7 +54,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -65,7 +65,7 @@ STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
namespace {
class LoopIdiomRecognize : public LoopPass {
Loop *CurLoop;
- const TargetData *TD;
+ const DataLayout *TD;
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
@@ -132,7 +132,8 @@ Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }
/// and zero out all the operands of this instruction. If any of them become
/// dead, delete them and the computation tree that feeds them.
///
-static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
+static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE,
+ const TargetLibraryInfo *TLI) {
SmallVector<Instruction*, 32> NowDeadInsts;
NowDeadInsts.push_back(I);
@@ -153,7 +154,7 @@ static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
if (!Op->use_empty()) continue;
if (Instruction *OpI = dyn_cast<Instruction>(Op))
- if (isInstructionTriviallyDead(OpI))
+ if (isInstructionTriviallyDead(OpI, TLI))
NowDeadInsts.push_back(OpI);
}
@@ -164,15 +165,21 @@ static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
/// deleteIfDeadInstruction - If the specified value is a dead instruction,
/// delete it and any recursively used instructions.
-static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE) {
+static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE,
+ const TargetLibraryInfo *TLI) {
if (Instruction *I = dyn_cast<Instruction>(V))
- if (isInstructionTriviallyDead(I))
- deleteDeadInstruction(I, SE);
+ if (isInstructionTriviallyDead(I, TLI))
+ deleteDeadInstruction(I, SE, TLI);
}
bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
CurLoop = L;
+ // If the loop could not be converted to canonical form, it must have an
+ // indirectbr in it; just give up.
+ if (!L->getLoopPreheader())
+ return false;
+
// Disable loop idiom recognition if the function's name is a common idiom.
StringRef Name = L->getHeader()->getParent()->getName();
if (Name == "memset" || Name == "memcpy")
@@ -192,7 +199,7 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
return false;
// We require target data for now.
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
if (TD == 0) return false;
DT = &getAnalysis<DominatorTree>();
@@ -401,7 +408,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
-static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) {
+static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
// If the value isn't a constant, we can't promote it to being in a constant
// array. We could theoretically do a store to an alloca or something, but
// that doesn't seem worthwhile.
@@ -490,7 +497,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
StoreSize, getAnalysis<AliasAnalysis>(), TheStore)){
Expander.clear();
// If we generated new code for the base pointer, clean up.
- deleteIfDeadInstruction(BasePtr, *SE);
+ deleteIfDeadInstruction(BasePtr, *SE, TLI);
return false;
}
@@ -538,7 +545,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// Okay, the memset has been formed. Zap the original store and anything that
// feeds into it.
- deleteDeadInstruction(TheStore, *SE);
+ deleteDeadInstruction(TheStore, *SE, TLI);
++NumMemSet;
return true;
}
@@ -579,7 +586,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
getAnalysis<AliasAnalysis>(), SI)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
- deleteIfDeadInstruction(StoreBasePtr, *SE);
+ deleteIfDeadInstruction(StoreBasePtr, *SE, TLI);
return false;
}
@@ -594,8 +601,8 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
StoreSize, getAnalysis<AliasAnalysis>(), SI)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
- deleteIfDeadInstruction(LoadBasePtr, *SE);
- deleteIfDeadInstruction(StoreBasePtr, *SE);
+ deleteIfDeadInstruction(LoadBasePtr, *SE, TLI);
+ deleteIfDeadInstruction(StoreBasePtr, *SE, TLI);
return false;
}
@@ -628,7 +635,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// Okay, the memcpy has been formed. Zap the original store and anything that
// feeds into it.
- deleteDeadInstruction(SI, *SE);
+ deleteDeadInstruction(SI, *SE, TLI);
++NumMemCpy;
return true;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
index 982400c..558f62e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -18,7 +18,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -66,7 +66,7 @@ Pass *llvm::createLoopInstSimplifyPass() {
bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
LoopInfo *LI = &getAnalysis<LoopInfo>();
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallVector<BasicBlock*, 8> ExitBlocks;
@@ -120,7 +120,7 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
++NumSimplified;
}
}
- LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I);
+ LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
if (IsSubloopHeader && !isa<PHINode>(I))
break;
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 7eeb152..abe07aa 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -24,6 +24,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
+#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
@@ -256,6 +257,7 @@ bool LoopRotate::rotateLoop(Loop *L) {
return false;
BasicBlock *OrigHeader = L->getHeader();
+ BasicBlock *OrigLatch = L->getLoopLatch();
BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
if (BI == 0 || BI->isUnconditional())
@@ -267,13 +269,9 @@ bool LoopRotate::rotateLoop(Loop *L) {
if (!L->isLoopExiting(OrigHeader))
return false;
- // Updating PHInodes in loops with multiple exits adds complexity.
- // Keep it simple, and restrict loop rotation to loops with one exit only.
- // In future, lift this restriction and support for multiple exits if
- // required.
- SmallVector<BasicBlock*, 8> ExitBlocks;
- L->getExitBlocks(ExitBlocks);
- if (ExitBlocks.size() > 1)
+ // If the loop latch already contains a branch that leaves the loop then the
+ // loop is already rotated.
+ if (OrigLatch == 0 || L->isLoopExiting(OrigLatch))
return false;
// Check size of original header and reject loop if it is very big.
@@ -286,11 +284,10 @@ bool LoopRotate::rotateLoop(Loop *L) {
// Now, this loop is suitable for rotation.
BasicBlock *OrigPreheader = L->getLoopPreheader();
- BasicBlock *OrigLatch = L->getLoopLatch();
// If the loop could not be converted to canonical form, it must have an
// indirectbr in it; just give up.
- if (OrigPreheader == 0 || OrigLatch == 0)
+ if (OrigPreheader == 0)
return false;
// Anything ScalarEvolution may know about this loop or the PHI nodes
@@ -298,6 +295,8 @@ bool LoopRotate::rotateLoop(Loop *L) {
if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>())
SE->forgetLoop(L);
+ DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
+
// Find new Loop header. NewHeader is a Header's one and only successor
// that is inside loop. Header's other successor is outside the
// loop. Otherwise loop is not suitable for rotation.
@@ -408,10 +407,19 @@ bool LoopRotate::rotateLoop(Loop *L) {
// Update DominatorTree to reflect the CFG change we just made. Then split
// edges as necessary to preserve LoopSimplify form.
if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
- // Since OrigPreheader now has the conditional branch to Exit block, it is
- // the dominator of Exit.
- DT->changeImmediateDominator(Exit, OrigPreheader);
- DT->changeImmediateDominator(NewHeader, OrigPreheader);
+ // Everything that was dominated by the old loop header is now dominated
+ // by the original loop preheader. Conceptually the header was merged
+ // into the preheader, even though we reuse the actual block as a new
+ // loop latch.
+ DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
+ SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
+ OrigHeaderNode->end());
+ DomTreeNode *OrigPreheaderNode = DT->getNode(OrigPreheader);
+ for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I)
+ DT->changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode);
+
+ assert(DT->getNode(Exit)->getIDom() == OrigPreheaderNode);
+ assert(DT->getNode(NewHeader)->getIDom() == OrigPreheaderNode);
// Update OrigHeader to be dominated by the new header block.
DT->changeImmediateDominator(OrigHeader, OrigLatch);
@@ -440,6 +448,35 @@ bool LoopRotate::rotateLoop(Loop *L) {
// Update OrigHeader to be dominated by the new header block.
DT->changeImmediateDominator(NewHeader, OrigPreheader);
DT->changeImmediateDominator(OrigHeader, OrigLatch);
+
+ // Brute force incremental dominator tree update. Call
+ // findNearestCommonDominator on all CFG predecessors of each child of the
+ // original header.
+ DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
+ SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
+ OrigHeaderNode->end());
+ bool Changed;
+ do {
+ Changed = false;
+ for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I) {
+ DomTreeNode *Node = HeaderChildren[I];
+ BasicBlock *BB = Node->getBlock();
+
+ pred_iterator PI = pred_begin(BB);
+ BasicBlock *NearestDom = *PI;
+ for (pred_iterator PE = pred_end(BB); PI != PE; ++PI)
+ NearestDom = DT->findNearestCommonDominator(NearestDom, *PI);
+
+ // Remember if this changes the DomTree.
+ if (Node->getIDom()->getBlock() != NearestDom) {
+ DT->changeImmediateDominator(BB, NearestDom);
+ Changed = true;
+ }
+ }
+
+ // If the dominator changed, this may have an effect on other
+ // predecessors; continue until we reach a fixpoint.
+ } while (Changed);
}
}
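
The brute-force update leans on a standard identity: a block's immediate dominator is the nearest common dominator of all of its CFG predecessors, computed in the current tree. A single pass can leave stale parents when one node's new idom only becomes correct after another node moves, hence the do/while fixpoint. The per-node step in isolation (assumes DominatorTree *DT and a non-root BasicBlock *BB with at least one predecessor):

    llvm::pred_iterator PI = llvm::pred_begin(BB);
    llvm::BasicBlock *NCD = *PI;   // seed with the first predecessor
    for (llvm::pred_iterator PE = llvm::pred_end(BB); PI != PE; ++PI)
      NCD = DT->findNearestCommonDominator(NCD, *PI);
    if (DT->getNode(BB)->getIDom()->getBlock() != NCD)
      DT->changeImmediateDominator(BB, NCD);   // may enable further updates
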
@@ -452,6 +489,8 @@ bool LoopRotate::rotateLoop(Loop *L) {
// emitted code isn't too gross in this common case.
MergeBlockIntoPredecessor(OrigHeader, this);
+ DEBUG(dbgs() << "LoopRotation: into "; L->dump());
+
++NumRotated;
return true;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index b14a713..958348d 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -54,7 +54,7 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
-#include "llvm/Transforms/Scalar.h"
+#include "llvm/AddressingMode.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
@@ -64,6 +64,7 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Assembly/Writer.h"
+#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -121,9 +122,11 @@ void RegSortData::print(raw_ostream &OS) const {
OS << "[NumUses=" << UsedByIndices.count() << ']';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void RegSortData::dump() const {
print(errs()); errs() << '\n';
}
+#endif
namespace {
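
The #if guard added around each dump() in this file compiles the method in asserts-enabled builds, and in release builds only when LLVM_ENABLE_DUMP is defined, keeping the debug pretty-printers out of shipping binaries. The idiom in isolation, mirroring the guarded dump() above:

    #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void RegSortData::dump() const {   // debug-only; print() does the work
      print(llvm::errs());
      llvm::errs() << '\n';
    }
    #endif
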
@@ -223,7 +226,7 @@ namespace {
struct Formula {
/// AM - This is used to represent complex addressing, as well as other kinds
/// of interesting uses.
- TargetLowering::AddrMode AM;
+ AddrMode AM;
/// BaseRegs - The list of "base" registers for this use. When this is
/// non-empty, AM.HasBaseReg should be set to true.
@@ -414,9 +417,11 @@ void Formula::print(raw_ostream &OS) const {
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Formula::dump() const {
print(errs()); errs() << '\n';
}
+#endif
/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
@@ -738,7 +743,8 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
bool Changed = false;
while (!DeadInsts.empty()) {
- Instruction *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val());
+ Value *V = DeadInsts.pop_back_val();
+ Instruction *I = dyn_cast_or_null<Instruction>(V);
if (I == 0 || !isInstructionTriviallyDead(I))
continue;
@@ -973,9 +979,11 @@ void Cost::print(raw_ostream &OS) const {
OS << ", plus " << SetupCost << " setup cost";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Cost::dump() const {
print(errs()); errs() << '\n';
}
+#endif
namespace {
@@ -1059,9 +1067,11 @@ void LSRFixup::print(raw_ostream &OS) const {
OS << ", Offset=" << Offset;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRFixup::dump() const {
print(errs()); errs() << '\n';
}
+#endif
namespace {
@@ -1251,14 +1261,16 @@ void LSRUse::print(raw_ostream &OS) const {
OS << ", widest fixup type: " << *WidestFixupType;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRUse::dump() const {
print(errs()); errs() << '\n';
}
+#endif
/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
/// be completely folded into the user instruction at isel time. This includes
/// address-mode folding and special icmp tricks.
-static bool isLegalUse(const TargetLowering::AddrMode &AM,
+static bool isLegalUse(const AddrMode &AM,
LSRUse::KindType Kind, Type *AccessTy,
const TargetLowering *TLI) {
switch (Kind) {
@@ -1315,7 +1327,7 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM,
llvm_unreachable("Invalid LSRUse Kind!");
}
-static bool isLegalUse(TargetLowering::AddrMode AM,
+static bool isLegalUse(AddrMode AM,
int64_t MinOffset, int64_t MaxOffset,
LSRUse::KindType Kind, Type *AccessTy,
const TargetLowering *TLI) {
@@ -1346,7 +1358,7 @@ static bool isAlwaysFoldable(int64_t BaseOffs,
// Conservatively, create an address with an immediate and a
// base and a scale.
- TargetLowering::AddrMode AM;
+ AddrMode AM;
AM.BaseOffs = BaseOffs;
AM.BaseGV = BaseGV;
AM.HasBaseReg = HasBaseReg;
@@ -1384,7 +1396,7 @@ static bool isAlwaysFoldable(const SCEV *S,
// Conservatively, create an address with an immediate and a
// base and a scale.
- TargetLowering::AddrMode AM;
+ AddrMode AM;
AM.BaseOffs = BaseOffs;
AM.BaseGV = BaseGV;
AM.HasBaseReg = HasBaseReg;
@@ -2009,7 +2021,7 @@ LSRInstance::OptimizeLoopTermCond() {
goto decline_post_inc;
// Check for possible scaled-address reuse.
Type *AccessTy = getAccessType(UI->getUser());
- TargetLowering::AddrMode AM;
+ AddrMode AM;
AM.Scale = C->getSExtValue();
if (TLI->isLegalAddressingMode(AM, AccessTy))
goto decline_post_inc;
@@ -3435,9 +3447,11 @@ void WorkItem::print(raw_ostream &OS) const {
<< " , add offset " << Imm;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void WorkItem::dump() const {
print(errs()); errs() << '\n';
}
+#endif
/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
/// distance apart and try to form reuse opportunities between them.
@@ -4451,17 +4465,21 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs);
NewBB = NewBBs[0];
}
-
- // If PN is outside of the loop and BB is in the loop, we want to
- // move the block to be immediately before the PHI block, not
- // immediately after BB.
- if (L->contains(BB) && !L->contains(PN))
- NewBB->moveBefore(PN->getParent());
-
- // Splitting the edge can reduce the number of PHI entries we have.
- e = PN->getNumIncomingValues();
- BB = NewBB;
- i = PN->getBasicBlockIndex(BB);
+ // If NewBB==NULL, then SplitCriticalEdge refused to split because all
+ // phi predecessors are identical. The simple thing to do is skip
+ // splitting in this case rather than complicate the API.
+ if (NewBB) {
+ // If PN is outside of the loop and BB is in the loop, we want to
+ // move the block to be immediately before the PHI block, not
+ // immediately after BB.
+ if (L->contains(BB) && !L->contains(PN))
+ NewBB->moveBefore(PN->getParent());
+
+ // Splitting the edge can reduce the number of PHI entries we have.
+ e = PN->getNumIncomingValues();
+ BB = NewBB;
+ i = PN->getBasicBlockIndex(BB);
+ }
}
}
@@ -4730,9 +4748,11 @@ void LSRInstance::print(raw_ostream &OS) const {
print_uses(OS);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRInstance::dump() const {
print(errs()); errs() << '\n';
}
+#endif
namespace {
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 09a186f..0d781ac 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -22,7 +22,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <climits>
using namespace llvm;
@@ -113,7 +113,7 @@ Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial) {
/// ApproximateLoopSize - Approximate the size of the loop.
static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
- const TargetData *TD) {
+ const DataLayout *TD) {
CodeMetrics Metrics;
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I)
@@ -145,7 +145,8 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
// not user specified.
unsigned Threshold = CurrentThreshold;
if (!UserThreshold &&
- Header->getParent()->hasFnAttr(Attribute::OptimizeForSize))
+ Header->getParent()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize))
Threshold = OptSizeUnrollThreshold;
// Find trip count and trip multiple if count is not available
@@ -178,7 +179,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
// Enforce the threshold.
if (Threshold != NoThreshold) {
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
unsigned NumInlineCandidates;
unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates, TD);
DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 58f7739..047b43e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -638,7 +638,8 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
// Check to see if it would be profitable to unswitch current loop.
// Do not do non-trivial unswitch while optimizing for size.
- if (OptimizeForSize || F->hasFnAttr(Attribute::OptimizeForSize))
+ if (OptimizeForSize ||
+ F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize))
return false;
UnswitchNontrivialCondition(LoopCond, Val, currentLoop);
@@ -906,13 +907,9 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
/// specified.
static void RemoveFromWorklist(Instruction *I,
std::vector<Instruction*> &Worklist) {
- std::vector<Instruction*>::iterator WI = std::find(Worklist.begin(),
- Worklist.end(), I);
- while (WI != Worklist.end()) {
- unsigned Offset = WI-Worklist.begin();
- Worklist.erase(WI);
- WI = std::find(Worklist.begin()+Offset, Worklist.end(), I);
- }
+
+ Worklist.erase(std::remove(Worklist.begin(), Worklist.end(), I),
+ Worklist.end());
}
/// ReplaceUsesOfWith - When we find that I really equals V, remove I from the
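
The replacement turns a repeated find/erase loop, quadratic when I occurs several times, into the standard erase-remove idiom: std::remove compacts everything not equal to I toward the front and returns the new logical end, then one erase call trims the leftover tail, all in a single linear pass. Self-contained illustration:

    #include <algorithm>
    #include <vector>

    // Remove every occurrence of X from W in O(n), preserving order.
    static void removeAll(std::vector<int> &W, int X) {
      W.erase(std::remove(W.begin(), W.end(), X), W.end());
    }
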
diff --git a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 2a5ee33..517657cf 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -27,7 +27,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
@@ -38,8 +38,8 @@ STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
-static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
- bool &VariableIdxFound, const TargetData &TD){
+static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
+ bool &VariableIdxFound, const DataLayout &TD){
// Skip over the first indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (unsigned i = 1; i != Idx; ++i, ++GTI)
@@ -72,11 +72,11 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
- const TargetData &TD) {
+ const DataLayout &TD) {
Ptr1 = Ptr1->stripPointerCasts();
Ptr2 = Ptr2->stripPointerCasts();
- GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
- GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
+ GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
+ GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
bool VariableIdxFound = false;
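
Switching the dyn_cast target from GetElementPtrInst to GEPOperator is a strict generalization: GEPOperator matches both a getelementptr instruction and a getelementptr constant expression, so the offset logic now also sees addresses formed from globals at compile time. Roughly (Ptr1 as in the surrounding function):

    // GEPOperator covers both forms; GetElementPtrInst only the first:
    //   %p = getelementptr [4 x i32]* %buf, i64 0, i64 2        ; instruction
    //   ... getelementptr ([4 x i32]* @G, i64 0, i64 2) ...     ; constantexpr
    bool AnyGEPForm = llvm::isa<llvm::GEPOperator>(Ptr1->stripPointerCasts());
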
@@ -141,12 +141,12 @@ struct MemsetRange {
/// TheStores - The actual stores that make up this range.
SmallVector<Instruction*, 16> TheStores;
- bool isProfitableToUseMemset(const TargetData &TD) const;
+ bool isProfitableToUseMemset(const DataLayout &TD) const;
};
} // end anon namespace
-bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
+bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
// If we found more than 4 stores to merge or 16 bytes, use memset.
if (TheStores.size() >= 4 || End-Start >= 16) return true;
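
The profitability rule in the context line is deliberately coarse: a range merged from at least 4 stores, or covering at least 16 bytes, is assumed to amortize the fixed cost of a memset call; below both thresholds the individual stores stay. As a standalone predicate (thresholds copied from above):

    // Coarse cutoff for turning a merged store range into memset.
    static bool worthMemset(unsigned NumStores, int64_t Start, int64_t End) {
      return NumStores >= 4 || End - Start >= 16;
    }
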
@@ -192,9 +192,9 @@ class MemsetRanges {
/// because each element is relatively large and expensive to copy.
std::list<MemsetRange> Ranges;
typedef std::list<MemsetRange>::iterator range_iterator;
- const TargetData &TD;
+ const DataLayout &TD;
public:
- MemsetRanges(const TargetData &td) : TD(td) {}
+ MemsetRanges(const DataLayout &td) : TD(td) {}
typedef std::list<MemsetRange>::const_iterator const_iterator;
const_iterator begin() const { return Ranges.begin(); }
@@ -302,7 +302,7 @@ namespace {
class MemCpyOpt : public FunctionPass {
MemoryDependenceAnalysis *MD;
TargetLibraryInfo *TLI;
- const TargetData *TD;
+ const DataLayout *TD;
public:
static char ID; // Pass identification, replacement for typeid
MemCpyOpt() : FunctionPass(ID) {
@@ -332,7 +332,7 @@ namespace {
bool processMemCpy(MemCpyInst *M);
bool processMemMove(MemMoveInst *M);
bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
- uint64_t cpyLen, CallInst *C);
+ uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
uint64_t MSize);
bool processByValArgument(CallSite CS, unsigned ArgNo);
@@ -509,10 +509,18 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
}
if (C) {
+ unsigned storeAlign = SI->getAlignment();
+ if (!storeAlign)
+ storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());
+ unsigned loadAlign = LI->getAlignment();
+ if (!loadAlign)
+ loadAlign = TD->getABITypeAlignment(LI->getType());
+
bool changed = performCallSlotOptzn(LI,
SI->getPointerOperand()->stripPointerCasts(),
LI->getPointerOperand()->stripPointerCasts(),
- TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
+ TD->getTypeStoreSize(SI->getOperand(0)->getType()),
+ std::min(storeAlign, loadAlign), C);
if (changed) {
MD->removeInstruction(SI);
SI->eraseFromParent();
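
An alignment of 0 on a load or store means "use the ABI alignment of the accessed type", so before taking std::min the code materializes that default through DataLayout; otherwise a 0 would masquerade as the minimum. The fallback in isolation (TD is the pass's DataLayout):

    unsigned Align = LI->getAlignment();   // 0 encodes "ABI default"
    if (Align == 0)
      Align = TD->getABITypeAlignment(LI->getType());
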
@@ -559,7 +567,8 @@ bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
Value *cpyDest, Value *cpySrc,
- uint64_t cpyLen, CallInst *C) {
+ uint64_t cpyLen, unsigned cpyAlign,
+ CallInst *C) {
// The general transformation to keep in mind is
//
// call @func(..., src, ...)
@@ -625,6 +634,16 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
return false;
}
+ // Check that dest points to memory that is at least as aligned as src.
+ unsigned srcAlign = srcAlloca->getAlignment();
+ if (!srcAlign)
+ srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
+ bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
+ // If dest is not aligned enough and we can't increase its alignment then
+ // bail out.
+ if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
+ return false;
+
// Check that src is not accessed except via the call and the memcpy. This
// guarantees that it holds only undefined values when passed in (so the final
// memcpy can be dropped), that it is not read or written between the call and
@@ -673,20 +692,26 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
bool changedArgument = false;
for (unsigned i = 0; i < CS.arg_size(); ++i)
if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
- if (cpySrc->getType() != cpyDest->getType())
- cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
- cpyDest->getName(), C);
+ Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
+ : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
+ cpyDest->getName(), C);
changedArgument = true;
- if (CS.getArgument(i)->getType() == cpyDest->getType())
- CS.setArgument(i, cpyDest);
+ if (CS.getArgument(i)->getType() == Dest->getType())
+ CS.setArgument(i, Dest);
else
- CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
- CS.getArgument(i)->getType(), cpyDest->getName(), C));
+ CS.setArgument(i, CastInst::CreatePointerCast(Dest,
+ CS.getArgument(i)->getType(), Dest->getName(), C));
}
if (!changedArgument)
return false;
+ // If the destination wasn't sufficiently aligned then increase its alignment.
+ if (!isDestSufficientlyAligned) {
+ assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
+ cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
+ }
+
// Drop any cached information about the call, because we may have changed
// its dependence information by changing its parameter.
MD->removeInstruction(C);
@@ -813,7 +838,8 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
if (DepInfo.isClobber()) {
if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
- CopySize->getZExtValue(), C)) {
+ CopySize->getZExtValue(), M->getAlignment(),
+ C)) {
MD->removeInstruction(M);
M->eraseFromParent();
return true;
@@ -974,7 +1000,7 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
bool MemCpyOpt::runOnFunction(Function &F) {
bool MadeChange = false;
MD = &getAnalysis<MemoryDependenceAnalysis>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// If we don't have at least memset and memcpy, there is little point of doing
diff --git a/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp b/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
index 3222f20..dfdf505 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
@@ -29,6 +29,7 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/DenseMap.h"
using namespace llvm;
@@ -1120,9 +1121,8 @@ namespace {
bool relatedSelect(const SelectInst *A, const Value *B);
bool relatedPHI(const PHINode *A, const Value *B);
- // Do not implement.
- void operator=(const ProvenanceAnalysis &);
- ProvenanceAnalysis(const ProvenanceAnalysis &);
+ void operator=(const ProvenanceAnalysis &) LLVM_DELETED_FUNCTION;
+ ProvenanceAnalysis(const ProvenanceAnalysis &) LLVM_DELETED_FUNCTION;
public:
ProvenanceAnalysis() {}
@@ -1236,16 +1236,19 @@ bool ProvenanceAnalysis::relatedCheck(const Value *A, const Value *B) {
// An ObjC-Identified object can't alias a load if it is never locally stored.
if (AIsIdentified) {
+ // Check for an obvious escape.
+ if (isa<LoadInst>(B))
+ return isStoredObjCPointer(A);
if (BIsIdentified) {
- // If both pointers have provenance, they can be directly compared.
- if (A != B)
- return false;
- } else {
- if (isa<LoadInst>(B))
- return isStoredObjCPointer(A);
+ // Check for an obvious escape.
+ if (isa<LoadInst>(A))
+ return isStoredObjCPointer(B);
+ // Both pointers are identified and escapes aren't an evident problem.
+ return false;
}
- } else {
- if (BIsIdentified && isa<LoadInst>(A))
+ } else if (BIsIdentified) {
+ // Check for an obvious escape.
+ if (isa<LoadInst>(A))
return isStoredObjCPointer(B);
}
@@ -1381,9 +1384,6 @@ namespace {
/// PtrState - This class summarizes several per-pointer runtime properties
/// which are propagated through the flow graph.
class PtrState {
- /// NestCount - The known minimum level of retain+release nesting.
- unsigned NestCount;
-
/// KnownPositiveRefCount - True if the reference count is known to
/// be incremented.
bool KnownPositiveRefCount;
@@ -1401,7 +1401,7 @@ namespace {
/// TODO: Encapsulate this better.
RRInfo RRI;
- PtrState() : NestCount(0), KnownPositiveRefCount(false), Partial(false),
+ PtrState() : KnownPositiveRefCount(false), Partial(false),
Seq(S_None) {}
void SetKnownPositiveRefCount() {
@@ -1416,18 +1416,6 @@ namespace {
return KnownPositiveRefCount;
}
- void IncrementNestCount() {
- if (NestCount != UINT_MAX) ++NestCount;
- }
-
- void DecrementNestCount() {
- if (NestCount != 0) --NestCount;
- }
-
- bool IsKnownNested() const {
- return NestCount > 0;
- }
-
void SetSeq(Sequence NewSeq) {
Seq = NewSeq;
}
@@ -1454,7 +1442,6 @@ void
PtrState::Merge(const PtrState &Other, bool TopDown) {
Seq = MergeSeqs(Seq, Other.Seq, TopDown);
KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount;
- NestCount = std::min(NestCount, Other.NestCount);
// We can't merge a plain objc_retain with an objc_retainBlock.
if (RRI.IsRetainBlock != Other.RRI.IsRetainBlock)
@@ -1610,6 +1597,12 @@ void BBState::MergePred(const BBState &Other) {
// loop backedge. Loop backedges are special.
TopDownPathCount += Other.TopDownPathCount;
+ // Check for overflow. If we have overflow, fall back to conservative behavior.
+ if (TopDownPathCount < Other.TopDownPathCount) {
+ clearTopDownPointers();
+ return;
+ }
+
// For each entry in the other set, if our set has an entry with the same key,
// merge the entries. Otherwise, copy the entry and merge it with an empty
// entry.
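
The overflow test relies on well-defined unsigned wraparound: for unsigned a and b, a + b is smaller than b exactly when the addition wrapped. On overflow the merge clears all tracked pointer state rather than reasoning from a bogus path count. Self-contained form:

    // True iff a + b wraps for unsigned operands.
    static bool addOverflows(unsigned a, unsigned b) {
      return a + b < b;
    }
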
@@ -1635,6 +1628,12 @@ void BBState::MergeSucc(const BBState &Other) {
// loop backedge. Loop backedges are special.
BottomUpPathCount += Other.BottomUpPathCount;
+ // Check for overflow. If we have overflow, fall back to conservative behavior.
+ if (BottomUpPathCount < Other.BottomUpPathCount) {
+ clearBottomUpPointers();
+ return;
+ }
+
// For each entry in the other set, if our set has an entry with the
// same key, merge the entries. Otherwise, copy the entry and merge
// it with an empty entry.
@@ -1789,7 +1788,9 @@ Constant *ObjCARCOpt::getRetainRVCallee(Module *M) {
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
Type *Params[] = { I8X };
FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
+ AttrListPtr Attributes =
+ AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(C, Attributes::NoUnwind));
RetainRVCallee =
M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy,
Attributes);
@@ -1803,7 +1804,9 @@ Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
Type *Params[] = { I8X };
FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
+ AttrListPtr Attributes =
+ AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(C, Attributes::NoUnwind));
AutoreleaseRVCallee =
M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
Attributes);
@@ -1815,7 +1818,9 @@ Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
if (!ReleaseCallee) {
LLVMContext &C = M->getContext();
Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
- AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
+ AttrListPtr Attributes =
+ AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(C, Attributes::NoUnwind));
ReleaseCallee =
M->getOrInsertFunction(
"objc_release",
@@ -1829,7 +1834,9 @@ Constant *ObjCARCOpt::getRetainCallee(Module *M) {
if (!RetainCallee) {
LLVMContext &C = M->getContext();
Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
- AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
+ AttrListPtr Attributes =
+ AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(C, Attributes::NoUnwind));
RetainCallee =
M->getOrInsertFunction(
"objc_retain",
@@ -1858,7 +1865,9 @@ Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
if (!AutoreleaseCallee) {
LLVMContext &C = M->getContext();
Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
- AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
+ AttrListPtr Attributes =
+ AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(C, Attributes::NoUnwind));
AutoreleaseCallee =
M->getOrInsertFunction(
"objc_autorelease",
@@ -1868,6 +1877,26 @@ Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
return AutoreleaseCallee;
}
+/// IsPotentialUse - Test whether the given value is possibly a
+/// reference-counted pointer, including tests which utilize AliasAnalysis.
+static bool IsPotentialUse(const Value *Op, AliasAnalysis &AA) {
+ // First make the rudimentary check.
+ if (!IsPotentialUse(Op))
+ return false;
+
+ // Objects in constant memory are not reference-counted.
+ if (AA.pointsToConstantMemory(Op))
+ return false;
+
+ // Pointers in constant memory are not pointing to reference-counted objects.
+ if (const LoadInst *LI = dyn_cast<LoadInst>(Op))
+ if (AA.pointsToConstantMemory(LI->getPointerOperand()))
+ return false;
+
+ // Otherwise assume the worst.
+ return true;
+}
+
/// CanAlterRefCount - Test whether the given instruction can result in a
/// reference count modification (positive or negative) for the pointer's
/// object.
@@ -1894,7 +1923,7 @@ CanAlterRefCount(const Instruction *Inst, const Value *Ptr,
for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
I != E; ++I) {
const Value *Op = *I;
- if (IsPotentialUse(Op) && PA.related(Ptr, Op))
+ if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
return true;
}
return false;
@@ -1919,14 +1948,14 @@ CanUse(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA,
// Comparing a pointer with null, or any other constant, isn't really a use,
// because we don't care what the pointer points to, or about the values
// of any other dynamic reference-counted pointers.
- if (!IsPotentialUse(ICI->getOperand(1)))
+ if (!IsPotentialUse(ICI->getOperand(1), *PA.getAA()))
return false;
} else if (ImmutableCallSite CS = static_cast<const Value *>(Inst)) {
// For calls, just check the arguments (and not the callee operand).
for (ImmutableCallSite::arg_iterator OI = CS.arg_begin(),
OE = CS.arg_end(); OI != OE; ++OI) {
const Value *Op = *OI;
- if (IsPotentialUse(Op) && PA.related(Ptr, Op))
+ if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
return true;
}
return false;
@@ -1936,14 +1965,14 @@ CanUse(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA,
const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand());
// If we can't tell what the underlying object was, assume there is a
// dependence.
- return IsPotentialUse(Op) && PA.related(Op, Ptr);
+ return IsPotentialUse(Op, *PA.getAA()) && PA.related(Op, Ptr);
}
// Check each operand for a match.
for (User::const_op_iterator OI = Inst->op_begin(), OE = Inst->op_end();
OI != OE; ++OI) {
const Value *Op = *OI;
- if (IsPotentialUse(Op) && PA.related(Ptr, Op))
+ if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
return true;
}
return false;
@@ -2612,11 +2641,11 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
S.ResetSequenceProgress(ReleaseMetadata ? S_MovableRelease : S_Release);
S.RRI.ReleaseMetadata = ReleaseMetadata;
- S.RRI.KnownSafe = S.IsKnownNested() || S.IsKnownIncremented();
+ S.RRI.KnownSafe = S.IsKnownIncremented();
S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
S.RRI.Calls.insert(Inst);
- S.IncrementNestCount();
+ S.SetKnownPositiveRefCount();
break;
}
case IC_RetainBlock:
@@ -2631,7 +2660,6 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
PtrState &S = MyStates.getPtrBottomUpState(Arg);
S.SetKnownPositiveRefCount();
- S.DecrementNestCount();
switch (S.GetSeq()) {
case S_Stop:
@@ -2747,8 +2775,9 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
// Merge the states from each successor to compute the initial state
// for the current block.
- for (BBState::edge_iterator SI(MyStates.succ_begin()),
- SE(MyStates.succ_end()); SI != SE; ++SI) {
+ BBState::edge_iterator SI(MyStates.succ_begin()),
+ SE(MyStates.succ_end());
+ if (SI != SE) {
const BasicBlock *Succ = *SI;
DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
assert(I != BBStates.end());
@@ -2760,7 +2789,6 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
assert(I != BBStates.end());
MyStates.MergeSucc(I->second);
}
- break;
}
// Visit all the instructions, bottom-up.
@@ -2823,12 +2851,11 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
S.ResetSequenceProgress(S_Retain);
S.RRI.IsRetainBlock = Class == IC_RetainBlock;
- // Don't check S.IsKnownIncremented() here because it's not sufficient.
- S.RRI.KnownSafe = S.IsKnownNested();
+ S.RRI.KnownSafe = S.IsKnownIncremented();
S.RRI.Calls.insert(Inst);
}
- S.IncrementNestCount();
+ S.SetKnownPositiveRefCount();
// A retain can be a potential use; proceed to the generic checking
// code below.
@@ -2838,7 +2865,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
Arg = GetObjCArg(Inst);
PtrState &S = MyStates.getPtrTopDownState(Arg);
- S.DecrementNestCount();
+ S.ClearRefCount();
switch (S.GetSeq()) {
case S_Retain:
@@ -2935,8 +2962,9 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB,
// Merge the states from each predecessor to compute the initial state
// for the current block.
- for (BBState::edge_iterator PI(MyStates.pred_begin()),
- PE(MyStates.pred_end()); PI != PE; ++PI) {
+ BBState::edge_iterator PI(MyStates.pred_begin()),
+ PE(MyStates.pred_end());
+ if (PI != PE) {
const BasicBlock *Pred = *PI;
DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
assert(I != BBStates.end());
@@ -2948,7 +2976,6 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB,
assert(I != BBStates.end());
MyStates.MergePred(I->second);
}
- break;
}
// Visit all the instructions, top-down.
@@ -3532,19 +3559,19 @@ bool ObjCARCOpt::OptimizeSequences(Function &F) {
}
/// OptimizeReturns - Look for this pattern:
-///
+/// \code
/// %call = call i8* @something(...)
/// %2 = call i8* @objc_retain(i8* %call)
/// %3 = call i8* @objc_autorelease(i8* %2)
/// ret i8* %3
-///
+/// \endcode
/// And delete the retain and autorelease.
///
/// Otherwise if it's just this:
-///
+/// \code
/// %3 = call i8* @objc_autorelease(i8* %2)
/// ret i8* %3
-///
+/// \endcode
/// convert the autorelease to autoreleaseRV.
void ObjCARCOpt::OptimizeReturns(Function &F) {
if (!F.getReturnType()->isPointerTy())
@@ -3814,8 +3841,9 @@ Constant *ObjCARCContract::getStoreStrongCallee(Module *M) {
Type *Params[] = { I8XX, I8X };
AttrListPtr Attributes = AttrListPtr()
- .addAttr(~0u, Attribute::NoUnwind)
- .addAttr(1, Attribute::NoCapture);
+ .addAttr(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(C, Attributes::NoUnwind))
+ .addAttr(M->getContext(), 1, Attributes::get(C, Attributes::NoCapture));
StoreStrongCallee =
M->getOrInsertFunction(
@@ -3832,7 +3860,9 @@ Constant *ObjCARCContract::getRetainAutoreleaseCallee(Module *M) {
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
Type *Params[] = { I8X };
FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
+ AttrListPtr Attributes =
+ AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(C, Attributes::NoUnwind));
RetainAutoreleaseCallee =
M->getOrInsertFunction("objc_retainAutorelease", FTy, Attributes);
}
@@ -3845,7 +3875,9 @@ Constant *ObjCARCContract::getRetainAutoreleaseRVCallee(Module *M) {
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
Type *Params[] = { I8X };
FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
+ AttrListPtr Attributes =
+ AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(C, Attributes::NoUnwind));
RetainAutoreleaseRVCallee =
M->getOrInsertFunction("objc_retainAutoreleaseReturnValue", FTy,
Attributes);
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 09687d8..7a40797 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -339,36 +339,6 @@ static void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) {
}
}
-/// EvaluateRepeatedConstant - Compute C op C op ... op C where the constant C
-/// is repeated Weight times.
-static Constant *EvaluateRepeatedConstant(unsigned Opcode, Constant *C,
- APInt Weight) {
- // For addition the result can be efficiently computed as the product of the
- // constant and the weight.
- if (Opcode == Instruction::Add)
- return ConstantExpr::getMul(C, ConstantInt::get(C->getContext(), Weight));
-
- // The weight might be huge, so compute by repeated squaring to ensure that
- // compile time is proportional to the logarithm of the weight.
- Constant *Result = 0;
- Constant *Power = C; // Successively C, C op C, (C op C) op (C op C) etc.
- // Visit the bits in Weight.
- while (Weight != 0) {
- // If the current bit in Weight is non-zero do Result = Result op Power.
- if (Weight[0])
- Result = Result ? ConstantExpr::get(Opcode, Result, Power) : Power;
- // Move on to the next bit if any more are non-zero.
- Weight = Weight.lshr(1);
- if (Weight.isMinValue())
- break;
- // Square the power.
- Power = ConstantExpr::get(Opcode, Power, Power);
- }
-
- assert(Result && "Only positive weights supported!");
- return Result;
-}
-
typedef std::pair<Value*, APInt> RepeatedValue;
/// LinearizeExprTree - Given an associative binary expression, return the leaf
@@ -382,9 +352,7 @@ typedef std::pair<Value*, APInt> RepeatedValue;
/// op
/// (Ops[N].first op Ops[N].first op ... Ops[N].first) <- Ops[N].second times
///
-/// Note that the values Ops[0].first, ..., Ops[N].first are all distinct, and
-/// they are all non-constant except possibly for the last one, which if it is
-/// constant will have weight one (Ops[N].second === 1).
+/// Note that the values Ops[0].first, ..., Ops[N].first are all distinct.
///
/// This routine may modify the function, in which case it returns 'true'. The
/// changes it makes may well be destructive, changing the value computed by 'I'
@@ -604,7 +572,6 @@ static bool LinearizeExprTree(BinaryOperator *I,
// The leaves, repeated according to their weights, represent the linearized
// form of the expression.
- Constant *Cst = 0; // Accumulate constants here.
for (unsigned i = 0, e = LeafOrder.size(); i != e; ++i) {
Value *V = LeafOrder[i];
LeafMap::iterator It = Leaves.find(V);
@@ -618,31 +585,14 @@ static bool LinearizeExprTree(BinaryOperator *I,
continue;
// Ensure the leaf is only output once.
It->second = 0;
- // Glob all constants together into Cst.
- if (Constant *C = dyn_cast<Constant>(V)) {
- C = EvaluateRepeatedConstant(Opcode, C, Weight);
- Cst = Cst ? ConstantExpr::get(Opcode, Cst, C) : C;
- continue;
- }
- // Add non-constant
Ops.push_back(std::make_pair(V, Weight));
}
- // Add any constants back into Ops, all globbed together and reduced to having
- // weight 1 for the convenience of users.
- Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType());
- if (Cst && Cst != Identity) {
- // If combining multiple constants resulted in the absorber then the entire
- // expression must evaluate to the absorber.
- if (Cst == Absorber)
- Ops.clear();
- Ops.push_back(std::make_pair(Cst, APInt(Bitwidth, 1)));
- }
-
// For nilpotent operations or addition there may be no operands, for example
// because the expression was "X xor X" or consisted of 2^Bitwidth additions:
// in both cases the weight reduces to 0 causing the value to be skipped.
if (Ops.empty()) {
+ Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType());
assert(Identity && "Associative operation without identity!");
Ops.push_back(std::make_pair(Identity, APInt(Bitwidth, 1)));
}
@@ -656,8 +606,8 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
SmallVectorImpl<ValueEntry> &Ops) {
assert(Ops.size() > 1 && "Single values should be used directly!");
- // Since our optimizations never increase the number of operations, the new
- // expression can always be written by reusing the existing binary operators
+ // Since our optimizations should never increase the number of operations, the
+ // new expression can usually be written by reusing the existing binary operators
// from the original expression tree, without creating any new instructions,
// though the rewritten expression may have a completely different topology.
// We take care to not change anything if the new expression will be the same
@@ -671,6 +621,20 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
unsigned Opcode = I->getOpcode();
BinaryOperator *Op = I;
+ /// NotRewritable - The operands being written will be the leaves of the new
+ /// expression and must not be used as inner nodes (via NodesToRewrite) by
+ /// mistake. Inner nodes are always reassociable, and usually leaves are not
+ /// (if they were they would have been incorporated into the expression and so
+ /// would not be leaves), so most of the time there is no danger of this. But
+ /// in rare cases a leaf may become reassociable if an optimization kills uses
+ /// of it, or it may momentarily become reassociable during rewriting (below)
+ /// due to it being removed as an operand of one of its uses. Ensure that misuse
+ /// of leaf nodes as inner nodes cannot occur by remembering all of the future
+ /// leaves and refusing to reuse any of them as inner nodes.
+ SmallPtrSet<Value*, 8> NotRewritable;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ NotRewritable.insert(Ops[i].Op);
+
// ExpressionChanged - Non-null if the rewritten expression differs from the
// original in some non-trivial way, requiring the clearing of optional flags.
// Flags are cleared from the operator in ExpressionChanged up to I inclusive.
@@ -703,12 +667,14 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
// the old operands with the new ones.
DEBUG(dbgs() << "RA: " << *Op << '\n');
if (NewLHS != OldLHS) {
- if (BinaryOperator *BO = isReassociableOp(OldLHS, Opcode))
+ BinaryOperator *BO = isReassociableOp(OldLHS, Opcode);
+ if (BO && !NotRewritable.count(BO))
NodesToRewrite.push_back(BO);
Op->setOperand(0, NewLHS);
}
if (NewRHS != OldRHS) {
- if (BinaryOperator *BO = isReassociableOp(OldRHS, Opcode))
+ BinaryOperator *BO = isReassociableOp(OldRHS, Opcode);
+ if (BO && !NotRewritable.count(BO))
NodesToRewrite.push_back(BO);
Op->setOperand(1, NewRHS);
}
@@ -732,7 +698,8 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
Op->swapOperands();
} else {
// Overwrite with the new right-hand side.
- if (BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode))
+ BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode);
+ if (BO && !NotRewritable.count(BO))
NodesToRewrite.push_back(BO);
Op->setOperand(1, NewRHS);
ExpressionChanged = Op;
@@ -745,7 +712,8 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
// Now deal with the left-hand side. If this is already an operation node
// from the original expression then just rewrite the rest of the expression
// into it.
- if (BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode)) {
+ BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode);
+ if (BO && !NotRewritable.count(BO)) {
Op = BO;
continue;
}
@@ -1446,9 +1414,26 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
SmallVectorImpl<ValueEntry> &Ops) {
// Now that we have the linearized expression tree, try to optimize it.
// Start by folding any constants that we found.
- if (Ops.size() == 1) return Ops[0].Op;
-
+ Constant *Cst = 0;
unsigned Opcode = I->getOpcode();
+ while (!Ops.empty() && isa<Constant>(Ops.back().Op)) {
+ Constant *C = cast<Constant>(Ops.pop_back_val().Op);
+ Cst = Cst ? ConstantExpr::get(Opcode, C, Cst) : C;
+ }
+ // If there was nothing but constants then we are done.
+ if (Ops.empty())
+ return Cst;
+
+ // Put the combined constant back at the end of the operand list, unless
+ // there is no point. For example, an add of 0 gets dropped here, while a
+ // multiplication by zero turns the whole expression into zero.
+ if (Cst && Cst != ConstantExpr::getBinOpIdentity(Opcode, I->getType())) {
+ if (Cst == ConstantExpr::getBinOpAbsorber(Opcode, I->getType()))
+ return Cst;
+ Ops.push_back(ValueEntry(0, Cst));
+ }
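+ // Worked example (illustrative, hypothetical input): for an expression
+ // tree of "(X * Y) * 3 * 5" the loop above folds the constants into
+ // Cst = 15 and pushes it back as the final operand; for "X + 0" the
+ // identity constant is simply dropped, and for "X * 0" the absorber
+ // check returns the constant 0 for the whole expression.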
+
+ if (Ops.size() == 1) return Ops[0].Op;
// Handle destructive annihilation due to identities between elements in the
// argument list here.
diff --git a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
index 2c39aab..686520e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -26,7 +26,7 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
@@ -153,7 +153,7 @@ namespace {
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -205,7 +205,7 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- SCCPSolver(const TargetData *td, const TargetLibraryInfo *tli)
+ SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli)
: TD(td), TLI(tli) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
@@ -1564,7 +1564,7 @@ static void DeleteInstructionInBlock(BasicBlock *BB) {
//
bool SCCP::runOnFunction(Function &F) {
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SCCPSolver Solver(TD, TLI);
@@ -1693,7 +1693,7 @@ static bool AddressIsTaken(const GlobalValue *GV) {
}
bool IPSCCP::runOnModule(Module &M) {
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SCCPSolver Solver(TD, TLI);
diff --git a/contrib/llvm/lib/Transforms/Scalar/SROA.cpp b/contrib/llvm/lib/Transforms/Scalar/SROA.cpp
new file mode 100644
index 0000000..ccc2f7a
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -0,0 +1,3697 @@
+//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This transformation implements the well known scalar replacement of
+/// aggregates transformation. It tries to identify promotable elements of an
+/// aggregate alloca, and promote them to registers. It will also try to
+/// convert uses of an element (or set of elements) of an alloca into a vector
+/// or bitfield-style integer scalar if appropriate.
+///
+/// It works to do this with minimal slicing of the alloca so that regions
+/// which are merely transferred in and out of external memory remain unchanged
+/// and are not decomposed to scalar code.
+///
+/// Because this also performs alloca promotion, it can be thought of as also
+/// serving the purpose of SSA formation. The algorithm iterates on the
+/// function until all opportunities for promotion have been realized.
+///
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "sroa"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Constants.h"
+#include "llvm/DIBuilder.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Operator.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/InstVisitor.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
+using namespace llvm;
+
+STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
+STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
+STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
+STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
+STATISTIC(NumDeleted, "Number of instructions deleted");
+STATISTIC(NumVectorized, "Number of vectorized aggregates");
+
+/// Hidden option to force the pass to not use DomTree and mem2reg, instead
+/// forming SSA values through the SSAUpdater infrastructure.
+static cl::opt<bool>
+ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
+
+namespace {
+/// \brief Alloca partitioning representation.
+///
+/// This class represents a partitioning of an alloca into slices, and
+/// information about the nature of uses of each slice of the alloca. The goal
+/// is that this information is sufficient to decide if and how to split the
+/// alloca apart and replace slices with scalars. It is also intended that this
+/// structure can capture the relevant information needed both to decide about
+/// and to enact these transformations.
+class AllocaPartitioning {
+public:
+ /// \brief A common base class for representing a half-open byte range.
+ struct ByteRange {
+ /// \brief The beginning offset of the range.
+ uint64_t BeginOffset;
+
+ /// \brief The ending offset, not included in the range.
+ uint64_t EndOffset;
+
+ ByteRange() : BeginOffset(), EndOffset() {}
+ ByteRange(uint64_t BeginOffset, uint64_t EndOffset)
+ : BeginOffset(BeginOffset), EndOffset(EndOffset) {}
+
+ /// \brief Support for ordering ranges.
+ ///
+ /// This provides an ordering over ranges such that start offsets are
+ /// always increasing, and within equal start offsets, the end offsets are
+ /// decreasing. Thus the spanning range comes first in a cluster with the
+ /// same start position.
+ bool operator<(const ByteRange &RHS) const {
+ if (BeginOffset < RHS.BeginOffset) return true;
+ if (BeginOffset > RHS.BeginOffset) return false;
+ if (EndOffset > RHS.EndOffset) return true;
+ return false;
+ }
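+ // Worked example (hypothetical ranges): [0,10) < [0,5) < [2,7), i.e. a
+ // range spanning a cluster sorts ahead of narrower ranges sharing its
+ // start offset, which the partition construction below relies on.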
+
+ /// \brief Support comparison with a single offset to allow binary searches.
+ friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) {
+ return LHS.BeginOffset < RHSOffset;
+ }
+
+ friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
+ const ByteRange &RHS) {
+ return LHSOffset < RHS.BeginOffset;
+ }
+
+ bool operator==(const ByteRange &RHS) const {
+ return BeginOffset == RHS.BeginOffset && EndOffset == RHS.EndOffset;
+ }
+ bool operator!=(const ByteRange &RHS) const { return !operator==(RHS); }
+ };
+
+ /// \brief A partition of an alloca.
+ ///
+ /// This structure represents a contiguous partition of the alloca. These are
+ /// formed by examining the uses of the alloca. During formation, they may
+ /// overlap but once an AllocaPartitioning is built, the Partitions within it
+ /// are all disjoint.
+ struct Partition : public ByteRange {
+ /// \brief Whether this partition is splittable into smaller partitions.
+ ///
+ /// We flag partitions as splittable when they are formed entirely due to
+ /// accesses by trivially splittable operations such as memset and memcpy.
+ bool IsSplittable;
+
+ /// \brief Test whether a partition has been marked as dead.
+ bool isDead() const {
+ if (BeginOffset == UINT64_MAX) {
+ assert(EndOffset == UINT64_MAX);
+ return true;
+ }
+ return false;
+ }
+
+ /// \brief Kill a partition.
+ /// This is accomplished by setting both its beginning and end offset to
+ /// the maximum possible value.
+ void kill() {
+ assert(!isDead() && "He's Dead, Jim!");
+ BeginOffset = EndOffset = UINT64_MAX;
+ }
+
+ Partition() : ByteRange(), IsSplittable() {}
+ Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable)
+ : ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {}
+ };
+
+ /// \brief A particular use of a partition of the alloca.
+ ///
+ /// This structure is used to associate uses of a partition with it. They
+ /// mark the range of bytes which are referenced by a particular instruction,
+ /// and include a handle to the user itself and the pointer value in use.
+ /// The bounds of these uses are determined by intersecting the bounds of the
+ /// memory use itself with a particular partition. As a consequence there is
+ /// intentional overlap between various uses of the same partition.
+ struct PartitionUse : public ByteRange {
+ /// \brief The use in question. Provides access to both user and used value.
+ ///
+ /// Note that this may be null if the partition use is *dead*, that is, it
+ /// should be ignored.
+ Use *U;
+
+ PartitionUse() : ByteRange(), U() {}
+ PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U)
+ : ByteRange(BeginOffset, EndOffset), U(U) {}
+ };
+
+ /// \brief Construct a partitioning of a particular alloca.
+ ///
+ /// Construction does most of the work for partitioning the alloca. This
+ /// performs the necessary walks of users and builds a partitioning from it.
+ AllocaPartitioning(const DataLayout &TD, AllocaInst &AI);
+
+ /// \brief Test whether a pointer to the allocation escapes our analysis.
+ ///
+ /// If this is true, the partitioning is never fully built and should be
+ /// ignored.
+ bool isEscaped() const { return PointerEscapingInstr; }
+
+ /// \brief Support for iterating over the partitions.
+ /// @{
+ typedef SmallVectorImpl<Partition>::iterator iterator;
+ iterator begin() { return Partitions.begin(); }
+ iterator end() { return Partitions.end(); }
+
+ typedef SmallVectorImpl<Partition>::const_iterator const_iterator;
+ const_iterator begin() const { return Partitions.begin(); }
+ const_iterator end() const { return Partitions.end(); }
+ /// @}
+
+ /// \brief Support for iterating over and manipulating a particular
+ /// partition's uses.
+ ///
+ /// The iteration support provided for uses is more limited, but also
+ /// includes some manipulation routines to support rewriting the uses of
+ /// partitions during SROA.
+ /// @{
+ typedef SmallVectorImpl<PartitionUse>::iterator use_iterator;
+ use_iterator use_begin(unsigned Idx) { return Uses[Idx].begin(); }
+ use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); }
+ use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); }
+ use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); }
+
+ typedef SmallVectorImpl<PartitionUse>::const_iterator const_use_iterator;
+ const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); }
+ const_use_iterator use_begin(const_iterator I) const {
+ return Uses[I - begin()].begin();
+ }
+ const_use_iterator use_end(unsigned Idx) const { return Uses[Idx].end(); }
+ const_use_iterator use_end(const_iterator I) const {
+ return Uses[I - begin()].end();
+ }
+
+ unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); }
+ unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); }
+ const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const {
+ return Uses[PIdx][UIdx];
+ }
+ const PartitionUse &getUse(const_iterator I, unsigned UIdx) const {
+ return Uses[I - begin()][UIdx];
+ }
+
+ void use_push_back(unsigned Idx, const PartitionUse &PU) {
+ Uses[Idx].push_back(PU);
+ }
+ void use_push_back(const_iterator I, const PartitionUse &PU) {
+ Uses[I - begin()].push_back(PU);
+ }
+ /// @}
+
+ /// \brief Allow iterating the dead users for this alloca.
+ ///
+ /// These are instructions which will never actually use the alloca as they
+ /// are outside the allocated range. They are safe to replace with undef and
+ /// delete.
+ /// @{
+ typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
+ dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
+ dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
+ /// @}
+
+ /// \brief Allow iterating the dead expressions referring to this alloca.
+ ///
+ /// These are operands which cannot actually be used to refer to the
+ /// alloca as they are outside its range and the user doesn't correct for
+ /// that. These mostly consist of PHI node inputs and the like which we just
+ /// need to replace with undef.
+ /// @{
+ typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
+ dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
+ dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
+ /// @}
+
+ /// \brief MemTransferInst auxiliary data.
+ /// This struct provides some auxiliary data about memory transfer
+ /// intrinsics such as memcpy and memmove. These intrinsics can use two
+ /// different ranges within the same alloca, and pose other challenges to
+ /// represent correctly. We stash extra data to help us untangle this
+ /// after the partitioning is complete.
+ struct MemTransferOffsets {
+ /// The destination begin and end offsets when the destination is within
+ /// this alloca. If the end offset is zero the destination is not within
+ /// this alloca.
+ uint64_t DestBegin, DestEnd;
+
+ /// The source begin and end offsets when the source is within this alloca.
+ /// If the end offset is zero, the source is not within this alloca.
+ uint64_t SourceBegin, SourceEnd;
+
+ /// Flag for whether an alloca is splittable.
+ bool IsSplittable;
+ };
+ MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const {
+ return MemTransferInstData.lookup(&II);
+ }
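+ // Worked example (hypothetical): for a 16 byte alloca, a memcpy of
+ // 8 bytes from offset 0 to offset 8 within that alloca records
+ // DestBegin = 8, DestEnd = 16 and SourceBegin = 0, SourceEnd = 8; when
+ // the source lies in some other object entirely, SourceEnd stays zero
+ // as described above.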
+
+ /// \brief Map from a PHI or select operand back to a partition.
+ ///
+ /// When manipulating PHI nodes or selects, they can use more than one
+ /// partition of an alloca. We store a special mapping to allow finding the
+ /// partition referenced by each of these operands, if any.
+ iterator findPartitionForPHIOrSelectOperand(Use *U) {
+ SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
+ = PHIOrSelectOpMap.find(U);
+ if (MapIt == PHIOrSelectOpMap.end())
+ return end();
+
+ return begin() + MapIt->second.first;
+ }
+
+ /// \brief Map from a PHI or select operand back to the specific use of
+ /// a partition.
+ ///
+ /// Similar to mapping these operands back to the partitions, this maps
+ /// directly to the use structure of that partition.
+ use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) {
+ SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
+ = PHIOrSelectOpMap.find(U);
+ assert(MapIt != PHIOrSelectOpMap.end());
+ return Uses[MapIt->second.first].begin() + MapIt->second.second;
+ }
+
+ /// \brief Compute a common type among the uses of a particular partition.
+ ///
+ /// This routine walks all of the uses of a particular partition and tries
+ /// to find a common type between them. Untyped operations such as memset and
+ /// memcpy are ignored.
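+ ///
+ /// For example (hypothetical uses): an i32 use that fits the partition is
+ /// returned immediately as a "bucket of bits" type, while two distinct
+ /// non-integer use types such as float and double yield null.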
+ Type *getCommonType(iterator I) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
+ void printUsers(raw_ostream &OS, const_iterator I,
+ StringRef Indent = " ") const;
+ void print(raw_ostream &OS) const;
+ void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
+ void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
+#endif
+
+private:
+ template <typename DerivedT, typename RetT = void> class BuilderBase;
+ class PartitionBuilder;
+ friend class AllocaPartitioning::PartitionBuilder;
+ class UseBuilder;
+ friend class AllocaPartitioning::UseBuilder;
+
+#ifndef NDEBUG
+ /// \brief Handle to alloca instruction to simplify method interfaces.
+ AllocaInst &AI;
+#endif
+
+ /// \brief The instruction responsible for this alloca having no partitioning.
+ ///
+ /// When an instruction (potentially) escapes the pointer to the alloca, we
+ /// store a pointer to that here and abort trying to partition the alloca.
+ /// This will be null if the alloca is partitioned successfully.
+ Instruction *PointerEscapingInstr;
+
+ /// \brief The partitions of the alloca.
+ ///
+ /// We store a vector of the partitions over the alloca here. This vector is
+ /// sorted by increasing begin offset, and then by decreasing end offset. See
+ /// the Partition inner class for more details. Initially (during
+ /// construction) there are overlaps, but we form a disjoint sequence of
+ /// partitions while finishing construction and a fully constructed object is
+ /// expected to always have this as a disjoint space.
+ SmallVector<Partition, 8> Partitions;
+
+ /// \brief The uses of the partitions.
+ ///
+ /// This is essentially a mapping from each partition to a list of uses of
+ /// that partition. The mapping is done with a Uses vector that has the exact
+ /// same number of entries as the partition vector. Each entry is itself
+ /// a vector of the uses.
+ SmallVector<SmallVector<PartitionUse, 2>, 8> Uses;
+
+ /// \brief Instructions which will become dead if we rewrite the alloca.
+ ///
+ /// Note that these are not separated by partition. This is because we expect
+ /// a partitioned alloca to be completely rewritten or not rewritten at all.
+ /// If rewritten, all these instructions can simply be removed and replaced
+ /// with undef as they come from outside of the allocated space.
+ SmallVector<Instruction *, 8> DeadUsers;
+
+ /// \brief Operands which will become dead if we rewrite the alloca.
+ ///
+ /// These are operands that in their particular use can be replaced with
+ /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
+ /// to PHI nodes and the like. They aren't entirely dead (there might be
+ /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
+ /// want to swap this particular input for undef to simplify the use lists of
+ /// the alloca.
+ SmallVector<Use *, 8> DeadOperands;
+
+ /// \brief The underlying storage for auxiliary memcpy and memset info.
+ SmallDenseMap<MemTransferInst *, MemTransferOffsets, 4> MemTransferInstData;
+
+ /// \brief A side data structure used when building up the partitions and uses.
+ ///
+ /// This mapping is only really used during the initial building of the
+ /// partitioning so that we can retain information about PHI and select nodes
+ /// processed.
+ SmallDenseMap<Instruction *, std::pair<uint64_t, bool> > PHIOrSelectSizes;
+
+ /// \brief Auxiliary information for particular PHI or select operands.
+ SmallDenseMap<Use *, std::pair<unsigned, unsigned>, 4> PHIOrSelectOpMap;
+
+ /// \brief A utility routine called from the constructor.
+ ///
+ /// This does what it says on the tin. It is the key to the alloca partition
+ /// splitting and merging. After it is called we have the desired disjoint
+ /// collection of partitions.
+ void splitAndMergePartitions();
+};
+}
+
+template <typename DerivedT, typename RetT>
+class AllocaPartitioning::BuilderBase
+ : public InstVisitor<DerivedT, RetT> {
+public:
+ BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
+ : TD(TD),
+ AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
+ P(P) {
+ enqueueUsers(AI, 0);
+ }
+
+protected:
+ const DataLayout &TD;
+ const uint64_t AllocSize;
+ AllocaPartitioning &P;
+
+ SmallPtrSet<Use *, 8> VisitedUses;
+
+ struct OffsetUse {
+ Use *U;
+ int64_t Offset;
+ };
+ SmallVector<OffsetUse, 8> Queue;
+
+ // The active offset and use while visiting.
+ Use *U;
+ int64_t Offset;
+
+ void enqueueUsers(Instruction &I, int64_t UserOffset) {
+ for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
+ UI != UE; ++UI) {
+ if (VisitedUses.insert(&UI.getUse())) {
+ OffsetUse OU = { &UI.getUse(), UserOffset };
+ Queue.push_back(OU);
+ }
+ }
+ }
+
+ bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
+ GEPOffset = Offset;
+ for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ return false;
+ if (OpC->isZero())
+ continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = TD.getStructLayout(STy);
+ uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
+ // Check that we can continue to model this GEP in a signed 64-bit offset.
+ if (ElementOffset > INT64_MAX ||
+ (GEPOffset >= 0 &&
+ ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) {
+ DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
+ << "what can be represented in an int64_t!\n"
+ << " alloca: " << P.AI << "\n");
+ return false;
+ }
+ if (GEPOffset < 0)
+ GEPOffset = ElementOffset + (uint64_t)-GEPOffset;
+ else
+ GEPOffset += ElementOffset;
+ continue;
+ }
+
+ APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
+ Index *= APInt(Index.getBitWidth(),
+ TD.getTypeAllocSize(GTI.getIndexedType()));
+ Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
+ /*isSigned*/true);
+ // Check if the result can be stored in our int64_t offset.
+ if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) {
+ DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
+ << "what can be represented in an int64_t!\n"
+ << " alloca: " << P.AI << "\n");
+ return false;
+ }
+
+ GEPOffset = Index.getSExtValue();
+ }
+ return true;
+ }
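+ // Worked example (hypothetical IR): for "getelementptr {i32, i32}* %p,
+ // i64 0, i32 1" the struct branch above adds the field offset 4 (assuming
+ // a 4 byte i32), while "getelementptr i32* %p, i64 3" takes the array
+ // branch and computes 3 * 4 = 12; any non-constant index makes the walk
+ // return false, and the PartitionBuilder then treats the pointer as
+ // escaping.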
+
+ Value *foldSelectInst(SelectInst &SI) {
+ // If the condition being selected on is a constant or the same value is
+ // being selected between, fold the select. Yes this does (rarely) happen
+ // early on.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
+ return SI.getOperand(1+CI->isZero());
+ if (SI.getOperand(1) == SI.getOperand(2)) {
+ assert(*U == SI.getOperand(1));
+ return SI.getOperand(1);
+ }
+ return 0;
+ }
+};
+
+/// \brief Builder for the alloca partitioning.
+///
+/// This class builds an alloca partitioning by recursively visiting the uses
+/// of an alloca and splitting the partitions for each load and store at each
+/// offset.
+class AllocaPartitioning::PartitionBuilder
+ : public BuilderBase<PartitionBuilder, bool> {
+ friend class InstVisitor<PartitionBuilder, bool>;
+
+ SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
+
+public:
+ PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
+ : BuilderBase<PartitionBuilder, bool>(TD, AI, P) {}
+
+ /// \brief Run the builder over the allocation.
+ bool operator()() {
+ // Note that we have to re-evaluate size on each trip through the loop as
+ // the queue grows at the tail.
+ for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) {
+ U = Queue[Idx].U;
+ Offset = Queue[Idx].Offset;
+ if (!visit(cast<Instruction>(U->getUser())))
+ return false;
+ }
+ return true;
+ }
+
+private:
+ bool markAsEscaping(Instruction &I) {
+ P.PointerEscapingInstr = &I;
+ return false;
+ }
+
+ void insertUse(Instruction &I, int64_t Offset, uint64_t Size,
+ bool IsSplittable = false) {
+ // Completely skip uses which have a zero size or don't overlap the
+ // allocation.
+ if (Size == 0 ||
+ (Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
+ (Offset < 0 && (uint64_t)-Offset >= Size)) {
+ DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
+ << " which starts past the end of the " << AllocSize
+ << " byte alloca:\n"
+ << " alloca: " << P.AI << "\n"
+ << " use: " << I << "\n");
+ return;
+ }
+
+ // Clamp the start to the beginning of the allocation.
+ if (Offset < 0) {
+ DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
+ << " to start at the beginning of the alloca:\n"
+ << " alloca: " << P.AI << "\n"
+ << " use: " << I << "\n");
+ Size -= (uint64_t)-Offset;
+ Offset = 0;
+ }
+
+ uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+
+ // Clamp the end offset to the end of the allocation. Note that this is
+ // formulated to handle even the case where "BeginOffset + Size" overflows.
+ // NOTE! This may appear superficially to be something we could ignore
+ // entirely, but that is not so! There may be PHI-node uses where some
+ // instructions are dead but not others. We can't completely ignore the
+ // PHI node, and so have to record at least the information here.
+ assert(AllocSize >= BeginOffset); // Established above.
+ if (Size > AllocSize - BeginOffset) {
+ DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
+ << " to remain within the " << AllocSize << " byte alloca:\n"
+ << " alloca: " << P.AI << "\n"
+ << " use: " << I << "\n");
+ EndOffset = AllocSize;
+ }
+
+ Partition New(BeginOffset, EndOffset, IsSplittable);
+ P.Partitions.push_back(New);
+ }
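+ // Worked example (hypothetical values): given an 8 byte alloca, a 16 byte
+ // use at offset -4 survives the checks above, is clamped to begin at 0
+ // with 12 bytes remaining, and then has its end clamped from 12 down to
+ // 8, producing the partition [0,8).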
+
+ bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset,
+ bool IsVolatile) {
+ uint64_t Size = TD.getTypeStoreSize(Ty);
+
+ // If this memory access can be shown to *statically* extend outside the
+ // bounds of the allocation, its behavior is undefined, so simply
+ // ignore it. Note that this is more strict than the generic clamping
+ // behavior of insertUse. We also try to handle cases which might run the
+ // risk of overflow.
+ // FIXME: We should instead consider the pointer to have escaped if this
+ // function is being instrumented for addressing bugs or race conditions.
+ if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
+ Size > (AllocSize - (uint64_t)Offset)) {
+ DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte "
+ << (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset
+ << " which extends past the end of the " << AllocSize
+ << " byte alloca:\n"
+ << " alloca: " << P.AI << "\n"
+ << " use: " << I << "\n");
+ return true;
+ }
+
+ // We allow splitting of loads and stores where the type is an integer type
+ // and the access covers the entire alloca. Such integer loads and stores
+ // often require decomposition into fine-grained loads and stores.
+ bool IsSplittable = false;
+ if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
+ IsSplittable = !IsVolatile && ITy->getBitWidth() == AllocSize*8;
+
+ insertUse(I, Offset, Size, IsSplittable);
+ return true;
+ }
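+ // Worked example (hypothetical accesses): on an 8 byte alloca, a
+ // non-volatile i64 load becomes a splittable [0,8) partition, an i32
+ // load at offset 4 becomes an unsplittable [4,8) partition, and an i32
+ // load at offset 8 is statically out of bounds and ignored.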
+
+ bool visitBitCastInst(BitCastInst &BC) {
+ enqueueUsers(BC, Offset);
+ return true;
+ }
+
+ bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ int64_t GEPOffset;
+ if (!computeConstantGEPOffset(GEPI, GEPOffset))
+ return markAsEscaping(GEPI);
+
+ enqueueUsers(GEPI, GEPOffset);
+ return true;
+ }
+
+ bool visitLoadInst(LoadInst &LI) {
+ assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
+ "All simple FCA loads should have been pre-split");
+ return handleLoadOrStore(LI.getType(), LI, Offset, LI.isVolatile());
+ }
+
+ bool visitStoreInst(StoreInst &SI) {
+ Value *ValOp = SI.getValueOperand();
+ if (ValOp == *U)
+ return markAsEscaping(SI);
+
+ assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
+ "All simple FCA stores should have been pre-split");
+ return handleLoadOrStore(ValOp->getType(), SI, Offset, SI.isVolatile());
+ }
+
+ bool visitMemSetInst(MemSetInst &II) {
+ assert(II.getRawDest() == *U && "Pointer use is not the destination?");
+ ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
+ uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
+ insertUse(II, Offset, Size, Length);
+ return true;
+ }
+
+ bool visitMemTransferInst(MemTransferInst &II) {
+ ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
+ uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
+ if (!Size)
+ // Zero-length mem transfer intrinsics can be ignored entirely.
+ return true;
+
+ MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
+
+ // Only intrinsics with a constant length can be split.
+ Offsets.IsSplittable = Length;
+
+ if (*U == II.getRawDest()) {
+ Offsets.DestBegin = Offset;
+ Offsets.DestEnd = Offset + Size;
+ }
+ if (*U == II.getRawSource()) {
+ Offsets.SourceBegin = Offset;
+ Offsets.SourceEnd = Offset + Size;
+ }
+
+ // If we have set up end offsets for both the source and the destination,
+ // we have found both sides of this transfer pointing at the same alloca.
+ bool SeenBothEnds = Offsets.SourceEnd && Offsets.DestEnd;
+ if (SeenBothEnds && II.getRawDest() != II.getRawSource()) {
+ unsigned PrevIdx = MemTransferPartitionMap[&II];
+
+ // Check if the begin offsets match and this is a non-volatile transfer.
+ // In that case, we can completely elide the transfer.
+ if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) {
+ P.Partitions[PrevIdx].kill();
+ return true;
+ }
+
+ // Otherwise we have an offset transfer within the same alloca. We can't
+ // split those.
+ P.Partitions[PrevIdx].IsSplittable = Offsets.IsSplittable = false;
+ } else if (SeenBothEnds) {
+ // Handle the case where this exact use provides both ends of the
+ // operation.
+ assert(II.getRawDest() == II.getRawSource());
+
+ // For non-volatile transfers this is a no-op.
+ if (!II.isVolatile())
+ return true;
+
+ // Otherwise just suppress splitting.
+ Offsets.IsSplittable = false;
+ }
+
+ // Insert the use now that we've fixed up the splittable nature.
+ insertUse(II, Offset, Size, Offsets.IsSplittable);
+
+ // Set up the mapping from intrinsic to partition if we've not seen both
+ // ends of this transfer.
+ if (!SeenBothEnds) {
+ unsigned NewIdx = P.Partitions.size() - 1;
+ bool Inserted
+ = MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx)).second;
+ assert(Inserted &&
+ "Already have intrinsic in map but haven't seen both ends");
+ (void)Inserted;
+ }
+
+ return true;
+ }
+
+ // Disable SROA for any intrinsics except for lifetime invariants.
+ // FIXME: What about debug intrinsics? This matches old behavior, but
+ // doesn't make sense.
+ bool visitIntrinsicInst(IntrinsicInst &II) {
+ if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
+ II.getIntrinsicID() == Intrinsic::lifetime_end) {
+ ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
+ uint64_t Size = std::min(AllocSize - Offset, Length->getLimitedValue());
+ insertUse(II, Offset, Size, true);
+ return true;
+ }
+
+ return markAsEscaping(II);
+ }
+
+ Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
+ // We consider any PHI or select that results in a direct load or store of
+ // the same offset to be a viable use for partitioning purposes. These uses
+ // are considered unsplittable and the size is the maximum loaded or stored
+ // size.
+ SmallPtrSet<Instruction *, 4> Visited;
+ SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
+ Visited.insert(Root);
+ Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
+ // If there are no loads or stores, the access is dead. We mark that as
+ // a size zero access.
+ Size = 0;
+ do {
+ Instruction *I, *UsedI;
+ llvm::tie(UsedI, I) = Uses.pop_back_val();
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ Size = std::max(Size, TD.getTypeStoreSize(LI->getType()));
+ continue;
+ }
+ if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ Value *Op = SI->getOperand(0);
+ if (Op == UsedI)
+ return SI;
+ Size = std::max(Size, TD.getTypeStoreSize(Op->getType()));
+ continue;
+ }
+
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ if (!GEP->hasAllZeroIndices())
+ return GEP;
+ } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
+ !isa<SelectInst>(I)) {
+ return I;
+ }
+
+ for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
+ ++UI)
+ if (Visited.insert(cast<Instruction>(*UI)))
+ Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
+ } while (!Uses.empty());
+
+ return 0;
+ }
+
+ bool visitPHINode(PHINode &PN) {
+ // See if we already have computed info on this node.
+ std::pair<uint64_t, bool> &PHIInfo = P.PHIOrSelectSizes[&PN];
+ if (PHIInfo.first) {
+ PHIInfo.second = true;
+ insertUse(PN, Offset, PHIInfo.first);
+ return true;
+ }
+
+ // Check for an unsafe use of the PHI node.
+ if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first))
+ return markAsEscaping(*EscapingI);
+
+ insertUse(PN, Offset, PHIInfo.first);
+ return true;
+ }
+
+ bool visitSelectInst(SelectInst &SI) {
+ if (Value *Result = foldSelectInst(SI)) {
+ if (Result == *U)
+ // If the result of the constant fold will be the pointer, recurse
+ // through the select as if we had RAUW'ed it.
+ enqueueUsers(SI, Offset);
+
+ return true;
+ }
+
+ // See if we already have computed info on this node.
+ std::pair<uint64_t, bool> &SelectInfo = P.PHIOrSelectSizes[&SI];
+ if (SelectInfo.first) {
+ SelectInfo.second = true;
+ insertUse(SI, Offset, SelectInfo.first);
+ return true;
+ }
+
+ // Check for an unsafe use of the select.
+ if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first))
+ return markAsEscaping(*EscapingI);
+
+ insertUse(SI, Offset, SelectInfo.first);
+ return true;
+ }
+
+ /// \brief Disable SROA entirely if there are unhandled users of the alloca.
+ bool visitInstruction(Instruction &I) { return markAsEscaping(I); }
+};
+
+
+/// \brief Use adder for the alloca partitioning.
+///
+/// This class adds the uses of an alloca to all of the partitions which they
+/// use. For splittable partitions, this can end up doing essentially a linear
+/// walk of the partitions, but the number of steps remains bounded by the
+/// total result instruction size:
+/// - The number of partitions is a result of the number of unsplittable
+/// instructions using the alloca.
+/// - The number of users of each partition is at worst the total number of
+/// splittable instructions using the alloca.
+/// Thus we will produce N * M instructions in the end, where N is the number
+/// of unsplittable uses and M the number of splittable ones. This visitor does
+/// the exact same number of updates to the partitioning.
+///
+/// In the more common case, this visitor will leverage the fact that the
+/// partition space is pre-sorted, and do a logarithmic search for the
+/// partition needed, making the total visit a classical ((N + M) * log(N))
+/// complexity operation.
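+///
+/// For instance (hypothetical counts): 4 unsplittable loads give at most
+/// 4 partitions, and 3 alloca-wide memcpys can then contribute up to
+/// 4 * 3 = 12 partition-use entries in the fully overlapping worst case.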
+class AllocaPartitioning::UseBuilder : public BuilderBase<UseBuilder> {
+ friend class InstVisitor<UseBuilder>;
+
+ /// \brief Set to de-duplicate dead instructions found in the use walk.
+ SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
+
+public:
+ UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
+ : BuilderBase<UseBuilder>(TD, AI, P) {}
+
+ /// \brief Run the builder over the allocation.
+ void operator()() {
+ // Note that we have to re-evaluate size on each trip through the loop as
+ // the queue grows at the tail.
+ for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) {
+ U = Queue[Idx].U;
+ Offset = Queue[Idx].Offset;
+ this->visit(cast<Instruction>(U->getUser()));
+ }
+ }
+
+private:
+ void markAsDead(Instruction &I) {
+ if (VisitedDeadInsts.insert(&I))
+ P.DeadUsers.push_back(&I);
+ }
+
+ void insertUse(Instruction &User, int64_t Offset, uint64_t Size) {
+ // If the use has a zero size or extends outside of the allocation, record
+ // it as a dead use for elimination later.
+ if (Size == 0 || (uint64_t)Offset >= AllocSize ||
+ (Offset < 0 && (uint64_t)-Offset >= Size))
+ return markAsDead(User);
+
+ // Clamp the start to the beginning of the allocation.
+ if (Offset < 0) {
+ Size -= (uint64_t)-Offset;
+ Offset = 0;
+ }
+
+ uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+
+ // Clamp the end offset to the end of the allocation. Note that this is
+ // formulated to handle even the case where "BeginOffset + Size" overflows.
+ assert(AllocSize >= BeginOffset); // Established above.
+ if (Size > AllocSize - BeginOffset)
+ EndOffset = AllocSize;
+
+ // NB: This only works if we have zero overlapping partitions.
+ iterator B = std::lower_bound(P.begin(), P.end(), BeginOffset);
+ if (B != P.begin() && llvm::prior(B)->EndOffset > BeginOffset)
+ B = llvm::prior(B);
+ for (iterator I = B, E = P.end(); I != E && I->BeginOffset < EndOffset;
+ ++I) {
+ PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset),
+ std::min(I->EndOffset, EndOffset), U);
+ P.use_push_back(I, NewPU);
+ if (isa<PHINode>(U->getUser()) || isa<SelectInst>(U->getUser()))
+ P.PHIOrSelectOpMap[U]
+ = std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1);
+ }
+ }
+
+ void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
+ uint64_t Size = TD.getTypeStoreSize(Ty);
+
+ // If this memory access can be shown to *statically* extend outside the
+ // bounds of the allocation, its behavior is undefined, so simply
+ // ignore it. Note that this is more strict than the generic clamping
+ // behavior of insertUse.
+ if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
+ Size > (AllocSize - (uint64_t)Offset))
+ return markAsDead(I);
+
+ insertUse(I, Offset, Size);
+ }
+
+ void visitBitCastInst(BitCastInst &BC) {
+ if (BC.use_empty())
+ return markAsDead(BC);
+
+ enqueueUsers(BC, Offset);
+ }
+
+ void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ if (GEPI.use_empty())
+ return markAsDead(GEPI);
+
+ int64_t GEPOffset;
+ if (!computeConstantGEPOffset(GEPI, GEPOffset))
+ llvm_unreachable("Unable to compute constant offset for use");
+
+ enqueueUsers(GEPI, GEPOffset);
+ }
+
+ void visitLoadInst(LoadInst &LI) {
+ handleLoadOrStore(LI.getType(), LI, Offset);
+ }
+
+ void visitStoreInst(StoreInst &SI) {
+ handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset);
+ }
+
+ void visitMemSetInst(MemSetInst &II) {
+ ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
+ uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
+ insertUse(II, Offset, Size);
+ }
+
+ void visitMemTransferInst(MemTransferInst &II) {
+ ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
+ uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
+ if (!Size)
+ return markAsDead(II);
+
+ MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
+ if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd &&
+ Offsets.DestBegin == Offsets.SourceBegin)
+ return markAsDead(II); // Skip identity transfers without side-effects.
+
+ insertUse(II, Offset, Size);
+ }
+
+ void visitIntrinsicInst(IntrinsicInst &II) {
+ assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
+ II.getIntrinsicID() == Intrinsic::lifetime_end);
+
+ ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
+ insertUse(II, Offset,
+ std::min(AllocSize - Offset, Length->getLimitedValue()));
+ }
+
+ void insertPHIOrSelect(Instruction &User, uint64_t Offset) {
+ uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first;
+
+ // For PHI and select operands outside the alloca, we can't nuke the entire
+ // phi or select -- the other side might still be relevant, so we special
+ // case them here and use a separate structure to track the operands
+ // themselves which should be replaced with undef.
+ if (Offset >= AllocSize) {
+ P.DeadOperands.push_back(U);
+ return;
+ }
+
+ insertUse(User, Offset, Size);
+ }
+ void visitPHINode(PHINode &PN) {
+ if (PN.use_empty())
+ return markAsDead(PN);
+
+ insertPHIOrSelect(PN, Offset);
+ }
+ void visitSelectInst(SelectInst &SI) {
+ if (SI.use_empty())
+ return markAsDead(SI);
+
+ if (Value *Result = foldSelectInst(SI)) {
+ if (Result == *U)
+ // If the result of the constant fold will be the pointer, recurse
+ // through the select as if we had RAUW'ed it.
+ enqueueUsers(SI, Offset);
+ else
+ // Otherwise the operand to the select is dead, and we can replace it
+ // with undef.
+ P.DeadOperands.push_back(U);
+
+ return;
+ }
+
+ insertPHIOrSelect(SI, Offset);
+ }
+
+ /// \brief Unreachable, we've already visited the alloca once.
+ void visitInstruction(Instruction &I) {
+ llvm_unreachable("Unhandled instruction in use builder.");
+ }
+};
+
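+// Worked example (hypothetical input): given a sorted splittable partition
+// [0,16) (say, from a whole-alloca memcpy) and an unsplittable [4,8) (an
+// i32 load), the function below carves the splittable range around the
+// unsplittable one, yielding the disjoint sequence [0,4), [4,8), [8,16).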
+void AllocaPartitioning::splitAndMergePartitions() {
+ size_t NumDeadPartitions = 0;
+
+ // Track the range of splittable partitions that we pass when accumulating
+ // overlapping unsplittable partitions.
+ uint64_t SplitEndOffset = 0ull;
+
+ Partition New(0ull, 0ull, false);
+
+ for (unsigned i = 0, j = i, e = Partitions.size(); i != e; i = j) {
+ ++j;
+
+ if (!Partitions[i].IsSplittable || New.BeginOffset == New.EndOffset) {
+ assert(New.BeginOffset == New.EndOffset);
+ New = Partitions[i];
+ } else {
+ assert(New.IsSplittable);
+ New.EndOffset = std::max(New.EndOffset, Partitions[i].EndOffset);
+ }
+ assert(New.BeginOffset != New.EndOffset);
+
+ // Scan the overlapping partitions.
+ while (j != e && New.EndOffset > Partitions[j].BeginOffset) {
+ // If the new partition we are forming is splittable, stop at the first
+ // unsplittable partition.
+ if (New.IsSplittable && !Partitions[j].IsSplittable)
+ break;
+
+ // Grow the new partition to include any equally splittable range. 'j' is
+ // always equally splittable when New is splittable, but when New is not
+ // splittable, we may subsume some (or part of some) splittable partition
+ // without growing the new one.
+ if (New.IsSplittable == Partitions[j].IsSplittable) {
+ New.EndOffset = std::max(New.EndOffset, Partitions[j].EndOffset);
+ } else {
+ assert(!New.IsSplittable);
+ assert(Partitions[j].IsSplittable);
+ SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset);
+ }
+
+ Partitions[j].kill();
+ ++NumDeadPartitions;
+ ++j;
+ }
+
+ // If the new partition is splittable, chop off the end as soon as the
+ // unsplittable subsequent partition starts and ensure we eventually cover
+ // the splittable area.
+ if (j != e && New.IsSplittable) {
+ SplitEndOffset = std::max(SplitEndOffset, New.EndOffset);
+ New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
+ }
+
+ // Add the new partition if it differs from the original one and is
+ // non-empty. We can end up with an empty partition here if it was
+ // splittable but there is an unsplittable one that starts at the same
+ // offset.
+ if (New != Partitions[i]) {
+ if (New.BeginOffset != New.EndOffset)
+ Partitions.push_back(New);
+ // Mark the old one for removal.
+ Partitions[i].kill();
+ ++NumDeadPartitions;
+ }
+
+ New.BeginOffset = New.EndOffset;
+ if (!New.IsSplittable) {
+ New.EndOffset = std::max(New.EndOffset, SplitEndOffset);
+ if (j != e && !Partitions[j].IsSplittable)
+ New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
+ New.IsSplittable = true;
+ // If there is a trailing splittable partition which won't be fused into
+ // the next splittable partition go ahead and add it onto the partitions
+ // list.
+ if (New.BeginOffset < New.EndOffset &&
+ (j == e || !Partitions[j].IsSplittable ||
+ New.EndOffset < Partitions[j].BeginOffset)) {
+ Partitions.push_back(New);
+ New.BeginOffset = New.EndOffset = 0ull;
+ }
+ }
+ }
+
+ // Re-sort the partitions now that they have been split and merged into a
+ // disjoint set of partitions. Also remove any of the dead partitions we've
+ // replaced in the process.
+ std::sort(Partitions.begin(), Partitions.end());
+ if (NumDeadPartitions) {
+ assert(Partitions.back().isDead());
+ assert((ptrdiff_t)NumDeadPartitions ==
+ std::count(Partitions.begin(), Partitions.end(), Partitions.back()));
+ }
+ Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end());
+}
+
+AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
+ :
+#ifndef NDEBUG
+ AI(AI),
+#endif
+ PointerEscapingInstr(0) {
+ PartitionBuilder PB(TD, AI, *this);
+ if (!PB())
+ return;
+
+ // Sort the partitions. This arranges for the offsets to be in ascending
+ // and the sizes to be in descending order.
+ std::sort(Partitions.begin(), Partitions.end());
+
+ // Remove any partitions from the back which are marked as dead.
+ while (!Partitions.empty() && Partitions.back().isDead())
+ Partitions.pop_back();
+
+ if (Partitions.size() > 1) {
+ // Intersect splittability for all partitions with equal offsets and sizes.
+ // Then remove all but the first so that we have a sequence of non-equal but
+ // potentially overlapping partitions.
+ for (iterator I = Partitions.begin(), J = I, E = Partitions.end(); I != E;
+ I = J) {
+ ++J;
+ while (J != E && *I == *J) {
+ I->IsSplittable &= J->IsSplittable;
+ ++J;
+ }
+ }
+ Partitions.erase(std::unique(Partitions.begin(), Partitions.end()),
+ Partitions.end());
+
+ // Split splittable and merge unsplittable partitions into a disjoint set
+ // of partitions over the used space of the allocation.
+ splitAndMergePartitions();
+ }
+
+ // Now build up the user lists for each of these disjoint partitions by
+ // re-walking the recursive users of the alloca.
+ Uses.resize(Partitions.size());
+ UseBuilder UB(TD, AI, *this);
+ UB();
+}
+
+Type *AllocaPartitioning::getCommonType(iterator I) const {
+ Type *Ty = 0;
+ for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) {
+ if (!UI->U)
+ continue; // Skip dead uses.
+ if (isa<IntrinsicInst>(*UI->U->getUser()))
+ continue;
+ if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset)
+ continue;
+
+ Type *UserTy = 0;
+ if (LoadInst *LI = dyn_cast<LoadInst>(UI->U->getUser())) {
+ UserTy = LI->getType();
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(UI->U->getUser())) {
+ UserTy = SI->getValueOperand()->getType();
+ } else {
+ return 0; // Bail if we have weird uses.
+ }
+
+ if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
+ // If the type is larger than the partition, skip it. We only encounter
+ // this for split integer operations where we want to use the type of the
+ // entity causing the split.
+ if (ITy->getBitWidth() > (I->EndOffset - I->BeginOffset)*8)
+ continue;
+
+ // If we have found an integer type use covering the alloca, use that
+ // regardless of the other types, as integers are often used for a "bucket
+ // of bits" type.
+ return ITy;
+ }
+
+ if (Ty && Ty != UserTy)
+ return 0;
+
+ Ty = UserTy;
+ }
+ return Ty;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+
+void AllocaPartitioning::print(raw_ostream &OS, const_iterator I,
+ StringRef Indent) const {
+ OS << Indent << "partition #" << (I - begin())
+ << " [" << I->BeginOffset << "," << I->EndOffset << ")"
+ << (I->IsSplittable ? " (splittable)" : "")
+ << (Uses[I - begin()].empty() ? " (zero uses)" : "")
+ << "\n";
+}
+
+void AllocaPartitioning::printUsers(raw_ostream &OS, const_iterator I,
+ StringRef Indent) const {
+ for (const_use_iterator UI = use_begin(I), UE = use_end(I);
+ UI != UE; ++UI) {
+ if (!UI->U)
+ continue; // Skip dead uses.
+ OS << Indent << " [" << UI->BeginOffset << "," << UI->EndOffset << ") "
+ << "used by: " << *UI->U->getUser() << "\n";
+ if (MemTransferInst *II = dyn_cast<MemTransferInst>(UI->U->getUser())) {
+ const MemTransferOffsets &MTO = MemTransferInstData.lookup(II);
+ bool IsDest;
+ if (!MTO.IsSplittable)
+ IsDest = UI->BeginOffset == MTO.DestBegin;
+ else
+ IsDest = MTO.DestBegin != 0u;
+ OS << Indent << " (original " << (IsDest ? "dest" : "source") << ": "
+ << "[" << (IsDest ? MTO.DestBegin : MTO.SourceBegin)
+ << "," << (IsDest ? MTO.DestEnd : MTO.SourceEnd) << ")\n";
+ }
+ }
+}
+
+void AllocaPartitioning::print(raw_ostream &OS) const {
+ if (PointerEscapingInstr) {
+ OS << "No partitioning for alloca: " << AI << "\n"
+ << " A pointer to this alloca escaped by:\n"
+ << " " << *PointerEscapingInstr << "\n";
+ return;
+ }
+
+ OS << "Partitioning of alloca: " << AI << "\n";
+ unsigned Num = 0;
+ for (const_iterator I = begin(), E = end(); I != E; ++I, ++Num) {
+ print(OS, I);
+ printUsers(OS, I);
+ }
+}
+
+void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); }
+void AllocaPartitioning::dump() const { print(dbgs()); }
+
+#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+
+
+namespace {
+/// \brief Implementation of LoadAndStorePromoter for promoting allocas.
+///
+/// This subclass of LoadAndStorePromoter adds overrides to handle promoting
+/// the loads and stores of an alloca instruction, as well as updating its
+/// debug information. This is used when a domtree is unavailable and thus
+/// mem2reg in its full form can't be used to handle promotion of allocas to
+/// scalar values.
+class AllocaPromoter : public LoadAndStorePromoter {
+ AllocaInst &AI;
+ DIBuilder &DIB;
+
+ SmallVector<DbgDeclareInst *, 4> DDIs;
+ SmallVector<DbgValueInst *, 4> DVIs;
+
+public:
+ AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
+ AllocaInst &AI, DIBuilder &DIB)
+ : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
+
+ void run(const SmallVectorImpl<Instruction*> &Insts) {
+ // Remember which alloca we're promoting (for isInstInList).
+ if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
+ for (Value::use_iterator UI = DebugNode->use_begin(),
+ UE = DebugNode->use_end();
+ UI != UE; ++UI)
+ if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+ DDIs.push_back(DDI);
+ else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
+ DVIs.push_back(DVI);
+ }
+
+ LoadAndStorePromoter::run(Insts);
+ AI.eraseFromParent();
+ while (!DDIs.empty())
+ DDIs.pop_back_val()->eraseFromParent();
+ while (!DVIs.empty())
+ DVIs.pop_back_val()->eraseFromParent();
+ }
+
+ virtual bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->getOperand(0) == &AI;
+ return cast<StoreInst>(I)->getPointerOperand() == &AI;
+ }
+
+ virtual void updateDebugInfo(Instruction *Inst) const {
+ for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
+ E = DDIs.end(); I != E; ++I) {
+ DbgDeclareInst *DDI = *I;
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
+ else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
+ }
+ for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
+ E = DVIs.end(); I != E; ++I) {
+ DbgValueInst *DVI = *I;
+ Value *Arg = NULL;
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ // If an argument is zero or sign extended then use the argument directly.
+ // The ZExt/SExt may be zapped by an optimization pass in the future.
+ if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
+ Arg = dyn_cast<Argument>(ZExt->getOperand(0));
+ if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
+ Arg = dyn_cast<Argument>(SExt->getOperand(0));
+ if (!Arg)
+ Arg = SI->getOperand(0);
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ Arg = LI->getOperand(0);
+ } else {
+ continue;
+ }
+ Instruction *DbgVal =
+ DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
+ Inst);
+ DbgVal->setDebugLoc(DVI->getDebugLoc());
+ }
+ }
+};
+} // end anon namespace
+
+
+namespace {
+/// \brief An optimization pass providing Scalar Replacement of Aggregates.
+///
+/// This pass takes allocations which can be completely analyzed (that is, they
+/// don't escape) and tries to turn them into scalar SSA values. There are
+/// a few steps to this process.
+///
+/// 1) It takes allocations of aggregates and analyzes the ways in which they
+/// are used to try to split them into smaller allocations, ideally of
+/// a single scalar data type. It will split up memcpy and memset accesses
+/// as necessary and try to isolate individual scalar accesses.
+/// 2) It will transform accesses into forms which are suitable for SSA value
+/// promotion. This can be replacing a memset with a scalar store of an
+/// integer value, or it can involve speculating operations on a PHI or
+/// select to be a PHI or select of the results.
+/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
+/// onto insert and extract operations on a vector value, and convert them to
+/// this form. By doing so, it will enable promotion of vector aggregates to
+/// SSA vector values.
+class SROA : public FunctionPass {
+ const bool RequiresDomTree;
+
+ LLVMContext *C;
+ const DataLayout *TD;
+ DominatorTree *DT;
+
+ /// \brief Worklist of alloca instructions to simplify.
+ ///
+ /// Each alloca in the function is added to this. Each new alloca formed gets
+ /// added to it as well to recursively simplify unless that alloca can be
+ /// directly promoted. Finally, each time we rewrite a use of an alloca other
+ /// the one being actively rewritten, we add it back onto the list if not
+ /// already present to ensure it is re-visited.
+ SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;
+
+ /// \brief A collection of instructions to delete.
+ /// We try to batch deletions to simplify code and make things a bit more
+ /// efficient.
+ SetVector<Instruction *, SmallVector<Instruction *, 8> > DeadInsts;
+
+ /// \brief Post-promotion worklist.
+ ///
+ /// Sometimes we discover an alloca which has a high probability of becoming
+ /// viable for SROA after a round of promotion takes place. In those cases,
+ /// the alloca is enqueued here for re-processing.
+ ///
+ /// Note that we have to be very careful to clear allocas out of this list in
+ /// the event they are deleted.
+ SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;
+
+ /// \brief A collection of alloca instructions we can directly promote.
+ std::vector<AllocaInst *> PromotableAllocas;
+
+public:
+ SROA(bool RequiresDomTree = true)
+ : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
+ C(0), TD(0), DT(0) {
+ initializeSROAPass(*PassRegistry::getPassRegistry());
+ }
+ bool runOnFunction(Function &F);
+ void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ const char *getPassName() const { return "SROA"; }
+ static char ID;
+
+private:
+ friend class PHIOrSelectSpeculator;
+ friend class AllocaPartitionRewriter;
+ friend class AllocaPartitionVectorRewriter;
+
+ bool rewriteAllocaPartition(AllocaInst &AI,
+ AllocaPartitioning &P,
+ AllocaPartitioning::iterator PI);
+ bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P);
+ bool runOnAlloca(AllocaInst &AI);
+ void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
+ bool promoteAllocas(Function &F);
+};
+}
+
+char SROA::ID = 0;
+
+FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
+ return new SROA(RequiresDomTree);
+}
+
+INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
+ false, false)
+
+namespace {
+/// \brief Visitor to speculate PHIs and Selects where possible.
+class PHIOrSelectSpeculator : public InstVisitor<PHIOrSelectSpeculator> {
+ // Befriend the base class so it can delegate to private visit methods.
+ friend class llvm::InstVisitor<PHIOrSelectSpeculator>;
+
+ const DataLayout &TD;
+ AllocaPartitioning &P;
+ SROA &Pass;
+
+public:
+ PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass)
+ : TD(TD), P(P), Pass(Pass) {}
+
+ /// \brief Visit the users of an alloca partition and rewrite them.
+ void visitUsers(AllocaPartitioning::const_iterator PI) {
+ // Note that we need to use an index here as the underlying vector of uses
+ // may be grown during speculation. However, we never need to re-visit the
+ // new uses, and so we can use the initial size bound.
+ for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) {
+ const AllocaPartitioning::PartitionUse &PU = P.getUse(PI, Idx);
+ if (!PU.U)
+ continue; // Skip dead use.
+
+ visit(cast<Instruction>(PU.U->getUser()));
+ }
+ }
+
+private:
+ // By default, skip this instruction.
+ void visitInstruction(Instruction &I) {}
+
+ /// PHI instructions that use an alloca and are subsequently loaded can be
+ /// rewritten to load both input pointers in the pred blocks and then PHI the
+ /// results, allowing the load of the alloca to be promoted.
+ /// From this:
+ /// %P2 = phi [i32* %Alloca, i32* %Other]
+ /// %V = load i32* %P2
+ /// to:
+ /// %V1 = load i32* %Alloca -> will be mem2reg'd
+ /// ...
+ /// %V2 = load i32* %Other
+ /// ...
+ /// %V = phi [i32 %V1, i32 %V2]
+ ///
+ /// We can do this to a select if its only uses are loads and if the operands
+ /// to the select can be loaded unconditionally.
+ ///
+ /// FIXME: This should be hoisted into a generic utility, likely in
+ /// Transforms/Util/Local.h
+ bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl<LoadInst *> &Loads) {
+ // For now, we can only do this promotion if the load is in the same block
+ // as the PHI, and if there are no stores between the phi and load.
+ // TODO: Allow recursive phi users.
+ // TODO: Allow stores.
+ BasicBlock *BB = PN.getParent();
+ unsigned MaxAlign = 0;
+ for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end();
+ UI != UE; ++UI) {
+ LoadInst *LI = dyn_cast<LoadInst>(*UI);
+ if (LI == 0 || !LI->isSimple()) return false;
+
+ // For now we only allow loads in the same block as the PHI. This is
+ // a common case that happens when instcombine merges two loads through
+ // a PHI.
+ if (LI->getParent() != BB) return false;
+
+ // Ensure that there are no instructions between the PHI and the load that
+ // could store.
+ for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
+ if (BBI->mayWriteToMemory())
+ return false;
+
+ MaxAlign = std::max(MaxAlign, LI->getAlignment());
+ Loads.push_back(LI);
+ }
+
+ // We can only transform this if it is safe to push the loads into the
+ // predecessor blocks. The only thing to watch out for is that we can't put
+ // a possibly trapping load in the predecessor if it is a critical edge.
+ for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num;
+ ++Idx) {
+ TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
+ Value *InVal = PN.getIncomingValue(Idx);
+
+ // If the value is produced by the terminator of the predecessor (an
+ // invoke) or it has side-effects, there is no valid place to put a load
+ // in the predecessor.
+ if (TI == InVal || TI->mayHaveSideEffects())
+ return false;
+
+ // If the predecessor has a single successor, then the edge isn't
+ // critical.
+ if (TI->getNumSuccessors() == 1)
+ continue;
+
+ // If this pointer is always safe to load, or if we can prove that there
+ // is already a load in the block, then we can move the load to the pred
+ // block.
+ if (InVal->isDereferenceablePointer() ||
+ isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD))
+ continue;
+
+ return false;
+ }
+
+ return true;
+ }
+
+ void visitPHINode(PHINode &PN) {
+ DEBUG(dbgs() << " original: " << PN << "\n");
+
+ SmallVector<LoadInst *, 4> Loads;
+ if (!isSafePHIToSpeculate(PN, Loads))
+ return;
+
+ assert(!Loads.empty());
+
+ Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
+ IRBuilder<> PHIBuilder(&PN);
+ PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
+ PN.getName() + ".sroa.speculated");
+
+ // Get the TBAA tag and alignment to use from one of the loads. It doesn't
+ // matter which one we pick, even if the loads disagree.
+ LoadInst *SomeLoad = cast<LoadInst>(Loads.back());
+ MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
+ unsigned Align = SomeLoad->getAlignment();
+
+ // Rewrite all loads of the PN to use the new PHI.
+ do {
+ LoadInst *LI = Loads.pop_back_val();
+ LI->replaceAllUsesWith(NewPN);
+ Pass.DeadInsts.insert(LI);
+ } while (!Loads.empty());
+
+ // Inject loads into all of the pred blocks.
+ for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
+ BasicBlock *Pred = PN.getIncomingBlock(Idx);
+ TerminatorInst *TI = Pred->getTerminator();
+ Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx));
+ Value *InVal = PN.getIncomingValue(Idx);
+ IRBuilder<> PredBuilder(TI);
+
+ LoadInst *Load
+ = PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." +
+ Pred->getName()));
+ ++NumLoadsSpeculated;
+ Load->setAlignment(Align);
+ if (TBAATag)
+ Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+ NewPN->addIncoming(Load, Pred);
+
+ Instruction *Ptr = dyn_cast<Instruction>(InVal);
+ if (!Ptr)
+ // No uses to rewrite.
+ continue;
+
+ // Try to lookup and rewrite any partition uses corresponding to this phi
+ // input.
+ AllocaPartitioning::iterator PI
+ = P.findPartitionForPHIOrSelectOperand(InUse);
+ if (PI == P.end())
+ continue;
+
+ // Replace the Use in the PartitionUse for this operand with the Use
+ // inside the load.
+ AllocaPartitioning::use_iterator UI
+ = P.findPartitionUseForPHIOrSelectOperand(InUse);
+ assert(isa<PHINode>(*UI->U->getUser()));
+ UI->U = &Load->getOperandUse(Load->getPointerOperandIndex());
+ }
+ DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
+ }
+
+ /// Select instructions that use an alloca and are subsequently loaded can be
+ /// rewritten to load both input pointers and then select between the results,
+ /// allowing the load of the alloca to be promoted.
+ /// From this:
+ /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
+ /// %V = load i32* %P2
+ /// to:
+ /// %V1 = load i32* %Alloca -> will be mem2reg'd
+ /// %V2 = load i32* %Other
+ /// %V = select i1 %cond, i32 %V1, i32 %V2
+ ///
+ /// We can do this to a select if its only uses are loads and if the operands
+ /// to the select can be loaded unconditionally.
+ bool isSafeSelectToSpeculate(SelectInst &SI,
+ SmallVectorImpl<LoadInst *> &Loads) {
+ Value *TValue = SI.getTrueValue();
+ Value *FValue = SI.getFalseValue();
+ bool TDerefable = TValue->isDereferenceablePointer();
+ bool FDerefable = FValue->isDereferenceablePointer();
+
+ for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end();
+ UI != UE; ++UI) {
+ LoadInst *LI = dyn_cast<LoadInst>(*UI);
+ if (LI == 0 || !LI->isSimple()) return false;
+
+ // Both operands to the select need to be dereferenceable, either
+ // absolutely (e.g. allocas) or at this point because we can see other
+ // accesses to it.
+ if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI,
+ LI->getAlignment(), &TD))
+ return false;
+ if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI,
+ LI->getAlignment(), &TD))
+ return false;
+ Loads.push_back(LI);
+ }
+
+ return true;
+ }
+
+ void visitSelectInst(SelectInst &SI) {
+ DEBUG(dbgs() << " original: " << SI << "\n");
+ IRBuilder<> IRB(&SI);
+
+ // If the select isn't safe to speculate, just use simple logic to emit it.
+ SmallVector<LoadInst *, 4> Loads;
+ if (!isSafeSelectToSpeculate(SI, Loads))
+ return;
+
+ Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) };
+ AllocaPartitioning::iterator PIs[2];
+ AllocaPartitioning::PartitionUse PUs[2];
+ for (unsigned i = 0, e = 2; i != e; ++i) {
+ PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]);
+ if (PIs[i] != P.end()) {
+ // If the pointer is within the partitioning, remove the select from
+ // its uses. We'll add in the new loads below.
+ AllocaPartitioning::use_iterator UI
+ = P.findPartitionUseForPHIOrSelectOperand(Ops[i]);
+ PUs[i] = *UI;
+ // Clear out the use here so that the offsets into the use list remain
+ // stable but this use is ignored when rewriting.
+ UI->U = 0;
+ }
+ }
+
+ Value *TV = SI.getTrueValue();
+ Value *FV = SI.getFalseValue();
+ // Replace the loads of the select with a select of two loads.
+ while (!Loads.empty()) {
+ LoadInst *LI = Loads.pop_back_val();
+
+ IRB.SetInsertPoint(LI);
+ LoadInst *TL =
+ IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
+ LoadInst *FL =
+ IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
+ NumLoadsSpeculated += 2;
+
+ // Transfer alignment and TBAA info if present.
+ TL->setAlignment(LI->getAlignment());
+ FL->setAlignment(LI->getAlignment());
+ if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
+ TL->setMetadata(LLVMContext::MD_tbaa, Tag);
+ FL->setMetadata(LLVMContext::MD_tbaa, Tag);
+ }
+
+ Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
+ LI->getName() + ".sroa.speculated");
+
+ LoadInst *Loads[2] = { TL, FL };
+ for (unsigned i = 0, e = 2; i != e; ++i) {
+ if (PIs[i] != P.end()) {
+ Use *LoadUse = &Loads[i]->getOperandUse(0);
+ assert(PUs[i].U->get() == LoadUse->get());
+ PUs[i].U = LoadUse;
+ P.use_push_back(PIs[i], PUs[i]);
+ }
+ }
+
+ DEBUG(dbgs() << " speculated to: " << *V << "\n");
+ LI->replaceAllUsesWith(V);
+ Pass.DeadInsts.insert(LI);
+ }
+ }
+};
+}
+
+/// \brief Accumulate the constant offsets in a GEP into a single APInt offset.
+///
+/// If the provided GEP is all-constant, the total byte offset formed by the
+/// GEP is computed and Offset is set to it. If the GEP has any non-constant
+/// operands, the function returns false and the value of Offset is unmodified.
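+///
+/// A hand-worked example (assuming 4-byte i32 and 2-byte i16): for
+///   getelementptr { i32, [4 x i16] }* %p, i32 0, i32 1, i32 2
+/// the accumulated offset is 4 (field offset of the array) plus 2 * 2 (two
+/// i16 elements), i.e. 8 bytes.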
+static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP,
+ APInt &Offset) {
+ APInt GEPOffset(Offset.getBitWidth(), 0);
+ for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ return false;
+ if (OpC->isZero()) continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = TD.getStructLayout(STy);
+ GEPOffset += APInt(Offset.getBitWidth(),
+ SL->getElementOffset(ElementIdx));
+ continue;
+ }
+
+ APInt TypeSize(Offset.getBitWidth(),
+ TD.getTypeAllocSize(GTI.getIndexedType()));
+ if (VectorType *VTy = dyn_cast<VectorType>(*GTI)) {
+ assert((VTy->getScalarSizeInBits() % 8) == 0 &&
+ "vector element size is not a multiple of 8, cannot GEP over it");
+ TypeSize = VTy->getScalarSizeInBits() / 8;
+ }
+
+ GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize;
+ }
+ Offset = GEPOffset;
+ return true;
+}
+
+/// \brief Build a GEP out of a base pointer and indices.
+///
+/// This will return the BasePtr if that is valid, or build a new GEP
+/// instruction using the IRBuilder if GEP-ing is needed.
+static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr,
+ SmallVectorImpl<Value *> &Indices,
+ const Twine &Prefix) {
+ if (Indices.empty())
+ return BasePtr;
+
+ // A single zero index is a no-op, so check for this and avoid building a GEP
+ // in that case.
+ if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
+ return BasePtr;
+
+ return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx");
+}
+
+/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
+/// TargetTy without changing the offset of the pointer.
+///
+/// This routine assumes we've already established a properly offset GEP with
+/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
+/// zero-indices down through type layers until we find one the same as
+/// TargetTy. If we can't find one with the same type, we at least try to use
+/// one with the same size. If none of that works, we just produce the GEP as
+/// indicated by Indices to have the correct offset.
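+///
+/// A sketch (names hypothetical): with Indices already holding the pointer
+/// index (i64 0), descending from { { i32 } } toward an i32 TargetTy appends
+/// two struct zero-indices, so buildGEP emits the equivalent of:
+///   getelementptr inbounds { { i32 } }* %base, i64 0, i32 0, i32 0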
+static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD,
+ Value *BasePtr, Type *Ty, Type *TargetTy,
+ SmallVectorImpl<Value *> &Indices,
+ const Twine &Prefix) {
+ if (Ty == TargetTy)
+ return buildGEP(IRB, BasePtr, Indices, Prefix);
+
+ // See if we can descend into a struct and locate a field with the correct
+ // type.
+ unsigned NumLayers = 0;
+ Type *ElementTy = Ty;
+ do {
+ if (ElementTy->isPointerTy())
+ break;
+ if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
+ ElementTy = SeqTy->getElementType();
+ // Note that we use the default address space as this index is over an
+ // array or a vector, not a pointer.
+ Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(0), 0)));
+ } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
+ if (STy->element_begin() == STy->element_end())
+ break; // Nothing left to descend into.
+ ElementTy = *STy->element_begin();
+ Indices.push_back(IRB.getInt32(0));
+ } else {
+ break;
+ }
+ ++NumLayers;
+ } while (ElementTy != TargetTy);
+ if (ElementTy != TargetTy)
+ Indices.erase(Indices.end() - NumLayers, Indices.end());
+
+ return buildGEP(IRB, BasePtr, Indices, Prefix);
+}
+
+/// \brief Recursively compute indices for a natural GEP.
+///
+/// This is the recursive step for getNaturalGEPWithOffset that walks down the
+/// element types adding appropriate indices for the GEP.
+static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD,
+ Value *Ptr, Type *Ty, APInt &Offset,
+ Type *TargetTy,
+ SmallVectorImpl<Value *> &Indices,
+ const Twine &Prefix) {
+ if (Offset == 0)
+ return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix);
+
+ // We can't recurse through pointer types.
+ if (Ty->isPointerTy())
+ return 0;
+
+ // We try to analyze GEPs over vectors here, but note that these GEPs are
+ // extremely poorly defined currently. The long-term goal is to remove GEPing
+ // over a vector from the IR completely.
+ if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
+ unsigned ElementSizeInBits = VecTy->getScalarSizeInBits();
+ if (ElementSizeInBits % 8)
+ return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
+ APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
+ APInt NumSkippedElements = Offset.sdiv(ElementSize);
+ if (NumSkippedElements.ugt(VecTy->getNumElements()))
+ return 0;
+ Offset -= NumSkippedElements * ElementSize;
+ Indices.push_back(IRB.getInt(NumSkippedElements));
+ return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(),
+ Offset, TargetTy, Indices, Prefix);
+ }
+
+ if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
+ Type *ElementTy = ArrTy->getElementType();
+ APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
+ APInt NumSkippedElements = Offset.sdiv(ElementSize);
+ if (NumSkippedElements.ugt(ArrTy->getNumElements()))
+ return 0;
+
+ Offset -= NumSkippedElements * ElementSize;
+ Indices.push_back(IRB.getInt(NumSkippedElements));
+ return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
+ Indices, Prefix);
+ }
+
+ StructType *STy = dyn_cast<StructType>(Ty);
+ if (!STy)
+ return 0;
+
+ const StructLayout *SL = TD.getStructLayout(STy);
+ uint64_t StructOffset = Offset.getZExtValue();
+ if (StructOffset >= SL->getSizeInBytes())
+ return 0;
+ unsigned Index = SL->getElementContainingOffset(StructOffset);
+ Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
+ Type *ElementTy = STy->getElementType(Index);
+ if (Offset.uge(TD.getTypeAllocSize(ElementTy)))
+ return 0; // The offset points into alignment padding.
+
+ Indices.push_back(IRB.getInt32(Index));
+ return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
+ Indices, Prefix);
+}
+
+/// \brief Get a natural GEP from a base pointer to a particular offset and
+/// resulting in a particular type.
+///
+/// The goal is to produce a "natural" looking GEP that works with the existing
+/// composite types to arrive at the appropriate offset and element type for
+/// a pointer. TargetTy is the element type the returned GEP should point-to if
+/// possible. We recurse by decreasing Offset, adding the appropriate index to
+/// Indices, and setting Ty to the result subtype.
+///
+/// If no natural GEP can be constructed, this function returns null.
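+///
+/// A worked sketch (assuming a 64-bit target): with Ptr of type [4 x i32]*,
+/// Offset 8, and TargetTy i32, the natural GEP is equivalent to:
+///   getelementptr inbounds [4 x i32]* %ptr, i64 0, i64 2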
+static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD,
+ Value *Ptr, APInt Offset, Type *TargetTy,
+ SmallVectorImpl<Value *> &Indices,
+ const Twine &Prefix) {
+ PointerType *Ty = cast<PointerType>(Ptr->getType());
+
+ // Don't build a "natural" GEP from an i8* when the target type is i8; the
+ // raw i8* path in getAdjustedPtr handles plain byte offsets directly.
+ if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
+ return 0;
+
+ Type *ElementTy = Ty->getElementType();
+ if (!ElementTy->isSized())
+ return 0; // We can't GEP through an unsized element.
+ APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
+ if (ElementSize == 0)
+ return 0; // Zero-length arrays can't help us build a natural GEP.
+ APInt NumSkippedElements = Offset.sdiv(ElementSize);
+
+ Offset -= NumSkippedElements * ElementSize;
+ Indices.push_back(IRB.getInt(NumSkippedElements));
+ return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
+ Indices, Prefix);
+}
+
+/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
+/// resulting pointer has PointerTy.
+///
+/// This tries very hard to compute a "natural" GEP which arrives at the offset
+/// and produces the pointer type desired. Where it cannot, it will try to use
+/// the natural GEP to arrive at the offset and bitcast to the type. Where that
+/// fails, it will try to use an existing i8* and GEP to the byte offset and
+/// bitcast to the type.
+///
+/// The strategy for finding the more natural GEPs is to peel off layers of the
+/// pointer, walking back through bit casts and GEPs, searching for a base
+/// pointer from which we can compute a natural GEP with the desired
+/// properties. The algorithm tries to fold as many constant indices into
+/// a single GEP as possible, thus making each GEP more independent of the
+/// surrounding code.
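+///
+/// A sketch of the peeling (names hypothetical): given
+///   %b = bitcast { i32, i32 }* %a to i8*
+///   %g = getelementptr i8* %b, i64 4
+/// adjusting %g by 0 bytes to an i32* walks back through %g and %b to %a,
+/// folds the byte offset, and emits the natural GEP:
+///   getelementptr inbounds { i32, i32 }* %a, i64 0, i32 1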
+static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD,
+ Value *Ptr, APInt Offset, Type *PointerTy,
+ const Twine &Prefix) {
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<Value *, 4> Visited;
+ Visited.insert(Ptr);
+ SmallVector<Value *, 4> Indices;
+
+ // We may end up computing an offset pointer that has the wrong type. If we
+ // are never able to compute one directly that has the correct type, we'll
+ // fall back to it, so keep it around here.
+ Value *OffsetPtr = 0;
+
+ // Remember any i8 pointer we come across to re-use if we need to do a raw
+ // byte offset.
+ Value *Int8Ptr = 0;
+ APInt Int8PtrOffset(Offset.getBitWidth(), 0);
+
+ Type *TargetTy = PointerTy->getPointerElementType();
+
+ do {
+ // First fold any existing GEPs into the offset.
+ while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
+ APInt GEPOffset(Offset.getBitWidth(), 0);
+ if (!accumulateGEPOffsets(TD, *GEP, GEPOffset))
+ break;
+ Offset += GEPOffset;
+ Ptr = GEP->getPointerOperand();
+ if (!Visited.insert(Ptr))
+ break;
+ }
+
+ // See if we can perform a natural GEP here.
+ Indices.clear();
+ if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy,
+ Indices, Prefix)) {
+ if (P->getType() == PointerTy) {
+ // Zap any offset pointer that we ended up computing in previous rounds.
+ if (OffsetPtr && OffsetPtr->use_empty())
+ if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
+ I->eraseFromParent();
+ return P;
+ }
+ if (!OffsetPtr) {
+ OffsetPtr = P;
+ }
+ }
+
+ // Stash this pointer if we've found an i8*.
+ if (Ptr->getType()->isIntegerTy(8)) {
+ Int8Ptr = Ptr;
+ Int8PtrOffset = Offset;
+ }
+
+ // Peel off a layer of the pointer and update the offset appropriately.
+ if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
+ Ptr = cast<Operator>(Ptr)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
+ if (GA->mayBeOverridden())
+ break;
+ Ptr = GA->getAliasee();
+ } else {
+ break;
+ }
+ assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
+ } while (Visited.insert(Ptr));
+
+ if (!OffsetPtr) {
+ if (!Int8Ptr) {
+ Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
+ Prefix + ".raw_cast");
+ Int8PtrOffset = Offset;
+ }
+
+ OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
+ IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
+ Prefix + ".raw_idx");
+ }
+ Ptr = OffsetPtr;
+
+ // On the off chance we were targeting i8*, guard the bitcast here.
+ if (Ptr->getType() != PointerTy)
+ Ptr = IRB.CreateBitCast(Ptr, PointerTy, Prefix + ".cast");
+
+ return Ptr;
+}
+
+/// \brief Test whether we can convert a value from the old to the new type.
+///
+/// This predicate should be used to guard calls to convertValue in order to
+/// ensure that we only try to convert viable values. The strategy is that we
+/// will peel off single element struct and array wrappings to get to an
+/// underlying value, and convert that value.
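+///
+/// For example (assuming 64-bit pointers): i64 <-> double and i8* <-> i64 are
+/// convertible (equal sizes, single value types), while i32 -> i64 is not
+/// (the sizes differ).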
+static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
+ if (OldTy == NewTy)
+ return true;
+ if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
+ return false;
+ if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
+ return false;
+
+ if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
+ if (NewTy->isPointerTy() && OldTy->isPointerTy())
+ return true;
+ if (NewTy->isIntegerTy() || OldTy->isIntegerTy())
+ return true;
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Generic routine to convert an SSA value to a value of a different
+/// type.
+///
+/// This will try various different casting techniques, such as bitcasts,
+/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
+/// two types for viability with this routine.
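+///
+/// E.g. converting i64 to double emits a bitcast, while i64 to i8* emits an
+/// inttoptr; this is a sketch of the dispatch below, not an exhaustive list.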
+static Value *convertValue(const DataLayout &DL, IRBuilder<> &IRB, Value *V,
+ Type *Ty) {
+ assert(canConvertValue(DL, V->getType(), Ty) &&
+ "Value not convertable to type");
+ if (V->getType() == Ty)
+ return V;
+ if (V->getType()->isIntegerTy() && Ty->isPointerTy())
+ return IRB.CreateIntToPtr(V, Ty);
+ if (V->getType()->isPointerTy() && Ty->isIntegerTy())
+ return IRB.CreatePtrToInt(V, Ty);
+
+ return IRB.CreateBitCast(V, Ty);
+}
+
+/// \brief Test whether the given alloca partition can be promoted to a vector.
+///
+/// This is a quick test to check whether we can rewrite a particular alloca
+/// partition (and its newly formed alloca) into a vector alloca with only
+/// whole-vector loads and stores such that it could be promoted to a vector
+/// SSA value. We can only ensure this for a limited set of operations, and we
+/// don't want to do the rewrites unless we are confident that the result will
+/// be promotable, so we have an early test here.
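+///
+/// For example (a sketch), a partition typed <4 x float> whose uses are
+/// whole-vector or float-sized loads and stores at element-aligned offsets
+/// passes this test, while any 2-byte access into it fails the element-size
+/// check below.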
+static bool isVectorPromotionViable(const DataLayout &TD,
+ Type *AllocaTy,
+ AllocaPartitioning &P,
+ uint64_t PartitionBeginOffset,
+ uint64_t PartitionEndOffset,
+ AllocaPartitioning::const_use_iterator I,
+ AllocaPartitioning::const_use_iterator E) {
+ VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
+ if (!Ty)
+ return false;
+
+ uint64_t VecSize = TD.getTypeSizeInBits(Ty);
+ uint64_t ElementSize = Ty->getScalarSizeInBits();
+
+ // While LLVM vectors are defined as bit-packed, we don't support element
+ // sizes that aren't byte-sized.
+ if (ElementSize % 8)
+ return false;
+ assert((VecSize % 8) == 0 && "vector size not a multiple of element size?");
+ VecSize /= 8;
+ ElementSize /= 8;
+
+ for (; I != E; ++I) {
+ if (!I->U)
+ continue; // Skip dead use.
+
+ uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset;
+ uint64_t BeginIndex = BeginOffset / ElementSize;
+ if (BeginIndex * ElementSize != BeginOffset ||
+ BeginIndex >= Ty->getNumElements())
+ return false;
+ uint64_t EndOffset = I->EndOffset - PartitionBeginOffset;
+ uint64_t EndIndex = EndOffset / ElementSize;
+ if (EndIndex * ElementSize != EndOffset ||
+ EndIndex > Ty->getNumElements())
+ return false;
+
+ // FIXME: We should build shuffle vector instructions to handle
+ // non-element-sized accesses.
+ if ((EndOffset - BeginOffset) != ElementSize &&
+ (EndOffset - BeginOffset) != VecSize)
+ return false;
+
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
+ if (MI->isVolatile())
+ return false;
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
+ const AllocaPartitioning::MemTransferOffsets &MTO
+ = P.getMemTransferOffsets(*MTI);
+ if (!MTO.IsSplittable)
+ return false;
+ }
+ } else if (I->U->get()->getType()->getPointerElementType()->isStructTy()) {
+ // Disable vector promotion when there are loads or stores of an FCA.
+ return false;
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(I->U->getUser())) {
+ if (LI->isVolatile())
+ return false;
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(I->U->getUser())) {
+ if (SI->isVolatile())
+ return false;
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+/// \brief Test whether the given alloca partition's integer operations can be
+/// widened to promotable ones.
+///
+/// This is a quick test to check whether we can rewrite the integer loads and
+/// stores to a particular alloca into wider loads and stores and be able to
+/// promote the resulting alloca.
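+///
+/// For example (a sketch), an i64 alloca covered by at least one alloca-wide
+/// i64 load or store, with any remaining accesses being narrower integer
+/// loads and stores at byte offsets, is viable; without a covering load or
+/// store, widening is rejected.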
+static bool isIntegerWideningViable(const DataLayout &TD,
+ Type *AllocaTy,
+ uint64_t AllocBeginOffset,
+ AllocaPartitioning &P,
+ AllocaPartitioning::const_use_iterator I,
+ AllocaPartitioning::const_use_iterator E) {
+ uint64_t SizeInBits = TD.getTypeSizeInBits(AllocaTy);
+
+ // Don't try to handle allocas with bit-padding.
+ if (SizeInBits != TD.getTypeStoreSizeInBits(AllocaTy))
+ return false;
+
+ // We need to ensure that an integer type with the appropriate bitwidth can
+ // be converted to the alloca type, whatever that is. We don't want to force
+ // the alloca itself to have an integer type if there is a more suitable one.
+ Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
+ if (!canConvertValue(TD, AllocaTy, IntTy) ||
+ !canConvertValue(TD, IntTy, AllocaTy))
+ return false;
+
+ uint64_t Size = TD.getTypeStoreSize(AllocaTy);
+
+ // Check the uses to ensure they are (likely) promotable integer uses.
+ // Also ensure that the alloca has a covering load or store. We don't want
+ // to widen the integer operations only to fail to promote due to some other
+ // unsplittable entry (which we may make splittable later).
+ bool WholeAllocaOp = false;
+ for (; I != E; ++I) {
+ if (!I->U)
+ continue; // Skip dead use.
+
+ uint64_t RelBegin = I->BeginOffset - AllocBeginOffset;
+ uint64_t RelEnd = I->EndOffset - AllocBeginOffset;
+
+ // We can't reasonably handle cases where the load or store extends past
+ // the end of the alloca's type and into its padding.
+ if (RelEnd > Size)
+ return false;
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(I->U->getUser())) {
+ if (LI->isVolatile())
+ return false;
+ if (RelBegin == 0 && RelEnd == Size)
+ WholeAllocaOp = true;
+ if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
+ if (ITy->getBitWidth() < TD.getTypeStoreSizeInBits(ITy))
+ return false;
+ continue;
+ }
+ // Non-integer loads need to be convertible from the alloca type so that
+ // they are promotable.
+ if (RelBegin != 0 || RelEnd != Size ||
+ !canConvertValue(TD, AllocaTy, LI->getType()))
+ return false;
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(I->U->getUser())) {
+ Type *ValueTy = SI->getValueOperand()->getType();
+ if (SI->isVolatile())
+ return false;
+ if (RelBegin == 0 && RelEnd == Size)
+ WholeAllocaOp = true;
+ if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
+ if (ITy->getBitWidth() < TD.getTypeStoreSizeInBits(ITy))
+ return false;
+ continue;
+ }
+ // Non-integer stores need to be convertible to the alloca type so that
+ // they are promotable.
+ if (RelBegin != 0 || RelEnd != Size ||
+ !canConvertValue(TD, ValueTy, AllocaTy))
+ return false;
+ } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
+ if (MI->isVolatile())
+ return false;
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
+ const AllocaPartitioning::MemTransferOffsets &MTO
+ = P.getMemTransferOffsets(*MTI);
+ if (!MTO.IsSplittable)
+ return false;
+ }
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->U->getUser())) {
+ if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
+ II->getIntrinsicID() != Intrinsic::lifetime_end)
+ return false;
+ } else {
+ return false;
+ }
+ }
+ return WholeAllocaOp;
+}
+
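+/// \brief Extract an integer element from a wider integer value.
+///
+/// A worked sketch (little-endian): extracting an i8 at byte Offset 1 from an
+/// i32 %V lowers to:
+///   %V.shift = lshr i32 %V, 8
+///   %V.trunc = trunc i32 %V.shift to i8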
+static Value *extractInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *V,
+ IntegerType *Ty, uint64_t Offset,
+ const Twine &Name) {
+ DEBUG(dbgs() << " start: " << *V << "\n");
+ IntegerType *IntTy = cast<IntegerType>(V->getType());
+ assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
+ "Element extends past full value");
+ uint64_t ShAmt = 8*Offset;
+ if (DL.isBigEndian())
+ ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
+ if (ShAmt) {
+ V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
+ DEBUG(dbgs() << " shifted: " << *V << "\n");
+ }
+ assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
+ "Cannot extract to a larger integer!");
+ if (Ty != IntTy) {
+ V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
+ DEBUG(dbgs() << " trunced: " << *V << "\n");
+ }
+ return V;
+}
+
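+/// \brief Insert an integer element into a wider integer value.
+///
+/// A worked sketch (little-endian): inserting an i8 %V at byte Offset 1 into
+/// an i32 %Old lowers to roughly:
+///   %V.ext    = zext i8 %V to i32
+///   %V.shift  = shl i32 %V.ext, 8
+///   %Old.mask = and i32 %Old, -65281   ; clear bits 8..15
+///   %V.insert = or i32 %Old.mask, %V.shift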
+static Value *insertInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *Old,
+ Value *V, uint64_t Offset, const Twine &Name) {
+ IntegerType *IntTy = cast<IntegerType>(Old->getType());
+ IntegerType *Ty = cast<IntegerType>(V->getType());
+ assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
+ "Cannot insert a larger integer!");
+ DEBUG(dbgs() << " start: " << *V << "\n");
+ if (Ty != IntTy) {
+ V = IRB.CreateZExt(V, IntTy, Name + ".ext");
+ DEBUG(dbgs() << " extended: " << *V << "\n");
+ }
+ assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
+ "Element store outside of alloca store");
+ uint64_t ShAmt = 8*Offset;
+ if (DL.isBigEndian())
+ ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
+ if (ShAmt) {
+ V = IRB.CreateShl(V, ShAmt, Name + ".shift");
+ DEBUG(dbgs() << " shifted: " << *V << "\n");
+ }
+
+ if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
+ APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
+ Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
+ DEBUG(dbgs() << " masked: " << *Old << "\n");
+ V = IRB.CreateOr(Old, V, Name + ".insert");
+ DEBUG(dbgs() << " inserted: " << *V << "\n");
+ }
+ return V;
+}
+
+namespace {
+/// \brief Visitor to rewrite instructions using a partition of an alloca to
+/// use a new alloca.
+///
+/// Also implements the rewriting to vector-based accesses when the partition
+/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
+/// lives here.
+class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
+ bool> {
+ // Befriend the base class so it can delegate to private visit methods.
+ friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
+
+ const DataLayout &TD;
+ AllocaPartitioning &P;
+ SROA &Pass;
+ AllocaInst &OldAI, &NewAI;
+ const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
+ Type *NewAllocaTy;
+
+ // If we are rewriting an alloca partition which can be written as pure
+ // vector operations, we stash extra information here. When VecTy is
+ // non-null, we have some strict guarantees about the rewritten alloca:
+ // - The new alloca is exactly the size of the vector type here.
+ // - The accesses all either map to the entire vector or to a single
+ // element.
+ // - The set of accessing instructions is only one of those handled above
+ // in isVectorPromotionViable. Generally these are the same access kinds
+ // which are promotable via mem2reg.
+ VectorType *VecTy;
+ Type *ElementTy;
+ uint64_t ElementSize;
+
+ // This is a convenience and flag variable that will be null unless the new
+ // alloca's integer operations should be widened to this integer type due to
+ // passing isIntegerWideningViable above. If it is non-null, the desired
+ // integer type will be stored here for easy access during rewriting.
+ IntegerType *IntTy;
+
+ // The offset of the partition user currently being rewritten.
+ uint64_t BeginOffset, EndOffset;
+ Use *OldUse;
+ Instruction *OldPtr;
+
+ // The name prefix to use when rewriting instructions for this alloca.
+ std::string NamePrefix;
+
+public:
+ AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P,
+ AllocaPartitioning::iterator PI,
+ SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
+ uint64_t NewBeginOffset, uint64_t NewEndOffset)
+ : TD(TD), P(P), Pass(Pass),
+ OldAI(OldAI), NewAI(NewAI),
+ NewAllocaBeginOffset(NewBeginOffset),
+ NewAllocaEndOffset(NewEndOffset),
+ NewAllocaTy(NewAI.getAllocatedType()),
+ VecTy(), ElementTy(), ElementSize(), IntTy(),
+ BeginOffset(), EndOffset() {
+ }
+
+ /// \brief Visit the users of the alloca partition and rewrite them.
+ bool visitUsers(AllocaPartitioning::const_use_iterator I,
+ AllocaPartitioning::const_use_iterator E) {
+ if (isVectorPromotionViable(TD, NewAI.getAllocatedType(), P,
+ NewAllocaBeginOffset, NewAllocaEndOffset,
+ I, E)) {
+ ++NumVectorized;
+ VecTy = cast<VectorType>(NewAI.getAllocatedType());
+ ElementTy = VecTy->getElementType();
+ assert((VecTy->getScalarSizeInBits() % 8) == 0 &&
+ "Only multiple-of-8 sized vector elements are viable");
+ ElementSize = VecTy->getScalarSizeInBits() / 8;
+ } else if (isIntegerWideningViable(TD, NewAI.getAllocatedType(),
+ NewAllocaBeginOffset, P, I, E)) {
+ IntTy = Type::getIntNTy(NewAI.getContext(),
+ TD.getTypeSizeInBits(NewAI.getAllocatedType()));
+ }
+ bool CanSROA = true;
+ for (; I != E; ++I) {
+ if (!I->U)
+ continue; // Skip dead uses.
+ BeginOffset = I->BeginOffset;
+ EndOffset = I->EndOffset;
+ OldUse = I->U;
+ OldPtr = cast<Instruction>(I->U->get());
+ NamePrefix = (Twine(NewAI.getName()) + "." + Twine(BeginOffset)).str();
+ CanSROA &= visit(cast<Instruction>(I->U->getUser()));
+ }
+ if (VecTy) {
+ assert(CanSROA);
+ VecTy = 0;
+ ElementTy = 0;
+ ElementSize = 0;
+ }
+ if (IntTy) {
+ assert(CanSROA);
+ IntTy = 0;
+ }
+ return CanSROA;
+ }
+
+private:
+ // Every instruction which can end up as a user must have a rewrite rule.
+ bool visitInstruction(Instruction &I) {
+ DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
+ llvm_unreachable("No rewrite rule for this instruction!");
+ }
+
+ Twine getName(const Twine &Suffix) {
+ return NamePrefix + Suffix;
+ }
+
+ Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
+ assert(BeginOffset >= NewAllocaBeginOffset);
+ APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
+ return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
+ }
+
+ /// \brief Compute suitable alignment to access an offset into the new alloca.
+ unsigned getOffsetAlign(uint64_t Offset) {
+ unsigned NewAIAlign = NewAI.getAlignment();
+ if (!NewAIAlign)
+ NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType());
+ return MinAlign(NewAIAlign, Offset);
+ }
+
+ /// \brief Compute suitable alignment to access this partition of the new
+ /// alloca.
+ unsigned getPartitionAlign() {
+ return getOffsetAlign(BeginOffset - NewAllocaBeginOffset);
+ }
+
+ /// \brief Compute suitable alignment to access a type at an offset of the
+ /// new alloca.
+ ///
+ /// \returns zero if the type's ABI alignment is a suitable alignment,
+ /// otherwise returns the maximal suitable alignment.
+ unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
+ unsigned Align = getOffsetAlign(Offset);
+ return Align == TD.getABITypeAlignment(Ty) ? 0 : Align;
+ }
+
+ /// \brief Compute suitable alignment to access a type at the beginning of
+ /// this partition of the new alloca.
+ ///
+ /// See \c getOffsetTypeAlign for details; this routine delegates to it.
+ unsigned getPartitionTypeAlign(Type *Ty) {
+ return getOffsetTypeAlign(Ty, BeginOffset - NewAllocaBeginOffset);
+ }
+
+ ConstantInt *getIndex(IRBuilder<> &IRB, uint64_t Offset) {
+ assert(VecTy && "Can only call getIndex when rewriting a vector");
+ uint64_t RelOffset = Offset - NewAllocaBeginOffset;
+ assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
+ uint32_t Index = RelOffset / ElementSize;
+ assert(Index * ElementSize == RelOffset);
+ return IRB.getInt32(Index);
+ }
+
+ void deleteIfTriviallyDead(Value *V) {
+ Instruction *I = cast<Instruction>(V);
+ if (isInstructionTriviallyDead(I))
+ Pass.DeadInsts.insert(I);
+ }
+
+ Value *rewriteVectorizedLoadInst(IRBuilder<> &IRB, LoadInst &LI, Value *OldOp) {
+ Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".load"));
+ if (LI.getType() == VecTy->getElementType() ||
+ BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
+ V = IRB.CreateExtractElement(V, getIndex(IRB, BeginOffset),
+ getName(".extract"));
+ }
+ return V;
+ }
+
+ Value *rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) {
+ assert(IntTy && "We cannot insert an integer to the alloca");
+ assert(!LI.isVolatile());
+ Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".load"));
+ V = convertValue(TD, IRB, V, IntTy);
+ assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
+ uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
+ if (Offset > 0 || EndOffset < NewAllocaEndOffset)
+ V = extractInteger(TD, IRB, V, cast<IntegerType>(LI.getType()), Offset,
+ getName(".extract"));
+ return V;
+ }
+
+ bool visitLoadInst(LoadInst &LI) {
+ DEBUG(dbgs() << " original: " << LI << "\n");
+ Value *OldOp = LI.getOperand(0);
+ assert(OldOp == OldPtr);
+ IRBuilder<> IRB(&LI);
+
+ uint64_t Size = EndOffset - BeginOffset;
+ bool IsSplitIntLoad = Size < TD.getTypeStoreSize(LI.getType());
+
+ // If this memory access can be shown to *statically* extend outside the
+ // bounds of the original allocation, its behavior is undefined. Rather
+ // than trying to transform it, just replace it with undef.
+ // FIXME: We should do something more clever for functions being
+ // instrumented by asan.
+ // FIXME: Eventually, once ASan and friends can flush out bugs here, this
+ // should be transformed to a load of null making it unreachable.
+ uint64_t OldAllocSize = TD.getTypeAllocSize(OldAI.getAllocatedType());
+ if (TD.getTypeStoreSize(LI.getType()) > OldAllocSize) {
+ LI.replaceAllUsesWith(UndefValue::get(LI.getType()));
+ Pass.DeadInsts.insert(&LI);
+ deleteIfTriviallyDead(OldOp);
+ DEBUG(dbgs() << " to: undef!!\n");
+ return true;
+ }
+
+ Type *TargetTy = IsSplitIntLoad ? Type::getIntNTy(LI.getContext(), Size * 8)
+ : LI.getType();
+ bool IsPtrAdjusted = false;
+ Value *V;
+ if (VecTy) {
+ V = rewriteVectorizedLoadInst(IRB, LI, OldOp);
+ } else if (IntTy && LI.getType()->isIntegerTy()) {
+ V = rewriteIntegerLoad(IRB, LI);
+ } else if (BeginOffset == NewAllocaBeginOffset &&
+ canConvertValue(TD, NewAllocaTy, LI.getType())) {
+ V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ LI.isVolatile(), getName(".load"));
+ } else {
+ Type *LTy = TargetTy->getPointerTo();
+ V = IRB.CreateAlignedLoad(getAdjustedAllocaPtr(IRB, LTy),
+ getPartitionTypeAlign(TargetTy),
+ LI.isVolatile(), getName(".load"));
+ IsPtrAdjusted = true;
+ }
+ V = convertValue(TD, IRB, V, TargetTy);
+
+ if (IsSplitIntLoad) {
+ assert(!LI.isVolatile());
+ assert(LI.getType()->isIntegerTy() &&
+ "Only integer type loads and stores are split");
+ assert(LI.getType()->getIntegerBitWidth() ==
+ TD.getTypeStoreSizeInBits(LI.getType()) &&
+ "Non-byte-multiple bit width");
+ assert(LI.getType()->getIntegerBitWidth() ==
+ TD.getTypeAllocSizeInBits(OldAI.getAllocatedType()) &&
+ "Only alloca-wide loads can be split and recomposed");
+ // Move the insertion point just past the load so that we can refer to it.
+ IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
+ // Create a placeholder value with the same type as LI to use as the
+ // basis for the new value. This allows us to replace the uses of LI with
+ // the computed value, and then replace the placeholder with LI, leaving
+ // LI only used for this computation.
+ Value *Placeholder
+ = new LoadInst(UndefValue::get(LI.getType()->getPointerTo()));
+ V = insertInteger(TD, IRB, Placeholder, V, BeginOffset,
+ getName(".insert"));
+ LI.replaceAllUsesWith(V);
+ Placeholder->replaceAllUsesWith(&LI);
+ delete Placeholder;
+ } else {
+ LI.replaceAllUsesWith(V);
+ }
+
+ Pass.DeadInsts.insert(&LI);
+ deleteIfTriviallyDead(OldOp);
+ DEBUG(dbgs() << " to: " << *V << "\n");
+ return !LI.isVolatile() && !IsPtrAdjusted;
+ }
+
+ bool rewriteVectorizedStoreInst(IRBuilder<> &IRB, Value *V,
+ StoreInst &SI, Value *OldOp) {
+ if (V->getType() == ElementTy ||
+ BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
+ if (V->getType() != ElementTy)
+ V = convertValue(TD, IRB, V, ElementTy);
+ LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".load"));
+ V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset),
+ getName(".insert"));
+ } else if (V->getType() != VecTy) {
+ V = convertValue(TD, IRB, V, VecTy);
+ }
+ StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
+ Pass.DeadInsts.insert(&SI);
+
+ (void)Store;
+ DEBUG(dbgs() << " to: " << *Store << "\n");
+ return true;
+ }
+
+ bool rewriteIntegerStore(IRBuilder<> &IRB, Value *V, StoreInst &SI) {
+ assert(IntTy && "We cannot extract an integer from the alloca");
+ assert(!SI.isVolatile());
+ if (TD.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
+ Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".oldload"));
+ Old = convertValue(TD, IRB, Old, IntTy);
+ assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
+ uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
+ V = insertInteger(TD, IRB, Old, SI.getValueOperand(), Offset,
+ getName(".insert"));
+ }
+ V = convertValue(TD, IRB, V, NewAllocaTy);
+ StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
+ Pass.DeadInsts.insert(&SI);
+ (void)Store;
+ DEBUG(dbgs() << " to: " << *Store << "\n");
+ return true;
+ }
+
+ bool visitStoreInst(StoreInst &SI) {
+ DEBUG(dbgs() << " original: " << SI << "\n");
+ Value *OldOp = SI.getOperand(1);
+ assert(OldOp == OldPtr);
+ IRBuilder<> IRB(&SI);
+
+ Value *V = SI.getValueOperand();
+
+ // Strip all inbounds GEPs and pointer casts to try to dig out any root
+ // alloca that should be re-examined after promoting this alloca.
+ if (V->getType()->isPointerTy())
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
+ Pass.PostPromotionWorklist.insert(AI);
+
+ uint64_t Size = EndOffset - BeginOffset;
+ if (Size < TD.getTypeStoreSize(V->getType())) {
+ assert(!SI.isVolatile());
+ assert(V->getType()->isIntegerTy() &&
+ "Only integer type loads and stores are split");
+ assert(V->getType()->getIntegerBitWidth() ==
+ TD.getTypeStoreSizeInBits(V->getType()) &&
+ "Non-byte-multiple bit width");
+ assert(V->getType()->getIntegerBitWidth() ==
+ TD.getTypeSizeInBits(OldAI.getAllocatedType()) &&
+ "Only alloca-wide stores can be split and recomposed");
+ IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
+ V = extractInteger(TD, IRB, V, NarrowTy, BeginOffset,
+ getName(".extract"));
+ }
+
+ if (VecTy)
+ return rewriteVectorizedStoreInst(IRB, V, SI, OldOp);
+ if (IntTy && V->getType()->isIntegerTy())
+ return rewriteIntegerStore(IRB, V, SI);
+
+ StoreInst *NewSI;
+ if (BeginOffset == NewAllocaBeginOffset &&
+ canConvertValue(TD, V->getType(), NewAllocaTy)) {
+ V = convertValue(TD, IRB, V, NewAllocaTy);
+ NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
+ SI.isVolatile());
+ } else {
+ Value *NewPtr = getAdjustedAllocaPtr(IRB, V->getType()->getPointerTo());
+ NewSI = IRB.CreateAlignedStore(V, NewPtr,
+ getPartitionTypeAlign(V->getType()),
+ SI.isVolatile());
+ }
+ (void)NewSI;
+ Pass.DeadInsts.insert(&SI);
+ deleteIfTriviallyDead(OldOp);
+
+ DEBUG(dbgs() << " to: " << *NewSI << "\n");
+ return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
+ }
+
+ bool visitMemSetInst(MemSetInst &II) {
+ DEBUG(dbgs() << " original: " << II << "\n");
+ IRBuilder<> IRB(&II);
+ assert(II.getRawDest() == OldPtr);
+
+ // If the memset has a variable size, it cannot be split, just adjust the
+ // pointer to the new alloca.
+ if (!isa<Constant>(II.getLength())) {
+ II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(ConstantInt::get(CstTy, getPartitionAlign()));
+
+ deleteIfTriviallyDead(OldPtr);
+ return false;
+ }
+
+ // Record this instruction for deletion.
+ Pass.DeadInsts.insert(&II);
+
+ Type *AllocaTy = NewAI.getAllocatedType();
+ Type *ScalarTy = AllocaTy->getScalarType();
+
+ // If this doesn't map cleanly onto the alloca type, and that type isn't
+ // a single value type, just emit a memset.
+ if (!VecTy && !IntTy &&
+ (BeginOffset != NewAllocaBeginOffset ||
+ EndOffset != NewAllocaEndOffset ||
+ !AllocaTy->isSingleValueType() ||
+ !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)))) {
+ Type *SizeTy = II.getLength()->getType();
+ Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
+ CallInst *New
+ = IRB.CreateMemSet(getAdjustedAllocaPtr(IRB,
+ II.getRawDest()->getType()),
+ II.getValue(), Size, getPartitionAlign(),
+ II.isVolatile());
+ (void)New;
+ DEBUG(dbgs() << " to: " << *New << "\n");
+ return false;
+ }
+
+ // If we can represent this as a simple value, we have to build the actual
+ // value to store, which requires expanding the byte present in memset to
+ // a sensible representation for the alloca type. This is essentially
+ // splatting the byte to a sufficiently wide integer, bitcasting to the
+ // desired scalar type, and splatting it across any desired vector type.
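+ // For example, widening the byte 0xAB to an i32 yields 0xABABABAB:
+ // zext(0xAB) * (0xFFFFFFFF udiv 0xFF) == 0xAB * 0x01010101 == 0xABABABAB.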
+ uint64_t Size = EndOffset - BeginOffset;
+ Value *V = II.getValue();
+ IntegerType *VTy = cast<IntegerType>(V->getType());
+ Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8);
+ if (Size*8 > VTy->getBitWidth())
+ V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, getName(".zext")),
+ ConstantExpr::getUDiv(
+ Constant::getAllOnesValue(SplatIntTy),
+ ConstantExpr::getZExt(
+ Constant::getAllOnesValue(V->getType()),
+ SplatIntTy)),
+ getName(".isplat"));
+
+ // If this is an element-wide memset of a vectorizable alloca, insert it.
+ if (VecTy && (BeginOffset > NewAllocaBeginOffset ||
+ EndOffset < NewAllocaEndOffset)) {
+ if (V->getType() != ScalarTy)
+ V = convertValue(TD, IRB, V, ScalarTy);
+ StoreInst *Store = IRB.CreateAlignedStore(
+ IRB.CreateInsertElement(IRB.CreateAlignedLoad(&NewAI,
+ NewAI.getAlignment(),
+ getName(".load")),
+ V, getIndex(IRB, BeginOffset),
+ getName(".insert")),
+ &NewAI, NewAI.getAlignment());
+ (void)Store;
+ DEBUG(dbgs() << " to: " << *Store << "\n");
+ return true;
+ }
+
+ // If this is a memset on an alloca where we can widen stores, insert the
+ // set integer.
+ if (IntTy && (BeginOffset > NewAllocaBeginOffset ||
+ EndOffset < NewAllocaEndOffset)) {
+ assert(!II.isVolatile());
+ Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".oldload"));
+ Old = convertValue(TD, IRB, Old, IntTy);
+ assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
+ uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
+ V = insertInteger(TD, IRB, Old, V, Offset, getName(".insert"));
+ }
+
+ if (V->getType() != AllocaTy)
+ V = convertValue(TD, IRB, V, AllocaTy);
+
+ Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
+ II.isVolatile());
+ (void)New;
+ DEBUG(dbgs() << " to: " << *New << "\n");
+ return !II.isVolatile();
+ }
+
+ bool visitMemTransferInst(MemTransferInst &II) {
+ // Rewriting of memory transfer instructions can be a bit tricky. We break
+ // them into two categories: split intrinsics and unsplit intrinsics.
+
+ DEBUG(dbgs() << " original: " << II << "\n");
+ IRBuilder<> IRB(&II);
+
+ assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
+ bool IsDest = II.getRawDest() == OldPtr;
+
+ const AllocaPartitioning::MemTransferOffsets &MTO
+ = P.getMemTransferOffsets(II);
+
+ // Compute the relative offset within the transfer.
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
+ : MTO.SourceBegin));
+
+ unsigned Align = II.getAlignment();
+ if (Align > 1)
+ Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
+ MinAlign(II.getAlignment(), getPartitionAlign()));
+
+ // For unsplit intrinsics, we simply modify the source and destination
+ // pointers in place. This isn't just an optimization, it is a matter of
+ // correctness. With unsplit intrinsics we may be dealing with transfers
+ // within a single alloca before SROA ran, or with transfers that have
+ // a variable length. We may also be dealing with memmove instead of
+ // memcpy, and so simply updating the pointers is necessary for us to
+ // update both source and dest of a single call.
+ if (!MTO.IsSplittable) {
+ Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
+ if (IsDest)
+ II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
+ else
+ II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType()));
+
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(ConstantInt::get(CstTy, Align));
+
+ DEBUG(dbgs() << " to: " << II << "\n");
+ deleteIfTriviallyDead(OldOp);
+ return false;
+ }
+ // For split transfer intrinsics we have an incredibly useful assurance:
+ // the source and destination do not reside within the same alloca, and at
+ // least one of them does not escape. This means that we can replace
+ // memmove with memcpy, and we don't need to worry about all manner of
+ // downsides to splitting and transforming the operations.
+
+ // If this doesn't map cleanly onto the alloca type, and that type isn't
+ // a single value type, just emit a memcpy.
+ bool EmitMemCpy
+ = !VecTy && !IntTy && (BeginOffset != NewAllocaBeginOffset ||
+ EndOffset != NewAllocaEndOffset ||
+ !NewAI.getAllocatedType()->isSingleValueType());
+
+ // If we're just going to emit a memcpy, the alloca hasn't changed, and the
+ // size hasn't been shrunk based on analysis of the viable range, then this
+ // is a no-op.
+ if (EmitMemCpy && &OldAI == &NewAI) {
+ uint64_t OrigBegin = IsDest ? MTO.DestBegin : MTO.SourceBegin;
+ uint64_t OrigEnd = IsDest ? MTO.DestEnd : MTO.SourceEnd;
+ // Ensure the start lines up.
+ assert(BeginOffset == OrigBegin);
+ (void)OrigBegin;
+
+ // Rewrite the size as needed.
+ if (EndOffset != OrigEnd)
+ II.setLength(ConstantInt::get(II.getLength()->getType(),
+ EndOffset - BeginOffset));
+ return false;
+ }
+ // Record this instruction for deletion.
+ Pass.DeadInsts.insert(&II);
+
+ bool IsWholeAlloca = BeginOffset == NewAllocaBeginOffset &&
+ EndOffset == NewAllocaEndOffset;
+ bool IsVectorElement = VecTy && !IsWholeAlloca;
+ uint64_t Size = EndOffset - BeginOffset;
+ IntegerType *SubIntTy
+ = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
+
+ Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
+ : II.getRawDest()->getType();
+ if (!EmitMemCpy) {
+ if (IsVectorElement)
+ OtherPtrTy = VecTy->getElementType()->getPointerTo();
+ else if (IntTy && !IsWholeAlloca)
+ OtherPtrTy = SubIntTy->getPointerTo();
+ else
+ OtherPtrTy = NewAI.getType();
+ }
+
+ // Compute the other pointer, folding as much as possible to produce
+ // a single, simple GEP in most cases.
+ Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
+ OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy,
+ getName("." + OtherPtr->getName()));
+
+ // Strip all inbounds GEPs and pointer casts to try to dig out any root
+ // alloca that should be re-examined after rewriting this instruction.
+ if (AllocaInst *AI
+ = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
+ Pass.Worklist.insert(AI);
+
+ if (EmitMemCpy) {
+ Value *OurPtr
+ = getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType()
+ : II.getRawSource()->getType());
+ Type *SizeTy = II.getLength()->getType();
+ Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
+
+ CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
+ IsDest ? OtherPtr : OurPtr,
+ Size, Align, II.isVolatile());
+ (void)New;
+ DEBUG(dbgs() << " to: " << *New << "\n");
+ return false;
+ }
+
+ // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
+ // is equivalent to 1, but that isn't true if we end up rewriting this as
+ // a load or store.
+ if (!Align)
+ Align = 1;
+
+ Value *SrcPtr = OtherPtr;
+ Value *DstPtr = &NewAI;
+ if (!IsDest)
+ std::swap(SrcPtr, DstPtr);
+
+ Value *Src;
+ if (IsVectorElement && !IsDest) {
+ // We have to extract rather than load.
+ Src = IRB.CreateExtractElement(
+ IRB.CreateAlignedLoad(SrcPtr, Align, getName(".copyload")),
+ getIndex(IRB, BeginOffset),
+ getName(".copyextract"));
+ } else if (IntTy && !IsWholeAlloca && !IsDest) {
+ Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".load"));
+ Src = convertValue(TD, IRB, Src, IntTy);
+ assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
+ uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
+ Src = extractInteger(TD, IRB, Src, SubIntTy, Offset, getName(".extract"));
+ } else {
+ Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
+ getName(".copyload"));
+ }
+
+ if (IntTy && !IsWholeAlloca && IsDest) {
+ Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".oldload"));
+ Old = convertValue(TD, IRB, Old, IntTy);
+ assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
+ uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
+ Src = insertInteger(TD, IRB, Old, Src, Offset, getName(".insert"));
+ Src = convertValue(TD, IRB, Src, NewAllocaTy);
+ }
+
+ if (IsVectorElement && IsDest) {
+ // We have to insert into a loaded copy before storing.
+ Src = IRB.CreateInsertElement(
+ IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
+ Src, getIndex(IRB, BeginOffset),
+ getName(".insert"));
+ }
+
+ StoreInst *Store = cast<StoreInst>(
+ IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
+ (void)Store;
+ DEBUG(dbgs() << " to: " << *Store << "\n");
+ return !II.isVolatile();
+ }
+
+ bool visitIntrinsicInst(IntrinsicInst &II) {
+ assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
+ II.getIntrinsicID() == Intrinsic::lifetime_end);
+ DEBUG(dbgs() << " original: " << II << "\n");
+ IRBuilder<> IRB(&II);
+ assert(II.getArgOperand(1) == OldPtr);
+
+ // Record this instruction for deletion.
+ Pass.DeadInsts.insert(&II);
+
+ ConstantInt *Size
+ = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
+ EndOffset - BeginOffset);
+ Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType());
+ Value *New;
+ if (II.getIntrinsicID() == Intrinsic::lifetime_start)
+ New = IRB.CreateLifetimeStart(Ptr, Size);
+ else
+ New = IRB.CreateLifetimeEnd(Ptr, Size);
+
+ DEBUG(dbgs() << " to: " << *New << "\n");
+ return true;
+ }
+
+ bool visitPHINode(PHINode &PN) {
+ DEBUG(dbgs() << " original: " << PN << "\n");
+
+ // We would like to compute a new pointer in only one place, but have it be
+ // as local as possible to the PHI. To do that, we re-use the location of
+ // the old pointer, which necessarily must be in the right position to
+ // dominate the PHI.
+ IRBuilder<> PtrBuilder(cast<Instruction>(OldPtr));
+
+ Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType());
+ // Replace the operands which were using the old pointer.
+ std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
+
+ DEBUG(dbgs() << " to: " << PN << "\n");
+ deleteIfTriviallyDead(OldPtr);
+ return false;
+ }
+
+ bool visitSelectInst(SelectInst &SI) {
+ DEBUG(dbgs() << " original: " << SI << "\n");
+ IRBuilder<> IRB(&SI);
+
+ // Find the operand we need to rewrite here.
+ bool IsTrueVal = SI.getTrueValue() == OldPtr;
+ if (IsTrueVal)
+ assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!");
+ else
+ assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!");
+
+ Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType());
+ SI.setOperand(IsTrueVal ? 1 : 2, NewPtr);
+ DEBUG(dbgs() << " to: " << SI << "\n");
+ deleteIfTriviallyDead(OldPtr);
+ return false;
+ }
+
+};
+}
+
+namespace {
+/// \brief Visitor to rewrite aggregate loads and stores as scalar.
+///
+/// This pass aggressively rewrites all aggregate loads and stores on
+/// a particular pointer (or any pointer derived from it which we can identify)
+/// with scalar loads and stores.
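+///
+/// For example (a sketch, names hypothetical), an aggregate load
+///   %v = load { i32, i32 }* %p
+/// becomes a GEP plus a scalar load per field, with the results recombined
+/// via insertvalue into a value of the original aggregate type.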
+class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
+ // Befriend the base class so it can delegate to private visit methods.
+ friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
+
+ const DataLayout &TD;
+
+ /// Queue of pointer uses to analyze and potentially rewrite.
+ SmallVector<Use *, 8> Queue;
+
+ /// Set to prevent us from cycling with phi nodes and loops.
+ SmallPtrSet<User *, 8> Visited;
+
+ /// The current pointer use being rewritten. This is used to dig up the used
+ /// value (as opposed to the user).
+ Use *U;
+
+public:
+ AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {}
+
+ /// Rewrite loads and stores through a pointer and all pointers derived from
+ /// it.
+ bool rewrite(Instruction &I) {
+ DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
+ enqueueUsers(I);
+ bool Changed = false;
+ while (!Queue.empty()) {
+ U = Queue.pop_back_val();
+ Changed |= visit(cast<Instruction>(U->getUser()));
+ }
+ return Changed;
+ }
+
+private:
+ /// Enqueue all the users of the given instruction for further processing.
+ /// This uses a set to de-duplicate users.
+ void enqueueUsers(Instruction &I) {
+ for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
+ ++UI)
+ if (Visited.insert(*UI))
+ Queue.push_back(&UI.getUse());
+ }
+
+ // Conservative default is to not rewrite anything.
+ bool visitInstruction(Instruction &I) { return false; }
+
+ /// \brief Generic recursive split emission class.
+ template <typename Derived>
+ class OpSplitter {
+ protected:
+ /// The builder used to form new instructions.
+ IRBuilder<> IRB;
+ /// The indices to be used with insertvalue or extractvalue to select the
+ /// appropriate value within the aggregate.
+ SmallVector<unsigned, 4> Indices;
+ /// The indices to a GEP instruction which will move Ptr to the correct slot
+ /// within the aggregate.
+ SmallVector<Value *, 4> GEPIndices;
+ /// The base pointer of the original op, used as a base for GEPing the
+ /// split operations.
+ Value *Ptr;
+
+ /// Initialize the splitter with an insertion point and Ptr, starting with a
+ /// single zero GEP index.
+ OpSplitter(Instruction *InsertionPoint, Value *Ptr)
+ : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
+
+ public:
+ /// \brief Generic recursive split emission routine.
+ ///
+ /// This method recursively splits an aggregate op (load or store) into
+ /// scalar or vector ops. It splits recursively until it hits a single value
+ /// and emits that single value operation via the template argument.
+ ///
+ /// The logic of this routine relies on GEPs and insertvalue and
+ /// extractvalue all operating with the same fundamental index list, merely
+ /// formatted differently (GEPs need actual values).
+ ///
+ /// \param Ty The type being split recursively into smaller ops.
+ /// \param Agg The aggregate value being built up or stored, depending on
+ /// whether this is splitting a load or a store respectively.
+ void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
+ if (Ty->isSingleValueType())
+ return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
+
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ unsigned OldSize = Indices.size();
+ (void)OldSize;
+ for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
+ ++Idx) {
+ assert(Indices.size() == OldSize && "Did not return to the old size");
+ Indices.push_back(Idx);
+ GEPIndices.push_back(IRB.getInt32(Idx));
+ emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
+ GEPIndices.pop_back();
+ Indices.pop_back();
+ }
+ return;
+ }
+
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
+ unsigned OldSize = Indices.size();
+ (void)OldSize;
+ for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
+ ++Idx) {
+ assert(Indices.size() == OldSize && "Did not return to the old size");
+ Indices.push_back(Idx);
+ GEPIndices.push_back(IRB.getInt32(Idx));
+ emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
+ GEPIndices.pop_back();
+ Indices.pop_back();
+ }
+ return;
+ }
+
+ llvm_unreachable("Only arrays and structs are aggregate loadable types");
+ }
+ };
+
+ struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
+ LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
+ : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
+
+ /// Emit a leaf load of a single value. This is called at the leaves of the
+ /// recursive emission to actually load values.
+ void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
+ assert(Ty->isSingleValueType());
+ // Load the single value and insert it using the indices.
+ Value *Load = IRB.CreateLoad(IRB.CreateInBoundsGEP(Ptr, GEPIndices,
+ Name + ".gep"),
+ Name + ".load");
+ Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
+ DEBUG(dbgs() << " to: " << *Load << "\n");
+ }
+ };
+
+ bool visitLoadInst(LoadInst &LI) {
+ assert(LI.getPointerOperand() == *U);
+ if (!LI.isSimple() || LI.getType()->isSingleValueType())
+ return false;
+
+ // We have an aggregate being loaded, split it apart.
+ DEBUG(dbgs() << " original: " << LI << "\n");
+ LoadOpSplitter Splitter(&LI, *U);
+ Value *V = UndefValue::get(LI.getType());
+ Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
+ LI.replaceAllUsesWith(V);
+ LI.eraseFromParent();
+ return true;
+ }
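+
+ // A sketch of the splitting performed above for a hypothetical aggregate
+ // load (value names follow the Name + ".fca" scheme used by the splitter):
+ //
+ //   %v = load {i32, float}* %p
+ // becomes
+ //   %v.fca.0.gep = getelementptr inbounds {i32, float}* %p, i32 0, i32 0
+ //   %v.fca.0.load = load i32* %v.fca.0.gep
+ //   %v.fca.0.insert = insertvalue {i32, float} undef, i32 %v.fca.0.load, 0
+ //   %v.fca.1.gep = getelementptr inbounds {i32, float}* %p, i32 0, i32 1
+ //   %v.fca.1.load = load float* %v.fca.1.gep
+ //   %v.fca.1.insert = insertvalue {i32, float} %v.fca.0.insert,
+ //                                 float %v.fca.1.load, 1
+ //
+ // and all uses of %v are replaced with %v.fca.1.insert.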
+
+ struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
+ StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
+ : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
+
+ /// Emit a leaf store of a single value. This is called at the leaves of the
+ /// recursive emission to actually produce stores.
+ void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
+ assert(Ty->isSingleValueType());
+ // Extract the single value and store it using the indices.
+ Value *Store = IRB.CreateStore(
+ IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
+ IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
+ (void)Store;
+ DEBUG(dbgs() << " to: " << *Store << "\n");
+ }
+ };
+
+ bool visitStoreInst(StoreInst &SI) {
+ if (!SI.isSimple() || SI.getPointerOperand() != *U)
+ return false;
+ Value *V = SI.getValueOperand();
+ if (V->getType()->isSingleValueType())
+ return false;
+
+ // We have an aggregate being stored, split it apart.
+ DEBUG(dbgs() << " original: " << SI << "\n");
+ StoreOpSplitter Splitter(&SI, *U);
+ Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
+ SI.eraseFromParent();
+ return true;
+ }
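+
+ // The store side mirrors the load sketch above: for a hypothetical
+ //   store {i32, float} %v, {i32, float}* %p
+ // each field is pulled out with extractvalue (%v.fca.0.extract, ...) and
+ // stored through its own inbounds GEP, after which the original store is
+ // erased.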
+
+ bool visitBitCastInst(BitCastInst &BC) {
+ enqueueUsers(BC);
+ return false;
+ }
+
+ bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ enqueueUsers(GEPI);
+ return false;
+ }
+
+ bool visitPHINode(PHINode &PN) {
+ enqueueUsers(PN);
+ return false;
+ }
+
+ bool visitSelectInst(SelectInst &SI) {
+ enqueueUsers(SI);
+ return false;
+ }
+};
+}
+
+/// \brief Strip aggregate type wrapping.
+///
+/// This removes no-op aggregate types wrapping an underlying type. It will
+/// strip as many layers of types as it can without changing either the type
+/// size or the allocated size.
+static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
+ if (Ty->isSingleValueType())
+ return Ty;
+
+ uint64_t AllocSize = DL.getTypeAllocSize(Ty);
+ uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
+
+ Type *InnerTy;
+ if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
+ InnerTy = ArrTy->getElementType();
+ } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ unsigned Index = SL->getElementContainingOffset(0);
+ InnerTy = STy->getElementType(Index);
+ } else {
+ return Ty;
+ }
+
+ if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
+ TypeSize > DL.getTypeSizeInBits(InnerTy))
+ return Ty;
+
+ return stripAggregateTypeWrapping(DL, InnerTy);
+}
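+
+// For illustration, with hypothetical example types the stripping above maps:
+//   { { float } } -> float        (both struct wrappers are no-ops)
+//   [1 x i16]     -> i16          (a one-element array adds no size)
+//   { i32, i32 }  -> { i32, i32 } (the first element is smaller, so the
+//                                  struct is kept as-is)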
+
+/// \brief Try to find a partition of the aggregate type passed in for a given
+/// offset and size.
+///
+/// This recurses through the aggregate type and tries to compute a subtype
+/// based on the offset and size. When the offset and size span a sub-section
+/// of an array, it will even compute a new array type for that sub-section,
+/// and the same for structs.
+///
+/// Note that this routine is very strict and tries to find a partition of the
+/// type which produces the *exact* right offset and size. It is not forgiving
+ /// when the size or offset causes either end of the type-based partition to be off.
+/// Also, this is a best-effort routine. It is reasonable to give up and not
+/// return a type if necessary.
+static Type *getTypePartition(const DataLayout &TD, Type *Ty,
+ uint64_t Offset, uint64_t Size) {
+ if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
+ return stripAggregateTypeWrapping(TD, Ty);
+ if (Offset > TD.getTypeAllocSize(Ty) ||
+ (TD.getTypeAllocSize(Ty) - Offset) < Size)
+ return 0;
+
+ if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
+ // We can't partition pointers...
+ if (SeqTy->isPointerTy())
+ return 0;
+
+ Type *ElementTy = SeqTy->getElementType();
+ uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
+ uint64_t NumSkippedElements = Offset / ElementSize;
+ if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy))
+ if (NumSkippedElements >= ArrTy->getNumElements())
+ return 0;
+ if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy))
+ if (NumSkippedElements >= VecTy->getNumElements())
+ return 0;
+ Offset -= NumSkippedElements * ElementSize;
+
+ // First check if we need to recurse.
+ if (Offset > 0 || Size < ElementSize) {
+ // Bail if the partition ends in a different array element.
+ if ((Offset + Size) > ElementSize)
+ return 0;
+ // Recurse through the element type trying to peel off offset bytes.
+ return getTypePartition(TD, ElementTy, Offset, Size);
+ }
+ assert(Offset == 0);
+
+ if (Size == ElementSize)
+ return stripAggregateTypeWrapping(TD, ElementTy);
+ assert(Size > ElementSize);
+ uint64_t NumElements = Size / ElementSize;
+ if (NumElements * ElementSize != Size)
+ return 0;
+ return ArrayType::get(ElementTy, NumElements);
+ }
+
+ StructType *STy = dyn_cast<StructType>(Ty);
+ if (!STy)
+ return 0;
+
+ const StructLayout *SL = TD.getStructLayout(STy);
+ if (Offset >= SL->getSizeInBytes())
+ return 0;
+ uint64_t EndOffset = Offset + Size;
+ if (EndOffset > SL->getSizeInBytes())
+ return 0;
+
+ unsigned Index = SL->getElementContainingOffset(Offset);
+ Offset -= SL->getElementOffset(Index);
+
+ Type *ElementTy = STy->getElementType(Index);
+ uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
+ if (Offset >= ElementSize)
+ return 0; // The offset points into alignment padding.
+
+ // See if any partition must be contained by the element.
+ if (Offset > 0 || Size < ElementSize) {
+ if ((Offset + Size) > ElementSize)
+ return 0;
+ return getTypePartition(TD, ElementTy, Offset, Size);
+ }
+ assert(Offset == 0);
+
+ if (Size == ElementSize)
+ return stripAggregateTypeWrapping(TD, ElementTy);
+
+ StructType::element_iterator EI = STy->element_begin() + Index,
+ EE = STy->element_end();
+ if (EndOffset < SL->getSizeInBytes()) {
+ unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
+ if (Index == EndIndex)
+ return 0; // Within a single element and its padding.
+
+ // Don't try to form "natural" types if the elements don't line up with the
+ // expected size.
+ // FIXME: We could potentially recurse down through the last element in the
+ // sub-struct to find a natural end point.
+ if (SL->getElementOffset(EndIndex) != EndOffset)
+ return 0;
+
+ assert(Index < EndIndex);
+ EE = STy->element_begin() + EndIndex;
+ }
+
+ // Try to build up a sub-structure.
+ StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
+ STy->isPacked());
+ const StructLayout *SubSL = TD.getStructLayout(SubTy);
+ if (Size != SubSL->getSizeInBytes())
+ return 0; // The sub-struct doesn't have quite the size needed.
+
+ return SubTy;
+}
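+
+// A worked example of the partitioning above, assuming a hypothetical type
+// %T = { i32, i32, [8 x i8] } laid out without padding (16 bytes total):
+//   getTypePartition(TD, %T, 0, 8) -> { i32, i32 } (a new sub-structure)
+//   getTypePartition(TD, %T, 8, 8) -> [8 x i8]
+//   getTypePartition(TD, %T, 4, 8) -> null: the byte range [4,12) does not
+//                                     end on an element boundary.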
+
+/// \brief Rewrite an alloca partition's users.
+///
+/// This routine drives both of the rewriting goals of the SROA pass. It tries
+ /// to rewrite uses of an alloca partition to be conducive to SSA value
+/// promotion. If the partition needs a new, more refined alloca, this will
+/// build that new alloca, preserving as much type information as possible, and
+/// rewrite the uses of the old alloca to point at the new one and have the
+ /// appropriate new offsets. It also evaluates how successful the rewrite was
+ /// at enabling promotion and, if it was, queues the alloca for promotion.
+bool SROA::rewriteAllocaPartition(AllocaInst &AI,
+ AllocaPartitioning &P,
+ AllocaPartitioning::iterator PI) {
+ uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset;
+ bool IsLive = false;
+ for (AllocaPartitioning::use_iterator UI = P.use_begin(PI),
+ UE = P.use_end(PI);
+ UI != UE && !IsLive; ++UI)
+ if (UI->U)
+ IsLive = true;
+ if (!IsLive)
+ return false; // No live uses left of this partition.
+
+ DEBUG(dbgs() << "Speculating PHIs and selects in partition "
+ << "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n");
+
+ PHIOrSelectSpeculator Speculator(*TD, P, *this);
+ DEBUG(dbgs() << " speculating ");
+ DEBUG(P.print(dbgs(), PI, ""));
+ Speculator.visitUsers(PI);
+
+ // Try to compute a friendly type for this partition of the alloca. This
+ // won't always succeed, in which case we fall back to a legal integer type
+ // or an i8 array of an appropriate size.
+ Type *AllocaTy = 0;
+ if (Type *PartitionTy = P.getCommonType(PI))
+ if (TD->getTypeAllocSize(PartitionTy) >= AllocaSize)
+ AllocaTy = PartitionTy;
+ if (!AllocaTy)
+ if (Type *PartitionTy = getTypePartition(*TD, AI.getAllocatedType(),
+ PI->BeginOffset, AllocaSize))
+ AllocaTy = PartitionTy;
+ if ((!AllocaTy ||
+ (AllocaTy->isArrayTy() &&
+ AllocaTy->getArrayElementType()->isIntegerTy())) &&
+ TD->isLegalInteger(AllocaSize * 8))
+ AllocaTy = Type::getIntNTy(*C, AllocaSize * 8);
+ if (!AllocaTy)
+ AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize);
+ assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize);
+
+ // Check for the case where we're going to rewrite to a new alloca of the
+ // exact same type as the original, and with the same access offsets. In that
+ // case, re-use the existing alloca, but still run through the rewriter to
+ // perform phi and select speculation.
+ AllocaInst *NewAI;
+ if (AllocaTy == AI.getAllocatedType()) {
+ assert(PI->BeginOffset == 0 &&
+ "Non-zero begin offset but same alloca type");
+ assert(PI == P.begin() && "Begin offset is zero on later partition");
+ NewAI = &AI;
+ } else {
+ unsigned Alignment = AI.getAlignment();
+ if (!Alignment) {
+ // The minimum alignment which users can rely on when the explicit
+ // alignment is omitted or zero is that required by the ABI for this
+ // type.
+ Alignment = TD->getABITypeAlignment(AI.getAllocatedType());
+ }
+ Alignment = MinAlign(Alignment, PI->BeginOffset);
+ // If we will get at least this much alignment from the type alone, leave
+ // the alloca's alignment unconstrained.
+ if (Alignment <= TD->getABITypeAlignment(AllocaTy))
+ Alignment = 0;
+ NewAI = new AllocaInst(AllocaTy, 0, Alignment,
+ AI.getName() + ".sroa." + Twine(PI - P.begin()),
+ &AI);
+ ++NumNewAllocas;
+ }
+
+ DEBUG(dbgs() << "Rewriting alloca partition "
+ << "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: "
+ << *NewAI << "\n");
+
+ // Track the high watermark of the post-promotion worklist. We will reset it
+ // to this point if the alloca is not in fact scheduled for promotion.
+ unsigned PPWOldSize = PostPromotionWorklist.size();
+
+ AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI,
+ PI->BeginOffset, PI->EndOffset);
+ DEBUG(dbgs() << " rewriting ");
+ DEBUG(P.print(dbgs(), PI, ""));
+ bool Promotable = Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI));
+ if (Promotable) {
+ DEBUG(dbgs() << " and queuing for promotion\n");
+ PromotableAllocas.push_back(NewAI);
+ } else if (NewAI != &AI) {
+ // If we can't promote the alloca, iterate on it to check for new
+ // refinements exposed by splitting the current alloca. Don't iterate on an
+ // alloca which didn't actually change and didn't get promoted.
+ Worklist.insert(NewAI);
+ }
+
+ // Drop any post-promotion work items if promotion didn't happen.
+ if (!Promotable)
+ while (PostPromotionWorklist.size() > PPWOldSize)
+ PostPromotionWorklist.pop_back();
+
+ return true;
+}
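+
+// To make the fallback chain above concrete: a hypothetical 8-byte partition
+// with no common use type and no natural sub-type of the original alloca gets
+// an i64 alloca on targets where i64 is legal, and an [8 x i8] alloca
+// otherwise.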
+
+/// \brief Walks the partitioning of an alloca rewriting uses of each partition.
+bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) {
+ bool Changed = false;
+ for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end(); PI != PE;
+ ++PI)
+ Changed |= rewriteAllocaPartition(AI, P, PI);
+
+ return Changed;
+}
+
+/// \brief Analyze an alloca for SROA.
+///
+/// This analyzes the alloca to ensure we can reason about it, builds
+/// a partitioning of the alloca, and then hands it off to be split and
+/// rewritten as needed.
+bool SROA::runOnAlloca(AllocaInst &AI) {
+ DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
+ ++NumAllocasAnalyzed;
+
+ // Special case dead allocas, as they're trivial.
+ if (AI.use_empty()) {
+ AI.eraseFromParent();
+ return true;
+ }
+
+ // Skip alloca forms that this analysis can't handle.
+ if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
+ TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
+ return false;
+
+ bool Changed = false;
+
+ // First, split any FCA loads and stores touching this alloca to promote
+ // better splitting and promotion opportunities.
+ AggLoadStoreRewriter AggRewriter(*TD);
+ Changed |= AggRewriter.rewrite(AI);
+
+ // Build the partition set using a recursive instruction-visiting builder.
+ AllocaPartitioning P(*TD, AI);
+ DEBUG(P.print(dbgs()));
+ if (P.isEscaped())
+ return Changed;
+
+ // Delete all the dead users of this alloca before splitting and rewriting it.
+ for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(),
+ DE = P.dead_user_end();
+ DI != DE; ++DI) {
+ Changed = true;
+ (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
+ DeadInsts.insert(*DI);
+ }
+ for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(),
+ DE = P.dead_op_end();
+ DO != DE; ++DO) {
+ Value *OldV = **DO;
+ // Clobber the use with an undef value.
+ **DO = UndefValue::get(OldV->getType());
+ if (Instruction *OldI = dyn_cast<Instruction>(OldV))
+ if (isInstructionTriviallyDead(OldI)) {
+ Changed = true;
+ DeadInsts.insert(OldI);
+ }
+ }
+
+ // No partitions to split. Leave the dead alloca for a later pass to clean up.
+ if (P.begin() == P.end())
+ return Changed;
+
+ return splitAlloca(AI, P) || Changed;
+}
+
+/// \brief Delete the dead instructions accumulated in this run.
+///
+/// Recursively deletes the dead instructions we've accumulated. This is done
+/// at the very end to maximize locality of the recursive delete and to
+/// minimize the problems of invalidated instruction pointers as such pointers
+/// are used heavily in the intermediate stages of the algorithm.
+///
+/// We also record the alloca instructions deleted here so that they aren't
+/// subsequently handed to mem2reg to promote.
+void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
+ while (!DeadInsts.empty()) {
+ Instruction *I = DeadInsts.pop_back_val();
+ DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
+
+ I->replaceAllUsesWith(UndefValue::get(I->getType()));
+
+ for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
+ if (Instruction *U = dyn_cast<Instruction>(*OI)) {
+ // Zero out the operand and see if it becomes trivially dead.
+ *OI = 0;
+ if (isInstructionTriviallyDead(U))
+ DeadInsts.insert(U);
+ }
+
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
+ DeletedAllocas.insert(AI);
+
+ ++NumDeleted;
+ I->eraseFromParent();
+ }
+}
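+
+// For example, deleting a rewritten memcpy may leave the bitcast that fed its
+// pointer operand trivially dead; zeroing the operand queues that bitcast,
+// whose deletion may in turn expose the GEP beneath it, and so on.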
+
+/// \brief Promote the allocas, using the best available technique.
+///
+/// This attempts to promote whatever allocas have been identified as viable in
+/// the PromotableAllocas list. If that list is empty, there is nothing to do.
+/// If there is a domtree available, we attempt to promote using the full power
+/// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
+/// based on the SSAUpdater utilities. This function returns whether any
+ /// promotion occurred.
+bool SROA::promoteAllocas(Function &F) {
+ if (PromotableAllocas.empty())
+ return false;
+
+ NumPromoted += PromotableAllocas.size();
+
+ if (DT && !ForceSSAUpdater) {
+ DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
+ PromoteMemToReg(PromotableAllocas, *DT);
+ PromotableAllocas.clear();
+ return true;
+ }
+
+ DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
+ SSAUpdater SSA;
+ DIBuilder DIB(*F.getParent());
+ SmallVector<Instruction*, 64> Insts;
+
+ for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
+ AllocaInst *AI = PromotableAllocas[Idx];
+ for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
+ UI != UE;) {
+ Instruction *I = cast<Instruction>(*UI++);
+ // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
+ // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
+ // leading to them) here. Eventually it should use them to optimize the
+ // scalar values produced.
+ if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
+ assert(onlyUsedByLifetimeMarkers(I) &&
+ "Found a bitcast used outside of a lifetime marker.");
+ while (!I->use_empty())
+ cast<Instruction>(*I->use_begin())->eraseFromParent();
+ I->eraseFromParent();
+ continue;
+ }
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end);
+ II->eraseFromParent();
+ continue;
+ }
+
+ Insts.push_back(I);
+ }
+ AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
+ Insts.clear();
+ }
+
+ PromotableAllocas.clear();
+ return true;
+}
+
+namespace {
+ /// \brief A predicate to test whether an alloca belongs to a set.
+ class IsAllocaInSet {
+ typedef SmallPtrSet<AllocaInst *, 4> SetType;
+ const SetType &Set;
+
+ public:
+ typedef AllocaInst *argument_type;
+
+ IsAllocaInSet(const SetType &Set) : Set(Set) {}
+ bool operator()(AllocaInst *AI) const { return Set.count(AI); }
+ };
+}
+
+bool SROA::runOnFunction(Function &F) {
+ DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
+ C = &F.getContext();
+ TD = getAnalysisIfAvailable<DataLayout>();
+ if (!TD) {
+ DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
+ return false;
+ }
+ DT = getAnalysisIfAvailable<DominatorTree>();
+
+ BasicBlock &EntryBB = F.getEntryBlock();
+ for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
+ I != E; ++I)
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
+ Worklist.insert(AI);
+
+ bool Changed = false;
+ // A set of deleted alloca instruction pointers which should be removed from
+ // the list of promotable allocas.
+ SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
+
+ do {
+ while (!Worklist.empty()) {
+ Changed |= runOnAlloca(*Worklist.pop_back_val());
+ deleteDeadInstructions(DeletedAllocas);
+
+ // Remove the deleted allocas from various lists so that we don't try to
+ // continue processing them.
+ if (!DeletedAllocas.empty()) {
+ Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
+ PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
+ PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
+ PromotableAllocas.end(),
+ IsAllocaInSet(DeletedAllocas)),
+ PromotableAllocas.end());
+ DeletedAllocas.clear();
+ }
+ }
+
+ Changed |= promoteAllocas(F);
+
+ Worklist = PostPromotionWorklist;
+ PostPromotionWorklist.clear();
+ } while (!Worklist.empty());
+
+ return Changed;
+}
+
+void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
+ if (RequiresDomTree)
+ AU.addRequired<DominatorTree>();
+ AU.setPreservesCFG();
+}
diff --git a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
index 48318c8..39630fd 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -19,7 +19,7 @@
#include "llvm/PassManager.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;
@@ -59,6 +59,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeRegToMemPass(Registry);
initializeSCCPPass(Registry);
initializeIPSCCPPass(Registry);
+ initializeSROAPass(Registry);
initializeSROA_DTPass(Registry);
initializeSROA_SSAUpPass(Registry);
initializeCFGSimplifyPassPass(Registry);
diff --git a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 6637126..a46d09c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -46,7 +46,7 @@
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -56,7 +56,6 @@ STATISTIC(NumReplaced, "Number of allocas broken up");
STATISTIC(NumPromoted, "Number of allocas promoted");
STATISTIC(NumAdjusted, "Number of scalar allocas adjusted to allow promotion");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
-STATISTIC(NumGlobals, "Number of allocas copied from constant global");
namespace {
struct SROA : public FunctionPass {
@@ -88,7 +87,7 @@ namespace {
private:
bool HasDomTree;
- TargetData *TD;
+ DataLayout *TD;
/// DeadInsts - Keep track of instructions we have made dead, so that
/// we can remove them after we are done working.
@@ -183,9 +182,6 @@ namespace {
void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
bool ShouldAttemptScalarRepl(AllocaInst *AI);
-
- static MemTransferInst *isOnlyCopiedFromConstantGlobal(
- AllocaInst *AI, SmallVector<Instruction*, 4> &ToDelete);
};
// SROA_DT - SROA that uses DominatorTree.
@@ -262,7 +258,7 @@ namespace {
class ConvertToScalarInfo {
/// AllocaSize - The size of the alloca being considered in bytes.
unsigned AllocaSize;
- const TargetData &TD;
+ const DataLayout &TD;
unsigned ScalarLoadThreshold;
/// IsNotTrivial - This is set to true if there is some access to the object
@@ -305,7 +301,7 @@ class ConvertToScalarInfo {
bool HadDynamicAccess;
public:
- explicit ConvertToScalarInfo(unsigned Size, const TargetData &td,
+ explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td,
unsigned SLT)
: AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false),
ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
@@ -1024,11 +1020,11 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
bool SROA::runOnFunction(Function &F) {
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
bool Changed = performPromotion(F);
- // FIXME: ScalarRepl currently depends on TargetData more than it
+ // FIXME: ScalarRepl currently depends on DataLayout more than it
// theoretically needs to. It should be refactored in order to support
// target-independent IR. Until this is done, just skip the actual
// scalar-replacement portion of this pass.
@@ -1138,7 +1134,7 @@ public:
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
+static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
@@ -1176,7 +1172,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
+static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
// For now, we can only do this promotion if the load is in the same block as
// the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
@@ -1240,7 +1236,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
/// direct (non-volatile) loads and stores to it. If the alloca is close but
/// not quite there, this will transform the code to allow promotion. As such,
/// it is a non-pure predicate.
-static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
SetVector<Instruction*, SmallVector<Instruction*, 4>,
SmallPtrSet<Instruction*, 4> > InstsToRewrite;
@@ -1465,26 +1461,6 @@ bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) {
return false;
}
-/// getPointeeAlignment - Compute the minimum alignment of the value pointed
-/// to by the given pointer.
-static unsigned getPointeeAlignment(Value *V, const TargetData &TD) {
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- if (CE->getOpcode() == Instruction::BitCast ||
- (CE->getOpcode() == Instruction::GetElementPtr &&
- cast<GEPOperator>(CE)->hasAllZeroIndices()))
- return getPointeeAlignment(CE->getOperand(0), TD);
-
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
- if (!GV->isDeclaration())
- return TD.getPreferredAlignment(GV);
-
- if (PointerType *PT = dyn_cast<PointerType>(V->getType()))
- return TD.getABITypeAlignment(PT->getElementType());
-
- return 0;
-}
-
-
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the alloca instructions in the function, removing them
// if they are only used by getelementptr instructions.
@@ -1516,29 +1492,6 @@ bool SROA::performScalarRepl(Function &F) {
if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
continue;
- // Check to see if this allocation is only modified by a memcpy/memmove from
- // a constant global whose alignment is equal to or exceeds that of the
- // allocation. If this is the case, we can change all users to use
- // the constant global instead. This is commonly produced by the CFE by
- // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
- // is only subsequently read.
- SmallVector<Instruction *, 4> ToDelete;
- if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(AI, ToDelete)) {
- if (AI->getAlignment() <= getPointeeAlignment(Copy->getSource(), *TD)) {
- DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n');
- DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
- for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
- ToDelete[i]->eraseFromParent();
- Constant *TheSrc = cast<Constant>(Copy->getSource());
- AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
- Copy->eraseFromParent(); // Don't mutate the global.
- AI->eraseFromParent();
- ++NumGlobals;
- Changed = true;
- continue;
- }
- }
-
// Check to see if we can perform the core SROA transformation. We cannot
// transform the allocation instruction if it is an array allocation
// (allocations OF arrays are ok though), and an allocation of a scalar
@@ -2584,7 +2537,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding in between the elements that would be split apart
/// by SROA; return false otherwise.
-static bool HasPadding(Type *Ty, const TargetData &TD) {
+static bool HasPadding(Type *Ty, const DataLayout &TD) {
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Ty = ATy->getElementType();
return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
@@ -2656,134 +2609,3 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
return true;
}
-
-
-
-/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
-/// some part of a constant global variable. This intentionally only accepts
-/// constant expressions because we don't can't rewrite arbitrary instructions.
-static bool PointsToConstantGlobal(Value *V) {
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
- return GV->isConstant();
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- if (CE->getOpcode() == Instruction::BitCast ||
- CE->getOpcode() == Instruction::GetElementPtr)
- return PointsToConstantGlobal(CE->getOperand(0));
- return false;
-}
-
-/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
-/// pointer to an alloca. Ignore any reads of the pointer, return false if we
-/// see any stores or other unknown uses. If we see pointer arithmetic, keep
-/// track of whether it moves the pointer (with isOffset) but otherwise traverse
-/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
-/// the alloca, and if the source pointer is a pointer to a constant global, we
-/// can optimize this.
-static bool
-isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
- bool isOffset,
- SmallVector<Instruction *, 4> &LifetimeMarkers) {
- // We track lifetime intrinsics as we encounter them. If we decide to go
- // ahead and replace the value with the global, this lets the caller quickly
- // eliminate the markers.
-
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
- User *U = cast<Instruction>(*UI);
-
- if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
- // Ignore non-volatile loads, they are always ok.
- if (!LI->isSimple()) return false;
- continue;
- }
-
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
- // If uses of the bitcast are ok, we are ok.
- if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset,
- LifetimeMarkers))
- return false;
- continue;
- }
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
- // If the GEP has all zero indices, it doesn't offset the pointer. If it
- // doesn't, it does.
- if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
- isOffset || !GEP->hasAllZeroIndices(),
- LifetimeMarkers))
- return false;
- continue;
- }
-
- if (CallSite CS = U) {
- // If this is the function being called then we treat it like a load and
- // ignore it.
- if (CS.isCallee(UI))
- continue;
-
- // If this is a readonly/readnone call site, then we know it is just a
- // load (but one that potentially returns the value itself), so we can
- // ignore it if we know that the value isn't captured.
- unsigned ArgNo = CS.getArgumentNo(UI);
- if (CS.onlyReadsMemory() &&
- (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
- continue;
-
- // If this is being passed as a byval argument, the caller is making a
- // copy, so it is only a read of the alloca.
- if (CS.isByValArgument(ArgNo))
- continue;
- }
-
- // Lifetime intrinsics can be handled by the caller.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
- if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
- II->getIntrinsicID() == Intrinsic::lifetime_end) {
- assert(II->use_empty() && "Lifetime markers have no result to use!");
- LifetimeMarkers.push_back(II);
- continue;
- }
- }
-
- // If this is isn't our memcpy/memmove, reject it as something we can't
- // handle.
- MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
- if (MI == 0)
- return false;
-
- // If the transfer is using the alloca as a source of the transfer, then
- // ignore it since it is a load (unless the transfer is volatile).
- if (UI.getOperandNo() == 1) {
- if (MI->isVolatile()) return false;
- continue;
- }
-
- // If we already have seen a copy, reject the second one.
- if (TheCopy) return false;
-
- // If the pointer has been offset from the start of the alloca, we can't
- // safely handle this.
- if (isOffset) return false;
-
- // If the memintrinsic isn't using the alloca as the dest, reject it.
- if (UI.getOperandNo() != 0) return false;
-
- // If the source of the memcpy/move is not a constant global, reject it.
- if (!PointsToConstantGlobal(MI->getSource()))
- return false;
-
- // Otherwise, the transform is safe. Remember the copy instruction.
- TheCopy = MI;
- }
- return true;
-}
-
-/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
-/// modified by a copy from a constant global. If we can prove this, we can
-/// replace any uses of the alloca with uses of the global directly.
-MemTransferInst *
-SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
- SmallVector<Instruction*, 4> &ToDelete) {
- MemTransferInst *TheCopy = 0;
- if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false, ToDelete))
- return TheCopy;
- return 0;
-}
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index d13e4ab..9f24bb6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -31,10 +31,11 @@
#include "llvm/Attributes.h"
#include "llvm/Support/CFG.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/TargetTransformInfo.h"
using namespace llvm;
STATISTIC(NumSimpl, "Number of blocks simplified");
@@ -59,9 +60,9 @@ FunctionPass *llvm::createCFGSimplificationPass() {
return new CFGSimplifyPass();
}
-/// ChangeToUnreachable - Insert an unreachable instruction before the specified
+/// changeToUnreachable - Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
-static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) {
+static void changeToUnreachable(Instruction *I, bool UseLLVMTrap) {
BasicBlock *BB = I->getParent();
// Loop over all of the successors, removing BB's entry from any PHI
// nodes.
@@ -87,8 +88,8 @@ static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) {
}
}
-/// ChangeToCall - Convert the specified invoke into a normal call.
-static void ChangeToCall(InvokeInst *II) {
+/// changeToCall - Convert the specified invoke into a normal call.
+static void changeToCall(InvokeInst *II) {
SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, "", II);
NewCall->takeName(II);
@@ -105,7 +106,7 @@ static void ChangeToCall(InvokeInst *II) {
II->eraseFromParent();
}
-static bool MarkAliveBlocks(BasicBlock *BB,
+static bool markAliveBlocks(BasicBlock *BB,
SmallPtrSet<BasicBlock*, 128> &Reachable) {
SmallVector<BasicBlock*, 128> Worklist;
@@ -129,7 +130,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
++BBI;
if (!isa<UnreachableInst>(BBI)) {
// Don't insert a call to llvm.trap right before the unreachable.
- ChangeToUnreachable(BBI, false);
+ changeToUnreachable(BBI, false);
Changed = true;
}
break;
@@ -148,7 +149,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
if (isa<UndefValue>(Ptr) ||
(isa<ConstantPointerNull>(Ptr) &&
SI->getPointerAddressSpace() == 0)) {
- ChangeToUnreachable(SI, true);
+ changeToUnreachable(SI, true);
Changed = true;
break;
}
@@ -159,7 +160,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
Value *Callee = II->getCalledValue();
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
- ChangeToUnreachable(II, true);
+ changeToUnreachable(II, true);
Changed = true;
} else if (II->doesNotThrow()) {
if (II->use_empty() && II->onlyReadsMemory()) {
@@ -168,7 +169,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
II->getUnwindDest()->removePredecessor(II->getParent());
II->eraseFromParent();
} else
- ChangeToCall(II);
+ changeToCall(II);
Changed = true;
}
}
@@ -180,12 +181,12 @@ static bool MarkAliveBlocks(BasicBlock *BB,
return Changed;
}
-/// RemoveUnreachableBlocksFromFn - Remove blocks that are not reachable, even
+/// removeUnreachableBlocksFromFn - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise.
-static bool RemoveUnreachableBlocksFromFn(Function &F) {
+static bool removeUnreachableBlocksFromFn(Function &F) {
SmallPtrSet<BasicBlock*, 128> Reachable;
- bool Changed = MarkAliveBlocks(F.begin(), Reachable);
+ bool Changed = markAliveBlocks(F.begin(), Reachable);
// If there are unreachable blocks in the CFG...
if (Reachable.size() == F.size())
@@ -215,9 +216,9 @@ static bool RemoveUnreachableBlocksFromFn(Function &F) {
return true;
}
-/// MergeEmptyReturnBlocks - If we have more than one empty (other than phi
+/// mergeEmptyReturnBlocks - If we have more than one empty (other than phi
/// node) return blocks, merge them together to promote recursive block merging.
-static bool MergeEmptyReturnBlocks(Function &F) {
+static bool mergeEmptyReturnBlocks(Function &F) {
bool Changed = false;
BasicBlock *RetBlock = 0;
@@ -291,9 +292,10 @@ static bool MergeEmptyReturnBlocks(Function &F) {
return Changed;
}
-/// IterativeSimplifyCFG - Call SimplifyCFG on all the blocks in the function,
+/// iterativelySimplifyCFG - Call SimplifyCFG on all the blocks in the function,
/// iterating until no more changes are made.
-static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) {
+static bool iterativelySimplifyCFG(Function &F, const DataLayout *TD,
+ const TargetTransformInfo *TTI) {
bool Changed = false;
bool LocalChange = true;
while (LocalChange) {
@@ -302,7 +304,7 @@ static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) {
// Loop over all of the basic blocks and remove them if they are unneeded...
//
for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) {
- if (SimplifyCFG(BBIt++, TD)) {
+ if (SimplifyCFG(BBIt++, TD, TTI)) {
LocalChange = true;
++NumSimpl;
}
@@ -316,25 +318,27 @@ static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) {
// simplify the CFG.
//
bool CFGSimplifyPass::runOnFunction(Function &F) {
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
- bool EverChanged = RemoveUnreachableBlocksFromFn(F);
- EverChanged |= MergeEmptyReturnBlocks(F);
- EverChanged |= IterativeSimplifyCFG(F, TD);
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ const TargetTransformInfo *TTI =
+ getAnalysisIfAvailable<TargetTransformInfo>();
+ bool EverChanged = removeUnreachableBlocksFromFn(F);
+ EverChanged |= mergeEmptyReturnBlocks(F);
+ EverChanged |= iterativelySimplifyCFG(F, TD, TTI);
// If neither pass changed anything, we're done.
if (!EverChanged) return false;
- // IterativeSimplifyCFG can (rarely) make some loops dead. If this happens,
- // RemoveUnreachableBlocksFromFn is needed to nuke them, which means we should
+ // iterativelySimplifyCFG can (rarely) make some loops dead. If this happens,
+ // removeUnreachableBlocksFromFn is needed to nuke them, which means we should
// iterate between the two optimizations. We structure the code like this to
- // avoid reruning IterativeSimplifyCFG if the second pass of
- // RemoveUnreachableBlocksFromFn doesn't do anything.
- if (!RemoveUnreachableBlocksFromFn(F))
+ // avoid reruning iterativelySimplifyCFG if the second pass of
+ // removeUnreachableBlocksFromFn doesn't do anything.
+ if (!removeUnreachableBlocksFromFn(F))
return true;
do {
- EverChanged = IterativeSimplifyCFG(F, TD);
- EverChanged |= RemoveUnreachableBlocksFromFn(F);
+ EverChanged = iterativelySimplifyCFG(F, TD, TTI);
+ EverChanged |= removeUnreachableBlocksFromFn(F);
} while (EverChanged);
return true;
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index f110320..17d07cd 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -28,9 +28,10 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Config/config.h" // FIXME: Shouldn't depend on host!
using namespace llvm;
@@ -38,6 +39,10 @@ using namespace llvm;
STATISTIC(NumSimplified, "Number of library calls simplified");
STATISTIC(NumAnnotated, "Number of attributes added to library functions");
+static cl::opt<bool> UnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
+ cl::init(false),
+ cl::desc("Enable unsafe double to float "
+ "shrinking for math lib calls"));
//===----------------------------------------------------------------------===//
// Optimizer Base Class
//===----------------------------------------------------------------------===//
@@ -48,7 +53,7 @@ namespace {
class LibCallOptimization {
protected:
Function *Caller;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
LLVMContext* Context;
public:
@@ -63,7 +68,7 @@ public:
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B)
=0;
- Value *OptimizeCall(CallInst *CI, const TargetData *TD,
+ Value *OptimizeCall(CallInst *CI, const DataLayout *TD,
const TargetLibraryInfo *TLI, IRBuilder<> &B) {
Caller = CI->getParent()->getParent();
this->TD = TD;
@@ -85,22 +90,6 @@ public:
// Helper Functions
//===----------------------------------------------------------------------===//
-/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
-/// value is equal or not-equal to zero.
-static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
- if (IC->isEquality())
- if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
- if (C->isNullValue())
- continue;
- // Unknown instruction.
- return false;
- }
- return true;
-}
-
static bool CallHasFloatingPointArgument(const CallInst *CI) {
for (CallInst::const_op_iterator it = CI->op_begin(), e = CI->op_end();
it != e; ++it) {
@@ -110,799 +99,62 @@ static bool CallHasFloatingPointArgument(const CallInst *CI) {
return false;
}
-/// IsOnlyUsedInEqualityComparison - Return true if it is only used in equality
-/// comparisons with With.
-static bool IsOnlyUsedInEqualityComparison(Value *V, Value *With) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
- if (IC->isEquality() && IC->getOperand(1) == With)
- continue;
- // Unknown instruction.
- return false;
- }
- return true;
-}
-
+namespace {
//===----------------------------------------------------------------------===//
-// String and Memory LibCall Optimizations
+// Math Library Optimizations
//===----------------------------------------------------------------------===//
//===---------------------------------------===//
-// 'strcat' Optimizations
-namespace {
-struct StrCatOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // Verify the "strcat" function prototype.
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 ||
- FT->getReturnType() != B.getInt8PtrTy() ||
- FT->getParamType(0) != FT->getReturnType() ||
- FT->getParamType(1) != FT->getReturnType())
- return 0;
-
- // Extract some information from the instruction
- Value *Dst = CI->getArgOperand(0);
- Value *Src = CI->getArgOperand(1);
-
- // See if we can get the length of the input string.
- uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
- --Len; // Unbias length.
-
- // Handle the simple, do-nothing case: strcat(x, "") -> x
- if (Len == 0)
- return Dst;
-
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- return EmitStrLenMemCpy(Src, Dst, Len, B);
- }
-
- Value *EmitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B) {
- // We need to find the end of the destination string. That's where the
- // memory is to be moved to. We just generate a call to strlen.
- Value *DstLen = EmitStrLen(Dst, B, TD, TLI);
- if (!DstLen)
- return 0;
-
- // Now that we have the destination's length, we must index into the
- // destination's pointer to get the actual memcpy destination (end of
- // the string .. we're concatenating).
- Value *CpyDst = B.CreateGEP(Dst, DstLen, "endptr");
-
- // We have enough information to now generate the memcpy call to do the
- // concatenation for us. Make a memcpy to copy the nul byte with align = 1.
- B.CreateMemCpy(CpyDst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
- return Dst;
- }
-};
-
-//===---------------------------------------===//
-// 'strncat' Optimizations
-
-struct StrNCatOpt : public StrCatOpt {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // Verify the "strncat" function prototype.
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 3 ||
- FT->getReturnType() != B.getInt8PtrTy() ||
- FT->getParamType(0) != FT->getReturnType() ||
- FT->getParamType(1) != FT->getReturnType() ||
- !FT->getParamType(2)->isIntegerTy())
- return 0;
-
- // Extract some information from the instruction
- Value *Dst = CI->getArgOperand(0);
- Value *Src = CI->getArgOperand(1);
- uint64_t Len;
-
- // We don't do anything if length is not constant
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
- Len = LengthArg->getZExtValue();
- else
- return 0;
-
- // See if we can get the length of the input string.
- uint64_t SrcLen = GetStringLength(Src);
- if (SrcLen == 0) return 0;
- --SrcLen; // Unbias length.
-
- // Handle the simple, do-nothing cases:
- // strncat(x, "", c) -> x
- // strncat(x, c, 0) -> x
- if (SrcLen == 0 || Len == 0) return Dst;
-
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- // We don't optimize this case
- if (Len < SrcLen) return 0;
-
- // strncat(x, s, c) -> strcat(x, s)
- // s is constant so the strcat can be optimized further
- return EmitStrLenMemCpy(Src, Dst, SrcLen, B);
- }
-};
-
-//===---------------------------------------===//
-// 'strchr' Optimizations
-
-struct StrChrOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // Verify the "strchr" function prototype.
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 ||
- FT->getReturnType() != B.getInt8PtrTy() ||
- FT->getParamType(0) != FT->getReturnType() ||
- !FT->getParamType(1)->isIntegerTy(32))
- return 0;
-
- Value *SrcStr = CI->getArgOperand(0);
-
- // If the second operand is non-constant, see if we can compute the length
- // of the input string and turn this into memchr.
- ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
- if (CharC == 0) {
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- uint64_t Len = GetStringLength(SrcStr);
- if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
- return 0;
-
- return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
- ConstantInt::get(TD->getIntPtrType(*Context), Len),
- B, TD, TLI);
- }
-
- // Otherwise, the character is a constant, see if the first argument is
- // a string literal. If so, we can constant fold.
- StringRef Str;
- if (!getConstantStringInfo(SrcStr, Str))
- return 0;
-
- // Compute the offset, make sure to handle the case when we're searching for
- // zero (a weird way to spell strlen).
- size_t I = CharC->getSExtValue() == 0 ?
- Str.size() : Str.find(CharC->getSExtValue());
- if (I == StringRef::npos) // Didn't find the char. strchr returns null.
- return Constant::getNullValue(CI->getType());
-
- // strchr(s+n,c) -> gep(s+n+i,c)
- return B.CreateGEP(SrcStr, B.getInt64(I), "strchr");
- }
-};
-
-//===---------------------------------------===//
-// 'strrchr' Optimizations
-
-struct StrRChrOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // Verify the "strrchr" function prototype.
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 ||
- FT->getReturnType() != B.getInt8PtrTy() ||
- FT->getParamType(0) != FT->getReturnType() ||
- !FT->getParamType(1)->isIntegerTy(32))
- return 0;
-
- Value *SrcStr = CI->getArgOperand(0);
- ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
-
- // Cannot fold anything if we're not looking for a constant.
- if (!CharC)
- return 0;
-
- StringRef Str;
- if (!getConstantStringInfo(SrcStr, Str)) {
- // strrchr(s, 0) -> strchr(s, 0)
- if (TD && CharC->isZero())
- return EmitStrChr(SrcStr, '\0', B, TD, TLI);
- return 0;
- }
-
- // Compute the offset.
- size_t I = CharC->getSExtValue() == 0 ?
- Str.size() : Str.rfind(CharC->getSExtValue());
- if (I == StringRef::npos) // Didn't find the char. Return null.
- return Constant::getNullValue(CI->getType());
-
- // strrchr(s+n,c) -> gep(s+n+i,c)
- return B.CreateGEP(SrcStr, B.getInt64(I), "strrchr");
- }
-};
-
-//===---------------------------------------===//
-// 'strcmp' Optimizations
-
-struct StrCmpOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // Verify the "strcmp" function prototype.
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 ||
- !FT->getReturnType()->isIntegerTy(32) ||
- FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
-
- Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
- if (Str1P == Str2P) // strcmp(x,x) -> 0
- return ConstantInt::get(CI->getType(), 0);
-
- StringRef Str1, Str2;
- bool HasStr1 = getConstantStringInfo(Str1P, Str1);
- bool HasStr2 = getConstantStringInfo(Str2P, Str2);
-
- // strcmp(x, y) -> cnst (if both x and y are constant strings)
- if (HasStr1 && HasStr2)
- return ConstantInt::get(CI->getType(), Str1.compare(Str2));
-
- if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
- return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"),
- CI->getType()));
-
- if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x
- return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
-
- // strcmp(P, "x") -> memcmp(P, "x", 2)
- uint64_t Len1 = GetStringLength(Str1P);
- uint64_t Len2 = GetStringLength(Str2P);
- if (Len1 && Len2) {
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- return EmitMemCmp(Str1P, Str2P,
- ConstantInt::get(TD->getIntPtrType(*Context),
- std::min(Len1, Len2)), B, TD, TLI);
- }
-
- return 0;
- }
-};
-
-//===---------------------------------------===//
-// 'strncmp' Optimizations
-
-struct StrNCmpOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // Verify the "strncmp" function prototype.
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 3 ||
- !FT->getReturnType()->isIntegerTy(32) ||
- FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != B.getInt8PtrTy() ||
- !FT->getParamType(2)->isIntegerTy())
- return 0;
-
- Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
- if (Str1P == Str2P) // strncmp(x,x,n) -> 0
- return ConstantInt::get(CI->getType(), 0);
-
- // Get the length argument if it is constant.
- uint64_t Length;
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
- Length = LengthArg->getZExtValue();
- else
- return 0;
-
- if (Length == 0) // strncmp(x,y,0) -> 0
- return ConstantInt::get(CI->getType(), 0);
-
- if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
- return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI);
-
- StringRef Str1, Str2;
- bool HasStr1 = getConstantStringInfo(Str1P, Str1);
- bool HasStr2 = getConstantStringInfo(Str2P, Str2);
-
- // strncmp(x, y) -> cnst (if both x and y are constant strings)
- if (HasStr1 && HasStr2) {
- StringRef SubStr1 = Str1.substr(0, Length);
- StringRef SubStr2 = Str2.substr(0, Length);
- return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2));
- }
-
- if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x
- return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"),
- CI->getType()));
-
- if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
- return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
-
- return 0;
- }
-};
-
-
-//===---------------------------------------===//
-// 'strcpy' Optimizations
-
-struct StrCpyOpt : public LibCallOptimization {
- bool OptChkCall; // True if it's optimizing a __strcpy_chk libcall.
-
- StrCpyOpt(bool c) : OptChkCall(c) {}
-
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // Verify the "strcpy" function prototype.
- unsigned NumParams = OptChkCall ? 3 : 2;
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != NumParams ||
- FT->getReturnType() != FT->getParamType(0) ||
- FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
-
- Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
- if (Dst == Src) // strcpy(x,x) -> x
- return Src;
-
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- // See if we can get the length of the input string.
- uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
-
- // We have enough information to now generate the memcpy call to do the
- // copy for us. Make a memcpy to copy the nul byte with align = 1.
- if (!OptChkCall ||
- !EmitMemCpyChk(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len),
- CI->getArgOperand(2), B, TD, TLI))
- B.CreateMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
- return Dst;
- }
-};
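
Here GetStringLength reports the length including the nul terminator (the strlen fold further down returns Len-1), which is why Len is handed to memcpy unchanged. The rewrite at the C level, as a sketch:

    #include <cassert>
    #include <cstring>

    int main() {
      char a[8], b[8];
      std::strcpy(a, "hi");
      std::memcpy(b, "hi", 3);  // strlen("hi") + 1 bytes, so the nul comes along
      assert(std::memcmp(a, b, 3) == 0);
      return 0;
    }
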
-
-//===---------------------------------------===//
-// 'stpcpy' Optimizations
-
-struct StpCpyOpt: public LibCallOptimization {
- bool OptChkCall; // True if it's optimizing a __stpcpy_chk libcall.
-
- StpCpyOpt(bool c) : OptChkCall(c) {}
-
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // Verify the "stpcpy" function prototype.
- unsigned NumParams = OptChkCall ? 3 : 2;
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != NumParams ||
- FT->getReturnType() != FT->getParamType(0) ||
- FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
-
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
- if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
- Value *StrLen = EmitStrLen(Src, B, TD, TLI);
- return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
- }
-
- // See if we can get the length of the input string.
- uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
-
- Value *LenV = ConstantInt::get(TD->getIntPtrType(*Context), Len);
- Value *DstEnd = B.CreateGEP(Dst,
- ConstantInt::get(TD->getIntPtrType(*Context),
- Len - 1));
-
- // We have enough information to now generate the memcpy call to do the
- // copy for us. Make a memcpy to copy the nul byte with align = 1.
- if (!OptChkCall || !EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B,
- TD, TLI))
- B.CreateMemCpy(Dst, Src, LenV, 1);
- return DstEnd;
- }
-};
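
stpcpy returns a pointer to the copied nul, which is why the rewrite yields Dst + Len - 1 instead of Dst. A small check, assuming a POSIX libc that exposes stpcpy:

    #include <cassert>
    #include <cstring>

    int main() {
      char buf[8];
      char *end = stpcpy(buf, "hi");  // POSIX stpcpy, not ISO C
      assert(end == buf + 2);         // Dst + Len - 1, with Len = strlen + 1 = 3
      assert(*end == '\0');
      return 0;
    }
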
-
-//===---------------------------------------===//
-// 'strncpy' Optimizations
-
-struct StrNCpyOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
- FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != B.getInt8PtrTy() ||
- !FT->getParamType(2)->isIntegerTy())
- return 0;
-
- Value *Dst = CI->getArgOperand(0);
- Value *Src = CI->getArgOperand(1);
- Value *LenOp = CI->getArgOperand(2);
-
- // See if we can get the length of the input string.
- uint64_t SrcLen = GetStringLength(Src);
- if (SrcLen == 0) return 0;
- --SrcLen;
-
- if (SrcLen == 0) {
- // strncpy(x, "", y) -> memset(x, '\0', y, 1)
- B.CreateMemSet(Dst, B.getInt8('\0'), LenOp, 1);
- return Dst;
- }
-
- uint64_t Len;
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(LenOp))
- Len = LengthArg->getZExtValue();
- else
- return 0;
-
- if (Len == 0) return Dst; // strncpy(x, y, 0) -> x
-
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- // Let strncpy handle the zero padding
- if (Len > SrcLen+1) return 0;
-
- // strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
- B.CreateMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
-
- return Dst;
- }
-};
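
The Len > SrcLen+1 bailout exists because strncpy zero-pads the destination out to n bytes, which a single memcpy would not reproduce:

    #include <cassert>
    #include <cstring>

    int main() {
      char buf[8];
      std::memset(buf, 'x', sizeof buf);
      std::strncpy(buf, "hi", 5);  // copies "hi\0", then pads with '\0' to 5 bytes
      assert(buf[2] == '\0' && buf[4] == '\0');
      assert(buf[5] == 'x');       // bytes past n stay untouched
      return 0;
    }
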
-
-//===---------------------------------------===//
-// 'strlen' Optimizations
-
-struct StrLenOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 1 ||
- FT->getParamType(0) != B.getInt8PtrTy() ||
- !FT->getReturnType()->isIntegerTy())
- return 0;
-
- Value *Src = CI->getArgOperand(0);
-
- // Constant folding: strlen("xyz") -> 3
- if (uint64_t Len = GetStringLength(Src))
- return ConstantInt::get(CI->getType(), Len-1);
-
- // strlen(x) != 0 --> *x != 0
- // strlen(x) == 0 --> *x == 0
- if (IsOnlyUsedInZeroEqualityComparison(CI))
- return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType());
- return 0;
- }
-};
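
The zero-equality fold is sound because a C string has length zero exactly when its first byte is the nul:

    #include <cassert>
    #include <cstring>

    int main() {
      const char *e = "", *t = "abc";
      assert((std::strlen(e) == 0) == (*e == '\0'));
      assert((std::strlen(t) != 0) == (*t != '\0'));
      return 0;
    }
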
-
-
-//===---------------------------------------===//
-// 'strpbrk' Optimizations
-
-struct StrPBrkOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 ||
- FT->getParamType(0) != B.getInt8PtrTy() ||
- FT->getParamType(1) != FT->getParamType(0) ||
- FT->getReturnType() != FT->getParamType(0))
- return 0;
-
- StringRef S1, S2;
- bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
- bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
-
- // strpbrk(s, "") -> NULL
- // strpbrk("", s) -> NULL
- if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
- return Constant::getNullValue(CI->getType());
-
- // Constant folding.
- if (HasS1 && HasS2) {
- size_t I = S1.find_first_of(S2);
- if (I == std::string::npos) // No match.
- return Constant::getNullValue(CI->getType());
-
- return B.CreateGEP(CI->getArgOperand(0), B.getInt64(I), "strpbrk");
- }
-
- // strpbrk(s, "a") -> strchr(s, 'a')
- if (TD && HasS2 && S2.size() == 1)
- return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI);
-
- return 0;
- }
-};
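
For reference, the three cases handled above, exercised against libc directly:

    #include <cassert>
    #include <cstring>

    int main() {
      const char *s = "abcd";
      assert(std::strpbrk(s, "") == nullptr);               // empty set -> NULL
      assert(std::strpbrk(s, "dc") == s + 2);               // constant fold: offset 2
      assert(std::strpbrk(s, "c") == std::strchr(s, 'c'));  // one-char set -> strchr
      return 0;
    }
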
-
-//===---------------------------------------===//
-// 'strto*' Optimizations. This handles strtol, strtod, strtof, strtoul, etc.
-
-struct StrToOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- FunctionType *FT = Callee->getFunctionType();
- if ((FT->getNumParams() != 2 && FT->getNumParams() != 3) ||
- !FT->getParamType(0)->isPointerTy() ||
- !FT->getParamType(1)->isPointerTy())
- return 0;
-
- Value *EndPtr = CI->getArgOperand(1);
- if (isa<ConstantPointerNull>(EndPtr)) {
- // With a null EndPtr, this function won't capture the main argument.
- // It would be readonly too, except that it still may write to errno.
- CI->addAttribute(1, Attribute::NoCapture);
- }
-
- return 0;
- }
-};
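
This one annotates rather than rewrites: with a null end pointer, the only way strtol and friends could publish their string argument is through *endptr, so the pointer provably does not escape the call. The call shape being annotated, for illustration:

    #include <cstdlib>

    // 's' cannot escape through this call, which is what justifies nocapture.
    long parseDecimal(const char *s) {
      return std::strtol(s, nullptr, 10);
    }
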
-
-//===---------------------------------------===//
-// 'strspn' Optimizations
-
-struct StrSpnOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 ||
- FT->getParamType(0) != B.getInt8PtrTy() ||
- FT->getParamType(1) != FT->getParamType(0) ||
- !FT->getReturnType()->isIntegerTy())
- return 0;
-
- StringRef S1, S2;
- bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
- bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
-
- // strspn(s, "") -> 0
- // strspn("", s) -> 0
- if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
- return Constant::getNullValue(CI->getType());
-
- // Constant folding.
- if (HasS1 && HasS2) {
- size_t Pos = S1.find_first_not_of(S2);
- if (Pos == StringRef::npos) Pos = S1.size();
- return ConstantInt::get(CI->getType(), Pos);
- }
-
- return 0;
- }
-};
-
-//===---------------------------------------===//
-// 'strcspn' Optimizations
-
-struct StrCSpnOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 ||
- FT->getParamType(0) != B.getInt8PtrTy() ||
- FT->getParamType(1) != FT->getParamType(0) ||
- !FT->getReturnType()->isIntegerTy())
- return 0;
-
- StringRef S1, S2;
- bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
- bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
-
- // strcspn("", s) -> 0
- if (HasS1 && S1.empty())
- return Constant::getNullValue(CI->getType());
-
- // Constant folding.
- if (HasS1 && HasS2) {
- size_t Pos = S1.find_first_of(S2);
- if (Pos == StringRef::npos) Pos = S1.size();
- return ConstantInt::get(CI->getType(), Pos);
- }
-
- // strcspn(s, "") -> strlen(s)
- if (TD && HasS2 && S2.empty())
- return EmitStrLen(CI->getArgOperand(0), B, TD, TLI);
-
- return 0;
- }
-};
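
strspn measures the longest prefix drawn from the set and strcspn the longest prefix avoiding it, so find_first_not_of and find_first_of compute exactly the right positions, with npos mapping to the full length:

    #include <cassert>
    #include <cstring>

    int main() {
      assert(std::strspn("abcba", "ab") == 2);  // prefix "ab" is inside the set
      assert(std::strcspn("abcba", "c") == 2);  // 'c' first appears at index 2
      assert(std::strcspn("abc", "") == 3);     // strcspn(s, "") -> strlen(s)
      return 0;
    }
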
-
-//===---------------------------------------===//
-// 'strstr' Optimizations
+// Double -> Float Shrinking Optimizations for Unary Functions like 'floor'
-struct StrStrOpt : public LibCallOptimization {
+struct UnaryDoubleFPOpt : public LibCallOptimization {
+ bool CheckRetType;
+ UnaryDoubleFPOpt(bool CheckReturnType): CheckRetType(CheckReturnType) {}
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 ||
- !FT->getParamType(0)->isPointerTy() ||
- !FT->getParamType(1)->isPointerTy() ||
- !FT->getReturnType()->isPointerTy())
+ if (FT->getNumParams() != 1 || !FT->getReturnType()->isDoubleTy() ||
+ !FT->getParamType(0)->isDoubleTy())
return 0;
- // fold strstr(x, x) -> x.
- if (CI->getArgOperand(0) == CI->getArgOperand(1))
- return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
-
- // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
- if (TD && IsOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
- Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI);
- if (!StrLen)
- return 0;
- Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
- StrLen, B, TD, TLI);
- if (!StrNCmp)
- return 0;
- for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
- UI != UE; ) {
- ICmpInst *Old = cast<ICmpInst>(*UI++);
- Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp,
- ConstantInt::getNullValue(StrNCmp->getType()),
- "cmp");
- Old->replaceAllUsesWith(Cmp);
- Old->eraseFromParent();
+ if (CheckRetType) {
+ // Check if all the uses of functions like 'sin' are converted to float.
+ for (Value::use_iterator UseI = CI->use_begin(); UseI != CI->use_end();
+ ++UseI) {
+ FPTruncInst *Cast = dyn_cast<FPTruncInst>(*UseI);
+ if (Cast == 0 || !Cast->getType()->isFloatTy())
+ return 0;
}
- return CI;
- }
-
- // See if either input string is a constant string.
- StringRef SearchStr, ToFindStr;
- bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr);
- bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr);
-
- // fold strstr(x, "") -> x.
- if (HasStr2 && ToFindStr.empty())
- return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
-
- // If both strings are known, constant fold it.
- if (HasStr1 && HasStr2) {
- std::string::size_type Offset = SearchStr.find(ToFindStr);
-
- if (Offset == StringRef::npos) // strstr("foo", "bar") -> null
- return Constant::getNullValue(CI->getType());
-
- // strstr("abcd", "bc") -> gep((char*)"abcd", 1)
- Value *Result = CastToCStr(CI->getArgOperand(0), B);
- Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr");
- return B.CreateBitCast(Result, CI->getType());
- }
-
- // fold strstr(x, "y") -> strchr(x, 'y').
- if (HasStr2 && ToFindStr.size() == 1) {
- Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI);
- return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0;
}
- return 0;
- }
-};
-
-
-//===---------------------------------------===//
-// 'memcmp' Optimizations
-
-struct MemCmpOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 3 || !FT->getParamType(0)->isPointerTy() ||
- !FT->getParamType(1)->isPointerTy() ||
- !FT->getReturnType()->isIntegerTy(32))
- return 0;
- Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
-
- if (LHS == RHS) // memcmp(s,s,x) -> 0
- return Constant::getNullValue(CI->getType());
-
- // Make sure we have a constant length.
- ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
- if (!LenC) return 0;
- uint64_t Len = LenC->getZExtValue();
-
- if (Len == 0) // memcmp(s1,s2,0) -> 0
- return Constant::getNullValue(CI->getType());
-
- // memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
- if (Len == 1) {
- Value *LHSV = B.CreateZExt(B.CreateLoad(CastToCStr(LHS, B), "lhsc"),
- CI->getType(), "lhsv");
- Value *RHSV = B.CreateZExt(B.CreateLoad(CastToCStr(RHS, B), "rhsc"),
- CI->getType(), "rhsv");
- return B.CreateSub(LHSV, RHSV, "chardiff");
- }
-
- // Constant folding: memcmp(x, y, l) -> cnst (all arguments are constant)
- StringRef LHSStr, RHSStr;
- if (getConstantStringInfo(LHS, LHSStr) &&
- getConstantStringInfo(RHS, RHSStr)) {
- // Make sure we're not reading out-of-bounds memory.
- if (Len > LHSStr.size() || Len > RHSStr.size())
- return 0;
- uint64_t Ret = memcmp(LHSStr.data(), RHSStr.data(), Len);
- return ConstantInt::get(CI->getType(), Ret);
- }
-
- return 0;
- }
-};
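
Only the sign of memcmp's result is specified, which is what lets the one-byte case lower to a plain byte subtraction:

    #include <cassert>
    #include <cstring>

    static int sign(int v) { return (v > 0) - (v < 0); }

    int main() {
      const char a[] = "ab", b[] = "ac";
      assert(std::memcmp(a, b, 0) == 0);  // memcmp(s1,s2,0) -> 0
      assert(std::memcmp(a, b, 1) == 0);  // first bytes are equal
      assert(sign(std::memcmp(a, b, 2)) ==
             sign((unsigned char)a[1] - (unsigned char)b[1]));
      return 0;
    }
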
-
-//===---------------------------------------===//
-// 'memcpy' Optimizations
-
-struct MemCpyOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
- !FT->getParamType(0)->isPointerTy() ||
- !FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
- return 0;
-
- // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
- B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1);
- return CI->getArgOperand(0);
- }
-};
-
-//===---------------------------------------===//
-// 'memmove' Optimizations
-
-struct MemMoveOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
- !FT->getParamType(0)->isPointerTy() ||
- !FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
- return 0;
-
- // memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
- B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1);
- return CI->getArgOperand(0);
- }
-};
-
-//===---------------------------------------===//
-// 'memset' Optimizations
-
-struct MemSetOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
- if (!TD) return 0;
-
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
- !FT->getParamType(0)->isPointerTy() ||
- !FT->getParamType(1)->isIntegerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
+ // If this is something like 'floor((double)floatval)', convert to floorf.
+ FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getArgOperand(0));
+ if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy())
return 0;
- // memset(p, v, n) -> llvm.memset(p, v, n, 1)
- Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
- B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
- return CI->getArgOperand(0);
+ // floor((double)floatval) -> (double)floorf(floatval)
+ Value *V = Cast->getOperand(0);
+ V = EmitUnaryFloatFnCall(V, Callee->getName(), B, Callee->getAttributes());
+ return B.CreateFPExt(V, B.getDoubleTy());
}
};
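
The shrink is exact for the rounding-style functions (floor, ceil, trunc, rint, nearbyint): float-to-double conversion is exact, and rounding a float to an integral value always lands on a float-representable result. Functions like sin genuinely lose precision in float, which is why those are only registered under CheckRetType plus the UnsafeFPShrink flag used below. The identity, in C++:

    #include <cassert>
    #include <cmath>

    int main() {
      float f = 1234567.875f;
      float g = std::floor(f);  // float overload of floor, i.e. floorf
      // floor((double)f) == (double)floorf(f) for every finite float f.
      assert(std::floor(static_cast<double>(f)) == static_cast<double>(g));
      return 0;
    }
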
-//===----------------------------------------------------------------------===//
-// Math Library Optimizations
-//===----------------------------------------------------------------------===//
-
//===---------------------------------------===//
// 'cos*' Optimizations
-
struct CosOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *Ret = NULL;
+ if (UnsafeFPShrink && Callee->getName() == "cos" &&
+ TLI->has(LibFunc::cosf)) {
+ UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
+ Ret = UnsafeUnaryDoubleFP.CallOptimizer(Callee, CI, B);
+ }
+
FunctionType *FT = Callee->getFunctionType();
// Just make sure this has 1 argument of FP type, which matches the
// result type.
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isFloatingPointTy())
- return 0;
+ return Ret;
// cos(-x) -> cos(x)
Value *Op1 = CI->getArgOperand(0);
@@ -910,7 +162,7 @@ struct CosOpt : public LibCallOptimization {
BinaryOperator *BinExpr = cast<BinaryOperator>(Op1);
return B.CreateCall(Callee, BinExpr->getOperand(1), "cos");
}
- return 0;
+ return Ret;
}
};
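
cos(-x) -> cos(x) is evenness of cosine; dropping the negation saves an instruction and can expose CSE with an existing cos(x). A sketch of the identity (bit-for-bit equality here assumes a libm whose argument reduction starts from |x|, which common implementations satisfy):

    #include <cassert>
    #include <cmath>

    int main() {
      double x = 1.25;
      assert(std::cos(-x) == std::cos(x));
      return 0;
    }
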
@@ -919,13 +171,20 @@ struct CosOpt : public LibCallOptimization {
struct PowOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *Ret = NULL;
+ if (UnsafeFPShrink && Callee->getName() == "pow" &&
+ TLI->has(LibFunc::powf)) {
+ UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
+ Ret = UnsafeUnaryDoubleFP.CallOptimizer(Callee, CI, B);
+ }
+
FunctionType *FT = Callee->getFunctionType();
// Just make sure this has 2 arguments of the same FP type, which match the
// result type.
if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
!FT->getParamType(0)->isFloatingPointTy())
- return 0;
+ return Ret;
Value *Op1 = CI->getArgOperand(0), *Op2 = CI->getArgOperand(1);
if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1)) {
@@ -936,7 +195,7 @@ struct PowOpt : public LibCallOptimization {
}
ConstantFP *Op2C = dyn_cast<ConstantFP>(Op2);
- if (Op2C == 0) return 0;
+ if (Op2C == 0) return Ret;
if (Op2C->getValueAPF().isZero()) // pow(x, 0.0) -> 1.0
return ConstantFP::get(CI->getType(), 1.0);
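
The pow(x, 0.0) -> 1.0 fold is sanctioned by C itself: under Annex F, pow returns 1 for a zero exponent regardless of the base, even a NaN. Assuming an Annex F conformant libm:

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
      assert(std::pow(2.5, 0.0) == 1.0);
      assert(std::pow(std::numeric_limits<double>::quiet_NaN(), 0.0) == 1.0);
      return 0;
    }
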
@@ -974,12 +233,19 @@ struct PowOpt : public LibCallOptimization {
struct Exp2Opt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *Ret = NULL;
+ if (UnsafeFPShrink && Callee->getName() == "exp2" &&
+ TLI->has(LibFunc::exp2f)) {
+ UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
+ Ret = UnsafeUnaryDoubleFP.CallOptimizer(Callee, CI, B);
+ }
+
FunctionType *FT = Callee->getFunctionType();
// Just make sure this has 1 argument of FP type, which matches the
// result type.
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isFloatingPointTy())
- return 0;
+ return Ret;
Value *Op = CI->getArgOperand(0);
// Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x)) if sizeof(x) <= 32
@@ -1016,29 +282,7 @@ struct Exp2Opt : public LibCallOptimization {
return CI;
}
- return 0;
- }
-};
-
-//===---------------------------------------===//
-// Double -> Float Shrinking Optimizations for Unary Functions like 'floor'
-
-struct UnaryDoubleFPOpt : public LibCallOptimization {
- virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 1 || !FT->getReturnType()->isDoubleTy() ||
- !FT->getParamType(0)->isDoubleTy())
- return 0;
-
- // If this is something like 'floor((double)floatval)', convert to floorf.
- FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getArgOperand(0));
- if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy())
- return 0;
-
- // floor((double)floatval) -> (double)floorf(floatval)
- Value *V = Cast->getOperand(0);
- V = EmitUnaryFloatFnCall(V, Callee->getName(), B, Callee->getAttributes());
- return B.CreateFPExt(V, B.getDoubleTy());
+ return Ret;
}
};
@@ -1063,8 +307,8 @@ struct FFSOpt : public LibCallOptimization {
// Constant fold.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
- if (CI->getValue() == 0) // ffs(0) -> 0.
- return Constant::getNullValue(CI->getType());
+ if (CI->isZero()) // ffs(0) -> 0.
+ return B.getInt32(0);
// ffs(c) -> cttz(c)+1
return B.getInt32(CI->getValue().countTrailingZeros() + 1);
}
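
Beyond the cosmetic isZero() change, the fold itself is ffs(c) -> cttz(c) + 1: the 1-based index of the lowest set bit, with ffs(0) defined as 0. At the source level, using GCC/Clang builtins (an assumption of this sketch):

    #include <cassert>

    int main() {
      unsigned c = 0x18;                                 // lowest set bit: bit 3
      assert(__builtin_ffs(c) == __builtin_ctz(c) + 1);  // both yield 4
      assert(__builtin_ffs(0) == 0);                     // ffs(0) -> 0
      return 0;
    }
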
@@ -1267,7 +511,7 @@ struct SPrintFOpt : public LibCallOptimization {
if (FormatStr[i] == '%')
return 0; // we found a format specifier, bail out.
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
@@ -1297,7 +541,7 @@ struct SPrintFOpt : public LibCallOptimization {
}
if (FormatStr[1] == 's') {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
@@ -1385,7 +629,7 @@ struct FWriteOpt : public LibCallOptimization {
struct FPutsOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// Require two pointers. Also, we can't optimize if return value is used.
@@ -1422,7 +666,7 @@ struct FPrintFOpt : public LibCallOptimization {
if (FormatStr[i] == '%') // Could handle %% -> % if we cared.
return 0; // We found a format specifier.
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
Value *NewCI = EmitFWrite(CI->getArgOperand(1),
@@ -1524,17 +768,9 @@ namespace {
TargetLibraryInfo *TLI;
StringMap<LibCallOptimization*> Optimizations;
- // String and Memory LibCall Optimizations
- StrCatOpt StrCat; StrNCatOpt StrNCat; StrChrOpt StrChr; StrRChrOpt StrRChr;
- StrCmpOpt StrCmp; StrNCmpOpt StrNCmp;
- StrCpyOpt StrCpy; StrCpyOpt StrCpyChk;
- StpCpyOpt StpCpy; StpCpyOpt StpCpyChk;
- StrNCpyOpt StrNCpy;
- StrLenOpt StrLen; StrPBrkOpt StrPBrk;
- StrToOpt StrTo; StrSpnOpt StrSpn; StrCSpnOpt StrCSpn; StrStrOpt StrStr;
- MemCmpOpt MemCmp; MemCpyOpt MemCpy; MemMoveOpt MemMove; MemSetOpt MemSet;
// Math Library Optimizations
- CosOpt Cos; PowOpt Pow; Exp2Opt Exp2; UnaryDoubleFPOpt UnaryDoubleFP;
+ CosOpt Cos; PowOpt Pow; Exp2Opt Exp2;
+ UnaryDoubleFPOpt UnaryDoubleFP, UnsafeUnaryDoubleFP;
// Integer Optimizations
FFSOpt FFS; AbsOpt Abs; IsDigitOpt IsDigit; IsAsciiOpt IsAscii;
ToAsciiOpt ToAscii;
@@ -1546,11 +782,13 @@ namespace {
bool Modified; // This is only used by doInitialization.
public:
static char ID; // Pass identification
- SimplifyLibCalls() : FunctionPass(ID), StrCpy(false), StrCpyChk(true),
- StpCpy(false), StpCpyChk(true) {
+ SimplifyLibCalls() : FunctionPass(ID), UnaryDoubleFP(false),
+ UnsafeUnaryDoubleFP(true) {
initializeSimplifyLibCallsPass(*PassRegistry::getPassRegistry());
}
void AddOpt(LibFunc::Func F, LibCallOptimization* Opt);
+ void AddOpt(LibFunc::Func F1, LibFunc::Func F2, LibCallOptimization* Opt);
+
void InitOptimizations();
bool runOnFunction(Function &F);
@@ -1586,40 +824,15 @@ void SimplifyLibCalls::AddOpt(LibFunc::Func F, LibCallOptimization* Opt) {
Optimizations[TLI->getName(F)] = Opt;
}
+void SimplifyLibCalls::AddOpt(LibFunc::Func F1, LibFunc::Func F2,
+ LibCallOptimization* Opt) {
+ if (TLI->has(F1) && TLI->has(F2))
+ Optimizations[TLI->getName(F1)] = Opt;
+}
+
/// Optimizations - Populate the Optimizations map with all the optimizations
/// we know.
void SimplifyLibCalls::InitOptimizations() {
- // String and Memory LibCall Optimizations
- Optimizations["strcat"] = &StrCat;
- Optimizations["strncat"] = &StrNCat;
- Optimizations["strchr"] = &StrChr;
- Optimizations["strrchr"] = &StrRChr;
- Optimizations["strcmp"] = &StrCmp;
- Optimizations["strncmp"] = &StrNCmp;
- Optimizations["strcpy"] = &StrCpy;
- Optimizations["strncpy"] = &StrNCpy;
- Optimizations["stpcpy"] = &StpCpy;
- Optimizations["strlen"] = &StrLen;
- Optimizations["strpbrk"] = &StrPBrk;
- Optimizations["strtol"] = &StrTo;
- Optimizations["strtod"] = &StrTo;
- Optimizations["strtof"] = &StrTo;
- Optimizations["strtoul"] = &StrTo;
- Optimizations["strtoll"] = &StrTo;
- Optimizations["strtold"] = &StrTo;
- Optimizations["strtoull"] = &StrTo;
- Optimizations["strspn"] = &StrSpn;
- Optimizations["strcspn"] = &StrCSpn;
- Optimizations["strstr"] = &StrStr;
- Optimizations["memcmp"] = &MemCmp;
- AddOpt(LibFunc::memcpy, &MemCpy);
- Optimizations["memmove"] = &MemMove;
- AddOpt(LibFunc::memset, &MemSet);
-
- // _chk variants of String and Memory LibCall Optimizations.
- Optimizations["__strcpy_chk"] = &StrCpyChk;
- Optimizations["__stpcpy_chk"] = &StpCpyChk;
-
// Math Library Optimizations
Optimizations["cosf"] = &Cos;
Optimizations["cos"] = &Cos;
@@ -1641,16 +854,37 @@ void SimplifyLibCalls::InitOptimizations() {
Optimizations["llvm.exp2.f64"] = &Exp2;
Optimizations["llvm.exp2.f32"] = &Exp2;
- if (TLI->has(LibFunc::floor) && TLI->has(LibFunc::floorf))
- Optimizations["floor"] = &UnaryDoubleFP;
- if (TLI->has(LibFunc::ceil) && TLI->has(LibFunc::ceilf))
- Optimizations["ceil"] = &UnaryDoubleFP;
- if (TLI->has(LibFunc::round) && TLI->has(LibFunc::roundf))
- Optimizations["round"] = &UnaryDoubleFP;
- if (TLI->has(LibFunc::rint) && TLI->has(LibFunc::rintf))
- Optimizations["rint"] = &UnaryDoubleFP;
- if (TLI->has(LibFunc::nearbyint) && TLI->has(LibFunc::nearbyintf))
- Optimizations["nearbyint"] = &UnaryDoubleFP;
+ AddOpt(LibFunc::ceil, LibFunc::ceilf, &UnaryDoubleFP);
+ AddOpt(LibFunc::fabs, LibFunc::fabsf, &UnaryDoubleFP);
+ AddOpt(LibFunc::floor, LibFunc::floorf, &UnaryDoubleFP);
+ AddOpt(LibFunc::rint, LibFunc::rintf, &UnaryDoubleFP);
+ AddOpt(LibFunc::round, LibFunc::roundf, &UnaryDoubleFP);
+ AddOpt(LibFunc::nearbyint, LibFunc::nearbyintf, &UnaryDoubleFP);
+ AddOpt(LibFunc::trunc, LibFunc::truncf, &UnaryDoubleFP);
+
+ if (UnsafeFPShrink) {
+ AddOpt(LibFunc::acos, LibFunc::acosf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::acosh, LibFunc::acoshf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::asin, LibFunc::asinf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::asinh, LibFunc::asinhf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::atan, LibFunc::atanf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::atanh, LibFunc::atanhf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::cbrt, LibFunc::cbrtf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::cosh, LibFunc::coshf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::exp, LibFunc::expf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::exp10, LibFunc::exp10f, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::expm1, LibFunc::expm1f, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::log, LibFunc::logf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::log10, LibFunc::log10f, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::log1p, LibFunc::log1pf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::log2, LibFunc::log2f, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::logb, LibFunc::logbf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::sin, LibFunc::sinf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::sinh, LibFunc::sinhf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::sqrt, LibFunc::sqrtf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::tan, LibFunc::tanf, &UnsafeUnaryDoubleFP);
+ AddOpt(LibFunc::tanh, LibFunc::tanhf, &UnsafeUnaryDoubleFP);
+ }
// Integer Optimizations
Optimizations["ffs"] = &FFS;
@@ -1681,7 +915,7 @@ bool SimplifyLibCalls::runOnFunction(Function &F) {
if (Optimizations.empty())
InitOptimizations();
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
IRBuilder<> Builder(F.getContext());
diff --git a/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp b/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
index d831452..6815e41 100644
--- a/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -16,7 +16,7 @@
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
@@ -55,10 +55,12 @@ void ExtAddrMode::print(raw_ostream &OS) const {
OS << ']';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
print(dbgs());
dbgs() << '\n';
}
+#endif
/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
@@ -219,7 +221,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
unsigned VariableScale = 0;
int64_t ConstantOffset = 0;
- const TargetData *TD = TLI.getTargetData();
+ const DataLayout *TD = TLI.getDataLayout();
gep_type_iterator GTI = gep_type_begin(AddrInst);
for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
diff --git a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 2679b93..9fea113 100644
--- a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -22,7 +22,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Support/ErrorHandling.h"
@@ -94,7 +94,7 @@ void llvm::FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P) {
/// is dead. Also recursively delete any operands that become dead as
/// a result. This includes tracing the def-use list from the PHI to see if
/// it is ultimately unused or if it reaches an unused cycle.
-bool llvm::DeleteDeadPHIs(BasicBlock *BB) {
+bool llvm::DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI) {
// Recursively deleting a PHI may cause multiple PHIs to be deleted
// or RAUW'd undef, so use an array of WeakVH for the PHIs to delete.
SmallVector<WeakVH, 8> PHIs;
@@ -105,7 +105,7 @@ bool llvm::DeleteDeadPHIs(BasicBlock *BB) {
bool Changed = false;
for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
if (PHINode *PN = dyn_cast_or_null<PHINode>(PHIs[i].operator Value*()))
- Changed |= RecursivelyDeleteDeadPHINode(PN);
+ Changed |= RecursivelyDeleteDeadPHINode(PN, TLI);
return Changed;
}
@@ -687,3 +687,42 @@ ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
return cast<ReturnInst>(NewRet);
}
+/// SplitBlockAndInsertIfThen - Split the containing block at the
+/// specified instruction - everything before and including Cmp stays
+/// in the old basic block, and everything after Cmp is moved to a
+/// new block. The two blocks are connected by a conditional branch
+/// (with value of Cmp being the condition).
+/// Before:
+/// Head
+/// Cmp
+/// Tail
+/// After:
+/// Head
+/// Cmp
+/// if (Cmp)
+/// ThenBlock
+/// Tail
+///
+/// If Unreachable is true, then ThenBlock ends with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns the NewBasicBlock's terminator.
+TerminatorInst *llvm::SplitBlockAndInsertIfThen(Instruction *Cmp,
+ bool Unreachable, MDNode *BranchWeights) {
+ Instruction *SplitBefore = Cmp->getNextNode();
+ BasicBlock *Head = SplitBefore->getParent();
+ BasicBlock *Tail = Head->splitBasicBlock(SplitBefore);
+ TerminatorInst *HeadOldTerm = Head->getTerminator();
+ LLVMContext &C = Head->getContext();
+ BasicBlock *ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail);
+ TerminatorInst *CheckTerm;
+ if (Unreachable)
+ CheckTerm = new UnreachableInst(C, ThenBlock);
+ else
+ CheckTerm = BranchInst::Create(Tail, ThenBlock);
+ BranchInst *HeadNewTerm =
+ BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cmp);
+ HeadNewTerm->setMetadata(LLVMContext::MD_prof, BranchWeights);
+ ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
+ return CheckTerm;
+}
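
A sketch of how a caller might use the new helper to guard a trapping path; the names emitGuardedTrap, Overflow and TrapFn are illustrative, not part of this commit:

    #include "llvm/IRBuilder.h"
    #include "llvm/Instructions.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    using namespace llvm;

    // Split after the i1 'Overflow' and emit a call that runs only when it is
    // true; with Unreachable=true the new then-block never falls through.
    static void emitGuardedTrap(Instruction *Overflow, Value *TrapFn) {
      TerminatorInst *Term =
          SplitBlockAndInsertIfThen(Overflow, /*Unreachable=*/true, 0);
      IRBuilder<> B(Term);   // insert before the then-block's terminator
      B.CreateCall(TrapFn);
    }
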
diff --git a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index e13fd71..74b2ee1 100644
--- a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -22,7 +22,7 @@
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
@@ -34,19 +34,22 @@ Value *llvm::CastToCStr(Value *V, IRBuilder<> &B) {
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
/// specified pointer. This always returns an integer value of size intptr_t.
-Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD,
+Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strlen))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
- AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
- Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture);
+ Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
+ AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ ArrayRef<Attributes::AttrVal>(AVs, 2));
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Constant *StrLen = M->getOrInsertFunction("strlen", AttrListPtr::get(AWI),
+ Constant *StrLen = M->getOrInsertFunction("strlen",
+ AttrListPtr::get(M->getContext(),
+ AWI),
TD->getIntPtrType(Context),
B.getInt8PtrTy(),
NULL);
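
The same mechanical migration repeats through the rest of this file, so it is worth distilling once: attribute construction is now threaded through the LLVMContext, and the function-wide slot is the named AttrListPtr::FunctionIndex rather than the old ~0u magic number. Side by side, as in the hunks here:

    // Before: magic index, attributes combined with operator|.
    //   AWI[1] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
    //                                    Attribute::NoUnwind);
    // After: context-threaded, enum values passed as an ArrayRef.
    //   Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
    //   AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
    //                                    ArrayRef<Attributes::AttrVal>(AVs, 2));
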
@@ -61,18 +64,21 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD,
/// specified pointer. Ptr is required to be some pointer type, MaxLen must
/// be of size_t type, and the return value has 'intptr_t' type.
Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI) {
+ const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strnlen))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
- AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
- Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture);
+ Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
+ AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ ArrayRef<Attributes::AttrVal>(AVs, 2));
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Constant *StrNLen = M->getOrInsertFunction("strnlen", AttrListPtr::get(AWI),
+ Constant *StrNLen = M->getOrInsertFunction("strnlen",
+ AttrListPtr::get(M->getContext(),
+ AWI),
TD->getIntPtrType(Context),
B.getInt8PtrTy(),
TD->getIntPtrType(Context),
@@ -88,17 +94,21 @@ Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
/// specified pointer and character. Ptr is required to be some pointer type,
/// and the return value has 'i8*' type.
Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI) {
+ const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strchr))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
+ Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
AttributeWithIndex AWI =
- AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind);
+ AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ ArrayRef<Attributes::AttrVal>(AVs, 2));
Type *I8Ptr = B.getInt8PtrTy();
Type *I32Ty = B.getInt32Ty();
- Constant *StrChr = M->getOrInsertFunction("strchr", AttrListPtr::get(AWI),
+ Constant *StrChr = M->getOrInsertFunction("strchr",
+ AttrListPtr::get(M->getContext(),
+ AWI),
I8Ptr, I8Ptr, I32Ty, NULL);
CallInst *CI = B.CreateCall2(StrChr, CastToCStr(Ptr, B),
ConstantInt::get(I32Ty, C), "strchr");
@@ -109,20 +119,23 @@ Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
- IRBuilder<> &B, const TargetData *TD,
+ IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strncmp))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
- AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture);
- AWI[2] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
- Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture);
+ AWI[1] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture);
+ Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
+ AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ ArrayRef<Attributes::AttrVal>(AVs, 2));
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI),
+ Value *StrNCmp = M->getOrInsertFunction("strncmp",
+ AttrListPtr::get(M->getContext(),
+ AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
@@ -139,17 +152,19 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name) {
if (!TLI->has(LibFunc::strcpy))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
- AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture);
+ AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::NoUnwind);
Type *I8Ptr = B.getInt8PtrTy();
- Value *StrCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI),
+ Value *StrCpy = M->getOrInsertFunction(Name,
+ AttrListPtr::get(M->getContext(), AWI),
I8Ptr, I8Ptr, I8Ptr, NULL);
CallInst *CI = B.CreateCall2(StrCpy, CastToCStr(Dst, B), CastToCStr(Src, B),
Name);
@@ -161,17 +176,20 @@ Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments.
Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
- IRBuilder<> &B, const TargetData *TD,
+ IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI, StringRef Name) {
if (!TLI->has(LibFunc::strncpy))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
- AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture);
+ AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::NoUnwind);
Type *I8Ptr = B.getInt8PtrTy();
- Value *StrNCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI),
+ Value *StrNCpy = M->getOrInsertFunction(Name,
+ AttrListPtr::get(M->getContext(),
+ AWI),
I8Ptr, I8Ptr, I8Ptr,
Len->getType(), NULL);
CallInst *CI = B.CreateCall3(StrNCpy, CastToCStr(Dst, B), CastToCStr(Src, B),
@@ -185,17 +203,18 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
- IRBuilder<> &B, const TargetData *TD,
+ IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memcpy_chk))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI;
- AWI = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
Value *MemCpy = M->getOrInsertFunction("__memcpy_chk",
- AttrListPtr::get(AWI),
+ AttrListPtr::get(M->getContext(), AWI),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
@@ -212,16 +231,19 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
- Value *Len, IRBuilder<> &B, const TargetData *TD,
+ Value *Len, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memchr))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI;
- AWI = AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind);
+ Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
+ AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ ArrayRef<Attributes::AttrVal>(AVs, 2));
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(AWI),
+ Value *MemChr = M->getOrInsertFunction("memchr",
+ AttrListPtr::get(M->getContext(), AWI),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
B.getInt32Ty(),
@@ -237,20 +259,22 @@ Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
/// EmitMemCmp - Emit a call to the memcmp function.
Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
- Value *Len, IRBuilder<> &B, const TargetData *TD,
+ Value *Len, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memcmp))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
- AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture);
- AWI[2] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
- Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture);
+ AWI[1] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture);
+ Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
+ AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ ArrayRef<Attributes::AttrVal>(AVs, 2));
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI),
+ Value *MemCmp = M->getOrInsertFunction("memcmp",
+ AttrListPtr::get(M->getContext(), AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
@@ -294,7 +318,7 @@ Value *llvm::EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
-Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD,
+Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::putchar))
return 0;
@@ -316,17 +340,19 @@ Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD,
/// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer.
-Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD,
+Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::puts))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
- AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture);
+ AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::NoUnwind);
- Value *PutS = M->getOrInsertFunction("puts", AttrListPtr::get(AWI),
+ Value *PutS = M->getOrInsertFunction("puts",
+ AttrListPtr::get(M->getContext(), AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
NULL);
@@ -339,17 +365,19 @@ Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD,
/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an integer and File is a pointer to FILE.
Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI) {
+ const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fputc))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
- AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture);
+ AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::NoUnwind);
Constant *F;
if (File->getType()->isPointerTy())
- F = M->getOrInsertFunction("fputc", AttrListPtr::get(AWI),
+ F = M->getOrInsertFunction("fputc",
+ AttrListPtr::get(M->getContext(), AWI),
B.getInt32Ty(),
B.getInt32Ty(), File->getType(),
NULL);
@@ -370,19 +398,21 @@ Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
/// EmitFPutS - Emit a call to the puts function. Str is required to be a
/// pointer and File is a pointer to FILE.
Value *llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI) {
+ const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fputs))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
- AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture);
- AWI[2] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture);
+ AWI[1] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture);
+ AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::NoUnwind);
StringRef FPutsName = TLI->getName(LibFunc::fputs);
Constant *F;
if (File->getType()->isPointerTy())
- F = M->getOrInsertFunction(FPutsName, AttrListPtr::get(AWI),
+ F = M->getOrInsertFunction(FPutsName,
+ AttrListPtr::get(M->getContext(), AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
File->getType(), NULL);
@@ -400,21 +430,23 @@ Value *llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
- IRBuilder<> &B, const TargetData *TD,
+ IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fwrite))
return 0;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
- AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
- AWI[1] = AttributeWithIndex::get(4, Attribute::NoCapture);
- AWI[2] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture);
+ AWI[1] = AttributeWithIndex::get(M->getContext(), 4, Attributes::NoCapture);
+ AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
StringRef FWriteName = TLI->getName(LibFunc::fwrite);
Constant *F;
if (File->getType()->isPointerTy())
- F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI),
+ F = M->getOrInsertFunction(FWriteName,
+ AttrListPtr::get(M->getContext(), AWI),
TD->getIntPtrType(Context),
B.getInt8PtrTy(),
TD->getIntPtrType(Context),
@@ -436,9 +468,9 @@ Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
SimplifyFortifiedLibCalls::~SimplifyFortifiedLibCalls() { }
-bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD,
+bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
- // We really need TargetData for later.
+ // We really need DataLayout for later.
if (!TD) return false;
this->CI = CI;
diff --git a/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
new file mode 100644
index 0000000..bee2f7b
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -0,0 +1,262 @@
+//===-- BypassSlowDivision.cpp - Bypass slow division ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an optimization for div and rem on architectures that
+// execute short instructions significantly faster than longer instructions.
+// For example, on Intel Atom 32-bit divides are slow enough that during
+// runtime it is profitable to check the value of the operands, and if they are
+// positive and less than 256 use an unsigned 8-bit divide.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "bypass-slow-division"
+#include "llvm/Instructions.h"
+#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Transforms/Utils/BypassSlowDivision.h"
+
+using namespace llvm;
+
+namespace {
+ struct DivOpInfo {
+ bool SignedOp;
+ Value *Dividend;
+ Value *Divisor;
+
+ DivOpInfo(bool InSignedOp, Value *InDividend, Value *InDivisor)
+ : SignedOp(InSignedOp), Dividend(InDividend), Divisor(InDivisor) {}
+ };
+
+ struct DivPhiNodes {
+ PHINode *Quotient;
+ PHINode *Remainder;
+
+ DivPhiNodes(PHINode *InQuotient, PHINode *InRemainder)
+ : Quotient(InQuotient), Remainder(InRemainder) {}
+ };
+}
+
+namespace llvm {
+ template<>
+ struct DenseMapInfo<DivOpInfo> {
+ static bool isEqual(const DivOpInfo &Val1, const DivOpInfo &Val2) {
+ return Val1.SignedOp == Val2.SignedOp &&
+ Val1.Dividend == Val2.Dividend &&
+ Val1.Divisor == Val2.Divisor;
+ }
+
+ static DivOpInfo getEmptyKey() {
+ return DivOpInfo(false, 0, 0);
+ }
+
+ static DivOpInfo getTombstoneKey() {
+ return DivOpInfo(true, 0, 0);
+ }
+
+ static unsigned getHashValue(const DivOpInfo &Val) {
+ return (unsigned)(reinterpret_cast<uintptr_t>(Val.Dividend) ^
+ reinterpret_cast<uintptr_t>(Val.Divisor)) ^
+ (unsigned)Val.SignedOp;
+ }
+ };
+
+ typedef DenseMap<DivOpInfo, DivPhiNodes> DivCacheTy;
+}
+
+// insertFastDiv - Substitutes the div/rem instruction with code that checks the
+// value of the operands and uses a shorter, faster div/rem instruction when
+// possible, falling back to the longer, slower div/rem instruction otherwise.
+static bool insertFastDiv(Function &F,
+ Function::iterator &I,
+ BasicBlock::iterator &J,
+ IntegerType *BypassType,
+ bool UseDivOp,
+ bool UseSignedOp,
+ DivCacheTy &PerBBDivCache) {
+ // Get instruction operands
+ Instruction *Instr = J;
+ Value *Dividend = Instr->getOperand(0);
+ Value *Divisor = Instr->getOperand(1);
+
+ if (isa<ConstantInt>(Divisor) ||
+ (isa<ConstantInt>(Dividend) && isa<ConstantInt>(Divisor))) {
+ // Operations with immediate values should have
+ // been constant-folded away at compile time.
+ return false;
+ }
+
+ // Basic Block is split before divide
+ BasicBlock *MainBB = I;
+ BasicBlock *SuccessorBB = I->splitBasicBlock(J);
+ ++I; // advance iterator I to SuccessorBB
+
+ // Add new basic block for slow divide operation
+ BasicBlock *SlowBB = BasicBlock::Create(F.getContext(), "",
+ MainBB->getParent(), SuccessorBB);
+ SlowBB->moveBefore(SuccessorBB);
+ IRBuilder<> SlowBuilder(SlowBB, SlowBB->begin());
+ Value *SlowQuotientV;
+ Value *SlowRemainderV;
+ if (UseSignedOp) {
+ SlowQuotientV = SlowBuilder.CreateSDiv(Dividend, Divisor);
+ SlowRemainderV = SlowBuilder.CreateSRem(Dividend, Divisor);
+ } else {
+ SlowQuotientV = SlowBuilder.CreateUDiv(Dividend, Divisor);
+ SlowRemainderV = SlowBuilder.CreateURem(Dividend, Divisor);
+ }
+ SlowBuilder.CreateBr(SuccessorBB);
+
+ // Add new basic block for fast divide operation
+ BasicBlock *FastBB = BasicBlock::Create(F.getContext(), "",
+ MainBB->getParent(), SuccessorBB);
+ FastBB->moveBefore(SlowBB);
+ IRBuilder<> FastBuilder(FastBB, FastBB->begin());
+ Value *ShortDivisorV = FastBuilder.CreateCast(Instruction::Trunc, Divisor,
+ BypassType);
+ Value *ShortDividendV = FastBuilder.CreateCast(Instruction::Trunc, Dividend,
+ BypassType);
+
+ // udiv/urem because optimization only handles positive numbers
+ Value *ShortQuotientV = FastBuilder.CreateExactUDiv(ShortDividendV,
+ ShortDivisorV);
+ Value *ShortRemainderV = FastBuilder.CreateURem(ShortDividendV,
+ ShortDivisorV);
+ Value *FastQuotientV = FastBuilder.CreateCast(Instruction::ZExt,
+ ShortQuotientV,
+ Dividend->getType());
+ Value *FastRemainderV = FastBuilder.CreateCast(Instruction::ZExt,
+ ShortRemainderV,
+ Dividend->getType());
+ FastBuilder.CreateBr(SuccessorBB);
+
+ // Phi nodes for result of div and rem
+ IRBuilder<> SuccessorBuilder(SuccessorBB, SuccessorBB->begin());
+ PHINode *QuoPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2);
+ QuoPhi->addIncoming(SlowQuotientV, SlowBB);
+ QuoPhi->addIncoming(FastQuotientV, FastBB);
+ PHINode *RemPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2);
+ RemPhi->addIncoming(SlowRemainderV, SlowBB);
+ RemPhi->addIncoming(FastRemainderV, FastBB);
+
+ // Replace Instr with appropriate phi node
+ if (UseDivOp)
+ Instr->replaceAllUsesWith(QuoPhi);
+ else
+ Instr->replaceAllUsesWith(RemPhi);
+ Instr->eraseFromParent();
+
+ // Combine operands into a single value with OR for value testing below
+ MainBB->getInstList().back().eraseFromParent();
+ IRBuilder<> MainBuilder(MainBB, MainBB->end());
+ Value *OrV = MainBuilder.CreateOr(Dividend, Divisor);
+
+ // BitMask is inverted to check if the operands are
+ // larger than the bypass type
+ uint64_t BitMask = ~BypassType->getBitMask();
+ Value *AndV = MainBuilder.CreateAnd(OrV, BitMask);
+
+ // Compare operand values and branch
+ Value *ZeroV = MainBuilder.getInt32(0);
+ Value *CmpV = MainBuilder.CreateICmpEQ(AndV, ZeroV);
+ MainBuilder.CreateCondBr(CmpV, FastBB, SlowBB);
+
+ // point iterator J at first instruction of successorBB
+ J = I->begin();
+
+ // Cache phi nodes to be used later in place of other instances
+ // of div or rem with the same sign, dividend, and divisor
+ DivOpInfo Key(UseSignedOp, Dividend, Divisor);
+ DivPhiNodes Value(QuoPhi, RemPhi);
+ PerBBDivCache.insert(std::pair<DivOpInfo, DivPhiNodes>(Key, Value));
+ return true;
+}
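
At the source level, the control flow stitched together above behaves like the following sketch, which assumes a 32-bit unsigned operation with an 8-bit bypass type (the IR uses a trunc/udiv/zext sequence where this uses casts):

    #include <cassert>
    #include <cstdint>

    // If neither operand has bits above the bypass width, divide narrow and
    // widen the result; otherwise fall back to the full-width divide.
    static uint32_t bypassUDiv(uint32_t a, uint32_t b) {
      if (((a | b) & ~uint32_t(0xFF)) == 0)        // both operands fit in 8 bits
        return uint32_t(uint8_t(a) / uint8_t(b));  // fast path
      return a / b;                                // slow path
    }

    int main() {
      assert(bypassUDiv(250, 7) == 250u / 7u);        // takes the fast path
      assert(bypassUDiv(100000, 7) == 100000u / 7u);  // takes the slow path
      return 0;
    }
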
+
+// reuseOrInsertFastDiv - Reuses a previously computed quotient or remainder if
+// operands and operation are identical. Otherwise calls insertFastDiv to perform
+// the optimization and caches the resulting quotient and remainder phi nodes.
+static bool reuseOrInsertFastDiv(Function &F,
+ Function::iterator &I,
+ BasicBlock::iterator &J,
+ IntegerType *BypassType,
+ bool UseDivOp,
+ bool UseSignedOp,
+ DivCacheTy &PerBBDivCache) {
+ // Get instruction operands
+ Instruction *Instr = J;
+ DivOpInfo Key(UseSignedOp, Instr->getOperand(0), Instr->getOperand(1));
+ DivCacheTy::iterator CacheI = PerBBDivCache.find(Key);
+
+ if (CacheI == PerBBDivCache.end()) {
+ // If previous instance does not exist, insert fast div
+ return insertFastDiv(F, I, J, BypassType, UseDivOp, UseSignedOp,
+ PerBBDivCache);
+ }
+
+ // Replace operation value with previously generated phi node
+ DivPhiNodes &Value = CacheI->second;
+ if (UseDivOp) {
+ // Replace all uses of div instruction with quotient phi node
+ J->replaceAllUsesWith(Value.Quotient);
+ } else {
+ // Replace all uses of rem instruction with remainder phi node
+ J->replaceAllUsesWith(Value.Remainder);
+ }
+
+ // Advance to next operation
+ ++J;
+
+ // Remove redundant operation
+ Instr->eraseFromParent();
+ return true;
+}
+
+// bypassSlowDivision - This optimization identifies div/rem instructions that
+// can be profitably bypassed and carried out with a shorter, faster divide.
+bool llvm::bypassSlowDivision(Function &F,
+ Function::iterator &I,
+ const DenseMap<unsigned int, unsigned int> &BypassWidths) {
+ DivCacheTy DivCache;
+
+ bool MadeChange = false;
+ for (BasicBlock::iterator J = I->begin(); J != I->end(); J++) {
+
+ // Get instruction details
+ unsigned Opcode = J->getOpcode();
+ bool UseDivOp = Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
+ bool UseRemOp = Opcode == Instruction::SRem || Opcode == Instruction::URem;
+ bool UseSignedOp = Opcode == Instruction::SDiv ||
+ Opcode == Instruction::SRem;
+
+ // Only optimize div or rem ops
+ if (!UseDivOp && !UseRemOp)
+ continue;
+
+ // Skip division on vector types, only optimize integer instructions
+ if (!J->getType()->isIntegerTy())
+ continue;
+
+ // Get bitwidth of div/rem instruction
+ IntegerType *T = cast<IntegerType>(J->getType());
+ unsigned bitwidth = T->getBitWidth();
+
+ // Continue if bitwidth is not bypassed
+ DenseMap<unsigned int, unsigned int>::const_iterator BI =
+ BypassWidths.find(bitwidth);
+ if (BI == BypassWidths.end())
+ continue;
+
+ // Get type for div/rem instruction with bypass bitwidth
+ IntegerType *BT = IntegerType::get(J->getContext(), BI->second);
+
+ MadeChange |= reuseOrInsertFastDiv(F, I, J, BT, UseDivOp,
+ UseSignedOp, DivCache);
+ }
+
+ return MadeChange;
+}
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 99237b8..7ba9f6d 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -98,10 +98,14 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
Anew->addAttr( OldFunc->getAttributes()
.getParamAttributes(I->getArgNo() + 1));
NewFunc->setAttributes(NewFunc->getAttributes()
- .addAttr(0, OldFunc->getAttributes()
+ .addAttr(NewFunc->getContext(),
+ AttrListPtr::ReturnIndex,
+ OldFunc->getAttributes()
.getRetAttributes()));
NewFunc->setAttributes(NewFunc->getAttributes()
- .addAttr(~0, OldFunc->getAttributes()
+ .addAttr(NewFunc->getContext(),
+ AttrListPtr::FunctionIndex,
+ OldFunc->getAttributes()
.getFnAttributes()));
}
@@ -202,14 +206,14 @@ namespace {
bool ModuleLevelChanges;
const char *NameSuffix;
ClonedCodeInfo *CodeInfo;
- const TargetData *TD;
+ const DataLayout *TD;
public:
PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
ValueToValueMapTy &valueMap,
bool moduleLevelChanges,
const char *nameSuffix,
ClonedCodeInfo *codeInfo,
- const TargetData *td)
+ const DataLayout *td)
: NewFunc(newFunc), OldFunc(oldFunc),
VMap(valueMap), ModuleLevelChanges(moduleLevelChanges),
NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
@@ -365,7 +369,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix,
ClonedCodeInfo *CodeInfo,
- const TargetData *TD,
+ const DataLayout *TD,
Instruction *TheCall) {
assert(NameSuffix && "NameSuffix cannot be null!");
diff --git a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index c545cd6..281714f 100644
--- a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -346,7 +346,7 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
header->getName(), M);
// If the old function is no-throw, so is the new one.
if (oldFunction->doesNotThrow())
- newFunction->setDoesNotThrow(true);
+ newFunction->setDoesNotThrow();
newFunction->getBasicBlockList().push_back(newRootNode);
diff --git a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 89e89e7..009847f 100644
--- a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -27,7 +27,7 @@
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -357,7 +357,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
Type *VoidPtrTy = Type::getInt8PtrTy(Context);
- // Create the alloca. If we have TargetData, use nice alignment.
+ // Create the alloca. If we have DataLayout, use nice alignment.
unsigned Align = 1;
if (IFI.TD)
Align = IFI.TD->getPrefTypeAlignment(AggTy);
diff --git a/contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp b/contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp
new file mode 100644
index 0000000..55227e2
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp
@@ -0,0 +1,420 @@
+//===-- IntegerDivision.cpp - Expand integer division ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of 32-bit scalar integer division for
+// targets that don't have native support. It's largely derived from
+// compiler-rt's implementation of __udivsi3, but hand-tuned to reduce the
+// amount of control flow involved.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "integer-division"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/Transforms/Utils/IntegerDivision.h"
+
+using namespace llvm;
+
+/// Generate code to compute the remainder of two signed integers. Returns the
+/// remainder, which will have the sign of the dividend. Builder's insert point
+/// should be pointing where the caller wants code generated, e.g. at the srem
+/// instruction. This will generate a urem in the process, and Builder's insert
+/// point will be pointing at the urem (if present, i.e. not folded), ready to
+/// be expanded if the user wishes.
+static Value *generateSignedRemainderCode(Value *Dividend, Value *Divisor,
+ IRBuilder<> &Builder) {
+ ConstantInt *ThirtyOne = Builder.getInt32(31);
+
+ // ; %dividend_sgn = ashr i32 %dividend, 31
+ // ; %divisor_sgn = ashr i32 %divisor, 31
+ // ; %dvd_xor = xor i32 %dividend, %dividend_sgn
+ // ; %dvs_xor = xor i32 %divisor, %divisor_sgn
+ // ; %u_dividend = sub i32 %dvd_xor, %dividend_sgn
+ // ; %u_divisor = sub i32 %dvs_xor, %divisor_sgn
+ // ; %urem = urem i32 %u_dividend, %u_divisor
+ // ; %xored = xor i32 %urem, %dividend_sgn
+ // ; %srem = sub i32 %xored, %dividend_sgn
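+ //
+ // Worked example, added for exposition (hand-checked): dividend = -7,
+ // divisor = 3 gives dividend_sgn = -1, u_dividend = 7, u_divisor = 3,
+ // urem = 1, and srem = (1 ^ -1) - (-1) = -1, matching C's -7 % 3 == -1.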
+ Value *DividendSign = Builder.CreateAShr(Dividend, ThirtyOne);
+ Value *DivisorSign = Builder.CreateAShr(Divisor, ThirtyOne);
+ Value *DvdXor = Builder.CreateXor(Dividend, DividendSign);
+ Value *DvsXor = Builder.CreateXor(Divisor, DivisorSign);
+ Value *UDividend = Builder.CreateSub(DvdXor, DividendSign);
+ Value *UDivisor = Builder.CreateSub(DvsXor, DivisorSign);
+ Value *URem = Builder.CreateURem(UDividend, UDivisor);
+ Value *Xored = Builder.CreateXor(URem, DividendSign);
+ Value *SRem = Builder.CreateSub(Xored, DividendSign);
+
+ if (Instruction *URemInst = dyn_cast<Instruction>(URem))
+ Builder.SetInsertPoint(URemInst);
+
+ return SRem;
+}
+
+
+/// Generate code to compute the remainder of two unsigned integers. Returns the
+/// remainder. Builder's insert point should be pointing where the caller wants
+/// code generated, e.g. at the urem instruction. This will generate a udiv in
+/// the process, and Builder's insert point will be pointing at the udiv (if
+/// present, i.e. not folded), ready to be expanded if the user wishes.
+static Value *generateUnsignedRemainderCode(Value *Dividend, Value *Divisor,
+ IRBuilder<> &Builder) {
+ // Remainder = Dividend - Quotient*Divisor
+
+ // ; %quotient = udiv i32 %dividend, %divisor
+ // ; %product = mul i32 %divisor, %quotient
+ // ; %remainder = sub i32 %dividend, %product
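+ //
+ // E.g. (for exposition): dividend = 7, divisor = 3 gives quotient = 2,
+ // product = 6, remainder = 1.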
+ Value *Quotient = Builder.CreateUDiv(Dividend, Divisor);
+ Value *Product = Builder.CreateMul(Divisor, Quotient);
+ Value *Remainder = Builder.CreateSub(Dividend, Product);
+
+ if (Instruction *UDiv = dyn_cast<Instruction>(Quotient))
+ Builder.SetInsertPoint(UDiv);
+
+ return Remainder;
+}
+
+/// Generate code to divide two signed integers. Returns the quotient, rounded
+/// towards 0. Builder's insert point should be pointing where the caller wants
+/// code generated, e.g. at the sdiv instruction. This will generate a udiv in
+/// the process, and Builder's insert point will be pointing at the udiv (if
+/// present, i.e. not folded), ready to be expanded if the user wishes.
+static Value *generateSignedDivisionCode(Value *Dividend, Value *Divisor,
+ IRBuilder<> &Builder) {
+ // Implementation taken from compiler-rt's __divsi3
+
+ ConstantInt *ThirtyOne = Builder.getInt32(31);
+
+ // ; %tmp = ashr i32 %dividend, 31
+ // ; %tmp1 = ashr i32 %divisor, 31
+ // ; %tmp2 = xor i32 %tmp, %dividend
+ // ; %u_dvnd = sub nsw i32 %tmp2, %tmp
+ // ; %tmp3 = xor i32 %tmp1, %divisor
+ // ; %u_dvsr = sub nsw i32 %tmp3, %tmp1
+ // ; %q_sgn = xor i32 %tmp1, %tmp
+ // ; %q_mag = udiv i32 %u_dvnd, %u_dvsr
+ // ; %tmp4 = xor i32 %q_mag, %q_sgn
+ // ; %q = sub i32 %tmp4, %q_sgn
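+ //
+ // Worked example, added for exposition (hand-checked): dividend = -7,
+ // divisor = 3 gives tmp = -1, tmp1 = 0, u_dvnd = 7, u_dvsr = 3,
+ // q_sgn = -1, q_mag = 2, and q = (2 ^ -1) - (-1) = -2, rounding toward 0.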
+ Value *Tmp = Builder.CreateAShr(Dividend, ThirtyOne);
+ Value *Tmp1 = Builder.CreateAShr(Divisor, ThirtyOne);
+ Value *Tmp2 = Builder.CreateXor(Tmp, Dividend);
+ Value *U_Dvnd = Builder.CreateSub(Tmp2, Tmp);
+ Value *Tmp3 = Builder.CreateXor(Tmp1, Divisor);
+ Value *U_Dvsr = Builder.CreateSub(Tmp3, Tmp1);
+ Value *Q_Sgn = Builder.CreateXor(Tmp1, Tmp);
+ Value *Q_Mag = Builder.CreateUDiv(U_Dvnd, U_Dvsr);
+ Value *Tmp4 = Builder.CreateXor(Q_Mag, Q_Sgn);
+ Value *Q = Builder.CreateSub(Tmp4, Q_Sgn);
+
+ if (Instruction *UDiv = dyn_cast<Instruction>(Q_Mag))
+ Builder.SetInsertPoint(UDiv);
+
+ return Q;
+}
+
+/// Generates code to divide two unsigned scalar 32-bit integers. Returns the
+/// quotient, rounded towards 0. Builder's insert point should be pointing where
+/// the caller wants code generated, e.g. at the udiv instruction.
+static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
+ IRBuilder<> &Builder) {
+ // The basic algorithm can be found in the compiler-rt project's
+ // implementation of __udivsi3.c. Here, we do a lower-level IR based approach
+ // that's been hand-tuned to lessen the amount of control flow involved.
+
+ // Some helper values
+ IntegerType *I32Ty = Builder.getInt32Ty();
+
+ ConstantInt *Zero = Builder.getInt32(0);
+ ConstantInt *One = Builder.getInt32(1);
+ ConstantInt *ThirtyOne = Builder.getInt32(31);
+ ConstantInt *NegOne = ConstantInt::getSigned(I32Ty, -1);
+ ConstantInt *True = Builder.getTrue();
+
+ BasicBlock *IBB = Builder.GetInsertBlock();
+ Function *F = IBB->getParent();
+ Function *CTLZi32 = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
+ I32Ty);
+
+ // Our CFG is going to look like:
+ // +---------------------+
+ // | special-cases |
+ // | ... |
+ // +---------------------+
+ // | |
+ // | +----------+
+ // | | bb1 |
+ // | | ... |
+ // | +----------+
+ // | | |
+ // | | +------------+
+ // | | | preheader |
+ // | | | ... |
+ // | | +------------+
+ // | | |
+ // | | | +---+
+ // | | | | |
+ // | | +------------+ |
+ // | | | do-while | |
+ // | | | ... | |
+ // | | +------------+ |
+ // | | | | |
+ // | +-----------+ +---+
+ // | | loop-exit |
+ // | | ... |
+ // | +-----------+
+ // | |
+ // +-------+
+ // | ... |
+ // | end |
+ // +-------+
+ BasicBlock *SpecialCases = Builder.GetInsertBlock();
+ SpecialCases->setName(Twine(SpecialCases->getName(), "_udiv-special-cases"));
+ BasicBlock *End = SpecialCases->splitBasicBlock(Builder.GetInsertPoint(),
+ "udiv-end");
+ BasicBlock *LoopExit = BasicBlock::Create(Builder.getContext(),
+ "udiv-loop-exit", F, End);
+ BasicBlock *DoWhile = BasicBlock::Create(Builder.getContext(),
+ "udiv-do-while", F, End);
+ BasicBlock *Preheader = BasicBlock::Create(Builder.getContext(),
+ "udiv-preheader", F, End);
+ BasicBlock *BB1 = BasicBlock::Create(Builder.getContext(),
+ "udiv-bb1", F, End);
+
+ // We'll be overwriting the terminator to insert our extra blocks
+ SpecialCases->getTerminator()->eraseFromParent();
+
+ // First off, check for special cases: dividend or divisor is zero, divisor
+ // is greater than dividend, and divisor is 1.
+ // ; special-cases:
+ // ; %ret0_1 = icmp eq i32 %divisor, 0
+ // ; %ret0_2 = icmp eq i32 %dividend, 0
+ // ; %ret0_3 = or i1 %ret0_1, %ret0_2
+ // ; %tmp0 = tail call i32 @llvm.ctlz.i32(i32 %divisor, i1 true)
+ // ; %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %dividend, i1 true)
+ // ; %sr = sub nsw i32 %tmp0, %tmp1
+ // ; %ret0_4 = icmp ugt i32 %sr, 31
+ // ; %ret0 = or i1 %ret0_3, %ret0_4
+ // ; %retDividend = icmp eq i32 %sr, 31
+ // ; %retVal = select i1 %ret0, i32 0, i32 %dividend
+ // ; %earlyRet = or i1 %ret0, %retDividend
+ // ; br i1 %earlyRet, label %end, label %bb1
+ Builder.SetInsertPoint(SpecialCases);
+ Value *Ret0_1 = Builder.CreateICmpEQ(Divisor, Zero);
+ Value *Ret0_2 = Builder.CreateICmpEQ(Dividend, Zero);
+ Value *Ret0_3 = Builder.CreateOr(Ret0_1, Ret0_2);
+ Value *Tmp0 = Builder.CreateCall2(CTLZi32, Divisor, True);
+ Value *Tmp1 = Builder.CreateCall2(CTLZi32, Dividend, True);
+ Value *SR = Builder.CreateSub(Tmp0, Tmp1);
+ Value *Ret0_4 = Builder.CreateICmpUGT(SR, ThirtyOne);
+ Value *Ret0 = Builder.CreateOr(Ret0_3, Ret0_4);
+ Value *RetDividend = Builder.CreateICmpEQ(SR, ThirtyOne);
+ Value *RetVal = Builder.CreateSelect(Ret0, Zero, Dividend);
+ Value *EarlyRet = Builder.CreateOr(Ret0, RetDividend);
+ Builder.CreateCondBr(EarlyRet, End, BB1);
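+
+ // In other words: a zero operand, or a divisor with fewer leading zeros
+ // than the dividend (sr wraps below 0, so the unsigned compare with 31
+ // fires and the divisor must be larger), yields quotient 0. sr == 31 can
+ // only happen when the divisor is 1 and the dividend's top bit is set, in
+ // which case the dividend itself is returned. (Summary added for
+ // exposition.)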
+
+ // ; bb1: ; preds = %special-cases
+ // ; %sr_1 = add i32 %sr, 1
+ // ; %tmp2 = sub i32 31, %sr
+ // ; %q = shl i32 %dividend, %tmp2
+ // ; %skipLoop = icmp eq i32 %sr_1, 0
+ // ; br i1 %skipLoop, label %loop-exit, label %preheader
+ Builder.SetInsertPoint(BB1);
+ Value *SR_1 = Builder.CreateAdd(SR, One);
+ Value *Tmp2 = Builder.CreateSub(ThirtyOne, SR);
+ Value *Q = Builder.CreateShl(Dividend, Tmp2);
+ Value *SkipLoop = Builder.CreateICmpEQ(SR_1, Zero);
+ Builder.CreateCondBr(SkipLoop, LoopExit, Preheader);
+
+ // ; preheader: ; preds = %bb1
+ // ; %tmp3 = lshr i32 %dividend, %sr_1
+ // ; %tmp4 = add i32 %divisor, -1
+ // ; br label %do-while
+ Builder.SetInsertPoint(Preheader);
+ Value *Tmp3 = Builder.CreateLShr(Dividend, SR_1);
+ Value *Tmp4 = Builder.CreateAdd(Divisor, NegOne);
+ Builder.CreateBr(DoWhile);
+
+ // ; do-while: ; preds = %do-while, %preheader
+ // ; %carry_1 = phi i32 [ 0, %preheader ], [ %carry, %do-while ]
+ // ; %sr_3 = phi i32 [ %sr_1, %preheader ], [ %sr_2, %do-while ]
+ // ; %r_1 = phi i32 [ %tmp3, %preheader ], [ %r, %do-while ]
+ // ; %q_2 = phi i32 [ %q, %preheader ], [ %q_1, %do-while ]
+ // ; %tmp5 = shl i32 %r_1, 1
+ // ; %tmp6 = lshr i32 %q_2, 31
+ // ; %tmp7 = or i32 %tmp5, %tmp6
+ // ; %tmp8 = shl i32 %q_2, 1
+ // ; %q_1 = or i32 %carry_1, %tmp8
+ // ; %tmp9 = sub i32 %tmp4, %tmp7
+ // ; %tmp10 = ashr i32 %tmp9, 31
+ // ; %carry = and i32 %tmp10, 1
+ // ; %tmp11 = and i32 %tmp10, %divisor
+ // ; %r = sub i32 %tmp7, %tmp11
+ // ; %sr_2 = add i32 %sr_3, -1
+ // ; %tmp12 = icmp eq i32 %sr_2, 0
+ // ; br i1 %tmp12, label %loop-exit, label %do-while
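+ //
+ // Each iteration is one step of restoring long division: the top bit of
+ // the quotient register q is shifted into the partial remainder r, the
+ // divisor is conditionally subtracted (tmp10 is an all-ones mask exactly
+ // when the shifted remainder reaches the divisor), and the resulting
+ // carry becomes the next quotient bit. (Summary added for exposition;
+ // compiler-rt's __udivsi3 is the reference.)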
+ Builder.SetInsertPoint(DoWhile);
+ PHINode *Carry_1 = Builder.CreatePHI(I32Ty, 2);
+ PHINode *SR_3 = Builder.CreatePHI(I32Ty, 2);
+ PHINode *R_1 = Builder.CreatePHI(I32Ty, 2);
+ PHINode *Q_2 = Builder.CreatePHI(I32Ty, 2);
+ Value *Tmp5 = Builder.CreateShl(R_1, One);
+ Value *Tmp6 = Builder.CreateLShr(Q_2, ThirtyOne);
+ Value *Tmp7 = Builder.CreateOr(Tmp5, Tmp6);
+ Value *Tmp8 = Builder.CreateShl(Q_2, One);
+ Value *Q_1 = Builder.CreateOr(Carry_1, Tmp8);
+ Value *Tmp9 = Builder.CreateSub(Tmp4, Tmp7);
+ Value *Tmp10 = Builder.CreateAShr(Tmp9, 31);
+ Value *Carry = Builder.CreateAnd(Tmp10, One);
+ Value *Tmp11 = Builder.CreateAnd(Tmp10, Divisor);
+ Value *R = Builder.CreateSub(Tmp7, Tmp11);
+ Value *SR_2 = Builder.CreateAdd(SR_3, NegOne);
+ Value *Tmp12 = Builder.CreateICmpEQ(SR_2, Zero);
+ Builder.CreateCondBr(Tmp12, LoopExit, DoWhile);
+
+ // ; loop-exit: ; preds = %do-while, %bb1
+ // ; %carry_2 = phi i32 [ 0, %bb1 ], [ %carry, %do-while ]
+ // ; %q_3 = phi i32 [ %q, %bb1 ], [ %q_1, %do-while ]
+ // ; %tmp13 = shl i32 %q_3, 1
+ // ; %q_4 = or i32 %carry_2, %tmp13
+ // ; br label %end
+ Builder.SetInsertPoint(LoopExit);
+ PHINode *Carry_2 = Builder.CreatePHI(I32Ty, 2);
+ PHINode *Q_3 = Builder.CreatePHI(I32Ty, 2);
+ Value *Tmp13 = Builder.CreateShl(Q_3, One);
+ Value *Q_4 = Builder.CreateOr(Carry_2, Tmp13);
+ Builder.CreateBr(End);
+
+ // ; end: ; preds = %loop-exit, %special-cases
+ // ; %q_5 = phi i32 [ %q_4, %loop-exit ], [ %retVal, %special-cases ]
+ // ; ret i32 %q_5
+ Builder.SetInsertPoint(End, End->begin());
+ PHINode *Q_5 = Builder.CreatePHI(I32Ty, 2);
+
+ // Populate the Phis, since all values have now been created. Our Phis were:
+ // ; %carry_1 = phi i32 [ 0, %preheader ], [ %carry, %do-while ]
+ Carry_1->addIncoming(Zero, Preheader);
+ Carry_1->addIncoming(Carry, DoWhile);
+ // ; %sr_3 = phi i32 [ %sr_1, %preheader ], [ %sr_2, %do-while ]
+ SR_3->addIncoming(SR_1, Preheader);
+ SR_3->addIncoming(SR_2, DoWhile);
+ // ; %r_1 = phi i32 [ %tmp3, %preheader ], [ %r, %do-while ]
+ R_1->addIncoming(Tmp3, Preheader);
+ R_1->addIncoming(R, DoWhile);
+ // ; %q_2 = phi i32 [ %q, %preheader ], [ %q_1, %do-while ]
+ Q_2->addIncoming(Q, Preheader);
+ Q_2->addIncoming(Q_1, DoWhile);
+ // ; %carry_2 = phi i32 [ 0, %bb1 ], [ %carry, %do-while ]
+ Carry_2->addIncoming(Zero, BB1);
+ Carry_2->addIncoming(Carry, DoWhile);
+ // ; %q_3 = phi i32 [ %q, %bb1 ], [ %q_1, %do-while ]
+ Q_3->addIncoming(Q, BB1);
+ Q_3->addIncoming(Q_1, DoWhile);
+ // ; %q_5 = phi i32 [ %q_4, %loop-exit ], [ %retVal, %special-cases ]
+ Q_5->addIncoming(Q_4, LoopExit);
+ Q_5->addIncoming(RetVal, SpecialCases);
+
+ return Q_5;
+}
+
+/// Generate code to calculate the remainder of two integers, replacing Rem
+/// with the generated code. This currently generates code using the udiv
+/// expansion, but future work includes generating more specialized code,
+/// e.g. when more information about the operands is known. Currently only
+/// 32-bit scalar remainders are supported (a limitation inherited from the
+/// udiv expansion); removing it is future work.
+///
+/// @brief Replace Rem with generated code.
+bool llvm::expandRemainder(BinaryOperator *Rem) {
+ assert((Rem->getOpcode() == Instruction::SRem ||
+ Rem->getOpcode() == Instruction::URem) &&
+ "Trying to expand remainder from a non-remainder function");
+
+ IRBuilder<> Builder(Rem);
+
+ // First prepare the sign if it's a signed remainder
+ if (Rem->getOpcode() == Instruction::SRem) {
+ Value *Remainder = generateSignedRemainderCode(Rem->getOperand(0),
+ Rem->getOperand(1), Builder);
+
+ Rem->replaceAllUsesWith(Remainder);
+ Rem->dropAllReferences();
+ Rem->eraseFromParent();
+
+ // If we didn't actually generate a udiv instruction, we're done
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(Builder.GetInsertPoint());
+ if (!BO || BO->getOpcode() != Instruction::URem)
+ return true;
+
+ Rem = BO;
+ }
+
+ Value *Remainder = generateUnsignedRemainderCode(Rem->getOperand(0),
+ Rem->getOperand(1),
+ Builder);
+
+ Rem->replaceAllUsesWith(Remainder);
+ Rem->dropAllReferences();
+ Rem->eraseFromParent();
+
+ // Expand the udiv
+ if (BinaryOperator *UDiv = dyn_cast<BinaryOperator>(Builder.GetInsertPoint())) {
+ assert(UDiv->getOpcode() == Instruction::UDiv && "Non-udiv in expansion?");
+ expandDivision(UDiv);
+ }
+
+ return true;
+}
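+
+// A minimal caller sketch, illustrative only (the instruction visitor and
+// iteration safety after the mutation are assumed to be handled elsewhere):
+// // given some Instruction &I being scanned:
+// if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
+// if (BO->getOpcode() == Instruction::SRem ||
+// BO->getOpcode() == Instruction::URem)
+// expandRemainder(BO);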
+
+
+/// Generate code to divide two integers, replacing Div with the generated
+/// code. This currently generates code similarly to compiler-rt's
+/// implementations, but future work includes generating more specialized code
+/// when more information about the operands is known. Currently only 32-bit
+/// scalar division is implemented; removing that limitation is future work.
+///
+/// @brief Replace Div with generated code.
+bool llvm::expandDivision(BinaryOperator *Div) {
+ assert((Div->getOpcode() == Instruction::SDiv ||
+ Div->getOpcode() == Instruction::UDiv) &&
+ "Trying to expand division from a non-division function");
+
+ IRBuilder<> Builder(Div);
+
+ if (Div->getType()->isVectorTy())
+ llvm_unreachable("Div over vectors not supported");
+
+ // First prepare the sign if it's a signed division
+ if (Div->getOpcode() == Instruction::SDiv) {
+ // Lower the code to unsigned division, and reset Div to point to the udiv.
+ Value *Quotient = generateSignedDivisionCode(Div->getOperand(0),
+ Div->getOperand(1), Builder);
+ Div->replaceAllUsesWith(Quotient);
+ Div->dropAllReferences();
+ Div->eraseFromParent();
+
+ // If we didn't actually generate a udiv instruction, we're done
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(Builder.GetInsertPoint());
+ if (!BO || BO->getOpcode() != Instruction::UDiv)
+ return true;
+
+ Div = BO;
+ }
+
+ // Insert the unsigned division code
+ Value *Quotient = generateUnsignedDivisionCode(Div->getOperand(0),
+ Div->getOperand(1),
+ Builder);
+ Div->replaceAllUsesWith(Quotient);
+ Div->dropAllReferences();
+ Div->eraseFromParent();
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
index b654111..5e05c83 100644
--- a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -53,6 +53,8 @@ namespace {
// Cached analysis information for the current function.
DominatorTree *DT;
+ LoopInfo *LI;
+ ScalarEvolution *SE;
std::vector<BasicBlock*> LoopBlocks;
PredIteratorCache PredCache;
Loop *L;
@@ -117,6 +119,8 @@ bool LCSSA::runOnLoop(Loop *TheLoop, LPPassManager &LPM) {
L = TheLoop;
DT = &getAnalysis<DominatorTree>();
+ LI = &getAnalysis<LoopInfo>();
+ SE = getAnalysisIfAvailable<ScalarEvolution>();
// Get the set of exiting blocks.
SmallVector<BasicBlock*, 8> ExitBlocks;
@@ -156,6 +160,12 @@ bool LCSSA::runOnLoop(Loop *TheLoop, LPPassManager &LPM) {
MadeChange |= ProcessInstruction(I, ExitBlocks);
}
}
+
+ // If we modified the code, remove any caches about the loop from SCEV to
+ // avoid dangling entries.
+ // FIXME: This is a big hammer, can we clear the cache more selectively?
+ if (SE && MadeChange)
+ SE->forgetLoop(L);
assert(L->isLCSSAForm(*DT));
PredCache.clear();
@@ -245,7 +255,7 @@ bool LCSSA::ProcessInstruction(Instruction *Inst,
// Remember that this phi makes the value alive in this block.
SSAUpdate.AddAvailableValue(ExitBB, PN);
}
-
+
// Rewrite all uses outside the loop in terms of the new PHIs we just
// inserted.
for (unsigned i = 0, e = UsesToRewrite.size(); i != e; ++i) {
@@ -260,6 +270,9 @@ bool LCSSA::ProcessInstruction(Instruction *Inst,
if (isa<PHINode>(UserBB->begin()) &&
isExitBlock(UserBB, ExitBlocks)) {
+ // Tell the VHs that the uses changed. This updates SCEV's caches.
+ if (UsesToRewrite[i]->get()->hasValueHandle())
+ ValueHandleBase::ValueIsRAUWd(*UsesToRewrite[i], UserBB->begin());
UsesToRewrite[i]->set(UserBB->begin());
continue;
}
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index bed7d72..a954d82 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -23,6 +23,7 @@
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
+#include "llvm/MDBuilder.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
#include "llvm/ADT/DenseMap.h"
@@ -38,7 +39,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -52,7 +53,8 @@ using namespace llvm;
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
-bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
+bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
+ const TargetLibraryInfo *TLI) {
TerminatorInst *T = BB->getTerminator();
IRBuilder<> Builder(T);
@@ -96,7 +98,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
Value *Cond = BI->getCondition();
BI->eraseFromParent();
if (DeleteDeadConditions)
- RecursivelyDeleteTriviallyDeadInstructions(Cond);
+ RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
return true;
}
return false;
@@ -121,6 +123,27 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
// Check to see if this branch is going to the same place as the default
// dest. If so, eliminate it as an explicit compare.
if (i.getCaseSuccessor() == DefaultDest) {
+ MDNode* MD = SI->getMetadata(LLVMContext::MD_prof);
+ // MD should have 2 + NumCases operands.
+ if (MD && MD->getNumOperands() == 2 + SI->getNumCases()) {
+ // Collect branch weights into a vector.
+ SmallVector<uint32_t, 8> Weights;
+ for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
+ ++MD_i) {
+ ConstantInt* CI = dyn_cast<ConstantInt>(MD->getOperand(MD_i));
+ assert(CI);
+ Weights.push_back(CI->getValue().getZExtValue());
+ }
+ // Merge weight of this case to the default weight.
+ unsigned idx = i.getCaseIndex();
+ Weights[0] += Weights[idx+1];
+ // Remove weight for this case.
+ std::swap(Weights[idx+1], Weights.back());
+ Weights.pop_back();
+ SI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(BB->getContext()).
+ createBranchWeights(Weights));
+ }
// Remove this entry.
DefaultDest->removePredecessor(SI->getParent());
SI->removeCase(i);
@@ -161,7 +184,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
Value *Cond = SI->getCondition();
SI->eraseFromParent();
if (DeleteDeadConditions)
- RecursivelyDeleteTriviallyDeadInstructions(Cond);
+ RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
return true;
}
@@ -177,8 +200,20 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
"cond");
// Insert the new branch.
- Builder.CreateCondBr(Cond, FirstCase.getCaseSuccessor(),
- SI->getDefaultDest());
+ BranchInst *NewBr = Builder.CreateCondBr(Cond,
+ FirstCase.getCaseSuccessor(),
+ SI->getDefaultDest());
+ MDNode* MD = SI->getMetadata(LLVMContext::MD_prof);
+ if (MD && MD->getNumOperands() == 3) {
+ ConstantInt *SICase = dyn_cast<ConstantInt>(MD->getOperand(2));
+ ConstantInt *SIDef = dyn_cast<ConstantInt>(MD->getOperand(1));
+ assert(SICase && SIDef);
+ // The TrueWeight should be the weight for the single case of SI.
+ NewBr->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(BB->getContext()).
+ createBranchWeights(SICase->getValue().getZExtValue(),
+ SIDef->getValue().getZExtValue()));
+ }
// Delete the old switch.
SI->eraseFromParent();
@@ -205,7 +240,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
Value *Address = IBI->getAddress();
IBI->eraseFromParent();
if (DeleteDeadConditions)
- RecursivelyDeleteTriviallyDeadInstructions(Address);
+ RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
// If we didn't find our destination in the IBI successor list, then we
// have undefined behavior. Replace the unconditional branch with an
@@ -230,7 +265,8 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
-bool llvm::isInstructionTriviallyDead(Instruction *I) {
+bool llvm::isInstructionTriviallyDead(Instruction *I,
+ const TargetLibraryInfo *TLI) {
if (!I->use_empty() || isa<TerminatorInst>(I)) return false;
// We don't want the landingpad instruction removed by anything this general.
@@ -265,9 +301,9 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) {
return isa<UndefValue>(II->getArgOperand(1));
}
- if (isAllocLikeFn(I)) return true;
+ if (isAllocLikeFn(I, TLI)) return true;
- if (CallInst *CI = isFreeCall(I))
+ if (CallInst *CI = isFreeCall(I, TLI))
if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
return C->isNullValue() || isa<UndefValue>(C);
@@ -278,9 +314,11 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) {
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
-bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) {
+bool
+llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
+ const TargetLibraryInfo *TLI) {
Instruction *I = dyn_cast<Instruction>(V);
- if (!I || !I->use_empty() || !isInstructionTriviallyDead(I))
+ if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
return false;
SmallVector<Instruction*, 16> DeadInsts;
@@ -301,7 +339,7 @@ bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) {
// operand, and if it is 'trivially' dead, delete it in a future loop
// iteration.
if (Instruction *OpI = dyn_cast<Instruction>(OpV))
- if (isInstructionTriviallyDead(OpI))
+ if (isInstructionTriviallyDead(OpI, TLI))
DeadInsts.push_back(OpI);
}
@@ -334,19 +372,20 @@ static bool areAllUsesEqual(Instruction *I) {
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
-bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
+bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
+ const TargetLibraryInfo *TLI) {
SmallPtrSet<Instruction*, 4> Visited;
for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
I = cast<Instruction>(*I->use_begin())) {
if (I->use_empty())
- return RecursivelyDeleteTriviallyDeadInstructions(I);
+ return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
// If we find an instruction more than once, we're on a cycle that
// won't prove fruitful.
if (!Visited.insert(I)) {
// Break the cycle and delete the instruction and its operands.
I->replaceAllUsesWith(UndefValue::get(I->getType()));
- (void)RecursivelyDeleteTriviallyDeadInstructions(I);
+ (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
return true;
}
}
@@ -358,7 +397,8 @@ bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
-bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
+bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
bool MadeChange = false;
#ifndef NDEBUG
@@ -381,7 +421,7 @@ bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
continue;
}
- MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst);
+ MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
if (BIHandle != BI)
BI = BB->begin();
}
@@ -405,7 +445,7 @@ bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the and to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
- TargetData *TD) {
+ DataLayout *TD) {
// This only adjusts blocks with PHI nodes.
if (!isa<PHINode>(BB->begin()))
return;
@@ -720,7 +760,7 @@ bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
/// their preferred alignment from the beginning.
///
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
- unsigned PrefAlign, const TargetData *TD) {
+ unsigned PrefAlign, const DataLayout *TD) {
V = V->stripPointerCasts();
if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
@@ -763,7 +803,7 @@ static unsigned enforceKnownAlignment(Value *V, unsigned Align,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
- const TargetData *TD) {
+ const DataLayout *TD) {
assert(V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!");
unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index 0bc185d..9d9e201 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -46,6 +46,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
@@ -89,6 +90,7 @@ namespace {
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<DependenceAnalysis>();
AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added.
}
@@ -194,6 +196,11 @@ ReprocessLoop:
BI->setCondition(ConstantInt::get(Cond->getType(),
!L->contains(BI->getSuccessor(0))));
+
+ // This may make the loop analyzable, force SCEV recomputation.
+ if (SE)
+ SE->forgetLoop(L);
+
Changed = true;
}
}
diff --git a/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp b/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp
new file mode 100644
index 0000000..233bc12
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp
@@ -0,0 +1,132 @@
+//===- MetaRenamer.cpp - Rename everything with metasyntactic names -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass renames everything with metasyntactic names. The intent is to use
+// this pass after bugpoint reduction to conceal the nature of the original
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Type.h"
+#include "llvm/TypeFinder.h"
+
+using namespace llvm;
+
+namespace {
+
+ // This PRNG is from the ISO C spec. It is intentionally simple and
+ // unsuitable for cryptographic use. We're just looking for enough
+ // variety to surprise and delight users.
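+ //
+ // For example, srand(1) followed by repeated rand() calls yields 16838,
+ // 5758, 10113, ... (hand-checked against the classic ISO C sample; only
+ // bits 16..30 of 'next' are used, so a 64-bit unsigned long does not
+ // perturb the sequence).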
+ struct PRNG {
+ unsigned long next;
+
+ void srand(unsigned int seed) {
+ next = seed;
+ }
+
+ int rand() {
+ next = next * 1103515245 + 12345;
+ return (unsigned int)(next / 65536) % 32768;
+ }
+ };
+
+ struct MetaRenamer : public ModulePass {
+ static char ID; // Pass identification, replacement for typeid
+ MetaRenamer() : ModulePass(ID) {
+ initializeMetaRenamerPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+
+ bool runOnModule(Module &M) {
+ static const char *metaNames[] = {
+ // See http://en.wikipedia.org/wiki/Metasyntactic_variable
+ "foo", "bar", "baz", "quux", "barney", "snork", "zot", "blam", "hoge",
+ "wibble", "wobble", "widget", "wombat", "ham", "eggs", "pluto", "spam"
+ };
+
+ // Seed our PRNG with a simple additive sum of the ModuleID. We just want
+ // to avoid always having the same function names, and we need to remain
+ // deterministic.
+ unsigned int randSeed = 0;
+ for (std::string::const_iterator I = M.getModuleIdentifier().begin(),
+ E = M.getModuleIdentifier().end(); I != E; ++I)
+ randSeed += *I;
+
+ PRNG prng;
+ prng.srand(randSeed);
+
+ // Rename all aliases
+ for (Module::alias_iterator AI = M.alias_begin(), AE = M.alias_end();
+ AI != AE; ++AI)
+ AI->setName("alias");
+
+ // Rename all global variables
+ for (Module::global_iterator GI = M.global_begin(), GE = M.global_end();
+ GI != GE; ++GI)
+ GI->setName("global");
+
+ // Rename all struct types
+ TypeFinder StructTypes;
+ StructTypes.run(M, true);
+ for (unsigned i = 0, e = StructTypes.size(); i != e; ++i) {
+ StructType *STy = StructTypes[i];
+ if (STy->isLiteral() || STy->getName().empty()) continue;
+
+ SmallString<128> NameStorage;
+ STy->setName((Twine("struct.") + metaNames[prng.rand() %
+ array_lengthof(metaNames)]).toStringRef(NameStorage));
+ }
+
+ // Rename all functions
+ for (Module::iterator FI = M.begin(), FE = M.end();
+ FI != FE; ++FI) {
+ FI->setName(metaNames[prng.rand() % array_lengthof(metaNames)]);
+ runOnFunction(*FI);
+ }
+ return true;
+ }
+
+ bool runOnFunction(Function &F) {
+ for (Function::arg_iterator AI = F.arg_begin(), AE = F.arg_end();
+ AI != AE; ++AI)
+ if (!AI->getType()->isVoidTy())
+ AI->setName("arg");
+
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
+ BB->setName("bb");
+
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
+ if (!I->getType()->isVoidTy())
+ I->setName("tmp");
+ }
+ return true;
+ }
+ };
+}
+
+char MetaRenamer::ID = 0;
+INITIALIZE_PASS(MetaRenamer, "metarenamer",
+ "Assign new names to everything", false, false)
+//===----------------------------------------------------------------------===//
+//
+// MetaRenamer - Rename everything with metasyntactic names.
+//
+ModulePass *llvm::createMetaRenamerPass() {
+ return new MetaRenamer();
+}
diff --git a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index dd5e20e..558de9d 100644
--- a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -212,9 +212,13 @@ namespace {
///
DenseMap<AllocaInst*, unsigned> AllocaLookup;
- /// NewPhiNodes - The PhiNodes we're adding.
+ /// NewPhiNodes - The PhiNodes we're adding. This map is used to simplify
+ /// some Phi nodes as we iterate over it, so it should have deterministic
+ /// iterators. We could use a MapVector, but since we already maintain a
+ /// map from BasicBlock* to a stable numbering (BBNumbers), the DenseMap is
+ /// more efficient (also supports removal).
///
- DenseMap<std::pair<BasicBlock*, unsigned>, PHINode*> NewPhiNodes;
+ DenseMap<std::pair<unsigned, unsigned>, PHINode*> NewPhiNodes;
/// PhiToAllocaMap - For each PHI node, keep track of which entry in Allocas
/// it corresponds to.
@@ -588,7 +592,11 @@ void PromoteMem2Reg::run() {
while (EliminatedAPHI) {
EliminatedAPHI = false;
- for (DenseMap<std::pair<BasicBlock*, unsigned>, PHINode*>::iterator I =
+ // Iterating over NewPhiNodes is deterministic, so it is safe to try to
+ // simplify and RAUW them as we go. If it were not, we could add uses to
+ // the values we replace with in a nondeterministic order, thus creating
+ // nondeterministic def->use chains.
+ for (DenseMap<std::pair<unsigned, unsigned>, PHINode*>::iterator I =
NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E;) {
PHINode *PN = I->second;
@@ -612,7 +620,7 @@ void PromoteMem2Reg::run() {
// have incoming values for all predecessors. Loop over all PHI nodes we have
// created, inserting undef values if they are missing any incoming values.
//
- for (DenseMap<std::pair<BasicBlock*, unsigned>, PHINode*>::iterator I =
+ for (DenseMap<std::pair<unsigned, unsigned>, PHINode*>::iterator I =
NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E; ++I) {
// We want to do this once per basic block. As such, only process a block
// when we find the PHI that is the first entry in the block.
@@ -992,7 +1000,7 @@ void PromoteMem2Reg::PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
unsigned &Version) {
// Look up the basic-block in question.
- PHINode *&PN = NewPhiNodes[std::make_pair(BB, AllocaNo)];
+ PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];
// If the BB already has a phi node added for the i'th alloca then we're done!
if (PN) return false;
diff --git a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
index e568a61..72d4199 100644
--- a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
@@ -39,7 +39,7 @@ SSAUpdater::SSAUpdater(SmallVectorImpl<PHINode*> *NewPHI)
: AV(0), ProtoType(0), ProtoName(), InsertedPHIs(NewPHI) {}
SSAUpdater::~SSAUpdater() {
- delete &getAvailableVals(AV);
+ delete static_cast<AvailableValsTy*>(AV);
}
/// Initialize - Reset this object to get ready for a new set of SSA
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 518df7c..c767da6 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -14,6 +14,7 @@
#define DEBUG_TYPE "simplifycfg"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Constants.h"
+#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IRBuilder.h"
@@ -22,6 +23,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/MDBuilder.h"
#include "llvm/Metadata.h"
+#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Type.h"
#include "llvm/ADT/DenseMap.h"
@@ -38,7 +40,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/NoFolder.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
#include <set>
@@ -53,6 +55,13 @@ static cl::opt<bool>
DupRet("simplifycfg-dup-ret", cl::Hidden, cl::init(false),
cl::desc("Duplicate return instructions into unconditional branches"));
+static cl::opt<bool>
+SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true),
+ cl::desc("Sink common instructions down to the end block"));
+
+STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
+STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables");
+STATISTIC(NumSinkCommons, "Number of common instructions sunk down to the end block");
STATISTIC(NumSpeculations, "Number of speculative executed instructions");
namespace {
@@ -68,10 +77,13 @@ namespace {
// Comparing pointers is ok as we only rely on the order for uniquing.
return Value < RHS.Value;
}
+
+ bool operator==(BasicBlock *RHSDest) const { return Dest == RHSDest; }
};
class SimplifyCFGOpt {
- const TargetData *const TD;
+ const DataLayout *const TD;
+ const TargetTransformInfo *const TTI;
Value *isValueEqualityComparison(TerminatorInst *TI);
BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
@@ -91,7 +103,8 @@ class SimplifyCFGOpt {
bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder);
public:
- explicit SimplifyCFGOpt(const TargetData *td) : TD(td) {}
+ SimplifyCFGOpt(const DataLayout *td, const TargetTransformInfo *tti)
+ : TD(td), TTI(tti) {}
bool run(BasicBlock *BB);
};
}
@@ -101,14 +114,14 @@ public:
///
static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
if (SI1 == SI2) return false; // Can't merge with self!
-
+
// It is not safe to merge these two switch instructions if they have a common
// successor, and if that successor has a PHI node, and if *that* PHI node has
// conflicting incoming values from the two switch blocks.
BasicBlock *SI1BB = SI1->getParent();
BasicBlock *SI2BB = SI2->getParent();
SmallPtrSet<BasicBlock*, 16> SI1Succs(succ_begin(SI1BB), succ_end(SI1BB));
-
+
for (succ_iterator I = succ_begin(SI2BB), E = succ_end(SI2BB); I != E; ++I)
if (SI1Succs.count(*I))
for (BasicBlock::iterator BBI = (*I)->begin();
@@ -118,7 +131,7 @@ static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
PN->getIncomingValueForBlock(SI2BB))
return false;
}
-
+
return true;
}
@@ -135,7 +148,7 @@ static bool isProfitableToFoldUnconditional(BranchInst *SI1,
assert(SI1->isUnconditional() && SI2->isConditional());
// We fold the unconditional branch if we can easily update all PHI nodes in
- // common successors:
+ // common successors:
// 1> We have a constant incoming value for the conditional branch;
// 2> We have "Cond" as the incoming value for the unconditional branch;
// 3> SI2->getCondition() and Cond have same operands.
@@ -170,7 +183,7 @@ static bool isProfitableToFoldUnconditional(BranchInst *SI1,
static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
BasicBlock *ExistPred) {
if (!isa<PHINode>(Succ->begin())) return; // Quick exit if nothing to do
-
+
PHINode *PN;
for (BasicBlock::iterator I = Succ->begin();
(PN = dyn_cast<PHINode>(I)); ++I)
@@ -222,7 +235,7 @@ static Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
// doesn't dominate BB.
if (Pred2->getSinglePredecessor() == 0)
return 0;
-
+
// If we found a conditional branch predecessor, make sure that it branches
// to BB and Pred2Br. If it doesn't, this isn't an "if statement".
if (Pred1Br->getSuccessor(0) == BB &&
@@ -252,7 +265,7 @@ static Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
// Otherwise, if this is a conditional branch, then we can use it!
BranchInst *BI = dyn_cast<BranchInst>(CommonPred->getTerminator());
if (BI == 0) return 0;
-
+
assert(BI->isConditional() && "Two successors but not conditional?");
if (BI->getSuccessor(0) == Pred1) {
IfTrue = Pred1;
@@ -345,7 +358,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// If we aren't allowing aggressive promotion anymore, then don't consider
// instructions in the 'if region'.
if (AggressiveInsts == 0) return false;
-
+
// If we have seen this instruction before, don't count it again.
if (AggressiveInsts->count(I)) return true;
@@ -374,7 +387,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
/// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr
/// and PointerNullValue. Return NULL if value is not a constant int.
-static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) {
+static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
// Normal constant int.
ConstantInt *CI = dyn_cast<ConstantInt>(V);
if (CI || !TD || !isa<Constant>(V) || !V->getType()->isPointerTy())
@@ -382,7 +395,7 @@ static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) {
// This is some kind of pointer constant. Turn it into a pointer-sized
// ConstantInt if possible.
- IntegerType *PtrTy = TD->getIntPtrType(V->getContext());
+ IntegerType *PtrTy = cast<IntegerType>(TD->getIntPtrType(V->getType()));
// Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).
if (isa<ConstantPointerNull>(V))
@@ -408,10 +421,10 @@ static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) {
/// Values vector.
static Value *
GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
- const TargetData *TD, bool isEQ, unsigned &UsedICmps) {
+ const DataLayout *TD, bool isEQ, unsigned &UsedICmps) {
Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) return 0;
-
+
// If this is an icmp against a constant, handle this as one of the cases.
if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
if (ConstantInt *C = GetConstantInt(I->getOperand(1), TD)) {
@@ -420,21 +433,21 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
Vals.push_back(C);
return I->getOperand(0);
}
-
+
// If we have "x ult 3" comparison, for example, then we can add 0,1,2 to
// the set.
ConstantRange Span =
ConstantRange::makeICmpRegion(ICI->getPredicate(), C->getValue());
-
+
// If this is an and/!= check then we want to optimize "x ugt 2" into
// x != 0 && x != 1.
if (!isEQ)
Span = Span.inverse();
-
+
// If there are a ton of values, we don't want to make a ginormous switch.
if (Span.getSetSize().ugt(8) || Span.isEmptySet())
return 0;
-
+
for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp)
Vals.push_back(ConstantInt::get(V->getContext(), Tmp));
UsedICmps++;
@@ -442,11 +455,11 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
}
return 0;
}
-
+
// Otherwise, we can only handle an | or &, depending on isEQ.
if (I->getOpcode() != (isEQ ? Instruction::Or : Instruction::And))
return 0;
-
+
unsigned NumValsBeforeLHS = Vals.size();
unsigned UsedICmpsBeforeLHS = UsedICmps;
if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, TD,
@@ -467,12 +480,12 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
Extra = I->getOperand(1);
return LHS;
}
-
+
Vals.resize(NumValsBeforeLHS);
UsedICmps = UsedICmpsBeforeLHS;
return 0;
}
-
+
// If the LHS can't be folded in, but Extra is available and RHS can, try to
// use LHS as Extra.
if (Extra == 0 || Extra == I->getOperand(0)) {
@@ -484,7 +497,7 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
assert(Vals.size() == NumValsBeforeLHS);
Extra = OldExtra;
}
-
+
return 0;
}
@@ -556,11 +569,7 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
/// in the list that match the specified block.
static void EliminateBlockCases(BasicBlock *BB,
std::vector<ValueEqualityComparisonCase> &Cases) {
- for (unsigned i = 0, e = Cases.size(); i != e; ++i)
- if (Cases[i].Dest == BB) {
- Cases.erase(Cases.begin()+i);
- --i; --e;
- }
+ Cases.erase(std::remove(Cases.begin(), Cases.end(), BB), Cases.end());
}
/// ValuesOverlap - Return true if there are any keys in C1 that exist in C2 as
@@ -615,6 +624,9 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
assert(ThisVal && "This isn't a value comparison!!");
if (ThisVal != PredVal) return false; // Different predicates.
+ // TODO: Preserve branch weight metadata, similarly to how
+ // FoldValueComparisonIntoPredecessors preserves it.
+
// Find out information about when control will move from Pred to TI's block.
std::vector<ValueEqualityComparisonCase> PredCases;
BasicBlock *PredDef = GetValueEqualityComparisonCases(Pred->getTerminator(),
@@ -634,7 +646,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
// can simplify TI.
if (!ValuesOverlap(PredCases, ThisCases))
return false;
-
+
if (isa<BranchInst>(TI)) {
// Okay, one of the successors of this condbr is dead. Convert it to a
// uncond br.
@@ -652,7 +664,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
EraseTerminatorInstAndDCECond(TI);
return true;
}
-
+
SwitchInst *SI = cast<SwitchInst>(TI);
// Okay, TI has cases that are statically dead, prune them away.
SmallPtrSet<Constant*, 16> DeadCases;
@@ -662,18 +674,37 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
<< "Through successor TI: " << *TI);
+ // Collect branch weights into a vector.
+ SmallVector<uint32_t, 8> Weights;
+ MDNode* MD = SI->getMetadata(LLVMContext::MD_prof);
+ bool HasWeight = MD && (MD->getNumOperands() == 2 + SI->getNumCases());
+ if (HasWeight)
+ for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
+ ++MD_i) {
+ ConstantInt* CI = dyn_cast<ConstantInt>(MD->getOperand(MD_i));
+ assert(CI);
+ Weights.push_back(CI->getValue().getZExtValue());
+ }
for (SwitchInst::CaseIt i = SI->case_end(), e = SI->case_begin(); i != e;) {
--i;
if (DeadCases.count(i.getCaseValue())) {
+ if (HasWeight) {
+ std::swap(Weights[i.getCaseIndex()+1], Weights.back());
+ Weights.pop_back();
+ }
i.getCaseSuccessor()->removePredecessor(TI->getParent());
SI->removeCase(i);
}
}
+ if (HasWeight && Weights.size() >= 2)
+ SI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(SI->getParent()->getContext()).
+ createBranchWeights(Weights));
DEBUG(dbgs() << "Leaving: " << *TI << "\n");
return true;
}
-
+
// Otherwise, TI's block must correspond to some matched value. Find out
// which value (or set of values) this is.
ConstantInt *TIV = 0;
@@ -729,8 +760,8 @@ namespace {
}
static int ConstantIntSortPredicate(const void *P1, const void *P2) {
- const ConstantInt *LHS = *(const ConstantInt**)P1;
- const ConstantInt *RHS = *(const ConstantInt**)P2;
+ const ConstantInt *LHS = *(const ConstantInt*const*)P1;
+ const ConstantInt *RHS = *(const ConstantInt*const*)P2;
if (LHS->getValue().ult(RHS->getValue()))
return 1;
if (LHS->getValue() == RHS->getValue())
@@ -738,6 +769,56 @@ static int ConstantIntSortPredicate(const void *P1, const void *P2) {
return -1;
}
+static inline bool HasBranchWeights(const Instruction* I) {
+ MDNode* ProfMD = I->getMetadata(LLVMContext::MD_prof);
+ if (ProfMD && ProfMD->getOperand(0))
+ if (MDString* MDS = dyn_cast<MDString>(ProfMD->getOperand(0)))
+ return MDS->getString().equals("branch_weights");
+
+ return false;
+}
+
+/// Get the weights of a given TerminatorInst; the default weight is at the
+/// front of the vector. If TI is a conditional eq branch, the branch-weight
+/// entries have to be swapped so the default weight stays in front.
+static void GetBranchWeights(TerminatorInst *TI,
+ SmallVectorImpl<uint64_t> &Weights) {
+ MDNode* MD = TI->getMetadata(LLVMContext::MD_prof);
+ assert(MD);
+ for (unsigned i = 1, e = MD->getNumOperands(); i < e; ++i) {
+ ConstantInt* CI = dyn_cast<ConstantInt>(MD->getOperand(i));
+ assert(CI);
+ Weights.push_back(CI->getValue().getZExtValue());
+ }
+
+ // If TI is a conditional eq, the default case is the false case,
+ // and the corresponding branch-weight data is at index 2. We swap the
+ // default weight to be the first entry.
+ if (BranchInst* BI = dyn_cast<BranchInst>(TI)) {
+ assert(Weights.size() == 2);
+ ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
+ if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
+ std::swap(Weights.front(), Weights.back());
+ }
+}
+
+/// Keeps halving all of the weights until every one fits in a uint32_t. A
+/// single halving is not enough: merged weights are products of 32-bit
+/// weights, so they may need up to 32 halvings to fit.
+static void FitWeights(MutableArrayRef<uint64_t> Weights) {
+ while (true) {
+ bool Halve = false;
+ for (unsigned i = 0; i < Weights.size(); ++i)
+ if (Weights[i] > UINT_MAX) {
+ Halve = true;
+ break;
+ }
+
+ if (!Halve)
+ return;
+
+ for (unsigned i = 0; i < Weights.size(); ++i)
+ Weights[i] /= 2;
+ }
+}
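+
+// E.g. (illustrative): {1ULL << 40, 3} is halved nine times, giving
+// {1 << 31, 0}; small weights can degrade to zero, which is tolerable
+// since only the ratios matter to branch-weight consumers.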
+
/// FoldValueComparisonIntoPredecessors - The specified terminator is a value
/// equality comparison instruction (either a switch or a branch on "X == c").
/// See if any of the predecessors of the terminator block are value comparisons
@@ -770,6 +851,31 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
// build.
SmallVector<BasicBlock*, 8> NewSuccessors;
+ // Update the branch weight metadata along the way
+ SmallVector<uint64_t, 8> Weights;
+ bool PredHasWeights = HasBranchWeights(PTI);
+ bool SuccHasWeights = HasBranchWeights(TI);
+
+ if (PredHasWeights) {
+ GetBranchWeights(PTI, Weights);
+ // Branch-weight metadata is inconsistent here.
+ if (Weights.size() != 1 + PredCases.size())
+ PredHasWeights = SuccHasWeights = false;
+ } else if (SuccHasWeights)
+ // If there are no predecessor weights but there are successor weights,
+ // populate Weights with 1, which will later be scaled to the sum of
+ // successor's weights
+ Weights.assign(1 + PredCases.size(), 1);
+
+ SmallVector<uint64_t, 8> SuccWeights;
+ if (SuccHasWeights) {
+ GetBranchWeights(TI, SuccWeights);
+ // Branch-weight metadata is inconsistent here.
+ if (SuccWeights.size() != 1 + BBCases.size())
+ PredHasWeights = SuccHasWeights = false;
+ } else if (PredHasWeights)
+ SuccWeights.assign(1 + BBCases.size(), 1);
+
if (PredDefault == BB) {
// If this is the default destination from PTI, only the edges in TI
// that don't occur in PTI, or that branch to BB will be activated.
@@ -780,6 +886,14 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
else {
// The default destination is BB, we don't need explicit targets.
std::swap(PredCases[i], PredCases.back());
+
+ if (PredHasWeights || SuccHasWeights) {
+ // Increase weight for the default case.
+ Weights[0] += Weights[i+1];
+ std::swap(Weights[i+1], Weights.back());
+ Weights.pop_back();
+ }
+
PredCases.pop_back();
--i; --e;
}
@@ -790,21 +904,47 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
PredDefault = BBDefault;
NewSuccessors.push_back(BBDefault);
}
+
+ unsigned CasesFromPred = Weights.size();
+ uint64_t ValidTotalSuccWeight = 0;
for (unsigned i = 0, e = BBCases.size(); i != e; ++i)
if (!PTIHandled.count(BBCases[i].Value) &&
BBCases[i].Dest != BBDefault) {
PredCases.push_back(BBCases[i]);
NewSuccessors.push_back(BBCases[i].Dest);
+ if (SuccHasWeights || PredHasWeights) {
+ // The default weight is at index 0, so weight for the ith case
+ // should be at index i+1. Scale the cases from successor by
+ // PredDefaultWeight (Weights[0]).
+ Weights.push_back(Weights[0] * SuccWeights[i+1]);
+ ValidTotalSuccWeight += SuccWeights[i+1];
+ }
}
+ if (SuccHasWeights || PredHasWeights) {
+ ValidTotalSuccWeight += SuccWeights[0];
+ // Scale the cases from predecessor by ValidTotalSuccWeight.
+ for (unsigned i = 1; i < CasesFromPred; ++i)
+ Weights[i] *= ValidTotalSuccWeight;
+ // Scale the default weight by SuccDefaultWeight (SuccWeights[0]).
+ Weights[0] *= SuccWeights[0];
+ }
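+
+ // Worked example of the scaling above (illustrative numbers): pred
+ // weights {default: 3, c1: 5} and successor weights {default: 2, c2: 7}
+ // merge to c1 = 5*(2+7) = 45, c2 = 3*7 = 21, default = 3*2 = 6, which
+ // preserves P(c1) = 5/8, P(c2) = (3/8)*(7/9) and P(def) = (3/8)*(2/9).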
} else {
// If this is not the default destination from PSI, only the edges
// in SI that occur in PSI with a destination of BB will be
// activated.
std::set<ConstantInt*, ConstantIntOrdering> PTIHandled;
+ std::map<ConstantInt*, uint64_t> WeightsForHandled;
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
if (PredCases[i].Dest == BB) {
PTIHandled.insert(PredCases[i].Value);
+
+ if (PredHasWeights || SuccHasWeights) {
+ WeightsForHandled[PredCases[i].Value] = Weights[i+1];
+ std::swap(Weights[i+1], Weights.back());
+ Weights.pop_back();
+ }
+
std::swap(PredCases[i], PredCases.back());
PredCases.pop_back();
--i; --e;
@@ -815,6 +955,8 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
for (unsigned i = 0, e = BBCases.size(); i != e; ++i)
if (PTIHandled.count(BBCases[i].Value)) {
// If this is one we are capable of getting...
+ if (PredHasWeights || SuccHasWeights)
+ Weights.push_back(WeightsForHandled[BBCases[i].Value]);
PredCases.push_back(BBCases[i]);
NewSuccessors.push_back(BBCases[i].Dest);
PTIHandled.erase(BBCases[i].Value);// This constant is taken care of
@@ -822,9 +964,11 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
// If there are any constants vectored to BB that TI doesn't handle,
// they must go to the default destination of TI.
- for (std::set<ConstantInt*, ConstantIntOrdering>::iterator I =
+ for (std::set<ConstantInt*, ConstantIntOrdering>::iterator I =
PTIHandled.begin(),
E = PTIHandled.end(); I != E; ++I) {
+ if (PredHasWeights || SuccHasWeights)
+ Weights.push_back(WeightsForHandled[*I]);
PredCases.push_back(ValueEqualityComparisonCase(*I, BBDefault));
NewSuccessors.push_back(BBDefault);
}
@@ -839,7 +983,7 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
Builder.SetInsertPoint(PTI);
// Convert pointer to int before we switch.
if (CV->getType()->isPointerTy()) {
- assert(TD && "Cannot switch on pointer without TargetData");
+ assert(TD && "Cannot switch on pointer without DataLayout");
CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getContext()),
"magicptr");
}
@@ -851,6 +995,17 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
NewSI->addCase(PredCases[i].Value, PredCases[i].Dest);
+ if (PredHasWeights || SuccHasWeights) {
+ // Halve the weights if any of them cannot fit in a uint32_t
+ FitWeights(Weights);
+
+ SmallVector<uint32_t, 8> MDWeights(Weights.begin(), Weights.end());
+
+ NewSI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(BB->getContext()).
+ createBranchWeights(MDWeights));
+ }
+
EraseTerminatorInstAndDCECond(PTI);
// Okay, last check. If BB is still a successor of PTI, then we must
@@ -984,11 +1139,11 @@ HoistTerminator:
Value *BB1V = PN->getIncomingValueForBlock(BB1);
Value *BB2V = PN->getIncomingValueForBlock(BB2);
if (BB1V == BB2V) continue;
-
+
// These values do not agree. Insert a select instruction before NT
// that determines the right value.
SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)];
- if (SI == 0)
+ if (SI == 0)
SI = cast<SelectInst>
(Builder.CreateSelect(BI->getCondition(), BB1V, BB2V,
BB1V->getName()+"."+BB2V->getName()));
@@ -1008,6 +1163,175 @@ HoistTerminator:
return true;
}
+/// SinkThenElseCodeToEnd - Given an unconditional branch that goes to BBEnd,
+/// check whether BBEnd has exactly two predecessors and the other predecessor
+/// ends with an unconditional branch. If so, sink any code that is common to
+/// the two predecessors into BBEnd.
+static bool SinkThenElseCodeToEnd(BranchInst *BI1) {
+ assert(BI1->isUnconditional());
+ BasicBlock *BB1 = BI1->getParent();
+ BasicBlock *BBEnd = BI1->getSuccessor(0);
+
+ // Check that BBEnd has two predecessors and the other predecessor ends with
+ // an unconditional branch.
+ pred_iterator PI = pred_begin(BBEnd), PE = pred_end(BBEnd);
+ BasicBlock *Pred0 = *PI++;
+ if (PI == PE) // Only one predecessor.
+ return false;
+ BasicBlock *Pred1 = *PI++;
+ if (PI != PE) // More than two predecessors.
+ return false;
+ BasicBlock *BB2 = (Pred0 == BB1) ? Pred1 : Pred0;
+ BranchInst *BI2 = dyn_cast<BranchInst>(BB2->getTerminator());
+ if (!BI2 || !BI2->isUnconditional())
+ return false;
+
+ // Gather the PHI nodes in BBEnd.
+ std::map<Value*, std::pair<Value*, PHINode*> > MapValueFromBB1ToBB2;
+ Instruction *FirstNonPhiInBBEnd = 0;
+ for (BasicBlock::iterator I = BBEnd->begin(), E = BBEnd->end();
+ I != E; ++I) {
+ if (PHINode *PN = dyn_cast<PHINode>(I)) {
+ Value *BB1V = PN->getIncomingValueForBlock(BB1);
+ Value *BB2V = PN->getIncomingValueForBlock(BB2);
+ MapValueFromBB1ToBB2[BB1V] = std::make_pair(BB2V, PN);
+ } else {
+ FirstNonPhiInBBEnd = &*I;
+ break;
+ }
+ }
+ if (!FirstNonPhiInBBEnd)
+ return false;
+
+ // This does very trivial matching, with limited scanning, to find identical
+ // instructions in the two blocks. We scan backward for obviously identical
+ // instructions in an identical order.
+ BasicBlock::InstListType::reverse_iterator RI1 = BB1->getInstList().rbegin(),
+ RE1 = BB1->getInstList().rend(), RI2 = BB2->getInstList().rbegin(),
+ RE2 = BB2->getInstList().rend();
+ // Skip debug info.
+ while (RI1 != RE1 && isa<DbgInfoIntrinsic>(&*RI1)) ++RI1;
+ if (RI1 == RE1)
+ return false;
+ while (RI2 != RE2 && isa<DbgInfoIntrinsic>(&*RI2)) ++RI2;
+ if (RI2 == RE2)
+ return false;
+ // Skip the unconditional branches.
+ ++RI1;
+ ++RI2;
+
+ bool Changed = false;
+ while (RI1 != RE1 && RI2 != RE2) {
+ // Skip debug info.
+ while (RI1 != RE1 && isa<DbgInfoIntrinsic>(&*RI1)) ++RI1;
+ if (RI1 == RE1)
+ return Changed;
+ while (RI2 != RE2 && isa<DbgInfoIntrinsic>(&*RI2)) ++RI2;
+ if (RI2 == RE2)
+ return Changed;
+
+ Instruction *I1 = &*RI1, *I2 = &*RI2;
+ // I1 and I2 should have a single use in the same PHI node, and they
+ // perform the same operation.
+ // We cannot sink control-flow instructions (PHIs, terminators, landing
+ // pads), allocas, or anything that may have side effects or read or
+ // write memory (e.g. volatile loads).
+ if (isa<PHINode>(I1) || isa<PHINode>(I2) ||
+ isa<TerminatorInst>(I1) || isa<TerminatorInst>(I2) ||
+ isa<LandingPadInst>(I1) || isa<LandingPadInst>(I2) ||
+ isa<AllocaInst>(I1) || isa<AllocaInst>(I2) ||
+ I1->mayHaveSideEffects() || I2->mayHaveSideEffects() ||
+ I1->mayReadOrWriteMemory() || I2->mayReadOrWriteMemory() ||
+ !I1->hasOneUse() || !I2->hasOneUse() ||
+ MapValueFromBB1ToBB2.find(I1) == MapValueFromBB1ToBB2.end() ||
+ MapValueFromBB1ToBB2[I1].first != I2)
+ return Changed;
+
+ // Check whether we should swap the operands of ICmpInst.
+ ICmpInst *ICmp1 = dyn_cast<ICmpInst>(I1), *ICmp2 = dyn_cast<ICmpInst>(I2);
+ bool SwapOpnds = false;
+ if (ICmp1 && ICmp2 &&
+ ICmp1->getOperand(0) != ICmp2->getOperand(0) &&
+ ICmp1->getOperand(1) != ICmp2->getOperand(1) &&
+ (ICmp1->getOperand(0) == ICmp2->getOperand(1) ||
+ ICmp1->getOperand(1) == ICmp2->getOperand(0))) {
+ ICmp2->swapOperands();
+ SwapOpnds = true;
+ }
+ if (!I1->isSameOperationAs(I2)) {
+ if (SwapOpnds)
+ ICmp2->swapOperands();
+ return Changed;
+ }
+
+ // The operands should be either the same or they need to be generated
+ // with a PHI node after sinking. We only handle the case where there is
+ // a single pair of different operands.
+ Value *DifferentOp1 = 0, *DifferentOp2 = 0;
+ unsigned Op1Idx = 0;
+ for (unsigned I = 0, E = I1->getNumOperands(); I != E; ++I) {
+ if (I1->getOperand(I) == I2->getOperand(I))
+ continue;
+ // Early exit if we have more than one pair of different operands, or
+ // the different operand is already in MapValueFromBB1ToBB2.
+ // Early exit if we need a PHI node to replace a constant.
+ if (DifferentOp1 ||
+ MapValueFromBB1ToBB2.find(I1->getOperand(I)) !=
+ MapValueFromBB1ToBB2.end() ||
+ isa<Constant>(I1->getOperand(I)) ||
+ isa<Constant>(I2->getOperand(I))) {
+ // If we can't sink the instructions, undo the swapping.
+ if (SwapOpnds)
+ ICmp2->swapOperands();
+ return Changed;
+ }
+ DifferentOp1 = I1->getOperand(I);
+ Op1Idx = I;
+ DifferentOp2 = I2->getOperand(I);
+ }
+
+ // Insert the pair of different operands into MapValueFromBB1ToBB2 and
+ // remove (I1, I2) from MapValueFromBB1ToBB2.
+ if (DifferentOp1) {
+ PHINode *NewPN = PHINode::Create(DifferentOp1->getType(), 2,
+ DifferentOp1->getName() + ".sink",
+ BBEnd->begin());
+ MapValueFromBB1ToBB2[DifferentOp1] = std::make_pair(DifferentOp2, NewPN);
+ // I1 should use NewPN instead of DifferentOp1.
+ I1->setOperand(Op1Idx, NewPN);
+ NewPN->addIncoming(DifferentOp1, BB1);
+ NewPN->addIncoming(DifferentOp2, BB2);
+ DEBUG(dbgs() << "Create PHI node " << *NewPN << "\n";);
+ }
+ PHINode *OldPN = MapValueFromBB1ToBB2[I1].second;
+ MapValueFromBB1ToBB2.erase(I1);
+
+ DEBUG(dbgs() << "SINK common instructions " << *I1 << "\n";);
+ DEBUG(dbgs() << " " << *I2 << "\n";);
+ // We need to update RE1 and RE2 if we are going to sink the first
+ // instruction in the basic block down.
+ bool UpdateRE1 = (I1 == BB1->begin()), UpdateRE2 = (I2 == BB2->begin());
+ // Sink the instruction.
+ BBEnd->getInstList().splice(FirstNonPhiInBBEnd, BB1->getInstList(), I1);
+ if (!OldPN->use_empty())
+ OldPN->replaceAllUsesWith(I1);
+ OldPN->eraseFromParent();
+
+ if (!I2->use_empty())
+ I2->replaceAllUsesWith(I1);
+ I1->intersectOptionalDataWith(I2);
+ I2->eraseFromParent();
+
+ if (UpdateRE1)
+ RE1 = BB1->getInstList().rend();
+ if (UpdateRE2)
+ RE2 = BB2->getInstList().rend();
+ FirstNonPhiInBBEnd = I1;
+ NumSinkCommons++;
+ Changed = true;
+ }
+ return Changed;
+}
+
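To see what the sink achieves, a hand-written illustration (invented IR, not taken from this patch):

  ; before: both predecessors compute the same add on different operands
  bb1:   %x1 = add i32 %a, 1
         br label %bbend
  bb2:   %x2 = add i32 %b, 1
         br label %bbend
  bbend: %p = phi i32 [ %x1, %bb1 ], [ %x2, %bb2 ]

  ; after sinking
  bbend: %a.sink = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
         %x1 = add i32 %a.sink, 1

The two adds match except for one operand pair (%a vs. %b), so the function above creates a PHI for that pair (with the ".sink" suffix), sinks one copy of the add into BBEnd, and replaces the old PHI %p with the sunk instruction.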
/// SpeculativelyExecuteBB - Given a conditional branch that goes to BB1
/// and an BB2 and the only successor of BB1 is BB2, hoist simple code
/// (for now, restricted to a single instruction that's side effect free) from
@@ -1056,7 +1380,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
// Do not hoist the instruction if any of its operands are defined but not
// used in this BB. The transformation will prevent the operand from
// being sunk into the use block.
- for (User::op_iterator i = HInst->op_begin(), e = HInst->op_end();
+ for (User::op_iterator i = HInst->op_begin(), e = HInst->op_end();
i != e; ++i) {
Instruction *OpI = dyn_cast<Instruction>(*i);
if (OpI && OpI->getParent() == BIParent &&
@@ -1112,7 +1436,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
// as well.
if (PHIs.empty())
return false;
-
+
// If we get here, we can hoist the instruction and if-convert.
DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *BB1 << "\n";);
@@ -1162,13 +1486,13 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
BranchInst *BI = cast<BranchInst>(BB->getTerminator());
unsigned Size = 0;
-
+
for (BasicBlock::iterator BBI = BB->begin(); &*BBI != BI; ++BBI) {
if (isa<DbgInfoIntrinsic>(BBI))
continue;
if (Size > 10) return false; // Don't clone large BB's.
++Size;
-
+
// We can only support instructions that do not define values that are
// live outside of the current basic block.
for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
@@ -1176,7 +1500,7 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
Instruction *U = cast<Instruction>(*UI);
if (U->getParent() != BB || isa<PHINode>(U)) return false;
}
-
+
// Looks ok, continue checking.
}
@@ -1187,38 +1511,38 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
/// that is defined in the same block as the branch and if any PHI entries are
/// constants, thread edges corresponding to that entry to be branches to their
/// ultimate destination.
-static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) {
+static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
BasicBlock *BB = BI->getParent();
PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
// NOTE: we currently cannot transform this case if the PHI node is used
// outside of the block.
if (!PN || PN->getParent() != BB || !PN->hasOneUse())
return false;
-
+
// Degenerate case of a single entry PHI.
if (PN->getNumIncomingValues() == 1) {
FoldSingleEntryPHINodes(PN->getParent());
- return true;
+ return true;
}
// Now we know that this block has multiple preds and two succs.
if (!BlockIsSimpleEnoughToThreadThrough(BB)) return false;
-
+
// Okay, this is a simple enough basic block. See if any phi values are
// constants.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantInt *CB = dyn_cast<ConstantInt>(PN->getIncomingValue(i));
if (CB == 0 || !CB->getType()->isIntegerTy(1)) continue;
-
+
// Okay, we now know that all edges from PredBB should be revectored to
// branch to RealDest.
BasicBlock *PredBB = PN->getIncomingBlock(i);
BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue());
-
+
if (RealDest == BB) continue; // Skip self loops.
// Skip if the predecessor's terminator is an indirect branch.
if (isa<IndirectBrInst>(PredBB->getTerminator())) continue;
-
+
// The dest block might have PHI nodes, other predecessors and other
// difficult cases. Instead of being smart about this, just insert a new
// block that jumps to the destination block, effectively splitting
@@ -1227,7 +1551,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) {
RealDest->getName()+".critedge",
RealDest->getParent(), RealDest);
BranchInst::Create(RealDest, EdgeBB);
-
+
// Update PHI nodes.
AddPredecessorToBlock(RealDest, EdgeBB, BB);
@@ -1244,7 +1568,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) {
// Clone the instruction.
Instruction *N = BBI->clone();
if (BBI->hasName()) N->setName(BBI->getName()+".c");
-
+
// Update operands due to translation.
for (User::op_iterator i = N->op_begin(), e = N->op_end();
i != e; ++i) {
@@ -1252,7 +1576,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) {
if (PI != TranslateMap.end())
*i = PI->second;
}
-
+
// Check for trivial simplification.
if (Value *V = SimplifyInstruction(N, TD)) {
TranslateMap[BBI] = V;
@@ -1283,7 +1607,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) {
/// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry
/// PHI node, see if we can eliminate it.
-static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
+static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
// Ok, this is a two entry PHI node. Check to see if this is a simple "if
// statement", which has a very simple dominance structure. Basically, we
// are trying to find the condition that is being branched on, which
@@ -1297,7 +1621,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
// Don't bother if the branch will be constant folded trivially.
isa<ConstantInt>(IfCond))
return false;
-
+
// Okay, we found that we can merge this two-entry phi node into a select.
// Doing so would require us to fold *all* two entry phi nodes in this block.
// At some point this becomes non-profitable (particularly if the target
@@ -1307,14 +1631,14 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++NumPhis, ++I)
if (NumPhis > 2)
return false;
-
+
// Loop over the PHI's seeing if we can promote them all to select
// instructions. While we are at it, keep track of the instructions
// that need to be moved to the dominating block.
SmallPtrSet<Instruction*, 4> AggressiveInsts;
unsigned MaxCostVal0 = PHINodeFoldingThreshold,
MaxCostVal1 = PHINodeFoldingThreshold;
-
+
for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {
PHINode *PN = cast<PHINode>(II++);
if (Value *V = SimplifyInstruction(PN, TD)) {
@@ -1322,19 +1646,19 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
PN->eraseFromParent();
continue;
}
-
+
if (!DominatesMergePoint(PN->getIncomingValue(0), BB, &AggressiveInsts,
MaxCostVal0) ||
!DominatesMergePoint(PN->getIncomingValue(1), BB, &AggressiveInsts,
MaxCostVal1))
return false;
}
-
+
// If we folded the first phi, PN dangles at this point. Refresh it. If
// we ran out of PHIs then we simplified them all.
PN = dyn_cast<PHINode>(BB->begin());
if (PN == 0) return true;
-
+
// Don't fold i1 branches on PHIs which contain binary operators. These can
// often be turned into switches and other things.
if (PN->getType()->isIntegerTy(1) &&
@@ -1342,7 +1666,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
isa<BinaryOperator>(PN->getIncomingValue(1)) ||
isa<BinaryOperator>(IfCond)))
return false;
-
+
// If all PHI nodes are promotable, check to make sure that all
// instructions in the predecessor blocks can be promoted as well. If
// not, we won't be able to get rid of the control flow, so it's not
@@ -1362,7 +1686,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
return false;
}
}
-
+
if (cast<BranchInst>(IfBlock2->getTerminator())->isConditional()) {
IfBlock2 = 0;
} else {
@@ -1375,15 +1699,15 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
return false;
}
}
-
+
DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond << " T: "
<< IfTrue->getName() << " F: " << IfFalse->getName() << "\n");
-
+
// If we can still promote the PHI nodes after this gauntlet of tests,
// do all of the PHI's now.
Instruction *InsertPt = DomBlock->getTerminator();
IRBuilder<true, NoFolder> Builder(InsertPt);
-
+
// Move all 'aggressive' instructions, which are defined in the
// conditional parts of the if's up to the dominating block.
if (IfBlock1)
@@ -1394,19 +1718,19 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
DomBlock->getInstList().splice(InsertPt,
IfBlock2->getInstList(), IfBlock2->begin(),
IfBlock2->getTerminator());
-
+
while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
// Change the PHI node into a select instruction.
Value *TrueVal = PN->getIncomingValue(PN->getIncomingBlock(0) == IfFalse);
Value *FalseVal = PN->getIncomingValue(PN->getIncomingBlock(0) == IfTrue);
-
- SelectInst *NV =
+
+ SelectInst *NV =
cast<SelectInst>(Builder.CreateSelect(IfCond, TrueVal, FalseVal, ""));
PN->replaceAllUsesWith(NV);
NV->takeName(PN);
PN->eraseFromParent();
}
-
+
// At this point, IfBlock1 and IfBlock2 are both empty, so our if statement
// has been flattened. Change DomBlock to jump directly to our new block to
// avoid other simplifycfg's kicking in on the diamond.
@@ -1420,14 +1744,14 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
/// SimplifyCondBranchToTwoReturns - If we found a conditional branch that goes
/// to two returning blocks, try to merge them together into one return,
/// introducing a select if the return values disagree.
-static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
+static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
IRBuilder<> &Builder) {
assert(BI->isConditional() && "Must be a conditional branch");
BasicBlock *TrueSucc = BI->getSuccessor(0);
BasicBlock *FalseSucc = BI->getSuccessor(1);
ReturnInst *TrueRet = cast<ReturnInst>(TrueSucc->getTerminator());
ReturnInst *FalseRet = cast<ReturnInst>(FalseSucc->getTerminator());
-
+
// Check to ensure both blocks are empty (just a return) or optionally empty
// with PHI nodes. If there are other instructions, merging would cause extra
// computation on one path or the other.
@@ -1447,12 +1771,12 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
EraseTerminatorInstAndDCECond(BI);
return true;
}
-
+
// Otherwise, figure out what the true and false return values are
// so we can insert a new select instruction.
Value *TrueValue = TrueRet->getReturnValue();
Value *FalseValue = FalseRet->getReturnValue();
-
+
// Unwrap any PHI nodes in the return blocks.
if (PHINode *TVPN = dyn_cast_or_null<PHINode>(TrueValue))
if (TVPN->getParent() == TrueSucc)
@@ -1460,7 +1784,7 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
if (PHINode *FVPN = dyn_cast_or_null<PHINode>(FalseValue))
if (FVPN->getParent() == FalseSucc)
FalseValue = FVPN->getIncomingValueForBlock(BI->getParent());
-
+
// In order for this transformation to be safe, we must be able to
// unconditionally execute both operands to the return. This is
// normally the case, but we could have a potentially-trapping
@@ -1472,12 +1796,12 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
if (ConstantExpr *FCV = dyn_cast_or_null<ConstantExpr>(FalseValue))
if (FCV->canTrap())
return false;
-
+
// Okay, we collected all the mapped values and checked them for sanity, and
// defined to really do this transformation. First, update the CFG.
TrueSucc->removePredecessor(BI->getParent());
FalseSucc->removePredecessor(BI->getParent());
-
+
// Insert select instructions where needed.
Value *BrCond = BI->getCondition();
if (TrueValue) {
@@ -1491,15 +1815,15 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
}
}
- Value *RI = !TrueValue ?
+ Value *RI = !TrueValue ?
Builder.CreateRetVoid() : Builder.CreateRet(TrueValue);
(void) RI;
-
+
DEBUG(dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:"
<< "\n " << *BI << "NewRet = " << *RI
<< "TRUEBLOCK: " << *TrueSucc << "FALSEBLOCK: "<< *FalseSucc);
-
+
EraseTerminatorInstAndDCECond(BI);
return true;
@@ -1510,7 +1834,7 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
/// parameters and return true, or returns false if no or invalid metadata was
/// found.
static bool ExtractBranchMetadata(BranchInst *BI,
- APInt &ProbTrue, APInt &ProbFalse) {
+ uint64_t &ProbTrue, uint64_t &ProbFalse) {
assert(BI->isConditional() &&
"Looking for probabilities on unconditional branch?");
MDNode *ProfileData = BI->getMetadata(LLVMContext::MD_prof);
@@ -1518,35 +1842,11 @@ static bool ExtractBranchMetadata(BranchInst *BI,
ConstantInt *CITrue = dyn_cast<ConstantInt>(ProfileData->getOperand(1));
ConstantInt *CIFalse = dyn_cast<ConstantInt>(ProfileData->getOperand(2));
if (!CITrue || !CIFalse) return false;
- ProbTrue = CITrue->getValue();
- ProbFalse = CIFalse->getValue();
- assert(ProbTrue.getBitWidth() == 32 && ProbFalse.getBitWidth() == 32 &&
- "Branch probability metadata must be 32-bit integers");
+ ProbTrue = CITrue->getValue().getZExtValue();
+ ProbFalse = CIFalse->getValue().getZExtValue();
return true;
}
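For reference, the !prof node consumed here is the standard branch_weights form attached to a conditional branch; in the IR syntax of this LLVM era it looks like (weights invented):

  br i1 %cmp, label %then, label %else, !prof !0
  !0 = metadata !{metadata !"branch_weights", i32 20, i32 12}

Operand 1 of the node (20) is the weight of the true edge and operand 2 (12) that of the false edge, matching the getOperand(1)/getOperand(2) reads above.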
-/// MultiplyAndLosePrecision - Multiplies A and B, then returns the result. In
-/// the event of overflow, logically-shifts all four inputs right until the
-/// multiply fits.
-static APInt MultiplyAndLosePrecision(APInt &A, APInt &B, APInt &C, APInt &D,
- unsigned &BitsLost) {
- BitsLost = 0;
- bool Overflow = false;
- APInt Result = A.umul_ov(B, Overflow);
- if (Overflow) {
- APInt MaxB = APInt::getMaxValue(A.getBitWidth()).udiv(A);
- do {
- B = B.lshr(1);
- ++BitsLost;
- } while (B.ugt(MaxB));
- A = A.lshr(BitsLost);
- C = C.lshr(BitsLost);
- D = D.lshr(BitsLost);
- Result = A * B;
- }
- return Result;
-}
-
/// checkCSEInPredecessor - Return true if the given instruction is available
/// in its predecessor block. If yes, the instruction will be removed.
///
@@ -1600,7 +1900,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
if (Cond == 0)
return false;
}
-
+
if (Cond == 0 || (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
Cond->getParent() != BB || !Cond->hasOneUse())
return false;
@@ -1623,7 +1923,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
isSafeToSpeculativelyExecute(FrontIt)) {
BonusInst = &*FrontIt;
++FrontIt;
-
+
// Ignore dbg intrinsics.
while (isa<DbgInfoIntrinsic>(FrontIt)) ++FrontIt;
}
@@ -1631,13 +1931,13 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Only a single bonus inst is allowed.
if (&*FrontIt != Cond)
return false;
-
+
// Make sure the instruction after the condition is the cond branch.
BasicBlock::iterator CondIt = Cond; ++CondIt;
// Ignore dbg intrinsics.
while (isa<DbgInfoIntrinsic>(CondIt)) ++CondIt;
-
+
if (&*CondIt != BI)
return false;
@@ -1649,7 +1949,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Cond->getOperand(1)))
if (CE->canTrap())
return false;
-
+
// Finally, don't infinitely unroll conditional loops.
BasicBlock *TrueDest = BI->getSuccessor(0);
BasicBlock *FalseDest = (BI->isConditional()) ? BI->getSuccessor(1) : 0;
@@ -1659,22 +1959,22 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
BasicBlock *PredBlock = *PI;
BranchInst *PBI = dyn_cast<BranchInst>(PredBlock->getTerminator());
-
+
// Check that we have two conditional branches. If there is a PHI node in
// the common successor, verify that the same value flows in from both
// blocks.
SmallVector<PHINode*, 4> PHIs;
if (PBI == 0 || PBI->isUnconditional() ||
- (BI->isConditional() &&
+ (BI->isConditional() &&
!SafeToMergeTerminators(BI, PBI)) ||
(!BI->isConditional() &&
!isProfitableToFoldUnconditional(BI, PBI, Cond, PHIs)))
continue;
-
+
// Determine if the two branches share a common destination.
- Instruction::BinaryOps Opc;
+ Instruction::BinaryOps Opc = Instruction::BinaryOpsEnd;
bool InvertPredCond = false;
-
+
if (BI->isConditional()) {
if (PBI->getSuccessor(0) == TrueDest)
Opc = Instruction::Or;
@@ -1693,7 +1993,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Ensure that any values used in the bonus instruction are also used
// by the terminator of the predecessor. This means that those values
- // must already have been resolved, so we won't be inhibiting the
+ // must already have been resolved, so we won't be inhibiting the
// out-of-order core by speculating them earlier.
if (BonusInst) {
// Collect the values used by the bonus inst
@@ -1707,47 +2007,47 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
SmallVector<std::pair<Value*, unsigned>, 4> Worklist;
Worklist.push_back(std::make_pair(PBI->getOperand(0), 0));
-
+
// Walk up to four levels back up the use-def chain of the predecessor's
// terminator to see if all those values were used. The choice of four
// levels is arbitrary, to provide a compile-time-cost bound.
while (!Worklist.empty()) {
std::pair<Value*, unsigned> Pair = Worklist.back();
Worklist.pop_back();
-
+
if (Pair.second >= 4) continue;
UsedValues.erase(Pair.first);
if (UsedValues.empty()) break;
-
+
if (Instruction *I = dyn_cast<Instruction>(Pair.first)) {
for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
OI != OE; ++OI)
Worklist.push_back(std::make_pair(OI->get(), Pair.second+1));
- }
+ }
}
-
+
if (!UsedValues.empty()) return false;
}
DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB);
- IRBuilder<> Builder(PBI);
+ IRBuilder<> Builder(PBI);
// If we need to invert the condition in the pred block to match, do so now.
if (InvertPredCond) {
Value *NewCond = PBI->getCondition();
-
+
if (NewCond->hasOneUse() && isa<CmpInst>(NewCond)) {
CmpInst *CI = cast<CmpInst>(NewCond);
CI->setPredicate(CI->getInversePredicate());
} else {
- NewCond = Builder.CreateNot(NewCond,
+ NewCond = Builder.CreateNot(NewCond,
PBI->getCondition()->getName()+".not");
}
-
+
PBI->setCondition(NewCond);
PBI->swapSuccessors();
}
-
+
// If we have a bonus inst, clone it into the predecessor block.
Instruction *NewBonus = 0;
if (BonusInst) {
@@ -1756,7 +2056,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
NewBonus->takeName(BonusInst);
BonusInst->setName(BonusInst->getName()+".old");
}
-
+
// Clone Cond into the predecessor basic block, and or/and the
// two conditions together.
Instruction *New = Cond->clone();
@@ -1764,21 +2064,60 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
PredBlock->getInstList().insert(PBI, New);
New->takeName(Cond);
Cond->setName(New->getName()+".old");
-
+
if (BI->isConditional()) {
- Instruction *NewCond =
+ Instruction *NewCond =
cast<Instruction>(Builder.CreateBinOp(Opc, PBI->getCondition(),
New, "or.cond"));
PBI->setCondition(NewCond);
+ uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight;
+ bool PredHasWeights = ExtractBranchMetadata(PBI, PredTrueWeight,
+ PredFalseWeight);
+ bool SuccHasWeights = ExtractBranchMetadata(BI, SuccTrueWeight,
+ SuccFalseWeight);
+ SmallVector<uint64_t, 8> NewWeights;
+
if (PBI->getSuccessor(0) == BB) {
+ if (PredHasWeights && SuccHasWeights) {
+ // PBI: br i1 %x, BB, FalseDest
+ // BI: br i1 %y, TrueDest, FalseDest
+ // TrueWeight is TrueWeight for PBI * TrueWeight for BI.
+ NewWeights.push_back(PredTrueWeight * SuccTrueWeight);
+ // FalseWeight is FalseWeight for PBI * TotalWeight for BI +
+ // TrueWeight for PBI * FalseWeight for BI.
+ // We assume that the total weight of a BranchInst fits in 32 bits, so
+ // the 64-bit arithmetic here cannot overflow.
+ NewWeights.push_back(PredFalseWeight * (SuccFalseWeight +
+ SuccTrueWeight) + PredTrueWeight * SuccFalseWeight);
+ }
AddPredecessorToBlock(TrueDest, PredBlock, BB);
PBI->setSuccessor(0, TrueDest);
}
if (PBI->getSuccessor(1) == BB) {
+ if (PredHasWeights && SuccHasWeights) {
+ // PBI: br i1 %x, TrueDest, BB
+ // BI: br i1 %y, TrueDest, FalseDest
+ // TrueWeight is TrueWeight for PBI * TotalWeight for BI +
+ // FalseWeight for PBI * TrueWeight for BI.
+ NewWeights.push_back(PredTrueWeight * (SuccFalseWeight +
+ SuccTrueWeight) + PredFalseWeight * SuccTrueWeight);
+ // FalseWeight is FalseWeight for PBI * FalseWeight for BI.
+ NewWeights.push_back(PredFalseWeight * SuccFalseWeight);
+ }
AddPredecessorToBlock(FalseDest, PredBlock, BB);
PBI->setSuccessor(1, FalseDest);
}
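+ // Worked example with invented numbers for the first case above
+ // (PBI: br %x, BB, FalseDest with weights {3, 5}; BI: br %y, TrueDest,
+ // FalseDest with weights {7, 2}):
+ //   new true weight = 3 * 7 = 21,
+ //   new false weight = 5 * (2 + 7) + 3 * 2 = 51,
+ // i.e. FalseDest keeps both the paths that bypass BB entirely and the
+ // paths that reach BB but take BI's false edge.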
+ if (NewWeights.size() == 2) {
+ // Halve the weights if any of them cannot fit in a uint32_t
+ FitWeights(NewWeights);
+
+ SmallVector<uint32_t, 8> MDWeights(NewWeights.begin(),NewWeights.end());
+ PBI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(BI->getContext()).
+ createBranchWeights(MDWeights));
+ } else
+ PBI->setMetadata(LLVMContext::MD_prof, NULL);
} else {
// Update PHI nodes in the common successors.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
@@ -1806,7 +2145,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Create (PBI_Cond and BI_Value) or (!PBI_Cond and PBI_C)
// PBI_C is true: (PBI_Cond and BI_Value) or (!PBI_Cond)
// is false: PBI_Cond and BI_Value
- MergedCond =
+ MergedCond =
cast<Instruction>(Builder.CreateBinOp(Instruction::And,
PBI->getCondition(), New,
"and.cond"));
@@ -1814,7 +2153,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
Instruction *NotCond =
cast<Instruction>(Builder.CreateNot(PBI->getCondition(),
"not.cond"));
- MergedCond =
+ MergedCond =
cast<Instruction>(Builder.CreateBinOp(Instruction::Or,
NotCond, MergedCond,
"or.cond"));
@@ -1833,95 +2172,11 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// TODO: If BB is reachable from all paths through PredBlock, then we
// could replace PBI's branch probabilities with BI's.
- // Merge probability data into PredBlock's branch.
- APInt A, B, C, D;
- if (PBI->isConditional() && BI->isConditional() &&
- ExtractBranchMetadata(PBI, C, D) && ExtractBranchMetadata(BI, A, B)) {
- // Given IR which does:
- // bbA:
- // br i1 %x, label %bbB, label %bbC
- // bbB:
- // br i1 %y, label %bbD, label %bbC
- // Let's call the probability that we take the edge from %bbA to %bbB
- // 'a', from %bbA to %bbC, 'b', from %bbB to %bbD 'c' and from %bbB to
- // %bbC probability 'd'.
- //
- // We transform the IR into:
- // bbA:
- // br i1 %z, label %bbD, label %bbC
- // where the probability of going to %bbD is (a*c) and going to bbC is
- // (b+a*d).
- //
- // Probabilities aren't stored as ratios directly. Using branch weights,
- // we get:
- // (a*c)% = A*C, (b+(a*d))% = A*D+B*C+B*D.
-
- // In the event of overflow, we want to drop the LSB of the input
- // probabilities.
- unsigned BitsLost;
-
- // Ignore overflow result on ProbTrue.
- APInt ProbTrue = MultiplyAndLosePrecision(A, C, B, D, BitsLost);
-
- APInt Tmp1 = MultiplyAndLosePrecision(B, D, A, C, BitsLost);
- if (BitsLost) {
- ProbTrue = ProbTrue.lshr(BitsLost*2);
- }
-
- APInt Tmp2 = MultiplyAndLosePrecision(A, D, C, B, BitsLost);
- if (BitsLost) {
- ProbTrue = ProbTrue.lshr(BitsLost*2);
- Tmp1 = Tmp1.lshr(BitsLost*2);
- }
-
- APInt Tmp3 = MultiplyAndLosePrecision(B, C, A, D, BitsLost);
- if (BitsLost) {
- ProbTrue = ProbTrue.lshr(BitsLost*2);
- Tmp1 = Tmp1.lshr(BitsLost*2);
- Tmp2 = Tmp2.lshr(BitsLost*2);
- }
-
- bool Overflow1 = false, Overflow2 = false;
- APInt Tmp4 = Tmp2.uadd_ov(Tmp3, Overflow1);
- APInt ProbFalse = Tmp4.uadd_ov(Tmp1, Overflow2);
-
- if (Overflow1 || Overflow2) {
- ProbTrue = ProbTrue.lshr(1);
- Tmp1 = Tmp1.lshr(1);
- Tmp2 = Tmp2.lshr(1);
- Tmp3 = Tmp3.lshr(1);
- Tmp4 = Tmp2 + Tmp3;
- ProbFalse = Tmp4 + Tmp1;
- }
-
- // The sum of branch weights must fit in 32-bits.
- if (ProbTrue.isNegative() && ProbFalse.isNegative()) {
- ProbTrue = ProbTrue.lshr(1);
- ProbFalse = ProbFalse.lshr(1);
- }
-
- if (ProbTrue != ProbFalse) {
- // Normalize the result.
- APInt GCD = APIntOps::GreatestCommonDivisor(ProbTrue, ProbFalse);
- ProbTrue = ProbTrue.udiv(GCD);
- ProbFalse = ProbFalse.udiv(GCD);
-
- MDBuilder MDB(BI->getContext());
- MDNode *N = MDB.createBranchWeights(ProbTrue.getZExtValue(),
- ProbFalse.getZExtValue());
- PBI->setMetadata(LLVMContext::MD_prof, N);
- } else {
- PBI->setMetadata(LLVMContext::MD_prof, NULL);
- }
- } else {
- PBI->setMetadata(LLVMContext::MD_prof, NULL);
- }
-
// Copy any debug value intrinsics into the end of PredBlock.
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
if (isa<DbgInfoIntrinsic>(*I))
I->clone()->insertBefore(PBI);
-
+
return true;
}
return false;
@@ -1936,7 +2191,7 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
BasicBlock *BB = BI->getParent();
// If this block ends with a branch instruction, and if there is a
- // predecessor that ends on a branch of the same condition, make
+ // predecessor that ends on a branch of the same condition, make
// this conditional branch redundant.
if (PBI->getCondition() == BI->getCondition() &&
PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
@@ -1945,11 +2200,11 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
if (BB->getSinglePredecessor()) {
// Turn this into a branch on constant.
bool CondIsTrue = PBI->getSuccessor(0) == BB;
- BI->setCondition(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
+ BI->setCondition(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
CondIsTrue));
return true; // Nuke the branch on constant.
}
-
+
// Otherwise, if there are multiple predecessors, insert a PHI that merges
// in the constant and simplify the block result. Subsequent passes of
// simplifycfg will thread the block.
@@ -1969,18 +2224,18 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
PBI->getCondition() == BI->getCondition() &&
PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
bool CondIsTrue = PBI->getSuccessor(0) == BB;
- NewPN->addIncoming(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
+ NewPN->addIncoming(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
CondIsTrue), P);
} else {
NewPN->addIncoming(BI->getCondition(), P);
}
}
-
+
BI->setCondition(NewPN);
return true;
}
}
-
+
// If this is a conditional branch in an empty block, and if any
// predecessors is a conditional branch to one of our destinations,
// fold the conditions into logical ops and one cond br.
@@ -1991,11 +2246,11 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
if (&*BBI != BI)
return false;
-
+
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(BI->getCondition()))
if (CE->canTrap())
return false;
-
+
int PBIOp, BIOp;
if (PBI->getSuccessor(0) == BI->getSuccessor(0))
PBIOp = BIOp = 0;
@@ -2007,31 +2262,31 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
PBIOp = BIOp = 1;
else
return false;
-
+
// Check to make sure that the other destination of this branch
// isn't BB itself. If so, this is an infinite loop that will
// keep getting unwound.
if (PBI->getSuccessor(PBIOp) == BB)
return false;
-
- // Do not perform this transformation if it would require
+
+ // Do not perform this transformation if it would require
// insertion of a large number of select instructions. For targets
// without predication/cmovs, this is a big pessimization.
BasicBlock *CommonDest = PBI->getSuccessor(PBIOp);
-
+
unsigned NumPhis = 0;
for (BasicBlock::iterator II = CommonDest->begin();
isa<PHINode>(II); ++II, ++NumPhis)
if (NumPhis > 2) // Disable this xform.
return false;
-
+
// Finally, if everything is ok, fold the branches to logical ops.
BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1);
-
+
DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent()
<< "AND: " << *BI->getParent());
-
-
+
+
// If OtherDest *is* BB, then BB is a basic block with a single conditional
// branch in it, where one edge (OtherDest) goes back to itself but the other
// exits. We don't *know* that the program avoids the infinite loop
@@ -2046,13 +2301,13 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
"infloop", BB->getParent());
BranchInst::Create(InfLoopBlock, InfLoopBlock);
OtherDest = InfLoopBlock;
- }
-
+ }
+
DEBUG(dbgs() << *PBI->getParent()->getParent());
// BI may have other predecessors. Because of this, we leave
// it alone, but modify PBI.
-
+
// Make sure we get to CommonDest on True&True directions.
Value *PBICond = PBI->getCondition();
IRBuilder<true, NoFolder> Builder(PBI);
@@ -2065,16 +2320,43 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
// Merge the conditions.
Value *Cond = Builder.CreateOr(PBICond, BICond, "brmerge");
-
+
// Modify PBI to branch on the new condition to the new dests.
PBI->setCondition(Cond);
PBI->setSuccessor(0, CommonDest);
PBI->setSuccessor(1, OtherDest);
-
+
+ // Update branch weight for PBI.
+ uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight;
+ bool PredHasWeights = ExtractBranchMetadata(PBI, PredTrueWeight,
+ PredFalseWeight);
+ bool SuccHasWeights = ExtractBranchMetadata(BI, SuccTrueWeight,
+ SuccFalseWeight);
+ if (PredHasWeights && SuccHasWeights) {
+ uint64_t PredCommon = PBIOp ? PredFalseWeight : PredTrueWeight;
+ uint64_t PredOther = PBIOp ? PredTrueWeight : PredFalseWeight;
+ uint64_t SuccCommon = BIOp ? SuccFalseWeight : SuccTrueWeight;
+ uint64_t SuccOther = BIOp ? SuccTrueWeight : SuccFalseWeight;
+ // The weight to CommonDest should be PredCommon * SuccTotal +
+ // PredOther * SuccCommon.
+ // The weight to OtherDest should be PredOther * SuccOther.
+ SmallVector<uint64_t, 2> NewWeights;
+ NewWeights.push_back(PredCommon * (SuccCommon + SuccOther) +
+ PredOther * SuccCommon);
+ NewWeights.push_back(PredOther * SuccOther);
+ // Halve the weights if any of them cannot fit in a uint32_t
+ FitWeights(NewWeights);
+
+ SmallVector<uint32_t, 2> MDWeights(NewWeights.begin(),NewWeights.end());
+ PBI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(BI->getContext()).
+ createBranchWeights(MDWeights));
+ }
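+ // Worked example with invented numbers: PredCommon = 6, PredOther = 2,
+ // SuccCommon = 3, SuccOther = 1 give
+ //   weight(CommonDest) = 6 * (3 + 1) + 2 * 3 = 30,
+ //   weight(OtherDest) = 2 * 1 = 2,
+ // since CommonDest is reached either directly from PBI or through BB
+ // via BI's common edge, while OtherDest needs both "other" edges.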
+
// OtherDest may have phi nodes. If so, add an entry from PBI's
// block that are identical to the entries for BI's block.
AddPredecessorToBlock(OtherDest, PBI->getParent(), BB);
-
+
// We know that the CommonDest already had an edge from PBI to
// it. If it has PHIs though, the PHIs may have different
// entries for BB and PBI's BB. If so, insert a select to make
@@ -2092,10 +2374,10 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
PN->setIncomingValue(PBBIdx, NV);
}
}
-
+
DEBUG(dbgs() << "INTO: " << *PBI->getParent());
DEBUG(dbgs() << *PBI->getParent()->getParent());
-
+
// This basic block is probably dead. We know it has at least
// one fewer predecessor.
return true;
@@ -2107,7 +2389,9 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
// Also makes sure not to introduce new successors by assuming that edges to
// non-successor TrueBBs and FalseBBs aren't reachable.
static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
- BasicBlock *TrueBB, BasicBlock *FalseBB){
+ BasicBlock *TrueBB, BasicBlock *FalseBB,
+ uint32_t TrueWeight,
+ uint32_t FalseWeight) {
// Remove any superfluous successor edges from the CFG.
// First, figure out which successors to preserve.
// If TrueBB and FalseBB are equal, only try to preserve one copy of that
@@ -2136,10 +2420,15 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
// We were only looking for one successor, and it was present.
// Create an unconditional branch to it.
Builder.CreateBr(TrueBB);
- else
+ else {
// We found both of the successors we were looking for.
// Create a conditional branch sharing the condition of the select.
- Builder.CreateCondBr(Cond, TrueBB, FalseBB);
+ BranchInst *NewBI = Builder.CreateCondBr(Cond, TrueBB, FalseBB);
+ if (TrueWeight != FalseWeight)
+ NewBI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(OldTerm->getContext()).
+ createBranchWeights(TrueWeight, FalseWeight));
+ }
} else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) {
// Neither of the selected blocks were successors, so this
// terminator must be unreachable.
@@ -2176,8 +2465,23 @@ static bool SimplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select) {
BasicBlock *TrueBB = SI->findCaseValue(TrueVal).getCaseSuccessor();
BasicBlock *FalseBB = SI->findCaseValue(FalseVal).getCaseSuccessor();
+ // Get weight for TrueBB and FalseBB.
+ uint32_t TrueWeight = 0, FalseWeight = 0;
+ SmallVector<uint64_t, 8> Weights;
+ bool HasWeights = HasBranchWeights(SI);
+ if (HasWeights) {
+ GetBranchWeights(SI, Weights);
+ if (Weights.size() == 1 + SI->getNumCases()) {
+ TrueWeight = (uint32_t)Weights[SI->findCaseValue(TrueVal).
+ getSuccessorIndex()];
+ FalseWeight = (uint32_t)Weights[SI->findCaseValue(FalseVal).
+ getSuccessorIndex()];
+ }
+ }
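+ // Note that TrueWeight and FalseWeight stay (0, 0) when the metadata is
+ // missing or malformed; SimplifyTerminatorOnSelect only attaches !prof
+ // when the two weights differ, so that case yields an unannotated branch.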
+
// Perform the actual simplification.
- return SimplifyTerminatorOnSelect(SI, Condition, TrueBB, FalseBB);
+ return SimplifyTerminatorOnSelect(SI, Condition, TrueBB, FalseBB,
+ TrueWeight, FalseWeight);
}
// SimplifyIndirectBrOnSelect - Replaces
@@ -2197,7 +2501,8 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) {
BasicBlock *FalseBB = FBA->getBasicBlock();
// Perform the actual simplification.
- return SimplifyTerminatorOnSelect(IBI, SI->getCondition(), TrueBB, FalseBB);
+ return SimplifyTerminatorOnSelect(IBI, SI->getCondition(), TrueBB, FalseBB,
+ 0, 0);
}
/// TryToSimplifyUncondBranchWithICmpInIt - This is called when we find an icmp
@@ -2214,11 +2519,11 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) {
/// br label %end
/// end:
/// ... = phi i1 [ true, %entry ], [ %tmp, %DEFAULT ], [ true, %entry ]
-///
+///
/// We prefer to split the edge to 'end' so that there is a true/false entry to
/// the PHI, merging the third icmp into the switch.
static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
- const TargetData *TD,
+ const DataLayout *TD,
IRBuilder<> &Builder) {
BasicBlock *BB = ICI->getParent();
@@ -2228,17 +2533,17 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
Value *V = ICI->getOperand(0);
ConstantInt *Cst = cast<ConstantInt>(ICI->getOperand(1));
-
+
// The pattern we're looking for is where our only predecessor is a switch on
// 'V' and this block is the default case for the switch. In this case we can
// fold the compared value into the switch to simplify things.
BasicBlock *Pred = BB->getSinglePredecessor();
if (Pred == 0 || !isa<SwitchInst>(Pred->getTerminator())) return false;
-
+
SwitchInst *SI = cast<SwitchInst>(Pred->getTerminator());
if (SI->getCondition() != V)
return false;
-
+
// If BB is reachable on a non-default case, then we simply know the value of
// V in this block. Substitute it and constant fold the icmp instruction
// away.
@@ -2246,7 +2551,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
ConstantInt *VVal = SI->findCaseDest(BB);
assert(VVal && "Should have a unique destination value");
ICI->setOperand(0, VVal);
-
+
if (Value *V = SimplifyInstruction(ICI, TD)) {
ICI->replaceAllUsesWith(V);
ICI->eraseFromParent();
@@ -2254,7 +2559,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
// BB is now empty, so it is likely to simplify away.
return SimplifyCFG(BB) | true;
}
-
+
// Ok, the block is reachable from the default dest. If the constant we're
// comparing exists in one of the other edges, then we can constant fold ICI
// and zap it.
@@ -2264,13 +2569,13 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
V = ConstantInt::getFalse(BB->getContext());
else
V = ConstantInt::getTrue(BB->getContext());
-
+
ICI->replaceAllUsesWith(V);
ICI->eraseFromParent();
// BB is now empty, so it is likely to simplify away.
return SimplifyCFG(BB) | true;
}
-
+
// The use of the icmp has to be in the 'end' block, by the only PHI node in
// the block.
BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0);
@@ -2296,8 +2601,23 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
// the switch to the merge point on the compared value.
BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "switch.edge",
BB->getParent(), BB);
+ SmallVector<uint64_t, 8> Weights;
+ bool HasWeights = HasBranchWeights(SI);
+ if (HasWeights) {
+ GetBranchWeights(SI, Weights);
+ if (Weights.size() == 1 + SI->getNumCases()) {
+ // Split weight for default case to case for "Cst".
+ Weights[0] = (Weights[0]+1) >> 1;
+ Weights.push_back(Weights[0]);
+
+ SmallVector<uint32_t, 8> MDWeights(Weights.begin(), Weights.end());
+ SI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(SI->getContext()).
+ createBranchWeights(MDWeights));
+ }
+ }
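+ // Worked example: an old default weight of 9 becomes (9 + 1) >> 1 = 5,
+ // and the new case for "Cst" also gets 5, so the old default's mass is
+ // split roughly in half between the two edges.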
SI->addCase(Cst, NewBB);
-
+
// NewBB branches to the phi block, add the uncond branch and the phi entry.
Builder.SetInsertPoint(NewBB);
Builder.SetCurrentDebugLocation(SI->getDebugLoc());
@@ -2309,12 +2629,12 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
/// SimplifyBranchOnICmpChain - The specified branch is a conditional branch.
/// Check to see if it is branching on an or/and chain of icmp instructions, and
/// fold it into a switch instruction if so.
-static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD,
+static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
IRBuilder<> &Builder) {
Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
if (Cond == 0) return false;
-
-
+
+
// Change br (X == 0 | X == 1), T, F into a switch instruction.
// If this is a bunch of seteq's or'd together, or if it's a bunch of
// 'setne's and'ed together, collect them.
@@ -2323,7 +2643,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD,
bool TrueWhenEqual = true;
Value *ExtraCase = 0;
unsigned UsedICmps = 0;
-
+
if (Cond->getOpcode() == Instruction::Or) {
CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, true,
UsedICmps);
@@ -2332,7 +2652,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD,
UsedICmps);
TrueWhenEqual = false;
}
-
+
// If we didn't have a multiply compared value, fail.
if (CompVal == 0) return false;
@@ -2344,21 +2664,24 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD,
// instruction can't handle, remove them now.
array_pod_sort(Values.begin(), Values.end(), ConstantIntSortPredicate);
Values.erase(std::unique(Values.begin(), Values.end()), Values.end());
-
+
// If Extra was used, we require at least two switch values to do the
// transformation. A switch with one value is just a cond branch.
if (ExtraCase && Values.size() < 2) return false;
-
+
+ // TODO: Preserve branch weight metadata, similarly to how
+ // FoldValueComparisonIntoPredecessors preserves it.
+
// Figure out which block is which destination.
BasicBlock *DefaultBB = BI->getSuccessor(1);
BasicBlock *EdgeBB = BI->getSuccessor(0);
if (!TrueWhenEqual) std::swap(DefaultBB, EdgeBB);
-
+
BasicBlock *BB = BI->getParent();
-
+
DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size()
<< " cases into SWITCH. BB is:\n" << *BB);
-
+
// If there are any extra values that couldn't be folded into the switch
// then we evaluate them with an explicit branch first. Split the block
// right before the condbr to handle it.
@@ -2372,13 +2695,13 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD,
Builder.CreateCondBr(ExtraCase, EdgeBB, NewBB);
else
Builder.CreateCondBr(ExtraCase, NewBB, EdgeBB);
-
+
OldTI->eraseFromParent();
-
+
// If there are PHI nodes in EdgeBB, then we need to add a new entry to them
// for the edge we just added.
AddPredecessorToBlock(EdgeBB, BB, NewBB);
-
+
DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase
<< "\nEXTRABB = " << *BB);
BB = NewBB;
@@ -2387,19 +2710,19 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD,
Builder.SetInsertPoint(BI);
// Convert pointer to int before we switch.
if (CompVal->getType()->isPointerTy()) {
- assert(TD && "Cannot switch on pointer without TargetData");
+ assert(TD && "Cannot switch on pointer without DataLayout");
CompVal = Builder.CreatePtrToInt(CompVal,
TD->getIntPtrType(CompVal->getContext()),
"magicptr");
}
-
+
// Create the new switch instruction now.
SwitchInst *New = Builder.CreateSwitch(CompVal, DefaultBB, Values.size());
// Add all of the 'cases' to the switch instruction.
for (unsigned i = 0, e = Values.size(); i != e; ++i)
New->addCase(Values[i], EdgeBB);
-
+
// We added edges from PI to the EdgeBB. As such, if there were any
// PHI nodes in EdgeBB, they need entries to be added corresponding to
// the number of edges added.
@@ -2410,10 +2733,10 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD,
for (unsigned i = 0, e = Values.size()-1; i != e; ++i)
PN->addIncoming(InVal, BB);
}
-
+
// Erase the old branch instruction.
EraseTerminatorInstAndDCECond(BI);
-
+
DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n');
return true;
}
@@ -2467,7 +2790,7 @@ bool SimplifyCFGOpt::SimplifyResume(ResumeInst *RI, IRBuilder<> &Builder) {
bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) {
BasicBlock *BB = RI->getParent();
if (!BB->getFirstNonPHIOrDbg()->isTerminator()) return false;
-
+
// Find predecessors that end with branches.
SmallVector<BasicBlock*, 8> UncondBranchPreds;
SmallVector<BranchInst*, 8> CondBranchPreds;
@@ -2481,7 +2804,7 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) {
CondBranchPreds.push_back(BI);
}
}
-
+
// If we found some, do the transformation!
if (!UncondBranchPreds.empty() && DupRet) {
while (!UncondBranchPreds.empty()) {
@@ -2490,21 +2813,21 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) {
<< "INTO UNCOND BRANCH PRED: " << *Pred);
(void)FoldReturnIntoUncondBranch(RI, BB, Pred);
}
-
+
// If we eliminated all predecessors of the block, delete the block now.
if (pred_begin(BB) == pred_end(BB))
// We know there are no successors, so just nuke the block.
BB->eraseFromParent();
-
+
return true;
}
-
+
// Check out all of the conditional branches going to this return
// instruction. If any of them just select between returns, change the
// branch itself into a select/return pair.
while (!CondBranchPreds.empty()) {
BranchInst *BI = CondBranchPreds.pop_back_val();
-
+
// Check to see if the non-BB successor is also a return block.
if (isa<ReturnInst>(BI->getSuccessor(0)->getTerminator()) &&
isa<ReturnInst>(BI->getSuccessor(1)->getTerminator()) &&
@@ -2516,9 +2839,9 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) {
bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
BasicBlock *BB = UI->getParent();
-
+
bool Changed = false;
-
+
// If there are any instructions immediately before the unreachable that can
// be removed, do so.
while (UI != BB->begin()) {
@@ -2558,11 +2881,11 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
BBI->eraseFromParent();
Changed = true;
}
-
+
// If the unreachable instruction is the first in the block, take a gander
// at all of the predecessors of this instruction, and simplify them.
if (&BB->front() != UI) return Changed;
-
+
SmallVector<BasicBlock*, 8> Preds(pred_begin(BB), pred_end(BB));
for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
TerminatorInst *TI = Preds[i]->getTerminator();
@@ -2615,7 +2938,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
BasicBlock *MaxBlock = 0;
for (std::map<BasicBlock*, std::pair<unsigned, unsigned> >::iterator
I = Popularity.begin(), E = Popularity.end(); I != E; ++I) {
- if (I->second.first > MaxPop ||
+ if (I->second.first > MaxPop ||
(I->second.first == MaxPop && MaxIndex > I->second.second)) {
MaxPop = I->second.first;
MaxIndex = I->second.second;
@@ -2627,13 +2950,13 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
// edges to it.
SI->setDefaultDest(MaxBlock);
Changed = true;
-
+
// If MaxBlock has phinodes in it, remove MaxPop-1 entries from
// it.
if (isa<PHINode>(MaxBlock->begin()))
for (unsigned i = 0; i != MaxPop-1; ++i)
MaxBlock->removePredecessor(SI->getParent());
-
+
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i)
if (i.getCaseSuccessor() == MaxBlock) {
@@ -2648,7 +2971,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
// place to note that the call does not throw though.
BranchInst *BI = Builder.CreateBr(II->getNormalDest());
II->removeFromParent(); // Take out of symbol table
-
+
// Insert the call now...
SmallVector<Value*, 8> Args(II->op_begin(), II->op_end()-3);
Builder.SetInsertPoint(BI);
@@ -2663,7 +2986,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
}
}
}
-
+
// If this block is now dead, remove it.
if (pred_begin(BB) == pred_end(BB) &&
BB != &BB->getParent()->getEntryBlock()) {
@@ -2706,9 +3029,28 @@ static bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) {
if (!Offset->isNullValue())
Sub = Builder.CreateAdd(Sub, Offset, Sub->getName()+".off");
Value *Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch");
- Builder.CreateCondBr(
+ BranchInst *NewBI = Builder.CreateCondBr(
Cmp, SI->case_begin().getCaseSuccessor(), SI->getDefaultDest());
+ // Update weight for the newly-created conditional branch.
+ SmallVector<uint64_t, 8> Weights;
+ bool HasWeights = HasBranchWeights(SI);
+ if (HasWeights) {
+ GetBranchWeights(SI, Weights);
+ if (Weights.size() == 1 + SI->getNumCases()) {
+ // Combine all weights for the cases to be the true weight of NewBI.
+ // We assume that the sum of all weights for a Terminator fits in
+ // 32 bits.
+ uint32_t NewTrueWeight = 0;
+ for (unsigned I = 1, E = Weights.size(); I != E; ++I)
+ NewTrueWeight += (uint32_t)Weights[I];
+ NewBI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(SI->getContext()).
+ createBranchWeights(NewTrueWeight,
+ (uint32_t)Weights[0]));
+ }
+ }
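+ // Worked example with invented numbers: a switch with weights
+ // {default: 4, case 1: 3, case 2: 5} becomes a conditional branch
+ // with true weight 3 + 5 = 8 (the in-range side) and false weight 4
+ // (the old default).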
+
// Prune obsolete incoming values off the successor's PHI nodes.
for (BasicBlock::iterator BBI = SI->case_begin().getCaseSuccessor()->begin();
isa<PHINode>(BBI); ++BBI) {
@@ -2739,15 +3081,33 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI) {
}
}
+ SmallVector<uint64_t, 8> Weights;
+ bool HasWeight = HasBranchWeights(SI);
+ if (HasWeight) {
+ GetBranchWeights(SI, Weights);
+ HasWeight = (Weights.size() == 1 + SI->getNumCases());
+ }
+
// Remove dead cases from the switch.
for (unsigned I = 0, E = DeadCases.size(); I != E; ++I) {
SwitchInst::CaseIt Case = SI->findCaseValue(DeadCases[I]);
assert(Case != SI->case_default() &&
"Case was not found. Probably mistake in DeadCases forming.");
+ if (HasWeight) {
+ std::swap(Weights[Case.getCaseIndex()+1], Weights.back());
+ Weights.pop_back();
+ }
+
// Prune unused values from PHI nodes.
Case.getCaseSuccessor()->removePredecessor(SI->getParent());
SI->removeCase(Case);
}
+ if (HasWeight) {
+ SmallVector<uint32_t, 8> MDWeights(Weights.begin(), Weights.end());
+ SI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(SI->getParent()->getContext()).
+ createBranchWeights(MDWeights));
+ }
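+ // Worked example with invented numbers: weights {default: 10, c1: 1,
+ // c2: 2, c3: 3} with c2 dead become {10, 1, 3}: the swap-and-pop above
+ // recycles the dead entry's slot while the surviving weights keep
+ // their values.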
return !DeadCases.empty();
}
@@ -2823,33 +3183,512 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
return Changed;
}
-bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
- // If this switch is too complex to want to look at, ignore it.
- if (!isValueEqualityComparison(SI))
+/// ValidLookupTableConstant - Return true if the backend will be able to handle
+/// initializing an array of constants like C.
+static bool ValidLookupTableConstant(Constant *C) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ return CE->isGEPWithNoNotionalOverIndexing();
+
+ return isa<ConstantFP>(C) ||
+ isa<ConstantInt>(C) ||
+ isa<ConstantPointerNull>(C) ||
+ isa<GlobalValue>(C) ||
+ isa<UndefValue>(C);
+}
+
+/// LookupConstant - If V is a Constant, return it. Otherwise, try to look up
+/// its constant value in ConstantPool, returning 0 if it's not there.
+static Constant *LookupConstant(Value *V,
+ const SmallDenseMap<Value*, Constant*>& ConstantPool) {
+ if (Constant *C = dyn_cast<Constant>(V))
+ return C;
+ return ConstantPool.lookup(V);
+}
+
+/// ConstantFold - Try to fold instruction I into a constant. This works for
+/// simple instructions such as binary operations where both operands are
+/// constant or can be replaced by constants from the ConstantPool. Returns the
+/// resulting constant on success, 0 otherwise.
+static Constant *ConstantFold(Instruction *I,
+ const SmallDenseMap<Value*, Constant*>& ConstantPool) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
+ Constant *A = LookupConstant(BO->getOperand(0), ConstantPool);
+ if (!A)
+ return 0;
+ Constant *B = LookupConstant(BO->getOperand(1), ConstantPool);
+ if (!B)
+ return 0;
+ return ConstantExpr::get(BO->getOpcode(), A, B);
+ }
+
+ if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
+ Constant *A = LookupConstant(I->getOperand(0), ConstantPool);
+ if (!A)
+ return 0;
+ Constant *B = LookupConstant(I->getOperand(1), ConstantPool);
+ if (!B)
+ return 0;
+ return ConstantExpr::getCompare(Cmp->getPredicate(), A, B);
+ }
+
+ if (SelectInst *Select = dyn_cast<SelectInst>(I)) {
+ Constant *A = LookupConstant(Select->getCondition(), ConstantPool);
+ if (!A)
+ return 0;
+ if (A->isAllOnesValue())
+ return LookupConstant(Select->getTrueValue(), ConstantPool);
+ if (A->isNullValue())
+ return LookupConstant(Select->getFalseValue(), ConstantPool);
+ return 0;
+ }
+
+ if (CastInst *Cast = dyn_cast<CastInst>(I)) {
+ Constant *A = LookupConstant(I->getOperand(0), ConstantPool);
+ if (!A)
+ return 0;
+ return ConstantExpr::getCast(Cast->getOpcode(), A, Cast->getDestTy());
+ }
+
+ return 0;
+}
+
+/// GetCaseResults - Try to determine the resulting constant values in phi nodes
+/// at the common destination basic block, *CommonDest, for one of the case
+/// destinations CaseDest corresponding to value CaseVal (0 for the default
+/// case), of a switch instruction SI.
+static bool GetCaseResults(SwitchInst *SI,
+ ConstantInt *CaseVal,
+ BasicBlock *CaseDest,
+ BasicBlock **CommonDest,
+ SmallVector<std::pair<PHINode*,Constant*>, 4> &Res) {
+ // The block from which we enter the common destination.
+ BasicBlock *Pred = SI->getParent();
+
+ // If CaseDest is empty except for some side-effect free instructions through
+ // which we can constant-propagate the CaseVal, continue to its successor.
+ SmallDenseMap<Value*, Constant*> ConstantPool;
+ ConstantPool.insert(std::make_pair(SI->getCondition(), CaseVal));
+ for (BasicBlock::iterator I = CaseDest->begin(), E = CaseDest->end(); I != E;
+ ++I) {
+ if (TerminatorInst *T = dyn_cast<TerminatorInst>(I)) {
+ // If the terminator is a simple branch, continue to the next block.
+ if (T->getNumSuccessors() != 1)
+ return false;
+ Pred = CaseDest;
+ CaseDest = T->getSuccessor(0);
+ } else if (isa<DbgInfoIntrinsic>(I)) {
+ // Skip debug intrinsic.
+ continue;
+ } else if (Constant *C = ConstantFold(I, ConstantPool)) {
+ // Instruction is side-effect free and constant.
+ ConstantPool.insert(std::make_pair(I, C));
+ } else {
+ break;
+ }
+ }
+
+ // If we did not have a CommonDest before, use the current one.
+ if (!*CommonDest)
+ *CommonDest = CaseDest;
+ // If the destination isn't the common one, abort.
+ if (CaseDest != *CommonDest)
+ return false;
+
+ // Get the values for this case from phi nodes in the destination block.
+ BasicBlock::iterator I = (*CommonDest)->begin();
+ while (PHINode *PHI = dyn_cast<PHINode>(I++)) {
+ int Idx = PHI->getBasicBlockIndex(Pred);
+ if (Idx == -1)
+ continue;
+
+ Constant *ConstVal = LookupConstant(PHI->getIncomingValue(Idx),
+ ConstantPool);
+ if (!ConstVal)
+ return false;
+
+ // Note: If the constant comes from constant-propagating the case value
+ // through the CaseDest basic block, it will be safe to remove the
+ // instructions in that block. They cannot be used (except in the phi nodes
+ // we visit) outside CaseDest, because that block does not dominate its
+ // successor. If it did, we would not be in this phi node.
+
+ // Be conservative about which kinds of constants we support.
+ if (!ValidLookupTableConstant(ConstVal))
+ return false;
+
+ Res.push_back(std::make_pair(PHI, ConstVal));
+ }
+
+ return true;
+}
+
+namespace {
+ /// SwitchLookupTable - This class represents a lookup table that can be used
+ /// to replace a switch.
+ class SwitchLookupTable {
+ public:
+ /// SwitchLookupTable - Create a lookup table to use as a switch replacement
+ /// with the contents of Values, using DefaultValue to fill any holes in the
+ /// table.
+ SwitchLookupTable(Module &M,
+ uint64_t TableSize,
+ ConstantInt *Offset,
+ const SmallVector<std::pair<ConstantInt*, Constant*>, 4>& Values,
+ Constant *DefaultValue,
+ const DataLayout *TD);
+
+ /// BuildLookup - Build instructions with Builder to retrieve the value at
+ /// the position given by Index in the lookup table.
+ Value *BuildLookup(Value *Index, IRBuilder<> &Builder);
+
+ /// WouldFitInRegister - Return true if a table with TableSize elements of
+ /// type ElementType would fit in a target-legal register.
+ static bool WouldFitInRegister(const DataLayout *TD,
+ uint64_t TableSize,
+ const Type *ElementType);
+
+ private:
+ // Depending on the contents of the table, it can be represented in
+ // different ways.
+ enum {
+ // For tables where each element contains the same value, we just have to
+ // store that single value and return it for each lookup.
+ SingleValueKind,
+
+ // For small tables with integer elements, we can pack them into a bitmap
+ // that fits into a target-legal register. Values are retrieved by
+ // shift and mask operations.
+ BitMapKind,
+
+ // The table is stored as an array of values. Values are retrieved by load
+ // instructions from the table.
+ ArrayKind
+ } Kind;
+
+ // For SingleValueKind, this is the single value.
+ Constant *SingleValue;
+
+ // For BitMapKind, this is the bitmap.
+ ConstantInt *BitMap;
+ IntegerType *BitMapElementTy;
+
+ // For ArrayKind, this is the array.
+ GlobalVariable *Array;
+ };
+}
+
+SwitchLookupTable::SwitchLookupTable(Module &M,
+ uint64_t TableSize,
+ ConstantInt *Offset,
+ const SmallVector<std::pair<ConstantInt*, Constant*>, 4>& Values,
+ Constant *DefaultValue,
+ const DataLayout *TD) {
+ assert(Values.size() && "Can't build lookup table without values!");
+ assert(TableSize >= Values.size() && "Can't fit values in table!");
+
+ // If all values in the table are equal, this is that value.
+ SingleValue = Values.begin()->second;
+
+ // Build up the table contents.
+ SmallVector<Constant*, 64> TableContents(TableSize);
+ for (size_t I = 0, E = Values.size(); I != E; ++I) {
+ ConstantInt *CaseVal = Values[I].first;
+ Constant *CaseRes = Values[I].second;
+ assert(CaseRes->getType() == DefaultValue->getType());
+
+ uint64_t Idx = (CaseVal->getValue() - Offset->getValue())
+ .getLimitedValue();
+ TableContents[Idx] = CaseRes;
+
+ if (CaseRes != SingleValue)
+ SingleValue = 0;
+ }
+
+ // Fill in any holes in the table with the default result.
+ if (Values.size() < TableSize) {
+ for (uint64_t I = 0; I < TableSize; ++I) {
+ if (!TableContents[I])
+ TableContents[I] = DefaultValue;
+ }
+
+ if (DefaultValue != SingleValue)
+ SingleValue = 0;
+ }
+
+ // If each element in the table contains the same value, we only need to store
+ // that single value.
+ if (SingleValue) {
+ Kind = SingleValueKind;
+ return;
+ }
+
+ // If the type is integer and the table fits in a register, build a bitmap.
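+ // e.g. a 3-entry table of i2 values {0, 1, 2} is packed into the i6
+ // constant 0b100100; entry K occupies bits [2*K, 2*K+1].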
+ if (WouldFitInRegister(TD, TableSize, DefaultValue->getType())) {
+ IntegerType *IT = cast<IntegerType>(DefaultValue->getType());
+ APInt TableInt(TableSize * IT->getBitWidth(), 0);
+ for (uint64_t I = TableSize; I > 0; --I) {
+ TableInt <<= IT->getBitWidth();
+ // Insert values into the bitmap. Undef values are set to zero.
+ if (!isa<UndefValue>(TableContents[I - 1])) {
+ ConstantInt *Val = cast<ConstantInt>(TableContents[I - 1]);
+ TableInt |= Val->getValue().zext(TableInt.getBitWidth());
+ }
+ }
+ BitMap = ConstantInt::get(M.getContext(), TableInt);
+ BitMapElementTy = IT;
+ Kind = BitMapKind;
+ ++NumBitMaps;
+ return;
+ }
+
+ // Store the table in an array.
+ ArrayType *ArrayTy = ArrayType::get(DefaultValue->getType(), TableSize);
+ Constant *Initializer = ConstantArray::get(ArrayTy, TableContents);
+
+ Array = new GlobalVariable(M, ArrayTy, /*constant=*/ true,
+ GlobalVariable::PrivateLinkage,
+ Initializer,
+ "switch.table");
+ Array->setUnnamedAddr(true);
+ Kind = ArrayKind;
+}
+
+Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
+ switch (Kind) {
+ case SingleValueKind:
+ return SingleValue;
+ case BitMapKind: {
+ // Type of the bitmap (e.g. i59).
+ IntegerType *MapTy = BitMap->getType();
+
+ // Cast Index to the same type as the bitmap.
+ // Note: The Index is < the number of elements in the table (it is
+ // guarded by the range check created in SwitchToLookupTable), so
+ // truncating it to the width of the bitmap is safe.
+ Value *ShiftAmt = Builder.CreateZExtOrTrunc(Index, MapTy, "switch.cast");
+
+ // Multiply the shift amount by the element width.
+ ShiftAmt = Builder.CreateMul(ShiftAmt,
+ ConstantInt::get(MapTy, BitMapElementTy->getBitWidth()),
+ "switch.shiftamt");
+
+ // Shift down.
+ Value *DownShifted = Builder.CreateLShr(BitMap, ShiftAmt,
+ "switch.downshift");
+ // Mask off.
+ return Builder.CreateTrunc(DownShifted, BitMapElementTy,
+ "switch.masked");
+ }
+ case ArrayKind: {
+ Value *GEPIndices[] = { Builder.getInt32(0), Index };
+ Value *GEP = Builder.CreateInBoundsGEP(Array, GEPIndices,
+ "switch.gep");
+ return Builder.CreateLoad(GEP, "switch.load");
+ }
+ }
+ llvm_unreachable("Unknown lookup table kind!");
+}
+
+bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
+ uint64_t TableSize,
+ const Type *ElementType) {
+ if (!TD)
+ return false;
+ const IntegerType *IT = dyn_cast<IntegerType>(ElementType);
+ if (!IT)
+ return false;
+ // FIXME: If the type is wider than it needs to be, e.g. i8 but all values
+ // are <= 15, we could try to narrow the type.
+
+ // Avoid overflow, fitsInLegalInteger uses unsigned int for the width.
+ if (TableSize >= UINT_MAX/IT->getBitWidth())
+ return false;
+ return TD->fitsInLegalInteger(TableSize * IT->getBitWidth());
+}
+
+/// ShouldBuildLookupTable - Determine whether a lookup table should be built
+/// for this switch, based on the number of cases, the size of the table, and the
+/// types of the results.
+static bool ShouldBuildLookupTable(SwitchInst *SI,
+ uint64_t TableSize,
+ const DataLayout *TD,
+ const SmallDenseMap<PHINode*, Type*>& ResultTypes) {
+ // The table density should be at least 40%. This is the same criterion as for
+ // jump tables; see SelectionDAGBuilder::handleJTSwitchCase.
+ // FIXME: Find the best cut-off.
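+ // e.g. 5 cases spanning a range of 8 values gives 5*10 >= 8*4 (62%
+ // density), so a table is built.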
+ if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10)
+ return false; // TableSize overflowed, or mul below might overflow.
+ if (SI->getNumCases() * 10 >= TableSize * 4)
+ return true;
+
+ // If each table would fit in a register, we should build it anyway.
+ for (SmallDenseMap<PHINode*, Type*>::const_iterator I = ResultTypes.begin(),
+ E = ResultTypes.end(); I != E; ++I) {
+ if (!SwitchLookupTable::WouldFitInRegister(TD, TableSize, I->second))
+ return false;
+ }
+ return true;
+}
+
+/// SwitchToLookupTable - If the switch is only used to initialize one or more
+/// phi nodes in a common successor block with different constant values,
+/// replace the switch with lookup tables.
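+///
+/// For example, a dense switch that only selects among constants, e.g.
+///   switch (x) { case 0: r = 24; break; case 1: r = 42; break; ... }
+/// becomes a range check on x plus a load from a private constant
+/// "switch.table" array (or a shift/mask of a packed bitmap when the whole
+/// table fits in a register).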
+static bool SwitchToLookupTable(SwitchInst *SI,
+ IRBuilder<> &Builder,
+ const DataLayout* TD,
+ const TargetTransformInfo *TTI) {
+ assert(SI->getNumCases() > 1 && "Degenerate switch?");
+
+ // Only build lookup table when we have a target that supports it.
+ if (!TTI || !TTI->getScalarTargetTransformInfo() ||
+ !TTI->getScalarTargetTransformInfo()->shouldBuildLookupTables())
return false;
+ // FIXME: If the switch is too sparse for a lookup table, perhaps we could
+ // split off a dense part and build a lookup table for that.
+
+ // FIXME: This creates arrays of GEPs to constant strings, which means each
+ // GEP needs a runtime relocation in PIC code. We should just build one big
+ // string and look up indices into it.
+
+ // Ignore the switch if the number of cases is too small.
+ // This is similar to the check when building jump tables in
+ // SelectionDAGBuilder::handleJTSwitchCase.
+ // FIXME: Determine the best cut-off.
+ if (SI->getNumCases() < 4)
+ return false;
+
+ // Figure out the corresponding result for each case value and phi node in the
+ // common destination, as well as the min and max case values.
+ assert(SI->case_begin() != SI->case_end());
+ SwitchInst::CaseIt CI = SI->case_begin();
+ ConstantInt *MinCaseVal = CI.getCaseValue();
+ ConstantInt *MaxCaseVal = CI.getCaseValue();
+
+ BasicBlock *CommonDest = 0;
+ typedef SmallVector<std::pair<ConstantInt*, Constant*>, 4> ResultListTy;
+ SmallDenseMap<PHINode*, ResultListTy> ResultLists;
+ SmallDenseMap<PHINode*, Constant*> DefaultResults;
+ SmallDenseMap<PHINode*, Type*> ResultTypes;
+ SmallVector<PHINode*, 4> PHIs;
+
+ for (SwitchInst::CaseIt E = SI->case_end(); CI != E; ++CI) {
+ ConstantInt *CaseVal = CI.getCaseValue();
+ if (CaseVal->getValue().slt(MinCaseVal->getValue()))
+ MinCaseVal = CaseVal;
+ if (CaseVal->getValue().sgt(MaxCaseVal->getValue()))
+ MaxCaseVal = CaseVal;
+
+ // Resulting value at phi nodes for this case value.
+ typedef SmallVector<std::pair<PHINode*, Constant*>, 4> ResultsTy;
+ ResultsTy Results;
+ if (!GetCaseResults(SI, CaseVal, CI.getCaseSuccessor(), &CommonDest,
+ Results))
+ return false;
+
+ // Append the result from this case to the list for each phi.
+ for (ResultsTy::iterator I = Results.begin(), E = Results.end(); I!=E; ++I) {
+ if (!ResultLists.count(I->first))
+ PHIs.push_back(I->first);
+ ResultLists[I->first].push_back(std::make_pair(CaseVal, I->second));
+ }
+ }
+
+ // Get the resulting values for the default case.
+ SmallVector<std::pair<PHINode*, Constant*>, 4> DefaultResultsList;
+ if (!GetCaseResults(SI, 0, SI->getDefaultDest(), &CommonDest,
+ DefaultResultsList))
+ return false;
+ for (size_t I = 0, E = DefaultResultsList.size(); I != E; ++I) {
+ PHINode *PHI = DefaultResultsList[I].first;
+ Constant *Result = DefaultResultsList[I].second;
+ DefaultResults[PHI] = Result;
+ ResultTypes[PHI] = Result->getType();
+ }
+
+ APInt RangeSpread = MaxCaseVal->getValue() - MinCaseVal->getValue();
+ uint64_t TableSize = RangeSpread.getLimitedValue() + 1;
+ if (!ShouldBuildLookupTable(SI, TableSize, TD, ResultTypes))
+ return false;
+
+ // Create the BB that does the lookups.
+ Module &Mod = *CommonDest->getParent()->getParent();
+ BasicBlock *LookupBB = BasicBlock::Create(Mod.getContext(),
+ "switch.lookup",
+ CommonDest->getParent(),
+ CommonDest);
+
+ // Check whether the condition value is within the case range, and branch to
+ // the new BB.
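+ // e.g. for case values {10, 11, 13}, TableIndex = x - 10 and the branch
+ // guard is "TableIndex u< 4".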
+ Builder.SetInsertPoint(SI);
+ Value *TableIndex = Builder.CreateSub(SI->getCondition(), MinCaseVal,
+ "switch.tableidx");
+ Value *Cmp = Builder.CreateICmpULT(TableIndex, ConstantInt::get(
+ MinCaseVal->getType(), TableSize));
+ Builder.CreateCondBr(Cmp, LookupBB, SI->getDefaultDest());
+
+ // Populate the BB that does the lookups.
+ Builder.SetInsertPoint(LookupBB);
+ bool ReturnedEarly = false;
+ for (size_t I = 0, E = PHIs.size(); I != E; ++I) {
+ PHINode *PHI = PHIs[I];
+
+ SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultLists[PHI],
+ DefaultResults[PHI], TD);
+
+ Value *Result = Table.BuildLookup(TableIndex, Builder);
+
+ // If the result is used to return immediately from the function, we want to
+ // do that right here.
+ if (PHI->hasOneUse() && isa<ReturnInst>(*PHI->use_begin()) &&
+ *PHI->use_begin() == CommonDest->getFirstNonPHIOrDbg()) {
+ Builder.CreateRet(Result);
+ ReturnedEarly = true;
+ break;
+ }
+
+ PHI->addIncoming(Result, LookupBB);
+ }
+
+ if (!ReturnedEarly)
+ Builder.CreateBr(CommonDest);
+
+ // Remove the switch.
+ for (unsigned i = 0; i < SI->getNumSuccessors(); ++i) {
+ BasicBlock *Succ = SI->getSuccessor(i);
+ if (Succ == SI->getDefaultDest()) continue;
+ Succ->removePredecessor(SI->getParent());
+ }
+ SI->eraseFromParent();
+
+ ++NumLookupTables;
+ return true;
+}
+
+bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
BasicBlock *BB = SI->getParent();
- // If we only have one predecessor, and if it is a branch on this value,
- // see if that predecessor totally determines the outcome of this switch.
- if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
- if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder))
- return SimplifyCFG(BB) | true;
+ if (isValueEqualityComparison(SI)) {
+ // If we only have one predecessor, and if it is a branch on this value,
+ // see if that predecessor totally determines the outcome of this switch.
+ if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
+ if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder))
+ return SimplifyCFG(BB) | true;
- Value *Cond = SI->getCondition();
- if (SelectInst *Select = dyn_cast<SelectInst>(Cond))
- if (SimplifySwitchOnSelect(SI, Select))
- return SimplifyCFG(BB) | true;
+ Value *Cond = SI->getCondition();
+ if (SelectInst *Select = dyn_cast<SelectInst>(Cond))
+ if (SimplifySwitchOnSelect(SI, Select))
+ return SimplifyCFG(BB) | true;
- // If the block only contains the switch, see if we can fold the block
- // away into any preds.
- BasicBlock::iterator BBI = BB->begin();
- // Ignore dbg intrinsics.
- while (isa<DbgInfoIntrinsic>(BBI))
- ++BBI;
- if (SI == &*BBI)
- if (FoldValueComparisonIntoPredecessors(SI, Builder))
- return SimplifyCFG(BB) | true;
+ // If the block only contains the switch, see if we can fold the block
+ // away into any preds.
+ BasicBlock::iterator BBI = BB->begin();
+ // Ignore dbg intrinsics.
+ while (isa<DbgInfoIntrinsic>(BBI))
+ ++BBI;
+ if (SI == &*BBI)
+ if (FoldValueComparisonIntoPredecessors(SI, Builder))
+ return SimplifyCFG(BB) | true;
+ }
// Try to transform the switch into an icmp and a branch.
if (TurnSwitchRangeIntoICmp(SI, Builder))
@@ -2862,13 +3701,16 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
if (ForwardSwitchConditionToPHI(SI))
return SimplifyCFG(BB) | true;
+ if (SwitchToLookupTable(SI, Builder, TD, TTI))
+ return SimplifyCFG(BB) | true;
+
return false;
}
bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
BasicBlock *BB = IBI->getParent();
bool Changed = false;
-
+
// Eliminate redundant destinations.
SmallPtrSet<Value *, 8> Succs;
for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
@@ -2879,7 +3721,7 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
--i; --e;
Changed = true;
}
- }
+ }
if (IBI->getNumDestinations() == 0) {
// If the indirectbr has no successors, change it to unreachable.
@@ -2887,14 +3729,14 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
EraseTerminatorInstAndDCECond(IBI);
return true;
}
-
+
if (IBI->getNumDestinations() == 1) {
// If the indirectbr has one successor, change it to a direct branch.
BranchInst::Create(IBI->getDestination(0), IBI);
EraseTerminatorInstAndDCECond(IBI);
return true;
}
-
+
if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {
if (SimplifyIndirectBrOnSelect(IBI, SI))
return SimplifyCFG(BB) | true;
@@ -2904,13 +3746,16 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
BasicBlock *BB = BI->getParent();
-
+
+ if (SinkCommon && SinkThenElseCodeToEnd(BI))
+ return true;
+
// If the Terminator is the only non-phi instruction, simplify the block.
BasicBlock::iterator I = BB->getFirstNonPHIOrDbgOrLifetime();
if (I->isTerminator() && BB != &BB->getParent()->getEntryBlock() &&
TryToSimplifyUncondBranchFromEmptyBlock(BB))
return true;
-
+
// If the only instruction in the block is a seteq/setne comparison
// against a constant, try to simplify the block.
if (ICmpInst *ICI = dyn_cast<ICmpInst>(I))
@@ -2921,7 +3766,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
TryToSimplifyUncondBranchWithICmpInIt(ICI, TD, Builder))
return true;
}
-
+
// If this basic block is ONLY a compare and a branch, and if a predecessor
// branches to us and our successor, fold the comparison into the
// predecessor and use logical operations to update the incoming value
@@ -2934,7 +3779,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
BasicBlock *BB = BI->getParent();
-
+
// Conditional branch
if (isValueEqualityComparison(BI)) {
// If we only have one predecessor, and if it is a branch on this value,
@@ -2943,7 +3788,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder))
return SimplifyCFG(BB) | true;
-
+
// This block must be empty, except for the setcond inst, if it exists.
// Ignore dbg intrinsics.
BasicBlock::iterator I = BB->begin();
@@ -2962,17 +3807,17 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
return SimplifyCFG(BB) | true;
}
}
-
+
// Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction.
if (SimplifyBranchOnICmpChain(BI, TD, Builder))
return true;
-
+
// If this basic block is ONLY a compare and a branch, and if a predecessor
// branches to us and one of our successors, fold the comparison into the
// predecessor and use logical operations to pick the right destination.
if (FoldBranchToCommonDest(BI))
return SimplifyCFG(BB) | true;
-
+
// We have a conditional branch to two blocks that are only reachable
// from BI. We know that the condbr dominates the two blocks, so see if
// there is any identical code in the "then" and "else" blocks. If so, we
@@ -2999,14 +3844,14 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1)))
return SimplifyCFG(BB) | true;
}
-
+
// If this is a branch on a phi node in the current block, thread control
// through this block if any PHI node entries are constants.
if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition()))
if (PN->getParent() == BI->getParent())
if (FoldCondBranchOnPHI(BI, TD))
return SimplifyCFG(BB) | true;
-
+
// Scan predecessor blocks for conditional branches.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
@@ -3023,11 +3868,12 @@ static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I) {
if (!C)
return false;
- if (!I->hasOneUse()) // Only look at single-use instructions, for compile time
+ if (I->use_empty())
return false;
if (C->isNullValue()) {
- Instruction *Use = I->use_back();
+ // Only look at the first use, to avoid hurting compile time with long use lists.
+ User *Use = *I->use_begin();
// Now make sure that there are no instructions in between that can alter
// control flow (eg. calls)
@@ -3114,7 +3960,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
//
if (MergeBlockIntoPredecessor(BB))
return true;
-
+
IRBuilder<> Builder(BB);
// If there is a trivial two-entry PHI node in this basic block, and we can
@@ -3152,6 +3998,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
/// eliminates unreachable basic blocks, and does other "peephole" optimization
/// of the CFG. It returns true if a modification was made.
///
-bool llvm::SimplifyCFG(BasicBlock *BB, const TargetData *TD) {
- return SimplifyCFGOpt(TD).run(BB);
+bool llvm::SimplifyCFG(BasicBlock *BB, const DataLayout *TD,
+ const TargetTransformInfo *TTI) {
+ return SimplifyCFGOpt(TD, TTI).run(BB);
}
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index 5d673f1..110f3808 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -24,7 +24,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
@@ -44,7 +44,7 @@ namespace {
Loop *L;
LoopInfo *LI;
ScalarEvolution *SE;
- const TargetData *TD; // May be NULL
+ const DataLayout *TD; // May be NULL
SmallVectorImpl<WeakVH> &DeadInsts;
@@ -56,7 +56,7 @@ namespace {
L(Loop),
LI(LPM->getAnalysisIfAvailable<LoopInfo>()),
SE(SE),
- TD(LPM->getAnalysisIfAvailable<TargetData>()),
+ TD(LPM->getAnalysisIfAvailable<DataLayout>()),
DeadInsts(Dead),
Changed(false) {
assert(LI && "IV simplification requires LoopInfo");
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
index 81eb9e0..65353dc 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
@@ -23,7 +23,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -46,7 +46,7 @@ namespace {
/// runOnFunction - Remove instructions that simplify.
bool runOnFunction(Function &F) {
const DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;
bool Changed = false;
@@ -72,7 +72,7 @@ namespace {
++NumSimplified;
Changed = true;
}
- Changed |= RecursivelyDeleteTriviallyDeadInstructions(I);
+ Changed |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
}
// Place the list of instructions to simplify on the next loop iteration
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
new file mode 100644
index 0000000..c3ea638
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -0,0 +1,1149 @@
+//===------ SimplifyLibCalls.cpp - Library calls simplifier ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the library call simplifier. It is not a pass itself;
+// it is a collection of per-call optimizations that other passes can use to
+// fold calls to well-known library functions (strcpy, memcmp, ...) into
+// simpler, equivalent forms.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
+#include "llvm/DataLayout.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
+
+using namespace llvm;
+
+/// This class is the abstract base class for the set of optimizations that
+/// corresponds to one library call.
+namespace {
+class LibCallOptimization {
+protected:
+ Function *Caller;
+ const DataLayout *TD;
+ const TargetLibraryInfo *TLI;
+ const LibCallSimplifier *LCS;
+ LLVMContext* Context;
+public:
+ LibCallOptimization() { }
+ virtual ~LibCallOptimization() {}
+
+ /// callOptimizer - This pure virtual method is implemented by subclasses to
+ /// do various optimizations. If this returns null then no transformation was
+ /// performed. If it returns CI, then it transformed the call and CI is to be
+ /// deleted. If it returns something else, replace CI with the new value and
+ /// delete CI.
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B)
+ =0;
+
+ Value *optimizeCall(CallInst *CI, const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
+ const LibCallSimplifier *LCS, IRBuilder<> &B) {
+ Caller = CI->getParent()->getParent();
+ this->TD = TD;
+ this->TLI = TLI;
+ this->LCS = LCS;
+ if (CI->getCalledFunction())
+ Context = &CI->getCalledFunction()->getContext();
+
+ // We never change the calling convention.
+ if (CI->getCallingConv() != llvm::CallingConv::C)
+ return NULL;
+
+ return callOptimizer(CI->getCalledFunction(), CI, B);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Helper Functions
+//===----------------------------------------------------------------------===//
+
+/// isOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
+/// value is equal or not-equal to zero.
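+/// (i.e. every use is an equality icmp against zero, as in
+/// "memcmp(a, b, n) == 0").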
+static bool isOnlyUsedInZeroEqualityComparison(Value *V) {
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI) {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ if (IC->isEquality())
+ if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
+ if (C->isNullValue())
+ continue;
+ // Unknown instruction.
+ return false;
+ }
+ return true;
+}
+
+/// isOnlyUsedInEqualityComparison - Return true if it is only used in equality
+/// comparisons with With.
+static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI) {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ if (IC->isEquality() && IC->getOperand(1) == With)
+ continue;
+ // Unknown instruction.
+ return false;
+ }
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Fortified Library Call Optimizations
+//===----------------------------------------------------------------------===//
+
+struct FortifiedLibCallOptimization : public LibCallOptimization {
+protected:
+ virtual bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp,
+ bool isString) const = 0;
+};
+
+struct InstFortifiedLibCallOptimization : public FortifiedLibCallOptimization {
+ CallInst *CI;
+
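+ /// isFoldable - Return true if the runtime object-size check can be
+ /// dropped: the size operand (SizeCIOp) is -1 (unknown object size, so no
+ /// check was requested), or it is known to be at least as large as what is
+ /// written (SizeArgOp), e.g. __memcpy_chk(d, s, 4, 16) -> memcpy(d, s, 4).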
+ bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
+ if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
+ return true;
+ if (ConstantInt *SizeCI =
+ dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
+ if (SizeCI->isAllOnesValue())
+ return true;
+ if (isString) {
+ uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
+ // GetStringLength returns 0 when the length is unknown, in which case
+ // we can't remove the check.
+ if (Len == 0) return false;
+ return SizeCI->getZExtValue() >= Len;
+ }
+ if (ConstantInt *Arg = dyn_cast<ConstantInt>(
+ CI->getArgOperand(SizeArgOp)))
+ return SizeCI->getZExtValue() >= Arg->getZExtValue();
+ }
+ return false;
+ }
+};
+
+struct MemCpyChkOpt : public InstFortifiedLibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ this->CI = CI;
+ FunctionType *FT = Callee->getFunctionType();
+ LLVMContext &Context = CI->getParent()->getContext();
+
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(Context) ||
+ FT->getParamType(3) != TD->getIntPtrType(Context))
+ return 0;
+
+ if (isFoldable(3, 2, false)) {
+ B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1);
+ return CI->getArgOperand(0);
+ }
+ return 0;
+ }
+};
+
+struct MemMoveChkOpt : public InstFortifiedLibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ this->CI = CI;
+ FunctionType *FT = Callee->getFunctionType();
+ LLVMContext &Context = CI->getParent()->getContext();
+
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(Context) ||
+ FT->getParamType(3) != TD->getIntPtrType(Context))
+ return 0;
+
+ if (isFoldable(3, 2, false)) {
+ B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1);
+ return CI->getArgOperand(0);
+ }
+ return 0;
+ }
+};
+
+struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ this->CI = CI;
+ FunctionType *FT = Callee->getFunctionType();
+ LLVMContext &Context = CI->getParent()->getContext();
+
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isIntegerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(Context) ||
+ FT->getParamType(3) != TD->getIntPtrType(Context))
+ return 0;
+
+ if (isFoldable(3, 2, false)) {
+ Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(),
+ false);
+ B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
+ return CI->getArgOperand(0);
+ }
+ return 0;
+ }
+};
+
+struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ this->CI = CI;
+ StringRef Name = Callee->getName();
+ FunctionType *FT = Callee->getFunctionType();
+ LLVMContext &Context = CI->getParent()->getContext();
+
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 3 ||
+ FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
+ FT->getParamType(2) != TD->getIntPtrType(Context))
+ return 0;
+
+ Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
+ if (Dst == Src) // __strcpy_chk(x,x) -> x
+ return Src;
+
+ // If a) we don't have any length information, or b) we know this will
+ // fit then just lower to a plain strcpy. Otherwise we'll keep our
+ // strcpy_chk call which may fail at runtime if the size is too long.
+ // TODO: It might be nice to get a maximum length out of the possible
+ // string lengths of varying source strings.
+ if (isFoldable(2, 1, true)) {
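+ // Name.substr(2, 6) strips "__" and "_chk": "__strcpy_chk" -> "strcpy".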
+ Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));
+ return Ret;
+ } else {
+ // Maybe we can still fold __strcpy_chk to __memcpy_chk.
+ uint64_t Len = GetStringLength(Src);
+ if (Len == 0) return 0;
+
+ // This optimization requires DataLayout.
+ if (!TD) return 0;
+
+ Value *Ret =
+ EmitMemCpyChk(Dst, Src,
+ ConstantInt::get(TD->getIntPtrType(Context), Len),
+ CI->getArgOperand(2), B, TD, TLI);
+ return Ret;
+ }
+ return 0;
+ }
+};
+
+struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ this->CI = CI;
+ StringRef Name = Callee->getName();
+ FunctionType *FT = Callee->getFunctionType();
+ LLVMContext &Context = CI->getParent()->getContext();
+
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 3 ||
+ FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
+ FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
+ return 0;
+
+ Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
+ if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
+ Value *StrLen = EmitStrLen(Src, B, TD, TLI);
+ return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
+ }
+
+ // If a) we don't have any length information, or b) we know this will
+ // fit then just lower to a plain stpcpy. Otherwise we'll keep our
+ // stpcpy_chk call which may fail at runtime if the size is too long.
+ // TODO: It might be nice to get a maximum length out of the possible
+ // string lengths of varying source strings.
+ if (isFoldable(2, 1, true)) {
+ Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));
+ return Ret;
+ } else {
+ // Maybe we can still fold __stpcpy_chk to __memcpy_chk.
+ uint64_t Len = GetStringLength(Src);
+ if (Len == 0) return 0;
+
+ // This optimization requires DataLayout.
+ if (!TD) return 0;
+
+ Type *PT = FT->getParamType(0);
+ Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
+ Value *DstEnd = B.CreateGEP(Dst,
+ ConstantInt::get(TD->getIntPtrType(PT),
+ Len - 1));
+ if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, TD, TLI))
+ return 0;
+ return DstEnd;
+ }
+ return 0;
+ }
+};
+
+struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ this->CI = CI;
+ StringRef Name = Callee->getName();
+ FunctionType *FT = Callee->getFunctionType();
+ LLVMContext &Context = CI->getParent()->getContext();
+
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
+ !FT->getParamType(2)->isIntegerTy() ||
+ FT->getParamType(3) != TD->getIntPtrType(Context))
+ return 0;
+
+ if (isFoldable(3, 2, false)) {
+ Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), B, TD, TLI,
+ Name.substr(2, 7));
+ return Ret;
+ }
+ return 0;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// String and Memory Library Call Optimizations
+//===----------------------------------------------------------------------===//
+
+struct StrCatOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "strcat" function prototype.
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getReturnType() != B.getInt8PtrTy() ||
+ FT->getParamType(0) != FT->getReturnType() ||
+ FT->getParamType(1) != FT->getReturnType())
+ return 0;
+
+ // Extract some information from the instruction
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
+
+ // See if we can get the length of the input string.
+ uint64_t Len = GetStringLength(Src);
+ if (Len == 0) return 0;
+ --Len; // Unbias length.
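+ // (GetStringLength counts the terminating nul, e.g. it returns 4 for "foo".)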
+
+ // Handle the simple, do-nothing case: strcat(x, "") -> x
+ if (Len == 0)
+ return Dst;
+
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ return emitStrLenMemCpy(Src, Dst, Len, B);
+ }
+
+ Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
+ IRBuilder<> &B) {
+ // We need to find the end of the destination string. That's where the
+ // memory is to be moved to. We just generate a call to strlen.
+ Value *DstLen = EmitStrLen(Dst, B, TD, TLI);
+ if (!DstLen)
+ return 0;
+
+ // Now that we have the destination's length, we must index into the
+ // destination's pointer to get the actual memcpy destination (the end of
+ // the string we're concatenating onto).
+ Value *CpyDst = B.CreateGEP(Dst, DstLen, "endptr");
+
+ // We have enough information to now generate the memcpy call to do the
+ // concatenation for us. Make a memcpy to copy the nul byte with align = 1.
+ B.CreateMemCpy(CpyDst, Src,
+ ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
+ return Dst;
+ }
+};
+
+struct StrNCatOpt : public StrCatOpt {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "strncat" function prototype.
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 3 ||
+ FT->getReturnType() != B.getInt8PtrTy() ||
+ FT->getParamType(0) != FT->getReturnType() ||
+ FT->getParamType(1) != FT->getReturnType() ||
+ !FT->getParamType(2)->isIntegerTy())
+ return 0;
+
+ // Extract some information from the instruction
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
+ uint64_t Len;
+
+ // We don't do anything if the length is not constant.
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
+ Len = LengthArg->getZExtValue();
+ else
+ return 0;
+
+ // See if we can get the length of the input string.
+ uint64_t SrcLen = GetStringLength(Src);
+ if (SrcLen == 0) return 0;
+ --SrcLen; // Unbias length.
+
+ // Handle the simple, do-nothing cases:
+ // strncat(x, "", c) -> x
+ // strncat(x, c, 0) -> x
+ if (SrcLen == 0 || Len == 0) return Dst;
+
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ // We don't optimize this case
+ if (Len < SrcLen) return 0;
+
+ // strncat(x, s, c) -> strcat(x, s)
+ // s is constant so the strcat can be optimized further
+ return emitStrLenMemCpy(Src, Dst, SrcLen, B);
+ }
+};
+
+struct StrChrOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "strchr" function prototype.
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getReturnType() != B.getInt8PtrTy() ||
+ FT->getParamType(0) != FT->getReturnType() ||
+ !FT->getParamType(1)->isIntegerTy(32))
+ return 0;
+
+ Value *SrcStr = CI->getArgOperand(0);
+
+ // If the second operand is non-constant, see if we can compute the length
+ // of the input string and turn this into memchr.
+ ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+ if (CharC == 0) {
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ uint64_t Len = GetStringLength(SrcStr);
+ if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
+ return 0;
+
+ return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
+ ConstantInt::get(TD->getIntPtrType(*Context), Len),
+ B, TD, TLI);
+ }
+
+ // Otherwise, the character is a constant; see if the first argument is
+ // a string literal. If so, we can constant fold.
+ StringRef Str;
+ if (!getConstantStringInfo(SrcStr, Str))
+ return 0;
+
+ // Compute the offset, make sure to handle the case when we're searching for
+ // zero (a weird way to spell strlen).
+ size_t I = CharC->getSExtValue() == 0 ?
+ Str.size() : Str.find(CharC->getSExtValue());
+ if (I == StringRef::npos) // Didn't find the char. strchr returns null.
+ return Constant::getNullValue(CI->getType());
+
+ // strchr(s+n,c) -> gep(s+n+i,c)
+ return B.CreateGEP(SrcStr, B.getInt64(I), "strchr");
+ }
+};
+
+struct StrRChrOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "strrchr" function prototype.
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getReturnType() != B.getInt8PtrTy() ||
+ FT->getParamType(0) != FT->getReturnType() ||
+ !FT->getParamType(1)->isIntegerTy(32))
+ return 0;
+
+ Value *SrcStr = CI->getArgOperand(0);
+ ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+
+ // Cannot fold anything if we're not looking for a constant.
+ if (!CharC)
+ return 0;
+
+ StringRef Str;
+ if (!getConstantStringInfo(SrcStr, Str)) {
+ // strrchr(s, 0) -> strchr(s, 0)
+ if (TD && CharC->isZero())
+ return EmitStrChr(SrcStr, '\0', B, TD, TLI);
+ return 0;
+ }
+
+ // Compute the offset.
+ size_t I = CharC->getSExtValue() == 0 ?
+ Str.size() : Str.rfind(CharC->getSExtValue());
+ if (I == StringRef::npos) // Didn't find the char. Return null.
+ return Constant::getNullValue(CI->getType());
+
+ // strrchr(s+n,c) -> gep(s+n+i,c)
+ return B.CreateGEP(SrcStr, B.getInt64(I), "strrchr");
+ }
+};
+
+struct StrCmpOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "strcmp" function prototype.
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ !FT->getReturnType()->isIntegerTy(32) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != B.getInt8PtrTy())
+ return 0;
+
+ Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
+ if (Str1P == Str2P) // strcmp(x,x) -> 0
+ return ConstantInt::get(CI->getType(), 0);
+
+ StringRef Str1, Str2;
+ bool HasStr1 = getConstantStringInfo(Str1P, Str1);
+ bool HasStr2 = getConstantStringInfo(Str2P, Str2);
+
+ // strcmp(x, y) -> cnst (if both x and y are constant strings)
+ if (HasStr1 && HasStr2)
+ return ConstantInt::get(CI->getType(), Str1.compare(Str2));
+
+ if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
+ return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"),
+ CI->getType()));
+
+ if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x
+ return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
+
+ // strcmp(P, "x") -> memcmp(P, "x", 2)
+ uint64_t Len1 = GetStringLength(Str1P);
+ uint64_t Len2 = GetStringLength(Str2P);
+ if (Len1 && Len2) {
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ return EmitMemCmp(Str1P, Str2P,
+ ConstantInt::get(TD->getIntPtrType(*Context),
+ std::min(Len1, Len2)), B, TD, TLI);
+ }
+
+ return 0;
+ }
+};
+
+struct StrNCmpOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "strncmp" function prototype.
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 3 ||
+ !FT->getReturnType()->isIntegerTy(32) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ !FT->getParamType(2)->isIntegerTy())
+ return 0;
+
+ Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
+ if (Str1P == Str2P) // strncmp(x,x,n) -> 0
+ return ConstantInt::get(CI->getType(), 0);
+
+ // Get the length argument if it is constant.
+ uint64_t Length;
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
+ Length = LengthArg->getZExtValue();
+ else
+ return 0;
+
+ if (Length == 0) // strncmp(x,y,0) -> 0
+ return ConstantInt::get(CI->getType(), 0);
+
+ if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
+ return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI);
+
+ StringRef Str1, Str2;
+ bool HasStr1 = getConstantStringInfo(Str1P, Str1);
+ bool HasStr2 = getConstantStringInfo(Str2P, Str2);
+
+ // strncmp(x, y, n) -> cnst (if both x and y are constant strings)
+ if (HasStr1 && HasStr2) {
+ StringRef SubStr1 = Str1.substr(0, Length);
+ StringRef SubStr2 = Str2.substr(0, Length);
+ return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2));
+ }
+
+ if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x
+ return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"),
+ CI->getType()));
+
+ if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
+ return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
+
+ return 0;
+ }
+};
+
+struct StrCpyOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "strcpy" function prototype.
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != B.getInt8PtrTy())
+ return 0;
+
+ Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
+ if (Dst == Src) // strcpy(x,x) -> x
+ return Src;
+
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ // See if we can get the length of the input string.
+ uint64_t Len = GetStringLength(Src);
+ if (Len == 0) return 0;
+
+ // We have enough information to now generate the memcpy call to do the
+ // copy for us. Make a memcpy to copy the nul byte with align = 1.
+ B.CreateMemCpy(Dst, Src,
+ ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
+ return Dst;
+ }
+};
+
+struct StpCpyOpt: public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "stpcpy" function prototype.
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != B.getInt8PtrTy())
+ return 0;
+
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
+ if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
+ Value *StrLen = EmitStrLen(Src, B, TD, TLI);
+ return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
+ }
+
+ // See if we can get the length of the input string.
+ uint64_t Len = GetStringLength(Src);
+ if (Len == 0) return 0;
+
+ Type *PT = FT->getParamType(0);
+ Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
+ Value *DstEnd = B.CreateGEP(Dst,
+ ConstantInt::get(TD->getIntPtrType(PT),
+ Len - 1));
+
+ // We have enough information to now generate the memcpy call to do the
+ // copy for us. Make a memcpy to copy the nul byte with align = 1.
+ B.CreateMemCpy(Dst, Src, LenV, 1);
+ return DstEnd;
+ }
+};
+
+struct StrNCpyOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ !FT->getParamType(2)->isIntegerTy())
+ return 0;
+
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
+ Value *LenOp = CI->getArgOperand(2);
+
+ // See if we can get the length of the input string.
+ uint64_t SrcLen = GetStringLength(Src);
+ if (SrcLen == 0) return 0;
+ --SrcLen;
+
+ if (SrcLen == 0) {
+ // strncpy(x, "", y) -> memset(x, '\0', y, 1)
+ B.CreateMemSet(Dst, B.getInt8('\0'), LenOp, 1);
+ return Dst;
+ }
+
+ uint64_t Len;
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(LenOp))
+ Len = LengthArg->getZExtValue();
+ else
+ return 0;
+
+ if (Len == 0) return Dst; // strncpy(x, y, 0) -> x
+
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ // Let strncpy handle the zero padding
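+ // e.g. strncpy(x, "ab", 5) must also store three nul bytes.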
+ if (Len > SrcLen+1) return 0;
+
+ Type *PT = FT->getParamType(0);
+ // strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
+ B.CreateMemCpy(Dst, Src,
+ ConstantInt::get(TD->getIntPtrType(PT), Len), 1);
+
+ return Dst;
+ }
+};
+
+struct StrLenOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 1 ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ !FT->getReturnType()->isIntegerTy())
+ return 0;
+
+ Value *Src = CI->getArgOperand(0);
+
+ // Constant folding: strlen("xyz") -> 3
+ if (uint64_t Len = GetStringLength(Src))
+ return ConstantInt::get(CI->getType(), Len-1);
+
+ // strlen(x) != 0 --> *x != 0
+ // strlen(x) == 0 --> *x == 0
+ if (isOnlyUsedInZeroEqualityComparison(CI))
+ return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType());
+ return 0;
+ }
+};
+
+struct StrPBrkOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ FT->getParamType(1) != FT->getParamType(0) ||
+ FT->getReturnType() != FT->getParamType(0))
+ return 0;
+
+ StringRef S1, S2;
+ bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
+
+ // strpbrk(s, "") -> NULL
+ // strpbrk("", s) -> NULL
+ if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
+ return Constant::getNullValue(CI->getType());
+
+ // Constant folding.
+ if (HasS1 && HasS2) {
+ size_t I = S1.find_first_of(S2);
+ if (I == std::string::npos) // No match.
+ return Constant::getNullValue(CI->getType());
+
+ return B.CreateGEP(CI->getArgOperand(0), B.getInt64(I), "strpbrk");
+ }
+
+ // strpbrk(s, "a") -> strchr(s, 'a')
+ if (TD && HasS2 && S2.size() == 1)
+ return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI);
+
+ return 0;
+ }
+};
+
+struct StrToOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ if ((FT->getNumParams() != 2 && FT->getNumParams() != 3) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy())
+ return 0;
+
+ Value *EndPtr = CI->getArgOperand(1);
+ if (isa<ConstantPointerNull>(EndPtr)) {
+ // With a null EndPtr, this function won't capture the main argument.
+ // It would be readonly too, except that it still may write to errno.
+ CI->addAttribute(1, Attributes::get(Callee->getContext(),
+ Attributes::NoCapture));
+ }
+
+ return 0;
+ }
+};
+
+struct StrSpnOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ FT->getParamType(1) != FT->getParamType(0) ||
+ !FT->getReturnType()->isIntegerTy())
+ return 0;
+
+ StringRef S1, S2;
+ bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
+
+ // strspn(s, "") -> 0
+ // strspn("", s) -> 0
+ if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
+ return Constant::getNullValue(CI->getType());
+
+ // Constant folding.
+ if (HasS1 && HasS2) {
+ size_t Pos = S1.find_first_not_of(S2);
+ if (Pos == StringRef::npos) Pos = S1.size();
+ return ConstantInt::get(CI->getType(), Pos);
+ }
+
+ return 0;
+ }
+};
+
+struct StrCSpnOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ FT->getParamType(1) != FT->getParamType(0) ||
+ !FT->getReturnType()->isIntegerTy())
+ return 0;
+
+ StringRef S1, S2;
+ bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
+
+ // strcspn("", s) -> 0
+ if (HasS1 && S1.empty())
+ return Constant::getNullValue(CI->getType());
+
+ // Constant folding.
+ if (HasS1 && HasS2) {
+ size_t Pos = S1.find_first_of(S2);
+ if (Pos == StringRef::npos) Pos = S1.size();
+ return ConstantInt::get(CI->getType(), Pos);
+ }
+
+ // strcspn(s, "") -> strlen(s)
+ if (TD && HasS2 && S2.empty())
+ return EmitStrLen(CI->getArgOperand(0), B, TD, TLI);
+
+ return 0;
+ }
+};
+
+struct StrStrOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy() ||
+ !FT->getReturnType()->isPointerTy())
+ return 0;
+
+ // fold strstr(x, x) -> x.
+ if (CI->getArgOperand(0) == CI->getArgOperand(1))
+ return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
+
+ // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
+ if (TD && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
+ Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI);
+ if (!StrLen)
+ return 0;
+ Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
+ StrLen, B, TD, TLI);
+ if (!StrNCmp)
+ return 0;
+ for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
+ UI != UE; ) {
+ ICmpInst *Old = cast<ICmpInst>(*UI++);
+ Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp,
+ ConstantInt::getNullValue(StrNCmp->getType()),
+ "cmp");
+ LCS->replaceAllUsesWith(Old, Cmp);
+ }
+ return CI;
+ }
+
+ // See if either input string is a constant string.
+ StringRef SearchStr, ToFindStr;
+ bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr);
+ bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr);
+
+ // fold strstr(x, "") -> x.
+ if (HasStr2 && ToFindStr.empty())
+ return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
+
+ // If both strings are known, constant fold it.
+ if (HasStr1 && HasStr2) {
+ std::string::size_type Offset = SearchStr.find(ToFindStr);
+
+ if (Offset == StringRef::npos) // strstr("foo", "bar") -> null
+ return Constant::getNullValue(CI->getType());
+
+ // strstr("abcd", "bc") -> gep((char*)"abcd", 1)
+ Value *Result = CastToCStr(CI->getArgOperand(0), B);
+ Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr");
+ return B.CreateBitCast(Result, CI->getType());
+ }
+
+ // fold strstr(x, "y") -> strchr(x, 'y').
+ if (HasStr2 && ToFindStr.size() == 1) {
+ Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI);
+ return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0;
+ }
+ return 0;
+ }
+};
+
+struct MemCmpOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 3 || !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy() ||
+ !FT->getReturnType()->isIntegerTy(32))
+ return 0;
+
+ Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
+
+ if (LHS == RHS) // memcmp(s,s,x) -> 0
+ return Constant::getNullValue(CI->getType());
+
+ // Make sure we have a constant length.
+ ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
+ if (!LenC) return 0;
+ uint64_t Len = LenC->getZExtValue();
+
+ if (Len == 0) // memcmp(s1,s2,0) -> 0
+ return Constant::getNullValue(CI->getType());
+
+    // memcmp(LHS, RHS, 1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
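+    // The loads are zero-extended because memcmp compares bytes as
+    // unsigned char.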
+ if (Len == 1) {
+ Value *LHSV = B.CreateZExt(B.CreateLoad(CastToCStr(LHS, B), "lhsc"),
+ CI->getType(), "lhsv");
+ Value *RHSV = B.CreateZExt(B.CreateLoad(CastToCStr(RHS, B), "rhsc"),
+ CI->getType(), "rhsv");
+ return B.CreateSub(LHSV, RHSV, "chardiff");
+ }
+
+    // Constant folding: memcmp(x, y, l) -> constant (when all arguments are constant)
+ StringRef LHSStr, RHSStr;
+ if (getConstantStringInfo(LHS, LHSStr) &&
+ getConstantStringInfo(RHS, RHSStr)) {
+ // Make sure we're not reading out-of-bounds memory.
+ if (Len > LHSStr.size() || Len > RHSStr.size())
+ return 0;
+ uint64_t Ret = memcmp(LHSStr.data(), RHSStr.data(), Len);
+ return ConstantInt::get(CI->getType(), Ret);
+ }
+
+ return 0;
+ }
+};
+
+struct MemCpyOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(*Context))
+ return 0;
+
+ // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
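+    // (The trailing 1 is the conservative alignment assumed for both
+    // pointers.)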
+ B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1);
+ return CI->getArgOperand(0);
+ }
+};
+
+struct MemMoveOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(*Context))
+ return 0;
+
+ // memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
+ B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1);
+ return CI->getArgOperand(0);
+ }
+};
+
+struct MemSetOpt : public LibCallOptimization {
+ virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // These optimizations require DataLayout.
+ if (!TD) return 0;
+
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isIntegerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(*Context))
+ return 0;
+
+ // memset(p, v, n) -> llvm.memset(p, v, n, 1)
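+    // The value operand is truncated to i8 first, matching memset's
+    // semantics of filling with (unsigned char)v.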
+ Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
+ B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
+ return CI->getArgOperand(0);
+ }
+};
+
+} // End anonymous namespace.
+
+namespace llvm {
+
+class LibCallSimplifierImpl {
+ const DataLayout *TD;
+ const TargetLibraryInfo *TLI;
+ const LibCallSimplifier *LCS;
+ StringMap<LibCallOptimization*> Optimizations;
+
+ // Fortified library call optimizations.
+ MemCpyChkOpt MemCpyChk;
+ MemMoveChkOpt MemMoveChk;
+ MemSetChkOpt MemSetChk;
+ StrCpyChkOpt StrCpyChk;
+ StpCpyChkOpt StpCpyChk;
+ StrNCpyChkOpt StrNCpyChk;
+
+ // String library call optimizations.
+ StrCatOpt StrCat;
+ StrNCatOpt StrNCat;
+ StrChrOpt StrChr;
+ StrRChrOpt StrRChr;
+ StrCmpOpt StrCmp;
+ StrNCmpOpt StrNCmp;
+ StrCpyOpt StrCpy;
+ StpCpyOpt StpCpy;
+ StrNCpyOpt StrNCpy;
+ StrLenOpt StrLen;
+ StrPBrkOpt StrPBrk;
+ StrToOpt StrTo;
+ StrSpnOpt StrSpn;
+ StrCSpnOpt StrCSpn;
+ StrStrOpt StrStr;
+
+ // Memory library call optimizations.
+ MemCmpOpt MemCmp;
+ MemCpyOpt MemCpy;
+ MemMoveOpt MemMove;
+ MemSetOpt MemSet;
+
+ void initOptimizations();
+ void addOpt(LibFunc::Func F, LibCallOptimization* Opt);
+public:
+ LibCallSimplifierImpl(const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const LibCallSimplifier *LCS) {
+ this->TD = TD;
+ this->TLI = TLI;
+ this->LCS = LCS;
+ }
+
+ Value *optimizeCall(CallInst *CI);
+};
+
+void LibCallSimplifierImpl::initOptimizations() {
+ // Fortified library call optimizations.
+ Optimizations["__memcpy_chk"] = &MemCpyChk;
+ Optimizations["__memmove_chk"] = &MemMoveChk;
+ Optimizations["__memset_chk"] = &MemSetChk;
+ Optimizations["__strcpy_chk"] = &StrCpyChk;
+ Optimizations["__stpcpy_chk"] = &StpCpyChk;
+ Optimizations["__strncpy_chk"] = &StrNCpyChk;
+ Optimizations["__stpncpy_chk"] = &StrNCpyChk;
+
+ // String library call optimizations.
+ addOpt(LibFunc::strcat, &StrCat);
+ addOpt(LibFunc::strncat, &StrNCat);
+ addOpt(LibFunc::strchr, &StrChr);
+ addOpt(LibFunc::strrchr, &StrRChr);
+ addOpt(LibFunc::strcmp, &StrCmp);
+ addOpt(LibFunc::strncmp, &StrNCmp);
+ addOpt(LibFunc::strcpy, &StrCpy);
+ addOpt(LibFunc::stpcpy, &StpCpy);
+ addOpt(LibFunc::strncpy, &StrNCpy);
+ addOpt(LibFunc::strlen, &StrLen);
+ addOpt(LibFunc::strpbrk, &StrPBrk);
+ addOpt(LibFunc::strtol, &StrTo);
+ addOpt(LibFunc::strtod, &StrTo);
+ addOpt(LibFunc::strtof, &StrTo);
+ addOpt(LibFunc::strtoul, &StrTo);
+ addOpt(LibFunc::strtoll, &StrTo);
+ addOpt(LibFunc::strtold, &StrTo);
+ addOpt(LibFunc::strtoull, &StrTo);
+ addOpt(LibFunc::strspn, &StrSpn);
+ addOpt(LibFunc::strcspn, &StrCSpn);
+ addOpt(LibFunc::strstr, &StrStr);
+
+ // Memory library call optimizations.
+ addOpt(LibFunc::memcmp, &MemCmp);
+ addOpt(LibFunc::memcpy, &MemCpy);
+ addOpt(LibFunc::memmove, &MemMove);
+ addOpt(LibFunc::memset, &MemSet);
+}
+
+Value *LibCallSimplifierImpl::optimizeCall(CallInst *CI) {
+ if (Optimizations.empty())
+ initOptimizations();
+
+ Function *Callee = CI->getCalledFunction();
+ LibCallOptimization *LCO = Optimizations.lookup(Callee->getName());
+ if (LCO) {
+ IRBuilder<> Builder(CI);
+ return LCO->optimizeCall(CI, TD, TLI, LCS, Builder);
+ }
+ return 0;
+}
+
+void LibCallSimplifierImpl::addOpt(LibFunc::Func F, LibCallOptimization* Opt) {
+ if (TLI->has(F))
+ Optimizations[TLI->getName(F)] = Opt;
+}
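+
+// Note that the fortified __*_chk entries are keyed by their literal names,
+// whereas addOpt() registers an optimization only when TargetLibraryInfo
+// reports the corresponding function as available for the target.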
+
+LibCallSimplifier::LibCallSimplifier(const DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
+ Impl = new LibCallSimplifierImpl(TD, TLI, this);
+}
+
+LibCallSimplifier::~LibCallSimplifier() {
+ delete Impl;
+}
+
+Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
+ return Impl->optimizeCall(CI);
+}
+
+void LibCallSimplifier::replaceAllUsesWith(Instruction *I, Value *With) const {
+ I->replaceAllUsesWith(With);
+ I->eraseFromParent();
+}
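+
+// Typical driver usage (an illustrative sketch, not part of this patch):
+//
+//   LibCallSimplifier Simplifier(TD, TLI);
+//   if (Value *V = Simplifier.optimizeCall(CI))
+//     if (V != CI) {
+//       CI->replaceAllUsesWith(V);
+//       CI->eraseFromParent();
+//     }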
+
+}
diff --git a/contrib/llvm/lib/Transforms/Utils/Utils.cpp b/contrib/llvm/lib/Transforms/Utils/Utils.cpp
index 24e8c8f..5812d46 100644
--- a/contrib/llvm/lib/Transforms/Utils/Utils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Utils.cpp
@@ -29,6 +29,7 @@ void llvm::initializeTransformUtils(PassRegistry &Registry) {
initializePromotePassPass(Registry);
initializeUnifyFunctionExitNodesPass(Registry);
initializeInstSimplifierPass(Registry);
+ initializeMetaRenamerPass(Registry);
}
/// LLVMInitializeTransformUtils - C binding for initializeTransformUtilsPasses.
diff --git a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
index fc2538d..a30b093 100644
--- a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -21,7 +21,7 @@
using namespace llvm;
// Out-of-line method to anchor the vtable for this class.
-void ValueMapTypeRemapper::Anchor() {}
+void ValueMapTypeRemapper::anchor() {}
Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper) {
diff --git a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
index 62d23cb..f7be3e3 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -28,12 +28,14 @@
#include "llvm/Type.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -41,17 +43,27 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
using namespace llvm;
+static cl::opt<bool>
+IgnoreTargetInfo("bb-vectorize-ignore-target-info", cl::init(false),
+ cl::Hidden, cl::desc("Ignore target information"));
+
static cl::opt<unsigned>
ReqChainDepth("bb-vectorize-req-chain-depth", cl::init(6), cl::Hidden,
cl::desc("The required chain depth for vectorization"));
+static cl::opt<bool>
+UseChainDepthWithTI("bb-vectorize-use-chain-depth", cl::init(false),
+ cl::Hidden, cl::desc("Use the chain depth requirement with"
+ " target information"));
+
static cl::opt<unsigned>
SearchLimit("bb-vectorize-search-limit", cl::init(400), cl::Hidden,
cl::desc("The maximum search distance for instruction pairs"));
@@ -93,8 +105,9 @@ static cl::opt<bool>
NoFloats("bb-vectorize-no-floats", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize floating-point values"));
+// FIXME: This should default to false once pointer vector support works.
static cl::opt<bool>
-NoPointers("bb-vectorize-no-pointers", cl::init(false), cl::Hidden,
+NoPointers("bb-vectorize-no-pointers", cl::init(/*false*/ true), cl::Hidden,
cl::desc("Don't try to vectorize pointer values"));
static cl::opt<bool>
@@ -159,6 +172,12 @@ DebugCycleCheck("bb-vectorize-debug-cycle-check",
cl::init(false), cl::Hidden,
cl::desc("When debugging is enabled, output information on the"
" cycle-checking process"));
+
+static cl::opt<bool>
+PrintAfterEveryPair("bb-vectorize-debug-print-after-every-pair",
+ cl::init(false), cl::Hidden,
+ cl::desc("When debugging is enabled, dump the basic block after"
+ " every pair is fused"));
#endif
STATISTIC(NumFusedOps, "Number of operations fused by bb-vectorize");
@@ -177,13 +196,19 @@ namespace {
BBVectorize(Pass *P, const VectorizeConfig &C)
: BasicBlockPass(ID), Config(C) {
AA = &P->getAnalysis<AliasAnalysis>();
+ DT = &P->getAnalysis<DominatorTree>();
SE = &P->getAnalysis<ScalarEvolution>();
- TD = P->getAnalysisIfAvailable<TargetData>();
+ TD = P->getAnalysisIfAvailable<DataLayout>();
+ TTI = IgnoreTargetInfo ? 0 :
+ P->getAnalysisIfAvailable<TargetTransformInfo>();
+ VTTI = TTI ? TTI->getVectorTargetTransformInfo() : 0;
}
typedef std::pair<Value *, Value *> ValuePair;
+ typedef std::pair<ValuePair, int> ValuePairWithCost;
typedef std::pair<ValuePair, size_t> ValuePairWithDepth;
typedef std::pair<ValuePair, ValuePair> VPPair; // A ValuePair pair
+ typedef std::pair<VPPair, unsigned> VPPairWithType;
typedef std::pair<std::multimap<Value *, Value *>::iterator,
std::multimap<Value *, Value *>::iterator> VPIteratorPair;
typedef std::pair<std::multimap<ValuePair, ValuePair>::iterator,
@@ -191,8 +216,11 @@ namespace {
VPPIteratorPair;
AliasAnalysis *AA;
+ DominatorTree *DT;
ScalarEvolution *SE;
- TargetData *TD;
+ DataLayout *TD;
+ TargetTransformInfo *TTI;
+ const VectorTargetTransformInfo *VTTI;
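+      // When non-null, VTTI supplies the target's cost model, which replaces
+      // the fixed chain-depth and vector-width heuristics used below.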
// FIXME: const correct?
@@ -201,11 +229,23 @@ namespace {
bool getCandidatePairs(BasicBlock &BB,
BasicBlock::iterator &Start,
std::multimap<Value *, Value *> &CandidatePairs,
+ DenseSet<ValuePair> &FixedOrderPairs,
+ DenseMap<ValuePair, int> &CandidatePairCostSavings,
std::vector<Value *> &PairableInsts, bool NonPow2Len);
+ // FIXME: The current implementation does not account for pairs that
+ // are connected in multiple ways. For example:
+ // C1 = A1 / A2; C2 = A2 / A1 (which may be both direct and a swap)
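+    // Direct: the second pair uses the first pair's members in matching
+    // order; Swap: in opposite order (a shuffle is needed on fusion);
+    // Splat: both members of the second pair use the same member of the
+    // first pair.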
+ enum PairConnectionType {
+ PairConnectionDirect,
+ PairConnectionSwap,
+ PairConnectionSplat
+ };
+
void computeConnectedPairs(std::multimap<Value *, Value *> &CandidatePairs,
std::vector<Value *> &PairableInsts,
- std::multimap<ValuePair, ValuePair> &ConnectedPairs);
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes);
void buildDepMap(BasicBlock &BB,
std::multimap<Value *, Value *> &CandidatePairs,
@@ -213,19 +253,29 @@ namespace {
DenseSet<ValuePair> &PairableInstUsers);
void choosePairs(std::multimap<Value *, Value *> &CandidatePairs,
+ DenseMap<ValuePair, int> &CandidatePairCostSavings,
std::vector<Value *> &PairableInsts,
+ DenseSet<ValuePair> &FixedOrderPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes,
std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairDeps,
DenseSet<ValuePair> &PairableInstUsers,
DenseMap<Value *, Value *>& ChosenPairs);
void fuseChosenPairs(BasicBlock &BB,
std::vector<Value *> &PairableInsts,
- DenseMap<Value *, Value *>& ChosenPairs);
+ DenseMap<Value *, Value *>& ChosenPairs,
+ DenseSet<ValuePair> &FixedOrderPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairDeps);
+
bool isInstVectorizable(Instruction *I, bool &IsSimpleLoadStore);
bool areInstsCompatible(Instruction *I, Instruction *J,
- bool IsSimpleLoadStore, bool NonPow2Len);
+ bool IsSimpleLoadStore, bool NonPow2Len,
+ int &CostSavings, int &FixedOrder);
bool trackUsesOfI(DenseSet<Value *> &Users,
AliasSetTracker &WriteSet, Instruction *I,
@@ -236,6 +286,7 @@ namespace {
std::multimap<Value *, Value *> &CandidatePairs,
std::vector<Value *> &PairableInsts,
std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes,
ValuePair P);
bool pairsConflict(ValuePair P, ValuePair Q,
@@ -267,17 +318,21 @@ namespace {
void findBestTreeFor(
std::multimap<Value *, Value *> &CandidatePairs,
+ DenseMap<ValuePair, int> &CandidatePairCostSavings,
std::vector<Value *> &PairableInsts,
+ DenseSet<ValuePair> &FixedOrderPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes,
std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairDeps,
DenseSet<ValuePair> &PairableInstUsers,
std::multimap<ValuePair, ValuePair> &PairableInstUserMap,
DenseMap<Value *, Value *> &ChosenPairs,
DenseSet<ValuePair> &BestTree, size_t &BestMaxDepth,
- size_t &BestEffSize, VPIteratorPair ChoiceRange,
+ int &BestEffSize, VPIteratorPair ChoiceRange,
bool UseCycleCheck);
Value *getReplacementPointerInput(LLVMContext& Context, Instruction *I,
- Instruction *J, unsigned o, bool FlipMemInputs);
+ Instruction *J, unsigned o);
void fillNewShuffleMask(LLVMContext& Context, Instruction *J,
unsigned MaskOffset, unsigned NumInElem,
@@ -289,20 +344,20 @@ namespace {
bool expandIEChain(LLVMContext& Context, Instruction *I, Instruction *J,
unsigned o, Value *&LOp, unsigned numElemL,
- Type *ArgTypeL, Type *ArgTypeR,
+ Type *ArgTypeL, Type *ArgTypeR, bool IBeforeJ,
unsigned IdxOff = 0);
Value *getReplacementInput(LLVMContext& Context, Instruction *I,
- Instruction *J, unsigned o, bool FlipMemInputs);
+ Instruction *J, unsigned o, bool IBeforeJ);
void getReplacementInputsForPair(LLVMContext& Context, Instruction *I,
Instruction *J, SmallVector<Value *, 3> &ReplacedOperands,
- bool FlipMemInputs);
+ bool IBeforeJ);
void replaceOutputsOfPair(LLVMContext& Context, Instruction *I,
Instruction *J, Instruction *K,
Instruction *&InsertionPt, Instruction *&K1,
- Instruction *&K2, bool FlipMemInputs);
+ Instruction *&K2);
void collectPairLoadMoveSet(BasicBlock &BB,
DenseMap<Value *, Value *> &ChosenPairs,
@@ -314,10 +369,6 @@ namespace {
DenseMap<Value *, Value *> &ChosenPairs,
std::multimap<Value *, Value *> &LoadMoveSet);
- void collectPtrInfo(std::vector<Value *> &PairableInsts,
- DenseMap<Value *, Value *> &ChosenPairs,
- DenseSet<Value *> &LowPtrInsts);
-
bool canMoveUsesOfIAfterJ(BasicBlock &BB,
std::multimap<Value *, Value *> &LoadMoveSet,
Instruction *I, Instruction *J);
@@ -330,13 +381,22 @@ namespace {
void combineMetadata(Instruction *K, const Instruction *J);
bool vectorizeBB(BasicBlock &BB) {
+ if (!DT->isReachableFromEntry(&BB)) {
+ DEBUG(dbgs() << "BBV: skipping unreachable " << BB.getName() <<
+ " in " << BB.getParent()->getName() << "\n");
+ return false;
+ }
+
+ DEBUG(if (VTTI) dbgs() << "BBV: using target information\n");
+
bool changed = false;
// Iterate a sufficient number of times to merge types of size 1 bit,
// then 2 bits, then 4, etc. up to half of the bit width of the target
// vector register.
unsigned n = 1;
for (unsigned v = 2;
- v <= Config.VectorBits && (!Config.MaxIter || n <= Config.MaxIter);
+ (VTTI || v <= Config.VectorBits) &&
+ (!Config.MaxIter || n <= Config.MaxIter);
v *= 2, ++n) {
DEBUG(dbgs() << "BBV: fusing loop #" << n <<
" for " << BB.getName() << " in " <<
@@ -363,8 +423,12 @@ namespace {
virtual bool runOnBasicBlock(BasicBlock &BB) {
AA = &getAnalysis<AliasAnalysis>();
+ DT = &getAnalysis<DominatorTree>();
SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
+ TTI = IgnoreTargetInfo ? 0 :
+ getAnalysisIfAvailable<TargetTransformInfo>();
+ VTTI = TTI ? TTI->getVectorTargetTransformInfo() : 0;
return vectorizeBB(BB);
}
@@ -372,8 +436,10 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
BasicBlockPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
+ AU.addRequired<DominatorTree>();
AU.addRequired<ScalarEvolution>();
AU.addPreserved<AliasAnalysis>();
+ AU.addPreserved<DominatorTree>();
AU.addPreserved<ScalarEvolution>();
AU.setPreservesCFG();
}
@@ -415,6 +481,14 @@ namespace {
T2 = cast<CastInst>(I)->getSrcTy();
else
T2 = T1;
+
+ if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
+ T2 = SI->getCondition()->getType();
+ } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
+ T2 = SI->getOperand(0)->getType();
+ } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
+ T2 = CI->getOperand(0)->getType();
+ }
}
// Returns the weight associated with the provided value. A chain of
@@ -446,6 +520,62 @@ namespace {
return 1;
}
+ // Returns the cost of the provided instruction using VTTI.
+ // This does not handle loads and stores.
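+    // Costs are in the abstract units used by VectorTargetTransformInfo and
+    // are only meaningful relative to one another when computing savings.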
+ unsigned getInstrCost(unsigned Opcode, Type *T1, Type *T2) {
+ switch (Opcode) {
+ default: break;
+ case Instruction::GetElementPtr:
+      // We mark this instruction as zero-cost because scalar GEPs are usually
+      // folded into the instruction's addressing mode. At the moment we don't
+      // generate vector GEPs.
+ return 0;
+ case Instruction::Br:
+ return VTTI->getCFInstrCost(Opcode);
+ case Instruction::PHI:
+ return 0;
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ return VTTI->getArithmeticInstrCost(Opcode, T1);
+ case Instruction::Select:
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ return VTTI->getCmpSelInstrCost(Opcode, T1, T2);
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::SIToFP:
+ case Instruction::UIToFP:
+ case Instruction::Trunc:
+ case Instruction::FPTrunc:
+ case Instruction::BitCast:
+ case Instruction::ShuffleVector:
+ return VTTI->getCastInstrCost(Opcode, T1, T2);
+ }
+
+ return 1;
+ }
+
// This determines the relative offset of two loads or stores, returning
// true if the offset could be determined to be some constant value.
// For example, if OffsetInElmts == 1, then J accesses the memory directly
@@ -453,20 +583,30 @@ namespace {
// directly after J.
bool getPairPtrInfo(Instruction *I, Instruction *J,
Value *&IPtr, Value *&JPtr, unsigned &IAlignment, unsigned &JAlignment,
- int64_t &OffsetInElmts) {
+ unsigned &IAddressSpace, unsigned &JAddressSpace,
+ int64_t &OffsetInElmts, bool ComputeOffset = true) {
OffsetInElmts = 0;
- if (isa<LoadInst>(I)) {
- IPtr = cast<LoadInst>(I)->getPointerOperand();
- JPtr = cast<LoadInst>(J)->getPointerOperand();
- IAlignment = cast<LoadInst>(I)->getAlignment();
- JAlignment = cast<LoadInst>(J)->getAlignment();
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ LoadInst *LJ = cast<LoadInst>(J);
+ IPtr = LI->getPointerOperand();
+ JPtr = LJ->getPointerOperand();
+ IAlignment = LI->getAlignment();
+ JAlignment = LJ->getAlignment();
+ IAddressSpace = LI->getPointerAddressSpace();
+ JAddressSpace = LJ->getPointerAddressSpace();
} else {
- IPtr = cast<StoreInst>(I)->getPointerOperand();
- JPtr = cast<StoreInst>(J)->getPointerOperand();
- IAlignment = cast<StoreInst>(I)->getAlignment();
- JAlignment = cast<StoreInst>(J)->getAlignment();
+ StoreInst *SI = cast<StoreInst>(I), *SJ = cast<StoreInst>(J);
+ IPtr = SI->getPointerOperand();
+ JPtr = SJ->getPointerOperand();
+ IAlignment = SI->getAlignment();
+ JAlignment = SJ->getAlignment();
+ IAddressSpace = SI->getPointerAddressSpace();
+ JAddressSpace = SJ->getPointerAddressSpace();
}
+ if (!ComputeOffset)
+ return true;
+
const SCEV *IPtrSCEV = SE->getSCEV(IPtr);
const SCEV *JPtrSCEV = SE->getSCEV(JPtr);
@@ -536,6 +676,19 @@ namespace {
return false;
}
+
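+    // A chain is "pure" if, following operand 0 backward, every link is
+    // another insertelement or undef; such a chain builds a vector entirely
+    // from scalars and can be rewritten to build a wider vector directly.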
+ bool isPureIEChain(InsertElementInst *IE) {
+ InsertElementInst *IENext = IE;
+ do {
+ if (!isa<UndefValue>(IENext->getOperand(0)) &&
+ !isa<InsertElementInst>(IENext->getOperand(0))) {
+ return false;
+ }
+ } while ((IENext =
+ dyn_cast<InsertElementInst>(IENext->getOperand(0))));
+
+ return true;
+ }
};
// This function implements one vectorization iteration on the provided
@@ -546,11 +699,18 @@ namespace {
std::vector<Value *> AllPairableInsts;
DenseMap<Value *, Value *> AllChosenPairs;
+ DenseSet<ValuePair> AllFixedOrderPairs;
+ DenseMap<VPPair, unsigned> AllPairConnectionTypes;
+ std::multimap<ValuePair, ValuePair> AllConnectedPairs, AllConnectedPairDeps;
do {
std::vector<Value *> PairableInsts;
std::multimap<Value *, Value *> CandidatePairs;
+ DenseSet<ValuePair> FixedOrderPairs;
+ DenseMap<ValuePair, int> CandidatePairCostSavings;
ShouldContinue = getCandidatePairs(BB, Start, CandidatePairs,
+ FixedOrderPairs,
+ CandidatePairCostSavings,
PairableInsts, NonPow2Len);
if (PairableInsts.empty()) continue;
@@ -563,10 +723,18 @@ namespace {
// Note that it only matters that both members of the second pair use some
// element of the first pair (to allow for splatting).
- std::multimap<ValuePair, ValuePair> ConnectedPairs;
- computeConnectedPairs(CandidatePairs, PairableInsts, ConnectedPairs);
+ std::multimap<ValuePair, ValuePair> ConnectedPairs, ConnectedPairDeps;
+ DenseMap<VPPair, unsigned> PairConnectionTypes;
+ computeConnectedPairs(CandidatePairs, PairableInsts, ConnectedPairs,
+ PairConnectionTypes);
if (ConnectedPairs.empty()) continue;
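+
+      // Record the reverse of every connection as a dependency edge so the
+      // cost computation can walk from a pair to the pairs that feed it.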
+ for (std::multimap<ValuePair, ValuePair>::iterator
+ I = ConnectedPairs.begin(), IE = ConnectedPairs.end();
+ I != IE; ++I) {
+ ConnectedPairDeps.insert(VPPair(I->second, I->first));
+ }
+
// Build the pairable-instruction dependency map
DenseSet<ValuePair> PairableInstUsers;
buildDepMap(BB, CandidatePairs, PairableInsts, PairableInstUsers);
@@ -578,13 +746,48 @@ namespace {
// variables.
DenseMap<Value *, Value *> ChosenPairs;
- choosePairs(CandidatePairs, PairableInsts, ConnectedPairs,
+ choosePairs(CandidatePairs, CandidatePairCostSavings,
+ PairableInsts, FixedOrderPairs, PairConnectionTypes,
+ ConnectedPairs, ConnectedPairDeps,
PairableInstUsers, ChosenPairs);
if (ChosenPairs.empty()) continue;
AllPairableInsts.insert(AllPairableInsts.end(), PairableInsts.begin(),
PairableInsts.end());
AllChosenPairs.insert(ChosenPairs.begin(), ChosenPairs.end());
+
+ // Only for the chosen pairs, propagate information on fixed-order pairs,
+ // pair connections, and their types to the data structures used by the
+ // pair fusion procedures.
+ for (DenseMap<Value *, Value *>::iterator I = ChosenPairs.begin(),
+ IE = ChosenPairs.end(); I != IE; ++I) {
+ if (FixedOrderPairs.count(*I))
+ AllFixedOrderPairs.insert(*I);
+ else if (FixedOrderPairs.count(ValuePair(I->second, I->first)))
+ AllFixedOrderPairs.insert(ValuePair(I->second, I->first));
+
+ for (DenseMap<Value *, Value *>::iterator J = ChosenPairs.begin();
+ J != IE; ++J) {
+ DenseMap<VPPair, unsigned>::iterator K =
+ PairConnectionTypes.find(VPPair(*I, *J));
+ if (K != PairConnectionTypes.end()) {
+ AllPairConnectionTypes.insert(*K);
+ } else {
+ K = PairConnectionTypes.find(VPPair(*J, *I));
+ if (K != PairConnectionTypes.end())
+ AllPairConnectionTypes.insert(*K);
+ }
+ }
+ }
+
+ for (std::multimap<ValuePair, ValuePair>::iterator
+ I = ConnectedPairs.begin(), IE = ConnectedPairs.end();
+ I != IE; ++I) {
+ if (AllPairConnectionTypes.count(*I)) {
+ AllConnectedPairs.insert(*I);
+ AllConnectedPairDeps.insert(VPPair(I->second, I->first));
+ }
+ }
} while (ShouldContinue);
if (AllChosenPairs.empty()) return false;
@@ -597,11 +800,13 @@ namespace {
// replaced with a vector_extract on the result. Subsequent optimization
// passes should coalesce the build/extract combinations.
- fuseChosenPairs(BB, AllPairableInsts, AllChosenPairs);
+ fuseChosenPairs(BB, AllPairableInsts, AllChosenPairs, AllFixedOrderPairs,
+ AllPairConnectionTypes,
+ AllConnectedPairs, AllConnectedPairDeps);
// It is important to cleanup here so that future iterations of this
// function have less work to do.
- (void) SimplifyInstructionsInBlock(&BB, TD);
+ (void) SimplifyInstructionsInBlock(&BB, TD, AA->getTargetLibraryInfo());
return true;
}
@@ -667,15 +872,22 @@ namespace {
!(VectorType::isValidElementType(T2) || T2->isVectorTy()))
return false;
- if (T1->getScalarSizeInBits() == 1 && T2->getScalarSizeInBits() == 1) {
+ if (T1->getScalarSizeInBits() == 1) {
if (!Config.VectorizeBools)
return false;
} else {
- if (!Config.VectorizeInts
- && (T1->isIntOrIntVectorTy() || T2->isIntOrIntVectorTy()))
+ if (!Config.VectorizeInts && T1->isIntOrIntVectorTy())
return false;
}
-
+
+ if (T2->getScalarSizeInBits() == 1) {
+ if (!Config.VectorizeBools)
+ return false;
+ } else {
+ if (!Config.VectorizeInts && T2->isIntOrIntVectorTy())
+ return false;
+ }
+
if (!Config.VectorizeFloats
&& (T1->isFPOrFPVectorTy() || T2->isFPOrFPVectorTy()))
return false;
@@ -691,8 +903,8 @@ namespace {
T2->getScalarType()->isPointerTy()))
return false;
- if (T1->getPrimitiveSizeInBits() >= Config.VectorBits ||
- T2->getPrimitiveSizeInBits() >= Config.VectorBits)
+ if (!VTTI && (T1->getPrimitiveSizeInBits() >= Config.VectorBits ||
+ T2->getPrimitiveSizeInBits() >= Config.VectorBits))
return false;
return true;
@@ -703,10 +915,14 @@ namespace {
// that I has already been determined to be vectorizable and that J is not
// in the use tree of I.
bool BBVectorize::areInstsCompatible(Instruction *I, Instruction *J,
- bool IsSimpleLoadStore, bool NonPow2Len) {
+ bool IsSimpleLoadStore, bool NonPow2Len,
+ int &CostSavings, int &FixedOrder) {
DEBUG(if (DebugInstructionExamination) dbgs() << "BBV: looking at " << *I <<
" <-> " << *J << "\n");
+ CostSavings = 0;
+ FixedOrder = 0;
+
// Loads and stores can be merged if they have different alignments,
// but are otherwise the same.
if (!J->isSameOperationAs(I, Instruction::CompareIgnoringAlignment |
@@ -719,38 +935,84 @@ namespace {
unsigned MaxTypeBits = std::max(
IT1->getPrimitiveSizeInBits() + JT1->getPrimitiveSizeInBits(),
IT2->getPrimitiveSizeInBits() + JT2->getPrimitiveSizeInBits());
- if (MaxTypeBits > Config.VectorBits)
+ if (!VTTI && MaxTypeBits > Config.VectorBits)
return false;
// FIXME: handle addsub-type operations!
if (IsSimpleLoadStore) {
Value *IPtr, *JPtr;
- unsigned IAlignment, JAlignment;
+ unsigned IAlignment, JAlignment, IAddressSpace, JAddressSpace;
int64_t OffsetInElmts = 0;
if (getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
+ IAddressSpace, JAddressSpace,
OffsetInElmts) && abs64(OffsetInElmts) == 1) {
- if (Config.AlignedOnly) {
- Type *aTypeI = isa<StoreInst>(I) ?
- cast<StoreInst>(I)->getValueOperand()->getType() : I->getType();
- Type *aTypeJ = isa<StoreInst>(J) ?
- cast<StoreInst>(J)->getValueOperand()->getType() : J->getType();
+ FixedOrder = (int) OffsetInElmts;
+ unsigned BottomAlignment = IAlignment;
+ if (OffsetInElmts < 0) BottomAlignment = JAlignment;
+
+ Type *aTypeI = isa<StoreInst>(I) ?
+ cast<StoreInst>(I)->getValueOperand()->getType() : I->getType();
+ Type *aTypeJ = isa<StoreInst>(J) ?
+ cast<StoreInst>(J)->getValueOperand()->getType() : J->getType();
+ Type *VType = getVecTypeForPair(aTypeI, aTypeJ);
+ if (Config.AlignedOnly) {
// An aligned load or store is possible only if the instruction
// with the lower offset has an alignment suitable for the
// vector type.
- unsigned BottomAlignment = IAlignment;
- if (OffsetInElmts < 0) BottomAlignment = JAlignment;
-
- Type *VType = getVecTypeForPair(aTypeI, aTypeJ);
unsigned VecAlignment = TD->getPrefTypeAlignment(VType);
if (BottomAlignment < VecAlignment)
return false;
}
+
+ if (VTTI) {
+ unsigned ICost = VTTI->getMemoryOpCost(I->getOpcode(), I->getType(),
+ IAlignment, IAddressSpace);
+ unsigned JCost = VTTI->getMemoryOpCost(J->getOpcode(), J->getType(),
+ JAlignment, JAddressSpace);
+ unsigned VCost = VTTI->getMemoryOpCost(I->getOpcode(), VType,
+ BottomAlignment,
+ IAddressSpace);
+ if (VCost > ICost + JCost)
+ return false;
+
+ // We don't want to fuse to a type that will be split, even
+ // if the two input types will also be split and there is no other
+ // associated cost.
+ unsigned VParts = VTTI->getNumberOfParts(VType);
+ if (VParts > 1)
+ return false;
+ else if (!VParts && VCost == ICost + JCost)
+ return false;
+
+ CostSavings = ICost + JCost - VCost;
+ }
} else {
return false;
}
+ } else if (VTTI) {
+ unsigned ICost = getInstrCost(I->getOpcode(), IT1, IT2);
+ unsigned JCost = getInstrCost(J->getOpcode(), JT1, JT2);
+ Type *VT1 = getVecTypeForPair(IT1, JT1),
+ *VT2 = getVecTypeForPair(IT2, JT2);
+ unsigned VCost = getInstrCost(I->getOpcode(), VT1, VT2);
+
+ if (VCost > ICost + JCost)
+ return false;
+
+ // We don't want to fuse to a type that will be split, even
+ // if the two input types will also be split and there is no other
+ // associated cost.
+ unsigned VParts1 = VTTI->getNumberOfParts(VT1),
+ VParts2 = VTTI->getNumberOfParts(VT2);
+ if (VParts1 > 1 || VParts2 > 1)
+ return false;
+ else if ((!VParts1 || !VParts2) && VCost == ICost + JCost)
+ return false;
+
+ CostSavings = ICost + JCost - VCost;
}
// The powi intrinsic is special because only the first argument is
@@ -833,6 +1095,8 @@ namespace {
bool BBVectorize::getCandidatePairs(BasicBlock &BB,
BasicBlock::iterator &Start,
std::multimap<Value *, Value *> &CandidatePairs,
+ DenseSet<ValuePair> &FixedOrderPairs,
+ DenseMap<ValuePair, int> &CandidatePairCostSavings,
std::vector<Value *> &PairableInsts, bool NonPow2Len) {
BasicBlock::iterator E = BB.end();
if (Start == E) return false;
@@ -869,7 +1133,9 @@ namespace {
// J does not use I, and comes before the first use of I, so it can be
// merged with I if the instructions are compatible.
- if (!areInstsCompatible(I, J, IsSimpleLoadStore, NonPow2Len)) continue;
+ int CostSavings, FixedOrder;
+ if (!areInstsCompatible(I, J, IsSimpleLoadStore, NonPow2Len,
+ CostSavings, FixedOrder)) continue;
// J is a candidate for merging with I.
if (!PairableInsts.size() ||
@@ -878,6 +1144,14 @@ namespace {
}
CandidatePairs.insert(ValuePair(I, J));
+ if (VTTI)
+ CandidatePairCostSavings.insert(ValuePairWithCost(ValuePair(I, J),
+ CostSavings));
+
+ if (FixedOrder == 1)
+ FixedOrderPairs.insert(ValuePair(I, J));
+ else if (FixedOrder == -1)
+ FixedOrderPairs.insert(ValuePair(J, I));
// The next call to this function must start after the last instruction
// selected during this invocation.
@@ -887,7 +1161,8 @@ namespace {
}
DEBUG(if (DebugCandidateSelection) dbgs() << "BBV: candidate pair "
- << *I << " <-> " << *J << "\n");
+ << *I << " <-> " << *J << " (cost savings: " <<
+ CostSavings << ")\n");
// If we have already found too many pairs, break here and this function
// will be called again starting after the last instruction selected
@@ -915,6 +1190,7 @@ namespace {
std::multimap<Value *, Value *> &CandidatePairs,
std::vector<Value *> &PairableInsts,
std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes,
ValuePair P) {
StoreInst *SI, *SJ;
@@ -946,12 +1222,18 @@ namespace {
VPIteratorPair JPairRange = CandidatePairs.equal_range(*J);
// Look for <I, J>:
- if (isSecondInIteratorPair<Value*>(*J, IPairRange))
- ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J)));
+ if (isSecondInIteratorPair<Value*>(*J, IPairRange)) {
+ VPPair VP(P, ValuePair(*I, *J));
+ ConnectedPairs.insert(VP);
+ PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionDirect));
+ }
// Look for <J, I>:
- if (isSecondInIteratorPair<Value*>(*I, JPairRange))
- ConnectedPairs.insert(VPPair(P, ValuePair(*J, *I)));
+ if (isSecondInIteratorPair<Value*>(*I, JPairRange)) {
+ VPPair VP(P, ValuePair(*J, *I));
+ ConnectedPairs.insert(VP);
+ PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSwap));
+ }
}
if (Config.SplatBreaksChain) continue;
@@ -962,8 +1244,11 @@ namespace {
P.first == SJ->getPointerOperand())
continue;
- if (isSecondInIteratorPair<Value*>(*J, IPairRange))
- ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J)));
+ if (isSecondInIteratorPair<Value*>(*J, IPairRange)) {
+ VPPair VP(P, ValuePair(*I, *J));
+ ConnectedPairs.insert(VP);
+ PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSplat));
+ }
}
}
@@ -985,8 +1270,11 @@ namespace {
P.second == SJ->getPointerOperand())
continue;
- if (isSecondInIteratorPair<Value*>(*J, IPairRange))
- ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J)));
+ if (isSecondInIteratorPair<Value*>(*J, IPairRange)) {
+ VPPair VP(P, ValuePair(*I, *J));
+ ConnectedPairs.insert(VP);
+ PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSplat));
+ }
}
}
}
@@ -997,7 +1285,8 @@ namespace {
void BBVectorize::computeConnectedPairs(
std::multimap<Value *, Value *> &CandidatePairs,
std::vector<Value *> &PairableInsts,
- std::multimap<ValuePair, ValuePair> &ConnectedPairs) {
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes) {
for (std::vector<Value *>::iterator PI = PairableInsts.begin(),
PE = PairableInsts.end(); PI != PE; ++PI) {
@@ -1006,7 +1295,7 @@ namespace {
for (std::multimap<Value *, Value *>::iterator P = choiceRange.first;
P != choiceRange.second; ++P)
computePairsConnectedTo(CandidatePairs, PairableInsts,
- ConnectedPairs, *P);
+ ConnectedPairs, PairConnectionTypes, *P);
}
DEBUG(dbgs() << "BBV: found " << ConnectedPairs.size()
@@ -1196,7 +1485,7 @@ namespace {
PrunedTree.insert(QTop.first);
// Visit each child, pruning as necessary...
- DenseMap<ValuePair, size_t> BestChildren;
+ SmallVector<ValuePairWithDepth, 8> BestChildren;
VPPIteratorPair QTopRange = ConnectedPairs.equal_range(QTop.first);
for (std::multimap<ValuePair, ValuePair>::iterator K = QTopRange.first;
K != QTopRange.second; ++K) {
@@ -1228,7 +1517,7 @@ namespace {
DenseSet<ValuePair> CurrentPairs;
bool CanAdd = true;
- for (DenseMap<ValuePair, size_t>::iterator C2
+ for (SmallVector<ValuePairWithDepth, 8>::iterator C2
= BestChildren.begin(), E2 = BestChildren.end();
C2 != E2; ++C2) {
if (C2->first.first == C->first.first ||
@@ -1313,22 +1602,22 @@ namespace {
// to an already-selected child. Check for this here, and if a
// conflict is found, then remove the previously-selected child
// before adding this one in its place.
- for (DenseMap<ValuePair, size_t>::iterator C2
+ for (SmallVector<ValuePairWithDepth, 8>::iterator C2
= BestChildren.begin(); C2 != BestChildren.end();) {
if (C2->first.first == C->first.first ||
C2->first.first == C->first.second ||
C2->first.second == C->first.first ||
C2->first.second == C->first.second ||
pairsConflict(C2->first, C->first, PairableInstUsers))
- BestChildren.erase(C2++);
+ C2 = BestChildren.erase(C2);
else
++C2;
}
- BestChildren.insert(ValuePairWithDepth(C->first, C->second));
+ BestChildren.push_back(ValuePairWithDepth(C->first, C->second));
}
- for (DenseMap<ValuePair, size_t>::iterator C
+ for (SmallVector<ValuePairWithDepth, 8>::iterator C
= BestChildren.begin(), E2 = BestChildren.end();
C != E2; ++C) {
size_t DepthF = getDepthFactor(C->first.first);
@@ -1341,13 +1630,17 @@ namespace {
// pairs, given the choice of root pairs as an iterator range.
void BBVectorize::findBestTreeFor(
std::multimap<Value *, Value *> &CandidatePairs,
+ DenseMap<ValuePair, int> &CandidatePairCostSavings,
std::vector<Value *> &PairableInsts,
+ DenseSet<ValuePair> &FixedOrderPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes,
std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairDeps,
DenseSet<ValuePair> &PairableInstUsers,
std::multimap<ValuePair, ValuePair> &PairableInstUserMap,
DenseMap<Value *, Value *> &ChosenPairs,
DenseSet<ValuePair> &BestTree, size_t &BestMaxDepth,
- size_t &BestEffSize, VPIteratorPair ChoiceRange,
+ int &BestEffSize, VPIteratorPair ChoiceRange,
bool UseCycleCheck) {
for (std::multimap<Value *, Value *>::iterator J = ChoiceRange.first;
J != ChoiceRange.second; ++J) {
@@ -1397,17 +1690,289 @@ namespace {
PairableInstUsers, PairableInstUserMap, ChosenPairs, Tree,
PrunedTree, *J, UseCycleCheck);
- size_t EffSize = 0;
- for (DenseSet<ValuePair>::iterator S = PrunedTree.begin(),
- E = PrunedTree.end(); S != E; ++S)
- EffSize += getDepthFactor(S->first);
+ int EffSize = 0;
+ if (VTTI) {
+ DenseSet<Value *> PrunedTreeInstrs;
+ for (DenseSet<ValuePair>::iterator S = PrunedTree.begin(),
+ E = PrunedTree.end(); S != E; ++S) {
+ PrunedTreeInstrs.insert(S->first);
+ PrunedTreeInstrs.insert(S->second);
+ }
+
+ // The set of pairs that have already contributed to the total cost.
+ DenseSet<ValuePair> IncomingPairs;
+
+ // If the cost model were perfect, this might not be necessary; but we
+ // need to make sure that we don't get stuck vectorizing our own
+ // shuffle chains.
+ bool HasNontrivialInsts = false;
+
+ // The node weights represent the cost savings associated with
+ // fusing the pair of instructions.
+ for (DenseSet<ValuePair>::iterator S = PrunedTree.begin(),
+ E = PrunedTree.end(); S != E; ++S) {
+ if (!isa<ShuffleVectorInst>(S->first) &&
+ !isa<InsertElementInst>(S->first) &&
+ !isa<ExtractElementInst>(S->first))
+ HasNontrivialInsts = true;
+
+ bool FlipOrder = false;
+
+ if (getDepthFactor(S->first)) {
+ int ESContrib = CandidatePairCostSavings.find(*S)->second;
+ DEBUG(if (DebugPairSelection) dbgs() << "\tweight {"
+ << *S->first << " <-> " << *S->second << "} = " <<
+ ESContrib << "\n");
+ EffSize += ESContrib;
+ }
+
+ // The edge weights contribute in a negative sense: they represent
+ // the cost of shuffles.
+ VPPIteratorPair IP = ConnectedPairDeps.equal_range(*S);
+ if (IP.first != ConnectedPairDeps.end()) {
+ unsigned NumDepsDirect = 0, NumDepsSwap = 0;
+ for (std::multimap<ValuePair, ValuePair>::iterator Q = IP.first;
+ Q != IP.second; ++Q) {
+ if (!PrunedTree.count(Q->second))
+ continue;
+ DenseMap<VPPair, unsigned>::iterator R =
+ PairConnectionTypes.find(VPPair(Q->second, Q->first));
+ assert(R != PairConnectionTypes.end() &&
+ "Cannot find pair connection type");
+ if (R->second == PairConnectionDirect)
+ ++NumDepsDirect;
+ else if (R->second == PairConnectionSwap)
+ ++NumDepsSwap;
+ }
+
+            // If there are more swaps than direct connections, then
+            // the pair order will be flipped during fusion, so the real
+            // number of swaps is the minimum of the two counts.
+ FlipOrder = !FixedOrderPairs.count(*S) &&
+ ((NumDepsSwap > NumDepsDirect) ||
+ FixedOrderPairs.count(ValuePair(S->second, S->first)));
+
+ for (std::multimap<ValuePair, ValuePair>::iterator Q = IP.first;
+ Q != IP.second; ++Q) {
+ if (!PrunedTree.count(Q->second))
+ continue;
+ DenseMap<VPPair, unsigned>::iterator R =
+ PairConnectionTypes.find(VPPair(Q->second, Q->first));
+ assert(R != PairConnectionTypes.end() &&
+ "Cannot find pair connection type");
+ Type *Ty1 = Q->second.first->getType(),
+ *Ty2 = Q->second.second->getType();
+ Type *VTy = getVecTypeForPair(Ty1, Ty2);
+ if ((R->second == PairConnectionDirect && FlipOrder) ||
+ (R->second == PairConnectionSwap && !FlipOrder) ||
+ R->second == PairConnectionSplat) {
+ int ESContrib = (int) getInstrCost(Instruction::ShuffleVector,
+ VTy, VTy);
+ DEBUG(if (DebugPairSelection) dbgs() << "\tcost {" <<
+ *Q->second.first << " <-> " << *Q->second.second <<
+ "} -> {" <<
+ *S->first << " <-> " << *S->second << "} = " <<
+ ESContrib << "\n");
+ EffSize -= ESContrib;
+ }
+ }
+ }
+
+ // Compute the cost of outgoing edges. We assume that edges outgoing
+ // to shuffles, inserts or extracts can be merged, and so contribute
+ // no additional cost.
+ if (!S->first->getType()->isVoidTy()) {
+ Type *Ty1 = S->first->getType(),
+ *Ty2 = S->second->getType();
+ Type *VTy = getVecTypeForPair(Ty1, Ty2);
+
+ bool NeedsExtraction = false;
+ for (Value::use_iterator I = S->first->use_begin(),
+ IE = S->first->use_end(); I != IE; ++I) {
+ if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(*I)) {
+ // Shuffle can be folded if it has no other input
+ if (isa<UndefValue>(SI->getOperand(1)))
+ continue;
+ }
+ if (isa<ExtractElementInst>(*I))
+ continue;
+ if (PrunedTreeInstrs.count(*I))
+ continue;
+ NeedsExtraction = true;
+ break;
+ }
+
+ if (NeedsExtraction) {
+ int ESContrib;
+ if (Ty1->isVectorTy())
+ ESContrib = (int) getInstrCost(Instruction::ShuffleVector,
+ Ty1, VTy);
+ else
+ ESContrib = (int) VTTI->getVectorInstrCost(
+ Instruction::ExtractElement, VTy, 0);
+
+ DEBUG(if (DebugPairSelection) dbgs() << "\tcost {" <<
+ *S->first << "} = " << ESContrib << "\n");
+ EffSize -= ESContrib;
+ }
+
+ NeedsExtraction = false;
+ for (Value::use_iterator I = S->second->use_begin(),
+ IE = S->second->use_end(); I != IE; ++I) {
+ if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(*I)) {
+ // Shuffle can be folded if it has no other input
+ if (isa<UndefValue>(SI->getOperand(1)))
+ continue;
+ }
+ if (isa<ExtractElementInst>(*I))
+ continue;
+ if (PrunedTreeInstrs.count(*I))
+ continue;
+ NeedsExtraction = true;
+ break;
+ }
+
+ if (NeedsExtraction) {
+ int ESContrib;
+ if (Ty2->isVectorTy())
+ ESContrib = (int) getInstrCost(Instruction::ShuffleVector,
+ Ty2, VTy);
+ else
+ ESContrib = (int) VTTI->getVectorInstrCost(
+ Instruction::ExtractElement, VTy, 1);
+ DEBUG(if (DebugPairSelection) dbgs() << "\tcost {" <<
+ *S->second << "} = " << ESContrib << "\n");
+ EffSize -= ESContrib;
+ }
+ }
+
+ // Compute the cost of incoming edges.
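+          // These model the insertelement/shufflevector work needed to
+          // assemble each fused operand pair into a single vector operand.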
+ if (!isa<LoadInst>(S->first) && !isa<StoreInst>(S->first)) {
+ Instruction *S1 = cast<Instruction>(S->first),
+ *S2 = cast<Instruction>(S->second);
+ for (unsigned o = 0; o < S1->getNumOperands(); ++o) {
+ Value *O1 = S1->getOperand(o), *O2 = S2->getOperand(o);
+
+            // Combining constants into vector constants (or small vector
+            // constants into larger ones) is assumed free.
+ if (isa<Constant>(O1) && isa<Constant>(O2))
+ continue;
+
+ if (FlipOrder)
+ std::swap(O1, O2);
+
+ ValuePair VP = ValuePair(O1, O2);
+ ValuePair VPR = ValuePair(O2, O1);
+
+ // Internal edges are not handled here.
+ if (PrunedTree.count(VP) || PrunedTree.count(VPR))
+ continue;
+
+ Type *Ty1 = O1->getType(),
+ *Ty2 = O2->getType();
+ Type *VTy = getVecTypeForPair(Ty1, Ty2);
+
+ // Combining vector operations of the same type is also assumed
+ // folded with other operations.
+ if (Ty1 == Ty2) {
+ // If both are insert elements, then both can be widened.
+ InsertElementInst *IEO1 = dyn_cast<InsertElementInst>(O1),
+ *IEO2 = dyn_cast<InsertElementInst>(O2);
+ if (IEO1 && IEO2 && isPureIEChain(IEO1) && isPureIEChain(IEO2))
+ continue;
+ // If both are extract elements, and both have the same input
+              // type, then they can be replaced with a shuffle.
+ ExtractElementInst *EIO1 = dyn_cast<ExtractElementInst>(O1),
+ *EIO2 = dyn_cast<ExtractElementInst>(O2);
+ if (EIO1 && EIO2 &&
+ EIO1->getOperand(0)->getType() ==
+ EIO2->getOperand(0)->getType())
+ continue;
+              // If both are shuffles with equal operand types and only two
+              // unique operands, then they can be replaced with a single
+              // shuffle.
+ ShuffleVectorInst *SIO1 = dyn_cast<ShuffleVectorInst>(O1),
+ *SIO2 = dyn_cast<ShuffleVectorInst>(O2);
+ if (SIO1 && SIO2 &&
+ SIO1->getOperand(0)->getType() ==
+ SIO2->getOperand(0)->getType()) {
+ SmallSet<Value *, 4> SIOps;
+ SIOps.insert(SIO1->getOperand(0));
+ SIOps.insert(SIO1->getOperand(1));
+ SIOps.insert(SIO2->getOperand(0));
+ SIOps.insert(SIO2->getOperand(1));
+ if (SIOps.size() <= 2)
+ continue;
+ }
+ }
+
+ int ESContrib;
+ // This pair has already been formed.
+ if (IncomingPairs.count(VP)) {
+ continue;
+ } else if (IncomingPairs.count(VPR)) {
+ ESContrib = (int) getInstrCost(Instruction::ShuffleVector,
+ VTy, VTy);
+ } else if (!Ty1->isVectorTy() && !Ty2->isVectorTy()) {
+ ESContrib = (int) VTTI->getVectorInstrCost(
+ Instruction::InsertElement, VTy, 0);
+ ESContrib += (int) VTTI->getVectorInstrCost(
+ Instruction::InsertElement, VTy, 1);
+ } else if (!Ty1->isVectorTy()) {
+ // O1 needs to be inserted into a vector of size O2, and then
+ // both need to be shuffled together.
+ ESContrib = (int) VTTI->getVectorInstrCost(
+ Instruction::InsertElement, Ty2, 0);
+ ESContrib += (int) getInstrCost(Instruction::ShuffleVector,
+ VTy, Ty2);
+ } else if (!Ty2->isVectorTy()) {
+ // O2 needs to be inserted into a vector of size O1, and then
+ // both need to be shuffled together.
+ ESContrib = (int) VTTI->getVectorInstrCost(
+ Instruction::InsertElement, Ty1, 0);
+ ESContrib += (int) getInstrCost(Instruction::ShuffleVector,
+ VTy, Ty1);
+ } else {
+ Type *TyBig = Ty1, *TySmall = Ty2;
+ if (Ty2->getVectorNumElements() > Ty1->getVectorNumElements())
+ std::swap(TyBig, TySmall);
+
+ ESContrib = (int) getInstrCost(Instruction::ShuffleVector,
+ VTy, TyBig);
+ if (TyBig != TySmall)
+ ESContrib += (int) getInstrCost(Instruction::ShuffleVector,
+ TyBig, TySmall);
+ }
+
+ DEBUG(if (DebugPairSelection) dbgs() << "\tcost {"
+ << *O1 << " <-> " << *O2 << "} = " <<
+ ESContrib << "\n");
+ EffSize -= ESContrib;
+ IncomingPairs.insert(VP);
+ }
+ }
+ }
+
+ if (!HasNontrivialInsts) {
+ DEBUG(if (DebugPairSelection) dbgs() <<
+ "\tNo non-trivial instructions in tree;"
+ " override to zero effective size\n");
+ EffSize = 0;
+ }
+ } else {
+ for (DenseSet<ValuePair>::iterator S = PrunedTree.begin(),
+ E = PrunedTree.end(); S != E; ++S)
+ EffSize += (int) getDepthFactor(S->first);
+ }
DEBUG(if (DebugPairSelection)
dbgs() << "BBV: found pruned Tree for pair {"
<< *J->first << " <-> " << *J->second << "} of depth " <<
MaxDepth << " and size " << PrunedTree.size() <<
" (effective size: " << EffSize << ")\n");
- if (MaxDepth >= Config.ReqChainDepth && EffSize > BestEffSize) {
+ if (((VTTI && !UseChainDepthWithTI) ||
+ MaxDepth >= Config.ReqChainDepth) &&
+ EffSize > 0 && EffSize > BestEffSize) {
BestMaxDepth = MaxDepth;
BestEffSize = EffSize;
BestTree = PrunedTree;
@@ -1419,8 +1984,12 @@ namespace {
// that will be fused into vector instructions.
void BBVectorize::choosePairs(
std::multimap<Value *, Value *> &CandidatePairs,
+ DenseMap<ValuePair, int> &CandidatePairCostSavings,
std::vector<Value *> &PairableInsts,
+ DenseSet<ValuePair> &FixedOrderPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes,
std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairDeps,
DenseSet<ValuePair> &PairableInstUsers,
DenseMap<Value *, Value *>& ChosenPairs) {
bool UseCycleCheck =
@@ -1435,9 +2004,12 @@ namespace {
VPIteratorPair ChoiceRange = CandidatePairs.equal_range(*I);
// The best pair to choose and its tree:
- size_t BestMaxDepth = 0, BestEffSize = 0;
+ size_t BestMaxDepth = 0;
+ int BestEffSize = 0;
DenseSet<ValuePair> BestTree;
- findBestTreeFor(CandidatePairs, PairableInsts, ConnectedPairs,
+ findBestTreeFor(CandidatePairs, CandidatePairCostSavings,
+ PairableInsts, FixedOrderPairs, PairConnectionTypes,
+ ConnectedPairs, ConnectedPairDeps,
PairableInstUsers, PairableInstUserMap, ChosenPairs,
BestTree, BestMaxDepth, BestEffSize, ChoiceRange,
UseCycleCheck);
@@ -1490,24 +2062,19 @@ namespace {
// Returns the value that is to be used as the pointer input to the vector
// instruction that fuses I with J.
Value *BBVectorize::getReplacementPointerInput(LLVMContext& Context,
- Instruction *I, Instruction *J, unsigned o,
- bool FlipMemInputs) {
+ Instruction *I, Instruction *J, unsigned o) {
Value *IPtr, *JPtr;
- unsigned IAlignment, JAlignment;
+ unsigned IAlignment, JAlignment, IAddressSpace, JAddressSpace;
int64_t OffsetInElmts;
- // Note: the analysis might fail here, that is why FlipMemInputs has
+    // Note: the analysis might fail here; that is why the pair order has
+    // been precomputed (OffsetInElmts must be unused here).
(void) getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
- OffsetInElmts);
+ IAddressSpace, JAddressSpace,
+ OffsetInElmts, false);
// The pointer value is taken to be the one with the lowest offset.
- Value *VPtr;
- if (!FlipMemInputs) {
- VPtr = IPtr;
- } else {
- VPtr = JPtr;
- }
+ Value *VPtr = IPtr;
Type *ArgTypeI = cast<PointerType>(IPtr->getType())->getElementType();
Type *ArgTypeJ = cast<PointerType>(JPtr->getType())->getElementType();
@@ -1515,7 +2082,7 @@ namespace {
Type *VArgPtrType = PointerType::get(VArgType,
cast<PointerType>(IPtr->getType())->getAddressSpace());
return new BitCastInst(VPtr, VArgPtrType, getReplacementName(I, true, o),
- /* insert before */ FlipMemInputs ? J : I);
+ /* insert before */ I);
}
void BBVectorize::fillNewShuffleMask(LLVMContext& Context, Instruction *J,
@@ -1585,23 +2152,12 @@ namespace {
Instruction *J, unsigned o, Value *&LOp,
unsigned numElemL,
Type *ArgTypeL, Type *ArgTypeH,
- unsigned IdxOff) {
+ bool IBeforeJ, unsigned IdxOff) {
bool ExpandedIEChain = false;
if (InsertElementInst *LIE = dyn_cast<InsertElementInst>(LOp)) {
// If we have a pure insertelement chain, then this can be rewritten
// into a chain that directly builds the larger type.
- bool PureChain = true;
- InsertElementInst *LIENext = LIE;
- do {
- if (!isa<UndefValue>(LIENext->getOperand(0)) &&
- !isa<InsertElementInst>(LIENext->getOperand(0))) {
- PureChain = false;
- break;
- }
- } while ((LIENext =
- dyn_cast<InsertElementInst>(LIENext->getOperand(0))));
-
- if (PureChain) {
+ if (isPureIEChain(LIE)) {
SmallVector<Value *, 8> VectElemts(numElemL,
UndefValue::get(ArgTypeL->getScalarType()));
InsertElementInst *LIENext = LIE;
@@ -1619,8 +2175,9 @@ namespace {
LIENext = InsertElementInst::Create(LIEPrev, VectElemts[i],
ConstantInt::get(Type::getInt32Ty(Context),
i + IdxOff),
- getReplacementName(I, true, o, i+1));
- LIENext->insertBefore(J);
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, i+1));
+ LIENext->insertBefore(IBeforeJ ? J : I);
LIEPrev = LIENext;
}
@@ -1635,7 +2192,7 @@ namespace {
// Returns the value to be used as the specified operand of the vector
// instruction that fuses I with J.
Value *BBVectorize::getReplacementInput(LLVMContext& Context, Instruction *I,
- Instruction *J, unsigned o, bool FlipMemInputs) {
+ Instruction *J, unsigned o, bool IBeforeJ) {
Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), 1);
@@ -1646,12 +2203,6 @@ namespace {
Instruction *L = I, *H = J;
Type *ArgTypeL = ArgTypeI, *ArgTypeH = ArgTypeJ;
- if (FlipMemInputs) {
- L = J;
- H = I;
- ArgTypeL = ArgTypeJ;
- ArgTypeH = ArgTypeI;
- }
unsigned numElemL;
if (ArgTypeL->isVectorTy())
@@ -1804,8 +2355,9 @@ namespace {
Instruction *S =
new ShuffleVectorInst(I1, UndefValue::get(I1T),
ConstantVector::get(Mask),
- getReplacementName(I, true, o));
- S->insertBefore(J);
+ getReplacementName(IBeforeJ ? I : J,
+ true, o));
+ S->insertBefore(IBeforeJ ? J : I);
return S;
}
@@ -1826,8 +2378,9 @@ namespace {
Instruction *NewI1 =
new ShuffleVectorInst(I1, UndefValue::get(I1T),
ConstantVector::get(Mask),
- getReplacementName(I, true, o, 1));
- NewI1->insertBefore(J);
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, 1));
+ NewI1->insertBefore(IBeforeJ ? J : I);
I1 = NewI1;
I1T = I2T;
I1Elem = I2Elem;
@@ -1842,8 +2395,9 @@ namespace {
Instruction *NewI2 =
new ShuffleVectorInst(I2, UndefValue::get(I2T),
ConstantVector::get(Mask),
- getReplacementName(I, true, o, 1));
- NewI2->insertBefore(J);
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, 1));
+ NewI2->insertBefore(IBeforeJ ? J : I);
I2 = NewI2;
I2T = I1T;
I2Elem = I1Elem;
@@ -1863,8 +2417,8 @@ namespace {
Instruction *NewOp =
new ShuffleVectorInst(I1, I2, ConstantVector::get(Mask),
- getReplacementName(I, true, o));
- NewOp->insertBefore(J);
+ getReplacementName(IBeforeJ ? I : J, true, o));
+ NewOp->insertBefore(IBeforeJ ? J : I);
return NewOp;
}
}
@@ -1872,17 +2426,17 @@ namespace {
Type *ArgType = ArgTypeL;
if (numElemL < numElemH) {
if (numElemL == 1 && expandIEChain(Context, I, J, o, HOp, numElemH,
- ArgTypeL, VArgType, 1)) {
+ ArgTypeL, VArgType, IBeforeJ, 1)) {
// This is another short-circuit case: we're combining a scalar into
// a vector that is formed by an IE chain. We've just expanded the IE
// chain, now insert the scalar and we're done.
Instruction *S = InsertElementInst::Create(HOp, LOp, CV0,
- getReplacementName(I, true, o));
- S->insertBefore(J);
+ getReplacementName(IBeforeJ ? I : J, true, o));
+ S->insertBefore(IBeforeJ ? J : I);
return S;
} else if (!expandIEChain(Context, I, J, o, LOp, numElemL, ArgTypeL,
- ArgTypeH)) {
+ ArgTypeH, IBeforeJ)) {
// The two vector inputs to the shuffle must be the same length,
// so extend the smaller vector to be the same length as the larger one.
Instruction *NLOp;
@@ -1897,29 +2451,32 @@ namespace {
NLOp = new ShuffleVectorInst(LOp, UndefValue::get(ArgTypeL),
ConstantVector::get(Mask),
- getReplacementName(I, true, o, 1));
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, 1));
} else {
NLOp = InsertElementInst::Create(UndefValue::get(ArgTypeH), LOp, CV0,
- getReplacementName(I, true, o, 1));
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, 1));
}
- NLOp->insertBefore(J);
+ NLOp->insertBefore(IBeforeJ ? J : I);
LOp = NLOp;
}
ArgType = ArgTypeH;
} else if (numElemL > numElemH) {
if (numElemH == 1 && expandIEChain(Context, I, J, o, LOp, numElemL,
- ArgTypeH, VArgType)) {
+ ArgTypeH, VArgType, IBeforeJ)) {
Instruction *S =
InsertElementInst::Create(LOp, HOp,
ConstantInt::get(Type::getInt32Ty(Context),
numElemL),
- getReplacementName(I, true, o));
- S->insertBefore(J);
+ getReplacementName(IBeforeJ ? I : J,
+ true, o));
+ S->insertBefore(IBeforeJ ? J : I);
return S;
} else if (!expandIEChain(Context, I, J, o, HOp, numElemH, ArgTypeH,
- ArgTypeL)) {
+ ArgTypeL, IBeforeJ)) {
Instruction *NHOp;
if (numElemH > 1) {
std::vector<Constant *> Mask(numElemL);
@@ -1931,13 +2488,15 @@ namespace {
NHOp = new ShuffleVectorInst(HOp, UndefValue::get(ArgTypeH),
ConstantVector::get(Mask),
- getReplacementName(I, true, o, 1));
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, 1));
} else {
NHOp = InsertElementInst::Create(UndefValue::get(ArgTypeL), HOp, CV0,
- getReplacementName(I, true, o, 1));
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, 1));
}
- NHOp->insertBefore(J);
+ NHOp->insertBefore(IBeforeJ ? J : I);
HOp = NHOp;
}
}
@@ -1955,19 +2514,21 @@ namespace {
}
Instruction *BV = new ShuffleVectorInst(LOp, HOp,
- ConstantVector::get(Mask),
- getReplacementName(I, true, o));
- BV->insertBefore(J);
+ ConstantVector::get(Mask),
+ getReplacementName(IBeforeJ ? I : J, true, o));
+ BV->insertBefore(IBeforeJ ? J : I);
return BV;
}
Instruction *BV1 = InsertElementInst::Create(
UndefValue::get(VArgType), LOp, CV0,
- getReplacementName(I, true, o, 1));
- BV1->insertBefore(I);
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, 1));
+ BV1->insertBefore(IBeforeJ ? J : I);
Instruction *BV2 = InsertElementInst::Create(BV1, HOp, CV1,
- getReplacementName(I, true, o, 2));
- BV2->insertBefore(J);
+ getReplacementName(IBeforeJ ? I : J,
+ true, o, 2));
+ BV2->insertBefore(IBeforeJ ? J : I);
return BV2;
}
@@ -1976,7 +2537,7 @@ namespace {
void BBVectorize::getReplacementInputsForPair(LLVMContext& Context,
Instruction *I, Instruction *J,
SmallVector<Value *, 3> &ReplacedOperands,
- bool FlipMemInputs) {
+ bool IBeforeJ) {
unsigned NumOperands = I->getNumOperands();
for (unsigned p = 0, o = NumOperands-1; p < NumOperands; ++p, --o) {
@@ -1985,8 +2546,7 @@ namespace {
if (isa<LoadInst>(I) || (o == 1 && isa<StoreInst>(I))) {
// This is the pointer for a load/store instruction.
- ReplacedOperands[o] = getReplacementPointerInput(Context, I, J, o,
- FlipMemInputs);
+ ReplacedOperands[o] = getReplacementPointerInput(Context, I, J, o);
continue;
} else if (isa<CallInst>(I)) {
Function *F = cast<CallInst>(I)->getCalledFunction();
@@ -2014,8 +2574,7 @@ namespace {
continue;
}
- ReplacedOperands[o] =
- getReplacementInput(Context, I, J, o, FlipMemInputs);
+ ReplacedOperands[o] = getReplacementInput(Context, I, J, o, IBeforeJ);
}
}
@@ -2026,8 +2585,7 @@ namespace {
void BBVectorize::replaceOutputsOfPair(LLVMContext& Context, Instruction *I,
Instruction *J, Instruction *K,
Instruction *&InsertionPt,
- Instruction *&K1, Instruction *&K2,
- bool FlipMemInputs) {
+ Instruction *&K1, Instruction *&K2) {
if (isa<StoreInst>(I)) {
AA->replaceWithNewValue(I, K);
AA->replaceWithNewValue(J, K);
@@ -2057,13 +2615,11 @@ namespace {
}
K1 = new ShuffleVectorInst(K, UndefValue::get(VType),
- ConstantVector::get(
- FlipMemInputs ? Mask2 : Mask1),
+                                 ConstantVector::get(Mask1),
getReplacementName(K, false, 1));
} else {
Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
- Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), numElem-1);
- K1 = ExtractElementInst::Create(K, FlipMemInputs ? CV1 : CV0,
+ K1 = ExtractElementInst::Create(K, CV0,
getReplacementName(K, false, 1));
}
@@ -2075,13 +2631,11 @@ namespace {
}
K2 = new ShuffleVectorInst(K, UndefValue::get(VType),
- ConstantVector::get(
- FlipMemInputs ? Mask1 : Mask2),
+                                 ConstantVector::get(Mask2),
getReplacementName(K, false, 2));
} else {
- Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), numElem-1);
- K2 = ExtractElementInst::Create(K, FlipMemInputs ? CV0 : CV1,
+ K2 = ExtractElementInst::Create(K, CV1,
getReplacementName(K, false, 2));
}
@@ -2181,36 +2735,6 @@ namespace {
}
}
- // As with the aliasing information, SCEV can also change because of
- // vectorization. This information is used to compute relative pointer
- // offsets; the necessary information will be cached here prior to
- // fusion.
- void BBVectorize::collectPtrInfo(std::vector<Value *> &PairableInsts,
- DenseMap<Value *, Value *> &ChosenPairs,
- DenseSet<Value *> &LowPtrInsts) {
- for (std::vector<Value *>::iterator PI = PairableInsts.begin(),
- PIE = PairableInsts.end(); PI != PIE; ++PI) {
- DenseMap<Value *, Value *>::iterator P = ChosenPairs.find(*PI);
- if (P == ChosenPairs.end()) continue;
-
- Instruction *I = cast<Instruction>(P->first);
- Instruction *J = cast<Instruction>(P->second);
-
- if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
- continue;
-
- Value *IPtr, *JPtr;
- unsigned IAlignment, JAlignment;
- int64_t OffsetInElmts;
- if (!getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
- OffsetInElmts) || abs64(OffsetInElmts) != 1)
- llvm_unreachable("Pre-fusion pointer analysis failed");
-
- Value *LowPI = (OffsetInElmts > 0) ? I : J;
- LowPtrInsts.insert(LowPI);
- }
- }
-
// When the first instruction in each pair is cloned, it will inherit its
// parent's metadata. This metadata must be combined with that of the other
// instruction in a safe way.
@@ -2244,27 +2768,27 @@ namespace {
// second member).
void BBVectorize::fuseChosenPairs(BasicBlock &BB,
std::vector<Value *> &PairableInsts,
- DenseMap<Value *, Value *> &ChosenPairs) {
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseSet<ValuePair> &FixedOrderPairs,
+ DenseMap<VPPair, unsigned> &PairConnectionTypes,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairDeps) {
LLVMContext& Context = BB.getContext();
// During the vectorization process, the order of the pairs to be fused
// could be flipped. So we'll add each pair, flipped, into the ChosenPairs
// list. After a pair is fused, the flipped pair is removed from the list.
- std::vector<ValuePair> FlippedPairs;
- FlippedPairs.reserve(ChosenPairs.size());
+ DenseSet<ValuePair> FlippedPairs;
for (DenseMap<Value *, Value *>::iterator P = ChosenPairs.begin(),
E = ChosenPairs.end(); P != E; ++P)
- FlippedPairs.push_back(ValuePair(P->second, P->first));
- for (std::vector<ValuePair>::iterator P = FlippedPairs.begin(),
+ FlippedPairs.insert(ValuePair(P->second, P->first));
+ for (DenseSet<ValuePair>::iterator P = FlippedPairs.begin(),
E = FlippedPairs.end(); P != E; ++P)
ChosenPairs.insert(*P);
std::multimap<Value *, Value *> LoadMoveSet;
collectLoadMoveSet(BB, PairableInsts, ChosenPairs, LoadMoveSet);
- DenseSet<Value *> LowPtrInsts;
- collectPtrInfo(PairableInsts, ChosenPairs, LowPtrInsts);
-
DEBUG(dbgs() << "BBV: initial: \n" << BB << "\n");
for (BasicBlock::iterator PI = BB.getFirstInsertionPt(); PI != BB.end();) {
@@ -2304,44 +2828,92 @@ namespace {
continue;
}
- bool FlipMemInputs = false;
- if (isa<LoadInst>(I) || isa<StoreInst>(I))
- FlipMemInputs = (LowPtrInsts.find(I) == LowPtrInsts.end());
+ // If the pair must have the other order, then flip it.
+ bool FlipPairOrder = FixedOrderPairs.count(ValuePair(J, I));
+ if (!FlipPairOrder && !FixedOrderPairs.count(ValuePair(I, J))) {
+ // This pair does not have a fixed order, and so we might want to
+ // flip it if that will yield fewer shuffles. We count the number
+ // of dependencies connected via swaps, and those directly connected,
+ // and flip the order if the number of swaps is greater.
+ bool OrigOrder = true;
+ VPPIteratorPair IP = ConnectedPairDeps.equal_range(ValuePair(I, J));
+ if (IP.first == ConnectedPairDeps.end()) {
+ IP = ConnectedPairDeps.equal_range(ValuePair(J, I));
+ OrigOrder = false;
+ }
+ if (IP.first != ConnectedPairDeps.end()) {
+ unsigned NumDepsDirect = 0, NumDepsSwap = 0;
+ for (std::multimap<ValuePair, ValuePair>::iterator Q = IP.first;
+ Q != IP.second; ++Q) {
+ DenseMap<VPPair, unsigned>::iterator R =
+ PairConnectionTypes.find(VPPair(Q->second, Q->first));
+ assert(R != PairConnectionTypes.end() &&
+ "Cannot find pair connection type");
+ if (R->second == PairConnectionDirect)
+ ++NumDepsDirect;
+ else if (R->second == PairConnectionSwap)
+ ++NumDepsSwap;
+ }
+
+ if (!OrigOrder)
+ std::swap(NumDepsDirect, NumDepsSwap);
+
+ if (NumDepsSwap > NumDepsDirect) {
+ FlipPairOrder = true;
+ DEBUG(dbgs() << "BBV: reordering pair: " << *I <<
+ " <-> " << *J << "\n");
+ }
+ }
+ }
+
+ Instruction *L = I, *H = J;
+ if (FlipPairOrder)
+ std::swap(H, L);
+
+ // If the pair being fused uses the opposite order from that in the pair
+      // connection map, then we need to flip the recorded connection types.
+ VPPIteratorPair IP = ConnectedPairs.equal_range(ValuePair(H, L));
+ for (std::multimap<ValuePair, ValuePair>::iterator Q = IP.first;
+ Q != IP.second; ++Q) {
+ DenseMap<VPPair, unsigned>::iterator R = PairConnectionTypes.find(*Q);
+ assert(R != PairConnectionTypes.end() &&
+ "Cannot find pair connection type");
+ if (R->second == PairConnectionDirect)
+ R->second = PairConnectionSwap;
+ else if (R->second == PairConnectionSwap)
+ R->second = PairConnectionDirect;
+ }
+
+ bool LBeforeH = !FlipPairOrder;
unsigned NumOperands = I->getNumOperands();
SmallVector<Value *, 3> ReplacedOperands(NumOperands);
- getReplacementInputsForPair(Context, I, J, ReplacedOperands,
- FlipMemInputs);
+ getReplacementInputsForPair(Context, L, H, ReplacedOperands,
+ LBeforeH);
// Make a copy of the original operation, change its type to the vector
// type and replace its operands with the vector operands.
- Instruction *K = I->clone();
- if (I->hasName()) K->takeName(I);
+ Instruction *K = L->clone();
+ if (L->hasName())
+ K->takeName(L);
+ else if (H->hasName())
+ K->takeName(H);
if (!isa<StoreInst>(K))
- K->mutateType(getVecTypeForPair(I->getType(), J->getType()));
+ K->mutateType(getVecTypeForPair(L->getType(), H->getType()));
- combineMetadata(K, J);
+ combineMetadata(K, H);
+ K->intersectOptionalDataWith(H);
for (unsigned o = 0; o < NumOperands; ++o)
K->setOperand(o, ReplacedOperands[o]);
- // If we've flipped the memory inputs, make sure that we take the correct
- // alignment.
- if (FlipMemInputs) {
- if (isa<StoreInst>(K))
- cast<StoreInst>(K)->setAlignment(cast<StoreInst>(J)->getAlignment());
- else
- cast<LoadInst>(K)->setAlignment(cast<LoadInst>(J)->getAlignment());
- }
-
K->insertAfter(J);
// Instruction insertion point:
Instruction *InsertionPt = K;
Instruction *K1 = 0, *K2 = 0;
- replaceOutputsOfPair(Context, I, J, K, InsertionPt, K1, K2,
- FlipMemInputs);
+ replaceOutputsOfPair(Context, L, H, K, InsertionPt, K1, K2);
// The use tree of the first original instruction must be moved to after
// the location of the second instruction. The entire use tree of the
@@ -2351,10 +2923,10 @@ namespace {
moveUsesOfIAfterJ(BB, LoadMoveSet, InsertionPt, I, J);
if (!isa<StoreInst>(I)) {
- I->replaceAllUsesWith(K1);
- J->replaceAllUsesWith(K2);
- AA->replaceWithNewValue(I, K1);
- AA->replaceWithNewValue(J, K2);
+ L->replaceAllUsesWith(K1);
+ H->replaceAllUsesWith(K2);
+ AA->replaceWithNewValue(L, K1);
+ AA->replaceWithNewValue(H, K2);
}
// Instructions that may read from memory may be in the load move set.
@@ -2387,6 +2959,9 @@ namespace {
SE->forgetValue(J);
I->eraseFromParent();
J->eraseFromParent();
+
+ DEBUG(if (PrintAfterEveryPair) dbgs() << "BBV: block is now: \n" <<
+ BB << "\n");
}
DEBUG(dbgs() << "BBV: final: \n" << BB << "\n");
@@ -2397,6 +2972,7 @@ char BBVectorize::ID = 0;
static const char bb_vectorize_name[] = "Basic-Block Vectorization";
INITIALIZE_PASS_BEGIN(BBVectorize, BBV_NAME, bb_vectorize_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(BBVectorize, BBV_NAME, bb_vectorize_name, false, false)
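
The net effect of the IBeforeJ/FlipPairOrder changes above: when a chosen pair has no fixed order, the fuser counts how many of its incoming connections are direct versus swapped and flips the pair when flipping would turn the majority of swaps into direct uses. A standalone model of that decision (an illustrative sketch, not code from the patch):

    #include <utility>

    // Sketch of the pair-ordering heuristic in fuseChosenPairs.
    // NumDepsDirect/NumDepsSwap are the connection counts gathered from
    // PairConnectionTypes; OrigOrder records whether the dependence map
    // was queried with the pair in its original (I, J) order.
    static bool shouldFlipPair(unsigned NumDepsDirect, unsigned NumDepsSwap,
                               bool OrigOrder) {
      if (!OrigOrder)                     // counts were taken against (J, I)
        std::swap(NumDepsDirect, NumDepsSwap);
      return NumDepsSwap > NumDepsDirect; // flipping turns swaps into direct uses
    }
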
diff --git a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
new file mode 100644
index 0000000..a7ef248
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -0,0 +1,1941 @@
+//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
+// and generates target-independent LLVM-IR. Legalization of the IR is done
+// in the codegen. However, the vectorizer uses (will use) the codegen
+// interfaces to generate IR that is likely to result in an optimal binary.
+//
+// The loop vectorizer combines consecutive loop iterations into a single
+// 'wide' iteration. After this transformation the index is incremented
+// by the SIMD vector width, and not by one.
+//
+// This pass has four parts:
+// 1. The main loop pass that drives the different parts.
+// 2. LoopVectorizationLegality - A unit that checks for the legality
+// of the vectorization.
+// 3. SingleBlockLoopVectorizer - A unit that performs the actual
+// widening of instructions.
+// 4. LoopVectorizationCostModel - A unit that checks for the profitability
+// of vectorization. It decides on the optimal vector width, which
+// can be one, if vectorization is not profitable.
+//===----------------------------------------------------------------------===//
+//
+// The reduction-variable vectorization is based on the paper:
+// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
+//
+// Variable uniformity checks are inspired by:
+// Karrenberg, R. and Hack, S. Whole Function Vectorization.
+//
+// Other ideas/concepts are from:
+// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
+//
+//===----------------------------------------------------------------------===//
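
As a rough, self-contained illustration of the transformation this file implements (a hypothetical source loop with VF = 4; the inner lane loop stands in for a single wide SIMD operation):

    #include <cstddef>

    // Before: one element per iteration.
    void scalarLoop(int *A, const int *B, size_t N) {
      for (size_t I = 0; I < N; ++I)
        A[I] = B[I] + 5;
    }

    // After (conceptually): the index advances by the vector width, and a
    // scalar epilogue loop handles the remaining N % 4 iterations.
    void widenedLoop(int *A, const int *B, size_t N) {
      size_t I = 0;
      for (; I + 4 <= N; I += 4)        // vector body
        for (size_t L = 0; L < 4; ++L)  // models one <4 x i32> add/store
          A[I + L] = B[I + L] + 5;
      for (; I < N; ++I)                // scalar remainder loop
        A[I] = B[I] + 5;
    }
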
+#define LV_NAME "loop-vectorize"
+#define DEBUG_TYPE LV_NAME
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Value.h"
+#include "llvm/Function.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Module.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include <algorithm>
+using namespace llvm;
+
+static cl::opt<unsigned>
+VectorizationFactor("force-vector-width", cl::init(0), cl::Hidden,
+ cl::desc("Set the default vectorization width. Zero is autoselect."));
+
+/// We don't vectorize loops with a known constant trip count below this number.
+const unsigned TinyTripCountThreshold = 16;
+
+/// When performing a runtime memory check, do not check more than this
+/// number of pointers. Notice that the check is quadratic!
+const unsigned RuntimeMemoryCheckThreshold = 2;
+
+namespace {
+
+// Forward declarations.
+class LoopVectorizationLegality;
+class LoopVectorizationCostModel;
+
+/// SingleBlockLoopVectorizer vectorizes loops which contain only one basic
+/// block to a specified vectorization factor (VF).
+/// This class performs the widening of scalars into vectors, or multiple
+/// scalars. This class also implements the following features:
+/// * It inserts an epilogue loop for handling loops that don't have iteration
+/// counts that are known to be a multiple of the vectorization factor.
+/// * It handles the code generation for reduction variables.
+/// * Scalarization (implementation using scalars) of un-vectorizable
+/// instructions.
+/// SingleBlockLoopVectorizer does not perform any vectorization-legality
+/// checks, and relies on the caller to check for the different legality
+/// aspects. The SingleBlockLoopVectorizer relies on the
+/// LoopVectorizationLegality class to provide information about the induction
+/// and reduction variables that were found in the loop.
+class SingleBlockLoopVectorizer {
+public:
+ /// Ctor.
+ SingleBlockLoopVectorizer(Loop *Orig, ScalarEvolution *Se, LoopInfo *Li,
+ DominatorTree *dt, LPPassManager *Lpm,
+ unsigned VecWidth):
+ OrigLoop(Orig), SE(Se), LI(Li), DT(dt), LPM(Lpm), VF(VecWidth),
+ Builder(Se->getContext()), Induction(0), OldInduction(0) { }
+
+ // Perform the actual loop widening (vectorization).
+ void vectorize(LoopVectorizationLegality *Legal) {
+    /// Create a new empty loop. Unlink the old loop and connect the new one.
+ createEmptyLoop(Legal);
+ /// Widen each instruction in the old loop to a new one in the new loop.
+ /// Use the Legality module to find the induction and reduction variables.
+ vectorizeLoop(Legal);
+ // Register the new loop and update the analysis passes.
+ updateAnalysis();
+ }
+
+private:
+ /// Create an empty loop, based on the loop ranges of the old loop.
+ void createEmptyLoop(LoopVectorizationLegality *Legal);
+ /// Copy and widen the instructions from the old loop.
+ void vectorizeLoop(LoopVectorizationLegality *Legal);
+  /// Insert the new loop into the loop hierarchy and pass manager
+ /// and update the analysis passes.
+ void updateAnalysis();
+
+ /// This instruction is un-vectorizable. Implement it as a sequence
+ /// of scalars.
+ void scalarizeInstruction(Instruction *Instr);
+
+ /// Create a broadcast instruction. This method generates a broadcast
+ /// instruction (shuffle) for loop invariant values and for the induction
+ /// value. If this is the induction variable then we extend it to N, N+1, ...
+ /// this is needed because each iteration in the loop corresponds to a SIMD
+ /// element.
+ Value *getBroadcastInstrs(Value *V);
+
+  /// This is a helper function used by getBroadcastInstrs. It adds 0, 1, 2, ...
+  /// to each element of the vector, starting from zero.
+ Value *getConsecutiveVector(Value* Val);
+
+ /// When we go over instructions in the basic block we rely on previous
+ /// values within the current basic block or on loop invariant values.
+ /// When we widen (vectorize) values we place them in the map. If the values
+ /// are not within the map, they have to be loop invariant, so we simply
+ /// broadcast them into a vector.
+ Value *getVectorValue(Value *V);
+
+ /// Get a uniform vector of constant integers. We use this to get
+ /// vectors of ones and zeros for the reduction code.
+ Constant* getUniformVector(unsigned Val, Type* ScalarTy);
+
+ typedef DenseMap<Value*, Value*> ValueMap;
+
+ /// The original loop.
+ Loop *OrigLoop;
+ // Scev analysis to use.
+ ScalarEvolution *SE;
+ // Loop Info.
+ LoopInfo *LI;
+ // Dominator Tree.
+ DominatorTree *DT;
+  // Loop Pass Manager.
+ LPPassManager *LPM;
+ // The vectorization factor to use.
+ unsigned VF;
+
+ // The builder that we use
+ IRBuilder<> Builder;
+
+ // --- Vectorization state ---
+
+ /// The vector-loop preheader.
+ BasicBlock *LoopVectorPreHeader;
+ /// The scalar-loop preheader.
+ BasicBlock *LoopScalarPreHeader;
+  /// Middle Block between the vector loop and the scalar loop.
+ BasicBlock *LoopMiddleBlock;
+  /// The ExitBlock of the scalar loop.
+  BasicBlock *LoopExitBlock;
+  /// The vector loop body.
+  BasicBlock *LoopVectorBody;
+  /// The scalar loop body.
+  BasicBlock *LoopScalarBody;
+  /// The first bypass block.
+ BasicBlock *LoopBypassBlock;
+
+ /// The new Induction variable which was added to the new block.
+ PHINode *Induction;
+ /// The induction variable of the old basic block.
+ PHINode *OldInduction;
+ // Maps scalars to widened vectors.
+ ValueMap WidenMap;
+};
+
+/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
+/// to what vectorization factor.
+/// This class does not look at the profitability of vectorization, only the
+/// legality. This class has two main kinds of checks:
+/// * Memory checks - The code in canVectorizeMemory checks if vectorization
+/// will change the order of memory accesses in a way that will change the
+/// correctness of the program.
+/// * Scalar checks - The code in canVectorizeBlock checks for a number
+///   of different conditions, such as the availability of a single induction
+///   variable, that all types are supported and vectorizable, and so on.
+/// This code reflects the capabilities of SingleBlockLoopVectorizer.
+/// This class is also used by SingleBlockLoopVectorizer for identifying
+/// the induction variable and the different reduction variables.
+class LoopVectorizationLegality {
+public:
+ LoopVectorizationLegality(Loop *Lp, ScalarEvolution *Se, DataLayout *Dl):
+ TheLoop(Lp), SE(Se), DL(Dl), Induction(0) { }
+
+ /// This represents the kinds of reductions that we support.
+ enum ReductionKind {
+ NoReduction, /// Not a reduction.
+ IntegerAdd, /// Sum of numbers.
+ IntegerMult, /// Product of numbers.
+ IntegerOr, /// Bitwise or logical OR of numbers.
+ IntegerAnd, /// Bitwise or logical AND of numbers.
+ IntegerXor /// Bitwise or logical XOR of numbers.
+ };
+
+ /// This POD struct holds information about reduction variables.
+ struct ReductionDescriptor {
+ // Default C'tor
+ ReductionDescriptor():
+ StartValue(0), LoopExitInstr(0), Kind(NoReduction) {}
+
+ // C'tor.
+ ReductionDescriptor(Value *Start, Instruction *Exit, ReductionKind K):
+ StartValue(Start), LoopExitInstr(Exit), Kind(K) {}
+
+ // The starting value of the reduction.
+ // It does not have to be zero!
+ Value *StartValue;
+    // The instruction whose value is used outside the loop.
+ Instruction *LoopExitInstr;
+ // The kind of the reduction.
+ ReductionKind Kind;
+ };
+
+  // This POD struct holds information for the runtime memory legality
+  // check verifying that a group of pointers do not overlap.
+ struct RuntimePointerCheck {
+ /// This flag indicates if we need to add the runtime check.
+ bool Need;
+ /// Holds the pointers that we need to check.
+ SmallVector<Value*, 2> Pointers;
+ };
+
+ /// ReductionList contains the reduction descriptors for all
+ /// of the reductions that were found in the loop.
+ typedef DenseMap<PHINode*, ReductionDescriptor> ReductionList;
+
+ /// Returns true if it is legal to vectorize this loop.
+ /// This does not mean that it is profitable to vectorize this
+ /// loop, only that it is legal to do so.
+ bool canVectorize();
+
+ /// Returns the Induction variable.
+ PHINode *getInduction() {return Induction;}
+
+ /// Returns the reduction variables found in the loop.
+ ReductionList *getReductionVars() { return &Reductions; }
+
+ /// Check if the pointer returned by this GEP is consecutive
+ /// when the index is vectorized. This happens when the last
+ /// index of the GEP is consecutive, like the induction variable.
+ /// This check allows us to vectorize A[idx] into a wide load/store.
+ bool isConsecutiveGep(Value *Ptr);
+
+ /// Returns true if the value V is uniform within the loop.
+ bool isUniform(Value *V);
+
+ /// Returns true if this instruction will remain scalar after vectorization.
+ bool isUniformAfterVectorization(Instruction* I) {return Uniforms.count(I);}
+
+ /// Returns the information that we collected about runtime memory check.
+ RuntimePointerCheck *getRuntimePointerCheck() {return &PtrRtCheck; }
+private:
+ /// Check if a single basic block loop is vectorizable.
+ /// At this point we know that this is a loop with a constant trip count
+ /// and we only need to check individual instructions.
+ bool canVectorizeBlock(BasicBlock &BB);
+
+ /// When we vectorize loops we may change the order in which
+ /// we read and write from memory. This method checks if it is
+  /// legal to vectorize the code, considering only memory constraints.
+  /// Returns true if BB is vectorizable.
+ bool canVectorizeMemory(BasicBlock &BB);
+
+  /// Returns true if 'Phi' is the kind of reduction variable for type
+ /// 'Kind'. If this is a reduction variable, it adds it to ReductionList.
+ bool AddReductionVar(PHINode *Phi, ReductionKind Kind);
+ /// Returns true if the instruction I can be a reduction variable of type
+ /// 'Kind'.
+ bool isReductionInstr(Instruction *I, ReductionKind Kind);
+  /// Returns true if 'Phi' is an induction variable.
+ bool isInductionVariable(PHINode *Phi);
+  /// Returns true if we can compute the address bounds of Ptr within the loop.
+ bool hasComputableBounds(Value *Ptr);
+
+ /// The loop that we evaluate.
+ Loop *TheLoop;
+ /// Scev analysis.
+ ScalarEvolution *SE;
+ /// DataLayout analysis.
+ DataLayout *DL;
+
+ // --- vectorization state --- //
+
+ /// Holds the induction variable.
+ PHINode *Induction;
+ /// Holds the reduction variables.
+ ReductionList Reductions;
+ /// Allowed outside users. This holds the reduction
+ /// vars which can be accessed from outside the loop.
+ SmallPtrSet<Value*, 4> AllowedExit;
+ /// This set holds the variables which are known to be uniform after
+ /// vectorization.
+ SmallPtrSet<Instruction*, 4> Uniforms;
+ /// We need to check that all of the pointers in this list are disjoint
+ /// at runtime.
+ RuntimePointerCheck PtrRtCheck;
+};
+
+/// LoopVectorizationCostModel - estimates the expected speedups due to
+/// vectorization.
+/// In many cases vectorization is not profitable. This can happen for a
+/// number of reasons. In this class we mainly attempt to predict
+/// the expected speedup/slowdowns due to the supported instruction set.
+/// We use the VectorTargetTransformInfo to query the different backends
+/// for the cost of different operations.
+class LoopVectorizationCostModel {
+public:
+ /// C'tor.
+ LoopVectorizationCostModel(Loop *Lp, ScalarEvolution *Se,
+ LoopVectorizationLegality *Leg,
+ const VectorTargetTransformInfo *Vtti):
+ TheLoop(Lp), SE(Se), Legal(Leg), VTTI(Vtti) { }
+
+ /// Returns the most profitable vectorization factor for the loop that is
+ /// smaller or equal to the VF argument. This method checks every power
+ /// of two up to VF.
+ unsigned findBestVectorizationFactor(unsigned VF = 8);
+
+private:
+ /// Returns the expected execution cost. The unit of the cost does
+ /// not matter because we use the 'cost' units to compare different
+ /// vector widths. The cost that is returned is *not* normalized by
+  /// the vectorization factor.
+ unsigned expectedCost(unsigned VF);
+
+ /// Returns the execution time cost of an instruction for a given vector
+ /// width. Vector width of one means scalar.
+ unsigned getInstructionCost(Instruction *I, unsigned VF);
+
+ /// A helper function for converting Scalar types to vector types.
+ /// If the incoming type is void, we return void. If the VF is 1, we return
+ /// the scalar type.
+ static Type* ToVectorTy(Type *Scalar, unsigned VF);
+
+ /// The loop that we evaluate.
+ Loop *TheLoop;
+ /// Scev analysis.
+ ScalarEvolution *SE;
+
+ /// Vectorization legality.
+ LoopVectorizationLegality *Legal;
+ /// Vector target information.
+ const VectorTargetTransformInfo *VTTI;
+};
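
expectedCost and findBestVectorizationFactor are defined later in the file; the selection loop implied by this interface looks roughly like the following sketch (the Cost callback is an assumed stand-in for expectedCost):

    #include <functional>

    // Sketch: try every power of two up to MaxVF and keep the width with
    // the lowest cost per scalar lane. Returning 1 means "do not vectorize".
    static unsigned findBestVF(unsigned MaxVF,
                               const std::function<unsigned(unsigned)> &Cost) {
      unsigned BestVF = 1;
      float BestPerLane = static_cast<float>(Cost(1));
      for (unsigned VF = 2; VF <= MaxVF; VF *= 2) {
        // The returned cost is *not* normalized by VF, so divide first.
        float PerLane = static_cast<float>(Cost(VF)) / VF;
        if (PerLane < BestPerLane) {
          BestPerLane = PerLane;
          BestVF = VF;
        }
      }
      return BestVF;
    }
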
+
+struct LoopVectorize : public LoopPass {
+ static char ID; // Pass identification, replacement for typeid
+
+ LoopVectorize() : LoopPass(ID) {
+ initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
+ }
+
+ ScalarEvolution *SE;
+ DataLayout *DL;
+ LoopInfo *LI;
+ TargetTransformInfo *TTI;
+ DominatorTree *DT;
+
+ virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
+ // We only vectorize innermost loops.
+ if (!L->empty())
+ return false;
+
+ SE = &getAnalysis<ScalarEvolution>();
+ DL = getAnalysisIfAvailable<DataLayout>();
+ LI = &getAnalysis<LoopInfo>();
+ TTI = getAnalysisIfAvailable<TargetTransformInfo>();
+ DT = &getAnalysis<DominatorTree>();
+
+ DEBUG(dbgs() << "LV: Checking a loop in \"" <<
+ L->getHeader()->getParent()->getName() << "\"\n");
+
+ // Check if it is legal to vectorize the loop.
+ LoopVectorizationLegality LVL(L, SE, DL);
+ if (!LVL.canVectorize()) {
+ DEBUG(dbgs() << "LV: Not vectorizing.\n");
+ return false;
+ }
+
+    // Select the preferred vectorization factor.
+ unsigned VF = 1;
+ if (VectorizationFactor == 0) {
+ const VectorTargetTransformInfo *VTTI = 0;
+ if (TTI)
+ VTTI = TTI->getVectorTargetTransformInfo();
+ // Use the cost model.
+ LoopVectorizationCostModel CM(L, SE, &LVL, VTTI);
+ VF = CM.findBestVectorizationFactor();
+
+ if (VF == 1) {
+ DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
+ return false;
+ }
+
+ } else {
+ // Use the user command flag.
+ VF = VectorizationFactor;
+ }
+
+ DEBUG(dbgs() << "LV: Found a vectorizable loop ("<< VF << ") in "<<
+ L->getHeader()->getParent()->getParent()->getModuleIdentifier()<<
+ "\n");
+
+    // If we decided that it is *legal* to vectorize the loop, then do it.
+ SingleBlockLoopVectorizer LB(L, SE, LI, DT, &LPM, VF);
+ LB.vectorize(&LVL);
+
+ DEBUG(verifyFunction(*L->getHeader()->getParent()));
+ return true;
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ LoopPass::getAnalysisUsage(AU);
+ AU.addRequiredID(LoopSimplifyID);
+ AU.addRequiredID(LCSSAID);
+ AU.addRequired<LoopInfo>();
+ AU.addRequired<ScalarEvolution>();
+ AU.addRequired<DominatorTree>();
+ AU.addPreserved<LoopInfo>();
+ AU.addPreserved<DominatorTree>();
+ }
+
+};
+
+Value *SingleBlockLoopVectorizer::getBroadcastInstrs(Value *V) {
+ // Instructions that access the old induction variable
+ // actually want to get the new one.
+ if (V == OldInduction)
+ V = Induction;
+ // Create the types.
+ LLVMContext &C = V->getContext();
+ Type *VTy = VectorType::get(V->getType(), VF);
+ Type *I32 = IntegerType::getInt32Ty(C);
+ Constant *Zero = ConstantInt::get(I32, 0);
+ Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32, VF));
+ Value *UndefVal = UndefValue::get(VTy);
+ // Insert the value into a new vector.
+ Value *SingleElem = Builder.CreateInsertElement(UndefVal, V, Zero);
+ // Broadcast the scalar into all locations in the vector.
+ Value *Shuf = Builder.CreateShuffleVector(SingleElem, UndefVal, Zeros,
+ "broadcast");
+ // We are accessing the induction variable. Make sure to promote the
+ // index for each consecutive SIMD lane. This adds 0,1,2 ... to all lanes.
+ if (V == Induction)
+ return getConsecutiveVector(Shuf);
+ return Shuf;
+}
+
+Value *SingleBlockLoopVectorizer::getConsecutiveVector(Value* Val) {
+ assert(Val->getType()->isVectorTy() && "Must be a vector");
+ assert(Val->getType()->getScalarType()->isIntegerTy() &&
+ "Elem must be an integer");
+ // Create the types.
+ Type *ITy = Val->getType()->getScalarType();
+ VectorType *Ty = cast<VectorType>(Val->getType());
+ unsigned VLen = Ty->getNumElements();
+ SmallVector<Constant*, 8> Indices;
+
+ // Create a vector of consecutive numbers from zero to VF.
+ for (unsigned i = 0; i < VLen; ++i)
+ Indices.push_back(ConstantInt::get(ITy, i));
+
+ // Add the consecutive indices to the vector value.
+ Constant *Cv = ConstantVector::get(Indices);
+ assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
+ return Builder.CreateAdd(Val, Cv, "induction");
+}
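
Taken together, getBroadcastInstrs followed by getConsecutiveVector turn the scalar induction value Ind into the lane vector <Ind, Ind+1, ..., Ind+VF-1>. A scalar model of those two steps, assuming VF = 4:

    #include <array>
    #include <cstdint>

    // Model: splat the induction value, then add <0, 1, 2, 3> so that each
    // SIMD lane addresses its own consecutive iteration.
    static std::array<uint64_t, 4> inductionLanes(uint64_t Ind) {
      std::array<uint64_t, 4> Lanes;
      Lanes.fill(Ind);                 // broadcast (insertelement + shuffle)
      for (uint64_t I = 0; I < 4; ++I)
        Lanes[I] += I;                 // the "induction" add of 0, 1, 2, 3
      return Lanes;
    }
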
+
+bool LoopVectorizationLegality::isConsecutiveGep(Value *Ptr) {
+ GetElementPtrInst *Gep = dyn_cast_or_null<GetElementPtrInst>(Ptr);
+ if (!Gep)
+ return false;
+
+ unsigned NumOperands = Gep->getNumOperands();
+ Value *LastIndex = Gep->getOperand(NumOperands - 1);
+
+ // Check that all of the gep indices are uniform except for the last.
+ for (unsigned i = 0; i < NumOperands - 1; ++i)
+ if (!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
+ return false;
+
+  // We can emit wide load/stores only if the last index is the induction
+ // variable.
+ const SCEV *Last = SE->getSCEV(LastIndex);
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) {
+ const SCEV *Step = AR->getStepRecurrence(*SE);
+
+ // The memory is consecutive because the last index is consecutive
+ // and all other indices are loop invariant.
+ if (Step->isOne())
+ return true;
+ }
+
+ return false;
+}
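
Some hypothetical accesses and how this predicate classifies them (i is the loop induction variable; the comments describe the SCEV of the last GEP index):

    // Illustrative only: which pointers isConsecutiveGep would accept.
    void gepExamples(int *A, const int *Idx, int N) {
      for (int I = 0; I < N; ++I) {
        A[I] = 0;       // last index is {0,+,1}: consecutive, wide access OK
        A[I + 1] = 1;   // still an add-recurrence with step 1: consecutive
        A[2 * I] = 2;   // step 2: rejected, the access will be scalarized
        A[Idx[I]] = 3;  // last index is not an AddRec: rejected (a gather)
      }
    }
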
+
+bool LoopVectorizationLegality::isUniform(Value *V) {
+ return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
+}
+
+Value *SingleBlockLoopVectorizer::getVectorValue(Value *V) {
+ assert(!V->getType()->isVectorTy() && "Can't widen a vector");
+ // If we saved a vectorized copy of V, use it.
+ Value *&MapEntry = WidenMap[V];
+ if (MapEntry)
+ return MapEntry;
+
+ // Broadcast V and save the value for future uses.
+ Value *B = getBroadcastInstrs(V);
+ MapEntry = B;
+ return B;
+}
+
+Constant*
+SingleBlockLoopVectorizer::getUniformVector(unsigned Val, Type* ScalarTy) {
+ SmallVector<Constant*, 8> Indices;
+ // Create a vector of consecutive numbers from zero to VF.
+ for (unsigned i = 0; i < VF; ++i)
+ Indices.push_back(ConstantInt::get(ScalarTy, Val, true));
+
+ // Add the consecutive indices to the vector value.
+ return ConstantVector::get(Indices);
+}
+
+void SingleBlockLoopVectorizer::scalarizeInstruction(Instruction *Instr) {
+ assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
+ // Holds vector parameters or scalars, in case of uniform vals.
+ SmallVector<Value*, 8> Params;
+
+ // Find all of the vectorized parameters.
+ for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
+ Value *SrcOp = Instr->getOperand(op);
+
+ // If we are accessing the old induction variable, use the new one.
+ if (SrcOp == OldInduction) {
+ Params.push_back(getBroadcastInstrs(Induction));
+ continue;
+ }
+
+ // Try using previously calculated values.
+ Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);
+
+ // If the src is an instruction that appeared earlier in the basic block
+ // then it should already be vectorized.
+ if (SrcInst && SrcInst->getParent() == Instr->getParent()) {
+ assert(WidenMap.count(SrcInst) && "Source operand is unavailable");
+ // The parameter is a vector value from earlier.
+ Params.push_back(WidenMap[SrcInst]);
+ } else {
+ // The parameter is a scalar from outside the loop. Maybe even a constant.
+ Params.push_back(SrcOp);
+ }
+ }
+
+ assert(Params.size() == Instr->getNumOperands() &&
+ "Invalid number of operands");
+
+ // Does this instruction return a value ?
+ bool IsVoidRetTy = Instr->getType()->isVoidTy();
+ Value *VecResults = 0;
+
+ // If we have a return value, create an empty vector. We place the scalarized
+ // instructions in this vector.
+ if (!IsVoidRetTy)
+ VecResults = UndefValue::get(VectorType::get(Instr->getType(), VF));
+
+ // For each scalar that we create:
+ for (unsigned i = 0; i < VF; ++i) {
+ Instruction *Cloned = Instr->clone();
+ if (!IsVoidRetTy)
+ Cloned->setName(Instr->getName() + ".cloned");
+    // Replace the operands of the cloned instructions with extracted scalars.
+ for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
+ Value *Op = Params[op];
+ // Param is a vector. Need to extract the right lane.
+ if (Op->getType()->isVectorTy())
+ Op = Builder.CreateExtractElement(Op, Builder.getInt32(i));
+ Cloned->setOperand(op, Op);
+ }
+
+ // Place the cloned scalar in the new loop.
+ Builder.Insert(Cloned);
+
+ // If the original scalar returns a value we need to place it in a vector
+ // so that future users will be able to use it.
+ if (!IsVoidRetTy)
+ VecResults = Builder.CreateInsertElement(VecResults, Cloned,
+ Builder.getInt32(i));
+ }
+
+ if (!IsVoidRetTy)
+ WidenMap[Instr] = VecResults;
+}
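
A scalar model of what scalarizeInstruction emits for one un-vectorizable binary operation, assuming VF = 4: each lane is extracted, the operation is redone on the scalars, and the results are re-inserted into a result vector. The ScalarOp callback is a hypothetical stand-in for the cloned instruction:

    #include <array>

    // Illustrative: scalarizing a binary op over 4-lane "vectors".
    static std::array<int, 4> scalarizeModel(const std::array<int, 4> &X,
                                             const std::array<int, 4> &Y,
                                             int (*ScalarOp)(int, int)) {
      std::array<int, 4> Result = {};      // stands in for the undef result
      for (int Lane = 0; Lane < 4; ++Lane) {
        int A = X[Lane];                   // extractelement, lane i
        int B = Y[Lane];
        Result[Lane] = ScalarOp(A, B);     // cloned scalar instruction,
      }                                    // then insertelement at lane i
      return Result;
    }
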
+
+void
+SingleBlockLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
+ /*
+ In this function we generate a new loop. The new loop will contain
+ the vectorized instructions while the old loop will continue to run the
+ scalar remainder.
+
+ [ ] <-- vector loop bypass.
+ / |
+ / v
+| [ ] <-- vector pre header.
+| |
+| v
+| [ ] \
+| [ ]_| <-- vector loop.
+| |
+ \ v
+ >[ ] <--- middle-block.
+ / |
+ / v
+| [ ] <--- new preheader.
+| |
+| v
+| [ ] \
+| [ ]_| <-- old scalar loop to handle remainder.
+ \ |
+ \ v
+ >[ ] <-- exit block.
+ ...
+ */
+
+ OldInduction = Legal->getInduction();
+ assert(OldInduction && "We must have a single phi node.");
+ Type *IdxTy = OldInduction->getType();
+
+ // Find the loop boundaries.
+ const SCEV *ExitCount = SE->getExitCount(OrigLoop, OrigLoop->getHeader());
+ assert(ExitCount != SE->getCouldNotCompute() && "Invalid loop count");
+
+ // Get the total trip count from the count by adding 1.
+ ExitCount = SE->getAddExpr(ExitCount,
+ SE->getConstant(ExitCount->getType(), 1));
+ // We may need to extend the index in case there is a type mismatch.
+ // We know that the count starts at zero and does not overflow.
+ // We are using Zext because it should be less expensive.
+ if (ExitCount->getType() != IdxTy)
+ ExitCount = SE->getZeroExtendExpr(ExitCount, IdxTy);
+
+ // This is the original scalar-loop preheader.
+ BasicBlock *BypassBlock = OrigLoop->getLoopPreheader();
+ BasicBlock *ExitBlock = OrigLoop->getExitBlock();
+ assert(ExitBlock && "Must have an exit block");
+
+ // The loop index does not have to start at Zero. It starts with this value.
+ Value *StartIdx = OldInduction->getIncomingValueForBlock(BypassBlock);
+
+ assert(OrigLoop->getNumBlocks() == 1 && "Invalid loop");
+ assert(BypassBlock && "Invalid loop structure");
+
+ BasicBlock *VectorPH =
+ BypassBlock->splitBasicBlock(BypassBlock->getTerminator(), "vector.ph");
+ BasicBlock *VecBody = VectorPH->splitBasicBlock(VectorPH->getTerminator(),
+ "vector.body");
+
+ BasicBlock *MiddleBlock = VecBody->splitBasicBlock(VecBody->getTerminator(),
+ "middle.block");
+ BasicBlock *ScalarPH =
+ MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(),
+ "scalar.preheader");
+  // Remember the original scalar loop header.
+ BasicBlock *OldBasicBlock = OrigLoop->getHeader();
+
+ // Use this IR builder to create the loop instructions (Phi, Br, Cmp)
+ // inside the loop.
+ Builder.SetInsertPoint(VecBody->getFirstInsertionPt());
+
+ // Generate the induction variable.
+ Induction = Builder.CreatePHI(IdxTy, 2, "index");
+ Constant *Step = ConstantInt::get(IdxTy, VF);
+
+ // Expand the trip count and place the new instructions in the preheader.
+ // Notice that the pre-header does not change, only the loop body.
+ SCEVExpander Exp(*SE, "induction");
+ Instruction *Loc = BypassBlock->getTerminator();
+
+ // Count holds the overall loop count (N).
+ Value *Count = Exp.expandCodeFor(ExitCount, Induction->getType(), Loc);
+
+ // Add the start index to the loop count to get the new end index.
+ Value *IdxEnd = BinaryOperator::CreateAdd(Count, StartIdx, "end.idx", Loc);
+
+ // Now we need to generate the expression for N - (N % VF), which is
+ // the part that the vectorized body will execute.
+ Constant *CIVF = ConstantInt::get(IdxTy, VF);
+ Value *R = BinaryOperator::CreateURem(Count, CIVF, "n.mod.vf", Loc);
+ Value *CountRoundDown = BinaryOperator::CreateSub(Count, R, "n.vec", Loc);
+ Value *IdxEndRoundDown = BinaryOperator::CreateAdd(CountRoundDown, StartIdx,
+ "end.idx.rnd.down", Loc);
+
+ // Now, compare the new count to zero. If it is zero, jump to the scalar part.
+ Value *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
+ IdxEndRoundDown,
+ StartIdx,
+ "cmp.zero", Loc);
+
+ LoopVectorizationLegality::RuntimePointerCheck *PtrRtCheck =
+ Legal->getRuntimePointerCheck();
+ Value *MemoryRuntimeCheck = 0;
+ if (PtrRtCheck->Need) {
+ unsigned NumPointers = PtrRtCheck->Pointers.size();
+ SmallVector<Value* , 2> Starts;
+ SmallVector<Value* , 2> Ends;
+
+ // Use this type for pointer arithmetic.
+ Type* PtrArithTy = PtrRtCheck->Pointers[0]->getType();
+
+ for (unsigned i=0; i < NumPointers; ++i) {
+ Value *Ptr = PtrRtCheck->Pointers[i];
+ const SCEV *Sc = SE->getSCEV(Ptr);
+
+ if (SE->isLoopInvariant(Sc, OrigLoop)) {
+        DEBUG(dbgs() << "LV: Adding RT check for a loop invariant ptr:" <<
+ *Ptr <<"\n");
+ Starts.push_back(Ptr);
+ Ends.push_back(Ptr);
+ } else {
+ DEBUG(dbgs() << "LV: Adding RT check for range:" << *Ptr <<"\n");
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
+ Value *Start = Exp.expandCodeFor(AR->getStart(), PtrArithTy, Loc);
+ const SCEV *Ex = SE->getExitCount(OrigLoop, OrigLoop->getHeader());
+ const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
+ assert(!isa<SCEVCouldNotCompute>(ScEnd) && "Invalid scev range.");
+ Value *End = Exp.expandCodeFor(ScEnd, PtrArithTy, Loc);
+ Starts.push_back(Start);
+ Ends.push_back(End);
+ }
+ }
+
+ for (unsigned i=0; i < NumPointers; ++i) {
+ for (unsigned j=i+1; j < NumPointers; ++j) {
+        Value *Cmp0 = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULE,
+                                      Starts[i], Ends[j], "bound0", Loc);
+        Value *Cmp1 = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULE,
+                                      Starts[j], Ends[i], "bound1", Loc);
+ Value *IsConflict = BinaryOperator::Create(Instruction::And, Cmp0, Cmp1,
+ "found.conflict", Loc);
+ if (MemoryRuntimeCheck) {
+ MemoryRuntimeCheck = BinaryOperator::Create(Instruction::Or,
+ MemoryRuntimeCheck,
+ IsConflict,
+ "conflict.rdx", Loc);
+ } else {
+ MemoryRuntimeCheck = IsConflict;
+ }
+ }
+ }
+ }// end of need-runtime-check code.
+
+  // If we are using memory runtime checks, fold them into the branch condition.
+ if (MemoryRuntimeCheck) {
+ Cmp = BinaryOperator::Create(Instruction::Or, Cmp, MemoryRuntimeCheck,
+ "CntOrMem", Loc);
+ }
+
+ BranchInst::Create(MiddleBlock, VectorPH, Cmp, Loc);
+ // Remove the old terminator.
+ Loc->eraseFromParent();
+
+ // We are going to resume the execution of the scalar loop.
+ // This PHI decides on what number to start. If we come from the
+ // vector loop then we need to start with the end index minus the
+ // index modulo VF. If we come from a bypass edge then we need to start
+ // from the real start.
+ PHINode* ResumeIndex = PHINode::Create(IdxTy, 2, "resume.idx",
+ MiddleBlock->getTerminator());
+ ResumeIndex->addIncoming(StartIdx, BypassBlock);
+ ResumeIndex->addIncoming(IdxEndRoundDown, VecBody);
+
+ // Add a check in the middle block to see if we have completed
+ // all of the iterations in the first vector loop.
+ // If (N - N%VF) == N, then we *don't* need to run the remainder.
+ Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, IdxEnd,
+ ResumeIndex, "cmp.n",
+ MiddleBlock->getTerminator());
+
+ BranchInst::Create(ExitBlock, ScalarPH, CmpN, MiddleBlock->getTerminator());
+ // Remove the old terminator.
+ MiddleBlock->getTerminator()->eraseFromParent();
+
+ // Create i+1 and fill the PHINode.
+ Value *NextIdx = Builder.CreateAdd(Induction, Step, "index.next");
+ Induction->addIncoming(StartIdx, VectorPH);
+ Induction->addIncoming(NextIdx, VecBody);
+ // Create the compare.
+ Value *ICmp = Builder.CreateICmpEQ(NextIdx, IdxEndRoundDown);
+ Builder.CreateCondBr(ICmp, MiddleBlock, VecBody);
+
+ // Now we have two terminators. Remove the old one from the block.
+ VecBody->getTerminator()->eraseFromParent();
+
+ // Fix the scalar body iteration count.
+ unsigned BlockIdx = OldInduction->getBasicBlockIndex(ScalarPH);
+ OldInduction->setIncomingValue(BlockIdx, ResumeIndex);
+
+ // Get ready to start creating new instructions into the vectorized body.
+ Builder.SetInsertPoint(VecBody->getFirstInsertionPt());
+
+ // Register the new loop.
+ Loop* Lp = new Loop();
+ LPM->insertLoop(Lp, OrigLoop->getParentLoop());
+
+ Lp->addBasicBlockToLoop(VecBody, LI->getBase());
+
+ Loop *ParentLoop = OrigLoop->getParentLoop();
+ if (ParentLoop) {
+ ParentLoop->addBasicBlockToLoop(ScalarPH, LI->getBase());
+ ParentLoop->addBasicBlockToLoop(VectorPH, LI->getBase());
+ ParentLoop->addBasicBlockToLoop(MiddleBlock, LI->getBase());
+ }
+
+ // Save the state.
+ LoopVectorPreHeader = VectorPH;
+ LoopScalarPreHeader = ScalarPH;
+ LoopMiddleBlock = MiddleBlock;
+ LoopExitBlock = ExitBlock;
+ LoopVectorBody = VecBody;
+ LoopScalarBody = OldBasicBlock;
+ LoopBypassBlock = BypassBlock;
+}
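
A quick numeric check of the index bookkeeping above, assuming StartIdx = 0, a trip count of 10, and VF = 4 (illustrative values only):

    #include <cassert>

    int main() {
      unsigned StartIdx = 0, Count = 10, VF = 4;
      unsigned R = Count % VF;                              // n.mod.vf = 2
      unsigned CountRoundDown = Count - R;                  // n.vec = 8
      unsigned IdxEndRoundDown = CountRoundDown + StartIdx; // vector loop stops at 8
      unsigned IdxEnd = Count + StartIdx;                   // scalar loop stops at 10
      assert(IdxEndRoundDown == 8 && IdxEnd == 10);
      // The resume PHI hands index 8 to the scalar loop, which runs i = 8, 9.
      return 0;
    }
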
+
+/// This function returns the identity element (or neutral element) for
+/// the operation K.
+static unsigned
+getReductionIdentity(LoopVectorizationLegality::ReductionKind K) {
+ switch (K) {
+ case LoopVectorizationLegality::IntegerXor:
+ case LoopVectorizationLegality::IntegerAdd:
+ case LoopVectorizationLegality::IntegerOr:
+ // Adding, Xoring, Oring zero to a number does not change it.
+ return 0;
+ case LoopVectorizationLegality::IntegerMult:
+ // Multiplying a number by 1 does not change it.
+ return 1;
+ case LoopVectorizationLegality::IntegerAnd:
+ // AND-ing a number with an all-1 value does not change it.
+ return -1;
+ default:
+ llvm_unreachable("Unknown reduction kind");
+ }
+}
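
Why the identity matters: the vector PHI is seeded with a whole vector, so every lane except the one carrying the incoming scalar must hold a value that leaves the reduction unchanged. A small self-check for the AND case (illustrative):

    #include <cassert>

    int main() {
      int Identity = -1;                  // all ones: the AND identity
      int Lanes[4] = {Identity, Identity, Identity, Identity};
      Lanes[0] &= 0x5A;                   // lane 0 carries the incoming scalar
      int Rdx = Lanes[0] & Lanes[1] & Lanes[2] & Lanes[3];
      assert(Rdx == 0x5A);                // identity lanes do not perturb it
      return 0;
    }
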
+
+void
+SingleBlockLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
+ //===------------------------------------------------===//
+ //
+  // Notice: any optimization or new instruction that goes
+  // into the code below should also be implemented in
+ // the cost-model.
+ //
+ //===------------------------------------------------===//
+ typedef SmallVector<PHINode*, 4> PhiVector;
+ BasicBlock &BB = *OrigLoop->getHeader();
+ Constant *Zero = ConstantInt::get(
+ IntegerType::getInt32Ty(BB.getContext()), 0);
+
+ // In order to support reduction variables we need to be able to vectorize
+ // Phi nodes. Phi nodes have cycles, so we need to vectorize them in two
+  // stages. First, we create a new vector PHI node with no incoming edges.
+ // We use this value when we vectorize all of the instructions that use the
+ // PHI. Next, after all of the instructions in the block are complete we
+ // add the new incoming edges to the PHI. At this point all of the
+ // instructions in the basic block are vectorized, so we can use them to
+ // construct the PHI.
+ PhiVector PHIsToFix;
+
+ // For each instruction in the old loop.
+ for (BasicBlock::iterator it = BB.begin(), e = BB.end(); it != e; ++it) {
+ Instruction *Inst = it;
+
+ switch (Inst->getOpcode()) {
+ case Instruction::Br:
+ // Nothing to do for PHIs and BR, since we already took care of the
+ // loop control flow instructions.
+ continue;
+ case Instruction::PHI:{
+ PHINode* P = cast<PHINode>(Inst);
+ // Special handling for the induction var.
+ if (OldInduction == Inst)
+ continue;
+ // This is phase one of vectorizing PHIs.
+ // This has to be a reduction variable.
+ assert(Legal->getReductionVars()->count(P) && "Not a Reduction");
+ Type *VecTy = VectorType::get(Inst->getType(), VF);
+ WidenMap[Inst] = Builder.CreatePHI(VecTy, 2, "vec.phi");
+ PHIsToFix.push_back(P);
+ continue;
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ // Just widen binops.
+ BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
+ Value *A = getVectorValue(Inst->getOperand(0));
+ Value *B = getVectorValue(Inst->getOperand(1));
+
+ // Use this vector value for all users of the original instruction.
+ Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);
+ WidenMap[Inst] = V;
+
+ // Update the NSW, NUW and Exact flags.
+ BinaryOperator *VecOp = cast<BinaryOperator>(V);
+ if (isa<OverflowingBinaryOperator>(BinOp)) {
+ VecOp->setHasNoSignedWrap(BinOp->hasNoSignedWrap());
+ VecOp->setHasNoUnsignedWrap(BinOp->hasNoUnsignedWrap());
+ }
+ if (isa<PossiblyExactOperator>(VecOp))
+ VecOp->setIsExact(BinOp->isExact());
+ break;
+ }
+ case Instruction::Select: {
+ // Widen selects.
+ // If the selector is loop invariant we can create a select
+ // instruction with a scalar condition. Otherwise, use vector-select.
+ Value *Cond = Inst->getOperand(0);
+ bool InvariantCond = SE->isLoopInvariant(SE->getSCEV(Cond), OrigLoop);
+
+ // The condition can be loop invariant but still defined inside the
+ // loop. This means that we can't just use the original 'cond' value.
+ // We have to take the 'vectorized' value and pick the first lane.
+ // Instcombine will make this a no-op.
+ Cond = getVectorValue(Cond);
+ if (InvariantCond)
+ Cond = Builder.CreateExtractElement(Cond, Builder.getInt32(0));
+
+ Value *Op0 = getVectorValue(Inst->getOperand(1));
+ Value *Op1 = getVectorValue(Inst->getOperand(2));
+ WidenMap[Inst] = Builder.CreateSelect(Cond, Op0, Op1);
+ break;
+ }
+
+ case Instruction::ICmp:
+ case Instruction::FCmp: {
+ // Widen compares. Generate vector compares.
+ bool FCmp = (Inst->getOpcode() == Instruction::FCmp);
+ CmpInst *Cmp = dyn_cast<CmpInst>(Inst);
+ Value *A = getVectorValue(Inst->getOperand(0));
+ Value *B = getVectorValue(Inst->getOperand(1));
+ if (FCmp)
+ WidenMap[Inst] = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
+ else
+ WidenMap[Inst] = Builder.CreateICmp(Cmp->getPredicate(), A, B);
+ break;
+ }
+
+ case Instruction::Store: {
+ // Attempt to issue a wide store.
+ StoreInst *SI = dyn_cast<StoreInst>(Inst);
+ Type *StTy = VectorType::get(SI->getValueOperand()->getType(), VF);
+ Value *Ptr = SI->getPointerOperand();
+ unsigned Alignment = SI->getAlignment();
+
+ assert(!Legal->isUniform(Ptr) &&
+ "We do not allow storing to uniform addresses");
+
+ GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
+
+      // If the pointer is not a consecutive GEP, scalarize the store.
+ if (!Legal->isConsecutiveGep(Gep)) {
+ scalarizeInstruction(Inst);
+ break;
+ }
+
+ // The last index does not have to be the induction. It can be
+ // consecutive and be a function of the index. For example A[I+1];
+ unsigned NumOperands = Gep->getNumOperands();
+ Value *LastIndex = getVectorValue(Gep->getOperand(NumOperands - 1));
+ LastIndex = Builder.CreateExtractElement(LastIndex, Zero);
+
+ // Create the new GEP with the new induction variable.
+ GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
+ Gep2->setOperand(NumOperands - 1, LastIndex);
+ Ptr = Builder.Insert(Gep2);
+ Ptr = Builder.CreateBitCast(Ptr, StTy->getPointerTo());
+ Value *Val = getVectorValue(SI->getValueOperand());
+ Builder.CreateStore(Val, Ptr)->setAlignment(Alignment);
+ break;
+ }
+ case Instruction::Load: {
+ // Attempt to issue a wide load.
+ LoadInst *LI = dyn_cast<LoadInst>(Inst);
+ Type *RetTy = VectorType::get(LI->getType(), VF);
+ Value *Ptr = LI->getPointerOperand();
+ unsigned Alignment = LI->getAlignment();
+ GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
+
+      // If we don't have a GEP, the pointer is loop invariant, or the GEP
+      // is not consecutive, scalarize the load.
+ if (!Gep || Legal->isUniform(Gep) || !Legal->isConsecutiveGep(Gep)) {
+ scalarizeInstruction(Inst);
+ break;
+ }
+
+ // The last index does not have to be the induction. It can be
+ // consecutive and be a function of the index. For example A[I+1];
+ unsigned NumOperands = Gep->getNumOperands();
+ Value *LastIndex = getVectorValue(Gep->getOperand(NumOperands -1));
+ LastIndex = Builder.CreateExtractElement(LastIndex, Zero);
+
+ // Create the new GEP with the new induction variable.
+ GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
+ Gep2->setOperand(NumOperands - 1, LastIndex);
+ Ptr = Builder.Insert(Gep2);
+ Ptr = Builder.CreateBitCast(Ptr, RetTy->getPointerTo());
+ LI = Builder.CreateLoad(Ptr);
+ LI->setAlignment(Alignment);
+ // Use this vector value for all users of the load.
+ WidenMap[Inst] = LI;
+ break;
+ }
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::SIToFP:
+ case Instruction::UIToFP:
+ case Instruction::Trunc:
+ case Instruction::FPTrunc:
+ case Instruction::BitCast: {
+      /// Vectorize casts.
+ CastInst *CI = dyn_cast<CastInst>(Inst);
+ Value *A = getVectorValue(Inst->getOperand(0));
+ Type *DestTy = VectorType::get(CI->getType()->getScalarType(), VF);
+ WidenMap[Inst] = Builder.CreateCast(CI->getOpcode(), A, DestTy);
+ break;
+ }
+
+ default:
+ /// All other instructions are unsupported. Scalarize them.
+ scalarizeInstruction(Inst);
+ break;
+ }// end of switch.
+ }// end of for_each instr.
+
+  // At this point every instruction in the original loop is widened to
+ // a vector form. We are almost done. Now, we need to fix the PHI nodes
+ // that we vectorized. The PHI nodes are currently empty because we did
+ // not want to introduce cycles. Notice that the remaining PHI nodes
+ // that we need to fix are reduction variables.
+
+  // Create the 'reduced' values for each of the reduction vars.
+ // The reduced values are the vector values that we scalarize and combine
+ // after the loop is finished.
+ for (PhiVector::iterator it = PHIsToFix.begin(), e = PHIsToFix.end();
+ it != e; ++it) {
+ PHINode *RdxPhi = *it;
+ PHINode *VecRdxPhi = dyn_cast<PHINode>(WidenMap[RdxPhi]);
+    assert(VecRdxPhi && "Unable to recover vectorized PHI");
+
+ // Find the reduction variable descriptor.
+ assert(Legal->getReductionVars()->count(RdxPhi) &&
+ "Unable to find the reduction variable");
+ LoopVectorizationLegality::ReductionDescriptor RdxDesc =
+ (*Legal->getReductionVars())[RdxPhi];
+
+ // We need to generate a reduction vector from the incoming scalar.
+    // To do so, we need to generate the 'identity' vector and override
+ // one of the elements with the incoming scalar reduction. We need
+ // to do it in the vector-loop preheader.
+ Builder.SetInsertPoint(LoopBypassBlock->getTerminator());
+
+ // This is the vector-clone of the value that leaves the loop.
+ Value *VectorExit = getVectorValue(RdxDesc.LoopExitInstr);
+ Type *VecTy = VectorExit->getType();
+
+    // Find the reduction identity variable: zero for addition, OR and XOR,
+    // one for multiplication, -1 for AND.
+ Constant *Identity = getUniformVector(getReductionIdentity(RdxDesc.Kind),
+ VecTy->getScalarType());
+
+ // This vector is the Identity vector where the first element is the
+ // incoming scalar reduction.
+ Value *VectorStart = Builder.CreateInsertElement(Identity,
+ RdxDesc.StartValue, Zero);
+
+
+ // Fix the vector-loop phi.
+ // We created the induction variable so we know that the
+ // preheader is the first entry.
+ BasicBlock *VecPreheader = Induction->getIncomingBlock(0);
+
+ // Reductions do not have to start at zero. They can start with
+ // any loop invariant values.
+ VecRdxPhi->addIncoming(VectorStart, VecPreheader);
+ unsigned SelfEdgeIdx = (RdxPhi)->getBasicBlockIndex(LoopScalarBody);
+ Value *Val = getVectorValue(RdxPhi->getIncomingValue(SelfEdgeIdx));
+ VecRdxPhi->addIncoming(Val, LoopVectorBody);
+
+ // Before each round, move the insertion point right between
+ // the PHIs and the values we are going to write.
+ // This allows us to write both PHINodes and the extractelement
+ // instructions.
+ Builder.SetInsertPoint(LoopMiddleBlock->getFirstInsertionPt());
+
+ // This PHINode contains the vectorized reduction variable, or
+ // the initial value vector, if we bypass the vector loop.
+ PHINode *NewPhi = Builder.CreatePHI(VecTy, 2, "rdx.vec.exit.phi");
+ NewPhi->addIncoming(VectorStart, LoopBypassBlock);
+ NewPhi->addIncoming(getVectorValue(RdxDesc.LoopExitInstr), LoopVectorBody);
+
+ // Extract the first scalar.
+ Value *Scalar0 =
+ Builder.CreateExtractElement(NewPhi, Builder.getInt32(0));
+ // Extract and reduce the remaining vector elements.
+ for (unsigned i=1; i < VF; ++i) {
+ Value *Scalar1 =
+ Builder.CreateExtractElement(NewPhi, Builder.getInt32(i));
+ switch (RdxDesc.Kind) {
+ case LoopVectorizationLegality::IntegerAdd:
+ Scalar0 = Builder.CreateAdd(Scalar0, Scalar1);
+ break;
+ case LoopVectorizationLegality::IntegerMult:
+ Scalar0 = Builder.CreateMul(Scalar0, Scalar1);
+ break;
+ case LoopVectorizationLegality::IntegerOr:
+ Scalar0 = Builder.CreateOr(Scalar0, Scalar1);
+ break;
+ case LoopVectorizationLegality::IntegerAnd:
+ Scalar0 = Builder.CreateAnd(Scalar0, Scalar1);
+ break;
+ case LoopVectorizationLegality::IntegerXor:
+ Scalar0 = Builder.CreateXor(Scalar0, Scalar1);
+ break;
+ default:
+ llvm_unreachable("Unknown reduction operation");
+ }
+ }
+
+ // Now, we need to fix the users of the reduction variable
+ // inside and outside of the scalar remainder loop.
+ // We know that the loop is in LCSSA form. We need to update the
+ // PHI nodes in the exit blocks.
+ for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
+ LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) {
+ PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
+ if (!LCSSAPhi) continue;
+
+ // All PHINodes need to have a single entry edge, or two if
+ // we already fixed them.
+ assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
+
+ // We found our reduction value exit-PHI. Update it with the
+ // incoming bypass edge.
+ if (LCSSAPhi->getIncomingValue(0) == RdxDesc.LoopExitInstr) {
+ // Add an edge coming from the bypass.
+ LCSSAPhi->addIncoming(Scalar0, LoopMiddleBlock);
+ break;
+ }
+ }// end of the LCSSA phi scan.
+
+ // Fix the scalar loop reduction variable with the incoming reduction sum
+ // from the vector body and from the backedge value.
+ int IncomingEdgeBlockIdx = (RdxPhi)->getBasicBlockIndex(LoopScalarBody);
+ int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); // The other block.
+ (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, Scalar0);
+ (RdxPhi)->setIncomingValue(IncomingEdgeBlockIdx, RdxDesc.LoopExitInstr);
+ }// end of for each redux variable.
+}
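
The exit-block code above performs a linear horizontal reduction: extract lane 0, then fold in each remaining lane with the reduction operation. A scalar model of the IntegerAdd case, assuming VF = 4:

    // Illustrative: the extract-and-combine loop for an add reduction.
    static int reduceAddLanes(const int Lanes[4]) {
      int Scalar0 = Lanes[0];        // CreateExtractElement(..., 0)
      for (int I = 1; I < 4; ++I)
        Scalar0 += Lanes[I];         // Scalar0 = CreateAdd(Scalar0, Scalar1)
      return Scalar0;
    }
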
+
+void SingleBlockLoopVectorizer::updateAnalysis() {
+  // Drop the cached SCEV information for the original loop.
+ SE->forgetLoop(OrigLoop);
+
+ // Update the dominator tree information.
+ assert(DT->properlyDominates(LoopBypassBlock, LoopExitBlock) &&
+ "Entry does not dominate exit.");
+
+ DT->addNewBlock(LoopVectorPreHeader, LoopBypassBlock);
+ DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
+ DT->addNewBlock(LoopMiddleBlock, LoopBypassBlock);
+ DT->addNewBlock(LoopScalarPreHeader, LoopMiddleBlock);
+ DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
+ DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
+
+ DEBUG(DT->verifyAnalysis());
+}
+
+bool LoopVectorizationLegality::canVectorize() {
+ if (!TheLoop->getLoopPreheader()) {
+ assert(false && "No preheader!!");
+ DEBUG(dbgs() << "LV: Loop not normalized." << "\n");
+ return false;
+ }
+
+ // We can only vectorize single basic block loops.
+ unsigned NumBlocks = TheLoop->getNumBlocks();
+ if (NumBlocks != 1) {
+ DEBUG(dbgs() << "LV: Too many blocks:" << NumBlocks << "\n");
+ return false;
+ }
+
+ // We need to have a loop header.
+ BasicBlock *BB = TheLoop->getHeader();
+ DEBUG(dbgs() << "LV: Found a loop: " << BB->getName() << "\n");
+
+ // ScalarEvolution needs to be able to find the exit count.
+ const SCEV *ExitCount = SE->getExitCount(TheLoop, BB);
+ if (ExitCount == SE->getCouldNotCompute()) {
+ DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
+ return false;
+ }
+
+ // Do not loop-vectorize loops with a tiny trip count.
+ unsigned TC = SE->getSmallConstantTripCount(TheLoop, BB);
+ if (TC > 0u && TC < TinyTripCountThreshold) {
+ DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " <<
+ "This loop is not worth vectorizing.\n");
+ return false;
+ }
+
+ // Go over each instruction and look at memory deps.
+ if (!canVectorizeBlock(*BB)) {
+ DEBUG(dbgs() << "LV: Can't vectorize this loop header\n");
+ return false;
+ }
+
+ DEBUG(dbgs() << "LV: We can vectorize this loop" <<
+ (PtrRtCheck.Need ? " (with a runtime bound check)" : "")
+ <<"!\n");
+
+ // Okay! We can vectorize. At this point we don't have any other mem analysis
+ // which may limit our maximum vectorization factor, so just return true with
+ // no restrictions.
+ return true;
+}
+
+bool LoopVectorizationLegality::canVectorizeBlock(BasicBlock &BB) {
+ // Scan the instructions in the block and look for hazards.
+ for (BasicBlock::iterator it = BB.begin(), e = BB.end(); it != e; ++it) {
+ Instruction *I = it;
+
+ PHINode *Phi = dyn_cast<PHINode>(I);
+ if (Phi) {
+ // This should not happen because the loop should be normalized.
+ if (Phi->getNumIncomingValues() != 2) {
+ DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
+ return false;
+ }
+ // We only look at integer phi nodes.
+ if (!Phi->getType()->isIntegerTy()) {
+ DEBUG(dbgs() << "LV: Found a non-int PHI.\n");
+ return false;
+ }
+
+ if (isInductionVariable(Phi)) {
+ if (Induction) {
+ DEBUG(dbgs() << "LV: Found too many inductions."<< *Phi <<"\n");
+ return false;
+ }
+ DEBUG(dbgs() << "LV: Found the induction PHI."<< *Phi <<"\n");
+ Induction = Phi;
+ continue;
+ }
+ if (AddReductionVar(Phi, IntegerAdd)) {
+ DEBUG(dbgs() << "LV: Found an ADD reduction PHI."<< *Phi <<"\n");
+ continue;
+ }
+ if (AddReductionVar(Phi, IntegerMult)) {
+ DEBUG(dbgs() << "LV: Found a MUL reduction PHI."<< *Phi <<"\n");
+ continue;
+ }
+ if (AddReductionVar(Phi, IntegerOr)) {
+ DEBUG(dbgs() << "LV: Found an OR reduction PHI."<< *Phi <<"\n");
+ continue;
+ }
+ if (AddReductionVar(Phi, IntegerAnd)) {
+ DEBUG(dbgs() << "LV: Found an AND reduction PHI."<< *Phi <<"\n");
+ continue;
+ }
+ if (AddReductionVar(Phi, IntegerXor)) {
+ DEBUG(dbgs() << "LV: Found an XOR reduction PHI."<< *Phi <<"\n");
+ continue;
+ }
+
+ DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n");
+ return false;
+ }// end of PHI handling
+
+ // We still don't handle function calls.
+ CallInst *CI = dyn_cast<CallInst>(I);
+ if (CI) {
+ DEBUG(dbgs() << "LV: Found a call site.\n");
+ return false;
+ }
+
+ // We do not re-vectorize vectors.
+ if (!VectorType::isValidElementType(I->getType()) &&
+ !I->getType()->isVoidTy()) {
+ DEBUG(dbgs() << "LV: Found unvectorizable type." << "\n");
+ return false;
+ }
+
+ // Reduction instructions are allowed to have exit users.
+ // All other instructions must not have external users.
+ if (!AllowedExit.count(I))
+ // Check that all of the users of the instruction are inside the block.
+ for (Value::use_iterator it = I->use_begin(), e = I->use_end();
+ it != e; ++it) {
+ Instruction *U = cast<Instruction>(*it);
+ // This user may be a reduction exit value.
+ BasicBlock *Parent = U->getParent();
+ if (Parent != &BB) {
+ DEBUG(dbgs() << "LV: Found an outside user for: "<< *U << "\n");
+ return false;
+ }
+ }
+ } // next instr.
+
+ if (!Induction) {
+ DEBUG(dbgs() << "LV: Did not find an induction var.\n");
+ return false;
+ }
+
+ // Don't vectorize if the memory dependencies do not allow vectorization.
+ if (!canVectorizeMemory(BB))
+ return false;
+
+ // We now know that the loop is vectorizable!
+ // Collect variables that will remain uniform after vectorization.
+ std::vector<Value*> Worklist;
+
+ // Start with the conditional branch and walk up the block.
+ Worklist.push_back(BB.getTerminator()->getOperand(0));
+
+ while (Worklist.size()) {
+ Instruction *I = dyn_cast<Instruction>(Worklist.back());
+ Worklist.pop_back();
+ // Look at instructions inside this block.
+ if (!I) continue;
+ if (I->getParent() != &BB) continue;
+
+ // Stop when reaching PHI nodes.
+ if (isa<PHINode>(I)) {
+ assert(I == Induction && "Found a uniform PHI that is not the induction");
+ break;
+ }
+
+ // This is a known uniform.
+ Uniforms.insert(I);
+
+ // Insert all operands.
+ for (int i=0, Op = I->getNumOperands(); i < Op; ++i) {
+ Worklist.push_back(I->getOperand(i));
+ }
+ }
+
+ return true;
+}
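// Editorial example, not part of the patch: in the loop below the exit
// comparison (i < n) and the induction update it feeds from are the same
// for every vector lane, so the worklist walk above (from the branch
// condition back to the induction PHI, where it stops) marks them uniform:
void scale(int *A, const int *B, int n) {
  for (int i = 0; i < n; ++i)
    A[i] = B[i] + 5;
}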
+
+bool LoopVectorizationLegality::canVectorizeMemory(BasicBlock &BB) {
+ typedef SmallVector<Value*, 16> ValueVector;
+ typedef SmallPtrSet<Value*, 16> ValueSet;
+ // Holds the Load and Store *instructions*.
+ ValueVector Loads;
+ ValueVector Stores;
+ PtrRtCheck.Pointers.clear();
+ PtrRtCheck.Need = false;
+
+ // Scan the BB and collect legal loads and stores.
+ for (BasicBlock::iterator it = BB.begin(), e = BB.end(); it != e; ++it) {
+ Instruction *I = it;
+
+ // If this is a load, save it. If this instruction can read from memory
+ // but is not a load, then we quit. Notice that we don't handle function
+ // calls that read or write.
+ if (I->mayReadFromMemory()) {
+ LoadInst *Ld = dyn_cast<LoadInst>(I);
+ if (!Ld) return false;
+ if (!Ld->isSimple()) {
+ DEBUG(dbgs() << "LV: Found a non-simple load.\n");
+ return false;
+ }
+ Loads.push_back(Ld);
+ continue;
+ }
+
+ // Save store instructions. Abort if other instructions write to memory.
+ if (I->mayWriteToMemory()) {
+ StoreInst *St = dyn_cast<StoreInst>(I);
+ if (!St) return false;
+ if (!St->isSimple()) {
+ DEBUG(dbgs() << "LV: Found a non-simple store.\n");
+ return false;
+ }
+ Stores.push_back(St);
+ }
+ } // next instr.
+
+ // Now we have two lists that hold the loads and the stores.
+ // Next, we find the pointers that they use.
+
+ // Check if we see any stores. If there are no stores, then we don't
+ // care if the pointers are *restrict*.
+ if (!Stores.size()) {
+ DEBUG(dbgs() << "LV: Found a read-only loop!\n");
+ return true;
+ }
+
+ // Holds the read and read-write *pointers* that we find.
+ ValueVector Reads;
+ ValueVector ReadWrites;
+
+ // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
+ // multiple times on the same object. If the ptr is accessed twice, once
+ // for read and once for write, it will only appear once (on the write
+ // list). This is okay, since we are going to check for conflicts between
+ // writes and between reads and writes, but not between reads and reads.
+ ValueSet Seen;
+
+ ValueVector::iterator I, IE;
+ for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
+ StoreInst *ST = dyn_cast<StoreInst>(*I);
+ assert(ST && "Bad StoreInst");
+ Value* Ptr = ST->getPointerOperand();
+
+ if (isUniform(Ptr)) {
+ DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
+ return false;
+ }
+
+ // If we did *not* see this pointer before, insert it into
+ // the read-write list. At this phase it is only a 'write' list.
+ if (Seen.insert(Ptr))
+ ReadWrites.push_back(Ptr);
+ }
+
+ for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
+ LoadInst *LD = dyn_cast<LoadInst>(*I);
+ assert(LD && "Bad LoadInst");
+ Value* Ptr = LD->getPointerOperand();
+ // If we did *not* see this pointer before, insert it into the
+ // read list. If we *did* see it before, then it is already on
+ // the read-write list. This allows us to vectorize expressions
+ // such as A[i] += x, because the address of A[i] is a read-write
+ // pointer. This only works if the index of A[i] is consecutive.
+ // If the index is not consecutive (for example A[B[i]]) then we may
+ // read a few words, modify, and write a few words, and some of the
+ // words may be written to the same address.
+ if (Seen.insert(Ptr) || !isConsecutiveGep(Ptr))
+ Reads.push_back(Ptr);
+ }
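// Editorial example of the classification above, not part of the patch:
void classify(int *A, const int *B, int x, int n) {
  for (int i = 0; i < n; ++i)
    A[i] += x;    // &A[i] is a consecutive GEP: it lands only on the
                  // read-write list (Seen already contains it)
  for (int i = 0; i < n; ++i)
    A[B[i]] += 1; // &A[B[i]] is not a consecutive GEP: it is kept on the
                  // read list in addition to the read-write list
}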
+
+ // If we write (or read-write) to a single destination and there are no
+ // other reads in this loop, then it is safe to vectorize.
+ if (ReadWrites.size() == 1 && Reads.size() == 0) {
+ DEBUG(dbgs() << "LV: Found a write-only loop!\n");
+ return true;
+ }
+
+ // Find pointers with computable bounds. We are going to use this information
+ // to place a runtime bound check.
+ bool RT = true;
+ for (I = ReadWrites.begin(), IE = ReadWrites.end(); I != IE; ++I)
+ if (hasComputableBounds(*I)) {
+ PtrRtCheck.Pointers.push_back(*I);
+ DEBUG(dbgs() << "LV: Found a runtime check ptr:" << **I <<"\n");
+ } else {
+ RT = false;
+ break;
+ }
+ for (I = Reads.begin(), IE = Reads.end(); I != IE; ++I)
+ if (hasComputableBounds(*I)) {
+ PtrRtCheck.Pointers.push_back(*I);
+ DEBUG(dbgs() << "LV: Found a runtime check ptr:" << **I <<"\n");
+ } else {
+ RT = false;
+ break;
+ }
+
+ // Check that we did not collect too many pointers or find an
+ // unsizeable pointer.
+ if (!RT || PtrRtCheck.Pointers.size() > RuntimeMemoryCheckThreshold) {
+ PtrRtCheck.Pointers.clear();
+ RT = false;
+ }
+
+ PtrRtCheck.Need = RT;
+
+ if (RT) {
+ DEBUG(dbgs() << "LV: We can perform a memory runtime check if needed.\n");
+ }
+
+ // Now that the pointers are in two lists (Reads and ReadWrites), we
+ // can check that there are no conflicts between the writes, and between
+ // the writes and the reads.
+ ValueSet WriteObjects;
+ ValueVector TempObjects;
+
+ // Check that the read-writes do not conflict with other read-write
+ // pointers.
+ for (I = ReadWrites.begin(), IE = ReadWrites.end(); I != IE; ++I) {
+ GetUnderlyingObjects(*I, TempObjects, DL);
+ for (ValueVector::iterator it=TempObjects.begin(), e=TempObjects.end();
+ it != e; ++it) {
+ if (!isIdentifiedObject(*it)) {
+ DEBUG(dbgs() << "LV: Found an unidentified write ptr:"<< **it <<"\n");
+ return RT;
+ }
+ if (!WriteObjects.insert(*it)) {
+ DEBUG(dbgs() << "LV: Found a possible write-write reorder:"
+ << **it <<"\n");
+ return RT;
+ }
+ }
+ TempObjects.clear();
+ }
+
+ // Check that the reads don't conflict with the read-writes.
+ for (I = Reads.begin(), IE = Reads.end(); I != IE; ++I) {
+ GetUnderlyingObjects(*I, TempObjects, DL);
+ for (ValueVector::iterator it=TempObjects.begin(), e=TempObjects.end();
+ it != e; ++it) {
+ if (!isIdentifiedObject(*it)) {
+ DEBUG(dbgs() << "LV: Found an unidentified read ptr:"<< **it <<"\n");
+ return RT;
+ }
+ if (WriteObjects.count(*it)) {
+ DEBUG(dbgs() << "LV: Found a possible read/write reorder:"
+ << **it <<"\n");
+ return RT;
+ }
+ }
+ TempObjects.clear();
+ }
+
+ // It is safe to vectorize and we don't need any runtime checks.
+ DEBUG(dbgs() << "LV: We don't need a runtime memory check.\n");
+ PtrRtCheck.Pointers.clear();
+ PtrRtCheck.Need = false;
+ return true;
+}
+
+bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
+ ReductionKind Kind) {
+ if (Phi->getNumIncomingValues() != 2)
+ return false;
+
+ // Find the possible incoming reduction variable.
+ BasicBlock *BB = Phi->getParent();
+ int SelfEdgeIdx = Phi->getBasicBlockIndex(BB);
+ int InEdgeBlockIdx = (SelfEdgeIdx ? 0 : 1); // The other entry.
+ Value *RdxStart = Phi->getIncomingValue(InEdgeBlockIdx);
+
+ // ExitInstruction is the single value which is used outside the loop.
+ // We only allow for a single reduction value to be used outside the loop.
+ // This includes the users of the reduction variables, which form a
+ // cycle that ends in the phi node.
+ Instruction *ExitInstruction = 0;
+
+ // Iter is our iterator. We start with the PHI node and scan for all of the
+ // users of this instruction. All users must be instructions which can be
+ // used as reduction variables (such as ADD). We may have a single
+ // out-of-block user. The cycle must end with the original PHI.
+ // Also, we can't have multiple block-local users.
+ Instruction *Iter = Phi;
+ while (true) {
+ // Any reduction instr must be of one of the allowed kinds.
+ if (!isReductionInstr(Iter, Kind))
+ return false;
+
+ // Did we find a user inside this block?
+ bool FoundInBlockUser = false;
+ // Did we reach the initial PHI node?
+ bool FoundStartPHI = false;
+
+ // If the instruction has no users then this is a broken
+ // chain and can't be a reduction variable.
+ if (Iter->use_empty())
+ return false;
+
+ // For each of the *users* of iter.
+ for (Value::use_iterator it = Iter->use_begin(), e = Iter->use_end();
+ it != e; ++it) {
+ Instruction *U = cast<Instruction>(*it);
+ // We already know that the PHI is a user.
+ if (U == Phi) {
+ FoundStartPHI = true;
+ continue;
+ }
+ // Check if we found the exit user.
+ BasicBlock *Parent = U->getParent();
+ if (Parent != BB) {
+ // We must have a single exit instruction.
+ if (ExitInstruction != 0)
+ return false;
+ ExitInstruction = Iter;
+ }
+ // We can't have multiple inside users.
+ if (FoundInBlockUser)
+ return false;
+ FoundInBlockUser = true;
+ Iter = U;
+ }
+
+ // We found a reduction var if we have reached the original
+ // phi node and we only have a single instruction with out-of-loop
+ // users.
+ if (FoundStartPHI && ExitInstruction) {
+ // This instruction is allowed to have out-of-loop users.
+ AllowedExit.insert(ExitInstruction);
+
+ // Save the description of this reduction variable.
+ ReductionDescriptor RD(RdxStart, ExitInstruction, Kind);
+ Reductions[Phi] = RD;
+ return true;
+ }
+ }
+}
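// Editorial example, not part of the patch: the cycle accepted above for
// an IntegerAdd reduction, in C form:
int sumArray(const int *A, int n) {
  int Sum = 0;     // RdxStart, the value entering the PHI from outside
  for (int i = 0; i < n; ++i)
    Sum += A[i];   // the single in-block user chain; the add feeds the PHI
                   // and is the one value allowed to be used after the
                   // loop (ExitInstruction)
  return Sum;
}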
+
+bool
+LoopVectorizationLegality::isReductionInstr(Instruction *I,
+ ReductionKind Kind) {
+ switch (I->getOpcode()) {
+ default:
+ return false;
+ case Instruction::PHI:
+ // A PHI may be part of the reduction cycle.
+ return true;
+ case Instruction::Add:
+ case Instruction::Sub:
+ return Kind == IntegerAdd;
+ case Instruction::Mul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ return Kind == IntegerMult;
+ case Instruction::And:
+ return Kind == IntegerAnd;
+ case Instruction::Or:
+ return Kind == IntegerOr;
+ case Instruction::Xor:
+ return Kind == IntegerXor;
+ }
+}
+
+bool LoopVectorizationLegality::isInductionVariable(PHINode *Phi) {
+ // Check that the PHI is a consecutive (unit-stride) recurrence.
+ const SCEV *PhiScev = SE->getSCEV(Phi);
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
+ if (!AR) {
+ DEBUG(dbgs() << "LV: PHI is not an add recurrence.\n");
+ return false;
+ }
+ const SCEV *Step = AR->getStepRecurrence(*SE);
+
+ if (!Step->isOne()) {
+ DEBUG(dbgs() << "LV: PHI stride does not equal one.\n");
+ return false;
+ }
+ return true;
+}
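// Editorial note, not part of the patch: SCEV models the accepted PHI as
// the add recurrence {start,+,1}. For example:
void strides(int *A, int n) {
  for (int i = 0; i < n; ++i)    // i is {0,+,1}: step one, accepted
    A[i] = 0;
  for (int j = 0; j < n; j += 2) // j is {0,+,2}: step != 1, rejected here
    A[j] = 0;
}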
+
+bool LoopVectorizationLegality::hasComputableBounds(Value *Ptr) {
+ const SCEV *PhiScev = SE->getSCEV(Ptr);
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
+ if (!AR)
+ return false;
+
+ return AR->isAffine();
+}
+
+unsigned
+LoopVectorizationCostModel::findBestVectorizationFactor(unsigned VF) {
+ if (!VTTI) {
+ DEBUG(dbgs() << "LV: No vector target information. Not vectorizing. \n");
+ return 1;
+ }
+
+ float Cost = expectedCost(1);
+ unsigned Width = 1;
+ DEBUG(dbgs() << "LV: Scalar loop costs: "<< (int)Cost << ".\n");
+ for (unsigned i=2; i <= VF; i*=2) {
+ // Notice that the vector loop needs to be executed fewer times, so
+ // we need to divide the cost of the vector loop by the width of
+ // the vector elements.
+ float VectorCost = expectedCost(i) / (float)i;
+ DEBUG(dbgs() << "LV: Vector loop of width "<< i << " costs: " <<
+ (int)VectorCost << ".\n");
+ if (VectorCost < Cost) {
+ Cost = VectorCost;
+ Width = i;
+ }
+ }
+
+ DEBUG(dbgs() << "LV: Selecting VF: "<< Width << ".\n");
+ return Width;
+}
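// Editorial worked example, not part of the patch: with hypothetical costs
// expectedCost(1) == 8, expectedCost(2) == 10 and expectedCost(4) == 20,
// the per-lane costs are 8, 10/2 == 5 and 20/4 == 5. Only VF = 2 is
// strictly cheaper than the best cost seen so far, so the loop above
// selects Width == 2.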
+
+unsigned LoopVectorizationCostModel::expectedCost(unsigned VF) {
+ // We can only estimate the cost of single basic block loops.
+ assert(1 == TheLoop->getNumBlocks() && "Too many blocks in loop");
+
+ BasicBlock *BB = TheLoop->getHeader();
+ unsigned Cost = 0;
+
+ // For each instruction in the old loop.
+ for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
+ Instruction *Inst = it;
+ unsigned C = getInstructionCost(Inst, VF);
+ Cost += C;
+ DEBUG(dbgs() << "LV: Found an estimated cost of "<< C <<" for VF "<< VF <<
+ " For instruction: "<< *Inst << "\n");
+ }
+
+ return Cost;
+}
+
+unsigned
+LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
+ assert(VTTI && "Invalid vector target transformation info");
+
+ // If we know that this instruction will remain uniform, check the cost of
+ // the scalar version.
+ if (Legal->isUniformAfterVectorization(I))
+ VF = 1;
+
+ Type *RetTy = I->getType();
+ Type *VectorTy = ToVectorTy(RetTy, VF);
+
+ // TODO: We need to estimate the cost of intrinsic calls.
+ switch (I->getOpcode()) {
+ case Instruction::GetElementPtr:
+ // We mark this instruction as zero-cost because scalar GEPs are usually
+ // lowered into the instruction's addressing mode. At the moment we don't
+ // generate vector GEPs.
+ return 0;
+ case Instruction::Br: {
+ return VTTI->getCFInstrCost(I->getOpcode());
+ }
+ case Instruction::PHI:
+ return 0;
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ return VTTI->getArithmeticInstrCost(I->getOpcode(), VectorTy);
+ }
+ case Instruction::Select: {
+ SelectInst *SI = cast<SelectInst>(I);
+ const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
+ bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
+ Type *CondTy = SI->getCondition()->getType();
+ if (ScalarCond)
+ CondTy = VectorType::get(CondTy, VF);
+
+ return VTTI->getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
+ }
+ case Instruction::ICmp:
+ case Instruction::FCmp: {
+ Type *ValTy = I->getOperand(0)->getType();
+ VectorTy = ToVectorTy(ValTy, VF);
+ return VTTI->getCmpSelInstrCost(I->getOpcode(), VectorTy);
+ }
+ case Instruction::Store: {
+ StoreInst *SI = cast<StoreInst>(I);
+ Type *ValTy = SI->getValueOperand()->getType();
+ VectorTy = ToVectorTy(ValTy, VF);
+
+ if (VF == 1)
+ return VTTI->getMemoryOpCost(I->getOpcode(), ValTy,
+ SI->getAlignment(), SI->getPointerAddressSpace());
+
+ // Scalarized stores.
+ if (!Legal->isConsecutiveGep(SI->getPointerOperand())) {
+ unsigned Cost = 0;
+ unsigned ExtCost = VTTI->getInstrCost(Instruction::ExtractElement,
+ ValTy);
+ // The cost of extracting from the value vector.
+ Cost += VF * (ExtCost);
+ // The cost of the scalar stores.
+ Cost += VF * VTTI->getMemoryOpCost(I->getOpcode(),
+ ValTy->getScalarType(),
+ SI->getAlignment(),
+ SI->getPointerAddressSpace());
+ return Cost;
+ }
+
+ // Wide stores.
+ return VTTI->getMemoryOpCost(I->getOpcode(), VectorTy, SI->getAlignment(),
+ SI->getPointerAddressSpace());
+ }
+ case Instruction::Load: {
+ LoadInst *LI = cast<LoadInst>(I);
+
+ if (VF == 1)
+ return VTTI->getMemoryOpCost(I->getOpcode(), RetTy,
+ LI->getAlignment(),
+ LI->getPointerAddressSpace());
+
+ // Scalarized loads.
+ if (!Legal->isConsecutiveGep(LI->getPointerOperand())) {
+ unsigned Cost = 0;
+ unsigned InCost = VTTI->getInstrCost(Instruction::InsertElement, RetTy);
+ // The cost of inserting the loaded value into the result vector.
+ Cost += VF * (InCost);
+ // The cost of the scalar loads.
+ Cost += VF * VTTI->getMemoryOpCost(I->getOpcode(),
+ RetTy->getScalarType(),
+ LI->getAlignment(),
+ LI->getPointerAddressSpace());
+ return Cost;
+ }
+
+ // Wide loads.
+ return VTTI->getMemoryOpCost(I->getOpcode(), VectorTy, LI->getAlignment(),
+ LI->getPointerAddressSpace());
+ }
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::SIToFP:
+ case Instruction::UIToFP:
+ case Instruction::Trunc:
+ case Instruction::FPTrunc:
+ case Instruction::BitCast: {
+ Type *SrcVecTy = ToVectorTy(I->getOperand(0)->getType(), VF);
+ return VTTI->getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
+ }
+ default: {
+ // We are scalarizing the instruction. Return the cost of the scalar
+ // instruction, plus the cost of insert and extract into vector
+ // elements, times the vector width.
+ unsigned Cost = 0;
+
+ bool IsVoid = RetTy->isVoidTy();
+
+ unsigned InsCost = (IsVoid ? 0 :
+ VTTI->getInstrCost(Instruction::InsertElement,
+ VectorTy));
+
+ unsigned ExtCost = VTTI->getInstrCost(Instruction::ExtractElement,
+ VectorTy);
+
+ // The cost of inserting the results plus extracting each one of the
+ // operands.
+ Cost += VF * (InsCost + ExtCost * I->getNumOperands());
+
+ // The cost of executing VF copies of the scalar instruction.
+ Cost += VF * VTTI->getInstrCost(I->getOpcode(), RetTy);
+ return Cost;
+ }
+ }// end of switch.
+}
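// Editorial worked example, not part of the patch, for the default
// (scalarization) case above: with VF == 4, a two-operand instruction,
// InsCost == ExtCost == 1 and a scalar opcode cost of 3, the estimate is
//   4 * (1 + 1 * 2) + 4 * 3 == 24.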
+
+Type* LoopVectorizationCostModel::ToVectorTy(Type *Scalar, unsigned VF) {
+ if (Scalar->isVoidTy() || VF == 1)
+ return Scalar;
+ return VectorType::get(Scalar, VF);
+}
+
+} // namespace
+
+char LoopVectorize::ID = 0;
+static const char lv_name[] = "Loop Vectorization";
+INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
+
+namespace llvm {
+ Pass *createLoopVectorizePass() {
+ return new LoopVectorize();
+ }
+}
+
diff --git a/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp
index 1ef6002..d26973a 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements common infrastructure for libLLVMVectorizeOpts.a, which
+// This file implements common infrastructure for libLLVMVectorizeOpts.a, which
// implements several vectorization transformations over the LLVM intermediate
// representation, including the C bindings for that library.
//
@@ -23,10 +23,11 @@
using namespace llvm;
-/// initializeVectorizationPasses - Initialize all passes linked into the
+/// initializeVectorizationPasses - Initialize all passes linked into the
/// Vectorization library.
void llvm::initializeVectorization(PassRegistry &Registry) {
initializeBBVectorizePass(Registry);
+ initializeLoopVectorizePass(Registry);
}
void LLVMInitializeVectorization(LLVMPassRegistryRef R) {
@@ -37,3 +38,6 @@ void LLVMAddBBVectorizePass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createBBVectorizePass());
}
+void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createLoopVectorizePass());
+}
diff --git a/contrib/llvm/lib/VMCore/AsmWriter.cpp b/contrib/llvm/lib/VMCore/AsmWriter.cpp
index 7ef1131..b72c17f 100644
--- a/contrib/llvm/lib/VMCore/AsmWriter.cpp
+++ b/contrib/llvm/lib/VMCore/AsmWriter.cpp
@@ -66,6 +66,25 @@ static const Module *getModuleFromVal(const Value *V) {
return 0;
}
+static void PrintCallingConv(unsigned cc, raw_ostream &Out)
+{
+ switch (cc) {
+ case CallingConv::Fast: Out << "fastcc"; break;
+ case CallingConv::Cold: Out << "coldcc"; break;
+ case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break;
+ case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break;
+ case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break;
+ case CallingConv::Intel_OCL_BI: Out << "intel_ocl_bicc"; break;
+ case CallingConv::ARM_APCS: Out << "arm_apcscc"; break;
+ case CallingConv::ARM_AAPCS: Out << "arm_aapcscc"; break;
+ case CallingConv::ARM_AAPCS_VFP:Out << "arm_aapcs_vfpcc"; break;
+ case CallingConv::MSP430_INTR: Out << "msp430_intrcc"; break;
+ case CallingConv::PTX_Kernel: Out << "ptx_kernel"; break;
+ case CallingConv::PTX_Device: Out << "ptx_device"; break;
+ default: Out << "cc" << cc; break;
+ }
+}
+
// PrintEscapedString - Print each character of the specified string, escaping
// it if it is not printable or if it is an escape char.
static void PrintEscapedString(StringRef Name, raw_ostream &Out) {
@@ -141,8 +160,8 @@ static void PrintLLVMName(raw_ostream &OS, const Value *V) {
/// TypePrinting - Type printing machinery.
namespace {
class TypePrinting {
- TypePrinting(const TypePrinting &); // DO NOT IMPLEMENT
- void operator=(const TypePrinting&); // DO NOT IMPLEMENT
+ TypePrinting(const TypePrinting &) LLVM_DELETED_FUNCTION;
+ void operator=(const TypePrinting&) LLVM_DELETED_FUNCTION;
public:
/// NamedTypes - The named types that are used by the current module.
@@ -380,8 +399,8 @@ private:
/// Add all of the functions arguments, basic blocks, and instructions.
void processFunction();
- SlotTracker(const SlotTracker &); // DO NOT IMPLEMENT
- void operator=(const SlotTracker &); // DO NOT IMPLEMENT
+ SlotTracker(const SlotTracker &) LLVM_DELETED_FUNCTION;
+ void operator=(const SlotTracker &) LLVM_DELETED_FUNCTION;
};
} // end anonymous namespace
@@ -1029,6 +1048,9 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
Out << "sideeffect ";
if (IA->isAlignStack())
Out << "alignstack ";
+ // We don't emit the AD_ATT dialect as it's the assumed default.
+ if (IA->getDialect() == InlineAsm::AD_Intel)
+ Out << "inteldialect ";
Out << '"';
PrintEscapedString(IA->getAsmString(), Out);
Out << "\", \"";
@@ -1222,8 +1244,8 @@ void AssemblyWriter::writeParamOperand(const Value *Operand,
// Print the type
TypePrinter.print(Operand->getType(), Out);
// Print parameter attributes list
- if (Attrs != Attribute::None)
- Out << ' ' << Attribute::getAsString(Attrs);
+ if (Attrs.hasAttributes())
+ Out << ' ' << Attrs.getAsString();
Out << ' ';
// Print the operand
WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
@@ -1285,8 +1307,9 @@ void AssemblyWriter::printModule(const Module *M) {
// Output all globals.
if (!M->global_empty()) Out << '\n';
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
- I != E; ++I)
- printGlobal(I);
+ I != E; ++I) {
+ printGlobal(I); Out << '\n';
+ }
// Output all aliases.
if (!M->alias_empty()) Out << "\n";
@@ -1353,12 +1376,12 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT,
case GlobalValue::LinkerPrivateWeakLinkage:
Out << "linker_private_weak ";
break;
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
- Out << "linker_private_weak_def_auto ";
- break;
case GlobalValue::InternalLinkage: Out << "internal "; break;
case GlobalValue::LinkOnceAnyLinkage: Out << "linkonce "; break;
case GlobalValue::LinkOnceODRLinkage: Out << "linkonce_odr "; break;
+ case GlobalValue::LinkOnceODRAutoHideLinkage:
+ Out << "linkonce_odr_auto_hide ";
+ break;
case GlobalValue::WeakAnyLinkage: Out << "weak "; break;
case GlobalValue::WeakODRLinkage: Out << "weak_odr "; break;
case GlobalValue::CommonLinkage: Out << "common "; break;
@@ -1436,7 +1459,6 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
Out << ", align " << GV->getAlignment();
printInfoComment(*GV);
- Out << '\n';
}
void AssemblyWriter::printAlias(const GlobalAlias *GA) {
@@ -1527,27 +1549,16 @@ void AssemblyWriter::printFunction(const Function *F) {
PrintVisibility(F->getVisibility(), Out);
// Print the calling convention.
- switch (F->getCallingConv()) {
- case CallingConv::C: break; // default
- case CallingConv::Fast: Out << "fastcc "; break;
- case CallingConv::Cold: Out << "coldcc "; break;
- case CallingConv::X86_StdCall: Out << "x86_stdcallcc "; break;
- case CallingConv::X86_FastCall: Out << "x86_fastcallcc "; break;
- case CallingConv::X86_ThisCall: Out << "x86_thiscallcc "; break;
- case CallingConv::ARM_APCS: Out << "arm_apcscc "; break;
- case CallingConv::ARM_AAPCS: Out << "arm_aapcscc "; break;
- case CallingConv::ARM_AAPCS_VFP:Out << "arm_aapcs_vfpcc "; break;
- case CallingConv::MSP430_INTR: Out << "msp430_intrcc "; break;
- case CallingConv::PTX_Kernel: Out << "ptx_kernel "; break;
- case CallingConv::PTX_Device: Out << "ptx_device "; break;
- default: Out << "cc" << F->getCallingConv() << " "; break;
+ if (F->getCallingConv() != CallingConv::C) {
+ PrintCallingConv(F->getCallingConv(), Out);
+ Out << " ";
}
FunctionType *FT = F->getFunctionType();
const AttrListPtr &Attrs = F->getAttributes();
Attributes RetAttrs = Attrs.getRetAttributes();
- if (RetAttrs != Attribute::None)
- Out << Attribute::getAsString(Attrs.getRetAttributes()) << ' ';
+ if (RetAttrs.hasAttributes())
+ Out << Attrs.getRetAttributes().getAsString() << ' ';
TypePrinter.print(F->getReturnType(), Out);
Out << ' ';
WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent());
@@ -1576,8 +1587,8 @@ void AssemblyWriter::printFunction(const Function *F) {
TypePrinter.print(FT->getParamType(i), Out);
Attributes ArgAttrs = Attrs.getParamAttributes(i+1);
- if (ArgAttrs != Attribute::None)
- Out << ' ' << Attribute::getAsString(ArgAttrs);
+ if (ArgAttrs.hasAttributes())
+ Out << ' ' << ArgAttrs.getAsString();
}
}
@@ -1590,8 +1601,8 @@ void AssemblyWriter::printFunction(const Function *F) {
if (F->hasUnnamedAddr())
Out << " unnamed_addr";
Attributes FnAttrs = Attrs.getFnAttributes();
- if (FnAttrs != Attribute::None)
- Out << ' ' << Attribute::getAsString(Attrs.getFnAttributes());
+ if (FnAttrs.hasAttributes())
+ Out << ' ' << Attrs.getFnAttributes().getAsString();
if (F->hasSection()) {
Out << " section \"";
PrintEscapedString(F->getSection(), Out);
@@ -1624,8 +1635,8 @@ void AssemblyWriter::printArgument(const Argument *Arg,
TypePrinter.print(Arg->getType(), Out);
// Output parameter attributes list
- if (Attrs != Attribute::None)
- Out << ' ' << Attribute::getAsString(Attrs);
+ if (Attrs.hasAttributes())
+ Out << ' ' << Attrs.getAsString();
// Output name, if available...
if (Arg->hasName()) {
@@ -1828,20 +1839,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << " void";
} else if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
// Print the calling convention being used.
- switch (CI->getCallingConv()) {
- case CallingConv::C: break; // default
- case CallingConv::Fast: Out << " fastcc"; break;
- case CallingConv::Cold: Out << " coldcc"; break;
- case CallingConv::X86_StdCall: Out << " x86_stdcallcc"; break;
- case CallingConv::X86_FastCall: Out << " x86_fastcallcc"; break;
- case CallingConv::X86_ThisCall: Out << " x86_thiscallcc"; break;
- case CallingConv::ARM_APCS: Out << " arm_apcscc "; break;
- case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break;
- case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break;
- case CallingConv::MSP430_INTR: Out << " msp430_intrcc "; break;
- case CallingConv::PTX_Kernel: Out << " ptx_kernel"; break;
- case CallingConv::PTX_Device: Out << " ptx_device"; break;
- default: Out << " cc" << CI->getCallingConv(); break;
+ if (CI->getCallingConv() != CallingConv::C) {
+ Out << " ";
+ PrintCallingConv(CI->getCallingConv(), Out);
}
Operand = CI->getCalledValue();
@@ -1850,8 +1850,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Type *RetTy = FTy->getReturnType();
const AttrListPtr &PAL = CI->getAttributes();
- if (PAL.getRetAttributes() != Attribute::None)
- Out << ' ' << Attribute::getAsString(PAL.getRetAttributes());
+ if (PAL.getRetAttributes().hasAttributes())
+ Out << ' ' << PAL.getRetAttributes().getAsString();
// If possible, print out the short form of the call instruction. We can
// only do this if the first argument is a pointer to a nonvararg function,
@@ -1874,8 +1874,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeParamOperand(CI->getArgOperand(op), PAL.getParamAttributes(op + 1));
}
Out << ')';
- if (PAL.getFnAttributes() != Attribute::None)
- Out << ' ' << Attribute::getAsString(PAL.getFnAttributes());
+ if (PAL.getFnAttributes().hasAttributes())
+ Out << ' ' << PAL.getFnAttributes().getAsString();
} else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
Operand = II->getCalledValue();
PointerType *PTy = cast<PointerType>(Operand->getType());
@@ -1884,24 +1884,13 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
const AttrListPtr &PAL = II->getAttributes();
// Print the calling convention being used.
- switch (II->getCallingConv()) {
- case CallingConv::C: break; // default
- case CallingConv::Fast: Out << " fastcc"; break;
- case CallingConv::Cold: Out << " coldcc"; break;
- case CallingConv::X86_StdCall: Out << " x86_stdcallcc"; break;
- case CallingConv::X86_FastCall: Out << " x86_fastcallcc"; break;
- case CallingConv::X86_ThisCall: Out << " x86_thiscallcc"; break;
- case CallingConv::ARM_APCS: Out << " arm_apcscc "; break;
- case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break;
- case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break;
- case CallingConv::MSP430_INTR: Out << " msp430_intrcc "; break;
- case CallingConv::PTX_Kernel: Out << " ptx_kernel"; break;
- case CallingConv::PTX_Device: Out << " ptx_device"; break;
- default: Out << " cc" << II->getCallingConv(); break;
+ if (II->getCallingConv() != CallingConv::C) {
+ Out << " ";
+ PrintCallingConv(II->getCallingConv(), Out);
}
- if (PAL.getRetAttributes() != Attribute::None)
- Out << ' ' << Attribute::getAsString(PAL.getRetAttributes());
+ if (PAL.getRetAttributes().hasAttributes())
+ Out << ' ' << PAL.getRetAttributes().getAsString();
// If possible, print out the short form of the invoke instruction. We can
// only do this if the first argument is a pointer to a nonvararg function,
@@ -1925,8 +1914,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
Out << ')';
- if (PAL.getFnAttributes() != Attribute::None)
- Out << ' ' << Attribute::getAsString(PAL.getFnAttributes());
+ if (PAL.getFnAttributes().hasAttributes())
+ Out << ' ' << PAL.getFnAttributes().getAsString();
Out << "\n to ";
writeOperand(II->getNormalDest(), true);
diff --git a/contrib/llvm/lib/VMCore/Attributes.cpp b/contrib/llvm/lib/VMCore/Attributes.cpp
index c8219eb..f1268e6 100644
--- a/contrib/llvm/lib/VMCore/Attributes.cpp
+++ b/contrib/llvm/lib/VMCore/Attributes.cpp
@@ -7,11 +7,14 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the AttributesList class and Attribute utilities.
+// This file implements the Attributes, AttributeImpl, AttrBuilder,
+// AttributeListImpl, and AttrListPtr classes.
//
//===----------------------------------------------------------------------===//
#include "llvm/Attributes.h"
+#include "AttributesImpl.h"
+#include "LLVMContextImpl.h"
#include "llvm/Type.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/FoldingSet.h"
@@ -23,215 +26,382 @@
using namespace llvm;
//===----------------------------------------------------------------------===//
-// Attribute Function Definitions
+// Attributes Implementation
//===----------------------------------------------------------------------===//
-std::string Attribute::getAsString(Attributes Attrs) {
+Attributes Attributes::get(LLVMContext &Context, ArrayRef<AttrVal> Vals) {
+ AttrBuilder B;
+ for (ArrayRef<AttrVal>::iterator I = Vals.begin(), E = Vals.end();
+ I != E; ++I)
+ B.addAttribute(*I);
+ return Attributes::get(Context, B);
+}
+
+Attributes Attributes::get(LLVMContext &Context, AttrBuilder &B) {
+ // If there are no attributes, return an empty Attributes class.
+ if (!B.hasAttributes())
+ return Attributes();
+
+ // Otherwise, build a key to look up the existing attributes.
+ LLVMContextImpl *pImpl = Context.pImpl;
+ FoldingSetNodeID ID;
+ ID.AddInteger(B.Raw());
+
+ void *InsertPoint;
+ AttributesImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+ if (!PA) {
+ // If we didn't find any existing attributes of the same shape then create a
+ // new one and insert it.
+ PA = new AttributesImpl(B.Raw());
+ pImpl->AttrsSet.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the AttributesList that we found or created.
+ return Attributes(PA);
+}
+
+bool Attributes::hasAttribute(AttrVal Val) const {
+ return Attrs && Attrs->hasAttribute(Val);
+}
+
+bool Attributes::hasAttributes() const {
+ return Attrs && Attrs->hasAttributes();
+}
+
+bool Attributes::hasAttributes(const Attributes &A) const {
+ return Attrs && Attrs->hasAttributes(A);
+}
+
+/// This returns the alignment field of an attribute as a byte alignment value.
+unsigned Attributes::getAlignment() const {
+ if (!hasAttribute(Attributes::Alignment))
+ return 0;
+ return 1U << ((Attrs->getAlignment() >> 16) - 1);
+}
+
+/// This returns the stack alignment field of an attribute as a byte alignment
+/// value.
+unsigned Attributes::getStackAlignment() const {
+ if (!hasAttribute(Attributes::StackAlignment))
+ return 0;
+ return 1U << ((Attrs->getStackAlignment() >> 26) - 1);
+}
+
+uint64_t Attributes::Raw() const {
+ return Attrs ? Attrs->Raw() : 0;
+}
+
+Attributes Attributes::typeIncompatible(Type *Ty) {
+ AttrBuilder Incompatible;
+
+ if (!Ty->isIntegerTy())
+ // Attributes that only apply to integers.
+ Incompatible.addAttribute(Attributes::SExt)
+ .addAttribute(Attributes::ZExt);
+
+ if (!Ty->isPointerTy())
+ // Attributes that only apply to pointers.
+ Incompatible.addAttribute(Attributes::ByVal)
+ .addAttribute(Attributes::Nest)
+ .addAttribute(Attributes::NoAlias)
+ .addAttribute(Attributes::NoCapture)
+ .addAttribute(Attributes::StructRet);
+
+ return Attributes::get(Ty->getContext(), Incompatible);
+}
+
+/// encodeLLVMAttributesForBitcode - This returns an integer containing an
+/// encoding of all the LLVM attributes found in the given attribute bitset.
+/// Any change to this encoding is a breaking change to bitcode compatibility.
+uint64_t Attributes::encodeLLVMAttributesForBitcode(Attributes Attrs) {
+ // FIXME: It doesn't make sense to store the alignment information as an
+ // expanded-out value; we should store it as a log2 value. However, we can't
+ // just change that here without breaking bitcode compatibility. If this ever
+ // becomes a problem in practice, we should introduce new tag numbers in the
+ // bitcode file and have those tags use a more efficiently encoded alignment
+ // field.
+
+ // Store the alignment in the bitcode as a 16-bit raw value instead of a 5-bit
+ // log2 encoded value. Shift the bits above the alignment up by 11 bits.
+ uint64_t EncodedAttrs = Attrs.Raw() & 0xffff;
+ if (Attrs.hasAttribute(Attributes::Alignment))
+ EncodedAttrs |= Attrs.getAlignment() << 16;
+ EncodedAttrs |= (Attrs.Raw() & (0xffffULL << 21)) << 11;
+ return EncodedAttrs;
+}
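// Editorial worked example, not part of the patch: for an attribute set
// with zeroext and align 8, Raw() is (1 << 0) | (4 << 16) == 0x40001,
// since the 5-bit alignment field stores Log2_32(8) + 1 == 4. Encoding
// keeps the low 16 bits (0x0001) and stores the expanded alignment value
// 8 << 16 == 0x80000, giving EncodedAttrs == 0x80001; there are no bits
// above position 21 to shift up by 11 in this case.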
+
+/// decodeLLVMAttributesForBitcode - This returns an attribute bitset containing
+/// the LLVM attributes that have been decoded from the given integer. This
+/// function must stay in sync with 'encodeLLVMAttributesForBitcode'.
+Attributes Attributes::decodeLLVMAttributesForBitcode(LLVMContext &C,
+ uint64_t EncodedAttrs) {
+ // The alignment is stored as a 16-bit raw value from bits 31--16. We shift
+ // the bits above 31 down by 11 bits.
+ unsigned Alignment = (EncodedAttrs & (0xffffULL << 16)) >> 16;
+ assert((!Alignment || isPowerOf2_32(Alignment)) &&
+ "Alignment must be a power of two.");
+
+ AttrBuilder B(EncodedAttrs & 0xffff);
+ if (Alignment)
+ B.addAlignmentAttr(Alignment);
+ B.addRawValue((EncodedAttrs & (0xffffULL << 32)) >> 11);
+ return Attributes::get(C, B);
+}
+
+std::string Attributes::getAsString() const {
std::string Result;
- if (Attrs & Attribute::ZExt)
+ if (hasAttribute(Attributes::ZExt))
Result += "zeroext ";
- if (Attrs & Attribute::SExt)
+ if (hasAttribute(Attributes::SExt))
Result += "signext ";
- if (Attrs & Attribute::NoReturn)
+ if (hasAttribute(Attributes::NoReturn))
Result += "noreturn ";
- if (Attrs & Attribute::NoUnwind)
+ if (hasAttribute(Attributes::NoUnwind))
Result += "nounwind ";
- if (Attrs & Attribute::UWTable)
+ if (hasAttribute(Attributes::UWTable))
Result += "uwtable ";
- if (Attrs & Attribute::ReturnsTwice)
+ if (hasAttribute(Attributes::ReturnsTwice))
Result += "returns_twice ";
- if (Attrs & Attribute::InReg)
+ if (hasAttribute(Attributes::InReg))
Result += "inreg ";
- if (Attrs & Attribute::NoAlias)
+ if (hasAttribute(Attributes::NoAlias))
Result += "noalias ";
- if (Attrs & Attribute::NoCapture)
+ if (hasAttribute(Attributes::NoCapture))
Result += "nocapture ";
- if (Attrs & Attribute::StructRet)
+ if (hasAttribute(Attributes::StructRet))
Result += "sret ";
- if (Attrs & Attribute::ByVal)
+ if (hasAttribute(Attributes::ByVal))
Result += "byval ";
- if (Attrs & Attribute::Nest)
+ if (hasAttribute(Attributes::Nest))
Result += "nest ";
- if (Attrs & Attribute::ReadNone)
+ if (hasAttribute(Attributes::ReadNone))
Result += "readnone ";
- if (Attrs & Attribute::ReadOnly)
+ if (hasAttribute(Attributes::ReadOnly))
Result += "readonly ";
- if (Attrs & Attribute::OptimizeForSize)
+ if (hasAttribute(Attributes::OptimizeForSize))
Result += "optsize ";
- if (Attrs & Attribute::NoInline)
+ if (hasAttribute(Attributes::NoInline))
Result += "noinline ";
- if (Attrs & Attribute::InlineHint)
+ if (hasAttribute(Attributes::InlineHint))
Result += "inlinehint ";
- if (Attrs & Attribute::AlwaysInline)
+ if (hasAttribute(Attributes::AlwaysInline))
Result += "alwaysinline ";
- if (Attrs & Attribute::StackProtect)
+ if (hasAttribute(Attributes::StackProtect))
Result += "ssp ";
- if (Attrs & Attribute::StackProtectReq)
+ if (hasAttribute(Attributes::StackProtectReq))
Result += "sspreq ";
- if (Attrs & Attribute::NoRedZone)
+ if (hasAttribute(Attributes::NoRedZone))
Result += "noredzone ";
- if (Attrs & Attribute::NoImplicitFloat)
+ if (hasAttribute(Attributes::NoImplicitFloat))
Result += "noimplicitfloat ";
- if (Attrs & Attribute::Naked)
+ if (hasAttribute(Attributes::Naked))
Result += "naked ";
- if (Attrs & Attribute::NonLazyBind)
+ if (hasAttribute(Attributes::NonLazyBind))
Result += "nonlazybind ";
- if (Attrs & Attribute::AddressSafety)
+ if (hasAttribute(Attributes::AddressSafety))
Result += "address_safety ";
- if (Attrs & Attribute::StackAlignment) {
+ if (hasAttribute(Attributes::MinSize))
+ Result += "minsize ";
+ if (hasAttribute(Attributes::StackAlignment)) {
Result += "alignstack(";
- Result += utostr(Attribute::getStackAlignmentFromAttrs(Attrs));
+ Result += utostr(getStackAlignment());
Result += ") ";
}
- if (Attrs & Attribute::Alignment) {
+ if (hasAttribute(Attributes::Alignment)) {
Result += "align ";
- Result += utostr(Attribute::getAlignmentFromAttrs(Attrs));
+ Result += utostr(getAlignment());
Result += " ";
}
- if (Attrs & Attribute::IANSDialect)
- Result += "ia_nsdialect ";
-
// Trim the trailing space.
assert(!Result.empty() && "Unknown attribute!");
Result.erase(Result.end()-1);
return Result;
}
-Attributes Attribute::typeIncompatible(Type *Ty) {
- Attributes Incompatible = None;
-
- if (!Ty->isIntegerTy())
- // Attributes that only apply to integers.
- Incompatible |= SExt | ZExt;
-
- if (!Ty->isPointerTy())
- // Attributes that only apply to pointers.
- Incompatible |= ByVal | Nest | NoAlias | StructRet | NoCapture;
-
- return Incompatible;
+//===----------------------------------------------------------------------===//
+// AttrBuilder Implementation
+//===----------------------------------------------------------------------===//
+
+AttrBuilder &AttrBuilder::addAttribute(Attributes::AttrVal Val){
+ Bits |= AttributesImpl::getAttrMask(Val);
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) {
+ Bits |= Val;
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addAlignmentAttr(unsigned Align) {
+ if (Align == 0) return *this;
+ assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+ assert(Align <= 0x40000000 && "Alignment too large.");
+ Bits |= (Log2_32(Align) + 1) << 16;
+ return *this;
+}
+AttrBuilder &AttrBuilder::addStackAlignmentAttr(unsigned Align){
+ // Default alignment, allow the target to define how to align it.
+ if (Align == 0) return *this;
+ assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+ assert(Align <= 0x100 && "Alignment too large.");
+ Bits |= (Log2_32(Align) + 1) << 26;
+ return *this;
+}
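// Editorial worked example, not part of the patch:
//   AttrBuilder B;
//   B.addAlignmentAttr(8);   // Bits |= (Log2_32(8) + 1) << 16 == 4 << 16
//   B.getAlignment();        // 1U << (4 - 1) == 8
// Stack alignment uses the same log2+1 scheme in bits 26..28.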
+
+AttrBuilder &AttrBuilder::removeAttribute(Attributes::AttrVal Val) {
+ Bits &= ~AttributesImpl::getAttrMask(Val);
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addAttributes(const Attributes &A) {
+ Bits |= A.Raw();
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttributes(const Attributes &A){
+ Bits &= ~A.Raw();
+ return *this;
+}
+
+bool AttrBuilder::hasAttribute(Attributes::AttrVal A) const {
+ return Bits & AttributesImpl::getAttrMask(A);
+}
+
+bool AttrBuilder::hasAttributes() const {
+ return Bits != 0;
+}
+bool AttrBuilder::hasAttributes(const Attributes &A) const {
+ return Bits & A.Raw();
+}
+bool AttrBuilder::hasAlignmentAttr() const {
+ return Bits & AttributesImpl::getAttrMask(Attributes::Alignment);
+}
+
+uint64_t AttrBuilder::getAlignment() const {
+ if (!hasAlignmentAttr())
+ return 0;
+ return 1U <<
+ (((Bits & AttributesImpl::getAttrMask(Attributes::Alignment)) >> 16) - 1);
+}
+
+uint64_t AttrBuilder::getStackAlignment() const {
+ if (!hasAttribute(Attributes::StackAlignment))
+ return 0;
+ return 1U <<
+ (((Bits & AttributesImpl::getAttrMask(Attributes::StackAlignment))>>26)-1);
}
//===----------------------------------------------------------------------===//
-// AttributeListImpl Definition
+// AttributeImpl Definition
//===----------------------------------------------------------------------===//
-namespace llvm {
- class AttributeListImpl;
+uint64_t AttributesImpl::getAttrMask(uint64_t Val) {
+ switch (Val) {
+ case Attributes::None: return 0;
+ case Attributes::ZExt: return 1 << 0;
+ case Attributes::SExt: return 1 << 1;
+ case Attributes::NoReturn: return 1 << 2;
+ case Attributes::InReg: return 1 << 3;
+ case Attributes::StructRet: return 1 << 4;
+ case Attributes::NoUnwind: return 1 << 5;
+ case Attributes::NoAlias: return 1 << 6;
+ case Attributes::ByVal: return 1 << 7;
+ case Attributes::Nest: return 1 << 8;
+ case Attributes::ReadNone: return 1 << 9;
+ case Attributes::ReadOnly: return 1 << 10;
+ case Attributes::NoInline: return 1 << 11;
+ case Attributes::AlwaysInline: return 1 << 12;
+ case Attributes::OptimizeForSize: return 1 << 13;
+ case Attributes::StackProtect: return 1 << 14;
+ case Attributes::StackProtectReq: return 1 << 15;
+ case Attributes::Alignment: return 31 << 16;
+ case Attributes::NoCapture: return 1 << 21;
+ case Attributes::NoRedZone: return 1 << 22;
+ case Attributes::NoImplicitFloat: return 1 << 23;
+ case Attributes::Naked: return 1 << 24;
+ case Attributes::InlineHint: return 1 << 25;
+ case Attributes::StackAlignment: return 7 << 26;
+ case Attributes::ReturnsTwice: return 1 << 29;
+ case Attributes::UWTable: return 1 << 30;
+ case Attributes::NonLazyBind: return 1U << 31;
+ case Attributes::AddressSafety: return 1ULL << 32;
+ case Attributes::MinSize: return 1ULL << 33;
+ }
+ llvm_unreachable("Unsupported attribute type");
}
-static ManagedStatic<FoldingSet<AttributeListImpl> > AttributesLists;
+bool AttributesImpl::hasAttribute(uint64_t A) const {
+ return (Bits & getAttrMask(A)) != 0;
+}
-namespace llvm {
-static ManagedStatic<sys::SmartMutex<true> > ALMutex;
+bool AttributesImpl::hasAttributes() const {
+ return Bits != 0;
+}
-class AttributeListImpl : public FoldingSetNode {
- sys::cas_flag RefCount;
-
- // AttributesList is uniqued, these should not be publicly available.
- void operator=(const AttributeListImpl &); // Do not implement
- AttributeListImpl(const AttributeListImpl &); // Do not implement
- ~AttributeListImpl(); // Private implementation
-public:
- SmallVector<AttributeWithIndex, 4> Attrs;
-
- AttributeListImpl(ArrayRef<AttributeWithIndex> attrs)
- : Attrs(attrs.begin(), attrs.end()) {
- RefCount = 0;
- }
-
- void AddRef() {
- sys::SmartScopedLock<true> Lock(*ALMutex);
- ++RefCount;
- }
- void DropRef() {
- sys::SmartScopedLock<true> Lock(*ALMutex);
- if (!AttributesLists.isConstructed())
- return;
- sys::cas_flag new_val = --RefCount;
- if (new_val == 0)
- delete this;
- }
-
- void Profile(FoldingSetNodeID &ID) const {
- Profile(ID, Attrs);
- }
- static void Profile(FoldingSetNodeID &ID, ArrayRef<AttributeWithIndex> Attrs){
- for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
- ID.AddInteger(Attrs[i].Attrs.Raw());
- ID.AddInteger(Attrs[i].Index);
- }
- }
-};
+bool AttributesImpl::hasAttributes(const Attributes &A) const {
+ return Bits & A.Raw(); // FIXME: Raw() won't work here in the future.
}
-AttributeListImpl::~AttributeListImpl() {
- // NOTE: Lock must be acquired by caller.
- AttributesLists->RemoveNode(this);
+uint64_t AttributesImpl::getAlignment() const {
+ return Bits & getAttrMask(Attributes::Alignment);
}
+uint64_t AttributesImpl::getStackAlignment() const {
+ return Bits & getAttrMask(Attributes::StackAlignment);
+}
-AttrListPtr AttrListPtr::get(ArrayRef<AttributeWithIndex> Attrs) {
+//===----------------------------------------------------------------------===//
+// AttributeListImpl Definition
+//===----------------------------------------------------------------------===//
+
+AttrListPtr AttrListPtr::get(LLVMContext &C,
+ ArrayRef<AttributeWithIndex> Attrs) {
// If there are no attributes then return a null AttributesList pointer.
if (Attrs.empty())
return AttrListPtr();
-
+
#ifndef NDEBUG
for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
- assert(Attrs[i].Attrs != Attribute::None &&
+ assert(Attrs[i].Attrs.hasAttributes() &&
"Pointless attribute!");
assert((!i || Attrs[i-1].Index < Attrs[i].Index) &&
"Misordered AttributesList!");
}
#endif
-
+
// Otherwise, build a key to look up the existing attributes.
+ LLVMContextImpl *pImpl = C.pImpl;
FoldingSetNodeID ID;
AttributeListImpl::Profile(ID, Attrs);
- void *InsertPos;
-
- sys::SmartScopedLock<true> Lock(*ALMutex);
-
- AttributeListImpl *PAL =
- AttributesLists->FindNodeOrInsertPos(ID, InsertPos);
-
+
+ void *InsertPoint;
+ AttributeListImpl *PA = pImpl->AttrsLists.FindNodeOrInsertPos(ID,
+ InsertPoint);
+
// If we didn't find any existing attributes of the same shape then
// create a new one and insert it.
- if (!PAL) {
- PAL = new AttributeListImpl(Attrs);
- AttributesLists->InsertNode(PAL, InsertPos);
+ if (!PA) {
+ PA = new AttributeListImpl(Attrs);
+ pImpl->AttrsLists.InsertNode(PA, InsertPoint);
}
-
+
// Return the AttributesList that we found or created.
- return AttrListPtr(PAL);
+ return AttrListPtr(PA);
}
-
//===----------------------------------------------------------------------===//
// AttrListPtr Method Implementations
//===----------------------------------------------------------------------===//
-AttrListPtr::AttrListPtr(AttributeListImpl *LI) : AttrList(LI) {
- if (LI) LI->AddRef();
-}
-
-AttrListPtr::AttrListPtr(const AttrListPtr &P) : AttrList(P.AttrList) {
- if (AttrList) AttrList->AddRef();
-}
-
const AttrListPtr &AttrListPtr::operator=(const AttrListPtr &RHS) {
- sys::SmartScopedLock<true> Lock(*ALMutex);
if (AttrList == RHS.AttrList) return *this;
- if (AttrList) AttrList->DropRef();
+
AttrList = RHS.AttrList;
- if (AttrList) AttrList->AddRef();
return *this;
}
-AttrListPtr::~AttrListPtr() {
- if (AttrList) AttrList->DropRef();
-}
-
-/// getNumSlots - Return the number of slots used in this attribute list.
+/// getNumSlots - Return the number of slots used in this attribute list.
/// This is the number of arguments that have an attribute set on them
/// (including the function itself).
unsigned AttrListPtr::getNumSlots() const {
@@ -245,48 +415,60 @@ const AttributeWithIndex &AttrListPtr::getSlot(unsigned Slot) const {
return AttrList->Attrs[Slot];
}
-
-/// getAttributes - The attributes for the specified index are
-/// returned. Attributes for the result are denoted with Idx = 0.
-/// Function notes are denoted with idx = ~0.
+/// getAttributes - The attributes for the specified index are returned.
+/// Attributes for the result are denoted with Idx = 0. Function notes are
+/// denoted with idx = ~0.
Attributes AttrListPtr::getAttributes(unsigned Idx) const {
- if (AttrList == 0) return Attribute::None;
-
+ if (AttrList == 0) return Attributes();
+
const SmallVector<AttributeWithIndex, 4> &Attrs = AttrList->Attrs;
for (unsigned i = 0, e = Attrs.size(); i != e && Attrs[i].Index <= Idx; ++i)
if (Attrs[i].Index == Idx)
return Attrs[i].Attrs;
- return Attribute::None;
+
+ return Attributes();
}
/// hasAttrSomewhere - Return true if the specified attribute is set for at
/// least one parameter or for the return value.
-bool AttrListPtr::hasAttrSomewhere(Attributes Attr) const {
+bool AttrListPtr::hasAttrSomewhere(Attributes::AttrVal Attr) const {
if (AttrList == 0) return false;
-
+
const SmallVector<AttributeWithIndex, 4> &Attrs = AttrList->Attrs;
for (unsigned i = 0, e = Attrs.size(); i != e; ++i)
- if (Attrs[i].Attrs & Attr)
+ if (Attrs[i].Attrs.hasAttribute(Attr))
return true;
+
return false;
}
+unsigned AttrListPtr::getNumAttrs() const {
+ return AttrList ? AttrList->Attrs.size() : 0;
+}
+
+Attributes &AttrListPtr::getAttributesAtIndex(unsigned i) const {
+ assert(AttrList && "Trying to get an attribute from an empty list!");
+ assert(i < AttrList->Attrs.size() && "Index out of range!");
+ return AttrList->Attrs[i].Attrs;
+}
-AttrListPtr AttrListPtr::addAttr(unsigned Idx, Attributes Attrs) const {
+AttrListPtr AttrListPtr::addAttr(LLVMContext &C, unsigned Idx,
+ Attributes Attrs) const {
Attributes OldAttrs = getAttributes(Idx);
#ifndef NDEBUG
// FIXME it is not obvious how this should work for alignment.
// For now, say we can't change a known alignment.
- Attributes OldAlign = OldAttrs & Attribute::Alignment;
- Attributes NewAlign = Attrs & Attribute::Alignment;
+ unsigned OldAlign = OldAttrs.getAlignment();
+ unsigned NewAlign = Attrs.getAlignment();
assert((!OldAlign || !NewAlign || OldAlign == NewAlign) &&
"Attempt to change alignment!");
#endif
-
- Attributes NewAttrs = OldAttrs | Attrs;
- if (NewAttrs == OldAttrs)
+
+ AttrBuilder NewAttrs =
+ AttrBuilder(OldAttrs).addAttributes(Attrs);
+ if (NewAttrs == AttrBuilder(OldAttrs))
return *this;
-
+
SmallVector<AttributeWithIndex, 8> NewAttrList;
if (AttrList == 0)
NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs));
@@ -299,61 +481,67 @@ AttrListPtr AttrListPtr::addAttr(unsigned Idx, Attributes Attrs) const {
// If there are attributes already at this index, merge them in.
if (i != e && OldAttrList[i].Index == Idx) {
- Attrs |= OldAttrList[i].Attrs;
+ Attrs =
+ Attributes::get(C, AttrBuilder(Attrs).
+ addAttributes(OldAttrList[i].Attrs));
++i;
}
-
+
NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs));
-
+
// Copy attributes for arguments after this one.
- NewAttrList.insert(NewAttrList.end(),
+ NewAttrList.insert(NewAttrList.end(),
OldAttrList.begin()+i, OldAttrList.end());
}
-
- return get(NewAttrList);
+
+ return get(C, NewAttrList);
}
-AttrListPtr AttrListPtr::removeAttr(unsigned Idx, Attributes Attrs) const {
+AttrListPtr AttrListPtr::removeAttr(LLVMContext &C, unsigned Idx,
+ Attributes Attrs) const {
#ifndef NDEBUG
// FIXME it is not obvious how this should work for alignment.
// For now, say we can't pass in alignment, which no current use does.
- assert(!(Attrs & Attribute::Alignment) && "Attempt to exclude alignment!");
+ assert(!Attrs.hasAttribute(Attributes::Alignment) &&
+ "Attempt to exclude alignment!");
#endif
if (AttrList == 0) return AttrListPtr();
-
+
Attributes OldAttrs = getAttributes(Idx);
- Attributes NewAttrs = OldAttrs & ~Attrs;
- if (NewAttrs == OldAttrs)
+ AttrBuilder NewAttrs =
+ AttrBuilder(OldAttrs).removeAttributes(Attrs);
+ if (NewAttrs == AttrBuilder(OldAttrs))
return *this;
SmallVector<AttributeWithIndex, 8> NewAttrList;
const SmallVector<AttributeWithIndex, 4> &OldAttrList = AttrList->Attrs;
unsigned i = 0, e = OldAttrList.size();
-
+
// Copy attributes for arguments before this one.
for (; i != e && OldAttrList[i].Index < Idx; ++i)
NewAttrList.push_back(OldAttrList[i]);
-
+
// If there are attributes already at this index, merge them in.
assert(OldAttrList[i].Index == Idx && "Attribute isn't set?");
- Attrs = OldAttrList[i].Attrs & ~Attrs;
+ Attrs = Attributes::get(C, AttrBuilder(OldAttrList[i].Attrs).
+ removeAttributes(Attrs));
++i;
- if (Attrs) // If any attributes left for this parameter, add them.
+ if (Attrs.hasAttributes()) // If any attributes left for this param, add them.
NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs));
-
+
// Copy attributes for arguments after this one.
- NewAttrList.insert(NewAttrList.end(),
+ NewAttrList.insert(NewAttrList.end(),
OldAttrList.begin()+i, OldAttrList.end());
-
- return get(NewAttrList);
+
+ return get(C, NewAttrList);
}
void AttrListPtr::dump() const {
dbgs() << "PAL[ ";
for (unsigned i = 0; i < getNumSlots(); ++i) {
const AttributeWithIndex &PAWI = getSlot(i);
- dbgs() << "{" << PAWI.Index << "," << PAWI.Attrs << "} ";
+ dbgs() << "{" << PAWI.Index << "," << PAWI.Attrs.getAsString() << "} ";
}
-
+
dbgs() << "]\n";
}
diff --git a/contrib/llvm/lib/VMCore/AttributesImpl.h b/contrib/llvm/lib/VMCore/AttributesImpl.h
new file mode 100644
index 0000000..5c107e1
--- /dev/null
+++ b/contrib/llvm/lib/VMCore/AttributesImpl.h
@@ -0,0 +1,71 @@
+//===-- AttributesImpl.h - Attributes Internals -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various helper methods and classes used by LLVMContextImpl
+// for creating and managing attributes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ATTRIBUTESIMPL_H
+#define LLVM_ATTRIBUTESIMPL_H
+
+#include "llvm/Attributes.h"
+#include "llvm/ADT/FoldingSet.h"
+
+namespace llvm {
+
+class AttributesImpl : public FoldingSetNode {
+ uint64_t Bits; // FIXME: We will be expanding this.
+public:
+ AttributesImpl(uint64_t bits) : Bits(bits) {}
+
+ bool hasAttribute(uint64_t A) const;
+
+ bool hasAttributes() const;
+ bool hasAttributes(const Attributes &A) const;
+
+ uint64_t getAlignment() const;
+ uint64_t getStackAlignment() const;
+
+ uint64_t Raw() const { return Bits; } // FIXME: Remove.
+
+ static uint64_t getAttrMask(uint64_t Val);
+
+ void Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, Bits);
+ }
+ static void Profile(FoldingSetNodeID &ID, uint64_t Bits) {
+ ID.AddInteger(Bits);
+ }
+};
+
+class AttributeListImpl : public FoldingSetNode {
+ // AttributeListImpl is uniqued; these should not be publicly available.
+ void operator=(const AttributeListImpl &) LLVM_DELETED_FUNCTION;
+ AttributeListImpl(const AttributeListImpl &) LLVM_DELETED_FUNCTION;
+public:
+ SmallVector<AttributeWithIndex, 4> Attrs;
+
+ AttributeListImpl(ArrayRef<AttributeWithIndex> attrs)
+ : Attrs(attrs.begin(), attrs.end()) {}
+
+ void Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, Attrs);
+ }
+ static void Profile(FoldingSetNodeID &ID, ArrayRef<AttributeWithIndex> Attrs){
+ for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
+ ID.AddInteger(Attrs[i].Attrs.Raw());
+ ID.AddInteger(Attrs[i].Index);
+ }
+ }
+};
+
+} // end llvm namespace
+
+#endif
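
AttributesImpl profiles itself by its raw bits, which is exactly what a FoldingSet needs to unique nodes per context. A sketch of the lookup-or-insert pattern the context implementation can use (the helper name is hypothetical; the real factory is Attributes::get):

    #include "AttributesImpl.h"
    #include "llvm/ADT/FoldingSet.h"
    using namespace llvm;

    // Hypothetical helper: return the uniqued node for Bits, creating it on
    // first use. The set owns the nodes; they are deleted when the context
    // implementation is torn down.
    static AttributesImpl *getOrCreateAttrs(FoldingSet<AttributesImpl> &Set,
                                            uint64_t Bits) {
      FoldingSetNodeID ID;
      AttributesImpl::Profile(ID, Bits);   // key is the raw bit vector
      void *InsertPos;
      if (AttributesImpl *PA = Set.FindNodeOrInsertPos(ID, InsertPos))
        return PA;                         // already uniqued
      AttributesImpl *PA = new AttributesImpl(Bits);
      Set.InsertNode(PA, InsertPos);
      return PA;
    }
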
diff --git a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
index 094ca75..5fff460 100644
--- a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
+++ b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
@@ -148,7 +148,8 @@ bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
if (NewFn)
F = NewFn;
if (unsigned id = F->getIntrinsicID())
- F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
+ F->setAttributes(Intrinsic::getAttributes(F->getContext(),
+ (Intrinsic::ID)id));
return Upgraded;
}
diff --git a/contrib/llvm/lib/VMCore/ConstantFold.cpp b/contrib/llvm/lib/VMCore/ConstantFold.cpp
index 8e82876..fe3edac 100644
--- a/contrib/llvm/lib/VMCore/ConstantFold.cpp
+++ b/contrib/llvm/lib/VMCore/ConstantFold.cpp
@@ -12,7 +12,7 @@
// ConstantExpr::get* methods to automatically fold constants when possible.
//
// The current constant folding implementation is implemented in two pieces: the
-// pieces that don't need TargetData, and the pieces that do. This is to avoid
+// pieces that don't need DataLayout, and the pieces that do. This is to avoid
// a dependence in VMCore on Target.
//
//===----------------------------------------------------------------------===//
@@ -87,9 +87,13 @@ foldConstantCastPair(
Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode());
Instruction::CastOps secondOp = Instruction::CastOps(opc);
+ // Assume that pointers are never more than 64 bits wide.
+ IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext());
+
// Let CastInst::isEliminableCastPair do the heavy lifting.
return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy,
- Type::getInt64Ty(DstTy->getContext()));
+ FakeIntPtrTy, FakeIntPtrTy,
+ FakeIntPtrTy);
}
static Constant *FoldBitCast(Constant *V, Type *DestTy) {
@@ -514,10 +518,6 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
return UndefValue::get(DestTy);
}
- // No compile-time operations on this type yet.
- if (V->getType()->isPPC_FP128Ty() || DestTy->isPPC_FP128Ty())
- return 0;
-
if (V->isNullValue() && !DestTy->isX86_MMXTy())
return Constant::getNullValue(DestTy);
@@ -576,6 +576,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
DestTy->isDoubleTy() ? APFloat::IEEEdouble :
DestTy->isX86_FP80Ty() ? APFloat::x87DoubleExtended :
DestTy->isFP128Ty() ? APFloat::IEEEquad :
+ DestTy->isPPC_FP128Ty() ? APFloat::PPCDoubleDouble :
APFloat::Bogus,
APFloat::rmNearestTiesToEven, &ignored);
return ConstantFP::get(V->getContext(), Val);
@@ -646,7 +647,8 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
case Instruction::SIToFP:
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt api = CI->getValue();
- APFloat apf(APInt::getNullValue(DestTy->getPrimitiveSizeInBits()), true);
+ APFloat apf(APInt::getNullValue(DestTy->getPrimitiveSizeInBits()),
+ !DestTy->isPPC_FP128Ty() /* isIEEE */);
(void)apf.convertFromAPInt(api,
opc==Instruction::SIToFP,
APFloat::rmNearestTiesToEven);
@@ -867,10 +869,6 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
Constant *C1, Constant *C2) {
- // No compile-time operations on this type yet.
- if (C1->getType()->isPPC_FP128Ty())
- return 0;
-
// Handle UndefValue up front.
if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
switch (Opcode) {
@@ -1273,10 +1271,6 @@ static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
assert(V1->getType() == V2->getType() &&
"Cannot compare values of different types!");
- // No compile-time operations on this type yet.
- if (V1->getType()->isPPC_FP128Ty())
- return FCmpInst::BAD_FCMP_PREDICATE;
-
// Handle degenerate case quickly
if (V1 == V2) return FCmpInst::FCMP_OEQ;
@@ -1602,10 +1596,6 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(pred));
}
- // No compile-time operations on this type yet.
- if (C1->getType()->isPPC_FP128Ty())
- return 0;
-
// icmp eq/ne(null,GV) -> false/true
if (C1->isNullValue()) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(C2))
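
With the ppc_fp128 early-outs removed, constant casts can now fold through APFloat using the PPCDoubleDouble semantics wired in above. A standalone sketch of that conversion:

    #include "llvm/ADT/APFloat.h"
    using namespace llvm;

    // Fold a float constant to ppc_fp128 the way the cast folder now can.
    static APFloat extendToPPCDoubleDouble(float F) {
      bool Ignored;
      APFloat Val(F);
      Val.convert(APFloat::PPCDoubleDouble,
                  APFloat::rmNearestTiesToEven, &Ignored);
      return Val;
    }
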
diff --git a/contrib/llvm/lib/VMCore/Constants.cpp b/contrib/llvm/lib/VMCore/Constants.cpp
index a4e21e1..edd6a73 100644
--- a/contrib/llvm/lib/VMCore/Constants.cpp
+++ b/contrib/llvm/lib/VMCore/Constants.cpp
@@ -245,6 +245,33 @@ bool Constant::canTrap() const {
}
}
+/// isThreadDependent - Return true if the value can vary between threads.
+bool Constant::isThreadDependent() const {
+ SmallPtrSet<const Constant*, 64> Visited;
+ SmallVector<const Constant*, 64> WorkList;
+ WorkList.push_back(this);
+ Visited.insert(this);
+
+ while (!WorkList.empty()) {
+ const Constant *C = WorkList.pop_back_val();
+
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
+ if (GV->isThreadLocal())
+ return true;
+ }
+
+ for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) {
+ const Constant *D = dyn_cast<Constant>(C->getOperand(I));
+ if (!D)
+ continue;
+ if (Visited.insert(D))
+ WorkList.push_back(D);
+ }
+ }
+
+ return false;
+}
+
/// isConstantUsed - Return true if the constant has users other than constant
/// exprs and other dangling things.
bool Constant::isConstantUsed() const {
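
A usage sketch for the new query: any constant expression whose operand walk reaches a thread_local global is reported as thread-dependent (the module contents here are illustrative only):

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/GlobalVariable.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    using namespace llvm;

    static bool demoThreadDependence() {
      LLVMContext Ctx;
      Module M("m", Ctx);
      GlobalVariable *TLS = new GlobalVariable(
          M, Type::getInt32Ty(Ctx), /*isConstant=*/false,
          GlobalValue::ExternalLinkage, /*Initializer=*/0, "tls",
          /*InsertBefore=*/0, GlobalVariable::GeneralDynamicTLSModel);
      // The ptrtoint expression is not itself thread-local, but the
      // worklist walk above finds the TLS global among its operands.
      Constant *E = ConstantExpr::getPtrToInt(TLS, Type::getInt64Ty(Ctx));
      return E->isThreadDependent();   // true
    }
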
diff --git a/contrib/llvm/lib/VMCore/ConstantsContext.h b/contrib/llvm/lib/VMCore/ConstantsContext.h
index 8903a8f..996eb12 100644
--- a/contrib/llvm/lib/VMCore/ConstantsContext.h
+++ b/contrib/llvm/lib/VMCore/ConstantsContext.h
@@ -33,7 +33,7 @@ struct ConstantTraits;
/// behind the scenes to implement unary constant exprs.
class UnaryConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly one operand
void *operator new(size_t s) {
@@ -50,7 +50,7 @@ public:
/// behind the scenes to implement binary constant exprs.
class BinaryConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -71,7 +71,7 @@ public:
/// behind the scenes to implement select constant exprs.
class SelectConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly three operands
void *operator new(size_t s) {
@@ -92,7 +92,7 @@ public:
/// extractelement constant exprs.
class ExtractElementConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -113,7 +113,7 @@ public:
/// insertelement constant exprs.
class InsertElementConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly three operands
void *operator new(size_t s) {
@@ -135,7 +135,7 @@ public:
/// shufflevector constant exprs.
class ShuffleVectorConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly three operands
void *operator new(size_t s) {
@@ -160,7 +160,7 @@ public:
/// extractvalue constant exprs.
class ExtractValueConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly one operand
void *operator new(size_t s) {
@@ -186,7 +186,7 @@ public:
/// insertvalue constant exprs.
class InsertValueConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly one operand
void *operator new(size_t s) {
@@ -234,7 +234,7 @@ public:
// needed in order to store the predicate value for these instructions.
class CompareConstantExpr : public ConstantExpr {
virtual void anchor();
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -352,18 +352,21 @@ struct ExprMapKeyType {
struct InlineAsmKeyType {
InlineAsmKeyType(StringRef AsmString,
StringRef Constraints, bool hasSideEffects,
- bool isAlignStack)
+ bool isAlignStack, InlineAsm::AsmDialect asmDialect)
: asm_string(AsmString), constraints(Constraints),
- has_side_effects(hasSideEffects), is_align_stack(isAlignStack) {}
+ has_side_effects(hasSideEffects), is_align_stack(isAlignStack),
+ asm_dialect(asmDialect) {}
std::string asm_string;
std::string constraints;
bool has_side_effects;
bool is_align_stack;
+ InlineAsm::AsmDialect asm_dialect;
bool operator==(const InlineAsmKeyType& that) const {
return this->asm_string == that.asm_string &&
this->constraints == that.constraints &&
this->has_side_effects == that.has_side_effects &&
- this->is_align_stack == that.is_align_stack;
+ this->is_align_stack == that.is_align_stack &&
+ this->asm_dialect == that.asm_dialect;
}
bool operator<(const InlineAsmKeyType& that) const {
if (this->asm_string != that.asm_string)
@@ -374,6 +377,8 @@ struct InlineAsmKeyType {
return this->has_side_effects < that.has_side_effects;
if (this->is_align_stack != that.is_align_stack)
return this->is_align_stack < that.is_align_stack;
+ if (this->asm_dialect != that.asm_dialect)
+ return this->asm_dialect < that.asm_dialect;
return false;
}
@@ -490,7 +495,8 @@ template<>
struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType> {
static InlineAsm *create(PointerType *Ty, const InlineAsmKeyType &Key) {
return new InlineAsm(Ty, Key.asm_string, Key.constraints,
- Key.has_side_effects, Key.is_align_stack);
+ Key.has_side_effects, Key.is_align_stack,
+ Key.asm_dialect);
}
};
@@ -499,7 +505,8 @@ struct ConstantKeyData<InlineAsm> {
typedef InlineAsmKeyType ValType;
static ValType getValType(InlineAsm *Asm) {
return InlineAsmKeyType(Asm->getAsmString(), Asm->getConstraintString(),
- Asm->hasSideEffects(), Asm->isAlignStack());
+ Asm->hasSideEffects(), Asm->isAlignStack(),
+ Asm->getDialect());
}
};
diff --git a/contrib/llvm/lib/VMCore/Core.cpp b/contrib/llvm/lib/VMCore/Core.cpp
index 972db3c..847bc13 100644
--- a/contrib/llvm/lib/VMCore/Core.cpp
+++ b/contrib/llvm/lib/VMCore/Core.cpp
@@ -568,6 +568,19 @@ const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len) {
return 0;
}
+unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V)
+{
+ return cast<MDNode>(unwrap(V))->getNumOperands();
+}
+
+void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest)
+{
+ const MDNode *N = cast<MDNode>(unwrap(V));
+ const unsigned numOperands = N->getNumOperands();
+ for (unsigned i = 0; i < numOperands; i++)
+ Dest[i] = wrap(N->getOperand(i));
+}
+
unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name)
{
if (NamedMDNode *N = unwrap(M)->getNamedMetadata(name)) {
@@ -1084,6 +1097,8 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
return LLVMLinkOnceAnyLinkage;
case GlobalValue::LinkOnceODRLinkage:
return LLVMLinkOnceODRLinkage;
+ case GlobalValue::LinkOnceODRAutoHideLinkage:
+ return LLVMLinkOnceODRAutoHideLinkage;
case GlobalValue::WeakAnyLinkage:
return LLVMWeakAnyLinkage;
case GlobalValue::WeakODRLinkage:
@@ -1098,8 +1113,6 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
return LLVMLinkerPrivateLinkage;
case GlobalValue::LinkerPrivateWeakLinkage:
return LLVMLinkerPrivateWeakLinkage;
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
- return LLVMLinkerPrivateWeakDefAutoLinkage;
case GlobalValue::DLLImportLinkage:
return LLVMDLLImportLinkage;
case GlobalValue::DLLExportLinkage:
@@ -1129,6 +1142,9 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
case LLVMLinkOnceODRLinkage:
GV->setLinkage(GlobalValue::LinkOnceODRLinkage);
break;
+ case LLVMLinkOnceODRAutoHideLinkage:
+ GV->setLinkage(GlobalValue::LinkOnceODRAutoHideLinkage);
+ break;
case LLVMWeakAnyLinkage:
GV->setLinkage(GlobalValue::WeakAnyLinkage);
break;
@@ -1150,9 +1166,6 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
case LLVMLinkerPrivateWeakLinkage:
GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage);
break;
- case LLVMLinkerPrivateWeakDefAutoLinkage:
- GV->setLinkage(GlobalValue::LinkerPrivateWeakDefAutoLinkage);
- break;
case LLVMDLLImportLinkage:
GV->setLinkage(GlobalValue::DLLImportLinkage);
break;
@@ -1368,14 +1381,20 @@ void LLVMSetGC(LLVMValueRef Fn, const char *GC) {
void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) {
Function *Func = unwrap<Function>(Fn);
const AttrListPtr PAL = Func->getAttributes();
- const AttrListPtr PALnew = PAL.addAttr(~0U, Attributes(PA));
+ AttrBuilder B(PA);
+ const AttrListPtr PALnew =
+ PAL.addAttr(Func->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(Func->getContext(), B));
Func->setAttributes(PALnew);
}
void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) {
Function *Func = unwrap<Function>(Fn);
const AttrListPtr PAL = Func->getAttributes();
- const AttrListPtr PALnew = PAL.removeAttr(~0U, Attributes(PA));
+ AttrBuilder B(PA);
+ const AttrListPtr PALnew =
+ PAL.removeAttr(Func->getContext(), AttrListPtr::FunctionIndex,
+ Attributes::get(Func->getContext(), B));
Func->setAttributes(PALnew);
}
@@ -1445,11 +1464,15 @@ LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) {
}
void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA) {
- unwrap<Argument>(Arg)->addAttr(Attributes(PA));
+ Argument *A = unwrap<Argument>(Arg);
+ AttrBuilder B(PA);
+ A->addAttr(Attributes::get(A->getContext(), B));
}
void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA) {
- unwrap<Argument>(Arg)->removeAttr(Attributes(PA));
+ Argument *A = unwrap<Argument>(Arg);
+ AttrBuilder B(PA);
+ A->removeAttr(Attributes::get(A->getContext(), B));
}
LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) {
@@ -1461,8 +1484,10 @@ LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) {
void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) {
- unwrap<Argument>(Arg)->addAttr(
- Attribute::constructAlignmentFromInt(align));
+ AttrBuilder B;
+ B.addAlignmentAttr(align);
+ unwrap<Argument>(Arg)->addAttr(Attributes::
+ get(unwrap<Argument>(Arg)->getContext(), B));
}
/*--.. Operations on basic blocks ..........................................--*/
@@ -1651,23 +1676,28 @@ void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index,
LLVMAttribute PA) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
+ AttrBuilder B(PA);
Call.setAttributes(
- Call.getAttributes().addAttr(index, Attributes(PA)));
+ Call.getAttributes().addAttr(Call->getContext(), index,
+ Attributes::get(Call->getContext(), B)));
}
void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index,
LLVMAttribute PA) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
+ AttrBuilder B(PA);
Call.setAttributes(
- Call.getAttributes().removeAttr(index, Attributes(PA)));
+ Call.getAttributes().removeAttr(Call->getContext(), index,
+ Attributes::get(Call->getContext(), B)));
}
void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
unsigned align) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
- Call.setAttributes(
- Call.getAttributes().addAttr(index,
- Attribute::constructAlignmentFromInt(align)));
+ AttrBuilder B;
+ B.addAlignmentAttr(align);
+ Call.setAttributes(Call.getAttributes().addAttr(Call->getContext(), index,
+ Attributes::get(Call->getContext(), B)));
}
/*--.. Operations on call instructions (only) ..............................--*/
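
The two new metadata entry points follow the usual C API pattern of a count query plus a caller-allocated output array. A sketch (MD is assumed to be an LLVMValueRef wrapping an MDNode):

    #include "llvm-c/Core.h"
    #include <stdlib.h>

    static void visitOperands(LLVMValueRef MD) {
      unsigned N = LLVMGetMDNodeNumOperands(MD);
      LLVMValueRef *Ops =
          (LLVMValueRef *)malloc(N * sizeof(LLVMValueRef));
      LLVMGetMDNodeOperands(MD, Ops);  /* fills all N slots */
      for (unsigned i = 0; i < N; ++i) {
        /* inspect Ops[i], e.g. with LLVMGetMDString */
      }
      free(Ops);
    }
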
diff --git a/contrib/llvm/lib/VMCore/DIBuilder.cpp b/contrib/llvm/lib/VMCore/DIBuilder.cpp
index f5894e9..152b825 100644
--- a/contrib/llvm/lib/VMCore/DIBuilder.cpp
+++ b/contrib/llvm/lib/VMCore/DIBuilder.cpp
@@ -492,7 +492,8 @@ DIType DIBuilder::createStructType(DIDescriptor Context, StringRef Name,
NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
- Constant::getNullValue(Type::getInt32Ty(VMContext))
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
};
return DIType(MDNode::get(VMContext, Elts));
}
@@ -550,7 +551,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name,
uint64_t SizeInBits,
uint64_t AlignInBits,
DIArray Elements,
- DIType ClassType, unsigned Flags) {
+ DIType ClassType) {
// TAG_enumeration_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type),
@@ -561,7 +562,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ClassType,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
@@ -640,6 +641,30 @@ DIType DIBuilder::createArtificialType(DIType Ty) {
return DIType(MDNode::get(VMContext, Elts));
}
+/// createObjectPointerType - Create a new DIType with the "object pointer"
+/// and "artificial" flags set.
+DIType DIBuilder::createObjectPointerType(DIType Ty) {
+ if (Ty.isObjectPointer())
+ return Ty;
+
+ SmallVector<Value *, 9> Elts;
+ MDNode *N = Ty;
+ assert(N && "Unexpected input DIType!");
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ if (Value *V = N->getOperand(i))
+ Elts.push_back(V);
+ else
+ Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext)));
+ }
+
+ unsigned CurFlags = Ty.getFlags();
+ CurFlags = CurFlags | (DIType::FlagObjectPointer | DIType::FlagArtificial);
+
+ // Flags are stored at this slot.
+ Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags);
+
+ return DIType(MDNode::get(VMContext, Elts));
+}
+
/// retainType - Retain DIType in a module even if it is not referenced
/// through debug info anchors.
void DIBuilder::retainType(DIType T) {
@@ -682,7 +707,9 @@ DIType DIBuilder::createTemporaryType(DIFile F) {
/// can be RAUW'd if the full type is seen.
DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name,
DIDescriptor Scope, DIFile F,
- unsigned Line, unsigned RuntimeLang) {
+ unsigned Line, unsigned RuntimeLang,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits) {
// Create a temporary MDNode.
Value *Elts[] = {
GetTagConstant(VMContext, Tag),
@@ -690,9 +717,8 @@ DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name,
MDString::get(VMContext, Name),
F,
ConstantInt::get(Type::getInt32Ty(VMContext), Line),
- // To ease transition include sizes etc of 0.
- ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext),
DIDescriptor::FlagFwdDecl),
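
Forward declarations can now carry real size and alignment instead of the transitional zeros. A usage sketch, assuming a live DIBuilder and its descriptors are passed in (the name, line, and sizes are illustrative):

    #include "llvm/DIBuilder.h"
    #include "llvm/Support/Dwarf.h"
    using namespace llvm;

    static DIType declareOpaqueStruct(DIBuilder &DIB, DIDescriptor Scope,
                                      DIFile File) {
      // Previously the size/align slots were forced to 0.
      return DIB.createForwardDecl(dwarf::DW_TAG_structure_type, "S",
                                   Scope, File, /*Line=*/42,
                                   /*RuntimeLang=*/0,
                                   /*SizeInBits=*/128, /*AlignInBits=*/64);
    }
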
diff --git a/contrib/llvm/lib/Target/TargetData.cpp b/contrib/llvm/lib/VMCore/DataLayout.cpp
index cc6dc1e..19cf0f5 100644
--- a/contrib/llvm/lib/Target/TargetData.cpp
+++ b/contrib/llvm/lib/VMCore/DataLayout.cpp
@@ -1,4 +1,4 @@
-//===-- TargetData.cpp - Data size & alignment routines --------------------==//
+//===-- DataLayout.cpp - Data size & alignment routines --------------------==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines target properties related to datatype size/offset/alignment
+// This file defines layout properties related to datatype size/offset/alignment
// information.
//
// This structure should be created once, filled in if the defaults are not
@@ -16,7 +16,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
@@ -31,17 +31,17 @@
#include <cstdlib>
using namespace llvm;
-// Handle the Pass registration stuff necessary to use TargetData's.
+// Handle the Pass registration stuff necessary to use DataLayout's.
// Register the default SparcV9 implementation...
-INITIALIZE_PASS(TargetData, "targetdata", "Target Data Layout", false, true)
-char TargetData::ID = 0;
+INITIALIZE_PASS(DataLayout, "datalayout", "Data Layout", false, true)
+char DataLayout::ID = 0;
//===----------------------------------------------------------------------===//
// Support for StructLayout
//===----------------------------------------------------------------------===//
-StructLayout::StructLayout(StructType *ST, const TargetData &TD) {
+StructLayout::StructLayout(StructType *ST, const DataLayout &TD) {
assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
StructAlignment = 0;
StructSize = 0;
@@ -54,7 +54,7 @@ StructLayout::StructLayout(StructType *ST, const TargetData &TD) {
// Add padding if necessary to align the data element properly.
if ((StructSize & (TyAlign-1)) != 0)
- StructSize = TargetData::RoundUpAlignment(StructSize, TyAlign);
+ StructSize = DataLayout::RoundUpAlignment(StructSize, TyAlign);
// Keep track of maximum alignment constraint.
StructAlignment = std::max(TyAlign, StructAlignment);
@@ -69,7 +69,7 @@ StructLayout::StructLayout(StructType *ST, const TargetData &TD) {
// Add padding to the end of the struct so that it could be put in an array
// and all array elements would be aligned correctly.
if ((StructSize & (StructAlignment-1)) != 0)
- StructSize = TargetData::RoundUpAlignment(StructSize, StructAlignment);
+ StructSize = DataLayout::RoundUpAlignment(StructSize, StructAlignment);
}
@@ -94,14 +94,14 @@ unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
}
//===----------------------------------------------------------------------===//
-// TargetAlignElem, TargetAlign support
+// LayoutAlignElem, LayoutAlign support
//===----------------------------------------------------------------------===//
-TargetAlignElem
-TargetAlignElem::get(AlignTypeEnum align_type, unsigned abi_align,
+LayoutAlignElem
+LayoutAlignElem::get(AlignTypeEnum align_type, unsigned abi_align,
unsigned pref_align, uint32_t bit_width) {
assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
- TargetAlignElem retval;
+ LayoutAlignElem retval;
retval.AlignType = align_type;
retval.ABIAlign = abi_align;
retval.PrefAlign = pref_align;
@@ -110,18 +110,46 @@ TargetAlignElem::get(AlignTypeEnum align_type, unsigned abi_align,
}
bool
-TargetAlignElem::operator==(const TargetAlignElem &rhs) const {
+LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const {
return (AlignType == rhs.AlignType
&& ABIAlign == rhs.ABIAlign
&& PrefAlign == rhs.PrefAlign
&& TypeBitWidth == rhs.TypeBitWidth);
}
-const TargetAlignElem
-TargetData::InvalidAlignmentElem = { (AlignTypeEnum)0xFF, 0, 0, 0 };
+const LayoutAlignElem
+DataLayout::InvalidAlignmentElem =
+ LayoutAlignElem::get((AlignTypeEnum) -1, 0, 0, 0);
//===----------------------------------------------------------------------===//
-// TargetData Class Implementation
+// PointerAlignElem, PointerAlign support
+//===----------------------------------------------------------------------===//
+
+PointerAlignElem
+PointerAlignElem::get(uint32_t addr_space, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width) {
+ assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+ PointerAlignElem retval;
+ retval.AddressSpace = addr_space;
+ retval.ABIAlign = abi_align;
+ retval.PrefAlign = pref_align;
+ retval.TypeBitWidth = bit_width;
+ return retval;
+}
+
+bool
+PointerAlignElem::operator==(const PointerAlignElem &rhs) const {
+ return (ABIAlign == rhs.ABIAlign
+ && AddressSpace == rhs.AddressSpace
+ && PrefAlign == rhs.PrefAlign
+ && TypeBitWidth == rhs.TypeBitWidth);
+}
+
+const PointerAlignElem
+DataLayout::InvalidPointerElem = PointerAlignElem::get(~0U, 0U, 0U, 0U);
+
+//===----------------------------------------------------------------------===//
+// DataLayout Class Implementation
//===----------------------------------------------------------------------===//
/// getInt - Get an integer ignoring errors.
@@ -131,14 +159,11 @@ static int getInt(StringRef R) {
return Result;
}
-void TargetData::init() {
- initializeTargetDataPass(*PassRegistry::getPassRegistry());
+void DataLayout::init() {
+ initializeDataLayoutPass(*PassRegistry::getPassRegistry());
LayoutMap = 0;
LittleEndian = false;
- PointerMemSize = 8;
- PointerABIAlign = 8;
- PointerPrefAlign = PointerABIAlign;
StackNaturalAlign = 0;
// Default alignments
@@ -154,9 +179,10 @@ void TargetData::init() {
setAlignment(VECTOR_ALIGN, 8, 8, 64); // v2i32, v1i64, ...
setAlignment(VECTOR_ALIGN, 16, 16, 128); // v16i8, v8i16, v4i32, ...
setAlignment(AGGREGATE_ALIGN, 0, 8, 0); // struct
+ setPointerAlignment(0, 8, 8, 8);
}
-std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
+std::string DataLayout::parseSpecifier(StringRef Desc, DataLayout *td) {
if (td)
td->init();
@@ -185,13 +211,16 @@ std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
td->LittleEndian = true;
break;
case 'p': {
- // Pointer size.
+ int AddrSpace = 0;
+ if (Specifier.size() > 1) {
+ AddrSpace = getInt(Specifier.substr(1));
+ if (AddrSpace < 0 || AddrSpace > (1 << 24))
+ return "Invalid address space, must be a positive 24bit integer";
+ }
Split = Token.split(':');
int PointerMemSizeBits = getInt(Split.first);
if (PointerMemSizeBits < 0 || PointerMemSizeBits % 8 != 0)
return "invalid pointer size, must be a positive 8-bit multiple";
- if (td)
- td->PointerMemSize = PointerMemSizeBits / 8;
// Pointer ABI alignment.
Split = Split.second.split(':');
@@ -200,8 +229,6 @@ std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
return "invalid pointer ABI alignment, "
"must be a positive 8-bit multiple";
}
- if (td)
- td->PointerABIAlign = PointerABIAlignBits / 8;
// Pointer preferred alignment.
Split = Split.second.split(':');
@@ -210,11 +237,12 @@ std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
return "invalid pointer preferred alignment, "
"must be a positive 8-bit multiple";
}
- if (td) {
- td->PointerPrefAlign = PointerPrefAlignBits / 8;
- if (td->PointerPrefAlign == 0)
- td->PointerPrefAlign = td->PointerABIAlign;
- }
+
+ if (PointerPrefAlignBits == 0)
+ PointerPrefAlignBits = PointerABIAlignBits;
+ if (td)
+ td->setPointerAlignment(AddrSpace, PointerABIAlignBits/8,
+ PointerPrefAlignBits/8, PointerMemSizeBits/8);
break;
}
case 'i':
@@ -256,7 +284,7 @@ std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
unsigned PrefAlign = PrefAlignBits / 8;
if (PrefAlign == 0)
PrefAlign = ABIAlign;
-
+
if (td)
td->setAlignment(AlignType, ABIAlign, PrefAlign, Size);
break;
@@ -266,8 +294,8 @@ std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
do {
int Width = getInt(Specifier);
if (Width <= 0) {
- return std::string("invalid native integer size \'") + Specifier.str() +
- "\', must be a positive integer.";
+ return std::string("invalid native integer size \'") +
+ Specifier.str() + "\', must be a positive integer.";
}
if (td && Width != 0)
td->LegalIntWidths.push_back(Width);
@@ -298,24 +326,26 @@ std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
///
/// @note This has to exist, because this is a pass, but it should never be
/// used.
-TargetData::TargetData() : ImmutablePass(ID) {
- report_fatal_error("Bad TargetData ctor used. "
- "Tool did not specify a TargetData to use?");
+DataLayout::DataLayout() : ImmutablePass(ID) {
+ report_fatal_error("Bad DataLayout ctor used. "
+ "Tool did not specify a DataLayout to use?");
}
-TargetData::TargetData(const Module *M)
+DataLayout::DataLayout(const Module *M)
: ImmutablePass(ID) {
std::string errMsg = parseSpecifier(M->getDataLayout(), this);
- assert(errMsg == "" && "Module M has malformed target data layout string.");
+ assert(errMsg == "" && "Module M has malformed data layout string.");
(void)errMsg;
}
void
-TargetData::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
unsigned pref_align, uint32_t bit_width) {
assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+ assert(pref_align < (1 << 16) && "Alignment doesn't fit in bitfield");
+ assert(bit_width < (1 << 24) && "Bit width doesn't fit in bitfield");
for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
- if (Alignments[i].AlignType == align_type &&
+ if (Alignments[i].AlignType == (unsigned)align_type &&
Alignments[i].TypeBitWidth == bit_width) {
// Update the abi, preferred alignments.
Alignments[i].ABIAlign = abi_align;
@@ -324,20 +354,35 @@ TargetData::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
}
}
- Alignments.push_back(TargetAlignElem::get(align_type, abi_align,
+ Alignments.push_back(LayoutAlignElem::get(align_type, abi_align,
pref_align, bit_width));
}
+void
+DataLayout::setPointerAlignment(uint32_t addr_space, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width) {
+ assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+ DenseMap<unsigned,PointerAlignElem>::iterator val = Pointers.find(addr_space);
+ if (val == Pointers.end()) {
+ Pointers[addr_space] = PointerAlignElem::get(addr_space,
+ abi_align, pref_align, bit_width);
+ } else {
+ val->second.ABIAlign = abi_align;
+ val->second.PrefAlign = pref_align;
+ val->second.TypeBitWidth = bit_width;
+ }
+}
+
/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
-/// preferred if ABIInfo = false) the target wants for the specified datatype.
-unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
+/// preferred if ABIInfo = false) the layout wants for the specified datatype.
+unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
uint32_t BitWidth, bool ABIInfo,
Type *Ty) const {
// Check to see if we have an exact match and remember the best match we see.
int BestMatchIdx = -1;
int LargestInt = -1;
for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
- if (Alignments[i].AlignType == AlignType &&
+ if (Alignments[i].AlignType == (unsigned)AlignType &&
Alignments[i].TypeBitWidth == BitWidth)
return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign;
@@ -410,11 +455,11 @@ public:
} // end anonymous namespace
-TargetData::~TargetData() {
+DataLayout::~DataLayout() {
delete static_cast<StructLayoutMap*>(LayoutMap);
}
-const StructLayout *TargetData::getStructLayout(StructType *Ty) const {
+const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
if (!LayoutMap)
LayoutMap = new StructLayoutMap();
@@ -437,17 +482,35 @@ const StructLayout *TargetData::getStructLayout(StructType *Ty) const {
return L;
}
-std::string TargetData::getStringRepresentation() const {
+std::string DataLayout::getStringRepresentation() const {
std::string Result;
raw_string_ostream OS(Result);
- OS << (LittleEndian ? "e" : "E")
- << "-p:" << PointerMemSize*8 << ':' << PointerABIAlign*8
- << ':' << PointerPrefAlign*8
- << "-S" << StackNaturalAlign*8;
+ OS << (LittleEndian ? "e" : "E");
+ SmallVector<unsigned, 8> addrSpaces;
+ // Collect all of the known address spaces and sort them into
+ // increasing order so that we can emit the string in a cleaner,
+ // deterministic format.
+ for (DenseMap<unsigned, PointerAlignElem>::const_iterator
+ pib = Pointers.begin(), pie = Pointers.end();
+ pib != pie; ++pib) {
+ addrSpaces.push_back(pib->first);
+ }
+ std::sort(addrSpaces.begin(), addrSpaces.end());
+ for (SmallVector<unsigned, 8>::iterator asb = addrSpaces.begin(),
+ ase = addrSpaces.end(); asb != ase; ++asb) {
+ const PointerAlignElem &PI = Pointers.find(*asb)->second;
+ OS << "-p";
+ if (PI.AddressSpace) {
+ OS << PI.AddressSpace;
+ }
+ OS << ":" << PI.TypeBitWidth*8 << ':' << PI.ABIAlign*8
+ << ':' << PI.PrefAlign*8;
+ }
+ OS << "-S" << StackNaturalAlign*8;
for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
- const TargetAlignElem &AI = Alignments[i];
+ const LayoutAlignElem &AI = Alignments[i];
OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':'
<< AI.ABIAlign*8 << ':' << AI.PrefAlign*8;
}
@@ -462,12 +525,15 @@ std::string TargetData::getStringRepresentation() const {
}
-uint64_t TargetData::getTypeSizeInBits(Type *Ty) const {
+uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
case Type::LabelTyID:
- case Type::PointerTyID:
- return getPointerSizeInBits();
+ return getPointerSizeInBits(0);
+ case Type::PointerTyID: {
+ unsigned AS = cast<PointerType>(Ty)->getAddressSpace();
+ return getPointerSizeInBits(AS);
+ }
case Type::ArrayTyID: {
ArrayType *ATy = cast<ArrayType>(Ty);
return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements();
@@ -493,10 +559,12 @@ uint64_t TargetData::getTypeSizeInBits(Type *Ty) const {
// only 80 bits contain information.
case Type::X86_FP80TyID:
return 80;
- case Type::VectorTyID:
- return cast<VectorType>(Ty)->getBitWidth();
+ case Type::VectorTyID: {
+ VectorType *VTy = cast<VectorType>(Ty);
+ return VTy->getNumElements()*getTypeSizeInBits(VTy->getElementType());
+ }
default:
- llvm_unreachable("TargetData::getTypeSizeInBits(): Unsupported type");
+ llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
}
}
@@ -508,17 +576,22 @@ uint64_t TargetData::getTypeSizeInBits(Type *Ty) const {
Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
== false) for the requested type \a Ty.
*/
-unsigned TargetData::getAlignment(Type *Ty, bool abi_or_pref) const {
+unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
int AlignType = -1;
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
// Early escape for the non-numeric types.
case Type::LabelTyID:
- case Type::PointerTyID:
return (abi_or_pref
- ? getPointerABIAlignment()
- : getPointerPrefAlignment());
+ ? getPointerABIAlignment(0)
+ : getPointerPrefAlignment(0));
+ case Type::PointerTyID: {
+ unsigned AS = cast<PointerType>(Ty)->getAddressSpace();
+ return (abi_or_pref
+ ? getPointerABIAlignment(AS)
+ : getPointerPrefAlignment(AS));
+ }
case Type::ArrayTyID:
return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref);
@@ -558,18 +631,18 @@ unsigned TargetData::getAlignment(Type *Ty, bool abi_or_pref) const {
abi_or_pref, Ty);
}
-unsigned TargetData::getABITypeAlignment(Type *Ty) const {
+unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
return getAlignment(Ty, true);
}
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
-unsigned TargetData::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0);
}
-unsigned TargetData::getCallFrameTypeAlignment(Type *Ty) const {
+unsigned DataLayout::getCallFrameTypeAlignment(Type *Ty) const {
for (unsigned i = 0, e = Alignments.size(); i != e; ++i)
if (Alignments[i].AlignType == STACK_ALIGN)
return Alignments[i].ABIAlign;
@@ -577,24 +650,37 @@ unsigned TargetData::getCallFrameTypeAlignment(Type *Ty) const {
return getABITypeAlignment(Ty);
}
-unsigned TargetData::getPrefTypeAlignment(Type *Ty) const {
+unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
return getAlignment(Ty, false);
}
-unsigned TargetData::getPreferredTypeAlignmentShift(Type *Ty) const {
+unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const {
unsigned Align = getPrefTypeAlignment(Ty);
assert(!(Align & (Align-1)) && "Alignment is not a power of two!");
return Log2_32(Align);
}
-/// getIntPtrType - Return an unsigned integer type that is the same size or
-/// greater to the host pointer size.
-IntegerType *TargetData::getIntPtrType(LLVMContext &C) const {
- return IntegerType::get(C, getPointerSizeInBits());
+/// getIntPtrType - Return an integer type with size at least as big as that
+/// of a pointer in the given address space.
+IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
+ unsigned AddressSpace) const {
+ return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
}
+/// getIntPtrType - Return an integer (vector of integer) type with size at
+/// least as big as that of a pointer of the given pointer (vector of pointer)
+/// type.
+Type *DataLayout::getIntPtrType(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "Expected a pointer or pointer vector type.");
+ unsigned NumBits = getTypeSizeInBits(Ty->getScalarType());
+ IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
+ if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
+ return VectorType::get(IntTy, VecTy->getNumElements());
+ return IntTy;
+}
-uint64_t TargetData::getIndexedOffset(Type *ptrTy,
+uint64_t DataLayout::getIndexedOffset(Type *ptrTy,
ArrayRef<Value *> Indices) const {
Type *Ty = ptrTy;
assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()");
@@ -634,7 +720,7 @@ uint64_t TargetData::getIndexedOffset(Type *ptrTy,
/// getPreferredAlignment - Return the preferred alignment of the specified
/// global. This includes an explicitly requested alignment (if the global
/// has one).
-unsigned TargetData::getPreferredAlignment(const GlobalVariable *GV) const {
+unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const {
Type *ElemType = GV->getType()->getElementType();
unsigned Alignment = getPrefTypeAlignment(ElemType);
unsigned GVAlignment = GV->getAlignment();
@@ -658,6 +744,6 @@ unsigned TargetData::getPreferredAlignment(const GlobalVariable *GV) const {
/// getPreferredAlignmentLog - Return the preferred alignment of the
/// specified global, returned in log form. This includes an explicitly
/// requested alignment (if the global has one).
-unsigned TargetData::getPreferredAlignmentLog(const GlobalVariable *GV) const {
+unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const {
return Log2_32(getPreferredAlignment(GV));
}
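
Pointer layout is now keyed by address space, so a single layout string can describe differently sized pointers, and getIntPtrType follows the pointer's own space. A sketch (the layout string and address-space numbers are illustrative):

    #include "llvm/DataLayout.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    static void demoPerSpacePointers(LLVMContext &Ctx) {
      // 64-bit pointers by default, 32-bit pointers in address space 16.
      DataLayout DL("e-p:64:64:64-p16:32:32:32");
      unsigned Flat  = DL.getPointerSizeInBits(0);   // 64
      unsigned Small = DL.getPointerSizeInBits(16);  // 32
      (void)Flat; (void)Small;
      // getIntPtrType(Type*) picks the width for the pointer's own space.
      Type *P16 = PointerType::get(Type::getInt8Ty(Ctx), /*AddrSpace=*/16);
      Type *I32 = DL.getIntPtrType(P16);             // i32
      (void)I32;
    }
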
diff --git a/contrib/llvm/lib/VMCore/DebugInfo.cpp b/contrib/llvm/lib/VMCore/DebugInfo.cpp
index c8f8f7d..3029ce2 100644
--- a/contrib/llvm/lib/VMCore/DebugInfo.cpp
+++ b/contrib/llvm/lib/VMCore/DebugInfo.cpp
@@ -111,6 +111,16 @@ Function *DIDescriptor::getFunctionField(unsigned Elt) const {
return 0;
}
+void DIDescriptor::replaceFunctionField(unsigned Elt, Function *F) {
+ if (DbgNode == 0)
+ return;
+
+ if (Elt < DbgNode->getNumOperands()) {
+ MDNode *Node = const_cast<MDNode*>(DbgNode);
+ Node->replaceOperandWith(Elt, F);
+ }
+}
+
unsigned DIVariable::getNumAddrElements() const {
if (getVersion() <= LLVMDebugVersion8)
return DbgNode->getNumOperands()-6;
diff --git a/contrib/llvm/lib/VMCore/Dominators.cpp b/contrib/llvm/lib/VMCore/Dominators.cpp
index 60bdeac..77b2403 100644
--- a/contrib/llvm/lib/VMCore/Dominators.cpp
+++ b/contrib/llvm/lib/VMCore/Dominators.cpp
@@ -161,6 +161,11 @@ bool DominatorTree::dominates(const Instruction *Def,
bool DominatorTree::dominates(const BasicBlockEdge &BBE,
const BasicBlock *UseBB) const {
+ // Assert that we have a single edge. We could handle them by simply
+ // returning false, but since isSingleEdge is linear on the number of
+ // edges, the callers can normally handle them more efficiently.
+ assert(BBE.isSingleEdge());
+
// If the BB the edge ends in doesn't dominate the use BB, then the
// edge also doesn't.
const BasicBlock *Start = BBE.getStart();
@@ -207,6 +212,11 @@ bool DominatorTree::dominates(const BasicBlockEdge &BBE,
bool DominatorTree::dominates(const BasicBlockEdge &BBE,
const Use &U) const {
+ // Assert that we have a single edge. We could handle them by simply
+ // returning false, but since isSingleEdge is linear on the number of
+ // edges, the callers can normally handle them more efficiently.
+ assert(BBE.isSingleEdge());
+
Instruction *UserInst = cast<Instruction>(U.getUser());
// A PHI in the end of the edge is dominated by it.
PHINode *PN = dyn_cast<PHINode>(UserInst);
diff --git a/contrib/llvm/lib/VMCore/Function.cpp b/contrib/llvm/lib/VMCore/Function.cpp
index 2e0b316..9c4f2d9 100644
--- a/contrib/llvm/lib/VMCore/Function.cpp
+++ b/contrib/llvm/lib/VMCore/Function.cpp
@@ -78,7 +78,8 @@ unsigned Argument::getArgNo() const {
/// in its containing function.
bool Argument::hasByValAttr() const {
if (!getType()->isPointerTy()) return false;
- return getParent()->paramHasAttr(getArgNo()+1, Attribute::ByVal);
+ return getParent()->getParamAttributes(getArgNo()+1).
+ hasAttribute(Attributes::ByVal);
}
unsigned Argument::getParamAlignment() const {
@@ -91,21 +92,24 @@ unsigned Argument::getParamAlignment() const {
/// it in its containing function.
bool Argument::hasNestAttr() const {
if (!getType()->isPointerTy()) return false;
- return getParent()->paramHasAttr(getArgNo()+1, Attribute::Nest);
+ return getParent()->getParamAttributes(getArgNo()+1).
+ hasAttribute(Attributes::Nest);
}
/// hasNoAliasAttr - Return true if this argument has the noalias attribute on
/// it in its containing function.
bool Argument::hasNoAliasAttr() const {
if (!getType()->isPointerTy()) return false;
- return getParent()->paramHasAttr(getArgNo()+1, Attribute::NoAlias);
+ return getParent()->getParamAttributes(getArgNo()+1).
+ hasAttribute(Attributes::NoAlias);
}
/// hasNoCaptureAttr - Return true if this argument has the nocapture attribute
/// on it in its containing function.
bool Argument::hasNoCaptureAttr() const {
if (!getType()->isPointerTy()) return false;
- return getParent()->paramHasAttr(getArgNo()+1, Attribute::NoCapture);
+ return getParent()->getParamAttributes(getArgNo()+1).
+ hasAttribute(Attributes::NoCapture);
}
/// hasSRetAttr - Return true if this argument has the sret attribute on
@@ -114,7 +118,8 @@ bool Argument::hasStructRetAttr() const {
if (!getType()->isPointerTy()) return false;
if (this != getParent()->arg_begin())
return false; // StructRet param must be first param
- return getParent()->paramHasAttr(1, Attribute::StructRet);
+ return getParent()->getParamAttributes(1).
+ hasAttribute(Attributes::StructRet);
}
/// addAttr - Add a Attribute to an argument
@@ -180,7 +185,7 @@ Function::Function(FunctionType *Ty, LinkageTypes Linkage,
// Ensure intrinsics have the right parameter attributes.
if (unsigned IID = getIntrinsicID())
- setAttributes(Intrinsic::getAttributes(Intrinsic::ID(IID)));
+ setAttributes(Intrinsic::getAttributes(getContext(), Intrinsic::ID(IID)));
}
@@ -244,13 +249,13 @@ void Function::dropAllReferences() {
void Function::addAttribute(unsigned i, Attributes attr) {
AttrListPtr PAL = getAttributes();
- PAL = PAL.addAttr(i, attr);
+ PAL = PAL.addAttr(getContext(), i, attr);
setAttributes(PAL);
}
void Function::removeAttribute(unsigned i, Attributes attr) {
AttrListPtr PAL = getAttributes();
- PAL = PAL.removeAttr(i, attr);
+ PAL = PAL.removeAttr(getContext(), i, attr);
setAttributes(PAL);
}
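
The argument predicates above all reduce to the same two-step query; a sketch of using it directly (F is any function whose first parameter may be byval):

    #include "llvm/Attributes.h"
    #include "llvm/Function.h"
    using namespace llvm;

    // Attribute indices are 1-based for parameters; the FunctionIndex slot
    // holds function-level attributes.
    static bool firstParamIsByVal(const Function &F) {
      return F.getParamAttributes(1).hasAttribute(Attributes::ByVal);
    }
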
diff --git a/contrib/llvm/lib/VMCore/GCOV.cpp b/contrib/llvm/lib/VMCore/GCOV.cpp
index 003a5d4..ea2f0a6 100644
--- a/contrib/llvm/lib/VMCore/GCOV.cpp
+++ b/contrib/llvm/lib/VMCore/GCOV.cpp
@@ -28,19 +28,19 @@ GCOVFile::~GCOVFile() {
}
/// isGCDAFile - Return true if Format identifies a .gcda file.
-static bool isGCDAFile(GCOVFormat Format) {
- return Format == GCDA_402 || Format == GCDA_404;
+static bool isGCDAFile(GCOV::GCOVFormat Format) {
+ return Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404;
}
/// isGCNOFile - Return true if Format identifies a .gcno file.
-static bool isGCNOFile(GCOVFormat Format) {
- return Format == GCNO_402 || Format == GCNO_404;
+static bool isGCNOFile(GCOV::GCOVFormat Format) {
+ return Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404;
}
/// read - Read GCOV buffer.
bool GCOVFile::read(GCOVBuffer &Buffer) {
- GCOVFormat Format = Buffer.readGCOVFormat();
- if (Format == InvalidGCOV)
+ GCOV::GCOVFormat Format = Buffer.readGCOVFormat();
+ if (Format == GCOV::InvalidGCOV)
return false;
unsigned i = 0;
@@ -48,7 +48,7 @@ bool GCOVFile::read(GCOVBuffer &Buffer) {
GCOVFunction *GFun = NULL;
if (isGCDAFile(Format)) {
// Use existing function while reading .gcda file.
- assert (i < Functions.size() && ".gcda data does not match .gcno data");
+ assert(i < Functions.size() && ".gcda data does not match .gcno data");
GFun = Functions[i];
} else if (isGCNOFile(Format)){
GFun = new GCOVFunction();
@@ -87,21 +87,21 @@ GCOVFunction::~GCOVFunction() {
/// read - Read a function from the buffer. Return false if the buffer cursor
/// does not point to a function tag.
-bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) {
+bool GCOVFunction::read(GCOVBuffer &Buff, GCOV::GCOVFormat Format) {
if (!Buff.readFunctionTag())
return false;
Buff.readInt(); // Function header length
Ident = Buff.readInt();
Buff.readInt(); // Checksum #1
- if (Format != GCNO_402)
+ if (Format != GCOV::GCNO_402)
Buff.readInt(); // Checksum #2
Name = Buff.readString();
- if (Format == GCNO_402 || Format == GCNO_404)
+ if (Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404)
Filename = Buff.readString();
- if (Format == GCDA_402 || Format == GCDA_404) {
+ if (Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404) {
Buff.readArcTag();
uint32_t Count = Buff.readInt() / 2;
for (unsigned i = 0, e = Count; i != e; ++i) {
@@ -113,7 +113,9 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) {
LineNumber = Buff.readInt();
// read blocks.
- assert (Buff.readBlockTag() && "Block Tag not found!");
+ bool BlockTagFound = Buff.readBlockTag();
+ (void)BlockTagFound;
+ assert(BlockTagFound && "Block Tag not found!");
uint32_t BlockCount = Buff.readInt();
for (int i = 0, e = BlockCount; i != e; ++i) {
Buff.readInt(); // Block flags;
@@ -124,7 +126,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) {
while (Buff.readEdgeTag()) {
uint32_t EdgeCount = (Buff.readInt() - 1) / 2;
uint32_t BlockNo = Buff.readInt();
- assert (BlockNo < BlockCount && "Unexpected Block number!");
+ assert(BlockNo < BlockCount && "Unexpected Block number!");
for (int i = 0, e = EdgeCount; i != e; ++i) {
Blocks[BlockNo]->addEdge(Buff.readInt());
Buff.readInt(); // Edge flag
@@ -136,7 +138,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) {
uint32_t LineTableLength = Buff.readInt();
uint32_t Size = Buff.getCursor() + LineTableLength*4;
uint32_t BlockNo = Buff.readInt();
- assert (BlockNo < BlockCount && "Unexpected Block number!");
+ assert(BlockNo < BlockCount && "Unexpected Block number!");
GCOVBlock *Block = Blocks[BlockNo];
Buff.readInt(); // flag
while (Buff.getCursor() != (Size - 4)) {
diff --git a/contrib/llvm/lib/VMCore/IRBuilder.cpp b/contrib/llvm/lib/VMCore/IRBuilder.cpp
index 5c4e6d9..04f08fe 100644
--- a/contrib/llvm/lib/VMCore/IRBuilder.cpp
+++ b/contrib/llvm/lib/VMCore/IRBuilder.cpp
@@ -80,7 +80,7 @@ CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
CallInst *IRBuilderBase::
CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
- bool isVolatile, MDNode *TBAATag) {
+ bool isVolatile, MDNode *TBAATag, MDNode *TBAAStructTag) {
Dst = getCastedInt8PtrValue(Dst);
Src = getCastedInt8PtrValue(Src);
@@ -94,6 +94,10 @@ CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
// Set the TBAA info if present.
if (TBAATag)
CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ // Set the TBAA Struct info if present.
+ if (TBAAStructTag)
+ CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
return CI;
}
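
A sketch of attaching the new tag through the extended CreateMemCpy overload. The MDNode built here is only a placeholder, not a well-formed !tbaa.struct descriptor, and the size/alignment constants are illustrative:

    #include "llvm/Constants.h"
    #include "llvm/IRBuilder.h"
    #include "llvm/Metadata.h"
    using namespace llvm;

    static void copyWithStructTBAA(IRBuilder<> &Builder,
                                   Value *Dst, Value *Src) {
      LLVMContext &Ctx = Builder.getContext();
      Value *Elts[] = { ConstantInt::get(Type::getInt64Ty(Ctx), 0) };
      MDNode *StructTag = MDNode::get(Ctx, Elts);  // placeholder contents
      Builder.CreateMemCpy(Dst, Src, /*Size=*/Builder.getInt64(16),
                           /*Align=*/8, /*isVolatile=*/false,
                           /*TBAATag=*/0, /*TBAAStructTag=*/StructTag);
    }
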
diff --git a/contrib/llvm/lib/VMCore/InlineAsm.cpp b/contrib/llvm/lib/VMCore/InlineAsm.cpp
index 736e370..2e636aa 100644
--- a/contrib/llvm/lib/VMCore/InlineAsm.cpp
+++ b/contrib/llvm/lib/VMCore/InlineAsm.cpp
@@ -27,19 +27,20 @@ InlineAsm::~InlineAsm() {
InlineAsm *InlineAsm::get(FunctionType *Ty, StringRef AsmString,
StringRef Constraints, bool hasSideEffects,
- bool isAlignStack) {
- InlineAsmKeyType Key(AsmString, Constraints, hasSideEffects, isAlignStack);
+ bool isAlignStack, AsmDialect asmDialect) {
+ InlineAsmKeyType Key(AsmString, Constraints, hasSideEffects, isAlignStack,
+ asmDialect);
LLVMContextImpl *pImpl = Ty->getContext().pImpl;
return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(Ty), Key);
}
InlineAsm::InlineAsm(PointerType *Ty, const std::string &asmString,
const std::string &constraints, bool hasSideEffects,
- bool isAlignStack)
+ bool isAlignStack, AsmDialect asmDialect)
: Value(Ty, Value::InlineAsmVal),
- AsmString(asmString),
- Constraints(constraints), HasSideEffects(hasSideEffects),
- IsAlignStack(isAlignStack) {
+ AsmString(asmString), Constraints(constraints),
+ HasSideEffects(hasSideEffects), IsAlignStack(isAlignStack),
+ Dialect(asmDialect) {
// Do various checks on the constraint string and type.
assert(Verify(getFunctionType(), constraints) &&
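
A sketch of requesting Intel-dialect assembly through the widened factory; the asm text and constraint string are illustrative:

    #include "llvm/DerivedTypes.h"
    #include "llvm/InlineAsm.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    static InlineAsm *makeIntelNop(LLVMContext &Ctx) {
      FunctionType *FT =
          FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
      // The uniquing key now includes the dialect, so AT&T and Intel
      // variants of otherwise identical asm are distinct values.
      return InlineAsm::get(FT, "nop", "", /*hasSideEffects=*/true,
                            /*isAlignStack=*/false, InlineAsm::AD_Intel);
    }
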
diff --git a/contrib/llvm/lib/VMCore/Instructions.cpp b/contrib/llvm/lib/VMCore/Instructions.cpp
index 9af98e8..94bd2a1 100644
--- a/contrib/llvm/lib/VMCore/Instructions.cpp
+++ b/contrib/llvm/lib/VMCore/Instructions.cpp
@@ -332,21 +332,30 @@ CallInst::CallInst(const CallInst &CI)
void CallInst::addAttribute(unsigned i, Attributes attr) {
AttrListPtr PAL = getAttributes();
- PAL = PAL.addAttr(i, attr);
+ PAL = PAL.addAttr(getContext(), i, attr);
setAttributes(PAL);
}
void CallInst::removeAttribute(unsigned i, Attributes attr) {
AttrListPtr PAL = getAttributes();
- PAL = PAL.removeAttr(i, attr);
+ PAL = PAL.removeAttr(getContext(), i, attr);
setAttributes(PAL);
}
-bool CallInst::paramHasAttr(unsigned i, Attributes attr) const {
- if (AttributeList.paramHasAttr(i, attr))
+bool CallInst::hasFnAttr(Attributes::AttrVal A) const {
+ if (AttributeList.getParamAttributes(AttrListPtr::FunctionIndex)
+ .hasAttribute(A))
return true;
if (const Function *F = getCalledFunction())
- return F->paramHasAttr(i, attr);
+ return F->getParamAttributes(AttrListPtr::FunctionIndex).hasAttribute(A);
+ return false;
+}
+
+bool CallInst::paramHasAttr(unsigned i, Attributes::AttrVal A) const {
+ if (AttributeList.getParamAttributes(i).hasAttribute(A))
+ return true;
+ if (const Function *F = getCalledFunction())
+ return F->getParamAttributes(i).hasAttribute(A);
return false;
}
@@ -562,23 +571,32 @@ void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) {
return setSuccessor(idx, B);
}
-bool InvokeInst::paramHasAttr(unsigned i, Attributes attr) const {
- if (AttributeList.paramHasAttr(i, attr))
+bool InvokeInst::hasFnAttr(Attributes::AttrVal A) const {
+ if (AttributeList.getParamAttributes(AttrListPtr::FunctionIndex).
+ hasAttribute(A))
return true;
if (const Function *F = getCalledFunction())
- return F->paramHasAttr(i, attr);
+ return F->getParamAttributes(AttrListPtr::FunctionIndex).hasAttribute(A);
+ return false;
+}
+
+bool InvokeInst::paramHasAttr(unsigned i, Attributes::AttrVal A) const {
+ if (AttributeList.getParamAttributes(i).hasAttribute(A))
+ return true;
+ if (const Function *F = getCalledFunction())
+ return F->getParamAttributes(i).hasAttribute(A);
return false;
}
void InvokeInst::addAttribute(unsigned i, Attributes attr) {
AttrListPtr PAL = getAttributes();
- PAL = PAL.addAttr(i, attr);
+ PAL = PAL.addAttr(getContext(), i, attr);
setAttributes(PAL);
}
void InvokeInst::removeAttribute(unsigned i, Attributes attr) {
AttrListPtr PAL = getAttributes();
- PAL = PAL.removeAttr(i, attr);
+ PAL = PAL.removeAttr(getContext(), i, attr);
setAttributes(PAL);
}
@@ -1381,18 +1399,6 @@ Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList) {
return getIndexedTypeInternal(Ptr, IdxList);
}
-unsigned GetElementPtrInst::getAddressSpace(Value *Ptr) {
- Type *Ty = Ptr->getType();
-
- if (VectorType *VTy = dyn_cast<VectorType>(Ty))
- Ty = VTy->getElementType();
-
- if (PointerType *PTy = dyn_cast<PointerType>(Ty))
- return PTy->getAddressSpace();
-
- llvm_unreachable("Invalid GEP pointer type");
-}
-
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
@@ -2112,7 +2118,8 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const {
/// If no such cast is permited, the function returns 0.
unsigned CastInst::isEliminableCastPair(
Instruction::CastOps firstOp, Instruction::CastOps secondOp,
- Type *SrcTy, Type *MidTy, Type *DstTy, Type *IntPtrTy) {
+ Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
+ Type *DstIntPtrTy) {
// Define the 144 possibilities for these two cast instructions. The values
// in this matrix determine what to do in a given situation and select the
// case in the switch below. The rows correspond to firstOp, the columns
@@ -2215,9 +2222,9 @@ unsigned CastInst::isEliminableCastPair(
return 0;
case 7: {
// ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size
- if (!IntPtrTy)
+ if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
return 0;
- unsigned PtrSize = IntPtrTy->getScalarSizeInBits();
+ unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
unsigned MidSize = MidTy->getScalarSizeInBits();
if (MidSize >= PtrSize)
return Instruction::BitCast;
@@ -2256,9 +2263,9 @@ unsigned CastInst::isEliminableCastPair(
return 0;
case 13: {
// inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
- if (!IntPtrTy)
+ if (!MidIntPtrTy)
return 0;
- unsigned PtrSize = IntPtrTy->getScalarSizeInBits();
+ unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
unsigned SrcSize = SrcTy->getScalarSizeInBits();
unsigned DstSize = DstTy->getScalarSizeInBits();
if (SrcSize <= PtrSize && SrcSize == DstSize)
@@ -2836,7 +2843,7 @@ BitCastInst::BitCastInst(
// CmpInst Classes
//===----------------------------------------------------------------------===//
-void CmpInst::Anchor() const {}
+void CmpInst::anchor() {}
CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate,
Value *LHS, Value *RHS, const Twine &Name,
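
The pair-elimination query now takes one integer-pointer type per position instead of a single one, so mismatched pointer widths block the fold. A sketch for the ptrtoint/inttoptr case handled above (types are illustrative):

    #include "llvm/DerivedTypes.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    static bool foldsToBitCast(LLVMContext &Ctx) {
      Type *I8Ptr = Type::getInt8PtrTy(Ctx);
      Type *I64   = Type::getInt64Ty(Ctx);
      // ptrtoint i8* to i64, then inttoptr i64 to i8*: foldable to a
      // bitcast only because source and destination pointers share the
      // same 64-bit integer-pointer type and the middle type is as wide.
      unsigned Opc = CastInst::isEliminableCastPair(
          Instruction::PtrToInt, Instruction::IntToPtr,
          /*SrcTy=*/I8Ptr, /*MidTy=*/I64, /*DstTy=*/I8Ptr,
          /*SrcIntPtrTy=*/I64, /*MidIntPtrTy=*/0, /*DstIntPtrTy=*/I64);
      return Opc == Instruction::BitCast;
    }
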
diff --git a/contrib/llvm/lib/VMCore/LLVMContext.cpp b/contrib/llvm/lib/VMCore/LLVMContext.cpp
index f07f0b3..2446ec9 100644
--- a/contrib/llvm/lib/VMCore/LLVMContext.cpp
+++ b/contrib/llvm/lib/VMCore/LLVMContext.cpp
@@ -53,6 +53,11 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
unsigned RangeID = getMDKindID("range");
assert(RangeID == MD_range && "range kind id drifted");
(void)RangeID;
+
+ // Create the 'tbaa.struct' metadata kind.
+ unsigned TBAAStructID = getMDKindID("tbaa.struct");
+ assert(TBAAStructID == MD_tbaa_struct && "tbaa.struct kind id drifted");
+ (void)TBAAStructID;
}
LLVMContext::~LLVMContext() { delete pImpl; }
diff --git a/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp b/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
index 6279bb8..d35d284 100644
--- a/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
+++ b/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "LLVMContextImpl.h"
+#include "llvm/Attributes.h"
#include "llvm/Module.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
@@ -93,7 +94,21 @@ LLVMContextImpl::~LLVMContextImpl() {
E = CDSConstants.end(); I != E; ++I)
delete I->second;
CDSConstants.clear();
-
+
+ // Destroy attributes.
+ for (FoldingSetIterator<AttributesImpl> I = AttrsSet.begin(),
+ E = AttrsSet.end(); I != E; ) {
+ FoldingSetIterator<AttributesImpl> Elem = I++;
+ delete &*Elem;
+ }
+
+ // Destroy attribute lists.
+ for (FoldingSetIterator<AttributeListImpl> I = AttrsLists.begin(),
+ E = AttrsLists.end(); I != E; ) {
+ FoldingSetIterator<AttributeListImpl> Elem = I++;
+ delete &*Elem;
+ }
+
// Destroy MDNodes. ~MDNode can move and remove nodes between the MDNodeSet
// and the NonUniquedMDNodes sets, so copy the values out first.
SmallVector<MDNode*, 8> MDNodes;
@@ -107,6 +122,7 @@ LLVMContextImpl::~LLVMContextImpl() {
(*I)->destroy();
assert(MDNodeSet.empty() && NonUniquedMDNodes.empty() &&
"Destroying all MDNodes didn't empty the Context's sets.");
+
// Destroy MDStrings.
DeleteContainerSeconds(MDStringCache);
}
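
The two teardown loops above use the delete-while-iterating idiom: copy the iterator, advance the live one, then delete through the copy so traversal never touches a destroyed node. In isolation (generic sketch over any FoldingSet node type):

    #include "llvm/ADT/FoldingSet.h"

    template <typename T> void deleteAllNodes(llvm::FoldingSet<T> &Set) {
      for (llvm::FoldingSetIterator<T> I = Set.begin(), E = Set.end(); I != E; ) {
        llvm::FoldingSetIterator<T> Elem = I++; // advance before deleting
        delete &*Elem;
      }
    }
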
diff --git a/contrib/llvm/lib/VMCore/LLVMContextImpl.h b/contrib/llvm/lib/VMCore/LLVMContextImpl.h
index 2252028..90cf424 100644
--- a/contrib/llvm/lib/VMCore/LLVMContextImpl.h
+++ b/contrib/llvm/lib/VMCore/LLVMContextImpl.h
@@ -16,6 +16,7 @@
#define LLVM_LLVMCONTEXT_IMPL_H
#include "llvm/LLVMContext.h"
+#include "AttributesImpl.h"
#include "ConstantsContext.h"
#include "LeaksContext.h"
#include "llvm/Constants.h"
@@ -253,10 +254,14 @@ public:
typedef DenseMap<DenseMapAPFloatKeyInfo::KeyTy, ConstantFP*,
DenseMapAPFloatKeyInfo> FPMapTy;
FPMapTy FPConstants;
-
+
+ FoldingSet<AttributesImpl> AttrsSet;
+ FoldingSet<AttributeListImpl> AttrsLists;
+
StringMap<Value*> MDStringCache;
-
+
FoldingSet<MDNode> MDNodeSet;
+
// MDNodes may be uniqued or not uniqued. When they're not uniqued, they
// aren't in the MDNodeSet, but they're still shared between objects, so no
// one object can destroy them. This set allows us to at least destroy them
diff --git a/contrib/llvm/lib/VMCore/PassManager.cpp b/contrib/llvm/lib/VMCore/PassManager.cpp
index 4530c04..53f1149 100644
--- a/contrib/llvm/lib/VMCore/PassManager.cpp
+++ b/contrib/llvm/lib/VMCore/PassManager.cpp
@@ -1189,7 +1189,7 @@ void PMDataManager::dumpAnalysisUsage(StringRef Msg, const Pass *P,
assert(PassDebugging >= Details);
if (Set.empty())
return;
- dbgs() << (void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:";
+ dbgs() << (const void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:";
for (unsigned i = 0; i != Set.size(); ++i) {
if (i) dbgs() << ',';
const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(Set[i]);
diff --git a/contrib/llvm/lib/VMCore/TargetTransformInfo.cpp b/contrib/llvm/lib/VMCore/TargetTransformInfo.cpp
new file mode 100644
index 0000000..e91c29c
--- /dev/null
+++ b/contrib/llvm/lib/VMCore/TargetTransformInfo.cpp
@@ -0,0 +1,31 @@
+//===- llvm/VMCore/TargetTransformInfo.cpp ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+/// Default ctor.
+///
+/// @note This has to exist, because this is a pass, but it should never be
+/// used.
+TargetTransformInfo::TargetTransformInfo() : ImmutablePass(ID) {
+ /// You are seeing this error because your pass required the TTI
+ /// using a call to "getAnalysis<TargetTransformInfo>()", and you did
+ /// not initialize a machine target which can provide the TTI.
+ /// You should use "getAnalysisIfAvailable<TargetTransformInfo>()" instead.
+ report_fatal_error("Bad TargetTransformInfo ctor used. "
+ "Tool did not specify a TargetTransformInfo to use?");
+}
+
+INITIALIZE_PASS(TargetTransformInfo, "targettransforminfo",
+ "Target Transform Info", false, true)
+char TargetTransformInfo::ID = 0;
+
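
The comment in the constructor prescribes probing for the analysis rather than requiring it. A hypothetical client pass showing that pattern (the pass itself is illustrative, not part of the patch):

    #include "llvm/Pass.h"
    #include "llvm/TargetTransformInfo.h"
    using namespace llvm;

    namespace {
    struct TTIUser : FunctionPass {
      static char ID;
      TTIUser() : FunctionPass(ID) {}
      virtual bool runOnFunction(Function &F) {
        // getAnalysis<TargetTransformInfo>() would trip the fatal-error
        // constructor when no target wired in a TTI; probing degrades
        // gracefully instead.
        if (TargetTransformInfo *TTI =
                getAnalysisIfAvailable<TargetTransformInfo>())
          (void)TTI; // consult target cost queries here
        return false;
      }
    };
    char TTIUser::ID = 0;
    }
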
diff --git a/contrib/llvm/lib/VMCore/Type.cpp b/contrib/llvm/lib/VMCore/Type.cpp
index 5e9a00f..1656ab2 100644
--- a/contrib/llvm/lib/VMCore/Type.cpp
+++ b/contrib/llvm/lib/VMCore/Type.cpp
@@ -47,35 +47,17 @@ Type *Type::getScalarType() {
return this;
}
+const Type *Type::getScalarType() const {
+ if (const VectorType *VTy = dyn_cast<VectorType>(this))
+ return VTy->getElementType();
+ return this;
+}
+
/// isIntegerTy - Return true if this is an IntegerType of the specified width.
bool Type::isIntegerTy(unsigned Bitwidth) const {
return isIntegerTy() && cast<IntegerType>(this)->getBitWidth() == Bitwidth;
}
-/// isIntOrIntVectorTy - Return true if this is an integer type or a vector of
-/// integer types.
-///
-bool Type::isIntOrIntVectorTy() const {
- if (isIntegerTy())
- return true;
- if (getTypeID() != Type::VectorTyID) return false;
-
- return cast<VectorType>(this)->getElementType()->isIntegerTy();
-}
-
-/// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP types.
-///
-bool Type::isFPOrFPVectorTy() const {
- if (getTypeID() == Type::HalfTyID || getTypeID() == Type::FloatTyID ||
- getTypeID() == Type::DoubleTyID ||
- getTypeID() == Type::FP128TyID || getTypeID() == Type::X86_FP80TyID ||
- getTypeID() == Type::PPC_FP128TyID)
- return true;
- if (getTypeID() != Type::VectorTyID) return false;
-
- return cast<VectorType>(this)->getElementType()->isFloatingPointTy();
-}
-
// canLosslesslyBitCastTo - Return true if this type can be converted to
// 'Ty' without any reinterpretation of bits. For example, i8* to i32*.
//
@@ -220,8 +202,6 @@ Type *Type::getStructElementType(unsigned N) const {
return cast<StructType>(this)->getElementType(N);
}
-
-
Type *Type::getSequentialElementType() const {
return cast<SequentialType>(this)->getElementType();
}
@@ -235,12 +215,10 @@ unsigned Type::getVectorNumElements() const {
}
unsigned Type::getPointerAddressSpace() const {
- return cast<PointerType>(this)->getAddressSpace();
+ return cast<PointerType>(getScalarType())->getAddressSpace();
}
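
Routing getPointerAddressSpace() through getScalarType() lets vectors of pointers answer the query by delegating to their element type. A sketch of the observable effect (assumed 3.2-era API):

    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    void demoAddrSpace(LLVMContext &Ctx) {
      PointerType *P = PointerType::get(Type::getInt8Ty(Ctx), 1);
      VectorType *VP = VectorType::get(P, 4); // <4 x i8 addrspace(1)*>
      // Previously the second call would have failed the cast to PointerType;
      // both now return address space 1.
      unsigned A = P->getPointerAddressSpace();
      unsigned B = VP->getPointerAddressSpace();
      (void)A; (void)B;
    }
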
-
-
//===----------------------------------------------------------------------===//
// Primitive 'Type' data
//===----------------------------------------------------------------------===//
@@ -400,12 +378,10 @@ FunctionType *FunctionType::get(Type *ReturnType,
return FT;
}
-
FunctionType *FunctionType::get(Type *Result, bool isVarArg) {
return get(Result, ArrayRef<Type *>(), isVarArg);
}
-
/// isValidReturnType - Return true if the specified type is valid as a return
/// type.
bool FunctionType::isValidReturnType(Type *RetTy) {
@@ -553,7 +529,6 @@ StructType *StructType::create(LLVMContext &Context) {
return create(Context, StringRef());
}
-
StructType *StructType::create(ArrayRef<Type*> Elements, StringRef Name,
bool isPacked) {
assert(!Elements.empty() &&
@@ -637,7 +612,6 @@ bool StructType::isLayoutIdentical(StructType *Other) const {
return std::equal(element_begin(), element_end(), Other->element_begin());
}
-
/// getTypeByName - Return the type with the specified name, or null if there
/// is none by that name.
StructType *Module::getTypeByName(StringRef Name) const {
@@ -700,7 +674,6 @@ ArrayType::ArrayType(Type *ElType, uint64_t NumEl)
NumElements = NumEl;
}
-
ArrayType *ArrayType::get(Type *elementType, uint64_t NumElements) {
Type *ElementType = const_cast<Type*>(elementType);
assert(isValidElementType(ElementType) && "Invalid type for array element!");
diff --git a/contrib/llvm/lib/VMCore/User.cpp b/contrib/llvm/lib/VMCore/User.cpp
index 5f35ce4..e847ce6 100644
--- a/contrib/llvm/lib/VMCore/User.cpp
+++ b/contrib/llvm/lib/VMCore/User.cpp
@@ -10,6 +10,7 @@
#include "llvm/Constant.h"
#include "llvm/GlobalValue.h"
#include "llvm/User.h"
+#include "llvm/Operator.h"
namespace llvm {
@@ -78,4 +79,12 @@ void User::operator delete(void *Usr) {
::operator delete(Storage);
}
+//===----------------------------------------------------------------------===//
+// Operator Class
+//===----------------------------------------------------------------------===//
+
+Operator::~Operator() {
+ llvm_unreachable("should never destroy an Operator");
+}
+
} // End llvm namespace
diff --git a/contrib/llvm/lib/VMCore/Value.cpp b/contrib/llvm/lib/VMCore/Value.cpp
index d871108..8d0720d 100644
--- a/contrib/llvm/lib/VMCore/Value.cpp
+++ b/contrib/llvm/lib/VMCore/Value.cpp
@@ -394,7 +394,7 @@ static bool isDereferenceablePointer(const Value *V,
// It's also not always safe to follow a bitcast, for example:
// bitcast i8* (alloca i8) to i32*
// would result in a 4-byte load from a 1-byte alloca. Some cases could
- // be handled using TargetData to check sizes and alignments though.
+ // be handled using DataLayout to check sizes and alignments though.
// These are obviously ok.
if (isa<AllocaInst>(V)) return true;
diff --git a/contrib/llvm/lib/VMCore/ValueTypes.cpp b/contrib/llvm/lib/VMCore/ValueTypes.cpp
index d1ca953..2ee9f0f 100644
--- a/contrib/llvm/lib/VMCore/ValueTypes.cpp
+++ b/contrib/llvm/lib/VMCore/ValueTypes.cpp
@@ -55,24 +55,32 @@ bool EVT::isExtendedVector() const {
return LLVMTy->isVectorTy();
}
+bool EVT::isExtended16BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 16;
+}
+
+bool EVT::isExtended32BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 32;
+}
+
bool EVT::isExtended64BitVector() const {
- return isExtendedVector() && getSizeInBits() == 64;
+ return isExtendedVector() && getExtendedSizeInBits() == 64;
}
bool EVT::isExtended128BitVector() const {
- return isExtendedVector() && getSizeInBits() == 128;
+ return isExtendedVector() && getExtendedSizeInBits() == 128;
}
bool EVT::isExtended256BitVector() const {
- return isExtendedVector() && getSizeInBits() == 256;
+ return isExtendedVector() && getExtendedSizeInBits() == 256;
}
bool EVT::isExtended512BitVector() const {
- return isExtendedVector() && getSizeInBits() == 512;
+ return isExtendedVector() && getExtendedSizeInBits() == 512;
}
bool EVT::isExtended1024BitVector() const {
- return isExtendedVector() && getSizeInBits() == 1024;
+ return isExtendedVector() && getExtendedSizeInBits() == 1024;
}
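
These predicates apply to "extended" EVTs, i.e. vector types with no simple MVT behind them, so they can call getExtendedSizeInBits() directly. An illustrative case (assuming v1i128 has no simple MVT in this era):

    #include "llvm/CodeGen/ValueTypes.h"
    using namespace llvm;

    void demoExtendedEVT(LLVMContext &Ctx) {
      EVT VT = EVT::getVectorVT(Ctx, MVT::i128, 1); // extended: no MVT::v1i128
      bool Ext   = VT.isExtendedVector();           // true
      bool Is128 = VT.isExtended128BitVector();     // 1 * 128 == 128 -> true
      (void)Ext; (void)Is128;
    }
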
EVT EVT::getExtendedVectorElementType() const {
@@ -120,15 +128,21 @@ std::string EVT::getEVTString() const {
case MVT::Other: return "ch";
case MVT::Glue: return "glue";
case MVT::x86mmx: return "x86mmx";
+ case MVT::v2i1: return "v2i1";
+ case MVT::v4i1: return "v4i1";
+ case MVT::v8i1: return "v8i1";
+ case MVT::v16i1: return "v16i1";
case MVT::v2i8: return "v2i8";
case MVT::v4i8: return "v4i8";
case MVT::v8i8: return "v8i8";
case MVT::v16i8: return "v16i8";
case MVT::v32i8: return "v32i8";
+ case MVT::v1i16: return "v1i16";
case MVT::v2i16: return "v2i16";
case MVT::v4i16: return "v4i16";
case MVT::v8i16: return "v8i16";
case MVT::v16i16: return "v16i16";
+ case MVT::v1i32: return "v1i32";
case MVT::v2i32: return "v2i32";
case MVT::v4i32: return "v4i32";
case MVT::v8i32: return "v8i32";
@@ -171,15 +185,21 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
case MVT::f128: return Type::getFP128Ty(Context);
case MVT::ppcf128: return Type::getPPC_FP128Ty(Context);
case MVT::x86mmx: return Type::getX86_MMXTy(Context);
+ case MVT::v2i1: return VectorType::get(Type::getInt1Ty(Context), 2);
+ case MVT::v4i1: return VectorType::get(Type::getInt1Ty(Context), 4);
+ case MVT::v8i1: return VectorType::get(Type::getInt1Ty(Context), 8);
+ case MVT::v16i1: return VectorType::get(Type::getInt1Ty(Context), 16);
case MVT::v2i8: return VectorType::get(Type::getInt8Ty(Context), 2);
case MVT::v4i8: return VectorType::get(Type::getInt8Ty(Context), 4);
case MVT::v8i8: return VectorType::get(Type::getInt8Ty(Context), 8);
case MVT::v16i8: return VectorType::get(Type::getInt8Ty(Context), 16);
case MVT::v32i8: return VectorType::get(Type::getInt8Ty(Context), 32);
+ case MVT::v1i16: return VectorType::get(Type::getInt16Ty(Context), 1);
case MVT::v2i16: return VectorType::get(Type::getInt16Ty(Context), 2);
case MVT::v4i16: return VectorType::get(Type::getInt16Ty(Context), 4);
case MVT::v8i16: return VectorType::get(Type::getInt16Ty(Context), 8);
case MVT::v16i16: return VectorType::get(Type::getInt16Ty(Context), 16);
+ case MVT::v1i32: return VectorType::get(Type::getInt32Ty(Context), 1);
case MVT::v2i32: return VectorType::get(Type::getInt32Ty(Context), 2);
case MVT::v4i32: return VectorType::get(Type::getInt32Ty(Context), 4);
case MVT::v8i32: return VectorType::get(Type::getInt32Ty(Context), 8);
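
With the new entries, the freshly listed simple types round-trip through IR. Continuing the previous sketch's includes:

    void demoNewSimpleTypes(LLVMContext &Ctx) {
      EVT VT = MVT::v4i1;                          // newly listed simple type
      Type *Ty = VT.getTypeForEVT(Ctx);            // VectorType: <4 x i1>
      bool RoundTrips = (EVT::getEVT(Ty) == VT);   // true
      (void)Ty; (void)RoundTrips;
    }
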
diff --git a/contrib/llvm/lib/VMCore/Verifier.cpp b/contrib/llvm/lib/VMCore/Verifier.cpp
index 38914b3..eb40b09 100644
--- a/contrib/llvm/lib/VMCore/Verifier.cpp
+++ b/contrib/llvm/lib/VMCore/Verifier.cpp
@@ -400,8 +400,8 @@ void Verifier::visitGlobalValue(GlobalValue &GV) {
"Only global arrays can have appending linkage!", GVar);
}
- Assert1(!GV.hasLinkerPrivateWeakDefAutoLinkage() || GV.hasDefaultVisibility(),
- "linker_private_weak_def_auto can only have default visibility!",
+ Assert1(!GV.hasLinkOnceODRAutoHideLinkage() || GV.hasDefaultVisibility(),
+ "linkonce_odr_auto_hide can only have default visibility!",
&GV);
}
@@ -526,40 +526,60 @@ void Verifier::visitMDNode(MDNode &MD, Function *F) {
// value of the specified type. The value V is printed in error messages.
void Verifier::VerifyParameterAttrs(Attributes Attrs, Type *Ty,
bool isReturnValue, const Value *V) {
- if (Attrs == Attribute::None)
+ if (!Attrs.hasAttributes())
return;
- Attributes FnCheckAttr = Attrs & Attribute::FunctionOnly;
- Assert1(!FnCheckAttr, "Attribute " + Attribute::getAsString(FnCheckAttr) +
- " only applies to the function!", V);
-
- if (isReturnValue) {
- Attributes RetI = Attrs & Attribute::ParameterOnly;
- Assert1(!RetI, "Attribute " + Attribute::getAsString(RetI) +
- " does not apply to return values!", V);
- }
-
- for (unsigned i = 0;
- i < array_lengthof(Attribute::MutuallyIncompatible); ++i) {
- Attributes MutI = Attrs & Attribute::MutuallyIncompatible[i];
- Assert1(MutI.isEmptyOrSingleton(), "Attributes " +
- Attribute::getAsString(MutI) + " are incompatible!", V);
- }
-
- Attributes TypeI = Attrs & Attribute::typeIncompatible(Ty);
- Assert1(!TypeI, "Wrong type for attribute " +
- Attribute::getAsString(TypeI), V);
-
- Attributes ByValI = Attrs & Attribute::ByVal;
- if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
- Assert1(!ByValI || PTy->getElementType()->isSized(),
- "Attribute " + Attribute::getAsString(ByValI) +
- " does not support unsized types!", V);
- } else {
- Assert1(!ByValI,
- "Attribute " + Attribute::getAsString(ByValI) +
- " only applies to parameters with pointer type!", V);
- }
+ Assert1(!Attrs.hasFunctionOnlyAttrs(),
+ "Some attributes in '" + Attrs.getAsString() +
+ "' only apply to functions!", V);
+
+ if (isReturnValue)
+ Assert1(!Attrs.hasParameterOnlyAttrs(),
+ "Attributes 'byval', 'nest', 'sret', and 'nocapture' "
+ "do not apply to return values!", V);
+
+ // Check for mutually incompatible attributes.
+ Assert1(!((Attrs.hasAttribute(Attributes::ByVal) &&
+ Attrs.hasAttribute(Attributes::Nest)) ||
+ (Attrs.hasAttribute(Attributes::ByVal) &&
+ Attrs.hasAttribute(Attributes::StructRet)) ||
+ (Attrs.hasAttribute(Attributes::Nest) &&
+ Attrs.hasAttribute(Attributes::StructRet))), "Attributes "
+ "'byval, nest, and sret' are incompatible!", V);
+
+ Assert1(!((Attrs.hasAttribute(Attributes::ByVal) &&
+ Attrs.hasAttribute(Attributes::Nest)) ||
+ (Attrs.hasAttribute(Attributes::ByVal) &&
+ Attrs.hasAttribute(Attributes::InReg)) ||
+ (Attrs.hasAttribute(Attributes::Nest) &&
+ Attrs.hasAttribute(Attributes::InReg))), "Attributes "
+ "'byval, nest, and inreg' are incompatible!", V);
+
+ Assert1(!(Attrs.hasAttribute(Attributes::ZExt) &&
+ Attrs.hasAttribute(Attributes::SExt)), "Attributes "
+ "'zeroext and signext' are incompatible!", V);
+
+ Assert1(!(Attrs.hasAttribute(Attributes::ReadNone) &&
+ Attrs.hasAttribute(Attributes::ReadOnly)), "Attributes "
+ "'readnone and readonly' are incompatible!", V);
+
+ Assert1(!(Attrs.hasAttribute(Attributes::NoInline) &&
+ Attrs.hasAttribute(Attributes::AlwaysInline)), "Attributes "
+ "'noinline and alwaysinline' are incompatible!", V);
+
+ Assert1(!AttrBuilder(Attrs).
+ hasAttributes(Attributes::typeIncompatible(Ty)),
+ "Wrong types for attribute: " +
+ Attributes::typeIncompatible(Ty).getAsString(), V);
+
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+ Assert1(!Attrs.hasAttribute(Attributes::ByVal) ||
+ PTy->getElementType()->isSized(),
+ "Attribute 'byval' does not support unsized types!", V);
+ else
+ Assert1(!Attrs.hasAttribute(Attributes::ByVal),
+ "Attribute 'byval' only applies to parameters with pointer type!",
+ V);
}
// VerifyFunctionAttrs - Check parameter attributes against a function type.
@@ -585,26 +605,50 @@ void Verifier::VerifyFunctionAttrs(FunctionType *FT,
VerifyParameterAttrs(Attr.Attrs, Ty, Attr.Index == 0, V);
- if (Attr.Attrs & Attribute::Nest) {
+ if (Attr.Attrs.hasAttribute(Attributes::Nest)) {
Assert1(!SawNest, "More than one parameter has attribute nest!", V);
SawNest = true;
}
- if (Attr.Attrs & Attribute::StructRet)
+ if (Attr.Attrs.hasAttribute(Attributes::StructRet))
Assert1(Attr.Index == 1, "Attribute sret not on first parameter!", V);
}
Attributes FAttrs = Attrs.getFnAttributes();
- Attributes NotFn = FAttrs & (~Attribute::FunctionOnly);
- Assert1(!NotFn, "Attribute " + Attribute::getAsString(NotFn) +
- " does not apply to the function!", V);
-
- for (unsigned i = 0;
- i < array_lengthof(Attribute::MutuallyIncompatible); ++i) {
- Attributes MutI = FAttrs & Attribute::MutuallyIncompatible[i];
- Assert1(MutI.isEmptyOrSingleton(), "Attributes " +
- Attribute::getAsString(MutI) + " are incompatible!", V);
- }
+ AttrBuilder NotFn(FAttrs);
+ NotFn.removeFunctionOnlyAttrs();
+ Assert1(!NotFn.hasAttributes(), "Attributes '" +
+ Attributes::get(V->getContext(), NotFn).getAsString() +
+ "' do not apply to the function!", V);
+
+ // Check for mutually incompatible attributes.
+ Assert1(!((FAttrs.hasAttribute(Attributes::ByVal) &&
+ FAttrs.hasAttribute(Attributes::Nest)) ||
+ (FAttrs.hasAttribute(Attributes::ByVal) &&
+ FAttrs.hasAttribute(Attributes::StructRet)) ||
+ (FAttrs.hasAttribute(Attributes::Nest) &&
+ FAttrs.hasAttribute(Attributes::StructRet))), "Attributes "
+ "'byval, nest, and sret' are incompatible!", V);
+
+ Assert1(!((FAttrs.hasAttribute(Attributes::ByVal) &&
+ FAttrs.hasAttribute(Attributes::Nest)) ||
+ (FAttrs.hasAttribute(Attributes::ByVal) &&
+ FAttrs.hasAttribute(Attributes::InReg)) ||
+ (FAttrs.hasAttribute(Attributes::Nest) &&
+ FAttrs.hasAttribute(Attributes::InReg))), "Attributes "
+ "'byval, nest, and inreg' are incompatible!", V);
+
+ Assert1(!(FAttrs.hasAttribute(Attributes::ZExt) &&
+ FAttrs.hasAttribute(Attributes::SExt)), "Attributes "
+ "'zeroext and signext' are incompatible!", V);
+
+ Assert1(!(FAttrs.hasAttribute(Attributes::ReadNone) &&
+ FAttrs.hasAttribute(Attributes::ReadOnly)), "Attributes "
+ "'readnone and readonly' are incompatible!", V);
+
+ Assert1(!(FAttrs.hasAttribute(Attributes::NoInline) &&
+ FAttrs.hasAttribute(Attributes::AlwaysInline)), "Attributes "
+ "'noinline and alwaysinline' are incompatible!", V);
}
static bool VerifyAttributeCount(const AttrListPtr &Attrs, unsigned Params) {
@@ -661,6 +705,7 @@ void Verifier::visitFunction(Function &F) {
case CallingConv::Cold:
case CallingConv::X86_FastCall:
case CallingConv::X86_ThisCall:
+ case CallingConv::Intel_OCL_BI:
case CallingConv::PTX_Kernel:
case CallingConv::PTX_Device:
Assert1(!F.isVarArg(),
@@ -1170,9 +1215,8 @@ void Verifier::VerifyCallSite(CallSite CS) {
VerifyParameterAttrs(Attr, CS.getArgument(Idx-1)->getType(), false, I);
- Attributes VArgI = Attr & Attribute::VarArgsIncompatible;
- Assert1(!VArgI, "Attribute " + Attribute::getAsString(VArgI) +
- " cannot be used for vararg call arguments!", I);
+ Assert1(!Attr.hasIncompatibleWithVarArgsAttrs(),
+ "Attribute 'sret' cannot be used for vararg call arguments!", I);
}
// Verify that there's no metadata unless it's a direct call to an intrinsic.
@@ -1378,6 +1422,15 @@ void Verifier::visitLoadInst(LoadInst &LI) {
"Load cannot have Release ordering", &LI);
Assert1(LI.getAlignment() != 0,
"Atomic load must specify explicit alignment", &LI);
+ if (!ElTy->isPointerTy()) {
+ Assert2(ElTy->isIntegerTy(),
+ "atomic store operand must have integer type!",
+ &LI, ElTy);
+ unsigned Size = ElTy->getPrimitiveSizeInBits();
+ Assert2(Size >= 8 && !(Size & (Size - 1)),
+ "atomic store operand must be power-of-two byte-sized integer",
+ &LI, ElTy);
+ }
} else {
Assert1(LI.getSynchScope() == CrossThread,
"Non-atomic load cannot have SynchronizationScope specified", &LI);
@@ -1444,6 +1497,15 @@ void Verifier::visitStoreInst(StoreInst &SI) {
"Store cannot have Acquire ordering", &SI);
Assert1(SI.getAlignment() != 0,
"Atomic store must specify explicit alignment", &SI);
+ if (!ElTy->isPointerTy()) {
+ Assert2(ElTy->isIntegerTy(),
+ "atomic store operand must have integer type!",
+ &SI, ElTy);
+ unsigned Size = ElTy->getPrimitiveSizeInBits();
+ Assert2(Size >= 8 && !(Size & (Size - 1)),
+ "atomic store operand must be power-of-two byte-sized integer",
+ &SI, ElTy);
+ }
} else {
Assert1(SI.getSynchScope() == CrossThread,
"Non-atomic store cannot have SynchronizationScope specified", &SI);
@@ -1471,6 +1533,13 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI);
Type *ElTy = PTy->getElementType();
+ Assert2(ElTy->isIntegerTy(),
+ "cmpxchg operand must have integer type!",
+ &CXI, ElTy);
+ unsigned Size = ElTy->getPrimitiveSizeInBits();
+ Assert2(Size >= 8 && !(Size & (Size - 1)),
+ "cmpxchg operand must be power-of-two byte-sized integer",
+ &CXI, ElTy);
Assert2(ElTy == CXI.getOperand(1)->getType(),
"Expected value type does not match pointer operand type!",
&CXI, ElTy);
@@ -1488,6 +1557,13 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
Assert1(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
Type *ElTy = PTy->getElementType();
+ Assert2(ElTy->isIntegerTy(),
+ "atomicrmw operand must have integer type!",
+ &RMWI, ElTy);
+ unsigned Size = ElTy->getPrimitiveSizeInBits();
+ Assert2(Size >= 8 && !(Size & (Size - 1)),
+ "atomicrmw operand must be power-of-two byte-sized integer",
+ &RMWI, ElTy);
Assert2(ElTy == RMWI.getOperand(1)->getType(),
"Argument value type does not match pointer operand type!",
&RMWI, ElTy);
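
All four atomic checks (load, store, cmpxchg, atomicrmw) share the same size test: the operand must be an integer of at least 8 bits whose width is a power of two. Isolated, the test is:

    // A power of two has exactly one set bit, so Bits & (Bits - 1) is zero.
    static bool isPow2ByteSizedInt(unsigned Bits) {
      return Bits >= 8 && !(Bits & (Bits - 1));
    }
    // isPow2ByteSizedInt(8)  -> true     isPow2ByteSizedInt(24) -> false
    // isPow2ByteSizedInt(64) -> true     isPow2ByteSizedInt(4)  -> false
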
@@ -1575,6 +1651,13 @@ void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
Instruction *Op = cast<Instruction>(I.getOperand(i));
+ // If we have an invalid invoke, don't try to compute the dominance.
+ // We already reject it in the invoke-specific checks, and the dominance
+ // computation doesn't handle multiple edges.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
+ if (II->getNormalDest() == II->getUnwindDest())
+ return;
+ }
const Use &U = I.getOperandUse(i);
Assert2(InstsInThisBlock.count(Op) || DT->dominates(Op, U),
diff --git a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
index 888d2c8..b40b4f1 100644
--- a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
+++ b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
@@ -14,6 +14,7 @@
#include "BugDriver.h"
#include "llvm/Constants.h"
+#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
@@ -25,7 +26,6 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileUtilities.h"
diff --git a/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp b/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp
index fb090ee..c56911a 100644
--- a/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp
+++ b/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp
@@ -16,11 +16,11 @@
//===----------------------------------------------------------------------===//
#include "BugDriver.h"
+#include "llvm/DataLayout.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/SystemUtils.h"
diff --git a/contrib/llvm/tools/clang/include/clang-c/Index.h b/contrib/llvm/tools/clang/include/clang-c/Index.h
index edd3cbb..aa3403c 100644
--- a/contrib/llvm/tools/clang/include/clang-c/Index.h
+++ b/contrib/llvm/tools/clang/include/clang-c/Index.h
@@ -23,6 +23,34 @@
#include "clang-c/Platform.h"
#include "clang-c/CXString.h"
+/**
+ * \brief The version constants for the libclang API.
+ * CINDEX_VERSION_MINOR should increase when there are API additions.
+ * CINDEX_VERSION_MAJOR is intended for "major" source/ABI breaking changes.
+ *
+ * The policy about the libclang API was always to keep it source and ABI
+ * compatible, thus CINDEX_VERSION_MAJOR is expected to remain stable.
+ */
+#define CINDEX_VERSION_MAJOR 0
+#define CINDEX_VERSION_MINOR 6
+
+#define CINDEX_VERSION_ENCODE(major, minor) ( \
+ ((major) * 10000) \
+ + ((minor) * 1))
+
+#define CINDEX_VERSION CINDEX_VERSION_ENCODE( \
+ CINDEX_VERSION_MAJOR, \
+ CINDEX_VERSION_MINOR )
+
+#define CINDEX_VERSION_STRINGIZE_(major, minor) \
+ #major"."#minor
+#define CINDEX_VERSION_STRINGIZE(major, minor) \
+ CINDEX_VERSION_STRINGIZE_(major, minor)
+
+#define CINDEX_VERSION_STRING CINDEX_VERSION_STRINGIZE( \
+ CINDEX_VERSION_MAJOR, \
+ CINDEX_VERSION_MINOR)
+
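
The encoding makes version comparisons a single integer compare: 0.6 encodes as 0 * 10000 + 6 == 6, and CINDEX_VERSION_STRING expands to "0.6". A client can gate on new API at compile time, e.g.:

    #if CINDEX_VERSION >= CINDEX_VERSION_ENCODE(0, 6)
      /* CXModule introspection and the reworked CXComment are available. */
    #endif
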
#ifdef __cplusplus
extern "C" {
#endif
@@ -383,7 +411,7 @@ CINDEX_LINKAGE unsigned clang_equalRanges(CXSourceRange range1,
CXSourceRange range2);
/**
- * \brief Returns non-zero if \arg range is null.
+ * \brief Returns non-zero if \p range is null.
*/
CINDEX_LINKAGE int clang_Range_isNull(CXSourceRange range);
@@ -1035,13 +1063,15 @@ enum CXTranslationUnit_Flags {
* code-completion operations.
*/
CXTranslationUnit_CacheCompletionResults = 0x08,
+
/**
- * \brief DEPRECATED: Enable precompiled preambles in C++.
+ * \brief Used to indicate that the translation unit will be serialized with
+ * \c clang_saveTranslationUnit.
*
- * Note: this is a *temporary* option that is available only while
- * we are testing C++ precompiled preamble support. It is deprecated.
+ * This option is typically used when parsing a header with the intent of
+ * producing a precompiled header.
*/
- CXTranslationUnit_CXXPrecompiledPreamble = 0x10,
+ CXTranslationUnit_ForSerialization = 0x10,
/**
* \brief DEPRECATED: Enabled chained precompiled preambles in C++.
@@ -1904,9 +1934,10 @@ enum CXCursorKind {
*/
CXCursor_ReturnStmt = 214,
- /** \brief A GNU inline assembly statement extension.
+ /** \brief A GCC inline assembly statement extension.
*/
- CXCursor_AsmStmt = 215,
+ CXCursor_GCCAsmStmt = 215,
+ CXCursor_AsmStmt = CXCursor_GCCAsmStmt,
/** \brief Objective-C's overall \@try-\@catch-\@finally statement.
*/
@@ -2009,7 +2040,15 @@ enum CXCursorKind {
CXCursor_MacroInstantiation = CXCursor_MacroExpansion,
CXCursor_InclusionDirective = 503,
CXCursor_FirstPreprocessing = CXCursor_PreprocessingDirective,
- CXCursor_LastPreprocessing = CXCursor_InclusionDirective
+ CXCursor_LastPreprocessing = CXCursor_InclusionDirective,
+
+ /* Extra Declarations */
+ /**
+ * \brief A module import declaration.
+ */
+ CXCursor_ModuleImportDecl = 600,
+ CXCursor_FirstExtraDecl = CXCursor_ModuleImportDecl,
+ CXCursor_LastExtraDecl = CXCursor_ModuleImportDecl
};
/**
@@ -2040,7 +2079,8 @@ typedef struct {
* \brief A comment AST node.
*/
typedef struct {
- const void *Data;
+ const void *ASTNode;
+ CXTranslationUnit TranslationUnit;
} CXComment;
/**
@@ -2068,9 +2108,9 @@ CINDEX_LINKAGE CXCursor clang_getTranslationUnitCursor(CXTranslationUnit);
CINDEX_LINKAGE unsigned clang_equalCursors(CXCursor, CXCursor);
/**
- * \brief Returns non-zero if \arg cursor is null.
+ * \brief Returns non-zero if \p cursor is null.
*/
-CINDEX_LINKAGE int clang_Cursor_isNull(CXCursor);
+CINDEX_LINKAGE int clang_Cursor_isNull(CXCursor cursor);
/**
* \brief Compute a hash value for the given cursor.
@@ -2585,6 +2625,7 @@ enum CXCallingConv {
CXCallingConv_X86Pascal = 5,
CXCallingConv_AAPCS = 6,
CXCallingConv_AAPCS_VFP = 7,
+ CXCallingConv_PnaclCall = 8,
CXCallingConv_Invalid = 100,
CXCallingConv_Unexposed = 200
@@ -3164,6 +3205,12 @@ CINDEX_LINKAGE int clang_Cursor_getObjCSelectorIndex(CXCursor);
CINDEX_LINKAGE int clang_Cursor_isDynamicCall(CXCursor C);
/**
+ * \brief Given a cursor pointing to an ObjC message, returns the CXType of the
+ * receiver.
+ */
+CINDEX_LINKAGE CXType clang_Cursor_getReceiverType(CXCursor C);
+
+/**
* \brief Given a cursor that represents a declaration, return the associated
* comment's source range. The range may include multiple consecutive comments
* with whitespace in between.
@@ -3195,6 +3242,65 @@ CINDEX_LINKAGE CXComment clang_Cursor_getParsedComment(CXCursor C);
*/
/**
+ * \defgroup CINDEX_MODULE Module introspection
+ *
+ * The functions in this group provide access to information about modules.
+ *
+ * @{
+ */
+
+typedef void *CXModule;
+
+/**
+ * \brief Given a CXCursor_ModuleImportDecl cursor, return the associated module.
+ */
+CINDEX_LINKAGE CXModule clang_Cursor_getModule(CXCursor C);
+
+/**
+ * \param Module a module object.
+ *
+ * \returns the parent of a sub-module or NULL if the given module is top-level,
+ * e.g. for 'std.vector' it will return the 'std' module.
+ */
+CINDEX_LINKAGE CXModule clang_Module_getParent(CXModule Module);
+
+/**
+ * \param Module a module object.
+ *
+ * \returns the name of the module, e.g. for the 'std.vector' sub-module it
+ * will return "vector".
+ */
+CINDEX_LINKAGE CXString clang_Module_getName(CXModule Module);
+
+/**
+ * \param Module a module object.
+ *
+ * \returns the full name of the module, e.g. "std.vector".
+ */
+CINDEX_LINKAGE CXString clang_Module_getFullName(CXModule Module);
+
+/**
+ * \param Module a module object.
+ *
+ * \returns the number of top level headers associated with this module.
+ */
+CINDEX_LINKAGE unsigned clang_Module_getNumTopLevelHeaders(CXModule Module);
+
+/**
+ * \param Module a module object.
+ *
+ * \param Index top level header index (zero-based).
+ *
+ * \returns the specified top level header associated with the module.
+ */
+CINDEX_LINKAGE
+CXFile clang_Module_getTopLevelHeader(CXModule Module, unsigned Index);
+
+/**
+ * @}
+ */
+
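
A hedged usage sketch for the module API above, as client code rather than part of the header (it assumes the cursor is already known to be a CXCursor_ModuleImportDecl):

    void dumpModuleHeaders(CXCursor C) {
      CXModule M = clang_Cursor_getModule(C);
      CXString Full = clang_Module_getFullName(M); /* e.g. "std.vector" */
      unsigned N = clang_Module_getNumTopLevelHeaders(M);
      for (unsigned i = 0; i < N; ++i) {
        CXFile F = clang_Module_getTopLevelHeader(M, i);
        CXString Name = clang_getFileName(F);
        /* ... consume clang_getCString(Name) ... */
        clang_disposeString(Name);
      }
      clang_disposeString(Full);
    }
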
+/**
* \defgroup CINDEX_COMMENT Comment AST introspection
*
* The routines in this group provide access to information in the
@@ -3272,7 +3378,7 @@ enum CXCommentKind {
* \brief A \\param or \\arg command that describes the function parameter
* (name, passing direction, description).
*
- * \brief For example: \\param [in] ParamName description.
+ * For example: \\param [in] ParamName description.
*/
CXComment_ParamCommand = 7,
@@ -3280,7 +3386,7 @@ enum CXCommentKind {
* \brief A \\tparam command that describes a template parameter (name and
* description).
*
- * \brief For example: \\tparam T description.
+ * For example: \\tparam T description.
*/
CXComment_TParamCommand = 8,
@@ -3379,7 +3485,7 @@ CINDEX_LINKAGE unsigned clang_Comment_getNumChildren(CXComment Comment);
/**
* \param Comment AST node of any kind.
*
- * \param ArgIdx argument index (zero-based).
+ * \param ChildIdx child index (zero-based).
*
* \returns the specified child of the AST node.
*/
@@ -3692,14 +3798,11 @@ CINDEX_LINKAGE CXString clang_FullComment_getAsHTML(CXComment Comment);
* A Relax NG schema for the XML can be found in comment-xml-schema.rng file
* inside clang source tree.
*
- * \param TU the translation unit \c Comment belongs to.
- *
* \param Comment a \c CXComment_FullComment AST node.
*
* \returns string containing an XML document.
*/
-CINDEX_LINKAGE CXString clang_FullComment_getAsXML(CXTranslationUnit TU,
- CXComment Comment);
+CINDEX_LINKAGE CXString clang_FullComment_getAsXML(CXComment Comment);
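
Since CXComment now carries its translation unit, the XML conversion needs only the comment itself. A minimal client sketch:

    void printCommentAsXML(CXCursor C) {
      CXComment FC = clang_Cursor_getParsedComment(C);
      if (clang_Comment_getKind(FC) == CXComment_FullComment) {
        CXString XML = clang_FullComment_getAsXML(FC); /* no TU argument */
        /* ... consume clang_getCString(XML) ... */
        clang_disposeString(XML);
      }
    }
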
/**
* @}
@@ -4323,8 +4426,7 @@ clang_getCompletionAnnotation(CXCompletionString completion_string,
* \param completion_string The code completion string whose parent is
* being queried.
*
- * \param kind If non-NULL, will be set to the kind of the parent context,
- * or CXCursor_NotImplemented if there is no context.
+ * \param kind DEPRECATED: always set to CXCursor_NotImplemented if non-NULL.
*
* \returns The name of the completion parent, e.g., "NSObject" if
* the completion string represents a method in the NSObject class.
@@ -4917,22 +5019,35 @@ typedef struct {
CXFile file;
int isImport;
int isAngled;
+ /**
+ * \brief Non-zero if the directive was automatically turned into a module
+ * import.
+ */
+ int isModuleImport;
} CXIdxIncludedFileInfo;
/**
* \brief Data for IndexerCallbacks#importedASTFile.
*/
typedef struct {
+ /**
+ * \brief Top level AST file containing the imported PCH, module or submodule.
+ */
CXFile file;
/**
- * \brief Location where the file is imported. It is useful mostly for
- * modules.
+ * \brief The imported module or NULL if the AST file is a PCH.
+ */
+ CXModule module;
+ /**
+ * \brief Location where the file is imported. Applicable only for modules.
*/
CXIdxLoc loc;
/**
- * \brief Non-zero if the AST file is a module otherwise it's a PCH.
+ * \brief Non-zero if an inclusion directive was automatically turned into
+ * a module import. Applicable only for modules.
*/
- int isModule;
+ int isImplicit;
+
} CXIdxImportedASTFileInfo;
typedef enum {
@@ -4965,7 +5080,8 @@ typedef enum {
CXIdxEntity_CXXConstructor = 22,
CXIdxEntity_CXXDestructor = 23,
CXIdxEntity_CXXConversionFunction = 24,
- CXIdxEntity_CXXTypeAlias = 25
+ CXIdxEntity_CXXTypeAlias = 25,
+ CXIdxEntity_CXXInterface = 26
} CXIdxEntityKind;
@@ -5183,8 +5299,8 @@ typedef struct {
*
* AST files will not get indexed (there will not be callbacks to index all
* the entities in an AST file). The recommended action is, if the AST
- * file is not already indexed, to block further indexing and initiate a new
- * indexing job specific to the AST file.
+ * file is not already indexed, to initiate a new indexing job specific to
+ * the AST file.
*/
CXIdxClientASTFile (*importedASTFile)(CXClientData client_data,
const CXIdxImportedASTFileInfo *);
diff --git a/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h
index 86a6cbb..cce8661 100644
--- a/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h
+++ b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h
@@ -12,6 +12,7 @@
#include "clang/ARCMigrate/FileRemapper.h"
#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Basic/SourceLocation.h"
namespace clang {
class ASTContext;
@@ -51,7 +52,7 @@ bool applyTransformations(CompilerInvocation &origCI,
DiagnosticConsumer *DiagClient);
/// \brief Applies automatic modifications and produces temporary files
-/// and metadata into the \arg outputDir path.
+/// and metadata into the \p outputDir path.
///
/// \param emitPremigrationARCErrors if true all ARC errors will get emitted
/// even if the migrator can fix them, but the function will still return false
@@ -68,7 +69,7 @@ bool migrateWithTemporaryFiles(CompilerInvocation &origCI,
bool emitPremigrationARCErrors,
StringRef plistOut);
-/// \brief Get the set of file remappings from the \arg outputDir path that
+/// \brief Get the set of file remappings from the \p outputDir path that
/// migrateWithTemporaryFiles produced.
///
/// \returns false if no error is produced, true otherwise.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h b/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
index 69a3866..37b0740 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
@@ -19,12 +19,14 @@ namespace clang {
class CXXRecordDecl;
class DeclGroupRef;
class HandleTagDeclDefinition;
+ class PPMutationListener;
class ASTMutationListener;
class ASTDeserializationListener; // layering violation because void* is ugly
class SemaConsumer; // layering violation required for safe SemaConsumer
class TagDecl;
class VarDecl;
class FunctionDecl;
+ class ImportDecl;
/// ASTConsumer - This is an abstract interface that should be implemented by
/// clients that read ASTs. This abstraction layer allows the client to be
@@ -79,6 +81,11 @@ public:
/// The default implementation ignores them.
virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
+ /// \brief Handle an ImportDecl that was implicitly created due to an
+ /// inclusion directive.
+ /// The default implementation passes it to HandleTopLevelDecl.
+ virtual void HandleImplicitImportDecl(ImportDecl *D);
+
/// CompleteTentativeDefinition - Callback invoked at the end of a translation
/// unit to notify the consumer that the given tentative definition should be
/// completed.
@@ -105,6 +112,11 @@ public:
/// it was actually used.
virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {}
+ /// \brief If the consumer is interested in preprocessor entities getting
+ /// modified after their initial creation, it should return a pointer to
+ /// a PPMutationListener here.
+ virtual PPMutationListener *GetPPMutationListener() { return 0; }
+
/// \brief If the consumer is interested in entities getting modified after
/// their initial creation, it should return a pointer to
/// an ASTMutationListener here.
@@ -118,9 +130,6 @@ public:
/// PrintStats - If desired, print any statistics.
virtual void PrintStats() {}
-
- // Support isa/cast/dyn_cast
- static bool classof(const ASTConsumer *) { return true; }
};
} // end namespace clang.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
index cad3ad2..f0934b7 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the ASTContext interface.
-//
+///
+/// \file
+/// \brief Defines the clang::ASTContext interface.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_ASTCONTEXT_H
@@ -28,6 +29,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/RawCommentList.h"
+#include "clang/AST/CommentCommandTraits.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
@@ -84,8 +86,8 @@ namespace clang {
class FullComment;
}
-/// ASTContext - This class holds long-lived AST nodes (such as types and
-/// decls) that can be referred to throughout the semantic analysis of a file.
+/// \brief Holds long-lived AST nodes (such as types and decls) that can be
+/// referred to throughout the semantic analysis of a file.
class ASTContext : public RefCountedBase<ASTContext> {
ASTContext &this_() { return *this; }
@@ -144,19 +146,20 @@ class ASTContext : public RefCountedBase<ASTContext> {
mutable NestedNameSpecifier *GlobalNestedNameSpecifier;
friend class NestedNameSpecifier;
- /// ASTRecordLayouts - A cache mapping from RecordDecls to ASTRecordLayouts.
- /// This is lazily created. This is intentionally not serialized.
+ /// \brief A cache mapping from RecordDecls to ASTRecordLayouts.
+ ///
+ /// This is lazily created. This is intentionally not serialized.
mutable llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>
ASTRecordLayouts;
mutable llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*>
ObjCLayouts;
- /// TypeInfoMap - A cache from types to size and alignment information.
+ /// \brief A cache from types to size and alignment information.
typedef llvm::DenseMap<const Type*,
std::pair<uint64_t, unsigned> > TypeInfoMap;
mutable TypeInfoMap MemoizedTypeInfo;
- /// KeyFunctions - A cache mapping from CXXRecordDecls to key functions.
+ /// \brief A cache mapping from CXXRecordDecls to key functions.
llvm::DenseMap<const CXXRecordDecl*, const CXXMethodDecl*> KeyFunctions;
/// \brief Mapping from ObjCContainers to their ObjCImplementations.
@@ -170,7 +173,7 @@ class ASTContext : public RefCountedBase<ASTContext> {
llvm::DenseMap<const VarDecl*, Expr*> BlockVarCopyInits;
/// \brief Mapping from class scope functions specialization to their
- /// template patterns.
+ /// template patterns.
llvm::DenseMap<const FunctionDecl*, FunctionDecl*>
ClassScopeSpecializationPattern;
@@ -206,17 +209,20 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// __builtin_va_list type.
mutable TypedefDecl *BuiltinVaListDecl;
- /// \brief The typedef for the predefined 'id' type.
+ /// \brief The typedef for the predefined \c id type.
mutable TypedefDecl *ObjCIdDecl;
- /// \brief The typedef for the predefined 'SEL' type.
+ /// \brief The typedef for the predefined \c SEL type.
mutable TypedefDecl *ObjCSelDecl;
- /// \brief The typedef for the predefined 'Class' type.
+ /// \brief The typedef for the predefined \c Class type.
mutable TypedefDecl *ObjCClassDecl;
- /// \brief The typedef for the predefined 'Protocol' class in Objective-C.
+ /// \brief The typedef for the predefined \c Protocol class in Objective-C.
mutable ObjCInterfaceDecl *ObjCProtocolClassDecl;
+
+ /// \brief The typedef for the predefined 'BOOL' type.
+ mutable TypedefDecl *BOOLDecl;
// Typedefs which may be provided defining the structure of Objective-C
// pseudo-builtins
@@ -296,9 +302,10 @@ class ASTContext : public RefCountedBase<ASTContext> {
InstantiatedFromStaticDataMember;
/// \brief Keeps track of the declaration from which a UsingDecl was
- /// created during instantiation. The source declaration is always
- /// a UsingDecl, an UnresolvedUsingValueDecl, or an
- /// UnresolvedUsingTypenameDecl.
+ /// created during instantiation.
+ ///
+ /// The source declaration is always a UsingDecl, an UnresolvedUsingValueDecl,
+ /// or an UnresolvedUsingTypenameDecl.
///
/// For example:
/// \code
@@ -337,9 +344,8 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// mangling context.
llvm::DenseMap<const DeclContext *, LambdaMangleContext> LambdaMangleContexts;
- /// \brief Mapping that stores parameterIndex values for ParmVarDecls
- /// when that value exceeds the bitfield size of
- /// ParmVarDeclBits.ParameterIndex.
+ /// \brief Mapping that stores parameterIndex values for ParmVarDecls when
+ /// that value exceeds the bitfield size of ParmVarDeclBits.ParameterIndex.
typedef llvm::DenseMap<const VarDecl *, unsigned> ParameterIndexTable;
ParameterIndexTable ParamIndices;
@@ -348,10 +354,10 @@ class ASTContext : public RefCountedBase<ASTContext> {
TranslationUnitDecl *TUDecl;
- /// SourceMgr - The associated SourceManager object.
+ /// \brief The associated SourceManager object.
SourceManager &SourceMgr;
- /// LangOpts - The language options used to create the AST associated with
+ /// \brief The language options used to create the AST associated with
/// this ASTContext object.
LangOptions &LangOpts;
@@ -387,9 +393,11 @@ public:
OwningPtr<ExternalASTSource> ExternalSource;
ASTMutationListener *Listener;
- clang::PrintingPolicy getPrintingPolicy() const { return PrintingPolicy; }
+ const clang::PrintingPolicy &getPrintingPolicy() const {
+ return PrintingPolicy;
+ }
- void setPrintingPolicy(clang::PrintingPolicy Policy) {
+ void setPrintingPolicy(const clang::PrintingPolicy &Policy) {
PrintingPolicy = Policy;
}
@@ -508,6 +516,8 @@ public:
}
void addComment(const RawComment &RC) {
+ assert(LangOpts.RetainCommentsFromSystemHeaders ||
+ !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
Comments.addComment(RC, BumpAlloc);
}
@@ -522,7 +532,22 @@ public:
/// Return parsed documentation comment attached to a given declaration.
/// Returns NULL if no comment is attached.
- comments::FullComment *getCommentForDecl(const Decl *D) const;
+ ///
+ /// \param PP the Preprocessor used with this TU. Could be NULL if
+ /// preprocessor is not available.
+ comments::FullComment *getCommentForDecl(const Decl *D,
+ const Preprocessor *PP) const;
+
+ comments::FullComment *cloneFullComment(comments::FullComment *FC,
+ const Decl *D) const;
+
+private:
+ mutable comments::CommandTraits CommentCommandTraits;
+
+public:
+ comments::CommandTraits &getCommentCommandTraits() const {
+ return CommentCommandTraits;
+ }
/// \brief Retrieve the attributes for the given declaration.
AttrVec& getDeclAttrs(const Decl *D);
@@ -547,7 +572,7 @@ public:
TemplateSpecializationKind TSK,
SourceLocation PointOfInstantiation = SourceLocation());
- /// \brief If the given using decl is an instantiation of a
+ /// \brief If the given using decl \p Inst is an instantiation of a
/// (possibly unresolved) using decl from a template instantiation,
/// return it.
NamedDecl *getInstantiatedFromUsingDecl(UsingDecl *Inst);
@@ -564,28 +589,28 @@ public:
void setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, FieldDecl *Tmpl);
- /// ZeroBitfieldFollowsNonBitfield - return 'true" if 'FD' is a zero-length
- /// bitfield which follows the non-bitfield 'LastFD'.
+ /// \brief Return \c true if \p FD is a zero-length bitfield which follows
+ /// the non-bitfield \p LastFD.
bool ZeroBitfieldFollowsNonBitfield(const FieldDecl *FD,
const FieldDecl *LastFD) const;
- /// ZeroBitfieldFollowsBitfield - return 'true" if 'FD' is a zero-length
- /// bitfield which follows the bitfield 'LastFD'.
+ /// \brief Return \c true if \p FD is a zero-length bitfield which follows
+ /// the bitfield \p LastFD.
bool ZeroBitfieldFollowsBitfield(const FieldDecl *FD,
const FieldDecl *LastFD) const;
- /// BitfieldFollowsBitfield - return 'true" if 'FD' is a
- /// bitfield which follows the bitfield 'LastFD'.
+ /// \brief Return \c true if \p FD is a bitfield which follows the bitfield
+ /// \p LastFD.
bool BitfieldFollowsBitfield(const FieldDecl *FD,
const FieldDecl *LastFD) const;
- /// NonBitfieldFollowsBitfield - return 'true" if 'FD' is not a
- /// bitfield which follows the bitfield 'LastFD'.
+ /// \brief Return \c true if \p FD is not a bitfield which follows the
+ /// bitfield \p LastFD.
bool NonBitfieldFollowsBitfield(const FieldDecl *FD,
const FieldDecl *LastFD) const;
- /// BitfieldFollowsNonBitfield - return 'true" if 'FD' is a
- /// bitfield which follows the none bitfield 'LastFD'.
+ /// \brief Return \c true if \p FD is a bitfield which follows the
+ /// non-bitfield \p LastFD.
bool BitfieldFollowsNonBitfield(const FieldDecl *FD,
const FieldDecl *LastFD) const;
@@ -603,6 +628,17 @@ public:
/// Overridden method.
void addOverriddenMethod(const CXXMethodDecl *Method,
const CXXMethodDecl *Overridden);
+
+ /// \brief Return C++ or ObjC overridden methods for the given \p Method.
+ ///
+ /// An ObjC method is considered to override any method in the class's
+ /// base classes, its protocols, or its categories' protocols, that has
+ /// the same selector and is of the same kind (class or instance).
+ /// A method in an implementation is not considered as overriding the same
+ /// method in the interface or its categories.
+ void getOverriddenMethods(
+ const NamedDecl *Method,
+ SmallVectorImpl<const NamedDecl *> &Overridden) const;
/// \brief Notify the AST context that a new import declaration has been
/// parsed or implicitly created within this translation unit.
@@ -673,6 +709,7 @@ public:
CanQualType FloatComplexTy, DoubleComplexTy, LongDoubleComplexTy;
CanQualType VoidPtrTy, NullPtrTy;
CanQualType DependentTy, OverloadTy, BoundMemberTy, UnknownAnyTy;
+ CanQualType BuiltinFnTy;
CanQualType PseudoObjectTy, ARCUnbridgedCastTy;
CanQualType ObjCBuiltinIdTy, ObjCBuiltinClassTy, ObjCBuiltinSelTy;
CanQualType ObjCBuiltinBoolTy;
@@ -731,77 +768,85 @@ public:
//===--------------------------------------------------------------------===//
private:
- /// getExtQualType - Return a type with extended qualifiers.
+ /// \brief Return a type with extended qualifiers.
QualType getExtQualType(const Type *Base, Qualifiers Quals) const;
QualType getTypeDeclTypeSlow(const TypeDecl *Decl) const;
public:
- /// getAddSpaceQualType - Return the uniqued reference to the type for an
- /// address space qualified type with the specified type and address space.
+ /// \brief Return the uniqued reference to the type for an address space
+ /// qualified type with the specified type and address space.
+ ///
/// The resulting type has a union of the qualifiers from T and the address
/// space. If T already has an address space specifier, it is silently
/// replaced.
QualType getAddrSpaceQualType(QualType T, unsigned AddressSpace) const;
- /// getObjCGCQualType - Returns the uniqued reference to the type for an
- /// objc gc qualified type. The retulting type has a union of the qualifiers
- /// from T and the gc attribute.
+ /// \brief Return the uniqued reference to the type for an Objective-C
+ /// gc-qualified type.
+ ///
+ /// The resulting type has a union of the qualifiers from T and the gc
+ /// attribute.
QualType getObjCGCQualType(QualType T, Qualifiers::GC gcAttr) const;
- /// getRestrictType - Returns the uniqued reference to the type for a
- /// 'restrict' qualified type. The resulting type has a union of the
- /// qualifiers from T and 'restrict'.
+ /// \brief Return the uniqued reference to the type for a \c restrict
+ /// qualified type.
+ ///
+ /// The resulting type has a union of the qualifiers from \p T and
+ /// \c restrict.
QualType getRestrictType(QualType T) const {
return T.withFastQualifiers(Qualifiers::Restrict);
}
- /// getVolatileType - Returns the uniqued reference to the type for a
- /// 'volatile' qualified type. The resulting type has a union of the
- /// qualifiers from T and 'volatile'.
+ /// \brief Return the uniqued reference to the type for a \c volatile
+ /// qualified type.
+ ///
+ /// The resulting type has a union of the qualifiers from \p T and
+ /// \c volatile.
QualType getVolatileType(QualType T) const {
return T.withFastQualifiers(Qualifiers::Volatile);
}
- /// getConstType - Returns the uniqued reference to the type for a
- /// 'const' qualified type. The resulting type has a union of the
- /// qualifiers from T and 'const'.
+ /// \brief Return the uniqued reference to the type for a \c const
+ /// qualified type.
///
- /// It can be reasonably expected that this will always be
- /// equivalent to calling T.withConst().
+ /// The resulting type has a union of the qualifiers from \p T and \c const.
+ ///
+ /// It can be reasonably expected that this will always be equivalent to
+ /// calling T.withConst().
QualType getConstType(QualType T) const { return T.withConst(); }
- /// adjustFunctionType - Change the ExtInfo on a function type.
+ /// \brief Change the ExtInfo on a function type.
const FunctionType *adjustFunctionType(const FunctionType *Fn,
FunctionType::ExtInfo EInfo);
- /// getComplexType - Return the uniqued reference to the type for a complex
+ /// \brief Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType getComplexType(QualType T) const;
CanQualType getComplexType(CanQualType T) const {
return CanQualType::CreateUnsafe(getComplexType((QualType) T));
}
- /// getPointerType - Return the uniqued reference to the type for a pointer to
+ /// \brief Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType getPointerType(QualType T) const;
CanQualType getPointerType(CanQualType T) const {
return CanQualType::CreateUnsafe(getPointerType((QualType) T));
}
- /// getAtomicType - Return the uniqued reference to the atomic type for
- /// the specified type.
+ /// \brief Return the uniqued reference to the atomic type for the specified
+ /// type.
QualType getAtomicType(QualType T) const;
- /// getBlockPointerType - Return the uniqued reference to the type for a block
- /// of the specified type.
+ /// \brief Return the uniqued reference to the type for a block of the
+ /// specified type.
QualType getBlockPointerType(QualType T) const;
- /// This gets the struct used to keep track of the descriptor for pointer to
+ /// Gets the struct used to keep track of the descriptor for pointer to
/// blocks.
QualType getBlockDescriptorType() const;
- /// This gets the struct used to keep track of the extended descriptor for
+ /// Gets the struct used to keep track of the extended descriptor for
/// pointer to blocks.
QualType getBlockDescriptorExtendedType() const;
@@ -812,78 +857,82 @@ public:
return cudaConfigureCallDecl;
}
- /// This builds the struct used for __block variables.
+ /// Builds the struct used for __block variables.
QualType BuildByRefType(StringRef DeclName, QualType Ty) const;
/// Returns true iff we need copy/dispose helpers for the given type.
bool BlockRequiresCopying(QualType Ty) const;
- /// getLValueReferenceType - Return the uniqued reference to the type for an
- /// lvalue reference to the specified type.
+ /// \brief Return the uniqued reference to the type for an lvalue reference
+ /// to the specified type.
QualType getLValueReferenceType(QualType T, bool SpelledAsLValue = true)
const;
- /// getRValueReferenceType - Return the uniqued reference to the type for an
- /// rvalue reference to the specified type.
+ /// \brief Return the uniqued reference to the type for an rvalue reference
+ /// to the specified type.
QualType getRValueReferenceType(QualType T) const;
- /// getMemberPointerType - Return the uniqued reference to the type for a
- /// member pointer to the specified type in the specified class. The class
- /// is a Type because it could be a dependent name.
+ /// \brief Return the uniqued reference to the type for a member pointer to
+ /// the specified type in the specified class.
+ ///
+ /// The class \p Cls is a \c Type because it could be a dependent name.
QualType getMemberPointerType(QualType T, const Type *Cls) const;
- /// getVariableArrayType - Returns a non-unique reference to the type for a
- /// variable array of the specified element type.
+ /// \brief Return a non-unique reference to the type for a variable array of
+ /// the specified element type.
QualType getVariableArrayType(QualType EltTy, Expr *NumElts,
ArrayType::ArraySizeModifier ASM,
unsigned IndexTypeQuals,
SourceRange Brackets) const;
- /// getDependentSizedArrayType - Returns a non-unique reference to
- /// the type for a dependently-sized array of the specified element
- /// type. FIXME: We will need these to be uniqued, or at least
- /// comparable, at some point.
+ /// \brief Return a non-unique reference to the type for a dependently-sized
+ /// array of the specified element type.
+ ///
+ /// FIXME: We will need these to be uniqued, or at least comparable, at some
+ /// point.
QualType getDependentSizedArrayType(QualType EltTy, Expr *NumElts,
ArrayType::ArraySizeModifier ASM,
unsigned IndexTypeQuals,
SourceRange Brackets) const;
- /// getIncompleteArrayType - Returns a unique reference to the type for a
- /// incomplete array of the specified element type.
+ /// \brief Return a unique reference to the type for an incomplete array of
+ /// the specified element type.
QualType getIncompleteArrayType(QualType EltTy,
ArrayType::ArraySizeModifier ASM,
unsigned IndexTypeQuals) const;
- /// getConstantArrayType - Return the unique reference to the type for a
- /// constant array of the specified element type.
+ /// \brief Return the unique reference to the type for a constant array of
+ /// the specified element type.
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize,
ArrayType::ArraySizeModifier ASM,
unsigned IndexTypeQuals) const;
- /// getVariableArrayDecayedType - Returns a vla type where known sizes
- /// are replaced with [*].
+ /// \brief Returns a vla type where known sizes are replaced with [*].
QualType getVariableArrayDecayedType(QualType Ty) const;
- /// getVectorType - Return the unique reference to a vector type of
- /// the specified element type and size. VectorType must be a built-in type.
+ /// \brief Return the unique reference to a vector type of the specified
+ /// element type and size.
+ ///
+ /// \pre \p VectorType must be a built-in type.
QualType getVectorType(QualType VectorType, unsigned NumElts,
VectorType::VectorKind VecKind) const;
- /// getExtVectorType - Return the unique reference to an extended vector type
- /// of the specified element type and size. VectorType must be a built-in
- /// type.
+ /// \brief Return the unique reference to an extended vector type
+ /// of the specified element type and size.
+ ///
+ /// \pre \p VectorType must be a built-in type.
QualType getExtVectorType(QualType VectorType, unsigned NumElts) const;
- /// getDependentSizedExtVectorType - Returns a non-unique reference to
- /// the type for a dependently-sized vector of the specified element
- /// type. FIXME: We will need these to be uniqued, or at least
- /// comparable, at some point.
+ /// \brief Return a non-unique reference to the type for a dependently-sized
+ /// vector of the specified element type.
+ ///
+ /// FIXME: We will need these to be uniqued, or at least comparable, at some
+ /// point.
QualType getDependentSizedExtVectorType(QualType VectorType,
Expr *SizeExpr,
SourceLocation AttrLoc) const;
- /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
- ///
+ /// \brief Return a K&R style C function type like 'int()'.
QualType getFunctionNoProtoType(QualType ResultTy,
const FunctionType::ExtInfo &Info) const;
@@ -891,14 +940,13 @@ public:
return getFunctionNoProtoType(ResultTy, FunctionType::ExtInfo());
}
- /// getFunctionType - Return a normal function type with a typed
- /// argument list.
+ /// \brief Return a normal function type with a typed argument list.
QualType getFunctionType(QualType ResultTy,
const QualType *Args, unsigned NumArgs,
const FunctionProtoType::ExtProtoInfo &EPI) const;
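A hedged sketch of building a prototyped function type through the
pre-ArrayRef signature above (Ctx assumed to be an ASTContext):

    QualType Params[] = { Ctx.IntTy, Ctx.DoubleTy };
    QualType FnTy = Ctx.getFunctionType(Ctx.VoidTy, Params, 2,
                                        FunctionProtoType::ExtProtoInfo());
    // FnTy is 'void (int, double)'.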
- /// getTypeDeclType - Return the unique reference to the type for
- /// the specified type declaration.
+ /// \brief Return the unique reference to the type for the specified type
+ /// declaration.
QualType getTypeDeclType(const TypeDecl *Decl,
const TypeDecl *PrevDecl = 0) const {
assert(Decl && "Passed null for Decl param");
@@ -913,8 +961,8 @@ public:
return getTypeDeclTypeSlow(Decl);
}
- /// getTypedefType - Return the unique reference to the type for the
- /// specified typedef-name decl.
+ /// \brief Return the unique reference to the type for the specified
+ /// typedef-name decl.
QualType getTypedefType(const TypedefNameDecl *Decl,
QualType Canon = QualType()) const;
@@ -986,69 +1034,75 @@ public:
ObjCProtocolDecl * const *Protocols,
unsigned NumProtocols) const;
- /// getObjCObjectPointerType - Return a ObjCObjectPointerType type
- /// for the given ObjCObjectType.
+ /// \brief Return a ObjCObjectPointerType type for the given ObjCObjectType.
QualType getObjCObjectPointerType(QualType OIT) const;
- /// getTypeOfType - GCC extension.
+ /// \brief GCC extension.
QualType getTypeOfExprType(Expr *e) const;
QualType getTypeOfType(QualType t) const;
- /// getDecltypeType - C++0x decltype.
+ /// \brief C++11 decltype.
QualType getDecltypeType(Expr *e, QualType UnderlyingType) const;
- /// getUnaryTransformType - unary type transforms
+ /// \brief Unary type transforms.
QualType getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
UnaryTransformType::UTTKind UKind) const;
- /// getAutoType - C++0x deduced auto type.
+ /// \brief C++11 deduced auto type.
QualType getAutoType(QualType DeducedType) const;
- /// getAutoDeductType - C++0x deduction pattern for 'auto' type.
+ /// \brief C++11 deduction pattern for 'auto' type.
QualType getAutoDeductType() const;
- /// getAutoRRefDeductType - C++0x deduction pattern for 'auto &&' type.
+ /// \brief C++11 deduction pattern for 'auto &&' type.
QualType getAutoRRefDeductType() const;
- /// getTagDeclType - Return the unique reference to the type for the
- /// specified TagDecl (struct/union/class/enum) decl.
+ /// \brief Return the unique reference to the type for the specified TagDecl
+ /// (struct/union/class/enum) decl.
QualType getTagDeclType(const TagDecl *Decl) const;
- /// getSizeType - Return the unique type for "size_t" (C99 7.17), defined
- /// in <stddef.h>. The sizeof operator requires this (C99 6.5.3.4p4).
+ /// \brief Return the unique type for "size_t" (C99 7.17), defined in
+ /// <stddef.h>.
+ ///
+ /// The sizeof operator requires this (C99 6.5.3.4p4).
CanQualType getSizeType() const;
- /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5),
- /// defined in <stdint.h>.
+ /// \brief Return the unique type for "intmax_t" (C99 7.18.1.5), defined in
+ /// <stdint.h>.
CanQualType getIntMaxType() const;
- /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5),
- /// defined in <stdint.h>.
+ /// \brief Return the unique type for "uintmax_t" (C99 7.18.1.5), defined in
+ /// <stdint.h>.
CanQualType getUIntMaxType() const;
- /// getWCharType - In C++, this returns the unique wchar_t type. In C99, this
+ /// \brief In C++, this returns the unique wchar_t type. In C99, this
/// returns a type compatible with the type defined in <stddef.h> as defined
/// by the target.
QualType getWCharType() const { return WCharTy; }
- /// getSignedWCharType - Return the type of "signed wchar_t".
+ /// \brief Return the type of "signed wchar_t".
+ ///
/// Used when in C++, as a GCC extension.
QualType getSignedWCharType() const;
- /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
+ /// \brief Return the type of "unsigned wchar_t".
+ ///
/// Used when in C++, as a GCC extension.
QualType getUnsignedWCharType() const;
- /// getWIntType - In C99, this returns a type compatible with the type
+ /// \brief In C99, this returns a type compatible with the type
/// defined in <stddef.h> as defined by the target.
QualType getWIntType() const { return WIntTy; }
- /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
- /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
+ /// \brief Return the unique type for "ptrdiff_t" (C99 7.17) defined in
+ /// <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType getPointerDiffType() const;
- // getCFConstantStringType - Return the C structure type used to represent
- // constant CFStrings.
+ /// \brief Return the unique type for "pid_t" defined in
+ /// <sys/types.h>. We need this to compute the correct type for vfork().
+ QualType getProcessIDType() const;
+
+ /// \brief Return the C structure type used to represent constant CFStrings.
QualType getCFConstantStringType() const;
/// Get the structure type used to represent CFStrings, or NULL
@@ -1074,21 +1128,21 @@ public:
ObjCNSStringType = T;
}
- /// \brief Retrieve the type that 'id' has been defined to, which may be
- /// different from the built-in 'id' if 'id' has been typedef'd.
+ /// \brief Retrieve the type that \c id has been defined to, which may be
+ /// different from the built-in \c id if \c id has been typedef'd.
QualType getObjCIdRedefinitionType() const {
if (ObjCIdRedefinitionType.isNull())
return getObjCIdType();
return ObjCIdRedefinitionType;
}
- /// \brief Set the user-written type that redefines 'id'.
+ /// \brief Set the user-written type that redefines \c id.
void setObjCIdRedefinitionType(QualType RedefType) {
ObjCIdRedefinitionType = RedefType;
}
- /// \brief Retrieve the type that 'Class' has been defined to, which may be
- /// different from the built-in 'Class' if 'Class' has been typedef'd.
+ /// \brief Retrieve the type that \c Class has been defined to, which may be
+ /// different from the built-in \c Class if \c Class has been typedef'd.
QualType getObjCClassRedefinitionType() const {
if (ObjCClassRedefinitionType.isNull())
return getObjCClassType();
@@ -1175,27 +1229,29 @@ public:
return getLangOpts().CPlusPlus ? BoolTy : IntTy;
}
- /// getObjCEncodingForType - Emit the ObjC type encoding for the
- /// given type into \arg S. If \arg NameFields is specified then
- /// record field names are also encoded.
- void getObjCEncodingForType(QualType t, std::string &S,
+ /// \brief Emit the Objective-C type encoding for the given type \p T into
+ /// \p S.
+ ///
+ /// If \p Field is specified then record field names are also encoded.
+ void getObjCEncodingForType(QualType T, std::string &S,
const FieldDecl *Field=0) const;
void getLegacyIntegralTypeEncoding(QualType &t) const;
- // Put the string version of type qualifiers into S.
+ /// \brief Put the string version of the type qualifiers \p QT into \p S.
void getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
std::string &S) const;
- /// getObjCEncodingForFunctionDecl - Returns the encoded type for this
- /// function. This is in the same format as Objective-C method encodings.
+ /// \brief Emit the encoded type for the function \p Decl into \p S.
+ ///
+ /// This is in the same format as Objective-C method encodings.
///
/// \returns true if an error occurred (e.g., because one of the parameter
/// types is incomplete), false otherwise.
bool getObjCEncodingForFunctionDecl(const FunctionDecl *Decl, std::string& S);
- /// getObjCEncodingForMethodDecl - Return the encoded type for this method
- /// declaration.
+ /// \brief Emit the encoded type for the method declaration \p Decl into
+ /// \p S.
///
/// \returns true if an error occurred (e.g., because one of the parameter
/// types is incomplete), false otherwise.
@@ -1203,8 +1259,7 @@ public:
bool Extended = false)
const;
- /// getObjCEncodingForBlock - Return the encoded type for this block
- /// declaration.
+ /// \brief Return the encoded type for this block declaration.
std::string getObjCEncodingForBlock(const BlockExpr *blockExpr) const;
/// getObjCEncodingForPropertyDecl - Return the encoded type for
@@ -1218,16 +1273,18 @@ public:
bool ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
ObjCProtocolDecl *rProto) const;
- /// getObjCEncodingTypeSize returns size of type for objective-c encoding
- /// purpose in characters.
- CharUnits getObjCEncodingTypeSize(QualType t) const;
+ /// \brief Return the size of type \p T for Objective-C encoding purpose,
+ /// in characters.
+ CharUnits getObjCEncodingTypeSize(QualType T) const;
- /// \brief Retrieve the typedef corresponding to the predefined 'id' type
+ /// \brief Retrieve the typedef corresponding to the predefined \c id type
/// in Objective-C.
TypedefDecl *getObjCIdDecl() const;
- /// This setter/getter represents the ObjC 'id' type. It is setup lazily, by
- /// Sema. id is always a (typedef for a) pointer type, a pointer to a struct.
+ /// \brief Represents the Objective-C \c id type.
+ ///
+ /// This is set up lazily, by Sema. \c id is always a (typedef for a)
+ /// pointer type, a pointer to a struct.
QualType getObjCIdType() const {
return getTypeDeclType(getObjCIdDecl());
}
@@ -1246,48 +1303,64 @@ public:
/// Objective-C 'Class' type.
TypedefDecl *getObjCClassDecl() const;
- /// This setter/getter repreents the ObjC 'Class' type. It is setup lazily, by
- /// Sema. 'Class' is always a (typedef for a) pointer type, a pointer to a
- /// struct.
+ /// \brief Represents the Objective-C \c Class type.
+ ///
+ /// This is set up lazily, by Sema. \c Class is always a (typedef for a)
+ /// pointer type, a pointer to a struct.
QualType getObjCClassType() const {
return getTypeDeclType(getObjCClassDecl());
}
/// \brief Retrieve the Objective-C class declaration corresponding to
- /// the predefined 'Protocol' class.
+ /// the predefined \c Protocol class.
ObjCInterfaceDecl *getObjCProtocolDecl() const;
+
+ /// \brief Retrieve the declaration of the 'BOOL' typedef.
+ TypedefDecl *getBOOLDecl() const {
+ return BOOLDecl;
+ }
+
+ /// \brief Save the declaration of the 'BOOL' typedef.
+ void setBOOLDecl(TypedefDecl *TD) {
+ BOOLDecl = TD;
+ }
+
+ /// \brief Return the type of the 'BOOL' typedef.
+ QualType getBOOLType() const {
+ return getTypeDeclType(getBOOLDecl());
+ }
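The intended pairing, as a sketch (BoolTD is a hypothetical TypedefDecl*;
in practice Sema registers it):

    Ctx.setBOOLDecl(BoolTD);               // done once, by Sema
    QualType ObjCBool = Ctx.getBOOLType(); // thereafter queryable by clients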
- /// \brief Retrieve the type of the Objective-C "Protocol" class.
+ /// \brief Retrieve the type of the Objective-C \c Protocol class.
QualType getObjCProtoType() const {
return getObjCInterfaceType(getObjCProtocolDecl());
}
/// \brief Retrieve the C type declaration corresponding to the predefined
- /// __builtin_va_list type.
+ /// \c __builtin_va_list type.
TypedefDecl *getBuiltinVaListDecl() const;
- /// \brief Retrieve the type of the __builtin_va_list type.
+ /// \brief Retrieve the type of the \c __builtin_va_list type.
QualType getBuiltinVaListType() const {
return getTypeDeclType(getBuiltinVaListDecl());
}
/// \brief Retrieve the C type declaration corresponding to the predefined
- /// __va_list_tag type used to help define the __builtin_va_list type for
- /// some targets.
+ /// \c __va_list_tag type used to help define the \c __builtin_va_list type
+ /// for some targets.
QualType getVaListTagType() const;
- /// getCVRQualifiedType - Returns a type with additional const,
- /// volatile, or restrict qualifiers.
+ /// \brief Return a type with additional \c const, \c volatile, or
+ /// \c restrict qualifiers.
QualType getCVRQualifiedType(QualType T, unsigned CVR) const {
return getQualifiedType(T, Qualifiers::fromCVRMask(CVR));
}
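A sketch of the CVR mask in use (T assumed to be some QualType):

    QualType CV = Ctx.getCVRQualifiedType(
        T, Qualifiers::Const | Qualifiers::Volatile); // 'const volatile T'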
- /// getQualifiedType - Un-split a SplitQualType.
+ /// \brief Un-split a SplitQualType.
QualType getQualifiedType(SplitQualType split) const {
return getQualifiedType(split.Ty, split.Quals);
}
- /// getQualifiedType - Returns a type with additional qualifiers.
+ /// \brief Return a type with additional qualifiers.
QualType getQualifiedType(QualType T, Qualifiers Qs) const {
if (!Qs.hasNonFastQualifiers())
return T.withFastQualifiers(Qs.getFastQualifiers());
@@ -1296,15 +1369,16 @@ public:
return getExtQualType(Ptr, Qc);
}
- /// getQualifiedType - Returns a type with additional qualifiers.
+ /// \brief Return a type with additional qualifiers.
QualType getQualifiedType(const Type *T, Qualifiers Qs) const {
if (!Qs.hasNonFastQualifiers())
return QualType(T, Qs.getFastQualifiers());
return getExtQualType(T, Qs);
}
- /// getLifetimeQualifiedType - Returns a type with the given
- /// lifetime qualifier.
+ /// \brief Return a type with the given lifetime qualifier.
+ ///
+ /// \pre \p type.getObjCLifetime() must be \c OCL_None, and \p lifetime must
+ /// not be \c OCL_None.
QualType getLifetimeQualifiedType(QualType type,
Qualifiers::ObjCLifetime lifetime) {
assert(type.getObjCLifetime() == Qualifiers::OCL_None);
@@ -1341,8 +1415,9 @@ public:
GE_Missing_ucontext ///< Missing a type from <ucontext.h>
};
- /// GetBuiltinType - Return the type for the specified builtin. If
- /// IntegerConstantArgs is non-null, it is filled in with a bitmask of
+ /// \brief Return the type for the specified builtin.
+ ///
+ /// If \p IntegerConstantArgs is non-null, it is filled in with a bitmask of
/// arguments to the builtin that are required to be integer constant
/// expressions.
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error,
@@ -1357,19 +1432,19 @@ private:
//===--------------------------------------------------------------------===//
public:
- /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
- /// garbage collection attribute.
- ///
+ /// \brief Return one of the GCNone, Weak or Strong Objective-C garbage
+ /// collection attributes.
Qualifiers::GC getObjCGCAttrKind(QualType Ty) const;
- /// areCompatibleVectorTypes - Return true if the given vector types
- /// are of the same unqualified type or if they are equivalent to the same
- /// GCC vector type, ignoring whether they are target-specific (AltiVec or
- /// Neon) types.
+ /// \brief Return true if the given vector types are of the same unqualified
+ /// type or if they are equivalent to the same GCC vector type.
+ ///
+ /// \note This ignores whether they are target-specific (AltiVec or Neon)
+ /// types.
bool areCompatibleVectorTypes(QualType FirstVec, QualType SecondVec);
- /// isObjCNSObjectType - Return true if this is an NSObject object with
- /// its NSObject attribute set.
+ /// \brief Return true if this is an \c NSObject object with its \c NSObject
+ /// attribute set.
static bool isObjCNSObjectType(QualType Ty) {
return Ty->isObjCNSObjectType();
}
@@ -1378,19 +1453,17 @@ public:
// Type Sizing and Analysis
//===--------------------------------------------------------------------===//
- /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
- /// scalar floating point type.
+ /// \brief Return the APFloat 'semantics' for the specified scalar floating
+ /// point type.
const llvm::fltSemantics &getFloatTypeSemantics(QualType T) const;
- /// getTypeInfo - Get the size and alignment of the specified complete type in
- /// bits.
+ /// \brief Get the size and alignment of the specified complete type in bits.
std::pair<uint64_t, unsigned> getTypeInfo(const Type *T) const;
std::pair<uint64_t, unsigned> getTypeInfo(QualType T) const {
return getTypeInfo(T.getTypePtr());
}
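The pair unpacks as sketched below (T assumed); getTypeSize and getTypeAlign
below are thin wrappers over it:

    std::pair<uint64_t, unsigned> Info = Ctx.getTypeInfo(T);
    uint64_t SizeInBits  = Info.first;   // == Ctx.getTypeSize(T)
    unsigned AlignInBits = Info.second;  // == Ctx.getTypeAlign(T)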
- /// getTypeSize - Return the size of the specified type, in bits. This method
- /// does not work on incomplete types.
+ /// \brief Return the size of the specified (complete) type \p T, in bits.
uint64_t getTypeSize(QualType T) const {
return getTypeInfo(T).first;
}
@@ -1398,24 +1471,24 @@ public:
return getTypeInfo(T).first;
}
- /// getCharWidth - Return the size of the character type, in bits
+ /// \brief Return the size of the character type, in bits.
uint64_t getCharWidth() const {
return getTypeSize(CharTy);
}
- /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+ /// \brief Convert a size in bits to a size in characters.
CharUnits toCharUnitsFromBits(int64_t BitSize) const;
- /// toBits - Convert a size in characters to a size in bits.
+ /// \brief Convert a size in characters to a size in bits.
int64_t toBits(CharUnits CharSize) const;
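These two conversions are inverses; a sketch on a target where
getCharWidth() is 8:

    CharUnits Four = Ctx.toCharUnitsFromBits(32); // 4 characters
    int64_t Bits   = Ctx.toBits(Four);            // back to 32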
- /// getTypeSizeInChars - Return the size of the specified type, in characters.
- /// This method does not work on incomplete types.
+ /// \brief Return the size of the specified (complete) type \p T, in
+ /// characters.
CharUnits getTypeSizeInChars(QualType T) const;
CharUnits getTypeSizeInChars(const Type *T) const;
- /// getTypeAlign - Return the ABI-specified alignment of a type, in bits.
- /// This method does not work on incomplete types.
+ /// \brief Return the ABI-specified alignment of a (complete) type \p T, in
+ /// bits.
unsigned getTypeAlign(QualType T) const {
return getTypeInfo(T).second;
}
@@ -1423,49 +1496,59 @@ public:
return getTypeInfo(T).second;
}
- /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
- /// characters. This method does not work on incomplete types.
+ /// \brief Return the ABI-specified alignment of a (complete) type \p T, in
+ /// characters.
CharUnits getTypeAlignInChars(QualType T) const;
CharUnits getTypeAlignInChars(const Type *T) const;
+
+ /// \brief Return the size of a type, in characters; if the type is a
+ /// record, its data size is returned.
+ std::pair<CharUnits, CharUnits> getTypeInfoDataSizeInChars(QualType T) const;
std::pair<CharUnits, CharUnits> getTypeInfoInChars(const Type *T) const;
std::pair<CharUnits, CharUnits> getTypeInfoInChars(QualType T) const;
- /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
- /// type for the current target in bits. This can be different than the ABI
- /// alignment in cases where it is beneficial for performance to overalign
- /// a data type.
+ /// \brief Return the "preferred" alignment of the specified type \p T for
+ /// the current target, in bits.
+ ///
+ /// This can be different than the ABI alignment in cases where it is
+ /// beneficial for performance to overalign a data type.
unsigned getPreferredTypeAlign(const Type *T) const;
- /// getDeclAlign - Return a conservative estimate of the alignment of
- /// the specified decl. Note that bitfields do not have a valid alignment, so
- /// this method will assert on them.
- /// If @p RefAsPointee, references are treated like their underlying type
+ /// \brief Return a conservative estimate of the alignment of the specified
+ /// decl \p D.
+ ///
+ /// \pre \p D must not be a bitfield type, as bitfields do not have a valid
+ /// alignment.
+ ///
+ /// If \p RefAsPointee, references are treated like their underlying type
/// (for alignof), else they're treated like pointers (for CodeGen).
CharUnits getDeclAlign(const Decl *D, bool RefAsPointee = false) const;
- /// getASTRecordLayout - Get or compute information about the layout of the
- /// specified record (struct/union/class), which indicates its size and field
+ /// \brief Get or compute information about the layout of the specified
+ /// record (struct/union/class) \p D, which indicates its size and field
/// position information.
const ASTRecordLayout &getASTRecordLayout(const RecordDecl *D) const;
- /// getASTObjCInterfaceLayout - Get or compute information about the
- /// layout of the specified Objective-C interface.
+ /// \brief Get or compute information about the layout of the specified
+ /// Objective-C interface.
const ASTRecordLayout &getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D)
const;
void DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
bool Simple = false) const;
- /// getASTObjCImplementationLayout - Get or compute information about
- /// the layout of the specified Objective-C implementation. This may
- /// differ from the interface if synthesized ivars are present.
+ /// \brief Get or compute information about the layout of the specified
+ /// Objective-C implementation.
+ ///
+ /// This may differ from the interface if synthesized ivars are present.
const ASTRecordLayout &
getASTObjCImplementationLayout(const ObjCImplementationDecl *D) const;
- /// getKeyFunction - Get the key function for the given record decl, or NULL
- /// if there isn't one. The key function is, according to the Itanium C++ ABI
- /// section 5.2.3:
+ /// \brief Get the key function for the given record decl, or NULL if there
+ /// isn't one.
+ ///
+ /// The key function is, according to the Itanium C++ ABI section 5.2.3:
///
/// ...the first non-pure virtual function that is not inline at the point
/// of class definition.
@@ -1489,12 +1572,14 @@ public:
// Type Operators
//===--------------------------------------------------------------------===//
- /// getCanonicalType - Return the canonical (structural) type corresponding to
- /// the specified potentially non-canonical type. The non-canonical version
- /// of a type may have many "decorated" versions of types. Decorators can
- /// include typedefs, 'typeof' operators, etc. The returned type is guaranteed
- /// to be free of any of these, allowing two canonical types to be compared
- /// for exact equality with a simple pointer comparison.
+ /// \brief Return the canonical (structural) type corresponding to the
+ /// specified potentially non-canonical type \p T.
+ ///
+ /// The non-canonical version of a type may have many "decorated" versions of
+ /// types. Decorators can include typedefs, 'typeof' operators, etc. The
+ /// returned type is guaranteed to be free of any of these, allowing two
+ /// canonical types to be compared for exact equality with a simple pointer
+ /// comparison.
CanQualType getCanonicalType(QualType T) const {
return CanQualType::CreateUnsafe(T.getCanonicalType());
}
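A sketch of the pointer-equality guarantee (TTy assumed to be the QualType
of a 'typedef int T;' sugar type):

    CanQualType C1 = Ctx.getCanonicalType(TTy);       // sugar stripped
    CanQualType C2 = Ctx.getCanonicalType(Ctx.IntTy);
    // C1 == C2 holds, via a simple pointer comparison.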
@@ -1503,21 +1588,23 @@ public:
return T->getCanonicalTypeInternal().getTypePtr();
}
- /// getCanonicalParamType - Return the canonical parameter type
- /// corresponding to the specific potentially non-canonical one.
+ /// \brief Return the canonical parameter type corresponding to the specific
+ /// potentially non-canonical one.
+ ///
/// Qualifiers are stripped off, functions are turned into function
/// pointers, and arrays decay one level into pointers.
CanQualType getCanonicalParamType(QualType T) const;
- /// \brief Determine whether the given types are equivalent.
+ /// \brief Determine whether the given types \p T1 and \p T2 are equivalent.
bool hasSameType(QualType T1, QualType T2) const {
return getCanonicalType(T1) == getCanonicalType(T2);
}
- /// \brief Returns this type as a completely-unqualified array type,
- /// capturing the qualifiers in Quals. This will remove the minimal amount of
- /// sugaring from the types, similar to the behavior of
- /// QualType::getUnqualifiedType().
+ /// \brief Return this type as a completely-unqualified array type,
+ /// capturing the qualifiers in \p Quals.
+ ///
+ /// This will remove the minimal amount of sugaring from the types, similar
+ /// to the behavior of QualType::getUnqualifiedType().
///
/// \param T is the qualified type, which may be an ArrayType
///
@@ -1628,15 +1715,16 @@ public:
return dyn_cast_or_null<DependentSizedArrayType>(getAsArrayType(T));
}
- /// getBaseElementType - Returns the innermost element type of an array type.
+ /// \brief Return the innermost element type of an array type.
+ ///
/// For example, this returns "int" for int[m][n].
QualType getBaseElementType(const ArrayType *VAT) const;
- /// getBaseElementType - Returns the innermost element type of a type
- /// (which needn't actually be an array type).
+ /// \brief Return the innermost element type of a type (which needn't
+ /// actually be an array type).
QualType getBaseElementType(QualType QT) const;
- /// getConstantArrayElementCount - Returns number of constant array elements.
+ /// \brief Return number of constant array elements.
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const;
/// \brief Perform adjustment on the parameter type of a function.
@@ -1651,21 +1739,22 @@ public:
/// cv-qualifiers.
QualType getSignatureParameterType(QualType T) const;
- /// getArrayDecayedType - Return the properly qualified result of decaying the
- /// specified array type to a pointer. This operation is non-trivial when
- /// handling typedefs etc. The canonical type of "T" must be an array type,
- /// this returns a pointer to a properly qualified element of the array.
+ /// \brief Return the properly qualified result of decaying the specified
+ /// array type to a pointer.
+ ///
+ /// This operation is non-trivial when handling typedefs etc. The canonical
+ /// type of \p T must be an array type, this returns a pointer to a properly
+ /// qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
QualType getArrayDecayedType(QualType T) const;
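A sketch (ArrTy assumed to be 'const int [4]'; element qualifiers survive
the decay):

    QualType P = Ctx.getArrayDecayedType(ArrTy); // 'const int *'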
- /// getPromotedIntegerType - Returns the type that Promotable will
- /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
- /// integer type.
+ /// \brief Return the type that \p PromotableType will promote to: C99
+ /// 6.3.1.1p2, assuming that \p PromotableType is a promotable integer type.
QualType getPromotedIntegerType(QualType PromotableType) const;
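A sketch of the C99 6.3.1.1p2 promotion:

    QualType P = Ctx.getPromotedIntegerType(Ctx.CharTy); // 'int'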
- /// \brief Recurses in pointer/array types until it finds an objc retainable
- /// type and returns its ownership.
+ /// \brief Recurses in pointer/array types until it finds an Objective-C
+ /// retainable type and returns its ownership.
Qualifiers::ObjCLifetime getInnerObjCOwnership(QualType T) const;
/// \brief Whether this is a promotable bitfield reference according
@@ -1675,21 +1764,24 @@ public:
/// promotion occurs.
QualType isPromotableBitField(Expr *E) const;
- /// getIntegerTypeOrder - Returns the highest ranked integer type:
- /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
- /// LHS < RHS, return -1.
+ /// \brief Return the highest ranked integer type, see C99 6.3.1.8p1.
+ ///
+ /// If \p LHS > \p RHS, returns 1. If \p LHS == \p RHS, returns 0. If
+ /// \p LHS < \p RHS, returns -1.
int getIntegerTypeOrder(QualType LHS, QualType RHS) const;
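The contract, spelled out (assuming the usual ranks of 'int' and 'long'):

    // Ctx.getIntegerTypeOrder(Ctx.LongTy, Ctx.IntTy)  ->  1
    // Ctx.getIntegerTypeOrder(Ctx.IntTy,  Ctx.IntTy)  ->  0
    // Ctx.getIntegerTypeOrder(Ctx.IntTy,  Ctx.LongTy) -> -1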
- /// getFloatingTypeOrder - Compare the rank of the two specified floating
- /// point types, ignoring the domain of the type (i.e. 'double' ==
- /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
- /// LHS < RHS, return -1.
+ /// \brief Compare the rank of the two specified floating point types,
+ /// ignoring the domain of the type (i.e. 'double' == '_Complex double').
+ ///
+ /// If \p LHS > \p RHS, returns 1. If \p LHS == \p RHS, returns 0. If
+ /// \p LHS < \p RHS, returns -1.
int getFloatingTypeOrder(QualType LHS, QualType RHS) const;
- /// getFloatingTypeOfSizeWithinDomain - Returns a real floating
- /// point or a complex type (based on typeDomain/typeSize).
- /// 'typeDomain' is a real floating point or complex type.
- /// 'typeSize' is a real floating point or complex type.
+ /// \brief Return a real floating point or a complex type (based on
+ /// \p typeDomain/\p typeSize).
+ ///
+ /// \param typeDomain a real floating point or complex type.
+ /// \param typeSize a real floating point or complex type.
QualType getFloatingTypeOfSizeWithinDomain(QualType typeSize,
QualType typeDomain) const;
@@ -1787,7 +1879,7 @@ public:
// Per C99 6.2.5p6, for every signed integer type, there is a corresponding
// unsigned integer type. This method takes a signed type, and returns the
// corresponding unsigned integer type.
- QualType getCorrespondingUnsignedType(QualType T);
+ QualType getCorrespondingUnsignedType(QualType T) const;
//===--------------------------------------------------------------------===//
// Type Iterators.
@@ -1805,8 +1897,8 @@ public:
// Integer Values
//===--------------------------------------------------------------------===//
- /// MakeIntValue - Make an APSInt of the appropriate width and
- /// signedness for the given \arg Value and integer \arg Type.
+ /// \brief Make an APSInt of the appropriate width and signedness for the
+ /// given \p Value and integer \p Type.
llvm::APSInt MakeIntValue(uint64_t Value, QualType Type) const {
llvm::APSInt Res(getIntWidth(Type),
!Type->isSignedIntegerOrEnumerationType());
@@ -1816,12 +1908,14 @@ public:
bool isSentinelNullExpr(const Expr *E);
- /// \brief Get the implementation of ObjCInterfaceDecl,or NULL if none exists.
+ /// \brief Get the implementation of the ObjCInterfaceDecl \p D, or NULL if
+ /// none exists.
ObjCImplementationDecl *getObjCImplementation(ObjCInterfaceDecl *D);
- /// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
+ /// \brief Get the implementation of the ObjCCategoryDecl \p D, or NULL if
+ /// none exists.
ObjCCategoryImplDecl *getObjCImplementation(ObjCCategoryDecl *D);
- /// \brief returns true if there is at least one \@implementation in TU.
+ /// \brief Return true if there is at least one \@implementation in the TU.
bool AnyObjCImplementation() {
return !ObjCImpls.empty();
}
@@ -1834,7 +1928,7 @@ public:
ObjCCategoryImplDecl *ImplD);
/// \brief Get the duplicate declaration of a ObjCMethod in the same
- /// interface, or null if non exists.
+ /// interface, or null if none exists.
const ObjCMethodDecl *getObjCMethodRedeclaration(
const ObjCMethodDecl *MD) const {
return ObjCMethodRedecls.lookup(MD);
@@ -1846,16 +1940,16 @@ public:
ObjCMethodRedecls[MD] = Redecl;
}
- /// \brief Returns the objc interface that \arg ND belongs to if it is a
- /// objc method/property/ivar etc. that is part of an interface,
+ /// \brief Returns the Objective-C interface that \p ND belongs to if it is
+ /// an Objective-C method/property/ivar etc. that is part of an interface,
/// otherwise returns null.
ObjCInterfaceDecl *getObjContainingInterface(NamedDecl *ND) const;
/// \brief Set the copy initialization expression of a block var decl.
void setBlockVarCopyInits(VarDecl*VD, Expr* Init);
- /// \brief Get the copy initialization expression of VarDecl,or NULL if
- /// none exists.
- Expr *getBlockVarCopyInits(const VarDecl*VD);
+ /// \brief Get the copy initialization expression of the VarDecl \p VD, or
+ /// NULL if none exists.
+ Expr *getBlockVarCopyInits(const VarDecl* VD);
/// \brief Allocate an uninitialized TypeSourceInfo.
///
@@ -1882,9 +1976,9 @@ public:
/// \brief Add a deallocation callback that will be invoked when the
/// ASTContext is destroyed.
///
- /// \brief Callback A callback function that will be invoked on destruction.
+ /// \param Callback A callback function that will be invoked on destruction.
///
- /// \brief Data Pointer data that will be provided to the callback function
+ /// \param Data Pointer data that will be provided to the callback function
/// when it is called.
void AddDeallocation(void (*Callback)(void*), void *Data);
@@ -1957,8 +2051,8 @@ public:
static unsigned NumImplicitDestructorsDeclared;
private:
- ASTContext(const ASTContext&); // DO NOT IMPLEMENT
- void operator=(const ASTContext&); // DO NOT IMPLEMENT
+ ASTContext(const ASTContext &) LLVM_DELETED_FUNCTION;
+ void operator=(const ASTContext &) LLVM_DELETED_FUNCTION;
public:
/// \brief Initialize built-in types.
@@ -1974,7 +2068,7 @@ public:
private:
void InitBuiltinType(CanQualType &R, BuiltinType::Kind K);
- // Return the ObjC type encoding for a given type.
+ // Return the Objective-C type encoding for a given type.
void getObjCEncodingForTypeImpl(QualType t, std::string &S,
bool ExpandPointedToStructures,
bool ExpandStructures,
@@ -2017,13 +2111,13 @@ private:
void ReleaseDeclContextMaps();
};
-/// @brief Utility function for constructing a nullary selector.
+/// \brief Utility function for constructing a nullary selector.
static inline Selector GetNullarySelector(StringRef name, ASTContext& Ctx) {
IdentifierInfo* II = &Ctx.Idents.get(name);
return Ctx.Selectors.getSelector(0, &II);
}
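A sketch of both helpers, this one and the unary variant defined just below:

    Selector S0 = GetNullarySelector("description", Ctx); // 'description'
    Selector S1 = GetUnarySelector("setName", Ctx);       // 'setName:'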
-/// @brief Utility function for constructing an unary selector.
+/// \brief Utility function for constructing a unary selector.
static inline Selector GetUnarySelector(StringRef name, ASTContext& Ctx) {
IdentifierInfo* II = &Ctx.Idents.get(name);
return Ctx.Selectors.getSelector(1, &II);
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h b/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
index cb038a0..56d1526 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
@@ -13,6 +13,8 @@
#ifndef LLVM_CLANG_AST_ASTMUTATIONLISTENER_H
#define LLVM_CLANG_AST_ASTMUTATIONLISTENER_H
+#include "clang/Basic/SourceLocation.h"
+
namespace clang {
class Decl;
class DeclContext;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Attr.h b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
index b17bd48..12a9855 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Attr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
@@ -107,9 +107,6 @@ public:
// Pretty print this attribute.
virtual void printPretty(llvm::raw_ostream &OS,
const PrintingPolicy &Policy) const = 0;
-
- // Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *) { return true; }
};
class InheritableAttr : public Attr {
@@ -125,7 +122,6 @@ public:
static bool classof(const Attr *A) {
return A->getKind() <= attr::LAST_INHERITABLE;
}
- static bool classof(const InheritableAttr *) { return true; }
};
class InheritableParamAttr : public InheritableAttr {
@@ -139,7 +135,6 @@ public:
static bool classof(const Attr *A) {
return A->getKind() <= attr::LAST_INHERITABLE_PARAM;
}
- static bool classof(const InheritableParamAttr *) { return true; }
};
#include "clang/AST/Attrs.inc"
diff --git a/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def b/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def
index 34e6fc5..ba322fb 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def
+++ b/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def
@@ -206,6 +206,8 @@ PLACEHOLDER_TYPE(PseudoObject, PseudoObjectTy)
// unknown type, most notably explicit casts.
PLACEHOLDER_TYPE(UnknownAny, UnknownAnyTy)
+PLACEHOLDER_TYPE(BuiltinFn, BuiltinFnTy)
+
// The type of a cast which, in ARC, would normally require a
// __bridge, but which might be okay depending on the immediate
// context.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h b/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
index ee6eba7..87bdbe0 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
@@ -19,7 +19,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include <list>
@@ -271,15 +271,14 @@ struct UniqueVirtualMethod {
/// pair is the virtual method that overrides it (including the
/// subobject in which that virtual function occurs).
class OverridingMethods {
- llvm::DenseMap<unsigned, SmallVector<UniqueVirtualMethod, 4> >
- Overrides;
+ typedef SmallVector<UniqueVirtualMethod, 4> ValuesT;
+ typedef llvm::MapVector<unsigned, ValuesT> MapType;
+ MapType Overrides;
public:
// Iterate over the set of subobjects that have overriding methods.
- typedef llvm::DenseMap<unsigned, SmallVector<UniqueVirtualMethod, 4> >
- ::iterator iterator;
- typedef llvm::DenseMap<unsigned, SmallVector<UniqueVirtualMethod, 4> >
- ::const_iterator const_iterator;
+ typedef MapType::iterator iterator;
+ typedef MapType::const_iterator const_iterator;
iterator begin() { return Overrides.begin(); }
const_iterator begin() const { return Overrides.begin(); }
iterator end() { return Overrides.end(); }
@@ -357,8 +356,8 @@ public:
/// 0 represents the virtual base class subobject of that type, while
/// subobject numbers greater than 0 refer to non-virtual base class
/// subobjects of that type.
-class CXXFinalOverriderMap
- : public llvm::DenseMap<const CXXMethodDecl *, OverridingMethods> { };
+class CXXFinalOverriderMap
+ : public llvm::MapVector<const CXXMethodDecl *, OverridingMethods> { };
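The switch from DenseMap to MapVector here (and in OverridingMethods above)
trades some memory for deterministic, insertion-ordered iteration, which
DenseMap does not guarantee. A minimal sketch of the property:

    llvm::MapVector<unsigned, int> M;
    M.insert(std::make_pair(2, 20));
    M.insert(std::make_pair(1, 10));
    // Iteration visits (2,20) then (1,10) -- insertion order, not hash order.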
/// \brief A set of all the primary bases for a class.
class CXXIndirectPrimaryBaseSet
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
index 6cce888..ea307bf 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
@@ -276,6 +276,7 @@ public:
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isMemberFunctionPointerType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isClassType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isStructureType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isInterfaceType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isStructureOrClassType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isUnionType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isComplexIntegerType)
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h b/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h
index 5be3582..12e74b3 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h
@@ -164,8 +164,8 @@ namespace clang {
QuantityType getQuantity() const { return Quantity; }
/// RoundUpToAlignment - Returns the next integer (mod 2**64) that is
- /// greater than or equal to this quantity and is a multiple of \arg
- /// Align. Align must be non-zero.
+ /// greater than or equal to this quantity and is a multiple of \p Align.
+ /// Align must be non-zero.
CharUnits RoundUpToAlignment(const CharUnits &Align) {
return CharUnits(llvm::RoundUpToAlignment(Quantity,
Align.Quantity));
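A sketch of the rounding behavior:

    CharUnits Ten  = CharUnits::fromQuantity(10);
    CharUnits Four = CharUnits::fromQuantity(4);
    Ten.RoundUpToAlignment(Four);  // 12
    Four.RoundUpToAlignment(Four); // 4 (already aligned)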
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Comment.h b/contrib/llvm/tools/clang/include/clang/AST/Comment.h
index 01aaac3..316a180 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Comment.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Comment.h
@@ -16,6 +16,8 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/AST/Type.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
@@ -25,7 +27,7 @@ class ParmVarDecl;
class TemplateParameterList;
namespace comments {
-
+class FullComment;
/// Any part of the comment.
/// Abstract class.
class Comment {
@@ -74,8 +76,9 @@ protected:
unsigned : NumInlineContentCommentBits;
unsigned RenderKind : 2;
+ unsigned CommandID : 8;
};
- enum { NumInlineCommandCommentBits = NumInlineContentCommentBits + 1 };
+ enum { NumInlineCommandCommentBits = NumInlineContentCommentBits + 10 };
class HTMLStartTagCommentBitfields {
friend class HTMLStartTagComment;
@@ -101,10 +104,19 @@ protected:
};
enum { NumParagraphCommentBits = NumCommentBits + 2 };
+ class BlockCommandCommentBitfields {
+ friend class BlockCommandComment;
+
+ unsigned : NumCommentBits;
+
+ unsigned CommandID : 8;
+ };
+ enum { NumBlockCommandCommentBits = NumCommentBits + 8 };
+
class ParamCommandCommentBitfields {
friend class ParamCommandComment;
- unsigned : NumCommentBits;
+ unsigned : NumBlockCommandCommentBits;
/// Parameter passing direction, see ParamCommandComment::PassDirection.
unsigned Direction : 2;
@@ -112,7 +124,7 @@ protected:
/// True if direction was specified explicitly in the comment.
unsigned IsDirectionExplicit : 1;
};
- enum { NumParamCommandCommentBits = 11 };
+ enum { NumParamCommandCommentBits = NumBlockCommandCommentBits + 3 };
union {
CommentBitfields CommentBits;
@@ -121,6 +133,7 @@ protected:
InlineCommandCommentBitfields InlineCommandCommentBits;
HTMLStartTagCommentBitfields HTMLStartTagCommentBits;
ParagraphCommentBitfields ParagraphCommentBits;
+ BlockCommandCommentBitfields BlockCommandCommentBits;
ParamCommandCommentBitfields ParamCommandCommentBits;
};
@@ -158,10 +171,9 @@ public:
const char *getCommentKindName() const;
LLVM_ATTRIBUTE_USED void dump() const;
- LLVM_ATTRIBUTE_USED void dump(SourceManager &SM) const;
- void dump(llvm::raw_ostream &OS, SourceManager *SM) const;
-
- static bool classof(const Comment *) { return true; }
+ LLVM_ATTRIBUTE_USED void dump(const ASTContext &Context) const;
+ void dump(llvm::raw_ostream &OS, const CommandTraits *Traits,
+ const SourceManager *SM) const;
SourceRange getSourceRange() const LLVM_READONLY { return Range; }
@@ -204,8 +216,6 @@ public:
C->getCommentKind() <= LastInlineContentCommentConstant;
}
- static bool classof(const InlineContentComment *) { return true; }
-
void addTrailingNewline() {
InlineContentCommentBits.HasTrailingNewline = 1;
}
@@ -232,8 +242,6 @@ public:
return C->getCommentKind() == TextCommentKind;
}
- static bool classof(const TextComment *) { return true; }
-
child_iterator child_begin() const { return NULL; }
child_iterator child_end() const { return NULL; }
@@ -273,35 +281,35 @@ public:
};
protected:
- /// Command name.
- StringRef Name;
-
/// Command arguments.
llvm::ArrayRef<Argument> Args;
public:
InlineCommandComment(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name,
+ unsigned CommandID,
RenderKind RK,
llvm::ArrayRef<Argument> Args) :
InlineContentComment(InlineCommandCommentKind, LocBegin, LocEnd),
- Name(Name), Args(Args) {
+ Args(Args) {
InlineCommandCommentBits.RenderKind = RK;
+ InlineCommandCommentBits.CommandID = CommandID;
}
static bool classof(const Comment *C) {
return C->getCommentKind() == InlineCommandCommentKind;
}
- static bool classof(const InlineCommandComment *) { return true; }
-
child_iterator child_begin() const { return NULL; }
child_iterator child_end() const { return NULL; }
- StringRef getCommandName() const {
- return Name;
+ unsigned getCommandID() const {
+ return InlineCommandCommentBits.CommandID;
+ }
+
+ StringRef getCommandName(const CommandTraits &Traits) const {
+ return Traits.getCommandInfo(getCommandID())->Name;
}
SourceRange getCommandNameRange() const {
@@ -352,8 +360,6 @@ public:
C->getCommentKind() <= LastHTMLTagCommentConstant;
}
- static bool classof(const HTMLTagComment *) { return true; }
-
StringRef getTagName() const LLVM_READONLY { return TagName; }
SourceRange getTagNameSourceRange() const LLVM_READONLY {
@@ -419,8 +425,6 @@ public:
return C->getCommentKind() == HTMLStartTagCommentKind;
}
- static bool classof(const HTMLStartTagComment *) { return true; }
-
child_iterator child_begin() const { return NULL; }
child_iterator child_end() const { return NULL; }
@@ -476,8 +480,6 @@ public:
return C->getCommentKind() == HTMLEndTagCommentKind;
}
- static bool classof(const HTMLEndTagComment *) { return true; }
-
child_iterator child_begin() const { return NULL; }
child_iterator child_end() const { return NULL; }
@@ -498,8 +500,6 @@ public:
return C->getCommentKind() >= FirstBlockContentCommentConstant &&
C->getCommentKind() <= LastBlockContentCommentConstant;
}
-
- static bool classof(const BlockContentComment *) { return true; }
};
/// A single paragraph that contains inline content.
@@ -529,8 +529,6 @@ public:
return C->getCommentKind() == ParagraphCommentKind;
}
- static bool classof(const ParagraphComment *) { return true; }
-
child_iterator child_begin() const {
return reinterpret_cast<child_iterator>(Content.begin());
}
@@ -566,9 +564,6 @@ public:
};
protected:
- /// Command name.
- StringRef Name;
-
/// Word-like arguments.
llvm::ArrayRef<Argument> Args;
@@ -578,21 +573,21 @@ protected:
BlockCommandComment(CommentKind K,
SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) :
+ unsigned CommandID) :
BlockContentComment(K, LocBegin, LocEnd),
- Name(Name),
Paragraph(NULL) {
- setLocation(getCommandNameRange().getBegin());
+ setLocation(getCommandNameBeginLoc());
+ BlockCommandCommentBits.CommandID = CommandID;
}
public:
BlockCommandComment(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) :
+ unsigned CommandID) :
BlockContentComment(BlockCommandCommentKind, LocBegin, LocEnd),
- Name(Name),
Paragraph(NULL) {
- setLocation(getCommandNameRange().getBegin());
+ setLocation(getCommandNameBeginLoc());
+ BlockCommandCommentBits.CommandID = CommandID;
}
static bool classof(const Comment *C) {
@@ -600,8 +595,6 @@ public:
C->getCommentKind() <= LastBlockCommandCommentConstant;
}
- static bool classof(const BlockCommandComment *) { return true; }
-
child_iterator child_begin() const {
return reinterpret_cast<child_iterator>(&Paragraph);
}
@@ -610,12 +603,21 @@ public:
return reinterpret_cast<child_iterator>(&Paragraph + 1);
}
- StringRef getCommandName() const {
- return Name;
+ unsigned getCommandID() const {
+ return BlockCommandCommentBits.CommandID;
}
- SourceRange getCommandNameRange() const {
- return SourceRange(getLocStart().getLocWithOffset(1),
+ StringRef getCommandName(const CommandTraits &Traits) const {
+ return Traits.getCommandInfo(getCommandID())->Name;
+ }
+
+ SourceLocation getCommandNameBeginLoc() const {
+ return getLocStart().getLocWithOffset(1);
+ }
+
+ SourceRange getCommandNameRange(const CommandTraits &Traits) const {
+ StringRef Name = getCommandName(Traits);
+ return SourceRange(getCommandNameBeginLoc(),
getLocStart().getLocWithOffset(1 + Name.size()));
}
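The offset arithmetic above, traced for a concrete command (a sketch):

    // For '\brief' with getLocStart() at the leading '\' or '@':
    //   begin = getLocStart().getLocWithOffset(1)     // skips the sigil
    //   end   = getLocStart().getLocWithOffset(1 + 5) // past "brief"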
@@ -667,8 +669,9 @@ public:
ParamCommandComment(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) :
- BlockCommandComment(ParamCommandCommentKind, LocBegin, LocEnd, Name),
+ unsigned CommandID) :
+ BlockCommandComment(ParamCommandCommentKind, LocBegin, LocEnd,
+ CommandID),
ParamIndex(InvalidParamIndex) {
ParamCommandCommentBits.Direction = In;
ParamCommandCommentBits.IsDirectionExplicit = false;
@@ -678,8 +681,6 @@ public:
return C->getCommentKind() == ParamCommandCommentKind;
}
- static bool classof(const ParamCommandComment *) { return true; }
-
enum PassDirection {
In,
Out,
@@ -705,7 +706,9 @@ public:
return getNumArgs() > 0;
}
- StringRef getParamName() const {
+ StringRef getParamName(const FullComment *FC) const;
+
+ StringRef getParamNameAsWritten() const {
return Args[0].Text;
}
@@ -748,21 +751,21 @@ private:
public:
TParamCommandComment(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) :
- BlockCommandComment(TParamCommandCommentKind, LocBegin, LocEnd, Name)
+ unsigned CommandID) :
+ BlockCommandComment(TParamCommandCommentKind, LocBegin, LocEnd, CommandID)
{ }
static bool classof(const Comment *C) {
return C->getCommentKind() == TParamCommandCommentKind;
}
- static bool classof(const TParamCommandComment *) { return true; }
-
bool hasParamName() const {
return getNumArgs() > 0;
}
- StringRef getParamName() const {
+ StringRef getParamName(const FullComment *FC) const;
+
+ StringRef getParamNameAsWritten() const {
return Args[0].Text;
}
@@ -807,8 +810,6 @@ public:
return C->getCommentKind() == VerbatimBlockLineCommentKind;
}
- static bool classof(const VerbatimBlockLineComment *) { return true; }
-
child_iterator child_begin() const { return NULL; }
child_iterator child_end() const { return NULL; }
@@ -830,17 +831,15 @@ protected:
public:
VerbatimBlockComment(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) :
+ unsigned CommandID) :
BlockCommandComment(VerbatimBlockCommentKind,
- LocBegin, LocEnd, Name)
+ LocBegin, LocEnd, CommandID)
{ }
static bool classof(const Comment *C) {
return C->getCommentKind() == VerbatimBlockCommentKind;
}
- static bool classof(const VerbatimBlockComment *) { return true; }
-
child_iterator child_begin() const {
return reinterpret_cast<child_iterator>(Lines.begin());
}
@@ -882,12 +881,12 @@ protected:
public:
VerbatimLineComment(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name,
+ unsigned CommandID,
SourceLocation TextBegin,
StringRef Text) :
BlockCommandComment(VerbatimLineCommentKind,
LocBegin, LocEnd,
- Name),
+ CommandID),
Text(Text),
TextBegin(TextBegin)
{ }
@@ -896,8 +895,6 @@ public:
return C->getCommentKind() == VerbatimLineCommentKind;
}
- static bool classof(const VerbatimLineComment *) { return true; }
-
child_iterator child_begin() const { return NULL; }
child_iterator child_end() const { return NULL; }
@@ -913,23 +910,34 @@ public:
/// Information about the declaration, useful to clients of FullComment.
struct DeclInfo {
- /// Declaration the comment is attached to. Should not be NULL.
- const Decl *ThisDecl;
-
- /// Parameters that can be referenced by \\param if \c ThisDecl is something
+ /// Declaration the comment is actually attached to (in the source).
+ /// Should not be NULL.
+ const Decl *CommentDecl;
+
+ /// CurrentDecl is the declaration with which the FullComment is associated.
+ ///
+ /// It can be different from \c CommentDecl. It happens when we decide
+ /// that the comment originally attached to \c CommentDecl is fine for
+ /// \c CurrentDecl too (for example, for a redeclaration or an overrider of
+ /// \c CommentDecl).
+ ///
+ /// The information in the DeclInfo corresponds to CurrentDecl.
+ const Decl *CurrentDecl;
+
+ /// Parameters that can be referenced by \\param if \c CommentDecl is something
/// that we consider a "function".
ArrayRef<const ParmVarDecl *> ParamVars;
- /// Function result type if \c ThisDecl is something that we consider
+ /// Function result type if \c CommentDecl is something that we consider
/// a "function".
QualType ResultType;
- /// Template parameters that can be referenced by \\tparam if \c ThisDecl is
+ /// Template parameters that can be referenced by \\tparam if \c CommentDecl is
/// a template (\c IsTemplateDecl or \c IsTemplatePartialSpecialization is
/// true).
const TemplateParameterList *TemplateParameters;
- /// A simplified description of \c ThisDecl kind that should be good enough
+ /// A simplified description of \c CommentDecl kind that should be good enough
/// for documentation rendering purposes.
enum DeclKind {
/// Everything else not explicitly mentioned below.
@@ -942,7 +950,9 @@ struct DeclInfo {
/// \li member function,
/// \li member function template,
/// \li member function template specialization,
- /// \li ObjC method.
+ /// \li ObjC method,
+ /// \li a typedef for a function pointer, member function pointer,
+ /// ObjC block.
FunctionKind,
/// Something that we consider a "class":
@@ -968,7 +978,7 @@ struct DeclInfo {
EnumKind
};
- /// What kind of template specialization \c ThisDecl is.
+ /// What kind of template specialization \c CommentDecl is.
enum TemplateDeclKind {
NotTemplate,
Template,
@@ -976,24 +986,24 @@ struct DeclInfo {
TemplatePartialSpecialization
};
- /// If false, only \c ThisDecl is valid.
+ /// If false, only \c CommentDecl is valid.
unsigned IsFilled : 1;
- /// Simplified kind of \c ThisDecl, see\c DeclKind enum.
+ /// Simplified kind of \c CommentDecl, see \c DeclKind enum.
unsigned Kind : 3;
- /// Is \c ThisDecl a template declaration.
+ /// Is \c CommentDecl a template declaration.
unsigned TemplateKind : 2;
- /// Is \c ThisDecl an ObjCMethodDecl.
+ /// Is \c CommentDecl an ObjCMethodDecl.
unsigned IsObjCMethod : 1;
- /// Is \c ThisDecl a non-static member function of C++ class or
+ /// Is \c CommentDecl a non-static member function of C++ class or
/// instance method of ObjC class.
/// Can be true only if \c IsFunctionDecl is true.
unsigned IsInstanceMethod : 1;
- /// Is \c ThisDecl a static member function of C++ class or
+ /// Is \c CommentDecl a static member function of C++ class or
/// class method of ObjC class.
/// Can be true only if \c IsFunctionDecl is true.
unsigned IsClassMethod : 1;
@@ -1012,7 +1022,6 @@ struct DeclInfo {
/// A full comment attached to a declaration, contains block content.
class FullComment : public Comment {
llvm::ArrayRef<BlockContentComment *> Blocks;
-
DeclInfo *ThisDeclInfo;
public:
@@ -1031,27 +1040,31 @@ public:
return C->getCommentKind() == FullCommentKind;
}
- static bool classof(const FullComment *) { return true; }
-
child_iterator child_begin() const {
return reinterpret_cast<child_iterator>(Blocks.begin());
}
child_iterator child_end() const {
- return reinterpret_cast<child_iterator>(Blocks.end());
+ return reinterpret_cast<child_iterator>(Blocks.end());
}
const Decl *getDecl() const LLVM_READONLY {
- return ThisDeclInfo->ThisDecl;
+ return ThisDeclInfo->CommentDecl;
}
-
+
const DeclInfo *getDeclInfo() const LLVM_READONLY {
if (!ThisDeclInfo->IsFilled)
ThisDeclInfo->fill();
return ThisDeclInfo;
}
+
+ DeclInfo *getThisDeclInfo() const LLVM_READONLY {
+ return ThisDeclInfo;
+ }
+
+ llvm::ArrayRef<BlockContentComment *> getBlocks() const { return Blocks; }
+
};
-
} // end namespace comments
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h b/contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h
index 003c337..5d50886 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h
@@ -44,8 +44,7 @@ class BriefParser {
public:
BriefParser(Lexer &L, const CommandTraits &Traits);
- /// Return \\brief paragraph, if it exists; otherwise return the first
- /// paragraph.
+ /// Return the best "brief description" we can find.
std::string Parse();
};
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h b/contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h
index 5f0269a..6d44c70 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h
@@ -19,136 +19,132 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
namespace clang {
namespace comments {
-/// This class provides informaiton about commands that can be used
-/// in comments.
-class CommandTraits {
-public:
- CommandTraits() { }
+/// \brief Information about a single command.
+///
+/// When reordering, adding or removing members please update the corresponding
+/// TableGen backend.
+struct CommandInfo {
+ unsigned getID() const {
+ return ID;
+ }
+
+ const char *Name;
+
+ /// Name of the command that ends the verbatim block.
+ const char *EndCommandName;
+
+ unsigned ID : 8;
+
+ /// Number of word-like arguments for a given block command, except for
+ /// \\param and \\tparam commands -- these have special argument parsers.
+ unsigned NumArgs : 4;
- /// \brief Check if a given command is a verbatim-like block command.
+ /// True if this command is an inline command (of any kind).
+ unsigned IsInlineCommand : 1;
+
+ /// True if this command is a block command (of any kind).
+ unsigned IsBlockCommand : 1;
+
+ /// True if this command is introducing a brief documentation
+ /// paragraph (\\brief or an alias).
+ unsigned IsBriefCommand : 1;
+
+ /// True if this command is \\returns or an alias.
+ unsigned IsReturnsCommand : 1;
+
+ /// True if this command is introducing documentation for a function
+ /// parameter (\\param or an alias).
+ unsigned IsParamCommand : 1;
+
+ /// True if this command is introducing documentation for
+ /// a template parameter (\\tparam or an alias).
+ unsigned IsTParamCommand : 1;
+
+ /// True if this command is \\deprecated or an alias.
+ unsigned IsDeprecatedCommand : 1;
+
+ /// True if we don't want to warn about this command being passed an empty
+ /// paragraph. Meaningful only for block commands.
+ unsigned IsEmptyParagraphAllowed : 1;
+
+ /// \brief True if this command is a verbatim-like block command.
///
/// A verbatim-like block command eats every character (except line starting
/// decorations) until matching end command is seen or comment end is hit.
- ///
- /// \param StartName name of the command that starts the verbatim block.
- /// \param [out] EndName name of the command that ends the verbatim block.
- ///
- /// \returns true if a given command is a verbatim block command.
- bool isVerbatimBlockCommand(StringRef StartName, StringRef &EndName) const;
+ unsigned IsVerbatimBlockCommand : 1;
- /// \brief Register a new verbatim block command.
- void addVerbatimBlockCommand(StringRef StartName, StringRef EndName);
+ /// \brief True if this command is an end command for a verbatim-like block.
+ unsigned IsVerbatimBlockEndCommand : 1;
- /// \brief Check if a given command is a verbatim line command.
+ /// \brief True if this command is a verbatim line command.
///
/// A verbatim-like line command eats everything until a newline is seen or
/// comment end is hit.
- bool isVerbatimLineCommand(StringRef Name) const;
+ unsigned IsVerbatimLineCommand : 1;
- /// \brief Check if a given command is a command that contains a declaration
- /// for the entity being documented.
+ /// \brief True if this command contains a declaration for the entity being
+ /// documented.
///
/// For example:
/// \code
/// \fn void f(int a);
/// \endcode
- bool isDeclarationCommand(StringRef Name) const;
+ unsigned IsDeclarationCommand : 1;
- /// \brief Register a new verbatim line command.
- void addVerbatimLineCommand(StringRef Name);
+ /// \brief True if this command is unknown. This \c CommandInfo object was
+ /// created during parsing.
+ unsigned IsUnknownCommand : 1;
+};
- /// \brief Check if a given command is a block command (of any kind).
- bool isBlockCommand(StringRef Name) const;
+/// This class provides information about commands that can be used
+/// in comments.
+class CommandTraits {
+public:
+ CommandTraits(llvm::BumpPtrAllocator &Allocator);
- /// \brief Check if a given command is introducing documentation for
- /// a function parameter (\\param or an alias).
- bool isParamCommand(StringRef Name) const;
+ /// \returns a CommandInfo object for a given command name or
+ /// NULL if no CommandInfo object exists for this command.
+ const CommandInfo *getCommandInfoOrNULL(StringRef Name) const;
- /// \brief Check if a given command is introducing documentation for
- /// a template parameter (\\tparam or an alias).
- bool isTParamCommand(StringRef Name) const;
+ const CommandInfo *getCommandInfo(StringRef Name) const {
+ if (const CommandInfo *Info = getCommandInfoOrNULL(Name))
+ return Info;
+ llvm_unreachable("the command should be known");
+ }
- /// \brief Check if a given command is introducing a brief documentation
- /// paragraph (\\brief or an alias).
- bool isBriefCommand(StringRef Name) const;
+ const CommandInfo *getCommandInfo(unsigned CommandID) const;
- /// \brief Check if a given command is \\brief or an alias.
- bool isReturnsCommand(StringRef Name) const;
+ const CommandInfo *registerUnknownCommand(StringRef CommandName);
- /// \returns the number of word-like arguments for a given block command,
- /// except for \\param and \\tparam commands -- these have special argument
- /// parsers.
- unsigned getBlockCommandNumArgs(StringRef Name) const;
+ /// \returns a CommandInfo object for a given command name or
+ /// NULL if \c Name is not a builtin command.
+ static const CommandInfo *getBuiltinCommandInfo(StringRef Name);
- /// \brief Check if a given command is a inline command (of any kind).
- bool isInlineCommand(StringRef Name) const;
+ /// \returns a CommandInfo object for a given command ID or
+ /// NULL if \c CommandID is not a builtin command.
+ static const CommandInfo *getBuiltinCommandInfo(unsigned CommandID);
private:
- struct VerbatimBlockCommand {
- StringRef StartName;
- StringRef EndName;
- };
-
- typedef SmallVector<VerbatimBlockCommand, 4> VerbatimBlockCommandVector;
+ CommandTraits(const CommandTraits &) LLVM_DELETED_FUNCTION;
+ void operator=(const CommandTraits &) LLVM_DELETED_FUNCTION;
- /// Registered additional verbatim-like block commands.
- VerbatimBlockCommandVector VerbatimBlockCommands;
+ const CommandInfo *getRegisteredCommandInfo(StringRef Name) const;
+ const CommandInfo *getRegisteredCommandInfo(unsigned CommandID) const;
- struct VerbatimLineCommand {
- StringRef Name;
- };
+ unsigned NextID;
- typedef SmallVector<VerbatimLineCommand, 4> VerbatimLineCommandVector;
+ /// Allocator for CommandInfo objects.
+ llvm::BumpPtrAllocator &Allocator;
- /// Registered verbatim-like line commands.
- VerbatimLineCommandVector VerbatimLineCommands;
+ SmallVector<CommandInfo *, 4> RegisteredCommands;
};
-inline bool CommandTraits::isBlockCommand(StringRef Name) const {
- return isBriefCommand(Name) || isReturnsCommand(Name) ||
- isParamCommand(Name) || isTParamCommand(Name) ||
- llvm::StringSwitch<bool>(Name)
- .Case("author", true)
- .Case("authors", true)
- .Case("pre", true)
- .Case("post", true)
- .Default(false);
-}
-
-inline bool CommandTraits::isParamCommand(StringRef Name) const {
- return Name == "param";
-}
-
-inline bool CommandTraits::isTParamCommand(StringRef Name) const {
- return Name == "tparam" || // Doxygen
- Name == "templatefield"; // HeaderDoc
-}
-
-inline bool CommandTraits::isBriefCommand(StringRef Name) const {
- return Name == "brief" || Name == "short";
-}
-
-inline bool CommandTraits::isReturnsCommand(StringRef Name) const {
- return Name == "returns" || Name == "return" || Name == "result";
-}
-
-inline unsigned CommandTraits::getBlockCommandNumArgs(StringRef Name) const {
- return 0;
-}
-
-inline bool CommandTraits::isInlineCommand(StringRef Name) const {
- return llvm::StringSwitch<bool>(Name)
- .Case("b", true)
- .Cases("c", "p", true)
- .Cases("a", "e", "em", true)
- .Default(false);
-}
-
} // end namespace comments
} // end namespace clang
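
The net effect of this rewrite is that per-command queries move from string comparisons to flag tests on a CommandInfo record. A minimal sketch of the new call pattern, assuming a caller that owns an allocator (the command name "param" is just an example):

#include "clang/AST/CommentCommandTraits.h"

void sketch(llvm::BumpPtrAllocator &Allocator) {
  clang::comments::CommandTraits Traits(Allocator);

  // Look up a builtin or previously registered command; NULL if unknown.
  const clang::comments::CommandInfo *Info =
      Traits.getCommandInfoOrNULL("param");
  if (!Info)
    Info = Traits.registerUnknownCommand("param"); // IsUnknownCommand == 1

  // Flag tests replace the removed isParamCommand()/isBlockCommand() helpers.
  if (Info->IsParamCommand) {
    // ... parse \param arguments ...
  }
}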
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentCommands.td b/contrib/llvm/tools/clang/include/clang/AST/CommentCommands.td
new file mode 100644
index 0000000..3d8bad8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentCommands.td
@@ -0,0 +1,156 @@
+class Command<string name> {
+ string Name = name;
+ string EndCommandName = "";
+
+ int NumArgs = 0;
+
+ bit IsInlineCommand = 0;
+
+ bit IsBlockCommand = 0;
+ bit IsBriefCommand = 0;
+ bit IsReturnsCommand = 0;
+ bit IsParamCommand = 0;
+ bit IsTParamCommand = 0;
+ bit IsDeprecatedCommand = 0;
+
+ bit IsEmptyParagraphAllowed = 0;
+
+ bit IsVerbatimBlockCommand = 0;
+ bit IsVerbatimBlockEndCommand = 0;
+ bit IsVerbatimLineCommand = 0;
+ bit IsDeclarationCommand = 0;
+}
+
+class InlineCommand<string name> : Command<name> {
+ let IsInlineCommand = 1;
+}
+
+class BlockCommand<string name> : Command<name> {
+ let IsBlockCommand = 1;
+}
+
+class VerbatimBlockCommand<string name> : Command<name> {
+ let EndCommandName = name;
+ let IsVerbatimBlockCommand = 1;
+}
+
+multiclass VerbatimBlockCommand<string name, string endCommandName> {
+ def Begin : Command<name> {
+ let EndCommandName = endCommandName;
+ let IsVerbatimBlockCommand = 1;
+ }
+
+ def End : Command<endCommandName> {
+ let IsVerbatimBlockEndCommand = 1;
+ }
+}
+
+class VerbatimLineCommand<string name> : Command<name> {
+ let IsVerbatimLineCommand = 1;
+}
+
+class DeclarationVerbatimLineCommand<string name> :
+ VerbatimLineCommand<name> {
+ let IsDeclarationCommand = 1;
+}
+
+def B : InlineCommand<"b">;
+def C : InlineCommand<"c">;
+def P : InlineCommand<"p">;
+def A : InlineCommand<"a">;
+def E : InlineCommand<"e">;
+def Em : InlineCommand<"em">;
+
+def Brief : BlockCommand<"brief"> { let IsBriefCommand = 1; }
+def Short : BlockCommand<"short"> { let IsBriefCommand = 1; }
+
+def Returns : BlockCommand<"returns"> { let IsReturnsCommand = 1; }
+def Return : BlockCommand<"return"> { let IsReturnsCommand = 1; }
+def Result : BlockCommand<"result"> { let IsReturnsCommand = 1; }
+
+def Param : BlockCommand<"param"> { let IsParamCommand = 1; }
+
+// Doxygen
+def Tparam : BlockCommand<"tparam"> { let IsTParamCommand = 1; }
+
+// HeaderDoc
+def Templatefield : BlockCommand<"templatefield"> { let IsTParamCommand = 1; }
+
+def Deprecated : BlockCommand<"deprecated"> {
+ let IsEmptyParagraphAllowed = 1;
+ let IsDeprecatedCommand = 1;
+}
+
+def Author : BlockCommand<"author">;
+def Authors : BlockCommand<"authors">;
+def Bug : BlockCommand<"bug">;
+def Copyright : BlockCommand<"copyright">;
+def Date : BlockCommand<"date">;
+def Details : BlockCommand<"details">;
+def Invariant : BlockCommand<"invariant">;
+def Note : BlockCommand<"note">;
+def Post : BlockCommand<"post">;
+def Pre : BlockCommand<"pre">;
+def Remark : BlockCommand<"remark">;
+def Remarks : BlockCommand<"remarks">;
+def Sa : BlockCommand<"sa">;
+def See : BlockCommand<"see">;
+def Since : BlockCommand<"since">;
+def Todo : BlockCommand<"todo">;
+def Version : BlockCommand<"version">;
+def Warning : BlockCommand<"warning">;
+
+defm Code : VerbatimBlockCommand<"code", "endcode">;
+defm Verbatim : VerbatimBlockCommand<"verbatim", "endverbatim">;
+defm Htmlonly : VerbatimBlockCommand<"htmlonly", "endhtmlonly">;
+defm Latexonly : VerbatimBlockCommand<"latexonly", "endlatexonly">;
+defm Xmlonly : VerbatimBlockCommand<"xmlonly", "endxmlonly">;
+defm Manonly : VerbatimBlockCommand<"manonly", "endmanonly">;
+defm Rtfonly : VerbatimBlockCommand<"rtfonly", "endrtfonly">;
+
+defm Dot : VerbatimBlockCommand<"dot", "enddot">;
+defm Msc : VerbatimBlockCommand<"msc", "endmsc">;
+
+// These commands have special support in lexer.
+def FDollar : VerbatimBlockCommand<"f$">; // Inline LaTeX formula
+defm FBracket : VerbatimBlockCommand<"f[", "f]">; // Displayed LaTeX formula
+defm FBrace : VerbatimBlockCommand<"f{", "f}">; // LaTeX environment
+
+def Defgroup : VerbatimLineCommand<"defgroup">;
+def Ingroup : VerbatimLineCommand<"ingroup">;
+def Addtogroup : VerbatimLineCommand<"addtogroup">;
+def Weakgroup : VerbatimLineCommand<"weakgroup">;
+def Name : VerbatimLineCommand<"name">;
+
+def Section : VerbatimLineCommand<"section">;
+def Subsection : VerbatimLineCommand<"subsection">;
+def Subsubsection : VerbatimLineCommand<"subsubsection">;
+def Paragraph : VerbatimLineCommand<"paragraph">;
+
+def Mainpage : VerbatimLineCommand<"mainpage">;
+def Subpage : VerbatimLineCommand<"subpage">;
+def Ref : VerbatimLineCommand<"ref">;
+
+// Doxygen commands.
+def Fn : DeclarationVerbatimLineCommand<"fn">;
+def Namespace : DeclarationVerbatimLineCommand<"namespace">;
+def Overload : DeclarationVerbatimLineCommand<"overload">;
+def Property : DeclarationVerbatimLineCommand<"property">;
+def Typedef : DeclarationVerbatimLineCommand<"typedef">;
+def Var : DeclarationVerbatimLineCommand<"var">;
+
+// HeaderDoc commands.
+def Class : DeclarationVerbatimLineCommand<"class">;
+def Interface : DeclarationVerbatimLineCommand<"interface">;
+def Protocol : DeclarationVerbatimLineCommand<"protocol">;
+def Category : DeclarationVerbatimLineCommand<"category">;
+def Template : DeclarationVerbatimLineCommand<"template">;
+def Function : DeclarationVerbatimLineCommand<"function">;
+def Method : DeclarationVerbatimLineCommand<"method">;
+def Callback : DeclarationVerbatimLineCommand<"callback">;
+def Const : DeclarationVerbatimLineCommand<"const">;
+def Constant : DeclarationVerbatimLineCommand<"constant">;
+def Struct : DeclarationVerbatimLineCommand<"struct">;
+def Union : DeclarationVerbatimLineCommand<"union">;
+def Enum : DeclarationVerbatimLineCommand<"enum">;
+
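
CommentCommands.td only declares the data; the TableGen backend mentioned in the CommandInfo comment above turns it into a C++ table. One plausible, purely illustrative shape for the emitted array (field order mirrors struct CommandInfo; the IDs and rows are hypothetical):

static const CommandInfo Commands[] = {
  // Name       End        ID  Args  13 flag bits in declaration order
  { "brief",    "",         0,  0,   0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
  { "code",     "endcode",  1,  0,   0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 },
  { "endcode",  "",         2,  0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 }
  // ... one row per def/defm above.
};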
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentHTMLTags.td b/contrib/llvm/tools/clang/include/clang/AST/CommentHTMLTags.td
new file mode 100644
index 0000000..f98e32d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentHTMLTags.td
@@ -0,0 +1,54 @@
+class Tag<string spelling> {
+ string Spelling = spelling;
+ bit EndTagOptional = 0;
+ bit EndTagForbidden = 0;
+}
+
+def Em : Tag<"em">;
+def Strong : Tag<"strong">;
+def Tt : Tag<"tt">;
+def I : Tag<"i">;
+def B : Tag<"b">;
+def Big : Tag<"big">;
+def Small : Tag<"small">;
+def Strike : Tag<"strike">;
+def S : Tag<"s">;
+def U : Tag<"u">;
+def Font : Tag<"font">;
+def A : Tag<"a">;
+def Hr : Tag<"hr"> { let EndTagForbidden = 1; }
+def Div : Tag<"div">;
+def Span : Tag<"span">;
+def H1 : Tag<"h1">;
+def H2 : Tag<"h2">;
+def H3 : Tag<"h3">;
+def H4 : Tag<"h4">;
+def H5 : Tag<"h5">;
+def H6 : Tag<"h6">;
+def Code : Tag<"code">;
+def Blockquote : Tag<"blockquote">;
+def Sub : Tag<"sub">;
+def Sup : Tag<"sup">;
+def Img : Tag<"img"> { let EndTagForbidden = 1; }
+def P : Tag<"p"> { let EndTagOptional = 1; }
+def Br : Tag<"br"> { let EndTagForbidden = 1; }
+def Pre : Tag<"pre">;
+def Ins : Tag<"ins">;
+def Del : Tag<"del">;
+def Ul : Tag<"ul">;
+def Ol : Tag<"ol">;
+def Li : Tag<"li"> { let EndTagOptional = 1; }
+def Dl : Tag<"dl">;
+def Dt : Tag<"dt"> { let EndTagOptional = 1; }
+def Dd : Tag<"dd"> { let EndTagOptional = 1; }
+def Table : Tag<"table">;
+def Caption : Tag<"caption">;
+def Thead : Tag<"thead"> { let EndTagOptional = 1; }
+def Tfoot : Tag<"tfoot"> { let EndTagOptional = 1; }
+def Tbody : Tag<"tbody"> { let EndTagOptional = 1; }
+def Colgroup : Tag<"colgroup"> { let EndTagOptional = 1; }
+def Col : Tag<"col"> { let EndTagForbidden = 1; }
+def Tr : Tag<"tr"> { let EndTagOptional = 1; }
+def Th : Tag<"th"> { let EndTagOptional = 1; }
+def Td : Tag<"td"> { let EndTagOptional = 1; }
+
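
Each tag carries two bits that replace the end-tag string switches removed from Sema further below. A hedged sketch of the kind of table a backend could emit from these defs (illustrative only, not the actual generated interface):

struct TagInfo {
  const char *Spelling;
  bool EndTagOptional;
  bool EndTagForbidden;
};

static const TagInfo Tags[] = {
  { "p",  true,  false },  // </p> may be omitted
  { "br", false, true  },  // </br> is invalid
  { "em", false, false }   // </em> is required
};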
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h b/contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h
index 7a24b11..f263697 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h
@@ -26,6 +26,7 @@ namespace comments {
class Lexer;
class TextTokenRetokenizer;
+struct CommandInfo;
class CommandTraits;
namespace tok {
@@ -33,7 +34,8 @@ enum TokenKind {
eof,
newline,
text,
- command,
+ unknown_command, // Command that does not have an ID.
+ command, // Command with an ID.
verbatim_block_begin,
verbatim_block_line,
verbatim_block_end,
@@ -49,11 +51,6 @@ enum TokenKind {
};
} // end namespace tok
-class CommentOptions {
-public:
- bool Markdown;
-};
-
/// \brief Comment token.
class Token {
friend class Lexer;
@@ -70,8 +67,14 @@ class Token {
unsigned Length;
/// Contains text value associated with a token.
- const char *TextPtr1;
- unsigned TextLen1;
+ const char *TextPtr;
+
+ /// Integer value associated with a token.
+ ///
+ /// If the token is a known command, this contains the command ID, and
+ /// TextPtr is unused (the command spelling can be found with
+ /// CommandTraits). Otherwise, it contains the length of the string that
+ /// starts at TextPtr.
+ unsigned IntVal;
public:
SourceLocation getLocation() const LLVM_READONLY { return Loc; }
@@ -94,113 +97,120 @@ public:
StringRef getText() const LLVM_READONLY {
assert(is(tok::text));
- return StringRef(TextPtr1, TextLen1);
+ return StringRef(TextPtr, IntVal);
}
void setText(StringRef Text) {
assert(is(tok::text));
- TextPtr1 = Text.data();
- TextLen1 = Text.size();
+ TextPtr = Text.data();
+ IntVal = Text.size();
+ }
+
+ StringRef getUnknownCommandName() const LLVM_READONLY {
+ assert(is(tok::unknown_command));
+ return StringRef(TextPtr, IntVal);
+ }
+
+ void setUnknownCommandName(StringRef Name) {
+ assert(is(tok::unknown_command));
+ TextPtr = Name.data();
+ IntVal = Name.size();
}
- StringRef getCommandName() const LLVM_READONLY {
+ unsigned getCommandID() const LLVM_READONLY {
assert(is(tok::command));
- return StringRef(TextPtr1, TextLen1);
+ return IntVal;
}
- void setCommandName(StringRef Name) {
+ void setCommandID(unsigned ID) {
assert(is(tok::command));
- TextPtr1 = Name.data();
- TextLen1 = Name.size();
+ IntVal = ID;
}
- StringRef getVerbatimBlockName() const LLVM_READONLY {
+ unsigned getVerbatimBlockID() const LLVM_READONLY {
assert(is(tok::verbatim_block_begin) || is(tok::verbatim_block_end));
- return StringRef(TextPtr1, TextLen1);
+ return IntVal;
}
- void setVerbatimBlockName(StringRef Name) {
+ void setVerbatimBlockID(unsigned ID) {
assert(is(tok::verbatim_block_begin) || is(tok::verbatim_block_end));
- TextPtr1 = Name.data();
- TextLen1 = Name.size();
+ IntVal = ID;
}
StringRef getVerbatimBlockText() const LLVM_READONLY {
assert(is(tok::verbatim_block_line));
- return StringRef(TextPtr1, TextLen1);
+ return StringRef(TextPtr, IntVal);
}
void setVerbatimBlockText(StringRef Text) {
assert(is(tok::verbatim_block_line));
- TextPtr1 = Text.data();
- TextLen1 = Text.size();
+ TextPtr = Text.data();
+ IntVal = Text.size();
}
- /// Returns the name of verbatim line command.
- StringRef getVerbatimLineName() const LLVM_READONLY {
+ unsigned getVerbatimLineID() const LLVM_READONLY {
assert(is(tok::verbatim_line_name));
- return StringRef(TextPtr1, TextLen1);
+ return IntVal;
}
- void setVerbatimLineName(StringRef Name) {
+ void setVerbatimLineID(unsigned ID) {
assert(is(tok::verbatim_line_name));
- TextPtr1 = Name.data();
- TextLen1 = Name.size();
+ IntVal = ID;
}
StringRef getVerbatimLineText() const LLVM_READONLY {
assert(is(tok::verbatim_line_text));
- return StringRef(TextPtr1, TextLen1);
+ return StringRef(TextPtr, IntVal);
}
void setVerbatimLineText(StringRef Text) {
assert(is(tok::verbatim_line_text));
- TextPtr1 = Text.data();
- TextLen1 = Text.size();
+ TextPtr = Text.data();
+ IntVal = Text.size();
}
StringRef getHTMLTagStartName() const LLVM_READONLY {
assert(is(tok::html_start_tag));
- return StringRef(TextPtr1, TextLen1);
+ return StringRef(TextPtr, IntVal);
}
void setHTMLTagStartName(StringRef Name) {
assert(is(tok::html_start_tag));
- TextPtr1 = Name.data();
- TextLen1 = Name.size();
+ TextPtr = Name.data();
+ IntVal = Name.size();
}
StringRef getHTMLIdent() const LLVM_READONLY {
assert(is(tok::html_ident));
- return StringRef(TextPtr1, TextLen1);
+ return StringRef(TextPtr, IntVal);
}
void setHTMLIdent(StringRef Name) {
assert(is(tok::html_ident));
- TextPtr1 = Name.data();
- TextLen1 = Name.size();
+ TextPtr = Name.data();
+ IntVal = Name.size();
}
StringRef getHTMLQuotedString() const LLVM_READONLY {
assert(is(tok::html_quoted_string));
- return StringRef(TextPtr1, TextLen1);
+ return StringRef(TextPtr, IntVal);
}
void setHTMLQuotedString(StringRef Str) {
assert(is(tok::html_quoted_string));
- TextPtr1 = Str.data();
- TextLen1 = Str.size();
+ TextPtr = Str.data();
+ IntVal = Str.size();
}
StringRef getHTMLTagEndName() const LLVM_READONLY {
assert(is(tok::html_end_tag));
- return StringRef(TextPtr1, TextLen1);
+ return StringRef(TextPtr, IntVal);
}
void setHTMLTagEndName(StringRef Name) {
assert(is(tok::html_end_tag));
- TextPtr1 = Name.data();
- TextLen1 = Name.size();
+ TextPtr = Name.data();
+ IntVal = Name.size();
}
void dump(const Lexer &L, const SourceManager &SM) const;
@@ -209,8 +219,8 @@ public:
/// \brief Comment lexer.
class Lexer {
private:
- Lexer(const Lexer&); // DO NOT IMPLEMENT
- void operator=(const Lexer&); // DO NOT IMPLEMENT
+ Lexer(const Lexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const Lexer &) LLVM_DELETED_FUNCTION;
/// Allocator for strings that are semantic values of tokens and have to be
/// computed (for example, resolved decimal character references).
@@ -221,7 +231,6 @@ private:
const char *const BufferStart;
const char *const BufferEnd;
SourceLocation FileLoc;
- CommentOptions CommOpts;
const char *BufferPtr;
@@ -286,8 +295,8 @@ private:
Result.setKind(Kind);
Result.setLength(TokLen);
#ifndef NDEBUG
- Result.TextPtr1 = "<UNSET>";
- Result.TextLen1 = 7;
+ Result.TextPtr = "<UNSET>";
+ Result.IntVal = 7;
#endif
BufferPtr = TokEnd;
}
@@ -314,13 +323,14 @@ private:
void setupAndLexVerbatimBlock(Token &T,
const char *TextBegin,
- char Marker, StringRef EndName);
+ char Marker, const CommandInfo *Info);
void lexVerbatimBlockFirstLine(Token &T);
void lexVerbatimBlockBody(Token &T);
- void setupAndLexVerbatimLine(Token &T, const char *TextBegin);
+ void setupAndLexVerbatimLine(Token &T, const char *TextBegin,
+ const CommandInfo *Info);
void lexVerbatimLineText(Token &T);
@@ -336,7 +346,7 @@ private:
public:
Lexer(llvm::BumpPtrAllocator &Allocator, const CommandTraits &Traits,
- SourceLocation FileLoc, const CommentOptions &CommOpts,
+ SourceLocation FileLoc,
const char *BufferStart, const char *BufferEnd);
void lex(Token &T);
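
With TextPtr1/TextLen1 collapsed into TextPtr/IntVal, a token now carries either a string (text, unknown commands) or a bare command ID (known commands). A sketch of the consumer side, assuming Token::is() behaves as the asserts above suggest:

void handleCommand(const Token &Tok, const CommandTraits &Traits) {
  if (Tok.is(tok::command)) {
    // Known command: only the ID is stored; the spelling is recovered
    // from the traits table.
    const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
    StringRef Spelling = Info->Name;
    (void)Spelling;
  } else if (Tok.is(tok::unknown_command)) {
    // Unknown command: the spelling itself lives in TextPtr/IntVal.
    StringRef Name = Tok.getUnknownCommandName();
    (void)Name;
  }
}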
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentParser.h b/contrib/llvm/tools/clang/include/clang/AST/CommentParser.h
index 0390799..19e1d57 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CommentParser.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentParser.h
@@ -28,8 +28,8 @@ class CommandTraits;
/// Doxygen comment parser.
class Parser {
- Parser(const Parser&); // DO NOT IMPLEMENT
- void operator=(const Parser&); // DO NOT IMPLEMENT
+ Parser(const Parser &) LLVM_DELETED_FUNCTION;
+ void operator=(const Parser &) LLVM_DELETED_FUNCTION;
friend class TextTokenRetokenizer;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentSema.h b/contrib/llvm/tools/clang/include/clang/AST/CommentSema.h
index e1756ca..0340b3c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CommentSema.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentSema.h
@@ -25,13 +25,14 @@
namespace clang {
class Decl;
class SourceMgr;
+class Preprocessor;
namespace comments {
class CommandTraits;
class Sema {
- Sema(const Sema&); // DO NOT IMPLEMENT
- void operator=(const Sema&); // DO NOT IMPLEMENT
+ Sema(const Sema &) LLVM_DELETED_FUNCTION;
+ void operator=(const Sema &) LLVM_DELETED_FUNCTION;
/// Allocator for AST nodes.
llvm::BumpPtrAllocator &Allocator;
@@ -41,18 +42,13 @@ class Sema {
DiagnosticsEngine &Diags;
- const CommandTraits &Traits;
+ CommandTraits &Traits;
+
+ const Preprocessor *PP;
/// Information about the declaration this comment is attached to.
DeclInfo *ThisDeclInfo;
- /// Comment AST nodes that correspond to \c ParamVars for which we have
- /// found a \\param command or NULL if no documentation was found so far.
- ///
- /// Has correct size and contains valid values if \c DeclInfo->IsFilled is
- /// true.
- llvm::SmallVector<ParamCommandComment *, 8> ParamVarDocs;
-
/// Comment AST nodes that correspond to parameter names in
/// \c TemplateParameters.
///
@@ -75,7 +71,8 @@ class Sema {
public:
Sema(llvm::BumpPtrAllocator &Allocator, const SourceManager &SourceMgr,
- DiagnosticsEngine &Diags, const CommandTraits &Traits);
+ DiagnosticsEngine &Diags, CommandTraits &Traits,
+ const Preprocessor *PP);
void setDecl(const Decl *D);
@@ -96,7 +93,7 @@ public:
BlockCommandComment *actOnBlockCommandStart(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name);
+ unsigned CommandID);
void actOnBlockCommandArgs(BlockCommandComment *Command,
ArrayRef<BlockCommandComment::Argument> Args);
@@ -106,7 +103,7 @@ public:
ParamCommandComment *actOnParamCommandStart(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name);
+ unsigned CommandID);
void actOnParamCommandDirectionArg(ParamCommandComment *Command,
SourceLocation ArgLocBegin,
@@ -123,7 +120,7 @@ public:
TParamCommandComment *actOnTParamCommandStart(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name);
+ unsigned CommandID);
void actOnTParamCommandParamNameArg(TParamCommandComment *Command,
SourceLocation ArgLocBegin,
@@ -135,25 +132,29 @@ public:
InlineCommandComment *actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
- StringRef CommandName);
+ unsigned CommandID);
InlineCommandComment *actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
- StringRef CommandName,
+ unsigned CommandID,
SourceLocation ArgLocBegin,
SourceLocation ArgLocEnd,
StringRef Arg);
InlineContentComment *actOnUnknownCommand(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name);
+ StringRef CommandName);
+
+ InlineContentComment *actOnUnknownCommand(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ unsigned CommandID);
TextComment *actOnText(SourceLocation LocBegin,
SourceLocation LocEnd,
StringRef Text);
VerbatimBlockComment *actOnVerbatimBlockStart(SourceLocation Loc,
- StringRef Name);
+ unsigned CommandID);
VerbatimBlockLineComment *actOnVerbatimBlockLine(SourceLocation Loc,
StringRef Text);
@@ -164,7 +165,7 @@ public:
ArrayRef<VerbatimBlockLineComment *> Lines);
VerbatimLineComment *actOnVerbatimLine(SourceLocation LocBegin,
- StringRef Name,
+ unsigned CommandID,
SourceLocation TextBegin,
StringRef Text);
@@ -190,6 +191,12 @@ public:
/// used only once per comment, e.g., \\brief and \\returns.
void checkBlockCommandDuplicate(const BlockCommandComment *Command);
+ void checkDeprecatedCommand(const BlockCommandComment *Comment);
+
+ /// Resolve parameter names to parameter indexes in the function
+ /// declaration. Emit diagnostics about unknown parameters.
+ void resolveParamCommandIndexes(const FullComment *FC);
+
bool isFunctionDecl();
bool isTemplateOrSpecialization();
@@ -218,9 +225,6 @@ public:
InlineCommandComment::RenderKind
getInlineCommandRenderKind(StringRef Name) const;
-
- bool isHTMLEndTagOptional(StringRef Name);
- bool isHTMLEndTagForbidden(StringRef Name);
};
} // end namespace comments
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Decl.h b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
index e9f25b3..087a585 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
@@ -88,7 +88,6 @@ public:
static TranslationUnitDecl *Create(ASTContext &C);
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TranslationUnitDecl *D) { return true; }
static bool classofKind(Kind K) { return K == TranslationUnit; }
static DeclContext *castToDeclContext(const TranslationUnitDecl *D) {
return static_cast<DeclContext *>(const_cast<TranslationUnitDecl*>(D));
@@ -214,16 +213,19 @@ public:
bool isCXXInstanceMember() const;
class LinkageInfo {
- Linkage linkage_;
- Visibility visibility_;
- bool explicit_;
+ uint8_t linkage_ : 2;
+ uint8_t visibility_ : 2;
+ uint8_t explicit_ : 1;
void setVisibility(Visibility V, bool E) { visibility_ = V; explicit_ = E; }
public:
LinkageInfo() : linkage_(ExternalLinkage), visibility_(DefaultVisibility),
explicit_(false) {}
LinkageInfo(Linkage L, Visibility V, bool E)
- : linkage_(L), visibility_(V), explicit_(E) {}
+ : linkage_(L), visibility_(V), explicit_(E) {
+ assert(linkage() == L && visibility() == V && visibilityExplicit() == E &&
+ "Enum truncated!");
+ }
static LinkageInfo external() {
return LinkageInfo();
@@ -238,8 +240,8 @@ public:
return LinkageInfo(NoLinkage, DefaultVisibility, false);
}
- Linkage linkage() const { return linkage_; }
- Visibility visibility() const { return visibility_; }
+ Linkage linkage() const { return (Linkage)linkage_; }
+ Visibility visibility() const { return (Visibility)visibility_; }
bool visibilityExplicit() const { return explicit_; }
void setLinkage(Linkage L) { linkage_ = L; }
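
Packing LinkageInfo into five bits is only safe while every enumerator fits its field width; the new constructor assert checks this by round-tripping the values through the accessors. A self-contained illustration of the failure mode it guards against (hypothetical struct, not clang code):

#include <cassert>
#include <cstdint>

struct PackedExample {
  uint8_t Value : 2;  // holds 0..3 only
};

void demonstrateTruncation() {
  PackedExample P;
  P.Value = 5;                                // silently stores 5 & 3 == 1
  assert(P.Value == 5 && "Enum truncated!");  // fires in a debug build
}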
@@ -337,7 +339,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const NamedDecl *D) { return true; }
static bool classofKind(Kind K) { return K >= firstNamed && K <= lastNamed; }
};
@@ -383,7 +384,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const LabelDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Label; }
};
@@ -509,7 +509,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const NamespaceDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Namespace; }
static DeclContext *castToDeclContext(const NamespaceDecl *D) {
return static_cast<DeclContext *>(const_cast<NamespaceDecl*>(D));
@@ -545,7 +544,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ValueDecl *D) { return true; }
static bool classofKind(Kind K) { return K >= firstValue && K <= lastValue; }
};
@@ -578,8 +576,8 @@ struct QualifierInfo {
private:
// Copy constructor and copy assignment are disabled.
- QualifierInfo(const QualifierInfo&);
- QualifierInfo& operator=(const QualifierInfo&);
+ QualifierInfo(const QualifierInfo&) LLVM_DELETED_FUNCTION;
+ QualifierInfo& operator=(const QualifierInfo&) LLVM_DELETED_FUNCTION;
};
/// \brief Represents a ValueDecl that came out of a declarator.
@@ -666,7 +664,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const DeclaratorDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstDeclarator && K <= lastDeclarator;
}
@@ -712,7 +709,7 @@ public:
typedef clang::StorageClass StorageClass;
/// getStorageClassSpecifierString - Return the string used to
- /// specify the storage class \arg SC.
+ /// specify the storage class \p SC.
///
/// It is illegal to call this function with SC == None.
static const char *getStorageClassSpecifierString(StorageClass SC);
@@ -1208,7 +1205,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const VarDecl *D) { return true; }
static bool classofKind(Kind K) { return K >= firstVar && K <= lastVar; }
};
@@ -1229,7 +1225,6 @@ public:
}
// Implement isa/cast/dyncast/etc.
- static bool classof(const ImplicitParamDecl *D) { return true; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == ImplicitParam; }
};
@@ -1399,7 +1394,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ParmVarDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ParmVar; }
private:
@@ -2070,7 +2064,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const FunctionDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstFunction && K <= lastFunction;
}
@@ -2204,7 +2197,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const FieldDecl *D) { return true; }
static bool classofKind(Kind K) { return K >= firstField && K <= lastField; }
friend class ASTDeclReader;
@@ -2243,7 +2235,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const EnumConstantDecl *D) { return true; }
static bool classofKind(Kind K) { return K == EnumConstant; }
friend class StmtIteratorBase;
@@ -2287,7 +2278,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const IndirectFieldDecl *D) { return true; }
static bool classofKind(Kind K) { return K == IndirectField; }
friend class ASTDeclReader;
};
@@ -2334,7 +2324,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TypeDecl *D) { return true; }
static bool classofKind(Kind K) { return K >= firstType && K <= lastType; }
};
@@ -2390,7 +2379,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TypedefNameDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstTypedefName && K <= lastTypedefName;
}
@@ -2413,7 +2401,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TypedefDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Typedef; }
};
@@ -2434,7 +2421,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TypeAliasDecl *D) { return true; }
static bool classofKind(Kind K) { return K == TypeAlias; }
};
@@ -2448,7 +2434,7 @@ public:
private:
// FIXME: This can be packed into the bitfields in Decl.
/// TagDeclKind - The TagKind enum.
- unsigned TagDeclKind : 2;
+ unsigned TagDeclKind : 3;
/// IsCompleteDefinition - True if this is a definition ("struct foo
/// {};"), false if it is a declaration ("struct foo;"). It is not
@@ -2625,6 +2611,7 @@ public:
void setTagKind(TagKind TK) { TagDeclKind = TK; }
bool isStruct() const { return getTagKind() == TTK_Struct; }
+ bool isInterface() const { return getTagKind() == TTK_Interface; }
bool isClass() const { return getTagKind() == TTK_Class; }
bool isUnion() const { return getTagKind() == TTK_Union; }
bool isEnum() const { return getTagKind() == TTK_Enum; }
@@ -2665,7 +2652,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TagDecl *D) { return true; }
static bool classofKind(Kind K) { return K >= firstTag && K <= lastTag; }
static DeclContext *castToDeclContext(const TagDecl *D) {
@@ -2895,7 +2881,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const EnumDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Enum; }
friend class ASTDeclReader;
@@ -3026,11 +3011,15 @@ public:
virtual void completeDefinition();
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const RecordDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstRecord && K <= lastRecord;
}
+ /// isMsStruct - Get whether or not this is an ms_struct, which can
+ /// be turned on with an attribute, pragma, or the -mms-bitfields
+ /// command-line option.
+ bool isMsStruct(const ASTContext &C) const;
+
private:
/// \brief Deserialize just the fields.
void LoadFieldsFromExternalStorage() const;
@@ -3062,7 +3051,6 @@ public:
void setAsmString(StringLiteral *Asm) { AsmString = Asm; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const FileScopeAsmDecl *D) { return true; }
static bool classofKind(Kind K) { return K == FileScopeAsm; }
};
@@ -3208,7 +3196,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const BlockDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Block; }
static DeclContext *castToDeclContext(const BlockDecl *D) {
return static_cast<DeclContext *>(const_cast<BlockDecl*>(D));
@@ -3282,7 +3269,6 @@ public:
virtual SourceRange getSourceRange() const LLVM_READONLY;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ImportDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Import; }
};
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
index 0f59609..50e2027 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
@@ -430,16 +430,10 @@ public:
void dropAttr() {
if (!HasAttrs) return;
- AttrVec &Attrs = getAttrs();
- for (unsigned i = 0, e = Attrs.size(); i != e; /* in loop */) {
- if (isa<T>(Attrs[i])) {
- Attrs.erase(Attrs.begin() + i);
- --e;
- }
- else
- ++i;
- }
- if (Attrs.empty())
+ AttrVec &Vec = getAttrs();
+ Vec.erase(std::remove_if(Vec.begin(), Vec.end(), isa<T, Attr*>), Vec.end());
+
+ if (Vec.empty())
HasAttrs = false;
}
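
The rewritten dropAttr() is the standard erase-remove idiom: std::remove_if moves the elements to keep to the front and returns the new logical end, which erase() then trims off. A generic, self-contained illustration (not clang code):

#include <algorithm>
#include <vector>

static bool isEven(int X) { return X % 2 == 0; }

void removeEvens(std::vector<int> &V) {
  // remove_if compacts the kept elements; erase drops the leftover tail.
  V.erase(std::remove_if(V.begin(), V.end(), isEven), V.end());
}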
@@ -844,8 +838,6 @@ public:
IdentifierNamespace |= IDNS_NonMemberOperator;
}
- // Implement isa/cast/dyncast/etc.
- static bool classof(const Decl *) { return true; }
static bool classofKind(Kind K) { return true; }
static DeclContext *castToDeclContext(const Decl *);
static Decl *castFromDeclContext(const DeclContext *);
@@ -1479,6 +1471,13 @@ public:
inline ddiag_iterator ddiag_end() const;
// Low-level accessors
+
+ /// \brief Mark the lookup table as needing to be built. This should be
+ /// used only if setHasExternalLexicalStorage() has been called.
+ void setMustBuildLookupTable() {
+ assert(ExternalLexicalStorage && "Requires external lexical storage");
+ LookupPtr.setInt(true);
+ }
/// \brief Retrieve the internal representation of the lookup structure.
/// This may omit some names if we are lazily building the structure.
@@ -1516,10 +1515,6 @@ public:
static bool classof(const Decl *D);
static bool classof(const DeclContext *D) { return true; }
-#define DECL(NAME, BASE)
-#define DECL_CONTEXT(NAME) \
- static bool classof(const NAME##Decl *D) { return true; }
-#include "clang/AST/DeclNodes.inc"
LLVM_ATTRIBUTE_USED void dumpDeclContext() const;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
index 2d95f03..9cb56e2 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
@@ -145,7 +145,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const AccessSpecDecl *D) { return true; }
static bool classofKind(Kind K) { return K == AccessSpec; }
};
@@ -563,9 +562,10 @@ class CXXRecordDecl : public RecordDecl {
struct LambdaDefinitionData : public DefinitionData {
typedef LambdaExpr::Capture Capture;
- LambdaDefinitionData(CXXRecordDecl *D, bool Dependent)
+ LambdaDefinitionData(CXXRecordDecl *D, TypeSourceInfo *Info, bool Dependent)
: DefinitionData(D), Dependent(Dependent), NumCaptures(0),
- NumExplicitCaptures(0), ManglingNumber(0), ContextDecl(0), Captures(0)
+ NumExplicitCaptures(0), ManglingNumber(0), ContextDecl(0), Captures(0),
+ MethodTyInfo(Info)
{
IsLambda = true;
}
@@ -598,7 +598,10 @@ class CXXRecordDecl : public RecordDecl {
/// \brief The list of captures, both explicit and implicit, for this
/// lambda.
- Capture *Captures;
+ Capture *Captures;
+
+ /// \brief The type of the call method.
+ TypeSourceInfo *MethodTyInfo;
};
struct DefinitionData &data() {
@@ -705,7 +708,8 @@ public:
IdentifierInfo *Id, CXXRecordDecl* PrevDecl=0,
bool DelayTypeCreation = false);
static CXXRecordDecl *CreateLambda(const ASTContext &C, DeclContext *DC,
- SourceLocation Loc, bool DependentLambda);
+ TypeSourceInfo *Info, SourceLocation Loc,
+ bool DependentLambda);
static CXXRecordDecl *CreateDeserialized(const ASTContext &C, unsigned ID);
bool isDynamicClass() const {
@@ -1303,7 +1307,7 @@ public:
/// \brief Function type used by forallBases() as a callback.
///
- /// \param Base the definition of the base class
+ /// \param BaseDefinition the definition of the base class
///
/// \returns true if this base matched the search criteria
typedef bool ForallBasesCallback(const CXXRecordDecl *BaseDefinition,
@@ -1500,15 +1504,15 @@ public:
bool isDependentLambda() const {
return isLambda() && getLambdaData().Dependent;
}
-
+
+ TypeSourceInfo *getLambdaTypeInfo() const {
+ return getLambdaData().MethodTyInfo;
+ }
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
return K >= firstCXXRecord && K <= lastCXXRecord;
}
- static bool classof(const CXXRecordDecl *D) { return true; }
- static bool classof(const ClassTemplateSpecializationDecl *D) {
- return true;
- }
friend class ASTDeclReader;
friend class ASTDeclWriter;
@@ -1549,14 +1553,16 @@ public:
bool isStatic() const { return getStorageClass() == SC_Static; }
bool isInstance() const { return !isStatic(); }
- bool isConst() { return getType()->castAs<FunctionType>()->isConst(); }
- bool isVolatile() { return getType()->castAs<FunctionType>()->isVolatile(); }
+ bool isConst() const { return getType()->castAs<FunctionType>()->isConst(); }
+ bool isVolatile() const { return getType()->castAs<FunctionType>()->isVolatile(); }
bool isVirtual() const {
CXXMethodDecl *CD =
cast<CXXMethodDecl>(const_cast<CXXMethodDecl*>(this)->getCanonicalDecl());
- if (CD->isVirtualAsWritten())
+ // Methods declared in interfaces are automatically (pure) virtual.
+ if (CD->isVirtualAsWritten() ||
+ (CD->getParent()->isInterface() && CD->isUserProvided()))
return true;
return (CD->begin_overridden_methods() != CD->end_overridden_methods());
@@ -1661,7 +1667,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const CXXMethodDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstCXXMethod && K <= lastCXXMethod;
}
@@ -2141,7 +2146,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const CXXConstructorDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXConstructor; }
friend class ASTDeclReader;
@@ -2213,7 +2217,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const CXXDestructorDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXDestructor; }
friend class ASTDeclReader;
@@ -2280,7 +2283,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const CXXConversionDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXConversion; }
friend class ASTDeclReader;
@@ -2350,7 +2352,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const LinkageSpecDecl *D) { return true; }
static bool classofKind(Kind K) { return K == LinkageSpec; }
static DeclContext *castToDeclContext(const LinkageSpecDecl *D) {
return static_cast<DeclContext *>(const_cast<LinkageSpecDecl*>(D));
@@ -2454,7 +2455,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const UsingDirectiveDecl *D) { return true; }
static bool classofKind(Kind K) { return K == UsingDirective; }
// Friend for getUsingDirectiveName.
@@ -2548,7 +2548,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const NamespaceAliasDecl *D) { return true; }
static bool classofKind(Kind K) { return K == NamespaceAlias; }
};
@@ -2619,7 +2618,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const UsingShadowDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Decl::UsingShadow; }
friend class ASTDeclReader;
@@ -2751,7 +2749,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const UsingDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Using; }
friend class ASTDeclReader;
@@ -2825,7 +2822,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const UnresolvedUsingValueDecl *D) { return true; }
static bool classofKind(Kind K) { return K == UnresolvedUsingValue; }
friend class ASTDeclReader;
@@ -2891,7 +2887,6 @@ public:
CreateDeserialized(ASTContext &C, unsigned ID);
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const UnresolvedUsingTypenameDecl *D) { return true; }
static bool classofKind(Kind K) { return K == UnresolvedUsingTypename; }
};
@@ -2931,7 +2926,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(StaticAssertDecl *D) { return true; }
static bool classofKind(Kind K) { return K == StaticAssert; }
friend class ASTDeclReader;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
index 9a64f08..37e4586 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
@@ -16,6 +16,7 @@
#define LLVM_CLANG_AST_DECLFRIEND_H
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "llvm/Support/Compiler.h"
namespace clang {
@@ -104,9 +105,15 @@ public:
/// Retrieves the source range for the friend declaration.
SourceRange getSourceRange() const LLVM_READONLY {
- /* FIXME: consider the case of templates wrt start of range. */
- if (NamedDecl *ND = getFriendDecl())
+ if (NamedDecl *ND = getFriendDecl()) {
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
+ return FTD->getSourceRange();
+ if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(ND)) {
+ if (DD->getOuterLocStart() != DD->getInnerLocStart())
+ return DD->getSourceRange();
+ }
return SourceRange(getFriendLoc(), ND->getLocEnd());
+ }
else if (TypeSourceInfo *TInfo = getFriendType())
return SourceRange(getFriendLoc(), TInfo->getTypeLoc().getEndLoc());
else
@@ -123,7 +130,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const FriendDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Decl::Friend; }
friend class ASTDeclReader;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
index 6c39f2c..8b27dd8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
@@ -33,8 +33,8 @@ class ObjCPropertyImplDecl;
class CXXCtorInitializer;
class ObjCListBase {
- void operator=(const ObjCListBase &); // DO NOT IMPLEMENT
- ObjCListBase(const ObjCListBase&); // DO NOT IMPLEMENT
+ ObjCListBase(const ObjCListBase &) LLVM_DELETED_FUNCTION;
+ void operator=(const ObjCListBase &) LLVM_DELETED_FUNCTION;
protected:
/// List is an array of pointers to objects that are not owned by this object.
void **List;
@@ -123,8 +123,8 @@ private:
unsigned IsInstance : 1;
unsigned IsVariadic : 1;
- // Synthesized declaration method for a property setter/getter
- unsigned IsSynthesized : 1;
+ /// True if this method is the getter or setter for an explicit property.
+ unsigned IsPropertyAccessor : 1;
// Method has a definition.
unsigned IsDefined : 1;
@@ -174,8 +174,7 @@ private:
SourceLocation DeclEndLoc; // the location of the ';' or '{'.
// The following are only used for method definitions, null otherwise.
- // FIXME: space savings opportunity, consider a sub-class.
- Stmt *Body;
+ LazyDeclStmtPtr Body;
/// SelfDecl - Decl for the implicit self parameter. This is lazily
/// constructed by createImplicitParams.
@@ -227,7 +226,7 @@ private:
DeclContext *contextDecl,
bool isInstance = true,
bool isVariadic = false,
- bool isSynthesized = false,
+ bool isPropertyAccessor = false,
bool isImplicitlyDeclared = false,
bool isDefined = false,
ImplementationControl impControl = None,
@@ -235,14 +234,14 @@ private:
: NamedDecl(ObjCMethod, contextDecl, beginLoc, SelInfo),
DeclContext(ObjCMethod), Family(InvalidObjCMethodFamily),
IsInstance(isInstance), IsVariadic(isVariadic),
- IsSynthesized(isSynthesized),
+ IsPropertyAccessor(isPropertyAccessor),
IsDefined(isDefined), IsRedeclaration(0), HasRedeclaration(0),
DeclImplementation(impControl), objcDeclQualifier(OBJC_TQ_None),
RelatedResultType(HasRelatedResultType),
SelLocsKind(SelLoc_StandardNoSpace), IsOverriding(0),
MethodDeclType(T), ResultTInfo(ResultTInfo),
ParamsAndSelLocs(0), NumParams(0),
- DeclEndLoc(endLoc), Body(0), SelfDecl(0), CmdDecl(0) {
+ DeclEndLoc(endLoc), Body(), SelfDecl(0), CmdDecl(0) {
setImplicit(isImplicitlyDeclared);
}
@@ -261,7 +260,7 @@ public:
DeclContext *contextDecl,
bool isInstance = true,
bool isVariadic = false,
- bool isSynthesized = false,
+ bool isPropertyAccessor = false,
bool isImplicitlyDeclared = false,
bool isDefined = false,
ImplementationControl impControl = None,
@@ -363,7 +362,7 @@ public:
}
/// \brief Sets the method's parameters and selector source locations.
- /// If the method is implicit (not coming from source) \arg SelLocs is
+ /// If the method is implicit (not coming from source) \p SelLocs is
/// ignored.
void setMethodParams(ASTContext &C,
ArrayRef<ParmVarDecl*> Params,
@@ -403,8 +402,8 @@ public:
bool isClassMethod() const { return !IsInstance; }
- bool isSynthesized() const { return IsSynthesized; }
- void setSynthesized(bool isSynth) { IsSynthesized = isSynth; }
+ bool isPropertyAccessor() const { return IsPropertyAccessor; }
+ void setPropertyAccessor(bool isAccessor) { IsPropertyAccessor = isAccessor; }
bool isDefined() const { return IsDefined; }
void setDefined(bool isDefined) { IsDefined = isDefined; }
@@ -418,7 +417,25 @@ public:
/// method in the interface or its categories.
bool isOverriding() const { return IsOverriding; }
void setOverriding(bool isOverriding) { IsOverriding = isOverriding; }
-
+
+ /// \brief Return the methods overridden by this method.
+ ///
+ /// An ObjC method is considered to override any method in the class's
+ /// base classes (and the base's categories), its protocols, or its
+ /// categories' protocols, that has the same selector and is of the same
+ /// kind (class or instance). A method in an implementation is not
+ /// considered to override the same method in the interface or its
+ /// categories.
+ void getOverriddenMethods(
+ SmallVectorImpl<const ObjCMethodDecl *> &Overridden) const;
+
+ /// \brief Returns the property associated with this method's selector.
+ ///
+ /// Note that even if this particular method is not marked as a property
+ /// accessor, it is still possible for it to match a property declared in a
+ /// superclass. Pass \c false if you only want to check the current class.
+ const ObjCPropertyDecl *findPropertyDecl(bool CheckOverrides = true) const;
+
// Related to protocols declared in \@protocol
void setDeclImplementation(ImplementationControl ic) {
DeclImplementation = ic;
@@ -427,10 +444,15 @@ public:
return ImplementationControl(DeclImplementation);
}
- virtual Stmt *getBody() const {
- return (Stmt*) Body;
- }
- CompoundStmt *getCompoundBody() { return (CompoundStmt*)Body; }
+ /// \brief Determine whether this method has a body.
+ virtual bool hasBody() const { return Body; }
+
+ /// \brief Retrieve the body of this method, if it has one.
+ virtual Stmt *getBody() const;
+
+ void setLazyBody(uint64_t Offset) { Body = Offset; }
+
+ CompoundStmt *getCompoundBody() { return (CompoundStmt*)getBody(); }
void setBody(Stmt *B) { Body = B; }
/// \brief Returns whether this specific method is a definition.
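
Changing Body from Stmt* to LazyDeclStmtPtr lets a deserialized method record only an offset into the external AST source until someone actually asks for the body. A hedged sketch of the intended access pattern (BodyOffset and doSomethingWith are hypothetical):

// Reader side: remember where the body lives instead of materializing it.
Method->setLazyBody(BodyOffset);

// Client side: hasBody() stays cheap; getBody() deserializes on first use.
if (Method->hasBody())
  doSomethingWith(Method->getBody());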
@@ -438,7 +460,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCMethodDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCMethod; }
static DeclContext *castToDeclContext(const ObjCMethodDecl *D) {
return static_cast<DeclContext *>(const_cast<ObjCMethodDecl*>(D));
@@ -520,6 +541,13 @@ public:
ObjCPropertyDecl *FindPropertyDeclaration(IdentifierInfo *PropertyId) const;
+ typedef llvm::DenseMap<IdentifierInfo*, ObjCPropertyDecl*> PropertyMap;
+
+ /// This routine collects the list of properties to be implemented in the
+ /// class. This includes the class's own properties and those of its
+ /// conforming protocols. Note that the superclass's properties are not
+ /// included in the list.
+ virtual void collectPropertiesToImplement(PropertyMap &PM) const {}
+
SourceLocation getAtStartLoc() const { return AtStart; }
void setAtStartLoc(SourceLocation Loc) { AtStart = Loc; }
@@ -537,7 +565,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCContainerDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstObjCContainer &&
K <= lastObjCContainer;
@@ -880,6 +907,8 @@ public:
ObjCPropertyDecl
*FindPropertyVisibleInPrimaryClass(IdentifierInfo *PropertyId) const;
+ virtual void collectPropertiesToImplement(PropertyMap &PM) const;
+
/// isSuperClassOf - Return true if this class is the specified class or is a
/// super class of the specified interface class.
bool isSuperClassOf(const ObjCInterfaceDecl *I) const {
@@ -992,7 +1021,6 @@ public:
void setTypeForDecl(const Type *TD) const { TypeForDecl = TD; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCInterfaceDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCInterface; }
friend class ASTReader;
@@ -1065,7 +1093,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCIvarDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCIvar; }
private:
/// NextIvar - Next Ivar in the list of ivars declared in class; class's
@@ -1098,7 +1125,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCAtDefsFieldDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCAtDefsField; }
};
@@ -1277,8 +1303,9 @@ public:
return getFirstDeclaration();
}
+ virtual void collectPropertiesToImplement(PropertyMap &PM) const;
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCProtocolDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCProtocol; }
friend class ASTReader;
@@ -1402,7 +1429,6 @@ public:
SourceLocation getIvarRBraceLoc() const { return IvarRBraceLoc; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCCategoryDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCCategory; }
friend class ASTDeclReader;
@@ -1455,7 +1481,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCImplDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstObjCImpl && K <= lastObjCImpl;
}
@@ -1532,7 +1557,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCCategoryImplDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCCategoryImpl;}
friend class ASTDeclReader;
@@ -1568,8 +1592,12 @@ class ObjCImplementationDecl : public ObjCImplDecl {
CXXCtorInitializer **IvarInitializers;
unsigned NumIvarInitializers;
- /// true if class has a .cxx_[construct,destruct] method.
- bool HasCXXStructors : 1;
+ /// Do the ivars of this class require initialization other than
+ /// zero-initialization?
+ bool HasNonZeroConstructors : 1;
+
+ /// Do the ivars of this class require non-trivial destruction?
+ bool HasDestructors : 1;
ObjCImplementationDecl(DeclContext *DC,
ObjCInterfaceDecl *classInterface,
@@ -1581,7 +1609,7 @@ class ObjCImplementationDecl : public ObjCImplDecl {
SuperClass(superDecl), IvarLBraceLoc(IvarLBraceLoc),
IvarRBraceLoc(IvarRBraceLoc),
IvarInitializers(0), NumIvarInitializers(0),
- HasCXXStructors(false) {}
+ HasNonZeroConstructors(false), HasDestructors(false) {}
public:
static ObjCImplementationDecl *Create(ASTContext &C, DeclContext *DC,
ObjCInterfaceDecl *classInterface,
@@ -1625,8 +1653,15 @@ public:
CXXCtorInitializer ** initializers,
unsigned numInitializers);
- bool hasCXXStructors() const { return HasCXXStructors; }
- void setHasCXXStructors(bool val) { HasCXXStructors = val; }
+ /// Do any of the ivars of this class (not counting its base classes)
+ /// require construction other than zero-initialization?
+ bool hasNonZeroConstructors() const { return HasNonZeroConstructors; }
+ void setHasNonZeroConstructors(bool val) { HasNonZeroConstructors = val; }
+
+ /// Do any of the ivars of this class (not counting its base classes)
+ /// require non-trivial destruction?
+ bool hasDestructors() const { return HasDestructors; }
+ void setHasDestructors(bool val) { HasDestructors = val; }
/// getIdentifier - Get the identifier that names the class
/// interface associated with this implementation.
@@ -1676,7 +1711,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCImplementationDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCImplementation; }
friend class ASTDeclReader;
@@ -1708,7 +1742,6 @@ public:
void setClassInterface(ObjCInterfaceDecl *D) { AliasedClass = D; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCCompatibleAliasDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCCompatibleAlias; }
};
@@ -1882,13 +1915,15 @@ public:
virtual SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtLoc, getLocation());
}
+
+ /// Get the default name of the synthesized ivar.
+ IdentifierInfo *getDefaultSynthIvarName(ASTContext &Ctx) const;
/// Lookup a property by name in the specified DeclContext.
static ObjCPropertyDecl *findPropertyDecl(const DeclContext *DC,
IdentifierInfo *propertyID);
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCPropertyDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCProperty; }
};
@@ -1999,7 +2034,6 @@ public:
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCPropertyImplDecl *D) { return true; }
static bool classofKind(Decl::Kind K) { return K == ObjCPropertyImpl; }
friend class ASTDeclReader;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
index 7affc7e..8620116 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
@@ -50,7 +50,11 @@ class TemplateParameterList {
/// The number of template parameters in this template
/// parameter list.
- unsigned NumParams;
+ unsigned NumParams : 31;
+
+ /// Whether this template parameter list contains an unexpanded parameter
+ /// pack.
+ unsigned ContainsUnexpandedParameterPack : 1;
protected:
TemplateParameterList(SourceLocation TemplateLoc, SourceLocation LAngleLoc,
@@ -104,6 +108,12 @@ public:
/// the second template parameter list will have depth 1, etc.
unsigned getDepth() const;
+ /// \brief Determine whether this template parameter list contains an
+ /// unexpanded parameter pack.
+ bool containsUnexpandedParameterPack() const {
+ return ContainsUnexpandedParameterPack;
+ }
+
SourceLocation getTemplateLoc() const { return TemplateLoc; }
SourceLocation getLAngleLoc() const { return LAngleLoc; }
SourceLocation getRAngleLoc() const { return RAngleLoc; }
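
The new bit mirrors the property already tracked on types and expressions: a template parameter list contains an unexpanded pack when it names a pack from an enclosing template without expanding it. Following the same shape as the examples later in this header:

    template <typename... Types>
    struct Outer {
      // The inner list <Types> names the pack without expanding it, so
      // containsUnexpandedParameterPack() is true for that list.
      template <template <Types> class... Templates>
      struct Inner;
    };
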
@@ -139,8 +149,8 @@ class TemplateArgumentList {
/// argument list.
unsigned NumArguments;
- TemplateArgumentList(const TemplateArgumentList &Other); // DO NOT IMPL
- void operator=(const TemplateArgumentList &Other); // DO NOT IMPL
+ TemplateArgumentList(const TemplateArgumentList &Other) LLVM_DELETED_FUNCTION;
+ void operator=(const TemplateArgumentList &Other) LLVM_DELETED_FUNCTION;
TemplateArgumentList(const TemplateArgument *Args, unsigned NumArgs,
bool Owned)
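
LLVM_DELETED_FUNCTION comes from llvm/Support/Compiler.h; on compilers with C++11 deleted functions it expands to = delete, and to nothing otherwise, so declarations like the ones above still fall back to the old private-and-unimplemented idiom. Approximately:

    // Approximate definition; the real guard in Compiler.h of this era
    // checks the compiler's C++11 feature support.
    #if __has_feature(cxx_deleted_functions)
    #define LLVM_DELETED_FUNCTION = delete  // using the member is a hard error
    #else
    #define LLVM_DELETED_FUNCTION           // member stays private and undefined
    #endif
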
@@ -233,12 +243,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TemplateDecl *D) { return true; }
- static bool classof(const RedeclarableTemplateDecl *D) { return true; }
- static bool classof(const FunctionTemplateDecl *D) { return true; }
- static bool classof(const ClassTemplateDecl *D) { return true; }
- static bool classof(const TemplateTemplateParmDecl *D) { return true; }
- static bool classof(const TypeAliasTemplateDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstTemplate && K <= lastTemplate;
}
@@ -678,10 +682,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const RedeclarableTemplateDecl *D) { return true; }
- static bool classof(const FunctionTemplateDecl *D) { return true; }
- static bool classof(const ClassTemplateDecl *D) { return true; }
- static bool classof(const TypeAliasTemplateDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstRedeclarableTemplate && K <= lastRedeclarableTemplate;
}
@@ -827,7 +827,6 @@ public:
// Implement isa/cast/dyncast support
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const FunctionTemplateDecl *D) { return true; }
static bool classofKind(Kind K) { return K == FunctionTemplate; }
friend class ASTDeclReader;
@@ -969,7 +968,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TemplateTypeParmDecl *D) { return true; }
static bool classofKind(Kind K) { return K == TemplateTypeParm; }
};
@@ -1090,8 +1088,17 @@ public:
/// \endcode
bool isParameterPack() const { return ParameterPack; }
+ /// \brief Whether this parameter pack is a pack expansion.
+ ///
+ /// A non-type template parameter pack is a pack expansion if its type
+ /// contains an unexpanded parameter pack. In this case, we will have
+ /// built a PackExpansionType wrapping the type.
+ bool isPackExpansion() const {
+ return ParameterPack && getType()->getAs<PackExpansionType>();
+ }
+
/// \brief Whether this parameter is a non-type template parameter pack
- /// that has different types at different positions.
+ /// that has a known list of different types at different positions.
///
/// A parameter pack is an expanded parameter pack when the original
/// parameter pack's type was itself a pack expansion, and that expansion
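
To make the pack-expansion versus expanded-pack distinction concrete for non-type parameters (illustrative):

    template <typename... Types>
    struct Outer {
      // 'Values' is a pack expansion: its type expands the outer pack
      // 'Types', so isPackExpansion() is true inside the primary template.
      template <Types... Values>
      struct Inner;
    };

    // Once Outer is instantiated, 'Values' inside Outer<int, char> is an
    // *expanded* pack with the known type list {int, char}.
    template struct Outer<int, char>;
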
@@ -1141,7 +1148,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const NonTypeTemplateParmDecl *D) { return true; }
static bool classofKind(Kind K) { return K == NonTypeTemplateParm; }
};
@@ -1165,23 +1171,47 @@ class TemplateTemplateParmDecl : public TemplateDecl,
/// \brief Whether this parameter is a parameter pack.
bool ParameterPack;
+ /// \brief Whether this template template parameter is an "expanded"
+ /// parameter pack, meaning that it is a pack expansion and we
+ /// already know the set of template parameters that expansion expands to.
+ bool ExpandedParameterPack;
+
+ /// \brief The number of parameters in an expanded parameter pack.
+ unsigned NumExpandedParams;
+
TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L,
unsigned D, unsigned P, bool ParameterPack,
IdentifierInfo *Id, TemplateParameterList *Params)
: TemplateDecl(TemplateTemplateParm, DC, L, Id, Params),
TemplateParmPosition(D, P), DefaultArgument(),
- DefaultArgumentWasInherited(false), ParameterPack(ParameterPack)
+ DefaultArgumentWasInherited(false), ParameterPack(ParameterPack),
+ ExpandedParameterPack(false), NumExpandedParams(0)
{ }
+ TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L,
+ unsigned D, unsigned P,
+ IdentifierInfo *Id, TemplateParameterList *Params,
+ unsigned NumExpansions,
+ TemplateParameterList * const *Expansions);
+
public:
static TemplateTemplateParmDecl *Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D,
unsigned P, bool ParameterPack,
IdentifierInfo *Id,
TemplateParameterList *Params);
+ static TemplateTemplateParmDecl *Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D,
+ unsigned P,
+ IdentifierInfo *Id,
+ TemplateParameterList *Params,
+ llvm::ArrayRef<TemplateParameterList*> Expansions);
- static TemplateTemplateParmDecl *CreateDeserialized(ASTContext &C,
+ static TemplateTemplateParmDecl *CreateDeserialized(ASTContext &C,
unsigned ID);
+ static TemplateTemplateParmDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID,
+ unsigned NumExpansions);
using TemplateParmPosition::getDepth;
using TemplateParmPosition::getPosition;
@@ -1195,6 +1225,49 @@ public:
/// \endcode
bool isParameterPack() const { return ParameterPack; }
+ /// \brief Whether this parameter pack is a pack expansion.
+ ///
+ /// A template template parameter pack is a pack expansion if its template
+ /// parameter list contains an unexpanded parameter pack.
+ bool isPackExpansion() const {
+ return ParameterPack &&
+ getTemplateParameters()->containsUnexpandedParameterPack();
+ }
+
+ /// \brief Whether this parameter is a template template parameter pack that
+ /// has a known list of different template parameter lists at different
+ /// positions.
+ ///
+ /// A parameter pack is an expanded parameter pack when the original parameter
+ /// pack's template parameter list was itself a pack expansion, and that
+ /// expansion has already been expanded. For example, given:
+ ///
+ /// \code
+ /// template<typename...Types> struct Outer {
+ /// template<template<Types> class...Templates> struct Inner;
+ /// };
+ /// \endcode
+ ///
+ /// The parameter pack \c Templates is a pack expansion, which expands the
+ /// pack \c Types. When \c Types is supplied with template arguments by
+ /// instantiating \c Outer, the instantiation of \c Templates is an expanded
+ /// parameter pack.
+ bool isExpandedParameterPack() const { return ExpandedParameterPack; }
+
+ /// \brief Retrieves the number of expansion template parameters in
+ /// an expanded parameter pack.
+ unsigned getNumExpansionTemplateParameters() const {
+ assert(ExpandedParameterPack && "Not an expansion parameter pack");
+ return NumExpandedParams;
+ }
+
+ /// \brief Retrieve a particular expansion type within an expanded parameter
+ /// pack.
+ TemplateParameterList *getExpansionTemplateParameters(unsigned I) const {
+ assert(I < NumExpandedParams && "Out-of-range expansion type index");
+ return reinterpret_cast<TemplateParameterList *const *>(this + 1)[I];
+ }
+
/// \brief Determine whether this template parameter has a default
/// argument.
bool hasDefaultArgument() const {
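
A hedged sketch of consuming the new accessors (the helper and variable names here are hypothetical):

    #include "clang/AST/DeclTemplate.h"

    // Walk the parameter lists an expanded template template parameter
    // pack expands to (hypothetical helper).
    void visitExpansions(const clang::TemplateTemplateParmDecl *TTP) {
      if (!TTP->isExpandedParameterPack())
        return;  // only expanded packs carry a trailing list
      for (unsigned I = 0, N = TTP->getNumExpansionTemplateParameters();
           I != N; ++I) {
        clang::TemplateParameterList *TPL =
            TTP->getExpansionTemplateParameters(I);
        (void)TPL;  // one TemplateParameterList per expansion slot
      }
    }
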
@@ -1238,7 +1311,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TemplateTemplateParmDecl *D) { return true; }
static bool classofKind(Kind K) { return K == TemplateTemplateParm; }
friend class ASTDeclReader;
@@ -1505,14 +1577,6 @@ public:
K <= lastClassTemplateSpecialization;
}
- static bool classof(const ClassTemplateSpecializationDecl *) {
- return true;
- }
-
- static bool classof(const ClassTemplatePartialSpecializationDecl *) {
- return true;
- }
-
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
@@ -1681,10 +1745,6 @@ public:
return K == ClassTemplatePartialSpecialization;
}
- static bool classof(const ClassTemplatePartialSpecializationDecl *) {
- return true;
- }
-
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
@@ -1886,7 +1946,6 @@ public:
// Implement isa/cast/dyncast support
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ClassTemplateDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ClassTemplate; }
friend class ASTDeclReader;
@@ -1984,7 +2043,6 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Decl::FriendTemplate; }
- static bool classof(const FriendTemplateDecl *D) { return true; }
friend class ASTDeclReader;
};
@@ -2059,7 +2117,6 @@ public:
// Implement isa/cast/dyncast support
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const TypeAliasTemplateDecl *D) { return true; }
static bool classofKind(Kind K) { return K == TypeAliasTemplate; }
friend class ASTDeclReader;
@@ -2123,9 +2180,6 @@ public:
static bool classofKind(Kind K) {
return K == Decl::ClassScopeFunctionSpecialization;
}
- static bool classof(const ClassScopeFunctionSpecializationDecl *D) {
- return true;
- }
friend class ASTDeclReader;
friend class ASTDeclWriter;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
index 6146525..d991c73 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
@@ -334,8 +334,8 @@ class DeclarationNameTable {
CXXOperatorIdName *CXXOperatorNames; // Operator names
void *CXXLiteralOperatorNames; // Actually a CXXOperatorIdName*
- DeclarationNameTable(const DeclarationNameTable&); // NONCOPYABLE
- DeclarationNameTable& operator=(const DeclarationNameTable&); // NONCOPYABLE
+ DeclarationNameTable(const DeclarationNameTable&) LLVM_DELETED_FUNCTION;
+ void operator=(const DeclarationNameTable&) LLVM_DELETED_FUNCTION;
public:
DeclarationNameTable(const ASTContext &C);
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Expr.h b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
index 89c003c..dc83654 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
@@ -34,6 +34,7 @@
namespace clang {
class ASTContext;
class APValue;
+ class CastExpr;
class Decl;
class IdentifierInfo;
class ParmVarDecl;
@@ -42,6 +43,7 @@ namespace clang {
class BlockDecl;
class CXXBaseSpecifier;
class CXXOperatorCallExpr;
+ class MaterializeTemporaryExpr;
class CXXMemberCallExpr;
class ObjCPropertyRefExpr;
class OpaqueValueExpr;
@@ -49,6 +51,48 @@ namespace clang {
/// \brief A simple array of base specifiers.
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
+/// \brief An adjustment to be made to the temporary created when emitting a
+/// reference binding, which accesses a particular subobject of that temporary.
+struct SubobjectAdjustment {
+ enum {
+ DerivedToBaseAdjustment,
+ FieldAdjustment,
+ MemberPointerAdjustment
+ } Kind;
+
+ union {
+ struct {
+ const CastExpr *BasePath;
+ const CXXRecordDecl *DerivedClass;
+ } DerivedToBase;
+
+ FieldDecl *Field;
+
+ struct {
+ const MemberPointerType *MPT;
+ Expr *RHS;
+ } Ptr;
+ };
+
+ SubobjectAdjustment(const CastExpr *BasePath,
+ const CXXRecordDecl *DerivedClass)
+ : Kind(DerivedToBaseAdjustment) {
+ DerivedToBase.BasePath = BasePath;
+ DerivedToBase.DerivedClass = DerivedClass;
+ }
+
+ SubobjectAdjustment(FieldDecl *Field)
+ : Kind(FieldAdjustment) {
+ this->Field = Field;
+ }
+
+ SubobjectAdjustment(const MemberPointerType *MPT, Expr *RHS)
+ : Kind(MemberPointerAdjustment) {
+ this->Ptr.MPT = MPT;
+ this->Ptr.RHS = RHS;
+ }
+};
+
/// Expr - This represents one expression. Note that Expr's are subclasses of
/// Stmt. This allows an expression to be transparently used any place a Stmt
/// is required.
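
Where the three adjustment kinds come from when a reference binds to part of a temporary (illustrative C++):

    struct Base    { int Field; };
    struct Derived : Base {};
    Derived make();

    // The lifetime of the Derived temporary is extended in both cases, but
    // the reference designates a subobject, recorded as adjustments:
    const Base &B = make();        // DerivedToBaseAdjustment
    const int  &F = make().Field;  // FieldAdjustment
    // MemberPointerAdjustment arises for '.*' with a pointer to data member.
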
@@ -220,15 +264,6 @@ public:
/// Reasons why an expression might not be an l-value.
LValueClassification ClassifyLValue(ASTContext &Ctx) const;
- /// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type,
- /// does not have an incomplete type, does not have a const-qualified type,
- /// and if it is a structure or union, does not have any member (including,
- /// recursively, any member or element of all contained aggregates or unions)
- /// with a const-qualified type.
- ///
- /// \param Loc [in,out] - A source location which *may* be filled
- /// in with the location of the expression making this a
- /// non-modifiable lvalue, if specified.
enum isModifiableLvalueResult {
MLV_Valid,
MLV_NotObjectType,
@@ -247,6 +282,15 @@ public:
MLV_ClassTemporary,
MLV_ArrayTemporary
};
+ /// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type,
+ /// does not have an incomplete type, does not have a const-qualified type,
+ /// and if it is a structure or union, does not have any member (including,
+ /// recursively, any member or element of all contained aggregates or unions)
+ /// with a const-qualified type.
+ ///
+ /// \param Loc [in,out] - A source location which *may* be filled
+ /// in with the location of the expression making this a
+ /// non-modifiable lvalue, if specified.
isModifiableLvalueResult isModifiableLvalue(ASTContext &Ctx,
SourceLocation *Loc = 0) const;
@@ -392,6 +436,9 @@ public:
/// property, find the underlying property reference expression.
const ObjCPropertyRefExpr *getObjCProperty() const;
+ /// \brief Check if this expression is the ObjC 'self' implicit parameter.
+ bool isObjCSelfExpr() const;
+
/// \brief Returns whether this expression refers to a vector element.
bool refersToVectorElement() const;
@@ -692,11 +739,22 @@ public:
/// behavior if the object isn't dynamically of the derived type.
const CXXRecordDecl *getBestDynamicClassType() const;
+ /// Walk outwards from an expression we want to bind a reference to and
+ /// find the expression whose lifetime needs to be extended. Record
+ /// the adjustments needed along the path.
+ const Expr *
+ skipRValueSubobjectAdjustments(
+ SmallVectorImpl<SubobjectAdjustment> &Adjustments) const;
+
+ /// Skip irrelevant expressions to find what should be materialized when
+ /// binding a reference.
+ const Expr *
+ findMaterializedTemporary(const MaterializeTemporaryExpr *&MTE) const;
+
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstExprConstant &&
T->getStmtClass() <= lastExprConstant;
}
- static bool classof(const Expr *) { return true; }
};
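
How the two new helpers compose in a caller (a sketch; the function and its parameter are hypothetical, the member signatures come from this hunk):

    #include "clang/AST/Expr.h"
    #include "clang/AST/ExprCXX.h"
    #include "llvm/ADT/SmallVector.h"

    void analyzeBinding(const clang::Expr *E) {
      const clang::MaterializeTemporaryExpr *MTE = 0;
      E = E->findMaterializedTemporary(MTE);
      llvm::SmallVector<clang::SubobjectAdjustment, 2> Adjustments;
      E = E->skipRValueSubobjectAdjustments(Adjustments);
      // 'E' is now the expression whose lifetime must be extended;
      // 'Adjustments' replays the subobject path afterwards.
    }
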
@@ -762,7 +820,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == OpaqueValueExprClass;
}
- static bool classof(const OpaqueValueExpr *) { return true; }
};
/// \brief A reference to a declared variable, function, enum, etc.
@@ -1059,7 +1116,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclRefExprClass;
}
- static bool classof(const DeclRefExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1109,7 +1165,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == PredefinedExprClass;
}
- static bool classof(const PredefinedExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1132,8 +1187,8 @@ class APNumericStorage {
bool hasAllocation() const { return llvm::APInt::getNumWords(BitWidth) > 1; }
- APNumericStorage(const APNumericStorage&); // do not implement
- APNumericStorage& operator=(const APNumericStorage&); // do not implement
+ APNumericStorage(const APNumericStorage &) LLVM_DELETED_FUNCTION;
+ void operator=(const APNumericStorage &) LLVM_DELETED_FUNCTION;
protected:
APNumericStorage() : VAL(0), BitWidth(0) { }
@@ -1196,7 +1251,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == IntegerLiteralClass;
}
- static bool classof(const IntegerLiteral *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1243,7 +1297,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CharacterLiteralClass;
}
- static bool classof(const CharacterLiteral *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1286,7 +1339,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == FloatingLiteralClass;
}
- static bool classof(const FloatingLiteral *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1317,7 +1369,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ImaginaryLiteralClass;
}
- static bool classof(const ImaginaryLiteral *) { return true; }
// Iterators
child_range children() { return child_range(&Val, &Val+1); }
@@ -1479,7 +1530,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == StringLiteralClass;
}
- static bool classof(const StringLiteral *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1520,7 +1570,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ParenExprClass;
}
- static bool classof(const ParenExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Val, &Val+1); }
@@ -1610,7 +1659,7 @@ public:
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "sizeof" or "[pre]++"
- static const char *getOpcodeStr(Opcode Op);
+ static StringRef getOpcodeStr(Opcode Op);
/// \brief Retrieve the unary opcode that corresponds to the given
/// overloaded operator.
@@ -1631,7 +1680,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnaryOperatorClass;
}
- static bool classof(const UnaryOperator *) { return true; }
// Iterators
child_range children() { return child_range(&Val, &Val+1); }
@@ -1757,8 +1805,7 @@ private:
OffsetOfExpr(ASTContext &C, QualType type,
SourceLocation OperatorLoc, TypeSourceInfo *tsi,
- OffsetOfNode* compsPtr, unsigned numComps,
- Expr** exprsPtr, unsigned numExprs,
+ ArrayRef<OffsetOfNode> comps, ArrayRef<Expr*> exprs,
SourceLocation RParenLoc);
explicit OffsetOfExpr(unsigned numComps, unsigned numExprs)
@@ -1769,9 +1816,8 @@ public:
static OffsetOfExpr *Create(ASTContext &C, QualType type,
SourceLocation OperatorLoc, TypeSourceInfo *tsi,
- OffsetOfNode* compsPtr, unsigned numComps,
- Expr** exprsPtr, unsigned numExprs,
- SourceLocation RParenLoc);
+ ArrayRef<OffsetOfNode> comps,
+ ArrayRef<Expr*> exprs, SourceLocation RParenLoc);
static OffsetOfExpr *CreateEmpty(ASTContext &C,
unsigned NumComps, unsigned NumExprs);
@@ -1832,8 +1878,6 @@ public:
return T->getStmtClass() == OffsetOfExprClass;
}
- static bool classof(const OffsetOfExpr *) { return true; }
-
// Iterators
child_range children() {
Stmt **begin =
@@ -1937,7 +1981,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnaryExprOrTypeTraitExprClass;
}
- static bool classof(const UnaryExprOrTypeTraitExpr *) { return true; }
// Iterators
child_range children();
@@ -2017,7 +2060,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ArraySubscriptExprClass;
}
- static bool classof(const ArraySubscriptExpr *) { return true; }
// Iterators
child_range children() {
@@ -2041,7 +2083,7 @@ class CallExpr : public Expr {
protected:
// These versions of the constructor are for derived classes.
CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
- Expr **args, unsigned numargs, QualType t, ExprValueKind VK,
+ ArrayRef<Expr*> args, QualType t, ExprValueKind VK,
SourceLocation rparenloc);
CallExpr(ASTContext &C, StmtClass SC, unsigned NumPreArgs, EmptyShell Empty);
@@ -2061,7 +2103,7 @@ protected:
unsigned getNumPreArgs() const { return CallExprBits.NumPreArgs; }
public:
- CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs, QualType t,
+ CallExpr(ASTContext& C, Expr *fn, ArrayRef<Expr*> args, QualType t,
ExprValueKind VK, SourceLocation rparenloc);
/// \brief Build an empty call expression.
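
This is part of a sweep replacing (Expr**, unsigned) pairs with llvm::ArrayRef<Expr*>, which converts implicitly from arrays and SmallVectors. In miniature (generic element type; not part of the patch):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"

    static void takeArgs(llvm::ArrayRef<int *> Args) { (void)Args; }

    void caller(int *Single) {
      llvm::SmallVector<int *, 4> Args;
      takeArgs(Args);                              // container converts implicitly
      takeArgs(llvm::ArrayRef<int *>(&Single, 1)); // pointer + length still works
    }
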
@@ -2153,7 +2195,6 @@ public:
return T->getStmtClass() >= firstCallExprConstant &&
T->getStmtClass() <= lastCallExprConstant;
}
- static bool classof(const CallExpr *) { return true; }
// Iterators
child_range children() {
@@ -2440,7 +2481,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == MemberExprClass;
}
- static bool classof(const MemberExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Base, &Base+1); }
@@ -2506,7 +2546,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundLiteralExprClass;
}
- static bool classof(const CompoundLiteralExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Init, &Init+1); }
@@ -2597,7 +2636,6 @@ public:
return T->getStmtClass() >= firstCastExprConstant &&
T->getStmtClass() <= lastCastExprConstant;
}
- static bool classof(const CastExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Op, &Op+1); }
@@ -2661,7 +2699,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ImplicitCastExprClass;
}
- static bool classof(const ImplicitCastExpr *) { return true; }
};
inline Expr *Expr::IgnoreImpCasts() {
@@ -2716,7 +2753,6 @@ public:
return T->getStmtClass() >= firstExplicitCastExprConstant &&
T->getStmtClass() <= lastExplicitCastExprConstant;
}
- static bool classof(const ExplicitCastExpr *) { return true; }
};
/// CStyleCastExpr - An explicit cast in C (C99 6.5.4) or a C-style
@@ -2757,7 +2793,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CStyleCastExprClass;
}
- static bool classof(const CStyleCastExpr *) { return true; }
};
/// \brief A builtin binary operation expression such as "x + y" or "x <= y".
@@ -2784,6 +2819,12 @@ public:
private:
unsigned Opc : 6;
+
+ // Records the FP_CONTRACT pragma status at the point that this binary
+ // operator was parsed. This bit is only meaningful for operations on
+ // floating point types. For all other types it should default to
+ // false.
+ unsigned FPContractable : 1;
SourceLocation OpLoc;
enum { LHS, RHS, END_EXPR };
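
The bit records the pragma state at the point the operator was parsed, so codegen can later decide whether a*b+c may contract into one fused multiply-add. For instance (illustrative):

    #pragma STDC FP_CONTRACT ON
    double fused(double a, double b, double c) {
      return a * b + c;   // '+' parsed with FPContractable == true
    }

    #pragma STDC FP_CONTRACT OFF
    double strict(double a, double b, double c) {
      return a * b + c;   // FPContractable == false: no fma contraction
    }
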
@@ -2792,7 +2833,7 @@ public:
BinaryOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
ExprValueKind VK, ExprObjectKind OK,
- SourceLocation opLoc)
+ SourceLocation opLoc, bool fpContractable)
: Expr(BinaryOperatorClass, ResTy, VK, OK,
lhs->isTypeDependent() || rhs->isTypeDependent(),
lhs->isValueDependent() || rhs->isValueDependent(),
@@ -2800,7 +2841,7 @@ public:
rhs->isInstantiationDependent()),
(lhs->containsUnexpandedParameterPack() ||
rhs->containsUnexpandedParameterPack())),
- Opc(opc), OpLoc(opLoc) {
+ Opc(opc), FPContractable(fpContractable), OpLoc(opLoc) {
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
assert(!isCompoundAssignmentOp() &&
@@ -2829,9 +2870,9 @@ public:
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "<<=".
- static const char *getOpcodeStr(Opcode Op);
+ static StringRef getOpcodeStr(Opcode Op);
- const char *getOpcodeStr() const { return getOpcodeStr(getOpcode()); }
+ StringRef getOpcodeStr() const { return getOpcodeStr(getOpcode()); }
/// \brief Retrieve the binary opcode that corresponds to the given
/// overloaded operator.
@@ -2894,17 +2935,24 @@ public:
return S->getStmtClass() >= firstBinaryOperatorConstant &&
S->getStmtClass() <= lastBinaryOperatorConstant;
}
- static bool classof(const BinaryOperator *) { return true; }
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
+ // Set the FP contractability status of this operator. Only meaningful for
+ // operations on floating point types.
+ void setFPContractable(bool FPC) { FPContractable = FPC; }
+
+ // Get the FP contractability status of this operator. Only meaningful for
+ // operations on floating point types.
+ bool isFPContractable() const { return FPContractable; }
+
protected:
BinaryOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
ExprValueKind VK, ExprObjectKind OK,
- SourceLocation opLoc, bool dead)
+ SourceLocation opLoc, bool fpContractable, bool dead2)
: Expr(CompoundAssignOperatorClass, ResTy, VK, OK,
lhs->isTypeDependent() || rhs->isTypeDependent(),
lhs->isValueDependent() || rhs->isValueDependent(),
@@ -2912,7 +2960,7 @@ protected:
rhs->isInstantiationDependent()),
(lhs->containsUnexpandedParameterPack() ||
rhs->containsUnexpandedParameterPack())),
- Opc(opc), OpLoc(opLoc) {
+ Opc(opc), FPContractable(fpContractable), OpLoc(opLoc) {
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
}
@@ -2934,8 +2982,9 @@ public:
CompoundAssignOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResType,
ExprValueKind VK, ExprObjectKind OK,
QualType CompLHSType, QualType CompResultType,
- SourceLocation OpLoc)
- : BinaryOperator(lhs, rhs, opc, ResType, VK, OK, OpLoc, true),
+ SourceLocation OpLoc, bool fpContractable)
+ : BinaryOperator(lhs, rhs, opc, ResType, VK, OK, OpLoc, fpContractable,
+ true),
ComputationLHSType(CompLHSType),
ComputationResultType(CompResultType) {
assert(isCompoundAssignmentOp() &&
@@ -2955,7 +3004,6 @@ public:
QualType getComputationResultType() const { return ComputationResultType; }
void setComputationResultType(QualType T) { ComputationResultType = T; }
- static bool classof(const CompoundAssignOperator *) { return true; }
static bool classof(const Stmt *S) {
return S->getStmtClass() == CompoundAssignOperatorClass;
}
@@ -3001,7 +3049,6 @@ public:
return T->getStmtClass() == ConditionalOperatorClass ||
T->getStmtClass() == BinaryConditionalOperatorClass;
}
- static bool classof(const AbstractConditionalOperator *) { return true; }
};
/// ConditionalOperator - The ?: ternary operator. The GNU "missing
@@ -3060,7 +3107,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ConditionalOperatorClass;
}
- static bool classof(const ConditionalOperator *) { return true; }
// Iterators
child_range children() {
@@ -3142,7 +3188,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == BinaryConditionalOperatorClass;
}
- static bool classof(const BinaryConditionalOperator *) { return true; }
// Iterators
child_range children() {
@@ -3198,7 +3243,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == AddrLabelExprClass;
}
- static bool classof(const AddrLabelExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -3242,7 +3286,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == StmtExprClass;
}
- static bool classof(const StmtExpr *) { return true; }
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
@@ -3266,9 +3309,8 @@ class ShuffleVectorExpr : public Expr {
unsigned NumExprs;
public:
- ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
- QualType Type, SourceLocation BLoc,
- SourceLocation RP);
+ ShuffleVectorExpr(ASTContext &C, ArrayRef<Expr*> args, QualType Type,
+ SourceLocation BLoc, SourceLocation RP);
/// \brief Build an empty vector-shuffle expression.
explicit ShuffleVectorExpr(EmptyShell Empty)
@@ -3286,7 +3328,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ShuffleVectorExprClass;
}
- static bool classof(const ShuffleVectorExpr *) { return true; }
/// getNumSubExprs - Return the size of the SubExprs array. This includes the
/// constant expression, the actual arguments passed in, and the function
@@ -3308,7 +3349,7 @@ public:
void setExprs(ASTContext &C, Expr ** Exprs, unsigned NumExprs);
- unsigned getShuffleMaskIdx(ASTContext &Ctx, unsigned N) {
+ unsigned getShuffleMaskIdx(ASTContext &Ctx, unsigned N) const {
assert((N < NumExprs - 2) && "Shuffle idx out of range!");
return getExpr(N+2)->EvaluateKnownConstInt(Ctx).getZExtValue();
}
@@ -3381,7 +3422,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ChooseExprClass;
}
- static bool classof(const ChooseExpr *) { return true; }
// Iterators
child_range children() {
@@ -3418,7 +3458,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == GNUNullExprClass;
}
- static bool classof(const GNUNullExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -3464,7 +3503,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == VAArgExprClass;
}
- static bool classof(const VAArgExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Val, &Val+1); }
@@ -3501,21 +3539,32 @@ public:
/// initializer lists may still have fewer initializers than there are
/// elements to initialize within the object.
///
+/// After semantic analysis has completed, given an initializer list,
+/// method isSemanticForm() returns true if and only if this is the
+/// semantic form of the initializer list (note: the same AST node
+/// may at the same time be the syntactic form).
/// Given the semantic form of the initializer list, one can retrieve
-/// the original syntactic form of that initializer list (if it
-/// exists) using getSyntacticForm(). Since many initializer lists
-/// have the same syntactic and semantic forms, getSyntacticForm() may
-/// return NULL, indicating that the current initializer list also
-/// serves as its syntactic form.
+/// the syntactic form of that initializer list (when different)
+/// using method getSyntacticForm(); the method returns null if applied
+ /// to an initializer list which is already in syntactic form.
+/// Similarly, given the syntactic form (i.e., an initializer list such
+/// that isSemanticForm() returns false), one can retrieve the semantic
+/// form using method getSemanticForm().
+/// Since many initializer lists have the same syntactic and semantic forms,
+/// getSyntacticForm() may return NULL, indicating that the current
+/// semantic initializer list also serves as its syntactic form.
class InitListExpr : public Expr {
// FIXME: Eliminate this vector in favor of ASTContext allocation
typedef ASTVector<Stmt *> InitExprsTy;
InitExprsTy InitExprs;
SourceLocation LBraceLoc, RBraceLoc;
- /// Contains the initializer list that describes the syntactic form
- /// written in the source code.
- InitListExpr *SyntacticForm;
+ /// The alternative form of the initializer list (if it exists).
+ /// The int part of the pair stores whether this initializer list is
+ /// in semantic form. If not null, the pointer points to:
+ /// - the syntactic form, if this is in semantic form;
+ /// - the semantic form, if this is in syntactic form.
+ llvm::PointerIntPair<InitListExpr *, 1, bool> AltForm;
/// \brief Either:
/// If this initializer list initializes an array with more elements than
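
A concrete case where the two forms diverge, via brace elision (illustrative):

    struct Point { int x, y; };
    struct Line  { Point a, b; };

    // Syntactic form: the single brace level written in the source.
    // Semantic form: Sema's fully-braced rewrite, one InitListExpr per
    // subobject: { { 1, 2 }, { 3, 4 } }.
    Line L = { 1, 2, 3, 4 };
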
@@ -3528,8 +3577,7 @@ class InitListExpr : public Expr {
public:
InitListExpr(ASTContext &C, SourceLocation lbraceloc,
- Expr **initexprs, unsigned numinits,
- SourceLocation rbraceloc);
+ ArrayRef<Expr*> initExprs, SourceLocation rbraceloc);
/// \brief Build an empty initializer list.
explicit InitListExpr(ASTContext &C, EmptyShell Empty)
@@ -3621,12 +3669,20 @@ public:
SourceLocation getRBraceLoc() const { return RBraceLoc; }
void setRBraceLoc(SourceLocation Loc) { RBraceLoc = Loc; }
- /// @brief Retrieve the initializer list that describes the
- /// syntactic form of the initializer.
- ///
- ///
- InitListExpr *getSyntacticForm() const { return SyntacticForm; }
- void setSyntacticForm(InitListExpr *Init) { SyntacticForm = Init; }
+ bool isSemanticForm() const { return AltForm.getInt(); }
+ InitListExpr *getSemanticForm() const {
+ return isSemanticForm() ? 0 : AltForm.getPointer();
+ }
+ InitListExpr *getSyntacticForm() const {
+ return isSemanticForm() ? AltForm.getPointer() : 0;
+ }
+
+ void setSyntacticForm(InitListExpr *Init) {
+ AltForm.setPointer(Init);
+ AltForm.setInt(true);
+ Init->AltForm.setPointer(this);
+ Init->AltForm.setInt(false);
+ }
bool hadArrayRangeDesignator() const {
return InitListExprBits.HadArrayRangeDesignator != 0;
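
Navigating the pair from either side (the function and its InitListExpr* parameter are hypothetical; the accessors are the ones defined above):

    #include "clang/AST/Expr.h"

    void inspect(clang::InitListExpr *ILE) {
      if (ILE->isSemanticForm()) {
        if (clang::InitListExpr *Syn = ILE->getSyntacticForm())
          (void)Syn;   // what the user wrote, when it differs
      } else if (clang::InitListExpr *Sem = ILE->getSemanticForm()) {
        (void)Sem;     // Sema's rewritten, fully-structured form
      }
    }
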
@@ -3647,7 +3703,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == InitListExprClass;
}
- static bool classof(const InitListExpr *) { return true; }
// Iterators
child_range children() {
@@ -3723,8 +3778,7 @@ private:
DesignatedInitExpr(ASTContext &C, QualType Ty, unsigned NumDesignators,
const Designator *Designators,
SourceLocation EqualOrColonLoc, bool GNUSyntax,
- Expr **IndexExprs, unsigned NumIndexExprs,
- Expr *Init);
+ ArrayRef<Expr*> IndexExprs, Expr *Init);
explicit DesignatedInitExpr(unsigned NumSubExprs)
: Expr(DesignatedInitExprClass, EmptyShell()),
@@ -3885,7 +3939,7 @@ public:
static DesignatedInitExpr *Create(ASTContext &C, Designator *Designators,
unsigned NumDesignators,
- Expr **IndexExprs, unsigned NumIndexExprs,
+ ArrayRef<Expr*> IndexExprs,
SourceLocation EqualOrColonLoc,
bool GNUSyntax, Expr *Init);
@@ -3985,7 +4039,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == DesignatedInitExprClass;
}
- static bool classof(const DesignatedInitExpr *) { return true; }
// Iterators
child_range children() {
@@ -4015,7 +4068,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ImplicitValueInitExprClass;
}
- static bool classof(const ImplicitValueInitExpr *) { return true; }
SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange();
@@ -4032,8 +4084,8 @@ class ParenListExpr : public Expr {
SourceLocation LParenLoc, RParenLoc;
public:
- ParenListExpr(ASTContext& C, SourceLocation lparenloc, Expr **exprs,
- unsigned numexprs, SourceLocation rparenloc);
+ ParenListExpr(ASTContext& C, SourceLocation lparenloc, ArrayRef<Expr*> exprs,
+ SourceLocation rparenloc);
/// \brief Build an empty paren list.
explicit ParenListExpr(EmptyShell Empty) : Expr(ParenListExprClass, Empty) { }
@@ -4061,7 +4113,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ParenListExprClass;
}
- static bool classof(const ParenListExpr *) { return true; }
// Iterators
child_range children() {
@@ -4109,18 +4160,18 @@ class GenericSelectionExpr : public Expr {
public:
GenericSelectionExpr(ASTContext &Context,
SourceLocation GenericLoc, Expr *ControllingExpr,
- TypeSourceInfo **AssocTypes, Expr **AssocExprs,
- unsigned NumAssocs, SourceLocation DefaultLoc,
- SourceLocation RParenLoc,
+ ArrayRef<TypeSourceInfo*> AssocTypes,
+ ArrayRef<Expr*> AssocExprs,
+ SourceLocation DefaultLoc, SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack,
unsigned ResultIndex);
/// This constructor is used in the result-dependent case.
GenericSelectionExpr(ASTContext &Context,
SourceLocation GenericLoc, Expr *ControllingExpr,
- TypeSourceInfo **AssocTypes, Expr **AssocExprs,
- unsigned NumAssocs, SourceLocation DefaultLoc,
- SourceLocation RParenLoc,
+ ArrayRef<TypeSourceInfo*> AssocTypes,
+ ArrayRef<Expr*> AssocExprs,
+ SourceLocation DefaultLoc, SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack);
explicit GenericSelectionExpr(EmptyShell Empty)
@@ -4176,7 +4227,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == GenericSelectionExprClass;
}
- static bool classof(const GenericSelectionExpr *) { return true; }
child_range children() {
return child_range(SubExprs, SubExprs+END_EXPR+NumAssocs);
@@ -4247,7 +4297,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ExtVectorElementExprClass;
}
- static bool classof(const ExtVectorElementExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Base, &Base+1); }
@@ -4289,7 +4338,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == BlockExprClass;
}
- static bool classof(const BlockExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -4336,7 +4384,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == AsTypeExprClass;
}
- static bool classof(const AsTypeExpr *) { return true; }
// Iterators
child_range children() { return child_range(&SrcExpr, &SrcExpr+1); }
@@ -4473,7 +4520,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == PseudoObjectExprClass;
}
- static bool classof(const PseudoObjectExpr *) { return true; }
};
/// AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*,
@@ -4501,7 +4547,7 @@ private:
friend class ASTStmtReader;
public:
- AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr, QualType t,
+ AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args, QualType t,
AtomicOp op, SourceLocation RP);
/// \brief Determine the number of arguments the specified atomic builtin
@@ -4563,7 +4609,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == AtomicExprClass;
}
- static bool classof(const AtomicExpr *) { return true; }
// Iterators
child_range children() {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
index ecfa9e2..9c759db 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
@@ -53,14 +53,19 @@ class CXXOperatorCallExpr : public CallExpr {
OverloadedOperatorKind Operator;
SourceRange Range;
+ // Record the FP_CONTRACT state that applies to this operator call. Only
+ // meaningful for floating point types. For other types this value can be
+ // set to false.
+ unsigned FPContractable : 1;
+
SourceRange getSourceRangeImpl() const LLVM_READONLY;
public:
CXXOperatorCallExpr(ASTContext& C, OverloadedOperatorKind Op, Expr *fn,
- Expr **args, unsigned numargs, QualType t,
- ExprValueKind VK, SourceLocation operatorloc)
- : CallExpr(C, CXXOperatorCallExprClass, fn, 0, args, numargs, t, VK,
+ ArrayRef<Expr*> args, QualType t, ExprValueKind VK,
+ SourceLocation operatorloc, bool fpContractable)
+ : CallExpr(C, CXXOperatorCallExprClass, fn, 0, args, t, VK,
operatorloc),
- Operator(Op) {
+ Operator(Op), FPContractable(fpContractable) {
Range = getSourceRangeImpl();
}
explicit CXXOperatorCallExpr(ASTContext& C, EmptyShell Empty) :
@@ -83,7 +88,14 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXOperatorCallExprClass;
}
- static bool classof(const CXXOperatorCallExpr *) { return true; }
+
+ // Set the FP contractability status of this operator. Only meaningful for
+ // operations on floating point types.
+ void setFPContractable(bool FPC) { FPContractable = FPC; }
+
+ // Get the FP contractability status of this operator. Only meaningful for
+ // operations on floating point types.
+ bool isFPContractable() const { return FPContractable; }
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -99,9 +111,9 @@ public:
/// the object argument).
class CXXMemberCallExpr : public CallExpr {
public:
- CXXMemberCallExpr(ASTContext &C, Expr *fn, Expr **args, unsigned numargs,
+ CXXMemberCallExpr(ASTContext &C, Expr *fn, ArrayRef<Expr*> args,
QualType t, ExprValueKind VK, SourceLocation RP)
- : CallExpr(C, CXXMemberCallExprClass, fn, 0, args, numargs, t, VK, RP) {}
+ : CallExpr(C, CXXMemberCallExprClass, fn, 0, args, t, VK, RP) {}
CXXMemberCallExpr(ASTContext &C, EmptyShell Empty)
: CallExpr(C, CXXMemberCallExprClass, Empty) { }
@@ -124,7 +136,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXMemberCallExprClass;
}
- static bool classof(const CXXMemberCallExpr *) { return true; }
};
/// CUDAKernelCallExpr - Represents a call to a CUDA kernel function.
@@ -134,10 +145,9 @@ private:
public:
CUDAKernelCallExpr(ASTContext &C, Expr *fn, CallExpr *Config,
- Expr **args, unsigned numargs, QualType t,
- ExprValueKind VK, SourceLocation RP)
- : CallExpr(C, CUDAKernelCallExprClass, fn, END_PREARG, args, numargs, t, VK,
- RP) {
+ ArrayRef<Expr*> args, QualType t, ExprValueKind VK,
+ SourceLocation RP)
+ : CallExpr(C, CUDAKernelCallExprClass, fn, END_PREARG, args, t, VK, RP) {
setConfig(Config);
}
@@ -153,7 +163,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CUDAKernelCallExprClass;
}
- static bool classof(const CUDAKernelCallExpr *) { return true; }
};
/// CXXNamedCastExpr - Abstract class common to all of the C++ "named"
@@ -205,7 +214,6 @@ public:
return false;
}
}
- static bool classof(const CXXNamedCastExpr *) { return true; }
};
/// CXXStaticCastExpr - A C++ @c static_cast expression
@@ -235,7 +243,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXStaticCastExprClass;
}
- static bool classof(const CXXStaticCastExpr *) { return true; }
};
/// CXXDynamicCastExpr - A C++ @c dynamic_cast expression
@@ -269,7 +276,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXDynamicCastExprClass;
}
- static bool classof(const CXXDynamicCastExpr *) { return true; }
};
/// CXXReinterpretCastExpr - A C++ @c reinterpret_cast expression (C++
@@ -301,7 +307,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXReinterpretCastExprClass;
}
- static bool classof(const CXXReinterpretCastExpr *) { return true; }
};
/// CXXConstCastExpr - A C++ @c const_cast expression (C++ [expr.const.cast]),
@@ -329,7 +334,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXConstCastExprClass;
}
- static bool classof(const CXXConstCastExpr *) { return true; }
};
/// UserDefinedLiteral - A call to a literal operator (C++11 [over.literal])
@@ -346,11 +350,11 @@ class UserDefinedLiteral : public CallExpr {
SourceLocation UDSuffixLoc;
public:
- UserDefinedLiteral(ASTContext &C, Expr *Fn, Expr **Args, unsigned NumArgs,
+ UserDefinedLiteral(ASTContext &C, Expr *Fn, ArrayRef<Expr*> Args,
QualType T, ExprValueKind VK, SourceLocation LitEndLoc,
SourceLocation SuffixLoc)
- : CallExpr(C, UserDefinedLiteralClass, Fn, 0, Args, NumArgs, T, VK,
- LitEndLoc), UDSuffixLoc(SuffixLoc) {}
+ : CallExpr(C, UserDefinedLiteralClass, Fn, 0, Args, T, VK, LitEndLoc),
+ UDSuffixLoc(SuffixLoc) {}
explicit UserDefinedLiteral(ASTContext &C, EmptyShell Empty)
: CallExpr(C, UserDefinedLiteralClass, Empty) {}
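
For orientation, the construct this class models: a UserDefinedLiteral is a CallExpr whose callee is the literal operator (illustrative):

    // C++11 user-defined literal: 1.5_km becomes a UserDefinedLiteral
    // node calling operator"" _km with the cooked value 1.5.
    constexpr long double operator"" _km(long double V) { return V * 1000.0L; }
    constexpr long double D = 1.5_km;
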
@@ -398,7 +402,6 @@ public:
static bool classof(const Stmt *S) {
return S->getStmtClass() == UserDefinedLiteralClass;
}
- static bool classof(const UserDefinedLiteral *) { return true; }
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -429,7 +432,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXBoolLiteralExprClass;
}
- static bool classof(const CXXBoolLiteralExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -455,7 +457,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXNullPtrLiteralExprClass;
}
- static bool classof(const CXXNullPtrLiteralExpr *) { return true; }
child_range children() { return child_range(); }
};
@@ -536,7 +537,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXTypeidExprClass;
}
- static bool classof(const CXXTypeidExpr *) { return true; }
// Iterators
child_range children() {
@@ -611,7 +611,9 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXUuidofExprClass;
}
- static bool classof(const CXXUuidofExpr *) { return true; }
+
+ /// Grabs __declspec(uuid()) off a type, or returns 0 if there is none.
+ static UuidAttr *GetUuidAttrOfType(QualType QT);
// Iterators
child_range children() {
@@ -659,7 +661,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXThisExprClass;
}
- static bool classof(const CXXThisExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -710,7 +711,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXThrowExprClass;
}
- static bool classof(const CXXThrowExpr *) { return true; }
// Iterators
child_range children() {
@@ -798,7 +798,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXDefaultArgExprClass;
}
- static bool classof(const CXXDefaultArgExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -875,7 +874,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXBindTemporaryExprClass;
}
- static bool classof(const CXXBindTemporaryExpr *) { return true; }
// Iterators
child_range children() { return child_range(&SubExpr, &SubExpr + 1); }
@@ -908,7 +906,7 @@ protected:
CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
SourceLocation Loc,
CXXConstructorDecl *d, bool elidable,
- Expr **args, unsigned numargs,
+ ArrayRef<Expr *> Args,
bool HadMultipleCandidates,
bool ListInitialization,
bool ZeroInitialization,
@@ -934,7 +932,7 @@ public:
static CXXConstructExpr *Create(ASTContext &C, QualType T,
SourceLocation Loc,
CXXConstructorDecl *D, bool Elidable,
- Expr **Args, unsigned NumArgs,
+ ArrayRef<Expr *> Args,
bool HadMultipleCandidates,
bool ListInitialization,
bool ZeroInitialization,
@@ -1011,7 +1009,6 @@ public:
return T->getStmtClass() == CXXConstructExprClass ||
T->getStmtClass() == CXXTemporaryObjectExprClass;
}
- static bool classof(const CXXConstructExpr *) { return true; }
// Iterators
child_range children() {
@@ -1066,7 +1063,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXFunctionalCastExprClass;
}
- static bool classof(const CXXFunctionalCastExpr *) { return true; }
};
/// @brief Represents a C++ functional cast expression that builds a
@@ -1090,7 +1086,7 @@ class CXXTemporaryObjectExpr : public CXXConstructExpr {
public:
CXXTemporaryObjectExpr(ASTContext &C, CXXConstructorDecl *Cons,
TypeSourceInfo *Type,
- Expr **Args,unsigned NumArgs,
+ ArrayRef<Expr *> Args,
SourceRange parenRange,
bool HadMultipleCandidates,
bool ZeroInitialization = false);
@@ -1104,7 +1100,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXTemporaryObjectExprClass;
}
- static bool classof(const CXXTemporaryObjectExpr *) { return true; }
friend class ASTStmtReader;
};
@@ -1281,8 +1276,11 @@ private:
/// \brief Retrieve the complete set of array-index variables.
VarDecl **getArrayIndexVars() const {
+ unsigned ArrayIndexSize =
+ llvm::RoundUpToAlignment(sizeof(unsigned) * (NumCaptures + 1),
+ llvm::alignOf<VarDecl*>());
return reinterpret_cast<VarDecl **>(
- getArrayIndexStarts() + NumCaptures + 1);
+ reinterpret_cast<char*>(getArrayIndexStarts()) + ArrayIndexSize);
}
public:
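
The rewrite fixes an alignment bug: the VarDecl* block trails an array of NumCaptures+1 unsigneds, whose end need not be pointer-aligned, so the byte size is rounded up first. The arithmetic in miniature (standalone sketch; names are illustrative):

    #include <cstddef>

    // Offset of a pointer array laid out after N 'unsigned' slots, rounded
    // up to pointer alignment (what llvm::RoundUpToAlignment computes).
    inline size_t roundUp(size_t Bytes, size_t Align) {
      return (Bytes + Align - 1) / Align * Align;
    }

    size_t pointerBlockOffset(size_t NumSlots) {
      return roundUp(sizeof(unsigned) * NumSlots, alignof(void *));
    }
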
@@ -1394,7 +1392,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == LambdaExprClass;
}
- static bool classof(const LambdaExpr *) { return true; }
SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(IntroducerRange.getBegin(), ClosingBrace);
@@ -1442,7 +1439,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXScalarValueInitExprClass;
}
- static bool classof(const CXXScalarValueInitExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1467,8 +1463,8 @@ class CXXNewExpr : public Expr {
/// the source range covering the parenthesized type-id.
SourceRange TypeIdParens;
- /// \brief Location of the first token.
- SourceLocation StartLoc;
+ /// \brief Range of the entire new expression.
+ SourceRange Range;
/// \brief Source-range of a paren-delimited initializer.
SourceRange DirectInitRange;
@@ -1498,11 +1494,11 @@ public:
CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
FunctionDecl *operatorDelete, bool usualArrayDeleteWantsSize,
- Expr **placementArgs, unsigned numPlaceArgs,
+ ArrayRef<Expr*> placementArgs,
SourceRange typeIdParens, Expr *arraySize,
InitializationStyle initializationStyle, Expr *initializer,
QualType ty, TypeSourceInfo *AllocatedTypeInfo,
- SourceLocation startLoc, SourceRange directInitRange);
+ SourceRange Range, SourceRange directInitRange);
explicit CXXNewExpr(EmptyShell Shell)
: Expr(CXXNewExprClass, Shell), SubExprs(0) { }
@@ -1580,7 +1576,7 @@ public:
}
/// \brief Returns the CXXConstructExpr from this new-expression, or NULL.
- const CXXConstructExpr* getConstructExpr() {
+ const CXXConstructExpr* getConstructExpr() const {
return dyn_cast_or_null<CXXConstructExpr>(getInitializer());
}
@@ -1617,19 +1613,18 @@ public:
return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
}
- SourceLocation getStartLoc() const { return StartLoc; }
- SourceLocation getEndLoc() const;
+ SourceLocation getStartLoc() const { return Range.getBegin(); }
+ SourceLocation getEndLoc() const { return Range.getEnd(); }
SourceRange getDirectInitRange() const { return DirectInitRange; }
SourceRange getSourceRange() const LLVM_READONLY {
- return SourceRange(getStartLoc(), getEndLoc());
+ return Range;
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXNewExprClass;
}
- static bool classof(const CXXNewExpr *) { return true; }
// Iterators
child_range children() {
@@ -1700,7 +1695,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXDeleteExprClass;
}
- static bool classof(const CXXDeleteExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Argument, &Argument+1); }
@@ -1889,7 +1883,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXPseudoDestructorExprClass;
}
- static bool classof(const CXXPseudoDestructorExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Base, &Base + 1); }
@@ -1945,7 +1938,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnaryTypeTraitExprClass;
}
- static bool classof(const UnaryTypeTraitExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -2017,7 +2009,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == BinaryTypeTraitExprClass;
}
- static bool classof(const BinaryTypeTraitExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -2111,7 +2102,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == TypeTraitExprClass;
}
- static bool classof(const TypeTraitExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -2186,7 +2176,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ArrayTypeTraitExprClass;
}
- static bool classof(const ArrayTypeTraitExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -2245,7 +2234,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ExpressionTraitExprClass;
}
- static bool classof(const ExpressionTraitExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -2432,7 +2420,6 @@ public:
return T->getStmtClass() == UnresolvedLookupExprClass ||
T->getStmtClass() == UnresolvedMemberExprClass;
}
- static bool classof(const OverloadExpr *) { return true; }
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -2454,10 +2441,6 @@ class UnresolvedLookupExpr : public OverloadExpr {
/// call.
bool RequiresADL;
- /// True if namespace ::std should be considered an associated namespace
- /// for the purposes of argument-dependent lookup. See C++0x [stmt.ranged]p1.
- bool StdIsAssociatedNamespace;
-
/// True if these lookup results are overloaded. This is pretty
/// trivially rederivable if we urgently need to kill this field.
bool Overloaded;
@@ -2476,19 +2459,16 @@ class UnresolvedLookupExpr : public OverloadExpr {
const DeclarationNameInfo &NameInfo,
bool RequiresADL, bool Overloaded,
const TemplateArgumentListInfo *TemplateArgs,
- UnresolvedSetIterator Begin, UnresolvedSetIterator End,
- bool StdIsAssociatedNamespace)
+ UnresolvedSetIterator Begin, UnresolvedSetIterator End)
: OverloadExpr(UnresolvedLookupExprClass, C, QualifierLoc, TemplateKWLoc,
NameInfo, TemplateArgs, Begin, End, false, false, false),
RequiresADL(RequiresADL),
- StdIsAssociatedNamespace(StdIsAssociatedNamespace),
Overloaded(Overloaded), NamingClass(NamingClass)
{}
UnresolvedLookupExpr(EmptyShell Empty)
: OverloadExpr(UnresolvedLookupExprClass, Empty),
- RequiresADL(false), StdIsAssociatedNamespace(false), Overloaded(false),
- NamingClass(0)
+ RequiresADL(false), Overloaded(false), NamingClass(0)
{}
friend class ASTStmtReader;
@@ -2500,14 +2480,10 @@ public:
const DeclarationNameInfo &NameInfo,
bool ADL, bool Overloaded,
UnresolvedSetIterator Begin,
- UnresolvedSetIterator End,
- bool StdIsAssociatedNamespace = false) {
- assert((ADL || !StdIsAssociatedNamespace) &&
- "std considered associated namespace when not performing ADL");
+ UnresolvedSetIterator End) {
return new(C) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
SourceLocation(), NameInfo,
- ADL, Overloaded, 0, Begin, End,
- StdIsAssociatedNamespace);
+ ADL, Overloaded, 0, Begin, End);
}
static UnresolvedLookupExpr *Create(ASTContext &C,
@@ -2528,10 +2504,6 @@ public:
/// argument-dependent lookup.
bool requiresADL() const { return RequiresADL; }
- /// True if namespace \::std should be artificially added to the set of
- /// associated namespaces for argument-dependent lookup purposes.
- bool isStdAssociatedNamespace() const { return StdIsAssociatedNamespace; }
-
/// True if this lookup is overloaded.
bool isOverloaded() const { return Overloaded; }
@@ -2554,7 +2526,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnresolvedLookupExprClass;
}
- static bool classof(const UnresolvedLookupExpr *) { return true; }
};
/// \brief A qualified reference to a name whose declaration cannot
@@ -2705,7 +2676,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == DependentScopeDeclRefExprClass;
}
- static bool classof(const DependentScopeDeclRefExpr *) { return true; }
child_range children() { return child_range(); }
@@ -2778,7 +2748,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ExprWithCleanupsClass;
}
- static bool classof(const ExprWithCleanups *) { return true; }
// Iterators
child_range children() { return child_range(&SubExpr, &SubExpr + 1); }
@@ -2820,8 +2789,7 @@ class CXXUnresolvedConstructExpr : public Expr {
CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
- Expr **Args,
- unsigned NumArgs,
+ ArrayRef<Expr*> Args,
SourceLocation RParenLoc);
CXXUnresolvedConstructExpr(EmptyShell Empty, unsigned NumArgs)
@@ -2833,8 +2801,7 @@ public:
static CXXUnresolvedConstructExpr *Create(ASTContext &C,
TypeSourceInfo *Type,
SourceLocation LParenLoc,
- Expr **Args,
- unsigned NumArgs,
+ ArrayRef<Expr*> Args,
SourceLocation RParenLoc);
static CXXUnresolvedConstructExpr *CreateEmpty(ASTContext &C,
@@ -2893,7 +2860,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXUnresolvedConstructExprClass;
}
- static bool classof(const CXXUnresolvedConstructExpr *) { return true; }
// Iterators
child_range children() {
@@ -3142,7 +3108,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXDependentScopeMemberExprClass;
}
- static bool classof(const CXXDependentScopeMemberExpr *) { return true; }
// Iterators
child_range children() {
@@ -3276,7 +3241,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnresolvedMemberExprClass;
}
- static bool classof(const UnresolvedMemberExpr *) { return true; }
// Iterators
child_range children() {
@@ -3320,7 +3284,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXNoexceptExprClass;
}
- static bool classof(const CXXNoexceptExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Operand, &Operand + 1); }
@@ -3397,7 +3360,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == PackExpansionExprClass;
}
- static bool classof(const PackExpansionExpr *) { return true; }
// Iterators
child_range children() {
@@ -3503,7 +3465,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == SizeOfPackExprClass;
}
- static bool classof(const SizeOfPackExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -3548,9 +3509,6 @@ public:
static bool classof(const Stmt *s) {
return s->getStmtClass() == SubstNonTypeTemplateParmExprClass;
}
- static bool classof(const SubstNonTypeTemplateParmExpr *) {
- return true;
- }
// Iterators
child_range children() { return child_range(&Replacement, &Replacement+1); }
@@ -3561,7 +3519,7 @@ public:
///
/// When a pack expansion in the source code contains multiple parameter packs
/// and those parameter packs correspond to different levels of template
-/// parameter lists, this node node is used to represent a non-type template
+/// parameter lists, this node is used to represent a non-type template
/// parameter pack from an outer level, which has already had its argument pack
/// substituted but that still lives within a pack expansion that itself
/// could not be instantiated. When actually performing a substitution into
@@ -3608,14 +3566,77 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == SubstNonTypeTemplateParmPackExprClass;
}
- static bool classof(const SubstNonTypeTemplateParmPackExpr *) {
- return true;
- }
// Iterators
child_range children() { return child_range(); }
};
+/// \brief Represents a reference to a function parameter pack that has been
+/// substituted but not yet expanded.
+///
+/// When a pack expansion contains multiple parameter packs at different levels,
+/// this node is used to represent a function parameter pack at an outer level
+/// which we have already substituted to refer to expanded parameters, but where
+/// the containing pack expansion cannot yet be expanded.
+///
+/// \code
+/// template<typename...Ts> struct S {
+/// template<typename...Us> auto f(Ts ...ts) -> decltype(g(Us(ts)...));
+/// };
+/// template struct S<int, int>;
+/// \endcode
+class FunctionParmPackExpr : public Expr {
+ /// \brief The function parameter pack which was referenced.
+ ParmVarDecl *ParamPack;
+
+ /// \brief The location of the function parameter pack reference.
+ SourceLocation NameLoc;
+
+ /// \brief The number of expansions of this pack.
+ unsigned NumParameters;
+
+ FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack,
+ SourceLocation NameLoc, unsigned NumParams,
+ Decl * const *Params);
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+
+public:
+ static FunctionParmPackExpr *Create(ASTContext &Context, QualType T,
+ ParmVarDecl *ParamPack,
+ SourceLocation NameLoc,
+ llvm::ArrayRef<Decl*> Params);
+ static FunctionParmPackExpr *CreateEmpty(ASTContext &Context,
+ unsigned NumParams);
+
+ /// \brief Get the parameter pack which this expression refers to.
+ ParmVarDecl *getParameterPack() const { return ParamPack; }
+
+ /// \brief Get the location of the parameter pack.
+ SourceLocation getParameterPackLocation() const { return NameLoc; }
+
+ /// \brief Iterators over the parameters which the parameter pack expanded
+ /// into.
+ typedef ParmVarDecl * const *iterator;
+ iterator begin() const { return reinterpret_cast<iterator>(this+1); }
+ iterator end() const { return begin() + NumParameters; }
+
+ /// \brief Get the number of parameters in this parameter pack.
+ unsigned getNumExpansions() const { return NumParameters; }
+
+ /// \brief Get an expansion of the parameter pack by index.
+ ParmVarDecl *getExpansion(unsigned I) const { return begin()[I]; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return NameLoc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == FunctionParmPackExprClass;
+ }
+
+ child_range children() { return child_range(); }
+};
+
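A minimal sketch of consuming the new FunctionParmPackExpr interface (hypothetical caller, not part of the patch); the expanded ParmVarDecls live in trailing storage after the node, so iteration is simple pointer arithmetic.
// Hypothetical helper collecting the expanded parameters.
void collectExpansions(clang::FunctionParmPackExpr *E,
                       llvm::SmallVectorImpl<clang::ParmVarDecl *> &Out) {
  for (unsigned I = 0, N = E->getNumExpansions(); I != N; ++I)
    Out.push_back(E->getExpansion(I));  // same as *(E->begin() + I)
}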
/// \brief Represents a prvalue temporary that is written into memory so that
/// a reference can bind to it.
///
@@ -3670,9 +3691,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == MaterializeTemporaryExprClass;
}
- static bool classof(const MaterializeTemporaryExpr *) {
- return true;
- }
// Iterators
child_range children() { return child_range(&Temporary, &Temporary + 1); }
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
index 93a5ada..27f5da0 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
@@ -51,7 +51,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCStringLiteralClass;
}
- static bool classof(const ObjCStringLiteral *) { return true; }
// Iterators
child_range children() { return child_range(&String, &String+1); }
@@ -81,7 +80,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCBoolLiteralExprClass;
}
- static bool classof(const ObjCBoolLiteralExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -121,7 +119,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCBoxedExprClass;
}
- static bool classof(const ObjCBoxedExpr *) { return true; }
// Iterators
child_range children() { return child_range(&SubExpr, &SubExpr+1); }
@@ -156,7 +153,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCArrayLiteralClass;
}
- static bool classof(const ObjCArrayLiteral *) { return true; }
/// \brief Retrieve elements of array of literals.
Expr **getElements() { return reinterpret_cast<Expr **>(this + 1); }
@@ -319,7 +315,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCDictionaryLiteralClass;
}
- static bool classof(const ObjCDictionaryLiteral *) { return true; }
// Iterators
child_range children() {
@@ -372,7 +367,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCEncodeExprClass;
}
- static bool classof(const ObjCEncodeExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -409,7 +403,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCSelectorExprClass;
}
- static bool classof(const ObjCSelectorExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -447,7 +440,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCProtocolExprClass;
}
- static bool classof(const ObjCProtocolExpr *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -501,7 +493,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCIvarRefExprClass;
}
- static bool classof(const ObjCIvarRefExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Base, &Base+1); }
@@ -715,7 +706,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCPropertyRefExprClass;
}
- static bool classof(const ObjCPropertyRefExpr *) { return true; }
// Iterators
child_range children() {
@@ -813,7 +803,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCSubscriptRefExprClass;
}
- static bool classof(const ObjCSubscriptRefExpr *) { return true; }
Expr *getBaseExpr() const { return cast<Expr>(SubExprs[BASE]); }
void setBaseExpr(Stmt *S) { SubExprs[BASE] = S; }
@@ -1156,10 +1145,8 @@ public:
return getReceiverKind() == Class || getReceiverKind() == SuperClass;
}
- /// \brief Returns the receiver of an instance message.
- ///
- /// \brief Returns the object expression for an instance message, or
- /// NULL for a message that is not an instance message.
+ /// \brief Returns the object expression (receiver) for an instance message,
+ /// or null for a message that is not an instance message.
Expr *getInstanceReceiver() {
if (getReceiverKind() == Instance)
return static_cast<Expr *>(getReceiverPointer());
@@ -1208,6 +1195,17 @@ public:
return SourceLocation();
}
+ /// \brief Retrieve the receiver type to which this message is being directed.
+ ///
+ /// This routine cross-cuts all of the different kinds of message
+ /// sends to determine what the underlying (statically known) type
+ /// of the receiver will be; use \c getReceiverKind() to determine
+ /// whether the message is a class or an instance method, whether it
+ /// is a send to super or not, etc.
+ ///
+ /// \returns The type of the receiver.
+ QualType getReceiverType() const;
+
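A minimal sketch of the new accessor (hypothetical caller, not part of the patch): one query answers for instance, class, and super sends alike, with getReceiverKind() distinguishing the flavors.
// Hypothetical: print the statically known receiver type of any send.
void dumpReceiver(const clang::ObjCMessageExpr *E) {
  llvm::errs() << E->getReceiverType().getAsString() << "\n";
}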
/// \brief Retrieve the Objective-C interface to which this message
/// is being directed, if known.
///
@@ -1344,7 +1342,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCMessageExprClass;
}
- static bool classof(const ObjCMessageExpr *) { return true; }
// Iterators
child_range children();
@@ -1409,7 +1406,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCIsaExprClass;
}
- static bool classof(const ObjCIsaExpr *) { return true; }
// Iterators
child_range children() { return child_range(&Base, &Base+1); }
@@ -1483,7 +1479,6 @@ public:
static bool classof(const Stmt *s) {
return s->getStmtClass() == ObjCIndirectCopyRestoreExprClass;
}
- static bool classof(const ObjCIndirectCopyRestoreExpr *) { return true; }
};
/// \brief An Objective-C "bridged" cast expression, which casts between
@@ -1532,8 +1527,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCBridgedCastExprClass;
}
- static bool classof(const ObjCBridgedCastExpr *) { return true; }
-
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
index 7aedfe2..db2bddb 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
@@ -162,7 +162,7 @@ public:
}
/// \brief Get the decls that are contained in a file in the Offset/Length
- /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// range. \p Length can be 0 to indicate a point at \p Offset instead of
/// a range.
virtual void FindFileRegionDecls(FileID File, unsigned Offset,unsigned Length,
SmallVectorImpl<Decl *> &Decls) {}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h b/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
index 51ae1da..f9fd1f9 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
@@ -83,7 +83,7 @@ public:
/// \brief The Objective-C NSArray selectors.
Selector getNSArraySelector(NSArrayMethodKind MK) const;
- /// \brief Return NSArrayMethodKind if \arg Sel is such a selector.
+ /// \brief Return NSArrayMethodKind if \p Sel is such a selector.
llvm::Optional<NSArrayMethodKind> getNSArrayMethodKind(Selector Sel);
/// \brief Enumerates the NSDictionary methods used to generate literals.
@@ -104,7 +104,7 @@ public:
/// \brief The Objective-C NSDictionary selectors.
Selector getNSDictionarySelector(NSDictionaryMethodKind MK) const;
- /// \brief Return NSDictionaryMethodKind if \arg Sel is such a selector.
+ /// \brief Return NSDictionaryMethodKind if \p Sel is such a selector.
llvm::Optional<NSDictionaryMethodKind>
getNSDictionaryMethodKind(Selector Sel);
@@ -169,7 +169,7 @@ public:
Sel == getNSNumberLiteralSelector(MK, true);
}
- /// \brief Return NSNumberLiteralMethodKind if \arg Sel is such a selector.
+ /// \brief Return NSNumberLiteralMethodKind if \p Sel is such a selector.
llvm::Optional<NSNumberLiteralMethodKind>
getNSNumberLiteralMethodKind(Selector Sel) const;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
index a5aec1f..bf9e1cb 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
@@ -97,8 +97,7 @@ private:
Specifier(Other.Specifier) {
}
- NestedNameSpecifier &operator=(const NestedNameSpecifier &); // do not
- // implement
+ void operator=(const NestedNameSpecifier &) LLVM_DELETED_FUNCTION;
/// \brief Either find or insert the given nested name specifier
/// mockup in the given context.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
index 6359414..18169fd 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
@@ -288,7 +288,11 @@ enum CastKind {
///
/// This particular cast kind is used for the conversion from a C++11
/// lambda expression to a block pointer.
- CK_CopyAndAutoreleaseBlockObject
+ CK_CopyAndAutoreleaseBlockObject,
+
+  /// Convert a builtin function to a function pointer; only allowed in the
+  /// callee of a call expression.
+ CK_BuiltinFnToFnPtr
};
static const CastKind CK_Invalid = static_cast<CastKind>(-1);
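For illustration (an assumption consistent with the comment above, not taken from the patch): in a call like the one below, the callee reference is wrapped in an ImplicitCastExpr of this kind, and because the cast is only legal in callee position, '&__builtin_abs' remains ill-formed.
// The builtin callee is modeled with a BuiltinFnToFnPtr cast in the AST.
int callBuiltin() { return __builtin_abs(-42); }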
diff --git a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
index f2c015f..7babc1b 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
@@ -39,6 +39,7 @@ struct PrintingPolicy {
SuppressUnwrittenScope(false), SuppressInitializers(false),
ConstantArraySizeAsWritten(false), AnonymousTagLocations(true),
SuppressStrongLifetime(false), Bool(LO.Bool),
+ TerseOutput(false), SuppressAttributes(false),
DumpSourceManager(0) { }
/// \brief What language we're printing.
@@ -134,6 +135,17 @@ struct PrintingPolicy {
/// doesn't actually have 'bool' (because, e.g., it is defined as a macro).
unsigned Bool : 1;
+ /// \brief Provide a 'terse' output.
+ ///
+ /// For example, in this mode we don't print function bodies, class members,
+ /// declarations inside namespaces etc. Effectively, this should print
+ /// only the requested declaration.
+ unsigned TerseOutput : 1;
+
+ /// \brief When true, do not print attributes attached to the declaration.
+ ///
+ unsigned SuppressAttributes : 1;
+
/// \brief If we are "dumping" rather than "pretty-printing", this points to
/// a SourceManager which will be used to dump SourceLocations. Dumping
/// involves printing the internal details of the AST and pretty-printing
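A minimal sketch combining the two new policy bits (hypothetical caller, not part of the patch) to print just the requested declaration, with no body, members, or attributes.
// Hypothetical terse printer built on Decl::print.
void printTerse(const clang::Decl *D, const clang::LangOptions &LO) {
  clang::PrintingPolicy Policy(LO);
  Policy.TerseOutput = true;         // skip bodies, members, nested decls
  Policy.SuppressAttributes = true;  // skip attached attributes
  D->print(llvm::errs(), Policy);
}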
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h b/contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h
index 630626b..3a8b218 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h
@@ -18,6 +18,7 @@ namespace clang {
class ASTContext;
class ASTReader;
class Decl;
+class Preprocessor;
namespace comments {
class FullComment;
@@ -114,7 +115,8 @@ public:
}
/// Parse the comment, assuming it is attached to decl \c D.
- comments::FullComment *parse(const ASTContext &Context, const Decl *D) const;
+ comments::FullComment *parse(const ASTContext &Context,
+ const Preprocessor *PP, const Decl *D) const;
private:
SourceRange Range;
@@ -188,7 +190,7 @@ public:
private:
SourceManager &SourceMgr;
std::vector<RawComment *> Comments;
- RawComment LastComment;
+ SourceLocation PrevCommentEndLoc;
bool OnlyWhitespaceSeen;
void addCommentsToFront(const std::vector<RawComment *> &C) {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
index 3a870d0..3655646 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
@@ -136,8 +136,8 @@ private:
void Destroy(ASTContext &Ctx);
- ASTRecordLayout(const ASTRecordLayout&); // DO NOT IMPLEMENT
- void operator=(const ASTRecordLayout&); // DO NOT IMPLEMENT
+ ASTRecordLayout(const ASTRecordLayout &) LLVM_DELETED_FUNCTION;
+ void operator=(const ASTRecordLayout &) LLVM_DELETED_FUNCTION;
public:
/// getAlignment - Get the record alignment in characters.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
index 2e56a48..f96e067 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -721,6 +721,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgument(
case TemplateArgument::Null:
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
+ case TemplateArgument::NullPtr:
return true;
case TemplateArgument::Type:
@@ -753,6 +754,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLoc(
case TemplateArgument::Null:
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
+ case TemplateArgument::NullPtr:
return true;
case TemplateArgument::Type: {
@@ -799,7 +801,7 @@ bool RecursiveASTVisitor<Derived>::TraverseConstructorInitializer(
if (TypeSourceInfo *TInfo = Init->getTypeSourceInfo())
TRY_TO(TraverseTypeLoc(TInfo->getTypeLoc()));
- if (Init->isWritten())
+ if (Init->isWritten() || getDerived().shouldVisitImplicitCode())
TRY_TO(TraverseStmt(Init->getInit()));
return true;
}
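A minimal sketch of the hook this hunk keys on (hypothetical visitor, not part of the patch): opting in to implicit code now also traverses constructor initializers that were not written in the source.
// Hypothetical visitor that sees unwritten constructor initializers.
class ImplicitInitVisitor
    : public clang::RecursiveASTVisitor<ImplicitInitVisitor> {
public:
  bool shouldVisitImplicitCode() const { return true; }
};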
@@ -1827,7 +1829,7 @@ bool RecursiveASTVisitor<Derived>::Traverse##STMT (STMT *S) { \
return true; \
}
-DEF_TRAVERSE_STMT(AsmStmt, {
+DEF_TRAVERSE_STMT(GCCAsmStmt, {
TRY_TO(TraverseStmt(S->getAsmString()));
for (unsigned I = 0, E = S->getNumInputs(); I < E; ++I) {
TRY_TO(TraverseStmt(S->getInputConstraintLiteral(I)));
@@ -1836,7 +1838,7 @@ DEF_TRAVERSE_STMT(AsmStmt, {
TRY_TO(TraverseStmt(S->getOutputConstraintLiteral(I)));
}
for (unsigned I = 0, E = S->getNumClobbers(); I < E; ++I) {
- TRY_TO(TraverseStmt(S->getClobber(I)));
+ TRY_TO(TraverseStmt(S->getClobberStringLiteral(I)));
}
// children() iterates over inputExpr and outputExpr.
})
@@ -2141,7 +2143,9 @@ DEF_TRAVERSE_STMT(BlockExpr, {
return true; // no child statements to loop through.
})
DEF_TRAVERSE_STMT(ChooseExpr, { })
-DEF_TRAVERSE_STMT(CompoundLiteralExpr, { })
+DEF_TRAVERSE_STMT(CompoundLiteralExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
+})
DEF_TRAVERSE_STMT(CXXBindTemporaryExpr, { })
DEF_TRAVERSE_STMT(CXXBoolLiteralExpr, { })
DEF_TRAVERSE_STMT(CXXDefaultArgExpr, { })
@@ -2219,6 +2223,7 @@ DEF_TRAVERSE_STMT(PackExpansionExpr, { })
DEF_TRAVERSE_STMT(SizeOfPackExpr, { })
DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmPackExpr, { })
DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmExpr, { })
+DEF_TRAVERSE_STMT(FunctionParmPackExpr, { })
DEF_TRAVERSE_STMT(MaterializeTemporaryExpr, { })
DEF_TRAVERSE_STMT(AtomicExpr, { })
diff --git a/contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h b/contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h
index cd43a5c..6d903f8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h
@@ -42,7 +42,7 @@ enum SelectorLocationsKind {
SelLoc_StandardWithSpace = 2
};
-/// \brief Returns true if all \arg SelLocs are in a "standard" location.
+/// \brief Classify whether all \p SelLocs are in a "standard" location.
SelectorLocationsKind hasStandardSelectorLocs(Selector Sel,
ArrayRef<SourceLocation> SelLocs,
ArrayRef<Expr *> Args,
@@ -60,7 +60,7 @@ SourceLocation getStandardSelectorLoc(unsigned Index,
ArrayRef<Expr *> Args,
SourceLocation EndLoc);
-/// \brief Returns true if all \arg SelLocs are in a "standard" location.
+/// \brief Classify whether all \p SelLocs are in a "standard" location.
SelectorLocationsKind hasStandardSelectorLocs(Selector Sel,
ArrayRef<SourceLocation> SelLocs,
ArrayRef<ParmVarDecl *> Args,
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
index 35fb693..a9bbb48 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
@@ -392,9 +392,6 @@ public:
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
- // Implement isa<T> support.
- static bool classof(const Stmt *) { return true; }
-
/// hasImplicitControlFlow - Some statements (e.g. short circuited operations)
/// contain implicit control-flow in the order their subexpressions
/// are evaluated. This predicate returns true if this statement has
@@ -424,12 +421,12 @@ public:
/// \brief Produce a unique representation of the given statement.
///
- /// \brief ID once the profiling operation is complete, will contain
+ /// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
- /// \brief Context the AST context in which the statement resides
+ /// \param Context the AST context in which the statement resides
///
- /// \brief Canonical whether the profile should be based on the canonical
+ /// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
@@ -480,7 +477,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
- static bool classof(const DeclStmt *) { return true; }
// Iterators over subexpressions.
child_range children() {
@@ -535,7 +531,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
- static bool classof(const NullStmt *) { return true; }
child_range children() { return child_range(); }
@@ -615,7 +610,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
- static bool classof(const CompoundStmt *) { return true; }
// Iterators
child_range children() {
@@ -654,7 +648,6 @@ public:
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
- static bool classof(const SwitchCase *) { return true; }
};
class CaseStmt : public SwitchCase {
@@ -714,7 +707,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
- static bool classof(const CaseStmt *) { return true; }
// Iterators
child_range children() {
@@ -749,7 +741,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
- static bool classof(const DefaultStmt *) { return true; }
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
@@ -788,7 +779,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
- static bool classof(const LabelStmt *) { return true; }
};
@@ -837,7 +827,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
- static bool classof(const AttributedStmt *) { return true; }
};
@@ -906,7 +895,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
- static bool classof(const IfStmt *) { return true; }
};
/// SwitchStmt - This represents a 'switch' stmt.
@@ -1000,7 +988,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
- static bool classof(const SwitchStmt *) { return true; }
};
@@ -1050,7 +1037,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
- static bool classof(const WhileStmt *) { return true; }
// Iterators
child_range children() {
@@ -1099,7 +1085,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
- static bool classof(const DoStmt *) { return true; }
// Iterators
child_range children() {
@@ -1171,7 +1156,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
- static bool classof(const ForStmt *) { return true; }
// Iterators
child_range children() {
@@ -1206,7 +1190,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
- static bool classof(const GotoStmt *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1251,7 +1234,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
- static bool classof(const IndirectGotoStmt *) { return true; }
// Iterators
child_range children() { return child_range(&Target, &Target+1); }
@@ -1278,7 +1260,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
- static bool classof(const ContinueStmt *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1302,7 +1283,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
- static bool classof(const BreakStmt *) { return true; }
// Iterators
child_range children() { return child_range(); }
@@ -1354,7 +1334,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
- static bool classof(const ReturnStmt *) { return true; }
// Iterators
child_range children() {
@@ -1363,48 +1342,184 @@ public:
}
};
-/// AsmStmt - This represents a GNU inline-assembly statement extension.
+/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
class AsmStmt : public Stmt {
- SourceLocation AsmLoc, RParenLoc;
- StringLiteral *AsmStr;
-
+protected:
+ SourceLocation AsmLoc;
+ /// \brief True if the assembly statement does not have any input or output
+ /// operands.
bool IsSimple;
+
+ /// \brief If true, treat this inline assembly as having side effects.
+ /// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
- bool MSAsm;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
- // FIXME: If we wanted to, we could allocate all of these in one big array.
IdentifierInfo **Names;
- StringLiteral **Constraints;
Stmt **Exprs;
- StringLiteral **Clobbers;
-public:
- AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile,
- bool msasm, unsigned numoutputs, unsigned numinputs,
- IdentifierInfo **names, StringLiteral **constraints,
- Expr **exprs, StringLiteral *asmstr, unsigned numclobbers,
- StringLiteral **clobbers, SourceLocation rparenloc);
+ AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
+ unsigned numoutputs, unsigned numinputs, unsigned numclobbers) :
+ Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
+ NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { }
+public:
/// \brief Build an empty inline-assembly statement.
- explicit AsmStmt(EmptyShell Empty) : Stmt(AsmStmtClass, Empty),
- Names(0), Constraints(0), Exprs(0), Clobbers(0) { }
+ explicit AsmStmt(StmtClass SC, EmptyShell Empty) :
+ Stmt(SC, Empty), Names(0), Exprs(0) { }
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
- SourceLocation getRParenLoc() const { return RParenLoc; }
- void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- bool isVolatile() const { return IsVolatile; }
- void setVolatile(bool V) { IsVolatile = V; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
- bool isMSAsm() const { return MSAsm; }
- void setMSAsm(bool V) { MSAsm = V; }
+
+ bool isVolatile() const { return IsVolatile; }
+ void setVolatile(bool V) { IsVolatile = V; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(); }
+
+ //===--- Asm String Analysis ---===//
+
+ /// Assemble final IR asm string.
+ std::string generateAsmString(ASTContext &C) const;
+
+ //===--- Output operands ---===//
+
+ unsigned getNumOutputs() const { return NumOutputs; }
+
+ IdentifierInfo *getOutputIdentifier(unsigned i) const {
+ return Names[i];
+ }
+
+ StringRef getOutputName(unsigned i) const {
+ if (IdentifierInfo *II = getOutputIdentifier(i))
+ return II->getName();
+
+ return StringRef();
+ }
+
+ /// getOutputConstraint - Return the constraint string for the specified
+ /// output operand. All output constraints are known to be non-empty (either
+ /// '=' or '+').
+ StringRef getOutputConstraint(unsigned i) const;
+
+ /// isOutputPlusConstraint - Return true if the specified output constraint
+ /// is a "+" constraint (which is both an input and an output) or false if it
+ /// is an "=" constraint (just an output).
+ bool isOutputPlusConstraint(unsigned i) const {
+ return getOutputConstraint(i)[0] == '+';
+ }
+
+ const Expr *getOutputExpr(unsigned i) const;
+
+ /// getNumPlusOperands - Return the number of output operands that have a "+"
+ /// constraint.
+ unsigned getNumPlusOperands() const;
+
+ //===--- Input operands ---===//
+
+ unsigned getNumInputs() const { return NumInputs; }
+
+ IdentifierInfo *getInputIdentifier(unsigned i) const {
+ return Names[i + NumOutputs];
+ }
+
+ StringRef getInputName(unsigned i) const {
+ if (IdentifierInfo *II = getInputIdentifier(i))
+ return II->getName();
+
+ return StringRef();
+ }
+
+ /// getInputConstraint - Return the specified input constraint. Unlike output
+ /// constraints, these can be empty.
+ StringRef getInputConstraint(unsigned i) const;
+
+ const Expr *getInputExpr(unsigned i) const;
+
+ //===--- Other ---===//
+
+ unsigned getNumClobbers() const { return NumClobbers; }
+ StringRef getClobber(unsigned i) const;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == GCCAsmStmtClass ||
+ T->getStmtClass() == MSAsmStmtClass;
+ }
+
+ // Input expr iterators.
+
+ typedef ExprIterator inputs_iterator;
+ typedef ConstExprIterator const_inputs_iterator;
+
+ inputs_iterator begin_inputs() {
+ return &Exprs[0] + NumOutputs;
+ }
+
+ inputs_iterator end_inputs() {
+ return &Exprs[0] + NumOutputs + NumInputs;
+ }
+
+ const_inputs_iterator begin_inputs() const {
+ return &Exprs[0] + NumOutputs;
+ }
+
+ const_inputs_iterator end_inputs() const {
+ return &Exprs[0] + NumOutputs + NumInputs;
+ }
+
+ // Output expr iterators.
+
+ typedef ExprIterator outputs_iterator;
+ typedef ConstExprIterator const_outputs_iterator;
+
+ outputs_iterator begin_outputs() {
+ return &Exprs[0];
+ }
+ outputs_iterator end_outputs() {
+ return &Exprs[0] + NumOutputs;
+ }
+
+ const_outputs_iterator begin_outputs() const {
+ return &Exprs[0];
+ }
+ const_outputs_iterator end_outputs() const {
+ return &Exprs[0] + NumOutputs;
+ }
+
+ child_range children() {
+ return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
+ }
+};
+
+/// This represents a GCC inline-assembly statement extension.
+///
+class GCCAsmStmt : public AsmStmt {
+ SourceLocation RParenLoc;
+ StringLiteral *AsmStr;
+
+ // FIXME: If we wanted to, we could allocate all of these in one big array.
+ StringLiteral **Constraints;
+ StringLiteral **Clobbers;
+
+public:
+ GCCAsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
+ bool isvolatile, unsigned numoutputs, unsigned numinputs,
+ IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
+ StringLiteral *asmstr, unsigned numclobbers,
+ StringLiteral **clobbers, SourceLocation rparenloc);
+
+ /// \brief Build an empty inline-assembly statement.
+ explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty),
+ Constraints(0), Clobbers(0) { }
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
@@ -1461,25 +1576,11 @@ public:
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
ASTContext &C, unsigned &DiagOffs) const;
+ /// Assemble final IR asm string.
+ std::string generateAsmString(ASTContext &C) const;
//===--- Output operands ---===//
- unsigned getNumOutputs() const { return NumOutputs; }
-
- IdentifierInfo *getOutputIdentifier(unsigned i) const {
- return Names[i];
- }
-
- StringRef getOutputName(unsigned i) const {
- if (IdentifierInfo *II = getOutputIdentifier(i))
- return II->getName();
-
- return StringRef();
- }
-
- /// getOutputConstraint - Return the constraint string for the specified
- /// output operand. All output constraints are known to be non-empty (either
- /// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
@@ -1492,37 +1593,11 @@ public:
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
- return const_cast<AsmStmt*>(this)->getOutputExpr(i);
+ return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
- /// isOutputPlusConstraint - Return true if the specified output constraint
- /// is a "+" constraint (which is both an input and an output) or false if it
- /// is an "=" constraint (just an output).
- bool isOutputPlusConstraint(unsigned i) const {
- return getOutputConstraint(i)[0] == '+';
- }
-
- /// getNumPlusOperands - Return the number of output operands that have a "+"
- /// constraint.
- unsigned getNumPlusOperands() const;
-
//===--- Input operands ---===//
- unsigned getNumInputs() const { return NumInputs; }
-
- IdentifierInfo *getInputIdentifier(unsigned i) const {
- return Names[i + NumOutputs];
- }
-
- StringRef getInputName(unsigned i) const {
- if (IdentifierInfo *II = getInputIdentifier(i))
- return II->getName();
-
- return StringRef();
- }
-
- /// getInputConstraint - Return the specified input constraint. Unlike output
- /// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
@@ -1536,7 +1611,7 @@ public:
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
- return const_cast<AsmStmt*>(this)->getInputExpr(i);
+ return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
void setOutputsAndInputsAndClobbers(ASTContext &C,
@@ -1555,90 +1630,45 @@ public:
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
- unsigned getNumClobbers() const { return NumClobbers; }
- StringLiteral *getClobber(unsigned i) { return Clobbers[i]; }
- const StringLiteral *getClobber(unsigned i) const { return Clobbers[i]; }
+ StringRef getClobber(unsigned i) const;
+ StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
+ const StringLiteral *getClobberStringLiteral(unsigned i) const {
+ return Clobbers[i];
+ }
SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AsmLoc, RParenLoc);
}
- static bool classof(const Stmt *T) {return T->getStmtClass() == AsmStmtClass;}
- static bool classof(const AsmStmt *) { return true; }
-
- // Input expr iterators.
-
- typedef ExprIterator inputs_iterator;
- typedef ConstExprIterator const_inputs_iterator;
-
- inputs_iterator begin_inputs() {
- return &Exprs[0] + NumOutputs;
- }
-
- inputs_iterator end_inputs() {
- return &Exprs[0] + NumOutputs + NumInputs;
- }
-
- const_inputs_iterator begin_inputs() const {
- return &Exprs[0] + NumOutputs;
- }
-
- const_inputs_iterator end_inputs() const {
- return &Exprs[0] + NumOutputs + NumInputs;
- }
-
- // Output expr iterators.
-
- typedef ExprIterator outputs_iterator;
- typedef ConstExprIterator const_outputs_iterator;
-
- outputs_iterator begin_outputs() {
- return &Exprs[0];
- }
- outputs_iterator end_outputs() {
- return &Exprs[0] + NumOutputs;
- }
-
- const_outputs_iterator begin_outputs() const {
- return &Exprs[0];
- }
- const_outputs_iterator end_outputs() const {
- return &Exprs[0] + NumOutputs;
- }
-
- child_range children() {
- return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == GCCAsmStmtClass;
}
};
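A minimal sketch of dispatching over the new hierarchy (hypothetical caller, not part of the patch): the shared operand queries now live on the AsmStmt base, while StringLiteral-level access stays GCC-specific.
// Hypothetical: common clobber query via the base, literal access via cast.
void dumpClobbers(const clang::AsmStmt *AS) {
  for (unsigned I = 0, E = AS->getNumClobbers(); I != E; ++I)
    llvm::errs() << AS->getClobber(I) << "\n";
  if (const clang::GCCAsmStmt *GCC = llvm::dyn_cast<clang::GCCAsmStmt>(AS))
    for (unsigned I = 0, E = GCC->getNumClobbers(); I != E; ++I)
      (void)GCC->getClobberStringLiteral(I);  // GCC-only accessor
}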
-/// MSAsmStmt - This represents a MS inline-assembly statement extension.
+/// This represents a Microsoft inline-assembly statement extension.
///
-class MSAsmStmt : public Stmt {
+class MSAsmStmt : public AsmStmt {
SourceLocation AsmLoc, LBraceLoc, EndLoc;
std::string AsmStr;
- bool IsSimple;
- bool IsVolatile;
-
unsigned NumAsmToks;
- unsigned NumInputs;
- unsigned NumOutputs;
- unsigned NumClobbers;
Token *AsmToks;
- IdentifierInfo **Names;
- Stmt **Exprs;
+ StringRef *Constraints;
StringRef *Clobbers;
public:
MSAsmStmt(ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc,
bool issimple, bool isvolatile, ArrayRef<Token> asmtoks,
- ArrayRef<IdentifierInfo*> inputs, ArrayRef<IdentifierInfo*> outputs,
- StringRef asmstr, ArrayRef<StringRef> clobbers,
- SourceLocation endloc);
+ unsigned numoutputs, unsigned numinputs,
+ ArrayRef<IdentifierInfo*> names, ArrayRef<StringRef> constraints,
+ ArrayRef<Expr*> exprs, StringRef asmstr,
+ ArrayRef<StringRef> clobbers, SourceLocation endloc);
+
+ /// \brief Build an empty MS-style inline-assembly statement.
+ explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty),
+ NumAsmToks(0), AsmToks(0), Constraints(0), Clobbers(0) { }
- SourceLocation getAsmLoc() const { return AsmLoc; }
- void setAsmLoc(SourceLocation L) { AsmLoc = L; }
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
@@ -1649,20 +1679,42 @@ public:
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
- bool isVolatile() const { return IsVolatile; }
- void setVolatile(bool V) { IsVolatile = V; }
- bool isSimple() const { return IsSimple; }
- void setSimple(bool V) { IsSimple = V; }
-
//===--- Asm String Analysis ---===//
const std::string *getAsmString() const { return &AsmStr; }
std::string *getAsmString() { return &AsmStr; }
void setAsmString(StringRef &E) { AsmStr = E.str(); }
+ /// Assemble final IR asm string.
+ std::string generateAsmString(ASTContext &C) const;
+
+ //===--- Output operands ---===//
+
+ StringRef getOutputConstraint(unsigned i) const {
+ return Constraints[i];
+ }
+
+ Expr *getOutputExpr(unsigned i);
+
+ const Expr *getOutputExpr(unsigned i) const {
+ return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
+ }
+
+ //===--- Input operands ---===//
+
+ StringRef getInputConstraint(unsigned i) const {
+ return Constraints[i + NumOutputs];
+ }
+
+ Expr *getInputExpr(unsigned i);
+ void setInputExpr(unsigned i, Expr *E);
+
+ const Expr *getInputExpr(unsigned i) const {
+ return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
+ }
+
//===--- Other ---===//
- unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const { return Clobbers[i]; }
SourceRange getSourceRange() const LLVM_READONLY {
@@ -1671,7 +1723,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
- static bool classof(const MSAsmStmt *) { return true; }
child_range children() {
return child_range(&Exprs[0], &Exprs[0]);
@@ -1720,8 +1771,6 @@ public:
return T->getStmtClass() == SEHExceptStmtClass;
}
- static bool classof(SEHExceptStmt *) { return true; }
-
};
class SEHFinallyStmt : public Stmt {
@@ -1757,8 +1806,6 @@ public:
return T->getStmtClass() == SEHFinallyStmtClass;
}
- static bool classof(SEHFinallyStmt *) { return true; }
-
};
class SEHTryStmt : public Stmt {
@@ -1810,8 +1857,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
-
- static bool classof(SEHTryStmt *) { return true; }
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
index a948722..f4e4dcd 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
@@ -50,7 +50,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXCatchStmtClass;
}
- static bool classof(const CXXCatchStmt *) { return true; }
child_range children() { return child_range(&HandlerBlock, &HandlerBlock+1); }
@@ -111,7 +110,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXTryStmtClass;
}
- static bool classof(const CXXTryStmt *) { return true; }
child_range children() {
return child_range(getStmts(), getStmts() + getNumHandlers() + 1);
@@ -196,7 +194,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXForRangeStmtClass;
}
- static bool classof(const CXXForRangeStmt *) { return true; }
// Iterators
child_range children() {
@@ -286,8 +283,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSDependentExistsStmtClass;
}
-
- static bool classof(MSDependentExistsStmt *) { return true; }
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
index e7e1232..d7a73a7 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
@@ -61,7 +61,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCForCollectionStmtClass;
}
- static bool classof(const ObjCForCollectionStmt *) { return true; }
// Iterators
child_range children() {
@@ -112,7 +111,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCAtCatchStmtClass;
}
- static bool classof(const ObjCAtCatchStmt *) { return true; }
child_range children() { return child_range(&Body, &Body + 1); }
};
@@ -143,7 +141,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCAtFinallyStmtClass;
}
- static bool classof(const ObjCAtFinallyStmt *) { return true; }
child_range children() {
return child_range(&AtFinallyStmt, &AtFinallyStmt+1);
@@ -244,7 +241,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCAtTryStmtClass;
}
- static bool classof(const ObjCAtTryStmt *) { return true; }
child_range children() {
return child_range(getStmts(),
@@ -303,7 +299,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCAtSynchronizedStmtClass;
}
- static bool classof(const ObjCAtSynchronizedStmt *) { return true; }
child_range children() {
return child_range(&SubStmts[0], &SubStmts[0]+END_EXPR);
@@ -339,7 +334,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCAtThrowStmtClass;
}
- static bool classof(const ObjCAtThrowStmt *) { return true; }
child_range children() { return child_range(&Throw, &Throw+1); }
};
@@ -371,7 +365,6 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCAutoreleasePoolStmtClass;
}
- static bool classof(const ObjCAutoreleasePoolStmt *) { return true; }
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
};
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
index 5047028..1c0abde 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
@@ -28,11 +28,11 @@ namespace llvm {
namespace clang {
-class Decl;
class DiagnosticBuilder;
class Expr;
struct PrintingPolicy;
class TypeSourceInfo;
+class ValueDecl;
/// \brief Represents a template argument within a class template
/// specialization.
@@ -43,12 +43,14 @@ public:
/// \brief Represents an empty template argument, e.g., one that has not
/// been deduced.
Null = 0,
- /// The template argument is a type. Its value is stored in the
- /// TypeOrValue field.
+ /// The template argument is a type.
Type,
- /// The template argument is a declaration that was provided for a pointer
- /// or reference non-type template parameter.
+ /// The template argument is a declaration that was provided for a pointer,
+ /// reference, or pointer to member non-type template parameter.
Declaration,
+ /// The template argument is a null pointer or null pointer to member that
+ /// was provided for a non-type template parameter.
+ NullPtr,
/// The template argument is an integral value stored in an llvm::APSInt
/// that was provided for an integral non-type template parameter.
Integral,
@@ -73,6 +75,10 @@ private:
union {
uintptr_t TypeOrValue;
struct {
+ ValueDecl *D;
+ bool ForRefParam;
+ } DeclArg;
+ struct {
// We store a decomposed APSInt with the data allocated by ASTContext if
// BitWidth > 64. The memory may be shared between multiple
// TemplateArgument instances.
@@ -101,15 +107,18 @@ public:
TemplateArgument() : Kind(Null), TypeOrValue(0) { }
/// \brief Construct a template type argument.
- TemplateArgument(QualType T) : Kind(Type) {
+ TemplateArgument(QualType T, bool isNullPtr = false)
+ : Kind(isNullPtr ? NullPtr : Type) {
TypeOrValue = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
}
/// \brief Construct a template argument that refers to a
/// declaration, which is either an external declaration or a
/// template declaration.
- TemplateArgument(Decl *D) : Kind(Declaration) {
- TypeOrValue = reinterpret_cast<uintptr_t>(D);
+ TemplateArgument(ValueDecl *D, bool ForRefParam) : Kind(Declaration) {
+ assert(D && "Expected decl");
+ DeclArg.D = D;
+ DeclArg.ForRefParam = ForRefParam;
}
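A minimal sketch of the constructor changes (hypothetical caller, not part of the patch): the QualType constructor now yields either a Type or a NullPtr argument, and Declaration arguments carry a ValueDecl plus a reference-parameter flag.
// Hypothetical construction of the three related argument kinds.
void makeArgs(clang::QualType T, clang::ValueDecl *VD) {
  clang::TemplateArgument TypeArg(T);                          // Kind: Type
  clang::TemplateArgument NullArg(T, /*isNullPtr=*/true);      // Kind: NullPtr
  clang::TemplateArgument DeclArg(VD, /*ForRefParam=*/false);  // Kind: Declaration
  (void)TypeArg; (void)NullArg; (void)DeclArg;
}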
/// \brief Construct an integral constant template argument. The memory to
@@ -177,6 +186,10 @@ public:
this->Args.NumArgs = NumArgs;
}
+ static TemplateArgument getEmptyPack() {
+ return TemplateArgument((TemplateArgument*)0, 0);
+ }
+
/// \brief Create a new template argument pack by copying the given set of
/// template arguments.
static TemplateArgument CreatePackCopy(ASTContext &Context,
@@ -205,34 +218,43 @@ public:
/// \brief Determine whether this template argument is a pack expansion.
bool isPackExpansion() const;
- /// \brief Retrieve the template argument as a type.
+ /// \brief Retrieve the type for a type template argument.
QualType getAsType() const {
- if (Kind != Type)
- return QualType();
-
+ assert(Kind == Type && "Unexpected kind");
return QualType::getFromOpaquePtr(reinterpret_cast<void*>(TypeOrValue));
}
- /// \brief Retrieve the template argument as a declaration.
- Decl *getAsDecl() const {
- if (Kind != Declaration)
- return 0;
- return reinterpret_cast<Decl *>(TypeOrValue);
+ /// \brief Retrieve the declaration for a declaration non-type
+ /// template argument.
+ ValueDecl *getAsDecl() const {
+ assert(Kind == Declaration && "Unexpected kind");
+ return DeclArg.D;
+ }
+
+  /// \brief Determine whether the declaration binds to a reference
+  /// parameter in a declaration non-type template argument.
+ bool isDeclForReferenceParam() const {
+ assert(Kind == Declaration && "Unexpected kind");
+ return DeclArg.ForRefParam;
}
- /// \brief Retrieve the template argument as a template name.
+  /// \brief Retrieve the type for a null non-type template argument.
+ QualType getNullPtrType() const {
+ assert(Kind == NullPtr && "Unexpected kind");
+ return QualType::getFromOpaquePtr(reinterpret_cast<void*>(TypeOrValue));
+ }
+
+ /// \brief Retrieve the template name for a template name argument.
TemplateName getAsTemplate() const {
- if (Kind != Template)
- return TemplateName();
-
+ assert(Kind == Template && "Unexpected kind");
return TemplateName::getFromVoidPointer(TemplateArg.Name);
}
/// \brief Retrieve the template argument as a template name; if the argument
/// is a pack expansion, return the pattern as a template name.
TemplateName getAsTemplateOrTemplatePattern() const {
- if (Kind != Template && Kind != TemplateExpansion)
- return TemplateName();
+ assert((Kind == Template || Kind == TemplateExpansion) &&
+ "Unexpected kind");
return TemplateName::getFromVoidPointer(TemplateArg.Name);
}
@@ -244,6 +266,7 @@ public:
/// \brief Retrieve the template argument as an integral value.
// FIXME: Provide a way to read the integral data without copying the value.
llvm::APSInt getAsIntegral() const {
+ assert(Kind == Integral && "Unexpected kind");
using namespace llvm;
if (Integer.BitWidth <= 64)
return APSInt(APInt(Integer.BitWidth, Integer.VAL), Integer.IsUnsigned);
@@ -255,23 +278,18 @@ public:
/// \brief Retrieve the type of the integral value.
QualType getIntegralType() const {
- if (Kind != Integral)
- return QualType();
-
+ assert(Kind == Integral && "Unexpected kind");
return QualType::getFromOpaquePtr(Integer.Type);
}
void setIntegralType(QualType T) {
- assert(Kind == Integral &&
- "Cannot set the integral type of a non-integral template argument");
+ assert(Kind == Integral && "Unexpected kind");
Integer.Type = T.getAsOpaquePtr();
}
/// \brief Retrieve the template argument as an expression.
Expr *getAsExpr() const {
- if (Kind != Expression)
- return 0;
-
+ assert(Kind == Expression && "Unexpected kind");
return reinterpret_cast<Expr *>(TypeOrValue);
}
@@ -436,7 +454,17 @@ public:
assert(Argument.getKind() == TemplateArgument::Declaration);
return LocInfo.getAsExpr();
}
-
+
+ Expr *getSourceNullPtrExpression() const {
+ assert(Argument.getKind() == TemplateArgument::NullPtr);
+ return LocInfo.getAsExpr();
+ }
+
+ Expr *getSourceIntegralExpression() const {
+ assert(Argument.getKind() == TemplateArgument::Integral);
+ return LocInfo.getAsExpr();
+ }
+
NestedNameSpecifierLoc getTemplateQualifierLoc() const {
assert(Argument.getKind() == TemplateArgument::Template ||
Argument.getKind() == TemplateArgument::TemplateExpansion);
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Type.h b/contrib/llvm/tools/clang/include/clang/AST/Type.h
index 6564b66..6900a7d 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Type.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Type.h
@@ -20,6 +20,7 @@
#include "clang/Basic/Linkage.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/Visibility.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateName.h"
#include "llvm/Support/type_traits.h"
@@ -160,6 +161,44 @@ public:
Qualifiers() : Mask(0) {}
+ /// \brief Returns the common set of qualifiers while removing them from
+ /// the given sets.
+ static Qualifiers removeCommonQualifiers(Qualifiers &L, Qualifiers &R) {
+ // If both are only CVR-qualified, bit operations are sufficient.
+ if (!(L.Mask & ~CVRMask) && !(R.Mask & ~CVRMask)) {
+ Qualifiers Q;
+ Q.Mask = L.Mask & R.Mask;
+ L.Mask &= ~Q.Mask;
+ R.Mask &= ~Q.Mask;
+ return Q;
+ }
+
+ Qualifiers Q;
+ unsigned CommonCRV = L.getCVRQualifiers() & R.getCVRQualifiers();
+ Q.addCVRQualifiers(CommonCRV);
+ L.removeCVRQualifiers(CommonCRV);
+ R.removeCVRQualifiers(CommonCRV);
+
+ if (L.getObjCGCAttr() == R.getObjCGCAttr()) {
+ Q.setObjCGCAttr(L.getObjCGCAttr());
+ L.removeObjCGCAttr();
+ R.removeObjCGCAttr();
+ }
+
+ if (L.getObjCLifetime() == R.getObjCLifetime()) {
+ Q.setObjCLifetime(L.getObjCLifetime());
+ L.removeObjCLifetime();
+ R.removeObjCLifetime();
+ }
+
+ if (L.getAddressSpace() == R.getAddressSpace()) {
+ Q.setAddressSpace(L.getAddressSpace());
+ L.removeAddressSpace();
+ R.removeAddressSpace();
+ }
+ return Q;
+ }
+
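A minimal worked example of the new helper (hypothetical caller, not part of the patch): the shared qualifier is factored out and returned, and each input keeps only what it did not share.
// Hypothetical: 'const' is common, so it moves into the result.
void splitQuals() {
  clang::Qualifiers L = clang::Qualifiers::fromCVRMask(
      clang::Qualifiers::Const | clang::Qualifiers::Volatile);
  clang::Qualifiers R =
      clang::Qualifiers::fromCVRMask(clang::Qualifiers::Const);
  clang::Qualifiers Common = clang::Qualifiers::removeCommonQualifiers(L, R);
  // Common == {const}, L == {volatile}, R == {}.
  (void)Common;
}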
static Qualifiers fromFastMask(unsigned Mask) {
Qualifiers Qs;
Qs.addFastQualifiers(Mask);
@@ -333,6 +372,23 @@ public:
}
}
+ /// \brief Remove the qualifiers from the given set from this set.
+ void removeQualifiers(Qualifiers Q) {
+ // If the other set doesn't have any non-boolean qualifiers, just
+ // bit-and the inverse in.
+ if (!(Q.Mask & ~CVRMask))
+ Mask &= ~Q.Mask;
+ else {
+ Mask &= ~(Q.Mask & CVRMask);
+ if (getObjCGCAttr() == Q.getObjCGCAttr())
+ removeObjCGCAttr();
+ if (getObjCLifetime() == Q.getObjCLifetime())
+ removeObjCLifetime();
+ if (getAddressSpace() == Q.getAddressSpace())
+ removeAddressSpace();
+ }
+ }
+
/// \brief Add the qualifiers from the given set to this set, given that
/// they don't conflict.
void addConsistentQualifiers(Qualifiers qs) {
@@ -400,7 +456,7 @@ public:
}
Qualifiers &operator-=(Qualifiers R) {
- Mask = Mask & ~(R.Mask);
+ removeQualifiers(R);
return *this;
}
@@ -435,18 +491,6 @@ private:
static const uint32_t AddressSpaceShift = 8;
};
-/// CallingConv - Specifies the calling convention that a function uses.
-enum CallingConv {
- CC_Default,
- CC_C, // __attribute__((cdecl))
- CC_X86StdCall, // __attribute__((stdcall))
- CC_X86FastCall, // __attribute__((fastcall))
- CC_X86ThisCall, // __attribute__((thiscall))
- CC_X86Pascal, // __attribute__((pascal))
- CC_AAPCS, // __attribute__((pcs("aapcs")))
- CC_AAPCS_VFP // __attribute__((pcs("aapcs-vfp")))
-};
-
/// A std::pair-like structure for storing a qualified type split
/// into its local qualifiers and its locally-unqualified type.
struct SplitQualType {
@@ -1126,8 +1170,8 @@ public:
};
private:
- Type(const Type&); // DO NOT IMPLEMENT.
- void operator=(const Type&); // DO NOT IMPLEMENT.
+ Type(const Type &) LLVM_DELETED_FUNCTION;
+ void operator=(const Type &) LLVM_DELETED_FUNCTION;
/// Bitfields required by the Type class.
class TypeBitfields {
@@ -1225,7 +1269,7 @@ protected:
/// Extra information which affects how the function is called, like
/// regparm and the calling convention.
- unsigned ExtInfo : 8;
+ unsigned ExtInfo : 9;
/// TypeQuals - Used only by FunctionProtoType, put here to pack with the
/// other bitfields.
@@ -1512,6 +1556,7 @@ public:
bool isRecordType() const;
bool isClassType() const;
bool isStructureType() const;
+ bool isInterfaceType() const;
bool isStructureOrClassType() const;
bool isUnionType() const;
bool isComplexIntegerType() const; // GCC _Complex integer type.
@@ -1630,13 +1675,19 @@ public:
const ObjCObjectPointerType *getAsObjCQualifiedIdType() const;
const ObjCObjectPointerType *getAsObjCQualifiedClassType() const;
const ObjCObjectType *getAsObjCQualifiedInterfaceType() const;
- const CXXRecordDecl *getCXXRecordDeclForPointerType() const;
/// \brief Retrieves the CXXRecordDecl that this type refers to, either
/// because the type is a RecordType or because it is the injected-class-name
/// type of a class template or class template partial specialization.
CXXRecordDecl *getAsCXXRecordDecl() const;
+  /// If this is a pointer or reference to a RecordType, return the
+  /// CXXRecordDecl the pointee type refers to.
+ ///
+ /// If this is not a pointer or reference, or the type being pointed to does
+ /// not refer to a CXXRecordDecl, returns NULL.
+ const CXXRecordDecl *getPointeeCXXRecordDecl() const;
+
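A minimal sketch of the new accessor (hypothetical caller, not part of the patch): it looks through one pointer or reference level, returning null for 'int *' or for a non-pointer type such as plain 'Foo'.
// Hypothetical wrapper: class behind a pointer/reference, or null.
const clang::CXXRecordDecl *pointeeClass(clang::QualType T) {
  return T->getPointeeCXXRecordDecl();
}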
/// \brief Get the AutoType whose type will be deduced for a variable with
/// an initializer of this type. This looks through declarators like pointer
/// types, but not through decltype or typedefs.
@@ -1738,8 +1789,6 @@ public:
CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
LLVM_ATTRIBUTE_USED void dump() const;
- static bool classof(const Type *) { return true; }
-
friend class ASTReader;
friend class ASTWriter;
};
@@ -1748,6 +1797,11 @@ public:
/// until it reaches a TypedefType or a non-sugared type.
template <> const TypedefType *Type::getAs() const;
+/// \brief This will check for a TemplateSpecializationType by removing any
+/// existing sugar until it reaches a TemplateSpecializationType or a
+/// non-sugared type.
+template <> const TemplateSpecializationType *Type::getAs() const;
+
// We can do canonical leaf types faster, because we don't have to
// worry about preserving child type decoration.
#define TYPE(Class, Base)
@@ -1834,7 +1888,6 @@ public:
}
static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
- static bool classof(const BuiltinType *) { return true; }
};
/// ComplexType - C99 6.2.5p11 - Complex values. This supports the C99 complex
@@ -1865,7 +1918,6 @@ public:
}
static bool classof(const Type *T) { return T->getTypeClass() == Complex; }
- static bool classof(const ComplexType *) { return true; }
};
/// ParenType - Sugar for parentheses used when specifying types.
@@ -1897,7 +1949,6 @@ public:
}
static bool classof(const Type *T) { return T->getTypeClass() == Paren; }
- static bool classof(const ParenType *) { return true; }
};
/// PointerType - C99 6.7.5.1 - Pointer Declarators.
@@ -1929,7 +1980,6 @@ public:
}
static bool classof(const Type *T) { return T->getTypeClass() == Pointer; }
- static bool classof(const PointerType *) { return true; }
};
/// BlockPointerType - pointer to a block type.
@@ -1965,7 +2015,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == BlockPointer;
}
- static bool classof(const BlockPointerType *) { return true; }
};
/// ReferenceType - Base for LValueReferenceType and RValueReferenceType
@@ -2013,7 +2062,6 @@ public:
return T->getTypeClass() == LValueReference ||
T->getTypeClass() == RValueReference;
}
- static bool classof(const ReferenceType *) { return true; }
};
/// LValueReferenceType - C++ [dcl.ref] - Lvalue reference
@@ -2031,7 +2079,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == LValueReference;
}
- static bool classof(const LValueReferenceType *) { return true; }
};
/// RValueReferenceType - C++0x [dcl.ref] - Rvalue reference
@@ -2048,7 +2095,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == RValueReference;
}
- static bool classof(const RValueReferenceType *) { return true; }
};
/// MemberPointerType - C++ 8.3.3 - Pointers to members
@@ -2103,7 +2149,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == MemberPointer;
}
- static bool classof(const MemberPointerType *) { return true; }
};
/// ArrayType - C99 6.7.5.2 - Array Declarators.
@@ -2159,7 +2204,6 @@ public:
T->getTypeClass() == IncompleteArray ||
T->getTypeClass() == DependentSizedArray;
}
- static bool classof(const ArrayType *) { return true; }
};
/// ConstantArrayType - This class represents the canonical version of
@@ -2211,7 +2255,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == ConstantArray;
}
- static bool classof(const ConstantArrayType *) { return true; }
};
/// IncompleteArrayType - This class represents C arrays with an unspecified
@@ -2231,7 +2274,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == IncompleteArray;
}
- static bool classof(const IncompleteArrayType *) { return true; }
friend class StmtIteratorBase;
@@ -2294,7 +2336,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == VariableArray;
}
- static bool classof(const VariableArrayType *) { return true; }
friend class StmtIteratorBase;
@@ -2351,7 +2392,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == DependentSizedArray;
}
- static bool classof(const DependentSizedArrayType *) { return true; }
friend class StmtIteratorBase;
@@ -2397,7 +2437,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == DependentSizedExtVector;
}
- static bool classof(const DependentSizedExtVectorType *) { return true; }
void Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, Context, getElementType(), getSizeExpr());
@@ -2463,7 +2502,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == Vector || T->getTypeClass() == ExtVector;
}
- static bool classof(const VectorType *) { return true; }
};
/// ExtVectorType - Extended vector type. This type is created using
@@ -2529,7 +2567,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == ExtVector;
}
- static bool classof(const ExtVectorType *) { return true; }
};
/// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base
@@ -2561,19 +2598,19 @@ class FunctionType : public Type {
// * AST read and write
// * Codegen
class ExtInfo {
- // Feel free to rearrange or add bits, but if you go over 8,
+ // Feel free to rearrange or add bits, but if you go over 9,
// you'll need to adjust both the Bits field below and
// Type::FunctionTypeBitfields.
// | CC |noreturn|produces|regparm|
- // |0 .. 2| 3 | 4 | 5 .. 7|
+ // |0 .. 3| 4 | 5 | 6 .. 8|
//
// regparm is either 0 (no regparm attribute) or the regparm value+1.
- enum { CallConvMask = 0x7 };
- enum { NoReturnMask = 0x8 };
- enum { ProducesResultMask = 0x10 };
+ enum { CallConvMask = 0xF };
+ enum { NoReturnMask = 0x10 };
+ enum { ProducesResultMask = 0x20 };
enum { RegParmMask = ~(CallConvMask | NoReturnMask | ProducesResultMask),
- RegParmOffset = 5 }; // Assumed to be the last field
+ RegParmOffset = 6 }; // Assumed to be the last field
uint16_t Bits;
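A worked sketch of the widened layout (standalone arithmetic under the masks above, not the class itself):

// CC now spans bits 0..3 (0xF), noreturn is bit 4 (0x10), produces-result
// is bit 5 (0x20), and regparm+1 starts at bit 6.  Packing CC=5,
// noreturn=true, regparm=2:
unsigned Bits = 5 | 0x10 | ((2 + 1) << 6); // == 0xD5
unsigned CC       = Bits & 0xF;            // 5
bool     NoReturn = (Bits & 0x10) != 0;    // true
unsigned RegParm  = (Bits >> 6) - 1;       // 2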
@@ -2692,7 +2729,6 @@ public:
return T->getTypeClass() == FunctionNoProto ||
T->getTypeClass() == FunctionProto;
}
- static bool classof(const FunctionType *) { return true; }
};
/// FunctionNoProtoType - Represents a K&R-style 'int foo()' function, which has
@@ -2724,7 +2760,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == FunctionNoProto;
}
- static bool classof(const FunctionNoProtoType *) { return true; }
};
/// FunctionProtoType - Represents a prototype with argument type info, e.g.
@@ -2972,14 +3007,13 @@ public:
// FIXME: Remove the string version.
void printExceptionSpecification(std::string &S,
- PrintingPolicy Policy) const;
+ const PrintingPolicy &Policy) const;
void printExceptionSpecification(raw_ostream &OS,
- PrintingPolicy Policy) const;
+ const PrintingPolicy &Policy) const;
static bool classof(const Type *T) {
return T->getTypeClass() == FunctionProto;
}
- static bool classof(const FunctionProtoType *) { return true; }
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
static void Profile(llvm::FoldingSetNodeID &ID, QualType Result,
@@ -3010,7 +3044,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == UnresolvedUsing;
}
- static bool classof(const UnresolvedUsingType *) { return true; }
void Profile(llvm::FoldingSetNodeID &ID) {
return Profile(ID, Decl);
@@ -3042,7 +3075,6 @@ public:
QualType desugar() const;
static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
- static bool classof(const TypedefType *) { return true; }
};
/// TypeOfExprType (GCC extension).
@@ -3062,7 +3094,6 @@ public:
bool isSugared() const;
static bool classof(const Type *T) { return T->getTypeClass() == TypeOfExpr; }
- static bool classof(const TypeOfExprType *) { return true; }
};
/// \brief Internal representation of canonical, dependent
@@ -3109,7 +3140,6 @@ public:
bool isSugared() const { return true; }
static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; }
- static bool classof(const TypeOfType *) { return true; }
};
/// DecltypeType (C++0x)
@@ -3131,7 +3161,6 @@ public:
bool isSugared() const;
static bool classof(const Type *T) { return T->getTypeClass() == Decltype; }
- static bool classof(const DecltypeType *) { return true; }
};
/// \brief Internal representation of canonical, dependent
@@ -3184,7 +3213,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == UnaryTransform;
}
- static bool classof(const UnaryTransformType *) { return true; }
};
class TagType : public Type {
@@ -3207,7 +3235,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() >= TagFirst && T->getTypeClass() <= TagLast;
}
- static bool classof(const TagType *) { return true; }
};
/// RecordType - This is a helper class that allows the use of isa/cast/dyncast
@@ -3234,7 +3261,6 @@ public:
QualType desugar() const { return QualType(this, 0); }
static bool classof(const Type *T) { return T->getTypeClass() == Record; }
- static bool classof(const RecordType *) { return true; }
};
/// EnumType - This is a helper class that allows the use of isa/cast/dyncast
@@ -3253,7 +3279,6 @@ public:
QualType desugar() const { return QualType(this, 0); }
static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
- static bool classof(const EnumType *) { return true; }
};
/// AttributedType - An attributed type is a type to which a type
@@ -3297,7 +3322,8 @@ public:
attr_fastcall,
attr_stdcall,
attr_thiscall,
- attr_pascal
+ attr_pascal,
+ attr_pnaclcall
};
private:
@@ -3341,7 +3367,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == Attributed;
}
- static bool classof(const AttributedType *T) { return true; }
};
class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
@@ -3415,7 +3440,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == TemplateTypeParm;
}
- static bool classof(const TemplateTypeParmType *T) { return true; }
};
/// \brief Represents the result of substituting a type for a template
@@ -3466,7 +3490,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == SubstTemplateTypeParm;
}
- static bool classof(const SubstTemplateTypeParmType *T) { return true; }
};
/// \brief Represents the result of substituting a set of types for a template
@@ -3519,7 +3542,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == SubstTemplateTypeParmPack;
}
- static bool classof(const SubstTemplateTypeParmPackType *T) { return true; }
};
/// \brief Represents a C++0x auto type.
@@ -3562,7 +3584,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == Auto;
}
- static bool classof(const AutoType *T) { return true; }
};
/// \brief Represents a type template specialization; the template
@@ -3726,7 +3747,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == TemplateSpecialization;
}
- static bool classof(const TemplateSpecializationType *T) { return true; }
};
/// \brief The injected class name of a C++ class template or class
@@ -3789,13 +3809,14 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == InjectedClassName;
}
- static bool classof(const InjectedClassNameType *T) { return true; }
};
/// \brief The kind of a tag type.
enum TagTypeKind {
/// \brief The "struct" keyword.
TTK_Struct,
+ /// \brief The "__interface" keyword.
+ TTK_Interface,
/// \brief The "union" keyword.
TTK_Union,
/// \brief The "class" keyword.
@@ -3809,6 +3830,8 @@ enum TagTypeKind {
enum ElaboratedTypeKeyword {
/// \brief The "struct" keyword introduces the elaborated-type-specifier.
ETK_Struct,
+ /// \brief The "__interface" keyword introduces the elaborated-type-specifier.
+ ETK_Interface,
/// \brief The "union" keyword introduces the elaborated-type-specifier.
ETK_Union,
/// \brief The "class" keyword introduces the elaborated-type-specifier.
@@ -3932,7 +3955,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == Elaborated;
}
- static bool classof(const ElaboratedType *T) { return true; }
};
/// \brief Represents a qualified type name for which the type name is
@@ -3996,7 +4018,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == DependentName;
}
- static bool classof(const DependentNameType *T) { return true; }
};
/// DependentTemplateSpecializationType - Represents a template
@@ -4067,9 +4088,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == DependentTemplateSpecialization;
}
- static bool classof(const DependentTemplateSpecializationType *T) {
- return true;
- }
};
/// \brief Represents a pack expansion of types.
@@ -4150,9 +4168,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == PackExpansion;
}
- static bool classof(const PackExpansionType *T) {
- return true;
- }
};
/// ObjCObjectType - Represents a class type in Objective C.
@@ -4263,7 +4278,6 @@ public:
return T->getTypeClass() == ObjCObject ||
T->getTypeClass() == ObjCInterface;
}
- static bool classof(const ObjCObjectType *) { return true; }
};
/// ObjCObjectTypeImpl - A class providing a concrete implementation
@@ -4327,7 +4341,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == ObjCInterface;
}
- static bool classof(const ObjCInterfaceType *) { return true; }
// Nonsense to "hide" certain members of ObjCObjectType within this
// class. People asking for protocols on an ObjCInterfaceType are
@@ -4477,7 +4490,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == ObjCObjectPointer;
}
- static bool classof(const ObjCObjectPointerType *) { return true; }
};
class AtomicType : public Type, public llvm::FoldingSetNode {
@@ -4508,7 +4520,6 @@ class AtomicType : public Type, public llvm::FoldingSetNode {
static bool classof(const Type *T) {
return T->getTypeClass() == Atomic;
}
- static bool classof(const AtomicType *) { return true; }
};
/// A qualifier set is used to build a set of qualifiers.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
index 11a878d..8a04bd8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
@@ -159,8 +159,6 @@ public:
return !(LHS == RHS);
}
- static bool classof(const TypeLoc *TL) { return true; }
-
private:
static void initializeImpl(ASTContext &Context, TypeLoc TL,
SourceLocation Loc);
@@ -192,7 +190,6 @@ public:
static bool classof(const TypeLoc *TL) {
return !TL->getType().hasLocalQualifiers();
}
- static bool classof(const UnqualTypeLoc *TL) { return true; }
};
/// \brief Wrapper of type source information for a type with
@@ -237,7 +234,6 @@ public:
static bool classof(const TypeLoc *TL) {
return TL->getType().hasLocalQualifiers();
}
- static bool classof(const QualifiedTypeLoc *TL) { return true; }
};
inline UnqualTypeLoc TypeLoc::getUnqualifiedLoc() const {
@@ -250,11 +246,11 @@ inline UnqualTypeLoc TypeLoc::getUnqualifiedLoc() const {
/// to a particular Type subclass. It is accepted for a single
/// TypeLoc class to correspond to multiple Type classes.
///
-/// \param Base a class from which to derive
-/// \param Derived the class deriving from this one
-/// \param TypeClass the concrete Type subclass associated with this
+/// \tparam Base a class from which to derive
+/// \tparam Derived the class deriving from this one
+/// \tparam TypeClass the concrete Type subclass associated with this
/// location type
-/// \param LocalData the structure type of local location data for
+/// \tparam LocalData the structure type of local location data for
/// this type
///
/// sizeof(LocalData) needs to be a multiple of sizeof(void*) or
@@ -303,9 +299,6 @@ public:
static bool classof(const UnqualTypeLoc *TL) {
return Derived::classofType(TL->getTypePtr());
}
- static bool classof(const Derived *TL) {
- return true;
- }
TypeLoc getNextTypeLoc() const {
return getNextTypeLoc(asDerived()->getInnerType());
@@ -380,9 +373,6 @@ public:
static bool classof(const UnqualTypeLoc *TL) {
return Derived::classofType(TL->getTypePtr());
}
- static bool classof(const Derived *TL) {
- return true;
- }
const TypeClass *getTypePtr() const {
return cast<TypeClass>(Base::getTypePtr());
@@ -417,7 +407,6 @@ public:
}
static bool classof(const TypeLoc *TL);
- static bool classof(const TypeSpecTypeLoc *TL) { return true; }
};
@@ -866,6 +855,7 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setNameLoc(Loc);
+ setNameEndLoc(Loc);
}
};
@@ -1060,6 +1050,8 @@ public:
struct FunctionLocInfo {
SourceLocation LocalRangeBegin;
+ SourceLocation LParenLoc;
+ SourceLocation RParenLoc;
SourceLocation LocalRangeEnd;
};
@@ -1083,6 +1075,24 @@ public:
getLocalData()->LocalRangeEnd = L;
}
+ SourceLocation getLParenLoc() const {
+ return this->getLocalData()->LParenLoc;
+ }
+ void setLParenLoc(SourceLocation Loc) {
+ this->getLocalData()->LParenLoc = Loc;
+ }
+
+ SourceLocation getRParenLoc() const {
+ return this->getLocalData()->RParenLoc;
+ }
+ void setRParenLoc(SourceLocation Loc) {
+ this->getLocalData()->RParenLoc = Loc;
+ }
+
+ SourceRange getParensRange() const {
+ return SourceRange(getLParenLoc(), getRParenLoc());
+ }
+
ArrayRef<ParmVarDecl *> getParams() const {
return ArrayRef<ParmVarDecl *>(getParmArray(), getNumArgs());
}
@@ -1110,6 +1120,8 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setLocalRangeBegin(Loc);
+ setLParenLoc(Loc);
+ setRParenLoc(Loc);
setLocalRangeEnd(Loc);
for (unsigned i = 0, e = getNumArgs(); i != e; ++i)
setArg(i, NULL);
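A hedged sketch of consuming the new locations (FTL is an assumed clang::FunctionTypeLoc for 'void f(int)'):

clang::SourceLocation LP = FTL.getLParenLoc();    // location of '('
clang::SourceLocation RP = FTL.getRParenLoc();    // location of ')'
clang::SourceRange Parens = FTL.getParensRange(); // SourceRange(LP, RP)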
diff --git a/contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h b/contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h
index 0918dc4..9f11ee5 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h
@@ -94,7 +94,7 @@ class UnresolvedSetImpl {
private:
template <unsigned N> friend class UnresolvedSet;
UnresolvedSetImpl() {}
- UnresolvedSetImpl(const UnresolvedSetImpl &) {}
+ UnresolvedSetImpl(const UnresolvedSetImpl &) LLVM_DELETED_FUNCTION;
public:
// We don't currently support assignment through this iterator, so we might
diff --git a/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h b/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h
index 392dad9..a6aa40b 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h
@@ -147,9 +147,10 @@ private:
assert((ComponentKind == CK_VCallOffset ||
ComponentKind == CK_VBaseOffset ||
ComponentKind == CK_OffsetToTop) && "Invalid component kind!");
- assert(Offset.getQuantity() <= ((1LL << 56) - 1) && "Offset is too big!");
+ assert(Offset.getQuantity() < (1LL << 56) && "Offset is too big!");
+ assert(Offset.getQuantity() >= -(1LL << 56) && "Offset is too small!");
- Value = ((Offset.getQuantity() << 3) | ComponentKind);
+ Value = (uint64_t(Offset.getQuantity()) << 3) | ComponentKind;
}
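The uint64_t cast matters: left-shifting a negative signed offset is undefined behavior. A hedged round-trip sketch (values are illustrative):

int64_t Offset = -24;                         // e.g. a negative vcall offset
uint64_t Value = (uint64_t(Offset) << 3) | 2; // low 3 bits carry the kind
int64_t Back  = int64_t(Value) >> 3;          // -24 again (arithmetic shift
                                              // assumed, as on all major ABIs)
unsigned Kind = unsigned(Value & 7);          // == 2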
VTableComponent(Kind ComponentKind, uintptr_t Ptr) {
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h
index dd237ee..30b4050 100644
--- a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h
@@ -85,7 +85,14 @@ public:
class MatchCallback {
public:
virtual ~MatchCallback();
+
+ /// \brief Called on every match by the \c MatchFinder.
virtual void run(const MatchResult &Result) = 0;
+
+ /// \brief Called at the start of each translation unit.
+ ///
+ /// Optionally override to do per-translation-unit tasks.
+ virtual void onStartOfTranslationUnit() {}
};
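A minimal sketch of a callback exercising both hooks (the class name is hypothetical):

using namespace clang::ast_matchers;

class CountMatches : public MatchFinder::MatchCallback {
public:
  CountMatches() : Count(0) {}
  // Reset per-TU state when a new translation unit begins.
  virtual void onStartOfTranslationUnit() { Count = 0; }
  // Invoked once for every match produced by the MatchFinder.
  virtual void run(const MatchFinder::MatchResult &Result) { ++Count; }
private:
  unsigned Count;
};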
/// \brief Called when parsing is finished. Intended for testing only.
@@ -112,11 +119,24 @@ public:
MatchCallback *Action);
void addMatcher(const StatementMatcher &NodeMatch,
MatchCallback *Action);
+ void addMatcher(const NestedNameSpecifierMatcher &NodeMatch,
+ MatchCallback *Action);
+ void addMatcher(const NestedNameSpecifierLocMatcher &NodeMatch,
+ MatchCallback *Action);
+ void addMatcher(const TypeLocMatcher &NodeMatch,
+ MatchCallback *Action);
/// @}
/// \brief Creates a clang ASTConsumer that finds all matches.
clang::ASTConsumer *newASTConsumer();
+ /// \brief Finds all matches on the given \c Node.
+ ///
+ /// @{
+ void findAll(const Decl &Node, ASTContext &Context);
+ void findAll(const Stmt &Node, ASTContext &Context);
+ /// @}
+
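A hedged sketch wiring a matcher to a callback and running over an already-parsed AST (Context is an assumed clang::ASTContext; Callback is a MatchCallback such as the sketch above):

MatchFinder Finder;
// Invoke the callback on every C++ class named 'MyClass'.
Finder.addMatcher(recordDecl(hasName("MyClass")), &Callback);
// New in this revision: match directly over an existing AST.
Finder.findAll(*Context.getTranslationUnitDecl(), Context);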
/// \brief Registers a callback to notify the end of parsing.
///
/// The provided closure is called after parsing is done, before the AST is
@@ -125,11 +145,10 @@ public:
void registerTestCallbackAfterParsing(ParsingDoneTestCallback *ParsingDone);
private:
- /// \brief The MatchCallback*'s will be called every time the
- /// UntypedBaseMatcher matches on the AST.
- std::vector< std::pair<
- const internal::UntypedBaseMatcher*,
- MatchCallback*> > Triggers;
+ /// \brief For each \c DynTypedMatcher a \c MatchCallback that will be called
+ /// when it matches.
+ std::vector<std::pair<const internal::DynTypedMatcher*, MatchCallback*> >
+ MatcherCallbackPairs;
/// \brief Called when parsing is done.
ParsingDoneTestCallback *ParsingDone;
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h
index 33ef3dc..a70dd5c 100644
--- a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -14,7 +14,7 @@
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
-// record(hasName("MyClass"))
+// recordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
@@ -25,7 +25,7 @@
//
// For example, when we're interested in child classes of a certain class, we
// would write:
-// record(hasName("MyClass"), hasChild(id("child", record())))
+// recordDecl(hasName("MyClass"), hasChild(id("child", recordDecl())))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the id(...) calls to the nodes that were
@@ -57,52 +57,47 @@ namespace ast_matchers {
/// \brief Maps string IDs to AST nodes matched by parts of a matcher.
///
-/// The bound nodes are generated by adding id(...) matchers into the
-/// match expression around the matchers for the nodes we want to access later.
+/// The bound nodes are generated by calling \c bind("id") on the node matchers
+/// of the nodes we want to access later.
///
-/// The instances of BoundNodes are created by MatchFinder when the user's
+/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
- /// \brief Returns the AST node bound to 'ID'.
- /// Returns NULL if there was no node bound to 'ID' or if there is a node but
+ /// \brief Returns the AST node bound to \c ID.
+ ///
+ /// Returns NULL if there was no node bound to \c ID or if there is a node but
/// it cannot be converted to the specified type.
- /// FIXME: We'll need one of those for every base type.
+ template <typename T>
+ const T *getNodeAs(StringRef ID) const {
+ return MyBoundNodes.getNodeAs<T>(ID);
+ }
+
+ /// \brief Deprecated. Please use \c getNodeAs instead.
/// @{
template <typename T>
const T *getDeclAs(StringRef ID) const {
- return getNodeAs<T>(DeclBindings, ID);
+ return getNodeAs<T>(ID);
}
template <typename T>
const T *getStmtAs(StringRef ID) const {
- return getNodeAs<T>(StmtBindings, ID);
+ return getNodeAs<T>(ID);
}
/// @}
private:
/// \brief Create BoundNodes from a pre-filled map of bindings.
- BoundNodes(const std::map<std::string, const Decl*> &DeclBindings,
- const std::map<std::string, const Stmt*> &StmtBindings)
- : DeclBindings(DeclBindings), StmtBindings(StmtBindings) {}
-
- template <typename T, typename MapT>
- const T *getNodeAs(const MapT &Bindings, StringRef ID) const {
- typename MapT::const_iterator It = Bindings.find(ID);
- if (It == Bindings.end()) {
- return NULL;
- }
- return llvm::dyn_cast<T>(It->second);
- }
+ BoundNodes(internal::BoundNodesMap &MyBoundNodes)
+ : MyBoundNodes(MyBoundNodes) {}
- std::map<std::string, const Decl*> DeclBindings;
- std::map<std::string, const Stmt*> StmtBindings;
+ internal::BoundNodesMap MyBoundNodes;
friend class internal::BoundNodesTree;
};
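Illustrative use of the new accessor inside a callback, paired with a matcher such as varDecl().bind("x") (names are assumptions):

struct PrintVar : clang::ast_matchers::MatchFinder::MatchCallback {
  virtual void run(
      const clang::ast_matchers::MatchFinder::MatchResult &Result) {
    // Null if nothing was bound to "x" or it is not actually a VarDecl.
    if (const clang::VarDecl *V = Result.Nodes.getNodeAs<clang::VarDecl>("x"))
      llvm::errs() << V->getName() << "\n";
  }
};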
-/// \brief If the provided matcher matches a node, binds the node to 'ID'.
+/// \brief If the provided matcher matches a node, binds the node to \c ID.
///
-/// FIXME: Add example for accessing it.
+/// FIXME: Do we want to support this now that we have bind()?
template <typename T>
internal::Matcher<T> id(const std::string &ID,
const internal::BindableMatcher<T> &InnerMatcher) {
@@ -113,20 +108,27 @@ internal::Matcher<T> id(const std::string &ID,
/// hierarchy.
/// @{
typedef internal::Matcher<Decl> DeclarationMatcher;
-typedef internal::Matcher<QualType> TypeMatcher;
typedef internal::Matcher<Stmt> StatementMatcher;
+typedef internal::Matcher<QualType> TypeMatcher;
+typedef internal::Matcher<TypeLoc> TypeLocMatcher;
+typedef internal::Matcher<NestedNameSpecifier> NestedNameSpecifierMatcher;
+typedef internal::Matcher<NestedNameSpecifierLoc> NestedNameSpecifierLocMatcher;
/// @}
/// \brief Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
-/// to a internal::Matcher<> type such as TypeMatcher.
+/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
-/// Example: DeclarationMatcher(anything()) matches all declarations, e.g.,
+/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
+/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
+/// \endcode
+///
+/// Usable as: Any Matcher
inline internal::PolymorphicMatcherWithParam0<internal::TrueMatcher> anything() {
return internal::PolymorphicMatcherWithParam0<internal::TrueMatcher>();
}
@@ -144,53 +146,69 @@ const internal::VariadicDynCastAllOfMatcher<Decl, Decl> decl;
/// \brief Matches a declaration of anything that could have a name.
///
-/// Example matches X, S, the anonymous union type, i, and U;
+/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
+/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
-const internal::VariadicDynCastAllOfMatcher<
- Decl,
- NamedDecl> nameableDeclaration;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// \brief Matches C++ class declarations.
///
-/// Example matches X, Z
+/// Example matches \c X, \c Z
+/// \code
/// class X;
/// template<class T> class Z {};
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Decl,
- CXXRecordDecl> record;
+ CXXRecordDecl> recordDecl;
+
+/// \brief Matches C++ class template declarations.
+///
+/// Example matches \c Z
+/// \code
+/// template<class T> class Z {};
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<
+ Decl,
+ ClassTemplateDecl> classTemplateDecl;
/// \brief Matches C++ class template specializations.
///
/// Given
+/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
-/// classTemplateSpecialization()
+/// \endcode
+/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
const internal::VariadicDynCastAllOfMatcher<
Decl,
- ClassTemplateSpecializationDecl> classTemplateSpecialization;
+ ClassTemplateSpecializationDecl> classTemplateSpecializationDecl;
/// \brief Matches classTemplateSpecializations that have at least one
-/// TemplateArgument matching the given Matcher.
+/// TemplateArgument matching the given InnerMatcher.
///
/// Given
+/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
-/// classTemplateSpecialization(hasAnyTemplateArgument(
+/// \endcode
+/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasAnyTemplateArgument,
- internal::Matcher<TemplateArgument>, Matcher) {
+ internal::Matcher<TemplateArgument>, InnerMatcher) {
const TemplateArgumentList &List = Node.getTemplateArgs();
for (unsigned i = 0; i < List.size(); ++i) {
- if (Matcher.matches(List.get(i), Finder, Builder))
+ if (InnerMatcher.matches(List.get(i), Finder, Builder))
return true;
}
return false;
@@ -201,19 +219,25 @@ AST_MATCHER_P(ClassTemplateSpecializationDecl, hasAnyTemplateArgument,
///
/// Parentheses and explicit casts are not discarded.
/// Given
+/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
+/// \endcode
/// The matchers
-/// variable(hasInitializer(ignoringImpCasts(integerLiteral())))
-/// variable(hasInitializer(ignoringImpCasts(declarationReference())))
+/// \code
+/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
+/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
+/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
-/// while
-/// variable(hasInitializer(integerLiteral()))
-/// variable(hasInitializer(declarationReference()))
+/// While
+/// \code
+/// varDecl(hasInitializer(integerLiteral()))
+/// varDecl(hasInitializer(declRefExpr()))
+/// \endcode
/// only match the declaration for a, since the other initializers are
/// wrapped in implicit casts.
AST_MATCHER_P(Expr, ignoringImpCasts,
internal::Matcher<Expr>, InnerMatcher) {
@@ -225,15 +249,17 @@ AST_MATCHER_P(Expr, ignoringImpCasts,
///
/// Implicit and non-C Style casts are also discarded.
/// Given
+/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
+/// \endcode
/// The matcher
-/// variable(hasInitializer(ignoringParenCasts(integerLiteral())))
+/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
-/// variable(hasInitializer(integerLiteral()))
+/// varDecl(hasInitializer(integerLiteral()))
/// only matches the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder);
@@ -244,21 +270,21 @@ AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
///
/// Explicit casts are not discarded.
/// Given
+/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
+/// \endcode
/// The matchers
-/// variable(hasInitializer(ignoringParenImpCasts(
-/// integerLiteral())))
-/// variable(hasInitializer(ignoringParenImpCasts(
-/// declarationReference())))
+/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
+/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
-/// variable(hasInitializer(integerLiteral()))
-/// variable(hasInitializer(declarationReference()))
+/// varDecl(hasInitializer(integerLiteral()))
+/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
internal::Matcher<Expr>, InnerMatcher) {
@@ -266,101 +292,119 @@ AST_MATCHER_P(Expr, ignoringParenImpCasts,
}
/// \brief Matches classTemplateSpecializations where the n'th TemplateArgument
-/// matches the given Matcher.
+/// matches the given InnerMatcher.
///
/// Given
+/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
-/// classTemplateSpecialization(hasTemplateArgument(
+/// \endcode
+/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
AST_MATCHER_P2(ClassTemplateSpecializationDecl, hasTemplateArgument,
- unsigned, N, internal::Matcher<TemplateArgument>, Matcher) {
+ unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
const TemplateArgumentList &List = Node.getTemplateArgs();
if (List.size() <= N)
return false;
- return Matcher.matches(List.get(N), Finder, Builder);
+ return InnerMatcher.matches(List.get(N), Finder, Builder);
}
/// \brief Matches a TemplateArgument that refers to a certain type.
///
/// Given
+/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
-/// classTemplateSpecialization(hasAnyTemplateArgument(
+/// \endcode
+/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
- internal::Matcher<QualType>, Matcher) {
+ internal::Matcher<QualType>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Type)
return false;
- return Matcher.matches(Node.getAsType(), Finder, Builder);
+ return InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// \brief Matches a TemplateArgument that refers to a certain declaration.
///
/// Given
+/// \code
/// template<typename T> struct A {};
/// struct B { B* next; };
/// A<&B::next> a;
-/// classTemplateSpecialization(hasAnyTemplateArgument(
-/// refersToDeclaration(field(hasName("next"))))
-/// matches the specialization \c A<&B::next> with \c field(...) matching
+/// \endcode
+/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
+/// refersToDeclaration(fieldDecl(hasName("next"))))
+/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
- internal::Matcher<Decl>, Matcher) {
- if (const Decl *Declaration = Node.getAsDecl())
- return Matcher.matches(*Declaration, Finder, Builder);
+ internal::Matcher<Decl>, InnerMatcher) {
+ if (Node.getKind() == TemplateArgument::Declaration)
+ return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
return false;
}
/// \brief Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
+/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Decl,
- CXXConstructorDecl> constructor;
+ CXXConstructorDecl> constructorDecl;
/// \brief Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
+/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
-const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> destructor;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<
+ Decl,
+ CXXDestructorDecl> destructorDecl;
/// \brief Matches enum declarations.
///
/// Example matches X
+/// \code
/// enum X {
/// A, B, C
/// };
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// \brief Matches enum constants.
///
/// Example matches A, B, C
+/// \code
/// enum X {
/// A, B, C
/// };
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Decl,
- EnumConstantDecl> enumConstant;
+ EnumConstantDecl> enumConstantDecl;
/// \brief Matches method declarations.
///
/// Example matches y
+/// \code
/// class X { void y(); };
-const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> method;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> methodDecl;
/// \brief Matches variable declarations.
///
@@ -368,76 +412,111 @@ const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> method;
/// "field" declarations in Clang parlance.
///
/// Example matches a
+/// \code
/// int a;
-const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> variable;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// \brief Matches field declarations.
///
/// Given
+/// \code
/// class X { int m; };
-/// field()
+/// \endcode
+/// fieldDecl()
/// matches 'm'.
-const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> field;
+const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// \brief Matches function declarations.
///
/// Example matches f
+/// \code
/// void f();
-const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> function;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl;
+/// \brief Matches C++ function template declarations.
+///
+/// Example matches f
+/// \code
+/// template<class T> void f(T t) {}
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<
+ Decl,
+ FunctionTemplateDecl> functionTemplateDecl;
/// \brief Matches statements.
///
/// Given
+/// \code
/// { ++a; }
-/// statement()
+/// \endcode
+/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
-const internal::VariadicDynCastAllOfMatcher<Stmt, Stmt> statement;
+const internal::VariadicDynCastAllOfMatcher<Stmt, Stmt> stmt;
/// \brief Matches declaration statements.
///
/// Given
+/// \code
/// int a;
-/// declarationStatement()
+/// \endcode
+/// declStmt()
/// matches 'int a'.
const internal::VariadicDynCastAllOfMatcher<
Stmt,
- DeclStmt> declarationStatement;
+ DeclStmt> declStmt;
/// \brief Matches member expressions.
///
/// Given
+/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
-/// memberExpression()
+/// \endcode
+/// memberExpr()
/// matches this->x, x, y.x, a, this->b
-const internal::VariadicDynCastAllOfMatcher<
- Stmt,
- MemberExpr> memberExpression;
+const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// \brief Matches call expressions.
///
/// Example matches x.y() and y()
+/// \code
/// X x;
/// x.y();
/// y();
-const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> call;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
+
+/// \brief Matches lambda expressions.
+///
+/// Example matches [&](){return 5;}
+/// \code
+/// [&](){return 5;}
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// \brief Matches member call expressions.
///
/// Example matches x.y()
+/// \code
/// X x;
/// x.y();
-const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> memberCall;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXMemberCallExpr> memberCallExpr;
/// \brief Matches init list expressions.
///
/// Given
+/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
+/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr;
@@ -445,8 +524,10 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr;
/// \brief Matches using declarations.
///
/// Given
+/// \code
/// namespace X { int x; }
/// using X::x;
+/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
@@ -454,49 +535,89 @@ const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// \brief Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
-/// (matcher = constructorCall())
+/// (matcher = constructExpr())
+/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Stmt,
- CXXConstructExpr> constructorCall;
+ CXXConstructExpr> constructExpr;
+
+/// \brief Matches implicit and explicit this expressions.
+///
+/// Example matches the implicit this expression in "return i".
+/// (matcher = thisExpr())
+/// \code
+/// struct foo {
+/// int i;
+/// int f() { return i; }
+/// };
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> thisExpr;
/// \brief Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
-/// (matcher = bindTemporaryExpression())
+/// (matcher = bindTemporaryExpr())
+/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Stmt,
- CXXBindTemporaryExpr> bindTemporaryExpression;
+ CXXBindTemporaryExpr> bindTemporaryExpr;
+
+/// \brief Matches nodes where temporaries are materialized.
+///
+/// Example: Given
+/// \code
+/// struct T { void func(); };
+/// T f();
+/// void g(T);
+/// \endcode
+/// materializeTemporaryExpr() matches 'f()' in these statements
+/// \code
+/// T u(f());
+/// g(f());
+/// \endcode
+/// but does not match
+/// \code
+/// f();
+/// f().func();
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ MaterializeTemporaryExpr> materializeTemporaryExpr;
/// \brief Matches new expressions.
///
/// Given
+/// \code
/// new X;
-/// newExpression()
+/// \endcode
+/// newExpr()
/// matches 'new X'.
-const internal::VariadicDynCastAllOfMatcher<
- Stmt,
- CXXNewExpr> newExpression;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> newExpr;
/// \brief Matches delete expressions.
///
/// Given
+/// \code
/// delete X;
-/// deleteExpression()
+/// \endcode
+/// deleteExpr()
/// matches 'delete X'.
-const internal::VariadicDynCastAllOfMatcher<
- Stmt,
- CXXDeleteExpr> deleteExpression;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> deleteExpr;
/// \brief Matches array subscript expressions.
///
/// Given
+/// \code
/// int i = a[1];
+/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
const internal::VariadicDynCastAllOfMatcher<
@@ -507,12 +628,14 @@ const internal::VariadicDynCastAllOfMatcher<
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
-/// (matcher = defaultArgument())
+/// (matcher = defaultArgExpr())
+/// \code
/// void f(int x, int y = 0);
/// f(42);
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Stmt,
- CXXDefaultArgExpr> defaultArgument;
+ CXXDefaultArgExpr> defaultArgExpr;
/// \brief Matches overloaded operator calls.
///
@@ -522,50 +645,67 @@ const internal::VariadicDynCastAllOfMatcher<
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
-/// (matcher = overloadedOperatorCall())
+/// (matcher = operatorCallExpr())
+/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Stmt,
- CXXOperatorCallExpr> overloadedOperatorCall;
+ CXXOperatorCallExpr> operatorCallExpr;
/// \brief Matches expressions.
///
/// Example matches x()
+/// \code
/// void f() { x(); }
-const internal::VariadicDynCastAllOfMatcher<
- Stmt,
- Expr> expression;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// \brief Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
+/// \code
/// bool x;
/// if (x) {}
-const internal::VariadicDynCastAllOfMatcher<
- Stmt,
- DeclRefExpr> declarationReference;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr;
/// \brief Matches if statements.
///
/// Example matches 'if (x) {}'
+/// \code
/// if (x) {}
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// \brief Matches for statements.
///
/// Example matches 'for (;;) {}'
+/// \code
/// for (;;) {}
-const internal::VariadicDynCastAllOfMatcher<
- Stmt, ForStmt> forStmt;
+/// int i[] = {1, 2, 3}; for (auto a : i);
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
+
+/// \brief Matches range-based for statements.
+///
+/// forRangeStmt() matches 'for (auto a : i)'
+/// \code
+/// int i[] = {1, 2, 3}; for (auto a : i);
+/// for(int j = 0; j < 5; ++j);
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> forRangeStmt;
/// \brief Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
+/// \code
/// for (x; x < N; ++x) { }
+/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
InnerMatcher) {
const Stmt *const Increment = Node.getInc();
@@ -576,9 +716,11 @@ AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
/// \brief Matches the initialization statement of a for loop.
///
/// Example:
-/// forStmt(hasLoopInit(declarationStatement()))
+/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
+/// \code
/// for (int x = 0; x < N; ++x) { }
+/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
InnerMatcher) {
const Stmt *const Init = Node.getInit();
@@ -588,53 +730,167 @@ AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
/// \brief Matches while statements.
///
/// Given
+/// \code
/// while (true) {}
+/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
-const internal::VariadicDynCastAllOfMatcher<
- Stmt,
- WhileStmt> whileStmt;
+const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// \brief Matches do statements.
///
/// Given
+/// \code
/// do {} while (true);
+/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
+/// \brief Matches break statements.
+///
+/// Given
+/// \code
+/// while (true) { break; }
+/// \endcode
+/// breakStmt()
+/// matches 'break'
+const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
+
+/// \brief Matches continue statements.
+///
+/// Given
+/// \code
+/// while (true) { continue; }
+/// \endcode
+/// continueStmt()
+/// matches 'continue'
+const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt;
+
+/// \brief Matches return statements.
+///
+/// Given
+/// \code
+/// return 1;
+/// \endcode
+/// returnStmt()
+/// matches 'return 1'
+const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
+
+/// \brief Matches goto statements.
+///
+/// Given
+/// \code
+/// goto FOO;
+/// FOO: bar();
+/// \endcode
+/// gotoStmt()
+/// matches 'goto FOO'
+const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
+
+/// \brief Matches label statements.
+///
+/// Given
+/// \code
+/// goto FOO;
+/// FOO: bar();
+/// \endcode
+/// labelStmt()
+/// matches 'FOO:'
+const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
+
+/// \brief Matches switch statements.
+///
+/// Given
+/// \code
+/// switch(a) { case 42: break; default: break; }
+/// \endcode
+/// switchStmt()
+/// matches 'switch(a)'.
+const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
+
/// \brief Matches case and default statements inside switch statements.
///
/// Given
+/// \code
/// switch(a) { case 42: break; default: break; }
+/// \endcode
/// switchCase()
/// matches 'case 42: break;' and 'default: break;'.
-const internal::VariadicDynCastAllOfMatcher<
- Stmt,
- SwitchCase> switchCase;
+const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// \brief Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
+/// \code
/// for (;;) {{}}
-const internal::VariadicDynCastAllOfMatcher<
- Stmt,
- CompoundStmt> compoundStatement;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt;
+
+/// \brief Matches catch statements.
+///
+/// \code
+/// try {} catch(int i) {}
+/// \endcode
+/// catchStmt()
+/// matches 'catch(int i)'
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> catchStmt;
+
+/// \brief Matches try statements.
+///
+/// \code
+/// try {} catch(int i) {}
+/// \endcode
+/// tryStmt()
+/// matches 'try {}'
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> tryStmt;
+
+/// \brief Matches throw expressions.
+///
+/// \code
+/// try { throw 5; } catch(int i) {}
+/// \endcode
+/// throwExpr()
+/// matches 'throw 5'
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> throwExpr;
+
+/// \brief Matches null statements.
+///
+/// \code
+/// foo();;
+/// \endcode
+/// nullStmt()
+/// matches the second ';'
+const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
+
+/// \brief Matches asm statements.
+///
+/// \code
+/// int i = 100;
+/// __asm("mov al, 2");
+/// \endcode
+/// asmStmt()
+/// matches '__asm("mov al, 2")'
+const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// \brief Matches bool literals.
///
/// Example matches true
+/// \code
/// true
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
+ Stmt,
CXXBoolLiteralExpr> boolLiteral;
/// \brief Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
+/// \code
/// char *s = "abcd"; wchar_t *ws = L"abcd"
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
+ Stmt,
StringLiteral> stringLiteral;
/// \brief Matches character literals (also matches wchar_t).
@@ -643,9 +899,11 @@ const internal::VariadicDynCastAllOfMatcher<
/// though.
///
/// Example matches 'a', L'a'
+/// \code
/// char ch = 'a'; wchar_t chw = L'a';
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
+ Stmt,
CharacterLiteral> characterLiteral;
/// \brief Matches integer literals of all sizes / encodings.
@@ -654,13 +912,27 @@ const internal::VariadicDynCastAllOfMatcher<
///
/// Example matches 1, 1L, 0x1, 1U
const internal::VariadicDynCastAllOfMatcher<
- Expr,
+ Stmt,
IntegerLiteral> integerLiteral;
+/// \brief Matches user-defined literal operator calls.
+///
+/// Example match: "foo"_suffix
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ UserDefinedLiteral> userDefinedLiteral;
+
+/// \brief Matches nullptr literal.
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXNullPtrLiteralExpr> nullPtrLiteralExpr;
+
/// \brief Matches binary operator expressions.
///
/// Example matches a || b
+/// \code
/// !(a || b)
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Stmt,
BinaryOperator> binaryOperator;
@@ -668,7 +940,9 @@ const internal::VariadicDynCastAllOfMatcher<
/// \brief Matches unary operator expressions.
///
/// Example matches !a
+/// \code
/// !a || b
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Stmt,
UnaryOperator> unaryOperator;
@@ -676,7 +950,9 @@ const internal::VariadicDynCastAllOfMatcher<
/// \brief Matches conditional operator expressions.
///
/// Example matches a ? b : c
+/// \code
/// (a ? b : c) + 42
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
Stmt,
ConditionalOperator> conditionalOperator;
@@ -688,10 +964,12 @@ const internal::VariadicDynCastAllOfMatcher<
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
+/// \code
/// void* p = reinterpret_cast<char*>(&p);
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
- CXXReinterpretCastExpr> reinterpretCast;
+ Stmt,
+ CXXReinterpretCastExpr> reinterpretCastExpr;
/// \brief Matches a C++ static_cast expression.
///
@@ -699,38 +977,54 @@ const internal::VariadicDynCastAllOfMatcher<
/// \see reinterpretCast
///
/// Example:
-/// staticCast()
+/// staticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
+/// \code
/// long eight(static_cast<long>(8));
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
- CXXStaticCastExpr> staticCast;
+ Stmt,
+ CXXStaticCastExpr> staticCastExpr;
/// \brief Matches a dynamic_cast expression.
///
/// Example:
-/// dynamicCast()
+/// dynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
+/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
- CXXDynamicCastExpr> dynamicCast;
+ Stmt,
+ CXXDynamicCastExpr> dynamicCastExpr;
/// \brief Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
+/// \code
/// int n = 42;
-/// const int& r(n);
+/// const int &r(n);
/// int* p = const_cast<int*>(&r);
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXConstCastExpr> constCastExpr;
+
+/// \brief Matches a C-style cast expression.
+///
+/// Example: Matches (int*) 2.2f in
+/// \code
+/// int i = (int) 2.2f;
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
- CXXConstCastExpr> constCast;
+ Stmt,
+ CStyleCastExpr> cStyleCastExpr;
/// \brief Matches explicit cast expressions.
///
@@ -746,98 +1040,127 @@ const internal::VariadicDynCastAllOfMatcher<
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
+/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
+/// \endcode
/// but does not match the implicit conversion in
+/// \code
/// long ell = 42;
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
- ExplicitCastExpr> explicitCast;
+ Stmt,
+ ExplicitCastExpr> explicitCastExpr;
/// \brief Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
const internal::VariadicDynCastAllOfMatcher<
- Expr,
- ImplicitCastExpr> implicitCast;
+ Stmt,
+ ImplicitCastExpr> implicitCastExpr;
/// \brief Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
+/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
+/// \endcode
/// but does not match
+/// \code
/// int i = (0);
/// int k = 0;
-const internal::VariadicDynCastAllOfMatcher<
- Expr,
- CastExpr> castExpr;
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// \brief Matches functional cast expressions
///
/// Example: Matches Foo(bar);
+/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
+/// \endcode
const internal::VariadicDynCastAllOfMatcher<
- Expr,
- CXXFunctionalCastExpr> functionalCast;
+ Stmt,
+ CXXFunctionalCastExpr> functionalCastExpr;
+
+/// \brief Matches \c QualTypes in the clang AST.
+const internal::VariadicAllOfMatcher<QualType> qualType;
+
+/// \brief Matches \c Types in the clang AST.
+const internal::VariadicDynCastAllOfMatcher<Type, Type> type;
+
+/// \brief Matches \c TypeLocs in the clang AST.
+const internal::VariadicDynCastAllOfMatcher<TypeLoc, TypeLoc> typeLoc;
/// \brief Various overloads for the anyOf matcher.
/// @{
-template<typename C1, typename C2>
-internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C1, C2>
-anyOf(const C1 &P1, const C2 &P2) {
+
+/// \brief Matches if any of the given matchers matches.
+///
+/// Usable as: Any Matcher
+template<typename M1, typename M2>
+internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, M1, M2>
+anyOf(const M1 &P1, const M2 &P2) {
return internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher,
- C1, C2 >(P1, P2);
+ M1, M2 >(P1, P2);
}
-template<typename C1, typename C2, typename C3>
-internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C1,
- internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C2, C3> >
-anyOf(const C1 &P1, const C2 &P2, const C3 &P3) {
+template<typename M1, typename M2, typename M3>
+internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, M1,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, M2, M3> >
+anyOf(const M1 &P1, const M2 &P2, const M3 &P3) {
return anyOf(P1, anyOf(P2, P3));
}
-template<typename C1, typename C2, typename C3, typename C4>
-internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C1,
- internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C2,
+template<typename M1, typename M2, typename M3, typename M4>
+internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, M1,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, M2,
internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher,
- C3, C4> > >
-anyOf(const C1 &P1, const C2 &P2, const C3 &P3, const C4 &P4) {
+ M3, M4> > >
+anyOf(const M1 &P1, const M2 &P2, const M3 &P3, const M4 &P4) {
return anyOf(P1, anyOf(P2, anyOf(P3, P4)));
}
-template<typename C1, typename C2, typename C3, typename C4, typename C5>
-internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C1,
- internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C2,
- internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C3,
+template<typename M1, typename M2, typename M3, typename M4, typename M5>
+internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, M1,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, M2,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, M3,
internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher,
- C4, C5> > > >
-anyOf(const C1& P1, const C2& P2, const C3& P3, const C4& P4, const C5& P5) {
+ M4, M5> > > >
+anyOf(const M1 &P1, const M2 &P2, const M3 &P3, const M4 &P4, const M5 &P5) {
return anyOf(P1, anyOf(P2, anyOf(P3, anyOf(P4, P5))));
}
+
/// @}
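For example, the variadic-looking form simply nests pairwise (assumes using namespace clang::ast_matchers):

// anyOf(P1, P2, P3) expands to anyOf(P1, anyOf(P2, P3)); this matcher
// fires on a class declaration named A, B, or C.
DeclarationMatcher M =
    recordDecl(anyOf(hasName("A"), hasName("B"), hasName("C")));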
/// \brief Various overloads for the allOf matcher.
/// @{
-template<typename C1, typename C2>
-internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, C1, C2>
-allOf(const C1 &P1, const C2 &P2) {
+
+/// \brief Matches if all given matchers match.
+///
+/// Usable as: Any Matcher
+template<typename M1, typename M2>
+internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, M1, M2>
+allOf(const M1 &P1, const M2 &P2) {
return internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher,
- C1, C2>(P1, P2);
+ M1, M2>(P1, P2);
}
-template<typename C1, typename C2, typename C3>
-internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, C1,
- internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, C2, C3> >
-allOf(const C1& P1, const C2& P2, const C3& P3) {
+template<typename M1, typename M2, typename M3>
+internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, M1,
+ internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, M2, M3> >
+allOf(const M1 &P1, const M2 &P2, const M3 &P3) {
return allOf(P1, allOf(P2, P3));
}
+
/// @}
/// \brief Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
+/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
+/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
const internal::VariadicDynCastAllOfMatcher<
@@ -847,20 +1170,24 @@ const internal::VariadicDynCastAllOfMatcher<
/// \brief Matches unary expressions that have a specific type of argument.
///
/// Given
+/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
+/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
- internal::Matcher<QualType>, Matcher) {
+ internal::Matcher<QualType>, InnerMatcher) {
const QualType ArgumentType = Node.getTypeOfArgument();
- return Matcher.matches(ArgumentType, Finder, Builder);
+ return InnerMatcher.matches(ArgumentType, Finder, Builder);
}
/// \brief Matches unary expressions of a certain kind.
///
/// Given
+/// \code
/// int x;
/// int s = sizeof(x) + alignof(x);
+/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
@@ -870,17 +1197,17 @@ AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
/// \brief Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
- const internal::Matcher<UnaryExprOrTypeTraitExpr> &Matcher) {
+ const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
return internal::Matcher<Stmt>(unaryExprOrTypeTraitExpr(allOf(
- ofKind(UETT_AlignOf), Matcher)));
+ ofKind(UETT_AlignOf), InnerMatcher)));
}
/// \brief Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
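+///
+/// A minimal sketch: given
+/// \code
+///   int x;
+///   int s = sizeof(x);
+/// \endcode
+/// sizeOfExpr(anything())
+///   matches \c sizeof(x)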
inline internal::Matcher<Stmt> sizeOfExpr(
- const internal::Matcher<UnaryExprOrTypeTraitExpr> &Matcher) {
+ const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
return internal::Matcher<Stmt>(unaryExprOrTypeTraitExpr(allOf(
- ofKind(UETT_SizeOf), Matcher)));
+ ofKind(UETT_SizeOf), InnerMatcher)));
}
/// \brief Matches NamedDecl nodes that have the specified name.
@@ -890,10 +1217,14 @@ inline internal::Matcher<Stmt> sizeOfExpr(
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
+/// \code
/// class X;
+/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
-/// namespace a { namespace b { class X; } }
+/// \code
+/// namespace a { namespace b { class X; } }
+/// \endcode
AST_MATCHER_P(NamedDecl, hasName, std::string, Name) {
assert(!Name.empty());
const std::string FullNameString = "::" + Node.getQualifiedNameAsString();
@@ -914,10 +1245,14 @@ AST_MATCHER_P(NamedDecl, hasName, std::string, Name) {
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
+/// \code
/// class X;
+/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
-/// namespace foo { namespace bar { class X; } }
+/// \code
+/// namespace foo { namespace bar { class X; } }
+/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
assert(!RegExp.empty());
std::string FullNameString = "::" + Node.getQualifiedNameAsString();
@@ -931,10 +1266,12 @@ AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
/// "operator" prefix, such as "<<", for OverloadedOperatorCall's.
///
/// Example matches a << b
-/// (matcher == overloadedOperatorCall(hasOverloadedOperatorName("<<")))
+/// (matcher == operatorCallExpr(hasOverloadedOperatorName("<<")))
+/// \code
/// a << b;
/// c && d; // assuming both operator<<
/// // and operator&& are overloaded somewhere.
+/// \endcode
AST_MATCHER_P(CXXOperatorCallExpr,
hasOverloadedOperatorName, std::string, Name) {
return getOperatorSpelling(Node.getOperator()) == Name;
@@ -943,20 +1280,24 @@ AST_MATCHER_P(CXXOperatorCallExpr,
/// \brief Matches C++ classes that are directly or indirectly derived from
/// a class matching \c Base.
///
-/// Note that a class is considered to be also derived from itself.
+/// Note that a class is not considered to be derived from itself.
///
-/// Example matches X, Y, Z, C (Base == hasName("X"))
-/// class X; // A class is considered to be derived from itself
+/// Example matches Y, Z, C (Base == hasName("X"))
+/// \code
+/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
+/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
+/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
+/// \endcode
AST_MATCHER_P(CXXRecordDecl, isDerivedFrom,
internal::Matcher<NamedDecl>, Base) {
return Finder->classIsDerivedFrom(&Node, Base, Builder);
@@ -968,15 +1309,34 @@ inline internal::Matcher<CXXRecordDecl> isDerivedFrom(StringRef BaseName) {
return isDerivedFrom(hasName(BaseName));
}
+/// \brief Similar to \c isDerivedFrom(), but also matches classes that directly
+/// match \c Base.
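+///
+/// A minimal sketch (Base == hasName("X")): given
+/// \code
+///   class X;
+///   class Y : public X {};
+/// \endcode
+/// recordDecl(isSameOrDerivedFrom(hasName("X")))
+///   matches \c X and \c Y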
+inline internal::Matcher<CXXRecordDecl> isSameOrDerivedFrom(
+ internal::Matcher<NamedDecl> Base) {
+ return anyOf(Base, isDerivedFrom(Base));
+}
+
+/// \brief Overloaded method as shortcut for
+/// \c isSameOrDerivedFrom(hasName(...)).
+inline internal::Matcher<CXXRecordDecl> isSameOrDerivedFrom(
+ StringRef BaseName) {
+ assert(!BaseName.empty());
+ return isSameOrDerivedFrom(hasName(BaseName));
+}
+
/// \brief Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
-/// Example matches X, Y (matcher = record(has(record(hasName("X")))
+/// Example matches X, Y (matcher = recordDecl(has(recordDecl(hasName("X")))))
+/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
+/// \endcode
///
/// ChildT must be an AST base type.
+///
+/// Usable as: Any Matcher
template <typename ChildT>
internal::ArgumentAdaptingMatcher<internal::HasMatcher, ChildT> has(
const internal::Matcher<ChildT> &ChildMatcher) {
@@ -988,12 +1348,16 @@ internal::ArgumentAdaptingMatcher<internal::HasMatcher, ChildT> has(
/// provided matcher.
///
/// Example matches X, Y, Z
-/// (matcher = record(hasDescendant(record(hasName("X")))))
+/// (matcher = recordDecl(hasDescendant(recordDecl(hasName("X")))))
+/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
+/// \endcode
///
/// DescendantT must be an AST base type.
+///
+/// Usable as: Any Matcher
template <typename DescendantT>
internal::ArgumentAdaptingMatcher<internal::HasDescendantMatcher, DescendantT>
hasDescendant(const internal::Matcher<DescendantT> &DescendantMatcher) {
@@ -1002,22 +1366,25 @@ hasDescendant(const internal::Matcher<DescendantT> &DescendantMatcher) {
DescendantT>(DescendantMatcher);
}
-
/// \brief Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
-/// Example matches X, Y (matcher = record(forEach(record(hasName("X")))
+/// Example matches X, Y (matcher = recordDecl(forEach(recordDecl(hasName("X")))))
+/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
+/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
+///
+/// Usable as: Any Matcher
template <typename ChildT>
internal::ArgumentAdaptingMatcher<internal::ForEachMatcher, ChildT> forEach(
- const internal::Matcher<ChildT>& ChildMatcher) {
+ const internal::Matcher<ChildT> &ChildMatcher) {
return internal::ArgumentAdaptingMatcher<
internal::ForEachMatcher,
ChildT>(ChildMatcher);
@@ -1027,10 +1394,12 @@ internal::ArgumentAdaptingMatcher<internal::ForEachMatcher, ChildT> forEach(
/// provided matcher.
///
/// Example matches X, A, B, C
-/// (matcher = record(forEachDescendant(record(hasName("X")))))
+/// (matcher = recordDecl(forEachDescendant(recordDecl(hasName("X")))))
+/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class A { class X {}; };
/// class B { class C { class X {}; }; };
+/// \endcode
///
/// DescendantT must be an AST base type.
///
@@ -1038,25 +1407,72 @@ internal::ArgumentAdaptingMatcher<internal::ForEachMatcher, ChildT> forEach(
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
-/// record(forEachDescendant(record(forEachDescendant(record()))))
+/// recordDecl(forEachDescendant(recordDecl(forEachDescendant(recordDecl()))))
/// will match 10 times (plus injected class name matches) on:
+/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
+/// \endcode
+///
+/// Usable as: Any Matcher
template <typename DescendantT>
-internal::ArgumentAdaptingMatcher<internal::ForEachDescendantMatcher, DescendantT>
+internal::ArgumentAdaptingMatcher<internal::ForEachDescendantMatcher,
+ DescendantT>
forEachDescendant(
- const internal::Matcher<DescendantT>& DescendantMatcher) {
+ const internal::Matcher<DescendantT> &DescendantMatcher) {
return internal::ArgumentAdaptingMatcher<
internal::ForEachDescendantMatcher,
DescendantT>(DescendantMatcher);
}
+/// \brief Matches AST nodes that have a parent that matches the provided
+/// matcher.
+///
+/// Given
+/// \code
+/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
+/// \endcode
+/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
+///
+/// Usable as: Any Matcher
+template <typename ParentT>
+internal::ArgumentAdaptingMatcher<internal::HasParentMatcher, ParentT>
+hasParent(const internal::Matcher<ParentT> &ParentMatcher) {
+ return internal::ArgumentAdaptingMatcher<
+ internal::HasParentMatcher,
+ ParentT>(ParentMatcher);
+}
+
+/// \brief Matches AST nodes that have an ancestor that matches the provided
+/// matcher.
+///
+/// Given
+/// \code
+/// void f() { if (true) { int x = 42; } }
+/// void g() { for (;;) { int x = 43; } }
+/// \endcode
+/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
+///
+/// Usable as: Any Matcher
+template <typename AncestorT>
+internal::ArgumentAdaptingMatcher<internal::HasAncestorMatcher, AncestorT>
+hasAncestor(const internal::Matcher<AncestorT> &AncestorMatcher) {
+ return internal::ArgumentAdaptingMatcher<
+ internal::HasAncestorMatcher,
+ AncestorT>(AncestorMatcher);
+}
+
/// \brief Matches if the provided matcher does not match.
///
-/// Example matches Y (matcher = record(unless(hasName("X"))))
+/// Example matches Y (matcher = recordDecl(unless(hasName("X"))))
+/// \code
/// class X {};
/// class Y {};
+/// \endcode
+///
+/// Usable as: Any Matcher
template <typename M>
-internal::PolymorphicMatcherWithParam1<internal::NotMatcher, M> unless(const M &InnerMatcher) {
+internal::PolymorphicMatcherWithParam1<internal::NotMatcher, M>
+unless(const M &InnerMatcher) {
return internal::PolymorphicMatcherWithParam1<
internal::NotMatcher, M>(InnerMatcher);
}
@@ -1064,7 +1480,8 @@ internal::PolymorphicMatcherWithParam1<internal::NotMatcher, M> unless(const M &
/// \brief Matches a type if the declaration of the type matches the given
/// matcher.
///
-/// Usable as: Matcher<QualType>, Matcher<CallExpr>, Matcher<CXXConstructExpr>
+/// Usable as: Matcher<QualType>, Matcher<CallExpr>, Matcher<CXXConstructExpr>,
+/// Matcher<MemberExpr>
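+///
+/// A minimal sketch: given
+/// \code
+///   class X {};
+///   void y(X &x) { x; }
+/// \endcode
+/// expr(hasType(hasDeclaration(recordDecl(hasName("X")))))
+///   matches \c x in \c y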
inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher,
internal::Matcher<Decl> >
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
@@ -1075,9 +1492,11 @@ inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher,
/// \brief Matches on the implicit object argument of a member call expression.
///
-/// Example matches y.x() (matcher = call(on(hasType(record(hasName("Y"))))))
+/// Example matches y.x() (matcher = callExpr(on(hasType(recordDecl(hasName("Y"))))))
+/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
+/// \endcode
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
@@ -1092,9 +1511,11 @@ AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
/// \brief Matches if the call expression's callee expression matches.
///
/// Given
+/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
-/// call(callee(expression()))
+/// \endcode
+/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
@@ -1113,9 +1534,11 @@ AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
/// \brief Matches if the call expression's callee's declaration matches the
/// given matcher.
///
-/// Example matches y.x() (matcher = call(callee(method(hasName("x")))))
+/// Example matches y.x() (matcher = callExpr(callee(methodDecl(hasName("x")))))
+/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
+/// \endcode
inline internal::Matcher<CallExpr> callee(
const internal::Matcher<Decl> &InnerMatcher) {
return internal::Matcher<CallExpr>(hasDeclaration(InnerMatcher));
@@ -1124,12 +1547,12 @@ inline internal::Matcher<CallExpr> callee(
/// \brief Matches if the expression's or declaration's type matches a type
/// matcher.
///
-/// Example matches x (matcher = expression(hasType(
-/// hasDeclaration(record(hasName("X"))))))
-/// and z (matcher = variable(hasType(
-/// hasDeclaration(record(hasName("X"))))))
+/// Example matches x (matcher = expr(hasType(recordDecl(hasName("X")))))
+/// and z (matcher = varDecl(hasType(recordDecl(hasName("X")))))
+/// \code
/// class X {};
/// void y(X &x) { x; X z; }
+/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasType, internal::Matcher<QualType>,
InnerMatcher) {
TOOLING_COMPILE_ASSERT((llvm::is_base_of<Expr, NodeType>::value ||
@@ -1143,14 +1566,16 @@ AST_POLYMORPHIC_MATCHER_P(hasType, internal::Matcher<QualType>,
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
-/// declaration "X x;", record(hasName("X")) matches the declaration of X,
-/// while variable(hasType(record(hasName("X")))) matches the declaration
+/// declaration "X x;", recordDecl(hasName("X")) matches the declaration of X,
+/// while varDecl(hasType(recordDecl(hasName("X")))) matches the declaration
/// of x.
///
-/// Example matches x (matcher = expression(hasType(record(hasName("X")))))
-/// and z (matcher = variable(hasType(record(hasName("X")))))
+/// Example matches x (matcher = expr(hasType(recordDecl(hasName("X")))))
+/// and z (matcher = varDecl(hasType(recordDecl(hasName("X")))))
+/// \code
/// class X {};
/// void y(X &x) { x; X z; }
+/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
inline internal::PolymorphicMatcherWithParam1<
@@ -1164,9 +1589,11 @@ hasType(const internal::Matcher<Decl> &InnerMatcher) {
/// \brief Matches if the matched type is represented by the given string.
///
/// Given
+/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
-/// call(on(hasType(asString("class Y *"))))
+/// \endcode
+/// callExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
return Name == Node.getAsString();
@@ -1176,9 +1603,11 @@ AST_MATCHER_P(QualType, asString, std::string, Name) {
/// matches the specified matcher.
///
/// Example matches y->x()
-/// (matcher = call(on(hasType(pointsTo(record(hasName("Y")))))))
+/// (matcher = callExpr(on(hasType(pointsTo(recordDecl(hasName("Y")))))))
+/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
+/// \endcode
AST_MATCHER_P(
QualType, pointsTo, internal::Matcher<QualType>,
InnerMatcher) {
@@ -1197,12 +1626,14 @@ inline internal::Matcher<QualType> pointsTo(
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
-/// (matcher = variable(hasType(references(record(hasName("X"))))))
+/// (matcher = varDecl(hasType(references(recordDecl(hasName("X"))))))
+/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
///   }
/// };
+/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
InnerMatcher) {
return (!Node.isNull() && Node->isReferenceType() &&
@@ -1243,9 +1674,11 @@ inline internal::Matcher<CXXMemberCallExpr> thisPointerType(
/// specified matcher.
///
/// Example matches x in if(x)
-/// (matcher = declarationReference(to(variable(hasName("x")))))
+/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
+/// \code
/// bool x;
/// if (x) {}
+/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
InnerMatcher) {
const Decl *DeclNode = Node.getDecl();
@@ -1259,29 +1692,33 @@ AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
/// FIXME: This currently only works for functions. Fix.
///
/// Given
+/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
-/// declarationReference(throughUsingDeclaration(anything()))
+/// \endcode
+/// declRefExpr(throughUsingDeclaration(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
- internal::Matcher<UsingShadowDecl>, Matcher) {
+ internal::Matcher<UsingShadowDecl>, InnerMatcher) {
const NamedDecl *FoundDecl = Node.getFoundDecl();
if (const UsingShadowDecl *UsingDecl =
llvm::dyn_cast<UsingShadowDecl>(FoundDecl))
- return Matcher.matches(*UsingDecl, Finder, Builder);
+ return InnerMatcher.matches(*UsingDecl, Finder, Builder);
return false;
}
/// \brief Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
+/// \code
/// int a, b;
/// int c;
-/// declarationStatement(hasSingleDecl(anything()))
+/// \endcode
+/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
if (Node.isSingleDecl()) {
@@ -1294,9 +1731,11 @@ AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
/// \brief Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
-/// Example matches x (matcher = variable(hasInitializer(call())))
+/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
+/// \code
/// bool y() { return true; }
/// bool x = y();
+/// \endcode
AST_MATCHER_P(
VarDecl, hasInitializer, internal::Matcher<Expr>,
InnerMatcher) {
@@ -1308,9 +1747,11 @@ AST_MATCHER_P(
/// \brief Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
-/// Example matches f(0, 0) (matcher = call(argumentCountIs(2)))
+/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
+/// \code
/// void f(int x, int y);
/// f(0, 0);
+/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs, unsigned, N) {
TOOLING_COMPILE_ASSERT((llvm::is_base_of<CallExpr, NodeType>::value ||
llvm::is_base_of<CXXConstructExpr,
@@ -1323,8 +1764,10 @@ AST_POLYMORPHIC_MATCHER_P(argumentCountIs, unsigned, N) {
/// call expression.
///
/// Example matches y in x(y)
-/// (matcher = call(hasArgument(0, declarationReference())))
+/// (matcher = callExpr(hasArgument(0, declRefExpr())))
+/// \code
/// void x(int) { int y; x(y); }
+/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
hasArgument, unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
TOOLING_COMPILE_ASSERT((llvm::is_base_of<CallExpr, NodeType>::value ||
@@ -1340,13 +1783,15 @@ AST_POLYMORPHIC_MATCHER_P2(
/// declarations.
///
/// Example: Given
+/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
+/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
- return std::distance(Node.decl_begin(), Node.decl_end()) == N;
+ return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}
/// \brief Matches the n'th declaration of a declaration statement.
@@ -1355,15 +1800,19 @@ AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
+/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
-/// declarationStatement(containsDeclaration(
-/// 0, variable(hasInitializer(anything()))))
+/// \endcode
+/// declStmt(containsDeclaration(
+/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
-/// declarationStatement(containsDeclaration(1, variable()))
+/// declStmt(containsDeclaration(1, varDecl()))
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
internal::Matcher<Decl>, InnerMatcher) {
const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
@@ -1377,11 +1826,13 @@ AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
/// \brief Matches a constructor initializer.
///
/// Given
+/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
-/// record(has(constructor(hasAnyConstructorInitializer(anything()))))
+/// \endcode
+/// recordDecl(has(constructorDecl(hasAnyConstructorInitializer(anything()))))
/// recordDecl matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
@@ -1397,11 +1848,13 @@ AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
/// \brief Matches the field declaration of a constructor initializer.
///
/// Given
+/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
-/// record(has(constructor(hasAnyConstructorInitializer(
+/// \endcode
+/// recordDecl(has(constructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
@@ -1415,11 +1868,13 @@ AST_MATCHER_P(CXXCtorInitializer, forField,
/// \brief Matches the initializer expression of a constructor initializer.
///
/// Given
+/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
-/// record(has(constructor(hasAnyConstructorInitializer(
+/// \endcode
+/// recordDecl(has(constructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
@@ -1434,12 +1889,14 @@ AST_MATCHER_P(CXXCtorInitializer, withInitializer,
/// code (as opposed to implicitly added by the compiler).
///
/// Given
+/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
-/// constructor(hasAnyConstructorInitializer(isWritten()))
+/// \endcode
+/// constructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
return Node.isWritten();
@@ -1455,8 +1912,10 @@ AST_MATCHER(CXXConstructorDecl, isImplicit) {
/// expression.
///
/// Given
+/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
-/// call(hasAnyArgument(declarationReference()))
+/// \endcode
+/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
@@ -1478,8 +1937,10 @@ AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, internal::Matcher<Expr>,
/// \brief Matches the n'th parameter of a function declaration.
///
/// Given
+/// \code
/// class X { void f(int x) {} };
-/// method(hasParameter(0, hasType(variable())))
+/// \endcode
+/// methodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
@@ -1496,8 +1957,10 @@ AST_MATCHER_P2(FunctionDecl, hasParameter,
/// Does not match the 'this' parameter of a method.
///
/// Given
+/// \code
/// class X { void f(int x, int y, int z) {} };
-/// method(hasAnyParameter(hasName("y")))
+/// \endcode
+/// methodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
@@ -1514,20 +1977,25 @@ AST_MATCHER_P(FunctionDecl, hasAnyParameter,
/// \brief Matches the return type of a function declaration.
///
/// Given:
+/// \code
/// class X { int f() { return 1; } };
-/// method(returns(asString("int")))
+/// \endcode
+/// methodDecl(returns(asString("int")))
/// matches int f() { return 1; }
-AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, Matcher) {
- return Matcher.matches(Node.getResultType(), Finder, Builder);
+AST_MATCHER_P(FunctionDecl, returns,
+ internal::Matcher<QualType>, InnerMatcher) {
+ return InnerMatcher.matches(Node.getResultType(), Finder, Builder);
}
/// \brief Matches extern "C" function declarations.
///
/// Given:
+/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
-/// function(isExternC())
+/// \endcode
+/// functionDecl(isExternC())
+/// matches the declaration of f and g, but not the declaration of h
AST_MATCHER(FunctionDecl, isExternC) {
return Node.isExternC();
@@ -1537,7 +2005,9 @@ AST_MATCHER(FunctionDecl, isExternC) {
/// or conditional operator.
///
/// Example matches true (matcher = hasCondition(boolLiteral(equals(true))))
+/// \code
/// if (true) {}
+/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasCondition, internal::Matcher<Expr>,
InnerMatcher) {
TOOLING_COMPILE_ASSERT(
@@ -1555,7 +2025,9 @@ AST_POLYMORPHIC_MATCHER_P(hasCondition, internal::Matcher<Expr>,
/// \brief Matches the condition variable statement in an if statement.
///
/// Given
+/// \code
/// if (A* a = GetAPointer()) {}
+/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
@@ -1569,29 +2041,33 @@ AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
/// \brief Matches the index expression of an array subscript expression.
///
/// Given
+/// \code
/// int i[5];
/// void f() { i[1] = 42; }
+/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
- internal::Matcher<Expr>, matcher) {
+ internal::Matcher<Expr>, InnerMatcher) {
if (const Expr* Expression = Node.getIdx())
- return matcher.matches(*Expression, Finder, Builder);
+ return InnerMatcher.matches(*Expression, Finder, Builder);
return false;
}
/// \brief Matches the base expression of an array subscript expression.
///
/// Given
+/// \code
/// int i[5];
/// void f() { i[1] = 42; }
-/// arraySubscriptExpression(hasBase(implicitCast(
-/// hasSourceExpression(declarationReference()))))
-/// matches \c i[1] with the \c declarationReference() matching \c i
+/// \endcode
+/// arraySubscriptExpr(hasBase(implicitCastExpr(
+/// hasSourceExpression(declRefExpr()))))
+/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
- internal::Matcher<Expr>, matcher) {
+ internal::Matcher<Expr>, InnerMatcher) {
if (const Expr* Expression = Node.getBase())
- return matcher.matches(*Expression, Finder, Builder);
+ return InnerMatcher.matches(*Expression, Finder, Builder);
return false;
}
@@ -1599,10 +2075,12 @@ AST_MATCHER_P(ArraySubscriptExpr, hasBase,
/// a given body.
///
/// Given
+/// \code
/// for (;;) {}
-/// hasBody(compoundStatement())
+/// \endcode
+/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
-/// with compoundStatement()
+/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody, internal::Matcher<Stmt>,
InnerMatcher) {
@@ -1620,10 +2098,12 @@ AST_POLYMORPHIC_MATCHER_P(hasBody, internal::Matcher<Stmt>,
/// a given matcher.
///
/// Given
+/// \code
/// { {}; 1+2; }
-/// hasAnySubstatement(compoundStatement())
+/// \endcode
+/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
-/// with compoundStatement()
+/// with compoundStmt()
/// matching '{}'
AST_MATCHER_P(CompoundStmt, hasAnySubstatement,
internal::Matcher<Stmt>, InnerMatcher) {
@@ -1639,8 +2119,10 @@ AST_MATCHER_P(CompoundStmt, hasAnySubstatement,
/// child statements.
///
/// Example: Given
+/// \code
/// { for (;;) {} }
-/// compoundStatement(statementCountIs(0)))
+/// \endcode
+/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
@@ -1650,7 +2132,9 @@ AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
/// \brief Matches literals that are equal to the given value.
///
/// Example matches true (matcher = boolLiteral(equals(true)))
+/// \code
/// true
+/// \endcode
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteral>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
@@ -1666,7 +2150,9 @@ equals(const ValueT &Value) {
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
+/// \code
/// !(a || b)
+/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName, std::string, Name) {
TOOLING_COMPILE_ASSERT(
(llvm::is_base_of<BinaryOperator, NodeType>::value) ||
@@ -1678,7 +2164,9 @@ AST_POLYMORPHIC_MATCHER_P(hasOperatorName, std::string, Name) {
/// \brief Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
+/// \code
/// a || b
+/// \endcode
AST_MATCHER_P(BinaryOperator, hasLHS,
internal::Matcher<Expr>, InnerMatcher) {
Expr *LeftHandSide = Node.getLHS();
@@ -1689,7 +2177,9 @@ AST_MATCHER_P(BinaryOperator, hasLHS,
/// \brief Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
+/// \code
/// a || b
+/// \endcode
AST_MATCHER_P(BinaryOperator, hasRHS,
internal::Matcher<Expr>, InnerMatcher) {
Expr *RightHandSide = Node.getRHS();
@@ -1706,8 +2196,10 @@ inline internal::Matcher<BinaryOperator> hasEitherOperand(
/// \brief Matches if the operand of a unary operator matches.
///
-/// Example matches true (matcher = hasOperand(boolLiteral(equals(true))))
+/// Example matches true (matcher = hasUnaryOperand(boolLiteral(equals(true))))
+/// \code
/// !true
+/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
internal::Matcher<Expr>, InnerMatcher) {
const Expr * const Operand = Node.getSubExpr();
@@ -1718,8 +2210,8 @@ AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
/// \brief Matches if the cast's source expression matches the given matcher.
///
/// Example: matches "a string" (matcher =
-/// hasSourceExpression(constructorCall()))
-///
+/// hasSourceExpression(constructExpr()))
+/// \code
/// class URL { URL(string); };
///  URL url = "a string";
+/// \endcode
AST_MATCHER_P(CastExpr, hasSourceExpression,
@@ -1751,7 +2243,9 @@ AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
/// \brief Matches the true branch expression of a conditional operator.
///
/// Example matches a
+/// \code
/// condition ? a : b
+/// \endcode
AST_MATCHER_P(ConditionalOperator, hasTrueExpression,
internal::Matcher<Expr>, InnerMatcher) {
Expr *Expression = Node.getTrueExpr();
@@ -1762,7 +2256,9 @@ AST_MATCHER_P(ConditionalOperator, hasTrueExpression,
/// \brief Matches the false branch expression of a conditional operator.
///
/// Example matches b
+/// \code
/// condition ? a : b
+/// \endcode
AST_MATCHER_P(ConditionalOperator, hasFalseExpression,
internal::Matcher<Expr>, InnerMatcher) {
Expr *Expression = Node.getFalseExpr();
@@ -1773,12 +2269,14 @@ AST_MATCHER_P(ConditionalOperator, hasFalseExpression,
/// \brief Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
+/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
+/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam0<internal::IsDefinitionMatcher>
@@ -1795,13 +2293,15 @@ isDefinition() {
/// this to?
///
/// Example matches A() in the last line
-/// (matcher = constructorCall(hasDeclaration(method(
+/// (matcher = constructExpr(hasDeclaration(methodDecl(
/// ofClass(hasName("A"))))))
+/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
+/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
internal::Matcher<CXXRecordDecl>, InnerMatcher) {
const CXXRecordDecl *Parent = Node.getParent();
@@ -1815,12 +2315,14 @@ AST_MATCHER_P(CXXMethodDecl, ofClass,
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
+/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a;
/// static int b;
/// };
-/// memberExpression(isArrow())
+/// \endcode
+/// memberExpr(isArrow())
/// matches this->x, x, a, this->b
inline internal::Matcher<MemberExpr> isArrow() {
return makeMatcher(new internal::IsArrowMatcher());
@@ -1829,10 +2331,12 @@ inline internal::Matcher<MemberExpr> isArrow() {
/// \brief Matches QualType nodes that are of integer type.
///
/// Given
+/// \code
/// void a(int);
/// void b(long);
/// void c(double);
-/// function(hasAnyParameter(hasType(isInteger())))
+/// \endcode
+/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
return Node->isIntegerType();
@@ -1842,12 +2346,14 @@ AST_MATCHER(QualType, isInteger) {
/// include "top-level" const.
///
/// Given
+/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
-/// function(hasAnyParameter(hasType(isConstQualified())))
+/// \endcode
+/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
@@ -1859,10 +2365,12 @@ inline internal::Matcher<QualType> isConstQualified() {
/// given matcher.
///
/// Given
+/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
-/// memberExpression(member(hasName("first")))
+/// \endcode
+/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
@@ -1874,9 +2382,11 @@ AST_MATCHER_P(MemberExpr, member,
/// matched by a given matcher.
///
/// Given
+/// \code
/// struct X { int m; };
/// void f(X x) { x.m; m; }
-/// memberExpression(hasObjectExpression(hasType(record(hasName("X")))))))
+/// \endcode
+/// memberExpr(hasObjectExpression(hasType(recordDecl(hasName("X")))))
/// matches "x.m" and "m"
/// with hasObjectExpression(...)
/// matching "x" and the implicit object expression of "m" which has type X*.
@@ -1888,15 +2398,17 @@ AST_MATCHER_P(MemberExpr, hasObjectExpression,
/// \brief Matches any using shadow declaration.
///
/// Given
+/// \code
/// namespace X { void b(); }
/// using X::b;
+/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
- internal::Matcher<UsingShadowDecl>, Matcher) {
+ internal::Matcher<UsingShadowDecl>, InnerMatcher) {
for (UsingDecl::shadow_iterator II = Node.shadow_begin();
II != Node.shadow_end(); ++II) {
- if (Matcher.matches(**II, Finder, Builder))
+ if (InnerMatcher.matches(**II, Finder, Builder))
return true;
}
return false;
@@ -1906,31 +2418,39 @@ AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
/// matched by the given matcher.
///
/// Given
+/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
-/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(function())))
+/// \endcode
+/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
- internal::Matcher<NamedDecl>, Matcher) {
- return Matcher.matches(*Node.getTargetDecl(), Finder, Builder);
+ internal::Matcher<NamedDecl>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// \brief Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
+/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
+/// \endcode
/// or
+/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
-/// record(hasName("::X"), isTemplateInstantiation())
+/// \endcode
+/// recordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
-/// template <typename T> class X {}; class A {};
+/// \code
+/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
-/// record(hasName("::X"), isTemplateInstantiation())
+/// \endcode
+/// recordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
@@ -1941,6 +2461,411 @@ isTemplateInstantiation() {
internal::IsTemplateInstantiationMatcher>();
}
+/// \brief Matches explicit template specializations of function, class, or
+/// static member variable template instantiations.
+///
+/// Given
+/// \code
+/// template<typename T> void A(T t) { }
+/// template<> void A(int N) { }
+/// \endcode
+/// functionDecl(isExplicitTemplateSpecialization())
+/// matches the specialization A<int>().
+///
+/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
+inline internal::PolymorphicMatcherWithParam0<
+ internal::IsExplicitTemplateSpecializationMatcher>
+isExplicitTemplateSpecialization() {
+ return internal::PolymorphicMatcherWithParam0<
+ internal::IsExplicitTemplateSpecializationMatcher>();
+}
+
+/// \brief Matches \c TypeLocs for which the given inner
+/// QualType-matcher matches.
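+///
+/// A minimal sketch: given
+/// \code
+///   int a;
+/// \endcode
+/// loc(asString("int"))
+///   matches the \c TypeLoc of "int" in "int a"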
+inline internal::BindableMatcher<TypeLoc> loc(
+ const internal::Matcher<QualType> &InnerMatcher) {
+ return internal::BindableMatcher<TypeLoc>(
+ new internal::TypeLocTypeMatcher(InnerMatcher));
+}
+
+/// \brief Matches builtin Types.
+///
+/// Given
+/// \code
+/// struct A {};
+/// A a;
+/// int b;
+/// float c;
+/// bool d;
+/// \endcode
+/// builtinType()
+/// matches "int b", "float c" and "bool d"
+AST_TYPE_MATCHER(BuiltinType, builtinType);
+
+/// \brief Matches all kinds of arrays.
+///
+/// Given
+/// \code
+/// int a[] = { 2, 3 };
+/// int b[4];
+/// void f() { int c[a[0]]; }
+/// \endcode
+/// arrayType()
+///   matches "int a[]", "int b[4]" and "int c[a[0]]"
+AST_TYPE_MATCHER(ArrayType, arrayType);
+
+/// \brief Matches C99 complex types.
+///
+/// Given
+/// \code
+/// _Complex float f;
+/// \endcode
+/// complexType()
+/// matches "_Complex float f"
+AST_TYPE_MATCHER(ComplexType, complexType);
+
+/// \brief Matches arrays and C99 complex types that have a specific element
+/// type.
+///
+/// Given
+/// \code
+/// struct A {};
+/// A a[7];
+/// int b[7];
+/// \endcode
+/// arrayType(hasElementType(builtinType()))
+/// matches "int b[7]"
+///
+/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
+AST_TYPELOC_TRAVERSE_MATCHER(hasElementType, getElement);
+
+/// \brief Matches C arrays with a specified constant size.
+///
+/// Given
+/// \code
+///   void f() {
+/// int a[2];
+/// int b[] = { 2, 3 };
+/// int c[b[0]];
+/// }
+/// \endcode
+/// constantArrayType()
+/// matches "int a[2]"
+AST_TYPE_MATCHER(ConstantArrayType, constantArrayType);
+
+/// \brief Matches \c ConstantArrayType nodes that have the specified size.
+///
+/// Given
+/// \code
+/// int a[42];
+/// int b[2 * 21];
+/// int c[41], d[43];
+/// \endcode
+/// constantArrayType(hasSize(42))
+/// matches "int a[42]" and "int b[2 * 21]"
+AST_MATCHER_P(ConstantArrayType, hasSize, unsigned, N) {
+ return Node.getSize() == N;
+}
+
+/// \brief Matches C++ arrays whose size is a value-dependent expression.
+///
+/// Given
+/// \code
+/// template<typename T, int Size>
+/// class array {
+/// T data[Size];
+/// };
+/// \endcode
+/// dependentSizedArrayType()
+/// matches "T data[Size]"
+AST_TYPE_MATCHER(DependentSizedArrayType, dependentSizedArrayType);
+
+/// \brief Matches C arrays with unspecified size.
+///
+/// Given
+/// \code
+/// int a[] = { 2, 3 };
+/// int b[42];
+/// void f(int c[]) { int d[a[0]]; };
+/// \endcode
+/// incompleteArrayType()
+/// matches "int a[]" and "int c[]"
+AST_TYPE_MATCHER(IncompleteArrayType, incompleteArrayType);
+
+/// \brief Matches C arrays with a specified size that is not an
+/// integer-constant-expression.
+///
+/// Given
+/// \code
+/// void f() {
+///     int a[] = { 2, 3 };
+/// int b[42];
+///     int c[a[0]];
+///   }
+/// \endcode
+/// variableArrayType()
+/// matches "int c[a[0]]"
+AST_TYPE_MATCHER(VariableArrayType, variableArrayType);
+
+/// \brief Matches \c VariableArrayType nodes that have a specific size
+/// expression.
+///
+/// Given
+/// \code
+/// void f(int b) {
+/// int a[b];
+/// }
+/// \endcode
+/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
+/// varDecl(hasName("b")))))))
+/// matches "int a[b]"
+AST_MATCHER_P(VariableArrayType, hasSizeExpr,
+ internal::Matcher<Expr>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
+}
+
+/// \brief Matches atomic types.
+///
+/// Given
+/// \code
+/// _Atomic(int) i;
+/// \endcode
+/// atomicType()
+/// matches "_Atomic(int) i"
+AST_TYPE_MATCHER(AtomicType, atomicType);
+
+/// \brief Matches atomic types with a specific value type.
+///
+/// Given
+/// \code
+/// _Atomic(int) i;
+/// _Atomic(float) f;
+/// \endcode
+/// atomicType(hasValueType(isInteger()))
+/// matches "_Atomic(int) i"
+///
+/// Usable as: Matcher<AtomicType>
+AST_TYPELOC_TRAVERSE_MATCHER(hasValueType, getValue);
+
+/// \brief Matches types nodes representing C++11 auto types.
+///
+/// Given:
+/// \code
+/// auto n = 4;
+///   int v[] = { 2, 3 };
+/// for (auto i : v) { }
+/// \endcode
+/// autoType()
+/// matches "auto n" and "auto i"
+AST_TYPE_MATCHER(AutoType, autoType);
+
+/// \brief Matches \c AutoType nodes where the deduced type is a specific type.
+///
+/// Note: There is no \c TypeLoc for the deduced type and thus no
+/// \c getDeducedLoc() matcher.
+///
+/// Given
+/// \code
+/// auto a = 1;
+/// auto b = 2.0;
+/// \endcode
+/// autoType(hasDeducedType(isInteger()))
+/// matches "auto a"
+///
+/// Usable as: Matcher<AutoType>
+AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType);
+
+/// \brief Matches \c FunctionType nodes.
+///
+/// Given
+/// \code
+/// int (*f)(int);
+/// void g();
+/// \endcode
+/// functionType()
+/// matches "int (*f)(int)" and the type of "g".
+AST_TYPE_MATCHER(FunctionType, functionType);
+
+/// \brief Matches block pointer types, i.e. types syntactically represented as
+/// "void (^)(int)".
+///
+/// The \c pointee is always required to be a \c FunctionType.
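+///
+/// A minimal sketch (requires blocks support, e.g. Objective-C): given
+/// \code
+///   void (^b)(int);
+/// \endcode
+/// blockPointerType()
+///   matches "void (^b)(int)"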
+AST_TYPE_MATCHER(BlockPointerType, blockPointerType);
+
+/// \brief Matches member pointer types.
+///
+/// Given
+/// \code
+///   struct A { int i; };
+///   int A::* ptr = &A::i;
+/// \endcode
+/// memberPointerType()
+///   matches "int A::* ptr"
+AST_TYPE_MATCHER(MemberPointerType, memberPointerType);
+
+/// \brief Matches pointer types.
+///
+/// Given
+/// \code
+/// int *a;
+/// int &b = *a;
+/// int c = 5;
+/// \endcode
+/// pointerType()
+/// matches "int *a"
+AST_TYPE_MATCHER(PointerType, pointerType);
+
+/// \brief Matches reference types.
+///
+/// Given
+/// \code
+/// int *a;
+/// int &b = *a;
+/// int c = 5;
+/// \endcode
+/// referenceType()
+/// matches "int &b"
+AST_TYPE_MATCHER(ReferenceType, referenceType);
+
+/// \brief Narrows PointerType (and similar) matchers to those where the
+/// \c pointee matches a given matcher.
+///
+/// Given
+/// \code
+/// int *a;
+/// int const *b;
+/// float const *f;
+/// \endcode
+/// pointerType(pointee(isConstQualified(), isInteger()))
+/// matches "int const *b"
+///
+/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
+/// Matcher<PointerType>, Matcher<ReferenceType>
+AST_TYPELOC_TRAVERSE_MATCHER(pointee, getPointee);
+
+/// \brief Matches typedef types.
+///
+/// Given
+/// \code
+/// typedef int X;
+/// \endcode
+/// typedefType()
+/// matches "typedef int X"
+AST_TYPE_MATCHER(TypedefType, typedefType);
+
+/// \brief Matches \c TypedefTypes referring to a specific
+/// \c TypedefNameDecl.
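+///
+/// A minimal sketch (composed with the \c namedDecl matcher): given
+/// \code
+///   typedef int X;
+///   X x;
+/// \endcode
+/// typedefType(hasDecl(namedDecl(hasName("X"))))
+///   matches the type of \c x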
+AST_MATCHER_P(TypedefType, hasDecl,
+ internal::Matcher<TypedefNameDecl>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.getDecl(), Finder, Builder);
+}
+
+/// \brief Matches nested name specifiers.
+///
+/// Given
+/// \code
+/// namespace ns {
+/// struct A { static void f(); };
+/// void A::f() {}
+/// void g() { A::f(); }
+/// }
+/// ns::A a;
+/// \endcode
+/// nestedNameSpecifier()
+/// matches "ns::" and both "A::"
+const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier;
+
+/// \brief Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
+const internal::VariadicAllOfMatcher<
+ NestedNameSpecifierLoc> nestedNameSpecifierLoc;
+
+/// \brief Matches \c NestedNameSpecifierLocs for which the given inner
+/// NestedNameSpecifier-matcher matches.
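+///
+/// A minimal sketch: given
+/// \code
+///   namespace ns { struct A {}; }
+///   ns::A a;
+/// \endcode
+/// nestedNameSpecifierLoc(loc(specifiesNamespace(hasName("ns"))))
+///   matches the location of "ns::"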
+inline internal::BindableMatcher<NestedNameSpecifierLoc> loc(
+ const internal::Matcher<NestedNameSpecifier> &InnerMatcher) {
+ return internal::BindableMatcher<NestedNameSpecifierLoc>(
+ new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
+ InnerMatcher));
+}
+
+/// \brief Matches nested name specifiers that specify a type matching the
+/// given \c QualType matcher without qualifiers.
+///
+/// Given
+/// \code
+/// struct A { struct B { struct C {}; }; };
+/// A::B::C c;
+/// \endcode
+/// nestedNameSpecifier(specifiesType(hasDeclaration(recordDecl(hasName("A")))))
+/// matches "A::"
+AST_MATCHER_P(NestedNameSpecifier, specifiesType,
+ internal::Matcher<QualType>, InnerMatcher) {
+ if (Node.getAsType() == NULL)
+ return false;
+ return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
+}
+
+/// \brief Matches nested name specifier locs that specify a type matching the
+/// given \c TypeLoc.
+///
+/// Given
+/// \code
+/// struct A { struct B { struct C {}; }; };
+/// A::B::C c;
+/// \endcode
+/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
+/// hasDeclaration(recordDecl(hasName("A")))))))
+/// matches "A::"
+AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
+ internal::Matcher<TypeLoc>, InnerMatcher) {
+ return InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
+}
+
+/// \brief Matches on the prefix of a \c NestedNameSpecifier.
+///
+/// Given
+/// \code
+/// struct A { struct B { struct C {}; }; };
+/// A::B::C c;
+/// \endcode
+/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A"))))
+/// matches "A::"
+inline internal::Matcher<NestedNameSpecifier> hasPrefix(
+ const internal::Matcher<NestedNameSpecifier> &InnerMatcher) {
+ return internal::makeMatcher(
+ new internal::NestedNameSpecifierPrefixMatcher(InnerMatcher));
+}
+
+/// \brief Matches on the prefix of a \c NestedNameSpecifierLoc.
+///
+/// Given
+/// \code
+/// struct A { struct B { struct C {}; }; };
+/// A::B::C c;
+/// \endcode
+/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
+/// matches "A::"
+inline internal::Matcher<NestedNameSpecifierLoc> hasPrefix(
+ const internal::Matcher<NestedNameSpecifierLoc> &InnerMatcher) {
+ return internal::makeMatcher(
+ new internal::NestedNameSpecifierLocPrefixMatcher(InnerMatcher));
+}
+
+/// \brief Matches nested name specifiers that specify a namespace matching the
+/// given namespace matcher.
+///
+/// Given
+/// \code
+/// namespace ns { struct A {}; }
+/// ns::A a;
+/// \endcode
+/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
+/// matches "ns::"
+AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
+ internal::Matcher<NamespaceDecl>, InnerMatcher) {
+ if (Node.getAsNamespace() == NULL)
+ return false;
+ return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder);
+}
+
} // end namespace ast_matchers
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
index 3f55685..e5365ff 100644
--- a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
@@ -39,7 +39,11 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/ASTMatchers/ASTTypeTraits.h"
#include "llvm/ADT/VariadicFunction.h"
+#include "llvm/Support/type_traits.h"
#include <map>
#include <string>
#include <vector>
@@ -57,6 +61,45 @@ class BoundNodes;
namespace internal {
class BoundNodesTreeBuilder;
+/// \brief Internal version of BoundNodes. Holds all the bound nodes.
+class BoundNodesMap {
+public:
+ /// \brief Adds \c Node to the map with key \c ID.
+ ///
+  /// The node's base type should be in NodeBaseType or it will be inaccessible.
+ template <typename T>
+ void addNode(StringRef ID, const T* Node) {
+ NodeMap[ID] = ast_type_traits::DynTypedNode::create(*Node);
+ }
+ void addNode(StringRef ID, ast_type_traits::DynTypedNode Node) {
+ NodeMap[ID] = Node;
+ }
+
+ /// \brief Returns the AST node bound to \c ID.
+ ///
+ /// Returns NULL if there was no node bound to \c ID or if there is a node but
+ /// it cannot be converted to the specified type.
+ template <typename T>
+ const T *getNodeAs(StringRef ID) const {
+ IDToNodeMap::const_iterator It = NodeMap.find(ID);
+ if (It == NodeMap.end()) {
+ return NULL;
+ }
+ return It->second.get<T>();
+ }
+
+ /// \brief Copies all ID/Node pairs to BoundNodesTreeBuilder \c Builder.
+ void copyTo(BoundNodesTreeBuilder *Builder) const;
+
+ /// \brief Copies all ID/Node pairs to BoundNodesMap \c Other.
+ void copyTo(BoundNodesMap *Other) const;
+
+private:
+ /// \brief A map from IDs to the bound nodes.
+ typedef std::map<std::string, ast_type_traits::DynTypedNode> IDToNodeMap;
+
+ IDToNodeMap NodeMap;
+};
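+
+// A minimal usage sketch (D is assumed to be some const Decl*):
+//   BoundNodesMap Map;
+//   Map.addNode("id", D);                          // binds D under key "id"
+//   const Decl *Bound = Map.getNodeAs<Decl>("id"); // NULL if unbound or not
+//                                                  // convertible to Decl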
/// \brief A tree of bound nodes in match results.
///
@@ -84,11 +127,10 @@ public:
BoundNodesTree();
/// \brief Create a BoundNodesTree from pre-filled maps of bindings.
- BoundNodesTree(const std::map<std::string, const Decl*>& DeclBindings,
- const std::map<std::string, const Stmt*>& StmtBindings,
+ BoundNodesTree(const BoundNodesMap& Bindings,
const std::vector<BoundNodesTree> RecursiveBindings);
- /// \brief Adds all bound nodes to bound_nodes_builder.
+ /// \brief Adds all bound nodes to \c Builder.
void copyTo(BoundNodesTreeBuilder* Builder) const;
/// \brief Visits all matches that this BoundNodesTree represents.
@@ -99,17 +141,12 @@ public:
private:
void visitMatchesRecursively(
Visitor* ResultVistior,
- std::map<std::string, const Decl*> DeclBindings,
- std::map<std::string, const Stmt*> StmtBindings);
-
- template <typename T>
- void copyBindingsTo(const T& bindings, BoundNodesTreeBuilder* Builder) const;
+ const BoundNodesMap& AggregatedBindings);
// FIXME: Find out whether we want to use different data structures here -
// first benchmarks indicate that it doesn't matter though.
- std::map<std::string, const Decl*> DeclBindings;
- std::map<std::string, const Stmt*> StmtBindings;
+ BoundNodesMap Bindings;
std::vector<BoundNodesTree> RecursiveBindings;
};
@@ -123,12 +160,13 @@ public:
BoundNodesTreeBuilder();
/// \brief Add a binding from an id to a node.
- ///
- /// FIXME: Add overloads for all AST base types.
- /// @{
- void setBinding(const std::string &Id, const Decl *Node);
- void setBinding(const std::string &Id, const Stmt *Node);
- /// @}
+ template <typename T>
+ void setBinding(const std::string &Id, const T *Node) {
+ Bindings.addNode(Id, Node);
+ }
+ void setBinding(const std::string &Id, ast_type_traits::DynTypedNode Node) {
+ Bindings.addNode(Id, Node);
+ }
/// \brief Adds a branch in the tree.
void addMatch(const BoundNodesTree& Bindings);
@@ -137,11 +175,10 @@ public:
BoundNodesTree build() const;
private:
- BoundNodesTreeBuilder(const BoundNodesTreeBuilder&); // DO NOT IMPLEMENT
- void operator=(const BoundNodesTreeBuilder&); // DO NOT IMPLEMENT
+ BoundNodesTreeBuilder(const BoundNodesTreeBuilder &) LLVM_DELETED_FUNCTION;
+ void operator=(const BoundNodesTreeBuilder &) LLVM_DELETED_FUNCTION;
- std::map<std::string, const Decl*> DeclBindings;
- std::map<std::string, const Stmt*> StmtBindings;
+ BoundNodesMap Bindings;
std::vector<BoundNodesTree> RecursiveBindings;
};
@@ -169,7 +206,8 @@ public:
BoundNodesTreeBuilder *Builder) const = 0;
};
-/// \brief Interface for matchers that only evaluate properties on a single node.
+/// \brief Interface for matchers that only evaluate properties on a single
+/// node.
template <typename T>
class SingleNodeMatcherInterface : public MatcherInterface<T> {
public:
@@ -187,6 +225,24 @@ private:
}
};
+/// \brief Base class for all matchers that work on a \c DynTypedNode.
+///
+/// Matcher implementations check whether the \c DynTypedNode is
+/// convertible into the respective type and, if so, run the actual match
+/// on the underlying node; otherwise they return false.
+class DynTypedMatcher {
+public:
+ virtual ~DynTypedMatcher() {}
+
+ /// \brief Returns true if the matcher matches the given \c DynNode.
+ virtual bool matches(const ast_type_traits::DynTypedNode DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const = 0;
+
+ /// \brief Returns a unique ID for the matcher.
+ virtual uint64_t getID() const = 0;
+};
+
/// \brief Wrapper of a MatcherInterface<T> *that allows copying.
///
/// A Matcher<Base> can be used anywhere a Matcher<Derived> is
@@ -196,12 +252,32 @@ private:
/// operator rather than a type hierarchy to be able to templatize the
/// type hierarchy instead of spelling it out.
template <typename T>
-class Matcher {
+class Matcher : public DynTypedMatcher {
public:
/// \brief Takes ownership of the provided implementation pointer.
explicit Matcher(MatcherInterface<T> *Implementation)
: Implementation(Implementation) {}
+ /// \brief Implicitly converts \c Other to a Matcher<T>.
+ ///
+ /// Requires \c T to be derived from \c From.
+ template <typename From>
+ Matcher(const Matcher<From> &Other,
+ typename llvm::enable_if_c<
+ llvm::is_base_of<From, T>::value &&
+ !llvm::is_same<From, T>::value >::type* = 0)
+ : Implementation(new ImplicitCastMatcher<From>(Other)) {}
+
+ /// \brief Implicitly converts \c Matcher<Type> to \c Matcher<QualType>.
+ ///
+ /// The resulting matcher is not strict, i.e. ignores qualifiers.
+ template <typename TypeT>
+ Matcher(const Matcher<TypeT> &Other,
+ typename llvm::enable_if_c<
+ llvm::is_same<T, QualType>::value &&
+ llvm::is_same<TypeT, Type>::value >::type* = 0)
+ : Implementation(new TypeToQualType<TypeT>(Other)) {}
+
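+  // A minimal conversion sketch (assuming the stmt() matcher from
+  // ASTMatchers.h):
+  //   Matcher<Stmt> S = stmt();
+  //   Matcher<IfStmt> I = S; // wraps S in an ImplicitCastMatcher<Stmt>
+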
/// \brief Forwards the call to the underlying MatcherInterface<T> pointer.
bool matches(const T &Node,
ASTMatchFinder *Finder,
@@ -209,14 +285,6 @@ public:
return Implementation->matches(Node, Finder, Builder);
}
- /// \brief Implicitly converts this object to a Matcher<Derived>.
- ///
- /// Requires Derived to be derived from T.
- template <typename Derived>
- operator Matcher<Derived>() const {
- return Matcher<Derived>(new ImplicitCastMatcher<Derived>(*this));
- }
-
/// \brief Returns an ID that uniquely identifies the matcher.
uint64_t getID() const {
/// FIXME: Document the requirements this imposes on matcher
@@ -224,23 +292,55 @@ public:
return reinterpret_cast<uint64_t>(Implementation.getPtr());
}
+ /// \brief Returns whether the matcher matches on the given \c DynNode.
+ virtual bool matches(const ast_type_traits::DynTypedNode DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ const T *Node = DynNode.get<T>();
+ if (!Node) return false;
+ return matches(*Node, Finder, Builder);
+ }
+
+ /// \brief Allows the conversion of a \c Matcher<Type> to a \c
+ /// Matcher<QualType>.
+ ///
+  /// Depending on the constructor argument, the matcher is either strict,
+  /// i.e. matches only in the absence of qualifiers, or lenient, i.e. simply
+  /// ignores any qualifiers.
+ template <typename TypeT>
+ class TypeToQualType : public MatcherInterface<QualType> {
+ public:
+ TypeToQualType(const Matcher<TypeT> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const QualType &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ if (Node.isNull())
+ return false;
+ return InnerMatcher.matches(*Node, Finder, Builder);
+ }
+ private:
+ const Matcher<TypeT> InnerMatcher;
+ };
+
private:
- /// \brief Allows conversion from Matcher<T> to Matcher<Derived> if Derived
- /// is derived from T.
- template <typename Derived>
- class ImplicitCastMatcher : public MatcherInterface<Derived> {
+ /// \brief Allows conversion from Matcher<Base> to Matcher<T> if T
+ /// is derived from Base.
+ template <typename Base>
+ class ImplicitCastMatcher : public MatcherInterface<T> {
public:
- explicit ImplicitCastMatcher(const Matcher<T> &From)
+ explicit ImplicitCastMatcher(const Matcher<Base> &From)
: From(From) {}
- virtual bool matches(const Derived &Node,
+ virtual bool matches(const T &Node,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const {
return From.matches(Node, Finder, Builder);
}
private:
- const Matcher<T> From;
+ const Matcher<Base> From;
};
llvm::IntrusiveRefCntPtr< MatcherInterface<T> > Implementation;
@@ -280,18 +380,14 @@ private:
/// FIXME: Add other ways to convert...
if (Node.isNull())
return false;
- CXXRecordDecl *NodeAsRecordDecl = Node->getAsCXXRecordDecl();
- return NodeAsRecordDecl != NULL &&
- InnerMatcher.matches(*NodeAsRecordDecl, Finder, Builder);
+ return matchesDecl(Node->getAsCXXRecordDecl(), Finder, Builder);
}
/// \brief Extracts the Decl of the callee of a CallExpr and returns whether
/// the inner matcher matches on it.
bool matchesSpecialized(const CallExpr &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const {
- const Decl *NodeAsDecl = Node.getCalleeDecl();
- return NodeAsDecl != NULL &&
- InnerMatcher.matches(*NodeAsDecl, Finder, Builder);
+ return matchesDecl(Node.getCalleeDecl(), Finder, Builder);
}
/// \brief Extracts the Decl of the constructor call and returns whether the
@@ -299,96 +395,63 @@ private:
bool matchesSpecialized(const CXXConstructExpr &Node,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const {
- const Decl *NodeAsDecl = Node.getConstructor();
- return NodeAsDecl != NULL &&
- InnerMatcher.matches(*NodeAsDecl, Finder, Builder);
+ return matchesDecl(Node.getConstructor(), Finder, Builder);
+ }
+
+ /// \brief Extracts the \c ValueDecl a \c MemberExpr refers to and returns
+ /// whether the inner matcher matches on it.
+ bool matchesSpecialized(const MemberExpr &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return matchesDecl(Node.getMemberDecl(), Finder, Builder);
+ }
+
+ /// \brief Returns whether the inner matcher matches on \c Node. Returns
+ /// false if \c Node is \c NULL.
+ bool matchesDecl(const Decl *Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return Node != NULL && InnerMatcher.matches(*Node, Finder, Builder);
}
const Matcher<Decl> InnerMatcher;
};
/// \brief IsBaseType<T>::value is true if T is a "base" type in the AST
-/// node class hierarchies (i.e. if T is Decl, Stmt, or QualType).
+/// node class hierarchies.
template <typename T>
struct IsBaseType {
static const bool value =
(llvm::is_same<T, Decl>::value ||
llvm::is_same<T, Stmt>::value ||
llvm::is_same<T, QualType>::value ||
+ llvm::is_same<T, Type>::value ||
+ llvm::is_same<T, TypeLoc>::value ||
+ llvm::is_same<T, NestedNameSpecifier>::value ||
+ llvm::is_same<T, NestedNameSpecifierLoc>::value ||
llvm::is_same<T, CXXCtorInitializer>::value);
};
template <typename T>
const bool IsBaseType<T>::value;
-/// \brief Interface that can match any AST base node type and contains default
-/// implementations returning false.
-class UntypedBaseMatcher : public llvm::RefCountedBaseVPTR {
-public:
- virtual ~UntypedBaseMatcher() {}
-
- virtual bool matches(const Decl &DeclNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder) const {
- return false;
- }
- virtual bool matches(const QualType &TypeNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder) const {
- return false;
- }
- virtual bool matches(const Stmt &StmtNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder) const {
- return false;
- }
- virtual bool matches(const CXXCtorInitializer &CtorInitNode,
- ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder) const {
- return false;
- }
-
- /// \brief Returns a unique ID for the matcher.
- virtual uint64_t getID() const = 0;
-};
-
-/// \brief An UntypedBaseMatcher that overwrites the Matches(...) method for
-/// node type T. T must be an AST base type.
-template <typename T>
-class TypedBaseMatcher : public UntypedBaseMatcher {
- TOOLING_COMPILE_ASSERT(IsBaseType<T>::value,
- typed_base_matcher_can_only_be_used_with_base_type);
-public:
- explicit TypedBaseMatcher(const Matcher<T> &InnerMatcher)
- : InnerMatcher(InnerMatcher) {}
-
- using UntypedBaseMatcher::matches;
- /// \brief Implements UntypedBaseMatcher::Matches.
- ///
- /// Since T is guaranteed to be a "base" AST node type, this method is
- /// guaranteed to override one of the matches() methods from
- /// UntypedBaseMatcher.
- virtual bool matches(const T &Node,
- ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder) const {
- return InnerMatcher.matches(Node, Finder, Builder);
- }
-
- /// \brief Implements UntypedBaseMatcher::getID.
- virtual uint64_t getID() const {
- return InnerMatcher.getID();
- }
-
-private:
- Matcher<T> InnerMatcher;
-};
-
/// \brief Interface that allows matchers to traverse the AST.
/// FIXME: Find a better name.
///
-/// This provides two entry methods for each base node type in the AST:
-/// - matchesChildOf:
+/// This provides three entry methods for each base node type in the AST:
+/// - \c matchesChildOf:
/// Matches a matcher on every child node of the given node. Returns true
/// if at least one child node could be matched.
-/// - matchesDescendantOf:
+/// - \c matchesDescendantOf:
/// Matches a matcher on all descendant nodes of the given node. Returns true
/// if at least one descendant matched.
+/// - \c matchesAncestorOf:
+/// Matches a matcher on all ancestors of the given node. Returns true if
+/// at least one ancestor matched.
+///
+/// FIXME: Currently we only allow Stmt and Decl nodes to start a traversal.
+/// In the future, we want to implement this for all nodes for which it makes
+/// sense. In the case of matchesAncestorOf, we'll want to implement it for
+/// all nodes, as all nodes have ancestors.
class ASTMatchFinder {
public:
/// \brief Defines how we descend a level in the AST when we pass
@@ -408,6 +471,14 @@ public:
BK_All
};
+ /// \brief Defines which ancestors are considered for a match.
+ enum AncestorMatchMode {
+ /// All ancestors.
+ AMM_All,
+ /// Direct parent only.
+ AMM_ParentOnly
+ };
+
virtual ~ASTMatchFinder() {}
/// \brief Returns true if the given class is directly or indirectly derived
@@ -418,26 +489,70 @@ public:
const Matcher<NamedDecl> &Base,
BoundNodesTreeBuilder *Builder) = 0;
- // FIXME: Implement for other base nodes.
- virtual bool matchesChildOf(const Decl &DeclNode,
- const UntypedBaseMatcher &BaseMatcher,
- BoundNodesTreeBuilder *Builder,
- TraversalKind Traverse,
- BindKind Bind) = 0;
- virtual bool matchesChildOf(const Stmt &StmtNode,
- const UntypedBaseMatcher &BaseMatcher,
+ template <typename T>
+ bool matchesChildOf(const T &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ TraversalKind Traverse,
+ BindKind Bind) {
+ TOOLING_COMPILE_ASSERT(
+ (llvm::is_base_of<Decl, T>::value ||
+ llvm::is_base_of<Stmt, T>::value ||
+ llvm::is_base_of<NestedNameSpecifier, T>::value ||
+ llvm::is_base_of<NestedNameSpecifierLoc, T>::value ||
+ llvm::is_base_of<TypeLoc, T>::value ||
+ llvm::is_base_of<QualType, T>::value),
+ unsupported_type_for_recursive_matching);
+ return matchesChildOf(ast_type_traits::DynTypedNode::create(Node),
+ Matcher, Builder, Traverse, Bind);
+ }
+
+ template <typename T>
+ bool matchesDescendantOf(const T &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ BindKind Bind) {
+ TOOLING_COMPILE_ASSERT(
+ (llvm::is_base_of<Decl, T>::value ||
+ llvm::is_base_of<Stmt, T>::value ||
+ llvm::is_base_of<NestedNameSpecifier, T>::value ||
+ llvm::is_base_of<NestedNameSpecifierLoc, T>::value ||
+ llvm::is_base_of<TypeLoc, T>::value ||
+ llvm::is_base_of<QualType, T>::value),
+ unsupported_type_for_recursive_matching);
+ return matchesDescendantOf(ast_type_traits::DynTypedNode::create(Node),
+ Matcher, Builder, Bind);
+ }
+
+ // FIXME: Implement support for BindKind.
+ template <typename T>
+ bool matchesAncestorOf(const T &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ AncestorMatchMode MatchMode) {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<Decl, T>::value ||
+ llvm::is_base_of<Stmt, T>::value),
+ only_Decl_or_Stmt_allowed_for_recursive_matching);
+ return matchesAncestorOf(ast_type_traits::DynTypedNode::create(Node),
+ Matcher, Builder, MatchMode);
+ }
+
+protected:
+ virtual bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
TraversalKind Traverse,
BindKind Bind) = 0;
- virtual bool matchesDescendantOf(const Decl &DeclNode,
- const UntypedBaseMatcher &BaseMatcher,
- BoundNodesTreeBuilder *Builder,
- BindKind Bind) = 0;
- virtual bool matchesDescendantOf(const Stmt &StmtNode,
- const UntypedBaseMatcher &BaseMatcher,
+ virtual bool matchesDescendantOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
BindKind Bind) = 0;
+
+ virtual bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ AncestorMatchMode MatchMode) = 0;
};
/// \brief Converts a \c Matcher<T> to a matcher of desired type \c To by
@@ -606,9 +721,6 @@ public:
/// The returned matcher is equivalent to this matcher, but will
/// bind the matched node on a match.
Matcher<T> bind(StringRef ID) const {
- TOOLING_COMPILE_ASSERT((llvm::is_base_of<Stmt, T>::value ||
- llvm::is_base_of<Decl, T>::value),
- trying_to_bind_unsupported_node_type__only_decl_and_stmt_supported);
return Matcher<T>(new IdMatcher<T>(ID, *this));
}
};
@@ -635,7 +747,7 @@ public:
}
private:
- const TypedBaseMatcher<ChildT> ChildMatcher;
+ const Matcher<ChildT> ChildMatcher;
};
/// \brief Matches nodes of type T that have child nodes of type ChildT for
@@ -661,7 +773,7 @@ class ForEachMatcher : public MatcherInterface<T> {
}
private:
- const TypedBaseMatcher<ChildT> ChildMatcher;
+ const Matcher<ChildT> ChildMatcher;
};
/// \brief Matches nodes of type T if the given Matcher<T> does not match.
@@ -733,6 +845,20 @@ private:
const Matcher<T> InnertMatcher2;
};
+/// \brief Creates a Matcher<T> that matches if all inner matchers match.
+template<typename T>
+BindableMatcher<T> makeAllOfComposite(
+ ArrayRef<const Matcher<T> *> InnerMatchers) {
+ if (InnerMatchers.empty())
+ return BindableMatcher<T>(new TrueMatcher<T>);
+ MatcherInterface<T> *InnerMatcher = new TrueMatcher<T>;
+ for (int i = InnerMatchers.size() - 1; i >= 0; --i) {
+ InnerMatcher = new AllOfMatcher<T, Matcher<T>, Matcher<T> >(
+ *InnerMatchers[i], makeMatcher(InnerMatcher));
+ }
+ return BindableMatcher<T>(InnerMatcher);
+}
+
/// \brief Creates a Matcher<T> that matches if
/// T is dyn_cast'able into InnerT and all inner matchers match.
///
@@ -742,17 +868,8 @@ private:
template<typename T, typename InnerT>
BindableMatcher<T> makeDynCastAllOfComposite(
ArrayRef<const Matcher<InnerT> *> InnerMatchers) {
- if (InnerMatchers.empty()) {
- Matcher<InnerT> InnerMatcher = makeMatcher(new TrueMatcher<InnerT>);
- return BindableMatcher<T>(new DynCastMatcher<T, InnerT>(InnerMatcher));
- }
- Matcher<InnerT> InnerMatcher = *InnerMatchers.back();
- for (int i = InnerMatchers.size() - 2; i >= 0; --i) {
- InnerMatcher = makeMatcher(
- new AllOfMatcher<InnerT, Matcher<InnerT>, Matcher<InnerT> >(
- *InnerMatchers[i], InnerMatcher));
- }
- return BindableMatcher<T>(new DynCastMatcher<T, InnerT>(InnerMatcher));
+ return BindableMatcher<T>(new DynCastMatcher<T, InnerT>(
+ makeAllOfComposite(InnerMatchers)));
}
/// \brief Matches nodes of type T that have at least one descendant node of
@@ -775,7 +892,53 @@ public:
}
private:
- const TypedBaseMatcher<DescendantT> DescendantMatcher;
+ const Matcher<DescendantT> DescendantMatcher;
+};
+
+/// \brief Matches nodes of type \c T that have a parent node of type \c ParentT
+/// for which the given inner matcher matches.
+///
+/// \c ParentT must be an AST base type.
+template <typename T, typename ParentT>
+class HasParentMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT(IsBaseType<ParentT>::value,
+ has_parent_only_accepts_base_type_matcher);
+public:
+ explicit HasParentMatcher(const Matcher<ParentT> &ParentMatcher)
+ : ParentMatcher(ParentMatcher) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return Finder->matchesAncestorOf(
+ Node, ParentMatcher, Builder, ASTMatchFinder::AMM_ParentOnly);
+ }
+
+ private:
+ const Matcher<ParentT> ParentMatcher;
+};
+
+/// \brief Matches nodes of type \c T that have at least one ancestor node of
+/// type \c AncestorT for which the given inner matcher matches.
+///
+/// \c AncestorT must be an AST base type.
+template <typename T, typename AncestorT>
+class HasAncestorMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT(IsBaseType<AncestorT>::value,
+ has_ancestor_only_accepts_base_type_matcher);
+public:
+ explicit HasAncestorMatcher(const Matcher<AncestorT> &AncestorMatcher)
+ : AncestorMatcher(AncestorMatcher) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return Finder->matchesAncestorOf(
+ Node, AncestorMatcher, Builder, ASTMatchFinder::AMM_All);
+ }
+
+ private:
+ const Matcher<AncestorT> AncestorMatcher;
};
/// \brief Matches nodes of type T that have at least one descendant node of
@@ -801,7 +964,7 @@ class ForEachDescendantMatcher : public MatcherInterface<T> {
}
private:
- const TypedBaseMatcher<DescendantT> DescendantMatcher;
+ const Matcher<DescendantT> DescendantMatcher;
};
/// \brief Matches on nodes that have a getValue() method if getValue() equals
@@ -858,6 +1021,22 @@ class IsTemplateInstantiationMatcher : public MatcherInterface<T> {
}
};
+/// \brief Matches on explicit template specializations for FunctionDecl,
+/// VarDecl or CXXRecordDecl nodes.
+template <typename T>
+class IsExplicitTemplateSpecializationMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<FunctionDecl, T>::value) ||
+ (llvm::is_base_of<VarDecl, T>::value) ||
+ (llvm::is_base_of<CXXRecordDecl, T>::value),
+ requires_getTemplateSpecializationKind_method);
+ public:
+ virtual bool matches(const T& Node,
+ ASTMatchFinder* Finder,
+ BoundNodesTreeBuilder* Builder) const {
+ return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
+ }
+};
+
class IsArrowMatcher : public SingleNodeMatcherInterface<MemberExpr> {
public:
virtual bool matchesNode(const MemberExpr &Node) const {
@@ -894,6 +1073,166 @@ public:
VariadicDynCastAllOfMatcher() {}
};
+/// \brief A \c VariadicAllOfMatcher<T> object is a variadic functor that takes
+/// a number of \c Matcher<T> and returns a \c Matcher<T> that matches \c T
+/// nodes that are matched by all of the given matchers.
+///
+/// For example:
+/// const VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier;
+/// Creates a functor nestedNameSpecifier(...) that creates a
+/// \c Matcher<NestedNameSpecifier> given a variable number of arguments of type
+/// \c Matcher<NestedNameSpecifier>.
+/// The returned matcher matches if all given matchers match.
+template <typename T>
+class VariadicAllOfMatcher : public llvm::VariadicFunction<
+ BindableMatcher<T>, Matcher<T>,
+ makeAllOfComposite<T> > {
+public:
+ VariadicAllOfMatcher() {}
+};
+
+/// \brief Matches nodes of type \c TLoc for which the inner
+/// \c Matcher<T> matches.
+template <typename TLoc, typename T>
+class LocMatcher : public MatcherInterface<TLoc> {
+public:
+ explicit LocMatcher(const Matcher<T> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const TLoc &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ if (!Node)
+ return false;
+ return InnerMatcher.matches(*extract(Node), Finder, Builder);
+ }
+
+private:
+ const NestedNameSpecifier *extract(const NestedNameSpecifierLoc &Loc) const {
+ return Loc.getNestedNameSpecifier();
+ }
+
+ const Matcher<T> InnerMatcher;
+};
+
+/// \brief Matches \c NestedNameSpecifiers with a prefix matching another
+/// \c Matcher<NestedNameSpecifier>.
+class NestedNameSpecifierPrefixMatcher
+ : public MatcherInterface<NestedNameSpecifier> {
+public:
+ explicit NestedNameSpecifierPrefixMatcher(
+ const Matcher<NestedNameSpecifier> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const NestedNameSpecifier &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ NestedNameSpecifier *NextNode = Node.getPrefix();
+ if (NextNode == NULL)
+ return false;
+ return InnerMatcher.matches(*NextNode, Finder, Builder);
+ }
+
+private:
+ const Matcher<NestedNameSpecifier> InnerMatcher;
+};
+
+/// \brief Matches \c NestedNameSpecifierLocs with a prefix matching another
+/// \c Matcher<NestedNameSpecifierLoc>.
+class NestedNameSpecifierLocPrefixMatcher
+ : public MatcherInterface<NestedNameSpecifierLoc> {
+public:
+ explicit NestedNameSpecifierLocPrefixMatcher(
+ const Matcher<NestedNameSpecifierLoc> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const NestedNameSpecifierLoc &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ NestedNameSpecifierLoc NextNode = Node.getPrefix();
+ if (!NextNode)
+ return false;
+ return InnerMatcher.matches(NextNode, Finder, Builder);
+ }
+
+private:
+ const Matcher<NestedNameSpecifierLoc> InnerMatcher;
+};
+
+/// \brief Matches \c TypeLocs based on an inner matcher matching a certain
+/// \c QualType.
+///
+/// Used to implement the \c loc() matcher.
+class TypeLocTypeMatcher : public MatcherInterface<TypeLoc> {
+public:
+ explicit TypeLocTypeMatcher(const Matcher<QualType> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const TypeLoc &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ if (!Node)
+ return false;
+ return InnerMatcher.matches(Node.getType(), Finder, Builder);
+ }
+
+private:
+ const Matcher<QualType> InnerMatcher;
+};
+
+/// \brief Matches nodes of type \c T for which the inner matcher matches on
+/// another node of type \c T that can be reached using a given traverse
+/// function.
+template <typename T>
+class TypeTraverseMatcher : public MatcherInterface<T> {
+public:
+ explicit TypeTraverseMatcher(const Matcher<QualType> &InnerMatcher,
+ QualType (T::*TraverseFunction)() const)
+ : InnerMatcher(InnerMatcher), TraverseFunction(TraverseFunction) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ QualType NextNode = (Node.*TraverseFunction)();
+ if (NextNode.isNull())
+ return false;
+ return InnerMatcher.matches(NextNode, Finder, Builder);
+ }
+
+private:
+ const Matcher<QualType> InnerMatcher;
+ QualType (T::*TraverseFunction)() const;
+};
+
+/// \brief Matches nodes of type \c T in a ..Loc hierarchy, for which the inner
+/// matcher matches on another node of type \c T that can be reached using a
+/// given traverse function.
+template <typename T>
+class TypeLocTraverseMatcher : public MatcherInterface<T> {
+public:
+ explicit TypeLocTraverseMatcher(const Matcher<TypeLoc> &InnerMatcher,
+ TypeLoc (T::*TraverseFunction)() const)
+ : InnerMatcher(InnerMatcher), TraverseFunction(TraverseFunction) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ TypeLoc NextNode = (Node.*TraverseFunction)();
+ if (!NextNode)
+ return false;
+ return InnerMatcher.matches(NextNode, Finder, Builder);
+ }
+
+private:
+ const Matcher<TypeLoc> InnerMatcher;
+ TypeLoc (T::*TraverseFunction)() const;
+};
+
+template <typename T, typename InnerT>
+T makeTypeAllOfComposite(ArrayRef<const Matcher<InnerT> *> InnerMatchers) {
+ return T(makeAllOfComposite<InnerT>(InnerMatchers));
+}
+
} // end namespace internal
} // end namespace ast_matchers
} // end namespace clang
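
A usage sketch of the new ancestor machinery from the public matcher API
(assuming the hasAncestor/hasParent wrappers that ASTMatchers.h layers over
HasAncestorMatcher/HasParentMatcher; those wrappers are outside this hunk):

  #include "clang/ASTMatchers/ASTMatchers.h"
  using namespace clang::ast_matchers;

  // Matches every 'return' statement that is (transitively) inside a function
  // named "main". hasAncestor walks AMM_All; hasParent uses AMM_ParentOnly.
  StatementMatcher M =
      returnStmt(hasAncestor(functionDecl(hasName("main"))));
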
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
index c68534a..953abc2 100644
--- a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
@@ -221,4 +221,69 @@
const NodeType &Node, ASTMatchFinder *Finder, \
BoundNodesTreeBuilder *Builder) const
+/// \brief Creates a variadic matcher for both a specific \c Type as well as
+/// the corresponding \c TypeLoc.
+#define AST_TYPE_MATCHER(NodeType, MatcherName) \
+ const internal::VariadicDynCastAllOfMatcher<Type, NodeType> MatcherName; \
+ const internal::VariadicDynCastAllOfMatcher<TypeLoc, \
+ NodeType##Loc> MatcherName##Loc
+
+/// \brief AST_TYPE_TRAVERSE_MATCHER(MatcherName, FunctionName) defines
+/// the matcher \c MatcherName that can be used to traverse from one \c Type
+/// to another.
+///
+/// For a specific \c SpecificType, the traversal is done using
+/// \c SpecificType::FunctionName. The existence of such a function determines
+/// whether a corresponding matcher can be used on \c SpecificType.
+#define AST_TYPE_TRAVERSE_MATCHER(MatcherName, FunctionName) \
+class Polymorphic##MatcherName##TypeMatcher { \
+public: \
+ Polymorphic##MatcherName##TypeMatcher( \
+ const internal::Matcher<QualType> &InnerMatcher) \
+ : InnerMatcher(InnerMatcher) {} \
+ template <typename T> operator internal::Matcher<T>() { \
+ return internal::Matcher<T>(new internal::TypeTraverseMatcher<T>( \
+ InnerMatcher, &T::FunctionName)); \
+ } \
+private: \
+ const internal::Matcher<QualType> InnerMatcher; \
+}; \
+class Variadic##MatcherName##TypeTraverseMatcher \
+ : public llvm::VariadicFunction< \
+ Polymorphic##MatcherName##TypeMatcher, \
+ internal::Matcher<QualType>, \
+ internal::makeTypeAllOfComposite< \
+ Polymorphic##MatcherName##TypeMatcher, QualType> > { \
+public: \
+ Variadic##MatcherName##TypeTraverseMatcher() {} \
+}; \
+const Variadic##MatcherName##TypeTraverseMatcher MatcherName
+
+/// \brief AST_TYPELOC_TRAVERSE_MATCHER(MatcherName, FunctionName) works
+/// identically to \c AST_TYPE_TRAVERSE_MATCHER but operates on \c TypeLocs.
+#define AST_TYPELOC_TRAVERSE_MATCHER(MatcherName, FunctionName) \
+class Polymorphic##MatcherName##TypeLocMatcher { \
+public: \
+ Polymorphic##MatcherName##TypeLocMatcher( \
+ const internal::Matcher<TypeLoc> &InnerMatcher) \
+ : InnerMatcher(InnerMatcher) {} \
+ template <typename T> operator internal::Matcher<T>() { \
+ return internal::Matcher<T>(new internal::TypeLocTraverseMatcher<T>( \
+ InnerMatcher, &T::FunctionName##Loc)); \
+ } \
+private: \
+ const internal::Matcher<TypeLoc> InnerMatcher; \
+}; \
+class Variadic##MatcherName##TypeLocTraverseMatcher \
+ : public llvm::VariadicFunction< \
+ Polymorphic##MatcherName##TypeLocMatcher, \
+ internal::Matcher<TypeLoc>, \
+ internal::makeTypeAllOfComposite< \
+ Polymorphic##MatcherName##TypeLocMatcher, TypeLoc> > { \
+public: \
+ Variadic##MatcherName##TypeLocTraverseMatcher() {} \
+}; \
+const Variadic##MatcherName##TypeLocTraverseMatcher MatcherName##Loc; \
+AST_TYPE_TRAVERSE_MATCHER(MatcherName, FunctionName##Type)
+
#endif // LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_MACROS_H
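
A hypothetical invocation, to make the expansions concrete (the real
invocations live in ASTMatchers.h; the names below are illustrative only):

  AST_TYPE_MATCHER(PointerType, pointerType);
  // -> const internal::VariadicDynCastAllOfMatcher<Type, PointerType>
  //        pointerType;
  //    const internal::VariadicDynCastAllOfMatcher<TypeLoc, PointerTypeLoc>
  //        pointerTypeLoc;
  AST_TYPELOC_TRAVERSE_MATCHER(pointee, getPointee);
  // -> pointeeLoc(...) traverses via T::getPointeeLoc(), and the trailing
  //    AST_TYPE_TRAVERSE_MATCHER adds pointee(...) via T::getPointeeType().
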
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTTypeTraits.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTTypeTraits.h
new file mode 100644
index 0000000..bda53ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTTypeTraits.h
@@ -0,0 +1,209 @@
+//===--- ASTTypeTraits.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides a dynamically typed node container that can be used to store
+// an AST base node at runtime in the same storage in a type safe way.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_MATCHERS_AST_TYPE_TRAITS_H
+#define LLVM_CLANG_AST_MATCHERS_AST_TYPE_TRAITS_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/Support/AlignOf.h"
+
+namespace clang {
+namespace ast_type_traits {
+
+/// \brief A dynamically typed AST node container.
+///
+/// Stores an AST node in a type safe way. This allows writing code that
+/// works with different kinds of AST nodes, despite the fact that they don't
+/// have a common base class.
+///
+/// Use \c create(Node) to create a \c DynTypedNode from an AST node,
+/// and \c get<T>() to retrieve the node as type T if the types match.
+///
+/// See \c NodeTypeTag for which node base types are currently supported.
+/// You can create DynTypedNodes for all nodes in the inheritance hierarchy of
+/// the supported base types.
+class DynTypedNode {
+public:
+ /// \brief Creates a \c DynTypedNode from \c Node.
+ template <typename T>
+ static DynTypedNode create(const T &Node) {
+ return BaseConverter<T>::create(Node);
+ }
+
+ /// \brief Retrieve the stored node as type \c T.
+ ///
+ /// Returns NULL if the stored node does not have a type that is
+ /// convertible to \c T.
+ ///
+ /// For types that have identity via their pointer in the AST
+ /// (like \c Stmt and \c Decl) the returned pointer points to the
+ /// referenced AST node.
+ /// For other types (like \c QualType) the value is stored directly
+ /// in the \c DynTypedNode, and the returned pointer points at
+ /// the storage inside DynTypedNode. For those nodes, do not
+ /// use the pointer outside the scope of the DynTypedNode.
+ template <typename T>
+ const T *get() const {
+ return BaseConverter<T>::get(Tag, Storage.buffer);
+ }
+
+ /// \brief Returns a pointer that identifies the stored AST node.
+ ///
+ /// Note that this is not supported by all AST nodes. For AST nodes
+ /// that don't have a pointer-defined identity inside the AST, this
+ /// method returns NULL.
+ const void *getMemoizationData() const;
+
+private:
+ /// \brief Takes care of converting from and to \c T.
+ template <typename T, typename EnablerT = void> struct BaseConverter;
+
+ /// \brief Supported base node types.
+ enum NodeTypeTag {
+ NT_Decl,
+ NT_Stmt,
+ NT_NestedNameSpecifier,
+ NT_NestedNameSpecifierLoc,
+ NT_QualType,
+ NT_Type,
+ NT_TypeLoc
+ } Tag;
+
+ /// \brief Stores the data of the node.
+ ///
+ /// Note that we can store \c Decls and \c Stmts by pointer as they are
+ /// guaranteed to be unique pointers pointing to dedicated storage in the
+ /// AST. \c QualTypes on the other hand do not have storage or unique
+ /// pointers and thus need to be stored by value.
+ llvm::AlignedCharArrayUnion<Decl*, QualType, TypeLoc, NestedNameSpecifierLoc>
+ Storage;
+};
+
+// FIXME: Pull out abstraction for the following.
+template<typename T> struct DynTypedNode::BaseConverter<T,
+ typename llvm::enable_if<llvm::is_base_of<Decl, T> >::type> {
+ static const T *get(NodeTypeTag Tag, const char Storage[]) {
+ if (Tag == NT_Decl)
+ return dyn_cast<T>(*reinterpret_cast<Decl*const*>(Storage));
+ return NULL;
+ }
+ static DynTypedNode create(const Decl &Node) {
+ DynTypedNode Result;
+ Result.Tag = NT_Decl;
+ new (Result.Storage.buffer) const Decl*(&Node);
+ return Result;
+ }
+};
+template<typename T> struct DynTypedNode::BaseConverter<T,
+ typename llvm::enable_if<llvm::is_base_of<Stmt, T> >::type> {
+ static const T *get(NodeTypeTag Tag, const char Storage[]) {
+ if (Tag == NT_Stmt)
+ return dyn_cast<T>(*reinterpret_cast<Stmt*const*>(Storage));
+ return NULL;
+ }
+ static DynTypedNode create(const Stmt &Node) {
+ DynTypedNode Result;
+ Result.Tag = NT_Stmt;
+ new (Result.Storage.buffer) const Stmt*(&Node);
+ return Result;
+ }
+};
+template<typename T> struct DynTypedNode::BaseConverter<T,
+ typename llvm::enable_if<llvm::is_base_of<Type, T> >::type> {
+ static const T *get(NodeTypeTag Tag, const char Storage[]) {
+ if (Tag == NT_Type)
+ return dyn_cast<T>(*reinterpret_cast<Type*const*>(Storage));
+ return NULL;
+ }
+ static DynTypedNode create(const Type &Node) {
+ DynTypedNode Result;
+ Result.Tag = NT_Type;
+ new (Result.Storage.buffer) const Type*(&Node);
+ return Result;
+ }
+};
+template<> struct DynTypedNode::BaseConverter<NestedNameSpecifier, void> {
+ static const NestedNameSpecifier *get(NodeTypeTag Tag, const char Storage[]) {
+ if (Tag == NT_NestedNameSpecifier)
+ return *reinterpret_cast<NestedNameSpecifier*const*>(Storage);
+ return NULL;
+ }
+ static DynTypedNode create(const NestedNameSpecifier &Node) {
+ DynTypedNode Result;
+ Result.Tag = NT_NestedNameSpecifier;
+ new (Result.Storage.buffer) const NestedNameSpecifier*(&Node);
+ return Result;
+ }
+};
+template<> struct DynTypedNode::BaseConverter<NestedNameSpecifierLoc, void> {
+ static const NestedNameSpecifierLoc *get(NodeTypeTag Tag,
+ const char Storage[]) {
+ if (Tag == NT_NestedNameSpecifierLoc)
+ return reinterpret_cast<const NestedNameSpecifierLoc*>(Storage);
+ return NULL;
+ }
+ static DynTypedNode create(const NestedNameSpecifierLoc &Node) {
+ DynTypedNode Result;
+ Result.Tag = NT_NestedNameSpecifierLoc;
+ new (Result.Storage.buffer) NestedNameSpecifierLoc(Node);
+ return Result;
+ }
+};
+template<> struct DynTypedNode::BaseConverter<QualType, void> {
+ static const QualType *get(NodeTypeTag Tag, const char Storage[]) {
+ if (Tag == NT_QualType)
+ return reinterpret_cast<const QualType*>(Storage);
+ return NULL;
+ }
+ static DynTypedNode create(const QualType &Node) {
+ DynTypedNode Result;
+ Result.Tag = NT_QualType;
+ new (Result.Storage.buffer) QualType(Node);
+ return Result;
+ }
+};
+template<> struct DynTypedNode::BaseConverter<TypeLoc, void> {
+ static const TypeLoc *get(NodeTypeTag Tag, const char Storage[]) {
+ if (Tag == NT_TypeLoc)
+ return reinterpret_cast<const TypeLoc*>(Storage);
+ return NULL;
+ }
+ static DynTypedNode create(const TypeLoc &Node) {
+ DynTypedNode Result;
+ Result.Tag = NT_TypeLoc;
+ new (Result.Storage.buffer) TypeLoc(Node);
+ return Result;
+ }
+};
+// The only operation we allow on unsupported types is \c get.
+// This makes it convenient to use \c DynTypedNode with an arbitrary
+// AST node that is not supported, but prevents misuse - a user cannot create
+// a DynTypedNode from arbitrary types.
+template <typename T, typename EnablerT> struct DynTypedNode::BaseConverter {
+ static const T *get(NodeTypeTag Tag, const char Storage[]) { return NULL; }
+};
+
+inline const void *DynTypedNode::getMemoizationData() const {
+ switch (Tag) {
+ case NT_Decl: return BaseConverter<Decl>::get(Tag, Storage.buffer);
+ case NT_Stmt: return BaseConverter<Stmt>::get(Tag, Storage.buffer);
+ default: return NULL;
+ };
+}
+
+} // end namespace ast_type_traits
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_MATCHERS_AST_TYPE_TRAITS_H
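
A minimal usage sketch for the DynTypedNode API above (the inspect() wrapper
is hypothetical; the DynTypedNode calls are as declared in this header):

  #include "clang/AST/Stmt.h"
  #include "clang/ASTMatchers/ASTTypeTraits.h"

  void inspect(const clang::Stmt &S) {
    using clang::ast_type_traits::DynTypedNode;
    DynTypedNode N = DynTypedNode::create(S);   // stored by pointer (NT_Stmt)
    if (const clang::IfStmt *If = N.get<clang::IfStmt>()) {
      (void)If;  // get<T>() dyn_casts within the stored base; NULL on mismatch
    }
    const void *Key = N.getMemoizationData();   // non-NULL only for Decl/Stmt
    (void)Key;
  }
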
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
index b6291f4..73c2e61 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
@@ -23,6 +23,8 @@
namespace clang {
+class TargetInfo;
+
//===----------------------------------------------------------------------===//
/// Common components of both fprintf and fscanf format strings.
namespace analyze_format_string {
@@ -115,11 +117,14 @@ public:
// C99 conversion specifiers.
cArg,
dArg,
+ DArg, // Apple extension
iArg,
- IntArgBeg = cArg, IntArgEnd = iArg,
+ IntArgBeg = dArg, IntArgEnd = iArg,
oArg,
+ OArg, // Apple extension
uArg,
+ UArg, // Apple extension
xArg,
XArg,
UIntArgBeg = oArg, UIntArgEnd = XArg,
@@ -148,9 +153,9 @@ public:
ObjCBeg = ObjCObjArg, ObjCEnd = ObjCObjArg,
// FreeBSD specific specifiers
- bArg,
- DArg,
- rArg,
+ FreeBSDbArg,
+ FreeBSDDArg,
+ FreeBSDrArg,
// GlibC specific specifiers.
PrintErrno, // 'm'
@@ -162,7 +167,7 @@ public:
ScanfConvBeg = ScanListArg, ScanfConvEnd = ScanListArg
};
- ConversionSpecifier(bool isPrintf)
+ ConversionSpecifier(bool isPrintf = true)
: IsPrintf(isPrintf), Position(0), EndScanList(0), kind(InvalidSpecifier) {}
ConversionSpecifier(bool isPrintf, const char *pos, Kind k)
@@ -194,10 +199,14 @@ public:
return EndScanList ? EndScanList - Position : 1;
}
+ bool isIntArg() const { return kind >= IntArgBeg && kind <= IntArgEnd; }
bool isUIntArg() const { return kind >= UIntArgBeg && kind <= UIntArgEnd; }
+ bool isAnyIntArg() const { return kind >= IntArgBeg && kind <= UIntArgEnd; }
const char *toString() const;
bool isPrintfKind() const { return IsPrintf; }
+
+ llvm::Optional<ConversionSpecifier> getStandardSpecifier() const;
protected:
bool IsPrintf;
@@ -353,10 +362,12 @@ public:
bool usesPositionalArg() const { return UsesPositionalArg; }
- bool hasValidLengthModifier() const;
+ bool hasValidLengthModifier(const TargetInfo &Target) const;
bool hasStandardLengthModifier() const;
+ llvm::Optional<LengthModifier> getCorrectedLengthModifier() const;
+
bool hasStandardConversionSpecifier(const LangOptions &LangOpt) const;
bool hasStandardLengthConversionCombination() const;
@@ -383,7 +394,6 @@ public:
: ConversionSpecifier(true, pos, k) {}
bool isObjCArg() const { return kind >= ObjCBeg && kind <= ObjCEnd; }
- bool isIntArg() const { return kind >= IntArgBeg && kind <= IntArgEnd; }
bool isDoubleArg() const { return kind >= DoubleArgBeg &&
kind <= DoubleArgEnd; }
unsigned getLength() const {
@@ -628,10 +638,12 @@ public:
};
bool ParsePrintfString(FormatStringHandler &H,
- const char *beg, const char *end, const LangOptions &LO);
+ const char *beg, const char *end, const LangOptions &LO,
+ const TargetInfo &Target);
bool ParseScanfString(FormatStringHandler &H,
- const char *beg, const char *end, const LangOptions &LO);
+ const char *beg, const char *end, const LangOptions &LO,
+ const TargetInfo &Target);
} // end analyze_format_string namespace
} // end clang namespace
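
A hedged sketch of the corresponding call-site change: the new TargetInfo
argument is simply threaded through (checkFormat() is a hypothetical wrapper):

  #include "clang/Analysis/Analyses/FormatString.h"

  bool checkFormat(clang::analyze_format_string::FormatStringHandler &H,
                   const char *Beg, const char *End,
                   const clang::LangOptions &LO,
                   const clang::TargetInfo &Target) {
    return clang::analyze_format_string::ParsePrintfString(H, Beg, End, LO,
                                                           Target);
  }
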
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
index 742cc04..ef6b821 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
@@ -132,7 +132,8 @@ public:
/// \param Loc -- The location of the protected operation.
virtual void handleMutexNotHeld(const NamedDecl *D,
ProtectedOperationKind POK, Name LockName,
- LockKind LK, SourceLocation Loc) {}
+ LockKind LK, SourceLocation Loc,
+ Name *PossibleMatch=0) {}
/// Warn when a function is called while an excluded mutex is locked. For
/// example, the mutex may be locked inside the function.
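
A sketch of a handler subclass consuming the new parameter (the subclass and
its body are hypothetical; the signature follows the declaration above, and
the comment states an assumption about the parameter's intent):

  #include "clang/Analysis/Analyses/ThreadSafety.h"
  using namespace clang::thread_safety;

  class VerboseHandler : public ThreadSafetyHandler {
  public:
    virtual void handleMutexNotHeld(const clang::NamedDecl *D,
                                    ProtectedOperationKind POK, Name LockName,
                                    LockKind LK, clang::SourceLocation Loc,
                                    Name *PossibleMatch = 0) {
      // Assumption: a non-NULL PossibleMatch names a lock that nearly matches
      // the required one, enabling "did you mean ...?" diagnostics.
      (void)D; (void)POK; (void)LockName; (void)LK; (void)Loc;
      (void)PossibleMatch;
    }
  };
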
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
index 46b4e93..5246678 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
@@ -72,7 +72,7 @@ class AnalysisDeclContext {
/// AnalysisDeclContext. This may be null.
AnalysisDeclContextManager *Manager;
- const Decl *D;
+ const Decl * const D;
OwningPtr<CFG> cfg, completeCFG;
OwningPtr<CFGStmtMap> cfgStmtMap;
@@ -81,9 +81,6 @@ class AnalysisDeclContext {
CFG::BuildOptions::ForcedBlkExprs *forcedBlkExprs;
bool builtCFG, builtCompleteCFG;
-
- OwningPtr<LiveVariables> liveness;
- OwningPtr<LiveVariables> relaxedLiveness;
OwningPtr<ParentMap> PM;
OwningPtr<PseudoConstantAnalysis> PCA;
OwningPtr<CFGReverseBlockReachabilityAnalysis> CFA;
@@ -104,9 +101,15 @@ public:
~AnalysisDeclContext();
- ASTContext &getASTContext() { return D->getASTContext(); }
+ ASTContext &getASTContext() const { return D->getASTContext(); }
const Decl *getDecl() const { return D; }
+ /// Return the AnalysisDeclContextManager (if any) that created
+ /// this AnalysisDeclContext.
+ AnalysisDeclContextManager *getManager() const {
+ return Manager;
+ }
+
/// Return the build options used to construct the CFG.
CFG::BuildOptions &getCFGBuildOptions() {
return cfgBuildOptions;
@@ -234,9 +237,10 @@ public:
const StackFrameContext *getCurrentStackFrame() const;
- virtual void Profile(llvm::FoldingSetNodeID &ID) = 0;
+ /// Return true if the current LocationContext has no caller context.
+ virtual bool inTopFrame() const;
- static bool classof(const LocationContext*) { return true; }
+ virtual void Profile(llvm::FoldingSetNodeID &ID) = 0;
public:
static void ProfileCommon(llvm::FoldingSetNodeID &ID,
@@ -270,6 +274,9 @@ public:
const CFGBlock *getCallSiteBlock() const { return Block; }
+ /// Return true if the current LocationContext has no caller context.
+ virtual bool inTopFrame() const { return getParent() == 0; }
+
unsigned getIndex() const { return Index; }
void Profile(llvm::FoldingSetNodeID &ID);
@@ -379,11 +386,17 @@ class AnalysisDeclContextManager {
ContextMap Contexts;
LocationContextManager LocContexts;
CFG::BuildOptions cfgBuildOptions;
+
+ /// Flag to indicate whether or not bodies should be synthesized
+ /// for well-known functions.
+ bool SynthesizeBodies;
public:
AnalysisDeclContextManager(bool useUnoptimizedCFG = false,
- bool addImplicitDtors = false,
- bool addInitializers = false);
+ bool addImplicitDtors = false,
+ bool addInitializers = false,
+ bool addTemporaryDtors = false,
+ bool synthesizeBodies = false);
~AnalysisDeclContextManager();
@@ -396,6 +409,10 @@ public:
CFG::BuildOptions &getCFGBuildOptions() {
return cfgBuildOptions;
}
+
+ /// Return true if faux bodies should be synthesized for well-known
+ /// functions.
+ bool synthesizeBodies() const { return SynthesizeBodies; }
const StackFrameContext *getStackFrame(AnalysisDeclContext *Ctx,
LocationContext const *Parent,
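
A sketch of a caller opting into the new behavior (argument order per the
updated constructor above; the configure() wrapper is hypothetical):

  #include "clang/Analysis/AnalysisContext.h"

  void configure() {
    clang::AnalysisDeclContextManager Mgr(/*useUnoptimizedCFG=*/false,
                                          /*addImplicitDtors=*/true,
                                          /*addInitializers=*/true,
                                          /*addTemporaryDtors=*/false,
                                          /*synthesizeBodies=*/true);
    if (Mgr.synthesizeBodies()) {
      // Faux bodies for well-known functions will be provided to analyses.
    }
  }
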
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
index 4d087e7..8cc5d81 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
@@ -88,8 +88,6 @@ public:
return static_cast<const ElemTy*>(this);
return 0;
}
-
- static bool classof(const CFGElement *E) { return true; }
};
class CFGStmt : public CFGElement {
@@ -568,6 +566,7 @@ public:
bool AddEHEdges;
bool AddInitializers;
bool AddImplicitDtors;
+ bool AddTemporaryDtors;
bool alwaysAdd(const Stmt *stmt) const {
return alwaysAddMask[stmt->getStmtClass()];
@@ -587,7 +586,8 @@ public:
: forcedBlkExprs(0), PruneTriviallyFalseEdges(true)
,AddEHEdges(false)
,AddInitializers(false)
- ,AddImplicitDtors(false) {}
+ ,AddImplicitDtors(false)
+ ,AddTemporaryDtors(false) {}
};
/// \brief Provides a custom implementation of the iterator class to have the
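
A sketch of enabling the new flag alongside the existing ones (field names
from the hunk above; the wrapper function is hypothetical):

  #include "clang/Analysis/CFG.h"

  void enableTemporaryDtors(clang::CFG::BuildOptions &BO) {
    BO.AddInitializers = true;
    BO.AddImplicitDtors = true;
    BO.AddTemporaryDtors = true;  // new; defaults to false per the constructor
  }
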
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/ObjCNoReturn.h b/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/ObjCNoReturn.h
new file mode 100644
index 0000000..930c2bd
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/ObjCNoReturn.h
@@ -0,0 +1,46 @@
+//= ObjCNoReturn.h - Handling of Cocoa APIs known not to return --*- C++ -*---//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements special handling for recognizing ObjC API hooks that
+// do not return but aren't marked as such in API headers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_DS_OBJCNORETURN
+#define LLVM_CLANG_ANALYSIS_DS_OBJCNORETURN
+
+#include "clang/Basic/IdentifierTable.h"
+
+namespace clang {
+
+class ASTContext;
+class ObjCMessageExpr;
+
+class ObjCNoReturn {
+ /// Cached "raise" selector.
+ Selector RaiseSel;
+
+ /// Cached identifier for "NSException".
+ IdentifierInfo *NSExceptionII;
+
+ enum { NUM_RAISE_SELECTORS = 2 };
+
+ /// Cached set of selectors in NSException that are 'noreturn'.
+ Selector NSExceptionInstanceRaiseSelectors[NUM_RAISE_SELECTORS];
+
+public:
+ ObjCNoReturn(ASTContext &C);
+
+ /// Return true if the given message expression is known to never
+ /// return.
+ bool isImplicitNoReturn(const ObjCMessageExpr *ME);
+};
+}
+
+#endif
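
A minimal usage sketch (all identifiers from the header above; the wrapper
function is hypothetical):

  #include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"

  bool messageNeverReturns(clang::ASTContext &Ctx,
                           const clang::ObjCMessageExpr *ME) {
    clang::ObjCNoReturn NoRet(Ctx);        // caches selectors and identifiers
    return NoRet.isImplicitNoReturn(ME);   // e.g. an NSException 'raise' send
  }
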
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
index 5de06cd..9479978 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
@@ -140,8 +140,6 @@ public:
return ID.ComputeHash();
}
- static bool classof(const ProgramPoint*) { return true; }
-
bool operator==(const ProgramPoint & RHS) const {
return Data1 == RHS.Data1 &&
Data2 == RHS.Data2 &&
@@ -213,7 +211,9 @@ class StmtPoint : public ProgramPoint {
public:
StmtPoint(const Stmt *S, const void *p2, Kind k, const LocationContext *L,
const ProgramPointTag *tag)
- : ProgramPoint(S, p2, k, L, tag) {}
+ : ProgramPoint(S, p2, k, L, tag) {
+ assert(S);
+ }
const Stmt *getStmt() const { return (const Stmt*) getData1(); }
@@ -461,6 +461,7 @@ public:
};
/// Represents a point when we begin processing an inlined call.
+/// CallEnter uses the caller's location context.
class CallEnter : public ProgramPoint {
public:
CallEnter(const Stmt *stmt, const StackFrameContext *calleeCtx,
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
index fade83e..bfe8093 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
@@ -190,7 +190,7 @@ def Availability : InheritableAttr {
[{static llvm::StringRef getPrettyPlatformName(llvm::StringRef Platform) {
return llvm::StringSwitch<llvm::StringRef>(Platform)
.Case("ios", "iOS")
- .Case("macosx", "Mac OS X")
+ .Case("macosx", "OS X")
.Default(llvm::StringRef());
} }];
}
@@ -341,6 +341,11 @@ def Final : InheritableAttr {
let SemaHandler = 0;
}
+def MinSize : InheritableAttr {
+ let Spellings = [GNU<"minsize">];
+ let Subjects = [Function];
+}
+
def Format : InheritableAttr {
let Spellings = [GNU<"format">];
let Args = [StringArgument<"Type">, IntArgument<"FormatIdx">,
@@ -528,6 +533,11 @@ def ObjCReturnsInnerPointer : Attr {
let Subjects = [ObjCMethod];
}
+def ObjCRequiresSuper : InheritableAttr {
+ let Spellings = [GNU<"objc_requires_super">];
+ let Subjects = [ObjCMethod];
+}
+
def ObjCRootClass : Attr {
let Spellings = [GNU<"objc_root_class">];
let Subjects = [ObjCInterface];
@@ -556,6 +566,10 @@ def Packed : InheritableAttr {
let Spellings = [GNU<"packed">];
}
+def PnaclCall : InheritableAttr {
+ let Spellings = [GNU<"pnaclcall">];
+}
+
def Pcs : InheritableAttr {
let Spellings = [GNU<"pcs">];
let Args = [EnumArgument<"PCS", "PCSType",
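
Source-level spellings for two of the new attributes, as a sketch (subjects
per the Attr.td entries above; the semantic effects are handled elsewhere in
clang, so the comments state intent only):

  __attribute__((minsize)) void tiny_function(void);  // MinSize: Function only
  void callback(void) __attribute__((pnaclcall));     // PnaclCall convention
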
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
index 84b2881..d48eadc 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
@@ -41,6 +41,7 @@
// J -> jmp_buf
// SJ -> sigjmp_buf
// K -> ucontext_t
+// p -> pid_t
// . -> "...". This may only occur at the end of the function list.
//
// Types may be prefixed with the following modifiers:
@@ -388,6 +389,7 @@ BUILTIN(__builtin_popcountll, "iULLi", "nc")
// FIXME: These type signatures are not correct for targets with int != 32-bits
// or with ULL != 64-bits.
+BUILTIN(__builtin_bswap16, "UsUs", "nc")
BUILTIN(__builtin_bswap32, "UiUi", "nc")
BUILTIN(__builtin_bswap64, "ULLiULLi", "nc")
@@ -478,6 +480,7 @@ BUILTIN(__builtin_expect, "LiLiLi" , "nc")
BUILTIN(__builtin_prefetch, "vvC*.", "nc")
BUILTIN(__builtin_readcyclecounter, "ULLi", "n")
BUILTIN(__builtin_trap, "v", "nr")
+BUILTIN(__builtin_debugtrap, "v", "n")
BUILTIN(__builtin_unreachable, "v", "nr")
BUILTIN(__builtin_shufflevector, "v." , "nc")
BUILTIN(__builtin_alloca, "v*z" , "n")
@@ -735,7 +738,7 @@ LIBBUILTIN(strcasecmp, "icC*cC*", "f", "strings.h", ALL_LANGUAGES)
LIBBUILTIN(strncasecmp, "icC*cC*z", "f", "strings.h", ALL_LANGUAGES)
// POSIX unistd.h
LIBBUILTIN(_exit, "vi", "fr", "unistd.h", ALL_LANGUAGES)
-LIBBUILTIN(vfork, "i", "fj", "unistd.h", ALL_LANGUAGES)
+LIBBUILTIN(vfork, "p", "fj", "unistd.h", ALL_LANGUAGES)
// POSIX setjmp.h
// In some systems setjmp is a macro that expands to _setjmp. We undefine
@@ -826,9 +829,13 @@ LIBBUILTIN(atan2, "ddd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(atan2l, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(atan2f, "fff", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ceil, "dd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ceill, "LdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ceilf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(ceil, "dd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(ceill, "LdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(ceilf, "ff", "fc", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(copysign, "ddd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(copysignl, "LdLdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(copysignf, "fff", "fc", "math.h", ALL_LANGUAGES)
LIBBUILTIN(cos, "dd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(cosl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
@@ -838,37 +845,53 @@ LIBBUILTIN(exp, "dd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(expl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(expf, "ff", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fabs, "dd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fabsl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fabsf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(exp2, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(exp2l, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(exp2f, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(fabs, "dd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fabsl, "LdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fabsf, "ff", "fc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(floor, "dd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(floorl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(floorf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(floor, "dd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(floorl, "LdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(floorf, "ff", "fc", "math.h", ALL_LANGUAGES)
LIBBUILTIN(fma, "dddd", "fc", "math.h", ALL_LANGUAGES)
LIBBUILTIN(fmal, "LdLdLdLd", "fc", "math.h", ALL_LANGUAGES)
LIBBUILTIN(fmaf, "ffff", "fc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmax, "ddd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmaxl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmaxf, "fff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmax, "ddd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmaxl, "LdLdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmaxf, "fff", "fc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmin, "ddd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fminl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fminf, "fff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmin, "ddd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fminl, "LdLdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fminf, "fff", "fc", "math.h", ALL_LANGUAGES)
LIBBUILTIN(log, "dd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(logl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(logf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(log2, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(log2l, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(log2f, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(nearbyint, "dd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(nearbyintl, "LdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(nearbyintf, "ff", "fc", "math.h", ALL_LANGUAGES)
+
LIBBUILTIN(pow, "ddd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(powl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(powf, "fff", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(round, "dd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(roundl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(roundf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(rint, "dd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(rintl, "LdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(rintf, "ff", "fc", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(round, "dd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(roundl, "LdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(roundf, "ff", "fc", "math.h", ALL_LANGUAGES)
LIBBUILTIN(sin, "dd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(sinl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
@@ -882,6 +905,10 @@ LIBBUILTIN(tan, "dd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(tanl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(tanf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(trunc, "dd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(truncl, "LdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(truncf, "ff", "fc", "math.h", ALL_LANGUAGES)
+
// Blocks runtime Builtin math library functions
LIBBUILTIN(_Block_object_assign, "vv*vC*iC", "f", "Blocks.h", ALL_LANGUAGES)
LIBBUILTIN(_Block_object_dispose, "vvC*iC", "f", "Blocks.h", ALL_LANGUAGES)
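
A sketch exercising two of the newly declared builtins (clang extensions; the
described behavior is an assumption based on their signatures and attribute
strings above):

  unsigned short swap16(unsigned short x) {
    return __builtin_bswap16(x);   // byte-swap, e.g. 0x1234 -> 0x3412
  }

  void stop_here(void) {
    __builtin_debugtrap();         // "n" only (no 'r'): unlike __builtin_trap,
                                   // not marked noreturn, so it is resumable
  }
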
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def
index d013715..43fb907 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def
@@ -14,6 +14,8 @@
// The format of this database matches clang/Basic/Builtins.def.
+// MIPS DSP Rev 1
+
// Add/subtract with optional saturation
BUILTIN(__builtin_mips_addu_qb, "V4ScV4ScV4Sc", "n")
BUILTIN(__builtin_mips_addu_s_qb, "V4ScV4ScV4Sc", "n")
@@ -122,4 +124,65 @@ BUILTIN(__builtin_mips_lbux, "iv*i", "n")
BUILTIN(__builtin_mips_lhx, "iv*i", "n")
BUILTIN(__builtin_mips_lwx, "iv*i", "n")
+// MIPS DSP Rev 2
+
+BUILTIN(__builtin_mips_absq_s_qb, "V4ScV4Sc", "n")
+
+BUILTIN(__builtin_mips_addqh_ph, "V2sV2sV2s", "nc")
+BUILTIN(__builtin_mips_addqh_r_ph, "V2sV2sV2s", "nc")
+BUILTIN(__builtin_mips_addqh_w, "iii", "nc")
+BUILTIN(__builtin_mips_addqh_r_w, "iii", "nc")
+
+BUILTIN(__builtin_mips_addu_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_addu_s_ph, "V2sV2sV2s", "n")
+
+BUILTIN(__builtin_mips_adduh_qb, "V4ScV4ScV4Sc", "nc")
+BUILTIN(__builtin_mips_adduh_r_qb, "V4ScV4ScV4Sc", "nc")
+
+BUILTIN(__builtin_mips_append, "iiiIi", "nc")
+BUILTIN(__builtin_mips_balign, "iiiIi", "nc")
+
+BUILTIN(__builtin_mips_cmpgdu_eq_qb, "iV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_cmpgdu_lt_qb, "iV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_cmpgdu_le_qb, "iV4ScV4Sc", "n")
+
+BUILTIN(__builtin_mips_dpa_w_ph, "LLiLLiV2sV2s", "nc")
+BUILTIN(__builtin_mips_dps_w_ph, "LLiLLiV2sV2s", "nc")
+
+BUILTIN(__builtin_mips_dpaqx_s_w_ph, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_dpaqx_sa_w_ph, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_dpax_w_ph, "LLiLLiV2sV2s", "nc")
+BUILTIN(__builtin_mips_dpsx_w_ph, "LLiLLiV2sV2s", "nc")
+BUILTIN(__builtin_mips_dpsqx_s_w_ph, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_dpsqx_sa_w_ph, "LLiLLiV2sV2s", "n")
+
+BUILTIN(__builtin_mips_mul_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_mul_s_ph, "V2sV2sV2s", "n")
+
+BUILTIN(__builtin_mips_mulq_rs_w, "iii", "n")
+BUILTIN(__builtin_mips_mulq_s_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_mulq_s_w, "iii", "n")
+BUILTIN(__builtin_mips_mulsa_w_ph, "LLiLLiV2sV2s", "nc")
+
+BUILTIN(__builtin_mips_precr_qb_ph, "V4ScV2sV2s", "n")
+BUILTIN(__builtin_mips_precr_sra_ph_w, "V2siiIi", "nc")
+BUILTIN(__builtin_mips_precr_sra_r_ph_w, "V2siiIi", "nc")
+
+BUILTIN(__builtin_mips_prepend, "iiiIi", "nc")
+
+BUILTIN(__builtin_mips_shra_qb, "V4ScV4Sci", "nc")
+BUILTIN(__builtin_mips_shra_r_qb, "V4ScV4Sci", "nc")
+BUILTIN(__builtin_mips_shrl_ph, "V2sV2si", "nc")
+
+BUILTIN(__builtin_mips_subqh_ph, "V2sV2sV2s", "nc")
+BUILTIN(__builtin_mips_subqh_r_ph, "V2sV2sV2s", "nc")
+BUILTIN(__builtin_mips_subqh_w, "iii", "nc")
+BUILTIN(__builtin_mips_subqh_r_w, "iii", "nc")
+
+BUILTIN(__builtin_mips_subu_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_subu_s_ph, "V2sV2sV2s", "n")
+
+BUILTIN(__builtin_mips_subuh_qb, "V4ScV4ScV4Sc", "nc")
+BUILTIN(__builtin_mips_subuh_r_qb, "V4ScV4ScV4Sc", "nc")
+
#undef BUILTIN
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsNVPTX.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsNVPTX.def
index f90a43f..3c3f06c 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsNVPTX.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsNVPTX.def
@@ -14,6 +14,7 @@
// The format of this database matches clang/Basic/Builtins.def.
+// Builtins retained from previous PTX back-end
BUILTIN(__builtin_ptx_read_tid_x, "i", "nc")
BUILTIN(__builtin_ptx_read_tid_y, "i", "nc")
BUILTIN(__builtin_ptx_read_tid_z, "i", "nc")
@@ -59,4 +60,249 @@ BUILTIN(__builtin_ptx_read_pm3, "i", "n")
BUILTIN(__builtin_ptx_bar_sync, "vi", "n")
+// Builtins exposed as part of NVVM
+BUILTIN(__syncthreads, "v", "n")
+BUILTIN(__nvvm_bar0, "v", "n")
+BUILTIN(__nvvm_bar0_popc, "ii", "n")
+BUILTIN(__nvvm_bar0_and, "ii", "n")
+BUILTIN(__nvvm_bar0_or, "ii", "n")
+BUILTIN(__nvvm_membar_cta, "v", "n")
+BUILTIN(__nvvm_membar_gl, "v", "n")
+BUILTIN(__nvvm_membar_sys, "v", "n")
+BUILTIN(__nvvm_popc_i, "ii", "nc")
+BUILTIN(__nvvm_popc_ll, "LiLi", "nc")
+BUILTIN(__nvvm_prmt, "UiUiUiUi", "nc")
+BUILTIN(__nvvm_min_i, "iii", "nc")
+BUILTIN(__nvvm_min_ui, "UiUiUi", "nc")
+BUILTIN(__nvvm_min_ll, "LLiLLiLLi", "nc")
+BUILTIN(__nvvm_min_ull, "ULLiULLiULLi", "nc")
+BUILTIN(__nvvm_max_i, "iii", "nc")
+BUILTIN(__nvvm_max_ui, "UiUiUi", "nc")
+BUILTIN(__nvvm_max_ll, "LLiLLiLLi", "nc")
+BUILTIN(__nvvm_max_ull, "ULLiULLiULLi", "nc")
+BUILTIN(__nvvm_mulhi_i, "iii", "nc")
+BUILTIN(__nvvm_mulhi_ui, "UiUiUi", "nc")
+BUILTIN(__nvvm_mulhi_ll, "LLiLLiLLi", "nc")
+BUILTIN(__nvvm_mulhi_ull, "ULLiULLiULLi", "nc")
+BUILTIN(__nvvm_mul24_i, "iii", "nc")
+BUILTIN(__nvvm_mul24_ui, "UiUiUi", "nc")
+BUILTIN(__nvvm_brev32, "UiUi", "nc")
+BUILTIN(__nvvm_brev64, "ULLiULLi", "nc")
+BUILTIN(__nvvm_sad_i, "iiii", "nc")
+BUILTIN(__nvvm_sad_ui, "UiUiUiUi", "nc")
+BUILTIN(__nvvm_abs_i, "ii", "nc")
+BUILTIN(__nvvm_abs_ll, "LiLi", "nc")
+BUILTIN(__nvvm_floor_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_floor_f, "ff", "nc")
+BUILTIN(__nvvm_floor_d, "dd", "nc")
+BUILTIN(__nvvm_fabs_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_fabs_f, "ff", "nc")
+BUILTIN(__nvvm_fabs_d, "dd", "nc")
+BUILTIN(__nvvm_rcp_approx_ftz_d, "dd", "nc")
+BUILTIN(__nvvm_fmin_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_fmin_f, "fff", "nc")
+BUILTIN(__nvvm_fmax_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_fmax_f, "fff", "nc")
+BUILTIN(__nvvm_rsqrt_approx_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_rsqrt_approx_f, "ff", "nc")
+BUILTIN(__nvvm_fmin_d, "ddd", "nc")
+BUILTIN(__nvvm_fmax_d, "ddd", "nc")
+BUILTIN(__nvvm_rsqrt_approx_d, "dd", "nc")
+BUILTIN(__nvvm_ceil_d, "dd", "nc")
+BUILTIN(__nvvm_trunc_d, "dd", "nc")
+BUILTIN(__nvvm_round_d, "dd", "nc")
+BUILTIN(__nvvm_ex2_approx_d, "dd", "nc")
+BUILTIN(__nvvm_lg2_approx_d, "dd", "nc")
+BUILTIN(__nvvm_round_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_round_f, "ff", "nc")
+BUILTIN(__nvvm_ex2_approx_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_ex2_approx_f, "ff", "nc")
+BUILTIN(__nvvm_lg2_approx_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_lg2_approx_f, "ff", "nc")
+BUILTIN(__nvvm_sin_approx_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_sin_approx_f, "ff", "nc")
+BUILTIN(__nvvm_cos_approx_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_cos_approx_f, "ff", "nc")
+BUILTIN(__nvvm_trunc_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_trunc_f, "ff", "nc")
+BUILTIN(__nvvm_ceil_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_ceil_f, "ff", "nc")
+BUILTIN(__nvvm_saturate_d, "dd", "nc")
+BUILTIN(__nvvm_saturate_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_saturate_f, "ff", "nc")
+BUILTIN(__nvvm_fma_rn_ftz_f, "ffff", "nc")
+BUILTIN(__nvvm_fma_rn_f, "ffff", "nc")
+BUILTIN(__nvvm_fma_rz_ftz_f, "ffff", "nc")
+BUILTIN(__nvvm_fma_rz_f, "ffff", "nc")
+BUILTIN(__nvvm_fma_rm_ftz_f, "ffff", "nc")
+BUILTIN(__nvvm_fma_rm_f, "ffff", "nc")
+BUILTIN(__nvvm_fma_rp_ftz_f, "ffff", "nc")
+BUILTIN(__nvvm_fma_rp_f, "ffff", "nc")
+BUILTIN(__nvvm_fma_rn_d, "dddd", "nc")
+BUILTIN(__nvvm_fma_rz_d, "dddd", "nc")
+BUILTIN(__nvvm_fma_rm_d, "dddd", "nc")
+BUILTIN(__nvvm_fma_rp_d, "dddd", "nc")
+BUILTIN(__nvvm_div_approx_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_div_approx_f, "fff", "nc")
+BUILTIN(__nvvm_div_rn_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_div_rn_f, "fff", "nc")
+BUILTIN(__nvvm_div_rz_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_div_rz_f, "fff", "nc")
+BUILTIN(__nvvm_div_rm_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_div_rm_f, "fff", "nc")
+BUILTIN(__nvvm_div_rp_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_div_rp_f, "fff", "nc")
+BUILTIN(__nvvm_rcp_rn_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_rcp_rn_f, "ff", "nc")
+BUILTIN(__nvvm_rcp_rz_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_rcp_rz_f, "ff", "nc")
+BUILTIN(__nvvm_rcp_rm_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_rcp_rm_f, "ff", "nc")
+BUILTIN(__nvvm_rcp_rp_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_rcp_rp_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_rn_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_rn_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_rz_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_rz_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_rm_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_rm_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_rp_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_rp_f, "ff", "nc")
+BUILTIN(__nvvm_div_rn_d, "ddd", "nc")
+BUILTIN(__nvvm_div_rz_d, "ddd", "nc")
+BUILTIN(__nvvm_div_rm_d, "ddd", "nc")
+BUILTIN(__nvvm_div_rp_d, "ddd", "nc")
+BUILTIN(__nvvm_rcp_rn_d, "dd", "nc")
+BUILTIN(__nvvm_rcp_rz_d, "dd", "nc")
+BUILTIN(__nvvm_rcp_rm_d, "dd", "nc")
+BUILTIN(__nvvm_rcp_rp_d, "dd", "nc")
+BUILTIN(__nvvm_sqrt_rn_d, "dd", "nc")
+BUILTIN(__nvvm_sqrt_rz_d, "dd", "nc")
+BUILTIN(__nvvm_sqrt_rm_d, "dd", "nc")
+BUILTIN(__nvvm_sqrt_rp_d, "dd", "nc")
+BUILTIN(__nvvm_sqrt_approx_ftz_f, "ff", "nc")
+BUILTIN(__nvvm_sqrt_approx_f, "ff", "nc")
+BUILTIN(__nvvm_add_rn_d, "ddd", "nc")
+BUILTIN(__nvvm_add_rz_d, "ddd", "nc")
+BUILTIN(__nvvm_add_rm_d, "ddd", "nc")
+BUILTIN(__nvvm_add_rp_d, "ddd", "nc")
+BUILTIN(__nvvm_mul_rn_d, "ddd", "nc")
+BUILTIN(__nvvm_mul_rz_d, "ddd", "nc")
+BUILTIN(__nvvm_mul_rm_d, "ddd", "nc")
+BUILTIN(__nvvm_mul_rp_d, "ddd", "nc")
+BUILTIN(__nvvm_add_rm_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_add_rm_f, "fff", "nc")
+BUILTIN(__nvvm_add_rp_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_add_rp_f, "fff", "nc")
+BUILTIN(__nvvm_mul_rm_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_mul_rm_f, "fff", "nc")
+BUILTIN(__nvvm_mul_rp_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_mul_rp_f, "fff", "nc")
+BUILTIN(__nvvm_add_rn_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_add_rn_f, "fff", "nc")
+BUILTIN(__nvvm_add_rz_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_add_rz_f, "fff", "nc")
+BUILTIN(__nvvm_mul_rn_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_mul_rn_f, "fff", "nc")
+BUILTIN(__nvvm_mul_rz_ftz_f, "fff", "nc")
+BUILTIN(__nvvm_mul_rz_f, "fff", "nc")
+BUILTIN(__nvvm_d2f_rn_ftz, "fd", "nc")
+BUILTIN(__nvvm_d2f_rn, "fd", "nc")
+BUILTIN(__nvvm_d2f_rz_ftz, "fd", "nc")
+BUILTIN(__nvvm_d2f_rz, "fd", "nc")
+BUILTIN(__nvvm_d2f_rm_ftz, "fd", "nc")
+BUILTIN(__nvvm_d2f_rm, "fd", "nc")
+BUILTIN(__nvvm_d2f_rp_ftz, "fd", "nc")
+BUILTIN(__nvvm_d2f_rp, "fd", "nc")
+BUILTIN(__nvvm_d2i_rn, "id", "nc")
+BUILTIN(__nvvm_d2i_rz, "id", "nc")
+BUILTIN(__nvvm_d2i_rm, "id", "nc")
+BUILTIN(__nvvm_d2i_rp, "id", "nc")
+BUILTIN(__nvvm_d2ui_rn, "Uid", "nc")
+BUILTIN(__nvvm_d2ui_rz, "Uid", "nc")
+BUILTIN(__nvvm_d2ui_rm, "Uid", "nc")
+BUILTIN(__nvvm_d2ui_rp, "Uid", "nc")
+BUILTIN(__nvvm_i2d_rn, "di", "nc")
+BUILTIN(__nvvm_i2d_rz, "di", "nc")
+BUILTIN(__nvvm_i2d_rm, "di", "nc")
+BUILTIN(__nvvm_i2d_rp, "di", "nc")
+BUILTIN(__nvvm_ui2d_rn, "dUi", "nc")
+BUILTIN(__nvvm_ui2d_rz, "dUi", "nc")
+BUILTIN(__nvvm_ui2d_rm, "dUi", "nc")
+BUILTIN(__nvvm_ui2d_rp, "dUi", "nc")
+BUILTIN(__nvvm_f2i_rn_ftz, "if", "nc")
+BUILTIN(__nvvm_f2i_rn, "if", "nc")
+BUILTIN(__nvvm_f2i_rz_ftz, "if", "nc")
+BUILTIN(__nvvm_f2i_rz, "if", "nc")
+BUILTIN(__nvvm_f2i_rm_ftz, "if", "nc")
+BUILTIN(__nvvm_f2i_rm, "if", "nc")
+BUILTIN(__nvvm_f2i_rp_ftz, "if", "nc")
+BUILTIN(__nvvm_f2i_rp, "if", "nc")
+BUILTIN(__nvvm_f2ui_rn_ftz, "Uif", "nc")
+BUILTIN(__nvvm_f2ui_rn, "Uif", "nc")
+BUILTIN(__nvvm_f2ui_rz_ftz, "Uif", "nc")
+BUILTIN(__nvvm_f2ui_rz, "Uif", "nc")
+BUILTIN(__nvvm_f2ui_rm_ftz, "Uif", "nc")
+BUILTIN(__nvvm_f2ui_rm, "Uif", "nc")
+BUILTIN(__nvvm_f2ui_rp_ftz, "Uif", "nc")
+BUILTIN(__nvvm_f2ui_rp, "Uif", "nc")
+BUILTIN(__nvvm_i2f_rn, "fi", "nc")
+BUILTIN(__nvvm_i2f_rz, "fi", "nc")
+BUILTIN(__nvvm_i2f_rm, "fi", "nc")
+BUILTIN(__nvvm_i2f_rp, "fi", "nc")
+BUILTIN(__nvvm_ui2f_rn, "fUi", "nc")
+BUILTIN(__nvvm_ui2f_rz, "fUi", "nc")
+BUILTIN(__nvvm_ui2f_rm, "fUi", "nc")
+BUILTIN(__nvvm_ui2f_rp, "fUi", "nc")
+BUILTIN(__nvvm_lohi_i2d, "dii", "nc")
+BUILTIN(__nvvm_d2i_lo, "id", "nc")
+BUILTIN(__nvvm_d2i_hi, "id", "nc")
+BUILTIN(__nvvm_f2ll_rn_ftz, "LLif", "nc")
+BUILTIN(__nvvm_f2ll_rn, "LLif", "nc")
+BUILTIN(__nvvm_f2ll_rz_ftz, "LLif", "nc")
+BUILTIN(__nvvm_f2ll_rz, "LLif", "nc")
+BUILTIN(__nvvm_f2ll_rm_ftz, "LLif", "nc")
+BUILTIN(__nvvm_f2ll_rm, "LLif", "nc")
+BUILTIN(__nvvm_f2ll_rp_ftz, "LLif", "nc")
+BUILTIN(__nvvm_f2ll_rp, "LLif", "nc")
+BUILTIN(__nvvm_f2ull_rn_ftz, "ULLif", "nc")
+BUILTIN(__nvvm_f2ull_rn, "ULLif", "nc")
+BUILTIN(__nvvm_f2ull_rz_ftz, "ULLif", "nc")
+BUILTIN(__nvvm_f2ull_rz, "ULLif", "nc")
+BUILTIN(__nvvm_f2ull_rm_ftz, "ULLif", "nc")
+BUILTIN(__nvvm_f2ull_rm, "ULLif", "nc")
+BUILTIN(__nvvm_f2ull_rp_ftz, "ULLif", "nc")
+BUILTIN(__nvvm_f2ull_rp, "ULLif", "nc")
+BUILTIN(__nvvm_d2ll_rn, "LLid", "nc")
+BUILTIN(__nvvm_d2ll_rz, "LLid", "nc")
+BUILTIN(__nvvm_d2ll_rm, "LLid", "nc")
+BUILTIN(__nvvm_d2ll_rp, "LLid", "nc")
+BUILTIN(__nvvm_d2ull_rn, "ULLid", "nc")
+BUILTIN(__nvvm_d2ull_rz, "ULLid", "nc")
+BUILTIN(__nvvm_d2ull_rm, "ULLid", "nc")
+BUILTIN(__nvvm_d2ull_rp, "ULLid", "nc")
+BUILTIN(__nvvm_ll2f_rn, "fLLi", "nc")
+BUILTIN(__nvvm_ll2f_rz, "fLLi", "nc")
+BUILTIN(__nvvm_ll2f_rm, "fLLi", "nc")
+BUILTIN(__nvvm_ll2f_rp, "fLLi", "nc")
+BUILTIN(__nvvm_ull2f_rn, "fULLi", "nc")
+BUILTIN(__nvvm_ull2f_rz, "fULLi", "nc")
+BUILTIN(__nvvm_ull2f_rm, "fULLi", "nc")
+BUILTIN(__nvvm_ull2f_rp, "fULLi", "nc")
+BUILTIN(__nvvm_ll2d_rn, "dLLi", "nc")
+BUILTIN(__nvvm_ll2d_rz, "dLLi", "nc")
+BUILTIN(__nvvm_ll2d_rm, "dLLi", "nc")
+BUILTIN(__nvvm_ll2d_rp, "dLLi", "nc")
+BUILTIN(__nvvm_ull2d_rn, "dULLi", "nc")
+BUILTIN(__nvvm_ull2d_rz, "dULLi", "nc")
+BUILTIN(__nvvm_ull2d_rm, "dULLi", "nc")
+BUILTIN(__nvvm_ull2d_rp, "dULLi", "nc")
+BUILTIN(__nvvm_f2h_rn_ftz, "Usf", "nc")
+BUILTIN(__nvvm_f2h_rn, "Usf", "nc")
+BUILTIN(__nvvm_h2f, "fUs", "nc")
+BUILTIN(__nvvm_bitcast_i2f, "fi", "nc")
+BUILTIN(__nvvm_bitcast_f2i, "if", "nc")
+BUILTIN(__nvvm_bitcast_ll2d, "dLLi", "nc")
+BUILTIN(__nvvm_bitcast_d2ll, "LLid", "nc")
+
#undef BUILTIN
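
In Builtins.def entries like the ones above, the second string encodes the signature — the first letter is the return type, the rest are parameter types: 'f' float, 'd' double, 'i' int, 's' short, 'LLi' long long, with a 'U' prefix for unsigned — and the third string encodes attributes: 'n' nothrow, 'c' const. The _rn/_rz/_rm/_rp suffixes select the rounding mode (to nearest even, toward zero, toward -inf, toward +inf), and _ftz variants flush denormals to zero. A minimal device-side sketch, assuming clang's CUDA/NVPTX path (the function name is illustrative):

    __device__ void demo(float a, float b, float c) {
      double r = __nvvm_sqrt_rn_d(2.0);     // "dd": double(double), round-to-nearest sqrt
      float  m = __nvvm_fma_rn_f(a, b, c);  // "ffff": float fused multiply-add
      (void)r; (void)m;                     // silence unused-variable warnings
    }
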
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
index 75e6074..5b46f8e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
@@ -613,6 +613,12 @@ BUILTIN(__builtin_ia32_gatherd_d256, "V8iV8iV8iC*V8iV8iIc", "")
BUILTIN(__builtin_ia32_gatherq_d, "V4iV4iV4iC*V2LLiV4iIc", "")
BUILTIN(__builtin_ia32_gatherq_d256, "V4iV4iV4iC*V4LLiV4iIc", "")
+// F16C
+BUILTIN(__builtin_ia32_vcvtps2ph, "V8sV4fIi", "")
+BUILTIN(__builtin_ia32_vcvtps2ph256, "V8sV8fIi", "")
+BUILTIN(__builtin_ia32_vcvtph2ps, "V4fV8s", "")
+BUILTIN(__builtin_ia32_vcvtph2ps256, "V8fV8s", "")
+
// RDRAND
BUILTIN(__builtin_ia32_rdrand16_step, "UiUs*", "")
BUILTIN(__builtin_ia32_rdrand32_step, "UiUi*", "")
@@ -730,5 +736,8 @@ BUILTIN(__builtin_ia32_vfrczps, "V4fV4f", "")
BUILTIN(__builtin_ia32_vfrczpd, "V2dV2d", "")
BUILTIN(__builtin_ia32_vfrczps256, "V8fV8f", "")
BUILTIN(__builtin_ia32_vfrczpd256, "V4dV4d", "")
+BUILTIN(__builtin_ia32_xbegin, "i", "")
+BUILTIN(__builtin_ia32_xend, "v", "")
+BUILTIN(__builtin_ia32_xabort, "vIc", "")
#undef BUILTIN
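
The new F16C builtins convert between packed single- and half-precision values; the xbegin/xend/xabort trio exposes Intel RTM transactional memory. A hedged sketch of the usual RTM pattern (it assumes an RTM-capable target; per the "i" signature above, __builtin_ia32_xbegin returns an int that compares equal to -1 when the transaction actually starts, and a real fallback path would take a lock):

    static int counter;
    void increment(void) {
      if (__builtin_ia32_xbegin() == -1) {  // transaction started
        ++counter;                          // executes transactionally
        __builtin_ia32_xend();              // commit
      } else {
        ++counter;                          // abort path; unsynchronized, sketch only
      }
    }
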
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h b/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
index e7cfa8a..cdc4269 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
@@ -147,7 +147,6 @@ ConversionResult ConvertUTF32toUTF8 (
const UTF32** sourceStart, const UTF32* sourceEnd,
UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
-#ifdef CLANG_NEEDS_THESE_ONE_DAY
ConversionResult ConvertUTF16toUTF32 (
const UTF16** sourceStart, const UTF16* sourceEnd,
UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
@@ -159,7 +158,9 @@ ConversionResult ConvertUTF32toUTF16 (
Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
-Boolean isLegalUTF8String(const UTF8 *source, const UTF8 *sourceEnd);
+Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd);
+
+unsigned getNumBytesForUTF8(UTF8 firstByte);
#ifdef __cplusplus
}
@@ -175,11 +176,13 @@ namespace clang {
* Convert a UTF8 StringRef to UTF8, UTF16, or UTF32 depending on
* WideCharWidth. The converted data is written to ResultPtr, which needs to
* point to at least WideCharWidth * (Source.size() + 1) bytes. On success,
- * ResultPtr will point one after the end of the copied string.
+ * ResultPtr will point one after the end of the copied string. On failure,
+ * ResultPtr will not be changed, and ErrorPtr will be set to the location of
+ * the first character which could not be converted.
* \return true on success.
*/
bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
- char *&ResultPtr);
+ char *&ResultPtr, const UTF8 *&ErrorPtr);
/**
* Convert a Unicode code point to a UTF8 sequence.
@@ -194,7 +197,6 @@ bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr);
}
-#endif
#endif
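
With the revised signature, a failed conversion leaves ResultPtr untouched and reports the first offending byte through ErrorPtr. A caller sketch under those semantics (names here are illustrative; buffer sizing follows the comment above):

    #include "clang/Basic/ConvertUTF.h"
    #include <vector>

    void widen(llvm::StringRef Src) {
      std::vector<char> Buf(4 * (Src.size() + 1));  // WideCharWidth == 4 for UTF-32
      char *Out = &Buf[0];
      const UTF8 *Err = 0;
      if (!clang::ConvertUTF8toWide(4, Src, Out, Err)) {
        // Err points at the first byte that could not be converted; Out is unchanged.
      }
    }
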
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
index 3997fb8..e47f3e1 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
@@ -16,6 +16,7 @@
#define LLVM_CLANG_DIAGNOSTIC_H
#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
@@ -29,6 +30,7 @@
namespace clang {
class DiagnosticConsumer;
class DiagnosticBuilder;
+ class DiagnosticOptions;
class IdentifierInfo;
class DeclContext;
class LangOptions;
@@ -160,13 +162,6 @@ public:
ak_qualtype_pair ///< pair<QualType, QualType>
};
- /// \brief Specifies which overload candidates to display when overload
- /// resolution fails.
- enum OverloadsShown {
- Ovl_All, ///< Show all overloads.
- Ovl_Best ///< Show just the "best" overload candidates.
- };
-
/// \brief Represents one argument value, which is a union discriminated
/// by ArgumentKind, with a value.
typedef std::pair<ArgumentKind, intptr_t> ArgumentValue;
@@ -190,6 +185,7 @@ private:
// backtrace stack, 0 -> no limit.
ExtensionHandling ExtBehavior; // Map extensions onto warnings or errors?
IntrusiveRefCntPtr<DiagnosticIDs> Diags;
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
DiagnosticConsumer *Client;
bool OwnsDiagClient;
SourceManager *SourceMgr;
@@ -341,6 +337,7 @@ private:
public:
explicit DiagnosticsEngine(
const IntrusiveRefCntPtr<DiagnosticIDs> &Diags,
+ DiagnosticOptions *DiagOpts,
DiagnosticConsumer *client = 0,
bool ShouldOwnClient = true);
~DiagnosticsEngine();
@@ -349,6 +346,9 @@ public:
return Diags;
}
+ /// \brief Retrieve the diagnostic options.
+ DiagnosticOptions &getDiagnosticOptions() const { return *DiagOpts; }
+
DiagnosticConsumer *getClient() { return Client; }
const DiagnosticConsumer *getClient() const { return Client; }
@@ -478,10 +478,13 @@ public:
}
OverloadsShown getShowOverloads() const { return ShowOverloads; }
- /// \brief Pretend that the last diagnostic issued was ignored.
+ /// \brief Pretend that the last diagnostic issued was ignored, so any
+ /// subsequent notes will be suppressed.
///
/// This can be used by clients who suppress diagnostics themselves.
void setLastDiagnosticIgnored() {
+ if (LastDiagLevel == DiagnosticIDs::Fatal)
+ FatalErrorOccurred = true;
LastDiagLevel = DiagnosticIDs::Ignored;
}
@@ -584,7 +587,7 @@ public:
const char *Argument, unsigned ArgLen,
const ArgumentValue *PrevArgs, unsigned NumPrevArgs,
SmallVectorImpl<char> &Output,
- SmallVectorImpl<intptr_t> &QualTypeVals) const {
+ ArrayRef<intptr_t> QualTypeVals) const {
ArgToStringFn(Kind, Val, Modifier, ModLen, Argument, ArgLen,
PrevArgs, NumPrevArgs, Output, ArgToStringCookie,
QualTypeVals);
@@ -837,7 +840,7 @@ class DiagnosticBuilder {
/// call to ForceEmit.
mutable bool IsForceEmit;
- void operator=(const DiagnosticBuilder&); // DO NOT IMPLEMENT
+ void operator=(const DiagnosticBuilder &) LLVM_DELETED_FUNCTION;
friend class DiagnosticsEngine;
DiagnosticBuilder()
@@ -961,6 +964,10 @@ public:
"Too many arguments to diagnostic!");
DiagObj->DiagFixItHints[NumFixits++] = Hint;
}
+
+ bool hasMaxRanges() const {
+ return NumRanges == DiagnosticsEngine::MaxRanges;
+ }
};
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
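
The added DiagnosticOptions parameter threads the options into the engine, so construction now looks roughly like this (a sketch of the pattern implied by the new signature, not code from the patch):

    using namespace clang;
    IntrusiveRefCntPtr<DiagnosticIDs> IDs(new DiagnosticIDs());
    IntrusiveRefCntPtr<DiagnosticOptions> Opts(new DiagnosticOptions());
    DiagnosticsEngine Diags(IDs, Opts.getPtr());  // engine retains a reference to Opts
    Diags.getDiagnosticOptions().ShowColors = 1;  // options are now reachable from the engine
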
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
index 9cfe5ef..d869c99 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -137,7 +137,7 @@ def err_odr_function_type_inconsistent : Error<
def warn_odr_tag_type_inconsistent : Warning<
"type %0 has incompatible definitions in different translation units">;
def note_odr_tag_kind_here: Note<
- "%0 is a %select{struct|union|class|enum}1 here">;
+ "%0 is a %select{struct|interface|union|class|enum}1 here">;
def note_odr_field : Note<"field %0 has type %1 here">;
def note_odr_missing_field : Note<"no corresponding field here">;
def note_odr_bit_field : Note<"bit-field %0 with type %1 and length %2 here">;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td
index 235ca79..e6dfe5b 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td
@@ -121,5 +121,21 @@ def warn_doc_returns_attached_to_a_void_function : Warning<
"method returning void}1">,
InGroup<Documentation>, DefaultIgnore;
+// \deprecated command
+
+def warn_doc_deprecated_not_sync : Warning<
+ "declaration is marked with '\\deprecated' command but does not have "
+ "a deprecation attribute">,
+ InGroup<DocumentationDeprecatedSync>, DefaultIgnore;
+
+def note_add_deprecation_attr : Note<
+ "add a deprecation attribute to the declaration to silence this warning">;
+
+// verbatim block commands
+
+def warn_verbatim_block_end_without_start : Warning<
+ "'\\%0' command does not terminate a verbatim text block">,
+ InGroup<Documentation>, DefaultIgnore;
+
} // end of documentation issue category
} // end of AST component
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
index f859287..a6ce9d4 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -78,9 +78,12 @@ def note_decl_hiding_tag_type : Note<
"%1 %0 is hidden by a non-type declaration of %0 here">;
// Sema && Lex
-def ext_longlong : Extension<
+def ext_c99_longlong : Extension<
"'long long' is an extension when C99 mode is not enabled">,
InGroup<LongLong>;
+def ext_cxx11_longlong : Extension<
+ "'long long' is a C++11 extension">,
+ InGroup<CXX11LongLong>;
def warn_cxx98_compat_longlong : Warning<
"'long long' is incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 583b234..4b43035 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -11,6 +11,8 @@ let Component = "Driver" in {
def err_drv_no_such_file : Error<"no such file or directory: '%0'">;
def err_drv_unsupported_opt : Error<"unsupported option '%0'">;
+def err_drv_unsupported_opt_for_target : Error<
+ "unsupported option '%0' for target '%1'">;
def err_drv_unsupported_option_argument : Error<
"unsupported argument '%1' to option '%0'">;
def err_drv_unknown_stdin_type : Error<
@@ -91,10 +93,10 @@ def err_drv_invalid_arch_for_deployment_target : Error<
"invalid architecture '%0' for deployment target '%1'">;
def err_drv_objc_gc_arr : Error<
"cannot specify both '-fobjc-arc' and '%0'">;
-def err_arc_nonfragile_abi : Error<
- "-fobjc-arc is not supported with legacy abi">;
-def err_arc_unsupported : Error<
- "-fobjc-arc is not supported on current deployment target">;
+def err_arc_unsupported_on_runtime : Error<
+ "-fobjc-arc is not supported on platforms using the legacy runtime">;
+def err_arc_unsupported_on_toolchain : Error< // feel free to generalize this
+ "-fobjc-arc is not supported on versions of OS X prior to 10.6">;
def err_drv_mg_requires_m_or_mm : Error<
"option '-MG' requires '-M' or '-MM'">;
def err_drv_asan_android_requires_pie : Error<
@@ -119,14 +121,10 @@ def warn_drv_unused_argument : Warning<
def warn_drv_empty_joined_argument : Warning<
"joined argument expects additional value: '%0'">,
InGroup<DiagGroup<"unused-command-line-argument">>;
-def warn_drv_not_using_clang_cpp : Warning<
- "not using the clang preprocessor due to user override">;
-def warn_drv_not_using_clang_cxx : Warning<
- "not using the clang compiler for C++ inputs">;
-def warn_drv_not_using_clang_arch : Warning<
- "not using the clang compiler for the '%0' architecture">;
def warn_drv_clang_unsupported : Warning<
"the clang compiler does not support '%0'">;
+def warn_drv_deprecated_arg : Warning<
+ "argument '%0' is deprecated, use '%1' instead">, InGroup<Deprecated>;
def warn_drv_assuming_mfloat_abi_is : Warning<
"unknown platform, assuming -mfloat-abi=%0">;
def warn_ignoring_ftabstop_value : Warning<
@@ -141,5 +139,9 @@ def warn_drv_pch_not_first_include : Warning<
def note_drv_command_failed_diag_msg : Note<
"diagnostic msg: %0">;
-
+
+def err_analyzer_config_no_value : Error<
+ "analyzer-config option '%0' has a key but no value">;
+def err_analyzer_config_multiple_values : Error<
+ "analyzer-config option '%0' should contain only one '='">;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index 417a22c..b7a8476 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -58,6 +58,8 @@ def warn_fe_cc_print_header_failure : Warning<
"unable to open CC_PRINT_HEADERS file: %0 (using stderr)">;
def warn_fe_cc_log_diagnostics_failure : Warning<
"unable to open CC_LOG_DIAGNOSTICS file: %0 (using stderr)">;
+def err_fe_no_pch_in_dir : Error<
+ "no suitable precompiled header file found in directory '%0'">;
def warn_fe_serialized_diag_failure : Warning<
"unable to open file %0 for serializing diagnostics (%1)">,
@@ -76,6 +78,11 @@ def err_verify_invalid_content : Error<
def err_verify_inconsistent_diags : Error<
"'%0' diagnostics %select{expected|seen}1 but not %select{seen|expected}1: "
"%2">;
+def err_verify_invalid_no_diags : Error<
+ "%select{expected|'expected-no-diagnostics'}0 directive cannot follow "
+ "%select{'expected-no-diagnostics' directive|other expected directives}0">;
+def err_verify_no_directives : Error<
+ "no expected directives found: consider use of 'expected-no-diagnostics'">;
def note_fixit_applied : Note<"FIX-IT applied suggested code changes">;
def note_fixit_in_macro : Note<
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
index d8632dd..f9f9ec7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
@@ -22,8 +22,6 @@ def : DiagGroup<"address">;
def AddressOfTemporary : DiagGroup<"address-of-temporary">;
def : DiagGroup<"aggregate-return">;
def AmbigMemberTemplate : DiagGroup<"ambiguous-member-template">;
-def : DiagGroup<"attributes">;
-def : DiagGroup<"bad-function-cast">;
def Availability : DiagGroup<"availability">;
def Section : DiagGroup<"section">;
def AutoImport : DiagGroup<"auto-import">;
@@ -33,8 +31,11 @@ def StringConversion : DiagGroup<"string-conversion">;
def SignConversion : DiagGroup<"sign-conversion">;
def BoolConversion : DiagGroup<"bool-conversion">;
def IntConversion : DiagGroup<"int-conversion">;
+def EnumConversion : DiagGroup<"enum-conversion">;
def NonLiteralNullConversion : DiagGroup<"non-literal-null-conversion">;
def NullConversion : DiagGroup<"null-conversion">;
+def ImplicitConversionFloatingPointToBool :
+ DiagGroup<"implicit-conversion-floating-point-to-bool">;
def BuiltinRequiresHeader : DiagGroup<"builtin-requires-header">;
def CXXCompat: DiagGroup<"c++-compat">;
def CastAlign : DiagGroup<"cast-align">;
@@ -42,11 +43,11 @@ def : DiagGroup<"cast-qual">;
def : DiagGroup<"char-align">;
def Comment : DiagGroup<"comment">;
def : DiagGroup<"ctor-dtor-privacy">;
-def : DiagGroup<"declaration-after-statement">;
def DefaultArgSpecialMember : DiagGroup<"default-arg-special-member">;
def GNUDesignator : DiagGroup<"gnu-designator">;
def DeleteNonVirtualDtor : DiagGroup<"delete-non-virtual-dtor">;
+def AbstractFinalClass : DiagGroup<"abstract-final-class">;
def DeprecatedDeclarations : DiagGroup<"deprecated-declarations">;
def DeprecatedWritableStr : DiagGroup<"deprecated-writable-strings">;
@@ -58,9 +59,14 @@ def DeprecatedImplementations :DiagGroup<"deprecated-implementations">;
def : DiagGroup<"disabled-optimization">;
def : DiagGroup<"discard-qual">;
def : DiagGroup<"div-by-zero">;
+
def DocumentationHTML : DiagGroup<"documentation-html">;
def DocumentationPedantic : DiagGroup<"documentation-pedantic">;
-def Documentation : DiagGroup<"documentation", [DocumentationHTML]>;
+def DocumentationDeprecatedSync : DiagGroup<"documentation-deprecated-sync">;
+def Documentation : DiagGroup<"documentation",
+ [DocumentationHTML,
+ DocumentationDeprecatedSync]>;
+
def EmptyBody : DiagGroup<"empty-body">;
def ExtraTokens : DiagGroup<"extra-tokens">;
def CXX11ExtraSemi : DiagGroup<"c++11-extra-semi">;
@@ -108,9 +114,9 @@ def ExitTimeDestructors : DiagGroup<"exit-time-destructors">;
def FlexibleArrayExtensions : DiagGroup<"flexible-array-extensions">;
def FourByteMultiChar : DiagGroup<"four-char-constants">;
def GlobalConstructors : DiagGroup<"global-constructors">;
-def : DiagGroup<"idiomatic-parentheses">;
def BitwiseOpParentheses: DiagGroup<"bitwise-op-parentheses">;
def LogicalOpParentheses: DiagGroup<"logical-op-parentheses">;
+def ShiftOpParentheses: DiagGroup<"shift-op-parentheses">;
def DanglingElse: DiagGroup<"dangling-else">;
def IgnoredQualifiers : DiagGroup<"ignored-qualifiers">;
def : DiagGroup<"import">;
@@ -119,7 +125,6 @@ def IncompleteUmbrella : DiagGroup<"incomplete-umbrella">;
def KNRPromotedParameter : DiagGroup<"knr-promoted-parameter">;
def : DiagGroup<"init-self">;
def : DiagGroup<"inline">;
-def : DiagGroup<"int-to-pointer-cast">;
def : DiagGroup<"invalid-pch">;
def LiteralRange : DiagGroup<"literal-range">;
def LocalTypeTemplateArgs : DiagGroup<"local-type-template-args",
@@ -131,11 +136,12 @@ def MissingBraces : DiagGroup<"missing-braces">;
def MissingDeclarations: DiagGroup<"missing-declarations">;
def : DiagGroup<"missing-format-attribute">;
def : DiagGroup<"missing-include-dirs">;
-def : DiagGroup<"missing-noreturn">;
def MultiChar : DiagGroup<"multichar">;
def : DiagGroup<"nested-externs">;
-def : DiagGroup<"newline-eof">;
-def LongLong : DiagGroup<"long-long">;
+def CXX11LongLong : DiagGroup<"c++11-long-long">;
+def LongLong : DiagGroup<"long-long", [CXX11LongLong]>;
+def MismatchedParameterTypes : DiagGroup<"mismatched-parameter-types">;
+def MismatchedReturnTypes : DiagGroup<"mismatched-return-types">;
def MismatchedTags : DiagGroup<"mismatched-tags">;
def MissingFieldInitializers : DiagGroup<"missing-field-initializers">;
def ModuleBuild : DiagGroup<"module-build">;
@@ -157,6 +163,7 @@ def OverlengthStrings : DiagGroup<"overlength-strings">;
def OverloadedVirtual : DiagGroup<"overloaded-virtual">;
def PrivateExtern : DiagGroup<"private-extern">;
def SelTypeCast : DiagGroup<"cast-of-sel-type">;
+def BadFunctionCast : DiagGroup<"bad-function-cast">;
def ObjCPropertyImpl : DiagGroup<"objc-property-implementation">;
def ObjCPropertyNoAttribute : DiagGroup<"objc-property-no-attribute">;
def ObjCMissingSuperCalls : DiagGroup<"objc-missing-super-calls">;
@@ -183,7 +190,7 @@ def Sentinel : DiagGroup<"sentinel">;
def MissingMethodReturnType : DiagGroup<"missing-method-return-type">;
def : DiagGroup<"sequence-point">;
def Shadow : DiagGroup<"shadow">;
-def : DiagGroup<"shorten-64-to-32">;
+def Shorten64To32 : DiagGroup<"shorten-64-to-32">;
def : DiagGroup<"sign-promo">;
def SignCompare : DiagGroup<"sign-compare">;
def : DiagGroup<"stack-protector">;
@@ -192,11 +199,15 @@ def : DiagGroup<"synth">;
def SizeofArrayArgument : DiagGroup<"sizeof-array-argument">;
def StringPlusInt : DiagGroup<"string-plus-int">;
def StrncatSize : DiagGroup<"strncat-size">;
-def TautologicalCompare : DiagGroup<"tautological-compare">;
+def TautologicalOutOfRangeCompare : DiagGroup<"tautological-constant-out-of-range-compare">;
+def TautologicalCompare : DiagGroup<"tautological-compare",
+ [TautologicalOutOfRangeCompare]>;
def HeaderHygiene : DiagGroup<"header-hygiene">;
+def DuplicateDeclSpecifier : DiagGroup<"duplicate-decl-specifier">;
+def CompareDistinctPointerType : DiagGroup<"compare-distinct-pointer-types">;
// Preprocessor warnings.
-def : DiagGroup<"builtin-macro-redefined">;
+def AmbiguousMacro : DiagGroup<"ambiguous-macro">;
// Just silence warnings about -Wstrict-aliasing for now.
def : DiagGroup<"strict-aliasing=0">;
@@ -262,26 +273,29 @@ def ImplicitAtomic : DiagGroup<"implicit-atomic-properties">;
def CustomAtomic : DiagGroup<"custom-atomic-properties">;
def AtomicProperties : DiagGroup<"atomic-properties",
[ImplicitAtomic, CustomAtomic]>;
-def AutomaticReferenceCountingABI : DiagGroup<"arc-abi">;
+// FIXME: Remove arc-abi once an Xcode is released that doesn't pass this flag.
+def : DiagGroup<"arc-abi">;
def ARCUnsafeRetainedAssign : DiagGroup<"arc-unsafe-retained-assign">;
def ARCRetainCycles : DiagGroup<"arc-retain-cycles">;
def ARCNonPodMemAccess : DiagGroup<"arc-non-pod-memaccess">;
def AutomaticReferenceCounting : DiagGroup<"arc",
- [AutomaticReferenceCountingABI,
- ARCUnsafeRetainedAssign,
+ [ARCUnsafeRetainedAssign,
ARCRetainCycles,
ARCNonPodMemAccess]>;
+def ARCRepeatedUseOfWeakMaybe : DiagGroup<"arc-maybe-repeated-use-of-weak">;
+def ARCRepeatedUseOfWeak : DiagGroup<"arc-repeated-use-of-weak",
+ [ARCRepeatedUseOfWeakMaybe]>;
def Selector : DiagGroup<"selector">;
def Protocol : DiagGroup<"protocol">;
def SuperSubClassMismatch : DiagGroup<"super-class-method-mismatch">;
def OverridingMethodMismatch : DiagGroup<"overriding-method-mismatch">;
-def : DiagGroup<"variadic-macros">;
def VariadicMacros : DiagGroup<"variadic-macros">;
def VectorConversion : DiagGroup<"vector-conversion">; // clang specific
def VexingParse : DiagGroup<"vexing-parse">;
def VLA : DiagGroup<"vla">;
def VolatileRegisterVar : DiagGroup<"volatile-register-var">;
def Visibility : DiagGroup<"visibility">;
+def ZeroLengthArray : DiagGroup<"zero-length-array">;
// GCC calls -Wdeprecated-writable-strings -Wwrite-strings.
def GCCWriteStrings : DiagGroup<"write-strings" , [DeprecatedWritableStr]>;
@@ -300,6 +314,7 @@ def ParenthesesOnEquality : DiagGroup<"parentheses-equality">;
def Parentheses : DiagGroup<"parentheses",
[LogicalOpParentheses,
BitwiseOpParentheses,
+ ShiftOpParentheses,
ParenthesesOnEquality,
DanglingElse]>;
@@ -311,15 +326,16 @@ def Parentheses : DiagGroup<"parentheses",
// - bool-to-pointer conversion warnings are on by default
// - __null-to-integer conversion warnings are on by default
def Conversion : DiagGroup<"conversion",
- [DiagGroup<"shorten-64-to-32">,
+ [BoolConversion,
ConstantConversion,
+ EnumConversion,
+ Shorten64To32,
+ IntConversion,
LiteralConversion,
- StringConversion,
- SignConversion,
- BoolConversion,
- NullConversion, // NULL->non-pointer
NonLiteralNullConversion, // (1-1)->pointer (etc)
- IntConversion]>,
+ NullConversion, // NULL->non-pointer
+ SignConversion,
+ StringConversion]>,
DiagCategory<"Value Conversion Issue">;
def Unused : DiagGroup<"unused",
@@ -345,6 +361,8 @@ def Format2 : DiagGroup<"format=2",
def TypeSafety : DiagGroup<"type-safety">;
+def IntToPointerCast : DiagGroup<"int-to-pointer-cast">;
+
def Extra : DiagGroup<"extra", [
MissingFieldInitializers,
IgnoredQualifiers,
@@ -361,6 +379,7 @@ def Most : DiagGroup<"most", [
DeleteNonVirtualDtor,
Format,
Implicit,
+ IntToPointerCast,
MismatchedTags,
MissingBraces,
MultiChar,
@@ -382,9 +401,12 @@ def Most : DiagGroup<"most", [
// Thread Safety warnings
def ThreadSafetyAttributes : DiagGroup<"thread-safety-attributes">;
-def ThreadSafetyAnalysis : DiagGroup<"thread-safety-analysis">;
-def ThreadSafety : DiagGroup<"thread-safety",
- [ThreadSafetyAttributes, ThreadSafetyAnalysis]>;
+def ThreadSafetyAnalysis : DiagGroup<"thread-safety-analysis">;
+def ThreadSafetyPrecise : DiagGroup<"thread-safety-precise">;
+def ThreadSafety : DiagGroup<"thread-safety",
+ [ThreadSafetyAttributes,
+ ThreadSafetyAnalysis,
+ ThreadSafetyPrecise]>;
// Note that putting warnings in -Wall will not disable them by default. If a
// warning should be active _only_ when -Wall is passed in, mark it as
@@ -414,7 +436,8 @@ def NonGCC : DiagGroup<"non-gcc",
// A warning group for warnings about using C++11 features as extensions in
// earlier C++ versions.
-def CXX11 : DiagGroup<"c++11-extensions", [CXX11ExtraSemi]>;
+def CXX11 : DiagGroup<"c++11-extensions", [CXX11ExtraSemi, CXX11LongLong]>;
+
def : DiagGroup<"c++0x-extensions", [CXX11]>;
def DelegatingCtorCycles :
DiagGroup<"delegating-ctor-cycles">;
@@ -426,7 +449,7 @@ def C11 : DiagGroup<"c11-extensions">;
def C99 : DiagGroup<"c99-extensions">;
// A warning group for warnings about GCC extensions.
-def GNU : DiagGroup<"gnu", [GNUDesignator, VLA]>;
+def GNU : DiagGroup<"gnu", [GNUDesignator, VLA, ZeroLengthArray]>;
// A warning group for warnings about code that clang accepts but gcc doesn't.
def GccCompat : DiagGroup<"gcc-compat">;
@@ -451,3 +474,9 @@ def ObjCStringComparison : DiagGroup<"objc-string-compare">;
def ObjCLiteralComparison : DiagGroup<"objc-literal-compare", [
ObjCStringComparison
]>;
+
+// Inline ASM warnings.
+def ASMOperandWidths : DiagGroup<"asm-operand-widths">;
+def ASM : DiagGroup<"asm", [
+ ASMOperandWidths
+ ]>;
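
One behavioral consequence of the Conversion regrouping above: EnumConversion is newly listed under -Wconversion (and the anonymous shorten-64-to-32 reference becomes the named Shorten64To32 group). Illustrative C that plain -Wconversion now flags via -Wenum-conversion:

    enum Fruit  { Apple };
    enum Animal { Ant };
    enum Animal a = Apple;  /* warning: implicit conversion from enumeration type
                               'enum Fruit' to different enumeration type 'enum Animal' */
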
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
index cc958db..c6c50ab 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -41,9 +41,9 @@ def trigraph_ends_block_comment : Warning<"trigraph ends block comment">,
def trigraph_converted : Warning<"trigraph converted to '%0' character">,
InGroup<Trigraphs>;
-def ext_multi_line_bcpl_comment : Extension<"multi-line // comment">,
+def ext_multi_line_line_comment : Extension<"multi-line // comment">,
InGroup<Comment>;
-def ext_bcpl_comment : Extension<
+def ext_line_comment : Extension<
"// comments are not allowed in this language">,
InGroup<Comment>;
def ext_no_newline_eof : Extension<"no newline at end of file">,
@@ -55,7 +55,7 @@ def warn_cxx98_compat_no_newline_eof : Warning<
def ext_dollar_in_identifier : Extension<"'$' in identifier">,
InGroup<DiagGroup<"dollar-in-identifier-extension">>;
-def ext_charize_microsoft : Extension<"@# is a microsoft extension">,
+def ext_charize_microsoft : Extension<"charizing operator #@ is a Microsoft extension">,
InGroup<Microsoft>;
def ext_token_used : Extension<"extension used">,
@@ -127,15 +127,16 @@ def warn_char_constant_too_large : Warning<
def err_multichar_utf_character_literal : Error<
"Unicode character literals may not contain multiple characters">;
def err_exponent_has_no_digits : Error<"exponent has no digits">;
-def ext_imaginary_constant : Extension<"imaginary constants are an extension">;
+def ext_imaginary_constant : Extension<
+ "imaginary constants are a GNU extension">, InGroup<GNU>;
def err_hexconstant_requires_exponent : Error<
"hexadecimal floating constants require an exponent">;
def err_hexconstant_requires_digits : Error<
"hexadecimal floating constants require a significand">;
def ext_hexconstant_invalid : Extension<
- "hexadecimal floating constants are a C99 feature">;
+ "hexadecimal floating constants are a C99 feature">, InGroup<C99>;
def ext_binary_literal : Extension<
- "binary integer literals are an extension">;
+ "binary integer literals are a GNU extension">, InGroup<GNU>;
def err_pascal_string_too_long : Error<"Pascal string is too long">;
def warn_octal_escape_too_large : ExtWarn<"octal escape sequence out of range">;
def warn_hex_escape_too_large : ExtWarn<"hex escape sequence out of range">;
@@ -228,6 +229,12 @@ def pp_macro_not_used : Warning<"macro is not used">, DefaultIgnore,
def warn_pp_undef_identifier : Warning<
"%0 is not defined, evaluates to 0">,
InGroup<DiagGroup<"undef">>, DefaultIgnore;
+def warn_pp_ambiguous_macro : Warning<
+ "ambiguous expansion of macro %0">, InGroup<AmbiguousMacro>;
+def note_pp_ambiguous_macro_chosen : Note<
+ "expanding this definition of %0">;
+def note_pp_ambiguous_macro_other : Note<
+ "other definition of %0">;
def pp_invalid_string_literal : Warning<
"invalid string literal, ignoring final '\\'">;
@@ -484,15 +491,21 @@ def err_mmap_missing_module_unqualified : Error<
def err_mmap_missing_module_qualified : Error<
"no module named '%0' in '%1'">;
def err_mmap_top_level_inferred_submodule : Error<
- "only submodules may be inferred with wildcard syntax">;
+ "only submodules and framework modules may be inferred with wildcard syntax">;
def err_mmap_inferred_no_umbrella : Error<
"inferred submodules require a module with an umbrella">;
+def err_mmap_inferred_framework_submodule : Error<
+ "inferred submodule cannot be a framework submodule">;
+def err_mmap_explicit_inferred_framework : Error<
+ "inferred framework modules cannot be 'explicit'">;
+def err_mmap_missing_exclude_name : Error<
+ "expected excluded module name">;
def err_mmap_inferred_redef : Error<
"redefinition of inferred submodule">;
def err_mmap_expected_lbrace_wildcard : Error<
"expected '{' to start inferred submodule">;
-def err_mmap_expected_wildcard_member : Error<
- "expected module export wildcard">;
+def err_mmap_expected_inferred_member : Error<
+ "expected %select{module exclusion with 'exclude'|'export *'}0">;
def err_mmap_expected_export_wildcard : Error<
"only '*' can be exported from an inferred submodule">;
def err_mmap_explicit_top_level : Error<
@@ -509,5 +522,7 @@ def warn_auto_module_import : Warning<
"import of module '%1'">, InGroup<AutoImport>, DefaultIgnore;
def warn_uncovered_module_header : Warning<
"umbrella header does not include header '%0'">, InGroup<IncompleteUmbrella>;
-
+def err_expected_id_building_module : Error<
+ "expected a module name in '__building_module' expression">;
+
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticOptions.def b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticOptions.def
new file mode 100644
index 0000000..476ac1e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticOptions.def
@@ -0,0 +1,93 @@
+//===--- DiagnosticOptions.def - Diagnostic option database -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the diagnostic options. Users of this file
+// must define the DIAGOPT macro to make use of this information.
+// Optionally, the user may also define ENUM_DIAGOPT (for options
+// that have enumeration type) and VALUE_DIAGOPT (for options that
+// describe a value rather than a flag). The SEMANTIC_* variants of these
+// macros indicate options that affect the processing of the program, rather
+// than simply the output.
+//
+//===----------------------------------------------------------------------===//
+#ifndef DIAGOPT
+# error Define the DIAGOPT macro to handle diagnostic options
+#endif
+
+#ifndef VALUE_DIAGOPT
+# define VALUE_DIAGOPT(Name, Bits, Default) \
+DIAGOPT(Name, Bits, Default)
+#endif
+
+#ifndef ENUM_DIAGOPT
+# define ENUM_DIAGOPT(Name, Type, Bits, Default) \
+DIAGOPT(Name, Bits, Default)
+#endif
+
+#ifndef SEMANTIC_DIAGOPT
+# define SEMANTIC_DIAGOPT(Name, Bits, Default) DIAGOPT(Name, Bits, Default)
+#endif
+
+#ifndef SEMANTIC_VALUE_DIAGOPT
+# define SEMANTIC_VALUE_DIAGOPT(Name, Bits, Default) \
+ VALUE_DIAGOPT(Name, Bits, Default)
+#endif
+
+#ifndef SEMANTIC_ENUM_DIAGOPT
+# define SEMANTIC_ENUM_DIAGOPT(Name, Type, Bits, Default) \
+ ENUM_DIAGOPT(Name, Type, Bits, Default)
+#endif
+
+SEMANTIC_DIAGOPT(IgnoreWarnings, 1, 0) /// -w
+DIAGOPT(NoRewriteMacros, 1, 0) /// -Wno-rewrite-macros
+DIAGOPT(Pedantic, 1, 0) /// -pedantic
+DIAGOPT(PedanticErrors, 1, 0) /// -pedantic-errors
+DIAGOPT(ShowColumn, 1, 1) /// Show column number on diagnostics.
+DIAGOPT(ShowLocation, 1, 1) /// Show source location information.
+DIAGOPT(ShowCarets, 1, 1) /// Show carets in diagnostics.
+DIAGOPT(ShowFixits, 1, 1) /// Show fixit information.
+DIAGOPT(ShowSourceRanges, 1, 0) /// Show source ranges in numeric form.
+DIAGOPT(ShowParseableFixits, 1, 0) /// Show machine parseable fix-its.
+DIAGOPT(ShowOptionNames, 1, 0) /// Show the option name for mappable
+ /// diagnostics.
+DIAGOPT(ShowNoteIncludeStack, 1, 0) /// Show include stacks for notes.
+VALUE_DIAGOPT(ShowCategories, 2, 0) /// Show categories: 0 -> none, 1 -> Number,
+ /// 2 -> Full Name.
+
+ENUM_DIAGOPT(Format, TextDiagnosticFormat, 2, Clang) /// Format for diagnostics.
+
+DIAGOPT(ShowColors, 1, 0) /// Show diagnostics with ANSI color sequences.
+ENUM_DIAGOPT(ShowOverloads, OverloadsShown, 1,
+ Ovl_All) /// Overload candidates to show.
+DIAGOPT(VerifyDiagnostics, 1, 0) /// Check that diagnostics match the expected
+ /// diagnostics, indicated by markers in the
+ /// input source file.
+
+DIAGOPT(ElideType, 1, 0) /// Elide identical types in template diffing
+DIAGOPT(ShowTemplateTree, 1, 0) /// Print a template tree when diffing
+
+VALUE_DIAGOPT(ErrorLimit, 32, 0) /// Limit # errors emitted.
+/// Limit depth of macro expansion backtrace.
+VALUE_DIAGOPT(MacroBacktraceLimit, 32, DefaultMacroBacktraceLimit)
+/// Limit depth of instantiation backtrace.
+VALUE_DIAGOPT(TemplateBacktraceLimit, 32, DefaultTemplateBacktraceLimit)
+/// Limit depth of constexpr backtrace.
+VALUE_DIAGOPT(ConstexprBacktraceLimit, 32, DefaultConstexprBacktraceLimit)
+
+VALUE_DIAGOPT(TabStop, 32, DefaultTabStop) /// The distance between tab stops.
+/// Column limit for formatting message diagnostics, or 0 if unused.
+VALUE_DIAGOPT(MessageLength, 32, 0)
+
+#undef DIAGOPT
+#undef ENUM_DIAGOPT
+#undef VALUE_DIAGOPT
+#undef SEMANTIC_DIAGOPT
+#undef SEMANTIC_ENUM_DIAGOPT
+#undef SEMANTIC_VALUE_DIAGOPT
+
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticOptions.h
new file mode 100644
index 0000000..b75cb0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticOptions.h
@@ -0,0 +1,85 @@
+//===--- DiagnosticOptions.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_DIAGNOSTICOPTIONS_H
+#define LLVM_CLANG_BASIC_DIAGNOSTICOPTIONS_H
+
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// \brief Specifies which overload candidates to display when overload
+/// resolution fails.
+enum OverloadsShown {
+ Ovl_All, ///< Show all overloads.
+ Ovl_Best ///< Show just the "best" overload candidates.
+};
+
+/// DiagnosticOptions - Options for controlling the compiler diagnostics
+/// engine.
+class DiagnosticOptions : public llvm::RefCountedBase<DiagnosticOptions> {
+public:
+ enum TextDiagnosticFormat { Clang, Msvc, Vi };
+
+ // Default values.
+ enum { DefaultTabStop = 8, MaxTabStop = 100,
+ DefaultMacroBacktraceLimit = 6,
+ DefaultTemplateBacktraceLimit = 10,
+ DefaultConstexprBacktraceLimit = 10 };
+
+ // Define simple diagnostic options (with no accessors).
+#define DIAGOPT(Name, Bits, Default) unsigned Name : Bits;
+#define ENUM_DIAGOPT(Name, Type, Bits, Default)
+#include "clang/Basic/DiagnosticOptions.def"
+
+protected:
+ // Define diagnostic options of enumeration type. These are private, and will
+ // have accessors (below).
+#define DIAGOPT(Name, Bits, Default)
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) unsigned Name : Bits;
+#include "clang/Basic/DiagnosticOptions.def"
+
+public:
+ /// If non-empty, a file to log extended build information to, for development
+ /// testing and analysis.
+ std::string DumpBuildInformation;
+
+ /// The file to log diagnostic output to.
+ std::string DiagnosticLogFile;
+
+ /// The file to serialize diagnostics to (non-appending).
+ std::string DiagnosticSerializationFile;
+
+ /// The list of -W... options used to alter the diagnostic mappings, with the
+ /// prefixes removed.
+ std::vector<std::string> Warnings;
+
+public:
+ // Define accessors/mutators for diagnostic options of enumeration type.
+#define DIAGOPT(Name, Bits, Default)
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) \
+ Type get##Name() const { return static_cast<Type>(Name); } \
+ void set##Name(Type Value) { Name = static_cast<unsigned>(Value); }
+#include "clang/Basic/DiagnosticOptions.def"
+
+ DiagnosticOptions() {
+#define DIAGOPT(Name, Bits, Default) Name = Default;
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) set##Name(Default);
+#include "clang/Basic/DiagnosticOptions.def"
+ }
+};
+
+typedef DiagnosticOptions::TextDiagnosticFormat TextDiagnosticFormat;
+
+} // end namespace clang
+
+#endif
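
At the use site the generated members read like this (a sketch; defaults come from the .def, ENUM_DIAGOPT members get get/set pairs, and flag and value options are public bit-fields):

    clang::DiagnosticOptions Opts;           // fields start at their .def defaults
    Opts.setShowOverloads(clang::Ovl_Best);  // enum-valued option via its mutator
    Opts.TabStop = 4;                        // value option: public bit-field
    unsigned Cols = Opts.MessageLength;      // column limit for message formatting
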
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
index b1c16fa..21eeccb 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -18,10 +18,13 @@ def w_asm_qualifier_ignored : Warning<"ignored %0 qualifier on asm">,
def warn_file_asm_volatile : Warning<
"meaningless 'volatile' on asm outside function">, CatInlineAsm;
+def warn_unsupported_msasm : Warning<
+ "MS-style inline assembly is not supported">, InGroup<Microsoft>;
+
let CategoryName = "Parse Issue" in {
def ext_empty_translation_unit : Extension<
- "ISO C requires a translation unit to contain at least one declaration.">,
+ "ISO C requires a translation unit to contain at least one declaration">,
InGroup<DiagGroup<"empty-translation-unit">>;
def warn_cxx98_compat_top_level_semi : Warning<
"extra ';' outside of a function is incompatible with C++98">,
@@ -40,11 +43,14 @@ def warn_extra_semi_after_mem_fn_def : Warning<
"extra ';' after member function definition">,
InGroup<ExtraSemi>, DefaultIgnore;
-def ext_duplicate_declspec : Extension<"duplicate '%0' declaration specifier">;
+def ext_duplicate_declspec : ExtWarn<"duplicate '%0' declaration specifier">,
+ InGroup<DuplicateDeclSpecifier>;
+def warn_duplicate_declspec : Warning<"duplicate '%0' declaration specifier">,
+ InGroup<DuplicateDeclSpecifier>;
def ext_plain_complex : ExtWarn<
"plain '_Complex' requires a type specifier; assuming '_Complex double'">;
def ext_integer_complex : Extension<
- "complex integer types are an extension">;
+ "complex integer types are a GNU extension">, InGroup<GNU>;
def ext_thread_before : Extension<"'__thread' before '%0'">;
def ext_empty_struct_union : Extension<
@@ -80,8 +86,11 @@ def err_enumerator_list_missing_comma : Error<
"missing ',' between enumerators">;
def err_enumerator_unnamed_no_def : Error<
"unnamed enumeration must be a definition">;
-def ext_ms_enum_fixed_underlying_type : Extension<
- "enumeration types with a fixed underlying type are a Microsoft extension">,
+def ext_cxx11_enum_fixed_underlying_type : Extension<
+ "enumeration types with a fixed underlying type are a C++11 extension">,
+ InGroup<CXX11>;
+def ext_c_enum_fixed_underlying_type : Extension<
+ "enumeration types with a fixed underlying type are a Microsoft extension">,
InGroup<Microsoft>;
def warn_cxx98_compat_enum_fixed_underlying_type : Warning<
"enumeration types with a fixed underlying type are incompatible with C++98">,
@@ -207,6 +216,17 @@ def err_expected_semi_after_static_assert : Error<
"expected ';' after static_assert">;
def err_expected_semi_for : Error<"expected ';' in 'for' statement specifier">;
def err_expected_colon_after : Error<"expected ':' after %0">;
+def warn_missing_selector_name : Warning<
+ "%0 used as the name of the previous parameter rather than as part "
+ "of the selector">,
+ InGroup<DiagGroup<"missing-selector-name">>;
+def note_missing_selector_name : Note<
+ "introduce a parameter name to make %0 part of the selector">;
+def note_force_empty_selector_name : Note<
+ "or insert whitespace before ':' to use %0 as parameter name "
+ "and have an empty entry in the selector">;
+def note_missing_argument_name : Note<
+ "did you mean to use %0 as the selector name instead of %1">;
def err_label_end_of_compound_statement : Error<
"label at end of compound statement: expected statement">;
def err_address_of_label_outside_fn : Error<
@@ -331,6 +351,8 @@ def ext_c11_static_assert : Extension<
def warn_cxx98_compat_static_assert : Warning<
"static_assert declarations are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
+def err_paren_after_colon_colon : Error<
+ "unexpected parenthesis after '::'">;
/// Objective-C parser diagnostics
def err_expected_minus_or_plus : Error<
@@ -423,7 +445,8 @@ def err_expected_member_or_base_name : Error<
def err_expected_lbrace_after_base_specifiers : Error<
"expected '{' after base class list">;
def ext_ellipsis_exception_spec : Extension<
- "exception specification of '...' is a Microsoft extension">;
+ "exception specification of '...' is a Microsoft extension">,
+ InGroup<Microsoft>;
def err_dynamic_and_noexcept_specification : Error<
"cannot have both throw() and noexcept() clause on the same function">;
def warn_cxx98_compat_noexcept_decl : Warning<
@@ -455,9 +478,6 @@ def err_literal_operator_string_prefix : Error<
"string literal after 'operator' cannot have an encoding prefix">;
def err_literal_operator_string_not_empty : Error<
"string literal after 'operator' must be '\"\"'">;
-def err_literal_operator_missing_space : Error<
- "C++11 requires a space between the \"\" and the user-defined suffix in a "
- "literal operator">;
def warn_cxx98_compat_literal_operator : Warning<
"literal operators are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
@@ -570,7 +590,7 @@ def err_expected_type_name_after_typename : Error<
"expected an identifier or template-id after '::'">;
def err_explicit_spec_non_template : Error<
"explicit %select{specialization|instantiation}0 of non-template "
- "%select{class|struct|union}1 %2">;
+ "%select{class|struct|union|interface}1 %2">;
def err_default_template_template_parameter_not_template : Error<
"default template argument for a template template parameter must be a class "
@@ -627,6 +647,11 @@ def ext_override_control_keyword : ExtWarn<
def warn_cxx98_compat_override_control_keyword : Warning<
"'%0' keyword is incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
+def err_override_control_interface : Error<
+ "'%0' keyword not permitted with interface types">;
+
+def err_access_specifier_interface : Error<
+ "interface types cannot specify '%select{private|protected}0' access">;
def err_duplicate_virt_specifier : Error<
"class member already marked '%0'">;
@@ -710,6 +735,11 @@ def warn_pragma_unused_expected_var : Warning<
"expected '#pragma unused' argument to be a variable name">;
def warn_pragma_unused_expected_punc : Warning<
"expected ')' or ',' in '#pragma unused'">;
+// - #pragma fp_contract
+def err_pragma_fp_contract_scope : Error<
+ "'#pragma fp_contract' should only appear at file scope or at the start of a "
+ "compound expression">;
+
// OpenCL Section 6.8.g
def err_not_opencl_storage_class_specifier : Error<
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 96b3140..0d64bf3 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -20,13 +20,6 @@ def warn_variables_not_in_loop_body : Warning<
"used in loop condition not modified in loop body">,
InGroup<DiagGroup<"loop-analysis">>, DefaultIgnore;
-def warn_identical_enum_values : Warning<
- "all elements of %0 are initialized with literals to value %1">,
- InGroup<DiagGroup<"unique-enum">>;
-def note_identical_enum_values : Note<
- "initialize the last element with the previous element to silence "
- "this warning">;
-
// Constant expressions
def err_expr_not_ice : Error<
"expression is not an %select{integer|integral}0 constant expression">;
@@ -102,7 +95,7 @@ def err_variably_modified_new_type : Error<
// C99 Designated Initializers
def ext_designated_init : Extension<
- "designated initializers are a C99 feature">;
+ "designated initializers are a C99 feature">, InGroup<C99>;
def err_array_designator_negative : Error<
"array designator value '%0' is negative">;
def err_array_designator_empty_range : Error<
@@ -410,12 +403,10 @@ def err_statically_allocated_object : Error<
"interface type cannot be statically allocated">;
def err_object_cannot_be_passed_returned_by_value : Error<
"interface type %1 cannot be %select{returned|passed}0 by value"
- "; did you forget * in %1">;
+ "; did you forget * in %1?">;
def err_parameters_retval_cannot_have_fp16_type : Error<
"%select{parameters|function return value}0 cannot have __fp16 type; did you forget * ?">;
def warn_enum_value_overflow : Warning<"overflow in enumeration value">;
-def warn_pragma_options_align_unsupported_option : Warning<
- "unsupported alignment option in '#pragma options align'">;
def warn_pragma_options_align_reset_failed : Warning<
"#pragma options align=reset failed: %0">;
def err_pragma_options_align_mac68k_target_unsupported : Error<
@@ -521,7 +512,8 @@ def warn_conflicting_overriding_ret_types : Warning<
def warn_conflicting_ret_types : Warning<
"conflicting return type in "
- "implementation of %0%diff{: $ vs $|}1,2">;
+ "implementation of %0%diff{: $ vs $|}1,2">,
+ InGroup<MismatchedReturnTypes>;
def warn_conflicting_overriding_ret_type_modifiers : Warning<
"conflicting distributed object modifiers on return type "
@@ -550,7 +542,9 @@ def warn_conflicting_overriding_param_types : Warning<
def warn_conflicting_param_types : Warning<
"conflicting parameter types in "
- "implementation of %0%diff{: $ vs $|}1,2">;
+ "implementation of %0%diff{: $ vs $|}1,2">,
+ InGroup<MismatchedParameterTypes>;
+
def warn_conflicting_param_modifiers : Warning<
"conflicting distributed object modifiers on parameter type "
"in implementation of %0">,
@@ -595,6 +589,8 @@ def warn_accessor_property_type_mismatch : Warning<
"type of property %0 does not match type of accessor %1">;
def not_conv_function_declared_at : Note<"type conversion function declared here">;
def note_method_declared_at : Note<"method %0 declared here">;
+def note_property_attribute : Note<"property %0 is declared "
+ "%select{deprecated|unavailable}1 here">;
def err_setter_type_void : Error<"type of setter must be void">;
def err_duplicate_method_decl : Error<"duplicate declaration of method %0">;
def warn_duplicate_method_decl :
@@ -700,7 +696,7 @@ def error_bad_property_context : Error<
"property implementation must be in a class or category implementation">;
def error_missing_property_ivar_decl : Error<
"synthesized property %0 must either be named the same as a compatible"
- " ivar or must explicitly name an ivar">;
+ " instance variable or must explicitly name an instance variable">;
def error_synthesize_weak_non_arc_or_gc : Error<
"@synthesize of 'weak' property is only allowed in ARC or GC mode">;
def err_arc_perform_selector_retains : Error<
@@ -712,38 +708,55 @@ def err_gc_weak_property_strong_type : Error<
"weak attribute declared on a __strong type property in GC mode">;
def warn_receiver_is_weak : Warning <
"weak %select{receiver|property|implicit property}0 may be "
- "unpredictably null in ARC mode">,
+ "unpredictably set to nil">,
InGroup<DiagGroup<"receiver-is-weak">>, DefaultIgnore;
+def note_arc_assign_to_strong : Note<
+ "assign the value to a strong variable to keep the object alive during use">;
+def warn_arc_repeated_use_of_weak : Warning <
+ "weak %select{variable|property|implicit property|instance variable}0 %1 is "
+ "accessed multiple times in this %select{function|method|block|lambda}2 "
+ "but may be unpredictably set to nil; assign to a strong variable to keep "
+ "the object alive">,
+ InGroup<ARCRepeatedUseOfWeak>, DefaultIgnore;
+def warn_implicitly_retains_self : Warning <
+ "block implicitly retains 'self'; explicitly mention 'self' to indicate "
+ "this is intended behavior">,
+ InGroup<DiagGroup<"implicit-retain-self">>, DefaultIgnore;
+def warn_arc_possible_repeated_use_of_weak : Warning <
+ "weak %select{variable|property|implicit property|instance variable}0 %1 may "
+ "be accessed multiple times in this %select{function|method|block|lambda}2 "
+ "and may be unpredictably set to nil; assign to a strong variable to keep "
+ "the object alive">,
+ InGroup<ARCRepeatedUseOfWeakMaybe>, DefaultIgnore;
+def note_arc_weak_also_accessed_here : Note<
+ "also accessed here">;
def err_incomplete_synthesized_property : Error<
"cannot synthesize property %0 with incomplete type %1">;
def error_property_ivar_type : Error<
- "type of property %0 (%1) does not match type of ivar %2 (%3)">;
+ "type of property %0 (%1) does not match type of instance variable %2 (%3)">;
def error_property_accessor_type : Error<
"type of property %0 (%1) does not match type of accessor %2 (%3)">;
def error_ivar_in_superclass_use : Error<
- "property %0 attempting to use ivar %1 declared in super class %2">;
+ "property %0 attempting to use instance variable %1 declared in super class %2">;
def error_weak_property : Error<
- "existing ivar %1 for __weak property %0 must be __weak">;
+ "existing instance variable %1 for __weak property %0 must be __weak">;
def error_strong_property : Error<
- "existing ivar %1 for strong property %0 may not be __weak">;
+ "existing instance variable %1 for strong property %0 may not be __weak">;
def error_dynamic_property_ivar_decl : Error<
- "dynamic property can not have ivar specification">;
+ "dynamic property can not have instance variable specification">;
def error_duplicate_ivar_use : Error<
- "synthesized properties %0 and %1 both claim ivar %2">;
+ "synthesized properties %0 and %1 both claim instance variable %2">;
def error_property_implemented : Error<"property %0 is already implemented">;
def warn_objc_property_attr_mutually_exclusive : Warning<
"property attributes '%0' and '%1' are mutually exclusive">,
InGroup<ReadOnlySetterAttrs>, DefaultIgnore;
-def warn_objc_missing_super_dealloc : Warning<
- "method possibly missing a [super dealloc] call">,
+def warn_objc_missing_super_call : Warning<
+ "method possibly missing a [super %0] call">,
InGroup<ObjCMissingSuperCalls>;
def error_dealloc_bad_result_type : Error<
"dealloc return type must be correctly specified as 'void' under ARC, "
"instead of %0">;
-def warn_objc_missing_super_finalize : Warning<
- "method possibly missing a [super finalize] call">,
- InGroup<ObjCMissingSuperCalls>;
def warn_undeclared_selector : Warning<
"undeclared selector %0">, InGroup<UndeclaredSelector>, DefaultIgnore;
def warn_implicit_atomic_property : Warning<
@@ -768,7 +781,7 @@ def err_static_assert_expression_is_not_constant : Error<
def err_static_assert_failed : Error<"static_assert failed %0">;
def warn_inline_namespace_reopened_noninline : Warning<
- "inline namespace cannot be re-opened as a non-inline namespace">;
+ "inline namespace cannot be reopened as a non-inline namespace">;
def err_inline_namespace_mismatch : Error<
"%select{|non-}0inline namespace "
"cannot be reopened as %select{non-|}0inline">;
@@ -792,10 +805,11 @@ def warn_cxx98_compat_friend_is_member : Warning<
"with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def ext_unelaborated_friend_type : ExtWarn<
"unelaborated friend declaration is a C++11 extension; specify "
- "'%select{struct|union|class|enum}0' to befriend %1">, InGroup<CXX11>;
+ "'%select{struct|interface|union|class|enum}0' to befriend %1">,
+ InGroup<CXX11>;
def warn_cxx98_compat_unelaborated_friend_type : Warning<
- "befriending %1 without '%select{struct|union|class|enum}0' keyword is "
- "incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+ "befriending %1 without '%select{struct|interface|union|class|enum}0' "
+ "keyword is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def err_qualified_friend_not_found : Error<
"no function named %0 with type %1 was found in the specified scope">;
def err_introducing_special_friend : Error<
@@ -806,16 +820,26 @@ def err_tagless_friend_type_template : Error<
def err_no_matching_local_friend : Error<
"no matching function found in local scope">;
def err_no_matching_local_friend_suggest : Error<
- "no matching function %0 found in local scope; did you mean %2">;
+ "no matching function %0 found in local scope; did you mean %2?">;
def err_partial_specialization_friend : Error<
"partial specialization cannot be declared as a friend">;
def err_qualified_friend_def : Error<
"friend function definition cannot be qualified with '%0'">;
def err_friend_def_in_local_class : Error<
"friend function cannot be defined in a local class">;
+def err_friend_not_first_in_declaration : Error<
+ "'friend' must appear first in a non-function declaration">;
+def err_invalid_member_in_interface : Error<
+ "%select{data member |non-public member function |static member function |"
+ "user-declared constructor|user-declared destructor|operator |"
+ "nested class }0%1 is not permitted within an interface type">;
+def err_invalid_base_in_interface : Error<
+ "interface type cannot inherit from "
+ "%select{'struct|non-public 'interface|'class}0 %1'">;
+
def err_abstract_type_in_decl : Error<
- "%select{return|parameter|variable|field|ivar}0 type %1 is an abstract class">;
+ "%select{return|parameter|variable|field|instance variable}0 type %1 is an abstract class">;
def err_allocation_of_abstract_type : Error<
"allocating an object of abstract class type %0">;
def err_throw_abstract_type : Error<
@@ -841,6 +865,12 @@ def note_pure_virtual_function : Note<
def err_deleted_decl_not_first : Error<
"deleted definition must be first declaration">;
+def err_deleted_override : Error<
+ "deleted function %0 cannot override a non-deleted function">;
+
+def err_non_deleted_override : Error<
+ "non-deleted function %0 cannot override a deleted function">;
+
def warn_weak_vtable : Warning<
"%0 has no out-of-line virtual method definitions; its vtable will be "
"emitted in every translation unit">,
@@ -1030,7 +1060,7 @@ def warn_call_to_pure_virtual_member_function_from_ctor_dtor : Warning<
"not available in the %select{constructor|destructor}1 of %2">;
def note_field_decl : Note<"member is declared here">;
-def note_ivar_decl : Note<"ivar is declared here">;
+def note_ivar_decl : Note<"instance variable is declared here">;
def note_bitfield_decl : Note<"bit-field is declared here">;
def note_previous_decl : Note<"%0 declared here">;
def note_member_synthesized_at : Note<
@@ -1113,8 +1143,8 @@ def err_constructor_redeclared : Error<"constructor cannot be redeclared">;
def err_constructor_byvalue_arg : Error<
"copy constructor must pass its first argument by reference">;
def warn_no_constructor_for_refconst : Warning<
- "%select{struct|union|class|enum}0 %1 does not declare any constructor to "
- "initialize its non-modifiable members">;
+ "%select{struct|interface|union|class|enum}0 %1 does not declare any "
+ "constructor to initialize its non-modifiable members">;
def note_refconst_member_not_initialized : Note<
"%select{const|reference}0 member %1 will never be initialized">;
def ext_ms_explicit_constructor_call : ExtWarn<
@@ -1214,11 +1244,18 @@ def err_init_reference_member_uninitialized : Error<
"reference member of type %0 uninitialized">;
def note_uninit_reference_member : Note<
"uninitialized reference member is here">;
-def warn_field_is_uninit : Warning<"field is uninitialized when used here">,
+def warn_field_is_uninit : Warning<"field %0 is uninitialized when used here">,
+ InGroup<Uninitialized>;
+def warn_reference_field_is_uninit : Warning<
+ "reference %0 is not yet bound to a value when used here">,
InGroup<Uninitialized>;
def warn_uninit_self_reference_in_init : Warning<
"variable %0 is uninitialized when used within its own initialization">,
InGroup<Uninitialized>;
+def warn_uninit_self_reference_in_reference_init : Warning<
+ "reference %0 is not yet bound to a value when used within its own"
+ " initialization">,
+ InGroup<Uninitialized>;
def warn_uninit_var : Warning<
"variable %0 is uninitialized when %select{used here|captured by block}1">,
InGroup<Uninitialized>, DefaultIgnore;
@@ -1300,9 +1337,10 @@ def err_new_array_of_auto : Error<
"cannot allocate array of 'auto'">;
def err_auto_not_allowed : Error<
"'auto' not allowed %select{in function prototype|in non-static struct member"
- "|in non-static union member|in non-static class member|in exception declaration"
- "|in template parameter|in block literal|in template argument"
- "|in typedef|in type alias|in function return type|here}0">;
+ "|in non-static union member|in non-static class member|in interface member"
+ "|in exception declaration|in template parameter|in block literal"
+ "|in template argument|in typedef|in type alias|in function return type"
+ "|here}0">;
def err_auto_var_requires_init : Error<
"declaration of variable %0 with type %1 requires an initializer">;
def err_auto_new_requires_ctor_arg : Error<
@@ -1346,6 +1384,8 @@ def err_function_marked_override_not_overriding : Error<
"%0 marked 'override' but does not override any member functions">;
def err_class_marked_final_used_as_base : Error<
"base %0 is marked 'final'">;
+def warn_abstract_final_class : Warning<
+ "abstract class is marked 'final'">, InGroup<AbstractFinalClass>;
// C++11 attributes
def err_repeat_attribute : Error<"'%0' attribute cannot be repeated">;
@@ -1408,7 +1448,17 @@ def err_for_range_member_begin_end_mismatch : Error<
"range type %0 has '%select{begin|end}1' member but no '%select{end|begin}1' member">;
def err_for_range_begin_end_types_differ : Error<
"'begin' and 'end' must return the same type (got %0 and %1)">;
-def note_for_range_type : Note<"range has type %0">;
+def note_in_for_range : Note<
+ "when looking up '%select{begin|end}0' function for range expression "
+ "of type %1">;
+def err_for_range_invalid : Error<
+ "invalid range expression of type %0; no viable '%select{begin|end}1' "
+ "function available">;
+def err_for_range_dereference : Error<
+ "invalid range expression of type %0; did you mean to dereference it "
+ "with '*'?">;
+def note_for_range_invalid_iterator : Note<
+ "in implicit call to 'operator%select{!=|*|++}0' for iterator of type %1">;
def note_for_range_begin_end : Note<
"selected '%select{begin|end}0' %select{function|template }1%2 with iterator type %3">;
@@ -1420,7 +1470,7 @@ def err_invalid_constexpr : Error<
"%select{function parameter|typedef|non-static data member}0 "
"cannot be constexpr">;
def err_constexpr_tag : Error<
- "%select{class|struct|union|enum}0 cannot be marked constexpr">;
+ "%select{class|struct|interface|union|enum}0 cannot be marked constexpr">;
def err_constexpr_dtor : Error<"destructor cannot be marked constexpr">;
def err_constexpr_no_declarators : Error<
"constexpr can only be used in variable and function declarations">;
@@ -1438,11 +1488,12 @@ def err_constexpr_redecl_mismatch : Error<
def err_constexpr_virtual : Error<"virtual function cannot be constexpr">;
def err_constexpr_virtual_base : Error<
"constexpr %select{member function|constructor}0 not allowed in "
- "%select{class|struct}1 with virtual base %plural{1:class|:classes}2">;
+ "%select{struct|interface|class}1 with virtual base "
+ "%plural{1:class|:classes}2">;
def note_non_literal_incomplete : Note<
"incomplete type %0 is not a literal type">;
-def note_non_literal_virtual_base : Note<"%select{class|struct}0 with virtual "
- "base %plural{1:class|:classes}1 is not a literal type">;
+def note_non_literal_virtual_base : Note<"%select{struct|interface|class}0 "
+ "with virtual base %plural{1:class|:classes}1 is not a literal type">;
def note_constexpr_virtual_base_here : Note<"virtual base class declared here">;
def err_constexpr_non_literal_return : Error<
"constexpr function's return type %0 is not a literal type">;
@@ -1490,10 +1541,10 @@ def note_non_literal_user_provided_dtor : Note<
def note_non_literal_nontrivial_dtor : Note<
"%0 is not literal because it has a non-trivial destructor">;
def warn_private_extern : Warning<
- "Use of __private_extern__ on tentative definition has unexpected"
- " behaviour - use __attribute__((visibility(\"hidden\"))) on extern"
- " declaration or definition instead">,
- InGroup<PrivateExtern>, DefaultIgnore;
+ "use of __private_extern__ on a declaration may not produce external symbol "
+ "private to the linkage unit and is deprecated">, InGroup<PrivateExtern>;
+def note_private_extern : Note<
+ "use __attribute__((visibility(\"hidden\"))) attribute instead">;
// C++11 char16_t/char32_t
def warn_cxx98_compat_unicode_type : Warning<
@@ -1681,7 +1732,9 @@ def warn_attribute_invalid_on_stmt : Warning<
"attribute %0 cannot be specified on a statement">,
InGroup<IgnoredAttributes>;
def warn_declspec_attribute_ignored : Warning<
- "attribute %0 is ignored, place it after \"%select{class|struct|union|enum}1\" to apply attribute to type declaration">, InGroup<IgnoredAttributes>;
+ "attribute %0 is ignored, place it after "
+ "\"%select{class|struct|union|interface|enum}1\" to apply attribute to "
+ "type declaration">, InGroup<IgnoredAttributes>;
def warn_attribute_precede_definition : Warning<
"attribute declaration must precede definition">,
InGroup<IgnoredAttributes>;
@@ -1699,7 +1752,8 @@ def warn_nsobject_attribute : Warning<
"__attribute ((NSObject)) may be put on a typedef only, "
"attribute is ignored">, InGroup<NSobjectAttribute>;
def warn_attribute_weak_on_local : Warning<
- "__weak attribute cannot be specified on an automatic variable">,
+ "__weak attribute cannot be specified on an automatic variable when ARC "
+ "is not enabled">,
InGroup<IgnoredAttributes>;
def warn_weak_identifier_undeclared : Warning<
"weak identifier %0 never declared">;
@@ -1753,6 +1807,8 @@ def err_attribute_vecreturn_only_pod_record : Error<
def err_cconv_change : Error<
"function declared '%0' here was previously declared "
"%select{'%2'|without calling convention}1">;
+def warn_cconv_ignored : Warning<
+ "calling convention %0 ignored for this target">, InGroup<IgnoredAttributes>;
def err_cconv_knr : Error<
"function with no prototype cannot use %0 calling convention">;
def err_cconv_varargs : Error<
@@ -1840,14 +1896,6 @@ def warn_lock_exclusive_and_shared : Warning<
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def note_lock_exclusive_and_shared : Note<
"the other lock of mutex '%0' is here">;
-def warn_variable_requires_lock : Warning<
- "%select{reading|writing}2 variable '%0' requires locking "
- "%select{'%1'|'%1' exclusively}2">,
- InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
-def warn_var_deref_requires_lock : Warning<
- "%select{reading|writing}2 the value pointed to by '%0' requires locking "
- "%select{'%1'|'%1' exclusively}2">,
- InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_variable_requires_any_lock : Warning<
"%select{reading|writing}1 variable '%0' requires locking "
"%select{any mutex|any mutex exclusively}1">,
@@ -1856,9 +1904,6 @@ def warn_var_deref_requires_any_lock : Warning<
"%select{reading|writing}1 the value pointed to by '%0' requires locking "
"%select{any mutex|any mutex exclusively}1">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
-def warn_fun_requires_lock : Warning<
- "calling function '%0' requires %select{shared|exclusive}2 lock on '%1'">,
- InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_fun_excludes_mutex : Warning<
"cannot call function '%0' while mutex '%1' is locked">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
@@ -1866,6 +1911,32 @@ def warn_cannot_resolve_lock : Warning<
"cannot resolve lock expression">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
+// Imprecise thread safety warnings
+def warn_variable_requires_lock : Warning<
+ "%select{reading|writing}2 variable '%0' requires locking "
+ "%select{'%1'|'%1' exclusively}2">,
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
+def warn_var_deref_requires_lock : Warning<
+ "%select{reading|writing}2 the value pointed to by '%0' requires locking "
+ "%select{'%1'|'%1' exclusively}2">,
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
+def warn_fun_requires_lock : Warning<
+ "calling function '%0' requires %select{shared|exclusive}2 lock on '%1'">,
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
+
+// Precise thread safety warnings
+def warn_variable_requires_lock_precise : Warning<
+ "%select{reading|writing}2 variable '%0' requires locking "
+ "%select{'%1'|'%1' exclusively}2">,
+ InGroup<ThreadSafetyPrecise>, DefaultIgnore;
+def warn_var_deref_requires_lock_precise : Warning<
+ "%select{reading|writing}2 the value pointed to by '%0' requires locking "
+ "%select{'%1'|'%1' exclusively}2">,
+ InGroup<ThreadSafetyPrecise>, DefaultIgnore;
+def warn_fun_requires_lock_precise : Warning<
+ "calling function '%0' requires %select{shared|exclusive}2 lock on '%1'">,
+ InGroup<ThreadSafetyPrecise>, DefaultIgnore;
+def note_found_mutex_near_match : Note<"found near match '%0'">;
def warn_impcast_vector_scalar : Warning<
"implicit conversion turns vector to scalar: %0 to %1">,
@@ -1890,7 +1961,7 @@ def warn_impcast_integer_precision : Warning<
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_integer_64_32 : Warning<
"implicit conversion loses integer precision: %0 to %1">,
- InGroup<DiagGroup<"shorten-64-to-32">>, DefaultIgnore;
+ InGroup<Shorten64To32>, DefaultIgnore;
def warn_impcast_integer_precision_constant : Warning<
"implicit conversion from %2 to %3 changes value from %0 to %1">,
InGroup<ConstantConversion>;
@@ -1905,7 +1976,7 @@ def warn_impcast_string_literal_to_bool : Warning<
InGroup<StringConversion>, DefaultIgnore;
def warn_impcast_different_enum_types : Warning<
"implicit conversion from enumeration type %0 to different enumeration type "
- "%1">, InGroup<DiagGroup<"conversion">>;
+ "%1">, InGroup<EnumConversion>;
def warn_impcast_bool_to_null_pointer : Warning<
"initialization of pointer of type %0 to null from a constant boolean "
"expression">, InGroup<BoolConversion>;
@@ -1915,6 +1986,9 @@ def warn_non_literal_null_pointer : Warning<
def warn_impcast_null_pointer_to_integer : Warning<
"implicit conversion of NULL constant to %0">,
InGroup<NullConversion>;
+def warn_impcast_floating_point_to_bool : Warning<
+ "implicit conversion turns floating-point number into bool: %0 to %1">,
+ InGroup<ImplicitConversionFloatingPointToBool>;
def warn_impcast_function_to_bool : Warning<
"address of function %q0 will always evaluate to 'true'">,
InGroup<BoolConversion>;
@@ -1927,6 +2001,10 @@ def warn_cast_align : Warning<
"cast from %0 to %1 increases required alignment from %2 to %3">,
InGroup<CastAlign>, DefaultIgnore;
+def warn_int_to_pointer_cast : Warning<
+ "cast to %1 from smaller integer type %0">,
+ InGroup<IntToPointerCast>;
+
def warn_attribute_ignored_for_field_of_type : Warning<
"%0 attribute ignored for field of type %1">,
InGroup<IgnoredAttributes>;
@@ -2008,7 +2086,7 @@ def warn_attribute_ibaction: Warning<
def err_iboutletcollection_type : Error<
"invalid type %0 as argument of iboutletcollection attribute">;
def warn_iboutlet_object_type : Warning<
- "%select{ivar|property}2 with %0 attribute must "
+ "%select{instance variable|property}2 with %0 attribute must "
"be an object type (invalid %1)">,
InGroup<DiagGroup<"invalid-iboutlet">>;
def err_attribute_overloadable_not_function : Error<
@@ -2028,6 +2106,12 @@ def warn_ns_attribute_wrong_parameter_type : Warning<
"%0 attribute only applies to %select{Objective-C object|pointer}1 "
"parameters">,
InGroup<IgnoredAttributes>;
+def warn_objc_requires_super_protocol : Warning<
+ "%0 attribute cannot be applied to %select{methods in protocols|dealloc}1">,
+ InGroup<DiagGroup<"requires-super-attribute">>;
+def note_protocol_decl : Note<
+ "protocol is declared here">;
+
def err_ns_bridged_not_interface : Error<
"parameter of 'ns_bridged' attribute does not name an Objective-C class">;
@@ -3147,7 +3231,7 @@ def err_non_thread_thread : Error<
def err_thread_non_thread : Error<
"thread-local declaration of %0 follows non-thread-local declaration">;
def err_redefinition_different_type : Error<
- "redefinition of %0 with a different type">;
+ "redefinition of %0 with a different type%diff{: $ vs $|}1,2">;
def err_redefinition_different_kind : Error<
"redefinition of %0 as different kind of symbol">;
def warn_forward_class_redefinition : Warning<
@@ -3162,8 +3246,8 @@ def err_tag_reference_conflict : Error<
"implicit declaration introduced by elaborated type conflicts with "
"%select{a declaration|a typedef|a type alias|a template}0 of the same name">;
def err_dependent_tag_decl : Error<
- "%select{declaration|definition}0 of %select{struct|union|class|enum}1 "
- "in a dependent scope">;
+ "%select{declaration|definition}0 of "
+ "%select{struct|interface|union|class|enum}1 in a dependent scope">;
def err_tag_definition_of_typedef : Error<
"definition of type %0 conflicts with %select{typedef|type alias}1 of the same name">;
def err_conflicting_types : Error<"conflicting types for %0">;
@@ -3171,15 +3255,16 @@ def err_nested_redefinition : Error<"nested redefinition of %0">;
def err_use_with_wrong_tag : Error<
"use of %0 with tag type that does not match previous declaration">;
def warn_struct_class_tag_mismatch : Warning<
- "%select{struct|class}0%select{| template}1 %2 was previously declared "
- "as a %select{class|struct}0%select{| template}1">,
+ "%select{struct|interface|class}0%select{| template}1 %2 was previously "
+ "declared as a %select{struct|interface|class}3%select{| template}1">,
InGroup<MismatchedTags>, DefaultIgnore;
def warn_struct_class_previous_tag_mismatch : Warning<
- "%2 defined as a %select{struct|class}0%select{| template}1 here but "
- "previously declared as a %select{class|struct}0%select{| template}1">,
+ "%2 defined as %select{a struct|an interface|a class}0%select{| template}1 "
+ "here but previously declared as "
+ "%select{a struct|an interface|a class}3%select{| template}1">,
InGroup<MismatchedTags>, DefaultIgnore;
def note_struct_class_suggestion : Note<
- "did you mean %select{struct|class}0 here?">;
+ "did you mean %select{struct|interface|class}0 here?">;
def ext_forward_ref_enum : Extension<
"ISO C forbids forward references to 'enum' types">;
def err_forward_ref_enum : Error<
@@ -3192,9 +3277,9 @@ def ext_forward_ref_enum_def : Extension<
def err_redefinition_of_enumerator : Error<"redefinition of enumerator %0">;
def err_duplicate_member : Error<"duplicate member %0">;
def err_misplaced_ivar : Error<
- "ivars may not be placed in %select{categories|class extension}0">;
+ "instance variables may not be placed in %select{categories|class extension}0">;
def warn_ivars_in_interface : Warning<
- "declaration of ivars in the interface is deprecated">,
+ "declaration of instance variables in the interface is deprecated">,
InGroup<DiagGroup<"objc-interface-ivars">>, DefaultIgnore;
def ext_enum_value_not_int : Extension<
"ISO C restricts enumerator values to range of 'int' (%0 is too "
@@ -3223,11 +3308,13 @@ def warn_array_new_too_large : Warning<"array is too large (%0 elements)">,
// -Wpadded, -Wpacked
def warn_padded_struct_field : Warning<
- "padding %select{struct|class}0 %1 with %2 %select{byte|bit}3%select{|s}4 "
- "to align %5">, InGroup<Padded>, DefaultIgnore;
+ "padding %select{struct|interface|class}0 %1 with %2 "
+ "%select{byte|bit}3%select{|s}4 to align %5">,
+ InGroup<Padded>, DefaultIgnore;
def warn_padded_struct_anon_field : Warning<
- "padding %select{struct|class}0 %1 with %2 %select{byte|bit}3%select{|s}4 "
- "to align anonymous bit-field">, InGroup<Padded>, DefaultIgnore;
+ "padding %select{struct|interface|class}0 %1 with %2 "
+ "%select{byte|bit}3%select{|s}4 to align anonymous bit-field">,
+ InGroup<Padded>, DefaultIgnore;
def warn_padded_struct_size : Warning<
"padding size of %0 with %1 %select{byte|bit}2%select{|s}3 "
"to alignment boundary">, InGroup<Padded>, DefaultIgnore;
@@ -3247,7 +3334,7 @@ def err_typecheck_invalid_restrict_not_pointer_noarg : Error<
def err_typecheck_invalid_restrict_invalid_pointee : Error<
"pointer to function type %0 may not be 'restrict' qualified">;
def ext_typecheck_zero_array_size : Extension<
- "zero size arrays are an extension">;
+ "zero size arrays are an extension">, InGroup<ZeroLengthArray>;
def err_typecheck_zero_array_size : Error<
"zero-length arrays are not permitted in C++">;
def warn_typecheck_zero_static_array_size : Warning<
@@ -3357,7 +3444,7 @@ def warn_anon_bitfield_width_exceeds_type_size : Warning<
def warn_missing_braces : Warning<
"suggest braces around initialization of subobject">,
- InGroup<DiagGroup<"missing-braces">>, DefaultIgnore;
+ InGroup<MissingBraces>, DefaultIgnore;
def err_missing_braces : Error<
"cannot omit braces around initialization of subobject when using direct "
"list-initialization">;
@@ -3475,14 +3562,16 @@ def ext_flexible_array_in_array : Extension<
def err_flexible_array_init : Error<
"initialization of flexible array member is not allowed">;
def ext_flexible_array_empty_aggregate_ms : Extension<
- "flexible array member %0 in otherwise empty %select{struct|class}1 "
- "is a Microsoft extension">, InGroup<Microsoft>;
+ "flexible array member %0 in otherwise empty "
+ "%select{struct|interface|union|class|enum}1 is a Microsoft extension">,
+ InGroup<Microsoft>;
def ext_flexible_array_union_ms : Extension<
"flexible array member %0 in a union is a Microsoft extension">,
InGroup<Microsoft>;
def ext_flexible_array_empty_aggregate_gnu : Extension<
- "flexible array member %0 in otherwise empty %select{struct|class}1 "
- "is a GNU extension">, InGroup<GNU>;
+ "flexible array member %0 in otherwise empty "
+ "%select{struct|interface|union|class|enum}1 is a GNU extension">,
+ InGroup<GNU>;
def ext_flexible_array_union_gnu : Extension<
"flexible array member %0 in a union is a GNU extension">, InGroup<GNU>;
@@ -3500,7 +3589,7 @@ def err_arc_weak_unavailable_assign : Error<
"assignment of a weak-unavailable object to a __weak object">;
def err_arc_weak_unavailable_property : Error<
"synthesis of a weak-unavailable property is disallowed "
- "because it requires synthesis of an ivar of the __weak object">;
+ "because it requires synthesis of an instance variable of the __weak object">;
def err_arc_convesion_of_weak_unavailable : Error<
"%select{implicit conversion|cast}0 of weak-unavailable object of type %1 to"
" a __weak object of type %2">;
@@ -3530,6 +3619,9 @@ def err_arc_illegal_selector : Error<
"ARC forbids use of %0 in a @selector">;
def err_arc_illegal_method_def : Error<
"ARC forbids implementation of %0">;
+def warn_arc_strong_pointer_objc_pointer : Warning<
+ "method parameter of type %0 with no explicit ownership">,
+ InGroup<DiagGroup<"explicit-ownership-type">>, DefaultIgnore;
} // end "ARC Restrictions" category
@@ -3552,11 +3644,6 @@ def err_typecheck_arc_assign_self_class_method : Error<
def err_typecheck_arr_assign_enumeration : Error<
"fast enumeration variables can't be modified in ARC by default; "
"declare the variable __strong to allow this">;
-def warn_arc_non_pod_class_with_object_member : Warning<
- "%0 cannot be shared between ARC and non-ARC "
- "code; add a copy constructor, a copy assignment operator, and a destructor "
- "to make it ABI-compatible">, InGroup<AutomaticReferenceCountingABI>,
- DefaultIgnore;
def warn_arc_retained_assign : Warning<
"assigning retained object to %select{weak|unsafe_unretained}0 "
"%select{property|variable}1"
@@ -3566,19 +3653,10 @@ def warn_arc_retained_property_assign : Warning<
"assigning retained object to unsafe property"
"; object will be released after assignment">,
InGroup<ARCUnsafeRetainedAssign>;
-def warn_arc_trivial_member_function_with_object_member : Warning<
- "%0 cannot be shared between ARC and non-ARC "
- "code; add a non-trivial %select{copy constructor|copy assignment operator|"
- "destructor}1 to make it ABI-compatible">,
- InGroup<AutomaticReferenceCountingABI>, DefaultIgnore;
def err_arc_new_array_without_ownership : Error<
"'new' cannot allocate an array of %0 with no explicit ownership">;
-def warn_err_new_delete_object_array : Warning<
- "%select{allocating|destroying}0 an array of %1; this array must not "
- "%select{be deleted in|have been allocated from}0 non-ARC code">,
- InGroup<AutomaticReferenceCountingABI>, DefaultIgnore;
def err_arc_autoreleasing_var : Error<
- "%select{__block variables|global variables|fields|ivars}0 cannot have "
+ "%select{__block variables|global variables|fields|instance variables}0 cannot have "
"__autoreleasing ownership">;
def err_arc_autoreleasing_capture : Error<
"cannot capture __autoreleasing variable in a "
@@ -3635,10 +3713,10 @@ def warn_arc_object_memaccess : Warning<
let CategoryName = "ARC and @properties" in {
def err_arc_strong_property_ownership : Error<
- "existing ivar %1 for strong property %0 may not be "
+ "existing instance variable %1 for strong property %0 may not be "
"%select{|__unsafe_unretained||__weak}2">;
def err_arc_assign_property_ownership : Error<
- "existing ivar %1 for property %0 with %select{unsafe_unretained| assign}2 "
+ "existing instance variable %1 for property %0 with %select{unsafe_unretained| assign}2 "
"attribute must be __unsafe_unretained">;
def err_arc_inconsistent_property_ownership : Error<
"%select{|unsafe_unretained|strong|weak}1 property %0 may not also be "
@@ -3769,16 +3847,14 @@ def warn_precedence_bitwise_rel : Warning<
InGroup<Parentheses>;
def note_precedence_bitwise_first : Note<
"place parentheses around the %0 expression to evaluate it first">;
-def note_precedence_bitwise_silence : Note<
- "place parentheses around the %0 expression to silence this warning">;
+def note_precedence_silence : Note<
+ "place parentheses around the '%0' expression to silence this warning">;
def warn_precedence_conditional : Warning<
"operator '?:' has lower precedence than '%0'; '%0' will be evaluated first">,
InGroup<Parentheses>;
def note_precedence_conditional_first : Note<
"place parentheses around the '?:' expression to evaluate it first">;
-def note_precedence_conditional_silence : Note<
- "place parentheses around the '%0' expression to silence this warning">;
def warn_logical_instead_of_bitwise : Warning<
"use of logical '%0' with constant operand">,
@@ -3790,13 +3866,13 @@ def note_logical_instead_of_bitwise_remove_constant : Note<
def warn_bitwise_and_in_bitwise_or : Warning<
"'&' within '|'">, InGroup<BitwiseOpParentheses>;
-def note_bitwise_and_in_bitwise_or_silence : Note<
- "place parentheses around the '&' expression to silence this warning">;
def warn_logical_and_in_logical_or : Warning<
"'&&' within '||'">, InGroup<LogicalOpParentheses>;
-def note_logical_and_in_logical_or_silence : Note<
- "place parentheses around the '&&' expression to silence this warning">;
+
+def warn_addition_in_bitshift : Warning<
+ "operator '%0' has lower precedence than '%1'; "
+ "'%1' will be evaluated first">, InGroup<ShiftOpParentheses>;
def warn_self_assignment : Warning<
"explicitly assigning a variable of type %0 to itself">,
@@ -3899,6 +3975,8 @@ def ext_out_of_line_declaration : ExtWarn<
"out-of-line declaration of a member must be a definition">,
InGroup<OutOfLineDeclaration>, DefaultError;
def warn_member_extra_qualification : Warning<
+ "extra qualification on member %0">, InGroup<Microsoft>;
+def err_member_extra_qualification : Error<
"extra qualification on member %0">;
def err_member_qualification : Error<
"non-friend class member %0 cannot have a qualified name">;
@@ -4016,7 +4094,8 @@ def ext_typecheck_comparison_of_pointer_integer : ExtWarn<
def err_typecheck_comparison_of_pointer_integer : Error<
"comparison between pointer and integer (%0 and %1)">;
def ext_typecheck_comparison_of_distinct_pointers : ExtWarn<
- "comparison of distinct pointer types%diff{ ($ and $)|}0,1">;
+ "comparison of distinct pointer types%diff{ ($ and $)|}0,1">,
+ InGroup<CompareDistinctPointerType>;
def ext_typecheck_cond_incompatible_operands : ExtWarn<
"incompatible operand types (%0 and %1)">;
def err_cond_voidptr_arc : Error <
@@ -4026,7 +4105,7 @@ def err_typecheck_comparison_of_distinct_pointers : Error<
"comparison of distinct pointer types%diff{ ($ and $)|}0,1">;
def ext_typecheck_comparison_of_distinct_pointers_nonstandard : ExtWarn<
"comparison of distinct pointer types (%0 and %1) uses non-standard "
- "composite pointer type %2">;
+ "composite pointer type %2">, InGroup<CompareDistinctPointerType>;
def err_typecheck_assign_const : Error<"read-only variable is not assignable">;
def err_stmtexpr_file_scope : Error<
"statement expression not allowed at file scope">;
@@ -4036,6 +4115,9 @@ def warn_mixed_sign_comparison : Warning<
def warn_lunsigned_always_true_comparison : Warning<
"comparison of unsigned%select{| enum}2 expression %0 is always %1">,
InGroup<TautologicalCompare>;
+def warn_out_of_range_compare : Warning<
+ "comparison of constant %0 with expression of type %1 is always "
+ "%select{false|true}2">, InGroup<TautologicalOutOfRangeCompare>;
def warn_runsigned_always_true_comparison : Warning<
"comparison of %0 unsigned%select{| enum}2 expression is always %1">,
InGroup<TautologicalCompare>;
@@ -4169,7 +4251,7 @@ def err_nogetter_property_incdec : Error<
def error_no_subobject_property_setting : Error<
"expression is not assignable">;
def err_qualified_objc_access : Error<
- "%select{property|ivar}0 access cannot be qualified with '%1'">;
+ "%select{property|instance variable}0 access cannot be qualified with '%1'">;
def ext_freestanding_complex : Extension<
"complex numbers are an extension in a freestanding C99 implementation">;
@@ -4565,7 +4647,7 @@ def err_invalid_declarator_global_scope : Error<
def err_invalid_declarator_in_function : Error<
"definition or redeclaration of %0 not allowed inside a function">;
def err_not_tag_in_scope : Error<
- "no %select{struct|union|class|enum}0 named %1 in %2">;
+ "no %select{struct|interface|union|class|enum}0 named %1 in %2">;
def err_no_typeid_with_fno_rtti : Error<
"cannot use typeid with -fno-rtti">;
@@ -4888,6 +4970,8 @@ def note_callee_decl : Note<
"%0 declared here">;
def note_defined_here : Note<"%0 defined here">;
+def err_builtin_fn_use : Error<"builtin functions must be directly called">;
+
def warn_call_wrong_number_of_arguments : Warning<
"too %select{few|many}0 arguments in call to %1">;
def err_atomic_builtin_must_be_pointer : Error<
@@ -4901,6 +4985,9 @@ def err_atomic_builtin_pointer_size : Error<
def err_atomic_op_needs_atomic : Error<
"first argument to atomic operation must be a pointer to _Atomic "
"type (%0 invalid)">;
+def err_atomic_op_needs_non_const_atomic : Error<
+ "first argument to atomic operation must be a pointer to non-const _Atomic "
+ "type (%0 invalid)">;
def err_atomic_op_needs_trivial_copy : Error<
"first argument to atomic operation must be a pointer to a trivially-copyable"
" type (%0 invalid)">;
@@ -4963,7 +5050,9 @@ def err_typecheck_cast_to_incomplete : Error<
"cast to incomplete type %0">;
def ext_typecheck_cast_nonscalar : Extension<
"C99 forbids casting nonscalar type %0 to the same type">;
-def ext_typecheck_cast_to_union : Extension<"C99 forbids casts to union type">;
+def ext_typecheck_cast_to_union : Extension<
+ "cast to union type is a GNU extension">,
+ InGroup<GNU>;
def err_typecheck_cast_to_union_no_type : Error<
"cast to union type from type %0 not present in union">;
def err_cast_pointer_from_non_pointer_int : Error<
@@ -4971,6 +5060,9 @@ def err_cast_pointer_from_non_pointer_int : Error<
def warn_cast_pointer_from_sel : Warning<
"cast of type %0 to %1 is deprecated; use sel_getName instead">,
InGroup<SelTypeCast>;
+def warn_bad_function_cast : Warning<
+ "cast from function call of type %0 to non-matching type %1">,
+ InGroup<BadFunctionCast>, DefaultIgnore;
def err_cast_pointer_to_non_pointer_int : Error<
"pointer cannot be cast to type %0">;
def err_typecheck_expect_scalar_operand : Error<
@@ -5048,19 +5140,21 @@ let CategoryName = "Inline Assembly Issue" in {
"unsupported inline asm: input with type "
"%diff{$ matching output with type $|}0,1">;
def err_asm_unknown_register_name : Error<"unknown register name '%0' in asm">;
- def warn_asm_label_on_auto_decl : Warning<
- "ignored asm label '%0' on automatic variable">;
+ def err_asm_empty : Error<"__asm used with no assembly instructions">;
def err_invalid_asm_cast_lvalue : Error<
"invalid use of a cast in a inline asm context requiring an l-value: "
"remove the cast or build with -fheinous-gnu-extensions">;
+ def err_inline_ms_asm_parsing : Error<"%0">;
+ def warn_asm_label_on_auto_decl : Warning<
+ "ignored asm label '%0' on automatic variable">;
def warn_invalid_asm_cast_lvalue : Warning<
- "invalid use of a cast in a inline asm context requiring an l-value: "
+ "invalid use of a cast in an inline asm context requiring an l-value: "
"accepted due to -fheinous-gnu-extensions, but clang may remove support "
"for this in the future">;
-
- def warn_unsupported_msasm : ExtWarn<
- "MS-style inline assembly is not supported">, InGroup<Microsoft>;
+ def warn_asm_mismatched_size_modifier : Warning<
+ "the size being stored is truncated, use a modifier to specify the size">,
+ InGroup<ASMOperandWidths>;
}
let CategoryName = "Semantic Issue" in {
@@ -5125,7 +5219,7 @@ def err_in_class_initializer_references_def_ctor : Error<
def ext_in_class_initializer_non_constant : Extension<
"in-class initializer for static data member is not a constant expression; "
- "folding it to a constant is a GNU extension">;
+ "folding it to a constant is a GNU extension">, InGroup<GNU>;
// C++ anonymous unions and GNU anonymous structs/unions
def ext_anonymous_union : Extension<
@@ -5181,6 +5275,8 @@ def err_static_data_member_not_allowed_in_local_class : Error<
def err_base_clause_on_union : Error<"unions cannot have base classes">;
def err_base_must_be_class : Error<"base specifier must name a class">;
def err_union_as_base_class : Error<"unions cannot be base classes">;
+def err_circular_inheritance : Error<
+ "circular inheritance between %0 and %1">;
def err_incomplete_base_class : Error<"base class has incomplete type">;
def err_duplicate_base_class : Error<
"base class %0 specified more than once as a direct base class">;
@@ -5343,6 +5439,10 @@ def err_out_of_line_default_deletes : Error<
"defaulting this %select{default constructor|copy constructor|move "
"constructor|copy assignment operator|move assignment operator|destructor}0 "
"would delete it after its first declaration">;
+def ext_implicit_exception_spec_mismatch : ExtWarn<
+ "function previously declared with an %select{explicit|implicit}0 exception "
+ "specification redeclared with an %select{implicit|explicit}0 exception "
+ "specification">, InGroup<DiagGroup<"implicit-exception-spec-mismatch">>;
def warn_ptr_arith_precedes_bounds : Warning<
"the pointer decremented by %0 refers before the beginning of the array">,
@@ -5376,6 +5476,10 @@ def warn_scanf_nonzero_width : Warning<
def warn_printf_conversion_argument_type_mismatch : Warning<
"format specifies type %0 but the argument has type %1">,
InGroup<Format>;
+def warn_format_argument_needs_cast : Warning<
+ "values of type '%0' should not be used as format arguments; add an explicit "
+ "cast to %1 instead">,
+ InGroup<Format>;
def warn_printf_positional_arg_exceeds_data_args : Warning <
"data argument position '%0' exceeds the number of data arguments (%1)">,
InGroup<Format>;
@@ -5400,7 +5504,8 @@ def warn_format_string_is_wide_literal : Warning<
def warn_printf_format_string_contains_null_char : Warning<
"format string contains '\\0' within the string body">, InGroup<Format>;
def warn_printf_asterisk_missing_arg : Warning<
- "'%select{*|.*}0' specified field %select{width|precision}0 is missing a matching 'int' argument">;
+ "'%select{*|.*}0' specified field %select{width|precision}0 is missing a matching 'int' argument">,
+ InGroup<Format>;
def warn_printf_asterisk_wrong_type : Warning<
"field %select{width|precision}0 should have type %1, but argument has type %2">,
InGroup<Format>;
@@ -5428,6 +5533,7 @@ def warn_scanf_scanlist_incomplete : Warning<
"no closing ']' for '%%[' in scanf format string">,
InGroup<Format>;
def note_format_string_defined : Note<"format string is defined here">;
+def note_format_fix_specifier : Note<"did you mean to use '%0'?">;
def note_printf_c_str: Note<"did you mean to call the %0 method?">;
def warn_null_arg : Warning<
@@ -5591,7 +5697,7 @@ def warn_unannotated_fallthrough_per_function : Warning<
"unannotated fall-through between switch labels in partly-annotated "
"function">, InGroup<ImplicitFallthroughPerFunction>, DefaultIgnore;
def note_insert_fallthrough_fixit : Note<
- "insert '[[clang::fallthrough]];' to silence this warning">;
+ "insert '%0;' to silence this warning">;
def note_insert_break_fixit : Note<
"insert 'break;' to avoid fall-through">;
def err_fallthrough_attr_wrong_target : Error<
@@ -5827,7 +5933,7 @@ def err_typecheck_member_reference_ivar_suggest : Error<
def err_property_not_found_suggest : Error<
"property %0 not found on object of type %1; did you mean %2?">;
def err_ivar_access_using_property_syntax_suggest : Error<
- "property %0 not found on object of type %1; did you mean to access ivar %2?">;
+ "property %0 not found on object of type %1; did you mean to access instance variable %2?">;
def err_property_found_suggest : Error<
"property %0 found on object of type %1; did you mean to access "
"it with the \".\" operator?">;
@@ -5914,7 +6020,7 @@ def err_module_private_local : Error<
"%select{local variable|parameter|typedef}0 %1 cannot be declared "
"__module_private__">;
def err_module_private_local_class : Error<
- "local %select{struct|union|class|enum}0 cannot be declared "
+ "local %select{struct|interface|union|class|enum}0 cannot be declared "
"__module_private__">;
def err_module_private_definition : Error<
"definition of %0 must be imported before it is required">;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
index a440e80..e9df09d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
@@ -25,9 +25,13 @@ def err_fe_pch_file_modified : Error<
def err_fe_pch_file_overridden : Error<
"file '%0' from the precompiled header has been overridden">;
-def warn_pch_target_triple : Error<
- "PCH file was compiled for the target '%0' but the current translation "
- "unit is being compiled for target '%1'">;
+def err_pch_targetopt_mismatch : Error<
+ "PCH file was compiled for the %0 '%1' but the current translation "
+ "unit is being compiled for target '%2'">;
+def err_pch_targetopt_feature_mismatch : Error<
+ "%select{AST file|current translation unit}0 was compiled with the target "
+ "feature'%1' but the %select{current translation unit is|AST file was}0 "
+ "not">;
def err_pch_langopt_mismatch : Error<"%0 was %select{disabled|enabled}1 in "
"PCH file but is currently %select{disabled|enabled}2">;
def err_pch_langopt_value_mismatch : Error<
@@ -41,21 +45,24 @@ def warn_pch_different_branch : Error<
"PCH file built from a different branch (%0) than the compiler (%1)">;
def err_pch_with_compiler_errors : Error<
"PCH file contains compiler errors">;
-def warn_cmdline_conflicting_macro_def : Error<
- "definition of the macro '%0' conflicts with the definition used to "
- "build the precompiled header">;
-def note_pch_macro_defined_as : Note<
- "definition of macro '%0' in the precompiled header">;
-def warn_cmdline_missing_macro_defs : Warning<
- "macro definitions used to build the precompiled header are missing">;
-def note_using_macro_def_from_pch : Note<
- "using this macro definition from precompiled header">;
-def warn_macro_name_used_in_pch : Error<
- "definition of macro %0 conflicts with an identifier used in the "
- "precompiled header">;
-def warn_pch_compiler_options_mismatch : Error<
- "compiler options used when building the precompiled header differ from "
- "the options used when using the precompiled header">;
+
+
+def err_pch_macro_def_undef : Error<
+ "macro '%0' was %select{defined|undef'd}1 in the precompiled header but "
+ "%select{undef'd|defined}1 on the command line">;
+def err_pch_macro_def_conflict : Error<
+ "definition of macro '%0' differs between the precompiled header ('%1') "
+ "and the command line ('%2')">;
+def err_pch_include_opt_missing : Error<
+ "precompiled header depends on '%select{-include|-imacros}0 %1' option "
+ "that is missing from the command line">;
+def err_pch_include_opt_conflict : Error<
+ "precompiled header option '%select{-include|-imacros}0 %1' conflicts with "
+ "corresponding option '%select{-include|-imacros}0 %2' on command line">;
+def err_pch_undef : Error<
+ "%select{command line contains|precompiled header was built with}0 "
+ "'-undef' but %select{precompiled header was not built with it|"
+ "it is not present on the command line}0">;
def err_not_a_pch_file : Error<
"'%0' does not appear to be a precompiled header file">, DefaultFatal;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
index b00f2b7..b2f578d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
@@ -103,6 +103,10 @@ public:
bool operator<(const FileEntry &RHS) const {
return Device < RHS.Device || (Device == RHS.Device && Inode < RHS.Inode);
}
+
+ /// \brief Check whether the file is a named pipe (and thus can't be opened by
+ /// the native FileManager methods).
+ bool isNamedPipe() const;
};
/// \brief Implements support for file system lookup, file system caching,
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
index dc6acda..76242ec 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
@@ -21,7 +21,6 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <string>
@@ -54,6 +53,7 @@ class IdentifierInfo {
// are for builtins.
unsigned ObjCOrBuiltinID :11;
bool HasMacro : 1; // True if there is a #define for this.
+ bool HadMacro : 1; // True if there was a #define for this.
bool IsExtension : 1; // True if identifier is a lang extension.
bool IsCXX11CompatKeyword : 1; // True if identifier is a keyword in C++11.
bool IsPoisoned : 1; // True if identifier is poisoned.
@@ -70,13 +70,13 @@ class IdentifierInfo {
// stored externally.
bool IsModulesImport : 1; // True if this is the 'import' contextual
// keyword.
- // 1 bit left in 32-bit word.
-
+ // 32-bit word is filled.
+
void *FETokenInfo; // Managed by the language front-end.
llvm::StringMapEntry<IdentifierInfo*> *Entry;
- IdentifierInfo(const IdentifierInfo&); // NONCOPYABLE.
- void operator=(const IdentifierInfo&); // NONASSIGNABLE.
+ IdentifierInfo(const IdentifierInfo&) LLVM_DELETED_FUNCTION;
+ void operator=(const IdentifierInfo&) LLVM_DELETED_FUNCTION;
friend class IdentifierTable;
@@ -133,10 +133,21 @@ public:
if (HasMacro == Val) return;
HasMacro = Val;
- if (Val)
+ if (Val) {
NeedsHandleIdentifier = 1;
- else
+ HadMacro = true;
+ } else {
RecomputeNeedsHandleIdentifier();
+ }
+ }
+ /// \brief Returns true if this identifier was \#defined to some value at any
+ /// point. In this case there should be an entry for the identifier in the
+ /// macro history table in Preprocessor.
+ bool hadMacroDefinition() const {
+ return HadMacro;
+ }
+ void setHadMacroDefinition(bool Val) {
+ HadMacro = Val;
}
/// getTokenID - If this is a source-language token (e.g. 'for'), this API
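
The new HadMacro bit added in the hunk above is deliberately sticky: setHasMacroDefinition(true) latches it, and a later setHasMacroDefinition(false) clears only HasMacro, so hadMacroDefinition() keeps reporting that a definition once existed and the Preprocessor's macro history table can be consulted. A sketch of the observable behavior; 'Idents' is an assumed IdentifierTable and the lookup is elided setup:

IdentifierInfo &II = Idents.get("MY_MACRO"); // assumed table lookup
II.setHasMacroDefinition(true);              // as if: #define MY_MACRO 1
II.setHasMacroDefinition(false);             // as if: #undef MY_MACRO
assert(!II.hasMacroDefinition());            // no active definition...
assert(II.hadMacroDefinition());             // ...but there was one
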
@@ -346,8 +357,8 @@ public:
/// actual functionality.
class IdentifierIterator {
private:
- IdentifierIterator(const IdentifierIterator&); // Do not implement
- IdentifierIterator &operator=(const IdentifierIterator&); // Do not implement
+ IdentifierIterator(const IdentifierIterator &) LLVM_DELETED_FUNCTION;
+ void operator=(const IdentifierIterator &) LLVM_DELETED_FUNCTION;
protected:
IdentifierIterator() { }
@@ -695,8 +706,8 @@ public:
/// multi-keyword caching.
class SelectorTable {
void *Impl; // Actually a SelectorTableImpl
- SelectorTable(const SelectorTable&); // DISABLED: DO NOT IMPLEMENT
- void operator=(const SelectorTable&); // DISABLED: DO NOT IMPLEMENT
+ SelectorTable(const SelectorTable &) LLVM_DELETED_FUNCTION;
+ void operator=(const SelectorTable &) LLVM_DELETED_FUNCTION;
public:
SelectorTable();
~SelectorTable();
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
index 76de1e8..571b9d2 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
@@ -48,15 +48,19 @@ LANGOPT(MicrosoftMode , 1, 0, "Microsoft compatibility mode")
LANGOPT(Borland , 1, 0, "Borland extensions")
LANGOPT(CPlusPlus , 1, 0, "C++")
LANGOPT(CPlusPlus0x , 1, 0, "C++0x")
+LANGOPT(CPlusPlus1y , 1, 0, "C++1y")
LANGOPT(ObjC1 , 1, 0, "Objective-C 1")
LANGOPT(ObjC2 , 1, 0, "Objective-C 2")
BENIGN_LANGOPT(ObjCDefaultSynthProperties , 1, 0,
"Objective-C auto-synthesized properties")
+BENIGN_LANGOPT(EncodeExtendedBlockSig , 1, 0,
+ "Encoding extended block type signature")
BENIGN_LANGOPT(ObjCInferRelatedResultType , 1, 1,
"Objective-C related result type inference")
LANGOPT(Trigraphs , 1, 0,"trigraphs")
-LANGOPT(BCPLComment , 1, 0, "BCPL-style '//' comments")
+LANGOPT(LineComment , 1, 0, "'//' comments")
LANGOPT(Bool , 1, 0, "bool, true, and false keywords")
+LANGOPT(WChar , 1, CPlusPlus, "wchar_t keyword")
BENIGN_LANGOPT(DollarIdents , 1, 1, "'$' in identifiers")
BENIGN_LANGOPT(AsmPreprocessor, 1, 0, "preprocessor in asm mode")
BENIGN_LANGOPT(GNUMode , 1, 1, "GNU extensions")
@@ -116,7 +120,6 @@ LANGOPT(CUDA , 1, 0, "CUDA")
LANGOPT(AssumeSaneOperatorNew , 1, 1, "implicit __attribute__((malloc)) for C++'s new operators")
BENIGN_LANGOPT(ElideConstructors , 1, 1, "C++ copy constructor elision")
-BENIGN_LANGOPT(CatchUndefined , 1, 0, "catching undefined behavior at run time")
BENIGN_LANGOPT(DumpRecordLayouts , 1, 0, "dumping the layout of IRgen'd records")
BENIGN_LANGOPT(DumpRecordLayoutsSimple , 1, 0, "dumping the layout of IRgen'd records in a simple form")
BENIGN_LANGOPT(DumpVTableLayouts , 1, 0, "dumping the layouts of emitted vtables")
@@ -126,8 +129,6 @@ BENIGN_LANGOPT(ParseUnknownAnytype, 1, 0, "__unknown_anytype")
BENIGN_LANGOPT(DebuggerSupport , 1, 0, "debugger support")
BENIGN_LANGOPT(DebuggerCastResultToId, 1, 0, "for 'po' in the debugger, cast the result to id if it is of unknown type")
BENIGN_LANGOPT(DebuggerObjCLiteral , 1, 0, "debugger Objective-C literals and subscripting support")
-BENIGN_LANGOPT(AddressSanitizer , 1, 0, "AddressSanitizer enabled")
-BENIGN_LANGOPT(ThreadSanitizer , 1, 0, "ThreadSanitizer enabled")
BENIGN_LANGOPT(SpellChecking , 1, 1, "spell-checking")
LANGOPT(SinglePrecisionConstants , 1, 0, "treating double-precision floating point constants as single precision constants")
@@ -136,7 +137,7 @@ LANGOPT(DefaultFPContract , 1, 0, "FP_CONTRACT")
LANGOPT(NoBitFieldTypeAlign , 1, 0, "bit-field type alignment")
LANGOPT(HexagonQdsp6Compat , 1, 0, "hexagon-qdsp6 backward compatibility")
LANGOPT(ObjCAutoRefCount , 1, 0, "Objective-C automated reference counting")
-LANGOPT(ObjCRuntimeHasWeak , 1, 0, "__weak support in the ARC runtime")
+LANGOPT(ObjCARCWeak , 1, 0, "__weak support in the ARC runtime")
LANGOPT(FakeAddressSpaceMap , 1, 0, "OpenCL fake address space map")
LANGOPT(MRTD , 1, 0, "-mrtd calling convention")
@@ -163,6 +164,17 @@ VALUE_LANGOPT(MSCVersion, 32, 0,
LANGOPT(ApplePragmaPack, 1, 0, "Apple gcc-compatible #pragma pack handling")
+BENIGN_LANGOPT(EmitMicrosoftInlineAsm , 1, 0,
+ "Enable emission of MS-style inline assembly.")
+
+
+BENIGN_LANGOPT(RetainCommentsFromSystemHeaders, 1, 0, "retain documentation comments from system headers in the AST")
+
+/// Runtime sanitizers.
+#define SANITIZER(NAME, ID) \
+BENIGN_LANGOPT(Sanitize##ID, 1, 0, NAME " sanitizer")
+#include "clang/Basic/Sanitizers.def"
+
#undef LANGOPT
#undef VALUE_LANGOPT
#undef BENIGN_LANGOPT
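
With the SANITIZER macro defined as above, including Sanitizers.def stamps out one benign language option per sanitizer. For instance, the "address" entry expands as follows; the expansion is written out here purely for illustration:

// SANITIZER("address", Address) expands, via the #define above, to:
BENIGN_LANGOPT(SanitizeAddress, 1, 0, "address" " sanitizer")
// i.e. a 1-bit, default-off option described as "address sanitizer".
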
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Module.h b/contrib/llvm/tools/clang/include/clang/Basic/Module.h
index c8027f4..b6b088c 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Module.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Module.h
@@ -21,6 +21,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SetVector.h"
#include <string>
#include <utility>
#include <vector>
@@ -63,11 +64,21 @@ private:
/// \brief A mapping from the submodule name to the index into the
/// \c SubModules vector at which that submodule resides.
llvm::StringMap<unsigned> SubModuleIndex;
+
+ /// \brief The AST file if this is a top-level module which has a
+ /// corresponding serialized AST file, or null otherwise.
+ const FileEntry *ASTFile;
public:
/// \brief The headers that are part of this module.
llvm::SmallVector<const FileEntry *, 2> Headers;
+ /// \brief The headers that are explicitly excluded from this module.
+ llvm::SmallVector<const FileEntry *, 2> ExcludedHeaders;
+
+ /// \brief The top-level headers associated with this module.
+ llvm::SmallSetVector<const FileEntry *, 2> TopHeaders;
+
/// \brief The set of language features required to use this module.
///
/// If any of these features is not present, the \c IsAvailable bit
@@ -158,7 +169,7 @@ public:
/// \brief Construct a top-level module.
explicit Module(StringRef Name, SourceLocation DefinitionLoc,
bool IsFramework)
- : Name(Name), DefinitionLoc(DefinitionLoc), Parent(0), Umbrella(),
+ : Name(Name), DefinitionLoc(DefinitionLoc), Parent(0), Umbrella(), ASTFile(0),
IsAvailable(true), IsFromModuleFile(false), IsFramework(IsFramework),
IsExplicit(false), IsSystem(false),
InferSubmodules(false), InferExplicitSubmodules(false),
@@ -227,7 +238,18 @@ public:
StringRef getTopLevelModuleName() const {
return getTopLevelModule()->Name;
}
-
+
+ /// \brief The serialized AST file for this module, if one was created.
+ const FileEntry *getASTFile() const {
+ return getTopLevelModule()->ASTFile;
+ }
+
+ /// \brief Set the serialized AST file for the top-level module of this module.
+ void setASTFile(const FileEntry *File) {
+ assert((getASTFile() == 0 || getASTFile() == File) && "file path changed");
+ getTopLevelModule()->ASTFile = File;
+ }
+
/// \brief Retrieve the directory for which this module serves as the
/// umbrella.
const DirectoryEntry *getUmbrellaDir() const;
@@ -271,6 +293,10 @@ public:
submodule_iterator submodule_end() { return SubModules.end(); }
submodule_const_iterator submodule_end() const { return SubModules.end(); }
+ static StringRef getModuleInputBufferName() {
+ return "<module-includes>";
+ }
+
/// \brief Print the module map for this module to the given stream.
///
void print(llvm::raw_ostream &OS, unsigned Indent = 0) const;
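
Both new accessors forward to the top-level module, so the AST file recorded for a framework is visible from any submodule, and the assert keeps the path from silently changing. A usage sketch; how Mod and FE are obtained is elided:

// Sketch: Mod may be any submodule; FE is the serialized AST's FileEntry.
Mod->setASTFile(FE);              // stored on the top-level module
assert(Mod->getASTFile() == FE);  // observable from every submodule
Mod->setASTFile(FE);              // setting the same file again is allowed
// Setting a different FileEntry would trip the "file path changed" assert.
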
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h b/contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h
index b24fe7c..d543b76 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h
@@ -126,12 +126,25 @@ public:
return !isGNUFamily();
}
+ /// \brief Does this runtime allow ARC at all?
+ bool allowsARC() const {
+ switch (getKind()) {
+ case FragileMacOSX: return false;
+ case MacOSX: return true;
+ case iOS: return true;
+ case GCC: return false;
+ case GNUstep: return true;
+ case ObjFW: return true;
+ }
+ llvm_unreachable("bad kind");
+ }
+
/// \brief Does this runtime natively provide the ARC entrypoints?
///
/// ARC cannot be directly supported on a platform that does not provide
/// these entrypoints, although it may be supportable via a stub
/// library.
- bool hasARC() const {
+ bool hasNativeARC() const {
switch (getKind()) {
case FragileMacOSX: return false;
case MacOSX: return getVersion() >= VersionTuple(10, 7);
@@ -139,16 +152,35 @@ public:
case GCC: return false;
case GNUstep: return getVersion() >= VersionTuple(1, 6);
- case ObjFW: return false; // XXX: this will change soon
+ case ObjFW: return true;
}
llvm_unreachable("bad kind");
}
+ /// \brief Does this runtime support optimized setter entrypoints?
+ bool hasOptimizedSetter() const {
+ switch (getKind()) {
+ case MacOSX:
+ return getVersion() >= VersionTuple(10, 8);
+ case iOS:
+ return (getVersion() >= VersionTuple(6));
+
+ default:
+ return false;
+ }
+ }
+
+ /// Does this runtime allow the use of __weak?
+ bool allowsWeak() const {
+ return hasNativeWeak();
+ }
+
/// \brief Does this runtime natively provide ARC-compliant 'weak'
/// entrypoints?
- bool hasWeak() const {
- // Right now, this is always equivalent to the ARC decision.
- return hasARC();
+ bool hasNativeWeak() const {
+ // Right now, this is always equivalent to whether the runtime
+ // natively supports ARC.
+ return hasNativeARC();
}
/// \brief Does this runtime directly support the subscripting methods?
@@ -158,7 +190,7 @@ public:
switch (getKind()) {
case FragileMacOSX: return false;
case MacOSX: return getVersion() >= VersionTuple(10, 8);
- case iOS: return false;
+ case iOS: return getVersion() >= VersionTuple(6);
// This is really a lie, because some implementations and versions
// of the runtime do not support ARC. Probably -fgnu-runtime
@@ -226,6 +258,7 @@ public:
}
llvm_unreachable("bad kind");
}
+
/// \brief Does this runtime use zero-cost exceptions?
bool hasUnwindExceptions() const {
switch (getKind()) {
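
Splitting hasARC() into allowsARC() and hasNativeARC() lets callers distinguish "ARC is legal on this runtime" from "the runtime ships the ARC entrypoints"; per the comment in the hunk above, the gap can be bridged by a stub library. A caller-side sketch; the reactions in the comments are illustrative, not Clang's actual driver logic:

void classifyARC(const clang::ObjCRuntime &RT) {
  if (!RT.allowsARC()) {
    // e.g. the fragile Mac OS X or GCC runtimes: -fobjc-arc is not usable.
  } else if (!RT.hasNativeARC()) {
    // ARC is allowed but the entrypoints are not native; a stub/compat
    // library must supply objc_retain and friends.
  }
}
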
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
index 79273fc..cc9ca9f 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
@@ -65,8 +65,7 @@ inline void Emit64(raw_ostream& Out, uint64_t V) {
inline void Pad(raw_ostream& Out, unsigned A) {
Offset off = (Offset) Out.tell();
- uint32_t n = ((uintptr_t)(off+A-1) & ~(uintptr_t)(A-1)) - off;
- for (; n ; --n)
+ for (uint32_t n = llvm::OffsetToAlignment(off, A); n; --n)
Emit8(Out, 0);
}
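
The rewritten Pad delegates the arithmetic to llvm::OffsetToAlignment (the call the new line itself makes), which returns the number of bytes needed to reach the next multiple of the alignment, replacing the open-coded mask expression. A quick check of the identity, with values chosen for illustration:

#include "llvm/Support/MathExtras.h"
#include <cassert>

void padMath() {
  assert(llvm::OffsetToAlignment(13, 8) == 3); // 13 + 3 == 16
  assert(llvm::OffsetToAlignment(16, 8) == 0); // already 8-byte aligned
}
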
@@ -102,7 +101,7 @@ inline uint64_t ReadUnalignedLE64(const unsigned char *&Data) {
inline uint32_t ReadLE32(const unsigned char *&Data) {
// Hosts that directly support little-endian 32-bit loads can just
// use them. Big-endian hosts need a bswap.
- uint32_t V = *((uint32_t*)Data);
+ uint32_t V = *((const uint32_t*)Data);
if (llvm::sys::isBigEndianHost())
V = llvm::ByteSwap_32(V);
Data += 4;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Sanitizers.def b/contrib/llvm/tools/clang/include/clang/Basic/Sanitizers.def
new file mode 100644
index 0000000..085ca16
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Sanitizers.def
@@ -0,0 +1,69 @@
+//===--- Sanitizers.def - Runtime sanitizer options -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the options for specifying which runtime sanitizers to
+// enable. Users of this file must define the SANITIZER macro to make use of
+// this information. Users of this file can also define the SANITIZER_GROUP
+// macro to get information on options which refer to sets of sanitizers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER
+#error "Define SANITIZER prior to including this file!"
+#endif
+
+// SANITIZER(NAME, ID)
+
+// The first value is the name of the sanitizer as a string. The sanitizer can
+// be enabled by specifying -fsanitize=NAME.
+
+// The second value is an identifier which can be used to refer to the
+// sanitizer.
+
+
+// SANITIZER_GROUP(NAME, ID, ALIAS)
+
+// The first two values have the same semantics as the corresponding SANITIZER
+// values. The third value is an expression ORing together the IDs of individual
+// sanitizers in this group.
+
+#ifndef SANITIZER_GROUP
+#define SANITIZER_GROUP(NAME, ID, ALIAS)
+#endif
+
+
+// AddressSanitizer
+SANITIZER("address", Address)
+
+// ThreadSanitizer
+SANITIZER("thread", Thread)
+
+// UndefinedBehaviorSanitizer
+SANITIZER("signed-integer-overflow", SignedIntegerOverflow)
+SANITIZER("divide-by-zero", DivideByZero)
+SANITIZER("shift", Shift)
+SANITIZER("unreachable", Unreachable)
+SANITIZER("return", Return)
+SANITIZER("vla-bound", VLABound)
+SANITIZER("alignment", Alignment)
+SANITIZER("null", Null)
+SANITIZER("vptr", Vptr)
+SANITIZER("object-size", ObjectSize)
+SANITIZER("float-cast-overflow", FloatCastOverflow)
+
+// -fsanitize=undefined (and its alias -fcatch-undefined-behavior). This should
+// include all the sanitizers which have low overhead, no ABI or address space
+// layout implications, and only catch undefined behavior.
+SANITIZER_GROUP("undefined", Undefined,
+ SignedIntegerOverflow | DivideByZero | Shift | Unreachable |
+ Return | VLABound | Alignment | Null | Vptr | ObjectSize |
+ FloatCastOverflow)
+
+#undef SANITIZER
+#undef SANITIZER_GROUP
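
As the comments in the new file state, a client defines SANITIZER (and optionally SANITIZER_GROUP) before including it; the file supplies a no-op SANITIZER_GROUP default and #undefs both macros at the end. A minimal stand-alone consumer that merely lists the individual sanitizer names; this is a sketch, not an actual Clang client:

#include "llvm/Support/raw_ostream.h"

static void listSanitizers() {
  // One line is printed per SANITIZER(NAME, ID) entry; groups fall back to
  // the header's no-op SANITIZER_GROUP definition and print nothing.
#define SANITIZER(NAME, ID) llvm::outs() << NAME << "\n";
#include "clang/Basic/Sanitizers.def"
}
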
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
index d6bba38..cfcf468 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
@@ -21,6 +21,7 @@
#include <utility>
#include <functional>
#include <cassert>
+#include <string>
namespace llvm {
class MemoryBuffer;
@@ -171,6 +172,7 @@ public:
}
void print(raw_ostream &OS, const SourceManager &SM) const;
+ LLVM_ATTRIBUTE_USED std::string printToString(const SourceManager &SM) const;
void dump(const SourceManager &SM) const;
};
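
A small hedged usage sketch of the new helper (SM and Loc assumed in scope); the LLVM_ATTRIBUTE_USED presumably keeps the symbol around so it can be called from a debugger even when nothing references it:

    // Handy where no raw_ostream is at hand, e.g. ad-hoc logging:
    std::string Where = Loc.printToString(SM);
    llvm::errs() << "diagnosing at " << Where << "\n";
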
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
index 32268d7..db6bfd2 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
@@ -62,7 +62,6 @@ class LangOptions;
class ASTWriter;
class ASTReader;
-/// \namespace
/// \brief Public enums and private classes that are part of the
/// SourceManager implementation.
///
@@ -221,7 +220,7 @@ namespace SrcMgr {
private:
// Disable assignments.
- ContentCache &operator=(const ContentCache& RHS);
+ ContentCache &operator=(const ContentCache& RHS) LLVM_DELETED_FUNCTION;
};
/// \brief Information about a FileID, basically just the logical file
@@ -647,8 +646,8 @@ class SourceManager : public RefCountedBase<SourceManager> {
mutable llvm::DenseMap<FileID, MacroArgsMap *> MacroArgsCacheMap;
// SourceManager doesn't support copy construction.
- explicit SourceManager(const SourceManager&);
- void operator=(const SourceManager&);
+ explicit SourceManager(const SourceManager&) LLVM_DELETED_FUNCTION;
+ void operator=(const SourceManager&) LLVM_DELETED_FUNCTION;
public:
SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr,
bool UserFilesAreVolatile = false);
@@ -675,9 +674,10 @@ public:
///
/// One example of when this would be used is when the main source is read
/// from STDIN.
- FileID createMainFileIDForMemBuffer(const llvm::MemoryBuffer *Buffer) {
+ FileID createMainFileIDForMemBuffer(const llvm::MemoryBuffer *Buffer,
+ SrcMgr::CharacteristicKind Kind = SrcMgr::C_User) {
assert(MainFileID.isInvalid() && "MainFileID already set!");
- MainFileID = createFileIDForMemBuffer(Buffer);
+ MainFileID = createFileIDForMemBuffer(Buffer, Kind);
return MainFileID;
}
@@ -734,10 +734,11 @@ public:
/// This does no caching of the buffer and takes ownership of the
/// MemoryBuffer, so only pass a MemoryBuffer to this once.
FileID createFileIDForMemBuffer(const llvm::MemoryBuffer *Buffer,
+ SrcMgr::CharacteristicKind FileCharacter = SrcMgr::C_User,
int LoadedID = 0, unsigned LoadedOffset = 0,
SourceLocation IncludeLoc = SourceLocation()) {
return createFileID(createMemBufferContentCache(Buffer), IncludeLoc,
- SrcMgr::C_User, LoadedID, LoadedOffset);
+ FileCharacter, LoadedID, LoadedOffset);
}
/// \brief Return a new SourceLocation that encodes the
@@ -1557,7 +1558,11 @@ private:
getDecomposedSpellingLocSlowCase(const SrcMgr::SLocEntry *E,
unsigned Offset) const;
void computeMacroArgsCache(MacroArgsMap *&MacroArgsCache, FileID FID) const;
-
+ void associateFileChunkWithMacroArgExp(MacroArgsMap &MacroArgsCache,
+ FileID FID,
+ SourceLocation SpellLoc,
+ SourceLocation ExpansionLoc,
+ unsigned ExpansionLength) const;
friend class ASTReader;
friend class ASTWriter;
};
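
The net effect of the buffer-related hunks above: memory-buffer file IDs can now carry a file characteristic. A hedged sketch (SM, Buf, and Buf2 assumed; each MemoryBuffer may be handed to the SourceManager only once, since it takes ownership):

    // Tag a synthesized buffer as a system header so diagnostics treat it
    // like one:
    clang::FileID SysFID =
        SM.createFileIDForMemBuffer(Buf, clang::SrcMgr::C_System);
    // STDIN-style main input keeps the old behavior via the C_User default:
    clang::FileID MainFID = SM.createMainFileIDForMemBuffer(Buf2);
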
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
index 96cada1..c82b8cb 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
@@ -53,6 +53,7 @@ namespace clang {
TST_union,
TST_struct,
TST_class, // C++ class type
+ TST_interface, // C++ (Microsoft-specific) __interface type
TST_typename, // Typedef, C++ class-name or enum name, etc.
TST_typeofType,
TST_typeofExpr,
@@ -174,6 +175,20 @@ namespace clang {
ICIS_CopyInit, ///< Copy initialization.
ICIS_ListInit ///< Direct list-initialization.
};
+
+ /// \brief CallingConv - Specifies the calling convention that a function uses.
+ enum CallingConv {
+ CC_Default,
+ CC_C, // __attribute__((cdecl))
+ CC_X86StdCall, // __attribute__((stdcall))
+ CC_X86FastCall, // __attribute__((fastcall))
+ CC_X86ThisCall, // __attribute__((thiscall))
+ CC_X86Pascal, // __attribute__((pascal))
+ CC_AAPCS, // __attribute__((pcs("aapcs")))
+ CC_AAPCS_VFP, // __attribute__((pcs("aapcs-vfp")))
+ CC_PnaclCall // __attribute__((pnaclcall))
+ };
+
} // end namespace clang
#endif // LLVM_CLANG_BASIC_SPECIFIERS_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
index 47738af..8f6a1c9 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
@@ -28,11 +28,10 @@ def SwitchCase : Stmt<1>;
def CaseStmt : DStmt<SwitchCase>;
def DefaultStmt : DStmt<SwitchCase>;
-// GNU Extensions
-def AsmStmt : Stmt;
-
-// MS Extensions
-def MSAsmStmt : Stmt;
+// Asm statements
+def AsmStmt : Stmt<1>;
+def GCCAsmStmt : DStmt<AsmStmt>;
+def MSAsmStmt : DStmt<AsmStmt>;
// Obj-C statements
def ObjCAtTryStmt : Stmt;
@@ -132,6 +131,7 @@ def PackExpansionExpr : DStmt<Expr>;
def SizeOfPackExpr : DStmt<Expr>;
def SubstNonTypeTemplateParmExpr : DStmt<Expr>;
def SubstNonTypeTemplateParmPackExpr : DStmt<Expr>;
+def FunctionParmPackExpr : DStmt<Expr>;
def MaterializeTemporaryExpr : DStmt<Expr>;
def LambdaExpr : DStmt<Expr>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
index 54d49e6..2d26783 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
@@ -23,7 +23,9 @@
#include "llvm/ADT/Triple.h"
#include "llvm/Support/DataTypes.h"
#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/VersionTuple.h"
+#include "clang/Basic/Specifiers.h"
#include <cassert>
#include <vector>
#include <string>
@@ -38,7 +40,6 @@ class LangOptions;
class MacroBuilder;
class SourceLocation;
class SourceManager;
-class TargetOptions;
namespace Builtin { struct Info; }
@@ -61,6 +62,7 @@ enum TargetCXXABI {
/// \brief Exposes information about the current target.
///
class TargetInfo : public RefCountedBase<TargetInfo> {
+ llvm::IntrusiveRefCntPtr<TargetOptions> TargetOpts;
llvm::Triple Triple;
protected:
// Target values set by the ctor of the actual target implementation. Default
@@ -111,6 +113,16 @@ public:
virtual ~TargetInfo();
+ /// \brief Retrieve the target options.
+ TargetOptions &getTargetOpts() const {
+ assert(TargetOpts && "Missing target options");
+ return *TargetOpts;
+ }
+
+ void setTargetOpts(TargetOptions &TargetOpts) {
+ this->TargetOpts = &TargetOpts;
+ }
+
///===---- Target Data Type Query Methods -------------------------------===//
enum IntType {
NoInt = 0,
@@ -150,12 +162,18 @@ public:
/// __builtin_va_list as defined by the x86-64 ABI:
/// http://www.x86-64.org/documentation/abi.pdf
- X86_64ABIBuiltinVaList
+ X86_64ABIBuiltinVaList,
+
+ /// __builtin_va_list as defined by ARM AAPCS ABI
+ /// http://infocenter.arm.com
+ // /help/topic/com.arm.doc.ihi0042d/IHI0042D_aapcs.pdf
+ AAPCSABIBuiltinVaList
};
protected:
IntType SizeType, IntMaxType, UIntMaxType, PtrDiffType, IntPtrType, WCharType,
- WIntType, Char16Type, Char32Type, Int64Type, SigAtomicType;
+ WIntType, Char16Type, Char32Type, Int64Type, SigAtomicType,
+ ProcessIDType;
/// \brief Whether Objective-C's built-in boolean type should be signed char.
///
@@ -196,7 +214,7 @@ public:
IntType getChar32Type() const { return Char32Type; }
IntType getInt64Type() const { return Int64Type; }
IntType getSigAtomicType() const { return SigAtomicType; }
-
+ IntType getProcessIDType() const { return ProcessIDType; }
/// \brief Return the width (in bits) of the specified integer type enum.
///
@@ -500,6 +518,11 @@ public:
bool validateInputConstraint(ConstraintInfo *OutputConstraints,
unsigned NumOutputs,
ConstraintInfo &info) const;
+ virtual bool validateConstraintModifier(StringRef /*Constraint*/,
+ const char /*Modifier*/,
+ unsigned /*Size*/) const {
+ return true;
+ }
bool resolveSymbolicName(const char *&Name,
ConstraintInfo *OutputConstraints,
unsigned NumOutputs, unsigned &Index) const;
@@ -712,6 +735,34 @@ public:
bool isBigEndian() const { return BigEndian; }
+ /// \brief Gets the default calling convention for the given target and
+ /// declaration context.
+ virtual CallingConv getDefaultCallingConv() const {
+ // Not all targets will specify an explicit calling convention that we can
+ // express. This will always do the right thing, even though it's not
+ // an explicit calling convention.
+ return CC_Default;
+ }
+
+ enum CallingConvCheckResult {
+ CCCR_OK,
+ CCCR_Warning
+ };
+
+ /// \brief Determines whether a given calling convention is valid for the
+ /// target. A calling convention can either be accepted, produce a warning
+ /// and be substituted with the default calling convention, or (someday)
+ /// produce an error (such as using thiscall on a non-instance function).
+ virtual CallingConvCheckResult checkCallingConvention(CallingConv CC) const {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ case CC_Default:
+ return CCCR_OK;
+ }
+ }
+
protected:
virtual uint64_t getPointerWidthV(unsigned AddrSpace) const {
return PointerWidth;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
index 15ececd..d6deb02 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
@@ -15,13 +15,14 @@
#ifndef LLVM_CLANG_FRONTEND_TARGETOPTIONS_H
#define LLVM_CLANG_FRONTEND_TARGETOPTIONS_H
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include <string>
#include <vector>
namespace clang {
/// \brief Options for controlling the target.
-class TargetOptions {
+class TargetOptions : public RefCountedBase<TargetOptions> {
public:
/// If given, the name of the target triple to compile for. If not given the
/// target will be selected to match the host.
@@ -40,6 +41,9 @@ public:
/// If given, the version string of the linker in use.
std::string LinkerVersion;
+ /// \brief The list of target specific features to enable or disable, as written on the command line.
+ std::vector<std::string> FeaturesAsWritten;
+
/// The list of target specific features to enable or disable -- this should
/// be a list of strings starting with by '+' or '-'.
std::vector<std::string> Features;
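
With TargetOptions now intrusively refcounted, a hedged construction sketch (the triple and feature strings are examples only):

    llvm::IntrusiveRefCntPtr<clang::TargetOptions> TO(
        new clang::TargetOptions());
    TO->Triple = "armv7-none-linux-gnueabi";
    TO->FeaturesAsWritten.push_back("+neon"); // verbatim command-line spelling
    TO->Features.push_back("+neon");          // normalized '+'/'-' backend list
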
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
index fc03191..25e8d5a 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
@@ -217,6 +217,7 @@ PUNCTUATOR(greatergreatergreater, ">>>")
// KEYALTIVEC - This is a keyword in AltiVec
// KEYBORLAND - This is a keyword if Borland extensions are enabled
// BOOLSUPPORT - This is a keyword if 'bool' is a built-in type
+// WCHARSUPPORT - This is a keyword if 'wchar_t' is a built-in type
//
KEYWORD(auto , KEYALL)
KEYWORD(break , KEYALL)
@@ -295,7 +296,7 @@ KEYWORD(typename , KEYCXX)
KEYWORD(typeid , KEYCXX)
KEYWORD(using , KEYCXX)
KEYWORD(virtual , KEYCXX)
-KEYWORD(wchar_t , KEYCXX)
+KEYWORD(wchar_t , WCHARSUPPORT)
// C++ 2.5p2: Alternative Representations.
CXX_KEYWORD_OPERATOR(and , ampamp)
@@ -364,6 +365,7 @@ KEYWORD(__is_convertible_to , KEYCXX)
KEYWORD(__is_empty , KEYCXX)
KEYWORD(__is_enum , KEYCXX)
KEYWORD(__is_final , KEYCXX)
+KEYWORD(__is_interface_class , KEYCXX)
// Tentative name - there's no implementation of std::is_literal_type yet.
KEYWORD(__is_literal , KEYCXX)
// Name for GCC 4.6 compatibility - people have already written libraries using
@@ -505,6 +507,7 @@ KEYWORD(__if_not_exists , KEYMS)
KEYWORD(__single_inheritance , KEYMS)
KEYWORD(__multiple_inheritance , KEYMS)
KEYWORD(__virtual_inheritance , KEYMS)
+KEYWORD(__interface , KEYMS)
ALIAS("__int8" , char , KEYMS)
ALIAS("__int16" , short , KEYMS)
ALIAS("__int32" , int , KEYMS)
@@ -518,7 +521,6 @@ ALIAS("_thiscall" , __thiscall , KEYMS)
ALIAS("_uuidof" , __uuidof , KEYMS | KEYBORLAND)
ALIAS("_inline" , inline , KEYMS)
ALIAS("_declspec" , __declspec , KEYMS)
-ALIAS("__interface" , struct , KEYMS)
// Borland Extensions which should be disabled in strict conformance mode.
ALIAS("_pascal" , __pascal , KEYBORLAND)
@@ -600,6 +602,41 @@ ANNOTATION(pragma_pack)
// handles them.
ANNOTATION(pragma_parser_crash)
+// Annotation for #pragma ms_struct...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_msstruct)
+
+// Annotation for #pragma align...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_align)
+
+// Annotation for #pragma weak id
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_weak)
+
+// Annotation for #pragma weak id = id
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_weakalias)
+
+// Annotation for #pragma redefine_extname...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_redefine_extname)
+
+// Annotation for #pragma STDC FP_CONTRACT...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_fp_contract)
+
+// Annotation for #pragma OPENCL EXTENSION...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_opencl_extension)
+
#undef ANNOTATION
#undef TESTING_KEYWORD
#undef OBJC2_AT_KEYWORD
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
index 478add8..e850971 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
@@ -63,6 +63,31 @@ const char *getTokenName(enum TokenKind Kind);
/// Preprocessor::getSpelling().
const char *getTokenSimpleSpelling(enum TokenKind Kind);
+/// \brief Return true if this is a raw identifier or an identifier kind.
+inline bool isAnyIdentifier(TokenKind K) {
+ return (K == tok::identifier) || (K == tok::raw_identifier);
+}
+
+/// \brief Return true if this is a "literal" kind, like a numeric
+/// constant, string, etc.
+inline bool isLiteral(TokenKind K) {
+ return (K == tok::numeric_constant) || (K == tok::char_constant) ||
+ (K == tok::wide_char_constant) || (K == tok::utf16_char_constant) ||
+ (K == tok::utf32_char_constant) || (K == tok::string_literal) ||
+ (K == tok::wide_string_literal) || (K == tok::utf8_string_literal) ||
+ (K == tok::utf16_string_literal) || (K == tok::utf32_string_literal) ||
+ (K == tok::angle_string_literal);
+}
+
+/// \brief Return true if this is any of tok::annot_* kinds.
+inline bool isAnnotation(TokenKind K) {
+#define ANNOTATION(NAME) \
+ if (K == tok::annot_##NAME) \
+ return true;
+#include "clang/Basic/TokenKinds.def"
+ return false;
+}
+
} // end namespace tok
} // end namespace clang
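
A brief usage sketch of the three new predicates (Tok assumed to be a clang::Token; classifyToken is a hypothetical helper):

    void classifyToken(const clang::Token &Tok) {
      clang::tok::TokenKind K = Tok.getKind();
      if (clang::tok::isAnnotation(K))
        return;  // annotation tokens carry parser-internal payloads, no spelling
      if (clang::tok::isLiteral(K))
        llvm::errs() << "literal\n";
      else if (clang::tok::isAnyIdentifier(K))
        llvm::errs() << "identifier\n";
    }
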
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
index 0a5a864..882b52d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
@@ -41,6 +41,7 @@ namespace clang {
UTT_IsFunction,
UTT_IsFundamental,
UTT_IsIntegral,
+ UTT_IsInterfaceClass,
UTT_IsLiteral,
UTT_IsLvalueReference,
UTT_IsMemberFunctionPointer,
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
index 451d562..3373e01 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
@@ -379,8 +379,8 @@ def VORR : Inst<"vorr", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
def VEOR : Inst<"veor", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
def VBIC : Inst<"vbic", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
def VORN : Inst<"vorn", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
-def VBSL : Inst<"vbsl", "dudd",
- "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs", OP_SEL>;
+def VBSL : SInst<"vbsl", "dudd",
+ "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs">;
////////////////////////////////////////////////////////////////////////////////
// E.3.30 Transposition operations
@@ -394,3 +394,7 @@ def VREINTERPRET
: Inst<"vreinterpret", "dd",
"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs", OP_REINT>;
+////////////////////////////////////////////////////////////////////////////////
+// Vector fused multiply-add operations
+
+def VFMA : SInst<"vfma", "dddd", "fQf">;
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
index 7fa589f..912ef01 100644
--- a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
@@ -30,7 +30,7 @@ private:
bool OwnsVMContext;
protected:
- /// Create a new code generation action. If the optional \arg _VMContext
+ /// Create a new code generation action. If the optional \p _VMContext
/// parameter is supplied, the action uses it without taking ownership,
/// otherwise it creates a fresh LLVM context and takes ownership.
CodeGenAction(unsigned _Act, llvm::LLVMContext *_VMContext = 0);
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Action.h b/contrib/llvm/tools/clang/include/clang/Driver/Action.h
index 6e317a0..4057e48 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Action.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Action.h
@@ -90,8 +90,6 @@ public:
iterator end() { return Inputs.end(); }
const_iterator begin() const { return Inputs.begin(); }
const_iterator end() const { return Inputs.end(); }
-
- static bool classof(const Action *) { return true; }
};
class InputAction : public Action {
@@ -105,7 +103,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == InputClass;
}
- static bool classof(const InputAction *) { return true; }
};
class BindArchAction : public Action {
@@ -122,7 +119,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == BindArchClass;
}
- static bool classof(const BindArchAction *) { return true; }
};
class JobAction : public Action {
@@ -136,7 +132,6 @@ public:
return (A->getKind() >= JobClassFirst &&
A->getKind() <= JobClassLast);
}
- static bool classof(const JobAction *) { return true; }
};
class PreprocessJobAction : public JobAction {
@@ -147,7 +142,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == PreprocessJobClass;
}
- static bool classof(const PreprocessJobAction *) { return true; }
};
class PrecompileJobAction : public JobAction {
@@ -158,7 +152,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == PrecompileJobClass;
}
- static bool classof(const PrecompileJobAction *) { return true; }
};
class AnalyzeJobAction : public JobAction {
@@ -169,7 +162,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == AnalyzeJobClass;
}
- static bool classof(const AnalyzeJobAction *) { return true; }
};
class MigrateJobAction : public JobAction {
@@ -180,7 +172,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == MigrateJobClass;
}
- static bool classof(const MigrateJobAction *) { return true; }
};
class CompileJobAction : public JobAction {
@@ -191,7 +182,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == CompileJobClass;
}
- static bool classof(const CompileJobAction *) { return true; }
};
class AssembleJobAction : public JobAction {
@@ -202,7 +192,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == AssembleJobClass;
}
- static bool classof(const AssembleJobAction *) { return true; }
};
class LinkJobAction : public JobAction {
@@ -213,7 +202,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == LinkJobClass;
}
- static bool classof(const LinkJobAction *) { return true; }
};
class LipoJobAction : public JobAction {
@@ -224,7 +212,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == LipoJobClass;
}
- static bool classof(const LipoJobAction *) { return true; }
};
class DsymutilJobAction : public JobAction {
@@ -235,7 +222,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == DsymutilJobClass;
}
- static bool classof(const DsymutilJobAction *) { return true; }
};
class VerifyJobAction : public JobAction {
@@ -245,7 +231,6 @@ public:
static bool classof(const Action *A) {
return A->getKind() == VerifyJobClass;
}
- static bool classof(const VerifyJobAction *) { return true; }
};
} // end namespace driver
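
These deletions follow LLVM's RTTI idiom: isa<T>/dyn_cast<T> only need T::classof(const Base*), so the always-true self-overloads were dead weight. The pattern in miniature, self-contained and outside clang:

    // Minimal LLVM-style RTTI: a kind tag plus one classof per subclass.
    struct Action {
      enum Kind { InputClass, BindArchClass };
      explicit Action(Kind K) : K(K) {}
      Kind getKind() const { return K; }
    private:
      Kind K;
    };

    struct InputAction : Action {
      InputAction() : Action(InputClass) {}
      static bool classof(const Action *A) {
        return A->getKind() == InputClass;
      }
    };

    // Hand-rolled stand-in for llvm::isa<> to show what classof feeds:
    template <typename To, typename From>
    bool isa(const From *V) { return To::classof(V); }
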
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Arg.h b/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
index e466cc3..3b3829a 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
@@ -15,6 +15,8 @@
#ifndef CLANG_DRIVER_ARG_H_
#define CLANG_DRIVER_ARG_H_
+#include "clang/Driver/Option.h"
+
#include "Util.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -23,7 +25,6 @@
namespace clang {
namespace driver {
class ArgList;
- class Option;
/// \brief A concrete instance of a particular driver option.
///
@@ -33,17 +34,20 @@ namespace driver {
/// ArgList to provide efficient iteration over all instances of a
/// particular option.
class Arg {
- Arg(const Arg &); // DO NOT IMPLEMENT
- void operator=(const Arg &); // DO NOT IMPLEMENT
+ Arg(const Arg &) LLVM_DELETED_FUNCTION;
+ void operator=(const Arg &) LLVM_DELETED_FUNCTION;
private:
/// \brief The option this argument is an instance of.
- const Option *Opt;
+ const Option Opt;
/// \brief The argument this argument was derived from (during tool chain
/// argument translation), if any.
const Arg *BaseArg;
+ /// \brief How this instance of the option was spelled.
+ StringRef Spelling;
+
/// \brief The index at which this argument appears in the containing
/// ArgList.
unsigned Index;
@@ -60,14 +64,16 @@ namespace driver {
SmallVector<const char *, 2> Values;
public:
- Arg(const Option *Opt, unsigned Index, const Arg *BaseArg = 0);
- Arg(const Option *Opt, unsigned Index,
+ Arg(const Option Opt, StringRef Spelling, unsigned Index,
+ const Arg *BaseArg = 0);
+ Arg(const Option Opt, StringRef Spelling, unsigned Index,
const char *Value0, const Arg *BaseArg = 0);
- Arg(const Option *Opt, unsigned Index,
+ Arg(const Option Opt, StringRef Spelling, unsigned Index,
const char *Value0, const char *Value1, const Arg *BaseArg = 0);
~Arg();
- const Option &getOption() const { return *Opt; }
+ const Option getOption() const { return Opt; }
+ StringRef getSpelling() const { return Spelling; }
unsigned getIndex() const { return Index; }
/// \brief Return the base argument which generated this arg.
@@ -90,7 +96,7 @@ namespace driver {
void claim() const { getBaseArg().Claimed = true; }
unsigned getNumValues() const { return Values.size(); }
- const char *getValue(const ArgList &Args, unsigned N=0) const {
+ const char *getValue(unsigned N = 0) const {
return Values[N];
}
@@ -115,8 +121,6 @@ namespace driver {
/// when rendered as an input (e.g., Xlinker).
void renderAsInput(const ArgList &Args, ArgStringList &Output) const;
- static bool classof(const Arg *) { return true; }
-
void dump() const;
/// \brief Return a formatted version of the argument and
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
index b7e490c..72ed7bf 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
@@ -11,6 +11,7 @@
#define CLANG_DRIVER_ARGLIST_H_
#include "clang/Basic/LLVM.h"
+#include "clang/Driver/Option.h"
#include "clang/Driver/OptSpecifier.h"
#include "clang/Driver/Util.h"
#include "llvm/ADT/SmallVector.h"
@@ -94,8 +95,8 @@ namespace driver {
/// and to iterate over groups of arguments.
class ArgList {
private:
- ArgList(const ArgList &); // DO NOT IMPLEMENT
- void operator=(const ArgList &); // DO NOT IMPLEMENT
+ ArgList(const ArgList &) LLVM_DELETED_FUNCTION;
+ void operator=(const ArgList &) LLVM_DELETED_FUNCTION;
public:
typedef SmallVector<Arg*, 16> arglist_type;
@@ -117,7 +118,7 @@ namespace driver {
/// @name Arg Access
/// @{
- /// append - Append \arg A to the arg list.
+ /// append - Append \p A to the arg list.
void append(Arg *A);
arglist_type &getArgs() { return Args; }
@@ -153,16 +154,16 @@ namespace driver {
/// @name Arg Removal
/// @{
- /// eraseArg - Remove any option matching \arg Id.
+ /// eraseArg - Remove any option matching \p Id.
void eraseArg(OptSpecifier Id);
/// @}
/// @name Arg Access
/// @{
- /// hasArg - Does the arg list contain any option matching \arg Id.
+ /// hasArg - Does the arg list contain any option matching \p Id.
///
- /// \arg Claim Whether the argument should be claimed, if it exists.
+ /// \p Claim Whether the argument should be claimed, if it exists.
bool hasArgNoClaim(OptSpecifier Id) const {
return getLastArgNoClaim(Id) != 0;
}
@@ -176,9 +177,9 @@ namespace driver {
return getLastArg(Id0, Id1, Id2) != 0;
}
- /// getLastArg - Return the last argument matching \arg Id, or null.
+ /// getLastArg - Return the last argument matching \p Id, or null.
///
- /// \arg Claim Whether the argument should be claimed, if it exists.
+ /// \p Claim Whether the argument should be claimed, if it exists.
Arg *getLastArgNoClaim(OptSpecifier Id) const;
Arg *getLastArg(OptSpecifier Id) const;
Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1) const;
@@ -196,7 +197,7 @@ namespace driver {
OptSpecifier Id3, OptSpecifier Id4, OptSpecifier Id5,
OptSpecifier Id6, OptSpecifier Id7) const;
- /// getArgString - Return the input argument string at \arg Index.
+ /// getArgString - Return the input argument string at \p Index.
virtual const char *getArgString(unsigned Index) const = 0;
/// getNumInputArgStrings - Return the number of original argument strings,
@@ -233,15 +234,13 @@ namespace driver {
/// @name Translation Utilities
/// @{
- /// hasFlag - Given an option \arg Pos and its negative form \arg
- /// Neg, return true if the option is present, false if the
- /// negation is present, and \arg Default if neither option is
- /// given. If both the option and its negation are present, the
- /// last one wins.
+ /// hasFlag - Given an option \p Pos and its negative form \p Neg, return
+ /// true if the option is present, false if the negation is present, and
+ /// \p Default if neither option is given. If both the option and its
+ /// negation are present, the last one wins.
bool hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default=true) const;
- /// AddLastArg - Render only the last argument match \arg Id0, if
- /// present.
+ /// AddLastArg - Render only the last argument match \p Id0, if present.
void AddLastArg(ArgStringList &Output, OptSpecifier Id0) const;
/// AddAllArgs - Render all arguments matching the given ids.
@@ -286,8 +285,8 @@ namespace driver {
}
const char *MakeArgString(const Twine &Str) const;
- /// \brief Create an arg string for (\arg LHS + \arg RHS), reusing the
- /// string at \arg Index if possible.
+ /// \brief Create an arg string for (\p LHS + \p RHS), reusing the
+ /// string at \p Index if possible.
const char *GetOrMakeJoinedArgString(unsigned Index, StringRef LHS,
StringRef RHS) const;
@@ -347,7 +346,7 @@ namespace driver {
mutable arglist_type SynthesizedArgs;
public:
- /// Construct a new derived arg list from \arg BaseArgs.
+ /// Construct a new derived arg list from \p BaseArgs.
DerivedArgList(const InputArgList &BaseArgs);
~DerivedArgList();
@@ -374,55 +373,54 @@ namespace driver {
virtual const char *MakeArgString(StringRef Str) const;
- /// AddFlagArg - Construct a new FlagArg for the given option \arg Id and
+ /// AddFlagArg - Construct a new FlagArg for the given option \p Id and
/// append it to the argument list.
- void AddFlagArg(const Arg *BaseArg, const Option *Opt) {
+ void AddFlagArg(const Arg *BaseArg, const Option Opt) {
append(MakeFlagArg(BaseArg, Opt));
}
/// AddPositionalArg - Construct a new Positional arg for the given option
- /// \arg Id, with the provided \arg Value and append it to the argument
+ /// \p Id, with the provided \p Value and append it to the argument
/// list.
- void AddPositionalArg(const Arg *BaseArg, const Option *Opt,
+ void AddPositionalArg(const Arg *BaseArg, const Option Opt,
StringRef Value) {
append(MakePositionalArg(BaseArg, Opt, Value));
}
/// AddSeparateArg - Construct a new Positional arg for the given option
- /// \arg Id, with the provided \arg Value and append it to the argument
+ /// \p Id, with the provided \p Value and append it to the argument
/// list.
- void AddSeparateArg(const Arg *BaseArg, const Option *Opt,
+ void AddSeparateArg(const Arg *BaseArg, const Option Opt,
StringRef Value) {
append(MakeSeparateArg(BaseArg, Opt, Value));
}
- /// AddJoinedArg - Construct a new Positional arg for the given option \arg
- /// Id, with the provided \arg Value and append it to the argument list.
- void AddJoinedArg(const Arg *BaseArg, const Option *Opt,
+ /// AddJoinedArg - Construct a new Positional arg for the given option
+ /// \p Id, with the provided \p Value and append it to the argument list.
+ void AddJoinedArg(const Arg *BaseArg, const Option Opt,
StringRef Value) {
append(MakeJoinedArg(BaseArg, Opt, Value));
}
- /// MakeFlagArg - Construct a new FlagArg for the given option
- /// \arg Id.
- Arg *MakeFlagArg(const Arg *BaseArg, const Option *Opt) const;
+ /// MakeFlagArg - Construct a new FlagArg for the given option \p Id.
+ Arg *MakeFlagArg(const Arg *BaseArg, const Option Opt) const;
/// MakePositionalArg - Construct a new Positional arg for the
- /// given option \arg Id, with the provided \arg Value.
- Arg *MakePositionalArg(const Arg *BaseArg, const Option *Opt,
+ /// given option \p Id, with the provided \p Value.
+ Arg *MakePositionalArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const;
/// MakeSeparateArg - Construct a new Positional arg for the
- /// given option \arg Id, with the provided \arg Value.
- Arg *MakeSeparateArg(const Arg *BaseArg, const Option *Opt,
+ /// given option \p Id, with the provided \p Value.
+ Arg *MakeSeparateArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const;
/// MakeJoinedArg - Construct a new Positional arg for the
- /// given option \arg Id, with the provided \arg Value.
- Arg *MakeJoinedArg(const Arg *BaseArg, const Option *Opt,
+ /// given option \p Id, with the provided \p Value.
+ Arg *MakeJoinedArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const;
/// @}
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h
index 0508213..420a101 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h
@@ -17,11 +17,13 @@ namespace driver {
namespace cc1asoptions {
enum ID {
OPT_INVALID = 0, // This is not an option ID.
-#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+#define PREFIX(NAME, VALUE)
+#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
HELPTEXT, METAVAR) OPT_##ID,
#include "clang/Driver/CC1AsOptions.inc"
LastOption
#undef OPTION
+#undef PREFIX
};
}
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
index 37ba602..9fd855a 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
@@ -18,22 +18,22 @@ include "OptParser.td"
// Target Options
//===----------------------------------------------------------------------===//
-def triple : Separate<"-triple">,
+def triple : Separate<["-"], "triple">,
HelpText<"Specify target triple (e.g. x86_64-pc-linux-gnu)">;
-def target_cpu : Separate<"-target-cpu">,
+def target_cpu : Separate<["-"], "target-cpu">,
HelpText<"Target a specific cpu type">;
-def target_feature : Separate<"-target-feature">,
+def target_feature : Separate<["-"], "target-feature">,
HelpText<"Target specific attributes">;
//===----------------------------------------------------------------------===//
// Language Options
//===----------------------------------------------------------------------===//
-def I : JoinedOrSeparate<"-I">, MetaVarName<"<directory>">,
+def I : JoinedOrSeparate<["-"], "I">, MetaVarName<"<directory>">,
HelpText<"Add directory to include search path">;
-def n : Flag<"-n">,
+def n : Flag<["-"], "n">,
HelpText<"Don't automatically start assembly file with a text section">;
-def L : Flag<"-L">,
+def L : Flag<["-"], "L">,
HelpText<"Save temporary labels in the symbol table. "
"Note this may change .s semantics, it should almost never be used "
"on compiler generated code!">;
@@ -42,50 +42,49 @@ def L : Flag<"-L">,
// Frontend Options
//===----------------------------------------------------------------------===//
-def o : Separate<"-o">, MetaVarName<"<path>">, HelpText<"Specify output file">;
+def o : Separate<["-"], "o">, MetaVarName<"<path>">,
+ HelpText<"Specify output file">;
-def filetype : Separate<"-filetype">,
+def filetype : Separate<["-"], "filetype">,
HelpText<"Specify the output file type ('asm', 'null', or 'obj')">;
-def help : Flag<"-help">,
+def help : Flag<["-", "--"], "help">,
HelpText<"Print this help text">;
-def _help : Flag<"--help">, Alias<help>;
-def version : Flag<"-version">,
+def version : Flag<["-", "--"], "version">,
HelpText<"Print the assembler version">;
-def _version : Flag<"--version">, Alias<version>;
-def v : Flag<"-v">, Alias<version>;
+def v : Flag<["-"], "v">, Alias<version>;
// Generic forwarding to LLVM options. This should only be used for debugging
// and experimental features.
-def mllvm : Separate<"-mllvm">,
+def mllvm : Separate<["-"], "mllvm">,
HelpText<"Additional arguments to forward to LLVM's option processing">;
//===----------------------------------------------------------------------===//
// Transliterate Options
//===----------------------------------------------------------------------===//
-def output_asm_variant : Separate<"-output-asm-variant">,
+def output_asm_variant : Separate<["-"], "output-asm-variant">,
HelpText<"Select the asm variant index to use for output">;
-def show_encoding : Flag<"-show-encoding">,
+def show_encoding : Flag<["-"], "show-encoding">,
HelpText<"Show instruction encoding information in transliterate mode">;
-def show_inst : Flag<"-show-inst">,
+def show_inst : Flag<["-"], "show-inst">,
HelpText<"Show internal instruction representation in transliterate mode">;
//===----------------------------------------------------------------------===//
// Assemble Options
//===----------------------------------------------------------------------===//
-def relax_all : Flag<"-relax-all">,
+def relax_all : Flag<["-"], "relax-all">,
HelpText<"Relax all fixups (for performance testing)">;
-def no_exec_stack : Flag<"--noexecstack">,
+def no_exec_stack : Flag<["--"], "noexecstack">,
HelpText<"Mark the file as not needing an executable stack">;
-def fatal_warnings : Flag<"--fatal-warnings">,
+def fatal_warnings : Flag<["--"], "fatal-warnings">,
HelpText<"Consider warnings as errors">;
-def g : Flag<"-g">, HelpText<"Generate source level debug information">;
+def g : Flag<["-"], "g">, HelpText<"Generate source level debug information">;
-def dwarf_debug_flags : Separate<"-dwarf-debug-flags">,
+def dwarf_debug_flags : Separate<["-"], "dwarf-debug-flags">,
HelpText<"The string to embed in the Dwarf debug flags record.">;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
index 6e4d7f2..3ff2549 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
@@ -11,235 +11,238 @@
//
//===----------------------------------------------------------------------===//
-let Flags = [CC1Option] in {
+let Flags = [CC1Option, NoDriverOption] in {
//===----------------------------------------------------------------------===//
// Target Options
//===----------------------------------------------------------------------===//
-def cxx_abi : Separate<"-cxx-abi">,
+def cxx_abi : Separate<["-"], "cxx-abi">,
HelpText<"Target a particular C++ ABI type">;
-def target_abi : Separate<"-target-abi">,
+def target_abi : Separate<["-"], "target-abi">,
HelpText<"Target a particular ABI type">;
-def target_cpu : Separate<"-target-cpu">,
+def target_cpu : Separate<["-"], "target-cpu">,
HelpText<"Target a specific cpu type">;
-def target_feature : Separate<"-target-feature">,
+def target_feature : Separate<["-"], "target-feature">,
HelpText<"Target specific attributes">;
-def target_linker_version : Separate<"-target-linker-version">,
+def target_linker_version : Separate<["-"], "target-linker-version">,
HelpText<"Target linker version">;
-def triple : Separate<"-triple">,
+def triple : Separate<["-"], "triple">,
HelpText<"Specify target triple (e.g. i686-apple-darwin9)">;
-def triple_EQ : Joined<"-triple=">, Alias<triple>;
+def triple_EQ : Joined<["-"], "triple=">, Alias<triple>;
//===----------------------------------------------------------------------===//
// Analyzer Options
//===----------------------------------------------------------------------===//
-def analysis_UnoptimizedCFG : Flag<"-unoptimized-cfg">,
+def analysis_UnoptimizedCFG : Flag<["-"], "unoptimized-cfg">,
HelpText<"Generate unoptimized CFGs for all analyses">;
-def analysis_CFGAddImplicitDtors : Flag<"-cfg-add-implicit-dtors">,
+def analysis_CFGAddImplicitDtors : Flag<["-"], "cfg-add-implicit-dtors">,
HelpText<"Add C++ implicit destructors to CFGs for all analyses">;
-def analyzer_store : Separate<"-analyzer-store">,
+def analyzer_store : Separate<["-"], "analyzer-store">,
HelpText<"Source Code Analysis - Abstract Memory Store Models">;
-def analyzer_store_EQ : Joined<"-analyzer-store=">, Alias<analyzer_store>;
+def analyzer_store_EQ : Joined<["-"], "analyzer-store=">, Alias<analyzer_store>;
-def analyzer_constraints : Separate<"-analyzer-constraints">,
+def analyzer_constraints : Separate<["-"], "analyzer-constraints">,
HelpText<"Source Code Analysis - Symbolic Constraint Engines">;
-def analyzer_constraints_EQ : Joined<"-analyzer-constraints=">,
+def analyzer_constraints_EQ : Joined<["-"], "analyzer-constraints=">,
Alias<analyzer_constraints>;
-def analyzer_output : Separate<"-analyzer-output">,
+def analyzer_output : Separate<["-"], "analyzer-output">,
HelpText<"Source Code Analysis - Output Options">;
-def analyzer_output_EQ : Joined<"-analyzer-output=">,
+def analyzer_output_EQ : Joined<["-"], "analyzer-output=">,
Alias<analyzer_output>;
-def analyzer_purge : Separate<"-analyzer-purge">,
+def analyzer_purge : Separate<["-"], "analyzer-purge">,
HelpText<"Source Code Analysis - Dead Symbol Removal Frequency">;
-def analyzer_purge_EQ : Joined<"-analyzer-purge=">, Alias<analyzer_purge>;
+def analyzer_purge_EQ : Joined<["-"], "analyzer-purge=">, Alias<analyzer_purge>;
-def analyzer_opt_analyze_headers : Flag<"-analyzer-opt-analyze-headers">,
+def analyzer_opt_analyze_headers : Flag<["-"], "analyzer-opt-analyze-headers">,
HelpText<"Force the static analyzer to analyze functions defined in header files">;
-def analyzer_opt_analyze_nested_blocks : Flag<"-analyzer-opt-analyze-nested-blocks">,
+def analyzer_opt_analyze_nested_blocks : Flag<["-"], "analyzer-opt-analyze-nested-blocks">,
HelpText<"Analyze the definitions of blocks in addition to functions">;
-def analyzer_display_progress : Flag<"-analyzer-display-progress">,
+def analyzer_display_progress : Flag<["-"], "analyzer-display-progress">,
HelpText<"Emit verbose output about the analyzer's progress">;
-def analyze_function : Separate<"-analyze-function">,
+def analyze_function : Separate<["-"], "analyze-function">,
HelpText<"Run analysis on specific function">;
-def analyze_function_EQ : Joined<"-analyze-function=">, Alias<analyze_function>;
-def analyzer_eagerly_assume : Flag<"-analyzer-eagerly-assume">,
+def analyze_function_EQ : Joined<["-"], "analyze-function=">, Alias<analyze_function>;
+def analyzer_eagerly_assume : Flag<["-"], "analyzer-eagerly-assume">,
HelpText<"Eagerly assume the truth/falseness of some symbolic constraints">;
-def analyzer_no_eagerly_trim_egraph : Flag<"-analyzer-no-eagerly-trim-egraph">,
- HelpText<"Don't eagerly remove uninteresting ExplodedNodes from the ExplodedGraph">;
-def trim_egraph : Flag<"-trim-egraph">,
+def trim_egraph : Flag<["-"], "trim-egraph">,
HelpText<"Only show error-related paths in the analysis graph">;
-def analyzer_viz_egraph_graphviz : Flag<"-analyzer-viz-egraph-graphviz">,
+def analyzer_viz_egraph_graphviz : Flag<["-"], "analyzer-viz-egraph-graphviz">,
HelpText<"Display exploded graph using GraphViz">;
-def analyzer_viz_egraph_ubigraph : Flag<"-analyzer-viz-egraph-ubigraph">,
+def analyzer_viz_egraph_ubigraph : Flag<["-"], "analyzer-viz-egraph-ubigraph">,
HelpText<"Display exploded graph using Ubigraph">;
-def analyzer_inline_max_stack_depth : Separate<"-analyzer-inline-max-stack-depth">,
+def analyzer_inline_max_stack_depth : Separate<["-"], "analyzer-inline-max-stack-depth">,
HelpText<"Bound on stack depth while inlining (4 by default)">;
-def analyzer_inline_max_stack_depth_EQ : Joined<"-analyzer-inline-max-stack-depth=">,
+def analyzer_inline_max_stack_depth_EQ : Joined<["-"], "analyzer-inline-max-stack-depth=">,
Alias<analyzer_inline_max_stack_depth>;
-def analyzer_inline_max_function_size : Separate<"-analyzer-inline-max-function-size">,
+def analyzer_inline_max_function_size : Separate<["-"], "analyzer-inline-max-function-size">,
HelpText<"Bound on the number of basic blocks in an inlined function (200 by default)">;
-def analyzer_inline_max_function_size_EQ : Joined<"-analyzer-inline-max-function-size=">,
+def analyzer_inline_max_function_size_EQ : Joined<["-"], "analyzer-inline-max-function-size=">,
Alias<analyzer_inline_max_function_size>;
-def analyzer_ipa : Separate<"-analyzer-ipa">,
+def analyzer_ipa : Separate<["-"], "analyzer-ipa">,
HelpText<"Specify the inter-procedural analysis mode">;
-def analyzer_ipa_EQ : Joined<"-analyzer-ipa=">, Alias<analyzer_ipa>;
+def analyzer_ipa_EQ : Joined<["-"], "analyzer-ipa=">, Alias<analyzer_ipa>;
-def analyzer_inlining_mode : Separate<"-analyzer-inlining-mode">,
+def analyzer_inlining_mode : Separate<["-"], "analyzer-inlining-mode">,
HelpText<"Specify the function selection heuristic used during inlining">;
-def analyzer_inlining_mode_EQ : Joined<"-analyzer-inlining-mode=">, Alias<analyzer_inlining_mode>;
+def analyzer_inlining_mode_EQ : Joined<["-"], "analyzer-inlining-mode=">, Alias<analyzer_inlining_mode>;
-def analyzer_disable_retry_exhausted : Flag<"-analyzer-disable-retry-exhausted">,
+def analyzer_disable_retry_exhausted : Flag<["-"], "analyzer-disable-retry-exhausted">,
HelpText<"Do not re-analyze paths leading to exhausted nodes with a different strategy (may decrease code coverage)">;
-def analyzer_max_nodes : Separate<"-analyzer-max-nodes">,
+def analyzer_max_nodes : Separate<["-"], "analyzer-max-nodes">,
HelpText<"The maximum number of nodes the analyzer can generate (150000 default, 0 = no limit)">;
-def analyzer_max_loop : Separate<"-analyzer-max-loop">,
+def analyzer_max_loop : Separate<["-"], "analyzer-max-loop">,
HelpText<"The maximum number of times the analyzer will go through a loop">;
-def analyzer_stats : Flag<"-analyzer-stats">,
+def analyzer_stats : Flag<["-"], "analyzer-stats">,
HelpText<"Print internal analyzer statistics.">;
-def analyzer_checker : Separate<"-analyzer-checker">,
+def analyzer_checker : Separate<["-"], "analyzer-checker">,
HelpText<"Choose analyzer checkers to enable">;
-def analyzer_checker_EQ : Joined<"-analyzer-checker=">,
+def analyzer_checker_EQ : Joined<["-"], "analyzer-checker=">,
Alias<analyzer_checker>;
-def analyzer_disable_checker : Separate<"-analyzer-disable-checker">,
+def analyzer_disable_checker : Separate<["-"], "analyzer-disable-checker">,
HelpText<"Choose analyzer checkers to disable">;
-def analyzer_disable_checker_EQ : Joined<"-analyzer-disable-checker=">,
+def analyzer_disable_checker_EQ : Joined<["-"], "analyzer-disable-checker=">,
Alias<analyzer_disable_checker>;
-def analyzer_checker_help : Flag<"-analyzer-checker-help">,
+def analyzer_checker_help : Flag<["-"], "analyzer-checker-help">,
HelpText<"Display the list of analyzer checkers that are available">;
+def analyzer_config : Separate<["-"], "analyzer-config">,
+ HelpText<"Choose analyzer options to enable">;
+
//===----------------------------------------------------------------------===//
// Migrator Options
//===----------------------------------------------------------------------===//
-def migrator_no_nsalloc_error : Flag<"-no-ns-alloc-error">,
+def migrator_no_nsalloc_error : Flag<["-"], "no-ns-alloc-error">,
HelpText<"Do not error on use of NSAllocateCollectable/NSReallocateCollectable">;
-def migrator_no_finalize_removal : Flag<"-no-finalize-removal">,
+def migrator_no_finalize_removal : Flag<["-"], "no-finalize-removal">,
HelpText<"Do not remove finalize method in gc mode">;
//===----------------------------------------------------------------------===//
// CodeGen Options
//===----------------------------------------------------------------------===//
-def disable_llvm_optzns : Flag<"-disable-llvm-optzns">,
+def disable_llvm_optzns : Flag<["-"], "disable-llvm-optzns">,
HelpText<"Don't run LLVM optimization passes">;
-def disable_llvm_verifier : Flag<"-disable-llvm-verifier">,
+def disable_llvm_verifier : Flag<["-"], "disable-llvm-verifier">,
HelpText<"Don't run the LLVM IR verifier pass">;
-def disable_red_zone : Flag<"-disable-red-zone">,
+def disable_red_zone : Flag<["-"], "disable-red-zone">,
HelpText<"Do not emit code that uses the red zone.">;
-def fdebug_compilation_dir : Separate<"-fdebug-compilation-dir">,
+def fdebug_compilation_dir : Separate<["-"], "fdebug-compilation-dir">,
HelpText<"The compilation directory to embed in the debug info.">;
-def dwarf_debug_flags : Separate<"-dwarf-debug-flags">,
+def dwarf_debug_flags : Separate<["-"], "dwarf-debug-flags">,
HelpText<"The string to embed in the Dwarf debug flags record.">;
-def fforbid_guard_variables : Flag<"-fforbid-guard-variables">,
+def dwarf_column_info : Flag<["-"], "dwarf-column-info">,
+ HelpText<"Turn on column location information.">;
+def fforbid_guard_variables : Flag<["-"], "fforbid-guard-variables">,
HelpText<"Emit an error if a C++ static local initializer would need a guard variable">;
-def no_implicit_float : Flag<"-no-implicit-float">,
+def no_implicit_float : Flag<["-"], "no-implicit-float">,
HelpText<"Don't generate implicit floating point instructions">;
-def fdump_vtable_layouts : Flag<"-fdump-vtable-layouts">,
+def fdump_vtable_layouts : Flag<["-"], "fdump-vtable-layouts">,
HelpText<"Dump the layouts of all vtables that will be emitted in a translation unit">;
-def femit_coverage_notes : Flag<"-femit-coverage-notes">,
+def femit_coverage_notes : Flag<["-"], "femit-coverage-notes">,
HelpText<"Emit a gcov coverage notes file when compiling.">;
-def femit_coverage_data: Flag<"-femit-coverage-data">,
+def femit_coverage_data: Flag<["-"], "femit-coverage-data">,
HelpText<"Instrument the program to emit gcov coverage data when run.">;
-def coverage_file : Separate<"-coverage-file">,
+def coverage_file : Separate<["-"], "coverage-file">,
HelpText<"Emit coverage data to this filename. The extension will be replaced.">;
-def coverage_file_EQ : Joined<"-coverage-file=">, Alias<coverage_file>;
-def fuse_register_sized_bitfield_access: Flag<"-fuse-register-sized-bitfield-access">,
+def coverage_file_EQ : Joined<["-"], "coverage-file=">, Alias<coverage_file>;
+def fuse_register_sized_bitfield_access: Flag<["-"], "fuse-register-sized-bitfield-access">,
HelpText<"Use register sized accesses to bit-fields, when possible.">;
-def relaxed_aliasing : Flag<"-relaxed-aliasing">,
+def relaxed_aliasing : Flag<["-"], "relaxed-aliasing">,
HelpText<"Turn off Type Based Alias Analysis">;
-def masm_verbose : Flag<"-masm-verbose">,
+def masm_verbose : Flag<["-"], "masm-verbose">,
HelpText<"Generate verbose assembly output">;
-def mcode_model : Separate<"-mcode-model">,
+def mcode_model : Separate<["-"], "mcode-model">,
HelpText<"The code model to use">;
-def mdebug_pass : Separate<"-mdebug-pass">,
+def mdebug_pass : Separate<["-"], "mdebug-pass">,
HelpText<"Enable additional debug output">;
-def mdisable_fp_elim : Flag<"-mdisable-fp-elim">,
+def mdisable_fp_elim : Flag<["-"], "mdisable-fp-elim">,
HelpText<"Disable frame pointer elimination optimization">;
-def mdisable_tail_calls : Flag<"-mdisable-tail-calls">,
+def mdisable_tail_calls : Flag<["-"], "mdisable-tail-calls">,
HelpText<"Disable tail call optimization, keeping the call stack accurate">;
-def menable_no_infinities : Flag<"-menable-no-infs">,
+def menable_no_infinities : Flag<["-"], "menable-no-infs">,
HelpText<"Allow optimization to assume there are no infinities.">;
-def menable_no_nans : Flag<"-menable-no-nans">,
+def menable_no_nans : Flag<["-"], "menable-no-nans">,
HelpText<"Allow optimization to assume there are no NaNs.">;
-def menable_unsafe_fp_math : Flag<"-menable-unsafe-fp-math">,
+def menable_unsafe_fp_math : Flag<["-"], "menable-unsafe-fp-math">,
HelpText<"Allow unsafe floating-point math optimizations which may decrease "
"precision">;
-def mfloat_abi : Separate<"-mfloat-abi">,
+def mfloat_abi : Separate<["-"], "mfloat-abi">,
HelpText<"The float ABI to use">;
-def mlimit_float_precision : Separate<"-mlimit-float-precision">,
+def mlimit_float_precision : Separate<["-"], "mlimit-float-precision">,
HelpText<"Limit float precision to the given value">;
-def mno_exec_stack : Flag<"-mnoexecstack">,
+def mno_exec_stack : Flag<["-"], "mnoexecstack">,
HelpText<"Mark the file as not needing an executable stack">;
-def mno_zero_initialized_in_bss : Flag<"-mno-zero-initialized-in-bss">,
+def mno_zero_initialized_in_bss : Flag<["-"], "mno-zero-initialized-in-bss">,
HelpText<"Do not put zero initialized data in the BSS">;
-def backend_option : Separate<"-backend-option">,
+def backend_option : Separate<["-"], "backend-option">,
HelpText<"Additional arguments to forward to LLVM backend (during code gen)">;
-def mregparm : Separate<"-mregparm">,
+def mregparm : Separate<["-"], "mregparm">,
HelpText<"Limit the number of registers available for integer arguments">;
-def msave_temp_labels : Flag<"-msave-temp-labels">,
+def msave_temp_labels : Flag<["-"], "msave-temp-labels">,
HelpText<"(integrated-as) Save temporary labels">;
-def mrelocation_model : Separate<"-mrelocation-model">,
+def mrelocation_model : Separate<["-"], "mrelocation-model">,
HelpText<"The relocation model to use">;
-def munwind_tables : Flag<"-munwind-tables">,
+def munwind_tables : Flag<["-"], "munwind-tables">,
HelpText<"Generate unwinding tables for all functions">;
-def fuse_init_array : Flag<"-fuse-init-array">,
+def fuse_init_array : Flag<["-"], "fuse-init-array">,
HelpText<"Use .init_array instead of .ctors">;
-def mconstructor_aliases : Flag<"-mconstructor-aliases">,
+def mconstructor_aliases : Flag<["-"], "mconstructor-aliases">,
HelpText<"Emit complete constructors and destructors as aliases when possible">;
-def mlink_bitcode_file : Separate<"-mlink-bitcode-file">,
+def mlink_bitcode_file : Separate<["-"], "mlink-bitcode-file">,
HelpText<"Link the given bitcode file before performing optimizations.">;
//===----------------------------------------------------------------------===//
// Dependency Output Options
//===----------------------------------------------------------------------===//
-def sys_header_deps : Flag<"-sys-header-deps">,
+def sys_header_deps : Flag<["-"], "sys-header-deps">,
HelpText<"Include system headers in dependency output">;
-def header_include_file : Separate<"-header-include-file">,
+def header_include_file : Separate<["-"], "header-include-file">,
HelpText<"Filename (or -) to write header include output to">;
//===----------------------------------------------------------------------===//
// Diagnostic Options
//===----------------------------------------------------------------------===//
-def dump_build_information : Separate<"-dump-build-information">,
+def dump_build_information : Separate<["-"], "dump-build-information">,
MetaVarName<"<filename>">,
HelpText<"output a dump of some build information to a file">;
-def diagnostic_log_file : Separate<"-diagnostic-log-file">,
+def diagnostic_log_file : Separate<["-"], "diagnostic-log-file">,
HelpText<"Filename (or -) to log diagnostics to">;
-def diagnostic_serialized_file : Separate<"-serialize-diagnostic-file">,
+def diagnostic_serialized_file : Separate<["-"], "serialize-diagnostic-file">,
MetaVarName<"<filename>">,
HelpText<"File for serializing diagnostics in a binary format">;
-def fdiagnostics_format : Separate<"-fdiagnostics-format">,
+def fdiagnostics_format : Separate<["-"], "fdiagnostics-format">,
HelpText<"Change diagnostic formatting to match IDE and command line tools">;
-def fdiagnostics_show_category : Separate<"-fdiagnostics-show-category">,
+def fdiagnostics_show_category : Separate<["-"], "fdiagnostics-show-category">,
HelpText<"Print diagnostic category">;
-def ftabstop : Separate<"-ftabstop">, MetaVarName<"<N>">,
+def ftabstop : Separate<["-"], "ftabstop">, MetaVarName<"<N>">,
HelpText<"Set the tab stop distance.">;
-def ferror_limit : Separate<"-ferror-limit">, MetaVarName<"<N>">,
+def ferror_limit : Separate<["-"], "ferror-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of errors to emit before stopping (0 = no limit).">;
-def fmacro_backtrace_limit : Separate<"-fmacro-backtrace-limit">, MetaVarName<"<N>">,
+def fmacro_backtrace_limit : Separate<["-"], "fmacro-backtrace-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit).">;
-def ftemplate_backtrace_limit : Separate<"-ftemplate-backtrace-limit">, MetaVarName<"<N>">,
+def ftemplate_backtrace_limit : Separate<["-"], "ftemplate-backtrace-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit).">;
-def fconstexpr_backtrace_limit : Separate<"-fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
+def fconstexpr_backtrace_limit : Separate<["-"], "fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">;
-def fmessage_length : Separate<"-fmessage-length">, MetaVarName<"<N>">,
+def fmessage_length : Separate<["-"], "fmessage-length">, MetaVarName<"<N>">,
HelpText<"Format message diagnostics so that they fit within N columns or fewer, when possible.">;
-def Wno_rewrite_macros : Flag<"-Wno-rewrite-macros">,
+def Wno_rewrite_macros : Flag<["-"], "Wno-rewrite-macros">,
HelpText<"Silence ObjC rewriting warnings">;
//===----------------------------------------------------------------------===//
@@ -248,43 +251,43 @@ def Wno_rewrite_macros : Flag<"-Wno-rewrite-macros">,
// This isn't normally used; it is just here so we can parse a
// CompilerInvocation out of a driver-derived argument vector.
-def cc1 : Flag<"-cc1">;
+def cc1 : Flag<["-"], "cc1">;
-def ast_merge : Separate<"-ast-merge">,
+def ast_merge : Separate<["-"], "ast-merge">,
MetaVarName<"<ast file>">,
HelpText<"Merge the given AST file into the translation unit being compiled.">;
-def code_completion_at : Separate<"-code-completion-at">,
+def code_completion_at : Separate<["-"], "code-completion-at">,
MetaVarName<"<file>:<line>:<column>">,
HelpText<"Dump code-completion information at a location">;
-def remap_file : Separate<"-remap-file">,
+def remap_file : Separate<["-"], "remap-file">,
MetaVarName<"<from>;<to>">,
HelpText<"Replace the contents of the <from> file with the contents of the <to> file">;
-def code_completion_at_EQ : Joined<"-code-completion-at=">,
+def code_completion_at_EQ : Joined<["-"], "code-completion-at=">,
Alias<code_completion_at>;
-def code_completion_macros : Flag<"-code-completion-macros">,
+def code_completion_macros : Flag<["-"], "code-completion-macros">,
HelpText<"Include macros in code-completion results">;
-def code_completion_patterns : Flag<"-code-completion-patterns">,
+def code_completion_patterns : Flag<["-"], "code-completion-patterns">,
HelpText<"Include code patterns in code-completion results">;
-def no_code_completion_globals : Flag<"-no-code-completion-globals">,
+def no_code_completion_globals : Flag<["-"], "no-code-completion-globals">,
HelpText<"Do not include global declarations in code-completion results.">;
-def code_completion_brief_comments : Flag<"-code-completion-brief-comments">,
+def code_completion_brief_comments : Flag<["-"], "code-completion-brief-comments">,
HelpText<"Include brief documentation comments in code-completion results.">;
-def disable_free : Flag<"-disable-free">,
+def disable_free : Flag<["-"], "disable-free">,
HelpText<"Disable freeing of memory on exit">;
-def load : Separate<"-load">, MetaVarName<"<dsopath>">,
+def load : Separate<["-"], "load">, MetaVarName<"<dsopath>">,
HelpText<"Load the named plugin (dynamic shared object)">;
-def plugin : Separate<"-plugin">, MetaVarName<"<name>">,
+def plugin : Separate<["-"], "plugin">, MetaVarName<"<name>">,
HelpText<"Use the named plugin action instead of the default action (use \"help\" to list available options)">;
-def plugin_arg : JoinedAndSeparate<"-plugin-arg-">,
+def plugin_arg : JoinedAndSeparate<["-"], "plugin-arg-">,
MetaVarName<"<name> <arg>">,
HelpText<"Pass <arg> to plugin <name>">;
-def add_plugin : Separate<"-add-plugin">, MetaVarName<"<name>">,
+def add_plugin : Separate<["-"], "add-plugin">, MetaVarName<"<name>">,
HelpText<"Use the named plugin action in addition to the default action">;
-def resource_dir : Separate<"-resource-dir">,
+def resource_dir : Separate<["-"], "resource-dir">,
HelpText<"The directory which holds the compiler resource files">;
-def version : Flag<"-version">,
+def version : Flag<["-"], "version">,
HelpText<"Print the compiler version">;
-def ast_dump_filter : Separate<"-ast-dump-filter">,
+def ast_dump_filter : Separate<["-"], "ast-dump-filter">,
MetaVarName<"<dump_filter>">,
HelpText<"Use with -ast-dump or -ast-print to dump/print only AST declaration"
" nodes having a certain substring in a qualified name. Use"
@@ -292,191 +295,197 @@ def ast_dump_filter : Separate<"-ast-dump-filter">,
let Group = Action_Group in {
-def Eonly : Flag<"-Eonly">,
+def Eonly : Flag<["-"], "Eonly">,
HelpText<"Just run preprocessor, no output (for timings)">;
-def dump_raw_tokens : Flag<"-dump-raw-tokens">,
+def dump_raw_tokens : Flag<["-"], "dump-raw-tokens">,
HelpText<"Lex file in raw mode and dump raw tokens">;
-def analyze : Flag<"-analyze">,
+def analyze : Flag<["-"], "analyze">,
HelpText<"Run static analysis engine">;
-def dump_tokens : Flag<"-dump-tokens">,
+def dump_tokens : Flag<["-"], "dump-tokens">,
HelpText<"Run preprocessor, dump internal rep of tokens">;
-def init_only : Flag<"-init-only">,
+def init_only : Flag<["-"], "init-only">,
HelpText<"Only execute frontend initialization">;
-def fixit : Flag<"-fixit">,
+def fixit : Flag<["-"], "fixit">,
HelpText<"Apply fix-it advice to the input source">;
-def fixit_EQ : Joined<"-fixit=">,
+def fixit_EQ : Joined<["-"], "fixit=">,
HelpText<"Apply fix-it advice creating a file with the given suffix">;
-def print_preamble : Flag<"-print-preamble">,
+def print_preamble : Flag<["-"], "print-preamble">,
HelpText<"Print the \"preamble\" of a file, which is a candidate for implicit"
" precompiled headers.">;
-def emit_html : Flag<"-emit-html">,
+def emit_html : Flag<["-"], "emit-html">,
HelpText<"Output input source as HTML">;
-def ast_print : Flag<"-ast-print">,
+def ast_print : Flag<["-"], "ast-print">,
HelpText<"Build ASTs and then pretty-print them">;
-def ast_list : Flag<"-ast-list">,
+def ast_list : Flag<["-"], "ast-list">,
HelpText<"Build ASTs and print the list of declaration node qualified names">;
-def ast_dump : Flag<"-ast-dump">,
+def ast_dump : Flag<["-"], "ast-dump">,
HelpText<"Build ASTs and then debug dump them">;
-def ast_dump_xml : Flag<"-ast-dump-xml">,
+def ast_dump_xml : Flag<["-"], "ast-dump-xml">,
HelpText<"Build ASTs and then debug dump them in a verbose XML format">;
-def ast_view : Flag<"-ast-view">,
+def ast_view : Flag<["-"], "ast-view">,
HelpText<"Build ASTs and view them with GraphViz">;
-def print_decl_contexts : Flag<"-print-decl-contexts">,
+def print_decl_contexts : Flag<["-"], "print-decl-contexts">,
HelpText<"Print DeclContexts and their Decls">;
-def emit_module : Flag<"-emit-module">,
+def emit_module : Flag<["-"], "emit-module">,
HelpText<"Generate pre-compiled module file from a module map">;
-def emit_pth : Flag<"-emit-pth">,
+def emit_pth : Flag<["-"], "emit-pth">,
HelpText<"Generate pre-tokenized header file">;
-def emit_pch : Flag<"-emit-pch">,
+def emit_pch : Flag<["-"], "emit-pch">,
HelpText<"Generate pre-compiled header file">;
-def emit_llvm_bc : Flag<"-emit-llvm-bc">,
+def emit_llvm_bc : Flag<["-"], "emit-llvm-bc">,
HelpText<"Build ASTs then convert to LLVM, emit .bc file">;
-def emit_llvm_only : Flag<"-emit-llvm-only">,
+def emit_llvm_only : Flag<["-"], "emit-llvm-only">,
HelpText<"Build ASTs and convert to LLVM, discarding output">;
-def emit_codegen_only : Flag<"-emit-codegen-only">,
+def emit_codegen_only : Flag<["-"], "emit-codegen-only">,
HelpText<"Generate machine code, but discard output">;
-def emit_obj : Flag<"-emit-obj">,
+def emit_obj : Flag<["-"], "emit-obj">,
HelpText<"Emit native object files">;
-def rewrite_test : Flag<"-rewrite-test">,
+def rewrite_test : Flag<["-"], "rewrite-test">,
HelpText<"Rewriter playground">;
-def rewrite_macros : Flag<"-rewrite-macros">,
+def rewrite_macros : Flag<["-"], "rewrite-macros">,
HelpText<"Expand macros without full preprocessing">;
-def migrate : Flag<"-migrate">,
+def migrate : Flag<["-"], "migrate">,
HelpText<"Migrate source code">;
}
-def mt_migrate_directory : Separate<"-mt-migrate-directory">,
+def mt_migrate_directory : Separate<["-"], "mt-migrate-directory">,
HelpText<"Directory for temporary files produced during ARC or ObjC migration">;
-def arcmt_check : Flag<"-arcmt-check">,
+def arcmt_check : Flag<["-"], "arcmt-check">,
HelpText<"Check for ARC migration issues that need manual handling">;
-def arcmt_modify : Flag<"-arcmt-modify">,
+def arcmt_modify : Flag<["-"], "arcmt-modify">,
HelpText<"Apply modifications to files to conform to ARC">;
-def arcmt_migrate : Flag<"-arcmt-migrate">,
+def arcmt_migrate : Flag<["-"], "arcmt-migrate">,
HelpText<"Apply modifications and produces temporary files that conform to ARC">;
-def relocatable_pch : Flag<"-relocatable-pch">,
+def relocatable_pch : Flag<["-", "--"], "relocatable-pch">,
HelpText<"Whether to build a relocatable precompiled header">;
-def print_stats : Flag<"-print-stats">,
+def print_stats : Flag<["-"], "print-stats">,
HelpText<"Print performance metrics and statistics">;
-def fdump_record_layouts : Flag<"-fdump-record-layouts">,
+def fdump_record_layouts : Flag<["-"], "fdump-record-layouts">,
HelpText<"Dump record layout information">;
-def fdump_record_layouts_simple : Flag<"-fdump-record-layouts-simple">,
+def fdump_record_layouts_simple : Flag<["-"], "fdump-record-layouts-simple">,
HelpText<"Dump record layout information in a simple form used for testing">;
-def fix_what_you_can : Flag<"-fix-what-you-can">,
+def fix_what_you_can : Flag<["-"], "fix-what-you-can">,
HelpText<"Apply fix-it advice even in the presence of unfixable errors">;
-def fix_only_warnings : Flag<"-fix-only-warnings">,
+def fix_only_warnings : Flag<["-"], "fix-only-warnings">,
HelpText<"Apply fix-it advice only for warnings, not errors">;
-def fixit_recompile : Flag<"-fixit-recompile">,
+def fixit_recompile : Flag<["-"], "fixit-recompile">,
HelpText<"Apply fix-it changes and recompile">;
-def fixit_to_temp : Flag<"-fixit-to-temporary">,
+def fixit_to_temp : Flag<["-"], "fixit-to-temporary">,
HelpText<"Apply fix-it changes to temporary files">;
-def foverride_record_layout_EQ : Joined<"-foverride-record-layout=">,
+def foverride_record_layout_EQ : Joined<["-"], "foverride-record-layout=">,
HelpText<"Override record layouts with those in the given file">;
//===----------------------------------------------------------------------===//
// Language Options
//===----------------------------------------------------------------------===//
-def fblocks_runtime_optional : Flag<"-fblocks-runtime-optional">,
+def fblocks_runtime_optional : Flag<["-"], "fblocks-runtime-optional">,
HelpText<"Weakly link in the blocks runtime">;
-def fsjlj_exceptions : Flag<"-fsjlj-exceptions">,
+def fsjlj_exceptions : Flag<["-"], "fsjlj-exceptions">,
HelpText<"Use SjLj style exceptions">;
-def fhidden_weak_vtables : Flag<"-fhidden-weak-vtables">,
+def fhidden_weak_vtables : Flag<["-"], "fhidden-weak-vtables">,
HelpText<"Generate weak vtables and RTTI with hidden visibility">;
-def main_file_name : Separate<"-main-file-name">,
+def main_file_name : Separate<["-"], "main-file-name">,
HelpText<"Main file name to use for debug info">;
-def fno_signed_char : Flag<"-fno-signed-char">,
+def fno_signed_char : Flag<["-"], "fno-signed-char">,
HelpText<"Char is unsigned">;
-def fconstant_string_class : Separate<"-fconstant-string-class">,
+def fno_wchar : Flag<["-"], "fno-wchar">,
+ HelpText<"Disable C++ builtin type wchar_t">;
+def fconstant_string_class : Separate<["-"], "fconstant-string-class">,
MetaVarName<"<class name>">,
HelpText<"Specify the class to use for constant Objective-C string objects.">;
-def fobjc_arc_cxxlib_EQ : Joined<"-fobjc-arc-cxxlib=">,
+def fobjc_arc_cxxlib_EQ : Joined<["-"], "fobjc-arc-cxxlib=">,
HelpText<"Objective-C++ Automatic Reference Counting standard library kind">;
-def fobjc_runtime_has_weak : Flag<"-fobjc-runtime-has-weak">,
+def fobjc_runtime_has_weak : Flag<["-"], "fobjc-runtime-has-weak">,
HelpText<"The target Objective-C runtime supports ARC weak operations">;
-def fobjc_dispatch_method_EQ : Joined<"-fobjc-dispatch-method=">,
+def fobjc_dispatch_method_EQ : Joined<["-"], "fobjc-dispatch-method=">,
HelpText<"Objective-C dispatch method to use">;
-def fobjc_default_synthesize_properties : Flag<"-fobjc-default-synthesize-properties">,
+def fobjc_default_synthesize_properties : Flag<["-"], "fobjc-default-synthesize-properties">,
HelpText<"enable the default synthesis of Objective-C properties">;
-def pic_level : Separate<"-pic-level">,
+def fencode_extended_block_signature : Flag<["-"], "fencode-extended-block-signature">,
+ HelpText<"enable extended encoding of block type signature">;
+def pic_level : Separate<["-"], "pic-level">,
HelpText<"Value for __PIC__">;
-def pie_level : Separate<"-pie-level">,
+def pie_level : Separate<["-"], "pie-level">,
HelpText<"Value for __PIE__">;
-def fno_validate_pch : Flag<"-fno-validate-pch">,
+def fno_validate_pch : Flag<["-"], "fno-validate-pch">,
HelpText<"Disable validation of precompiled headers">;
-def dump_deserialized_pch_decls : Flag<"-dump-deserialized-decls">,
+def dump_deserialized_pch_decls : Flag<["-"], "dump-deserialized-decls">,
HelpText<"Dump declarations that are deserialized from PCH, for testing">;
-def error_on_deserialized_pch_decl : Separate<"-error-on-deserialized-decl">,
+def error_on_deserialized_pch_decl : Separate<["-"], "error-on-deserialized-decl">,
HelpText<"Emit error if a specific declaration is deserialized from PCH, for testing">;
-def error_on_deserialized_pch_decl_EQ : Joined<"-error-on-deserialized-decl=">,
+def error_on_deserialized_pch_decl_EQ : Joined<["-"], "error-on-deserialized-decl=">,
Alias<error_on_deserialized_pch_decl>;
-def static_define : Flag<"-static-define">,
+def static_define : Flag<["-"], "static-define">,
HelpText<"Should __STATIC__ be defined">;
-def stack_protector : Separate<"-stack-protector">,
+def stack_protector : Separate<["-"], "stack-protector">,
HelpText<"Enable stack protectors">;
-def fvisibility : Separate<"-fvisibility">,
+def stack_protector_buffer_size : Separate<["-"], "stack-protector-buffer-size">,
+ HelpText<"Lower bound for a buffer to be considered for stack protection">;
+def fvisibility : Separate<["-"], "fvisibility">,
HelpText<"Default symbol visibility">;
-def ftemplate_depth : Separate<"-ftemplate-depth">,
+def ftemplate_depth : Separate<["-"], "ftemplate-depth">,
HelpText<"Maximum depth of recursive template instantiation">;
-def fconstexpr_depth : Separate<"-fconstexpr-depth">,
+def fconstexpr_depth : Separate<["-"], "fconstexpr-depth">,
HelpText<"Maximum depth of recursive constexpr function calls">;
-def fconst_strings : Flag<"-fconst-strings">,
+def fconst_strings : Flag<["-"], "fconst-strings">,
HelpText<"Use a const qualified type for string literals in C and ObjC">;
-def fno_const_strings : Flag<"-fno-const-strings">,
+def fno_const_strings : Flag<["-"], "fno-const-strings">,
HelpText<"Don't use a const qualified type for string literals in C and ObjC">;
-def fno_bitfield_type_align : Flag<"-fno-bitfield-type-align">,
+def fno_bitfield_type_align : Flag<["-"], "fno-bitfield-type-align">,
HelpText<"Ignore bit-field types when aligning structures">;
-def ffake_address_space_map : Flag<"-ffake-address-space-map">,
+def ffake_address_space_map : Flag<["-"], "ffake-address-space-map">,
HelpText<"Use a fake address space map; OpenCL testing purposes only">;
-def funknown_anytype : Flag<"-funknown-anytype">,
+def funknown_anytype : Flag<["-"], "funknown-anytype">,
HelpText<"Enable parser support for the __unknown_anytype type; for testing purposes only">;
-def fdebugger_support : Flag<"-fdebugger-support">,
+def fdebugger_support : Flag<["-"], "fdebugger-support">,
HelpText<"Enable special debugger support behavior">;
-def fdebugger_cast_result_to_id : Flag<"-fdebugger-cast-result-to-id">,
+def fdebugger_cast_result_to_id : Flag<["-"], "fdebugger-cast-result-to-id">,
HelpText<"Enable casting unknown expression results to id">;
-def fdebugger_objc_literal : Flag<"-fdebugger-objc-literal">,
+def fdebugger_objc_literal : Flag<["-"], "fdebugger-objc-literal">,
HelpText<"Enable special debugger support for Objective-C subscripting and literals">;
-def fdeprecated_macro : Flag<"-fdeprecated-macro">,
+def fdeprecated_macro : Flag<["-"], "fdeprecated-macro">,
HelpText<"Defines the __DEPRECATED macro">;
-def fno_deprecated_macro : Flag<"-fno-deprecated-macro">,
+def fno_deprecated_macro : Flag<["-"], "fno-deprecated-macro">,
HelpText<"Undefines the __DEPRECATED macro">;
//===----------------------------------------------------------------------===//
// Header Search Options
//===----------------------------------------------------------------------===//
-def nostdsysteminc : Flag<"-nostdsysteminc">,
+def nostdsysteminc : Flag<["-"], "nostdsysteminc">,
HelpText<"Disable standard system #include directories">;
-def fmodule_name : Joined<"-fmodule-name=">,
+def fmodule_name : Joined<["-"], "fmodule-name=">,
MetaVarName<"<name>">,
HelpText<"Specify the name of the module to build">;
-def fdisable_module_hash : Flag<"-fdisable-module-hash">,
+def fdisable_module_hash : Flag<["-"], "fdisable-module-hash">,
HelpText<"Disable the module hash">;
-def c_isystem : JoinedOrSeparate<"-c-isystem">, MetaVarName<"<directory>">,
+def c_isystem : JoinedOrSeparate<["-"], "c-isystem">, MetaVarName<"<directory>">,
HelpText<"Add directory to the C SYSTEM include search path">;
-def objc_isystem : JoinedOrSeparate<"-objc-isystem">,
+def objc_isystem : JoinedOrSeparate<["-"], "objc-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the ObjC SYSTEM include search path">;
-def objcxx_isystem : JoinedOrSeparate<"-objcxx-isystem">,
+def objcxx_isystem : JoinedOrSeparate<["-"], "objcxx-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the ObjC++ SYSTEM include search path">;
-def internal_isystem : JoinedOrSeparate<"-internal-isystem">,
+def internal_isystem : JoinedOrSeparate<["-"], "internal-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the internal system include search path; these "
"are assumed to not be user-provided and are used to model system "
"and standard headers' paths.">;
-def internal_externc_isystem : JoinedOrSeparate<"-internal-externc-isystem">,
+def internal_externc_isystem : JoinedOrSeparate<["-"], "internal-externc-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the internal system include search path with "
"implicit extern \"C\" semantics; these are assumed to not be "
"user-provided and are used to model system and standard headers' "
"paths.">;
-def isystem_prefix : JoinedOrSeparate<"-isystem-prefix">,
+def isystem_prefix : JoinedOrSeparate<["-"], "isystem-prefix">,
MetaVarName<"<prefix>">,
HelpText<"Treat all #include paths starting with <prefix> as including a "
"system header.">;
-def ino_system_prefix : JoinedOrSeparate<"-ino-system-prefix">,
+def ino_system_prefix : JoinedOrSeparate<["-"], "ino-system-prefix">,
MetaVarName<"<prefix>">,
HelpText<"Treat all #include paths starting with <prefix> as not including a "
"system header.">;
@@ -485,42 +494,42 @@ def ino_system_prefix : JoinedOrSeparate<"-ino-system-prefix">,
// Preprocessor Options
//===----------------------------------------------------------------------===//
-def include_pth : Separate<"-include-pth">, MetaVarName<"<file>">,
+def include_pth : Separate<["-"], "include-pth">, MetaVarName<"<file>">,
HelpText<"Include file before parsing">;
-def chain_include : Separate<"-chain-include">, MetaVarName<"<file>">,
+def chain_include : Separate<["-"], "chain-include">, MetaVarName<"<file>">,
HelpText<"Include and chain a header file after turning it into PCH">;
-def preamble_bytes_EQ : Joined<"-preamble-bytes=">,
+def preamble_bytes_EQ : Joined<["-"], "preamble-bytes=">,
HelpText<"Assume that the precompiled header is a precompiled preamble "
"covering the first N bytes of the main file">;
-def token_cache : Separate<"-token-cache">, MetaVarName<"<path>">,
+def token_cache : Separate<["-"], "token-cache">, MetaVarName<"<path>">,
HelpText<"Use specified token cache file">;
-def detailed_preprocessing_record : Flag<"-detailed-preprocessing-record">,
+def detailed_preprocessing_record : Flag<["-"], "detailed-preprocessing-record">,
HelpText<"include a detailed record of preprocessing actions">;
//===----------------------------------------------------------------------===//
// OpenCL Options
//===----------------------------------------------------------------------===//
-def cl_opt_disable : Flag<"-cl-opt-disable">,
+def cl_opt_disable : Flag<["-"], "cl-opt-disable">,
HelpText<"OpenCL only. This option disables all optimizations. The default is optimizations are enabled.">;
-def cl_single_precision_constant : Flag<"-cl-single-precision-constant">,
+def cl_single_precision_constant : Flag<["-"], "cl-single-precision-constant">,
HelpText<"OpenCL only. Treat double precision floating-point constant as single precision constant.">;
-def cl_finite_math_only : Flag<"-cl-finite-math-only">,
+def cl_finite_math_only : Flag<["-"], "cl-finite-math-only">,
HelpText<"OpenCL only. Allow floating-point optimizations that assume arguments and results are not NaNs or +-Inf.">;
-def cl_unsafe_math_optimizations : Flag<"-cl-unsafe-math-optimizations">,
+def cl_unsafe_math_optimizations : Flag<["-"], "cl-unsafe-math-optimizations">,
HelpText<"OpenCL only. Allow unsafe floating-point optimizations. Also implies -cl-no-signed-zeros and -cl-mad-enable">;
-def cl_fast_relaxed_math : Flag<"-cl-fast-relaxed-math">,
+def cl_fast_relaxed_math : Flag<["-"], "cl-fast-relaxed-math">,
HelpText<"OpenCL only. Sets -cl-finite-math-only and -cl-unsafe-math-optimizations, and defines __FAST_RELAXED_MATH__">;
-def cl_mad_enable : Flag<"-cl-mad-enable">,
+def cl_mad_enable : Flag<["-"], "cl-mad-enable">,
HelpText<"OpenCL only. Enable less precise MAD instructions to be generated.">;
-def cl_std_EQ : Joined<"-cl-std=">,
+def cl_std_EQ : Joined<["-"], "cl-std=">,
HelpText<"OpenCL language standard to compile for">;
//===----------------------------------------------------------------------===//
// CUDA Options
//===----------------------------------------------------------------------===//
-def fcuda_is_device : Flag<"-fcuda-is-device">,
+def fcuda_is_device : Flag<["-"], "fcuda-is-device">,
HelpText<"Generate code for CUDA device">;
} // let Flags = [CC1Option]
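Every definition above gains an explicit prefix list as its first template argument (usually ["-"], occasionally ["-", "--"] as for -relocatable-pch). The OptParser.td hunk below introduces the underlying Option<list<string> prefixes, ...> class, and at the C++ level the prefixes end up as a null-terminated string array in OptTable::Info. A minimal sketch, under the assumption that matching simply tries each prefix in turn (illustrative only; the real logic lives in the option parser):

    #include <cstddef>
    #include <cstring>

    // Hypothetical prefix table for an option declared with ["-", "--"],
    // stored null-terminated as in OptTable::Info::Prefixes below.
    static const char *const Prefixes[] = {"-", "--", 0};

    // Return the matched prefix+name length in bytes (the "ArgSize"
    // later passed to Option::accept), or 0 if nothing matches.
    static unsigned matchArg(const char *const *P, const char *Name,
                             const char *Arg) {
      for (; *P; ++P) {
        std::size_t PLen = std::strlen(*P), NLen = std::strlen(Name);
        if (std::strncmp(Arg, *P, PLen) == 0 &&
            std::strncmp(Arg + PLen, Name, NLen) == 0)
          return PLen + NLen;
      }
      return 0;
    }

With this shape, matchArg(Prefixes, "relocatable-pch", "--relocatable-pch") matches on the second prefix and returns the offset at which a joined value would begin.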
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
index 7a10d56..5f63aa7 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
@@ -98,8 +98,7 @@ public:
StringRef getSysRoot() const;
/// getArgsForToolChain - Return the derived argument list for the
- /// tool chain \arg TC (or the default tool chain, if TC is not
- /// specified).
+ /// tool chain \p TC (or the default tool chain, if TC is not specified).
///
/// \param BoundArch - The bound architecture name, or 0.
const DerivedArgList &getArgsForToolChain(const ToolChain *TC,
@@ -142,6 +141,14 @@ public:
void PrintJob(raw_ostream &OS, const Job &J,
const char *Terminator, bool Quote) const;
+ /// PrintDiagnosticJob - Print one job in -### format, but with
+ /// superfluous options (those not needed to reproduce the crash)
+ /// removed.
+ ///
+ /// \param OS - The stream to print on.
+ /// \param J - The job to print.
+ void PrintDiagnosticJob(raw_ostream &OS, const Job &J) const;
+
/// ExecuteCommand - Execute an actual command.
///
/// \param FailingCommand - For non-zero results, this will be set to the
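A short, hedged sketch of how the new hook might be used by a crash-reporting path; the helper name and surrounding context are hypothetical, and only PrintDiagnosticJob itself comes from the hunk above:

    #include "clang/Driver/Compilation.h"
    #include "clang/Driver/Job.h"
    #include "llvm/Support/raw_ostream.h"

    // Hypothetical helper: emit a reduced reproduction line for a
    // crashed job instead of the full -### output.
    static void emitReproLine(const clang::driver::Compilation &C,
                              const clang::driver::Job &Failing) {
      llvm::errs() << "note: diagnostic msg: reproduce with:\n";
      C.PrintDiagnosticJob(llvm::errs(), Failing);
    }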
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
index 6095055..b752ce6 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
@@ -146,28 +146,11 @@ private:
/// jobs.
unsigned CheckInputsExist : 1;
- /// Use the clang compiler where possible.
- unsigned CCCUseClang : 1;
-
- /// Use clang for handling C++ and Objective-C++ inputs.
- unsigned CCCUseClangCXX : 1;
-
- /// Use clang as a preprocessor (clang's preprocessor will still be
- /// used where an integrated CPP would).
- unsigned CCCUseClangCPP : 1;
-
- /// \brief Force use of clang frontend.
- unsigned ForcedClangUse : 1;
-
public:
/// Use lazy precompiled headers for PCH support.
unsigned CCCUsePCH : 1;
private:
- /// Only use clang for the given architectures (only used when
- /// non-empty).
- std::set<llvm::Triple::ArchType> CCCClangArchs;
-
/// Certain options suppress the 'no input files' warning.
bool SuppressMissingInputWarning : 1;
@@ -232,9 +215,6 @@ public:
InstalledDir = Value;
}
- bool shouldForceClangUse() const { return ForcedClangUse; }
- void setForcedClangUse(bool V = true) { ForcedClangUse = V; }
-
/// @}
/// @name Primary Functionality
/// @{
@@ -287,7 +267,7 @@ public:
/// BuildJobs - Bind actions to concrete tools and translate
/// arguments to form the list of jobs to run.
///
- /// \arg C - The compilation that is being built.
+ /// \param C - The compilation that is being built.
void BuildJobs(Compilation &C) const;
/// ExecuteCompilation - Execute the compilation according to the command line
@@ -323,26 +303,21 @@ public:
/// PrintVersion - Print the driver version.
void PrintVersion(const Compilation &C, raw_ostream &OS) const;
- /// GetFilePath - Lookup \arg Name in the list of file search paths.
+ /// GetFilePath - Lookup \p Name in the list of file search paths.
///
- /// \arg TC - The tool chain for additional information on
+ /// \param TC - The tool chain for additional information on
/// directories to search.
//
// FIXME: This should be in CompilationInfo.
std::string GetFilePath(const char *Name, const ToolChain &TC) const;
- /// GetProgramPath - Lookup \arg Name in the list of program search
- /// paths.
+ /// GetProgramPath - Lookup \p Name in the list of program search paths.
///
- /// \arg TC - The provided tool chain for additional information on
+ /// \param TC - The provided tool chain for additional information on
/// directories to search.
- ///
- /// \arg WantFile - False when searching for an executable file, otherwise
- /// true. Defaults to false.
//
// FIXME: This should be in CompilationInfo.
- std::string GetProgramPath(const char *Name, const ToolChain &TC,
- bool WantFile = false) const;
+ std::string GetProgramPath(const char *Name, const ToolChain &TC) const;
/// HandleImmediateArgs - Handle any arguments which should be
/// treated before building actions or binding tools.
@@ -352,14 +327,14 @@ public:
bool HandleImmediateArgs(const Compilation &C);
/// ConstructAction - Construct the appropriate action to do for
- /// \arg Phase on the \arg Input, taking in to account arguments
+ /// \p Phase on the \p Input, taking into account arguments
/// like -fsyntax-only or --analyze.
Action *ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
Action *Input) const;
/// BuildJobsForAction - Construct the jobs to perform for the
- /// action \arg A.
+ /// action \p A.
void BuildJobsForAction(Compilation &C,
const Action *A,
const ToolChain *TC,
@@ -369,7 +344,7 @@ public:
InputInfo &Result) const;
/// GetNamedOutputPath - Return the name to use for the output of
- /// the action \arg JA. The result is appended to the compilation's
+ /// the action \p JA. The result is appended to the compilation's
/// list of temporary or result files, as appropriate.
///
/// \param C - The compilation.
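With the WantFile parameter removed, executable lookup and file lookup are cleanly split between GetProgramPath and GetFilePath. A before/after sketch against the declared signature (the tool name is chosen purely for illustration):

    #include "clang/Driver/Driver.h"
    #include <string>

    // Hypothetical caller locating an assembler binary.
    static std::string findAssembler(const clang::driver::Driver &D,
                                     const clang::driver::ToolChain &TC) {
      // Previously: D.GetProgramPath("as", TC, /*WantFile=*/false);
      return D.GetProgramPath("as", TC);
    }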
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Job.h b/contrib/llvm/tools/clang/include/clang/Driver/Job.h
index c94886d..84f5ee1 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Job.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Job.h
@@ -39,8 +39,6 @@ public:
/// addCommand - Append a command to the current job, which must be
/// either a piped job or a job list.
void addCommand(Command *C);
-
- static bool classof(const Job *) { return true; }
};
/// Command - An executable path/name and argument vector to
@@ -78,7 +76,6 @@ public:
static bool classof(const Job *J) {
return J->getKind() == CommandClass;
}
- static bool classof(const Command *) { return true; }
};
/// JobList - A sequence of jobs to perform.
@@ -113,7 +110,6 @@ public:
static bool classof(const Job *J) {
return J->getKind() == JobListClass;
}
- static bool classof(const JobList *) { return true; }
};
} // end namespace driver
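The dropped classof(const X *) { return true; } overloads are the identity cases that older versions of LLVM's isa<>/dyn_cast<> templates required; the casting machinery now handles that case itself, so kind-based dispatch through the remaining classof(const Job *) overloads is unchanged. A small sketch, assuming JobList exposes begin()/end() iteration over its Job pointers:

    #include "clang/Driver/Job.h"
    #include "llvm/Support/Casting.h"

    // Count the commands in a job tree; isa/dyn_cast still dispatch
    // through the remaining classof(const Job *) overloads.
    static unsigned countCommands(const clang::driver::Job &J) {
      if (llvm::isa<clang::driver::Command>(&J))
        return 1;
      unsigned N = 0;
      if (const clang::driver::JobList *JL =
              llvm::dyn_cast<clang::driver::JobList>(&J))
        for (clang::driver::JobList::const_iterator I = JL->begin(),
                                                    E = JL->end();
             I != E; ++I)
          N += countCommands(**I);
      return N;
    }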
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td b/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td
index 9e6d5b9..d16a2a7 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td
@@ -88,6 +88,9 @@ def NoForward : OptionFlag;
// CC1Option - This option should be accepted by clang -cc1.
def CC1Option : OptionFlag;
+// NoDriverOption - This option should not be accepted by the driver.
+def NoDriverOption : OptionFlag;
+
// Define the option group class.
class OptionGroup<string name> {
@@ -99,8 +102,9 @@ class OptionGroup<string name> {
// Define the option class.
-class Option<string name, OptionKind kind> {
+class Option<list<string> prefixes, string name, OptionKind kind> {
string EnumName = ?; // Uses the def name if undefined.
+ list<string> Prefixes = prefixes;
string Name = name;
OptionKind Kind = kind;
// Used by MultiArg option kind.
@@ -114,15 +118,22 @@ class Option<string name, OptionKind kind> {
// Helpers for defining options.
-class Flag<string name> : Option<name, KIND_FLAG>;
-class Joined<string name> : Option<name, KIND_JOINED>;
-class Separate<string name> : Option<name, KIND_SEPARATE>;
-class CommaJoined<string name> : Option<name, KIND_COMMAJOINED>;
-class MultiArg<string name, int numargs> : Option<name, KIND_MULTIARG> {
+class Flag<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_FLAG>;
+class Joined<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_JOINED>;
+class Separate<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_SEPARATE>;
+class CommaJoined<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_COMMAJOINED>;
+class MultiArg<list<string> prefixes, string name, int numargs>
+ : Option<prefixes, name, KIND_MULTIARG> {
int NumArgs = numargs;
}
-class JoinedOrSeparate<string name> : Option<name, KIND_JOINED_OR_SEPARATE>;
-class JoinedAndSeparate<string name> : Option<name, KIND_JOINED_AND_SEPARATE>;
+class JoinedOrSeparate<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_JOINED_OR_SEPARATE>;
+class JoinedAndSeparate<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_JOINED_AND_SEPARATE>;
// Mix-ins for adding optional attributes.
@@ -137,5 +148,5 @@ class MetaVarName<string name> { string MetaVarName = name; }
// FIXME: Have generator validate that these appear in correct position (and
// aren't duplicated).
-def INPUT : Option<"<input>", KIND_INPUT>, Flags<[DriverOption,CC1Option]>;
-def UNKNOWN : Option<"<unknown>", KIND_UNKNOWN>;
+def INPUT : Option<[], "<input>", KIND_INPUT>, Flags<[DriverOption,CC1Option]>;
+def UNKNOWN : Option<[], "<unknown>", KIND_UNKNOWN>;
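INPUT and UNKNOWN deliberately get empty prefix lists: an argument that starts with no known prefix can be classified as an input without consulting the option table at all. That is what the PrefixesUnion member added to OptTable in the next hunk enables. A paraphrased sketch of that test, not a copy of the real parser logic:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSet.h"

    // Paraphrased: if an argument begins with no prefix from the union
    // of all option prefixes, it cannot be an option, so option lookup
    // can be skipped entirely.
    static bool isInputArg(const llvm::StringSet<> &PrefixesUnion,
                           llvm::StringRef Arg) {
      for (llvm::StringSet<>::const_iterator I = PrefixesUnion.begin(),
                                             E = PrefixesUnion.end();
           I != E; ++I)
        if (Arg.startswith(I->getKey()))
          return false;
      return true;
    }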
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
index 27bd119..53d83a0 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
@@ -12,24 +12,10 @@
#include "clang/Basic/LLVM.h"
#include "clang/Driver/OptSpecifier.h"
+#include "llvm/ADT/StringSet.h"
namespace clang {
namespace driver {
-namespace options {
- enum DriverFlag {
- DriverOption = (1 << 0),
- HelpHidden = (1 << 1),
- LinkerInput = (1 << 2),
- NoArgumentUnused = (1 << 3),
- NoForward = (1 << 4),
- RenderAsInput = (1 << 5),
- RenderJoined = (1 << 6),
- RenderSeparate = (1 << 7),
- Unsupported = (1 << 8),
- CC1Option = (1 << 9)
- };
-}
-
class Arg;
class ArgList;
class InputArgList;
@@ -46,9 +32,13 @@ namespace options {
public:
/// \brief Entry for a single option instance in the option data table.
struct Info {
+ /// A null-terminated array of prefix strings to apply to the name while
+ /// matching.
+ const char *const *Prefixes;
const char *Name;
const char *HelpText;
const char *MetaVar;
+ unsigned ID;
unsigned char Kind;
unsigned char Param;
unsigned short Flags;
@@ -61,19 +51,18 @@ namespace options {
const Info *OptionInfos;
unsigned NumOptionInfos;
- /// \brief The lazily constructed options table, indexed by option::ID - 1.
- mutable Option **Options;
-
- /// \brief Prebound input option instance.
- const Option *TheInputOption;
-
- /// \brief Prebound unknown option instance.
- const Option *TheUnknownOption;
+ unsigned TheInputOptionID;
+ unsigned TheUnknownOptionID;
/// The index of the first option which can be parsed (i.e., is not a
/// special option like 'input' or 'unknown', and is not an option group).
unsigned FirstSearchableIndex;
+ /// The union of all option prefixes. If an argument does not begin with
+ /// one of these, it is an input.
+ llvm::StringSet<> PrefixesUnion;
+ std::string PrefixChars;
+
private:
const Info &getInfo(OptSpecifier Opt) const {
unsigned id = Opt.getID();
@@ -81,8 +70,6 @@ namespace options {
return OptionInfos[id - 1];
}
- Option *CreateOption(unsigned id) const;
-
protected:
OptTable(const Info *_OptionInfos, unsigned _NumOptionInfos);
public:
@@ -95,17 +82,7 @@ namespace options {
/// if necessary.
///
/// \return The option, or null for the INVALID option id.
- const Option *getOption(OptSpecifier Opt) const {
- unsigned id = Opt.getID();
- if (id == 0)
- return 0;
-
- assert((unsigned) (id - 1) < getNumOptions() && "Invalid ID.");
- Option *&Entry = Options[id - 1];
- if (!Entry)
- Entry = CreateOption(id);
- return Entry;
- }
+ const Option getOption(OptSpecifier Opt) const;
/// \brief Lookup the name of the given option.
const char *getOptionName(OptSpecifier id) const {
@@ -122,11 +99,6 @@ namespace options {
return getInfo(id).GroupID;
}
- /// \brief Should the help for the given option be hidden by default.
- bool isOptionHelpHidden(OptSpecifier id) const {
- return getInfo(id).Flags & options::HelpHidden;
- }
-
/// \brief Get the help text to use to describe this option.
const char *getOptionHelpText(OptSpecifier id) const {
return getInfo(id).HelpText;
@@ -176,9 +148,12 @@ namespace options {
/// \param OS - The stream to write the help text to.
/// \param Name - The name to use in the usage line.
/// \param Title - The title to use in the usage line.
- /// \param ShowHidden - Whether help-hidden arguments should be shown.
+ /// \param FlagsToInclude - If non-zero, only include options with any
+ /// of these flags set.
+ /// \param FlagsToExclude - Exclude options with any of these flags set.
void PrintHelp(raw_ostream &OS, const char *Name,
- const char *Title, bool ShowHidden = false) const;
+ const char *Title, unsigned short FlagsToInclude = 0,
+ unsigned short FlagsToExclude = 0) const;
};
}
}
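The ShowHidden boolean becomes a pair of flag masks, so callers can slice the option table by arbitrary OptionFlag bits rather than by hidden-ness alone. A hedged usage sketch; the title strings are illustrative, and the flag enums are the ones relocated into Option.h in the next hunk:

    #include "clang/Driver/OptTable.h"
    #include "clang/Driver/Option.h"
    #include "llvm/Support/raw_ostream.h"

    // Print only options accepted by clang -cc1, keeping help-hidden
    // ones suppressed.
    static void printCC1Help(const clang::driver::OptTable &Opts) {
      Opts.PrintHelp(llvm::outs(), "clang -cc1", "clang CC1 options",
                     /*FlagsToInclude=*/clang::driver::options::CC1Option,
                     /*FlagsToExclude=*/clang::driver::options::HelpHidden);
    }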
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Option.h b/contrib/llvm/tools/clang/include/clang/Driver/Option.h
index e6c4e12..c3db773 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Option.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Option.h
@@ -10,15 +10,36 @@
#ifndef CLANG_DRIVER_OPTION_H_
#define CLANG_DRIVER_OPTION_H_
-#include "clang/Driver/OptSpecifier.h"
+#include "clang/Driver/OptTable.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
#include "clang/Basic/LLVM.h"
namespace clang {
namespace driver {
class Arg;
class ArgList;
- class OptionGroup;
+
+namespace options {
+ /// Base flags for all options. Custom flags may be added after.
+ enum DriverFlag {
+ HelpHidden = (1 << 0),
+ RenderAsInput = (1 << 1),
+ RenderJoined = (1 << 2),
+ RenderSeparate = (1 << 3)
+ };
+
+ /// Flags specifically for clang options.
+ enum ClangFlags {
+ DriverOption = (1 << 4),
+ LinkerInput = (1 << 5),
+ NoArgumentUnused = (1 << 6),
+ NoForward = (1 << 7),
+ Unsupported = (1 << 8),
+ CC1Option = (1 << 9),
+ NoDriverOption = (1 << 10)
+ };
+}
/// Option - Abstract representation for a single form of driver
/// argument.
@@ -53,100 +74,104 @@ namespace driver {
RenderValuesStyle
};
- private:
- OptionClass Kind;
-
- /// The option ID.
- OptSpecifier ID;
-
- /// The option name.
- StringRef Name;
-
- /// Group this option is a member of, if any.
- const OptionGroup *Group;
-
- /// Option that this is an alias for, if any.
- const Option *Alias;
-
- /// Unsupported options will be rejected.
- bool Unsupported : 1;
-
- /// Treat this option like a linker input?
- bool LinkerInput : 1;
-
- /// When rendering as an input, don't render the option.
+ protected:
+ const OptTable::Info *Info;
+ const OptTable *Owner;
- // FIXME: We should ditch the render/renderAsInput distinction.
- bool NoOptAsInput : 1;
+ public:
+ Option(const OptTable::Info *Info, const OptTable *Owner);
+ ~Option();
- /// The style to using when rendering arguments parsed by this option.
- unsigned RenderStyle : 2;
+ bool isValid() const {
+ return Info != 0;
+ }
- /// This option is only consumed by the driver.
- bool DriverOption : 1;
+ unsigned getID() const {
+ assert(Info && "Must have a valid info!");
+ return Info->ID;
+ }
- /// This option should not report argument unused errors.
- bool NoArgumentUnused : 1;
+ OptionClass getKind() const {
+ assert(Info && "Must have a valid info!");
+ return OptionClass(Info->Kind);
+ }
- /// This option should not be implicitly forwarded.
- bool NoForward : 1;
+ /// \brief Get the name of this option without any prefix.
+ StringRef getName() const {
+ assert(Info && "Must have a valid info!");
+ return Info->Name;
+ }
- /// CC1Option - This option should be accepted by clang -cc1.
- bool CC1Option : 1;
+ const Option getGroup() const {
+ assert(Info && "Must have a valid info!");
+ assert(Owner && "Must have a valid owner!");
+ return Owner->getOption(Info->GroupID);
+ }
- protected:
- Option(OptionClass Kind, OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias);
- public:
- virtual ~Option();
+ const Option getAlias() const {
+ assert(Info && "Must have a valid info!");
+ assert(Owner && "Must have a valid owner!");
+ return Owner->getOption(Info->AliasID);
+ }
- unsigned getID() const { return ID.getID(); }
- OptionClass getKind() const { return Kind; }
- StringRef getName() const { return Name; }
- const OptionGroup *getGroup() const { return Group; }
- const Option *getAlias() const { return Alias; }
+ /// \brief Get the default prefix for this option.
+ StringRef getPrefix() const {
+ const char *Prefix = *Info->Prefixes;
+ return Prefix ? Prefix : StringRef();
+ }
- bool isUnsupported() const { return Unsupported; }
- void setUnsupported(bool Value) { Unsupported = Value; }
+ /// \brief Get the name of this option with the default prefix.
+ std::string getPrefixedName() const {
+ std::string Ret = getPrefix();
+ Ret += getName();
+ return Ret;
+ }
- bool isLinkerInput() const { return LinkerInput; }
- void setLinkerInput(bool Value) { LinkerInput = Value; }
+ unsigned getNumArgs() const { return Info->Param; }
- bool hasNoOptAsInput() const { return NoOptAsInput; }
- void setNoOptAsInput(bool Value) { NoOptAsInput = Value; }
+ bool hasNoOptAsInput() const { return Info->Flags & options::RenderAsInput;}
RenderStyleKind getRenderStyle() const {
- return RenderStyleKind(RenderStyle);
+ if (Info->Flags & options::RenderJoined)
+ return RenderJoinedStyle;
+ if (Info->Flags & options::RenderSeparate)
+ return RenderSeparateStyle;
+ switch (getKind()) {
+ case GroupClass:
+ case InputClass:
+ case UnknownClass:
+ return RenderValuesStyle;
+ case JoinedClass:
+ case JoinedAndSeparateClass:
+ return RenderJoinedStyle;
+ case CommaJoinedClass:
+ return RenderCommaJoinedStyle;
+ case FlagClass:
+ case SeparateClass:
+ case MultiArgClass:
+ case JoinedOrSeparateClass:
+ return RenderSeparateStyle;
+ }
+ llvm_unreachable("Unexpected kind!");
}
- void setRenderStyle(RenderStyleKind Value) { RenderStyle = Value; }
-
- bool isDriverOption() const { return DriverOption; }
- void setDriverOption(bool Value) { DriverOption = Value; }
- bool hasNoArgumentUnused() const { return NoArgumentUnused; }
- void setNoArgumentUnused(bool Value) { NoArgumentUnused = Value; }
-
- bool hasNoForward() const { return NoForward; }
- void setNoForward(bool Value) { NoForward = Value; }
-
- bool isCC1Option() const { return CC1Option; }
- void setIsCC1Option(bool Value) { CC1Option = Value; }
-
- bool hasForwardToGCC() const {
- return !NoForward && !DriverOption && !LinkerInput;
+ /// Test if this option has the flag \a Val.
+ bool hasFlag(unsigned Val) const {
+ return Info->Flags & Val;
}
/// getUnaliasedOption - Return the final option this option
/// aliases (itself, if the option has no alias).
- const Option *getUnaliasedOption() const {
- if (Alias) return Alias->getUnaliasedOption();
- return this;
+ const Option getUnaliasedOption() const {
+ const Option Alias = getAlias();
+ if (Alias.isValid()) return Alias.getUnaliasedOption();
+ return *this;
}
/// getRenderName - Return the name to use when rendering this
/// option.
StringRef getRenderName() const {
- return getUnaliasedOption()->getName();
+ return getUnaliasedOption().getName();
}
/// matches - Predicate for whether this option is part of the
@@ -164,158 +189,13 @@ namespace driver {
/// If the option accepts the current argument, accept() sets
/// Index to the position where argument parsing should resume
/// (even if the argument is missing values).
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const = 0;
+ ///
+ /// \param ArgSize The number of bytes taken up by the matched Option prefix
+ /// and name. This is used to determine where joined values
+ /// start.
+ Arg *accept(const ArgList &Args, unsigned &Index, unsigned ArgSize) const;
void dump() const;
-
- static bool classof(const Option *) { return true; }
- };
-
- /// OptionGroup - A set of options which are can be handled uniformly
- /// by the driver.
- class OptionGroup : public Option {
- public:
- OptionGroup(OptSpecifier ID, const char *Name, const OptionGroup *Group);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::GroupClass;
- }
- static bool classof(const OptionGroup *) { return true; }
- };
-
- // Dummy option classes.
-
- /// InputOption - Dummy option class for representing driver inputs.
- class InputOption : public Option {
- public:
- InputOption(OptSpecifier ID);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::InputClass;
- }
- static bool classof(const InputOption *) { return true; }
- };
-
- /// UnknownOption - Dummy option class for represent unknown arguments.
- class UnknownOption : public Option {
- public:
- UnknownOption(OptSpecifier ID);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::UnknownClass;
- }
- static bool classof(const UnknownOption *) { return true; }
- };
-
- // Normal options.
-
- class FlagOption : public Option {
- public:
- FlagOption(OptSpecifier ID, const char *Name, const OptionGroup *Group,
- const Option *Alias);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::FlagClass;
- }
- static bool classof(const FlagOption *) { return true; }
- };
-
- class JoinedOption : public Option {
- public:
- JoinedOption(OptSpecifier ID, const char *Name, const OptionGroup *Group,
- const Option *Alias);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::JoinedClass;
- }
- static bool classof(const JoinedOption *) { return true; }
- };
-
- class SeparateOption : public Option {
- public:
- SeparateOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::SeparateClass;
- }
- static bool classof(const SeparateOption *) { return true; }
- };
-
- class CommaJoinedOption : public Option {
- public:
- CommaJoinedOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::CommaJoinedClass;
- }
- static bool classof(const CommaJoinedOption *) { return true; }
- };
-
- // FIXME: Fold MultiArgOption into SeparateOption?
-
- /// MultiArgOption - An option which takes multiple arguments (these
- /// are always separate arguments).
- class MultiArgOption : public Option {
- unsigned NumArgs;
-
- public:
- MultiArgOption(OptSpecifier ID, const char *Name, const OptionGroup *Group,
- const Option *Alias, unsigned NumArgs);
-
- unsigned getNumArgs() const { return NumArgs; }
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::MultiArgClass;
- }
- static bool classof(const MultiArgOption *) { return true; }
- };
-
- /// JoinedOrSeparateOption - An option which either literally
- /// prefixes its (non-empty) value, or is follwed by a value.
- class JoinedOrSeparateOption : public Option {
- public:
- JoinedOrSeparateOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::JoinedOrSeparateClass;
- }
- static bool classof(const JoinedOrSeparateOption *) { return true; }
- };
-
- /// JoinedAndSeparateOption - An option which literally prefixes its
- /// value and is followed by another value.
- class JoinedAndSeparateOption : public Option {
- public:
- JoinedAndSeparateOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias);
-
- virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
-
- static bool classof(const Option *O) {
- return O->getKind() == Option::JoinedAndSeparateClass;
- }
- static bool classof(const JoinedAndSeparateOption *) { return true; }
};
} // end namespace driver
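Option is no longer a polymorphic heap object with one subclass per kind; it is a thin value wrapping a pointer into the static Info table, so OptTable::getOption can return it by value and isValid() replaces null checks. A brief sketch of the resulting idiom (the output formatting is illustrative):

    #include "clang/Driver/OptTable.h"
    #include "clang/Driver/Option.h"
    #include "llvm/Support/raw_ostream.h"

    // Options are cheap values now: copy them, test isValid(), and
    // follow alias links without worrying about ownership.
    static void describeOption(const clang::driver::OptTable &Opts,
                               unsigned ID) {
      const clang::driver::Option O = Opts.getOption(ID);
      if (!O.isValid())
        return;
      llvm::outs() << O.getPrefixedName();
      const clang::driver::Option A = O.getAlias();
      if (A.isValid())
        llvm::outs() << " (alias for " << A.getPrefixedName() << ")";
      llvm::outs() << "\n";
    }

Returning Option by value also removes the lazily constructed Options cache that getOption previously maintained, which is why the mutable Option** member disappears from OptTable above.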
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.h b/contrib/llvm/tools/clang/include/clang/Driver/Options.h
index ac312cd..6c114e2 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Options.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.h
@@ -17,11 +17,13 @@ namespace driver {
namespace options {
enum ID {
OPT_INVALID = 0, // This is not an option ID.
-#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+#define PREFIX(NAME, VALUE)
+#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
HELPTEXT, METAVAR) OPT_##ID,
#include "clang/Driver/Options.inc"
LastOption
#undef OPTION
+#undef PREFIX
};
}
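The enum now defines PREFIX to expand to nothing so the regenerated Options.inc, which interleaves PREFIX(...) and OPTION(...) invocations, can still be included when only the IDs are wanted. A hedged sketch reusing the same two-macro pattern to pull a different column out of the table; this assumes Options.inc contains only those two kinds of invocation, with parameters in the order shown above:

    // Hypothetical: collect every option's help text (entries may be
    // null for options without help), positionally matching the
    // OPTION parameter list in the enum above.
    #define PREFIX(NAME, VALUE)
    #define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
                   HELPTEXT, METAVAR) HELPTEXT,
    static const char *const OptionHelpTexts[] = {
    #include "clang/Driver/Options.inc"
    };
    #undef OPTION
    #undef PREFIX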
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
index dee0dfb..ca4f6d5 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
@@ -1,4 +1,4 @@
-//===--- DriverOptions.td - Options for clang -----------------------------===//
+//===--- Options.td - Options for clang -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -41,6 +41,8 @@ def m_x86_Features_Group : OptionGroup<"<m x86 features group>">, Group<m_Group
def m_hexagon_Features_Group : OptionGroup<"<m hexagon features group>">, Group<m_Group>;
def opencl_Group : OptionGroup<"<opencl group>">;
def u_Group : OptionGroup<"<u group>">;
+def mips_CPUs_Group : OptionGroup<"<MIPS CPU aliases group>">,
+ Group<CompileOnly_Group>;
def pedantic_Group : OptionGroup<"<pedantic group>">,
Group<CompileOnly_Group>;
@@ -82,1094 +84,1073 @@ def ccc_debug_Group : OptionGroup<"<clang debug/development internal options>">,
Group<ccc_Group>, HelpText<"DEBUG/DEVELOPMENT OPTIONS">;
class CCCDriverOpt : Group<ccc_driver_Group>, Flags<[DriverOption, HelpHidden]>;
-def ccc_cxx : Flag<"-ccc-cxx">, CCCDriverOpt,
+def ccc_cxx : Flag<["-"], "ccc-cxx">, CCCDriverOpt,
HelpText<"Act as a C++ driver">;
-def ccc_echo : Flag<"-ccc-echo">, CCCDriverOpt,
+def ccc_echo : Flag<["-"], "ccc-echo">, CCCDriverOpt,
HelpText<"Echo commands before running them">;
-def ccc_gcc_name : Separate<"-ccc-gcc-name">, CCCDriverOpt,
+def ccc_gcc_name : Separate<["-"], "ccc-gcc-name">, CCCDriverOpt,
HelpText<"Name for native GCC compiler">,
MetaVarName<"<gcc-path>">;
-def ccc_clang_cxx : Flag<"-ccc-clang-cxx">, CCCDriverOpt,
- HelpText<"Enable the clang compiler for C++">;
-def ccc_no_clang_cxx : Flag<"-ccc-no-clang-cxx">, CCCDriverOpt,
- HelpText<"Disable the clang compiler for C++">;
-def ccc_no_clang : Flag<"-ccc-no-clang">, CCCDriverOpt,
- HelpText<"Disable the clang compiler">;
-def ccc_no_clang_cpp : Flag<"-ccc-no-clang-cpp">, CCCDriverOpt,
- HelpText<"Disable the clang preprocessor">;
-def ccc_clang_archs : Separate<"-ccc-clang-archs">, CCCDriverOpt,
+def ccc_clang_archs : Separate<["-"], "ccc-clang-archs">, CCCDriverOpt,
HelpText<"Comma separate list of architectures to use the clang compiler for">,
MetaVarName<"<arch-list>">;
-def ccc_pch_is_pch : Flag<"-ccc-pch-is-pch">, CCCDriverOpt,
+def ccc_pch_is_pch : Flag<["-"], "ccc-pch-is-pch">, CCCDriverOpt,
HelpText<"Use lazy PCH for precompiled headers">;
-def ccc_pch_is_pth : Flag<"-ccc-pch-is-pth">, CCCDriverOpt,
+def ccc_pch_is_pth : Flag<["-"], "ccc-pch-is-pth">, CCCDriverOpt,
HelpText<"Use pretokenized headers for precompiled headers">;
class CCCDebugOpt : Group<ccc_debug_Group>, Flags<[DriverOption, HelpHidden]>;
-def ccc_install_dir : Separate<"-ccc-install-dir">, CCCDebugOpt,
+def ccc_install_dir : Separate<["-"], "ccc-install-dir">, CCCDebugOpt,
HelpText<"Simulate installation in the given directory">;
-def ccc_print_options : Flag<"-ccc-print-options">, CCCDebugOpt,
+def ccc_print_options : Flag<["-"], "ccc-print-options">, CCCDebugOpt,
HelpText<"Dump parsed command line arguments">;
-def ccc_print_phases : Flag<"-ccc-print-phases">, CCCDebugOpt,
+def ccc_print_phases : Flag<["-"], "ccc-print-phases">, CCCDebugOpt,
HelpText<"Dump list of actions to perform">;
-def ccc_print_bindings : Flag<"-ccc-print-bindings">, CCCDebugOpt,
+def ccc_print_bindings : Flag<["-"], "ccc-print-bindings">, CCCDebugOpt,
HelpText<"Show bindings of tools to actions">;
-def ccc_arcmt_check : Flag<"-ccc-arcmt-check">, CCCDriverOpt,
+def ccc_arcmt_check : Flag<["-"], "ccc-arcmt-check">, CCCDriverOpt,
HelpText<"Check for ARC migration issues that need manual handling">;
-def ccc_arcmt_modify : Flag<"-ccc-arcmt-modify">, CCCDriverOpt,
+def ccc_arcmt_modify : Flag<["-"], "ccc-arcmt-modify">, CCCDriverOpt,
HelpText<"Apply modifications to files to conform to ARC">;
-def ccc_arrmt_check : Flag<"-ccc-arrmt-check">, Alias<ccc_arcmt_check>;
-def ccc_arrmt_modify : Flag<"-ccc-arrmt-modify">, Alias<ccc_arcmt_modify>;
-def ccc_arcmt_migrate : Separate<"-ccc-arcmt-migrate">, CCCDriverOpt,
+def ccc_arrmt_check : Flag<["-"], "ccc-arrmt-check">, Alias<ccc_arcmt_check>;
+def ccc_arrmt_modify : Flag<["-"], "ccc-arrmt-modify">, Alias<ccc_arcmt_modify>;
+def ccc_arcmt_migrate : Separate<["-"], "ccc-arcmt-migrate">, CCCDriverOpt,
HelpText<"Apply modifications and produces temporary files that conform to ARC">;
-def arcmt_migrate_report_output : Separate<"-arcmt-migrate-report-output">,
+def arcmt_migrate_report_output : Separate<["-"], "arcmt-migrate-report-output">,
HelpText<"Output path for the plist report">, Flags<[CC1Option]>;
-def arcmt_migrate_emit_arc_errors : Flag<"-arcmt-migrate-emit-errors">,
+def arcmt_migrate_emit_arc_errors : Flag<["-"], "arcmt-migrate-emit-errors">,
HelpText<"Emit ARC errors even if the migrator can fix them">,
Flags<[CC1Option]>;
-def _migrate : Flag<"--migrate">, Flags<[DriverOption]>,
+def _migrate : Flag<["--"], "migrate">, Flags<[DriverOption]>,
HelpText<"Run the migrator">;
-def ccc_objcmt_migrate : Separate<"-ccc-objcmt-migrate">, CCCDriverOpt,
+def ccc_objcmt_migrate : Separate<["-"], "ccc-objcmt-migrate">, CCCDriverOpt,
HelpText<"Apply modifications and produces temporary files to migrate to "
"modern ObjC syntax">;
-def objcmt_migrate_literals : Flag<"-objcmt-migrate-literals">, Flags<[CC1Option]>,
+def objcmt_migrate_literals : Flag<["-"], "objcmt-migrate-literals">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC literals">;
-def objcmt_migrate_subscripting : Flag<"-objcmt-migrate-subscripting">, Flags<[CC1Option]>,
+def objcmt_migrate_subscripting : Flag<["-"], "objcmt-migrate-subscripting">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC subscripting">;
// Make sure all other -ccc- options are rejected.
-def ccc_ : Joined<"-ccc-">, Group<ccc_Group>, Flags<[Unsupported]>;
+def ccc_ : Joined<["-"], "ccc-">, Group<ccc_Group>, Flags<[Unsupported]>;
// Standard Options
-def _HASH_HASH_HASH : Flag<"-###">, Flags<[DriverOption]>,
+def _HASH_HASH_HASH : Flag<["-"], "###">, Flags<[DriverOption]>,
HelpText<"Print the commands to run for this compilation">;
// The '--' option is here for the sake of compatibility with gcc, but is
// being ignored by the driver.
-def _DASH_DASH : Flag<"--">, Flags<[DriverOption]>;
-def A : JoinedOrSeparate<"-A">;
-def B : JoinedOrSeparate<"-B">;
-def CC : Flag<"-CC">, Flags<[CC1Option]>;
-def C : Flag<"-C">, Flags<[CC1Option]>;
-def D : JoinedOrSeparate<"-D">, Group<CompileOnly_Group>, Flags<[CC1Option]>;
-def E : Flag<"-E">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
+def _DASH_DASH : Flag<["--"], "">, Flags<[DriverOption]>;
+def A : JoinedOrSeparate<["-"], "A">;
+def B : JoinedOrSeparate<["-"], "B">;
+def CC : Flag<["-"], "CC">, Flags<[CC1Option]>;
+def C : Flag<["-"], "C">, Flags<[CC1Option]>;
+def D : JoinedOrSeparate<["-"], "D">, Group<CompileOnly_Group>, Flags<[CC1Option]>;
+def E : Flag<["-"], "E">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Only run the preprocessor">;
-def F : JoinedOrSeparate<"-F">, Flags<[RenderJoined,CC1Option]>,
+def F : JoinedOrSeparate<["-"], "F">, Flags<[RenderJoined,CC1Option]>,
HelpText<"Add directory to framework include search path">;
-def G : Separate<"-G">, Flags<[DriverOption]>;
-def H : Flag<"-H">, Flags<[CC1Option]>,
+def G : Separate<["-"], "G">, Flags<[DriverOption]>;
+def H : Flag<["-"], "H">, Flags<[CC1Option]>,
HelpText<"Show header includes and nesting depth">;
-def I_ : Flag<"-I-">, Group<I_Group>;
-def I : JoinedOrSeparate<"-I">, Group<I_Group>, Flags<[CC1Option]>,
+def I_ : Flag<["-"], "I-">, Group<I_Group>;
+def I : JoinedOrSeparate<["-"], "I">, Group<I_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to include search path">;
-def L : JoinedOrSeparate<"-L">, Flags<[RenderJoined]>;
-def MD : Flag<"-MD">, Group<M_Group>;
-def MF : JoinedOrSeparate<"-MF">, Group<M_Group>;
-def MG : Flag<"-MG">, Group<M_Group>, Flags<[CC1Option]>,
+def L : JoinedOrSeparate<["-"], "L">, Flags<[RenderJoined]>;
+def MD : Flag<["-"], "MD">, Group<M_Group>;
+def MF : JoinedOrSeparate<["-"], "MF">, Group<M_Group>;
+def MG : Flag<["-"], "MG">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Add missing headers to dependency list">;
-def MMD : Flag<"-MMD">, Group<M_Group>;
-def MM : Flag<"-MM">, Group<M_Group>;
-def MP : Flag<"-MP">, Group<M_Group>, Flags<[CC1Option]>,
+def MMD : Flag<["-"], "MMD">, Group<M_Group>;
+def MM : Flag<["-"], "MM">, Group<M_Group>;
+def MP : Flag<["-"], "MP">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Create phony target for each dependency (other than main file)">;
-def MQ : JoinedOrSeparate<"-MQ">, Group<M_Group>, Flags<[CC1Option]>,
+def MQ : JoinedOrSeparate<["-"], "MQ">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Specify target to quote for dependency">;
-def MT : JoinedOrSeparate<"-MT">, Group<M_Group>, Flags<[CC1Option]>,
+def MT : JoinedOrSeparate<["-"], "MT">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Specify target for dependency">;
-def Mach : Flag<"-Mach">;
-def M : Flag<"-M">, Group<M_Group>;
-def O0 : Joined<"-O0">, Group<O_Group>, Flags<[CC1Option]>;
-def O4 : Joined<"-O4">, Group<O_Group>, Flags<[CC1Option]>;
-def ObjCXX : Flag<"-ObjC++">, Flags<[DriverOption]>,
+def Mach : Flag<["-"], "Mach">;
+def M : Flag<["-"], "M">, Group<M_Group>;
+def O0 : Joined<["-"], "O0">, Group<O_Group>, Flags<[CC1Option]>;
+def O4 : Joined<["-"], "O4">, Group<O_Group>, Flags<[CC1Option]>;
+def ObjCXX : Flag<["-"], "ObjC++">, Flags<[DriverOption]>,
HelpText<"Treat source input files as Objective-C++ inputs">;
-def ObjC : Flag<"-ObjC">, Flags<[DriverOption]>,
+def ObjC : Flag<["-"], "ObjC">, Flags<[DriverOption]>,
HelpText<"Treat source input files as Objective-C inputs">;
-def O : Joined<"-O">, Group<O_Group>, Flags<[CC1Option]>;
-def P : Flag<"-P">, Flags<[CC1Option]>,
+def O : Joined<["-"], "O">, Group<O_Group>, Flags<[CC1Option]>;
+def P : Flag<["-"], "P">, Flags<[CC1Option]>,
HelpText<"Disable linemarker output in -E mode">;
-def Qn : Flag<"-Qn">;
-def Qunused_arguments : Flag<"-Qunused-arguments">, Flags<[DriverOption]>,
+def Qn : Flag<["-"], "Qn">;
+def Qunused_arguments : Flag<["-"], "Qunused-arguments">, Flags<[DriverOption]>,
HelpText<"Don't emit warning for unused driver arguments">;
-def Q : Flag<"-Q">;
-def R : Flag<"-R">;
-def S : Flag<"-S">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
+def Q : Flag<["-"], "Q">;
+def R : Flag<["-"], "R">;
+def S : Flag<["-"], "S">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Only run preprocess and compilation steps">;
-def Tbss : JoinedOrSeparate<"-Tbss">, Group<T_Group>;
-def Tdata : JoinedOrSeparate<"-Tdata">, Group<T_Group>;
-def Ttext : JoinedOrSeparate<"-Ttext">, Group<T_Group>;
-def T : JoinedOrSeparate<"-T">, Group<T_Group>;
-def U : JoinedOrSeparate<"-U">, Group<CompileOnly_Group>, Flags<[CC1Option]>;
-def V : JoinedOrSeparate<"-V">, Flags<[DriverOption, Unsupported]>;
-def Wa_COMMA : CommaJoined<"-Wa,">,
+def Tbss : JoinedOrSeparate<["-"], "Tbss">, Group<T_Group>;
+def Tdata : JoinedOrSeparate<["-"], "Tdata">, Group<T_Group>;
+def Ttext : JoinedOrSeparate<["-"], "Ttext">, Group<T_Group>;
+def T : JoinedOrSeparate<["-"], "T">, Group<T_Group>;
+def U : JoinedOrSeparate<["-"], "U">, Group<CompileOnly_Group>, Flags<[CC1Option]>;
+def V : JoinedOrSeparate<["-"], "V">, Flags<[DriverOption, Unsupported]>;
+def Wa_COMMA : CommaJoined<["-"], "Wa,">,
HelpText<"Pass the comma separated arguments in <arg> to the assembler">,
MetaVarName<"<arg>">;
-def Wall : Flag<"-Wall">, Group<W_Group>, Flags<[CC1Option]>;
-def Wdeprecated : Flag<"-Wdeprecated">, Group<W_Group>, Flags<[CC1Option]>;
-def Wno_deprecated : Flag<"-Wno-deprecated">, Group<W_Group>, Flags<[CC1Option]>;
-def Wextra : Flag<"-Wextra">, Group<W_Group>, Flags<[CC1Option]>;
-def Wl_COMMA : CommaJoined<"-Wl,">, Flags<[LinkerInput, RenderAsInput]>,
+def Wall : Flag<["-"], "Wall">, Group<W_Group>, Flags<[CC1Option]>;
+def Wdeprecated : Flag<["-"], "Wdeprecated">, Group<W_Group>, Flags<[CC1Option]>;
+def Wno_deprecated : Flag<["-"], "Wno-deprecated">, Group<W_Group>, Flags<[CC1Option]>;
+def Wextra : Flag<["-"], "Wextra">, Group<W_Group>, Flags<[CC1Option]>;
+def Wl_COMMA : CommaJoined<["-"], "Wl,">, Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass the comma separated arguments in <arg> to the linker">,
MetaVarName<"<arg>">;
-def Wno_nonportable_cfstrings : Joined<"-Wno-nonportable-cfstrings">, Group<W_Group>,
+def Wno_nonportable_cfstrings : Joined<["-"], "Wno-nonportable-cfstrings">, Group<W_Group>,
Flags<[CC1Option]>;
-def Wnonportable_cfstrings : Joined<"-Wnonportable-cfstrings">, Group<W_Group>,
+def Wnonportable_cfstrings : Joined<["-"], "Wnonportable-cfstrings">, Group<W_Group>,
Flags<[CC1Option]>;
-def Wp_COMMA : CommaJoined<"-Wp,">,
+def Wp_COMMA : CommaJoined<["-"], "Wp,">,
HelpText<"Pass the comma separated arguments in <arg> to the preprocessor">,
MetaVarName<"<arg>">;
-def Wwrite_strings : Flag<"-Wwrite-strings">, Group<W_Group>, Flags<[CC1Option]>;
-def Wno_write_strings : Flag<"-Wno-write-strings">, Group<W_Group>, Flags<[CC1Option]>;
-def W_Joined : Joined<"-W">, Group<W_Group>, Flags<[CC1Option]>;
-def Xanalyzer : Separate<"-Xanalyzer">,
+def Wwrite_strings : Flag<["-"], "Wwrite-strings">, Group<W_Group>, Flags<[CC1Option]>;
+def Wno_write_strings : Flag<["-"], "Wno-write-strings">, Group<W_Group>, Flags<[CC1Option]>;
+def W_Joined : Joined<["-"], "W">, Group<W_Group>, Flags<[CC1Option]>,
+ MetaVarName<"<warning>">, HelpText<"Enable the specified warning">;
+def Xanalyzer : Separate<["-"], "Xanalyzer">,
HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">;
-def Xarch__ : JoinedAndSeparate<"-Xarch_">, Flags<[DriverOption]>;
-def Xassembler : Separate<"-Xassembler">,
+def Xarch__ : JoinedAndSeparate<["-"], "Xarch_">, Flags<[DriverOption]>;
+def Xassembler : Separate<["-"], "Xassembler">,
HelpText<"Pass <arg> to the assembler">, MetaVarName<"<arg>">;
-def Xclang : Separate<"-Xclang">,
+def Xclang : Separate<["-"], "Xclang">,
HelpText<"Pass <arg> to the clang compiler">, MetaVarName<"<arg>">,
Flags<[NoForward]>;
-def Xlinker : Separate<"-Xlinker">, Flags<[LinkerInput, RenderAsInput]>,
+def Xlinker : Separate<["-"], "Xlinker">, Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass <arg> to the linker">, MetaVarName<"<arg>">;
-def Xpreprocessor : Separate<"-Xpreprocessor">,
+def Xpreprocessor : Separate<["-"], "Xpreprocessor">,
HelpText<"Pass <arg> to the preprocessor">, MetaVarName<"<arg>">;
-def X_Flag : Flag<"-X">;
-def X_Joined : Joined<"-X">;
-def Z_Flag : Flag<"-Z">;
-def Z_Joined : Joined<"-Z">;
-def all__load : Flag<"-all_load">;
-def allowable__client : Separate<"-allowable_client">;
-def ansi : Flag<"-ansi">, Group<a_Group>;
-def arch__errors__fatal : Flag<"-arch_errors_fatal">;
-def arch : Separate<"-arch">, Flags<[DriverOption]>;
-def arch__only : Separate<"-arch_only">;
-def a : Joined<"-a">, Group<a_Group>;
-def bind__at__load : Flag<"-bind_at_load">;
-def bundle__loader : Separate<"-bundle_loader">;
-def bundle : Flag<"-bundle">;
-def b : JoinedOrSeparate<"-b">, Flags<[Unsupported]>;
-def cl_kernel_arg_info : Flag<"-cl-kernel-arg-info">, Flags<[CC1Option]>, Group<opencl_Group>,
+def X_Flag : Flag<["-"], "X">;
+def X_Joined : Joined<["-"], "X">;
+def Z_Flag : Flag<["-"], "Z">;
+def Z_Joined : Joined<["-"], "Z">;
+def all__load : Flag<["-"], "all_load">;
+def allowable__client : Separate<["-"], "allowable_client">;
+def ansi : Flag<["-", "--"], "ansi">, Group<a_Group>;
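+// Note on the new syntax above: the first template argument is a list of
+// accepted prefixes, so one def can cover several spellings of an option.
+// A minimal illustrative sketch (hypothetical def, not part of this patch):
+//   def example : Flag<["-", "--"], "example">, Group<f_Group>;
+// would match both "-example" and "--example" on the command line.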
+def arch__errors__fatal : Flag<["-"], "arch_errors_fatal">;
+def arch : Separate<["-"], "arch">, Flags<[DriverOption]>;
+def arch__only : Separate<["-"], "arch_only">;
+def a : Joined<["-"], "a">, Group<a_Group>;
+def bind__at__load : Flag<["-"], "bind_at_load">;
+def bundle__loader : Separate<["-"], "bundle_loader">;
+def bundle : Flag<["-"], "bundle">;
+def b : JoinedOrSeparate<["-"], "b">, Flags<[Unsupported]>;
+def cl_kernel_arg_info : Flag<["-"], "cl-kernel-arg-info">, Flags<[CC1Option]>, Group<opencl_Group>,
HelpText<"OpenCL only. This option allows the compiler to store information about the arguments of a kernel(s)"> ;
-def client__name : JoinedOrSeparate<"-client_name">;
-def combine : Flag<"-combine">, Flags<[DriverOption, Unsupported]>;
-def compatibility__version : JoinedOrSeparate<"-compatibility_version">;
-def coverage : Flag<"-coverage">;
-def cpp_precomp : Flag<"-cpp-precomp">, Group<clang_ignored_f_Group>;
-def current__version : JoinedOrSeparate<"-current_version">;
-def cxx_isystem : JoinedOrSeparate<"-cxx-isystem">, Group<clang_i_Group>,
+def client__name : JoinedOrSeparate<["-"], "client_name">;
+def combine : Flag<["-", "--"], "combine">, Flags<[DriverOption, Unsupported]>;
+def compatibility__version : JoinedOrSeparate<["-"], "compatibility_version">;
+def coverage : Flag<["-", "--"], "coverage">;
+def cpp_precomp : Flag<["-"], "cpp-precomp">, Group<clang_ignored_f_Group>;
+def current__version : JoinedOrSeparate<["-"], "current_version">;
+def cxx_isystem : JoinedOrSeparate<["-"], "cxx-isystem">, Group<clang_i_Group>,
HelpText<"Add directory to the C++ SYSTEM include search path">, Flags<[CC1Option]>,
MetaVarName<"<directory>">;
-def c : Flag<"-c">, Flags<[DriverOption]>,
+def c : Flag<["-"], "c">, Flags<[DriverOption]>,
HelpText<"Only run preprocess, compile, and assemble steps">;
-def dA : Flag<"-dA">, Group<d_Group>;
-def dD : Flag<"-dD">, Group<d_Group>, Flags<[CC1Option]>,
+def dA : Flag<["-"], "dA">, Group<d_Group>;
+def dD : Flag<["-"], "dD">, Group<d_Group>, Flags<[CC1Option]>,
HelpText<"Print macro definitions in -E mode in addition to normal output">;
-def dM : Flag<"-dM">, Group<d_Group>, Flags<[CC1Option]>,
+def dM : Flag<["-"], "dM">, Group<d_Group>, Flags<[CC1Option]>,
HelpText<"Print macro definitions in -E mode instead of normal output">;
-def dead__strip : Flag<"-dead_strip">;
-def dependency_file : Separate<"-dependency-file">, Flags<[CC1Option]>,
+def dead__strip : Flag<["-"], "dead_strip">;
+def dependency_file : Separate<["-"], "dependency-file">, Flags<[CC1Option]>,
HelpText<"Filename (or -) to write dependency output to">;
-def dependency_dot : Separate<"-dependency-dot">, Flags<[CC1Option]>,
+def dependency_dot : Separate<["-"], "dependency-dot">, Flags<[CC1Option]>,
HelpText<"Filename to write DOT-formatted header dependencies to">;
-def dumpmachine : Flag<"-dumpmachine">;
-def dumpspecs : Flag<"-dumpspecs">, Flags<[Unsupported]>;
-def dumpversion : Flag<"-dumpversion">;
-def dylib__file : Separate<"-dylib_file">;
-def dylinker__install__name : JoinedOrSeparate<"-dylinker_install_name">;
-def dylinker : Flag<"-dylinker">;
-def dynamiclib : Flag<"-dynamiclib">;
-def dynamic : Flag<"-dynamic">, Flags<[NoArgumentUnused]>;
-def d_Flag : Flag<"-d">, Group<d_Group>;
-def d_Joined : Joined<"-d">, Group<d_Group>;
-def emit_ast : Flag<"-emit-ast">,
+def dumpmachine : Flag<["-"], "dumpmachine">;
+def dumpspecs : Flag<["-"], "dumpspecs">, Flags<[Unsupported]>;
+def dumpversion : Flag<["-"], "dumpversion">;
+def dylib__file : Separate<["-"], "dylib_file">;
+def dylinker__install__name : JoinedOrSeparate<["-"], "dylinker_install_name">;
+def dylinker : Flag<["-"], "dylinker">;
+def dynamiclib : Flag<["-"], "dynamiclib">;
+def dynamic : Flag<["-"], "dynamic">, Flags<[NoArgumentUnused]>;
+def d_Flag : Flag<["-"], "d">, Group<d_Group>;
+def d_Joined : Joined<["-"], "d">, Group<d_Group>;
+def emit_ast : Flag<["-"], "emit-ast">,
HelpText<"Emit Clang AST files for source inputs">;
-def emit_llvm : Flag<"-emit-llvm">, Flags<[CC1Option]>, Group<Action_Group>,
+def emit_llvm : Flag<["-"], "emit-llvm">, Flags<[CC1Option]>, Group<Action_Group>,
HelpText<"Use the LLVM representation for assembler and object files">;
-def exported__symbols__list : Separate<"-exported_symbols_list">;
-def e : JoinedOrSeparate<"-e">;
-def fPIC : Flag<"-fPIC">, Group<f_Group>;
-def fno_PIC : Flag<"-fno-PIC">, Group<f_Group>;
-def fPIE : Flag<"-fPIE">, Group<f_Group>;
-def fno_PIE : Flag<"-fno-PIE">, Group<f_Group>;
-def faccess_control : Flag<"-faccess-control">, Group<f_Group>;
-def fallow_unsupported : Flag<"-fallow-unsupported">, Group<f_Group>;
-def faltivec : Flag<"-faltivec">, Group<f_Group>, Flags<[CC1Option]>,
+def exported__symbols__list : Separate<["-"], "exported_symbols_list">;
+def e : JoinedOrSeparate<["-"], "e">;
+def fPIC : Flag<["-"], "fPIC">, Group<f_Group>;
+def fno_PIC : Flag<["-"], "fno-PIC">, Group<f_Group>;
+def fPIE : Flag<["-"], "fPIE">, Group<f_Group>;
+def fno_PIE : Flag<["-"], "fno-PIE">, Group<f_Group>;
+def faccess_control : Flag<["-"], "faccess-control">, Group<f_Group>;
+def fallow_unsupported : Flag<["-"], "fallow-unsupported">, Group<f_Group>;
+def faltivec : Flag<["-"], "faltivec">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable AltiVec vector initializer syntax">;
-def fapple_kext : Flag<"-fapple-kext">, Group<f_Group>, Flags<[CC1Option]>,
+def fapple_kext : Flag<["-"], "fapple-kext">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use Apple's kernel extensions ABI">;
-def fapple_pragma_pack : Flag<"-fapple-pragma-pack">, Group<f_Group>, Flags<[CC1Option]>,
+def fapple_pragma_pack : Flag<["-"], "fapple-pragma-pack">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable Apple gcc-compatible #pragma pack handling">;
-def faddress_sanitizer : Flag<"-faddress-sanitizer">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Enable AddressSanitizer instrumentation (memory error detection)">;
-def fno_address_sanitizer : Flag<"-fno-address-sanitizer">, Group<f_Group>, Flags<[CC1Option]>;
-def fthread_sanitizer : Flag<"-fthread-sanitizer">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Enable ThreadSanitizer instrumentation (race detection)">;
-def fno_thread_sanitizer : Flag<"-fno-thread-sanitizer">, Group<f_Group>, Flags<[CC1Option]>;
-def fasm : Flag<"-fasm">, Group<f_Group>;
+def faddress_sanitizer : Flag<["-"], "faddress-sanitizer">, Group<f_Group>;
+def fno_address_sanitizer : Flag<["-"], "fno-address-sanitizer">, Group<f_Group>;
+def fthread_sanitizer : Flag<["-"], "fthread-sanitizer">, Group<f_Group>;
+def fno_thread_sanitizer : Flag<["-"], "fno-thread-sanitizer">, Group<f_Group>;
+def fasm : Flag<["-"], "fasm">, Group<f_Group>;
-def fasm_blocks : Flag<"-fasm-blocks">, Group<f_Group>;
-def fno_asm_blocks : Flag<"-fno-asm-blocks">, Group<f_Group>;
+def fasm_blocks : Flag<["-"], "fasm-blocks">, Group<f_Group>;
+def fno_asm_blocks : Flag<["-"], "fno-asm-blocks">, Group<f_Group>;
-def fassume_sane_operator_new : Flag<"-fassume-sane-operator-new">, Group<f_Group>;
-def fastcp : Flag<"-fastcp">, Group<f_Group>;
-def fastf : Flag<"-fastf">, Group<f_Group>;
-def fast : Flag<"-fast">, Group<f_Group>;
-def fasynchronous_unwind_tables : Flag<"-fasynchronous-unwind-tables">, Group<f_Group>;
-def fblocks : Flag<"-fblocks">, Group<f_Group>, Flags<[CC1Option]>,
+def fassume_sane_operator_new : Flag<["-"], "fassume-sane-operator-new">, Group<f_Group>;
+def fastcp : Flag<["-"], "fastcp">, Group<f_Group>;
+def fastf : Flag<["-"], "fastf">, Group<f_Group>;
+def fast : Flag<["-"], "fast">, Group<f_Group>;
+def fasynchronous_unwind_tables : Flag<["-"], "fasynchronous-unwind-tables">, Group<f_Group>;
+def fblocks : Flag<["-"], "fblocks">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable the 'blocks' language feature">;
-def fbootclasspath_EQ : Joined<"-fbootclasspath=">, Group<f_Group>;
-def fborland_extensions : Flag<"-fborland-extensions">, Group<f_Group>, Flags<[CC1Option]>,
+def fbootclasspath_EQ : Joined<["-"], "fbootclasspath=">, Group<f_Group>;
+def fborland_extensions : Flag<["-"], "fborland-extensions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Accept non-standard constructs supported by the Borland compiler">;
-def fbounds_checking : Flag<"-fbounds-checking">, Group<f_Group>,
- HelpText<"Enable run-time bounds checks.">;
-def fbounds_checking_EQ : Joined<"-fbounds-checking=">, Flags<[CC1Option]>,
+def fbounds_checking : Flag<["-"], "fbounds-checking">, Group<f_Group>,
+ HelpText<"Enable run-time bounds checks">;
+def fbounds_checking_EQ : Joined<["-"], "fbounds-checking=">, Flags<[CC1Option]>,
Group<f_Group>;
-def fbuiltin_strcat : Flag<"-fbuiltin-strcat">, Group<f_Group>;
-def fbuiltin_strcpy : Flag<"-fbuiltin-strcpy">, Group<f_Group>;
-def fbuiltin : Flag<"-fbuiltin">, Group<f_Group>;
-def fcaret_diagnostics : Flag<"-fcaret-diagnostics">, Group<f_Group>;
-def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">, Flags<[CC1Option]>,
- Group<f_Group>, HelpText<"Generate runtime checks for undefined behavior.">;
-def fclasspath_EQ : Joined<"-fclasspath=">, Group<f_Group>;
-def fcolor_diagnostics : Flag<"-fcolor-diagnostics">, Group<f_Group>, Flags<[CC1Option]>,
+def fbuiltin_strcat : Flag<["-"], "fbuiltin-strcat">, Group<f_Group>;
+def fbuiltin_strcpy : Flag<["-"], "fbuiltin-strcpy">, Group<f_Group>;
+def fbuiltin : Flag<["-"], "fbuiltin">, Group<f_Group>;
+def fcaret_diagnostics : Flag<["-"], "fcaret-diagnostics">, Group<f_Group>;
+def fcatch_undefined_behavior : Flag<["-"], "fcatch-undefined-behavior">, Group<f_Group>;
+def fclasspath_EQ : Joined<["-"], "fclasspath=">, Group<f_Group>;
+def fcolor_diagnostics : Flag<["-"], "fcolor-diagnostics">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use colors in diagnostics">;
-def fcommon : Flag<"-fcommon">, Group<f_Group>;
-def fcompile_resource_EQ : Joined<"-fcompile-resource=">, Group<f_Group>;
-def fconstant_cfstrings : Flag<"-fconstant-cfstrings">, Group<f_Group>;
-def fconstant_string_class_EQ : Joined<"-fconstant-string-class=">, Group<f_Group>;
-def fconstexpr_depth_EQ : Joined<"-fconstexpr-depth=">, Group<f_Group>;
-def fconstexpr_backtrace_limit_EQ : Joined<"-fconstexpr-backtrace-limit=">,
+def fcommon : Flag<["-"], "fcommon">, Group<f_Group>;
+def fcompile_resource_EQ : Joined<["-"], "fcompile-resource=">, Group<f_Group>;
+def fconstant_cfstrings : Flag<["-"], "fconstant-cfstrings">, Group<f_Group>;
+def fconstant_string_class_EQ : Joined<["-"], "fconstant-string-class=">, Group<f_Group>;
+def fconstexpr_depth_EQ : Joined<["-"], "fconstexpr-depth=">, Group<f_Group>;
+def fconstexpr_backtrace_limit_EQ : Joined<["-"], "fconstexpr-backtrace-limit=">,
Group<f_Group>;
-def fno_crash_diagnostics : Flag<"-fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>;
-def fcreate_profile : Flag<"-fcreate-profile">, Group<f_Group>;
-def fcxx_exceptions: Flag<"-fcxx-exceptions">, Group<f_Group>,
+def fno_crash_diagnostics : Flag<["-"], "fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>;
+def fcreate_profile : Flag<["-"], "fcreate-profile">, Group<f_Group>;
+def fcxx_exceptions: Flag<["-"], "fcxx-exceptions">, Group<f_Group>,
HelpText<"Enable C++ exceptions">, Flags<[CC1Option]>;
-def fcxx_modules : Flag <"-fcxx-modules">, Group<f_Group>, Flags<[NoForward]>;
-def fdebug_pass_arguments : Flag<"-fdebug-pass-arguments">, Group<f_Group>;
-def fdebug_pass_structure : Flag<"-fdebug-pass-structure">, Group<f_Group>;
-def fdiagnostics_fixit_info : Flag<"-fdiagnostics-fixit-info">, Group<f_clang_Group>;
-def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">, Group<f_clang_Group>,
+def fcxx_modules : Flag <["-"], "fcxx-modules">, Group<f_Group>, Flags<[NoForward]>;
+def fdebug_pass_arguments : Flag<["-"], "fdebug-pass-arguments">, Group<f_Group>;
+def fdebug_pass_structure : Flag<["-"], "fdebug-pass-structure">, Group<f_Group>;
+def fdiagnostics_fixit_info : Flag<["-"], "fdiagnostics-fixit-info">, Group<f_clang_Group>;
+def fdiagnostics_parseable_fixits : Flag<["-"], "fdiagnostics-parseable-fixits">, Group<f_clang_Group>,
Flags<[CC1Option]>, HelpText<"Print fix-its in machine parseable form">;
-def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-range-info">,
+def fdiagnostics_print_source_range_info : Flag<["-"], "fdiagnostics-print-source-range-info">,
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Print source range spans in numeric form">;
-def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">, Group<f_Group>,
+def fdiagnostics_show_option : Flag<["-"], "fdiagnostics-show-option">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Print option name with mappable diagnostics">;
-def fdiagnostics_show_name : Flag<"-fdiagnostics-show-name">, Group<f_Group>,
+def fdiagnostics_show_name : Flag<["-"], "fdiagnostics-show-name">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Print diagnostic name">;
-def fdiagnostics_show_note_include_stack : Flag<"-fdiagnostics-show-note-include-stack">,
+def fdiagnostics_show_note_include_stack : Flag<["-"], "fdiagnostics-show-note-include-stack">,
Group<f_Group>, Flags<[CC1Option]>, HelpText<"Display include stacks for diagnostic notes">;
-def fdiagnostics_format_EQ : Joined<"-fdiagnostics-format=">, Group<f_clang_Group>;
-def fdiagnostics_show_category_EQ : Joined<"-fdiagnostics-show-category=">, Group<f_clang_Group>;
-def fdiagnostics_show_template_tree : Flag<"-fdiagnostics-show-template-tree">,
+def fdiagnostics_format_EQ : Joined<["-"], "fdiagnostics-format=">, Group<f_clang_Group>;
+def fdiagnostics_show_category_EQ : Joined<["-"], "fdiagnostics-show-category=">, Group<f_clang_Group>;
+def fdiagnostics_show_template_tree : Flag<["-"], "fdiagnostics-show-template-tree">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Print a template comparison tree for differing templates">;
-def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">, Group<f_Group>,
+def fdollars_in_identifiers : Flag<["-"], "fdollars-in-identifiers">, Group<f_Group>,
HelpText<"Allow '$' in identifiers">, Flags<[CC1Option]>;
-def fdwarf2_cfi_asm : Flag<"-fdwarf2-cfi-asm">, Group<f_Group>;
-def fno_dwarf2_cfi_asm : Flag<"-fno-dwarf2-cfi-asm">, Group<f_Group>, Flags<[CC1Option]>;
-def fdwarf_directory_asm : Flag<"-fdwarf-directory-asm">, Group<f_Group>;
-def fno_dwarf_directory_asm : Flag<"-fno-dwarf-directory-asm">, Group<f_Group>, Flags<[CC1Option]>;
-def felide_constructors : Flag<"-felide-constructors">, Group<f_Group>;
-def fno_elide_type : Flag<"-fno-elide-type">, Group<f_Group>,
+def fdwarf2_cfi_asm : Flag<["-"], "fdwarf2-cfi-asm">, Group<f_Group>;
+def fno_dwarf2_cfi_asm : Flag<["-"], "fno-dwarf2-cfi-asm">, Group<f_Group>, Flags<[CC1Option]>;
+def fdwarf_directory_asm : Flag<["-"], "fdwarf-directory-asm">, Group<f_Group>;
+def fno_dwarf_directory_asm : Flag<["-"], "fno-dwarf-directory-asm">, Group<f_Group>, Flags<[CC1Option]>;
+def felide_constructors : Flag<["-"], "felide-constructors">, Group<f_Group>;
+def fno_elide_type : Flag<["-"], "fno-elide-type">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Do not elide types when printing diagnostics">;
-def feliminate_unused_debug_symbols : Flag<"-feliminate-unused-debug-symbols">, Group<f_Group>;
-def femit_all_decls : Flag<"-femit-all-decls">, Group<f_Group>, Flags<[CC1Option]>,
+def feliminate_unused_debug_symbols : Flag<["-"], "feliminate-unused-debug-symbols">, Group<f_Group>;
+def femit_all_decls : Flag<["-"], "femit-all-decls">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Emit all declarations, even if unused">;
-def fencoding_EQ : Joined<"-fencoding=">, Group<f_Group>;
-def ferror_limit_EQ : Joined<"-ferror-limit=">, Group<f_Group>;
-def fexceptions : Flag<"-fexceptions">, Group<f_Group>, Flags<[CC1Option]>,
+def fencoding_EQ : Joined<["-"], "fencoding=">, Group<f_Group>;
+def ferror_limit_EQ : Joined<["-"], "ferror-limit=">, Group<f_Group>;
+def fexceptions : Flag<["-"], "fexceptions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable support for exception handling">;
-def fextdirs_EQ : Joined<"-fextdirs=">, Group<f_Group>;
-def fhosted : Flag<"-fhosted">, Group<f_Group>;
-def ffast_math : Flag<"-ffast-math">, Group<f_Group>, Flags<[CC1Option]>,
+def fextdirs_EQ : Joined<["-"], "fextdirs=">, Group<f_Group>;
+def fhosted : Flag<["-"], "fhosted">, Group<f_Group>;
+def ffast_math : Flag<["-"], "ffast-math">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable the *frontend*'s 'fast-math' mode. This has no effect on "
"optimizations, but provides a preprocessor macro __FAST_MATH__ the "
- "same as GCC's -ffast-math flag.">;
-def fmath_errno : Flag<"-fmath-errno">, Group<f_Group>, Flags<[CC1Option]>,
+ "same as GCC's -ffast-math flag">;
+def fno_fast_math : Flag<["-"], "fno-fast-math">, Group<f_Group>;
+def fmath_errno : Flag<["-"], "fmath-errno">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Require math functions to indicate errors by setting errno">;
-def fno_math_errno : Flag<"-fno-math-errno">, Group<f_Group>;
-def fsignaling_math : Flag<"-fsignaling-math">, Group<f_Group>;
-def fno_signaling_math : Flag<"-fno-signaling-math">, Group<f_Group>;
-def funsafe_math_optimizations : Flag<"-funsafe-math-optimizations">,
+def fno_math_errno : Flag<["-"], "fno-math-errno">, Group<f_Group>;
+def fsignaling_math : Flag<["-"], "fsignaling-math">, Group<f_Group>;
+def fno_signaling_math : Flag<["-"], "fno-signaling-math">, Group<f_Group>;
+def fsanitize_EQ : CommaJoined<["-"], "fsanitize=">, Group<f_clang_Group>,
+ Flags<[CC1Option]>, MetaVarName<"<check>">,
+ HelpText<"Enable runtime instrumentation for bug detection: "
+ "address (memory errors) | thread (race detection) | "
+ "undefined (miscellaneous undefined behavior)">;
+def fno_sanitize_EQ : CommaJoined<["-"], "fno-sanitize=">, Group<f_clang_Group>;
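+// Because fsanitize_EQ is CommaJoined, several checks can be requested in
+// one joined value. Illustrative usage (assumed from the HelpText above):
+//   clang -fsanitize=address,undefined foo.c
+// enables both the address and undefined-behavior instrumentation.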
+def funsafe_math_optimizations : Flag<["-"], "funsafe-math-optimizations">,
Group<f_Group>;
-def fno_unsafe_math_optimizations : Flag<"-fno-unsafe-math-optimizations">,
+def fno_unsafe_math_optimizations : Flag<["-"], "fno-unsafe-math-optimizations">,
Group<f_Group>;
-def fassociative_math : Flag<"-fassociative-math">, Group<f_Group>;
-def fno_associative_math : Flag<"-fno-associative-math">, Group<f_Group>;
-def freciprocal_math : Flag<"-freciprocal-math">, Group<f_Group>;
-def fno_reciprocal_math : Flag<"-fno-reciprocal-math">, Group<f_Group>;
-def ffinite_math_only : Flag<"-ffinite-math-only">, Group<f_Group>, Flags<[CC1Option]>;
-def fno_finite_math_only : Flag<"-fno-finite-math-only">, Group<f_Group>;
-def fsigned_zeros : Flag<"-fsigned-zeros">, Group<f_Group>;
-def fno_signed_zeros : Flag<"-fno-signed-zeros">, Group<f_Group>;
-def fhonor_nans : Flag<"-fhonor-nans">, Group<f_Group>;
-def fno_honor_nans : Flag<"-fno-honor-nans">, Group<f_Group>;
-def fhonor_infinities : Flag<"-fhonor-infinities">, Group<f_Group>;
-def fno_honor_infinities : Flag<"-fno-honor-infinities">, Group<f_Group>;
+def fassociative_math : Flag<["-"], "fassociative-math">, Group<f_Group>;
+def fno_associative_math : Flag<["-"], "fno-associative-math">, Group<f_Group>;
+def freciprocal_math : Flag<["-"], "freciprocal-math">, Group<f_Group>;
+def fno_reciprocal_math : Flag<["-"], "fno-reciprocal-math">, Group<f_Group>;
+def ffinite_math_only : Flag<["-"], "ffinite-math-only">, Group<f_Group>, Flags<[CC1Option]>;
+def fno_finite_math_only : Flag<["-"], "fno-finite-math-only">, Group<f_Group>;
+def fsigned_zeros : Flag<["-"], "fsigned-zeros">, Group<f_Group>;
+def fno_signed_zeros : Flag<["-"], "fno-signed-zeros">, Group<f_Group>;
+def fhonor_nans : Flag<["-"], "fhonor-nans">, Group<f_Group>;
+def fno_honor_nans : Flag<["-"], "fno-honor-nans">, Group<f_Group>;
+def fhonor_infinities : Flag<["-"], "fhonor-infinities">, Group<f_Group>;
+def fno_honor_infinities : Flag<["-"], "fno-honor-infinities">, Group<f_Group>;
// Sic. This option was misspelled originally.
-def fhonor_infinites : Flag<"-fhonor-infinites">, Alias<fhonor_infinities>;
-def fno_honor_infinites : Flag<"-fno-honor-infinites">, Alias<fno_honor_infinities>;
-def ftrapping_math : Flag<"-ftrapping-math">, Group<f_Group>;
-def fno_trapping_math : Flag<"-fno-trapping-math">, Group<f_Group>;
-def ffp_contract : Joined<"-ffp-contract=">, Group<f_Group>,
+def fhonor_infinites : Flag<["-"], "fhonor-infinites">, Alias<fhonor_infinities>;
+def fno_honor_infinites : Flag<["-"], "fno-honor-infinites">, Alias<fno_honor_infinities>;
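+// The Alias<> marker means these defs add no behavior of their own: an
+// occurrence of the misspelled "-fhonor-infinites" is rewritten by the
+// driver to its target, exactly as if "-fhonor-infinities" had been given
+// (illustrative description of Alias<> semantics, not new behavior).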
+def ftrapping_math : Flag<["-"], "ftrapping-math">, Group<f_Group>;
+def fno_trapping_math : Flag<["-"], "fno-trapping-math">, Group<f_Group>;
+def ffp_contract : Joined<["-"], "ffp-contract=">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Form fused FP ops (e.g. FMAs): fast (everywhere)"
" | on (according to FP_CONTRACT pragma, default) | off (never fuse)">;
-def ffor_scope : Flag<"-ffor-scope">, Group<f_Group>;
-def fno_for_scope : Flag<"-fno-for-scope">, Group<f_Group>;
+def ffor_scope : Flag<["-"], "ffor-scope">, Group<f_Group>;
+def fno_for_scope : Flag<["-"], "fno-for-scope">, Group<f_Group>;
-def frewrite_includes : Flag<"-frewrite-includes">, Group<f_Group>,
+def frewrite_includes : Flag<["-"], "frewrite-includes">, Group<f_Group>,
Flags<[CC1Option]>;
-def fno_rewrite_includes : Flag<"-fno-rewrite-includes">, Group<f_Group>;
+def fno_rewrite_includes : Flag<["-"], "fno-rewrite-includes">, Group<f_Group>;
-def ffreestanding : Flag<"-ffreestanding">, Group<f_Group>, Flags<[CC1Option]>,
+def ffreestanding : Flag<["-"], "ffreestanding">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Assert that the compilation takes place in a freestanding environment">;
-def fformat_extensions: Flag<"-fformat-extensions">, Group<f_Group>, Flags<[CC1Option]>,
+def fformat_extensions: Flag<["-"], "fformat-extensions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable FreeBSD kernel specific format string extensions">;
-def fgnu_keywords : Flag<"-fgnu-keywords">, Group<f_Group>, Flags<[CC1Option]>,
+def fgnu_keywords : Flag<["-"], "fgnu-keywords">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allow GNU-extension keywords regardless of language standard">;
-def fgnu89_inline : Flag<"-fgnu89-inline">, Group<f_Group>, Flags<[CC1Option]>,
+def fgnu89_inline : Flag<["-"], "fgnu89-inline">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use the gnu89 inline semantics">;
-def fno_gnu89_inline : Flag<"-fno-gnu89-inline">, Group<f_Group>;
-def fgnu_runtime : Flag<"-fgnu-runtime">, Group<f_Group>,
+def fno_gnu89_inline : Flag<["-"], "fno-gnu89-inline">, Group<f_Group>;
+def fgnu_runtime : Flag<["-"], "fgnu-runtime">, Group<f_Group>,
HelpText<"Generate output compatible with the standard GNU Objective-C runtime">;
-def fheinous_gnu_extensions : Flag<"-fheinous-gnu-extensions">, Flags<[CC1Option]>;
-def filelist : Separate<"-filelist">, Flags<[LinkerInput]>;
-def findirect_virtual_calls : Flag<"-findirect-virtual-calls">, Alias<fapple_kext>;
-def finline_functions : Flag<"-finline-functions">, Group<clang_ignored_f_Group>;
-def finline : Flag<"-finline">, Group<clang_ignored_f_Group>;
-def finstrument_functions : Flag<"-finstrument-functions">, Group<f_Group>, Flags<[CC1Option]>,
+def fheinous_gnu_extensions : Flag<["-"], "fheinous-gnu-extensions">, Flags<[CC1Option]>;
+def filelist : Separate<["-"], "filelist">, Flags<[LinkerInput]>;
+def findirect_virtual_calls : Flag<["-"], "findirect-virtual-calls">, Alias<fapple_kext>;
+def finline_functions : Flag<["-"], "finline-functions">, Group<clang_ignored_f_Group>;
+def finline : Flag<["-"], "finline">, Group<clang_ignored_f_Group>;
+def finstrument_functions : Flag<["-"], "finstrument-functions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Generate calls to instrument function entry and exit">;
-def fkeep_inline_functions : Flag<"-fkeep-inline-functions">, Group<clang_ignored_f_Group>;
-def flat__namespace : Flag<"-flat_namespace">;
-def flax_vector_conversions : Flag<"-flax-vector-conversions">, Group<f_Group>;
-def flimit_debug_info : Flag<"-flimit-debug-info">, Group<f_Group>, Flags<[CC1Option]>,
+def fkeep_inline_functions : Flag<["-"], "fkeep-inline-functions">, Group<clang_ignored_f_Group>;
+def flat__namespace : Flag<["-"], "flat_namespace">;
+def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group<f_Group>;
+def flimit_debug_info : Flag<["-"], "flimit-debug-info">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Limit debug information produced to reduce size of debug binary">;
-def flimited_precision_EQ : Joined<"-flimited-precision=">, Group<f_Group>;
-def flto : Flag<"-flto">, Group<f_Group>;
-def fno_lto : Flag<"-fno-lto">, Group<f_Group>;
-def fmacro_backtrace_limit_EQ : Joined<"-fmacro-backtrace-limit=">,
+def flimited_precision_EQ : Joined<["-"], "flimited-precision=">, Group<f_Group>;
+def flto : Flag<["-"], "flto">, Group<f_Group>;
+def fno_lto : Flag<["-"], "fno-lto">, Group<f_Group>;
+def fmacro_backtrace_limit_EQ : Joined<["-"], "fmacro-backtrace-limit=">,
Group<f_Group>;
-def fmerge_all_constants : Flag<"-fmerge-all-constants">, Group<f_Group>;
-def fmessage_length_EQ : Joined<"-fmessage-length=">, Group<f_Group>;
-def fms_extensions : Flag<"-fms-extensions">, Group<f_Group>, Flags<[CC1Option]>,
+def fmerge_all_constants : Flag<["-"], "fmerge-all-constants">, Group<f_Group>;
+def fmessage_length_EQ : Joined<["-"], "fmessage-length=">, Group<f_Group>;
+def fms_extensions : Flag<["-"], "fms-extensions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">;
-def fenable_experimental_ms_inline_asm : Flag<"-fenable-experimental-ms-inline-asm">, Group<f_Group>, Flags<[CC1Option]>,
+def fenable_experimental_ms_inline_asm : Flag<["-"], "fenable-experimental-ms-inline-asm">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable support for Microsoft style inine assembly">;
-def fms_compatibility : Flag<"-fms-compatibility">, Group<f_Group>, Flags<[CC1Option]>,
+def fms_compatibility : Flag<["-"], "fms-compatibility">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable Microsoft compatibility mode">;
-def fmsc_version : Joined<"-fmsc-version=">, Group<f_Group>, Flags<[CC1Option]>,
+def fmsc_version : Joined<["-"], "fmsc-version=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Version of the Microsoft C/C++ compiler to report in _MSC_VER (0 = don't define it (default))">;
-def fdelayed_template_parsing : Flag<"-fdelayed-template-parsing">, Group<f_Group>,
+def fdelayed_template_parsing : Flag<["-"], "fdelayed-template-parsing">, Group<f_Group>,
HelpText<"Parse templated function definitions at the end of the "
"translation unit ">, Flags<[CC1Option]>;
-def fmodule_cache_path : Separate<"-fmodule-cache-path">, Group<i_Group>,
+def fmodule_cache_path : Separate<["-"], "fmodule-cache-path">, Group<i_Group>,
Flags<[NoForward,CC1Option]>, MetaVarName<"<directory>">,
HelpText<"Specify the module cache path">;
-def fmodules : Flag <"-fmodules">, Group<f_Group>, Flags<[NoForward,CC1Option]>,
+def fmodules : Flag <["-"], "fmodules">, Group<f_Group>, Flags<[NoForward,CC1Option]>,
HelpText<"Enable the 'modules' language feature">;
-
-def fmudflapth : Flag<"-fmudflapth">, Group<f_Group>;
-def fmudflap : Flag<"-fmudflap">, Group<f_Group>;
-def fnested_functions : Flag<"-fnested-functions">, Group<f_Group>;
-def fnext_runtime : Flag<"-fnext-runtime">, Group<f_Group>;
-def fno_access_control : Flag<"-fno-access-control">, Group<f_Group>, Flags<[CC1Option]>,
+def fretain_comments_from_system_headers : Flag<["-"], "fretain-comments-from-system-headers">, Group<f_Group>, Flags<[CC1Option]>;
+
+def fmudflapth : Flag<["-"], "fmudflapth">, Group<f_Group>;
+def fmudflap : Flag<["-"], "fmudflap">, Group<f_Group>;
+def fnested_functions : Flag<["-"], "fnested-functions">, Group<f_Group>;
+def fnext_runtime : Flag<["-"], "fnext-runtime">, Group<f_Group>;
+def fno_access_control : Flag<["-"], "fno-access-control">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Disable C++ access control">;
-def fno_apple_pragma_pack : Flag<"-fno-apple-pragma-pack">, Group<f_Group>;
-def fno_asm : Flag<"-fno-asm">, Group<f_Group>;
-def fno_asynchronous_unwind_tables : Flag<"-fno-asynchronous-unwind-tables">, Group<f_Group>;
-def fno_assume_sane_operator_new : Flag<"-fno-assume-sane-operator-new">, Group<f_Group>,
+def fno_apple_pragma_pack : Flag<["-"], "fno-apple-pragma-pack">, Group<f_Group>;
+def fno_asm : Flag<["-"], "fno-asm">, Group<f_Group>;
+def fno_asynchronous_unwind_tables : Flag<["-"], "fno-asynchronous-unwind-tables">, Group<f_Group>;
+def fno_assume_sane_operator_new : Flag<["-"], "fno-assume-sane-operator-new">, Group<f_Group>,
HelpText<"Don't assume that C++'s global operator new can't alias any pointer">,
Flags<[CC1Option]>;
-def fno_blocks : Flag<"-fno-blocks">, Group<f_Group>;
-def fno_borland_extensions : Flag<"-fno-borland-extensions">, Group<f_Group>;
-def fno_builtin_strcat : Flag<"-fno-builtin-strcat">, Group<f_Group>;
-def fno_builtin_strcpy : Flag<"-fno-builtin-strcpy">, Group<f_Group>;
-def fno_builtin : Flag<"-fno-builtin">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_blocks : Flag<["-"], "fno-blocks">, Group<f_Group>;
+def fno_borland_extensions : Flag<["-"], "fno-borland-extensions">, Group<f_Group>;
+def fno_builtin_strcat : Flag<["-"], "fno-builtin-strcat">, Group<f_Group>;
+def fno_builtin_strcpy : Flag<["-"], "fno-builtin-strcpy">, Group<f_Group>;
+def fno_builtin : Flag<["-"], "fno-builtin">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Disable implicit builtin knowledge of functions">;
-def fno_caret_diagnostics : Flag<"-fno-caret-diagnostics">, Group<f_Group>,
+def fno_caret_diagnostics : Flag<["-"], "fno-caret-diagnostics">, Group<f_Group>,
Flags<[CC1Option]>;
-def fno_color_diagnostics : Flag<"-fno-color-diagnostics">, Group<f_Group>;
-def fno_common : Flag<"-fno-common">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_color_diagnostics : Flag<["-"], "fno-color-diagnostics">, Group<f_Group>;
+def fno_common : Flag<["-"], "fno-common">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Compile common globals like normal definitions">;
-def fno_constant_cfstrings : Flag<"-fno-constant-cfstrings">, Group<f_Group>,
+def fno_constant_cfstrings : Flag<["-"], "fno-constant-cfstrings">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Disable creation of CodeFoundation-type constant strings">;
-def fno_cxx_exceptions: Flag<"-fno-cxx-exceptions">, Group<f_Group>;
-def fno_cxx_modules : Flag <"-fno-cxx-modules">, Group<f_Group>, Flags<[NoForward]>;
-def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">, Group<f_Group>,
+def fno_cxx_exceptions: Flag<["-"], "fno-cxx-exceptions">, Group<f_Group>;
+def fno_cxx_modules : Flag <["-"], "fno-cxx-modules">, Group<f_Group>, Flags<[NoForward]>;
+def fno_diagnostics_fixit_info : Flag<["-"], "fno-diagnostics-fixit-info">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Do not include fixit information in diagnostics">;
-def fno_diagnostics_show_name : Flag<"-fno-diagnostics-show-name">, Group<f_Group>;
-def fno_diagnostics_show_option : Flag<"-fno-diagnostics-show-option">, Group<f_Group>;
-def fno_diagnostics_show_note_include_stack : Flag<"-fno-diagnostics-show-note-include-stack">,
+def fno_diagnostics_show_name : Flag<["-"], "fno-diagnostics-show-name">, Group<f_Group>;
+def fno_diagnostics_show_option : Flag<["-"], "fno-diagnostics-show-option">, Group<f_Group>;
+def fno_diagnostics_show_note_include_stack : Flag<["-"], "fno-diagnostics-show-note-include-stack">,
Flags<[CC1Option]>, Group<f_Group>, HelpText<"Display include stacks for diagnostic notes">;
-def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">, Group<f_Group>,
+def fno_dollars_in_identifiers : Flag<["-"], "fno-dollars-in-identifiers">, Group<f_Group>,
HelpText<"Disallow '$' in identifiers">, Flags<[CC1Option]>;
-def fno_elide_constructors : Flag<"-fno-elide-constructors">, Group<f_Group>,
+def fno_elide_constructors : Flag<["-"], "fno-elide-constructors">, Group<f_Group>,
HelpText<"Disable C++ copy constructor elision">, Flags<[CC1Option]>;
-def fno_eliminate_unused_debug_symbols : Flag<"-fno-eliminate-unused-debug-symbols">, Group<f_Group>;
-def fno_exceptions : Flag<"-fno-exceptions">, Group<f_Group>;
-def fno_gnu_keywords : Flag<"-fno-gnu-keywords">, Group<f_Group>, Flags<[CC1Option]>;
-def fno_inline_functions : Flag<"-fno-inline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>;
-def fno_inline : Flag<"-fno-inline">, Group<f_clang_Group>, Flags<[CC1Option]>;
-def fno_keep_inline_functions : Flag<"-fno-keep-inline-functions">, Group<clang_ignored_f_Group>;
-def fno_lax_vector_conversions : Flag<"-fno-lax-vector-conversions">, Group<f_Group>,
+def fno_eliminate_unused_debug_symbols : Flag<["-"], "fno-eliminate-unused-debug-symbols">, Group<f_Group>;
+def fno_exceptions : Flag<["-"], "fno-exceptions">, Group<f_Group>;
+def fno_gnu_keywords : Flag<["-"], "fno-gnu-keywords">, Group<f_Group>, Flags<[CC1Option]>;
+def fno_inline_functions : Flag<["-"], "fno-inline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>;
+def fno_inline : Flag<["-"], "fno-inline">, Group<f_clang_Group>, Flags<[CC1Option]>;
+def fno_keep_inline_functions : Flag<["-"], "fno-keep-inline-functions">, Group<clang_ignored_f_Group>;
+def fno_lax_vector_conversions : Flag<["-"], "fno-lax-vector-conversions">, Group<f_Group>,
HelpText<"Disallow implicit conversions between vectors with a different number of elements or different element types">, Flags<[CC1Option]>;
-def fno_limit_debug_info : Flag<"-fno-limit-debug-info">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_limit_debug_info : Flag<["-"], "fno-limit-debug-info">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not limit debug information produced to reduce size of debug binary">;
-def fno_merge_all_constants : Flag<"-fno-merge-all-constants">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Disallow merging of constants.">;
-def fno_modules : Flag <"-fno-modules">, Group<f_Group>, Flags<[NoForward]>;
-def fno_ms_extensions : Flag<"-fno-ms-extensions">, Group<f_Group>;
-def fno_ms_compatibility : Flag<"-fno-ms-compatibility">, Group<f_Group>;
-def fno_delayed_template_parsing : Flag<"-fno-delayed-template-parsing">, Group<f_Group>;
-def fno_objc_exceptions: Flag<"-fno-objc-exceptions">, Group<f_Group>;
-def fno_objc_legacy_dispatch : Flag<"-fno-objc-legacy-dispatch">, Group<f_Group>;
-def fno_omit_frame_pointer : Flag<"-fno-omit-frame-pointer">, Group<f_Group>;
-def fno_operator_names : Flag<"-fno-operator-names">, Group<f_Group>,
+def fno_merge_all_constants : Flag<["-"], "fno-merge-all-constants">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Disallow merging of constants">;
+def fno_modules : Flag <["-"], "fno-modules">, Group<f_Group>, Flags<[NoForward]>;
+def fno_ms_extensions : Flag<["-"], "fno-ms-extensions">, Group<f_Group>;
+def fno_ms_compatibility : Flag<["-"], "fno-ms-compatibility">, Group<f_Group>;
+def fno_delayed_template_parsing : Flag<["-"], "fno-delayed-template-parsing">, Group<f_Group>;
+def fno_objc_exceptions: Flag<["-"], "fno-objc-exceptions">, Group<f_Group>;
+def fno_objc_legacy_dispatch : Flag<["-"], "fno-objc-legacy-dispatch">, Group<f_Group>;
+def fno_omit_frame_pointer : Flag<["-"], "fno-omit-frame-pointer">, Group<f_Group>;
+def fno_operator_names : Flag<["-"], "fno-operator-names">, Group<f_Group>,
HelpText<"Do not treat C++ operator name keywords as synonyms for operators">,
Flags<[CC1Option]>;
-def fno_pascal_strings : Flag<"-fno-pascal-strings">, Group<f_Group>;
-def fno_rtti : Flag<"-fno-rtti">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_pascal_strings : Flag<["-"], "fno-pascal-strings">, Group<f_Group>;
+def fno_rtti : Flag<["-"], "fno-rtti">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Disable generation of rtti information">;
-def fno_short_enums : Flag<"-fno-short-enums">, Group<f_Group>;
-def fno_show_column : Flag<"-fno-show-column">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_short_enums : Flag<["-"], "fno-short-enums">, Group<f_Group>;
+def fno_show_column : Flag<["-"], "fno-show-column">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not include column number on diagnostics">;
-def fno_show_source_location : Flag<"-fno-show-source-location">, Group<f_Group>,
+def fno_show_source_location : Flag<["-"], "fno-show-source-location">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Do not include source location information with diagnostics">;
-def fno_spell_checking : Flag<"-fno-spell-checking">, Group<f_Group>,
+def fno_spell_checking : Flag<["-"], "fno-spell-checking">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Disable spell-checking">;
-def fno_stack_protector : Flag<"-fno-stack-protector">, Group<f_Group>;
-def fno_strict_aliasing : Flag<"-fno-strict-aliasing">, Group<f_Group>;
-def fno_strict_enums : Flag<"-fno-strict-enums">, Group<f_Group>;
-def fno_strict_overflow : Flag<"-fno-strict-overflow">, Group<f_Group>;
-def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">, Group<f_Group>,
+def fno_stack_protector : Flag<["-"], "fno-stack-protector">, Group<f_Group>;
+def fno_strict_aliasing : Flag<["-"], "fno-strict-aliasing">, Group<f_Group>;
+def fno_strict_enums : Flag<["-"], "fno-strict-enums">, Group<f_Group>;
+def fno_strict_overflow : Flag<["-"], "fno-strict-overflow">, Group<f_Group>;
+def fno_threadsafe_statics : Flag<["-"], "fno-threadsafe-statics">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Do not emit code to make initialization of local statics thread safe">;
-def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_use_cxa_atexit : Flag<["-"], "fno-use-cxa-atexit">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Don't use __cxa_atexit for calling destructors">;
-def fno_unit_at_a_time : Flag<"-fno-unit-at-a-time">, Group<f_Group>;
-def fno_unwind_tables : Flag<"-fno-unwind-tables">, Group<f_Group>;
-def fno_verbose_asm : Flag<"-fno-verbose-asm">, Group<f_Group>;
-def fno_working_directory : Flag<"-fno-working-directory">, Group<f_Group>;
-def fno_wrapv : Flag<"-fno-wrapv">, Group<f_Group>;
-def fno_zero_initialized_in_bss : Flag<"-fno-zero-initialized-in-bss">, Group<f_Group>;
-def fobjc_arc : Flag<"-fobjc-arc">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_unit_at_a_time : Flag<["-"], "fno-unit-at-a-time">, Group<f_Group>;
+def fno_unwind_tables : Flag<["-"], "fno-unwind-tables">, Group<f_Group>;
+def fno_verbose_asm : Flag<["-"], "fno-verbose-asm">, Group<f_Group>;
+def fno_working_directory : Flag<["-"], "fno-working-directory">, Group<f_Group>;
+def fno_wrapv : Flag<["-"], "fno-wrapv">, Group<f_Group>;
+def fno_zero_initialized_in_bss : Flag<["-"], "fno-zero-initialized-in-bss">, Group<f_Group>;
+def fobjc_arc : Flag<["-"], "fobjc-arc">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Synthesize retain and release calls for Objective-C pointers">;
-def fno_objc_arc : Flag<"-fno-objc-arc">, Group<f_Group>;
-def fobjc_arc_exceptions : Flag<"-fobjc-arc-exceptions">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_objc_arc : Flag<["-"], "fno-objc-arc">, Group<f_Group>;
+def fobjc_arc_exceptions : Flag<["-"], "fobjc-arc-exceptions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use EH-safe code when synthesizing retains and releases in -fobjc-arc">;
-def fno_objc_arc_exceptions : Flag<"-fno-objc-arc-exceptions">, Group<f_Group>;
-def fobjc_atdefs : Flag<"-fobjc-atdefs">, Group<clang_ignored_f_Group>;
-def fobjc_call_cxx_cdtors : Flag<"-fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
-def fobjc_exceptions: Flag<"-fobjc-exceptions">, Group<f_Group>,
+def fno_objc_arc_exceptions : Flag<["-"], "fno-objc-arc-exceptions">, Group<f_Group>;
+def fobjc_atdefs : Flag<["-"], "fobjc-atdefs">, Group<clang_ignored_f_Group>;
+def fobjc_call_cxx_cdtors : Flag<["-"], "fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
+def fobjc_exceptions: Flag<["-"], "fobjc-exceptions">, Group<f_Group>,
HelpText<"Enable Objective-C exceptions">, Flags<[CC1Option]>;
-def fobjc_gc_only : Flag<"-fobjc-gc-only">, Group<f_Group>, Flags<[CC1Option]>,
+def fobjc_gc_only : Flag<["-"], "fobjc-gc-only">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use GC exclusively for Objective-C related memory management">;
-def fobjc_gc : Flag<"-fobjc-gc">, Group<f_Group>, Flags<[CC1Option]>,
+def fobjc_gc : Flag<["-"], "fobjc-gc">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable Objective-C garbage collection">;
-def fobjc_legacy_dispatch : Flag<"-fobjc-legacy-dispatch">, Group<f_Group>;
-def fobjc_new_property : Flag<"-fobjc-new-property">, Group<clang_ignored_f_Group>;
-def fobjc_infer_related_result_type : Flag<"-fobjc-infer-related-result-type">,
+def fobjc_legacy_dispatch : Flag<["-"], "fobjc-legacy-dispatch">, Group<f_Group>;
+def fobjc_new_property : Flag<["-"], "fobjc-new-property">, Group<clang_ignored_f_Group>;
+def fobjc_infer_related_result_type : Flag<["-"], "fobjc-infer-related-result-type">,
Group<f_Group>;
-def fno_objc_infer_related_result_type : Flag<
- "-fno-objc-infer-related-result-type">, Group<f_Group>,
+def fno_objc_infer_related_result_type : Flag<["-"],
+ "fno-objc-infer-related-result-type">, Group<f_Group>,
HelpText<
"do not infer Objective-C related result type based on method family">,
Flags<[CC1Option]>;
-def fobjc_link_runtime: Flag<"-fobjc-link-runtime">, Group<f_Group>;
+def fobjc_link_runtime: Flag<["-"], "fobjc-link-runtime">, Group<f_Group>;
// Objective-C ABI options.
-def fobjc_runtime_EQ : Joined<"-fobjc-runtime=">, Group<f_Group>, Flags<[CC1Option]>,
+def fobjc_runtime_EQ : Joined<["-"], "fobjc-runtime=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Specify the target Objective-C runtime kind and version">;
-def fobjc_abi_version_EQ : Joined<"-fobjc-abi-version=">, Group<f_Group>;
-def fobjc_nonfragile_abi_version_EQ : Joined<"-fobjc-nonfragile-abi-version=">, Group<f_Group>;
-def fobjc_nonfragile_abi : Flag<"-fobjc-nonfragile-abi">, Group<f_Group>;
-def fno_objc_nonfragile_abi : Flag<"-fno-objc-nonfragile-abi">, Group<f_Group>;
+def fobjc_abi_version_EQ : Joined<["-"], "fobjc-abi-version=">, Group<f_Group>;
+def fobjc_nonfragile_abi_version_EQ : Joined<["-"], "fobjc-nonfragile-abi-version=">, Group<f_Group>;
+def fobjc_nonfragile_abi : Flag<["-"], "fobjc-nonfragile-abi">, Group<f_Group>;
+def fno_objc_nonfragile_abi : Flag<["-"], "fno-objc-nonfragile-abi">, Group<f_Group>;
-def fobjc_sender_dependent_dispatch : Flag<"-fobjc-sender-dependent-dispatch">, Group<f_Group>;
-def fobjc : Flag<"-fobjc">, Group<f_Group>;
-def fomit_frame_pointer : Flag<"-fomit-frame-pointer">, Group<f_Group>;
-def fopenmp : Flag<"-fopenmp">, Group<f_Group>;
-def fno_optimize_sibling_calls : Flag<"-fno-optimize-sibling-calls">, Group<f_Group>;
-def foptimize_sibling_calls : Flag<"-foptimize-sibling-calls">, Group<f_Group>;
-def force__cpusubtype__ALL : Flag<"-force_cpusubtype_ALL">;
-def force__flat__namespace : Flag<"-force_flat_namespace">;
-def force__load : Separate<"-force_load">;
-def foutput_class_dir_EQ : Joined<"-foutput-class-dir=">, Group<f_Group>;
-def fpack_struct : Flag<"-fpack-struct">, Group<f_Group>;
-def fno_pack_struct : Flag<"-fno-pack-struct">, Group<f_Group>;
-def fpack_struct_EQ : Joined<"-fpack-struct=">, Group<f_Group>, Flags<[CC1Option]>,
+def fobjc_sender_dependent_dispatch : Flag<["-"], "fobjc-sender-dependent-dispatch">, Group<f_Group>;
+def fobjc : Flag<["-"], "fobjc">, Group<f_Group>;
+def fomit_frame_pointer : Flag<["-"], "fomit-frame-pointer">, Group<f_Group>;
+def fopenmp : Flag<["-"], "fopenmp">, Group<f_Group>;
+def fno_optimize_sibling_calls : Flag<["-"], "fno-optimize-sibling-calls">, Group<f_Group>;
+def foptimize_sibling_calls : Flag<["-"], "foptimize-sibling-calls">, Group<f_Group>;
+def force__cpusubtype__ALL : Flag<["-"], "force_cpusubtype_ALL">;
+def force__flat__namespace : Flag<["-"], "force_flat_namespace">;
+def force__load : Separate<["-"], "force_load">;
+def foutput_class_dir_EQ : Joined<["-"], "foutput-class-dir=">, Group<f_Group>;
+def fpack_struct : Flag<["-"], "fpack-struct">, Group<f_Group>;
+def fno_pack_struct : Flag<["-"], "fno-pack-struct">, Group<f_Group>;
+def fpack_struct_EQ : Joined<["-"], "fpack-struct=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Specify the default maximum struct packing alignment">;
-def fpascal_strings : Flag<"-fpascal-strings">, Group<f_Group>, Flags<[CC1Option]>,
+def fpascal_strings : Flag<["-"], "fpascal-strings">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Recognize and construct Pascal-style string literals">;
-def fpch_preprocess : Flag<"-fpch-preprocess">, Group<f_Group>;
-def fpic : Flag<"-fpic">, Group<f_Group>;
-def fno_pic : Flag<"-fno-pic">, Group<f_Group>;
-def fpie : Flag<"-fpie">, Group<f_Group>;
-def fno_pie : Flag<"-fno-pie">, Group<f_Group>;
-def fprofile_arcs : Flag<"-fprofile-arcs">, Group<f_Group>;
-def fprofile_generate : Flag<"-fprofile-generate">, Group<f_Group>;
-def framework : Separate<"-framework">, Flags<[LinkerInput]>;
-def frandom_seed_EQ : Joined<"-frandom-seed=">, Group<clang_ignored_f_Group>;
-def frtti : Flag<"-frtti">, Group<f_Group>;
-def fsched_interblock : Flag<"-fsched-interblock">, Group<clang_ignored_f_Group>;
-def fshort_enums : Flag<"-fshort-enums">, Group<f_Group>, Flags<[CC1Option]>,
+def fpch_preprocess : Flag<["-"], "fpch-preprocess">, Group<f_Group>;
+def fpic : Flag<["-"], "fpic">, Group<f_Group>;
+def fno_pic : Flag<["-"], "fno-pic">, Group<f_Group>;
+def fpie : Flag<["-"], "fpie">, Group<f_Group>;
+def fno_pie : Flag<["-"], "fno-pie">, Group<f_Group>;
+def fprofile_arcs : Flag<["-"], "fprofile-arcs">, Group<f_Group>;
+def fprofile_generate : Flag<["-"], "fprofile-generate">, Group<f_Group>;
+def framework : Separate<["-"], "framework">, Flags<[LinkerInput]>;
+def frandom_seed_EQ : Joined<["-"], "frandom-seed=">, Group<clang_ignored_f_Group>;
+def frtti : Flag<["-"], "frtti">, Group<f_Group>;
+def fsched_interblock : Flag<["-"], "fsched-interblock">, Group<clang_ignored_f_Group>;
+def fshort_enums : Flag<["-"], "fshort-enums">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allocate to an enum type only as many bytes as it needs for the declared range of possible values">;
-def freorder_blocks : Flag<"-freorder-blocks">, Group<clang_ignored_f_Group>;
-def fshort_wchar : Flag<"-fshort-wchar">, Group<f_Group>, Flags<[CC1Option]>,
+def freorder_blocks : Flag<["-"], "freorder-blocks">, Group<clang_ignored_f_Group>;
+def fshort_wchar : Flag<["-"], "fshort-wchar">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Force wchar_t to be a short unsigned int">;
-def fshow_overloads_EQ : Joined<"-fshow-overloads=">, Group<f_Group>, Flags<[CC1Option]>,
+def fshow_overloads_EQ : Joined<["-"], "fshow-overloads=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Which overload candidates to show when overload resolution fails: "
"best|all; defaults to all">;
-def fshow_column : Flag<"-fshow-column">, Group<f_Group>, Flags<[CC1Option]>;
-def fshow_source_location : Flag<"-fshow-source-location">, Group<f_Group>;
-def fspell_checking : Flag<"-fspell-checking">, Group<f_Group>;
-def fsigned_bitfields : Flag<"-fsigned-bitfields">, Group<f_Group>;
-def fsigned_char : Flag<"-fsigned-char">, Group<f_Group>;
-def fstack_protector_all : Flag<"-fstack-protector-all">, Group<f_Group>;
-def fstack_protector : Flag<"-fstack-protector">, Group<f_Group>;
-def fstrict_aliasing : Flag<"-fstrict-aliasing">, Group<f_Group>;
-def fstrict_enums : Flag<"-fstrict-enums">, Group<f_Group>, Flags<[CC1Option]>,
+def fshow_column : Flag<["-"], "fshow-column">, Group<f_Group>, Flags<[CC1Option]>;
+def fshow_source_location : Flag<["-"], "fshow-source-location">, Group<f_Group>;
+def fspell_checking : Flag<["-"], "fspell-checking">, Group<f_Group>;
+def fsigned_bitfields : Flag<["-"], "fsigned-bitfields">, Group<f_Group>;
+def fsigned_char : Flag<["-"], "fsigned-char">, Group<f_Group>;
+def fstack_protector_all : Flag<["-"], "fstack-protector-all">, Group<f_Group>;
+def fstack_protector : Flag<["-"], "fstack-protector">, Group<f_Group>;
+def fstrict_aliasing : Flag<["-"], "fstrict-aliasing">, Group<f_Group>;
+def fstrict_enums : Flag<["-"], "fstrict-enums">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable optimizations based on the strict definition of an enum's "
- "value range.">;
-def fstrict_overflow : Flag<"-fstrict-overflow">, Group<f_Group>;
-def fsyntax_only : Flag<"-fsyntax-only">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>;
-def ftabstop_EQ : Joined<"-ftabstop=">, Group<f_Group>;
-def ftemplate_depth_EQ : Joined<"-ftemplate-depth=">, Group<f_Group>;
-def ftemplate_depth_ : Joined<"-ftemplate-depth-">, Group<f_Group>;
-def ftemplate_backtrace_limit_EQ : Joined<"-ftemplate-backtrace-limit=">,
+ "value range">;
+def fstrict_overflow : Flag<["-"], "fstrict-overflow">, Group<f_Group>;
+def fsyntax_only : Flag<["-"], "fsyntax-only">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>;
+def ftabstop_EQ : Joined<["-"], "ftabstop=">, Group<f_Group>;
+def ftemplate_depth_EQ : Joined<["-"], "ftemplate-depth=">, Group<f_Group>;
+def ftemplate_depth_ : Joined<["-"], "ftemplate-depth-">, Group<f_Group>;
+def ftemplate_backtrace_limit_EQ : Joined<["-"], "ftemplate-backtrace-limit=">,
Group<f_Group>;
-def ftest_coverage : Flag<"-ftest-coverage">, Group<f_Group>;
-def Wlarge_by_value_copy_def : Flag<"-Wlarge-by-value-copy">,
+def ftest_coverage : Flag<["-"], "ftest-coverage">, Group<f_Group>;
+def Wlarge_by_value_copy_def : Flag<["-"], "Wlarge-by-value-copy">,
HelpText<"Warn if a function definition returns or accepts an object larger "
- "in bytes that a given value">;
-def Wlarge_by_value_copy_EQ : Joined<"-Wlarge-by-value-copy=">, Flags<[CC1Option]>;
+ "in bytes than a given value">, Flags<[HelpHidden]>;
+def Wlarge_by_value_copy_EQ : Joined<["-"], "Wlarge-by-value-copy=">, Flags<[CC1Option]>;
// Just silence warnings about -Wlarger-than, -Wframe-larger-than for now.
-def Wlarger_than : Separate<"-Wlarger-than">, Group<clang_ignored_f_Group>;
-def Wlarger_than_EQ : Joined<"-Wlarger-than=">, Alias<Wlarger_than>;
-def Wlarger_than_ : Joined<"-Wlarger-than-">, Alias<Wlarger_than>;
-def Wframe_larger_than : Separate<"-Wframe-larger-than">, Group<clang_ignored_f_Group>;
-def Wframe_larger_than_EQ : Joined<"-Wframe-larger-than=">, Alias<Wframe_larger_than>;
+def Wlarger_than : Separate<["-"], "Wlarger-than">, Group<clang_ignored_f_Group>;
+def Wlarger_than_EQ : Joined<["-"], "Wlarger-than=">, Alias<Wlarger_than>;
+def Wlarger_than_ : Joined<["-"], "Wlarger-than-">, Alias<Wlarger_than>;
+def Wframe_larger_than : Separate<["-"], "Wframe-larger-than">, Group<clang_ignored_f_Group>;
+def Wframe_larger_than_EQ : Joined<["-"], "Wframe-larger-than=">, Alias<Wframe_larger_than>;
-def fterminated_vtables : Flag<"-fterminated-vtables">, Alias<fapple_kext>;
-def fthreadsafe_statics : Flag<"-fthreadsafe-statics">, Group<f_Group>;
-def ftime_report : Flag<"-ftime-report">, Group<f_Group>, Flags<[CC1Option]>;
-def ftlsmodel_EQ : Joined<"-ftls-model=">, Group<f_Group>, Flags<[CC1Option]>;
-def ftrapv : Flag<"-ftrapv">, Group<f_Group>, Flags<[CC1Option]>,
+def fterminated_vtables : Flag<["-"], "fterminated-vtables">, Alias<fapple_kext>;
+def fthreadsafe_statics : Flag<["-"], "fthreadsafe-statics">, Group<f_Group>;
+def ftime_report : Flag<["-"], "ftime-report">, Group<f_Group>, Flags<[CC1Option]>;
+def ftlsmodel_EQ : Joined<["-"], "ftls-model=">, Group<f_Group>, Flags<[CC1Option]>;
+def ftrapv : Flag<["-"], "ftrapv">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Trap on integer overflow">;
-def ftrapv_handler_EQ : Joined<"-ftrapv-handler=">, Group<f_Group>,
+def ftrapv_handler_EQ : Joined<["-"], "ftrapv-handler=">, Group<f_Group>,
MetaVarName<"<function name>">,
- HelpText<"Specify the function to be called on overflow.">;
-def ftrapv_handler : Separate<"-ftrapv-handler">, Group<f_Group>, Flags<[CC1Option]>;
-def ftrap_function_EQ : Joined<"-ftrap-function=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Specify the function to be called on overflow">;
+def ftrapv_handler : Separate<["-"], "ftrapv-handler">, Group<f_Group>, Flags<[CC1Option]>;
+def ftrap_function_EQ : Joined<["-"], "ftrap-function=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Issue call to specified function rather than a trap instruction">;
-def funit_at_a_time : Flag<"-funit-at-a-time">, Group<f_Group>;
-def funroll_loops : Flag<"-funroll-loops">, Group<f_Group>,
+def funit_at_a_time : Flag<["-"], "funit-at-a-time">, Group<f_Group>;
+def funroll_loops : Flag<["-"], "funroll-loops">, Group<f_Group>,
HelpText<"Turn on loop unroller">, Flags<[CC1Option]>;
-def funsigned_bitfields : Flag<"-funsigned-bitfields">, Group<f_Group>;
-def funsigned_char : Flag<"-funsigned-char">, Group<f_Group>;
-def funwind_tables : Flag<"-funwind-tables">, Group<f_Group>;
-def fuse_cxa_atexit : Flag<"-fuse-cxa-atexit">, Group<f_Group>;
-def fverbose_asm : Flag<"-fverbose-asm">, Group<f_Group>;
-def fvisibility_EQ : Joined<"-fvisibility=">, Group<f_Group>;
-def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">, Group<f_Group>,
+def funsigned_bitfields : Flag<["-"], "funsigned-bitfields">, Group<f_Group>;
+def funsigned_char : Flag<["-"], "funsigned-char">, Group<f_Group>;
+def funwind_tables : Flag<["-"], "funwind-tables">, Group<f_Group>;
+def fuse_cxa_atexit : Flag<["-"], "fuse-cxa-atexit">, Group<f_Group>;
+def fverbose_asm : Flag<["-"], "fverbose-asm">, Group<f_Group>;
+def fvisibility_EQ : Joined<["-"], "fvisibility=">, Group<f_Group>;
+def fvisibility_inlines_hidden : Flag<["-"], "fvisibility-inlines-hidden">, Group<f_Group>,
HelpText<"Give inline C++ member functions default visibility by default">,
Flags<[CC1Option]>;
-def fwrapv : Flag<"-fwrapv">, Group<f_Group>, Flags<[CC1Option]>,
+def fwrapv : Flag<["-"], "fwrapv">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Treat signed integer overflow as two's complement">;
-def fwritable_strings : Flag<"-fwritable-strings">, Group<f_Group>, Flags<[CC1Option]>,
+def fwritable_strings : Flag<["-"], "fwritable-strings">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Store string literals as writable data">;
-def fzero_initialized_in_bss : Flag<"-fzero-initialized-in-bss">, Group<f_Group>;
-def ffunction_sections: Flag <"-ffunction-sections">, Group<f_Group>,
+def fzero_initialized_in_bss : Flag<["-"], "fzero-initialized-in-bss">, Group<f_Group>;
+def ffunction_sections: Flag <["-"], "ffunction-sections">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Place each function in its own section (ELF Only)">;
-def fdata_sections : Flag <"-fdata-sections">, Group<f_Group>, Flags<[CC1Option]>,
+def fdata_sections : Flag <["-"], "fdata-sections">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Place each data in its own section (ELF Only)">;
-def f : Joined<"-f">, Group<f_Group>;
-def g_Flag : Flag<"-g">, Group<g_Group>,
+def f : Joined<["-"], "f">, Group<f_Group>;
+def g_Flag : Flag<["-"], "g">, Group<g_Group>,
HelpText<"Generate source level debug information">, Flags<[CC1Option]>;
-def gline_tables_only : Flag<"-gline-tables-only">, Group<g_Group>,
+def gline_tables_only : Flag<["-"], "gline-tables-only">, Group<g_Group>,
HelpText<"Emit debug line number tables only">, Flags<[CC1Option]>;
-def g0 : Flag<"-g0">, Group<g_Group>;
-def g1 : Flag<"-g1">, Group<g_Group>;
-def g2 : Flag<"-g2">, Group<g_Group>;
-def g3 : Flag<"-g3">, Group<g_Group>;
-def ggdb : Flag<"-ggdb">, Group<g_Group>;
-def ggdb0 : Flag<"-ggdb0">, Group<g_Group>;
-def ggdb1 : Flag<"-ggdb1">, Group<g_Group>;
-def ggdb2 : Flag<"-ggdb2">, Group<g_Group>;
-def ggdb3 : Flag<"-ggdb3">, Group<g_Group>;
-def gdwarf_2 : Flag<"-gdwarf-2">, Group<g_Group>;
-def gdwarf_3 : Flag<"-gdwarf-3">, Group<g_Group>;
-def gdwarf_4 : Flag<"-gdwarf-4">, Group<g_Group>;
-def gfull : Flag<"-gfull">, Group<g_Group>;
-def gused : Flag<"-gused">, Group<g_Group>;
-def gstabs : Joined<"-gstabs">, Group<g_Group>, Flags<[Unsupported]>;
-def gcoff : Joined<"-gcoff">, Group<g_Group>, Flags<[Unsupported]>;
-def gxcoff : Joined<"-gxcoff">, Group<g_Group>, Flags<[Unsupported]>;
-def gvms : Joined<"-gvms">, Group<g_Group>, Flags<[Unsupported]>;
-def gtoggle : Flag<"-gtoggle">, Group<g_flags_Group>, Flags<[Unsupported]>;
-def grecord_gcc_switches : Flag<"-grecord-gcc-switches">, Group<g_flags_Group>;
-def gno_record_gcc_switches : Flag<"-gno-record-gcc-switches">,
+def g0 : Flag<["-"], "g0">, Group<g_Group>;
+def g1 : Flag<["-"], "g1">, Group<g_Group>;
+def g2 : Flag<["-"], "g2">, Group<g_Group>;
+def g3 : Flag<["-"], "g3">, Group<g_Group>;
+def ggdb : Flag<["-"], "ggdb">, Group<g_Group>;
+def ggdb0 : Flag<["-"], "ggdb0">, Group<g_Group>;
+def ggdb1 : Flag<["-"], "ggdb1">, Group<g_Group>;
+def ggdb2 : Flag<["-"], "ggdb2">, Group<g_Group>;
+def ggdb3 : Flag<["-"], "ggdb3">, Group<g_Group>;
+def gdwarf_2 : Flag<["-"], "gdwarf-2">, Group<g_Group>;
+def gdwarf_3 : Flag<["-"], "gdwarf-3">, Group<g_Group>;
+def gdwarf_4 : Flag<["-"], "gdwarf-4">, Group<g_Group>;
+def gfull : Flag<["-"], "gfull">, Group<g_Group>;
+def gused : Flag<["-"], "gused">, Group<g_Group>;
+def gstabs : Joined<["-"], "gstabs">, Group<g_Group>, Flags<[Unsupported]>;
+def gcoff : Joined<["-"], "gcoff">, Group<g_Group>, Flags<[Unsupported]>;
+def gxcoff : Joined<["-"], "gxcoff">, Group<g_Group>, Flags<[Unsupported]>;
+def gvms : Joined<["-"], "gvms">, Group<g_Group>, Flags<[Unsupported]>;
+def gtoggle : Flag<["-"], "gtoggle">, Group<g_flags_Group>, Flags<[Unsupported]>;
+def grecord_gcc_switches : Flag<["-"], "grecord-gcc-switches">, Group<g_flags_Group>;
+def gno_record_gcc_switches : Flag<["-"], "gno-record-gcc-switches">,
Group<g_flags_Group>;
-def gstrict_dwarf : Flag<"-gstrict-dwarf">, Group<g_flags_Group>;
-def gno_strict_dwarf : Flag<"-gno-strict-dwarf">, Group<g_flags_Group>;
-def headerpad__max__install__names : Joined<"-headerpad_max_install_names">;
-def help : Flag<"-help">, Flags<[CC1Option]>,
+def gstrict_dwarf : Flag<["-"], "gstrict-dwarf">, Group<g_flags_Group>;
+def gno_strict_dwarf : Flag<["-"], "gno-strict-dwarf">, Group<g_flags_Group>;
+def gcolumn_info : Flag<["-"], "gcolumn-info">, Group<g_flags_Group>;
+def headerpad__max__install__names : Joined<["-"], "headerpad_max_install_names">;
+def help : Flag<["-", "--"], "help">, Flags<[CC1Option]>,
HelpText<"Display available options">;
-def index_header_map : Flag<"-index-header-map">, Flags<[CC1Option]>,
+def index_header_map : Flag<["-"], "index-header-map">, Flags<[CC1Option]>,
HelpText<"Make the next included directory (-I or -F) an indexer header map">;
-def idirafter : JoinedOrSeparate<"-idirafter">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def idirafter : JoinedOrSeparate<["-"], "idirafter">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to AFTER include search path">;
-def iframework : Joined<"-iframework">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def iframework : JoinedOrSeparate<["-"], "iframework">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to SYSTEM framework search path">;
-def imacros : JoinedOrSeparate<"-imacros">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def imacros : JoinedOrSeparate<["-", "--"], "imacros">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Include macros from file before parsing">, MetaVarName<"<file>">;
-def image__base : Separate<"-image_base">;
-def include_ : JoinedOrSeparate<"-include">, Group<clang_i_Group>, EnumName<"include">,
+def image__base : Separate<["-"], "image_base">;
+def include_ : JoinedOrSeparate<["-", "--"], "include">, Group<clang_i_Group>, EnumName<"include">,
MetaVarName<"<file>">, HelpText<"Include file before parsing">, Flags<[CC1Option]>;
-def include_pch : Separate<"-include-pch">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def include_pch : Separate<["-"], "include-pch">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Include precompiled header file">, MetaVarName<"<file>">;
-def init : Separate<"-init">;
-def install__name : Separate<"-install_name">;
-def integrated_as : Flag<"-integrated-as">, Flags<[DriverOption]>;
-def iprefix : JoinedOrSeparate<"-iprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def init : Separate<["-"], "init">;
+def install__name : Separate<["-"], "install_name">;
+def integrated_as : Flag<["-"], "integrated-as">, Flags<[DriverOption]>;
+def iprefix : JoinedOrSeparate<["-"], "iprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Set the -iwithprefix/-iwithprefixbefore prefix">, MetaVarName<"<dir>">;
-def iquote : JoinedOrSeparate<"-iquote">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def iquote : JoinedOrSeparate<["-"], "iquote">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to QUOTE include search path">, MetaVarName<"<directory>">;
-def isysroot : JoinedOrSeparate<"-isysroot">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def isysroot : JoinedOrSeparate<["-"], "isysroot">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Set the system root directory (usually /)">, MetaVarName<"<dir>">;
-def isystem : JoinedOrSeparate<"-isystem">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def isystem : JoinedOrSeparate<["-"], "isystem">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to SYSTEM include search path">, MetaVarName<"<directory>">;
-def iwithprefixbefore : JoinedOrSeparate<"-iwithprefixbefore">, Group<clang_i_Group>,
+def iwithprefixbefore : JoinedOrSeparate<["-"], "iwithprefixbefore">, Group<clang_i_Group>,
HelpText<"Set directory to include search path with prefix">, MetaVarName<"<dir>">,
Flags<[CC1Option]>;
-def iwithprefix : JoinedOrSeparate<"-iwithprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def iwithprefix : JoinedOrSeparate<["-"], "iwithprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Set directory to SYSTEM include search path with prefix">, MetaVarName<"<dir>">;
-def iwithsysroot : JoinedOrSeparate<"-iwithsysroot">, Group<clang_i_Group>,
+def iwithsysroot : JoinedOrSeparate<["-"], "iwithsysroot">, Group<clang_i_Group>,
HelpText<"Add directory to SYSTEM include search path, "
"absolute paths are relative to -isysroot">, MetaVarName<"<directory>">,
Flags<[CC1Option]>;
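// Illustrative usage (assumed from the help text above, not part of this
// patch): given
//   clang -isysroot /sdk -iwithsysroot /usr/include/foo t.c
// the directory actually searched should be /sdk/usr/include/foo, since an
// absolute -iwithsysroot path is resolved underneath the -isysroot prefix.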
-def i : Joined<"-i">, Group<i_Group>;
-def keep__private__externs : Flag<"-keep_private_externs">;
-def l : JoinedOrSeparate<"-l">, Flags<[LinkerInput, RenderJoined]>;
-def lazy__framework : Separate<"-lazy_framework">, Flags<[LinkerInput]>;
-def lazy__library : Separate<"-lazy_library">, Flags<[LinkerInput]>;
-def m32 : Flag<"-m32">, Group<m_Group>, Flags<[DriverOption]>;
-def mqdsp6_compat : Flag<"-mqdsp6-compat">, Group<m_Group>, Flags<[DriverOption,CC1Option]>,
+def i : Joined<["-"], "i">, Group<i_Group>;
+def keep__private__externs : Flag<["-"], "keep_private_externs">;
+def l : JoinedOrSeparate<["-"], "l">, Flags<[LinkerInput, RenderJoined]>;
+def lazy__framework : Separate<["-"], "lazy_framework">, Flags<[LinkerInput]>;
+def lazy__library : Separate<["-"], "lazy_library">, Flags<[LinkerInput]>;
+def m32 : Flag<["-"], "m32">, Group<m_Group>, Flags<[DriverOption]>;
+def mqdsp6_compat : Flag<["-"], "mqdsp6-compat">, Group<m_Group>, Flags<[DriverOption,CC1Option]>,
HelpText<"Enable hexagon-qdsp6 backward compatibility">;
-def m3dnowa : Flag<"-m3dnowa">, Group<m_x86_Features_Group>;
-def m3dnow : Flag<"-m3dnow">, Group<m_x86_Features_Group>;
-def m64 : Flag<"-m64">, Group<m_Group>, Flags<[DriverOption]>;
-def mabi_EQ : Joined<"-mabi=">, Group<m_Group>;
-def march_EQ : Joined<"-march=">, Group<m_Group>;
-def maltivec : Flag<"-maltivec">, Alias<faltivec>;
-def mcmodel_EQ : Joined<"-mcmodel=">, Group<m_Group>;
-def mconstant_cfstrings : Flag<"-mconstant-cfstrings">, Group<clang_ignored_m_Group>;
-def mcpu_EQ : Joined<"-mcpu=">, Group<m_Group>;
-def mdynamic_no_pic : Joined<"-mdynamic-no-pic">, Group<m_Group>;
-def mfix_and_continue : Flag<"-mfix-and-continue">, Group<clang_ignored_m_Group>;
-def mfloat_abi_EQ : Joined<"-mfloat-abi=">, Group<m_Group>;
-def mfpmath_EQ : Joined<"-mfpmath=">, Group<m_Group>;
-def mfpu_EQ : Joined<"-mfpu=">, Group<m_Group>;
-def mglobal_merge : Flag<"-mglobal-merge">, Group<m_Group>;
-def mhard_float : Flag<"-mhard-float">, Group<m_Group>;
-def miphoneos_version_min_EQ : Joined<"-miphoneos-version-min=">, Group<m_Group>;
-def mios_version_min_EQ : Joined<"-mios-version-min=">, Alias<miphoneos_version_min_EQ>;
-def mios_simulator_version_min_EQ : Joined<"-mios-simulator-version-min=">, Group<m_Group>;
-def mkernel : Flag<"-mkernel">, Group<m_Group>;
-def mlinker_version_EQ : Joined<"-mlinker-version=">, Flags<[NoForward]>;
-def mllvm : Separate<"-mllvm">, Flags<[CC1Option]>,
+def m3dnowa : Flag<["-"], "m3dnowa">, Group<m_x86_Features_Group>;
+def m3dnow : Flag<["-"], "m3dnow">, Group<m_x86_Features_Group>;
+def m64 : Flag<["-"], "m64">, Group<m_Group>, Flags<[DriverOption]>;
+def mabi_EQ : Joined<["-"], "mabi=">, Group<m_Group>;
+def march_EQ : Joined<["-"], "march=">, Group<m_Group>;
+def maltivec : Flag<["-"], "maltivec">, Alias<faltivec>;
+def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>;
+def mconstant_cfstrings : Flag<["-"], "mconstant-cfstrings">, Group<clang_ignored_m_Group>;
+def mcpu_EQ : Joined<["-"], "mcpu=">, Group<m_Group>;
+def mdynamic_no_pic : Joined<["-"], "mdynamic-no-pic">, Group<m_Group>;
+def mfix_and_continue : Flag<["-"], "mfix-and-continue">, Group<clang_ignored_m_Group>;
+def mfloat_abi_EQ : Joined<["-"], "mfloat-abi=">, Group<m_Group>;
+def mfpmath_EQ : Joined<["-"], "mfpmath=">, Group<m_Group>;
+def mfpu_EQ : Joined<["-"], "mfpu=">, Group<m_Group>;
+def mglobal_merge : Flag<["-"], "mglobal-merge">, Group<m_Group>;
+def mhard_float : Flag<["-"], "mhard-float">, Group<m_Group>;
+def miphoneos_version_min_EQ : Joined<["-"], "miphoneos-version-min=">, Group<m_Group>;
+def mios_version_min_EQ : Joined<["-"], "mios-version-min=">, Alias<miphoneos_version_min_EQ>;
+def mios_simulator_version_min_EQ : Joined<["-"], "mios-simulator-version-min=">, Group<m_Group>;
+def mkernel : Flag<["-"], "mkernel">, Group<m_Group>;
+def mlinker_version_EQ : Joined<["-"], "mlinker-version=">, Flags<[NoForward]>;
+def mllvm : Separate<["-"], "mllvm">, Flags<[CC1Option]>,
HelpText<"Additional arguments to forward to LLVM's option processing">;
-def mmacosx_version_min_EQ : Joined<"-mmacosx-version-min=">, Group<m_Group>;
-def mms_bitfields : Flag<"-mms-bitfields">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard.">;
-def mstackrealign : Flag<"-mstackrealign">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Force realign the stack at entry to every function.">;
-def mstack_alignment : Joined<"-mstack-alignment=">, Group<m_Group>, Flags<[CC1Option]>,
+def mmacosx_version_min_EQ : Joined<["-"], "mmacosx-version-min=">, Group<m_Group>;
+def mms_bitfields : Flag<["-"], "mms-bitfields">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard">;
+def mstackrealign : Flag<["-"], "mstackrealign">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Force realign the stack at entry to every function">;
+def mstack_alignment : Joined<["-"], "mstack-alignment=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the stack alignment">;
-def mmmx : Flag<"-mmmx">, Group<m_x86_Features_Group>;
-def mno_3dnowa : Flag<"-mno-3dnowa">, Group<m_x86_Features_Group>;
-def mno_3dnow : Flag<"-mno-3dnow">, Group<m_x86_Features_Group>;
-def mno_constant_cfstrings : Flag<"-mno-constant-cfstrings">, Group<m_Group>;
-def mno_global_merge : Flag<"-mno-global-merge">, Group<m_Group>, Flags<[CC1Option]>,
+def mstrict_align : Flag<["-"], "mstrict-align">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Force all memory accesses to be aligned (ARM only)">;
+def mmmx : Flag<["-"], "mmmx">, Group<m_x86_Features_Group>;
+def mno_3dnowa : Flag<["-"], "mno-3dnowa">, Group<m_x86_Features_Group>;
+def mno_3dnow : Flag<["-"], "mno-3dnow">, Group<m_x86_Features_Group>;
+def mno_constant_cfstrings : Flag<["-"], "mno-constant-cfstrings">, Group<m_Group>;
+def mno_global_merge : Flag<["-"], "mno-global-merge">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Disable merging of globals">;
-def mno_mmx : Flag<"-mno-mmx">, Group<m_x86_Features_Group>;
-def mno_pascal_strings : Flag<"-mno-pascal-strings">, Group<m_Group>;
-def mno_red_zone : Flag<"-mno-red-zone">, Group<m_Group>;
-def mno_relax_all : Flag<"-mno-relax-all">, Group<m_Group>;
-def mno_rtd: Flag<"-mno-rtd">, Group<m_Group>;
-def mno_soft_float : Flag<"-mno-soft-float">, Group<m_Group>;
-def mno_stackrealign : Flag<"-mno-stackrealign">, Group<m_Group>;
-def mno_sse2 : Flag<"-mno-sse2">, Group<m_x86_Features_Group>;
-def mno_sse3 : Flag<"-mno-sse3">, Group<m_x86_Features_Group>;
-def mno_sse4a : Flag<"-mno-sse4a">, Group<m_x86_Features_Group>;
-def mno_sse4 : Flag<"-mno-sse4">, Group<m_x86_Features_Group>;
-def mno_sse4_1 : Flag<"-mno-sse4.1">, Group<m_x86_Features_Group>;
-def mno_sse4_2 : Flag<"-mno-sse4.2">, Group<m_x86_Features_Group>;
-def mno_sse : Flag<"-mno-sse">, Group<m_x86_Features_Group>;
-def mno_ssse3 : Flag<"-mno-ssse3">, Group<m_x86_Features_Group>;
-def mno_aes : Flag<"-mno-aes">, Group<m_x86_Features_Group>;
-def mno_avx : Flag<"-mno-avx">, Group<m_x86_Features_Group>;
-def mno_avx2 : Flag<"-mno-avx2">, Group<m_x86_Features_Group>;
-def mno_pclmul : Flag<"-mno-pclmul">, Group<m_x86_Features_Group>;
-def mno_lzcnt : Flag<"-mno-lzcnt">, Group<m_x86_Features_Group>;
-def mno_rdrnd : Flag<"-mno-rdrnd">, Group<m_x86_Features_Group>;
-def mno_bmi : Flag<"-mno-bmi">, Group<m_x86_Features_Group>;
-def mno_bmi2 : Flag<"-mno-bmi2">, Group<m_x86_Features_Group>;
-def mno_popcnt : Flag<"-mno-popcnt">, Group<m_x86_Features_Group>;
-def mno_fma4 : Flag<"-mno-fma4">, Group<m_x86_Features_Group>;
-def mno_fma : Flag<"-mno-fma">, Group<m_x86_Features_Group>;
-def mno_xop : Flag<"-mno-xop">, Group<m_x86_Features_Group>;
+def mno_mmx : Flag<["-"], "mno-mmx">, Group<m_x86_Features_Group>;
+def mno_pascal_strings : Flag<["-"], "mno-pascal-strings">, Group<m_Group>;
+def mno_red_zone : Flag<["-"], "mno-red-zone">, Group<m_Group>;
+def mno_relax_all : Flag<["-"], "mno-relax-all">, Group<m_Group>;
+def mno_rtd: Flag<["-"], "mno-rtd">, Group<m_Group>;
+def mno_soft_float : Flag<["-"], "mno-soft-float">, Group<m_Group>;
+def mno_stackrealign : Flag<["-"], "mno-stackrealign">, Group<m_Group>;
+def mno_sse2 : Flag<["-"], "mno-sse2">, Group<m_x86_Features_Group>;
+def mno_sse3 : Flag<["-"], "mno-sse3">, Group<m_x86_Features_Group>;
+def mno_sse4a : Flag<["-"], "mno-sse4a">, Group<m_x86_Features_Group>;
+def mno_sse4 : Flag<["-"], "mno-sse4">, Group<m_x86_Features_Group>;
+def mno_sse4_1 : Flag<["-"], "mno-sse4.1">, Group<m_x86_Features_Group>;
+def mno_sse4_2 : Flag<["-"], "mno-sse4.2">, Group<m_x86_Features_Group>;
+def mno_sse : Flag<["-"], "mno-sse">, Group<m_x86_Features_Group>;
+def mno_ssse3 : Flag<["-"], "mno-ssse3">, Group<m_x86_Features_Group>;
+def mno_aes : Flag<["-"], "mno-aes">, Group<m_x86_Features_Group>;
+def mno_avx : Flag<["-"], "mno-avx">, Group<m_x86_Features_Group>;
+def mno_avx2 : Flag<["-"], "mno-avx2">, Group<m_x86_Features_Group>;
+def mno_pclmul : Flag<["-"], "mno-pclmul">, Group<m_x86_Features_Group>;
+def mno_lzcnt : Flag<["-"], "mno-lzcnt">, Group<m_x86_Features_Group>;
+def mno_rdrnd : Flag<["-"], "mno-rdrnd">, Group<m_x86_Features_Group>;
+def mno_bmi : Flag<["-"], "mno-bmi">, Group<m_x86_Features_Group>;
+def mno_bmi2 : Flag<["-"], "mno-bmi2">, Group<m_x86_Features_Group>;
+def mno_popcnt : Flag<["-"], "mno-popcnt">, Group<m_x86_Features_Group>;
+def mno_fma4 : Flag<["-"], "mno-fma4">, Group<m_x86_Features_Group>;
+def mno_fma : Flag<["-"], "mno-fma">, Group<m_x86_Features_Group>;
+def mno_xop : Flag<["-"], "mno-xop">, Group<m_x86_Features_Group>;
+def mno_f16c : Flag<["-"], "mno-f16c">, Group<m_x86_Features_Group>;
+def mno_rtm : Flag<["-"], "mno-rtm">, Group<m_x86_Features_Group>;
-def mno_thumb : Flag<"-mno-thumb">, Group<m_Group>;
-def marm : Flag<"-marm">, Alias<mno_thumb>;
+def mno_thumb : Flag<["-"], "mno-thumb">, Group<m_Group>;
+def marm : Flag<["-"], "marm">, Alias<mno_thumb>;
-def mno_warn_nonportable_cfstrings : Flag<"-mno-warn-nonportable-cfstrings">, Group<m_Group>;
-def mno_omit_leaf_frame_pointer : Flag<"-mno-omit-leaf-frame-pointer">, Group<f_Group>;
-def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">, Group<f_Group>,
- HelpText<"Omit frame pointer setup for leaf functions.">, Flags<[CC1Option]>;
-def mpascal_strings : Flag<"-mpascal-strings">, Group<m_Group>;
-def mred_zone : Flag<"-mred-zone">, Group<m_Group>;
-def mregparm_EQ : Joined<"-mregparm=">, Group<m_Group>;
-def mrelax_all : Flag<"-mrelax-all">, Group<m_Group>, Flags<[CC1Option]>,
+def mno_warn_nonportable_cfstrings : Flag<["-"], "mno-warn-nonportable-cfstrings">, Group<m_Group>;
+def mno_omit_leaf_frame_pointer : Flag<["-"], "mno-omit-leaf-frame-pointer">, Group<m_Group>;
+def momit_leaf_frame_pointer : Flag<["-"], "momit-leaf-frame-pointer">, Group<m_Group>,
+ HelpText<"Omit frame pointer setup for leaf functions">, Flags<[CC1Option]>;
+def mpascal_strings : Flag<["-"], "mpascal-strings">, Group<m_Group>;
+def mred_zone : Flag<["-"], "mred-zone">, Group<m_Group>;
+def mregparm_EQ : Joined<["-"], "mregparm=">, Group<m_Group>;
+def mrelax_all : Flag<["-"], "mrelax-all">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"(integrated-as) Relax all machine instructions">;
-def mrtd : Flag<"-mrtd">, Group<m_Group>, Flags<[CC1Option]>,
+def mrtd : Flag<["-"], "mrtd">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Make StdCall calling convention the default">;
-def msmall_data_threshold_EQ : Joined <"-msmall-data-threshold=">, Group<m_Group>;
-def msoft_float : Flag<"-msoft-float">, Group<m_Group>, Flags<[CC1Option]>,
+def msmall_data_threshold_EQ : Joined <["-"], "msmall-data-threshold=">, Group<m_Group>;
+def msoft_float : Flag<["-"], "msoft-float">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Use software floating point">;
-def mno_implicit_float : Flag<"-mno-implicit-float">, Group<m_Group>,
+def mno_implicit_float : Flag<["-"], "mno-implicit-float">, Group<m_Group>,
HelpText<"Don't generate implicit floating point instructions">;
-def msse2 : Flag<"-msse2">, Group<m_x86_Features_Group>;
-def msse3 : Flag<"-msse3">, Group<m_x86_Features_Group>;
-def msse4a : Flag<"-msse4a">, Group<m_x86_Features_Group>;
-def msse4 : Flag<"-msse4">, Group<m_x86_Features_Group>;
-def msse4_1 : Flag<"-msse4.1">, Group<m_x86_Features_Group>;
-def msse4_2 : Flag<"-msse4.2">, Group<m_x86_Features_Group>;
-def msse : Flag<"-msse">, Group<m_x86_Features_Group>;
-def mssse3 : Flag<"-mssse3">, Group<m_x86_Features_Group>;
-def maes : Flag<"-maes">, Group<m_x86_Features_Group>;
-def mavx : Flag<"-mavx">, Group<m_x86_Features_Group>;
-def mavx2 : Flag<"-mavx2">, Group<m_x86_Features_Group>;
-def mpclmul : Flag<"-mpclmul">, Group<m_x86_Features_Group>;
-def mlzcnt : Flag<"-mlzcnt">, Group<m_x86_Features_Group>;
-def mrdrnd : Flag<"-mrdrnd">, Group<m_x86_Features_Group>;
-def mbmi : Flag<"-mbmi">, Group<m_x86_Features_Group>;
-def mbmi2 : Flag<"-mbmi2">, Group<m_x86_Features_Group>;
-def mpopcnt : Flag<"-mpopcnt">, Group<m_x86_Features_Group>;
-def mfma4 : Flag<"-mfma4">, Group<m_x86_Features_Group>;
-def mfma : Flag<"-mfma">, Group<m_x86_Features_Group>;
-def mxop : Flag<"-mxop">, Group<m_x86_Features_Group>;
-def mips16 : Flag<"-mips16">, Group<m_Group>;
-def mno_mips16 : Flag<"-mno-mips16">, Group<m_Group>;
-def mdsp : Flag<"-mdsp">, Group<m_Group>;
-def mno_dsp : Flag<"-mno-dsp">, Group<m_Group>;
-def mdspr2 : Flag<"-mdspr2">, Group<m_Group>;
-def mno_dspr2 : Flag<"-mno-dspr2">, Group<m_Group>;
-def mthumb : Flag<"-mthumb">, Group<m_Group>;
-def mtune_EQ : Joined<"-mtune=">, Group<m_Group>;
-def multi__module : Flag<"-multi_module">;
-def multiply__defined__unused : Separate<"-multiply_defined_unused">;
-def multiply__defined : Separate<"-multiply_defined">;
-def mwarn_nonportable_cfstrings : Flag<"-mwarn-nonportable-cfstrings">, Group<m_Group>;
-def m_Separate : Separate<"-m">, Group<m_Group>;
-def m_Joined : Joined<"-m">, Group<m_Group>;
-def no_canonical_prefixes : Flag<"-no-canonical-prefixes">, Flags<[HelpHidden]>,
+def msse2 : Flag<["-"], "msse2">, Group<m_x86_Features_Group>;
+def msse3 : Flag<["-"], "msse3">, Group<m_x86_Features_Group>;
+def msse4a : Flag<["-"], "msse4a">, Group<m_x86_Features_Group>;
+def msse4 : Flag<["-"], "msse4">, Group<m_x86_Features_Group>;
+def msse4_1 : Flag<["-"], "msse4.1">, Group<m_x86_Features_Group>;
+def msse4_2 : Flag<["-"], "msse4.2">, Group<m_x86_Features_Group>;
+def msse : Flag<["-"], "msse">, Group<m_x86_Features_Group>;
+def mssse3 : Flag<["-"], "mssse3">, Group<m_x86_Features_Group>;
+def maes : Flag<["-"], "maes">, Group<m_x86_Features_Group>;
+def mavx : Flag<["-"], "mavx">, Group<m_x86_Features_Group>;
+def mavx2 : Flag<["-"], "mavx2">, Group<m_x86_Features_Group>;
+def mpclmul : Flag<["-"], "mpclmul">, Group<m_x86_Features_Group>;
+def mlzcnt : Flag<["-"], "mlzcnt">, Group<m_x86_Features_Group>;
+def mrdrnd : Flag<["-"], "mrdrnd">, Group<m_x86_Features_Group>;
+def mbmi : Flag<["-"], "mbmi">, Group<m_x86_Features_Group>;
+def mbmi2 : Flag<["-"], "mbmi2">, Group<m_x86_Features_Group>;
+def mpopcnt : Flag<["-"], "mpopcnt">, Group<m_x86_Features_Group>;
+def mfma4 : Flag<["-"], "mfma4">, Group<m_x86_Features_Group>;
+def mfma : Flag<["-"], "mfma">, Group<m_x86_Features_Group>;
+def mxop : Flag<["-"], "mxop">, Group<m_x86_Features_Group>;
+def mf16c : Flag<["-"], "mf16c">, Group<m_x86_Features_Group>;
+def mrtm : Flag<["-"], "mrtm">, Group<m_x86_Features_Group>;
+def mips16 : Flag<["-"], "mips16">, Group<m_Group>;
+def mno_mips16 : Flag<["-"], "mno-mips16">, Group<m_Group>;
+def mdsp : Flag<["-"], "mdsp">, Group<m_Group>;
+def mno_dsp : Flag<["-"], "mno-dsp">, Group<m_Group>;
+def mdspr2 : Flag<["-"], "mdspr2">, Group<m_Group>;
+def mno_dspr2 : Flag<["-"], "mno-dspr2">, Group<m_Group>;
+def mips32 : Flag<["-"], "mips32">, Group<mips_CPUs_Group>,
+ HelpText<"Equivalent to -march=mips32">, Flags<[HelpHidden]>;
+def mips32r2 : Flag<["-"], "mips32r2">, Group<mips_CPUs_Group>,
+ HelpText<"Equivalent to -march=mips32r2">, Flags<[HelpHidden]>;
+def mips64 : Flag<["-"], "mips64">, Group<mips_CPUs_Group>,
+ HelpText<"Equivalent to -march=mips64">, Flags<[HelpHidden]>;
+def mips64r2 : Flag<["-"], "mips64r2">, Group<mips_CPUs_Group>,
+ HelpText<"Equivalent to -march=mips64r2">, Flags<[HelpHidden]>;
+def mthumb : Flag<["-"], "mthumb">, Group<m_Group>;
+def mtune_EQ : Joined<["-"], "mtune=">, Group<m_Group>;
+def multi__module : Flag<["-"], "multi_module">;
+def multiply__defined__unused : Separate<["-"], "multiply_defined_unused">;
+def multiply__defined : Separate<["-"], "multiply_defined">;
+def mwarn_nonportable_cfstrings : Flag<["-"], "mwarn-nonportable-cfstrings">, Group<m_Group>;
+def m_Separate : Separate<["-"], "m">, Group<m_Group>;
+def m_Joined : Joined<["-"], "m">, Group<m_Group>;
+def no_canonical_prefixes : Flag<["-"], "no-canonical-prefixes">, Flags<[HelpHidden]>,
HelpText<"Use relative instead of canonical paths">;
-def no_cpp_precomp : Flag<"-no-cpp-precomp">, Group<clang_ignored_f_Group>;
-def no_integrated_as : Flag<"-no-integrated-as">, Flags<[DriverOption]>;
-def no_integrated_cpp : Flag<"-no-integrated-cpp">, Flags<[DriverOption]>;
-def no_pedantic : Flag<"-no-pedantic">, Group<pedantic_Group>;
-def no__dead__strip__inits__and__terms : Flag<"-no_dead_strip_inits_and_terms">;
-def nobuiltininc : Flag<"-nobuiltininc">, Flags<[CC1Option]>,
+def no_cpp_precomp : Flag<["-"], "no-cpp-precomp">, Group<clang_ignored_f_Group>;
+def no_integrated_as : Flag<["-"], "no-integrated-as">, Flags<[DriverOption]>;
+def no_integrated_cpp : Flag<["-", "--"], "no-integrated-cpp">, Flags<[DriverOption]>;
+def no_pedantic : Flag<["-", "--"], "no-pedantic">, Group<pedantic_Group>;
+def no__dead__strip__inits__and__terms : Flag<["-"], "no_dead_strip_inits_and_terms">;
+def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option]>,
HelpText<"Disable builtin #include directories">;
-def nodefaultlibs : Flag<"-nodefaultlibs">;
-def nofixprebinding : Flag<"-nofixprebinding">;
-def nolibc : Flag<"-nolibc">;
-def nomultidefs : Flag<"-nomultidefs">;
-def noprebind : Flag<"-noprebind">;
-def noseglinkedit : Flag<"-noseglinkedit">;
-def nostartfiles : Flag<"-nostartfiles">;
-def nostdinc : Flag<"-nostdinc">;
-def nostdlibinc : Flag<"-nostdlibinc">;
-def nostdincxx : Flag<"-nostdinc++">, Flags<[CC1Option]>,
+def nodefaultlibs : Flag<["-"], "nodefaultlibs">;
+def nofixprebinding : Flag<["-"], "nofixprebinding">;
+def nolibc : Flag<["-"], "nolibc">;
+def nomultidefs : Flag<["-"], "nomultidefs">;
+def noprebind : Flag<["-"], "noprebind">;
+def noseglinkedit : Flag<["-"], "noseglinkedit">;
+def nostartfiles : Flag<["-"], "nostartfiles">;
+def nostdinc : Flag<["-"], "nostdinc">;
+def nostdlibinc : Flag<["-"], "nostdlibinc">;
+def nostdincxx : Flag<["-"], "nostdinc++">, Flags<[CC1Option]>,
HelpText<"Disable standard #include directories for the C++ standard library">;
-def nostdlib : Flag<"-nostdlib">;
-def object : Flag<"-object">;
-def o : JoinedOrSeparate<"-o">, Flags<[DriverOption, RenderAsInput, CC1Option]>,
+def nostdlib : Flag<["-"], "nostdlib">;
+def object : Flag<["-"], "object">;
+def o : JoinedOrSeparate<["-"], "o">, Flags<[DriverOption, RenderAsInput, CC1Option]>,
HelpText<"Write output to <file>">, MetaVarName<"<file>">;
-def pagezero__size : JoinedOrSeparate<"-pagezero_size">;
-def pass_exit_codes : Flag<"-pass-exit-codes">, Flags<[Unsupported]>;
-def pedantic_errors : Flag<"-pedantic-errors">, Group<pedantic_Group>, Flags<[CC1Option]>;
-def pedantic : Flag<"-pedantic">, Group<pedantic_Group>, Flags<[CC1Option]>;
-def pg : Flag<"-pg">, HelpText<"Enable mcount instrumentation">, Flags<[CC1Option]>;
-def pipe : Flag<"-pipe">,
+def pagezero__size : JoinedOrSeparate<["-"], "pagezero_size">;
+def pass_exit_codes : Flag<["-", "--"], "pass-exit-codes">, Flags<[Unsupported]>;
+def pedantic_errors : Flag<["-", "--"], "pedantic-errors">, Group<pedantic_Group>, Flags<[CC1Option]>;
+def pedantic : Flag<["-", "--"], "pedantic">, Group<pedantic_Group>, Flags<[CC1Option]>;
+def pg : Flag<["-"], "pg">, HelpText<"Enable mcount instrumentation">, Flags<[CC1Option]>;
+def pipe : Flag<["-", "--"], "pipe">,
HelpText<"Use pipes between commands, when possible">;
-def prebind__all__twolevel__modules : Flag<"-prebind_all_twolevel_modules">;
-def prebind : Flag<"-prebind">;
-def preload : Flag<"-preload">;
-def print_file_name_EQ : Joined<"-print-file-name=">,
+def prebind__all__twolevel__modules : Flag<["-"], "prebind_all_twolevel_modules">;
+def prebind : Flag<["-"], "prebind">;
+def preload : Flag<["-"], "preload">;
+def print_file_name_EQ : Joined<["-", "--"], "print-file-name=">,
HelpText<"Print the full library path of <file>">, MetaVarName<"<file>">;
-def print_ivar_layout : Flag<"-print-ivar-layout">, Flags<[CC1Option]>,
+def print_ivar_layout : Flag<["-"], "print-ivar-layout">, Flags<[CC1Option]>,
HelpText<"Enable Objective-C Ivar layout bitmap print trace">;
-def print_libgcc_file_name : Flag<"-print-libgcc-file-name">,
+def print_libgcc_file_name : Flag<["-", "--"], "print-libgcc-file-name">,
HelpText<"Print the library path for \"libgcc.a\"">;
-def print_multi_directory : Flag<"-print-multi-directory">;
-def print_multi_lib : Flag<"-print-multi-lib">;
-def print_multi_os_directory : Flag<"-print-multi-os-directory">;
-def print_prog_name_EQ : Joined<"-print-prog-name=">,
+def print_multi_directory : Flag<["-", "--"], "print-multi-directory">;
+def print_multi_lib : Flag<["-", "--"], "print-multi-lib">;
+def print_multi_os_directory : Flag<["-", "--"], "print-multi-os-directory">;
+def print_prog_name_EQ : Joined<["-", "--"], "print-prog-name=">,
HelpText<"Print the full program path of <name>">, MetaVarName<"<name>">;
-def print_search_dirs : Flag<"-print-search-dirs">,
+def print_search_dirs : Flag<["-", "--"], "print-search-dirs">,
HelpText<"Print the paths used for finding libraries and programs">;
-def private__bundle : Flag<"-private_bundle">;
-def pthreads : Flag<"-pthreads">;
-def pthread : Flag<"-pthread">, Flags<[CC1Option]>,
+def private__bundle : Flag<["-"], "private_bundle">;
+def pthreads : Flag<["-"], "pthreads">;
+def pthread : Flag<["-"], "pthread">, Flags<[CC1Option]>,
HelpText<"Support POSIX threads in generated code">;
-def p : Flag<"-p">;
-def pie : Flag<"-pie">;
-def read__only__relocs : Separate<"-read_only_relocs">;
-def remap : Flag<"-remap">;
-def rewrite_objc : Flag<"-rewrite-objc">, Flags<[DriverOption,CC1Option]>,
+def p : Flag<["-"], "p">;
+def pie : Flag<["-"], "pie">;
+def read__only__relocs : Separate<["-"], "read_only_relocs">;
+def remap : Flag<["-"], "remap">;
+def rewrite_objc : Flag<["-"], "rewrite-objc">, Flags<[DriverOption,CC1Option]>,
HelpText<"Rewrite Objective-C source to C++">, Group<Action_Group>;
-def rewrite_legacy_objc : Flag<"-rewrite-legacy-objc">, Flags<[DriverOption]>,
+def rewrite_legacy_objc : Flag<["-"], "rewrite-legacy-objc">, Flags<[DriverOption]>,
HelpText<"Rewrite Legacy Objective-C source to C++">;
-def rdynamic : Flag<"-rdynamic">;
-def rpath : Separate<"-rpath">, Flags<[LinkerInput]>;
-def rtlib_EQ : Joined<"-rtlib=">;
-def r : Flag<"-r">;
-def save_temps : Flag<"-save-temps">, Flags<[DriverOption]>,
+def rdynamic : Flag<["-"], "rdynamic">;
+def rpath : Separate<["-"], "rpath">, Flags<[LinkerInput]>;
+def rtlib_EQ : Joined<["-", "--"], "rtlib=">;
+def r : Flag<["-"], "r">;
+def save_temps : Flag<["-", "--"], "save-temps">, Flags<[DriverOption]>,
HelpText<"Save intermediate compilation results">;
-def sectalign : MultiArg<"-sectalign", 3>;
-def sectcreate : MultiArg<"-sectcreate", 3>;
-def sectobjectsymbols : MultiArg<"-sectobjectsymbols", 2>;
-def sectorder : MultiArg<"-sectorder", 3>;
-def seg1addr : JoinedOrSeparate<"-seg1addr">;
-def seg__addr__table__filename : Separate<"-seg_addr_table_filename">;
-def seg__addr__table : Separate<"-seg_addr_table">;
-def segaddr : MultiArg<"-segaddr", 2>;
-def segcreate : MultiArg<"-segcreate", 3>;
-def seglinkedit : Flag<"-seglinkedit">;
-def segprot : MultiArg<"-segprot", 3>;
-def segs__read__only__addr : Separate<"-segs_read_only_addr">;
-def segs__read__write__addr : Separate<"-segs_read_write_addr">;
-def segs__read__ : Joined<"-segs_read_">;
-def shared_libgcc : Flag<"-shared-libgcc">;
-def shared : Flag<"-shared">;
-def single__module : Flag<"-single_module">;
-def specs_EQ : Joined<"-specs=">;
-def specs : Separate<"-specs">, Flags<[Unsupported]>;
-def static_libgcc : Flag<"-static-libgcc">;
-def static_libstdcxx : Flag<"-static-libstdc++">;
-def static : Flag<"-static">, Flags<[NoArgumentUnused]>;
-def std_default_EQ : Joined<"-std-default=">;
-def std_EQ : Joined<"-std=">, Flags<[CC1Option]>, Group<L_Group>,
+def sectalign : MultiArg<["-"], "sectalign", 3>;
+def sectcreate : MultiArg<["-"], "sectcreate", 3>;
+def sectobjectsymbols : MultiArg<["-"], "sectobjectsymbols", 2>;
+def sectorder : MultiArg<["-"], "sectorder", 3>;
+def seg1addr : JoinedOrSeparate<["-"], "seg1addr">;
+def seg__addr__table__filename : Separate<["-"], "seg_addr_table_filename">;
+def seg__addr__table : Separate<["-"], "seg_addr_table">;
+def segaddr : MultiArg<["-"], "segaddr", 2>;
+def segcreate : MultiArg<["-"], "segcreate", 3>;
+def seglinkedit : Flag<["-"], "seglinkedit">;
+def segprot : MultiArg<["-"], "segprot", 3>;
+def segs__read__only__addr : Separate<["-"], "segs_read_only_addr">;
+def segs__read__write__addr : Separate<["-"], "segs_read_write_addr">;
+def segs__read__ : Joined<["-"], "segs_read_">;
+def shared_libgcc : Flag<["-"], "shared-libgcc">;
+def shared : Flag<["-", "--"], "shared">;
+def single__module : Flag<["-"], "single_module">;
+def specs_EQ : Joined<["-", "--"], "specs=">;
+def specs : Separate<["-", "--"], "specs">, Flags<[Unsupported]>;
+def static_libgcc : Flag<["-"], "static-libgcc">;
+def static_libstdcxx : Flag<["-"], "static-libstdc++">;
+def static : Flag<["-", "--"], "static">, Flags<[NoArgumentUnused]>;
+def std_default_EQ : Joined<["-"], "std-default=">;
+def std_EQ : Joined<["-", "--"], "std=">, Flags<[CC1Option]>, Group<L_Group>,
HelpText<"Language standard to compile for">;
-def stdlib_EQ : Joined<"-stdlib=">, Flags<[CC1Option]>,
+def stdlib_EQ : Joined<["-", "--"], "stdlib=">, Flags<[CC1Option]>,
HelpText<"C++ standard library to use">;
-def sub__library : JoinedOrSeparate<"-sub_library">;
-def sub__umbrella : JoinedOrSeparate<"-sub_umbrella">;
-def s : Flag<"-s">;
-def target : Separate<"-target">, Flags<[DriverOption]>,
+def sub__library : JoinedOrSeparate<["-"], "sub_library">;
+def sub__umbrella : JoinedOrSeparate<["-"], "sub_umbrella">;
+def s : Flag<["-"], "s">;
+def target : Separate<["-"], "target">, Flags<[DriverOption]>,
HelpText<"Generate code for the given target">;
-def gcc_toolchain : Separate<"-gcc-toolchain">, Flags<[DriverOption]>,
+def gcc_toolchain : Separate<["-"], "gcc-toolchain">, Flags<[DriverOption]>,
HelpText<"Use the gcc toolchain at the given directory">;
-// We should deprecate the use of -ccc-host-triple, and then remove.
-def ccc_host_triple : Separate<"-ccc-host-triple">, Alias<target>;
-def time : Flag<"-time">,
+def time : Flag<["-"], "time">,
HelpText<"Time individual commands">;
-def traditional_cpp : Flag<"-traditional-cpp">, Flags<[CC1Option]>,
+def traditional_cpp : Flag<["-", "--"], "traditional-cpp">, Flags<[CC1Option]>,
HelpText<"Enable some traditional CPP emulation">;
-def traditional : Flag<"-traditional">;
-def trigraphs : Flag<"-trigraphs">, Flags<[CC1Option]>,
+def traditional : Flag<["-", "--"], "traditional">;
+def trigraphs : Flag<["-", "--"], "trigraphs">, Flags<[CC1Option]>,
HelpText<"Process trigraph sequences">;
-def twolevel__namespace__hints : Flag<"-twolevel_namespace_hints">;
-def twolevel__namespace : Flag<"-twolevel_namespace">;
-def t : Flag<"-t">;
-def umbrella : Separate<"-umbrella">;
-def undefined : JoinedOrSeparate<"-undefined">, Group<u_Group>;
-def undef : Flag<"-undef">, Group<u_Group>, Flags<[CC1Option]>,
+def twolevel__namespace__hints : Flag<["-"], "twolevel_namespace_hints">;
+def twolevel__namespace : Flag<["-"], "twolevel_namespace">;
+def t : Flag<["-"], "t">;
+def umbrella : Separate<["-"], "umbrella">;
+def undefined : JoinedOrSeparate<["-"], "undefined">, Group<u_Group>;
+def undef : Flag<["-"], "undef">, Group<u_Group>, Flags<[CC1Option]>,
HelpText<"undef all system defines">;
-def unexported__symbols__list : Separate<"-unexported_symbols_list">;
-def u : JoinedOrSeparate<"-u">, Group<u_Group>;
-def use_gold_plugin : Flag<"-use-gold-plugin">;
-def v : Flag<"-v">, Flags<[CC1Option]>,
+def unexported__symbols__list : Separate<["-"], "unexported_symbols_list">;
+def u : JoinedOrSeparate<["-"], "u">, Group<u_Group>;
+def use_gold_plugin : Flag<["-"], "use-gold-plugin">;
+def v : Flag<["-"], "v">, Flags<[CC1Option]>,
HelpText<"Show commands to run and use verbose output">;
-def verify : Flag<"-verify">, Flags<[DriverOption,CC1Option]>,
- HelpText<"Verify output using a verifier.">;
-def weak_l : Joined<"-weak-l">, Flags<[LinkerInput]>;
-def weak__framework : Separate<"-weak_framework">, Flags<[LinkerInput]>;
-def weak__library : Separate<"-weak_library">, Flags<[LinkerInput]>;
-def weak__reference__mismatches : Separate<"-weak_reference_mismatches">;
-def whatsloaded : Flag<"-whatsloaded">;
-def whyload : Flag<"-whyload">;
-def w : Flag<"-w">, HelpText<"Suppress all warnings.">, Flags<[CC1Option]>;
-def x : JoinedOrSeparate<"-x">, Flags<[DriverOption,CC1Option]>,
+def verify : Flag<["-"], "verify">, Flags<[DriverOption,CC1Option]>,
+ HelpText<"Verify output using a verifier">;
+def weak_l : Joined<["-"], "weak-l">, Flags<[LinkerInput]>;
+def weak__framework : Separate<["-"], "weak_framework">, Flags<[LinkerInput]>;
+def weak__library : Separate<["-"], "weak_library">, Flags<[LinkerInput]>;
+def weak__reference__mismatches : Separate<["-"], "weak_reference_mismatches">;
+def whatsloaded : Flag<["-"], "whatsloaded">;
+def whyload : Flag<["-"], "whyload">;
+def w : Flag<["-"], "w">, HelpText<"Suppress all warnings">, Flags<[CC1Option]>;
+def x : JoinedOrSeparate<["-"], "x">, Flags<[DriverOption,CC1Option]>,
HelpText<"Treat subsequent input files as having type <language>">,
MetaVarName<"<language>">;
-def y : Joined<"-y">;
+def y : Joined<["-"], "y">;
-def working_directory : JoinedOrSeparate<"-working-directory">, Flags<[CC1Option]>,
+def working_directory : JoinedOrSeparate<["-"], "working-directory">, Flags<[CC1Option]>,
HelpText<"Resolve file paths relative to the specified directory">;
-def working_directory_EQ : Joined<"-working-directory=">, Flags<[CC1Option]>,
+def working_directory_EQ : Joined<["-"], "working-directory=">, Flags<[CC1Option]>,
Alias<working_directory>;
// Double dash options, which are usually an alias for one of the previous
// options.
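// Editor's note (illustrative, not part of this patch): most defs in this
// block merely re-spell a single-dash option with a "--" prefix and forward
// to it via Alias<>, e.g.
//   def _all_warnings : Flag<["--"], "all-warnings">, Alias<Wall>;
// With the new prefix-list syntax one def can accept both prefixes at once
// (e.g. Flag<["-", "--"], "help">), which is presumably why a number of the
// old alias defs are dropped here or relocated elsewhere in the file.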
-def _CLASSPATH_EQ : Joined<"--CLASSPATH=">, Alias<fclasspath_EQ>;
-def _CLASSPATH : Separate<"--CLASSPATH">, Alias<fclasspath_EQ>;
-def _all_warnings : Flag<"--all-warnings">, Alias<Wall>;
-def _analyze_auto : Flag<"--analyze-auto">, Flags<[DriverOption]>;
-def _analyzer_no_default_checks : Flag<"--analyzer-no-default-checks">, Flags<[DriverOption]>;
-def _analyzer_output : JoinedOrSeparate<"--analyzer-output">, Flags<[DriverOption]>;
-def _analyze : Flag<"--analyze">, Flags<[DriverOption]>,
+def _CLASSPATH_EQ : Joined<["--"], "CLASSPATH=">, Alias<fclasspath_EQ>;
+def _CLASSPATH : Separate<["--"], "CLASSPATH">, Alias<fclasspath_EQ>;
+def _all_warnings : Flag<["--"], "all-warnings">, Alias<Wall>;
+def _analyze_auto : Flag<["--"], "analyze-auto">, Flags<[DriverOption]>;
+def _analyzer_no_default_checks : Flag<["--"], "analyzer-no-default-checks">, Flags<[DriverOption]>;
+def _analyzer_output : JoinedOrSeparate<["--"], "analyzer-output">, Flags<[DriverOption]>;
+def _analyze : Flag<["--"], "analyze">, Flags<[DriverOption]>,
HelpText<"Run the static analyzer">;
-def _ansi : Flag<"--ansi">, Alias<ansi>;
-def _assemble : Flag<"--assemble">, Alias<S>;
-def _assert_EQ : Joined<"--assert=">, Alias<A>;
-def _assert : Separate<"--assert">, Alias<A>;
-def _bootclasspath_EQ : Joined<"--bootclasspath=">, Alias<fbootclasspath_EQ>;
-def _bootclasspath : Separate<"--bootclasspath">, Alias<fbootclasspath_EQ>;
-def _classpath_EQ : Joined<"--classpath=">, Alias<fclasspath_EQ>;
-def _classpath : Separate<"--classpath">, Alias<fclasspath_EQ>;
-def _combine : Flag<"--combine">, Alias<combine>;
-def _comments_in_macros : Flag<"--comments-in-macros">, Alias<CC>;
-def _comments : Flag<"--comments">, Alias<C>;
-def _compile : Flag<"--compile">, Alias<c>;
-def _constant_cfstrings : Flag<"--constant-cfstrings">;
-def _coverage : Flag<"--coverage">, Alias<coverage>;
-def _debug_EQ : Joined<"--debug=">, Alias<g_Flag>;
-def _debug : Flag<"--debug">, Alias<g_Flag>;
-def _define_macro_EQ : Joined<"--define-macro=">, Alias<D>;
-def _define_macro : Separate<"--define-macro">, Alias<D>;
-def _dependencies : Flag<"--dependencies">, Alias<M>;
-def _encoding_EQ : Joined<"--encoding=">, Alias<fencoding_EQ>;
-def _encoding : Separate<"--encoding">, Alias<fencoding_EQ>;
-def _entry : Flag<"--entry">, Alias<e>;
-def _extdirs_EQ : Joined<"--extdirs=">, Alias<fextdirs_EQ>;
-def _extdirs : Separate<"--extdirs">, Alias<fextdirs_EQ>;
-def _extra_warnings : Flag<"--extra-warnings">, Alias<W_Joined>;
-def _for_linker_EQ : Joined<"--for-linker=">, Alias<Xlinker>;
-def _for_linker : Separate<"--for-linker">, Alias<Xlinker>;
-def _force_link_EQ : Joined<"--force-link=">, Alias<u>;
-def _force_link : Separate<"--force-link">, Alias<u>;
-def _help_hidden : Flag<"--help-hidden">;
-def _help : Flag<"--help">, Alias<help>;
-def _imacros_EQ : Joined<"--imacros=">, Alias<imacros>;
-def _imacros : Separate<"--imacros">, Alias<imacros>;
-def _include_barrier : Flag<"--include-barrier">, Alias<I_>;
-def _include_directory_after_EQ : Joined<"--include-directory-after=">, Alias<idirafter>;
-def _include_directory_after : Separate<"--include-directory-after">, Alias<idirafter>;
-def _include_directory_EQ : Joined<"--include-directory=">, Alias<I>;
-def _include_directory : Separate<"--include-directory">, Alias<I>;
-def _include_prefix_EQ : Joined<"--include-prefix=">, Alias<iprefix>;
-def _include_prefix : Separate<"--include-prefix">, Alias<iprefix>;
-def _include_with_prefix_after_EQ : Joined<"--include-with-prefix-after=">, Alias<iwithprefix>;
-def _include_with_prefix_after : Separate<"--include-with-prefix-after">, Alias<iwithprefix>;
-def _include_with_prefix_before_EQ : Joined<"--include-with-prefix-before=">, Alias<iwithprefixbefore>;
-def _include_with_prefix_before : Separate<"--include-with-prefix-before">, Alias<iwithprefixbefore>;
-def _include_with_prefix_EQ : Joined<"--include-with-prefix=">, Alias<iwithprefix>;
-def _include_with_prefix : Separate<"--include-with-prefix">, Alias<iwithprefix>;
-def _include_EQ : Joined<"--include=">, Alias<include_>;
-def _include : Separate<"--include">, Alias<include_>;
-def _language_EQ : Joined<"--language=">, Alias<x>;
-def _language : Separate<"--language">, Alias<x>;
-def _library_directory_EQ : Joined<"--library-directory=">, Alias<L>;
-def _library_directory : Separate<"--library-directory">, Alias<L>;
-def _machine__EQ : Joined<"--machine-=">, Alias<m_Joined>;
-def _machine_ : Joined<"--machine-">, Alias<m_Joined>;
-def _machine_EQ : Joined<"--machine=">, Alias<m_Joined>;
-def _machine : Separate<"--machine">, Alias<m_Joined>;
-def _no_integrated_cpp : Flag<"--no-integrated-cpp">, Alias<no_integrated_cpp>;
-def _no_line_commands : Flag<"--no-line-commands">, Alias<P>;
-def _no_pedantic : Flag<"--no-pedantic">, Alias<no_pedantic>;
-def _no_standard_includes : Flag<"--no-standard-includes">, Alias<nostdinc>;
-def _no_standard_libraries : Flag<"--no-standard-libraries">, Alias<nostdlib>;
-def _no_undefined : Flag<"--no-undefined">, Flags<[LinkerInput]>;
-def _no_warnings : Flag<"--no-warnings">, Alias<w>;
-def _optimize_EQ : Joined<"--optimize=">, Alias<O>;
-def _optimize : Flag<"--optimize">, Alias<O>;
-def _output_class_directory_EQ : Joined<"--output-class-directory=">, Alias<foutput_class_dir_EQ>;
-def _output_class_directory : Separate<"--output-class-directory">, Alias<foutput_class_dir_EQ>;
-def _output_EQ : Joined<"--output=">, Alias<o>;
-def _output : Separate<"--output">, Alias<o>;
-def _param : Separate<"--param">;
-def _param_EQ : Joined<"--param=">, Alias<_param>;
-def _pass_exit_codes : Flag<"--pass-exit-codes">, Alias<pass_exit_codes>;
-def _pedantic_errors : Flag<"--pedantic-errors">, Alias<pedantic_errors>;
-def _pedantic : Flag<"--pedantic">, Alias<pedantic>;
-def _pipe : Flag<"--pipe">, Alias<pipe>;
-def _prefix_EQ : Joined<"--prefix=">, Alias<B>;
-def _prefix : Separate<"--prefix">, Alias<B>;
-def _preprocess : Flag<"--preprocess">, Alias<E>;
-def _print_diagnostic_categories : Flag<"--print-diagnostic-categories">;
-def _print_file_name_EQ : Joined<"--print-file-name=">, Alias<print_file_name_EQ>;
-def _print_file_name : Separate<"--print-file-name">, Alias<print_file_name_EQ>;
-def _print_libgcc_file_name : Flag<"--print-libgcc-file-name">, Alias<print_libgcc_file_name>;
-def _print_missing_file_dependencies : Flag<"--print-missing-file-dependencies">, Alias<MG>;
-def _print_multi_directory : Flag<"--print-multi-directory">, Alias<print_multi_directory>;
-def _print_multi_lib : Flag<"--print-multi-lib">, Alias<print_multi_lib>;
-def _print_multi_os_directory : Flag<"--print-multi-os-directory">, Alias<print_multi_os_directory>;
-def _print_prog_name_EQ : Joined<"--print-prog-name=">, Alias<print_prog_name_EQ>;
-def _print_prog_name : Separate<"--print-prog-name">, Alias<print_prog_name_EQ>;
-def _print_search_dirs : Flag<"--print-search-dirs">, Alias<print_search_dirs>;
-def _profile_blocks : Flag<"--profile-blocks">, Alias<a>;
-def _profile : Flag<"--profile">, Alias<p>;
-def _relocatable_pch : Flag<"--relocatable-pch">,
- HelpText<"Build a relocatable precompiled header">;
-def _resource_EQ : Joined<"--resource=">, Alias<fcompile_resource_EQ>;
-def _resource : Separate<"--resource">, Alias<fcompile_resource_EQ>;
-def _rtlib_EQ : Joined<"--rtlib=">, Alias<rtlib_EQ>;
-def _rtlib : Separate<"--rtlib">, Alias<rtlib_EQ>;
-def _save_temps : Flag<"--save-temps">, Alias<save_temps>;
-def _serialize_diags : Separate<"--serialize-diagnostics">, Flags<[DriverOption]>,
+def _assemble : Flag<["--"], "assemble">, Alias<S>;
+def _assert_EQ : Joined<["--"], "assert=">, Alias<A>;
+def _assert : Separate<["--"], "assert">, Alias<A>;
+def _bootclasspath_EQ : Joined<["--"], "bootclasspath=">, Alias<fbootclasspath_EQ>;
+def _bootclasspath : Separate<["--"], "bootclasspath">, Alias<fbootclasspath_EQ>;
+def _classpath_EQ : Joined<["--"], "classpath=">, Alias<fclasspath_EQ>;
+def _classpath : Separate<["--"], "classpath">, Alias<fclasspath_EQ>;
+def _comments_in_macros : Flag<["--"], "comments-in-macros">, Alias<CC>;
+def _comments : Flag<["--"], "comments">, Alias<C>;
+def _compile : Flag<["--"], "compile">, Alias<c>;
+def _constant_cfstrings : Flag<["--"], "constant-cfstrings">;
+def _debug_EQ : Joined<["--"], "debug=">, Alias<g_Flag>;
+def _debug : Flag<["--"], "debug">, Alias<g_Flag>;
+def _define_macro_EQ : Joined<["--"], "define-macro=">, Alias<D>;
+def _define_macro : Separate<["--"], "define-macro">, Alias<D>;
+def _dependencies : Flag<["--"], "dependencies">, Alias<M>;
+def _encoding_EQ : Joined<["--"], "encoding=">, Alias<fencoding_EQ>;
+def _encoding : Separate<["--"], "encoding">, Alias<fencoding_EQ>;
+def _entry : Flag<["--"], "entry">, Alias<e>;
+def _extdirs_EQ : Joined<["--"], "extdirs=">, Alias<fextdirs_EQ>;
+def _extdirs : Separate<["--"], "extdirs">, Alias<fextdirs_EQ>;
+def _extra_warnings : Flag<["--"], "extra-warnings">, Alias<W_Joined>;
+def _for_linker_EQ : Joined<["--"], "for-linker=">, Alias<Xlinker>;
+def _for_linker : Separate<["--"], "for-linker">, Alias<Xlinker>;
+def _force_link_EQ : Joined<["--"], "force-link=">, Alias<u>;
+def _force_link : Separate<["--"], "force-link">, Alias<u>;
+def _help_hidden : Flag<["--"], "help-hidden">;
+def _imacros_EQ : Joined<["--"], "imacros=">, Alias<imacros>;
+def _include_barrier : Flag<["--"], "include-barrier">, Alias<I_>;
+def _include_directory_after_EQ : Joined<["--"], "include-directory-after=">, Alias<idirafter>;
+def _include_directory_after : Separate<["--"], "include-directory-after">, Alias<idirafter>;
+def _include_directory_EQ : Joined<["--"], "include-directory=">, Alias<I>;
+def _include_directory : Separate<["--"], "include-directory">, Alias<I>;
+def _include_prefix_EQ : Joined<["--"], "include-prefix=">, Alias<iprefix>;
+def _include_prefix : Separate<["--"], "include-prefix">, Alias<iprefix>;
+def _include_with_prefix_after_EQ : Joined<["--"], "include-with-prefix-after=">, Alias<iwithprefix>;
+def _include_with_prefix_after : Separate<["--"], "include-with-prefix-after">, Alias<iwithprefix>;
+def _include_with_prefix_before_EQ : Joined<["--"], "include-with-prefix-before=">, Alias<iwithprefixbefore>;
+def _include_with_prefix_before : Separate<["--"], "include-with-prefix-before">, Alias<iwithprefixbefore>;
+def _include_with_prefix_EQ : Joined<["--"], "include-with-prefix=">, Alias<iwithprefix>;
+def _include_with_prefix : Separate<["--"], "include-with-prefix">, Alias<iwithprefix>;
+def _include_EQ : Joined<["--"], "include=">, Alias<include_>;
+def _language_EQ : Joined<["--"], "language=">, Alias<x>;
+def _language : Separate<["--"], "language">, Alias<x>;
+def _library_directory_EQ : Joined<["--"], "library-directory=">, Alias<L>;
+def _library_directory : Separate<["--"], "library-directory">, Alias<L>;
+def _machine__EQ : Joined<["--"], "machine-=">, Alias<m_Joined>;
+def _machine_ : Joined<["--"], "machine-">, Alias<m_Joined>;
+def _machine_EQ : Joined<["--"], "machine=">, Alias<m_Joined>;
+def _machine : Separate<["--"], "machine">, Alias<m_Joined>;
+def _no_line_commands : Flag<["--"], "no-line-commands">, Alias<P>;
+def _no_standard_includes : Flag<["--"], "no-standard-includes">, Alias<nostdinc>;
+def _no_standard_libraries : Flag<["--"], "no-standard-libraries">, Alias<nostdlib>;
+def _no_undefined : Flag<["--"], "no-undefined">, Flags<[LinkerInput]>;
+def _no_warnings : Flag<["--"], "no-warnings">, Alias<w>;
+def _optimize_EQ : Joined<["--"], "optimize=">, Alias<O>;
+def _optimize : Flag<["--"], "optimize">, Alias<O>;
+def _output_class_directory_EQ : Joined<["--"], "output-class-directory=">, Alias<foutput_class_dir_EQ>;
+def _output_class_directory : Separate<["--"], "output-class-directory">, Alias<foutput_class_dir_EQ>;
+def _output_EQ : Joined<["--"], "output=">, Alias<o>;
+def _output : Separate<["--"], "output">, Alias<o>;
+def _param : Separate<["--"], "param">;
+def _param_EQ : Joined<["--"], "param=">, Alias<_param>;
+def _prefix_EQ : Joined<["--"], "prefix=">, Alias<B>;
+def _prefix : Separate<["--"], "prefix">, Alias<B>;
+def _preprocess : Flag<["--"], "preprocess">, Alias<E>;
+def _print_diagnostic_categories : Flag<["--"], "print-diagnostic-categories">;
+def _print_file_name : Separate<["--"], "print-file-name">, Alias<print_file_name_EQ>;
+def _print_missing_file_dependencies : Flag<["--"], "print-missing-file-dependencies">, Alias<MG>;
+def _print_prog_name : Separate<["--"], "print-prog-name">, Alias<print_prog_name_EQ>;
+def _profile_blocks : Flag<["--"], "profile-blocks">, Alias<a>;
+def _profile : Flag<["--"], "profile">, Alias<p>;
+def _resource_EQ : Joined<["--"], "resource=">, Alias<fcompile_resource_EQ>;
+def _resource : Separate<["--"], "resource">, Alias<fcompile_resource_EQ>;
+def _rtlib : Separate<["--"], "rtlib">, Alias<rtlib_EQ>;
+def _serialize_diags : Separate<["-", "--"], "serialize-diagnostics">, Flags<[DriverOption]>,
HelpText<"Serialize compiler diagnostics to a file">;
-def _shared : Flag<"--shared">, Alias<shared>;
-def _signed_char : Flag<"--signed-char">, Alias<fsigned_char>;
-def _specs_EQ : Joined<"--specs=">, Alias<specs_EQ>;
-def _specs : Separate<"--specs">, Alias<specs_EQ>;
-def _static : Flag<"--static">, Alias<static>;
-def _std_EQ : Joined<"--std=">, Alias<std_EQ>;
-def _std : Separate<"--std">, Alias<std_EQ>;
-def _stdlib_EQ : Joined<"--stdlib=">, Alias<stdlib_EQ>;
-def _stdlib : Separate<"--stdlib">, Alias<stdlib_EQ>;
-def _sysroot_EQ : Joined<"--sysroot=">;
-def _sysroot : Separate<"--sysroot">, Alias<_sysroot_EQ>;
-def _target_help : Flag<"--target-help">;
-def _trace_includes : Flag<"--trace-includes">, Alias<H>;
-def _traditional_cpp : Flag<"--traditional-cpp">, Alias<traditional_cpp>;
-def _traditional : Flag<"--traditional">, Alias<traditional>;
-def _trigraphs : Flag<"--trigraphs">, Alias<trigraphs>;
-def _undefine_macro_EQ : Joined<"--undefine-macro=">, Alias<U>;
-def _undefine_macro : Separate<"--undefine-macro">, Alias<U>;
-def _unsigned_char : Flag<"--unsigned-char">, Alias<funsigned_char>;
-def _user_dependencies : Flag<"--user-dependencies">, Alias<MM>;
-def _verbose : Flag<"--verbose">, Alias<v>;
-def _version : Flag<"--version">, Flags<[CC1Option]>;
-def _warn__EQ : Joined<"--warn-=">, Alias<W_Joined>;
-def _warn_ : Joined<"--warn-">, Alias<W_Joined>;
-def _write_dependencies : Flag<"--write-dependencies">, Alias<MD>;
-def _write_user_dependencies : Flag<"--write-user-dependencies">, Alias<MMD>;
-def _ : Joined<"--">, Flags<[Unsupported]>;
-def mieee_rnd_near : Flag<"-mieee-rnd-near">, Group<m_hexagon_Features_Group>;
-def serialize_diags : Separate<"-serialize-diagnostics">, Alias<_serialize_diags>;
+// We give --version different semantics from -version.
+def _version : Flag<["--"], "version">, Flags<[CC1Option]>;
+def _signed_char : Flag<["--"], "signed-char">, Alias<fsigned_char>;
+def _std : Separate<["--"], "std">, Alias<std_EQ>;
+def _stdlib : Separate<["--"], "stdlib">, Alias<stdlib_EQ>;
+def _sysroot_EQ : Joined<["--"], "sysroot=">;
+def _sysroot : Separate<["--"], "sysroot">, Alias<_sysroot_EQ>;
+def _target_help : Flag<["--"], "target-help">;
+def _trace_includes : Flag<["--"], "trace-includes">, Alias<H>;
+def _undefine_macro_EQ : Joined<["--"], "undefine-macro=">, Alias<U>;
+def _undefine_macro : Separate<["--"], "undefine-macro">, Alias<U>;
+def _unsigned_char : Flag<["--"], "unsigned-char">, Alias<funsigned_char>;
+def _user_dependencies : Flag<["--"], "user-dependencies">, Alias<MM>;
+def _verbose : Flag<["--"], "verbose">, Alias<v>;
+def _warn__EQ : Joined<["--"], "warn-=">, Alias<W_Joined>;
+def _warn_ : Joined<["--"], "warn-">, Alias<W_Joined>;
+def _write_dependencies : Flag<["--"], "write-dependencies">, Alias<MD>;
+def _write_user_dependencies : Flag<["--"], "write-user-dependencies">, Alias<MMD>;
+def _ : Joined<["--"], "">, Flags<[Unsupported]>;
+def mieee_rnd_near : Flag<["-"], "mieee-rnd-near">, Group<m_hexagon_Features_Group>;
// Special internal option to handle -Xlinker --no-demangle.
-def Z_Xlinker__no_demangle : Flag<"-Z-Xlinker-no-demangle">,
+def Z_Xlinker__no_demangle : Flag<["-"], "Z-Xlinker-no-demangle">,
Flags<[Unsupported, NoArgumentUnused]>;
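// Illustrative background (assumed, not part of this patch): the driver is
// expected to rewrite a literal `-Xlinker --no-demangle` into this internal
// option; since it is marked Unsupported and NoArgumentUnused, the flag is
// recognized and quietly dropped rather than forwarded to the linker.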
// Special internal option to allow forwarding arbitrary arguments to linker.
-def Zlinker_input : Separate<"-Zlinker-input">,
+def Zlinker_input : Separate<["-"], "Zlinker-input">,
Flags<[Unsupported, NoArgumentUnused]>;
// Reserved library options.
-def Z_reserved_lib_stdcxx : Flag<"-Z-reserved-lib-stdc++">,
+def Z_reserved_lib_stdcxx : Flag<["-"], "Z-reserved-lib-stdc++">,
Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
-def Z_reserved_lib_cckext : Flag<"-Z-reserved-lib-cckext">,
+def Z_reserved_lib_cckext : Flag<["-"], "Z-reserved-lib-cckext">,
Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
include "CC1Options.td"
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Tool.h b/contrib/llvm/tools/clang/include/clang/Driver/Tool.h
index 8822d7b..c62e756 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Tool.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Tool.h
@@ -55,8 +55,8 @@ public:
/// driver add an additional "command failed" diagnostic on failures.
virtual bool hasGoodDiagnostics() const { return false; }
- /// ConstructJob - Construct jobs to perform the action \arg JA,
- /// writing to \arg Output and with \arg Inputs.
+ /// ConstructJob - Construct jobs to perform the action \p JA,
+ /// writing to \p Output and with \p Inputs.
///
/// \param TCArgs - The argument list for this toolchain, with any
/// tool chain specific translations applied.
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
index ab417bb..509e08d 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
@@ -85,6 +85,10 @@ public:
StringRef getPlatform() const { return Triple.getVendorName(); }
StringRef getOS() const { return Triple.getOSName(); }
+ /// \brief Provide the default architecture name (as expected by -arch) for
+  /// this toolchain.
+ std::string getDefaultUniversalArchName() const;
+
std::string getTripleString() const {
return Triple.getTriple();
}
@@ -107,15 +111,15 @@ public:
return 0;
}
- /// SelectTool - Choose a tool to use to handle the action \arg JA with the
- /// given \arg Inputs.
+ /// SelectTool - Choose a tool to use to handle the action \p JA with the
+ /// given \p Inputs.
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const = 0;
// Helper methods
std::string GetFilePath(const char *Name) const;
- std::string GetProgramPath(const char *Name, bool WantFile = false) const;
+ std::string GetProgramPath(const char *Name) const;
// Platform defaults information
@@ -144,6 +148,10 @@ public:
/// IsObjCDefaultSynthPropertiesDefault - Does this tool chain enable
/// -fobjc-default-synthesize-properties by default.
virtual bool IsObjCDefaultSynthPropertiesDefault() const { return false; }
+
+ /// IsEncodeExtendedBlockSignatureDefault - Does this tool chain enable
+ /// -fencode-extended-block-signature by default.
+ virtual bool IsEncodeExtendedBlockSignatureDefault() const { return false; }
/// IsObjCNonFragileABIDefault - Does this tool chain set
/// -fobjc-nonfragile-abi by default.
@@ -166,16 +174,15 @@ public:
/// IsUnwindTablesDefault - Does this tool chain use -funwind-tables
/// by default.
- virtual bool IsUnwindTablesDefault() const = 0;
+ virtual bool IsUnwindTablesDefault() const;
- /// GetDefaultRelocationModel - Return the LLVM name of the default
- /// relocation model for this tool chain.
- virtual const char *GetDefaultRelocationModel() const = 0;
+ /// \brief Test whether this toolchain defaults to PIC.
+ virtual bool isPICDefault() const = 0;
- /// GetForcedPicModel - Return the LLVM name of the forced PIC model
- /// for this tool chain, or 0 if this tool chain does not force a
- /// particular PIC mode.
- virtual const char *GetForcedPicModel() const = 0;
+ /// \brief Tests whether this toolchain forces its default for PIC or non-PIC.
+ /// If this returns true, any PIC related flags should be ignored and instead
+ /// the result of \c isPICDefault() is used exclusively.
+ virtual bool isPICDefaultForced() const = 0;
/// SupportsProfiling - Does this tool chain support -pg.
virtual bool SupportsProfiling() const { return true; }
@@ -183,8 +190,8 @@ public:
/// Does this tool chain support Objective-C garbage collection.
virtual bool SupportsObjCGC() const { return true; }
- /// Does this tool chain support Objective-C ARC.
- virtual bool SupportsObjCARC() const { return true; }
+ /// Complain if this tool chain doesn't support Objective-C ARC.
+ virtual void CheckObjCARC() const {}
/// UseDwarfDebugFlags - Embed the compile options to clang into the Dwarf
/// compile unit information.
@@ -252,6 +259,13 @@ public:
/// for kernel extensions (Darwin-specific).
virtual void AddCCKextLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const;
+
+ /// AddFastMathRuntimeIfAvailable - If a runtime library exists that sets
+ /// global flags for unsafe floating point math, add it and return true.
+ ///
+  /// This checks for the presence of the -ffast-math or -funsafe-math flags.
+ virtual bool AddFastMathRuntimeIfAvailable(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
};
} // end namespace driver
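
The ToolChain hunk replaces the string-returning GetDefaultRelocationModel/GetForcedPicModel pair with two boolean hooks, isPICDefault() and isPICDefaultForced(). A minimal sketch of how the two hooks compose, with a simplified ToolChainLike stand-in and a UserRequestedPIC value modeling -fPIC/-fno-PIC; this is not the actual driver logic:

    #include <iostream>

    struct ToolChainLike {
      virtual bool isPICDefault() const = 0;
      virtual bool isPICDefaultForced() const = 0;
      virtual ~ToolChainLike() {}
    };

    // UserRequestedPIC models -fPIC (1) / -fno-PIC (0); -1 means neither
    // flag appeared on the command line.
    static bool effectivePIC(const ToolChainLike &TC, int UserRequestedPIC) {
      if (TC.isPICDefaultForced() || UserRequestedPIC < 0)
        return TC.isPICDefault(); // PIC flags ignored or absent
      return UserRequestedPIC != 0;
    }

    struct ForcedPICTarget : ToolChainLike {
      bool isPICDefault() const override { return true; }
      bool isPICDefaultForced() const override { return true; }
    };

    int main() {
      ForcedPICTarget TC;
      // Even an explicit -fno-PIC (0) is ignored on a PIC-forced target.
      std::cout << effectivePIC(TC, 0) << '\n'; // 1
    }
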
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.h b/contrib/llvm/tools/clang/include/clang/Driver/Types.h
index 3dea471..d28ca88 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Types.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.h
@@ -59,10 +59,6 @@ namespace types {
/// isAcceptedByClang - Can clang handle this input type.
bool isAcceptedByClang(ID Id);
- /// isOnlyAcceptedByClang - Is clang the only compiler that can handle this
- /// input type.
- bool isOnlyAcceptedByClang(ID Id);
-
/// isCXX - Is this a "C++" input (C++ and Obj-C++ sources and headers).
bool isCXX(ID Id);
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
index 144b796..5e409bd 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
@@ -19,11 +19,13 @@
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang/Lex/ModuleLoader.h"
#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Basic/TargetOptions.h"
#include "clang-c/Index.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/OwningPtr.h"
@@ -56,21 +58,27 @@ class Preprocessor;
class SourceManager;
class TargetInfo;
class ASTFrontendAction;
+class ASTDeserializationListener;
/// \brief Utility class for loading a ASTContext from an AST file.
///
class ASTUnit : public ModuleLoader {
private:
- IntrusiveRefCntPtr<LangOptions> LangOpts;
- IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
- IntrusiveRefCntPtr<FileManager> FileMgr;
- IntrusiveRefCntPtr<SourceManager> SourceMgr;
- OwningPtr<HeaderSearch> HeaderInfo;
- IntrusiveRefCntPtr<TargetInfo> Target;
- IntrusiveRefCntPtr<Preprocessor> PP;
- IntrusiveRefCntPtr<ASTContext> Ctx;
+ IntrusiveRefCntPtr<LangOptions> LangOpts;
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
+ IntrusiveRefCntPtr<FileManager> FileMgr;
+ IntrusiveRefCntPtr<SourceManager> SourceMgr;
+ OwningPtr<HeaderSearch> HeaderInfo;
+ IntrusiveRefCntPtr<TargetInfo> Target;
+ IntrusiveRefCntPtr<Preprocessor> PP;
+ IntrusiveRefCntPtr<ASTContext> Ctx;
+ IntrusiveRefCntPtr<TargetOptions> TargetOpts;
+ IntrusiveRefCntPtr<HeaderSearchOptions> HSOpts;
ASTReader *Reader;
+ struct ASTWriterData;
+ OwningPtr<ASTWriterData> WriterData;
+
FileSystemOptions FileSystemOpts;
/// \brief The AST consumer that received information about the translation
@@ -85,13 +93,6 @@ private:
/// LoadFromCommandLine available.
IntrusiveRefCntPtr<CompilerInvocation> Invocation;
- /// \brief The set of target features.
- ///
- /// FIXME: each time we reparse, we need to restore the set of target
- /// features from this vector, because TargetInfo::CreateTargetInfo()
- /// mangles the target options in place. Yuck!
- std::vector<std::string> TargetFeatures;
-
// OnlyLocalDecls - when true, walking this AST should only visit declarations
// that come from the AST itself, not from included precompiled headers.
// FIXME: This is temporary; eventually, CIndex will always do this.
@@ -374,8 +375,8 @@ private:
/// \brief Clear out and deallocate the cached code-completion results.
void ClearCachedCompletionResults();
- ASTUnit(const ASTUnit&); // DO NOT IMPLEMENT
- ASTUnit &operator=(const ASTUnit &); // DO NOT IMPLEMENT
+ ASTUnit(const ASTUnit &) LLVM_DELETED_FUNCTION;
+ void operator=(const ASTUnit &) LLVM_DELETED_FUNCTION;
explicit ASTUnit(bool MainFileIsAST);
@@ -466,7 +467,11 @@ public:
const FileSystemOptions &getFileSystemOpts() const { return FileSystemOpts; }
- const std::string &getOriginalSourceFileName();
+ StringRef getOriginalSourceFileName() {
+ return OriginalSourceFile;
+ }
+
+ ASTDeserializationListener *getDeserializationListener();
/// \brief Add a temporary file that the ASTUnit depends on.
///
@@ -515,7 +520,7 @@ public:
void addFileLevelDecl(Decl *D);
/// \brief Get the decls that are contained in a file in the Offset/Length
- /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// range. \p Length can be 0 to indicate a point at \p Offset instead of
/// a range.
void findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
SmallVectorImpl<Decl *> &Decls);
@@ -542,14 +547,14 @@ public:
/// \brief Get the source location for the given file:offset pair.
SourceLocation getLocation(const FileEntry *File, unsigned Offset) const;
- /// \brief If \arg Loc is a loaded location from the preamble, returns
+ /// \brief If \p Loc is a loaded location from the preamble, returns
/// the corresponding local location of the main file, otherwise it returns
- /// \arg Loc.
+ /// \p Loc.
SourceLocation mapLocationFromPreamble(SourceLocation Loc);
- /// \brief If \arg Loc is a local location of the main file but inside the
+ /// \brief If \p Loc is a local location of the main file but inside the
/// preamble chunk, returns the corresponding loaded location from the
- /// preamble, otherwise it returns \arg Loc.
+ /// preamble, otherwise it returns \p Loc.
SourceLocation mapLocationToPreamble(SourceLocation Loc);
bool isInPreambleFileID(SourceLocation Loc);
@@ -557,13 +562,13 @@ public:
SourceLocation getStartOfMainFileID();
SourceLocation getEndOfPreambleFileID();
- /// \brief \see mapLocationFromPreamble.
+ /// \see mapLocationFromPreamble.
SourceRange mapRangeFromPreamble(SourceRange R) {
return SourceRange(mapLocationFromPreamble(R.getBegin()),
mapLocationFromPreamble(R.getEnd()));
}
- /// \brief \see mapLocationToPreamble.
+ /// \see mapLocationToPreamble.
SourceRange mapRangeToPreamble(SourceRange R) {
return SourceRange(mapLocationToPreamble(R.getBegin()),
mapLocationToPreamble(R.getEnd()));
@@ -607,6 +612,29 @@ public:
return CachedCompletionResults.size();
}
+ /// \brief Returns an iterator range for the local preprocessing entities
+ /// of the local Preprocessor, if this is a parsed source file, or the loaded
+ /// preprocessing entities of the primary module if this is an AST file.
+ std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator>
+ getLocalPreprocessingEntities() const;
+
+ /// \brief Type for a function iterating over a number of declarations.
+ /// \returns true to continue iteration and false to abort.
+ typedef bool (*DeclVisitorFn)(void *context, const Decl *D);
+
+ /// \brief Iterate over local declarations (locally parsed if this is a parsed
+ /// source file or the loaded declarations of the primary module if this is an
+ /// AST file).
+ /// \returns true if the iteration was complete or false if it was aborted.
+ bool visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn);
+
+ /// \brief Get the PCH file if one was included.
+ const FileEntry *getPCHFile();
+
+ /// \brief Returns true if the ASTUnit was constructed from a serialized
+ /// module file.
+ bool isModuleFile();
+
llvm::MemoryBuffer *getBufferForFile(StringRef Filename,
std::string *ErrorStr = 0);
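
The new visitLocalTopLevelDecls above pairs a C-style function pointer with a void* context, and the callback's return value controls whether the walk continues. A self-contained sketch of that pattern, with Decl as a stand-in type rather than clang's:

    #include <iostream>
    #include <vector>

    struct Decl { const char *Name; }; // stand-in, not clang's Decl

    typedef bool (*DeclVisitorFn)(void *Context, const Decl *D);

    // Returns true if the iteration ran to completion, false if aborted,
    // mirroring the visitLocalTopLevelDecls contract above.
    static bool visitDecls(const std::vector<Decl> &Decls,
                           void *Context, DeclVisitorFn Fn) {
      for (const Decl &D : Decls)
        if (!Fn(Context, &D))
          return false;
      return true;
    }

    static bool printUpToThree(void *Context, const Decl *D) {
      unsigned &Count = *static_cast<unsigned *>(Context);
      std::cout << D->Name << '\n';
      return ++Count < 3; // returning false aborts the walk
    }

    int main() {
      std::vector<Decl> Decls = {{"f"}, {"g"}, {"h"}, {"i"}};
      unsigned Count = 0;
      std::cout << "complete: " << visitDecls(Decls, &Count, printUpToThree)
                << '\n'; // complete: 0
    }
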
@@ -679,7 +707,7 @@ public:
/// (e.g. because the PCH could not be loaded), this accepts the ASTUnit
/// mainly to allow the caller to see the diagnostics.
/// This will only receive an ASTUnit if a new one was created. If an already
- /// created ASTUnit was passed in \param Unit then the caller can check that.
+ /// created ASTUnit was passed in \p Unit then the caller can check that.
///
static ASTUnit *LoadFromCompilerInvocationAction(CompilerInvocation *CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
@@ -750,6 +778,7 @@ public:
bool AllowPCHWithCompilerErrors = false,
bool SkipFunctionBodies = false,
bool UserFilesAreVolatile = false,
+ bool ForSerialization = false,
OwningPtr<ASTUnit> *ErrAST = 0);
/// \brief Reparse the source files using the same command-line options that
@@ -792,8 +821,9 @@ public:
/// \brief Save this translation unit to a file with the given name.
///
- /// \returns An indication of whether the save was successful or not.
- CXSaveError Save(StringRef File);
+ /// \returns true if there was a file error or false if the save was
+ /// successful.
+ bool Save(StringRef File);
/// \brief Serialize this translation unit with the given output stream.
///
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
deleted file mode 100644
index 4e489fe..0000000
--- a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
+++ /dev/null
@@ -1,135 +0,0 @@
-//===--- AnalyzerOptions.h - Analysis Engine Options ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This header contains the structures necessary for a front-end to specify
-// various analyses.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_FRONTEND_ANALYZEROPTIONS_H
-#define LLVM_CLANG_FRONTEND_ANALYZEROPTIONS_H
-
-#include <string>
-#include <vector>
-
-namespace clang {
-class ASTConsumer;
-class DiagnosticsEngine;
-class Preprocessor;
-class LangOptions;
-
-/// Analysis - Set of available source code analyses.
-enum Analyses {
-#define ANALYSIS(NAME, CMDFLAG, DESC, SCOPE) NAME,
-#include "clang/Frontend/Analyses.def"
-NumAnalyses
-};
-
-/// AnalysisStores - Set of available analysis store models.
-enum AnalysisStores {
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
-#include "clang/Frontend/Analyses.def"
-NumStores
-};
-
-/// AnalysisConstraints - Set of available constraint models.
-enum AnalysisConstraints {
-#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
-#include "clang/Frontend/Analyses.def"
-NumConstraints
-};
-
-/// AnalysisDiagClients - Set of available diagnostic clients for rendering
-/// analysis results.
-enum AnalysisDiagClients {
-#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN, AUTOCREAT) PD_##NAME,
-#include "clang/Frontend/Analyses.def"
-NUM_ANALYSIS_DIAG_CLIENTS
-};
-
-/// AnalysisPurgeModes - Set of available strategies for dead symbol removal.
-enum AnalysisPurgeMode {
-#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) NAME,
-#include "clang/Frontend/Analyses.def"
-NumPurgeModes
-};
-
-/// AnalysisIPAMode - Set of inter-procedural modes.
-enum AnalysisIPAMode {
-#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) NAME,
-#include "clang/Frontend/Analyses.def"
-NumIPAModes
-};
-
-/// AnalysisInlineFunctionSelection - Set of inlining function selection heuristics.
-enum AnalysisInliningMode {
-#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) NAME,
-#include "clang/Frontend/Analyses.def"
-NumInliningModes
-};
-
-class AnalyzerOptions {
-public:
- /// \brief Pair of checker name and enable/disable.
- std::vector<std::pair<std::string, bool> > CheckersControlList;
- AnalysisStores AnalysisStoreOpt;
- AnalysisConstraints AnalysisConstraintsOpt;
- AnalysisDiagClients AnalysisDiagOpt;
- AnalysisPurgeMode AnalysisPurgeOpt;
- AnalysisIPAMode IPAMode;
- std::string AnalyzeSpecificFunction;
- unsigned MaxNodes;
- unsigned MaxLoop;
- unsigned ShowCheckerHelp : 1;
- unsigned AnalyzeAll : 1;
- unsigned AnalyzerDisplayProgress : 1;
- unsigned AnalyzeNestedBlocks : 1;
- unsigned EagerlyAssume : 1;
- unsigned TrimGraph : 1;
- unsigned VisualizeEGDot : 1;
- unsigned VisualizeEGUbi : 1;
- unsigned UnoptimizedCFG : 1;
- unsigned CFGAddImplicitDtors : 1;
- unsigned EagerlyTrimEGraph : 1;
- unsigned PrintStats : 1;
- unsigned NoRetryExhausted : 1;
- unsigned InlineMaxStackDepth;
- unsigned InlineMaxFunctionSize;
- AnalysisInliningMode InliningMode;
-
-public:
- AnalyzerOptions() {
- AnalysisStoreOpt = RegionStoreModel;
- AnalysisConstraintsOpt = RangeConstraintsModel;
- AnalysisDiagOpt = PD_HTML;
- AnalysisPurgeOpt = PurgeStmt;
- IPAMode = Inlining;
- ShowCheckerHelp = 0;
- AnalyzeAll = 0;
- AnalyzerDisplayProgress = 0;
- AnalyzeNestedBlocks = 0;
- EagerlyAssume = 0;
- TrimGraph = 0;
- VisualizeEGDot = 0;
- VisualizeEGUbi = 0;
- UnoptimizedCFG = 0;
- CFGAddImplicitDtors = 0;
- EagerlyTrimEGraph = 0;
- PrintStats = 0;
- NoRetryExhausted = 0;
- // Cap the stack depth at 4 calls (5 stack frames, base + 4 calls).
- InlineMaxStackDepth = 5;
- InlineMaxFunctionSize = 200;
- InliningMode = NoRedundancy;
- }
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def
new file mode 100644
index 0000000..558e6f1
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def
@@ -0,0 +1,132 @@
+//===--- CodeGenOptions.def - Code generation option database ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the code generation options. Users of this file
+// must define the CODEGENOPT macro to make use of this information.
+// Optionally, the user may also define ENUM_CODEGENOPT (for options
+// that have enumeration type) and VALUE_CODEGENOPT (for options that
+// describe a value rather than a flag).
+//
+//===----------------------------------------------------------------------===//
+#ifndef CODEGENOPT
+# error Define the CODEGENOPT macro to handle code generation options
+#endif
+
+#ifndef VALUE_CODEGENOPT
+# define VALUE_CODEGENOPT(Name, Bits, Default) \
+CODEGENOPT(Name, Bits, Default)
+#endif
+
+#ifndef ENUM_CODEGENOPT
+# define ENUM_CODEGENOPT(Name, Type, Bits, Default) \
+CODEGENOPT(Name, Bits, Default)
+#endif
+
+CODEGENOPT(AsmVerbose , 1, 0) ///< -dA, -fverbose-asm.
+CODEGENOPT(ObjCAutoRefCountExceptions , 1, 0) ///< Whether ARC should be EH-safe.
+CODEGENOPT(CUDAIsDevice , 1, 0) ///< Set when compiling for CUDA device.
+CODEGENOPT(CXAAtExit , 1, 1) ///< Use __cxa_atexit for calling destructors.
+CODEGENOPT(CXXCtorDtorAliases, 1, 0) ///< Emit complete ctors/dtors as linker
+ ///< aliases to base ctors when possible.
+CODEGENOPT(DataSections , 1, 0) ///< Set when -fdata-sections is enabled.
+CODEGENOPT(DisableFPElim , 1, 0) ///< Set when -fomit-frame-pointer is enabled.
+CODEGENOPT(DisableLLVMOpts , 1, 0) ///< Don't run any optimizations, for use in
+ ///< getting .bc files that correspond to the
+ ///< internal state before optimizations are
+ ///< done.
+CODEGENOPT(DisableRedZone , 1, 0) ///< Set when -mno-red-zone is enabled.
+CODEGENOPT(DisableTailCalls , 1, 0) ///< Do not emit tail calls.
+CODEGENOPT(EmitDeclMetadata , 1, 0) ///< Emit special metadata indicating what
+ ///< Decl* various IR entities came from.
+ ///< Only useful when running CodeGen as a
+ ///< subroutine.
+CODEGENOPT(EmitGcovArcs , 1, 0) ///< Emit coverage data files, aka. GCDA.
+CODEGENOPT(EmitGcovNotes , 1, 0) ///< Emit coverage "notes" files, aka GCNO.
+CODEGENOPT(EmitOpenCLArgMetadata , 1, 0) ///< Emit OpenCL kernel arg metadata.
+CODEGENOPT(ForbidGuardVariables , 1, 0) ///< Issue errors if C++ guard variables
+ ///< are required.
+CODEGENOPT(FunctionSections , 1, 0) ///< Set when -ffunction-sections is enabled.
+CODEGENOPT(HiddenWeakVTables , 1, 0) ///< Emit weak vtables, RTTI, and thunks with
+ ///< hidden visibility.
+CODEGENOPT(InstrumentFunctions , 1, 0) ///< Set when -finstrument-functions is
+ ///< enabled.
+CODEGENOPT(InstrumentForProfiling , 1, 0) ///< Set when -pg is enabled.
+CODEGENOPT(LessPreciseFPMAD , 1, 0) ///< Enable less precise MAD instructions to
+ ///< be generated.
+CODEGENOPT(MergeAllConstants , 1, 1) ///< Merge identical constants.
+CODEGENOPT(NoCommon , 1, 0) ///< Set when -fno-common or C++ is enabled.
+CODEGENOPT(NoDwarf2CFIAsm , 1, 0) ///< Set when -fno-dwarf2-cfi-asm is enabled.
+CODEGENOPT(NoDwarfDirectoryAsm , 1, 0) ///< Set when -fno-dwarf-directory-asm is
+ ///< enabled.
+CODEGENOPT(NoExecStack , 1, 0) ///< Set when -Wa,--noexecstack is enabled.
+CODEGENOPT(NoGlobalMerge , 1, 0) ///< Set when -mno-global-merge is enabled.
+CODEGENOPT(NoImplicitFloat , 1, 0) ///< Set when -mno-implicit-float is enabled.
+CODEGENOPT(NoInfsFPMath , 1, 0) ///< Assume FP arguments, results not +-Inf.
+CODEGENOPT(NoInline , 1, 0) ///< Set when -fno-inline is enabled.
+ ///< Disables use of the inline keyword.
+CODEGENOPT(NoNaNsFPMath , 1, 0) ///< Assume FP arguments, results not NaN.
+CODEGENOPT(NoZeroInitializedInBSS , 1, 0) ///< -fno-zero-initialized-in-bss.
+/// \brief Method of Objective-C dispatch to use.
+ENUM_CODEGENOPT(ObjCDispatchMethod, ObjCDispatchMethodKind, 2, Legacy)
+CODEGENOPT(OmitLeafFramePointer , 1, 0) ///< Set when -momit-leaf-frame-pointer is
+ ///< enabled.
+VALUE_CODEGENOPT(OptimizationLevel, 3, 0) ///< The -O[0-4] option specified.
+VALUE_CODEGENOPT(OptimizeSize, 2, 0) ///< If -Os (==1) or -Oz (==2) is specified.
+CODEGENOPT(RelaxAll , 1, 0) ///< Relax all machine code instructions.
+CODEGENOPT(RelaxedAliasing , 1, 0) ///< Set when -fno-strict-aliasing is enabled.
+CODEGENOPT(SaveTempLabels , 1, 0) ///< Save temporary labels.
+CODEGENOPT(SimplifyLibCalls , 1, 1) ///< Set when -fbuiltin is enabled.
+CODEGENOPT(SoftFloat , 1, 0) ///< -soft-float.
+CODEGENOPT(StrictEnums , 1, 0) ///< Optimize based on strict enum definition.
+CODEGENOPT(TimePasses , 1, 0) ///< Set when -ftime-report is enabled.
+CODEGENOPT(UnitAtATime , 1, 1) ///< Unused. For mirroring GCC optimization
+ ///< selection.
+CODEGENOPT(UnrollLoops , 1, 0) ///< Control whether loops are unrolled.
+CODEGENOPT(UnsafeFPMath , 1, 0) ///< Allow unsafe floating point optzns.
+CODEGENOPT(UnwindTables , 1, 0) ///< Emit unwind tables.
+
+ /// Attempt to use register sized accesses to bit-fields in structures, when
+ /// possible.
+CODEGENOPT(UseRegisterSizedBitfieldAccess , 1, 0)
+
+CODEGENOPT(VerifyModule , 1, 1) ///< Control whether the module should be run
+ ///< through the LLVM Verifier.
+
+CODEGENOPT(StackRealignment , 1, 0) ///< Control whether to permit stack
+ ///< realignment.
+CODEGENOPT(UseInitArray , 1, 0) ///< Control whether to use .init_array or
+ ///< .ctors.
+VALUE_CODEGENOPT(StackAlignment , 32, 0) ///< Overrides default stack
+ ///< alignment, if not 0.
+CODEGENOPT(DebugColumnInfo, 1, 0) ///< Whether or not to use column information
+ ///< in debug info.
+
+/// The user specified number of registers to be used for integral arguments,
+/// or 0 if unspecified.
+VALUE_CODEGENOPT(NumRegisterParameters, 32, 0)
+
+/// The run-time penalty for bounds checking, or 0 to disable.
+VALUE_CODEGENOPT(BoundsChecking, 8, 0)
+
+/// The lower bound for a buffer to be considered for stack protection.
+VALUE_CODEGENOPT(SSPBufferSize, 32, 0)
+
+/// The kind of generated debug info.
+ENUM_CODEGENOPT(DebugInfo, DebugInfoKind, 2, NoDebugInfo)
+
+/// The kind of inlining to perform.
+ENUM_CODEGENOPT(Inlining, InliningMethod, 2, NoInlining)
+
+/// The default TLS model to use.
+ENUM_CODEGENOPT(DefaultTLSModel, TLSModel, 2, GeneralDynamicTLSModel)
+
+#undef CODEGENOPT
+#undef ENUM_CODEGENOPT
+#undef VALUE_CODEGENOPT
+
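
CodeGenOptions.def is an X-macro database: the includer defines CODEGENOPT (and may leave VALUE_CODEGENOPT and ENUM_CODEGENOPT to the fallback definitions near the top of the file) before including it, and each entry expands in place. A self-contained sketch with a two-entry stand-in database:

    #include <iostream>

    int main() {
      // The includer defines CODEGENOPT; each database entry then expands in
      // place. Two stand-in entries are inlined here where the real code would
      // #include "clang/Frontend/CodeGenOptions.def".
    #define CODEGENOPT(Name, Bits, Default) \
      std::cout << #Name << ": " << Bits << " bit(s), default " << Default << '\n';
      CODEGENOPT(AsmVerbose, 1, 0)
      CODEGENOPT(CXAAtExit, 1, 1)
    #undef CODEGENOPT
    }
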
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
index 3e34093..3567187 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
@@ -19,9 +19,23 @@
namespace clang {
+/// \brief Bitfields of CodeGenOptions, split out from CodeGenOptions to ensure
+/// that this large collection of bitfields is a trivial class type.
+class CodeGenOptionsBase {
+public:
+#define CODEGENOPT(Name, Bits, Default) unsigned Name : Bits;
+#define ENUM_CODEGENOPT(Name, Type, Bits, Default)
+#include "clang/Frontend/CodeGenOptions.def"
+
+protected:
+#define CODEGENOPT(Name, Bits, Default)
+#define ENUM_CODEGENOPT(Name, Type, Bits, Default) unsigned Name : Bits;
+#include "clang/Frontend/CodeGenOptions.def"
+};
+
/// CodeGenOptions - Track various options which control how the code
/// is optimized and passed to the backend.
-class CodeGenOptions {
+class CodeGenOptions : public CodeGenOptionsBase {
public:
enum InliningMethod {
NoInlining, // Perform no inlining whatsoever.
@@ -51,86 +65,6 @@ public:
LocalExecTLSModel
};
- unsigned AsmVerbose : 1; ///< -dA, -fverbose-asm.
- unsigned ObjCAutoRefCountExceptions : 1; ///< Whether ARC should be EH-safe.
- unsigned CUDAIsDevice : 1; ///< Set when compiling for CUDA device.
- unsigned CXAAtExit : 1; ///< Use __cxa_atexit for calling destructors.
- unsigned CXXCtorDtorAliases: 1; ///< Emit complete ctors/dtors as linker
- ///< aliases to base ctors when possible.
- unsigned DataSections : 1; ///< Set when -fdata-sections is enabled.
- unsigned DisableFPElim : 1; ///< Set when -fomit-frame-pointer is enabled.
- unsigned DisableLLVMOpts : 1; ///< Don't run any optimizations, for use in
- ///< getting .bc files that correspond to the
- ///< internal state before optimizations are
- ///< done.
- unsigned DisableRedZone : 1; ///< Set when -mno-red-zone is enabled.
- unsigned DisableTailCalls : 1; ///< Do not emit tail calls.
- unsigned EmitDeclMetadata : 1; ///< Emit special metadata indicating what
- ///< Decl* various IR entities came from. Only
- ///< useful when running CodeGen as a
- ///< subroutine.
- unsigned EmitGcovArcs : 1; ///< Emit coverage data files, aka. GCDA.
- unsigned EmitGcovNotes : 1; ///< Emit coverage "notes" files, aka GCNO.
- unsigned EmitOpenCLArgMetadata : 1; ///< Emit OpenCL kernel arg metadata.
- unsigned EmitMicrosoftInlineAsm : 1; ///< Enable emission of MS-style inline
- ///< assembly.
- unsigned ForbidGuardVariables : 1; ///< Issue errors if C++ guard variables
- ///< are required.
- unsigned FunctionSections : 1; ///< Set when -ffunction-sections is enabled.
- unsigned HiddenWeakTemplateVTables : 1; ///< Emit weak vtables and RTTI for
- ///< template classes with hidden visibility
- unsigned HiddenWeakVTables : 1; ///< Emit weak vtables, RTTI, and thunks with
- ///< hidden visibility.
- unsigned InstrumentFunctions : 1; ///< Set when -finstrument-functions is
- ///< enabled.
- unsigned InstrumentForProfiling : 1; ///< Set when -pg is enabled.
- unsigned LessPreciseFPMAD : 1; ///< Enable less precise MAD instructions to
- ///< be generated.
- unsigned MergeAllConstants : 1; ///< Merge identical constants.
- unsigned NoCommon : 1; ///< Set when -fno-common or C++ is enabled.
- unsigned NoDwarf2CFIAsm : 1; ///< Set when -fno-dwarf2-cfi-asm is enabled.
- unsigned NoDwarfDirectoryAsm : 1; ///< Set when -fno-dwarf-directory-asm is
- ///< enabled.
- unsigned NoExecStack : 1; ///< Set when -Wa,--noexecstack is enabled.
- unsigned NoGlobalMerge : 1; ///< Set when -mno-global-merge is enabled.
- unsigned NoImplicitFloat : 1; ///< Set when -mno-implicit-float is enabled.
- unsigned NoInfsFPMath : 1; ///< Assume FP arguments, results not +-Inf.
- unsigned NoInline : 1; ///< Set when -fno-inline is enabled. Disables
- ///< use of the inline keyword.
- unsigned NoNaNsFPMath : 1; ///< Assume FP arguments, results not NaN.
- unsigned NoZeroInitializedInBSS : 1; ///< -fno-zero-initialized-in-bss.
- unsigned ObjCDispatchMethod : 2; ///< Method of Objective-C dispatch to use.
- unsigned OmitLeafFramePointer : 1; ///< Set when -momit-leaf-frame-pointer is
- ///< enabled.
- unsigned OptimizationLevel : 3; ///< The -O[0-4] option specified.
- unsigned OptimizeSize : 2; ///< If -Os (==1) or -Oz (==2) is specified.
- unsigned RelaxAll : 1; ///< Relax all machine code instructions.
- unsigned RelaxedAliasing : 1; ///< Set when -fno-strict-aliasing is enabled.
- unsigned SaveTempLabels : 1; ///< Save temporary labels.
- unsigned SimplifyLibCalls : 1; ///< Set when -fbuiltin is enabled.
- unsigned SoftFloat : 1; ///< -soft-float.
- unsigned StrictEnums : 1; ///< Optimize based on strict enum definition.
- unsigned TimePasses : 1; ///< Set when -ftime-report is enabled.
- unsigned UnitAtATime : 1; ///< Unused. For mirroring GCC optimization
- ///< selection.
- unsigned UnrollLoops : 1; ///< Control whether loops are unrolled.
- unsigned UnsafeFPMath : 1; ///< Allow unsafe floating point optzns.
- unsigned UnwindTables : 1; ///< Emit unwind tables.
-
- /// Attempt to use register sized accesses to bit-fields in structures, when
- /// possible.
- unsigned UseRegisterSizedBitfieldAccess : 1;
-
- unsigned VerifyModule : 1; ///< Control whether the module should be run
- ///< through the LLVM Verifier.
-
- unsigned StackRealignment : 1; ///< Control whether to permit stack
- ///< realignment.
- unsigned UseInitArray : 1; ///< Control whether to use .init_array or
- ///< .ctors.
- unsigned StackAlignment; ///< Overrides default stack alignment,
- ///< if not 0.
-
/// The code model to use (-mcmodel).
std::string CodeModel;
@@ -144,9 +78,6 @@ public:
/// The string to embed in debug information as the current working directory.
std::string DebugCompilationDir;
- /// The kind of generated debug info.
- DebugInfoKind DebugInfo;
-
/// The string to embed in the debug information for the compile unit, if
/// non-empty.
std::string DwarfDebugFlags;
@@ -160,9 +91,6 @@ public:
/// The name of the bitcode file to link before optzns.
std::string LinkBitcodeFile;
- /// The kind of inlining to perform.
- InliningMethod Inlining;
-
/// The user provided name for the "main file", if non-empty. This is useful
/// in situations where the input file name does not match the original input
/// file, for example with -save-temps.
@@ -178,79 +106,21 @@ public:
/// A list of command-line options to forward to the LLVM backend.
std::vector<std::string> BackendOptions;
- /// The user specified number of registers to be used for integral arguments,
- /// or 0 if unspecified.
- unsigned NumRegisterParameters;
-
- /// The run-time penalty for bounds checking, or 0 to disable.
- unsigned char BoundsChecking;
-
- /// The default TLS model to use.
- TLSModel DefaultTLSModel;
-
public:
+ // Define accessors/mutators for code generation options of enumeration type.
+#define CODEGENOPT(Name, Bits, Default)
+#define ENUM_CODEGENOPT(Name, Type, Bits, Default) \
+ Type get##Name() const { return static_cast<Type>(Name); } \
+ void set##Name(Type Value) { Name = static_cast<unsigned>(Value); }
+#include "clang/Frontend/CodeGenOptions.def"
+
CodeGenOptions() {
- AsmVerbose = 0;
- CUDAIsDevice = 0;
- CXAAtExit = 1;
- CXXCtorDtorAliases = 0;
- DataSections = 0;
- DisableFPElim = 0;
- DisableLLVMOpts = 0;
- DisableRedZone = 0;
- DisableTailCalls = 0;
- EmitDeclMetadata = 0;
- EmitGcovArcs = 0;
- EmitGcovNotes = 0;
- EmitOpenCLArgMetadata = 0;
- EmitMicrosoftInlineAsm = 0;
- ForbidGuardVariables = 0;
- FunctionSections = 0;
- HiddenWeakTemplateVTables = 0;
- HiddenWeakVTables = 0;
- InstrumentFunctions = 0;
- InstrumentForProfiling = 0;
- LessPreciseFPMAD = 0;
- MergeAllConstants = 1;
- NoCommon = 0;
- NoDwarf2CFIAsm = 0;
- NoImplicitFloat = 0;
- NoInfsFPMath = 0;
- NoInline = 0;
- NoNaNsFPMath = 0;
- NoZeroInitializedInBSS = 0;
- NumRegisterParameters = 0;
- ObjCAutoRefCountExceptions = 0;
- ObjCDispatchMethod = Legacy;
- OmitLeafFramePointer = 0;
- OptimizationLevel = 0;
- OptimizeSize = 0;
- RelaxAll = 0;
- RelaxedAliasing = 0;
- SaveTempLabels = 0;
- SimplifyLibCalls = 1;
- SoftFloat = 0;
- StrictEnums = 0;
- TimePasses = 0;
- UnitAtATime = 1;
- UnrollLoops = 0;
- UnsafeFPMath = 0;
- UnwindTables = 0;
- UseRegisterSizedBitfieldAccess = 0;
- VerifyModule = 1;
- StackRealignment = 0;
- StackAlignment = 0;
- BoundsChecking = 0;
- UseInitArray = 0;
+#define CODEGENOPT(Name, Bits, Default) Name = Default;
+#define ENUM_CODEGENOPT(Name, Type, Bits, Default) \
+ set##Name(Default);
+#include "clang/Frontend/CodeGenOptions.def"
- DebugInfo = NoDebugInfo;
- Inlining = NoInlining;
RelocationModel = "pic";
- DefaultTLSModel = GeneralDynamicTLSModel;
- }
-
- ObjCDispatchMethodKind getObjCDispatchMethod() const {
- return ObjCDispatchMethodKind(ObjCDispatchMethod);
}
};
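
After this hunk, every option lives as a raw unsigned bitfield in the trivial CodeGenOptionsBase, and typed get/set wrappers for enum-valued options are stamped out from the same .def entries. A self-contained sketch of that layout using the InliningMethod enum from the hunk, with stand-in class names:

    #include <iostream>

    enum InliningMethod { NoInlining, NormalInlining, OnlyAlwaysInlining };

    class OptionsBase { // stand-in for CodeGenOptionsBase: trivial, all bitfields
    protected:
      unsigned Inlining : 2; // enum stored as raw bits
    };

    class Options : public OptionsBase { // stand-in for CodeGenOptions
    public:
      // In the real header these accessors are generated by ENUM_CODEGENOPT.
      InliningMethod getInlining() const {
        return static_cast<InliningMethod>(Inlining);
      }
      void setInlining(InliningMethod Value) {
        Inlining = static_cast<unsigned>(Value);
      }
      Options() { setInlining(NoInlining); }
    };

    int main() {
      Options Opts;
      Opts.setInlining(OnlyAlwaysInlining);
      std::cout << (Opts.getInlining() == OnlyAlwaysInlining) << '\n'; // 1
    }
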
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
index b28e103..2f3dc3f 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
@@ -11,6 +11,7 @@
#define LLVM_CLANG_FRONTEND_COMPILERINSTANCE_H_
#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/ModuleLoader.h"
#include "llvm/ADT/ArrayRef.h"
@@ -130,8 +131,8 @@ class CompilerInstance : public ModuleLoader {
/// The list of active output files.
std::list<OutputFile> OutputFiles;
- void operator=(const CompilerInstance &); // DO NOT IMPLEMENT
- CompilerInstance(const CompilerInstance&); // DO NOT IMPLEMENT
+ CompilerInstance(const CompilerInstance &) LLVM_DELETED_FUNCTION;
+ void operator=(const CompilerInstance &) LLVM_DELETED_FUNCTION;
public:
CompilerInstance();
~CompilerInstance();
@@ -189,10 +190,7 @@ public:
/// @name Forwarding Methods
/// {
- AnalyzerOptions &getAnalyzerOpts() {
- return Invocation->getAnalyzerOpts();
- }
- const AnalyzerOptions &getAnalyzerOpts() const {
+ AnalyzerOptionsRef getAnalyzerOpts() {
return Invocation->getAnalyzerOpts();
}
@@ -393,7 +391,7 @@ public:
ASTConsumer *takeASTConsumer() { return Consumer.take(); }
/// setASTConsumer - Replace the current AST consumer; the compiler instance
- /// takes ownership of \arg Value.
+ /// takes ownership of \p Value.
void setASTConsumer(ASTConsumer *Value);
/// }
@@ -433,7 +431,7 @@ public:
}
/// setCodeCompletionConsumer - Replace the current code completion consumer;
- /// the compiler instance takes ownership of \arg Value.
+ /// the compiler instance takes ownership of \p Value.
void setCodeCompletionConsumer(CodeCompleteConsumer *Value);
/// }
@@ -488,7 +486,7 @@ public:
/// Create a DiagnosticsEngine object with the TextDiagnosticPrinter.
///
- /// The \arg Argc and \arg Argv arguments are used only for logging purposes,
+ /// The \p Argc and \p Argv arguments are used only for logging purposes,
/// when the diagnostic options indicate that the compiler should output
/// logging information.
///
@@ -498,8 +496,7 @@ public:
/// releasing the returned DiagnosticsEngine's client eventually.
///
/// \param Opts - The diagnostic options; note that the created text
- /// diagnostic object contains a reference to these options and its lifetime
- /// must extend past that of the diagnostic engine.
+ /// diagnostic object contains a reference to these options.
///
/// \param Client If non-NULL, a diagnostic client that will be
/// attached to (and, then, owned by) the returned DiagnosticsEngine
@@ -510,7 +507,7 @@ public:
///
/// \return The new object on success, or null on failure.
static IntrusiveRefCntPtr<DiagnosticsEngine>
- createDiagnostics(const DiagnosticOptions &Opts, int Argc,
+ createDiagnostics(DiagnosticOptions *Opts, int Argc,
const char* const *Argv,
DiagnosticConsumer *Client = 0,
bool ShouldOwnClient = true,
@@ -534,7 +531,6 @@ public:
/// context.
void createPCHExternalASTSource(StringRef Path,
bool DisablePCHValidation,
- bool DisableStatCache,
bool AllowPCHWithCompilerErrors,
void *DeserializationListener);
@@ -544,7 +540,6 @@ public:
static ExternalASTSource *
createPCHExternalASTSource(StringRef Path, const std::string &Sysroot,
bool DisablePCHValidation,
- bool DisableStatCache,
bool AllowPCHWithCompilerErrors,
Preprocessor &PP, ASTContext &Context,
void *DeserializationListener, bool Preamble);
@@ -555,8 +550,7 @@ public:
void createCodeCompletionConsumer();
/// Create a code completion consumer to print code completion results, at
- /// \arg Filename, \arg Line, and \arg Column, to the given output stream \arg
- /// OS.
+ /// \p Filename, \p Line, and \p Column, to the given output stream \p OS.
static CodeCompleteConsumer *
createCodeCompletionConsumer(Preprocessor &PP, const std::string &Filename,
unsigned Line, unsigned Column,
@@ -596,15 +590,15 @@ public:
/// Create a new output file, optionally deriving the output path name.
///
- /// If \arg OutputPath is empty, then createOutputFile will derive an output
- /// path location as \arg BaseInput, with any suffix removed, and \arg
- /// Extension appended. If OutputPath is not stdout and \arg UseTemporary
+ /// If \p OutputPath is empty, then createOutputFile will derive an output
+ /// path location as \p BaseInput, with any suffix removed, and \p Extension
+ /// appended. If \p OutputPath is not stdout and \p UseTemporary
/// is true, createOutputFile will create a new temporary file that must be
- /// renamed to OutputPath in the end.
+ /// renamed to \p OutputPath in the end.
///
/// \param OutputPath - If given, the path to the output file.
/// \param Error [out] - On failure, the error message.
- /// \param BaseInput - If \arg OutputPath is empty, the input path name to use
+ /// \param BaseInput - If \p OutputPath is empty, the input path name to use
/// for deriving the output path.
/// \param Extension - The extension to use for derived output names.
/// \param Binary - The mode to open the file in.
@@ -613,7 +607,7 @@ public:
/// multithreaded use, as the underlying signal mechanism is not reentrant
/// \param UseTemporary - Create a new temporary file that must be renamed to
/// OutputPath in the end.
- /// \param CreateMissingDirectories - When \arg UseTemporary is true, create
+ /// \param CreateMissingDirectories - When \p UseTemporary is true, create
/// missing directories in the output path.
/// \param ResultPathName [out] - If given, the result path name will be
/// stored here on success.
@@ -637,15 +631,13 @@ public:
/// as the main file.
///
/// \return True on success.
- bool InitializeSourceManager(StringRef InputFile,
- SrcMgr::CharacteristicKind Kind = SrcMgr::C_User);
+ bool InitializeSourceManager(const FrontendInputFile &Input);
/// InitializeSourceManager - Initialize the source manager to set InputFile
/// as the main file.
///
/// \return True on success.
- static bool InitializeSourceManager(StringRef InputFile,
- SrcMgr::CharacteristicKind Kind,
+ static bool InitializeSourceManager(const FrontendInputFile &Input,
DiagnosticsEngine &Diags,
FileManager &FileMgr,
SourceManager &SourceMgr,
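
The createOutputFile documentation above specifies the derivation rule: with an empty OutputPath, the result is BaseInput with its suffix removed and Extension appended. A sketch of just that rule; illustrative only, since the real method also handles "-" (stdout) and temporary files renamed into place on success:

    #include <iostream>
    #include <string>

    static std::string deriveOutputPath(const std::string &OutputPath,
                                        const std::string &BaseInput,
                                        const std::string &Extension) {
      if (!OutputPath.empty())
        return OutputPath; // an explicit path wins
      std::string::size_type Dot = BaseInput.find_last_of('.');
      std::string Stem =
          Dot == std::string::npos ? BaseInput : BaseInput.substr(0, Dot);
      return Stem + "." + Extension; // suffix removed, Extension appended
    }

    int main() {
      std::cout << deriveOutputPath("", "foo.c", "s") << '\n';      // foo.s
      std::cout << deriveOutputPath("out.s", "foo.c", "s") << '\n'; // out.s
    }
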
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
index d6fe003..1314956 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
@@ -13,15 +13,15 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/FileSystemOptions.h"
-#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/Frontend/MigratorOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/DependencyOutputOptions.h"
-#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/FrontendOptions.h"
-#include "clang/Frontend/HeaderSearchOptions.h"
#include "clang/Frontend/LangStandard.h"
-#include "clang/Frontend/PreprocessorOptions.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringRef.h"
@@ -52,6 +52,19 @@ class CompilerInvocationBase : public RefCountedBase<CompilerInvocation> {
protected:
/// Options controlling the language variant.
IntrusiveRefCntPtr<LangOptions> LangOpts;
+
+ /// Options controlling the target.
+ IntrusiveRefCntPtr<TargetOptions> TargetOpts;
+
+ /// Options controlling the diagnostic engine.
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagnosticOpts;
+
+ /// Options controlling the \#include directive.
+ IntrusiveRefCntPtr<HeaderSearchOptions> HeaderSearchOpts;
+
+ /// Options controlling the preprocessor (aside from \#include handling).
+ IntrusiveRefCntPtr<PreprocessorOptions> PreprocessorOpts;
+
public:
CompilerInvocationBase();
@@ -59,6 +72,23 @@ public:
LangOptions *getLangOpts() { return LangOpts.getPtr(); }
const LangOptions *getLangOpts() const { return LangOpts.getPtr(); }
+
+ TargetOptions &getTargetOpts() { return *TargetOpts.getPtr(); }
+ const TargetOptions &getTargetOpts() const {
+ return *TargetOpts.getPtr();
+ }
+
+ DiagnosticOptions &getDiagnosticOpts() const { return *DiagnosticOpts; }
+
+ HeaderSearchOptions &getHeaderSearchOpts() { return *HeaderSearchOpts; }
+ const HeaderSearchOptions &getHeaderSearchOpts() const {
+ return *HeaderSearchOpts;
+ }
+
+ PreprocessorOptions &getPreprocessorOpts() { return *PreprocessorOpts; }
+ const PreprocessorOptions &getPreprocessorOpts() const {
+ return *PreprocessorOpts;
+ }
};
/// \brief Helper class for holding the data necessary to invoke the compiler.
@@ -68,7 +98,7 @@ public:
/// options, the warning flags, and so on.
class CompilerInvocation : public CompilerInvocationBase {
/// Options controlling the static analyzer.
- AnalyzerOptions AnalyzerOpts;
+ AnalyzerOptionsRef AnalyzerOpts;
MigratorOptions MigratorOpts;
@@ -78,29 +108,17 @@ class CompilerInvocation : public CompilerInvocationBase {
/// Options controlling dependency output.
DependencyOutputOptions DependencyOutputOpts;
- /// Options controlling the diagnostic engine.
- DiagnosticOptions DiagnosticOpts;
-
/// Options controlling file system operations.
FileSystemOptions FileSystemOpts;
/// Options controlling the frontend itself.
FrontendOptions FrontendOpts;
- /// Options controlling the \#include directive.
- HeaderSearchOptions HeaderSearchOpts;
-
- /// Options controlling the preprocessor (aside from \#include handling).
- PreprocessorOptions PreprocessorOpts;
-
/// Options controlling preprocessed output.
PreprocessorOutputOptions PreprocessorOutputOpts;
- /// Options controlling the target.
- TargetOptions TargetOpts;
-
public:
- CompilerInvocation() {}
+ CompilerInvocation() : AnalyzerOpts(new AnalyzerOptions()) {}
/// @name Utility Methods
/// @{
@@ -127,10 +145,6 @@ public:
/// executable), for finding the builtin compiler path.
static std::string GetResourcesPath(const char *Argv0, void *MainAddr);
- /// \brief Convert the CompilerInvocation to a list of strings suitable for
- /// passing to CreateFromArgs.
- void toArgs(std::vector<std::string> &Res) const;
-
/// \brief Set language defaults for the given input language and
/// language standard in the given LangOptions object.
///
@@ -148,8 +162,7 @@ public:
/// @name Option Subgroups
/// @{
- AnalyzerOptions &getAnalyzerOpts() { return AnalyzerOpts; }
- const AnalyzerOptions &getAnalyzerOpts() const {
+ AnalyzerOptionsRef getAnalyzerOpts() const {
return AnalyzerOpts;
}
@@ -170,29 +183,16 @@ public:
return DependencyOutputOpts;
}
- DiagnosticOptions &getDiagnosticOpts() { return DiagnosticOpts; }
- const DiagnosticOptions &getDiagnosticOpts() const { return DiagnosticOpts; }
-
FileSystemOptions &getFileSystemOpts() { return FileSystemOpts; }
const FileSystemOptions &getFileSystemOpts() const {
return FileSystemOpts;
}
- HeaderSearchOptions &getHeaderSearchOpts() { return HeaderSearchOpts; }
- const HeaderSearchOptions &getHeaderSearchOpts() const {
- return HeaderSearchOpts;
- }
-
FrontendOptions &getFrontendOpts() { return FrontendOpts; }
const FrontendOptions &getFrontendOpts() const {
return FrontendOpts;
}
- PreprocessorOptions &getPreprocessorOpts() { return PreprocessorOpts; }
- const PreprocessorOptions &getPreprocessorOpts() const {
- return PreprocessorOpts;
- }
-
PreprocessorOutputOptions &getPreprocessorOutputOpts() {
return PreprocessorOutputOpts;
}
@@ -200,11 +200,6 @@ public:
return PreprocessorOutputOpts;
}
- TargetOptions &getTargetOpts() { return TargetOpts; }
- const TargetOptions &getTargetOpts() const {
- return TargetOpts;
- }
-
/// @}
};
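
CompilerInvocationBase now holds the diagnostic, target, header-search, and preprocessor options in IntrusiveRefCntPtr handles, so consumers such as the DiagnosticRenderer below share ownership instead of borrowing a caller-owned reference. A self-contained sketch of intrusive reference counting with simplified stand-ins, not llvm::IntrusiveRefCntPtr itself:

    #include <cassert>

    struct RefCounted { // stand-in for llvm::RefCountedBase
      mutable unsigned Refs;
      RefCounted() : Refs(0) {}
      void retain() const { ++Refs; }
      void release() const { if (--Refs == 0) delete this; }
    protected:
      virtual ~RefCounted() {}
    };

    template <typename T> class RefPtr { // stand-in for IntrusiveRefCntPtr
      T *Ptr;
    public:
      explicit RefPtr(T *P) : Ptr(P) { Ptr->retain(); }
      RefPtr(const RefPtr &O) : Ptr(O.Ptr) { Ptr->retain(); }
      ~RefPtr() { Ptr->release(); }
      T *operator->() const { return Ptr; }
    private:
      RefPtr &operator=(const RefPtr &); // not implemented
    };

    struct DiagOpts : RefCounted { unsigned ErrorLimit; DiagOpts() : ErrorLimit(0) {} };

    int main() {
      RefPtr<DiagOpts> Engine(new DiagOpts()); // e.g. the diagnostics engine
      {
        RefPtr<DiagOpts> Renderer(Engine);     // a renderer shares ownership
        assert(Engine->Refs == 2);
      }
      assert(Engine->Refs == 1); // options safely outlive the renderer
      return 0;
    }
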
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
deleted file mode 100644
index 8dec37c..0000000
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
+++ /dev/null
@@ -1,111 +0,0 @@
-//===--- DiagnosticOptions.h ------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_FRONTEND_DIAGNOSTICOPTIONS_H
-#define LLVM_CLANG_FRONTEND_DIAGNOSTICOPTIONS_H
-
-#include "clang/Basic/Diagnostic.h"
-
-#include <string>
-#include <vector>
-
-namespace clang {
-
-/// DiagnosticOptions - Options for controlling the compiler diagnostics
-/// engine.
-class DiagnosticOptions {
-public:
- unsigned IgnoreWarnings : 1; /// -w
- unsigned NoRewriteMacros : 1; /// -Wno-rewrite-macros
- unsigned Pedantic : 1; /// -pedantic
- unsigned PedanticErrors : 1; /// -pedantic-errors
- unsigned ShowColumn : 1; /// Show column number on diagnostics.
- unsigned ShowLocation : 1; /// Show source location information.
- unsigned ShowCarets : 1; /// Show carets in diagnostics.
- unsigned ShowFixits : 1; /// Show fixit information.
- unsigned ShowSourceRanges : 1; /// Show source ranges in numeric form.
- unsigned ShowParseableFixits : 1; /// Show machine parseable fix-its.
- unsigned ShowOptionNames : 1; /// Show the option name for mappable
- /// diagnostics.
- unsigned ShowNoteIncludeStack : 1; /// Show include stacks for notes.
- unsigned ShowCategories : 2; /// Show categories: 0 -> none, 1 -> Number,
- /// 2 -> Full Name.
-
- unsigned Format : 2; /// Format for diagnostics:
- enum TextDiagnosticFormat { Clang, Msvc, Vi };
-
- unsigned ShowColors : 1; /// Show diagnostics with ANSI color sequences.
- unsigned ShowOverloads : 1; /// Overload candidates to show. Values from
- /// DiagnosticsEngine::OverloadsShown
- unsigned VerifyDiagnostics: 1; /// Check that diagnostics match the expected
- /// diagnostics, indicated by markers in the
- /// input source file.
-
- unsigned ElideType: 1; /// Elide identical types in template diffing
- unsigned ShowTemplateTree: 1; /// Print a template tree when diffing
-
- unsigned ErrorLimit; /// Limit # errors emitted.
- unsigned MacroBacktraceLimit; /// Limit depth of macro expansion backtrace.
- unsigned TemplateBacktraceLimit; /// Limit depth of instantiation backtrace.
- unsigned ConstexprBacktraceLimit; /// Limit depth of constexpr backtrace.
-
- /// The distance between tab stops.
- unsigned TabStop;
- enum { DefaultTabStop = 8, MaxTabStop = 100,
- DefaultMacroBacktraceLimit = 6,
- DefaultTemplateBacktraceLimit = 10,
- DefaultConstexprBacktraceLimit = 10 };
-
- /// Column limit for formatting message diagnostics, or 0 if unused.
- unsigned MessageLength;
-
- /// If non-empty, a file to log extended build information to, for development
- /// testing and analysis.
- std::string DumpBuildInformation;
-
- /// The file to log diagnostic output to.
- std::string DiagnosticLogFile;
-
- /// The file to serialize diagnostics to (non-appending).
- std::string DiagnosticSerializationFile;
-
- /// The list of -W... options used to alter the diagnostic mappings, with the
- /// prefixes removed.
- std::vector<std::string> Warnings;
-
-public:
- DiagnosticOptions() {
- IgnoreWarnings = 0;
- TabStop = DefaultTabStop;
- MessageLength = 0;
- NoRewriteMacros = 0;
- Pedantic = 0;
- PedanticErrors = 0;
- ShowCarets = 1;
- ShowColors = 0;
- ShowOverloads = DiagnosticsEngine::Ovl_All;
- ShowColumn = 1;
- ShowFixits = 1;
- ShowLocation = 1;
- ShowOptionNames = 0;
- ShowCategories = 0;
- Format = Clang;
- ShowSourceRanges = 0;
- ShowParseableFixits = 0;
- VerifyDiagnostics = 0;
- ErrorLimit = 0;
- TemplateBacktraceLimit = DefaultTemplateBacktraceLimit;
- MacroBacktraceLimit = DefaultMacroBacktraceLimit;
- ConstexprBacktraceLimit = DefaultConstexprBacktraceLimit;
- }
-};
-
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
index 09d7ecb..086bb13 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
@@ -44,7 +44,7 @@ typedef llvm::PointerUnion<const Diagnostic *,
class DiagnosticRenderer {
protected:
const LangOptions &LangOpts;
- const DiagnosticOptions &DiagOpts;
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
/// \brief The location of the previous diagnostic if known.
///
@@ -66,7 +66,7 @@ protected:
DiagnosticsEngine::Level LastLevel;
DiagnosticRenderer(const LangOptions &LangOpts,
- const DiagnosticOptions &DiagOpts);
+ DiagnosticOptions *DiagOpts);
virtual ~DiagnosticRenderer();
@@ -124,7 +124,7 @@ public:
/// \param Ranges The underlined ranges for this code snippet.
/// \param FixItHints The FixIt hints active for this diagnostic.
/// \param SM The SourceManager; will be null if the diagnostic came from the
- /// frontend, thus \param Loc will be invalid.
+ /// frontend, thus \p Loc will be invalid.
void emitDiagnostic(SourceLocation Loc, DiagnosticsEngine::Level Level,
StringRef Message, ArrayRef<CharSourceRange> Ranges,
ArrayRef<FixItHint> FixItHints,
@@ -139,7 +139,7 @@ public:
class DiagnosticNoteRenderer : public DiagnosticRenderer {
public:
DiagnosticNoteRenderer(const LangOptions &LangOpts,
- const DiagnosticOptions &DiagOpts)
+ DiagnosticOptions *DiagOpts)
: DiagnosticRenderer(LangOpts, DiagOpts) {}
virtual ~DiagnosticNoteRenderer();
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
index c0056de..3283444 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
@@ -109,7 +109,7 @@ public:
/// @{
bool isCurrentFileAST() const {
- assert(!CurrentInput.File.empty() && "No current file!");
+ assert(!CurrentInput.isEmpty() && "No current file!");
return CurrentASTUnit != 0;
}
@@ -117,14 +117,14 @@ public:
return CurrentInput;
}
- const std::string &getCurrentFile() const {
- assert(!CurrentInput.File.empty() && "No current file!");
- return CurrentInput.File;
+ const StringRef getCurrentFile() const {
+ assert(!CurrentInput.isEmpty() && "No current file!");
+ return CurrentInput.getFile();
}
InputKind getCurrentFileKind() const {
- assert(!CurrentInput.File.empty() && "No current file!");
- return CurrentInput.Kind;
+ assert(!CurrentInput.isEmpty() && "No current file!");
+ return CurrentInput.getKind();
}
ASTUnit &getCurrentASTUnit() const {
@@ -167,8 +167,8 @@ public:
/// @name Public Action Interface
/// @{
- /// BeginSourceFile - Prepare the action for processing the input file \arg
- /// Filename; this is run after the options and frontend have been
+ /// BeginSourceFile - Prepare the action for processing the input file
+ /// \p Input; this is run after the options and frontend have been
/// initialized, but prior to executing any per-file processing.
///
/// \param CI - The compiler instance this action is being run from. The
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
index ce1cd9b..db2f5a5 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
@@ -16,6 +16,10 @@
#include <string>
#include <vector>
+namespace llvm {
+class MemoryBuffer;
+}
+
namespace clang {
namespace frontend {
@@ -72,19 +76,41 @@ enum InputKind {
/// \brief An input file for the front end.
-struct FrontendInputFile {
+class FrontendInputFile {
/// \brief The file name, or "-" to read from standard input.
std::string File;
+ llvm::MemoryBuffer *Buffer;
+
/// \brief The kind of input, e.g., C source, AST file, LLVM IR.
InputKind Kind;
/// \brief Whether we're dealing with a 'system' input (vs. a 'user' input).
bool IsSystem;
-
- FrontendInputFile() : Kind(IK_None) { }
+
+public:
+ FrontendInputFile() : Buffer(0), Kind(IK_None) { }
FrontendInputFile(StringRef File, InputKind Kind, bool IsSystem = false)
- : File(File.str()), Kind(Kind), IsSystem(IsSystem) { }
+ : File(File.str()), Buffer(0), Kind(Kind), IsSystem(IsSystem) { }
+ FrontendInputFile(llvm::MemoryBuffer *buffer, InputKind Kind,
+ bool IsSystem = false)
+ : Buffer(buffer), Kind(Kind), IsSystem(IsSystem) { }
+
+ InputKind getKind() const { return Kind; }
+ bool isSystem() const { return IsSystem; }
+
+ bool isEmpty() const { return File.empty() && Buffer == 0; }
+ bool isFile() const { return !isBuffer(); }
+ bool isBuffer() const { return Buffer != 0; }
+
+ StringRef getFile() const {
+ assert(isFile());
+ return File;
+ }
+ llvm::MemoryBuffer *getBuffer() const {
+ assert(isBuffer());
+ return Buffer;
+ }
};
/// FrontendOptions - Options for controlling the behavior of the frontend.
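
FrontendInputFile is now a tagged file-or-buffer pair: exactly one of a path or an in-memory buffer is active, and the accessors assert on the active side. A self-contained sketch with MemBuffer standing in for llvm::MemoryBuffer:

    #include <cassert>
    #include <string>

    struct MemBuffer { std::string Data; }; // stand-in for llvm::MemoryBuffer

    class InputFile { // stand-in mirroring the FrontendInputFile above
      std::string File;  // "-" means standard input
      MemBuffer *Buffer;
    public:
      explicit InputFile(const std::string &F) : File(F), Buffer(nullptr) {}
      explicit InputFile(MemBuffer *B) : Buffer(B) {}

      bool isBuffer() const { return Buffer != nullptr; }
      bool isFile() const { return !isBuffer(); }
      const std::string &getFile() const { assert(isFile()); return File; }
      MemBuffer *getBuffer() const { assert(isBuffer()); return Buffer; }
    };

    int main() {
      MemBuffer Code = {"int main() {}"};
      InputFile FromMemory(&Code), FromDisk("main.c");
      assert(FromMemory.isBuffer() && FromDisk.isFile());
      assert(FromMemory.getBuffer()->Data == "int main() {}");
      return 0;
    }
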
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h
index e6f4403..f07cb02 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h
@@ -18,16 +18,17 @@ namespace clang {
namespace frontend {
enum LangFeatures {
- BCPLComment = (1 << 0),
+ LineComment = (1 << 0),
C89 = (1 << 1),
C99 = (1 << 2),
C11 = (1 << 3),
CPlusPlus = (1 << 4),
CPlusPlus0x = (1 << 5),
- Digraphs = (1 << 6),
- GNUMode = (1 << 7),
- HexFloat = (1 << 8),
- ImplicitInt = (1 << 9)
+ CPlusPlus1y = (1 << 6),
+ Digraphs = (1 << 7),
+ GNUMode = (1 << 8),
+ HexFloat = (1 << 9),
+ ImplicitInt = (1 << 10)
};
}
@@ -53,8 +54,8 @@ public:
/// getDescription - Get the description of this standard.
const char *getDescription() const { return Description; }
- /// hasBCPLComments - Language supports '//' comments.
- bool hasBCPLComments() const { return Flags & frontend::BCPLComment; }
+ /// Language supports '//' comments.
+ bool hasLineComments() const { return Flags & frontend::LineComment; }
/// isC89 - Language is a superset of C89.
bool isC89() const { return Flags & frontend::C89; }
@@ -71,6 +72,9 @@ public:
/// isCPlusPlus0x - Language is a C++0x variant.
bool isCPlusPlus0x() const { return Flags & frontend::CPlusPlus0x; }
+ /// isCPlusPlus1y - Language is a C++1y variant.
+ bool isCPlusPlus1y() const { return Flags & frontend::CPlusPlus1y; }
+
/// hasDigraphs - Language supports digraphs.
bool hasDigraphs() const { return Flags & frontend::Digraphs; }
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
index a604d4b..10807b7 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
@@ -36,91 +36,99 @@ LANGSTANDARD(c94, "iso9899:199409",
LANGSTANDARD(gnu89, "gnu89",
"ISO C 1990 with GNU extensions",
- BCPLComment | C89 | Digraphs | GNUMode | ImplicitInt)
+ LineComment | C89 | Digraphs | GNUMode | ImplicitInt)
LANGSTANDARD(gnu90, "gnu90",
"ISO C 1990 with GNU extensions",
- BCPLComment | C89 | Digraphs | GNUMode | ImplicitInt)
+ LineComment | C89 | Digraphs | GNUMode | ImplicitInt)
// C99-ish modes
LANGSTANDARD(c99, "c99",
"ISO C 1999",
- BCPLComment | C99 | Digraphs | HexFloat)
+ LineComment | C99 | Digraphs | HexFloat)
LANGSTANDARD(c9x, "c9x",
"ISO C 1999",
- BCPLComment | C99 | Digraphs | HexFloat)
+ LineComment | C99 | Digraphs | HexFloat)
LANGSTANDARD(iso9899_1999,
"iso9899:1999", "ISO C 1999",
- BCPLComment | C99 | Digraphs | HexFloat)
+ LineComment | C99 | Digraphs | HexFloat)
LANGSTANDARD(iso9899_199x,
"iso9899:199x", "ISO C 1999",
- BCPLComment | C99 | Digraphs | HexFloat)
+ LineComment | C99 | Digraphs | HexFloat)
LANGSTANDARD(gnu99, "gnu99",
"ISO C 1999 with GNU extensions",
- BCPLComment | C99 | Digraphs | GNUMode | HexFloat)
+ LineComment | C99 | Digraphs | GNUMode | HexFloat)
LANGSTANDARD(gnu9x, "gnu9x",
"ISO C 1999 with GNU extensions",
- BCPLComment | C99 | Digraphs | GNUMode | HexFloat)
+ LineComment | C99 | Digraphs | GNUMode | HexFloat)
// C11 modes
LANGSTANDARD(c11, "c11",
"ISO C 2011",
- BCPLComment | C99 | C11 | Digraphs | HexFloat)
+ LineComment | C99 | C11 | Digraphs | HexFloat)
LANGSTANDARD(c1x, "c1x",
"ISO C 2011",
- BCPLComment | C99 | C11 | Digraphs | HexFloat)
+ LineComment | C99 | C11 | Digraphs | HexFloat)
LANGSTANDARD(iso9899_2011,
"iso9899:2011", "ISO C 2011",
- BCPLComment | C99 | C11 | Digraphs | HexFloat)
+ LineComment | C99 | C11 | Digraphs | HexFloat)
LANGSTANDARD(iso9899_201x,
"iso9899:2011", "ISO C 2011",
- BCPLComment | C99 | C11 | Digraphs | HexFloat)
+ LineComment | C99 | C11 | Digraphs | HexFloat)
LANGSTANDARD(gnu11, "gnu11",
"ISO C 2011 with GNU extensions",
- BCPLComment | C99 | C11 | Digraphs | GNUMode | HexFloat)
+ LineComment | C99 | C11 | Digraphs | GNUMode | HexFloat)
LANGSTANDARD(gnu1x, "gnu1x",
"ISO C 2011 with GNU extensions",
- BCPLComment | C99 | C11 | Digraphs | GNUMode | HexFloat)
+ LineComment | C99 | C11 | Digraphs | GNUMode | HexFloat)
// C++ modes
LANGSTANDARD(cxx98, "c++98",
"ISO C++ 1998 with amendments",
- BCPLComment | CPlusPlus | Digraphs)
+ LineComment | CPlusPlus | Digraphs)
LANGSTANDARD(cxx03, "c++03",
"ISO C++ 1998 with amendments",
- BCPLComment | CPlusPlus | Digraphs)
+ LineComment | CPlusPlus | Digraphs)
LANGSTANDARD(gnucxx98, "gnu++98",
"ISO C++ 1998 with amendments and GNU extensions",
- BCPLComment | CPlusPlus | Digraphs | GNUMode)
+ LineComment | CPlusPlus | Digraphs | GNUMode)
LANGSTANDARD(cxx0x, "c++0x",
"ISO C++ 2011 with amendments",
- BCPLComment | CPlusPlus | CPlusPlus0x | Digraphs)
+ LineComment | CPlusPlus | CPlusPlus0x | Digraphs)
LANGSTANDARD(cxx11, "c++11",
"ISO C++ 2011 with amendments",
- BCPLComment | CPlusPlus | CPlusPlus0x | Digraphs)
+ LineComment | CPlusPlus | CPlusPlus0x | Digraphs)
LANGSTANDARD(gnucxx0x, "gnu++0x",
"ISO C++ 2011 with amendments and GNU extensions",
- BCPLComment | CPlusPlus | CPlusPlus0x | Digraphs | GNUMode)
+ LineComment | CPlusPlus | CPlusPlus0x | Digraphs | GNUMode)
LANGSTANDARD(gnucxx11, "gnu++11",
"ISO C++ 2011 with amendments and GNU extensions",
- BCPLComment | CPlusPlus | CPlusPlus0x | Digraphs | GNUMode)
+ LineComment | CPlusPlus | CPlusPlus0x | Digraphs | GNUMode)
+
+LANGSTANDARD(cxx1y, "c++1y",
+ "Working draft for ISO C++ 2014",
+ LineComment | CPlusPlus | CPlusPlus0x | CPlusPlus1y | Digraphs)
+LANGSTANDARD(gnucxx1y, "gnu++1y",
+ "Working draft for ISO C++ 2014 with GNU extensions",
+ LineComment | CPlusPlus | CPlusPlus0x | CPlusPlus1y | Digraphs |
+ GNUMode)
// OpenCL
LANGSTANDARD(opencl, "cl",
"OpenCL 1.0",
- BCPLComment | C99 | Digraphs | HexFloat)
+ LineComment | C99 | Digraphs | HexFloat)
LANGSTANDARD(opencl11, "CL1.1",
"OpenCL 1.1",
- BCPLComment | C99 | Digraphs | HexFloat)
+ LineComment | C99 | Digraphs | HexFloat)
LANGSTANDARD(opencl12, "CL1.2",
"OpenCL 1.2",
- BCPLComment | C99 | Digraphs | HexFloat)
+ LineComment | C99 | Digraphs | HexFloat)
// CUDA
LANGSTANDARD(cuda, "cuda",
"NVIDIA CUDA(tm)",
- BCPLComment | CPlusPlus | Digraphs)
+ LineComment | CPlusPlus | Digraphs)
#undef LANGSTANDARD
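
LangStandards.def is an X-macro table: each client defines LANGSTANDARD(id, name, desc, features) before including the file, and the trailing #undef above cleans up after every expansion. A minimal sketch of the usual consumption pattern (the enum and array names below are illustrative, not Clang's):

    // Expand the table into an enum of standards...
    enum LangStandardKind {
    #define LANGSTANDARD(id, name, desc, features) lang_##id,
    #include "clang/Frontend/LangStandards.def"
      lang_unspecified
    };

    // ...and again into a parallel array of command-line spellings.
    static const char *const LangStandardNames[] = {
    #define LANGSTANDARD(id, name, desc, features) name,
    #include "clang/Frontend/LangStandards.def"
      0
    };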
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h
index 4de15f2..f4fa876 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h
@@ -42,7 +42,7 @@ class LogDiagnosticPrinter : public DiagnosticConsumer {
raw_ostream &OS;
const LangOptions *LangOpts;
- const DiagnosticOptions *DiagOpts;
+ llvm::IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
SourceLocation LastWarningLoc;
FullSourceLoc LastLoc;
@@ -54,7 +54,7 @@ class LogDiagnosticPrinter : public DiagnosticConsumer {
std::string DwarfDebugFlags;
public:
- LogDiagnosticPrinter(raw_ostream &OS, const DiagnosticOptions &Diags,
+ LogDiagnosticPrinter(raw_ostream &OS, DiagnosticOptions *Diags,
bool OwnsOutputStream = false);
virtual ~LogDiagnosticPrinter();
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
index ffa7b4a..539f2c5 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
@@ -52,7 +52,6 @@ public:
virtual void InitializeSema(Sema &S);
virtual void ForgetSema();
- static bool classof(const MultiplexConsumer *) { return true; }
private:
std::vector<ASTConsumer*> Consumers; // Owns these.
OwningPtr<MultiplexASTMutationListener> MutationListener;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
index aa0695f..ab70afd 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
@@ -54,7 +54,7 @@ enum RecordIDs {
/// (via libclang) without needing to parse Clang's command line output.
///
DiagnosticConsumer *create(llvm::raw_ostream *OS,
- const DiagnosticOptions &diags);
+ DiagnosticOptions *diags);
} // end serialized_diags namespace
} // end clang namespace
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
index c869c08..51f841d 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
@@ -40,7 +40,7 @@ class TextDiagnostic : public DiagnosticRenderer {
public:
TextDiagnostic(raw_ostream &OS,
const LangOptions &LangOpts,
- const DiagnosticOptions &DiagOpts);
+ DiagnosticOptions *DiagOpts);
virtual ~TextDiagnostic();
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
index 23cf521..91ac3c8 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
@@ -18,6 +18,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
namespace clang {
class DiagnosticOptions;
@@ -26,7 +27,7 @@ class TextDiagnostic;
class TextDiagnosticPrinter : public DiagnosticConsumer {
raw_ostream &OS;
- const DiagnosticOptions *DiagOpts;
+ llvm::IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
/// \brief Handle to the currently active text diagnostic emitter.
OwningPtr<TextDiagnostic> TextDiag;
@@ -37,7 +38,7 @@ class TextDiagnosticPrinter : public DiagnosticConsumer {
unsigned OwnsOutputStream : 1;
public:
- TextDiagnosticPrinter(raw_ostream &os, const DiagnosticOptions &diags,
+ TextDiagnosticPrinter(raw_ostream &os, DiagnosticOptions *diags,
bool OwnsOutputStream = false);
virtual ~TextDiagnosticPrinter();
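
This hunk and the LogDiagnosticPrinter/SerializedDiagnosticPrinter/TextDiagnostic hunks above all make the same change: consumers stop borrowing a const DiagnosticOptions & and instead share ownership through llvm::IntrusiveRefCntPtr. A standalone sketch of the ownership pattern (Options and Consumer are illustrative stand-ins, not the Clang classes):

    #include "llvm/ADT/IntrusiveRefCntPtr.h"

    // An intrusively refcounted options object, as DiagnosticOptions becomes.
    struct Options : llvm::RefCountedBase<Options> {
      unsigned ShowColors;
      Options() : ShowColors(0) {}
    };

    struct Consumer {
      llvm::IntrusiveRefCntPtr<Options> Opts;  // shares ownership
      Consumer(Options *O) : Opts(O) {}        // raw pointer in, refcount bumped
    };

    void demo() {
      Consumer A(new Options());    // kept alive by A.Opts
      Consumer B(A.Opts.getPtr());  // refcount 2; freed when both consumers die
    }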
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
index a74589e..06a3b24 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
@@ -12,9 +12,9 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Lex/Preprocessor.h"
-#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include <climits>
@@ -33,7 +33,9 @@ class FileEntry;
/// Indicating that a line expects an error or a warning is simple. Put a
/// comment on the line that has the diagnostic, use:
///
-/// expected-{error,warning,note}
+/// \code
+/// expected-{error,warning,note}
+/// \endcode
///
/// to tag if it's an expected error or warning, and place the expected text
/// between {{ and }} markers. The full text doesn't have to be included, only
@@ -94,12 +96,15 @@ class FileEntry;
///
/// In this example, the diagnostic may appear only once, if at all.
///
-/// Regex matching mode may be selected by appending '-re' to type. Example:
+/// Regex matching mode may be selected by appending '-re' to type, such as:
///
+/// \code
/// expected-error-re
+/// \endcode
///
/// Examples matching error: "variable has incomplete type 'struct s'"
///
+/// \code
/// // expected-error {{variable has incomplete type 'struct s'}}
/// // expected-error {{variable has incomplete type}}
///
@@ -107,6 +112,15 @@ class FileEntry;
/// // expected-error-re {{variable has has type 'struct .*'}}
/// // expected-error-re {{variable has has type 'struct (.*)'}}
/// // expected-error-re {{variable has has type 'struct[[:space:]](.*)'}}
+/// \endcode
+///
+/// VerifyDiagnosticConsumer expects at least one expected-* directive to
+/// be found inside the source code. If no diagnostics are expected the
+/// following directive can be used to indicate this:
+///
+/// \code
+/// // expected-no-diagnostics
+/// \endcode
///
class VerifyDiagnosticConsumer: public DiagnosticConsumer,
public CommentHandler {
@@ -146,8 +160,8 @@ public:
}
private:
- Directive(const Directive&); // DO NOT IMPLEMENT
- void operator=(const Directive&); // DO NOT IMPLEMENT
+ Directive(const Directive &) LLVM_DELETED_FUNCTION;
+ void operator=(const Directive &) LLVM_DELETED_FUNCTION;
};
typedef std::vector<Directive*> DirectiveList;
@@ -166,10 +180,12 @@ public:
}
};
-#ifndef NDEBUG
- typedef llvm::DenseSet<FileID> FilesWithDiagnosticsSet;
- typedef llvm::SmallPtrSet<const FileEntry *, 4> FilesParsedForDirectivesSet;
-#endif
+ enum DirectiveStatus {
+ HasNoDirectives,
+ HasNoDirectivesReported,
+ HasExpectedNoDiagnostics,
+ HasOtherExpectedDirectives
+ };
private:
DiagnosticsEngine &Diags;
@@ -177,13 +193,36 @@ private:
bool OwnsPrimaryClient;
OwningPtr<TextDiagnosticBuffer> Buffer;
const Preprocessor *CurrentPreprocessor;
+ const LangOptions *LangOpts;
+ SourceManager *SrcManager;
unsigned ActiveSourceFiles;
-#ifndef NDEBUG
- FilesWithDiagnosticsSet FilesWithDiagnostics;
- FilesParsedForDirectivesSet FilesParsedForDirectives;
-#endif
+ DirectiveStatus Status;
ExpectedData ED;
+
void CheckDiagnostics();
+ void setSourceManager(SourceManager &SM) {
+ assert((!SrcManager || SrcManager == &SM) && "SourceManager changed!");
+ SrcManager = &SM;
+ }
+
+#ifndef NDEBUG
+ class UnparsedFileStatus {
+ llvm::PointerIntPair<const FileEntry *, 1, bool> Data;
+
+ public:
+ UnparsedFileStatus(const FileEntry *File, bool FoundDirectives)
+ : Data(File, FoundDirectives) {}
+
+ const FileEntry *getFile() const { return Data.getPointer(); }
+ bool foundDirectives() const { return Data.getInt(); }
+ };
+
+ typedef llvm::DenseMap<FileID, const FileEntry *> ParsedFilesMap;
+ typedef llvm::DenseMap<FileID, UnparsedFileStatus> UnparsedFilesMap;
+
+ ParsedFilesMap ParsedFiles;
+ UnparsedFilesMap UnparsedFiles;
+#endif
public:
/// Create a new verifying diagnostic client, which will issue errors to
@@ -197,12 +236,19 @@ public:
virtual void EndSourceFile();
- /// \brief Manually register a file as parsed.
- inline void appendParsedFile(const FileEntry *File) {
-#ifndef NDEBUG
- FilesParsedForDirectives.insert(File);
-#endif
- }
+ enum ParsedStatus {
+ /// File has been processed via HandleComment.
+ IsParsed,
+
+ /// File has diagnostics and may have directives.
+ IsUnparsed,
+
+ /// File has diagnostics but guaranteed no directives.
+ IsUnparsedNoDirectives
+ };
+
+ /// \brief Update lists of parsed and unparsed files.
+ void UpdateParsedFileStatus(SourceManager &SM, FileID FID, ParsedStatus PS);
virtual bool HandleComment(Preprocessor &PP, SourceRange Comment);
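
Putting the documented syntax together, a test source file that -verify would accept might look like this (illustrative input, reusing the incomplete-type example from the comment block above):

    struct s;      // forward declaration only; no diagnostic here
    struct s v;    // expected-error {{variable has incomplete type 'struct s'}}

    // Regex form, matching any struct name:
    //   struct t w;  // expected-error-re {{variable has incomplete type 'struct .*'}}

    // A file expecting no diagnostics at all would instead contain only:
    //   // expected-no-diagnostics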
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h b/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
index f172b5c..d2e2412 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
@@ -28,9 +28,6 @@ public:
/// \brief Read the set of macros defined by this external macro source.
virtual void ReadDefinedMacros() = 0;
- /// \brief Read the definition for the given macro.
- virtual void LoadMacroDefinition(IdentifierInfo *II) = 0;
-
/// \brief Update an out-of-date identifier.
virtual void updateOutOfDateIdentifier(IdentifierInfo &II) = 0;
};
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
index 107408d..8473a6a 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LEX_HEADERMAP_H
#include "clang/Basic/LLVM.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
class MemoryBuffer;
@@ -30,8 +31,8 @@ namespace clang {
/// symlinks to files. Its advantages are that it is dense and more efficient
/// to create and process than a directory of symlinks.
class HeaderMap {
- HeaderMap(const HeaderMap&); // DO NOT IMPLEMENT
- void operator=(const HeaderMap&); // DO NOT IMPLEMENT
+ HeaderMap(const HeaderMap &) LLVM_DELETED_FUNCTION;
+ void operator=(const HeaderMap &) LLVM_DELETED_FUNCTION;
const llvm::MemoryBuffer *FileBuffer;
bool NeedsBSwap;
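
LLVM_DELETED_FUNCTION, used here and in the Lexer/PTHLexer/PTHManager/HeaderSearch hunks below, comes from llvm/Support/Compiler.h: where the compiler supports C++11 deleted functions it expands to "= delete", turning an accidental copy into a compile-time error rather than a link-time one. A simplified approximation of the macro:

    #if __cplusplus >= 201103L
    #define LLVM_DELETED_FUNCTION = delete   // hard error at the call site
    #else
    #define LLVM_DELETED_FUNCTION            // old idiom: declared, never defined
    #endif

    class NonCopyable {
      NonCopyable(const NonCopyable &) LLVM_DELETED_FUNCTION;
      void operator=(const NonCopyable &) LLVM_DELETED_FUNCTION;
    public:
      NonCopyable() {}
    };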
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
index 8e9491f..4334db7 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
@@ -17,6 +17,7 @@
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/ModuleMap.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Allocator.h"
@@ -29,6 +30,7 @@ class DiagnosticsEngine;
class ExternalIdentifierLookup;
class FileEntry;
class FileManager;
+class HeaderSearchOptions;
class IdentifierInfo;
/// \brief The preprocessor keeps track of this information for each
@@ -131,8 +133,10 @@ class HeaderSearch {
bool IsUserSpecifiedSystemFramework;
};
+ /// \brief Header-search options used to initialize this header search.
+ llvm::IntrusiveRefCntPtr<HeaderSearchOptions> HSOpts;
+
FileManager &FileMgr;
- DiagnosticsEngine &Diags;
/// \#include search path information. Requests for \#include "x" search the
/// directory of the \#including file first, then each directory in SearchDirs
/// consecutively. Requests for <x> search the current dir first, then each
@@ -207,17 +211,21 @@ class HeaderSearch {
unsigned NumFrameworkLookups, NumSubFrameworkLookups;
// HeaderSearch doesn't support default or copy construction.
- explicit HeaderSearch();
- explicit HeaderSearch(const HeaderSearch&);
- void operator=(const HeaderSearch&);
-
+ HeaderSearch(const HeaderSearch&) LLVM_DELETED_FUNCTION;
+ void operator=(const HeaderSearch&) LLVM_DELETED_FUNCTION;
+
friend class DirectoryLookup;
public:
- HeaderSearch(FileManager &FM, DiagnosticsEngine &Diags,
+ HeaderSearch(llvm::IntrusiveRefCntPtr<HeaderSearchOptions> HSOpts,
+ FileManager &FM, DiagnosticsEngine &Diags,
const LangOptions &LangOpts, const TargetInfo *Target);
~HeaderSearch();
+ /// \brief Retrieve the header-search options with which this header search
+ /// was initialized.
+ HeaderSearchOptions &getHeaderSearchOpts() const { return *HSOpts; }
+
FileManager &getFileMgr() const { return FileMgr; }
/// \brief Interface for setting the file search paths.
@@ -283,6 +291,11 @@ public:
/// \brief Retrieve the path to the module cache.
StringRef getModuleCachePath() const { return ModuleCachePath; }
+
+ /// \brief Consider modules when including files from this directory.
+ void setDirectoryHasModuleMap(const DirectoryEntry* Dir) {
+ DirectoryHasModuleMap[Dir] = true;
+ }
/// \brief Forget everything we know about headers so far.
void ClearFileInfo() {
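
With this constructor change HeaderSearch retains its options object for its whole lifetime. A sketch of the new construction pattern, assuming the caller already has the file manager, diagnostics, language options and target, and that HeaderSearchOptions still has a defaulted-sysroot constructor:

    #include "clang/Lex/HeaderSearch.h"
    #include "clang/Lex/HeaderSearchOptions.h"

    clang::HeaderSearch *makeHeaderSearch(clang::FileManager &FM,
                                          clang::DiagnosticsEngine &Diags,
                                          const clang::LangOptions &LO,
                                          const clang::TargetInfo *TI) {
      llvm::IntrusiveRefCntPtr<clang::HeaderSearchOptions> HSOpts(
          new clang::HeaderSearchOptions());
      // HSOpts' refcount keeps the options alive inside the HeaderSearch,
      // retrievable later via getHeaderSearchOpts().
      return new clang::HeaderSearch(HSOpts, FM, Diags, LO, TI);
    }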
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearchOptions.h
index ebc8f26..468fefa 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearchOptions.h
@@ -7,9 +7,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_HEADERSEARCHOPTIONS_H
-#define LLVM_CLANG_FRONTEND_HEADERSEARCHOPTIONS_H
+#ifndef LLVM_CLANG_LEX_HEADERSEARCHOPTIONS_H
+#define LLVM_CLANG_LEX_HEADERSEARCHOPTIONS_H
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringRef.h"
#include <vector>
@@ -36,7 +37,7 @@ namespace frontend {
/// HeaderSearchOptions - Helper class for storing options related to the
/// initialization of the HeaderSearch object.
-class HeaderSearchOptions {
+class HeaderSearchOptions : public llvm::RefCountedBase<HeaderSearchOptions> {
public:
struct Entry {
std::string Path;
@@ -125,7 +126,7 @@ public:
UseStandardSystemIncludes(true), UseStandardCXXIncludes(true),
UseLibcxx(false), Verbose(false) {}
- /// AddPath - Add the \arg Path path to the specified \arg Group list.
+ /// AddPath - Add the \p Path path to the specified \p Group list.
void AddPath(StringRef Path, frontend::IncludeDirGroup Group,
bool IsUserSupplied, bool IsFramework, bool IgnoreSysRoot,
bool IsInternal = false, bool ImplicitExternC = false) {
@@ -134,7 +135,7 @@ public:
}
/// AddSystemHeaderPrefix - Override whether \#include directives naming a
- /// path starting with \arg Prefix should be considered as naming a system
+ /// path starting with \p Prefix should be considered as naming a system
/// header.
void AddSystemHeaderPrefix(StringRef Prefix, bool IsSystemHeader) {
SystemHeaderPrefixes.push_back(SystemHeaderPrefix(Prefix, IsSystemHeader));
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
index ca233de..407b644 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
@@ -31,11 +31,11 @@ class DiagnosticBuilder;
enum ConflictMarkerKind {
/// Not within a conflict marker.
CMK_None,
- /// A normal or diff3 conflict marker, initiated by at least 7 <s,
- /// separated by at least 7 =s or |s, and terminated by at least 7 >s.
+ /// A normal or diff3 conflict marker, initiated by at least 7 "<"s,
+ /// separated by at least 7 "="s or "|"s, and terminated by at least 7 ">"s.
CMK_Normal,
- /// A Perforce-style conflict marker, initiated by 4 >s, separated by 4 =s,
- /// and terminated by 4 <s.
+ /// A Perforce-style conflict marker, initiated by 4 ">"s,
+ /// separated by 4 "="s, and terminated by 4 "<"s.
CMK_Perforce
};
@@ -83,8 +83,8 @@ class Lexer : public PreprocessorLexer {
// CurrentConflictMarkerState - The kind of conflict marker we are handling.
ConflictMarkerKind CurrentConflictMarkerState;
- Lexer(const Lexer&); // DO NOT IMPLEMENT
- void operator=(const Lexer&); // DO NOT IMPLEMENT
+ Lexer(const Lexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const Lexer &) LLVM_DELETED_FUNCTION;
friend class Preprocessor;
void InitLexer(const char *BufStart, const char *BufPtr, const char *BufEnd);
@@ -128,9 +128,7 @@ public:
SourceLocation getFileLoc() const { return FileLoc; }
/// Lex - Return the next token in the file. If this is the end of file, it
- /// return the tok::eof token. Return true if an error occurred and
- /// compilation should terminate, false if normal. This implicitly involves
- /// the preprocessor.
+ /// returns the tok::eof token. This implicitly involves the preprocessor.
void Lex(Token &Result) {
// Start a new token.
Result.startToken();
@@ -278,8 +276,6 @@ public:
/// \brief Given a location any where in a source buffer, find the location
/// that corresponds to the beginning of the token in which the original
/// source location lands.
- ///
- /// \param Loc
static SourceLocation GetBeginningOfToken(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts);
@@ -324,7 +320,7 @@ public:
/// \brief Returns true if the given MacroID location points at the last
/// token of the macro expansion.
///
- /// \param MacroBegin If non-null and function returns true, it is set to
+ /// \param MacroEnd If non-null and function returns true, it is set to
/// end location of the macro.
static bool isAtEndOfMacroExpansion(SourceLocation loc,
const SourceManager &SM,
@@ -396,7 +392,36 @@ public:
static std::pair<unsigned, bool>
ComputePreamble(const llvm::MemoryBuffer *Buffer, const LangOptions &LangOpts,
unsigned MaxLines = 0);
-
+
+ /// \brief Checks that the given token is the first token that occurs after
+ /// the given location (this excludes comments and whitespace). Returns the
+ /// location immediately after the specified token. If the token is not found
+ /// or the location is inside a macro, the returned source location will be
+ /// invalid.
+ static SourceLocation findLocationAfterToken(SourceLocation loc,
+ tok::TokenKind TKind,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool SkipTrailingWhitespaceAndNewLine);
+
+ /// \brief Returns true if the given character could appear in an identifier.
+ static bool isIdentifierBodyChar(char c, const LangOptions &LangOpts);
+
+ /// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
+ /// emit a warning.
+ static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
+ const LangOptions &LangOpts) {
+ // If this is not a trigraph and not a UCN or escaped newline, return
+ // quickly.
+ if (isObviouslySimpleCharacter(Ptr[0])) {
+ Size = 1;
+ return *Ptr;
+ }
+
+ Size = 0;
+ return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
+ }
+
//===--------------------------------------------------------------------===//
// Internal implementation interfaces.
private:
@@ -427,7 +452,6 @@ private:
//===--------------------------------------------------------------------===//
// Lexer character reading interfaces.
-public:
// This lexer is built on two interfaces for reading characters, both of which
// automatically provide phase 1/2 translation. getAndAdvanceChar is used
@@ -467,7 +491,6 @@ public:
return C;
}
-private:
/// ConsumeChar - When a character (identified by getCharAndSize) is consumed
/// and added to a given token, check to see if there are diagnostics that
/// need to be emitted or flags that need to be set on the token. If so, do
@@ -503,22 +526,6 @@ private:
/// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
/// method.
char getCharAndSizeSlow(const char *Ptr, unsigned &Size, Token *Tok = 0);
-public:
-
- /// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
- /// emit a warning.
- static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
- const LangOptions &LangOpts) {
- // If this is not a trigraph and not a UCN or escaped newline, return
- // quickly.
- if (isObviouslySimpleCharacter(Ptr[0])) {
- Size = 1;
- return *Ptr;
- }
-
- Size = 0;
- return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
- }
/// getEscapedNewLineSize - Return the size of the specified escaped newline,
/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" on entry
@@ -530,22 +537,6 @@ public:
/// otherwise return P.
static const char *SkipEscapedNewLines(const char *P);
- /// \brief Checks that the given token is the first token that occurs after
- /// the given location (this excludes comments and whitespace). Returns the
- /// location immediately after the specified token. If the token is not found
- /// or the location is inside a macro, the returned source location will be
- /// invalid.
- static SourceLocation findLocationAfterToken(SourceLocation loc,
- tok::TokenKind TKind,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- bool SkipTrailingWhitespaceAndNewLine);
-
- /// \brief Returns true if the given character could appear in an identifier.
- static bool isIdentifierBodyChar(char c, const LangOptions &LangOpts);
-
-private:
-
/// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
/// diagnostic.
static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
@@ -571,15 +562,17 @@ private:
bool LexEndOfFile (Token &Result, const char *CurPtr);
bool SkipWhitespace (Token &Result, const char *CurPtr);
- bool SkipBCPLComment (Token &Result, const char *CurPtr);
+ bool SkipLineComment (Token &Result, const char *CurPtr);
bool SkipBlockComment (Token &Result, const char *CurPtr);
- bool SaveBCPLComment (Token &Result, const char *CurPtr);
+ bool SaveLineComment (Token &Result, const char *CurPtr);
bool IsStartOfConflictMarker(const char *CurPtr);
bool HandleEndOfConflictMarker(const char *CurPtr);
bool isCodeCompletionPoint(const char *CurPtr) const;
void cutOffLexing() { BufferPtr = BufferEnd; }
+
+ bool isHexaLiteral(const char *Start, const LangOptions &LangOpts);
};
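
getCharAndSizeNoWarn, now grouped with the other public static helpers above, returns one phase-1/2-translated character and reports how many raw bytes it occupied. A sketch of scanning a buffer with it (consume() is a hypothetical sink, not a Clang API):

    #include "clang/Lex/Lexer.h"

    void consume(char C);  // hypothetical sink for each logical character

    void scanBuffer(const char *Ptr, const char *End,
                    const clang::LangOptions &LO) {
      while (Ptr != End) {
        unsigned Size = 0;
        char C = clang::Lexer::getCharAndSizeNoWarn(Ptr, Size, LO);
        consume(C);   // trigraphs and escaped newlines arrive already translated
        Ptr += Size;  // Size may be > 1 for those multi-byte sequences
      }
    }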
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
index bbce62d..3b68d1b 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
@@ -18,6 +18,7 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "clang/Basic/TokenKinds.h"
#include <cctype>
@@ -48,8 +49,9 @@ class NumericLiteralParser {
bool saw_exponent, saw_period, saw_ud_suffix;
public:
- NumericLiteralParser(const char *begin, const char *end,
- SourceLocation Loc, Preprocessor &PP);
+ NumericLiteralParser(StringRef TokSpelling,
+ SourceLocation TokLoc,
+ Preprocessor &PP);
bool hadError;
bool isUnsigned;
bool isLong; // This is *not* set for long long.
@@ -230,8 +232,8 @@ public:
private:
void init(const Token *StringToks, unsigned NumStringToks);
- bool CopyStringFragment(StringRef Fragment);
- bool DiagnoseBadString(const Token& Tok);
+ bool CopyStringFragment(const Token &Tok, const char *TokBegin,
+ StringRef Fragment);
void DiagnoseLexingError(SourceLocation Loc);
};
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
index cbd201f..aba77d5 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
@@ -32,6 +32,12 @@ class MacroInfo {
SourceLocation Location;
/// EndLocation - The location of the last token in the macro.
SourceLocation EndLocation;
+ /// \brief The location where the macro was #undef'd, or an invalid location
+ /// for macros that haven't been undefined.
+ SourceLocation UndefLocation;
+ /// \brief The previous definition that this macro's identifier was defined
+ /// to, or NULL if there is none.
+ MacroInfo *PreviousDefinition;
/// Arguments - The list of arguments for a function-like macro. This can be
/// empty, as in, e.g., "#define X()". In a C99-style variadic macro, this
@@ -99,7 +105,16 @@ private:
/// \brief Whether the macro has public visibility (when described in a module).
bool IsPublic : 1;
-
+
+ /// \brief Whether the macro definition is currently "hidden".
+ /// Note that this is transient state that is never serialized to the AST
+ /// file.
+ bool IsHidden : 1;
+
+ /// \brief Whether the definition of this macro is ambiguous, due to
+ /// multiple definitions coming in from multiple modules.
+ bool IsAmbiguous : 1;
+
~MacroInfo() {
assert(ArgumentList == 0 && "Didn't call destroy before dtor!");
}
@@ -128,10 +143,34 @@ public:
/// setDefinitionEndLoc - Set the location of the last token in the macro.
///
void setDefinitionEndLoc(SourceLocation EndLoc) { EndLocation = EndLoc; }
+
/// getDefinitionEndLoc - Return the location of the last token in the macro.
///
SourceLocation getDefinitionEndLoc() const { return EndLocation; }
-
+
+ /// \brief Set the location where macro was undefined. Can only be set once.
+ void setUndefLoc(SourceLocation UndefLoc) {
+ assert(UndefLocation.isInvalid() && "UndefLocation is already set!");
+ assert(UndefLoc.isValid() && "Invalid UndefLoc!");
+ UndefLocation = UndefLoc;
+ }
+
+ /// \brief Get the location where macro was undefined.
+ SourceLocation getUndefLoc() const { return UndefLocation; }
+
+ /// \brief Set previous definition of the macro with the same name.
+ void setPreviousDefinition(MacroInfo *PreviousDef) {
+ PreviousDefinition = PreviousDef;
+ }
+
+ /// \brief Get previous definition of the macro with the same name.
+ MacroInfo *getPreviousDefinition() { return PreviousDefinition; }
+
+ /// \brief Find macro definition active in the specified source location. If
+ /// this macro was not defined there, return NULL.
+ const MacroInfo *findDefinitionAtLoc(SourceLocation L,
+ SourceManager &SM) const;
+
/// \brief Get length in characters of the macro definition.
unsigned getDefinitionLength(SourceManager &SM) const {
if (IsDefinitionLengthCached)
@@ -294,6 +333,23 @@ public:
/// \brief Determine the location where this macro was explicitly made
/// public or private within its module.
SourceLocation getVisibilityLocation() { return VisibilityLocation; }
+
+ /// \brief Determine whether this macro is currently defined (and has not
+ /// been #undef'd) or has been hidden.
+ bool isDefined() const { return UndefLocation.isInvalid() && !IsHidden; }
+
+ /// \brief Determine whether this macro definition is hidden.
+ bool isHidden() const { return IsHidden; }
+
+ /// \brief Set whether this macro definition is hidden.
+ void setHidden(bool Val) { IsHidden = Val; }
+
+ /// \brief Determine whether this macro definition is ambiguous with
+ /// other macro definitions.
+ bool isAmbiguous() const { return IsAmbiguous; }
+
+ /// \brief Set whether this macro definition is ambiguous.
+ void setAmbiguous(bool Val) { IsAmbiguous = Val; }
private:
unsigned getDefinitionLengthSlow(SourceManager &SM) const;
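
The new UndefLocation/PreviousDefinition fields turn each macro name's definitions into a singly linked history, newest first. A sketch of walking it (MI would come from Preprocessor::getMacroInfoHistory):

    #include "clang/Lex/MacroInfo.h"

    // Count how many definitions a macro name has had, newest first.
    unsigned historyDepth(clang::MacroInfo *MI) {
      unsigned N = 0;
      for (; MI; MI = MI->getPreviousDefinition()) {
        ++N;
        // A live definition satisfies
        //   MI->getUndefLoc().isInvalid() && !MI->isHidden();
        // #undef'd and hidden ones follow behind it in the chain.
      }
      return N;
    }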
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
index fe5abdf..082408d 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
@@ -52,10 +52,37 @@ class ModuleMap {
/// \brief The top-level modules that are known.
llvm::StringMap<Module *> Modules;
-
+
+ /// \brief A header that is known to reside within a given module,
+ /// whether it was included or excluded.
+ class KnownHeader {
+ llvm::PointerIntPair<Module *, 1, bool> Storage;
+
+ public:
+ KnownHeader() : Storage(0, false) { }
+ KnownHeader(Module *M, bool Excluded) : Storage(M, Excluded) { }
+
+ /// \brief Retrieve the module the header is stored in.
+ Module *getModule() const { return Storage.getPointer(); }
+
+ /// \brief Whether this header is explicitly excluded from the module.
+ bool isExcluded() const { return Storage.getInt(); }
+
+ /// \brief Whether this header is available in the module.
+ bool isAvailable() const {
+ return !isExcluded() && getModule()->isAvailable();
+ }
+
+ /// \brief Whether this known header is valid (i.e., it has an
+ /// associated module).
+ operator bool() const { return Storage.getPointer() != 0; }
+ };
+
+ typedef llvm::DenseMap<const FileEntry *, KnownHeader> HeadersMap;
+
/// \brief Mapping from each header to the module that owns the contents of
/// that header.
- llvm::DenseMap<const FileEntry *, Module *> Headers;
+ HeadersMap Headers;
/// \brief Mapping from directories with umbrella headers to the module
/// that is generated from the umbrella header.
@@ -64,7 +91,26 @@ class ModuleMap {
/// in the module map over to the module that includes them via its umbrella
/// header.
llvm::DenseMap<const DirectoryEntry *, Module *> UmbrellaDirs;
-
+
+ /// \brief A directory for which framework modules can be inferred.
+ struct InferredDirectory {
+ InferredDirectory() : InferModules(), InferSystemModules() { }
+
+ /// \brief Whether to infer modules from this directory.
+ unsigned InferModules : 1;
+
+ /// \brief Whether the modules we infer are [system] modules.
+ unsigned InferSystemModules : 1;
+
+ /// \brief The names of modules that cannot be inferred within this
+ /// directory.
+ llvm::SmallVector<std::string, 2> ExcludedModules;
+ };
+
+ /// \brief A mapping from directories to information about inferring
+ /// framework modules from within those directories.
+ llvm::DenseMap<const DirectoryEntry *, InferredDirectory> InferredDirectories;
+
friend class ModuleMapParser;
/// \brief Resolve the given export declaration into an actual export
@@ -170,7 +216,23 @@ public:
std::pair<Module *, bool> findOrCreateModule(StringRef Name, Module *Parent,
bool IsFramework,
bool IsExplicit);
-
+
+ /// \brief Determine whether we can infer a framework module for a framework
+ /// with the given name in the given parent directory.
+ ///
+ /// \param ParentDir The directory that is the parent of the framework
+ /// directory.
+ ///
+ /// \param Name The name of the module.
+ ///
+ /// \param IsSystem Will be set to 'true' if the inferred module must be a
+ /// system module.
+ ///
+ /// \returns true if we are allowed to infer a framework module, and false
+ /// otherwise.
+ bool canInferFrameworkModule(const DirectoryEntry *ParentDir,
+ StringRef Name, bool &IsSystem);
+
/// \brief Infer the contents of a framework module map from the given
/// framework directory.
Module *inferFrameworkModule(StringRef ModuleName,
@@ -215,7 +277,9 @@ public:
void setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir);
/// \brief Adds this header to the given module.
- void addHeader(Module *Mod, const FileEntry *Header);
+ /// \param Excluded Whether this header is explicitly excluded from the
+ /// module; otherwise, it's included in the module.
+ void addHeader(Module *Mod, const FileEntry *Header, bool Excluded);
/// \brief Parse the given module map file, and record any modules we
/// encounter.
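
KnownHeader above is the standard llvm::PointerIntPair trick: the Module pointer's alignment guarantees a spare low bit, which stores the "excluded" flag at no extra space cost. A standalone equivalent (Module here is a stand-in type):

    #include "llvm/ADT/PointerIntPair.h"

    struct Module;  // stand-in for clang::Module

    class Tagged {
      // One pointer-sized word holds both the pointer and a 1-bit flag.
      llvm::PointerIntPair<Module *, 1, bool> Storage;
    public:
      Tagged() : Storage(0, false) {}
      Tagged(Module *M, bool Excluded) : Storage(M, Excluded) {}
      Module *getModule() const { return Storage.getPointer(); }
      bool isExcluded() const { return Storage.getInt(); }
      operator bool() const { return Storage.getPointer() != 0; }  // validity
    };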
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
index 962b4df..8ba02cc 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
@@ -16,6 +16,7 @@
#define LLVM_CLANG_LEX_PPCALLBACKS_H
#include "clang/Lex/DirectoryLookup.h"
+#include "clang/Lex/ModuleLoader.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/DiagnosticIDs.h"
#include "llvm/ADT/StringRef.h"
@@ -93,10 +94,10 @@ public:
/// \param IsAngled Whether the file name was enclosed in angle brackets;
/// otherwise, it was enclosed in quotes.
///
- /// \param File The actual file that may be included by this inclusion
- /// directive.
+ /// \param FilenameRange The character range of the quotes or angle brackets
+ /// for the written file name.
///
- /// \param EndLoc The location of the last token within the inclusion
+ /// \param File The actual file that may be included by this inclusion
/// directive.
///
/// \param SearchPath Contains the search path which was used to find the file
@@ -110,14 +111,34 @@ public:
///
/// \param RelativePath The path relative to SearchPath, at which the include
/// file was found. This is equal to FileName except for framework includes.
+ ///
+ /// \param Imported The module imported, when the inclusion directive was
+ /// automatically turned into a module import, or null otherwise.
+ ///
virtual void InclusionDirective(SourceLocation HashLoc,
const Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath) {
+ StringRef RelativePath,
+ const Module *Imported) {
+ }
+
+ /// \brief Callback invoked whenever there was an explicit module-import
+ /// syntax.
+ ///
+ /// \param ImportLoc The location of the import directive token.
+ ///
+ /// \param Path The identifiers (and their locations) of the module
+ /// "path", e.g., "std.vector" would be split into "std" and "vector".
+ ///
+ /// \param Imported The imported module; can be null if importing failed.
+ ///
+ virtual void moduleImport(SourceLocation ImportLoc,
+ ModuleIdPath Path,
+ const Module *Imported) {
}
/// \brief Callback invoked when the end of the main file is reached.
@@ -266,14 +287,24 @@ public:
const Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath) {
- First->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled, File,
- EndLoc, SearchPath, RelativePath);
- Second->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled, File,
- EndLoc, SearchPath, RelativePath);
+ StringRef RelativePath,
+ const Module *Imported) {
+ First->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled,
+ FilenameRange, File, SearchPath, RelativePath,
+ Imported);
+ Second->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled,
+ FilenameRange, File, SearchPath, RelativePath,
+ Imported);
+ }
+
+ virtual void moduleImport(SourceLocation ImportLoc,
+ ModuleIdPath Path,
+ const Module *Imported) {
+ First->moduleImport(ImportLoc, Path, Imported);
+ Second->moduleImport(ImportLoc, Path, Imported);
}
virtual void EndOfMainFile() {
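
Out-of-tree PPCallbacks clients must update their overrides to the new InclusionDirective signature, and may now observe explicit imports through moduleImport. A sketch (the hook bodies are illustrative):

    #include "clang/Lex/PPCallbacks.h"

    class IncludeLogger : public clang::PPCallbacks {
      virtual void InclusionDirective(clang::SourceLocation HashLoc,
                                      const clang::Token &IncludeTok,
                                      clang::StringRef FileName, bool IsAngled,
                                      clang::CharSourceRange FilenameRange,
                                      const clang::FileEntry *File,
                                      clang::StringRef SearchPath,
                                      clang::StringRef RelativePath,
                                      const clang::Module *Imported) {
        if (Imported)
          ; // this #include was rewritten into a module import
      }

      virtual void moduleImport(clang::SourceLocation ImportLoc,
                                clang::ModuleIdPath Path,
                                const clang::Module *Imported) {
        // Path holds the dotted components, e.g. "std" then "vector".
      }
    };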
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PPMutationListener.h b/contrib/llvm/tools/clang/include/clang/Lex/PPMutationListener.h
new file mode 100644
index 0000000..5319c66
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PPMutationListener.h
@@ -0,0 +1,43 @@
+//===--- PPMutationListener.h - Preprocessor Mutation Interface -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PPMutationListener interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LEX_PPTMUTATIONLISTENER_H
+#define LLVM_CLANG_LEX_PPTMUTATIONLISTENER_H
+
+#include "clang/Basic/SourceLocation.h"
+
+namespace clang {
+
+class MacroInfo;
+
+/// \brief A record that describes an update to a macro that was
+/// originally loaded from an AST file and has been modified within the
+/// current translation unit.
+struct MacroUpdate {
+ /// \brief The source location at which this macro was #undef'd.
+ SourceLocation UndefLoc;
+};
+
+/// \brief An abstract interface that should be implemented by
+/// listeners that want to be notified when a preprocessor entity gets
+/// modified after its initial creation.
+class PPMutationListener {
+public:
+ virtual ~PPMutationListener();
+
+ /// \brief A macro has been #undef'd.
+ virtual void UndefinedMacro(MacroInfo *MI) { }
+};
+
+} // end namespace clang
+
+#endif
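
A minimal listener implementation might just record which AST-file macros were #undef'd (the vector member is illustrative):

    #include "clang/Lex/PPMutationListener.h"
    #include <vector>

    class UndefRecorder : public clang::PPMutationListener {
      std::vector<clang::MacroInfo *> Undefined;
    public:
      virtual void UndefinedMacro(clang::MacroInfo *MI) {
        Undefined.push_back(MI);  // MI->getUndefLoc() now holds the #undef site
      }
    };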
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h
index f6a97a0..a9276e8 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h
@@ -44,8 +44,8 @@ class PTHLexer : public PreprocessorLexer {
/// to process when doing quick skipping of preprocessor blocks.
const unsigned char* CurPPCondPtr;
- PTHLexer(const PTHLexer&); // DO NOT IMPLEMENT
- void operator=(const PTHLexer&); // DO NOT IMPLEMENT
+ PTHLexer(const PTHLexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const PTHLexer &) LLVM_DELETED_FUNCTION;
/// ReadToken - Used by PTHLexer to read tokens from TokBuf.
void ReadToken(Token& T);
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h b/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
index 44f9ab3..e64dbd8 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
@@ -81,9 +81,8 @@ class PTHManager : public IdentifierInfoLookup {
void* stringIdLookup, unsigned numIds,
const unsigned char* spellingBase, const char *originalSourceFile);
- // Do not implement.
- PTHManager();
- void operator=(const PTHManager&);
+ PTHManager(const PTHManager &) LLVM_DELETED_FUNCTION;
+ void operator=(const PTHManager &) LLVM_DELETED_FUNCTION;
/// getSpellingAtPTHOffset - Used by PTHLexer classes to get the cached
/// spelling for a token.
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
index fb3e081..57e51b7 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
@@ -94,9 +94,6 @@ namespace clang {
/// entity.
bool isInvalid() const { return Kind == InvalidKind; }
- // Implement isa/cast/dyncast/etc.
- static bool classof(const PreprocessedEntity *) { return true; }
-
// Only allow allocation of preprocessed entities using the allocator
// in PreprocessingRecord or by doing a placement new.
void* operator new(size_t bytes, PreprocessingRecord& PR,
@@ -133,7 +130,6 @@ namespace clang {
return PD->getKind() >= FirstPreprocessingDirective &&
PD->getKind() <= LastPreprocessingDirective;
}
- static bool classof(const PreprocessingDirective *) { return true; }
};
/// \brief Record the location of a macro definition.
@@ -155,7 +151,6 @@ namespace clang {
static bool classof(const PreprocessedEntity *PE) {
return PE->getKind() == MacroDefinitionKind;
}
- static bool classof(const MacroDefinition *) { return true; }
};
/// \brief Records the location of a macro expansion.
@@ -193,7 +188,6 @@ namespace clang {
static bool classof(const PreprocessedEntity *PE) {
return PE->getKind() == MacroExpansionKind;
}
- static bool classof(const MacroExpansion *) { return true; }
};
/// \brief Record the location of an inclusion directive, such as an
@@ -227,13 +221,18 @@ namespace clang {
/// This is a value of type InclusionKind.
unsigned Kind : 2;
+ /// \brief Whether the inclusion directive was automatically turned into
+ /// a module import.
+ unsigned ImportedModule : 1;
+
/// \brief The file that was included.
const FileEntry *File;
public:
InclusionDirective(PreprocessingRecord &PPRec,
InclusionKind Kind, StringRef FileName,
- bool InQuotes, const FileEntry *File, SourceRange Range);
+ bool InQuotes, bool ImportedModule,
+ const FileEntry *File, SourceRange Range);
/// \brief Determine what kind of inclusion directive this is.
InclusionKind getKind() const { return static_cast<InclusionKind>(Kind); }
@@ -244,6 +243,10 @@ namespace clang {
/// \brief Determine whether the included file name was written in quotes;
/// otherwise, it was written in angle brackets.
bool wasInQuotes() const { return InQuotes; }
+
+ /// \brief Determine whether the inclusion directive was automatically
+ /// turned into a module import.
+ bool importedModule() const { return ImportedModule; }
/// \brief Retrieve the file entry for the actual file that was included
/// by this directive.
@@ -253,7 +256,6 @@ namespace clang {
static bool classof(const PreprocessedEntity *PE) {
return PE->getKind() == InclusionDirectiveKind;
}
- static bool classof(const InclusionDirective *) { return true; }
};
/// \brief An abstract class that should be subclassed by any external source
@@ -269,12 +271,12 @@ namespace clang {
virtual PreprocessedEntity *ReadPreprocessedEntity(unsigned Index) = 0;
/// \brief Returns a pair of [Begin, End) indices of preallocated
- /// preprocessed entities that \arg Range encompasses.
+ /// preprocessed entities that \p Range encompasses.
virtual std::pair<unsigned, unsigned>
findPreprocessedEntitiesInRange(SourceRange Range) = 0;
/// \brief Optionally returns true or false if the preallocated preprocessed
- /// entity with index \arg Index came from file \arg FID.
+ /// entity with index \p Index came from file \p FID.
virtual llvm::Optional<bool> isPreprocessedEntityInFileID(unsigned Index,
FileID FID) {
return llvm::Optional<bool>();
@@ -343,14 +345,21 @@ namespace clang {
/// Negative values are used to indicate preprocessed entities
/// loaded from the external source while non-negative values are used to
/// indicate preprocessed entities introduced by the current preprocessor.
- /// If M is the number of loaded preprocessed entities, value -M
- /// corresponds to element 0 in the loaded entities vector, position -M+1
- /// corresponds to element 1 in the loaded entities vector, etc.
- typedef int PPEntityID;
-
- PPEntityID getPPEntityID(unsigned Index, bool isLoaded) const {
- return isLoaded ? PPEntityID(Index) - LoadedPreprocessedEntities.size()
- : Index;
+ /// Value -1 corresponds to element 0 in the loaded entities vector,
+ /// value -2 corresponds to element 1 in the loaded entities vector, etc.
+ /// Value 0 is an invalid value, the index to local entities is 1-based,
+ /// value 1 corresponds to element 0 in the local entities vector,
+ /// value 2 corresponds to element 1 in the local entities vector, etc.
+ class PPEntityID {
+ int ID;
+ explicit PPEntityID(int ID) : ID(ID) {}
+ friend class PreprocessingRecord;
+ public:
+ PPEntityID() : ID(0) {}
+ };
+
+ static PPEntityID getPPEntityID(unsigned Index, bool isLoaded) {
+ return isLoaded ? PPEntityID(-int(Index)-1) : PPEntityID(Index+1);
}
/// \brief Mapping from MacroInfo structures to their definitions.
@@ -372,7 +381,7 @@ namespace clang {
}
/// \brief Returns a pair of [Begin, End) indices of local preprocessed
- /// entities that \arg Range encompasses.
+ /// entities that \p Range encompasses.
std::pair<unsigned, unsigned>
findLocalPreprocessedEntitiesInRange(SourceRange Range) const;
unsigned findBeginLocalPreprocessedEntity(SourceLocation Loc) const;
@@ -419,7 +428,7 @@ namespace clang {
/// corresponds to element 0 in the loaded entities vector, position -M+1
/// corresponds to element 1 in the loaded entities vector, etc. This
/// gives us a reasonably efficient, source-order walk.
- PPEntityID Position;
+ int Position;
public:
typedef PreprocessedEntity *value_type;
@@ -430,11 +439,15 @@ namespace clang {
iterator() : Self(0), Position(0) { }
- iterator(PreprocessingRecord *Self, PPEntityID Position)
+ iterator(PreprocessingRecord *Self, int Position)
: Self(Self), Position(Position) { }
value_type operator*() const {
- return Self->getPreprocessedEntity(Position);
+ bool isLoaded = Position < 0;
+ unsigned Index = isLoaded ?
+ Self->LoadedPreprocessedEntities.size() + Position : Position;
+ PPEntityID ID = Self->getPPEntityID(Index, isLoaded);
+ return Self->getPreprocessedEntity(ID);
}
value_type operator[](difference_type D) {
@@ -539,15 +552,26 @@ namespace clang {
return iterator(this, PreprocessedEntities.size());
}
+ /// \brief begin/end iterator pair for the given range of loaded
+ /// preprocessed entities.
+ std::pair<iterator, iterator>
+ getIteratorsForLoadedRange(unsigned start, unsigned count) {
+ unsigned end = start + count;
+ assert(end <= LoadedPreprocessedEntities.size());
+ return std::make_pair(
+ iterator(this, int(start)-LoadedPreprocessedEntities.size()),
+ iterator(this, int(end)-LoadedPreprocessedEntities.size()));
+ }
+
/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
- /// that source range \arg R encompasses.
+ /// that source range \p R encompasses.
///
/// \param R the range to look for preprocessed entities.
///
std::pair<iterator, iterator> getPreprocessedEntitiesInRange(SourceRange R);
- /// \brief Returns true if the preprocessed entity that \arg PPEI iterator
- /// points to is coming from the file \arg FID.
+ /// \brief Returns true if the preprocessed entity that \p PPEI iterator
+ /// points to is coming from the file \p FID.
///
/// Can be used to avoid implicit deserializations of preallocated
/// preprocessed entities if we only care about entities of a specific file
@@ -597,10 +621,11 @@ namespace clang {
const Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath);
+ StringRef RelativePath,
+ const Module *Imported);
virtual void If(SourceLocation Loc, SourceRange ConditionRange);
virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
SourceLocation IfLoc);
@@ -613,11 +638,10 @@ namespace clang {
/// query.
struct {
SourceRange Range;
- std::pair<PPEntityID, PPEntityID> Result;
+ std::pair<int, int> Result;
} CachedRangeQuery;
- std::pair<PPEntityID, PPEntityID>
- getPreprocessedEntitiesInRangeSlow(SourceRange R);
+ std::pair<int, int> getPreprocessedEntitiesInRangeSlow(SourceRange R);
friend class ASTReader;
friend class ASTWriter;
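
The PPEntityID wrapper keeps the old signed encoding but makes it type-safe: 0 is invalid, +N names local entity N-1, and -N names loaded entity N-1, exactly as getPPEntityID computes above. A standalone decoder for that scheme:

    // Decode the PPEntityID integer encoding described above:
    //   0  -> invalid
    //  +N  -> local entity at index N-1
    //  -N  -> loaded entity at index N-1
    struct Decoded { bool Valid; bool Loaded; unsigned Index; };

    Decoded decode(int ID) {
      Decoded D = { ID != 0, ID < 0, 0 };
      if (ID > 0)
        D.Index = unsigned(ID) - 1;    // local entities are 1-based
      else if (ID < 0)
        D.Index = unsigned(-ID) - 1;   // loaded entities count down from -1
      return D;
    }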
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
index 02e3f1e..e9095fb 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
@@ -18,6 +18,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/PPMutationListener.h"
#include "clang/Lex/TokenLexer.h"
#include "clang/Lex/PTHManager.h"
#include "clang/Basic/Builtins.h"
@@ -54,6 +55,28 @@ class CodeCompletionHandler;
class DirectoryLookup;
class PreprocessingRecord;
class ModuleLoader;
+class PreprocessorOptions;
+
+/// \brief Stores token information for comparing actual tokens with
+/// predefined values. Only handles simple tokens and identifiers.
+class TokenValue {
+ tok::TokenKind Kind;
+ IdentifierInfo *II;
+
+public:
+ TokenValue(tok::TokenKind Kind) : Kind(Kind), II(0) {
+ assert(Kind != tok::raw_identifier && "Raw identifiers are not supported.");
+ assert(Kind != tok::identifier &&
+ "Identifiers should be created by TokenValue(IdentifierInfo *)");
+ assert(!tok::isLiteral(Kind) && "Literals are not supported.");
+ assert(!tok::isAnnotation(Kind) && "Annotations are not supported.");
+ }
+ TokenValue(IdentifierInfo *II) : Kind(tok::identifier), II(II) {}
+ bool operator==(const Token &Tok) const {
+ return Tok.getKind() == Kind &&
+ (!II || II == Tok.getIdentifierInfo());
+ }
+};
/// Preprocessor - This object engages in a tight little dance with the lexer to
/// efficiently preprocess tokens. Lexers know only about tokens within a
@@ -61,6 +84,7 @@ class ModuleLoader;
/// like the \#include stack, token expansion, etc.
///
class Preprocessor : public RefCountedBase<Preprocessor> {
+ llvm::IntrusiveRefCntPtr<PreprocessorOptions> PPOpts;
DiagnosticsEngine *Diags;
LangOptions &LangOpts;
const TargetInfo *Target;
@@ -98,6 +122,8 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
IdentifierInfo *Ident__has_include; // __has_include
IdentifierInfo *Ident__has_include_next; // __has_include_next
IdentifierInfo *Ident__has_warning; // __has_warning
+ IdentifierInfo *Ident__building_module; // __building_module
+ IdentifierInfo *Ident__MODULE__; // __MODULE__
SourceLocation DATELoc, TIMELoc;
unsigned CounterValue; // Next __COUNTER__ value.
@@ -265,6 +291,11 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
/// encountered (e.g. a file is \#included, etc).
PPCallbacks *Callbacks;
+ /// \brief Listener whose actions are invoked when an entity in the
+ /// preprocessor (e.g., a macro) that was loaded from an AST file is
+ /// later mutated.
+ PPMutationListener *Listener;
+
struct MacroExpandsInfo {
Token Tok;
MacroInfo *MI;
@@ -274,10 +305,12 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
};
SmallVector<MacroExpandsInfo, 2> DelayedMacroExpandsCallbacks;
- /// Macros - For each IdentifierInfo with 'HasMacro' set, we keep a mapping
- /// to the actual definition of the macro.
+ /// Macros - For each IdentifierInfo that was associated with a macro, we
+ /// keep a mapping to the history of all macro definitions and #undefs, in
+ /// reverse order (the latest one is at the head of the list).
llvm::DenseMap<IdentifierInfo*, MacroInfo*> Macros;
-
+ friend class ASTReader;
+
/// \brief Macros that we want to warn because they are not used at the end
/// of the translation unit; we store just their SourceLocations instead
/// something like MacroInfo*. The benefit of this is that when we are
@@ -362,10 +395,9 @@ private: // Cached tokens state.
/// allocation.
MacroInfoChain *MICache;
- MacroInfo *getInfoForMacro(IdentifierInfo *II) const;
-
public:
- Preprocessor(DiagnosticsEngine &diags, LangOptions &opts,
+ Preprocessor(llvm::IntrusiveRefCntPtr<PreprocessorOptions> PPOpts,
+ DiagnosticsEngine &diags, LangOptions &opts,
const TargetInfo *target,
SourceManager &SM, HeaderSearch &Headers,
ModuleLoader &TheModuleLoader,
@@ -382,6 +414,10 @@ public:
/// \param Target Information about the target.
void Initialize(const TargetInfo &Target);
+ /// \brief Retrieve the preprocessor options used to initialize this
+ /// preprocessor.
+ PreprocessorOptions &getPreprocessorOpts() const { return *PPOpts; }
+
DiagnosticsEngine &getDiagnostics() const { return *Diags; }
void setDiagnostics(DiagnosticsEngine &D) { Diags = &D; }
@@ -457,37 +493,70 @@ public:
Callbacks = C;
}
+ /// \brief Attach a preprocessor mutation listener to the preprocessor.
+ ///
+ /// The preprocessor mutation listener provides the ability to track
+ /// modifications to the preprocessor entities committed after they were
+ /// initially created.
+ void setPPMutationListener(PPMutationListener *Listener) {
+ this->Listener = Listener;
+ }
+
+ /// \brief Retrieve a pointer to the preprocessor mutation listener
+ /// associated with this preprocessor, if any.
+ PPMutationListener *getPPMutationListener() const { return Listener; }
+
/// \brief Given an identifier, return the MacroInfo it is \#defined to
/// or null if it isn't \#define'd.
MacroInfo *getMacroInfo(IdentifierInfo *II) const {
if (!II->hasMacroDefinition())
return 0;
- return getInfoForMacro(II);
+ MacroInfo *MI = getMacroInfoHistory(II);
+ assert(MI->getUndefLoc().isInvalid() && "Macro is undefined!");
+ return MI;
}
- /// \brief Specify a macro for this identifier.
- void setMacroInfo(IdentifierInfo *II, MacroInfo *MI,
- bool LoadedFromAST = false);
+ /// \brief Given an identifier, return the (probably #undef'd) MacroInfo
+ /// representing the most recent macro definition. One can iterate over all
+ /// previous macro definitions from it. This method should only be called for
+ /// identifiers that hadMacroDefinition().
+ MacroInfo *getMacroInfoHistory(IdentifierInfo *II) const;
- /// macro_iterator/macro_begin/macro_end - This allows you to walk the current
- /// state of the macro table. This visits every currently-defined macro.
+ /// \brief Specify a macro for this identifier.
+ void setMacroInfo(IdentifierInfo *II, MacroInfo *MI);
+ /// \brief Add a MacroInfo that was loaded from an AST file.
+ void addLoadedMacroInfo(IdentifierInfo *II, MacroInfo *MI,
+ MacroInfo *Hint = 0);
+ /// \brief Make the given MacroInfo, that was loaded from an AST file and
+ /// previously hidden, visible.
+ void makeLoadedMacroInfoVisible(IdentifierInfo *II, MacroInfo *MI);
+ /// \brief Undefine a macro for this identifier.
+ void clearMacroInfo(IdentifierInfo *II);
+
+ /// macro_iterator/macro_begin/macro_end - This allows you to walk the macro
+ /// history table. Currently defined macros have
+ /// IdentifierInfo::hasMacroDefinition() set and an empty
+ /// MacroInfo::getUndefLoc() at the head of the list.
typedef llvm::DenseMap<IdentifierInfo*,
MacroInfo*>::const_iterator macro_iterator;
macro_iterator macro_begin(bool IncludeExternalMacros = true) const;
macro_iterator macro_end(bool IncludeExternalMacros = true) const;
+ /// \brief Return the name of the macro defined before \p Loc that has
+ /// spelling \p Tokens. If there are multiple macros with the same spelling,
+ /// return the last one defined.
+ StringRef getLastMacroWithSpelling(SourceLocation Loc,
+ ArrayRef<TokenValue> Tokens) const;
+
const std::string &getPredefines() const { return Predefines; }
/// setPredefines - Set the predefines for this Preprocessor. These
/// predefines are automatically injected when parsing the main file.
void setPredefines(const char *P) { Predefines = P; }
void setPredefines(const std::string &P) { Predefines = P; }
- /// getIdentifierInfo - Return information about the specified preprocessor
- /// identifier token. The version of this method that takes two character
- /// pointers is preferred unless the identifier is already available as a
- /// string (this avoids allocation and copying of memory to construct an
- /// std::string).
+ /// Return information about the specified preprocessor
+ /// identifier token.
IdentifierInfo *getIdentifierInfo(StringRef Name) const {
return &Identifiers.get(Name);
}
@@ -501,8 +570,8 @@ public:
}
/// RemovePragmaHandler - Remove the specific pragma handler from
- /// the preprocessor. If \arg Namespace is non-null, then it should
- /// be the namespace that \arg Handler was added to. It is an error
+ /// the preprocessor. If \p Namespace is non-null, then it should
+ /// be the namespace that \p Handler was added to. It is an error
/// to remove a handler that has not been registered.
void RemovePragmaHandler(StringRef Namespace, PragmaHandler *Handler);
void RemovePragmaHandler(PragmaHandler *Handler) {
@@ -564,7 +633,8 @@ public:
///
/// ILEnd specifies the location of the ')' for a function-like macro or the
/// identifier for an object-like macro.
- void EnterMacro(Token &Identifier, SourceLocation ILEnd, MacroArgs *Args);
+ void EnterMacro(Token &Identifier, SourceLocation ILEnd, MacroInfo *Macro,
+ MacroArgs *Args);
/// EnterTokenStream - Add a "macro" context to the top of the include stack,
/// which will cause the lexer to start returning the specified tokens.
@@ -724,6 +794,14 @@ public:
CachedTokens[CachedLexPos-1] = Tok;
}
+ /// TypoCorrectToken - Update the current token to represent the provided
+ /// identifier, in order to cache an action performed by typo correction.
+ void TypoCorrectToken(const Token &Tok) {
+ assert(Tok.getIdentifierInfo() && "Expected identifier token");
+ if (CachedLexPos != 0 && isBacktrackEnabled())
+ CachedTokens[CachedLexPos-1] = Tok;
+ }
+
/// \brief Recompute the current lexer kind based on the CurLexer/CurPTHLexer/
/// CurTokenLexer pointers.
void recomputeCurLexerKind();
@@ -892,7 +970,7 @@ public:
/// CreateString - Plop the specified string into a scratch buffer and set the
/// specified token's location and length to it. If specified, the source
/// location provides a location of the expansion point of the token.
- void CreateString(const char *Buf, unsigned Len, Token &Tok,
+ void CreateString(StringRef Str, Token &Tok,
SourceLocation ExpansionLocStart = SourceLocation(),
SourceLocation ExpansionLocEnd = SourceLocation());
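A hedged usage sketch of the new StringRef-based overload (the token kind is
illustrative; `PP` is an assumed Preprocessor):

  clang::Token Tok;
  Tok.startToken();
  Tok.setKind(clang::tok::numeric_constant);
  PP.CreateString("42", Tok); // the spelling lives in the scratch buffer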
@@ -929,7 +1007,7 @@ public:
/// \brief Returns true if the given MacroID location points at the last
/// token of the macro expansion.
///
- /// \param MacroBegin If non-null and function returns true, it is set to
+ /// \param MacroEnd If non-null and function returns true, it is set to
/// end location of the macro.
bool isAtEndOfMacroExpansion(SourceLocation loc,
SourceLocation *MacroEnd = 0) const {
@@ -1103,10 +1181,10 @@ public:
/// from a macro as multiple tokens, which need to be glued together. This
/// occurs for code like:
/// \code
- /// \#define FOO <a/b.h>
+ /// \#define FOO <x/y.h>
/// \#include FOO
/// \endcode
- /// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
+ /// because in this case, "<x/y.h>" is returned as 7 tokens, not one.
///
/// This code concatenates and consumes tokens up to the '>' token. It
/// returns false if the > was found, otherwise it returns true if it finds
@@ -1289,6 +1367,8 @@ private:
// Macro handling.
void HandleDefineDirective(Token &Tok);
void HandleUndefDirective(Token &Tok);
+ void UndefineMacro(IdentifierInfo *II, MacroInfo *MI,
+ SourceLocation UndefLoc);
// Conditional Inclusion.
void HandleIfdefDirective(Token &Tok, bool isIfndef,
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
index 8a0b3cf..20fb8a0 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
@@ -69,8 +69,8 @@ protected:
/// we are currently in.
SmallVector<PPConditionalInfo, 4> ConditionalStack;
- PreprocessorLexer(const PreprocessorLexer&); // DO NOT IMPLEMENT
- void operator=(const PreprocessorLexer&); // DO NOT IMPLEMENT
+ PreprocessorLexer(const PreprocessorLexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const PreprocessorLexer &) LLVM_DELETED_FUNCTION;
friend class Preprocessor;
PreprocessorLexer(Preprocessor *pp, FileID fid);
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorOptions.h
index d86a923..e5fe373 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorOptions.h
@@ -7,9 +7,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_PREPROCESSOROPTIONS_H_
-#define LLVM_CLANG_FRONTEND_PREPROCESSOROPTIONS_H_
+#ifndef LLVM_CLANG_LEX_PREPROCESSOROPTIONS_H_
+#define LLVM_CLANG_LEX_PREPROCESSOROPTIONS_H_
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
@@ -38,7 +39,7 @@ enum ObjCXXARCStandardLibraryKind {
/// PreprocessorOptions - This class is used for passing the various options
/// used in preprocessor initialization to InitializePreprocessor().
-class PreprocessorOptions {
+class PreprocessorOptions : public llvm::RefCountedBase<PreprocessorOptions> {
public:
std::vector<std::pair<std::string, bool/*isUndef*/> > Macros;
std::vector<std::string> Includes;
@@ -65,10 +66,6 @@ public:
/// precompiled headers.
bool DisablePCHValidation;
- /// \brief When true, disables the use of the stat cache within a
- /// precompiled header or AST file.
- bool DisableStatCache;
-
/// \brief When true, a PCH with compiler errors will not be rejected.
bool AllowPCHWithCompilerErrors;
@@ -167,7 +164,7 @@ public:
public:
PreprocessorOptions() : UsePredefines(true), DetailedRecord(false),
DetailedRecordConditionalDirectives(false),
- DisablePCHValidation(false), DisableStatCache(false),
+ DisablePCHValidation(false),
AllowPCHWithCompilerErrors(false),
DumpDeserializedPCHDecls(false),
PrecompiledPreambleBytes(0, true),
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Token.h b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
index 9c5a023..50b86c8 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Token.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
@@ -90,26 +90,18 @@ public:
/// \brief Return true if this is a raw identifier (when lexing
/// in raw mode) or a non-keyword identifier (when lexing in non-raw mode).
bool isAnyIdentifier() const {
- return is(tok::identifier) || is(tok::raw_identifier);
+ return tok::isAnyIdentifier(getKind());
}
- /// isLiteral - Return true if this is a "literal", like a numeric
+ /// \brief Return true if this is a "literal", like a numeric
/// constant, string, etc.
bool isLiteral() const {
- return is(tok::numeric_constant) || is(tok::char_constant) ||
- is(tok::wide_char_constant) || is(tok::utf16_char_constant) ||
- is(tok::utf32_char_constant) || is(tok::string_literal) ||
- is(tok::wide_string_literal) || is(tok::utf8_string_literal) ||
- is(tok::utf16_string_literal) || is(tok::utf32_string_literal) ||
- is(tok::angle_string_literal);
+ return tok::isLiteral(getKind());
}
+ /// \brief Return true if this is any of tok::annot_* kind tokens.
bool isAnnotation() const {
-#define ANNOTATION(NAME) \
- if (is(tok::annot_##NAME)) \
- return true;
-#include "clang/Basic/TokenKinds.def"
- return false;
+ return tok::isAnnotation(getKind());
}
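Sketch of what this refactoring enables (illustrative, under the assumption
that the tok:: helpers are the free functions the methods above now forward
to): code that only has a tok::TokenKind, not a full Token, can reuse the same
classification.

  #include "clang/Basic/TokenKinds.h"

  bool isExpressionAtom(clang::tok::TokenKind K) {
    return clang::tok::isLiteral(K) || clang::tok::isAnyIdentifier(K);
  }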
/// \brief Return a source location identifier for the specified
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h
index 1330ad5..090402a 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h
@@ -22,7 +22,7 @@ namespace clang {
class Token;
class MacroArgs;
-/// TokenLexer - This implements a lexer that returns token from a macro body
+/// TokenLexer - This implements a lexer that returns tokens from a macro body
/// or token stream instead of lexing from a character buffer. This is used for
/// macro expansion and _Pragma handling, for example.
///
@@ -91,24 +91,25 @@ class TokenLexer {
/// should not be subject to further macro expansion.
bool DisableMacroExpansion : 1;
- TokenLexer(const TokenLexer&); // DO NOT IMPLEMENT
- void operator=(const TokenLexer&); // DO NOT IMPLEMENT
+ TokenLexer(const TokenLexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const TokenLexer &) LLVM_DELETED_FUNCTION;
public:
/// Create a TokenLexer for the specified macro with the specified actual
/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
/// ILEnd specifies the location of the ')' for a function-like macro or the
/// identifier for an object-like macro.
- TokenLexer(Token &Tok, SourceLocation ILEnd, MacroArgs *ActualArgs,
- Preprocessor &pp)
+ TokenLexer(Token &Tok, SourceLocation ILEnd, MacroInfo *MI,
+ MacroArgs *ActualArgs, Preprocessor &pp)
: Macro(0), ActualArgs(0), PP(pp), OwnsTokens(false) {
- Init(Tok, ILEnd, ActualArgs);
+ Init(Tok, ILEnd, MI, ActualArgs);
}
/// Init - Initialize this TokenLexer to expand from the specified macro
/// with the specified argument information. Note that this ctor takes
/// ownership of the ActualArgs pointer. ILEnd specifies the location of the
/// ')' for a function-like macro or the identifier for an object-like macro.
- void Init(Token &Tok, SourceLocation ILEnd, MacroArgs *ActualArgs);
+ void Init(Token &Tok, SourceLocation ILEnd, MacroInfo *MI,
+ MacroArgs *ActualArgs);
/// Create a TokenLexer for the specified token stream. If 'OwnsTokens' is
/// specified, this takes ownership of the tokens and delete[]'s them when
@@ -168,7 +169,7 @@ private:
/// first token on the next line.
void HandleMicrosoftCommentPaste(Token &Tok);
- /// \brief If \arg loc is a FileID and points inside the current macro
+ /// \brief If \p loc is a FileID and points inside the current macro
/// definition, returns the appropriate source location pointing at the
/// macro expansion source location entry.
SourceLocation getExpansionLocForMacroDefLoc(SourceLocation loc) const;
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
index 4ef92f7..c433344 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
@@ -30,6 +30,7 @@ namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
+ class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
@@ -163,6 +164,10 @@ class Parser : public CodeCompletionHandler {
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_override;
+  // C++ type trait keywords that can be reverted to identifiers and
+  // still be used as type traits.
+ llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertableTypeTraits;
+
OwningPtr<PragmaHandler> AlignHandler;
OwningPtr<PragmaHandler> GCCVisibilityHandler;
OwningPtr<PragmaHandler> OptionsHandler;
@@ -204,6 +209,9 @@ class Parser : public CodeCompletionHandler {
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
+ /// \brief Identifiers which have been declared within a tentative parse.
+ SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
+
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
@@ -244,7 +252,7 @@ public:
typedef clang::TypeResult TypeResult;
typedef Expr *ExprArg;
- typedef ASTMultiPtr<Stmt*> MultiStmtArg;
+ typedef llvm::MutableArrayRef<Stmt*> MultiStmtArg;
typedef Sema::FullExprArg FullExprArg;
/// Adorns a ExprResult with Actions to make it an ExprResult
@@ -278,6 +286,23 @@ public:
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
+ /// ConsumeToken - Consume the current 'peek token' and lex the next one.
+ /// This does not work with all kinds of tokens: strings and specific other
+ /// tokens must be consumed with custom methods below. This returns the
+ /// location of the consumed token.
+ SourceLocation ConsumeToken() {
+ assert(!isTokenStringLiteral() && !isTokenParen() && !isTokenBracket() &&
+ !isTokenBrace() &&
+ "Should consume special tokens with Consume*Token");
+
+ if (Tok.is(tok::code_completion))
+ return handleUnexpectedCodeCompletionToken();
+
+ PrevTokLocation = Tok.getLocation();
+ PP.Lex(Tok);
+ return PrevTokLocation;
+ }
+
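The assertion above exists because the parser keeps nesting depths for parens,
brackets, and braces that only the dedicated Consume*Token methods maintain. A
standalone, hypothetical sketch of that bookkeeping (not the Parser's actual
code):

  struct DelimDepths {
    unsigned Paren, Bracket, Brace;
    DelimDepths() : Paren(0), Bracket(0), Brace(0) {}
    void consume(char C) { // adjust the depth this delimiter belongs to
      switch (C) {
      case '(': ++Paren;                break;
      case ')': if (Paren)   --Paren;   break;
      case '[': ++Bracket;              break;
      case ']': if (Bracket) --Bracket; break;
      case '{': ++Brace;                break;
      case '}': if (Brace)   --Brace;   break;
      }
    }
  };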
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
@@ -310,23 +335,6 @@ private:
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
- /// ConsumeToken - Consume the current 'peek token' and lex the next one.
- /// This does not work with all kinds of tokens: strings and specific other
- /// tokens must be consumed with custom methods below. This returns the
- /// location of the consumed token.
- SourceLocation ConsumeToken() {
- assert(!isTokenStringLiteral() && !isTokenParen() && !isTokenBracket() &&
- !isTokenBrace() &&
- "Should consume special tokens with Consume*Token");
-
- if (Tok.is(tok::code_completion))
- return handleUnexpectedCodeCompletionToken();
-
- PrevTokLocation = Tok.getLocation();
- PP.Lex(Tok);
- return PrevTokLocation;
- }
-
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
@@ -433,6 +441,34 @@ private:
/// #pragma pack...
void HandlePragmaPack();
+ /// \brief Handle the annotation token produced for
+ /// #pragma ms_struct...
+ void HandlePragmaMSStruct();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma align...
+ void HandlePragmaAlign();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma weak id...
+ void HandlePragmaWeak();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma weak id = id...
+ void HandlePragmaWeakAlias();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma redefine_extname...
+ void HandlePragmaRedefineExtname();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma STDC FP_CONTRACT...
+ void HandlePragmaFPContract();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma OPENCL EXTENSION...
+ void HandlePragmaOpenCLExtension();
+
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
@@ -445,6 +481,7 @@ private:
return PP.LookAhead(N-1);
}
+public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
@@ -456,6 +493,7 @@ private:
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
+private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
@@ -478,12 +516,36 @@ private:
Tok.setAnnotationValue(ER.get());
}
+public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken(bool EnteringContext = false,
bool NeedType = false);
+ bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(bool EnteringContext,
+ bool NeedType,
+ CXXScopeSpec &SS,
+ bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
+private:
+ enum AnnotatedNameKind {
+ /// Annotation has failed and emitted an error.
+ ANK_Error,
+ /// The identifier is a tentatively-declared name.
+ ANK_TentativeDecl,
+ /// The identifier is a template name. FIXME: Add an annotation for that.
+ ANK_TemplateName,
+ /// The identifier can't be resolved.
+ ANK_Unresolved,
+ /// Annotation was successful.
+ ANK_Success
+ };
+ AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand,
+ CorrectionCandidateCallback *CCC = 0);
+
+ /// Push a tok::annot_cxxscope token onto the token stream.
+ void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
+
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
@@ -529,12 +591,15 @@ private:
class TentativeParsingAction {
Parser &P;
Token PrevTok;
+ size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevTok = P.Tok;
+ PrevTentativelyDeclaredIdentifierCount =
+ P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
@@ -543,6 +608,8 @@ private:
}
void Commit() {
assert(isActive && "Parsing action was finished!");
+ P.TentativelyDeclaredIdentifiers.resize(
+ PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
@@ -550,6 +617,8 @@ private:
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.Tok = PrevTok;
+ P.TentativelyDeclaredIdentifiers.resize(
+ PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
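A hedged sketch of the commit-or-revert pattern this class supports; the
member function and the disambiguation call are illustrative, not lifted from
the patch:

  bool Parser::tryDisambiguateAsDeclaration() { // hypothetical member
    TentativeParsingAction TPA(*this);
    bool IsDecl = isCXXSimpleDeclaration(/*AllowForRangeDecl=*/false);
    if (IsDecl)
      TPA.Commit(); // keep the consumed tokens
    else
      TPA.Revert(); // backtrack the token stream
    // Either way, TentativelyDeclaredIdentifiers is trimmed back to the
    // count captured at construction time.
    return IsDecl;
  }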
@@ -608,6 +677,7 @@ private:
/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
+public:
//===--------------------------------------------------------------------===//
// Scope manipulation
@@ -619,8 +689,8 @@ private:
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
- ParseScope(const ParseScope&); // do not implement
- ParseScope& operator=(const ParseScope&); // do not implement
+ ParseScope(const ParseScope &) LLVM_DELETED_FUNCTION;
+ void operator=(const ParseScope &) LLVM_DELETED_FUNCTION;
public:
// ParseScope - Construct a new object to manage a scope in the
@@ -655,12 +725,13 @@ private:
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
+private:
/// \brief RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
- ParseScopeFlags(const ParseScopeFlags &); // do not implement
- void operator=(const ParseScopeFlags &); // do not implement
+ ParseScopeFlags(const ParseScopeFlags &) LLVM_DELETED_FUNCTION;
+ void operator=(const ParseScopeFlags &) LLVM_DELETED_FUNCTION;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
@@ -682,6 +753,7 @@ private:
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
+public:
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless DontConsume is true). Because we cannot guarantee that the
/// token will ever occur, this skips to the next token, or to some likely
@@ -713,6 +785,7 @@ private:
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
+private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
@@ -775,9 +848,16 @@ private:
void addDecl(Decl *D) { Decls.push_back(D); }
};
- /// A list of late parsed attributes. Used by ParseGNUAttributes.
- typedef llvm::SmallVector<LateParsedAttribute*, 2> LateParsedAttrList;
+ // A list of late-parsed attributes. Used by ParseGNUAttributes.
+ class LateParsedAttrList: public llvm::SmallVector<LateParsedAttribute*, 2> {
+ public:
+ LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
+
+ bool parseSoon() { return ParseSoon; }
+ private:
+ bool ParseSoon; // Are we planning to parse these shortly after creation?
+ };
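Hedged usage sketch: a caller that wants such attributes parsed shortly after
the declarator, rather than at the end of the class, would construct the list
with ParseSoon set and pass it to MaybeParseGNUAttributes (shown later in this
header):

  LateParsedAttrList LateAttrs(/*PSoon=*/true);
  // ... inside Parser member code:
  // MaybeParseGNUAttributes(D, &LateAttrs);
  if (LateAttrs.parseSoon()) {
    // parse the lexed attribute tokens immediately
  }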
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
@@ -880,9 +960,9 @@ private:
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
- ParsingClass(Decl *TagOrTemplate, bool TopLevelClass)
+ ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
- TagOrTemplate(TagOrTemplate) { }
+ IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// \brief Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
@@ -893,6 +973,9 @@ private:
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
+ /// \brief Whether this class is an __interface.
+ bool IsInterface : 1;
+
/// \brief The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
@@ -919,9 +1002,10 @@ private:
Sema::ParsingClassState State;
public:
- ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass)
+ ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
+ bool IsInterface)
: P(P), Popped(false),
- State(P.PushParsingClass(TagOrTemplate, TopLevelClass)) {
+ State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// \brief Pop this class of the stack.
@@ -1009,7 +1093,7 @@ private:
void LateTemplateParser(const FunctionDecl *FD);
Sema::ParsingClassState
- PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass);
+ PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
@@ -1145,6 +1229,7 @@ private:
Decl *ParseObjCMethodDefinition();
+public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
@@ -1160,6 +1245,7 @@ private:
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
+private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
@@ -1174,6 +1260,9 @@ private:
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast);
+ /// Returns true if the next token cannot start an expression.
+ bool isNotExpressionStart();
+
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
@@ -1245,6 +1334,8 @@ private:
bool *MayBePseudoDestructor = 0,
bool IsTypename = false);
+ void CheckForLParenAfterColonColon();
+
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
@@ -1379,8 +1470,16 @@ private:
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
+  /// A SmallVector of statements, with stack size 32 (as that is the only
+  /// size used).
+  typedef SmallVector<Stmt*, 32> StmtVector;
+  /// A SmallVector of expressions, with stack size 12 (the maximum used).
+ typedef SmallVector<Expr*, 12> ExprVector;
+ /// A SmallVector of types.
+ typedef SmallVector<ParsedType, 12> TypeVector;
+
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = 0) {
- StmtVector Stmts(Actions);
+ StmtVector Stmts;
return ParseStatementOrDeclaration(Stmts, true, TrailingElseLoc);
}
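Sketch of the container change's effect (an assumption about intent, not patch
text): statement lists are now plain SmallVectors of unowned pointers, so no
Actions handle is needed to build one, and they convert to MultiStmtArg
(llvm::MutableArrayRef<Stmt*>) at call sites:

  StmtVector Stmts;          // previously: StmtVector Stmts(Actions);
  Stmts.push_back(SomeStmt); // SomeStmt is a hypothetical Stmt*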
StmtResult ParseStatementOrDeclaration(StmtVector &Stmts,
@@ -1399,6 +1498,7 @@ private:
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
+ void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(ExprResult &ExprResult,
Decl *&DeclResult,
@@ -1463,8 +1563,8 @@ private:
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
- StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc);
- StmtResult ParseCXXCatchBlock();
+ StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
+ StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
@@ -1578,6 +1678,15 @@ private:
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
+  /// \brief Return true if we know that we are definitely looking at a
+  /// decl-specifier that isn't part of an expression such as a function-style
+  /// cast. Return false if it's not a decl-specifier, or if we're not sure.
+ bool isKnownToBeDeclarationSpecifier() {
+ if (getLangOpts().CPlusPlus)
+ return isCXXDeclarationSpecifier() == TPResult::True();
+ return isDeclarationSpecifier(true);
+ }
+
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
@@ -1707,6 +1816,11 @@ private:
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False(),
bool *HasMissingTypename = 0);
+ /// \brief Determine whether an identifier has been tentatively declared as a
+ /// non-type. Such tentative declarations should not be found to name a type
+ /// during a tentative parse, but also should not be annotated as a non-type.
+ bool isTentativelyDeclared(IdentifierInfo *II);
+
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error().
// Returning TPResult::True()/False() indicates that the ambiguity was
@@ -1724,11 +1838,14 @@ private:
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
+public:
TypeResult ParseTypeName(SourceRange *Range = 0,
Declarator::TheContext Context
= Declarator::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = 0);
+
+private:
void ParseBlockId(SourceLocation CaretLoc);
// Check for the start of a C++11 attribute-specifier-seq in a context where
@@ -1748,6 +1865,11 @@ private:
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);
+  // Forbid C++11 attributes that appear in certain syntactic locations which
+  // the standard permits but we don't support yet, for example, attributes
+  // that appertain to decl specifiers.
+ void ProhibitCXX11Attributes(ParsedAttributesWithRange &attrs);
+
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = 0) {
if (Tok.is(tok::kw___attribute)) {
@@ -1769,7 +1891,10 @@ private:
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
- SourceLocation *EndLoc);
+ SourceLocation *EndLoc,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ AttributeList::Syntax Syntax);
void MaybeParseCXX0XAttributes(Declarator &D) {
if (getLangOpts().CPlusPlus0x && isCXX11AttributeSpecifier()) {
@@ -1799,6 +1924,7 @@ private:
SourceLocation *EndLoc = 0);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = 0);
+
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
@@ -1856,7 +1982,7 @@ private:
VirtSpecifiers::Specifier isCXX0XVirtSpecifier() const {
return isCXX0XVirtSpecifier(Tok);
}
- void ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS);
+ void ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface);
bool isCXX0XFinalKeyword() const;
@@ -2004,6 +2130,8 @@ private:
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
+
+public:
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
@@ -2011,6 +2139,7 @@ private:
SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
+private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/DeltaTree.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/DeltaTree.h
index f32906a..a6109bf 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/DeltaTree.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/DeltaTree.h
@@ -14,6 +14,8 @@
#ifndef CLANG_REWRITE_DELTATREE_H
#define CLANG_REWRITE_DELTATREE_H
+#include "llvm/Support/Compiler.h"
+
namespace clang {
/// DeltaTree - a multiway search tree (BTree) structure with some fancy
@@ -25,7 +27,7 @@ namespace clang {
/// as well, without traversing the whole tree.
class DeltaTree {
void *Root; // "DeltaTreeNode *"
- void operator=(const DeltaTree&); // DO NOT IMPLEMENT
+ void operator=(const DeltaTree &) LLVM_DELETED_FUNCTION;
public:
DeltaTree();
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/HTMLRewrite.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/HTMLRewrite.h
index 88caf85..88caf85 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/HTMLRewrite.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/HTMLRewrite.h
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/RewriteRope.h
index cb3f8a8..9f1bbe5 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/RewriteRope.h
@@ -14,6 +14,8 @@
#ifndef LLVM_CLANG_REWRITEROPE_H
#define LLVM_CLANG_REWRITEROPE_H
+#include "llvm/Support/Compiler.h"
+
#include <cstring>
#include <cassert>
#include <cstddef>
@@ -33,11 +35,11 @@ namespace clang {
char Data[1]; // Variable sized.
void addRef() {
- if (this) ++RefCount;
+ ++RefCount;
}
void dropRef() {
- if (this && --RefCount == 0)
+ if (--RefCount == 0)
delete [] (char*)this;
}
};
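The removed `if (this)` tests relied on calling a member function through a
null pointer, which is undefined behavior; the patch instead moves the null
checks to the call sites (see RopePiece below). A standalone sketch of the
resulting pattern, with hypothetical names:

  struct Counted { unsigned RefCount; };
  inline void retain(Counted *P)  { if (P) ++P->RefCount; }
  inline void release(Counted *P) {
    if (P && --P->RefCount == 0)
      delete P;
  }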
@@ -63,22 +65,27 @@ namespace clang {
RopePiece(RopeRefCountString *Str, unsigned Start, unsigned End)
: StrData(Str), StartOffs(Start), EndOffs(End) {
- StrData->addRef();
+ if (StrData)
+ StrData->addRef();
}
RopePiece(const RopePiece &RP)
: StrData(RP.StrData), StartOffs(RP.StartOffs), EndOffs(RP.EndOffs) {
- StrData->addRef();
+ if (StrData)
+ StrData->addRef();
}
~RopePiece() {
- StrData->dropRef();
+ if (StrData)
+ StrData->dropRef();
}
void operator=(const RopePiece &RHS) {
if (StrData != RHS.StrData) {
- StrData->dropRef();
+ if (StrData)
+ StrData->dropRef();
StrData = RHS.StrData;
- StrData->addRef();
+ if (StrData)
+ StrData->addRef();
}
StartOffs = RHS.StartOffs;
EndOffs = RHS.EndOffs;
@@ -148,7 +155,7 @@ namespace clang {
class RopePieceBTree {
void /*RopePieceBTreeNode*/ *Root;
- void operator=(const RopePieceBTree &); // DO NOT IMPLEMENT
+ void operator=(const RopePieceBTree &) LLVM_DELETED_FUNCTION;
public:
RopePieceBTree();
RopePieceBTree(const RopePieceBTree &RHS);
@@ -191,7 +198,8 @@ public:
~RewriteRope() {
// If we had an allocation buffer, drop our reference to it.
- AllocBuffer->dropRef();
+ if (AllocBuffer)
+ AllocBuffer->dropRef();
}
typedef RopePieceBTree::iterator iterator;
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/Rewriter.h
index 5ffd88b..a33ea13 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/Rewriter.h
@@ -16,8 +16,8 @@
#define LLVM_CLANG_REWRITER_H
#include "clang/Basic/SourceLocation.h"
-#include "clang/Rewrite/DeltaTree.h"
-#include "clang/Rewrite/RewriteRope.h"
+#include "clang/Rewrite/Core/DeltaTree.h"
+#include "clang/Rewrite/Core/RewriteRope.h"
#include "llvm/ADT/StringRef.h"
#include <cstring>
#include <map>
@@ -183,7 +183,7 @@ public:
/// location was not rewritable, false otherwise.
///
/// \param indentNewLines if true new lines in the string are indented
- /// using the indentation of the source line in position \arg Loc.
+ /// using the indentation of the source line in position \p Loc.
bool InsertText(SourceLocation Loc, StringRef Str,
bool InsertAfter = true, bool indentNewLines = false);
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/TokenRewriter.h
index 894db09..ec0bb5b 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Core/TokenRewriter.h
@@ -43,8 +43,8 @@ namespace clang {
///
OwningPtr<ScratchBuffer> ScratchBuf;
- TokenRewriter(const TokenRewriter&); // DO NOT IMPLEMENT
- void operator=(const TokenRewriter&); // DO NOT IMPLEMENT.
+ TokenRewriter(const TokenRewriter &) LLVM_DELETED_FUNCTION;
+ void operator=(const TokenRewriter &) LLVM_DELETED_FUNCTION;
public:
/// TokenRewriter - This creates a TokenRewriter for the file with the
/// specified FileID.
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/ASTConsumers.h
index c9c92e3..c9c92e3 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/ASTConsumers.h
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/FixItRewriter.h
index 44f0611..f12a034 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/FixItRewriter.h
@@ -17,7 +17,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Edit/EditedSource.h"
namespace clang {
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/FrontendActions.h
index ea876d9..ea876d9 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/FrontendActions.h
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/Rewriters.h
index f5ade5a..f5ade5a 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Frontend/Rewriters.h
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
index bf35886..2e8b0c0 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
@@ -140,10 +140,10 @@ private:
return *reinterpret_cast<const TypeTagForDatatypeData *>(this + 1);
}
- AttributeList(const AttributeList &); // DO NOT IMPLEMENT
- void operator=(const AttributeList &); // DO NOT IMPLEMENT
- void operator delete(void *); // DO NOT IMPLEMENT
- ~AttributeList(); // DO NOT IMPLEMENT
+ AttributeList(const AttributeList &) LLVM_DELETED_FUNCTION;
+ void operator=(const AttributeList &) LLVM_DELETED_FUNCTION;
+ void operator delete(void *) LLVM_DELETED_FUNCTION;
+ ~AttributeList() LLVM_DELETED_FUNCTION;
size_t allocated_size() const;
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
index d43aaaf..b128bd8 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -439,9 +439,6 @@ private:
/// \brief The availability of this code-completion result.
unsigned Availability : 2;
-
- /// \brief The kind of the parent context.
- unsigned ParentKind : 14;
/// \brief The name of the parent context.
StringRef ParentName;
@@ -450,13 +447,13 @@ private:
/// entity being completed by this result.
const char *BriefComment;
- CodeCompletionString(const CodeCompletionString &); // DO NOT IMPLEMENT
- CodeCompletionString &operator=(const CodeCompletionString &); // DITTO
+ CodeCompletionString(const CodeCompletionString &) LLVM_DELETED_FUNCTION;
+ void operator=(const CodeCompletionString &) LLVM_DELETED_FUNCTION;
CodeCompletionString(const Chunk *Chunks, unsigned NumChunks,
unsigned Priority, CXAvailabilityKind Availability,
const char **Annotations, unsigned NumAnnotations,
- CXCursorKind ParentKind, StringRef ParentName,
+ StringRef ParentName,
const char *BriefComment);
~CodeCompletionString() { }
@@ -489,11 +486,6 @@ public:
/// \brief Retrieve the annotation string specified by \c AnnotationNr.
const char *getAnnotation(unsigned AnnotationNr) const;
-
- /// \brief Retrieve parent context's cursor kind.
- CXCursorKind getParentContextKind() const {
- return (CXCursorKind)ParentKind;
- }
/// \brief Retrieve the name of the parent context.
StringRef getParentContextName() const {
@@ -577,7 +569,6 @@ private:
CodeCompletionTUInfo &CCTUInfo;
unsigned Priority;
CXAvailabilityKind Availability;
- CXCursorKind ParentKind;
StringRef ParentName;
const char *BriefComment;
@@ -591,14 +582,14 @@ public:
CodeCompletionTUInfo &CCTUInfo)
: Allocator(Allocator), CCTUInfo(CCTUInfo),
Priority(0), Availability(CXAvailability_Available),
- ParentKind(CXCursor_NotImplemented), BriefComment(NULL) { }
+ BriefComment(NULL) { }
CodeCompletionBuilder(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
unsigned Priority, CXAvailabilityKind Availability)
: Allocator(Allocator), CCTUInfo(CCTUInfo),
Priority(Priority), Availability(Availability),
- ParentKind(CXCursor_NotImplemented), BriefComment(NULL) { }
+ BriefComment(NULL) { }
/// \brief Retrieve the allocator into which the code completion
/// strings should be allocated.
@@ -642,7 +633,6 @@ public:
void addBriefComment(StringRef Comment);
- CXCursorKind getParentKind() const { return ParentKind; }
StringRef getParentName() const { return ParentName; }
};
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
index 792b0c6..0728e87 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
@@ -266,6 +266,7 @@ public:
static const TST TST_enum = clang::TST_enum;
static const TST TST_union = clang::TST_union;
static const TST TST_struct = clang::TST_struct;
+ static const TST TST_interface = clang::TST_interface;
static const TST TST_class = clang::TST_class;
static const TST TST_typename = clang::TST_typename;
static const TST TST_typeofType = clang::TST_typeofType;
@@ -378,11 +379,12 @@ private:
}
static bool isDeclRep(TST T) {
return (T == TST_enum || T == TST_struct ||
- T == TST_union || T == TST_class);
+ T == TST_interface || T == TST_union ||
+ T == TST_class);
}
- DeclSpec(const DeclSpec&); // DO NOT IMPLEMENT
- void operator=(const DeclSpec&); // DO NOT IMPLEMENT
+ DeclSpec(const DeclSpec &) LLVM_DELETED_FUNCTION;
+ void operator=(const DeclSpec &) LLVM_DELETED_FUNCTION;
public:
DeclSpec(AttributeFactory &attrFactory)
@@ -598,8 +600,7 @@ public:
}
bool SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
- unsigned &DiagID, const LangOptions &Lang,
- bool IsTypeSpec);
+ unsigned &DiagID, const LangOptions &Lang);
bool SetFunctionSpecInline(SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID);
@@ -781,8 +782,9 @@ private:
/// \brief Represents a C++ unqualified-id that has been parsed.
class UnqualifiedId {
private:
- const UnqualifiedId &operator=(const UnqualifiedId &); // DO NOT IMPLEMENT
-
+ UnqualifiedId(const UnqualifiedId &Other) LLVM_DELETED_FUNCTION;
+ const UnqualifiedId &operator=(const UnqualifiedId &) LLVM_DELETED_FUNCTION;
+
public:
/// \brief Describes the kind of unqualified-id parsed.
enum IdKind {
@@ -857,17 +859,6 @@ public:
UnqualifiedId() : Kind(IK_Identifier), Identifier(0) { }
- /// \brief Do not use this copy constructor. It is temporary, and only
- /// exists because we are holding FieldDeclarators in a SmallVector when we
- /// don't actually need them.
- ///
- /// FIXME: Kill this copy constructor.
- UnqualifiedId(const UnqualifiedId &Other)
- : Kind(IK_Identifier), Identifier(Other.Identifier),
- StartLocation(Other.StartLocation), EndLocation(Other.EndLocation) {
- assert(Other.Kind == IK_Identifier && "Cannot copy non-identifiers");
- }
-
/// \brief Clear out this unqualified-id, setting it to default (invalid)
/// state.
void clear() {
@@ -1110,7 +1101,7 @@ struct DeclaratorChunk {
/// \brief Whether the ref-qualifier (if any) is an lvalue reference.
/// Otherwise, it's an rvalue reference.
unsigned RefQualifierIsLValueRef : 1;
-
+
/// The type qualifiers: const/volatile/restrict.
/// The qualifier bitmask values are the same as in QualType.
unsigned TypeQuals : 3;
@@ -1125,9 +1116,15 @@ struct DeclaratorChunk {
/// specified.
unsigned HasTrailingReturnType : 1;
+ /// The location of the left parenthesis in the source.
+ unsigned LParenLoc;
+
/// When isVariadic is true, the location of the ellipsis in the source.
unsigned EllipsisLoc;
+ /// The location of the right parenthesis in the source.
+ unsigned RParenLoc;
+
/// NumArgs - This is the number of formal arguments provided for the
/// declarator.
unsigned NumArgs;
@@ -1202,10 +1199,19 @@ struct DeclaratorChunk {
bool isKNRPrototype() const {
return !hasPrototype && NumArgs != 0;
}
-
+
+ SourceLocation getLParenLoc() const {
+ return SourceLocation::getFromRawEncoding(LParenLoc);
+ }
+
SourceLocation getEllipsisLoc() const {
return SourceLocation::getFromRawEncoding(EllipsisLoc);
}
+
+ SourceLocation getRParenLoc() const {
+ return SourceLocation::getFromRawEncoding(RParenLoc);
+ }
+
SourceLocation getExceptionSpecLoc() const {
return SourceLocation::getFromRawEncoding(ExceptionSpecLoc);
}
@@ -1358,11 +1364,13 @@ struct DeclaratorChunk {
/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
/// "TheDeclarator" is the declarator that this will be added to.
- static DeclaratorChunk getFunction(bool hasProto, bool isVariadic,
+ static DeclaratorChunk getFunction(bool hasProto,
bool isAmbiguous,
- SourceLocation EllipsisLoc,
+ SourceLocation LParenLoc,
ParamInfo *ArgInfo, unsigned NumArgs,
- unsigned TypeQuals,
+ SourceLocation EllipsisLoc,
+ SourceLocation RParenLoc,
+ unsigned TypeQuals,
bool RefQualifierIsLvalueRef,
SourceLocation RefQualifierLoc,
SourceLocation ConstQualifierLoc,
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
index c241266..a20480c 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
@@ -124,6 +124,7 @@ public:
static DelayedDiagnostic makeDeprecation(SourceLocation Loc,
const NamedDecl *D,
const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
StringRef Msg);
static DelayedDiagnostic makeAccess(SourceLocation Loc,
@@ -193,12 +194,17 @@ public:
return DeprecationData.UnknownObjCClass;
}
+ const ObjCPropertyDecl *getObjCProperty() const {
+ return DeprecationData.ObjCProperty;
+ }
+
private:
union {
/// Deprecation.
struct {
const NamedDecl *Decl;
const ObjCInterfaceDecl *UnknownObjCClass;
+ const ObjCPropertyDecl *ObjCProperty;
const char *Message;
size_t MessageLen;
} DeprecationData;
@@ -220,9 +226,8 @@ class DelayedDiagnosticPool {
const DelayedDiagnosticPool *Parent;
llvm::SmallVector<DelayedDiagnostic, 4> Diagnostics;
- // Do not implement.
- DelayedDiagnosticPool(const DelayedDiagnosticPool &other);
- DelayedDiagnosticPool &operator=(const DelayedDiagnosticPool &other);
+ DelayedDiagnosticPool(const DelayedDiagnosticPool &) LLVM_DELETED_FUNCTION;
+ void operator=(const DelayedDiagnosticPool &) LLVM_DELETED_FUNCTION;
public:
DelayedDiagnosticPool(const DelayedDiagnosticPool *parent) : Parent(parent) {}
~DelayedDiagnosticPool() {
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
index 785bf6a..7a59849 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
@@ -175,7 +175,6 @@ public:
static bool classof(const ExternalASTSource *Source) {
return Source->SemaSource;
}
- static bool classof(const ExternalSemaSource *) { return true; }
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
index 77659be..0b0af0c 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
@@ -901,9 +901,9 @@ public:
/// \brief Add a constructor-initialization step.
///
- /// \arg FromInitList The constructor call is syntactically an initializer
+ /// \param FromInitList The constructor call is syntactically an initializer
/// list.
- /// \arg AsInitList The constructor is called as an init list constructor.
+ /// \param AsInitList The constructor is called as an init list constructor.
void AddConstructorInitializationStep(CXXConstructorDecl *Constructor,
AccessSpecifier Access,
QualType T,
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h b/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h
index 93cb8cb..63dfa72 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h
@@ -55,7 +55,6 @@ public:
static bool classof(const Type *T) {
return T->getTypeClass() == (TypeClass)LocInfo;
}
- static bool classof(const LocInfoType *) { return true; }
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/MultiplexExternalSemaSource.h b/contrib/llvm/tools/clang/include/clang/Sema/MultiplexExternalSemaSource.h
new file mode 100644
index 0000000..1513aeb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/MultiplexExternalSemaSource.h
@@ -0,0 +1,367 @@
+//===--- MultiplexExternalSemaSource.h - External Sema Interface-*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a multiplexing ExternalSemaSource, dispatching each
+// request to all of its client sources.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SEMA_MULTIPLEX_EXTERNAL_SEMA_SOURCE_H
+#define LLVM_CLANG_SEMA_MULTIPLEX_EXTERNAL_SEMA_SOURCE_H
+
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/Weak.h"
+
+#include "llvm/ADT/SmallVector.h"
+
+#include <utility>
+
+namespace clang {
+
+ class CXXConstructorDecl;
+ class CXXRecordDecl;
+ class DeclaratorDecl;
+ struct ExternalVTableUse;
+ class LookupResult;
+ class NamespaceDecl;
+ class Scope;
+ class Sema;
+ class TypedefNameDecl;
+ class ValueDecl;
+ class VarDecl;
+
+
+/// \brief An external sema source that multiplexes between several underlying
+/// sources, forwarding each request to all of them.
+class MultiplexExternalSemaSource : public ExternalSemaSource {
+
+private:
+ llvm::SmallVector<ExternalSemaSource*, 2> Sources; // doesn't own them.
+
+public:
+
+  ///\brief Constructs a new multiplexing external sema source from the two
+  /// given sources.
+ ///
+ ///\param[in] s1 - A non-null (old) ExternalSemaSource.
+ ///\param[in] s2 - A non-null (new) ExternalSemaSource.
+ ///
+ MultiplexExternalSemaSource(ExternalSemaSource& s1, ExternalSemaSource& s2);
+
+ ~MultiplexExternalSemaSource();
+
+ ///\brief Appends new source to the source list.
+ ///
+ ///\param[in] source - An ExternalSemaSource.
+ ///
+ void addSource(ExternalSemaSource &source);
+
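Hedged usage sketch: combine an existing source with a new one and install the
multiplexer on Sema. `OldSource`, `NewSource`, and the Sema instance `S` are
assumed, and Sema::addExternalSource is the expected installation point but is
not part of this patch:

  clang::MultiplexExternalSemaSource *Multi =
      new clang::MultiplexExternalSemaSource(*OldSource, *NewSource);
  S.addExternalSource(Multi); // assumed Sema entry point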
+ //===--------------------------------------------------------------------===//
+ // ExternalASTSource.
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Resolve a declaration ID into a declaration, potentially
+ /// building a new declaration.
+ ///
+ /// This method only needs to be implemented if the AST source ever
+ /// passes back decl sets as VisibleDeclaration objects.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual Decl *GetExternalDecl(uint32_t ID);
+
+ /// \brief Resolve a selector ID into a selector.
+ ///
+ /// This operation only needs to be implemented if the AST source
+ /// returns non-zero for GetNumKnownSelectors().
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual Selector GetExternalSelector(uint32_t ID);
+
+ /// \brief Returns the number of selectors known to the external AST
+ /// source.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual uint32_t GetNumExternalSelectors();
+
+ /// \brief Resolve the offset of a statement in the decl stream into
+ /// a statement.
+ ///
+ /// This operation is meant to be used via a LazyOffsetPtr. It only
+ /// needs to be implemented if the AST source uses methods like
+ /// FunctionDecl::setLazyBody when building decls.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual Stmt *GetExternalDeclStmt(uint64_t Offset);
+
+ /// \brief Resolve the offset of a set of C++ base specifiers in the decl
+ /// stream into an array of specifiers.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset);
+
+ /// \brief Finds all declarations with the given name in the
+ /// given context.
+ ///
+ /// Generally the final step of this method is either to call
+ /// SetExternalVisibleDeclsForName or to recursively call lookup on
+ /// the DeclContext after calling SetExternalVisibleDecls.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual DeclContextLookupResult
+ FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name);
+
+ /// \brief Ensures that the table of all visible declarations inside this
+ /// context is up to date.
+ ///
+  /// The default implementation of this function is a no-op.
+ virtual void completeVisibleDeclsMap(const DeclContext *DC);
+
+ /// \brief Finds all declarations lexically contained within the given
+ /// DeclContext, after applying an optional filter predicate.
+ ///
+ /// \param isKindWeWant a predicate function that returns true if the passed
+ /// declaration kind is one we are looking for. If NULL, all declarations
+ /// are returned.
+ ///
+ /// \return an indication of whether the load succeeded or failed.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual ExternalLoadResult FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Result);
+
+ /// \brief Finds all declarations lexically contained within the given
+ /// DeclContext.
+ ///
+ /// \return true if an error occurred
+ ExternalLoadResult FindExternalLexicalDecls(const DeclContext *DC,
+ SmallVectorImpl<Decl*> &Result) {
+ return FindExternalLexicalDecls(DC, 0, Result);
+ }
+
+ template <typename DeclTy>
+ ExternalLoadResult FindExternalLexicalDeclsBy(const DeclContext *DC,
+ SmallVectorImpl<Decl*> &Result) {
+ return FindExternalLexicalDecls(DC, DeclTy::classofKind, Result);
+ }
+
+ /// \brief Get the decls that are contained in a file in the Offset/Length
+ /// range. \p Length can be 0 to indicate a point at \p Offset instead of
+ /// a range.
+ virtual void FindFileRegionDecls(FileID File, unsigned Offset,unsigned Length,
+ SmallVectorImpl<Decl *> &Decls);
+
+ /// \brief Gives the external AST source an opportunity to complete
+ /// an incomplete type.
+ virtual void CompleteType(TagDecl *Tag);
+
+ /// \brief Gives the external AST source an opportunity to complete an
+ /// incomplete Objective-C class.
+ ///
+ /// This routine will only be invoked if the "externally completed" bit is
+ /// set on the ObjCInterfaceDecl via the function
+ /// \c ObjCInterfaceDecl::setExternallyCompleted().
+ virtual void CompleteType(ObjCInterfaceDecl *Class);
+
+ /// \brief Loads comment ranges.
+ virtual void ReadComments();
+
+  /// \brief Notify ExternalASTSource that we started deserialization of
+  /// a decl or type; until FinishedDeserializing is called, there may be
+  /// decls that are still initializing. Must be paired with
+  /// FinishedDeserializing.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual void StartedDeserializing();
+
+ /// \brief Notify ExternalASTSource that we finished the deserialization of
+ /// a decl or type. Must be paired with StartedDeserializing.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual void FinishedDeserializing();
+
+ /// \brief Function that will be invoked when we begin parsing a new
+ /// translation unit involving this external AST source.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual void StartTranslationUnit(ASTConsumer *Consumer);
+
+ /// \brief Print any statistics that have been gathered regarding
+ /// the external AST source.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual void PrintStats();
+
+
+ /// \brief Perform layout on the given record.
+ ///
+  /// This routine allows the external AST source to provide a specific
+ /// layout for a record, overriding the layout that would normally be
+ /// constructed. It is intended for clients who receive specific layout
+ /// details rather than source code (such as LLDB). The client is expected
+ /// to fill in the field offsets, base offsets, virtual base offsets, and
+ /// complete object size.
+ ///
+ /// \param Record The record whose layout is being requested.
+ ///
+ /// \param Size The final size of the record, in bits.
+ ///
+ /// \param Alignment The final alignment of the record, in bits.
+ ///
+ /// \param FieldOffsets The offset of each of the fields within the record,
+ /// expressed in bits. All of the fields must be provided with offsets.
+ ///
+ /// \param BaseOffsets The offset of each of the direct, non-virtual base
+ /// classes. If any bases are not given offsets, the bases will be laid
+ /// out according to the ABI.
+ ///
+ /// \param VirtualBaseOffsets The offset of each of the virtual base classes
+ /// (either direct or not). If any bases are not given offsets, the bases will
+ /// be laid out according to the ABI.
+ ///
+ /// \returns true if the record layout was provided, false otherwise.
+ virtual bool
+ layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets);
+
+ /// Return the amount of memory used by memory buffers, breaking down
+ /// by heap-backed versus mmap'ed memory.
+ virtual void getMemoryBufferSizes(MemoryBufferSizes &sizes) const;
+
+ //===--------------------------------------------------------------------===//
+ // ExternalSemaSource.
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Initialize the semantic source with the Sema instance
+ /// being used to perform semantic analysis on the abstract syntax
+ /// tree.
+ virtual void InitializeSema(Sema &S);
+
+ /// \brief Inform the semantic consumer that Sema is no longer available.
+ virtual void ForgetSema();
+
+ /// \brief Load the contents of the global method pool for a given
+ /// selector.
+ virtual void ReadMethodPool(Selector Sel);
+
+ /// \brief Load the set of namespaces that are known to the external source,
+ /// which will be used during typo correction.
+ virtual void ReadKnownNamespaces(SmallVectorImpl<NamespaceDecl*> &Namespaces);
+
+ /// \brief Do last resort, unqualified lookup on a LookupResult that
+ /// Sema cannot find.
+ ///
+ /// \param R a LookupResult that is being recovered.
+ ///
+ /// \param S the Scope of the identifier occurrence.
+ ///
+ /// \return true to tell Sema to recover using the LookupResult.
+ virtual bool LookupUnqualified(LookupResult &R, Scope *S);
+
+ /// \brief Read the set of tentative definitions known to the external Sema
+ /// source.
+ ///
+ /// The external source should append its own tentative definitions to the
+ /// given vector of tentative definitions. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadTentativeDefinitions(SmallVectorImpl<VarDecl*> &Defs);
+
+ /// \brief Read the set of unused file-scope declarations known to the
+ /// external Sema source.
+ ///
+  /// The external source should append its own unused, file-scoped
+  /// declarations to the given vector. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadUnusedFileScopedDecls(
+ SmallVectorImpl<const DeclaratorDecl*> &Decls);
+
+ /// \brief Read the set of delegating constructors known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own delegating constructors to the
+ /// given vector of declarations. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadDelegatingConstructors(
+ SmallVectorImpl<CXXConstructorDecl*> &Decls);
+
+ /// \brief Read the set of ext_vector type declarations known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own ext_vector type declarations to
+ /// the given vector of declarations. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadExtVectorDecls(SmallVectorImpl<TypedefNameDecl*> &Decls);
+
+ /// \brief Read the set of dynamic classes known to the external Sema source.
+ ///
+ /// The external source should append its own dynamic classes to
+ /// the given vector of declarations. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadDynamicClasses(SmallVectorImpl<CXXRecordDecl*> &Decls);
+
+ /// \brief Read the set of locally-scoped external declarations known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own locally-scoped external
+ /// declarations to the given vector of declarations. Note that this routine
+ /// may be invoked multiple times; the external source should take care not
+ /// to introduce the same declarations repeatedly.
+ virtual void ReadLocallyScopedExternalDecls(SmallVectorImpl<NamedDecl*>&Decls);
+
+ /// \brief Read the set of referenced selectors known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own referenced selectors to the
+ /// given vector of selectors. Note that this routine
+ /// may be invoked multiple times; the external source should take care not
+ /// to introduce the same selectors repeatedly.
+ virtual void ReadReferencedSelectors(SmallVectorImpl<std::pair<Selector,
+ SourceLocation> > &Sels);
+
+ /// \brief Read the set of weak, undeclared identifiers known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own weak, undeclared identifiers to
+ /// the given vector. Note that this routine may be invoked multiple times;
+ /// the external source should take care not to introduce the same identifiers
+ /// repeatedly.
+ virtual void ReadWeakUndeclaredIdentifiers(
+ SmallVectorImpl<std::pair<IdentifierInfo*, WeakInfo> > &WI);
+
+ /// \brief Read the set of used vtables known to the external Sema source.
+ ///
+ /// The external source should append its own used vtables to the given
+ /// vector. Note that this routine may be invoked multiple times; the external
+ /// source should take care not to introduce the same vtables repeatedly.
+ virtual void ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables);
+
+ /// \brief Read the set of pending instantiations known to the external
+ /// Sema source.
+ ///
+ /// The external source should append its own pending instantiations to the
+ /// given vector. Note that this routine may be invoked multiple times; the
+ /// external source should take care not to introduce the same instantiations
+ /// repeatedly.
+ virtual void ReadPendingInstantiations(
+ SmallVectorImpl<std::pair<ValueDecl*, SourceLocation> >& Pending);
+
+ // isa/cast/dyn_cast support
+ static bool classof(const MultiplexExternalSemaSource*) { return true; }
+ //static bool classof(const ExternalSemaSource*) { return true; }
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_SEMA_MULTIPLEX_EXTERNAL_SEMA_SOURCE_H
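The contract spelled out repeatedly above (append to the caller's vector, and tolerate being invoked more than once without duplicating entries) is easy to get wrong in a multiplexed setup. A minimal sketch of one conforming override, in the C++03 style of this era; MySource, MyDefs, and DefsSeen are hypothetical names, not part of the patch:

    #include "clang/Sema/ExternalSemaSource.h"
    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/ADT/SmallVector.h"

    namespace {
    class MySource : public clang::ExternalSemaSource {
      // Populated elsewhere by this source.
      llvm::SmallVector<clang::VarDecl *, 16> MyDefs;
      // Remembers what was already handed to Sema, because
      // ReadTentativeDefinitions may be invoked multiple times.
      llvm::SmallPtrSet<clang::VarDecl *, 16> DefsSeen;

    public:
      virtual void ReadTentativeDefinitions(
          llvm::SmallVectorImpl<clang::VarDecl *> &Defs) {
        for (unsigned I = 0, N = MyDefs.size(); I != N; ++I)
          if (DefsSeen.insert(MyDefs[I]))  // append each definition only once
            Defs.push_back(MyDefs[I]);
      }
    };
    } // end anonymous namespace

The same append-don't-clear, dedupe-on-repeat discipline applies to every Read* hook above, which is what lets the multiplexer simply chain the calls across all registered sources.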
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
index d2fc285..65ed781 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
@@ -748,12 +748,14 @@ namespace clang {
unsigned NumInlineSequences;
char InlineSpace[16 * sizeof(ImplicitConversionSequence)];
- OverloadCandidateSet(const OverloadCandidateSet &);
- OverloadCandidateSet &operator=(const OverloadCandidateSet &);
-
+ OverloadCandidateSet(const OverloadCandidateSet &) LLVM_DELETED_FUNCTION;
+ void operator=(const OverloadCandidateSet &) LLVM_DELETED_FUNCTION;
+
+ void destroyCandidates();
+
public:
OverloadCandidateSet(SourceLocation Loc) : Loc(Loc), NumInlineSequences(0){}
- ~OverloadCandidateSet() { clear(); }
+ ~OverloadCandidateSet() { destroyCandidates(); }
SourceLocation getLocation() const { return Loc; }
@@ -808,7 +810,7 @@ namespace clang {
void NoteCandidates(Sema &S,
OverloadCandidateDisplayKind OCD,
llvm::ArrayRef<Expr *> Args,
- const char *Opc = 0,
+ StringRef Opc = "",
SourceLocation Loc = SourceLocation());
};
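For context on the macro introduced in this hunk: LLVM_DELETED_FUNCTION comes from llvm/Support/Compiler.h and turns the old declare-private-but-never-define idiom into a real C++11 deleted function where the compiler supports it, converting accidental copies from link-time errors into clear compile-time errors at the point of use. A simplified sketch of its effect (the real macro uses per-compiler feature detection rather than this bare __cplusplus check):

    #if __cplusplus >= 201103L
    #define LLVM_DELETED_FUNCTION = delete  // any use is ill-formed, with a
                                            // diagnostic at the point of use
    #else
    #define LLVM_DELETED_FUNCTION           // fall back to the C++03 idiom:
                                            // private declaration, no definition
    #endif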
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h b/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h
index fb9e368d..e59fb3f 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h
@@ -15,7 +15,7 @@
#define LLVM_CLANG_SEMA_OWNERSHIP_H
#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
//===----------------------------------------------------------------------===//
@@ -30,8 +30,8 @@ namespace clang {
class DeclGroupRef;
class Expr;
class NestedNameSpecifier;
+ class ParsedTemplateArgument;
class QualType;
- class Sema;
class Stmt;
class TemplateName;
class TemplateParameterList;
@@ -112,96 +112,6 @@ namespace llvm {
struct isPodLike<clang::OpaquePtr<T> > { static const bool value = true; };
}
-
-
-// -------------------------- About Move Emulation -------------------------- //
-// The smart pointer classes in this file attempt to emulate move semantics
-// as they appear in C++0x with rvalue references. Since C++03 doesn't have
-// rvalue references, some tricks are needed to get similar results.
-// Move semantics in C++0x have the following properties:
-// 1) "Moving" means transferring the value of an object to another object,
-// similar to copying, but without caring what happens to the old object.
-// In particular, this means that the new object can steal the old object's
-// resources instead of creating a copy.
-// 2) Since moving can modify the source object, it must either be explicitly
-// requested by the user, or the modifications must be unnoticeable.
-// 3) As such, C++0x moving is only allowed in three contexts:
-// * By explicitly using std::move() to request it.
-// * From a temporary object, since that object cannot be accessed
-// afterwards anyway, thus making the state unobservable.
-// * On function return, since the object is not observable afterwards.
-//
-// To sum up: moving from a named object should only be possible with an
-// explicit std::move(), or on function return. Moving from a temporary should
-// be implicitly done. Moving from a const object is forbidden.
-//
-// The emulation is not perfect, and has the following shortcomings:
-// * move() is not in namespace std.
-// * move() is required on function return.
-// * There are difficulties with implicit conversions.
-// * Microsoft's compiler must be given the /Za switch to successfully compile.
-//
-// -------------------------- Implementation -------------------------------- //
-// The move emulation relies on the peculiar reference binding semantics of
-// C++03: as a rule, a non-const reference may not bind to a temporary object,
-// except for the implicit object parameter in a member function call, which
-// can refer to a temporary even when not being const.
-// The moveable object has five important functions to facilitate moving:
-// * A private, unimplemented constructor taking a non-const reference to its
-// own class. This constructor serves a two-fold purpose.
-// - It prevents the creation of a copy constructor that takes a const
-// reference. Temporaries would be able to bind to the argument of such a
-// constructor, and that would be bad.
-// - Named objects will bind to the non-const reference, but since it's
-// private, this will fail to compile. This prevents implicit moving from
-// named objects.
-// There's also a copy assignment operator for the same purpose.
-// * An implicit, non-const conversion operator to a special mover type. This
-// type represents the rvalue reference of C++0x. Being a non-const member,
-// its implicit this parameter can bind to temporaries.
-// * A constructor that takes an object of this mover type. This constructor
-// performs the actual move operation. There is an equivalent assignment
-// operator.
-// There is also a free move() function that takes a non-const reference to
-// an object and returns a temporary. Internally, this function uses explicit
-// constructor calls to move the value from the referenced object to the return
-// value.
-//
-// There are now three possible scenarios of use.
-// * Copying from a const object. Constructor overload resolution will find the
-// non-const copy constructor, and the move constructor. The first is not
-// viable because the const object cannot be bound to the non-const reference.
-// The second fails because the conversion to the mover object is non-const.
-// Moving from a const object fails as intended.
-// * Copying from a named object. Constructor overload resolution will select
-// the non-const copy constructor, but fail as intended, because this
-// constructor is private.
-// * Copying from a temporary. Constructor overload resolution cannot select
-// the non-const copy constructor, because the temporary cannot be bound to
-// the non-const reference. It thus selects the move constructor. The
-// temporary can be bound to the implicit this parameter of the conversion
-// operator, because of the special binding rule. Construction succeeds.
-// Note that the Microsoft compiler, as an extension, allows binding
-// temporaries against non-const references. The compiler thus selects the
-// non-const copy constructor and fails, because the constructor is private.
-// Passing /Za (disable extensions) disables this behaviour.
-// The free move() function is used to move from a named object.
-//
-// Note that when passing an object of a different type (the classes below
-// have OwningResult and OwningPtr, which should be mixable), you get a problem.
-// Argument passing and function return use copy initialization rules. The
-// effect of this is that, when the source object is not already of the target
-// type, the compiler will first seek a way to convert the source object to the
-// target type, and only then attempt to copy the resulting object. This means
-// that when passing an OwningResult where an OwningPtr is expected, the
-// compiler will first seek a conversion from OwningResult to OwningPtr, then
-// copy the OwningPtr. The resulting conversion sequence is:
-// OwningResult object -> ResultMover -> OwningResult argument to
-// OwningPtr(OwningResult) -> OwningPtr -> PtrMover -> final OwningPtr
-// This conversion sequence is too complex to be allowed. Thus the special
-// move_* functions, which help the compiler out with some explicit
-// conversions.
-
namespace clang {
// Basic
class DiagnosticBuilder;
@@ -239,6 +149,7 @@ namespace clang {
bool isUsable() const { return !Invalid && Val; }
PtrTy get() const { return Val; }
+ // FIXME: Replace with get.
PtrTy release() const { return Val; }
PtrTy take() const { return Val; }
template <typename T> T *takeAs() { return static_cast<T*>(get()); }
@@ -282,6 +193,7 @@ namespace clang {
void *VP = reinterpret_cast<void *>(PtrWithInvalid & ~0x01);
return PtrTraits::getFromVoidPointer(VP);
}
+ // FIXME: Replace with get.
PtrTy take() const { return get(); }
PtrTy release() const { return get(); }
template <typename T> T *takeAs() { return static_cast<T*>(get()); }
@@ -300,119 +212,11 @@ namespace clang {
}
};
- /// ASTMultiPtr - A moveable smart pointer to multiple AST nodes. Only owns
- /// the individual pointers, not the array holding them.
- template <typename PtrTy> class ASTMultiPtr;
-
- template <class PtrTy>
- class ASTMultiPtr {
- PtrTy *Nodes;
- unsigned Count;
-
- public:
- // Normal copying implicitly defined
- ASTMultiPtr() : Nodes(0), Count(0) {}
- explicit ASTMultiPtr(Sema &) : Nodes(0), Count(0) {}
- ASTMultiPtr(Sema &, PtrTy *nodes, unsigned count)
- : Nodes(nodes), Count(count) {}
- // Fake mover in Parse/AstGuard.h needs this:
- ASTMultiPtr(PtrTy *nodes, unsigned count) : Nodes(nodes), Count(count) {}
-
- /// Access to the raw pointers.
- PtrTy *get() const { return Nodes; }
-
- /// Access to the count.
- unsigned size() const { return Count; }
-
- PtrTy *release() {
- return Nodes;
- }
- };
-
- class ParsedTemplateArgument;
-
- class ASTTemplateArgsPtr {
- ParsedTemplateArgument *Args;
- mutable unsigned Count;
-
- public:
- ASTTemplateArgsPtr(Sema &actions, ParsedTemplateArgument *args,
- unsigned count) :
- Args(args), Count(count) { }
-
- // FIXME: Lame, not-fully-type-safe emulation of 'move semantics'.
- ASTTemplateArgsPtr(ASTTemplateArgsPtr &Other) :
- Args(Other.Args), Count(Other.Count) {
- }
-
- // FIXME: Lame, not-fully-type-safe emulation of 'move semantics'.
- ASTTemplateArgsPtr& operator=(ASTTemplateArgsPtr &Other) {
- Args = Other.Args;
- Count = Other.Count;
- return *this;
- }
-
- ParsedTemplateArgument *getArgs() const { return Args; }
- unsigned size() const { return Count; }
-
- void reset(ParsedTemplateArgument *args, unsigned count) {
- Args = args;
- Count = count;
- }
-
- const ParsedTemplateArgument &operator[](unsigned Arg) const;
-
- ParsedTemplateArgument *release() const {
- return Args;
- }
- };
-
- /// \brief A small vector that owns a set of AST nodes.
- template <class PtrTy, unsigned N = 8>
- class ASTOwningVector : public SmallVector<PtrTy, N> {
- ASTOwningVector(ASTOwningVector &); // do not implement
- ASTOwningVector &operator=(ASTOwningVector &); // do not implement
-
- public:
- explicit ASTOwningVector(Sema &Actions)
- { }
-
- PtrTy *take() {
- return &this->front();
- }
-
- template<typename T> T **takeAs() { return reinterpret_cast<T**>(take()); }
- };
-
/// An opaque type for threading parsed type information through the
/// parser.
typedef OpaquePtr<QualType> ParsedType;
typedef UnionOpaquePtr<QualType> UnionParsedType;
- /// A SmallVector of statements, with stack size 32 (as that is the only one
- /// used.)
- typedef ASTOwningVector<Stmt*, 32> StmtVector;
- /// A SmallVector of expressions, with stack size 12 (the maximum used.)
- typedef ASTOwningVector<Expr*, 12> ExprVector;
- /// A SmallVector of types.
- typedef ASTOwningVector<ParsedType, 12> TypeVector;
-
- template <class T, unsigned N> inline
- ASTMultiPtr<T> move_arg(ASTOwningVector<T, N> &vec) {
- return ASTMultiPtr<T>(vec.take(), vec.size());
- }
-
- // These versions are hopefully no-ops.
- template <class T, bool C>
- inline ActionResult<T,C> move(ActionResult<T,C> &ptr) {
- return ptr;
- }
-
- template <class T> inline
- ASTMultiPtr<T>& move(ASTMultiPtr<T> &ptr) {
- return ptr;
- }
-
// We can re-use the low bit of expression, statement, base, and
// member-initializer pointers for the "invalid" flag of
// ActionResult.
@@ -438,13 +242,11 @@ namespace clang {
typedef ActionResult<Decl*> DeclResult;
typedef OpaquePtr<TemplateName> ParsedTemplateTy;
- inline Expr *move(Expr *E) { return E; }
- inline Stmt *move(Stmt *S) { return S; }
-
- typedef ASTMultiPtr<Expr*> MultiExprArg;
- typedef ASTMultiPtr<Stmt*> MultiStmtArg;
- typedef ASTMultiPtr<ParsedType> MultiTypeArg;
- typedef ASTMultiPtr<TemplateParameterList*> MultiTemplateParamsArg;
+ typedef llvm::MutableArrayRef<Expr*> MultiExprArg;
+ typedef llvm::MutableArrayRef<Stmt*> MultiStmtArg;
+ typedef llvm::MutableArrayRef<ParsedTemplateArgument> ASTTemplateArgsPtr;
+ typedef llvm::MutableArrayRef<ParsedType> MultiTypeArg;
+ typedef llvm::MutableArrayRef<TemplateParameterList*> MultiTemplateParamsArg;
inline ExprResult ExprError() { return ExprResult(true); }
inline StmtResult StmtError() { return StmtResult(true); }
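The net effect of this Ownership.h rewrite: the move-emulation machinery and the owning wrappers are gone, and MultiExprArg and friends are now plain (pointer, length) views via llvm::MutableArrayRef, so the various move()/move_arg() calls degenerate to identity and can be deleted at call sites. A minimal sketch of the new calling convention; takesArgs stands in for any Sema entry point accepting MultiExprArg:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"

    namespace clang { class Expr; }
    typedef llvm::MutableArrayRef<clang::Expr *> MultiExprArg; // as in the patch

    void takesArgs(MultiExprArg Args);  // hypothetical consumer

    void caller(clang::Expr *A, clang::Expr *B) {
      llvm::SmallVector<clang::Expr *, 8> Exprs;
      Exprs.push_back(A);
      Exprs.push_back(B);
      takesArgs(Exprs);  // implicit SmallVector -> MutableArrayRef conversion;
                         // no ownership transfer, no move_arg() needed
    }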
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
index 69080ad..94db454 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
@@ -209,11 +209,6 @@ namespace clang {
/// Retrieves the range of the given template parameter lists.
SourceRange getTemplateParamsRange(TemplateParameterList const *const *Params,
unsigned NumParams);
-
- inline const ParsedTemplateArgument &
- ASTTemplateArgsPtr::operator[](unsigned Arg) const {
- return Args[Arg];
- }
}
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
index b78556e..fa508cf 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
@@ -82,7 +82,13 @@ public:
SwitchScope = 0x800,
/// TryScope - This is the scope of a C++ try statement.
- TryScope = 0x1000
+ TryScope = 0x1000,
+
+ /// FnTryScope - This is the scope of a function-level C++ try block.
+ FnTryScope = 0x3000,
+
+ /// FnCatchScope - This is the scope of a function-level C++ catch handler.
+ FnCatchScope = 0x4000
};
private:
/// The parent scope for this scope. This is null for the translation-unit
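Note how the new enumerators are encoded: FnTryScope is 0x3000, i.e. the existing TryScope bit (0x1000) plus a new 0x2000 bit, so a function-level try block still satisfies any generic TryScope test, while FnCatchScope takes the fresh 0x4000 bit of its own. A small sketch of how such composite flags behave (hasAllFlags is a hypothetical helper; the enum values mirror the hunk above):

    enum ScopeFlags {
      TryScope     = 0x1000, // any C++ try
      FnTryScope   = 0x3000, // function-level try = 0x2000 | TryScope
      FnCatchScope = 0x4000  // function-level catch: a bit of its own
    };

    static bool hasAllFlags(unsigned Flags, unsigned Wanted) {
      return (Flags & Wanted) == Wanted;  // all requested bits present
    }

    // hasAllFlags(FnTryScope, TryScope) -> true: code that only checks
    // TryScope keeps working for function try blocks.
    // hasAllFlags(TryScope, FnTryScope) -> false: a plain try block is
    // never mistaken for the function-level variant.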
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
index b4752f5..feda9c9 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines FunctionScopeInfo and BlockScopeInfo.
+// This file defines FunctionScopeInfo and its subclasses, which contain
+// information about a single function, block, lambda, or method body.
//
//===----------------------------------------------------------------------===//
@@ -21,14 +22,20 @@
namespace clang {
+class Decl;
class BlockDecl;
class CXXMethodDecl;
+class ObjCPropertyDecl;
class IdentifierInfo;
class LabelDecl;
class ReturnStmt;
class Scope;
class SwitchStmt;
class VarDecl;
+class DeclRefExpr;
+class ObjCIvarRefExpr;
+class ObjCPropertyRefExpr;
+class ObjCMessageExpr;
namespace sema {
@@ -84,13 +91,10 @@ public:
/// \brief Whether this function contains any indirect gotos.
bool HasIndirectGoto;
- /// A flag that is set when parsing a -dealloc method and no [super dealloc]
- /// call was found yet.
- bool ObjCShouldCallSuperDealloc;
-
- /// A flag that is set when parsing a -finalize method and no [super finalize]
- /// call was found yet.
- bool ObjCShouldCallSuperFinalize;
+ /// A flag that is set when parsing a method that must call super's
+ /// implementation, such as \c -dealloc, \c -finalize, or any method marked
+ /// with \c __attribute__((objc_requires_super)).
+ bool ObjCShouldCallSuper;
/// \brief Used to determine if errors occurred in this function or block.
DiagnosticErrorTrap ErrorTrap;
@@ -113,6 +117,164 @@ public:
/// prior to being emitted.
SmallVector<PossiblyUnreachableDiag, 4> PossiblyUnreachableDiags;
+public:
+ /// Represents a simple identification of a weak object.
+ ///
+ /// Part of the implementation of -Wrepeated-use-of-weak.
+ ///
+ /// This is used to determine if two weak accesses refer to the same object.
+ /// Here are some examples of how various accesses are "profiled":
+ ///
+ /// Access Expression | "Base" Decl | "Property" Decl
+ /// :---------------: | :-----------------: | :------------------------------:
+ /// self.property | self (VarDecl) | property (ObjCPropertyDecl)
+ /// self.implicitProp | self (VarDecl) | -implicitProp (ObjCMethodDecl)
+ /// self->ivar.prop | ivar (ObjCIvarDecl) | prop (ObjCPropertyDecl)
+ /// cxxObj.obj.prop | obj (FieldDecl) | prop (ObjCPropertyDecl)
+ /// [self foo].prop | 0 (unknown) | prop (ObjCPropertyDecl)
+ /// self.prop1.prop2 | prop1 (ObjCPropertyDecl) | prop2 (ObjCPropertyDecl)
+ /// MyClass.prop | MyClass (ObjCInterfaceDecl) | -prop (ObjCMethodDecl)
+ /// weakVar | 0 (known) | weakVar (VarDecl)
+ /// self->weakIvar | self (VarDecl) | weakIvar (ObjCIvarDecl)
+ ///
+ /// Objects are identified with only two Decls to make it reasonably fast to
+ /// compare them.
+ class WeakObjectProfileTy {
+ /// The base object decl, as described in the class documentation.
+ ///
+ /// The extra flag is "true" if the Base and Property are enough to uniquely
+ /// identify the object in memory.
+ ///
+ /// \sa isExactProfile()
+ typedef llvm::PointerIntPair<const NamedDecl *, 1, bool> BaseInfoTy;
+ BaseInfoTy Base;
+
+ /// The "property" decl, as described in the class documentation.
+ ///
+ /// Note that this may not actually be an ObjCPropertyDecl, e.g. in the
+ /// case of "implicit" properties (regular methods accessed via dot syntax).
+ const NamedDecl *Property;
+
+ /// Used to find the proper base profile for a given base expression.
+ static BaseInfoTy getBaseInfo(const Expr *BaseE);
+
+ // For use in DenseMap.
+ friend class DenseMapInfo;
+ inline WeakObjectProfileTy();
+ static inline WeakObjectProfileTy getSentinel();
+
+ public:
+ WeakObjectProfileTy(const ObjCPropertyRefExpr *RE);
+ WeakObjectProfileTy(const Expr *Base, const ObjCPropertyDecl *Property);
+ WeakObjectProfileTy(const DeclRefExpr *RE);
+ WeakObjectProfileTy(const ObjCIvarRefExpr *RE);
+
+ const NamedDecl *getBase() const { return Base.getPointer(); }
+ const NamedDecl *getProperty() const { return Property; }
+
+ /// Returns true if the object base specifies a known object in memory,
+ /// rather than, say, an instance variable or property of another object.
+ ///
+ /// Note that this ignores the effects of aliasing; that is, \c foo.bar is
+ /// considered an exact profile if \c foo is a local variable, even if
+ /// another variable \c foo2 refers to the same object as \c foo.
+ ///
+ /// For increased precision, accesses with base variables that are
+ /// properties or ivars of 'self' (e.g. self.prop1.prop2) are considered to
+ /// be exact, though this is not true for arbitrary variables
+ /// (foo.prop1.prop2).
+ bool isExactProfile() const {
+ return Base.getInt();
+ }
+
+ bool operator==(const WeakObjectProfileTy &Other) const {
+ return Base == Other.Base && Property == Other.Property;
+ }
+
+ // For use in DenseMap.
+ // We can't specialize the usual llvm::DenseMapInfo at the end of the file
+ // because by that point the DenseMap in FunctionScopeInfo has already been
+ // instantiated.
+ class DenseMapInfo {
+ public:
+ static inline WeakObjectProfileTy getEmptyKey() {
+ return WeakObjectProfileTy();
+ }
+ static inline WeakObjectProfileTy getTombstoneKey() {
+ return WeakObjectProfileTy::getSentinel();
+ }
+
+ static unsigned getHashValue(const WeakObjectProfileTy &Val) {
+ typedef std::pair<BaseInfoTy, const NamedDecl *> Pair;
+ return llvm::DenseMapInfo<Pair>::getHashValue(Pair(Val.Base,
+ Val.Property));
+ }
+
+ static bool isEqual(const WeakObjectProfileTy &LHS,
+ const WeakObjectProfileTy &RHS) {
+ return LHS == RHS;
+ }
+ };
+ };
+
+ /// Represents a single use of a weak object.
+ ///
+ /// Stores both the expression and whether the access is potentially unsafe
+ /// (i.e. it could potentially be warned about).
+ ///
+ /// Part of the implementation of -Wrepeated-use-of-weak.
+ class WeakUseTy {
+ llvm::PointerIntPair<const Expr *, 1, bool> Rep;
+ public:
+ WeakUseTy(const Expr *Use, bool IsRead) : Rep(Use, IsRead) {}
+
+ const Expr *getUseExpr() const { return Rep.getPointer(); }
+ bool isUnsafe() const { return Rep.getInt(); }
+ void markSafe() { Rep.setInt(false); }
+
+ bool operator==(const WeakUseTy &Other) const {
+ return Rep == Other.Rep;
+ }
+ };
+
+ /// Used to collect uses of a particular weak object in a function body.
+ ///
+ /// Part of the implementation of -Wrepeated-use-of-weak.
+ typedef SmallVector<WeakUseTy, 4> WeakUseVector;
+
+ /// Used to collect all uses of weak objects in a function body.
+ ///
+ /// Part of the implementation of -Wrepeated-use-of-weak.
+ typedef llvm::SmallDenseMap<WeakObjectProfileTy, WeakUseVector, 8,
+ WeakObjectProfileTy::DenseMapInfo>
+ WeakObjectUseMap;
+
+private:
+ /// Used to collect all uses of weak objects in this function body.
+ ///
+ /// Part of the implementation of -Wrepeated-use-of-weak.
+ WeakObjectUseMap WeakObjectUses;
+
+public:
+ /// Record that a weak object was accessed.
+ ///
+ /// Part of the implementation of -Wrepeated-use-of-weak.
+ template <typename ExprT>
+ inline void recordUseOfWeak(const ExprT *E, bool IsRead = true);
+
+ void recordUseOfWeak(const ObjCMessageExpr *Msg,
+ const ObjCPropertyDecl *Prop);
+
+ /// Record that a given expression is a "safe" access of a weak object (e.g.
+ /// assigning it to a strong variable).
+ ///
+ /// Part of the implementation of -Wrepeated-use-of-weak.
+ void markSafeWeakUse(const Expr *E);
+
+ const WeakObjectUseMap &getWeakObjectUses() const {
+ return WeakObjectUses;
+ }
+
void setHasBranchIntoScope() {
HasBranchIntoScope = true;
}
@@ -135,8 +297,7 @@ public:
HasBranchProtectedScope(false),
HasBranchIntoScope(false),
HasIndirectGoto(false),
- ObjCShouldCallSuperDealloc(false),
- ObjCShouldCallSuperFinalize(false),
+ ObjCShouldCallSuper(false),
ErrorTrap(Diag) { }
virtual ~FunctionScopeInfo();
@@ -144,8 +305,6 @@ public:
/// \brief Clear out the information in this function scope, making it
/// suitable for reuse.
void Clear();
-
- static bool classof(const FunctionScopeInfo *FSI) { return true; }
};
class CapturingScopeInfo : public FunctionScopeInfo {
@@ -262,11 +421,7 @@ public:
}
void addThisCapture(bool isNested, SourceLocation Loc, QualType CaptureType,
- Expr *Cpy) {
- Captures.push_back(Capture(Capture::ThisCapture, isNested, Loc, CaptureType,
- Cpy));
- CXXThisCaptureIndex = Captures.size();
- }
+ Expr *Cpy);
/// \brief Determine whether the C++ 'this' is captured.
bool isCXXThisCaptured() const { return CXXThisCaptureIndex != 0; }
@@ -299,7 +454,6 @@ public:
static bool classof(const FunctionScopeInfo *FSI) {
return FSI->Kind == SK_Block || FSI->Kind == SK_Lambda;
}
- static bool classof(const CapturingScopeInfo *BSI) { return true; }
};
/// \brief Retains information about a block that is currently being parsed.
@@ -327,7 +481,6 @@ public:
static bool classof(const FunctionScopeInfo *FSI) {
return FSI->Kind == SK_Block;
}
- static bool classof(const BlockScopeInfo *BSI) { return true; }
};
class LambdaScopeInfo : public CapturingScopeInfo {
@@ -379,15 +532,42 @@ public:
void finishedExplicitCaptures() {
NumExplicitCaptures = Captures.size();
}
-
- static bool classof(const FunctionScopeInfo *FSI) {
+
+ static bool classof(const FunctionScopeInfo *FSI) {
return FSI->Kind == SK_Lambda;
}
- static bool classof(const LambdaScopeInfo *BSI) { return true; }
-
};
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy()
+ : Base(0, false), Property(0) {}
+
+FunctionScopeInfo::WeakObjectProfileTy
+FunctionScopeInfo::WeakObjectProfileTy::getSentinel() {
+ FunctionScopeInfo::WeakObjectProfileTy Result;
+ Result.Base.setInt(true);
+ return Result;
+}
+
+template <typename ExprT>
+void FunctionScopeInfo::recordUseOfWeak(const ExprT *E, bool IsRead) {
+ assert(E);
+ WeakUseVector &Uses = WeakObjectUses[WeakObjectProfileTy(E)];
+ Uses.push_back(WeakUseTy(E, IsRead));
}
+
+inline void
+CapturingScopeInfo::addThisCapture(bool isNested, SourceLocation Loc,
+ QualType CaptureType, Expr *Cpy) {
+ Captures.push_back(Capture(Capture::ThisCapture, isNested, Loc, CaptureType,
+ Cpy));
+ CXXThisCaptureIndex = Captures.size();
+
+ if (LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(this))
+ LSI->ArrayIndexStarts.push_back(LSI->ArrayIndexVars.size());
}
+} // end namespace sema
+} // end namespace clang
+
#endif
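The nested WeakObjectProfileTy::DenseMapInfo above follows LLVM's standard key-traits protocol: a DenseMap key type must provide two reserved sentinel keys (empty and tombstone) plus hashing and equality, and the traits class can be passed explicitly when the default llvm::DenseMapInfo specialization is unavailable, exactly as WeakObjectUseMap does. A self-contained sketch of the same pattern for a hypothetical IntPairKey:

    #include "llvm/ADT/DenseMap.h"
    #include <utility>

    struct IntPairKey {
      int A, B;
      bool operator==(const IntPairKey &O) const { return A == O.A && B == O.B; }
    };

    // Key traits: sentinel values real keys never take, plus hash/equality.
    struct IntPairKeyInfo {
      static IntPairKey getEmptyKey()     { IntPairKey K = {-1, -1}; return K; }
      static IntPairKey getTombstoneKey() { IntPairKey K = {-2, -2}; return K; }
      static unsigned getHashValue(const IntPairKey &K) {
        return llvm::DenseMapInfo<std::pair<int, int> >::getHashValue(
            std::make_pair(K.A, K.B));
      }
      static bool isEqual(const IntPairKey &L, const IntPairKey &R) {
        return L == R;
      }
    };

    // Traits passed explicitly, as WeakObjectUseMap passes
    // WeakObjectProfileTy::DenseMapInfo above.
    typedef llvm::SmallDenseMap<IntPairKey, unsigned, 8, IntPairKeyInfo> UseMap;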
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
index acc88c0..9b572d8 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
@@ -187,9 +187,16 @@ typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
- Sema(const Sema&); // DO NOT IMPLEMENT
- void operator=(const Sema&); // DO NOT IMPLEMENT
+ Sema(const Sema &) LLVM_DELETED_FUNCTION;
+ void operator=(const Sema &) LLVM_DELETED_FUNCTION;
mutable const TargetAttributesSema* TheTargetAttributesSema;
+
+ ///\brief Source of additional semantic information.
+ ExternalSemaSource *ExternalSource;
+
+ ///\brief Whether Sema has generated a multiplexer and has to delete it.
+ bool isMultiplexExternalSource;
+
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
@@ -208,9 +215,6 @@ public:
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
- /// \brief Source of additional semantic information.
- ExternalSemaSource *ExternalSource;
-
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
@@ -234,6 +238,12 @@ public:
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
+ /// \brief Flag indicating if Sema is building a recovery call expression.
+ ///
+ /// This flag is used to avoid building recovery call expressions
+ /// if Sema is already doing so, which would cause infinite recursion.
+ bool IsBuildingRecoveryCallExpr;
+
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
@@ -456,6 +466,26 @@ public:
}
};
+ /// \brief RAII object to handle the state changes required to synthesize
+ /// a function body.
+ class SynthesizedFunctionScope {
+ Sema &S;
+ Sema::ContextRAII SavedContext;
+
+ public:
+ SynthesizedFunctionScope(Sema &S, DeclContext *DC)
+ : S(S), SavedContext(S, DC)
+ {
+ S.PushFunctionScope();
+ S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+ }
+
+ ~SynthesizedFunctionScope() {
+ S.PopExpressionEvaluationContext();
+ S.PopFunctionScopeInfo();
+ }
+ };
+
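Usage of the RAII class above is purely scope-driven: construction pushes a function scope and a PotentiallyEvaluated expression context (and retargets the current DeclContext via the saved ContextRAII), and destruction pops both on every exit path. A hypothetical sketch; defineImplicitMember stands in for any Sema member that synthesizes a body:

    void defineImplicitMember(Sema &S, CXXMethodDecl *MD) {
      Sema::SynthesizedFunctionScope Scope(S, MD);
      // ... build the synthesized function body here; early returns are
      // safe, since the destructor below always runs ...
    } // ~SynthesizedFunctionScope pops the evaluation context and the
      // function scope, and ~ContextRAII restores the old DeclContext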
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
@@ -729,6 +759,20 @@ public:
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
+ /// Records and restores the FP_CONTRACT state on entry/exit of compound
+ /// statements.
+ class FPContractStateRAII {
+ public:
+ FPContractStateRAII(Sema& S)
+ : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
+ ~FPContractStateRAII() {
+ S.FPFeatures.fp_contract = OldFPContractState;
+ }
+ private:
+ Sema& S;
+ bool OldFPContractState : 1;
+ };
+
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
@@ -750,6 +794,14 @@ public:
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
+ ExternalSemaSource* getExternalSource() const { return ExternalSource; }
+
+ ///\brief Registers an external source. If an external source already exists,
+ /// creates a multiplex external source and appends to it.
+ ///
+ ///\param[in] E - A non-null external sema source.
+ ///
+ void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
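This registration API composes with the MultiplexExternalSemaSource introduced earlier in the patch: the first source registered is installed directly, and registering a second one transparently wraps both in a multiplexer that Sema owns (per the isMultiplexExternalSource flag above). A hedged usage sketch; wireSources, SourceA, and SourceB are hypothetical:

    void wireSources(clang::Sema &S, clang::ExternalSemaSource *SourceA,
                     clang::ExternalSemaSource *SourceB) {
      S.addExternalSource(SourceA); // installed as the external source directly
      S.addExternalSource(SourceB); // now a multiplexer forwards to A and B
      clang::ExternalSemaSource *Cur = S.getExternalSource();
      (void)Cur;                    // the multiplexer, owned by Sema
    }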
@@ -1203,7 +1255,7 @@ public:
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
return Kind == NC_TypeTemplate? TNK_Type_template : TNK_Function_template;
}
-};
+ };
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
@@ -1223,11 +1275,19 @@ public:
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
+ ///
+ /// \param IsAddressOfOperand True if this name is the operand of a unary
+ /// address of ('&') expression, assuming it is classified as an
+ /// expression.
+ ///
+ /// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc,
- const Token &NextToken);
+ const Token &NextToken,
+ bool IsAddressOfOperand,
+ CorrectionCandidateCallback *CCC = 0);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
@@ -1265,6 +1325,7 @@ public:
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
+ void checkVoidParamDecl(ParmVarDecl *Param);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
@@ -1293,7 +1354,6 @@ public:
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
- void CheckSelfReference(Decl *OrigDecl, Expr *E);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
@@ -1642,7 +1702,7 @@ public:
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
- /// \returns true if \arg FD is unavailable and current context is inside
+ /// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
@@ -1866,8 +1926,7 @@ public:
llvm::ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
- bool PartialOverloading = false,
- bool StdNamespaceIsAssociated = false);
+ bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
@@ -1916,6 +1975,30 @@ public:
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
+ // An enum used to represent the different possible results of building a
+ // range-based for loop.
+ enum ForRangeStatus {
+ FRS_Success,
+ FRS_NoViableFunction,
+ FRS_DiagnosticIssued
+ };
+
+ // An enum to represent whether something is dealing with a call to begin()
+ // or a call to end() in a range-based for loop.
+ enum BeginEndFunction {
+ BEF_begin,
+ BEF_end
+ };
+
+ ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
+ SourceLocation RangeLoc,
+ VarDecl *Decl,
+ BeginEndFunction BEF,
+ const DeclarationNameInfo &NameInfo,
+ LookupResult &MemberLookup,
+ OverloadCandidateSet *CandidateSet,
+ Expr *Range, ExprResult *CallExpr);
+
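The tri-state ForRangeStatus lets a caller distinguish "no viable begin()/end() was found" (worth retrying with a different lookup strategy) from "a diagnostic was already emitted" (stop, and do not pile on). A sketch of the dispatch shape only, not the actual Sema logic; tryMemberLookup and tryADLLookup are stand-ins:

    #include "clang/Sema/Sema.h"
    using namespace clang;

    Sema::ForRangeStatus tryMemberLookup();  // hypothetical
    Sema::ForRangeStatus tryADLLookup();     // hypothetical

    bool buildBeginEnd() {
      Sema::ForRangeStatus Status = tryMemberLookup();
      if (Status == Sema::FRS_NoViableFunction)
        Status = tryADLLookup();      // fall back to non-member lookup
      if (Status == Sema::FRS_DiagnosticIssued)
        return false;                 // error already reported upstream
      return Status == Sema::FRS_Success;
    }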
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
@@ -1924,6 +2007,12 @@ public:
Expr *ExecConfig,
bool AllowTypoCorrection=true);
+ bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ OverloadCandidateSet *CandidateSet,
+ ExprResult *Result);
+
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
@@ -2130,8 +2219,7 @@ public:
void ArgumentDependentLookup(DeclarationName Name, bool Operator,
SourceLocation Loc,
llvm::ArrayRef<Expr *> Args,
- ADLResult &Functions,
- bool StdNamespaceIsAssociated = false);
+ ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
@@ -2148,7 +2236,8 @@ public:
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = 0);
- void FindAssociatedClassesAndNamespaces(llvm::ArrayRef<Expr *> Args,
+ void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
+ llvm::ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
@@ -2249,13 +2338,7 @@ public:
void CollectImmediateProperties(ObjCContainerDecl *CDecl,
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap);
-
-
- /// LookupPropertyDecl - Looks up a property in the current class and all
- /// its protocols.
- ObjCPropertyDecl *LookupPropertyDecl(const ObjCContainerDecl *CDecl,
- IdentifierInfo *II);
-
+
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
Decl *HandlePropertyInClassExtension(Scope *S,
@@ -2399,7 +2482,7 @@ public:
FullExprArg(const FullExprArg& Other) : E(Other.E) {}
ExprResult release() {
- return move(E);
+ return E;
}
Expr *get() const { return E; }
@@ -2502,15 +2585,28 @@ public:
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
+ enum BuildForRangeKind {
+ /// Initial building of a for-range statement.
+ BFRK_Build,
+ /// Instantiation or recovery rebuild of a for-range statement. Don't
+ /// attempt any typo-correction.
+ BFRK_Rebuild,
+ /// Determining whether a for-range statement could be built. Avoid any
+ /// unnecessary or irreversible actions.
+ BFRK_Check
+ };
+
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc,
+ BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc,
+ BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
@@ -2528,21 +2624,19 @@ public:
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
- StmtResult ActOnAsmStmt(SourceLocation AsmLoc,
- bool IsSimple, bool IsVolatile,
- unsigned NumOutputs, unsigned NumInputs,
- IdentifierInfo **Names,
- MultiExprArg Constraints,
- MultiExprArg Exprs,
- Expr *AsmString,
- MultiExprArg Clobbers,
- SourceLocation RParenLoc,
- bool MSAsm = false);
+ StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
+ bool IsVolatile, unsigned NumOutputs,
+ unsigned NumInputs, IdentifierInfo **Names,
+ MultiExprArg Constraints, MultiExprArg Exprs,
+ Expr *AsmString, MultiExprArg Clobbers,
+ SourceLocation RParenLoc);
- StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc,
- SourceLocation LBraceLoc,
- ArrayRef<Token> AsmToks,
- SourceLocation EndLoc);
+ NamedDecl *LookupInlineAsmIdentifier(StringRef Name, SourceLocation Loc,
+ unsigned &Size);
+ bool LookupInlineAsmField(StringRef Base, StringRef Member,
+ unsigned &Offset, SourceLocation AsmLoc);
+ StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
+ ArrayRef<Token> AsmToks, SourceLocation EndLoc);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
@@ -2639,7 +2733,8 @@ public:
void EmitDeprecationWarning(NamedDecl *D, StringRef Message,
SourceLocation Loc,
- const ObjCInterfaceDecl *UnknownObjCClass=0);
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty);
void HandleDelayedDeprecationCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
@@ -2663,7 +2758,10 @@ public:
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = 0,
bool IsDecltype = false);
-
+ enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
+ void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
+ ReuseLambdaContextDecl_t,
+ bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
@@ -2815,7 +2913,8 @@ public:
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
- const DeclarationNameInfo &NameInfo);
+ const DeclarationNameInfo &NameInfo,
+ bool IsAddressOfOperand);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
@@ -3279,18 +3378,11 @@ public:
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
- // noexcept is the most restrictive, but is only used in C++0x.
+ // noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
- // Finally no specification.
+ // Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
- //
- // If this exception specification cannot be known yet (for instance,
- // because this is the exception specification for a defaulted default
- // constructor and we haven't finished parsing the deferred parts of the
- // class yet), the C++0x standard does not specify how to behave. We
- // record this as an 'unknown' exception specification, which overrules
- // any other specification (even 'none', to keep this rule simple).
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
@@ -3330,8 +3422,17 @@ public:
/// computed exception specification.
void getEPI(FunctionProtoType::ExtProtoInfo &EPI) const {
EPI.ExceptionSpecType = getExceptionSpecType();
- EPI.NumExceptions = size();
- EPI.Exceptions = data();
+ if (EPI.ExceptionSpecType == EST_Dynamic) {
+ EPI.NumExceptions = size();
+ EPI.Exceptions = data();
+ } else if (EPI.ExceptionSpecType == EST_None) {
+ /// C++11 [except.spec]p14:
+ /// The exception-specification is noexcept(false) if the set of
+ /// potential exceptions of the special member function contains "any"
+ EPI.ExceptionSpecType = EST_ComputedNoexcept;
+ EPI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
+ tok::kw_false).take();
+ }
}
FunctionProtoType::ExtProtoInfo getEPI() const {
FunctionProtoType::ExtProtoInfo EPI;
@@ -3515,7 +3616,7 @@ public:
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
- ASTOwningVector<Expr*> &ConvertedArgs,
+ SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false);
ParsedType getDestructorName(SourceLocation TildeLoc,
@@ -3661,7 +3762,7 @@ public:
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
- ExprResult BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
@@ -3991,7 +4092,8 @@ public:
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
- bool KnownDependent = false);
+ TypeSourceInfo *Info,
+ bool KnownDependent);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
@@ -4300,7 +4402,7 @@ public:
SourceLocation RParenLoc,
bool Failed);
- FriendDecl *CheckFriendTypeDecl(SourceLocation Loc,
+ FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
@@ -4622,8 +4724,6 @@ public:
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
- ///
- /// \param TUK
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
@@ -4799,7 +4899,8 @@ public:
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
- const TemplateArgumentLoc &Arg);
+ const TemplateArgumentLoc &Arg,
+ unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
@@ -5246,6 +5347,8 @@ public:
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
+ /// \brief The declaration was invalid; do nothing.
+ TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
@@ -5681,10 +5784,10 @@ public:
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
- InstantiatingTemplate(const InstantiatingTemplate&); // not implemented
+ InstantiatingTemplate(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
InstantiatingTemplate&
- operator=(const InstantiatingTemplate&); // not implemented
+ operator=(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
};
void PrintInstantiationStack();
@@ -6016,7 +6119,7 @@ public:
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
- /// be modified to be consistent with \arg PropertyTy.
+ /// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
@@ -6252,8 +6355,7 @@ public:
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
- SourceLocation PragmaLoc,
- SourceLocation KindLoc);
+ SourceLocation PragmaLoc);
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
@@ -6748,6 +6850,7 @@ public:
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
+ void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
@@ -6763,6 +6866,7 @@ public:
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
Expr **Args, unsigned NumArgs, Selector Sel,
+ ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
@@ -7210,6 +7314,14 @@ public:
}
AvailabilityResult getCurContextAvailability() const;
+
+ const DeclContext *getCurObjCLexicalContext() const {
+ const DeclContext *DC = getCurLexicalContext();
+ // A category implicitly has the attribute of the interface.
+ if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
+ DC = CatD->getClassInterface();
+ return DC;
+ }
};
/// \brief RAII object that enters a new expression evaluation context.
@@ -7225,6 +7337,15 @@ public:
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
+ EnterExpressionEvaluationContext(Sema &Actions,
+ Sema::ExpressionEvaluationContext NewContext,
+ Sema::ReuseLambdaContextDecl_t,
+ bool IsDecltype = false)
+ : Actions(Actions) {
+ Actions.PushExpressionEvaluationContext(NewContext,
+ Sema::ReuseLambdaContextDecl,
+ IsDecltype);
+ }
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
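The ReuseLambdaContextDecl_t overloads added in this file use the classic tag-dispatch idiom: a one-value enum whose only purpose is to select an overload at the call site, here telling PushExpressionEvaluationContext to reuse the enclosing context's lambda context declaration instead of receiving a new one. The idiom in isolation, with hypothetical names:

    #include <cstdio>

    enum ReuseContext_t { ReuseContext };  // one-value enum used purely as a tag

    void push(int Ctx, void *ContextDecl) {          // "normal" overload
      std::printf("new decl %p in ctx %d\n", ContextDecl, Ctx);
    }
    void push(int Ctx, ReuseContext_t) {             // tag-selected overload
      std::printf("reusing enclosing decl in ctx %d\n", Ctx);
    }

    int main() {
      push(1, (void *)0);    // pass a declaration explicitly
      push(1, ReuseContext); // the tag flips behavior at zero runtime cost
      return 0;
    }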
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h
index 139cce8..676646a 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h
@@ -42,7 +42,6 @@ namespace clang {
static bool classof(const ASTConsumer *Consumer) {
return Consumer->SemaConsumer;
}
- static bool classof(const SemaConsumer *) { return true; }
};
}
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Template.h b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
index 273374d..bbccd25 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Template.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
@@ -239,8 +239,9 @@ namespace clang {
unsigned NumArgsInPartiallySubstitutedPack;
// This class is non-copyable
- LocalInstantiationScope(const LocalInstantiationScope &);
- LocalInstantiationScope &operator=(const LocalInstantiationScope &);
+ LocalInstantiationScope(
+ const LocalInstantiationScope &) LLVM_DELETED_FUNCTION;
+ void operator=(const LocalInstantiationScope &) LLVM_DELETED_FUNCTION;
public:
LocalInstantiationScope(Sema &SemaRef, bool CombineWithOuterScope = false)
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
index 4c2d876..251a659 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
@@ -19,7 +19,6 @@
namespace clang {
-class ASTContext;
class TemplateArgumentList;
namespace sema {
@@ -28,9 +27,6 @@ namespace sema {
/// deduction, whose success or failure was described by a
/// TemplateDeductionResult value.
class TemplateDeductionInfo {
- /// \brief The context in which the template arguments are stored.
- ASTContext &Context;
-
/// \brief The deduced template argument list.
///
TemplateArgumentList *Deduced;
@@ -46,17 +42,12 @@ class TemplateDeductionInfo {
/// SFINAE while performing template argument deduction.
SmallVector<PartialDiagnosticAt, 4> SuppressedDiagnostics;
- // do not implement these
- TemplateDeductionInfo(const TemplateDeductionInfo&);
- TemplateDeductionInfo &operator=(const TemplateDeductionInfo&);
+ TemplateDeductionInfo(const TemplateDeductionInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TemplateDeductionInfo &) LLVM_DELETED_FUNCTION;
public:
- TemplateDeductionInfo(ASTContext &Context, SourceLocation Loc)
- : Context(Context), Deduced(0), Loc(Loc), HasSFINAEDiagnostic(false) { }
-
- ~TemplateDeductionInfo() {
- // FIXME: if (Deduced) Deduced->Destroy(Context);
- }
+ TemplateDeductionInfo(SourceLocation Loc)
+ : Deduced(0), Loc(Loc), HasSFINAEDiagnostic(false) { }
/// \brief Returns the location at which template argument is
/// occurring.
@@ -83,7 +74,6 @@ public:
/// \brief Provide a new template argument list that contains the
/// results of template argument deduction.
void reset(TemplateArgumentList *NewDeduced) {
- // FIXME: if (Deduced) Deduced->Destroy(Context);
Deduced = NewDeduced;
}
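Callers of template argument deduction consequently construct the info object from a location alone; the stored ASTContext (only ever used by the commented-out Destroy call) is gone. A one-function sketch of the new construction; deduceAt is hypothetical:

    #include "clang/Sema/TemplateDeduction.h"

    void deduceAt(clang::SourceLocation Loc) {
      // Previously: TemplateDeductionInfo Info(Context, Loc);
      clang::sema::TemplateDeductionInfo Info(Loc); // no ASTContext needed
      (void)Info;
    }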
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h b/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h
index a8f6e11..2b4a9e6 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h
@@ -170,6 +170,17 @@ public:
return CorrectionDecls.size() > 1;
}
+ void setCorrectionRange(CXXScopeSpec* SS,
+ const DeclarationNameInfo &TypoName) {
+ CorrectionRange.setBegin(CorrectionNameSpec && SS ? SS->getBeginLoc()
+ : TypoName.getLoc());
+ CorrectionRange.setEnd(TypoName.getLoc());
+ }
+
+ SourceRange getCorrectionRange() const {
+ return CorrectionRange;
+ }
+
typedef llvm::SmallVector<NamedDecl*, 1>::iterator decl_iterator;
decl_iterator begin() {
return isKeyword() ? CorrectionDecls.end() : CorrectionDecls.begin();
@@ -193,6 +204,7 @@ private:
unsigned CharDistance;
unsigned QualifierDistance;
unsigned CallbackDistance;
+ SourceRange CorrectionRange;
};
/// @brief Base class for callback objects used by Sema::CorrectTypo to check
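The new range tracking gives diagnostics a single SourceRange covering the whole typo, qualifier included, which is exactly what a fix-it replacement needs. A hypothetical sketch of the intended flow; noteCorrection and its parameters are stand-ins for a CorrectTypo-style caller:

    #include "clang/Sema/TypoCorrection.h"
    using namespace clang;

    void noteCorrection(TypoCorrection &Corrected, CXXScopeSpec *SS,
                        const DeclarationNameInfo &TypoName) {
      // Record where the typo (nested-name-specifier + name) appeared...
      Corrected.setCorrectionRange(SS, TypoName);
      // ...so a later diagnostic can replace exactly that range, e.g. via
      // FixItHint::CreateReplacement(Corrected.getCorrectionRange(), ...).
      SourceRange R = Corrected.getCorrectionRange();
      (void)R;
    }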
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
index dbe6e5a..8c58fb2 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
@@ -126,7 +126,13 @@ namespace clang {
/// \brief The number of predefined identifier IDs.
const unsigned int NUM_PREDEF_IDENT_IDS = 1;
+
+ /// \brief An ID number that refers to a macro in an AST file.
+ typedef uint32_t MacroID;
+ /// \brief The number of predefined macro IDs.
+ const unsigned int NUM_PREDEF_MACRO_IDS = 1;
+
/// \brief An ID number that refers to an ObjC selector in an AST file.
typedef uint32_t SelectorID;
@@ -210,7 +216,71 @@ namespace clang {
SUBMODULE_BLOCK_ID,
/// \brief The block containing comments.
- COMMENTS_BLOCK_ID
+ COMMENTS_BLOCK_ID,
+
+ /// \brief The control block, which contains all of the
+ /// information that needs to be validated prior to committing
+ /// to loading the AST file.
+ CONTROL_BLOCK_ID,
+
+ /// \brief The block of input files, which were used as inputs
+ /// to create this AST file.
+ ///
+ /// This block is part of the control block.
+ INPUT_FILES_BLOCK_ID
+ };
+
+ /// \brief Record types that occur within the control block.
+ enum ControlRecordTypes {
+ /// \brief AST file metadata, including the AST file version number
+ /// and information about the compiler used to build this AST file.
+ METADATA = 1,
+
+ /// \brief Record code for the list of other AST files imported by
+ /// this AST file.
+ IMPORTS = 2,
+
+ /// \brief Record code for the language options table.
+ ///
+ /// The record with this code contains the contents of the
+ /// LangOptions structure. We serialize the entire contents of
+ /// the structure, and let the reader decide which options are
+ /// actually important to check.
+ LANGUAGE_OPTIONS = 3,
+
+ /// \brief Record code for the target options table.
+ TARGET_OPTIONS = 4,
+
+ /// \brief Record code for the original file that was used to
+ /// generate the AST file, including both its file ID and its
+ /// name.
+ ORIGINAL_FILE = 5,
+
+ /// \brief The directory that the PCH was originally created in.
+ ORIGINAL_PCH_DIR = 6,
+
+ /// \brief Offsets into the input-files block where input files
+ /// reside.
+ INPUT_FILE_OFFSETS = 7,
+
+ /// \brief Record code for the diagnostic options table.
+ DIAGNOSTIC_OPTIONS = 8,
+
+ /// \brief Record code for the filesystem options table.
+ FILE_SYSTEM_OPTIONS = 9,
+
+ /// \brief Record code for the headers search options table.
+ HEADER_SEARCH_OPTIONS = 10,
+
+ /// \brief Record code for the preprocessor options table.
+ PREPROCESSOR_OPTIONS = 11
+ };
+
+ /// \brief Record types that occur within the input-files block
+ /// inside the control block.
+ enum InputFileRecordTypes {
+ /// \brief An input file.
+ INPUT_FILE = 1
};
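The point of splitting these records out into a control block is ordering: a reader can scan the (small) control block, validate compatibility, and bail out before touching the much larger AST block. A hypothetical sketch of that scan; readNextRecordCode is a stand-in for the real bitstream cursor, and only the record codes come from the enums above:

    #include "clang/Serialization/ASTBitCodes.h"
    using namespace clang::serialization;

    bool readNextRecordCode(unsigned &Code);  // stand-in, declared only

    bool validateControlBlock() {
      unsigned Code;
      while (readNextRecordCode(Code)) {
        switch (Code) {
        case METADATA:         /* check the AST file version number */     break;
        case LANGUAGE_OPTIONS: /* compare the serialized LangOptions */    break;
        case TARGET_OPTIONS:   /* compare target triple and features */    break;
        default:               /* IMPORTS, ORIGINAL_FILE, offsets, ... */  break;
        }
      }
      return true;  // only now commit to loading the AST block proper
    }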
/// \brief Record types that occur within the AST block itself.
@@ -241,25 +311,13 @@ namespace clang {
/// reserved for the translation unit declaration.
DECL_OFFSET = 2,
- /// \brief Record code for the language options table.
- ///
- /// The record with this code contains the contents of the
- /// LangOptions structure. We serialize the entire contents of
- /// the structure, and let the reader decide which options are
- /// actually important to check.
- LANGUAGE_OPTIONS = 3,
-
- /// \brief AST file metadata, including the AST file version number
- /// and the target triple used to build the AST file.
- METADATA = 4,
-
/// \brief Record code for the table of offsets of each
/// identifier ID.
///
/// The offset table contains offsets into the blob stored in
/// the IDENTIFIER_TABLE record. Each offset points to the
/// NULL-terminated string that corresponds to that identifier.
- IDENTIFIER_OFFSET = 5,
+ IDENTIFIER_OFFSET = 3,
/// \brief Record code for the identifier table.
///
@@ -273,7 +331,7 @@ namespace clang {
/// between offsets (for unresolved identifier IDs) and
/// IdentifierInfo pointers (for already-resolved identifier
/// IDs).
- IDENTIFIER_TABLE = 6,
+ IDENTIFIER_TABLE = 4,
/// \brief Record code for the array of external definitions.
///
@@ -283,7 +341,7 @@ namespace clang {
/// reported to the AST consumer after the AST file has been
/// read, since their presence can affect the semantics of the
/// program (e.g., for code generation).
- EXTERNAL_DEFINITIONS = 7,
+ EXTERNAL_DEFINITIONS = 5,
/// \brief Record code for the set of non-builtin, special
/// types.
@@ -292,33 +350,33 @@ namespace clang {
/// that are constructed during semantic analysis (e.g.,
/// __builtin_va_list). The SPECIAL_TYPE_* constants provide
/// offsets into this record.
- SPECIAL_TYPES = 8,
+ SPECIAL_TYPES = 6,
/// \brief Record code for the extra statistics we gather while
/// generating an AST file.
- STATISTICS = 9,
+ STATISTICS = 7,
/// \brief Record code for the array of tentative definitions.
- TENTATIVE_DEFINITIONS = 10,
+ TENTATIVE_DEFINITIONS = 8,
/// \brief Record code for the array of locally-scoped external
/// declarations.
- LOCALLY_SCOPED_EXTERNAL_DECLS = 11,
+ LOCALLY_SCOPED_EXTERNAL_DECLS = 9,
/// \brief Record code for the table of offsets into the
/// Objective-C method pool.
- SELECTOR_OFFSETS = 12,
+ SELECTOR_OFFSETS = 10,
/// \brief Record code for the Objective-C method pool,
- METHOD_POOL = 13,
+ METHOD_POOL = 11,
/// \brief The value of the next __COUNTER__ to dispense.
/// [PP_COUNTER_VALUE, Val]
- PP_COUNTER_VALUE = 14,
+ PP_COUNTER_VALUE = 12,
/// \brief Record code for the table of offsets into the block
/// of source-location information.
- SOURCE_LOCATION_OFFSETS = 15,
+ SOURCE_LOCATION_OFFSETS = 13,
/// \brief Record code for the set of source location entries
/// that need to be preloaded by the AST reader.
@@ -326,153 +384,138 @@ namespace clang {
/// This set contains the source location entry for the
/// predefines buffer and for any file entries that need to be
/// preloaded.
- SOURCE_LOCATION_PRELOADS = 16,
-
- /// \brief Record code for the stat() cache.
- STAT_CACHE = 17,
+ SOURCE_LOCATION_PRELOADS = 14,
/// \brief Record code for the set of ext_vector type names.
- EXT_VECTOR_DECLS = 18,
-
- /// \brief Record code for the original file that was used to
- /// generate the AST file.
- ORIGINAL_FILE_NAME = 19,
-
- /// \brief Record code for the file ID of the original file used to
- /// generate the AST file.
- ORIGINAL_FILE_ID = 20,
-
- /// \brief Record code for the version control branch and revision
- /// information of the compiler used to build this AST file.
- VERSION_CONTROL_BRANCH_REVISION = 21,
+ EXT_VECTOR_DECLS = 16,
/// \brief Record code for the array of unused file scoped decls.
- UNUSED_FILESCOPED_DECLS = 22,
+ UNUSED_FILESCOPED_DECLS = 17,
/// \brief Record code for the table of offsets to entries in the
/// preprocessing record.
- PPD_ENTITIES_OFFSETS = 23,
+ PPD_ENTITIES_OFFSETS = 18,
/// \brief Record code for the array of VTable uses.
- VTABLE_USES = 24,
+ VTABLE_USES = 19,
/// \brief Record code for the array of dynamic classes.
- DYNAMIC_CLASSES = 25,
-
- /// \brief Record code for the list of other AST files imported by
- /// this AST file.
- IMPORTS = 26,
+ DYNAMIC_CLASSES = 20,
/// \brief Record code for referenced selector pool.
- REFERENCED_SELECTOR_POOL = 27,
+ REFERENCED_SELECTOR_POOL = 21,
/// \brief Record code for an update to the TU's lexically contained
/// declarations.
- TU_UPDATE_LEXICAL = 28,
+ TU_UPDATE_LEXICAL = 22,
/// \brief Record code for the array describing the locations (in the
/// LOCAL_REDECLARATIONS record) of the redeclaration chains, indexed by
/// the first known ID.
- LOCAL_REDECLARATIONS_MAP = 29,
+ LOCAL_REDECLARATIONS_MAP = 23,
/// \brief Record code for declarations that Sema keeps references of.
- SEMA_DECL_REFS = 30,
+ SEMA_DECL_REFS = 24,
/// \brief Record code for weak undeclared identifiers.
- WEAK_UNDECLARED_IDENTIFIERS = 31,
+ WEAK_UNDECLARED_IDENTIFIERS = 25,
/// \brief Record code for pending implicit instantiations.
- PENDING_IMPLICIT_INSTANTIATIONS = 32,
+ PENDING_IMPLICIT_INSTANTIATIONS = 26,
/// \brief Record code for a decl replacement block.
///
/// If a declaration is modified after having been deserialized, and then
/// written to a dependent AST file, its ID and offset must be added to
/// the replacement block.
- DECL_REPLACEMENTS = 33,
+ DECL_REPLACEMENTS = 27,
/// \brief Record code for an update to a decl context's lookup table.
///
/// In practice, this should only be used for the TU and namespaces.
- UPDATE_VISIBLE = 34,
+ UPDATE_VISIBLE = 28,
/// \brief Record for offsets of DECL_UPDATES records for declarations
/// that were modified after being deserialized and need updates.
- DECL_UPDATE_OFFSETS = 35,
+ DECL_UPDATE_OFFSETS = 29,
/// \brief Record of updates for a declaration that was modified after
/// being deserialized.
- DECL_UPDATES = 36,
+ DECL_UPDATES = 30,
/// \brief Record code for the table of offsets to CXXBaseSpecifier
/// sets.
- CXX_BASE_SPECIFIER_OFFSETS = 37,
+ CXX_BASE_SPECIFIER_OFFSETS = 31,
/// \brief Record code for \#pragma diagnostic mappings.
- DIAG_PRAGMA_MAPPINGS = 38,
+ DIAG_PRAGMA_MAPPINGS = 32,
/// \brief Record code for special CUDA declarations.
- CUDA_SPECIAL_DECL_REFS = 39,
+ CUDA_SPECIAL_DECL_REFS = 33,
/// \brief Record code for header search information.
- HEADER_SEARCH_TABLE = 40,
-
- /// \brief The directory that the PCH was originally created in.
- ORIGINAL_PCH_DIR = 41,
+ HEADER_SEARCH_TABLE = 34,
/// \brief Record code for floating point \#pragma options.
- FP_PRAGMA_OPTIONS = 42,
+ FP_PRAGMA_OPTIONS = 35,
/// \brief Record code for enabled OpenCL extensions.
- OPENCL_EXTENSIONS = 43,
+ OPENCL_EXTENSIONS = 36,
/// \brief The list of delegating constructor declarations.
- DELEGATING_CTORS = 44,
+ DELEGATING_CTORS = 37,
- /// \brief Record code for the table of offsets into the block
- /// of file source-location information.
- FILE_SOURCE_LOCATION_OFFSETS = 45,
-
/// \brief Record code for the set of known namespaces, which are used
/// for typo correction.
- KNOWN_NAMESPACES = 46,
+ KNOWN_NAMESPACES = 38,
/// \brief Record code for the remapping information used to relate
/// loaded modules to the various offsets and IDs (e.g., source location
/// offsets, declaration and type IDs) that are used in that module to
/// refer to other modules.
- MODULE_OFFSET_MAP = 47,
+ MODULE_OFFSET_MAP = 39,
/// \brief Record code for the source manager line table information,
/// which stores information about \#line directives.
- SOURCE_MANAGER_LINE_TABLE = 48,
+ SOURCE_MANAGER_LINE_TABLE = 40,
/// \brief Record code for map of Objective-C class definition IDs to the
/// ObjC categories in a module that are attached to that class.
- OBJC_CATEGORIES_MAP = 49,
+ OBJC_CATEGORIES_MAP = 41,
/// \brief Record code for an array of DeclIDs in a module, sorted by file.
- FILE_SORTED_DECLS = 50,
+ FILE_SORTED_DECLS = 42,
/// \brief Record code for an array of all of the (sub)modules that were
/// imported by the AST file.
- IMPORTED_MODULES = 51,
+ IMPORTED_MODULES = 43,
/// \brief Record code for the set of merged declarations in an AST file.
- MERGED_DECLARATIONS = 52,
+ MERGED_DECLARATIONS = 44,
/// \brief Record code for the array of redeclaration chains.
///
/// This array can only be interpreted properly using the local
/// redeclarations map.
- LOCAL_REDECLARATIONS = 53,
+ LOCAL_REDECLARATIONS = 45,
/// \brief Record code for the array of Objective-C categories (including
/// extensions).
///
/// This array can only be interpreted properly using the Objective-C
/// categories map.
- OBJC_CATEGORIES = 54
+ OBJC_CATEGORIES = 46,
+
+ /// \brief Record code for the table of offsets of each macro ID.
+ ///
+ /// The offset table contains offsets into the blob stored in
+ /// the preprocessor block. Each offset points to the corresponding
+ /// macro definition.
+ MACRO_OFFSET = 47,
+
+ /// \brief Record of updates for a macro that was modified after
+ /// being deserialized.
+ MACRO_UPDATES = 48
};
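
The MACRO_OFFSET comment above describes a plain offset table: entry (ID - 1) holds the offset of that macro's definition inside the preprocessor block. A standalone toy of that lookup follows; the numbers are invented, and the names MacroOffsets/LocalNumMacros merely echo the ModuleFile fields added later in this diff.

    // Toy model of the MACRO_OFFSET lookup; all values are invented.
    #include <cassert>
    #include <cstdint>
    #include <iostream>

    int main() {
      const uint32_t MacroOffsets[] = {128, 512, 2048}; // entries for IDs 1..3
      const unsigned LocalNumMacros = 3;

      unsigned ID = 2;                        // macro IDs are 1-based
      assert(ID >= 1 && ID <= LocalNumMacros);
      uint64_t Offset = MacroOffsets[ID - 1]; // table is indexed by ID - 1
      std::cout << "macro " << ID << " is at offset " << Offset << "\n";
    }
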
/// \brief Record types used within a source manager block.
@@ -537,16 +580,21 @@ namespace clang {
SUBMODULE_UMBRELLA_HEADER = 2,
/// \brief Specifies a header that falls into this (sub)module.
SUBMODULE_HEADER = 3,
+ /// \brief Specifies a top-level header that falls into this (sub)module.
+ SUBMODULE_TOPHEADER = 4,
/// \brief Specifies an umbrella directory.
- SUBMODULE_UMBRELLA_DIR = 4,
+ SUBMODULE_UMBRELLA_DIR = 5,
/// \brief Specifies the submodules that are imported by this
/// submodule.
- SUBMODULE_IMPORTS = 5,
+ SUBMODULE_IMPORTS = 6,
/// \brief Specifies the submodules that are re-exported from this
/// submodule.
- SUBMODULE_EXPORTS = 6,
+ SUBMODULE_EXPORTS = 7,
/// \brief Specifies a required feature.
- SUBMODULE_REQUIRES = 7
+ SUBMODULE_REQUIRES = 8,
+ /// \brief Specifies a header that has been explicitly excluded
+ /// from this submodule.
+ SUBMODULE_EXCLUDED_HEADER = 9
};
/// \brief Record types used within a comments block.
@@ -642,7 +690,9 @@ namespace clang {
/// \brief The pseudo-object placeholder type.
PREDEF_TYPE_PSEUDO_OBJECT = 35,
/// \brief The __va_list_tag placeholder type.
- PREDEF_TYPE_VA_LIST_TAG = 36
+ PREDEF_TYPE_VA_LIST_TAG = 36,
+ /// \brief The placeholder type for builtin functions.
+ PREDEF_TYPE_BUILTIN_FN = 37
};
/// \brief The number of predefined type IDs that are reserved for
@@ -943,6 +993,9 @@ namespace clang {
/// \brief A NonTypeTemplateParmDecl record that stores an expanded
/// non-type template parameter pack.
DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK,
+ /// \brief A TemplateTemplateParmDecl record that stores an expanded
+ /// template template parameter pack.
+ DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK,
/// \brief A ClassScopeFunctionSpecializationDecl record a class scope
/// function specialization. (Microsoft extension).
DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION,
@@ -999,8 +1052,10 @@ namespace clang {
STMT_RETURN,
/// \brief A DeclStmt record.
STMT_DECL,
- /// \brief An AsmStmt record.
- STMT_ASM,
+ /// \brief A GCC-style AsmStmt record.
+ STMT_GCCASM,
+ /// \brief A MS-style AsmStmt record.
+ STMT_MSASM,
/// \brief A PredefinedExpr record.
EXPR_PREDEFINED,
/// \brief A DeclRefExpr record.
@@ -1186,6 +1241,7 @@ namespace clang {
EXPR_SIZEOF_PACK, // SizeOfPackExpr
EXPR_SUBST_NON_TYPE_TEMPLATE_PARM, // SubstNonTypeTemplateParmExpr
EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK,// SubstNonTypeTemplateParmPackExpr
+ EXPR_FUNCTION_PARM_PACK, // FunctionParmPackExpr
EXPR_MATERIALIZE_TEMPORARY, // MaterializeTemporaryExpr
// CUDA
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
index ab0d313..0218129 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
@@ -23,6 +23,7 @@ class Decl;
class ASTReader;
class QualType;
class MacroDefinition;
+class MacroInfo;
class Module;
class ASTDeserializationListener {
@@ -37,6 +38,8 @@ public:
/// \brief An identifier was deserialized from the AST file.
virtual void IdentifierRead(serialization::IdentID ID,
IdentifierInfo *II) { }
+ /// \brief A macro was read from the AST file.
+ virtual void MacroRead(serialization::MacroID ID, MacroInfo *MI) { }
/// \brief A type was deserialized from the AST file. The ID here has the
/// qualifier bits already removed, and T is guaranteed to be locally
/// unqualified.
@@ -48,9 +51,6 @@ public:
/// \brief A macro definition was read from the AST file.
virtual void MacroDefinitionRead(serialization::PreprocessedEntityID,
MacroDefinition *MD) { }
- /// \brief A macro definition that had previously been deserialized
- /// (and removed via IdentifierRead) has now been made visible.
- virtual void MacroVisible(IdentifierInfo *II) { }
/// \brief A module definition was read from the AST file.
virtual void ModuleRead(serialization::SubmoduleID ID, Module *Mod) { }
};
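
A hedged sketch of a client of the new MacroRead() callback: the override signature is the one added above, while the class name and body are hypothetical, and the fragment assumes the surrounding Clang headers rather than being standalone.

    // Hypothetical listener observing macro deserialization.
    class MacroLoggingListener : public clang::ASTDeserializationListener {
    public:
      virtual void MacroRead(clang::serialization::MacroID ID,
                             clang::MacroInfo *MI) {
        // Called once per macro read from the AST file; MI is not owned here.
        (void)ID;
        (void)MI;
      }
    };
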
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
index f0b7275..e23ea5c 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
@@ -24,6 +24,7 @@
#include "clang/AST/TemplateBase.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/PPMutationListener.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
@@ -33,6 +34,7 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
@@ -61,6 +63,7 @@ class ASTUnit; // FIXME: Layering violation and egregious hack.
class Attr;
class Decl;
class DeclContext;
+class DiagnosticOptions;
class NestedNameSpecifier;
class CXXBaseSpecifier;
class CXXConstructorDecl;
@@ -70,6 +73,7 @@ class MacroDefinition;
class NamedDecl;
class OpaqueValueExpr;
class Preprocessor;
+class PreprocessorOptions;
class Sema;
class SwitchCase;
class ASTDeserializationListener;
@@ -80,15 +84,7 @@ class ASTStmtReader;
class TypeLocReader;
struct HeaderFileInfo;
class VersionTuple;
-
-struct PCHPredefinesBlock {
- /// \brief The file ID for this predefines buffer in a PCH file.
- FileID BufferID;
-
- /// \brief This predefines buffer in a PCH file.
- StringRef Data;
-};
-typedef SmallVector<PCHPredefinesBlock, 2> PCHPredefinesBlocks;
+class TargetOptions;
/// \brief Abstract interface for callback invocations by the ASTReader.
///
@@ -103,32 +99,58 @@ public:
/// \brief Receives the language options.
///
/// \returns true to indicate the options are invalid or false otherwise.
- virtual bool ReadLanguageOptions(const LangOptions &LangOpts) {
+ virtual bool ReadLanguageOptions(const LangOptions &LangOpts,
+ bool Complain) {
return false;
}
- /// \brief Receives the target triple.
+ /// \brief Receives the target options.
///
- /// \returns true to indicate the target triple is invalid or false otherwise.
- virtual bool ReadTargetTriple(StringRef Triple) {
+ /// \returns true to indicate the target options are invalid, or false
+ /// otherwise.
+ virtual bool ReadTargetOptions(const TargetOptions &TargetOpts,
+ bool Complain) {
return false;
}
- /// \brief Receives the contents of the predefines buffer.
+ /// \brief Receives the diagnostic options.
///
- /// \param Buffers Information about the predefines buffers.
+ /// \returns true to indicate the diagnostic options are invalid, or false
+ /// otherwise.
+ virtual bool ReadDiagnosticOptions(const DiagnosticOptions &DiagOpts,
+ bool Complain) {
+ return false;
+ }
+
+ /// \brief Receives the file system options.
///
- /// \param OriginalFileName The original file name for the AST file, which
- /// will appear as an entry in the predefines buffer.
+ /// \returns true to indicate the file system options are invalid, or false
+ /// otherwise.
+ virtual bool ReadFileSystemOptions(const FileSystemOptions &FSOpts,
+ bool Complain) {
+ return false;
+ }
+
+ /// \brief Receives the header search options.
+ ///
+ /// \returns true to indicate the header search options are invalid, or false
+ /// otherwise.
+ virtual bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
+ bool Complain) {
+ return false;
+ }
+
+ /// \brief Receives the preprocessor options.
///
- /// \param SuggestedPredefines If necessary, additional definitions are added
- /// here.
+ /// \param SuggestedPredefines Can be filled in with the set of predefines
+ /// that are suggested by the preprocessor options. Typically only used when
+ /// loading a precompiled header.
///
- /// \returns true to indicate the predefines are invalid or false otherwise.
- virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
- StringRef OriginalFileName,
- std::string &SuggestedPredefines,
- FileManager &FileMgr) {
+ /// \returns true to indicate the preprocessor options are invalid, or false
+ /// otherwise.
+ virtual bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool Complain,
+ std::string &SuggestedPredefines) {
return false;
}
@@ -136,7 +158,8 @@ public:
virtual void ReadHeaderFileInfo(const HeaderFileInfo &HFI, unsigned ID) {}
/// \brief Receives __COUNTER__ value.
- virtual void ReadCounter(unsigned Value) {}
+ virtual void ReadCounter(const serialization::ModuleFile &M,
+ unsigned Value) {}
};
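
Each options callback now receives a Complain flag and vetoes the AST file by returning true. Below is a sketch of one possible listener that rejects C++ AST files; the signature comes from this diff, but the class and the specific check are illustrative and the fragment is not standalone.

    // Hypothetical validator built on the new callback shape.
    class RejectCXXListener : public clang::ASTReaderListener {
    public:
      virtual bool ReadLanguageOptions(const clang::LangOptions &LangOpts,
                                       bool Complain) {
        if (LangOpts.CPlusPlus) {
          if (Complain) // only diagnose when the reader permits it
            llvm::errs() << "AST file was built as C++\n";
          return true;  // true == options are invalid for this consumer
        }
        return false;
      }
    };
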
/// \brief ASTReaderListener implementation to validate the information of
@@ -151,14 +174,15 @@ public:
PCHValidator(Preprocessor &PP, ASTReader &Reader)
: PP(PP), Reader(Reader), NumHeaderInfos(0) {}
- virtual bool ReadLanguageOptions(const LangOptions &LangOpts);
- virtual bool ReadTargetTriple(StringRef Triple);
- virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
- StringRef OriginalFileName,
- std::string &SuggestedPredefines,
- FileManager &FileMgr);
+ virtual bool ReadLanguageOptions(const LangOptions &LangOpts,
+ bool Complain);
+ virtual bool ReadTargetOptions(const TargetOptions &TargetOpts,
+ bool Complain);
+ virtual bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool Complain,
+ std::string &SuggestedPredefines);
virtual void ReadHeaderFileInfo(const HeaderFileInfo &HFI, unsigned ID);
- virtual void ReadCounter(unsigned Value);
+ virtual void ReadCounter(const serialization::ModuleFile &M, unsigned Value);
private:
void Error(const char *Msg);
@@ -201,7 +225,26 @@ class ASTReader
public:
typedef SmallVector<uint64_t, 64> RecordData;
- enum ASTReadResult { Success, Failure, IgnorePCH };
+ /// \brief The result of reading the control block of an AST file, which
+ /// can fail for various reasons.
+ enum ASTReadResult {
+ /// \brief The control block was read successfully. Aside from failures,
+ /// the AST file is safe to read into the current context.
+ Success,
+ /// \brief The AST file itself appears corrupted.
+ Failure,
+ /// \brief The AST file is out-of-date relative to its input files,
+ /// and needs to be regenerated.
+ OutOfDate,
+ /// \brief The AST file was written by a different version of Clang.
+ VersionMismatch,
+ /// \brief The AST file was written with a different language/target
+ /// configuration.
+ ConfigurationMismatch,
+ /// \brief The AST file has errors.
+ HadErrors
+ };
+
/// \brief Types of AST files.
friend class PCHValidator;
friend class ASTDeclReader;
@@ -341,7 +384,15 @@ private:
/// \brief The set of C++ or Objective-C classes that have forward
/// declarations that have not yet been linked to their definitions.
llvm::SmallPtrSet<Decl *, 4> PendingDefinitions;
-
+
+ typedef llvm::MapVector<Decl *, uint64_t,
+ llvm::SmallDenseMap<Decl *, unsigned, 4>,
+ llvm::SmallVector<std::pair<Decl *, uint64_t>, 4> >
+ PendingBodiesMap;
+
+ /// \brief Functions or methods that have bodies that will be attached.
+ PendingBodiesMap PendingBodies;
+
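llvm::MapVector pairs a map for fast lookup with a vector for stable, insertion-ordered iteration, which is what lets PendingBodies be processed in a deterministic order. A standalone toy of that shape using std containers; it mirrors the idea only and is not llvm::MapVector itself.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      std::map<std::string, size_t> Index;                  // key -> slot
      std::vector<std::pair<std::string, uint64_t> > Order; // insertion order

      const char *Keys[] = {"f", "g", "f"};
      const uint64_t Offsets[] = {100, 200, 300};
      for (unsigned i = 0; i != 3; ++i) {
        std::map<std::string, size_t>::iterator It = Index.find(Keys[i]);
        if (It == Index.end()) { // first insertion claims a stable slot
          Index[Keys[i]] = Order.size();
          Order.push_back(std::make_pair(std::string(Keys[i]), Offsets[i]));
        } else {
          Order[It->second].second = Offsets[i]; // later inserts just update
        }
      }
      // Prints "f -> 300" then "g -> 200": lookup of a map, order of a vector.
      for (size_t i = 0; i != Order.size(); ++i)
        std::cout << Order[i].first << " -> " << Order[i].second << "\n";
    }
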
/// \brief Read the records that describe the contents of declcontexts.
bool ReadDeclContextStorage(ModuleFile &M,
llvm::BitstreamCursor &Cursor,
@@ -359,11 +410,36 @@ private:
typedef ContinuousRangeMap<serialization::IdentID, ModuleFile *, 4>
GlobalIdentifierMapType;
- /// \brief Mapping from global identifer IDs to the module in which the
+ /// \brief Mapping from global identifier IDs to the module in which the
/// identifier resides along with the offset that should be added to the
/// global identifier ID to produce a local ID.
GlobalIdentifierMapType GlobalIdentifierMap;
+ /// \brief A vector containing macros that have already been
+ /// loaded.
+ ///
+ /// If the pointer at index I is non-NULL, then it refers to the
+ /// MacroInfo for the macro with ID=I+1 that has already
+ /// been loaded.
+ std::vector<MacroInfo *> MacrosLoaded;
+
+ typedef ContinuousRangeMap<serialization::MacroID, ModuleFile *, 4>
+ GlobalMacroMapType;
+
+ /// \brief Mapping from global macro IDs to the module in which the
+ /// macro resides along with the offset that should be added to the
+ /// global macro ID to produce a local ID.
+ GlobalMacroMapType GlobalMacroMap;
+
+ typedef llvm::DenseMap<serialization::MacroID,
+ llvm::SmallVector<std::pair<serialization::SubmoduleID,
+ MacroUpdate>, 1> >
+ MacroUpdatesMap;
+
+ /// \brief Mapping from (global) macro IDs to the set of updates to be
+ /// performed to the corresponding macro.
+ MacroUpdatesMap MacroUpdates;
+
/// \brief A vector containing submodules that have already been loaded.
///
/// This vector is indexed by the Submodule ID (-1). NULL submodule entries
@@ -378,8 +454,55 @@ private:
/// global submodule ID to produce a local ID.
GlobalSubmoduleMapType GlobalSubmoduleMap;
+ /// \brief An entity that has been hidden.
+ class HiddenName {
+ public:
+ enum NameKind {
+ Declaration,
+ MacroVisibility,
+ MacroUndef
+ } Kind;
+
+ private:
+ unsigned Loc;
+
+ union {
+ Decl *D;
+ MacroInfo *MI;
+ };
+
+ IdentifierInfo *Id;
+
+ public:
+ HiddenName(Decl *D) : Kind(Declaration), Loc(), D(D), Id() { }
+
+ HiddenName(IdentifierInfo *II, MacroInfo *MI)
+ : Kind(MacroVisibility), Loc(), MI(MI), Id(II) { }
+
+ HiddenName(IdentifierInfo *II, MacroInfo *MI, SourceLocation Loc)
+ : Kind(MacroUndef), Loc(Loc.getRawEncoding()), MI(MI), Id(II) { }
+
+ NameKind getKind() const { return Kind; }
+
+ Decl *getDecl() const {
+ assert(getKind() == Declaration && "Hidden name is not a declaration");
+ return D;
+ }
+
+ std::pair<IdentifierInfo *, MacroInfo *> getMacro() const {
+ assert((getKind() == MacroUndef || getKind() == MacroVisibility)
+ && "Hidden name is not a macro!");
+ return std::make_pair(Id, MI);
+ }
+
+ SourceLocation getMacroUndefLoc() const {
+ assert(getKind() == MacroUndef && "Hidden name is not an undef!");
+ return SourceLocation::getFromRawEncoding(Loc);
+ }
+};
+
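A short sketch exercising the three HiddenName flavors declared above. D, II, MI, and Loc are hypothetical values already in hand, so the fragment is illustrative rather than standalone.

    HiddenName HD(D);           // a hidden declaration
    HiddenName HM(II, MI);      // a macro whose visibility is pending
    HiddenName HU(II, MI, Loc); // a macro hidden by #undef at Loc

    if (HD.getKind() == HiddenName::Declaration)
      (void)HD.getDecl();          // only valid for Declaration
    if (HU.getKind() == HiddenName::MacroUndef)
      (void)HU.getMacroUndefLoc(); // Loc round-trips via its raw encoding
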
/// \brief A set of hidden declarations.
- typedef llvm::SmallVector<llvm::PointerUnion<Decl *, IdentifierInfo *>, 2>
+ typedef llvm::SmallVector<HiddenName, 2>
HiddenNames;
typedef llvm::DenseMap<Module *, HiddenNames> HiddenNamesMapType;
@@ -431,10 +554,13 @@ private:
/// global method pool for this selector.
llvm::DenseMap<Selector, unsigned> SelectorGeneration;
- /// \brief Mapping from identifiers that represent macros whose definitions
- /// have not yet been deserialized to the global offset where the macro
- /// record resides.
- llvm::DenseMap<IdentifierInfo *, uint64_t> UnreadMacroRecordOffsets;
+ typedef llvm::MapVector<IdentifierInfo *,
+ llvm::SmallVector<serialization::MacroID, 2> >
+ PendingMacroIDsMap;
+
+ /// \brief Mapping from identifiers that have a macro history to the global
+ /// IDs of the macros in that history that have not yet been deserialized.
+ PendingMacroIDsMap PendingMacroIDs;
typedef ContinuousRangeMap<unsigned, ModuleFile *, 4>
GlobalPreprocessedEntityMapType;
@@ -553,28 +679,9 @@ private:
SmallVector<serialization::SubmoduleID, 2> ImportedModules;
//@}
- /// \brief The original file name that was used to build the primary AST file,
- /// which may have been modified for relocatable-pch support.
- std::string OriginalFileName;
-
- /// \brief The actual original file name that was used to build the primary
- /// AST file.
- std::string ActualOriginalFileName;
-
- /// \brief The file ID for the original file that was used to build the
- /// primary AST file.
- FileID OriginalFileID;
-
- /// \brief The directory that the PCH was originally created in. Used to
- /// allow resolving headers even after headers+PCH was moved to a new path.
- std::string OriginalDir;
-
/// \brief The directory that the PCH we are reading is stored in.
std::string CurrentDir;
- /// \brief Whether this precompiled header is a relocatable PCH file.
- bool RelocatablePCH;
-
/// \brief The system include root to be used when loading the
/// precompiled header.
std::string isysroot;
@@ -583,9 +690,6 @@ private:
/// headers when they are loaded.
bool DisableValidation;
- /// \brief Whether to disable the use of stat caches in AST files.
- bool DisableStatCache;
-
/// \brief Whether to accept an AST file with compiler errors.
bool AllowASTWithCompilerErrors;
@@ -602,10 +706,6 @@ private:
SwitchCaseMapTy *CurrSwitchCaseStmts;
- /// \brief The number of stat() calls that hit/missed the stat
- /// cache.
- unsigned NumStatHits, NumStatMisses;
-
/// \brief The number of source location entries de-serialized from
/// the PCH file.
unsigned NumSLocEntriesRead;
@@ -687,7 +787,7 @@ private:
/// Objective-C protocols.
std::deque<Decl *> InterestingDecls;
- /// \brief The set of redeclarable declaraations that have been deserialized
+ /// \brief The set of redeclarable declarations that have been deserialized
/// since the last time the declaration chains were linked.
llvm::SmallPtrSet<Decl *, 16> RedeclsDeserialized;
@@ -758,8 +858,8 @@ private:
ASTReader &Reader;
enum ReadingKind PrevKind;
- ReadingKindTracker(const ReadingKindTracker&); // do not implement
- ReadingKindTracker &operator=(const ReadingKindTracker&);// do not implement
+ ReadingKindTracker(const ReadingKindTracker &) LLVM_DELETED_FUNCTION;
+ void operator=(const ReadingKindTracker &) LLVM_DELETED_FUNCTION;
public:
ReadingKindTracker(enum ReadingKind newKind, ASTReader &reader)
@@ -770,10 +870,6 @@ private:
~ReadingKindTracker() { Reader.ReadingKind = PrevKind; }
};
- /// \brief All predefines buffers in the chain, to be treated as if
- /// concatenated.
- PCHPredefinesBlocks PCHPredefinesBuffers;
-
/// \brief Suggested contents of the predefines buffer, after this
/// PCH file has been processed.
///
@@ -787,24 +883,45 @@ private:
/// \brief Reads a statement from the specified cursor.
Stmt *ReadStmtFromStream(ModuleFile &F);
+ typedef llvm::PointerIntPair<const FileEntry *, 1, bool> InputFile;
+
+ /// \brief Retrieve the file entry and 'overridden' bit for an input
+ /// file in the given module file.
+ InputFile getInputFile(ModuleFile &F, unsigned ID, bool Complain = true);
+
/// \brief Get a FileEntry out of stored-in-PCH filename, making sure we take
/// into account all the necessary relocations.
const FileEntry *getFileEntry(StringRef filename);
- void MaybeAddSystemRootToFilename(std::string &Filename);
+ void MaybeAddSystemRootToFilename(ModuleFile &M, std::string &Filename);
ASTReadResult ReadASTCore(StringRef FileName, ModuleKind Type,
- ModuleFile *ImportedBy);
- ASTReadResult ReadASTBlock(ModuleFile &F);
- bool CheckPredefinesBuffers();
+ ModuleFile *ImportedBy,
+ llvm::SmallVectorImpl<ModuleFile *> &Loaded,
+ unsigned ClientLoadCapabilities);
+ ASTReadResult ReadControlBlock(ModuleFile &F,
+ llvm::SmallVectorImpl<ModuleFile *> &Loaded,
+ unsigned ClientLoadCapabilities);
+ bool ReadASTBlock(ModuleFile &F);
bool ParseLineTable(ModuleFile &F, SmallVectorImpl<uint64_t> &Record);
- ASTReadResult ReadSourceManagerBlock(ModuleFile &F);
- ASTReadResult ReadSLocEntryRecord(int ID);
+ bool ReadSourceManagerBlock(ModuleFile &F);
llvm::BitstreamCursor &SLocCursorForID(int ID);
SourceLocation getImportLocation(ModuleFile *F);
- ASTReadResult ReadSubmoduleBlock(ModuleFile &F);
- bool ParseLanguageOptions(const RecordData &Record);
-
+ bool ReadSubmoduleBlock(ModuleFile &F);
+ static bool ParseLanguageOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener);
+ static bool ParseTargetOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener);
+ static bool ParseDiagnosticOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener);
+ static bool ParseFileSystemOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener);
+ static bool ParseHeaderSearchOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener);
+ static bool ParsePreprocessorOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener,
+ std::string &SuggestedPredefines);
+
struct RecordLocation {
RecordLocation(ModuleFile *M, uint64_t O)
: F(M), Offset(O) {}
@@ -836,18 +953,82 @@ private:
/// \brief Find the next module that contains entities and return the ID
/// of the first entry.
- /// \arg SLocMapI points at a chunk of a module that contains no
+ ///
+ /// \param SLocMapI points at a chunk of a module that contains no
/// preprocessed entities or the entities it contains are not the
/// ones we are looking for.
serialization::PreprocessedEntityID
findNextPreprocessedEntity(
GlobalSLocOffsetMapType::const_iterator SLocMapI) const;
- /// \brief Returns (ModuleFile, Local index) pair for \arg GlobalIndex of a
+ /// \brief Returns (ModuleFile, Local index) pair for \p GlobalIndex of a
/// preprocessed entity.
std::pair<ModuleFile *, unsigned>
getModulePreprocessedEntity(unsigned GlobalIndex);
+ /// \brief Returns (begin, end) pair for the preprocessed entities of a
+ /// particular module.
+ std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator>
+ getModulePreprocessedEntities(ModuleFile &Mod) const;
+
+ class ModuleDeclIterator {
+ ASTReader *Reader;
+ ModuleFile *Mod;
+ const serialization::LocalDeclID *Pos;
+
+ public:
+ typedef const Decl *value_type;
+ typedef value_type& reference;
+ typedef value_type* pointer;
+
+ ModuleDeclIterator() : Reader(0), Mod(0), Pos(0) { }
+
+ ModuleDeclIterator(ASTReader *Reader, ModuleFile *Mod,
+ const serialization::LocalDeclID *Pos)
+ : Reader(Reader), Mod(Mod), Pos(Pos) { }
+
+ value_type operator*() const {
+ return Reader->GetDecl(Reader->getGlobalDeclID(*Mod, *Pos));
+ }
+
+ ModuleDeclIterator &operator++() {
+ ++Pos;
+ return *this;
+ }
+
+ ModuleDeclIterator operator++(int) {
+ ModuleDeclIterator Prev(*this);
+ ++Pos;
+ return Prev;
+ }
+
+ ModuleDeclIterator &operator--() {
+ --Pos;
+ return *this;
+ }
+
+ ModuleDeclIterator operator--(int) {
+ ModuleDeclIterator Prev(*this);
+ --Pos;
+ return Prev;
+ }
+
+ friend bool operator==(const ModuleDeclIterator &LHS,
+ const ModuleDeclIterator &RHS) {
+ assert(LHS.Reader == RHS.Reader && LHS.Mod == RHS.Mod);
+ return LHS.Pos == RHS.Pos;
+ }
+
+ friend bool operator!=(const ModuleDeclIterator &LHS,
+ const ModuleDeclIterator &RHS) {
+ assert(LHS.Reader == RHS.Reader && LHS.Mod == RHS.Mod);
+ return LHS.Pos != RHS.Pos;
+ }
+ };
+
+ std::pair<ModuleDeclIterator, ModuleDeclIterator>
+ getModuleFileLevelDecls(ModuleFile &Mod);
+
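Within an ASTReader member function, the (begin, end) pair above walks a module file's file-level declarations, resolving each local DeclID lazily via GetDecl. A hedged sketch; MF is a hypothetical ModuleFile, and since both helpers are private the fragment only makes sense inside the reader itself.

    std::pair<ModuleDeclIterator, ModuleDeclIterator>
      Decls = getModuleFileLevelDecls(MF);
    for (ModuleDeclIterator I = Decls.first, E = Decls.second; I != E; ++I) {
      const Decl *D = *I; // GetDecl(getGlobalDeclID(MF, *Pos)) underneath
      (void)D;
    }
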
void PassInterestingDeclsToConsumer();
void PassInterestingDeclToConsumer(Decl *D);
@@ -861,8 +1042,8 @@ private:
void Error(unsigned DiagID, StringRef Arg1 = StringRef(),
StringRef Arg2 = StringRef());
- ASTReader(const ASTReader&); // do not implement
- ASTReader &operator=(const ASTReader &); // do not implement
+ ASTReader(const ASTReader &) LLVM_DELETED_FUNCTION;
+ void operator=(const ASTReader &) LLVM_DELETED_FUNCTION;
public:
/// \brief Load the AST file and validate its contents against the given
/// Preprocessor.
@@ -881,29 +1062,49 @@ public:
/// of its regular consistency checking, allowing the use of precompiled
/// headers that cannot be determined to be compatible.
///
- /// \param DisableStatCache If true, the AST reader will ignore the
- /// stat cache in the AST files. This performance pessimization can
- /// help when an AST file is being used in cases where the
- /// underlying files in the file system may have changed, but
- /// parsing should still continue.
- ///
/// \param AllowASTWithCompilerErrors If true, the AST reader will accept an
/// AST file the was created out of an AST with compiler errors,
/// otherwise it will reject it.
ASTReader(Preprocessor &PP, ASTContext &Context, StringRef isysroot = "",
- bool DisableValidation = false, bool DisableStatCache = false,
+ bool DisableValidation = false,
bool AllowASTWithCompilerErrors = false);
~ASTReader();
SourceManager &getSourceManager() const { return SourceMgr; }
- /// \brief Load the AST file designated by the given file name.
- ASTReadResult ReadAST(const std::string &FileName, ModuleKind Type);
+ /// \brief Flags that indicate what kind of AST loading failures the client
+ /// of the AST reader can directly handle.
+ ///
+ /// When a client states that it can handle a particular kind of failure,
+ /// the AST reader will not emit errors when producing that kind of failure.
+ enum LoadFailureCapabilities {
+ /// \brief The client can't handle any AST loading failures.
+ ARR_None = 0,
+ /// \brief The client can handle an AST file that cannot load because it
+ /// is out-of-date relative to its input files.
+ ARR_OutOfDate = 0x1,
+ /// \brief The client can handle an AST file that cannot load because it
+ /// was built with a different version of Clang.
+ ARR_VersionMismatch = 0x2,
+ /// \brief The client can handle an AST file that cannot load because its
+ /// compiled configuration doesn't match that of the context it was
+ /// loaded into.
+ ARR_ConfigurationMismatch = 0x4
+ };
- /// \brief Checks that no file that is stored in PCH is out-of-sync with
- /// the actual file in the file system.
- ASTReadResult validateFileEntries(ModuleFile &M);
+ /// \brief Load the AST file designated by the given file name.
+ ///
+ /// \param FileName The name of the AST file to load.
+ ///
+ /// \param Type The kind of AST being loaded, e.g., PCH, module, main file,
+ /// or preamble.
+ ///
+ /// \param ClientLoadCapabilities The set of client load-failure
+ /// capabilities, represented as a bitset of the enumerators of
+ /// LoadFailureCapabilities.
+ ASTReadResult ReadAST(const std::string &FileName, ModuleKind Type,
+ unsigned ClientLoadCapabilities);
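
The capability flags combine as a bitmask and pair with the richer ASTReadResult above. A sketch of a load that tolerates stale or version-mismatched files; Reader, FileName, and the two recovery helpers are hypothetical, while the flags, result codes, and ReadAST signature are the ones introduced in this diff (MK_PCH is assumed from serialization's ModuleKind).

    unsigned Caps = ASTReader::ARR_OutOfDate | ASTReader::ARR_VersionMismatch;
    switch (Reader.ReadAST(FileName, serialization::MK_PCH, Caps)) {
    case ASTReader::Success:
      break;                     // safe to use the loaded AST
    case ASTReader::OutOfDate:
    case ASTReader::VersionMismatch:
      rebuildPCH(FileName);      // hypothetical recovery path
      break;
    default:                     // Failure, ConfigurationMismatch, HadErrors
      reportHardError(FileName); // hypothetical
      break;
    }
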
/// \brief Make the entities in the given module and any of its (non-explicit)
/// submodules visible to name lookup.
@@ -947,8 +1148,11 @@ public:
/// \brief Retrieve the preprocessor.
Preprocessor &getPreprocessor() const { return PP; }
- /// \brief Retrieve the name of the original source file name
- const std::string &getOriginalSourceFile() { return OriginalFileName; }
+ /// \brief Retrieve the name of the original source file for the primary
+ /// module file.
+ StringRef getOriginalSourceFile() {
+ return ModuleMgr.getPrimaryModule().OriginalSourceFileName;
+ }
/// \brief Retrieve the name of the original source file name directly from
/// the AST file, without actually loading the AST file.
@@ -956,6 +1160,21 @@ public:
FileManager &FileMgr,
DiagnosticsEngine &Diags);
+ /// \brief Read the control block for the named AST file.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ static bool readASTFileControlBlock(StringRef Filename,
+ FileManager &FileMgr,
+ ASTReaderListener &Listener);
+
+ /// \brief Determine whether the given AST file is acceptable to load into a
+ /// translation unit with the given language and target options.
+ static bool isAcceptableASTFile(StringRef Filename,
+ FileManager &FileMgr,
+ const LangOptions &LangOpts,
+ const TargetOptions &TargetOpts,
+ const PreprocessorOptions &PPOpts);
+
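A sketch of pre-flighting an AST file with the static helper above before committing to a load; every variable here is a hypothetical stand-in.

    if (!ASTReader::isAcceptableASTFile(PCHFileName, FileMgr, LangOpts,
                                        TargetOpts, PPOpts)) {
      // Built with an incompatible language/target/preprocessor
      // configuration; fall back to parsing the headers directly.
    }
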
/// \brief Returns the suggested contents of the predefines buffer,
/// which contains a (typically-empty) subset of the predefines
/// built prior to including the precompiled header.
@@ -968,12 +1187,12 @@ public:
virtual PreprocessedEntity *ReadPreprocessedEntity(unsigned Index);
/// \brief Returns a pair of [Begin, End) indices of preallocated
- /// preprocessed entities that \arg Range encompasses.
+ /// preprocessed entities that \p Range encompasses.
virtual std::pair<unsigned, unsigned>
findPreprocessedEntitiesInRange(SourceRange Range);
/// \brief Optionally returns true or false if the preallocated preprocessed
- /// entity with index \arg Index came from file \arg FID.
+ /// entity with index \p Index came from file \p FID.
virtual llvm::Optional<bool> isPreprocessedEntityInFileID(unsigned Index,
FileID FID);
@@ -992,6 +1211,11 @@ public:
return static_cast<unsigned>(IdentifiersLoaded.size());
}
+ /// \brief Returns the number of macros found in the chain.
+ unsigned getTotalNumMacros() const {
+ return static_cast<unsigned>(MacrosLoaded.size());
+ }
+
/// \brief Returns the number of types found in the chain.
unsigned getTotalNumTypes() const {
return static_cast<unsigned>(TypesLoaded.size());
@@ -1065,17 +1289,17 @@ public:
/// \brief Map from a local declaration ID within a given module to a
/// global declaration ID.
- serialization::DeclID getGlobalDeclID(ModuleFile &F, unsigned LocalID) const;
+ serialization::DeclID getGlobalDeclID(ModuleFile &F,
+ serialization::LocalDeclID LocalID) const;
- /// \brief Returns true if global DeclID \arg ID originated from module
- /// \arg M.
+ /// \brief Returns true if global DeclID \p ID originated from module \p M.
bool isDeclIDFromModule(serialization::GlobalDeclID ID, ModuleFile &M) const;
/// \brief Retrieve the module file that owns the given declaration, or NULL
/// if the declaration is not from a module file.
ModuleFile *getOwningModuleFile(Decl *D);
- /// \brief Returns the source location for the decl \arg ID.
+ /// \brief Returns the source location for the decl \p ID.
SourceLocation getSourceLocationForDeclID(serialization::GlobalDeclID ID);
/// \brief Resolve a declaration ID into a declaration, potentially
@@ -1172,7 +1396,7 @@ public:
SmallVectorImpl<Decl*> &Decls);
/// \brief Get the decls that are contained in a file in the Offset/Length
- /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// range. \p Length can be 0 to indicate a point at \p Offset instead of
/// a range.
virtual void FindFileRegionDecls(FileID File, unsigned Offset,unsigned Length,
SmallVectorImpl<Decl *> &Decls);
@@ -1285,6 +1509,9 @@ public:
}
virtual IdentifierInfo *GetIdentifier(serialization::IdentifierID ID) {
+ // Note that we are loading an identifier.
+ Deserializing AnIdentifier(this);
+
return DecodeIdentifierInfo(ID);
}
@@ -1293,6 +1520,13 @@ public:
serialization::IdentifierID getGlobalIdentifierID(ModuleFile &M,
unsigned LocalID);
+ /// \brief Retrieve the macro with the given ID.
+ MacroInfo *getMacro(serialization::MacroID ID, MacroInfo *Hint = 0);
+
+ /// \brief Retrieve the global macro ID corresponding to the given local
+ /// ID within the given module file.
+ serialization::MacroID getGlobalMacroID(ModuleFile &M, unsigned LocalID);
+
/// \brief Read the source location entry with index ID.
virtual bool ReadSLocEntry(int ID);
@@ -1404,10 +1638,10 @@ public:
llvm::APFloat ReadAPFloat(const RecordData &Record, unsigned &Idx);
/// \brief Read a string.
- std::string ReadString(const RecordData &Record, unsigned &Idx);
+ static std::string ReadString(const RecordData &Record, unsigned &Idx);
/// \brief Read a version tuple.
- VersionTuple ReadVersionTuple(const RecordData &Record, unsigned &Idx);
+ static VersionTuple ReadVersionTuple(const RecordData &Record, unsigned &Idx);
CXXTemporary *ReadCXXTemporary(ModuleFile &F, const RecordData &Record,
unsigned &Idx);
@@ -1436,43 +1670,29 @@ public:
Expr *ReadSubExpr();
/// \brief Reads the macro record located at the given offset.
- void ReadMacroRecord(ModuleFile &F, uint64_t Offset);
+ void ReadMacroRecord(ModuleFile &F, uint64_t Offset, MacroInfo *Hint = 0);
/// \brief Determine the global preprocessed entity ID that corresponds to
/// the given local ID within the given module.
serialization::PreprocessedEntityID
getGlobalPreprocessedEntityID(ModuleFile &M, unsigned LocalID) const;
- /// \brief Note that the identifier is a macro whose record will be loaded
- /// from the given AST file at the given (file-local) offset.
+ /// \brief Note that the identifier has a macro history.
///
/// \param II The name of the macro.
///
- /// \param F The module file from which the macro definition was deserialized.
- ///
- /// \param Offset The offset into the module file at which the macro
- /// definition is located.
- ///
- /// \param Visible Whether the macro should be made visible.
- void setIdentifierIsMacro(IdentifierInfo *II, ModuleFile &F,
- uint64_t Offset, bool Visible);
+ /// \param IDs The global macro IDs that are associated with this identifier.
+ void setIdentifierIsMacro(IdentifierInfo *II,
+ ArrayRef<serialization::MacroID> IDs);
/// \brief Read the set of macros defined by this external macro source.
virtual void ReadDefinedMacros();
- /// \brief Read the macro definition for this identifier.
- virtual void LoadMacroDefinition(IdentifierInfo *II);
-
/// \brief Update an out-of-date identifier.
virtual void updateOutOfDateIdentifier(IdentifierInfo &II);
/// \brief Note that this identifier is up-to-date.
void markIdentifierUpToDate(IdentifierInfo *II);
-
- /// \brief Read the macro definition corresponding to this iterator
- /// into the unread macro record offsets table.
- void LoadMacroDefinition(
- llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos);
/// \brief Load all external visible decls in the given DeclContext.
void completeVisibleDeclsMap(const DeclContext *DC);
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
index d038d58..ac81e21 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
@@ -18,6 +18,7 @@
#include "clang/AST/DeclarationName.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/ASTMutationListener.h"
+#include "clang/Lex/PPMutationListener.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Sema/SemaConsumer.h"
@@ -25,6 +26,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include <map>
@@ -43,14 +45,15 @@ class ASTContext;
class NestedNameSpecifier;
class CXXBaseSpecifier;
class CXXCtorInitializer;
+class FileEntry;
class FPOptions;
class HeaderSearch;
class IdentifierResolver;
class MacroDefinition;
-class MemorizeStatCalls;
class OpaqueValueExpr;
class OpenCLOptions;
class ASTReader;
+class MacroInfo;
class Module;
class PreprocessedEntity;
class PreprocessingRecord;
@@ -70,6 +73,7 @@ namespace SrcMgr { class SLocEntry; }
/// data structures. This bitstream can be de-serialized via an
/// instance of the ASTReader class.
class ASTWriter : public ASTDeserializationListener,
+ public PPMutationListener,
public ASTMutationListener {
public:
typedef SmallVector<uint64_t, 64> RecordData;
@@ -117,6 +121,10 @@ private:
/// \brief Indicates that the AST contained compiler errors.
bool ASTHasCompilerErrors;
+ /// \brief Mapping from input file entries to the index into the
+ /// offset table where information about that input file is stored.
+ llvm::DenseMap<const FileEntry *, uint32_t> InputFileIDs;
+
/// \brief Stores a declaration or a type to be written to the AST file.
class DeclOrType {
public:
@@ -171,8 +179,7 @@ private:
/// indicates the index that this particular vector has in the global one.
unsigned FirstDeclIndex;
};
- typedef llvm::DenseMap<const SrcMgr::SLocEntry *,
- DeclIDInFileInfo *> FileDeclIDsTy;
+ typedef llvm::DenseMap<FileID, DeclIDInFileInfo *> FileDeclIDsTy;
/// \brief Map from file SLocEntries to info about the file-level declarations
/// that it contains.
@@ -215,6 +222,15 @@ private:
/// IdentifierInfo.
llvm::DenseMap<const IdentifierInfo *, serialization::IdentID> IdentifierIDs;
+ /// \brief The first ID number we can use for our own macros.
+ serialization::MacroID FirstMacroID;
+
+ /// \brief The macro ID that will be assigned to the next new macro.
+ serialization::MacroID NextMacroID;
+
+ /// \brief Map that provides the ID numbers of each macro.
+ llvm::DenseMap<MacroInfo *, serialization::MacroID> MacroIDs;
+
/// @name FlushStmt Caches
/// @{
@@ -250,16 +266,10 @@ private:
/// table, indexed by the Selector ID (-1).
std::vector<uint32_t> SelectorOffsets;
- /// \brief Offsets of each of the macro identifiers into the
- /// bitstream.
- ///
- /// For each identifier that is associated with a macro, this map
- /// provides the offset into the bitstream where that macro is
- /// defined.
- llvm::DenseMap<const IdentifierInfo *, uint64_t> MacroOffsets;
+ typedef llvm::MapVector<MacroInfo *, MacroUpdate> MacroUpdatesMap;
- /// \brief The set of identifiers that had macro definitions at some point.
- std::vector<const IdentifierInfo *> DeserializedMacroNames;
+ /// \brief Updates to macro definitions that were loaded from an AST file.
+ MacroUpdatesMap MacroUpdates;
/// \brief Mapping from macro definitions (as they occur in the preprocessing
/// record) to the macro IDs.
@@ -403,10 +413,9 @@ private:
llvm::DenseSet<Stmt *> &ParentStmts);
void WriteBlockInfoBlock();
- void WriteMetadata(ASTContext &Context, StringRef isysroot,
- const std::string &OutputFile);
- void WriteLanguageOptions(const LangOptions &LangOpts);
- void WriteStatCache(MemorizeStatCalls &StatCalls);
+ void WriteControlBlock(Preprocessor &PP, ASTContext &Context,
+ StringRef isysroot, const std::string &OutputFile);
+ void WriteInputFiles(SourceManager &SourceMgr, StringRef isysroot);
void WriteSourceManagerBlock(SourceManager &SourceMgr,
const Preprocessor &PP,
StringRef isysroot);
@@ -428,6 +437,7 @@ private:
void WriteIdentifierTable(Preprocessor &PP, IdentifierResolver &IdResolver,
bool IsModule);
void WriteAttributes(ArrayRef<const Attr*> Attrs, RecordDataImpl &Record);
+ void WriteMacroUpdates();
void ResolveDeclUpdatesBlocks();
void WriteDeclUpdatesBlocks();
void WriteDeclReplacementsBlock();
@@ -455,7 +465,7 @@ private:
void WriteDeclsBlockAbbrevs();
void WriteDecl(ASTContext &Context, Decl *D);
- void WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ void WriteASTCore(Sema &SemaRef,
StringRef isysroot, const std::string &OutputFile,
Module *WritingModule);
@@ -470,15 +480,12 @@ public:
/// \param SemaRef a reference to the semantic analysis object that processed
/// the AST to be written into the precompiled header.
///
- /// \param StatCalls the object that cached all of the stat() calls made while
- /// searching for source files and headers.
- ///
/// \param WritingModule The module that we are writing. If null, we are
/// writing a precompiled header.
///
/// \param isysroot if non-empty, write a relocatable file whose headers
/// are relative to the given system root.
- void WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ void WriteAST(Sema &SemaRef,
const std::string &OutputFile,
Module *WritingModule, StringRef isysroot,
bool hasErrors = false);
@@ -501,6 +508,9 @@ public:
/// \brief Emit a reference to an identifier.
void AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record);
+ /// \brief Emit a reference to a macro.
+ void addMacroRef(MacroInfo *MI, RecordDataImpl &Record);
+
/// \brief Emit a Selector (which is a smart pointer reference).
void AddSelectorRef(Selector, RecordDataImpl &Record);
@@ -518,15 +528,8 @@ public:
/// \brief Get the unique number used to refer to the given identifier.
serialization::IdentID getIdentifierRef(const IdentifierInfo *II);
- /// \brief Retrieve the offset of the macro definition for the given
- /// identifier.
- ///
- /// The identifier must refer to a macro.
- uint64_t getMacroOffset(const IdentifierInfo *II) {
- assert(MacroOffsets.find(II) != MacroOffsets.end() &&
- "Identifier does not name a macro");
- return MacroOffsets[II];
- }
+ /// \brief Get the unique number used to refer to the given macro.
+ serialization::MacroID getMacroRef(MacroInfo *MI);
/// \brief Emit a reference to a type.
void AddTypeRef(QualType T, RecordDataImpl &Record);
@@ -689,13 +692,16 @@ public:
// ASTDeserializationListener implementation
void ReaderInitialized(ASTReader *Reader);
void IdentifierRead(serialization::IdentID ID, IdentifierInfo *II);
+ void MacroRead(serialization::MacroID ID, MacroInfo *MI);
void TypeRead(serialization::TypeIdx Idx, QualType T);
void SelectorRead(serialization::SelectorID ID, Selector Sel);
void MacroDefinitionRead(serialization::PreprocessedEntityID ID,
MacroDefinition *MD);
- void MacroVisible(IdentifierInfo *II);
void ModuleRead(serialization::SubmoduleID ID, Module *Mod);
-
+
+ // PPMutationListener implementation.
+ virtual void UndefinedMacro(MacroInfo *MI);
+
// ASTMutationListener implementation.
virtual void CompletedTagDefinition(const TagDecl *D);
virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D);
@@ -722,7 +728,6 @@ class PCHGenerator : public SemaConsumer {
std::string isysroot;
raw_ostream *Out;
Sema *SemaPtr;
- MemorizeStatCalls *StatCalls; // owned by the FileManager
llvm::SmallVector<char, 128> Buffer;
llvm::BitstreamWriter Stream;
ASTWriter Writer;
@@ -738,6 +743,7 @@ public:
~PCHGenerator();
virtual void InitializeSema(Sema &S) { SemaPtr = &S; }
virtual void HandleTranslationUnit(ASTContext &Ctx);
+ virtual PPMutationListener *GetPPMutationListener();
virtual ASTMutationListener *GetASTMutationListener();
virtual ASTDeserializationListener *GetASTDeserializationListener();
};
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h b/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h
index f368a80..d89cd02 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h
@@ -108,8 +108,8 @@ public:
class Builder {
ContinuousRangeMap &Self;
- Builder(const Builder&); // DO NOT IMPLEMENT
- Builder &operator=(const Builder&); // DO NOT IMPLEMENT
+ Builder(const Builder&) LLVM_DELETED_FUNCTION;
+ Builder &operator=(const Builder&) LLVM_DELETED_FUNCTION;
public:
explicit Builder(ContinuousRangeMap &Self) : Self(Self) { }
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/Module.h b/contrib/llvm/tools/clang/include/clang/Serialization/Module.h
index 786ecd3..39fa3d9 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/Module.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/Module.h
@@ -25,6 +25,7 @@
namespace clang {
+class FileEntry;
class DeclContext;
class Module;
template<typename Info> class OnDiskChainedHashTable;
@@ -74,6 +75,29 @@ public:
/// \brief The file name of the module file.
std::string FileName;
+ /// \brief The original source file name that was used to build the
+ /// primary AST file, which may have been modified for
+ /// relocatable-pch support.
+ std::string OriginalSourceFileName;
+
+ /// \brief The actual original source file name that was used to
+ /// build this AST file.
+ std::string ActualOriginalSourceFileName;
+
+ /// \brief The file ID for the original source file that was used to
+ /// build this AST file.
+ FileID OriginalSourceFileID;
+
+ /// \brief The directory that the PCH was originally created in. Used to
+ /// allow resolving headers even after headers+PCH was moved to a new path.
+ std::string OriginalDir;
+
+ /// \brief Whether this precompiled header is a relocatable PCH file.
+ bool RelocatablePCH;
+
+ /// \brief The file entry for the module file.
+ const FileEntry *File;
+
/// \brief Whether this module has been directly imported by the
/// user.
bool DirectlyImported;
@@ -98,11 +122,24 @@ public:
llvm::BitstreamCursor Stream;
/// \brief The source location where this module was first imported.
+ /// FIXME: This is not properly initialized yet.
SourceLocation ImportLoc;
/// \brief The first source location in this module.
SourceLocation FirstLoc;
+ // === Input Files ===
+ /// \brief The cursor to the start of the input-files block.
+ llvm::BitstreamCursor InputFilesCursor;
+
+ /// \brief Offsets for all of the input file entries in the AST file.
+ const uint32_t *InputFileOffsets;
+
+ /// \brief The input files that have been loaded from this AST file, along
+ /// with a bool indicating whether this was an overridden buffer.
+ std::vector<llvm::PointerIntPair<const FileEntry *, 1, bool> >
+ InputFilesLoaded;
+
// === Source Locations ===
/// \brief Cursor used to read source location entries.
@@ -124,13 +161,6 @@ public:
/// \brief SLocEntries that we're going to preload.
SmallVector<uint64_t, 4> PreloadSLocEntries;
- /// \brief The number of source location file entries in this AST file.
- unsigned LocalNumSLocFileEntries;
-
- /// \brief Offsets for all of the source location file entries in the
- /// AST file.
- const uint32_t *SLocFileOffsets;
-
/// \brief Remapping table for source locations in this module.
ContinuousRangeMap<uint32_t, int, 2> SLocRemap;
@@ -168,6 +198,22 @@ public:
/// all of the macro definitions.
llvm::BitstreamCursor MacroCursor;
+ /// \brief The number of macros in this AST file.
+ unsigned LocalNumMacros;
+
+ /// \brief Offsets of macros in the preprocessor block.
+ ///
+ /// This array is indexed by the macro ID (-1), and provides
+ /// the offset into the preprocessor block where macro definitions are
+ /// stored.
+ const uint32_t *MacroOffsets;
+
+ /// \brief Base macro ID for macros local to this module.
+ serialization::MacroID BaseMacroID;
+
+ /// \brief Remapping table for macro IDs in this module.
+ ContinuousRangeMap<uint32_t, int, 2> MacroRemap;
+
/// \brief The offset of the start of the set of defined macros.
uint64_t MacroStartOffset;
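
BaseMacroID follows the local-to-global ID scheme used elsewhere in these headers: a module's 1-based local IDs are shifted by a base assigned when the module is loaded, and MacroRemap handles IDs that refer into other modules. A standalone toy of just the base-offset step, with invented numbers.

    #include <iostream>

    int main() {
      const unsigned BaseMacroID = 57; // this module loaded after 57 macros
      unsigned LocalID = 3;            // third macro within this module
      unsigned GlobalID = BaseMacroID + LocalID;
      std::cout << "global macro ID: " << GlobalID << "\n"; // prints 60
    }
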
@@ -294,6 +340,7 @@ public:
/// \brief Array of file-level DeclIDs sorted by file.
const serialization::DeclID *FileSortedDecls;
+ unsigned NumFileSortedDecls;
/// \brief Array of redeclaration chain location information within this
/// module file, sorted by the first declaration ID.
@@ -338,11 +385,6 @@ public:
/// \brief Diagnostic IDs and their mappings that the user changed.
SmallVector<uint64_t, 8> PragmaDiagMappings;
- /// \brief The AST stat cache installed for this file, if any.
- ///
- /// The dynamic type of this stat cache is always ASTStatCache
- void *StatCache;
-
/// \brief List of modules which depend on this module
llvm::SetVector<ModuleFile *> ImportedBy;
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h b/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h
index 6ff0640..6dcaa21 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h
@@ -34,7 +34,7 @@ class ModuleManager {
/// \brief FileManager that handles translating between filenames and
/// FileEntry *.
- FileManager FileMgr;
+ FileManager &FileMgr;
/// \brief A lookup of in-memory (virtual file) buffers
llvm::DenseMap<const FileEntry *, llvm::MemoryBuffer *> InMemoryBuffers;
@@ -45,7 +45,7 @@ public:
typedef SmallVector<ModuleFile*, 2>::reverse_iterator ModuleReverseIterator;
typedef std::pair<uint32_t, StringRef> ModuleOffset;
- ModuleManager(const FileSystemOptions &FSO);
+ explicit ModuleManager(FileManager &FileMgr);
~ModuleManager();
/// \brief Forward iterator to traverse all loaded modules. This is reverse
@@ -105,7 +105,10 @@ public:
std::pair<ModuleFile *, bool>
addModule(StringRef FileName, ModuleKind Type, ModuleFile *ImportedBy,
unsigned Generation, std::string &ErrorStr);
-
+
+ /// \brief Remove the given set of modules.
+ void removeModules(ModuleIterator first, ModuleIterator last);
+
/// \brief Add an in-memory buffer to the list of known buffers.
void addInMemoryBuffer(StringRef FileName, llvm::MemoryBuffer *Buffer);
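
A hedged sketch of the new removeModules(): rolling back every module added by a failed load. Mgr, NumModulesBefore, and the begin()/end() accessors implied by the iterator typedefs are assumptions.

    ModuleManager::ModuleIterator FirstNew = Mgr.begin() + NumModulesBefore;
    Mgr.removeModules(FirstNew, Mgr.end()); // drop everything loaded since
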
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h
deleted file mode 100644
index f9cce9c..0000000
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//== NullDerefChecker.h - Null dereference checker --------------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This defines NullDerefChecker and UndefDerefChecker, two builtin checks
-// in ExprEngine that check for null and undefined pointers at loads
-// and stores.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_GR_DEREFCHECKER
-#define LLVM_CLANG_GR_DEREFCHECKER
-
-#include <utility>
-
-namespace clang {
-
-namespace ento {
-
-class ExprEngine;
-class ExplodedNode;
-
-std::pair<ExplodedNode * const *, ExplodedNode * const *>
-GetImplicitNullDereferences(ExprEngine &Eng);
-
-} // end GR namespace
-
-} // end clang namespace
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Analyses.def
index 29ddc9e..01a6ffd 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Analyses.def
@@ -21,7 +21,6 @@ ANALYSIS_STORE(RegionStore, "region", "Use region-based analyzer store", CreateR
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN)
#endif
-ANALYSIS_CONSTRAINTS(BasicConstraints, "basic", "Use basic constraint tracking", CreateBasicConstraintManager)
ANALYSIS_CONSTRAINTS(RangeConstraints, "range", "Use constraint tracking of concrete value ranges", CreateRangeConstraintManager)
#ifndef ANALYSIS_DIAGNOSTICS
@@ -47,6 +46,7 @@ ANALYSIS_PURGE(PurgeNone, "none", "Do not purge symbols, bindings, or constrain
#endif
ANALYSIS_IPA(None, "none", "Perform only intra-procedural analysis")
+ANALYSIS_IPA(BasicInlining, "basic-inlining", "Inline C functions and blocks when their definitions are available")
ANALYSIS_IPA(Inlining, "inlining", "Inline callees when their definitions are available")
ANALYSIS_IPA(DynamicDispatch, "dynamic", "Experimental: Enable inlining of dynamically dispatched methods")
ANALYSIS_IPA(DynamicDispatchBifurcate, "dynamic-bifurcate", "Experimental: Enable inlining of dynamically dispatched methods, bifurcate paths when exact type info is unavailable")
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
new file mode 100644
index 0000000..fa0754a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
@@ -0,0 +1,308 @@
+//===--- AnalyzerOptions.h - Analysis Engine Options ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines various options for the static analyzer that are set
+// by the frontend and are consulted throughout the analyzer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYZEROPTIONS_H
+#define LLVM_CLANG_ANALYZEROPTIONS_H
+
+#include <string>
+#include <vector>
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringMap.h"
+
+namespace clang {
+class ASTConsumer;
+class DiagnosticsEngine;
+class Preprocessor;
+class LangOptions;
+
+/// Analysis - Set of available source code analyses.
+enum Analyses {
+#define ANALYSIS(NAME, CMDFLAG, DESC, SCOPE) NAME,
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+NumAnalyses
+};
+
+/// AnalysisStores - Set of available analysis store models.
+enum AnalysisStores {
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+NumStores
+};
+
+/// AnalysisConstraints - Set of available constraint models.
+enum AnalysisConstraints {
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+NumConstraints
+};
+
+/// AnalysisDiagClients - Set of available diagnostic clients for rendering
+/// analysis results.
+enum AnalysisDiagClients {
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN, AUTOCREAT) PD_##NAME,
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+NUM_ANALYSIS_DIAG_CLIENTS
+};
+
+/// AnalysisPurgeMode - Set of available strategies for dead symbol removal.
+enum AnalysisPurgeMode {
+#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) NAME,
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+NumPurgeModes
+};
+
+/// AnalysisIPAMode - Set of inter-procedural modes.
+enum AnalysisIPAMode {
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) NAME,
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+NumIPAModes
+};
+
+/// AnalysisInliningMode - Set of function selection heuristics used during inlining.
+enum AnalysisInliningMode {
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) NAME,
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+NumInliningModes
+};
+
+/// \brief Describes the different kinds of C++ member functions which can be
+/// considered for inlining by the analyzer.
+///
+/// These options are cumulative; enabling one kind of member function will
+/// enable all kinds with lower enum values.
+enum CXXInlineableMemberKind {
+ // Uninitialized = 0,
+
+ /// A dummy mode in which no C++ inlining is enabled.
+ CIMK_None = 1,
+
+ /// Refers to regular member function and operator calls.
+ CIMK_MemberFunctions,
+
+ /// Refers to constructors (implicit or explicit).
+ ///
+ /// Note that a constructor will not be inlined if the corresponding
+ /// destructor is non-trivial.
+ CIMK_Constructors,
+
+ /// Refers to destructors (implicit or explicit).
+ CIMK_Destructors
+};
+
+
+class AnalyzerOptions : public llvm::RefCountedBase<AnalyzerOptions> {
+public:
+ typedef llvm::StringMap<std::string> ConfigTable;
+
+ /// \brief Pair of checker name and enable/disable.
+ std::vector<std::pair<std::string, bool> > CheckersControlList;
+
+ /// \brief A key-value table of user-specified configuration values.
+ ConfigTable Config;
+ AnalysisStores AnalysisStoreOpt;
+ AnalysisConstraints AnalysisConstraintsOpt;
+ AnalysisDiagClients AnalysisDiagOpt;
+ AnalysisPurgeMode AnalysisPurgeOpt;
+
+ /// \brief The interprocedural analysis mode.
+ AnalysisIPAMode IPAMode;
+
+ std::string AnalyzeSpecificFunction;
+
+ /// \brief The maximum number of exploded nodes the analyzer will generate.
+ unsigned MaxNodes;
+
+ /// \brief The maximum number of times the analyzer visits a block.
+ unsigned maxBlockVisitOnPath;
+
+
+ unsigned ShowCheckerHelp : 1;
+ unsigned AnalyzeAll : 1;
+ unsigned AnalyzerDisplayProgress : 1;
+ unsigned AnalyzeNestedBlocks : 1;
+
+ /// \brief Whether we should eagerly assume the outcome of conditionals,
+ /// thereby bifurcating the path.
+ ///
+ /// This flag indicates how the engine should handle expressions such as
+ /// 'x = (y != 0)'. When this flag is true, the subexpression 'y != 0' will be
+ /// eagerly assumed to be true or false, evaluating it to the integer 1 or 0
+ /// respectively. The upside is that this can increase analysis precision
+ /// until we have a better way to lazily evaluate such logic. The downside is
+ /// that it eagerly bifurcates paths.
+ unsigned eagerlyAssumeBinOpBifurcation : 1;
+
+ unsigned TrimGraph : 1;
+ unsigned visualizeExplodedGraphWithGraphViz : 1;
+ unsigned visualizeExplodedGraphWithUbiGraph : 1;
+ unsigned UnoptimizedCFG : 1;
+ unsigned PrintStats : 1;
+
+ /// \brief Do not re-analyze paths leading to exhausted nodes with a different
+ /// strategy. We get better code coverage when retry is enabled.
+ unsigned NoRetryExhausted : 1;
+
+ /// \brief The inlining stack depth limit.
+ unsigned InlineMaxStackDepth;
+
+ /// \brief The maximum number of basic blocks in a function being inlined.
+ unsigned InlineMaxFunctionSize;
+
+ /// \brief The mode of function selection used during inlining.
+ AnalysisInliningMode InliningMode;
+
+private:
+ /// Controls which C++ member functions will be considered for inlining.
+ CXXInlineableMemberKind CXXMemberInliningMode;
+
+ /// \sa includeTemporaryDtorsInCFG
+ llvm::Optional<bool> IncludeTemporaryDtorsInCFG;
+
+ /// \sa mayInlineCXXStandardLibrary
+ llvm::Optional<bool> InlineCXXStandardLibrary;
+
+ /// \sa mayInlineTemplateFunctions
+ llvm::Optional<bool> InlineTemplateFunctions;
+
+ /// \sa mayInlineObjCMethod
+ llvm::Optional<bool> ObjCInliningMode;
+
+ /// Cache of the "ipa-always-inline-size" setting.
+ /// \sa getAlwaysInlineSize
+ llvm::Optional<unsigned> AlwaysInlineSize;
+
+ /// \sa shouldPruneNullReturnPaths
+ llvm::Optional<bool> PruneNullReturnPaths;
+
+ /// \sa shouldAvoidSuppressingNullArgumentPaths
+ llvm::Optional<bool> AvoidSuppressingNullArgumentPaths;
+
+ /// \sa getGraphTrimInterval
+ llvm::Optional<unsigned> GraphTrimInterval;
+
+ /// Interprets an option's string value as a boolean.
+ ///
+ /// Accepts the strings "true" and "false".
+ /// If an option value is not provided, returns the given \p DefaultVal.
+ bool getBooleanOption(StringRef Name, bool DefaultVal);
+
+ /// Variant that accepts an Optional value to cache the result.
+ bool getBooleanOption(llvm::Optional<bool> &V, StringRef Name,
+ bool DefaultVal);
+
+ /// Interprets an option's string value as an integer value.
+ int getOptionAsInteger(llvm::StringRef Name, int DefaultVal);
+
+public:
+ /// Returns the option controlling which C++ member functions will be
+ /// considered for inlining.
+ ///
+ /// This is controlled by the 'c++-inlining' config option.
+ ///
+ /// \sa CXXMemberInliningMode
+ bool mayInlineCXXMemberFunction(CXXInlineableMemberKind K);
+
+ /// Returns true if Objective-C inlining is enabled, false otherwise.
+ bool mayInlineObjCMethod();
+
+ /// Returns whether or not the destructors for C++ temporary objects should
+ /// be included in the CFG.
+ ///
+ /// This is controlled by the 'cfg-temporary-dtors' config option, which
+ /// accepts the values "true" and "false".
+ bool includeTemporaryDtorsInCFG();
+
+ /// Returns whether or not C++ standard library functions may be considered
+ /// for inlining.
+ ///
+ /// This is controlled by the 'c++-stdlib-inlining' config option, which
+ /// accepts the values "true" and "false".
+ bool mayInlineCXXStandardLibrary();
+
+ /// Returns whether or not templated functions may be considered for inlining.
+ ///
+ /// This is controlled by the 'c++-template-inlining' config option, which
+ /// accepts the values "true" and "false".
+ bool mayInlineTemplateFunctions();
+
+ /// Returns whether or not paths that go through null returns should be
+ /// suppressed.
+ ///
+ /// This is a heuristic for avoiding bug reports with paths that go through
+ /// inlined functions that are more defensive than their callers.
+ ///
+ /// This is controlled by the 'suppress-null-return-paths' config option,
+ /// which accepts the values "true" and "false".
+ bool shouldPruneNullReturnPaths();
+
+ /// Returns whether a bug report should \em not be suppressed if its path
+ /// includes a call with a null argument, even if that call has a null return.
+ ///
+ /// This option has no effect when #shouldPruneNullReturnPaths() is false.
+ ///
+ /// This is a counter-heuristic to avoid false negatives.
+ ///
+ /// This is controlled by the 'avoid-suppressing-null-argument-paths' config
+ /// option, which accepts the values "true" and "false".
+ bool shouldAvoidSuppressingNullArgumentPaths();
+
+ /// Returns the maximum size (in basic blocks) of a function that is
+ /// considered small enough to always inline.
+ ///
+ /// This is controlled by the "ipa-always-inline-size" analyzer-config option.
+ unsigned getAlwaysInlineSize();
+
+ /// Returns true if the analyzer engine should synthesize fake bodies
+ /// for well-known functions.
+ bool shouldSynthesizeBodies();
+
+ /// Returns how often nodes in the ExplodedGraph should be recycled to save
+ /// memory.
+ ///
+ /// This is controlled by the 'graph-trim-interval' config option. To disable
+ /// node reclamation, set the option to "0".
+ unsigned getGraphTrimInterval();
+
+public:
+ AnalyzerOptions() : CXXMemberInliningMode() {
+ AnalysisStoreOpt = RegionStoreModel;
+ AnalysisConstraintsOpt = RangeConstraintsModel;
+ AnalysisDiagOpt = PD_HTML;
+ AnalysisPurgeOpt = PurgeStmt;
+ IPAMode = DynamicDispatchBifurcate;
+ ShowCheckerHelp = 0;
+ AnalyzeAll = 0;
+ AnalyzerDisplayProgress = 0;
+ AnalyzeNestedBlocks = 0;
+ eagerlyAssumeBinOpBifurcation = 0;
+ TrimGraph = 0;
+ visualizeExplodedGraphWithGraphViz = 0;
+ visualizeExplodedGraphWithUbiGraph = 0;
+ UnoptimizedCFG = 0;
+ PrintStats = 0;
+ NoRetryExhausted = 0;
+ // Cap the stack depth at 4 calls (5 stack frames, base + 4 calls).
+ InlineMaxStackDepth = 5;
+ InlineMaxFunctionSize = 200;
+ InliningMode = NoRedundancy;
+ }
+};
+
+typedef llvm::IntrusiveRefCntPtr<AnalyzerOptions> AnalyzerOptionsRef;
+
+}
+
+#endif
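A hedged sketch of how this class is meant to be driven, assuming a Clang tree to compile against; the config keys are the ones named in the accessor comments above, and demoOptions is a made-up function:

    #include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
    using namespace clang;

    void demoOptions() {
      // Populate the table the way '-analyzer-config key=value' pairs would.
      AnalyzerOptionsRef Opts(new AnalyzerOptions());
      Opts->Config["cfg-temporary-dtors"] = "true";
      Opts->Config["ipa-always-inline-size"] = "3";

      // Read the values back through the public accessors, which parse the
      // strings and cache the results in the Optional<> members.
      bool Dtors = Opts->includeTemporaryDtorsInCFG();
      unsigned Small = Opts->getAlwaysInlineSize();
      (void)Dtors; (void)Small;
    }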
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index 48393a3..b5a88ba 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -24,6 +24,7 @@
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallSet.h"
namespace clang {
@@ -95,6 +96,10 @@ protected:
/// for multiple PathDiagnosticConsumers.
llvm::SmallVector<Regions *, 2> interestingRegions;
+ /// A set of location contexts that correspond to call sites which should be
+ /// considered "interesting".
+ llvm::SmallSet<const LocationContext *, 2> InterestingLocationContexts;
+
/// A set of custom visitors which generate "event" diagnostics at
/// interesting points in the path.
VisitorList Callbacks;
@@ -111,6 +116,19 @@ protected:
/// when reporting an issue.
bool DoNotPrunePath;
+ /// Used to track unique reasons why a bug report might be invalid.
+ ///
+ /// \sa markInvalid
+ /// \sa removeInvalidation
+ typedef std::pair<const void *, const void *> InvalidationRecord;
+
+ /// If non-empty, this bug report is likely a false positive and should not be
+ /// shown to the user.
+ ///
+ /// \sa markInvalid
+ /// \sa removeInvalidation
+ llvm::SmallSet<InvalidationRecord, 4> Invalidations;
+
private:
// Used internally by BugReporter.
Symbols &getInterestingSymbols();
@@ -147,7 +165,8 @@ public:
PathDiagnosticLocation LocationToUnique)
: BT(bt), DeclWithIssue(0), Description(desc),
UniqueingLocation(LocationToUnique),
- ErrorNode(errornode), ConfigurationChangeToken(0) {}
+ ErrorNode(errornode), ConfigurationChangeToken(0),
+ DoNotPrunePath(false) {}
virtual ~BugReport();
@@ -158,8 +177,10 @@ public:
const StringRef getDescription() const { return Description; }
- const StringRef getShortDescription() const {
- return ShortDescription.empty() ? Description : ShortDescription;
+ const StringRef getShortDescription(bool UseFallback = true) const {
+ if (ShortDescription.empty() && UseFallback)
+ return Description;
+ return ShortDescription;
}
/// Indicates whether or not any path pruning should take place
@@ -172,14 +193,44 @@ public:
void markInteresting(SymbolRef sym);
void markInteresting(const MemRegion *R);
void markInteresting(SVal V);
+ void markInteresting(const LocationContext *LC);
bool isInteresting(SymbolRef sym);
bool isInteresting(const MemRegion *R);
bool isInteresting(SVal V);
+ bool isInteresting(const LocationContext *LC);
unsigned getConfigurationChangeToken() const {
return ConfigurationChangeToken;
}
+
+ /// Returns whether or not this report should be considered valid.
+ ///
+ /// Invalid reports are those that have been classified as likely false
+ /// positives after the fact.
+ bool isValid() const {
+ return Invalidations.empty();
+ }
+
+ /// Marks the current report as invalid, meaning that it is probably a false
+ /// positive and should not be reported to the user.
+ ///
+ /// The \p Tag and \p Data arguments are intended to be opaque identifiers for
+ /// this particular invalidation, where \p Tag represents the visitor
+ /// responsible for invalidation, and \p Data represents the reason this
+ /// visitor decided to invalidate the bug report.
+ ///
+ /// \sa removeInvalidation
+ void markInvalid(const void *Tag, const void *Data) {
+ Invalidations.insert(std::make_pair(Tag, Data));
+ }
+
+ /// Reverses the effects of a previous invalidation.
+ ///
+ /// \sa markInvalid
+ void removeInvalidation(const void *Tag, const void *Data) {
+ Invalidations.erase(std::make_pair(Tag, Data));
+ }
/// Return the canonical declaration, be it a method or class, where
/// this issue semantically occurred.
@@ -342,6 +393,11 @@ private:
/// A vector of BugReports for tracking the allocated pointers and cleanup.
std::vector<BugReportEquivClass *> EQClassesVector;
+ /// A map from PathDiagnosticPiece to the LocationContext of the inlined
+ /// function call it represents.
+ llvm::DenseMap<const PathDiagnosticCallPiece*,
+ const LocationContext*> LocationContextMap;
+
protected:
BugReporter(BugReporterData& d, Kind k) : BugTypes(F.getEmptySet()), kind(k),
D(d) {}
@@ -378,9 +434,14 @@ public:
SourceManager& getSourceManager() { return D.getSourceManager(); }
- virtual void GeneratePathDiagnostic(PathDiagnostic& pathDiagnostic,
+ virtual bool generatePathDiagnostic(PathDiagnostic& pathDiagnostic,
PathDiagnosticConsumer &PC,
- ArrayRef<BugReport *> &bugReports) {}
+ ArrayRef<BugReport *> &bugReports) {
+ return true;
+ }
+
+ bool RemoveUneededCalls(PathPieces &pieces, BugReport *R,
+ PathDiagnosticCallPiece *CallWithLoc = 0);
void Register(BugType *BT);
@@ -389,7 +450,7 @@ public:
/// The reports are usually generated by the checkers. Further, they are
/// folded based on the profile value, which is done to coalesce similar
/// reports.
- void EmitReport(BugReport *R);
+ void emitReport(BugReport *R);
void EmitBasicReport(const Decl *DeclWithIssue,
StringRef BugName, StringRef BugCategory,
@@ -409,8 +470,10 @@ public:
EmitBasicReport(DeclWithIssue, BugName, Category, BugStr, Loc, &R, 1);
}
- static bool classof(const BugReporter* R) { return true; }
-
+ void addCallPieceLocationContextPair(const PathDiagnosticCallPiece *C,
+ const LocationContext *LC) {
+ LocationContextMap[C] = LC;
+ }
private:
llvm::StringMap<BugType *> StrBugTypes;
@@ -440,7 +503,15 @@ public:
/// engine.
ProgramStateManager &getStateManager();
- virtual void GeneratePathDiagnostic(PathDiagnostic &pathDiagnostic,
+ /// Generates a path corresponding to one of the given bug reports.
+ ///
+ /// Which report is used for path generation is not specified. The
+ /// bug reporter will try to pick the shortest path, but this is not
+ /// guaranteed.
+ ///
+ /// \return True if the report was valid and a path was generated,
+ /// false if the reports should be considered invalid.
+ virtual bool generatePathDiagnostic(PathDiagnostic &PD,
PathDiagnosticConsumer &PC,
ArrayRef<BugReport*> &bugReports);
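The markInvalid/removeInvalidation pair added above gives path visitors a veto over a report without deleting it. A sketch of the intended protocol; the suppression helpers and the Reason argument are hypothetical:

    // The address of this integer identifies the vetoing visitor; the Data
    // pointer records why this particular report was vetoed.
    static int SuppressionTag;

    void suppress(clang::ento::BugReport &BR, const clang::Stmt *Reason) {
      BR.markInvalid(&SuppressionTag, Reason); // BR.isValid() is now false
    }

    void unsuppress(clang::ento::BugReport &BR, const clang::Stmt *Reason) {
      BR.removeInvalidation(&SuppressionTag, Reason); // reverses the veto
    }

Because Invalidations is keyed on the (Tag, Data) pair, a visitor can only cancel its own veto, and a report is emitted only while isValid() still holds.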
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
index f53c15f..78e35ca 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
@@ -100,7 +100,6 @@ class FindLastStoreBRVisitor
const MemRegion *R;
SVal V;
bool satisfied;
- const ExplodedNode *StoreSite;
public:
/// \brief Convenience method to create a visitor given only the MemRegion.
@@ -114,7 +113,7 @@ public:
static void registerStatementVarDecls(BugReport &BR, const Stmt *S);
FindLastStoreBRVisitor(SVal v, const MemRegion *r)
- : R(r), V(v), satisfied(false), StoreSite(0) {
+ : R(r), V(v), satisfied(false) {
assert (!V.isUnknown() && "Cannot track unknown value.");
// TODO: Does it make sense to allow undef values here?
@@ -142,6 +141,10 @@ public:
void Profile(llvm::FoldingSetNodeID &ID) const;
+ /// Return the tag associated with this visitor. This tag will be used
+ /// to tag all PathDiagnosticPieces created by this visitor.
+ static const char *getTag();
+
PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
@@ -171,6 +174,9 @@ public:
ID.AddPointer(&x);
}
+ /// Return the tag associated with this visitor. This tag will be used
+ /// to tag all PathDiagnosticPieces created by this visitor.
+ static const char *getTag();
virtual PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
const ExplodedNode *Prev,
@@ -223,15 +229,57 @@ public:
const ExplodedNode *N,
llvm::Optional<bool> &prunable);
};
-
+
+/// \brief Marks the call as interesting when a region containing an undefined
+/// or '0' value is passed to it as an argument.
+///
+/// As a result, BugReporter will not prune the path through the function even
+/// if the region's contents are not modified or accessed by the call.
+class UndefOrNullArgVisitor
+ : public BugReporterVisitorImpl<UndefOrNullArgVisitor> {
+
+ /// The interesting memory region this visitor is tracking.
+ const MemRegion *R;
+
+public:
+ UndefOrNullArgVisitor(const MemRegion *InR) : R(InR) {}
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int Tag = 0;
+ ID.AddPointer(&Tag);
+ ID.AddPointer(R);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR);
+};
+
namespace bugreporter {
-void addTrackNullOrUndefValueVisitor(const ExplodedNode *N, const Stmt *S,
- BugReport *R);
+/// Attempts to add visitors to trace a null or undefined value back to its
+/// point of origin, whether it is a symbol constrained to null or an explicit
+/// assignment.
+///
+/// \param N A node "downstream" from the evaluation of the statement.
+/// \param S The statement whose value is null or undefined.
+/// \param R The bug report to which visitors should be attached.
+/// \param IsArg Whether the statement is an argument to an inlined function.
+/// If this is the case, \p N \em must be the CallEnter node for
+/// the function.
+///
+/// \return Whether or not the function was able to add visitors for this
+/// statement. Note that returning \c true does not actually imply
+/// that any visitors were added.
+bool trackNullOrUndefValue(const ExplodedNode *N, const Stmt *S, BugReport &R,
+ bool IsArg = false);
const Stmt *GetDerefExpr(const ExplodedNode *N);
const Stmt *GetDenomExpr(const ExplodedNode *N);
const Stmt *GetRetValExpr(const ExplodedNode *N);
+bool isDeclRefExprToReference(const Expr *E);
+
} // end namespace clang
} // end namespace ento
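Typical use of the new trackNullOrUndefValue entry point from a checker, sketched under the assumption of a conventional null-dereference check; BT is an assumed BugType owned by the checker and C the CheckerContext:

    if (ExplodedNode *N = C.generateSink()) {
      BugReport *R = new BugReport(*BT, "Dereference of null pointer", N);
      // Walk back from N to explain where the null value originated.
      bugreporter::trackNullOrUndefValue(N, bugreporter::GetDerefExpr(N), *R);
      C.emitReport(R);
    }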
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
index 973cfb1..6dc26e6 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
@@ -52,7 +52,31 @@ class PathDiagnostic;
class PathDiagnosticConsumer {
public:
- typedef std::vector<std::pair<StringRef, std::string> > FilesMade;
+ class PDFileEntry : public llvm::FoldingSetNode {
+ public:
+ PDFileEntry(llvm::FoldingSetNodeID &NodeID) : NodeID(NodeID) {}
+
+ typedef std::vector<std::pair<StringRef, StringRef> > ConsumerFiles;
+
+ /// \brief A vector of <consumer,file> pairs.
+ ConsumerFiles files;
+
+ /// \brief A precomputed hash tag used for uniquing PDFileEntry objects.
+ const llvm::FoldingSetNodeID NodeID;
+
+ /// \brief Used for profiling in the FoldingSet.
+ void Profile(llvm::FoldingSetNodeID &ID) { ID = NodeID; }
+ };
+
+ struct FilesMade : public llvm::FoldingSet<PDFileEntry> {
+ llvm::BumpPtrAllocator Alloc;
+
+ void addDiagnostic(const PathDiagnostic &PD,
+ StringRef ConsumerName,
+ StringRef fileName);
+
+ PDFileEntry::ConsumerFiles *getFiles(const PathDiagnostic &PD);
+ };
private:
virtual void anchor();
@@ -73,7 +97,6 @@ public:
virtual PathGenerationScheme getGenerationScheme() const { return Minimal; }
virtual bool supportsLogicalOpControlFlow() const { return false; }
virtual bool supportsAllBlockEdges() const { return false; }
- virtual bool useVerboseDescription() const { return true; }
/// Return true if the PathDiagnosticConsumer supports individual
/// PathDiagnostics that span multiple files.
@@ -114,8 +137,6 @@ private:
Kind kind)
: K(kind), S(0), D(0), SM(&sm),
Loc(genLocation(L)), Range(genRange()) {
- assert(Loc.isValid());
- assert(Range.isValid());
}
FullSourceLoc
@@ -134,12 +155,14 @@ public:
PathDiagnosticLocation(const Stmt *s,
const SourceManager &sm,
LocationOrAnalysisDeclContext lac)
- : K(StmtK), S(s), D(0), SM(&sm),
+ : K(s->getLocStart().isValid() ? StmtK : SingleLocK),
+ S(K == StmtK ? s : 0),
+ D(0), SM(&sm),
Loc(genLocation(SourceLocation(), lac)),
Range(genRange(lac)) {
- assert(S);
- assert(Loc.isValid());
- assert(Range.isValid());
+ assert(K == SingleLocK || S);
+ assert(K == SingleLocK || Loc.isValid());
+ assert(K == SingleLocK || Range.isValid());
}
/// Create a location corresponding to the given declaration.
@@ -297,12 +320,18 @@ private:
const std::string str;
const Kind kind;
const DisplayHint Hint;
+
+ /// A constant string that can be used to tag the PathDiagnosticPiece,
+ /// typically with the identity of its creator. The actual pointer value is
+ /// meant to serve as an identifier; the string itself is useful for
+ /// debugging.
+ StringRef Tag;
+
std::vector<SourceRange> ranges;
- // Do not implement:
- PathDiagnosticPiece();
- PathDiagnosticPiece(const PathDiagnosticPiece &P);
- PathDiagnosticPiece& operator=(const PathDiagnosticPiece &P);
+ PathDiagnosticPiece() LLVM_DELETED_FUNCTION;
+ PathDiagnosticPiece(const PathDiagnosticPiece &P) LLVM_DELETED_FUNCTION;
+ void operator=(const PathDiagnosticPiece &P) LLVM_DELETED_FUNCTION;
protected:
PathDiagnosticPiece(StringRef s, Kind k, DisplayHint hint = Below);
@@ -312,8 +341,18 @@ protected:
public:
virtual ~PathDiagnosticPiece();
- const std::string& getString() const { return str; }
+ llvm::StringRef getString() const { return str; }
+ /// Tag this PathDiagnosticPiece with the given C-string.
+ void setTag(const char *tag) { Tag = tag; }
+
+ /// Return the opaque tag (if any) on the PathDiagnosticPiece.
+ const void *getTag() const { return Tag.data(); }
+
+ /// Return the string representation of the tag. This is useful
+ /// for debugging.
+ StringRef getTagStr() const { return Tag; }
+
/// getDisplayHint - Return a hint indicating where the diagnostic should
/// be displayed by the PathDiagnosticConsumer.
DisplayHint getDisplayHint() const { return Hint; }
@@ -338,10 +377,6 @@ public:
/// Return the SourceRanges associated with this PathDiagnosticPiece.
ArrayRef<SourceRange> getRanges() const { return ranges; }
- static inline bool classof(const PathDiagnosticPiece *P) {
- return true;
- }
-
virtual void Profile(llvm::FoldingSetNodeID &ID) const;
};
@@ -377,6 +412,10 @@ public:
virtual void flattenLocations() { Pos.flatten(); }
virtual void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ static bool classof(const PathDiagnosticPiece *P) {
+ return P->getKind() == Event || P->getKind() == Macro;
+ }
};
/// \brief Interface for classes constructing Stack hints.
@@ -410,10 +449,6 @@ public:
/// 'getMessageForX()' methods to construct a specific message.
virtual std::string getMessage(const ExplodedNode *N);
- /// Prints the ordinal form of the given integer,
- /// only valid for ValNo : ValNo > 0.
- void printOrdinal(unsigned ValNo, llvm::raw_svector_ostream &Out);
-
/// Produces the message of the following form:
/// 'Msg via Nth parameter'
virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex);
@@ -629,14 +664,22 @@ public:
class PathDiagnostic : public llvm::FoldingSetNode {
const Decl *DeclWithIssue;
std::string BugType;
- std::string Desc;
+ std::string VerboseDesc;
+ std::string ShortDesc;
std::string Category;
std::deque<std::string> OtherDesc;
+ PathDiagnosticLocation Loc;
PathPieces pathImpl;
llvm::SmallVector<PathPieces *, 3> pathStack;
PathDiagnostic(); // Do not implement.
public:
+ PathDiagnostic(const Decl *DeclWithIssue, StringRef bugtype,
+ StringRef verboseDesc, StringRef shortDesc,
+ StringRef category);
+
+ ~PathDiagnostic();
+
const PathPieces &path;
/// Return the path currently used by builders for constructing the
@@ -659,16 +702,24 @@ public:
void popActivePath() { if (!pathStack.empty()) pathStack.pop_back(); }
bool isWithinCall() const { return !pathStack.empty(); }
-
- // PathDiagnostic();
- PathDiagnostic(const Decl *DeclWithIssue,
- StringRef bugtype,
- StringRef desc,
- StringRef category);
- ~PathDiagnostic();
+ void setEndOfPath(PathDiagnosticPiece *EndPiece) {
+ assert(!Loc.isValid() && "End location already set!");
+ Loc = EndPiece->getLocation();
+ assert(Loc.isValid() && "Invalid location for end-of-path piece");
+ getActivePath().push_back(EndPiece);
+ }
- StringRef getDescription() const { return Desc; }
+ void resetPath() {
+ pathStack.clear();
+ pathImpl.clear();
+ Loc = PathDiagnosticLocation();
+ }
+
+ StringRef getVerboseDescription() const { return VerboseDesc; }
+ StringRef getShortDescription() const {
+ return ShortDesc.empty() ? VerboseDesc : ShortDesc;
+ }
StringRef getBugType() const { return BugType; }
StringRef getCategory() const { return Category; }
@@ -682,15 +733,27 @@ public:
meta_iterator meta_end() const { return OtherDesc.end(); }
void addMeta(StringRef s) { OtherDesc.push_back(s); }
- PathDiagnosticLocation getLocation() const;
+ PathDiagnosticLocation getLocation() const {
+ assert(Loc.isValid() && "No end-of-path location set yet!");
+ return Loc;
+ }
void flattenLocations() {
+ Loc.flatten();
for (PathPieces::iterator I = pathImpl.begin(), E = pathImpl.end();
I != E; ++I) (*I)->flattenLocations();
}
-
+
+ /// Profiles the diagnostic, independent of the path it references.
+ ///
+ /// This can be used to merge diagnostics that refer to the same issue
+ /// along different paths.
void Profile(llvm::FoldingSetNodeID &ID) const;
-
+
+ /// Profiles the diagnostic, including its path.
+ ///
+ /// Two diagnostics with the same issue along different paths will generate
+ /// different profiles.
void FullProfile(llvm::FoldingSetNodeID &ID) const;
};
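The tag machinery added above is identity-based: visitors hand out a stable const char* via their static getTag(), and consumers compare pointers rather than strings. A sketch, where P is an assumed PathDiagnosticPiece*:

    // Stamp the piece with its creator's tag.
    P->setTag(FindLastStoreBRVisitor::getTag());

    // Tags compare by pointer identity; the string form is for debugging.
    if (P->getTag() == FindLastStoreBRVisitor::getTag())
      llvm::errs() << "piece created by " << P->getTagStr() << "\n";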
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
index 3214d96..9eb1248 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -366,23 +366,6 @@ public:
}
};
-class InlineCall {
- template <typename CHECKER>
- static bool _inlineCall(void *checker, const CallExpr *CE,
- ExprEngine &Eng,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- return ((const CHECKER *)checker)->inlineCall(CE, Eng, Pred, Dst);
- }
-
-public:
- template <typename CHECKER>
- static void _register(CHECKER *checker, CheckerManager &mgr) {
- mgr._registerForInlineCall(
- CheckerManager::InlineCallFunc(checker, _inlineCall<CHECKER>));
- }
-};
-
} // end eval namespace
class CheckerBase : public ProgramPointTag {
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
index e11b6d5..7ae8e53 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -258,7 +258,7 @@ public:
const ExplodedNodeSet &Src,
SVal location, SVal val,
const Stmt *S, ExprEngine &Eng,
- ProgramPoint::Kind PointKind);
+ const ProgramPoint &PP);
/// \brief Run checkers for end of analysis.
void runCheckersForEndAnalysis(ExplodedGraph &G, BugReporter &BR,
@@ -267,6 +267,7 @@ public:
/// \brief Run checkers for end of path.
void runCheckersForEndPath(NodeBuilderContext &BC,
ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
ExprEngine &Eng);
/// \brief Run checkers for branch condition.
@@ -407,11 +408,6 @@ public:
typedef CheckerFn<bool (const CallExpr *, CheckerContext &)>
EvalCallFunc;
- typedef CheckerFn<bool (const CallExpr *, ExprEngine &Eng,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst)>
- InlineCallFunc;
-
typedef CheckerFn<void (const TranslationUnitDecl *,
AnalysisManager&, BugReporter &)>
CheckEndOfTranslationUnit;
@@ -449,8 +445,6 @@ public:
void _registerForEvalCall(EvalCallFunc checkfn);
- void _registerForInlineCall(InlineCallFunc checkfn);
-
void _registerForEndOfTranslationUnit(CheckEndOfTranslationUnit checkfn);
//===----------------------------------------------------------------------===//
@@ -576,8 +570,6 @@ private:
std::vector<EvalCallFunc> EvalCallCheckers;
- std::vector<InlineCallFunc> InlineCallCheckers;
-
std::vector<CheckEndOfTranslationUnit> EndOfTranslationUnitCheckers;
struct EventInfo {
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h
index e1ff17b..27f3677 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h
@@ -66,6 +66,10 @@ public:
return llvm::APSInt::getMaxValue(BitWidth, IsUnsigned);
}
+ llvm::APSInt getValue(uint64_t RawValue) const LLVM_READONLY {
+ return (llvm::APSInt(BitWidth, IsUnsigned) = RawValue);
+ }
+
/// Used to classify whether a value is representable using this type.
///
/// \see testInRange
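A small sketch of the new hook next to the existing ones, assuming APSIntType's (width, unsignedness) constructor:

    APSIntType Ty(/*Width=*/32, /*Unsigned=*/true);
    llvm::APSInt Max = Ty.getMaxValue(); // largest 32-bit unsigned value
    llvm::APSInt Val = Ty.getValue(42);  // 42, carried at exactly this width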
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
index 876196b..9038ae5 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
@@ -16,7 +16,7 @@
#define LLVM_CLANG_GR_ANALYSISMANAGER_H
#include "clang/Analysis/AnalysisContext.h"
-#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
@@ -41,64 +41,16 @@ class AnalysisManager : public BugReporterData {
CheckerManager *CheckerMgr;
- /// \brief The maximum number of exploded nodes the analyzer will generate.
- unsigned MaxNodes;
-
- /// \brief The maximum number of times the analyzer visits a block.
- unsigned MaxVisit;
-
- bool VisualizeEGDot;
- bool VisualizeEGUbi;
- AnalysisPurgeMode PurgeDead;
-
- /// \brief The flag regulates if we should eagerly assume evaluations of
- /// conditionals, thus, bifurcating the path.
- ///
- /// EagerlyAssume - A flag indicating how the engine should handle
- /// expressions such as: 'x = (y != 0)'. When this flag is true then
- /// the subexpression 'y != 0' will be eagerly assumed to be true or false,
- /// thus evaluating it to the integers 0 or 1 respectively. The upside
- /// is that this can increase analysis precision until we have a better way
- /// to lazily evaluate such logic. The downside is that it eagerly
- /// bifurcates paths.
- bool EagerlyAssume;
- bool TrimGraph;
- bool EagerlyTrimEGraph;
-
-public:
- // \brief inter-procedural analysis mode.
- AnalysisIPAMode IPAMode;
-
- // Settings for inlining tuning.
- /// \brief The inlining stack depth limit.
- unsigned InlineMaxStackDepth;
- /// \brief The max number of basic blocks in a function being inlined.
- unsigned InlineMaxFunctionSize;
- /// \brief The mode of function selection used during inlining.
- AnalysisInliningMode InliningMode;
-
- /// \brief Do not re-analyze paths leading to exhausted nodes with a different
- /// strategy. We get better code coverage when retry is enabled.
- bool NoRetryExhausted;
-
public:
+ AnalyzerOptions &options;
+
AnalysisManager(ASTContext &ctx,DiagnosticsEngine &diags,
const LangOptions &lang,
const PathDiagnosticConsumers &Consumers,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
CheckerManager *checkerMgr,
- unsigned maxnodes, unsigned maxvisit,
- bool vizdot, bool vizubi, AnalysisPurgeMode purge,
- bool eager, bool trim,
- bool useUnoptimizedCFG,
- bool addImplicitDtors,
- bool eagerlyTrimEGraph,
- AnalysisIPAMode ipa,
- unsigned inlineMaxStack,
- unsigned inlineMaxFunctionSize,
- AnalysisInliningMode inliningMode,
- bool NoRetry);
+ AnalyzerOptions &Options);
~AnalysisManager();
@@ -142,27 +94,14 @@ public:
void FlushDiagnostics();
- unsigned getMaxNodes() const { return MaxNodes; }
-
- unsigned getMaxVisit() const { return MaxVisit; }
-
- bool shouldVisualizeGraphviz() const { return VisualizeEGDot; }
-
- bool shouldVisualizeUbigraph() const { return VisualizeEGUbi; }
-
bool shouldVisualize() const {
- return VisualizeEGDot || VisualizeEGUbi;
+ return options.visualizeExplodedGraphWithGraphViz ||
+ options.visualizeExplodedGraphWithUbiGraph;
}
- bool shouldEagerlyTrimExplodedGraph() const { return EagerlyTrimEGraph; }
-
- bool shouldTrimGraph() const { return TrimGraph; }
-
- AnalysisPurgeMode getPurgeMode() const { return PurgeDead; }
-
- bool shouldEagerlyAssume() const { return EagerlyAssume; }
-
- bool shouldInlineCall() const { return (IPAMode != None); }
+ bool shouldInlineCall() const {
+ return options.IPAMode != None;
+ }
CFG *getCFG(Decl const *D) {
return AnaCtxMgr.getContext(D)->getCFG();
@@ -180,7 +119,6 @@ public:
AnalysisDeclContext *getAnalysisDeclContext(const Decl *D) {
return AnaCtxMgr.getContext(D);
}
-
};
} // end ento namespace
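Call sites migrate from the deleted getters to field reads on the shared options object, roughly as in this sketch (configure is a made-up function):

    void configure(clang::ento::AnalysisManager &AMgr) {
      unsigned NodeBudget = AMgr.options.MaxNodes;          // was getMaxNodes()
      unsigned VisitCap = AMgr.options.maxBlockVisitOnPath; // was getMaxVisit()
      bool Inline = AMgr.shouldInlineCall(); // still wraps options.IPAMode
      (void)NodeBudget; (void)VisitCap; (void)Inline;
    }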
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
index b4a9de7..fb39354 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -73,6 +73,10 @@ class BasicValueFactory {
llvm::FoldingSet<CompoundValData> CompoundValDataSet;
llvm::FoldingSet<LazyCompoundValData> LazyCompoundValDataSet;
+ // This is private because external clients should use the factory
+ // method that takes a QualType.
+ const llvm::APSInt& getValue(uint64_t X, unsigned BitWidth, bool isUnsigned);
+
public:
BasicValueFactory(ASTContext &ctx, llvm::BumpPtrAllocator& Alloc)
: Ctx(ctx), BPAlloc(Alloc), PersistentSVals(0), PersistentSValPairs(0),
@@ -84,7 +88,6 @@ public:
const llvm::APSInt& getValue(const llvm::APSInt& X);
const llvm::APSInt& getValue(const llvm::APInt& X, bool isUnsigned);
- const llvm::APSInt& getValue(uint64_t X, unsigned BitWidth, bool isUnsigned);
const llvm::APSInt& getValue(uint64_t X, QualType T);
/// Returns the type of the APSInt used to store values of the given QualType.
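With the raw (value, width, signedness) overload now private, external clients go through the QualType-based factory instead; a sketch, assuming BVF and Ctx are an in-scope BasicValueFactory and ASTContext:

    const llvm::APSInt &Zero = BVF.getValue(0, Ctx.UnsignedIntTy);
    // Width and signedness now always come from the AST type, so callers can
    // no longer pick a bit width that disagrees with the type they model.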
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index f6c5830..a6a91e2 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -20,6 +20,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/AnalysisContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/ADT/PointerIntPair.h"
@@ -68,15 +69,22 @@ public:
}
};
+/// \class RuntimeDefinition
/// \brief Defines the runtime definition of the called function.
+///
+/// Encapsulates the information we have about which Decl will be used
+/// when the call is executed on the given path. When dealing with dynamic
+/// dispatch, the information is based on DynamicTypeInfo and might not be
+/// precise.
class RuntimeDefinition {
- /// The Declaration of the function which will be called at runtime.
- /// 0 if not available.
+ /// The Declaration of the function which could be called at runtime.
+ /// NULL if not available.
const Decl *D;
/// The region representing an object (ObjC/C++) on which the method is
/// called. With dynamic dispatch, the method definition depends on the
- /// runtime type of this object. 0 when there is no dynamic dispatch.
+ /// runtime type of this object. NULL when the DynamicTypeInfo is
+ /// precise.
const MemRegion *R;
public:
@@ -84,8 +92,15 @@ public:
RuntimeDefinition(const Decl *InD): D(InD), R(0) {}
RuntimeDefinition(const Decl *InD, const MemRegion *InR): D(InD), R(InR) {}
const Decl *getDecl() { return D; }
- const MemRegion *getDispatchRegion() { return R; }
+
+ /// \brief Check if the definition we have is precise.
+ /// If not, it is possible that the call dispatches to another definition at
+ /// execution time.
bool mayHaveOtherDefinitions() { return R != 0; }
+
+ /// When other definitions are possible, returns the region whose runtime type
+ /// determines the method definition.
+ const MemRegion *getDispatchRegion() { return R; }
};
/// \brief Represents an abstract call to a function or method along a
@@ -106,8 +121,7 @@ private:
const LocationContext *LCtx;
llvm::PointerUnion<const Expr *, const Decl *> Origin;
- // DO NOT IMPLEMENT
- CallEvent &operator=(const CallEvent &);
+ void operator=(const CallEvent &) LLVM_DELETED_FUNCTION;
protected:
// This is user data for subclasses.
@@ -139,16 +153,6 @@ protected:
: State(Original.State), LCtx(Original.LCtx), Origin(Original.Origin),
Data(Original.Data), Location(Original.Location), RefCount(0) {}
-
- ProgramStateRef getState() const {
- return State;
- }
-
- const LocationContext *getLocationContext() const {
- return LCtx;
- }
-
-
/// Copies this CallEvent, with vtable intact, into a new block of memory.
virtual void cloneTo(void *Dest) const = 0;
@@ -164,8 +168,6 @@ protected:
/// result of this call.
virtual void getExtraInvalidatedRegions(RegionList &Regions) const {}
- virtual QualType getDeclaredResultType() const = 0;
-
public:
virtual ~CallEvent() {}
@@ -178,6 +180,16 @@ public:
return Origin.dyn_cast<const Decl *>();
}
+ /// \brief The state in which the call is being evaluated.
+ ProgramStateRef getState() const {
+ return State;
+ }
+
+ /// \brief The context in which the call is being evaluated.
+ const LocationContext *getLocationContext() const {
+ return LCtx;
+ }
+
/// \brief Returns the definition of the function or method that will be
/// called.
virtual RuntimeDefinition getRuntimeDefinition() const = 0;
@@ -237,6 +249,12 @@ public:
/// \brief Returns the result type, adjusted for references.
QualType getResultType() const;
+ /// \brief Returns the return value of the call.
+ ///
+ /// This should only be called if the CallEvent was created using a state in
+ /// which the return value has already been bound to the origin expression.
+ SVal getReturnValue() const;
+
/// \brief Returns true if any of the arguments appear to represent callbacks.
bool hasNonZeroCallbackArg() const;
@@ -249,6 +267,38 @@ public:
return hasNonZeroCallbackArg();
}
+ /// \brief Returns true if the callee is an externally-visible function in the
+ /// top-level namespace, such as \c malloc.
+ ///
+ /// You can use this call to determine that a particular function really is
+ /// a library function and not, say, a C++ member function with the same name.
+ ///
+ /// If a name is provided, the function must additionally match the given
+ /// name.
+ ///
+ /// Note that this deliberately excludes C++ library functions in the \c std
+ /// namespace, but will include C library functions accessed through the
+ /// \c std namespace. This also does not check if the function is declared
+ /// as 'extern "C"', or if it uses C++ name mangling.
+ // FIXME: Add a helper for checking namespaces.
+ // FIXME: Move this down to AnyFunctionCall once checkers have more
+ // precise callbacks.
+ bool isGlobalCFunction(StringRef SpecificName = StringRef()) const;
+
+ /// \brief Returns the name of the callee, if its name is a simple identifier.
+ ///
+ /// Note that this will fail for Objective-C methods, blocks, and C++
+ /// overloaded operators. The former is named by a Selector rather than a
+ /// simple identifier, and the latter two do not have names.
+ // FIXME: Move this down to AnyFunctionCall once checkers have more
+ // precise callbacks.
+ const IdentifierInfo *getCalleeIdentifier() const {
+ const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(getDecl());
+ if (!ND)
+ return 0;
+ return ND->getIdentifier();
+ }
+
/// \brief Returns an appropriate ProgramPoint for this call.
ProgramPoint getProgramPoint(bool IsPreVisit = false,
const ProgramPointTag *Tag = 0) const;
@@ -277,12 +327,12 @@ public:
return cloneWithState<CallEvent>(NewState);
}
- /// \brief Returns true if this is a statement that can be considered for
- /// inlining.
- ///
- /// FIXME: This should go away once CallEvents are cheap and easy to
- /// construct from ExplodedNodes.
- static bool mayBeInlined(const Stmt *S);
+ /// \brief Returns true if this statement is a function or method call
+ /// of some kind.
+ static bool isCallStmt(const Stmt *S);
+
+ /// \brief Returns the result type of a function or method declaration.
+ static QualType getDeclaredResultType(const Decl *D);
// Iterator access to formal parameters and their types.
private:
@@ -329,8 +379,6 @@ public:
// For debugging purposes only
void dump(raw_ostream &Out) const;
LLVM_ATTRIBUTE_USED void dump() const;
-
- static bool classof(const CallEvent *) { return true; }
};
@@ -346,8 +394,6 @@ protected:
: CallEvent(D, St, LCtx) {}
AnyFunctionCall(const AnyFunctionCall &Other) : CallEvent(Other) {}
- virtual QualType getDeclaredResultType() const;
-
public:
// This function is overridden by subclasses, but they must return
// a FunctionDecl.
@@ -357,9 +403,16 @@ public:
virtual RuntimeDefinition getRuntimeDefinition() const {
const FunctionDecl *FD = getDecl();
- // Note that hasBody() will fill FD with the definition FunctionDecl.
- if (FD && FD->hasBody(FD))
- return RuntimeDefinition(FD);
+ // Note that the AnalysisDeclContext will have the FunctionDecl with
+ // the definition (if one exists).
+ if (FD) {
+ AnalysisDeclContext *AD =
+ getLocationContext()->getAnalysisDeclContext()->
+ getManager()->getContext(FD);
+ if (AD->getBody())
+ return RuntimeDefinition(AD->getDecl());
+ }
+
return RuntimeDefinition();
}
@@ -442,8 +495,6 @@ protected:
virtual void getExtraInvalidatedRegions(RegionList &Regions) const;
- virtual QualType getDeclaredResultType() const;
-
public:
/// \brief Returns the region associated with this instance of the block.
///
@@ -499,13 +550,7 @@ public:
virtual const Expr *getCXXThisExpr() const { return 0; }
/// \brief Returns the value of the implicit 'this' object.
- virtual SVal getCXXThisVal() const {
- const Expr *Base = getCXXThisExpr();
- // FIXME: This doesn't handle an overloaded ->* operator.
- if (!Base)
- return UnknownVal();
- return getSVal(Base);
- }
+ virtual SVal getCXXThisVal() const;
virtual const FunctionDecl *getDecl() const;
@@ -550,6 +595,8 @@ public:
}
virtual const Expr *getCXXThisExpr() const;
+
+ virtual RuntimeDefinition getRuntimeDefinition() const;
virtual Kind getKind() const { return CE_CXXMember; }
@@ -605,6 +652,8 @@ class CXXDestructorCall : public CXXInstanceCall {
friend class CallEventManager;
protected:
+ typedef llvm::PointerIntPair<const MemRegion *, 1, bool> DtorDataTy;
+
/// Creates an implicit destructor.
///
/// \param DD The destructor that will be called.
@@ -613,10 +662,10 @@ protected:
/// \param St The path-sensitive state at this point in the program.
/// \param LCtx The location context at this point in the program.
CXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
- const MemRegion *Target, ProgramStateRef St,
- const LocationContext *LCtx)
+ const MemRegion *Target, bool IsBaseDestructor,
+ ProgramStateRef St, const LocationContext *LCtx)
: CXXInstanceCall(DD, St, LCtx) {
- Data = Target;
+ Data = DtorDataTy(Target, IsBaseDestructor).getOpaqueValue();
Location = Trigger->getLocEnd();
}
@@ -627,9 +676,16 @@ public:
virtual SourceRange getSourceRange() const { return Location; }
virtual unsigned getNumArgs() const { return 0; }
+ virtual RuntimeDefinition getRuntimeDefinition() const;
+
/// \brief Returns the value of the implicit 'this' object.
virtual SVal getCXXThisVal() const;
+ /// Returns true if this is a call to a base class destructor.
+ bool isBaseDestructor() const {
+ return DtorDataTy::getFromOpaqueValue(Data).getInt();
+ }
+
virtual Kind getKind() const { return CE_CXXDestructor; }
static bool classof(const CallEvent *CA) {
@@ -651,10 +707,10 @@ protected:
/// a new symbolic region will be used.
/// \param St The path-sensitive state at this point in the program.
/// \param LCtx The location context at this point in the program.
- CXXConstructorCall(const CXXConstructExpr *CE, const MemRegion *target,
+ CXXConstructorCall(const CXXConstructExpr *CE, const MemRegion *Target,
ProgramStateRef St, const LocationContext *LCtx)
: AnyFunctionCall(CE, St, LCtx) {
- Data = target;
+ Data = Target;
}
CXXConstructorCall(const CXXConstructorCall &Other) : AnyFunctionCall(Other){}
@@ -761,8 +817,6 @@ protected:
virtual void getExtraInvalidatedRegions(RegionList &Regions) const;
- virtual QualType getDeclaredResultType() const;
-
/// Check if the selector may have multiple definitions (may have overrides).
virtual bool canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
Selector Sel) const;
@@ -796,6 +850,9 @@ public:
/// \brief Returns the value of the receiver at the time of this call.
SVal getReceiverSVal() const;
+ /// \brief Return the value of 'self' if available.
+ SVal getSelfSVal() const;
+
/// \brief Get the interface for the receiver.
///
/// This works whether this is an instance message or a class message.
@@ -804,6 +861,9 @@ public:
return getOriginExpr()->getReceiverInterface();
}
+ /// \brief Checks if the receiver refers to 'self' or 'super'.
+ bool isReceiverSelfOrSuper() const;
+
/// Returns how the message was written in the source (property access,
/// subscript, or explicit message send).
ObjCMessageKind getMessageKind() const;
@@ -879,6 +939,13 @@ class CallEventManager {
return new (allocate()) T(A1, A2, A3, St, LCtx);
}
+ template <typename T, typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4>
+ T *create(Arg1 A1, Arg2 A2, Arg3 A3, Arg4 A4, ProgramStateRef St,
+ const LocationContext *LCtx) {
+ return new (allocate()) T(A1, A2, A3, A4, St, LCtx);
+ }
+
public:
CallEventManager(llvm::BumpPtrAllocator &alloc) : Alloc(alloc) {}
@@ -905,9 +972,9 @@ public:
CallEventRef<CXXDestructorCall>
getCXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
- const MemRegion *Target, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<CXXDestructorCall>(DD, Trigger, Target, State, LCtx);
+ const MemRegion *Target, bool IsBase,
+ ProgramStateRef State, const LocationContext *LCtx) {
+ return create<CXXDestructorCall>(DD, Trigger, Target, IsBase, State, LCtx);
}
CallEventRef<CXXAllocatorCall>
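The new identity helpers make the common "is this really C's malloc?" test cheap inside a checker. A hedged sketch of a post-call callback; the checker class itself is hypothetical:

    void HypotheticalMallocChecker::checkPostCall(const CallEvent &Call,
                                                  CheckerContext &C) const {
      // Rules out C++ member functions and std:: overloads named "malloc".
      if (!Call.isGlobalCFunction("malloc"))
        return;

      // Safe here: in a post-call callback the return value is already bound.
      SVal RetVal = Call.getReturnValue();
      (void)RetVal; // ... track the allocated region ...
    }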
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index 8c8e82c..4558cd9 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -16,10 +16,57 @@
#define LLVM_CLANG_SA_CORE_PATHSENSITIVE_CHECKERCONTEXT
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
namespace clang {
namespace ento {
+ /// Declares an immutable map of type \p NameTy, suitable for placement into
+ /// the ProgramState. This is implemented using llvm::ImmutableMap.
+ ///
+ /// \code
+ /// State = State->set<Name>(K, V);
+ /// const Value *V = State->get<Name>(K); // Returns NULL if not in the map.
+ /// State = State->remove<Name>(K);
+ /// NameTy Map = State->get<Name>();
+ /// \endcode
+ ///
+ /// The macro should not be used inside namespaces, or for traits that must
+ /// be accessible from more than one translation unit.
+ #define REGISTER_MAP_WITH_PROGRAMSTATE(Name, Key, Value) \
+ REGISTER_TRAIT_WITH_PROGRAMSTATE(Name, \
+ CLANG_ENTO_PROGRAMSTATE_MAP(Key, Value))
+
+ /// Declares an immutable set of type \p NameTy, suitable for placement into
+ /// the ProgramState. This is implemented using llvm::ImmutableSet.
+ ///
+ /// \code
+ /// State = State->add<Name>(E);
+ /// State = State->remove<Name>(E);
+ /// bool Present = State->contains<Name>(E);
+ /// NameTy Set = State->get<Name>();
+ /// \endcode
+ ///
+ /// The macro should not be used inside namespaces, or for traits that must
+ /// be accessible from more than one translation unit.
+ #define REGISTER_SET_WITH_PROGRAMSTATE(Name, Elem) \
+ REGISTER_TRAIT_WITH_PROGRAMSTATE(Name, llvm::ImmutableSet<Elem>)
+
+ /// Declares an immutable list of type \p NameTy, suitable for placement into
+ /// the ProgramState. This is implemented using llvm::ImmutableList.
+ ///
+ /// \code
+ /// State = State->add<Name>(E); // Adds to the /end/ of the list.
+ /// bool Present = State->contains<Name>(E);
+ /// NameTy List = State->get<Name>();
+ /// \endcode
+ ///
+ /// The macro should not be used inside namespaces, or for traits that must
+ /// be accessible from more than one translation unit.
+ #define REGISTER_LIST_WITH_PROGRAMSTATE(Name, Elem) \
+ REGISTER_TRAIT_WITH_PROGRAMSTATE(Name, llvm::ImmutableList<Elem>)
+
+
class CheckerContext {
ExprEngine &Eng;
/// The current exploded(symbolic execution) graph node.
@@ -64,6 +111,10 @@ public:
return Eng.getStoreManager();
}
+ const AnalyzerOptions::ConfigTable &getConfig() const {
+ return Eng.getAnalysisManager().options.Config;
+ }
+
/// \brief Returns the previous node in the exploded graph, which includes
/// the state of the program before the checker ran. Note, checkers should
/// not retain the node in their state since the nodes might get invalidated.
@@ -76,8 +127,8 @@ public:
/// \brief Returns the number of times the current block has been visited
/// along the analyzed path.
- unsigned getCurrentBlockCount() const {
- return NB.getContext().getCurrentBlockCount();
+ unsigned blockCount() const {
+ return NB.getContext().blockCount();
}
ASTContext &getASTContext() {
@@ -96,6 +147,9 @@ public:
return Pred->getStackFrame();
}
+ /// Return true if the current LocationContext has no caller context.
+ bool inTopFrame() const { return getLocationContext()->inTopFrame(); }
+
BugReporter &getBugReporter() {
return Eng.getBugReporter();
}
@@ -144,20 +198,15 @@ public:
/// \brief Generates a new transition in the program state graph
/// (ExplodedGraph). Uses the default CheckerContext predecessor node.
///
- /// @param State The state of the generated node.
+ /// @param State The state of the generated node. If not specified, the state
+ /// will not be changed, but the new node will have the checker's tag.
/// @param Tag The tag is used to uniquely identify the creation site. If no
/// tag is specified, a default tag, unique to the given checker,
/// will be used. Tags are used to prevent states generated at
/// different sites from caching out.
- ExplodedNode *addTransition(ProgramStateRef State,
+ ExplodedNode *addTransition(ProgramStateRef State = 0,
const ProgramPointTag *Tag = 0) {
- return addTransitionImpl(State, false, 0, Tag);
- }
-
- /// \brief Generates a default transition (containing checker tag but no
- /// checker state changes).
- ExplodedNode *addTransition() {
- return addTransition(getState());
+ return addTransitionImpl(State ? State : getState(), false, 0, Tag);
}
/// \brief Generates a new transition with the given predecessor.
@@ -167,25 +216,24 @@ public:
/// @param Pred The transition will be generated from the specified Pred node
/// to the newly generated node.
/// @param Tag The tag to uniquely identify the creation site.
- /// @param IsSink Mark the new node as sink, which will stop exploration of
- /// the given path.
ExplodedNode *addTransition(ProgramStateRef State,
- ExplodedNode *Pred,
- const ProgramPointTag *Tag = 0,
- bool IsSink = false) {
- return addTransitionImpl(State, IsSink, Pred, Tag);
+ ExplodedNode *Pred,
+ const ProgramPointTag *Tag = 0) {
+ return addTransitionImpl(State, false, Pred, Tag);
}
- /// \brief Generate a sink node. Generating sink stops exploration of the
+ /// \brief Generate a sink node. Generating a sink stops exploration of the
/// given path.
- ExplodedNode *generateSink(ProgramStateRef state = 0) {
- return addTransitionImpl(state ? state : getState(), true);
+ ExplodedNode *generateSink(ProgramStateRef State = 0,
+ ExplodedNode *Pred = 0,
+ const ProgramPointTag *Tag = 0) {
+ return addTransitionImpl(State ? State : getState(), true, Pred, Tag);
}
/// \brief Emit the diagnostics report.
- void EmitReport(BugReport *R) {
+ void emitReport(BugReport *R) {
Changed = true;
- Eng.getBugReporter().EmitReport(R);
+ Eng.getBugReporter().emitReport(R);
}
/// \brief Get the declaration of the called function (path-sensitive).
@@ -194,17 +242,33 @@ public:
/// \brief Get the name of the called function (path-sensitive).
StringRef getCalleeName(const FunctionDecl *FunDecl) const;
+ /// \brief Get the identifier of the called function (path-sensitive).
+ const IdentifierInfo *getCalleeIdentifier(const CallExpr *CE) const {
+ const FunctionDecl *FunDecl = getCalleeDecl(CE);
+ if (FunDecl)
+ return FunDecl->getIdentifier();
+ else
+ return 0;
+ }
+
/// \brief Get the name of the called function (path-sensitive).
StringRef getCalleeName(const CallExpr *CE) const {
const FunctionDecl *FunDecl = getCalleeDecl(CE);
return getCalleeName(FunDecl);
}
- /// Given a function declaration and a name checks if this is a C lib
- /// function with the given name.
- bool isCLibraryFunction(const FunctionDecl *FD, StringRef Name);
- static bool isCLibraryFunction(const FunctionDecl *FD, StringRef Name,
- ASTContext &Context);
+ /// \brief Returns true if the callee is an externally-visible function in the
+ /// top-level namespace, such as \c malloc.
+ ///
+ /// If a name is provided, the function must additionally match the given
+ /// name.
+ ///
+ /// Note that this deliberately excludes C++ library functions in the \c std
+ /// namespace, but will include C library functions accessed through the
+ /// \c std namespace. This also does not check if the function is declared
+ /// as 'extern "C"', or if it uses C++ name mangling.
+ static bool isCLibraryFunction(const FunctionDecl *FD,
+ StringRef Name = StringRef());
/// \brief Depending on whether the location corresponds to a macro, return
/// either the macro name or the token spelling.
@@ -226,9 +290,15 @@ private:
return Pred;
Changed = true;
- ExplodedNode *node = NB.generateNode(Tag ? Location.withTag(Tag) : Location,
- State,
- P ? P : Pred, MarkAsSink);
+ const ProgramPoint &LocalLoc = (Tag ? Location.withTag(Tag) : Location);
+ if (!P)
+ P = Pred;
+
+ ExplodedNode *node;
+ if (MarkAsSink)
+ node = NB.generateSink(LocalLoc, State, P);
+ else
+ node = NB.generateNode(LocalLoc, State, P);
return node;
}
};
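Putting the new registration macros together with the revised transition API, a minimal state update in a hypothetical checker looks roughly like this (noteRetain and RefCounts are made-up names):

    // At file scope, outside any namespace, as the macro docs require:
    REGISTER_MAP_WITH_PROGRAMSTATE(RefCounts, SymbolRef, unsigned)

    void noteRetain(SymbolRef Sym, CheckerContext &C) {
      ProgramStateRef State = C.getState();
      const unsigned *Old = State->get<RefCounts>(Sym); // NULL when absent
      State = State->set<RefCounts>(Sym, Old ? *Old + 1 : 1u);
      C.addTransition(State); // new node carries the updated map
    }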
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
index 631858d..4a78849 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/Support/SaveAndRestore.h"
namespace llvm {
class APSInt;
@@ -26,29 +27,83 @@ namespace ento {
class SubEngine;
+class ConditionTruthVal {
+ llvm::Optional<bool> Val;
+public:
+ /// Construct a ConditionTruthVal indicating the constraint is constrained
+ /// to either true or false, depending on the boolean value provided.
+ ConditionTruthVal(bool constraint) : Val(constraint) {}
+
+ /// Construct a ConditionTruthVal indicating the constraint is underconstrained.
+ ConditionTruthVal() {}
+
+ /// Return true if the constraint is perfectly constrained to 'true'.
+ bool isConstrainedTrue() const {
+ return Val.hasValue() && Val.getValue();
+ }
+
+ /// Return true if the constraint is perfectly constrained to 'false'.
+ bool isConstrainedFalse() const {
+ return Val.hasValue() && !Val.getValue();
+ }
+
+ /// Return true if the constraint is perfectly constrained.
+ bool isConstrained() const {
+ return Val.hasValue();
+ }
+
+ /// Return true if the constraint is underconstrained and we do not know
+ /// whether it is true or false.
+ bool isUnderconstrained() const {
+ return !Val.hasValue();
+ }
+};
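A sketch of consuming the three-valued result, using the isNull() query introduced further below (CMgr, State, and Sym are assumed to be in scope):

    ConditionTruthVal Nullness = CMgr.isNull(State, Sym);
    if (Nullness.isConstrainedTrue())
      ;  // Sym is provably null on this path
    else if (Nullness.isConstrainedFalse())
      ;  // Sym is provably non-null
    else
      ;  // underconstrained: neither assumption can be made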
+
class ConstraintManager {
public:
+ ConstraintManager() : NotifyAssumeClients(true) {}
+
virtual ~ConstraintManager();
virtual ProgramStateRef assume(ProgramStateRef state,
- DefinedSVal Cond,
- bool Assumption) = 0;
-
- std::pair<ProgramStateRef, ProgramStateRef >
- assumeDual(ProgramStateRef state, DefinedSVal Cond)
- {
- std::pair<ProgramStateRef, ProgramStateRef > res =
- std::make_pair(assume(state, Cond, true), assume(state, Cond, false));
-
- assert(!(!res.first && !res.second) && "System is over constrained.");
- return res;
+ DefinedSVal Cond,
+ bool Assumption) = 0;
+
+ typedef std::pair<ProgramStateRef, ProgramStateRef> ProgramStatePair;
+
+ /// Returns a pair of states (StTrue, StFalse) where the given condition is
+ /// assumed to be true or false, respectively.
+ ProgramStatePair assumeDual(ProgramStateRef State, DefinedSVal Cond) {
+ ProgramStateRef StTrue = assume(State, Cond, true);
+
+ // If StTrue is infeasible, asserting the falseness of Cond is unnecessary
+ // because the existing constraints already establish this.
+ if (!StTrue) {
+ // FIXME: This is fairly expensive and should be disabled even in
+ // Release+Asserts builds.
+ assert(assume(State, Cond, false) && "System is over constrained.");
+ return ProgramStatePair((ProgramStateRef)NULL, State);
+ }
+
+ ProgramStateRef StFalse = assume(State, Cond, false);
+ if (!StFalse) {
+ // We are careful to return the original state, /not/ StTrue,
+ // because we want to avoid having callers generate a new node
+ // in the ExplodedGraph.
+ return ProgramStatePair(State, (ProgramStateRef)NULL);
+ }
+
+ return ProgramStatePair(StTrue, StFalse);
}
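A sketch of forking on a branch condition with the reworked assumeDual(); processBranch() is a hypothetical stand-in for the caller's per-branch work:

    ConstraintManager::ProgramStatePair Branches = CM.assumeDual(State, Cond);
    if (Branches.first)    // feasible "condition is true" state
      processBranch(Branches.first, /*Taken=*/true);
    if (Branches.second)   // feasible "condition is false" state
      processBranch(Branches.second, /*Taken=*/false);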
+ /// \brief If a symbol is perfectly constrained to a constant, attempt
+ /// to return the concrete value.
+ ///
+ /// Note that a ConstraintManager is not obligated to return a concretized
+ /// value for a symbol, even if it is perfectly constrained.
virtual const llvm::APSInt* getSymVal(ProgramStateRef state,
- SymbolRef sym) const = 0;
-
- virtual bool isEqual(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V) const = 0;
+ SymbolRef sym) const {
+ return 0;
+ }
virtual ProgramStateRef removeDeadBindings(ProgramStateRef state,
SymbolReaper& SymReaper) = 0;
@@ -59,20 +114,38 @@ public:
const char *sep) = 0;
virtual void EndPath(ProgramStateRef state) {}
+
+ /// Convenience method to query the state to see if a symbol is null or
+ /// not null, or if neither assumption can be made.
+ ConditionTruthVal isNull(ProgramStateRef State, SymbolRef Sym) {
+ llvm::SaveAndRestore<bool> DisableNotify(NotifyAssumeClients, false);
+
+ return checkNull(State, Sym);
+ }
protected:
+ /// A flag to indicate that clients should be notified of assumptions.
+ /// By default this is the case, but sometimes this needs to be restricted
+ /// to avoid infinite recursions within the ConstraintManager.
+ ///
+ /// Note that this flag allows the ConstraintManager to be re-entrant,
+ /// but not thread-safe.
+ bool NotifyAssumeClients;
+
/// canReasonAbout - Not all ConstraintManagers can accurately reason about
/// all SVal values. This method returns true if the ConstraintManager can
/// reasonably handle a given SVal value. This is typically queried by
/// ExprEngine to determine if the value should be replaced with a
/// conjured symbolic value in order to recover some precision.
virtual bool canReasonAbout(SVal X) const = 0;
+
+ /// Returns whether or not a symbol is known to be null ("true"), known to be
+ /// non-null ("false"), or may be either ("underconstrained").
+ virtual ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym);
};
-ConstraintManager* CreateBasicConstraintManager(ProgramStateManager& statemgr,
- SubEngine &subengine);
ConstraintManager* CreateRangeConstraintManager(ProgramStateManager& statemgr,
- SubEngine &subengine);
+ SubEngine *subengine);
} // end GR namespace
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index e75cdd8..b668640 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -80,10 +80,6 @@ private:
/// usually because it could not reason about something.
BlocksAborted blocksAborted;
- /// The functions which have been analyzed through inlining. This is owned by
- /// AnalysisConsumer. It can be null.
- SetOfConstDecls *AnalyzedCallees;
-
/// The information about functions shared by the whole translation unit.
/// (This data is owned by AnalysisConsumer.)
FunctionSummariesTy *FunctionSummaries;
@@ -101,19 +97,18 @@ private:
ExplodedNode *Pred);
private:
- CoreEngine(const CoreEngine&); // Do not implement.
- CoreEngine& operator=(const CoreEngine&);
+ CoreEngine(const CoreEngine &) LLVM_DELETED_FUNCTION;
+ void operator=(const CoreEngine &) LLVM_DELETED_FUNCTION;
ExplodedNode *generateCallExitBeginNode(ExplodedNode *N);
public:
/// Construct a CoreEngine object to analyze the provided CFG.
- CoreEngine(SubEngine& subengine, SetOfConstDecls *VisitedCallees,
+ CoreEngine(SubEngine& subengine,
FunctionSummariesTy *FS)
: SubEng(subengine), G(new ExplodedGraph()),
WList(WorkList::makeDFS()),
BCounterFactory(G->getAllocator()),
- AnalyzedCallees(VisitedCallees),
FunctionSummaries(FS){}
/// getGraph - Returns the exploded graph.
@@ -185,20 +180,18 @@ public:
struct NodeBuilderContext {
const CoreEngine &Eng;
const CFGBlock *Block;
- ExplodedNode *Pred;
+ const LocationContext *LC;
NodeBuilderContext(const CoreEngine &E, const CFGBlock *B, ExplodedNode *N)
- : Eng(E), Block(B), Pred(N) { assert(B); assert(!N->isSink()); }
-
- ExplodedNode *getPred() const { return Pred; }
+ : Eng(E), Block(B), LC(N->getLocationContext()) { assert(B); }
/// \brief Return the CFGBlock associated with this builder.
const CFGBlock *getBlock() const { return Block; }
/// \brief Returns the number of times the current basic block has been
/// visited on the exploded graph path.
- unsigned getCurrentBlockCount() const {
+ unsigned blockCount() const {
return Eng.WList->getBlockCounter().getNumVisited(
- Pred->getLocationContext()->getCurrentStackFrame(),
+ LC->getCurrentStackFrame(),
Block->getBlockID());
}
};
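A sketch of using the renamed blockCount() to bound loop unrolling; MaxVisits is a hypothetical client-chosen limit, not part of this interface:

    static const unsigned MaxVisits = 4;
    if (BldCtx.blockCount() >= MaxVisits) {
      // This block has recurred too often on the current path; emit a sink
      // instead of unrolling further.
      Bldr.generateSink(Loc, State, Pred);
      return;
    }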
@@ -265,14 +258,21 @@ public:
virtual ~NodeBuilder() {}
/// \brief Generates a node in the ExplodedGraph.
+ ExplodedNode *generateNode(const ProgramPoint &PP,
+ ProgramStateRef State,
+ ExplodedNode *Pred) {
+ return generateNodeImpl(PP, State, Pred, false);
+ }
+
+ /// \brief Generates a sink in the ExplodedGraph.
///
/// When a node is marked as sink, the exploration from the node is stopped -
- /// the node becomes the last node on the path.
- ExplodedNode *generateNode(const ProgramPoint &PP,
+ /// the node becomes the last node on the path and certain kinds of bugs are
+ /// suppressed.
+ ExplodedNode *generateSink(const ProgramPoint &PP,
ProgramStateRef State,
- ExplodedNode *Pred,
- bool MarkAsSink = false) {
- return generateNodeImpl(PP, State, Pred, MarkAsSink);
+ ExplodedNode *Pred) {
+ return generateNodeImpl(PP, State, Pred, true);
}
const ExplodedNodeSet &getResults() {
@@ -317,13 +317,18 @@ public:
NodeBuilderWithSinks(ExplodedNode *Pred, ExplodedNodeSet &DstSet,
const NodeBuilderContext &Ctx, ProgramPoint &L)
: NodeBuilder(Pred, DstSet, Ctx), Location(L) {}
+
ExplodedNode *generateNode(ProgramStateRef State,
ExplodedNode *Pred,
- const ProgramPointTag *Tag = 0,
- bool MarkAsSink = false) {
- ProgramPoint LocalLoc = (Tag ? Location.withTag(Tag): Location);
+ const ProgramPointTag *Tag = 0) {
+ const ProgramPoint &LocalLoc = (Tag ? Location.withTag(Tag) : Location);
+ return NodeBuilder::generateNode(LocalLoc, State, Pred);
+ }
- ExplodedNode *N = generateNodeImpl(LocalLoc, State, Pred, MarkAsSink);
+ ExplodedNode *generateSink(ProgramStateRef State, ExplodedNode *Pred,
+ const ProgramPointTag *Tag = 0) {
+ const ProgramPoint &LocalLoc = (Tag ? Location.withTag(Tag) : Location);
+ ExplodedNode *N = NodeBuilder::generateSink(LocalLoc, State, Pred);
if (N && N->isSink())
sinksGenerated.push_back(N);
return N;
@@ -336,7 +341,7 @@ public:
/// \class StmtNodeBuilder
/// \brief This builder class is useful for generating nodes that resulted from
-/// visiting a statement. The main difference from it's parent NodeBuilder is
+/// visiting a statement. The main difference from its parent NodeBuilder is
/// that it creates a statement specific ProgramPoint.
class StmtNodeBuilder: public NodeBuilder {
NodeBuilder *EnclosingBldr;
@@ -363,22 +368,27 @@ public:
virtual ~StmtNodeBuilder();
+ using NodeBuilder::generateNode;
+ using NodeBuilder::generateSink;
+
ExplodedNode *generateNode(const Stmt *S,
ExplodedNode *Pred,
ProgramStateRef St,
- bool MarkAsSink = false,
const ProgramPointTag *tag = 0,
ProgramPoint::Kind K = ProgramPoint::PostStmtKind){
const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
Pred->getLocationContext(), tag);
- return generateNodeImpl(L, St, Pred, MarkAsSink);
+ return NodeBuilder::generateNode(L, St, Pred);
}
- ExplodedNode *generateNode(const ProgramPoint &PP,
+ ExplodedNode *generateSink(const Stmt *S,
ExplodedNode *Pred,
- ProgramStateRef State,
- bool MarkAsSink = false) {
- return generateNodeImpl(PP, State, Pred, MarkAsSink);
+ ProgramStateRef St,
+ const ProgramPointTag *tag = 0,
+ ProgramPoint::Kind K = ProgramPoint::PostStmtKind){
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+ Pred->getLocationContext(), tag);
+ return NodeBuilder::generateSink(L, St, Pred);
}
};
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
new file mode 100644
index 0000000..5ac97db
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
@@ -0,0 +1,52 @@
+//== DynamicTypeInfo.h - Runtime type information ----------------*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SA_CORE_DYNAMICTYPEINFO_H
+#define LLVM_CLANG_SA_CORE_DYNAMICTYPEINFO_H
+
+#include "clang/AST/Type.h"
+
+namespace clang {
+namespace ento {
+
+/// \brief Stores the currently inferred strictest bound on the runtime type
+/// of a region in a given state along the analysis path.
+class DynamicTypeInfo {
+private:
+ QualType T;
+ bool CanBeASubClass;
+
+public:
+
+ DynamicTypeInfo() : T(QualType()) {}
+ DynamicTypeInfo(QualType WithType, bool CanBeSub = true)
+ : T(WithType), CanBeASubClass(CanBeSub) {}
+
+ /// \brief Return false if no dynamic type info is available.
+ bool isValid() const { return !T.isNull(); }
+
+ /// \brief Returns the currently inferred upper bound on the runtime type.
+ QualType getType() const { return T; }
+
+ /// \brief Returns false if the type information is precise (the type T is
+ /// the only type in the lattice), true otherwise.
+ bool canBeASubClass() const { return CanBeASubClass; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.Add(T);
+ ID.AddInteger((unsigned)CanBeASubClass);
+ }
+ bool operator==(const DynamicTypeInfo &X) const {
+ return T == X.T && CanBeASubClass == X.CanBeASubClass;
+ }
+};
+
+} // end ento
+} // end clang
+
+#endif
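A sketch of the intended use (RecordTy and BaseTy are hypothetical QualTypes):

    DynamicTypeInfo Exact(RecordTy, /*CanBeSub=*/false);
    DynamicTypeInfo Bound(BaseTy);     // CanBeSub defaults to true
    if (Exact.isValid() && !Exact.canBeASubClass()) {
      // The type is known precisely; devirtualization is sound here.
      QualType T = Exact.getType();
      (void)T;
    }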
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
index b80213e..eb9bd85 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
@@ -33,10 +33,11 @@ class SValBuilder;
/// other things.
class EnvironmentEntry : public std::pair<const Stmt*,
const StackFrameContext *> {
+ friend class EnvironmentManager;
+ EnvironmentEntry makeLocation() const;
+
public:
- EnvironmentEntry(const Stmt *s, const LocationContext *L)
- : std::pair<const Stmt*,
- const StackFrameContext*>(s, L ? L->getCurrentStackFrame():0) {}
+ EnvironmentEntry(const Stmt *s, const LocationContext *L);
const Stmt *getStmt() const { return first; }
const LocationContext *getLocationContext() const { return second; }
@@ -76,9 +77,7 @@ public:
/// Fetches the current binding of the expression in the
/// Environment.
- SVal getSVal(const EnvironmentEntry &E,
- SValBuilder &svalBuilder,
- bool useOnlyDirectBindings = false) const;
+ SVal getSVal(const EnvironmentEntry &E, SValBuilder &svalBuilder) const;
/// Profile - Profile the contents of an Environment object for use
/// in a FoldingSet.
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
index 1052d94..b112e66 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
@@ -60,45 +60,50 @@ class ExplodedNode : public llvm::FoldingSetNode {
friend class SwitchNodeBuilder;
friend class EndOfFunctionNodeBuilder;
+ /// Efficiently stores a list of ExplodedNodes, or an optional flag.
+ ///
+ /// NodeGroup provides opaque storage for a list of ExplodedNodes, optimizing
+ /// for the case when there is only one node in the group. This is a fairly
+ /// common case in an ExplodedGraph, where most nodes have only one
+ /// predecessor and many have only one successor. It can also be used to
+ /// store a flag rather than a node list, which ExplodedNode uses to mark
+ /// whether a node is a sink. If the flag is set, the group is implicitly
+ /// empty and no nodes may be added.
class NodeGroup {
- enum { Size1 = 0x0, SizeOther = 0x1, AuxFlag = 0x2, Mask = 0x3 };
+ // Conceptually a discriminated union. If the low bit is set, the node is
+ // a sink. If the low bit is not set, the pointer refers to the storage
+ // for the nodes in the group.
+ // This is not a PointerIntPair in order to keep the storage type opaque.
uintptr_t P;
-
- unsigned getKind() const {
- return P & 0x1;
- }
-
- void *getPtr() const {
- assert (!getFlag());
- return reinterpret_cast<void*>(P & ~Mask);
- }
-
- ExplodedNode *getNode() const {
- return reinterpret_cast<ExplodedNode*>(getPtr());
- }
public:
- NodeGroup() : P(0) {}
+ NodeGroup(bool Flag = false) : P(Flag) {
+ assert(getFlag() == Flag);
+ }
- ExplodedNode **begin() const;
+ ExplodedNode * const *begin() const;
- ExplodedNode **end() const;
+ ExplodedNode * const *end() const;
unsigned size() const;
- bool empty() const { return (P & ~Mask) == 0; }
+ bool empty() const { return P == 0 || getFlag() != 0; }
+ /// Adds a node to the list.
+ ///
+ /// The group must not have been created with its flag set.
void addNode(ExplodedNode *N, ExplodedGraph &G);
+ /// Replaces the single node in this group with a new node.
+ ///
+ /// Note that this should only be used when you know the group was not
+ /// created with its flag set, and that the group is empty or contains
+ /// only a single node.
void replaceNode(ExplodedNode *node);
- void setFlag() {
- assert(P == 0);
- P = AuxFlag;
- }
-
+ /// Returns whether this group was created with its flag set.
bool getFlag() const {
- return P & AuxFlag ? true : false;
+ return (P & 1);
}
};
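A sketch of the low-bit discrimination described above (free functions for illustration, not the actual members):

    static bool isSinkFlag(uintptr_t P) { return P & 1; }
    static void *nodeStorage(uintptr_t P) {
      assert(!(P & 1) && "a flag group has no node storage");
      return reinterpret_cast<void *>(P & ~uintptr_t(1));
    }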
@@ -119,9 +124,8 @@ public:
explicit ExplodedNode(const ProgramPoint &loc, ProgramStateRef state,
bool IsSink)
- : Location(loc), State(state) {
- if (IsSink)
- Succs.setFlag();
+ : Location(loc), State(state), Succs(IsSink) {
+ assert(isSink() == IsSink);
}
~ExplodedNode() {}
@@ -190,9 +194,9 @@ public:
}
// Iterators over successor and predecessor vertices.
- typedef ExplodedNode** succ_iterator;
+ typedef ExplodedNode* const * succ_iterator;
typedef const ExplodedNode* const * const_succ_iterator;
- typedef ExplodedNode** pred_iterator;
+ typedef ExplodedNode* const * pred_iterator;
typedef const ExplodedNode* const * const_pred_iterator;
pred_iterator pred_begin() { return Preds.begin(); }
@@ -278,11 +282,13 @@ protected:
/// A list of nodes that can be reused.
NodeVector FreeNodes;
- /// A flag that indicates whether nodes should be recycled.
- bool reclaimNodes;
+ /// Determines how often nodes are reclaimed.
+ ///
+ /// If this is 0, nodes will never be reclaimed.
+ unsigned ReclaimNodeInterval;
/// Counter to determine when to reclaim nodes.
- unsigned reclaimCounter;
+ unsigned ReclaimCounter;
public:
@@ -370,7 +376,9 @@ public:
/// Enable tracking of recently allocated nodes for potential reclamation
/// when calling reclaimRecentlyAllocatedNodes().
- void enableNodeReclamation() { reclaimNodes = true; }
+ void enableNodeReclamation(unsigned Interval) {
+ ReclaimCounter = ReclaimNodeInterval = Interval;
+ }
/// Reclaim "uninteresting" nodes created since the last time this method
/// was called.
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 4addb9d..78b2542 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -16,6 +16,7 @@
#ifndef LLVM_CLANG_GR_EXPRENGINE
#define LLVM_CLANG_GR_EXPRENGINE
+#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
@@ -70,17 +71,14 @@ class ExprEngine : public SubEngine {
/// variables and symbols (as determined by a liveness analysis).
ProgramStateRef CleanedState;
- /// currentStmt - The current block-level statement.
- const Stmt *currentStmt;
- unsigned int currentStmtIdx;
- const NodeBuilderContext *currentBuilderContext;
-
- /// Obj-C Class Identifiers.
- IdentifierInfo* NSExceptionII;
-
- /// Obj-C Selectors.
- Selector* NSExceptionInstanceRaiseSelectors;
- Selector RaiseSel;
+ /// currStmt - The current block-level statement.
+ const Stmt *currStmt;
+ unsigned int currStmtIdx;
+ const NodeBuilderContext *currBldrCtx;
+
+ /// Helper object to determine if an Objective-C message expression
+ /// implicitly never returns.
+ ObjCNoReturn ObjCNoRet;
/// Whether or not GC is enabled in this analysis.
bool ObjCGCEnabled;
@@ -90,9 +88,13 @@ class ExprEngine : public SubEngine {
/// destructor is called before the rest of the ExprEngine is destroyed.
GRBugReporter BR;
+ /// The functions which have been analyzed through inlining. This is owned by
+ /// AnalysisConsumer. It can be null.
+ SetOfConstDecls *VisitedCallees;
+
public:
ExprEngine(AnalysisManager &mgr, bool gcEnabled,
- SetOfConstDecls *VisitedCallees,
+ SetOfConstDecls *VisitedCalleesIn,
FunctionSummariesTy *FS);
~ExprEngine();
@@ -126,8 +128,8 @@ public:
BugReporter& getBugReporter() { return BR; }
const NodeBuilderContext &getBuilderContext() {
- assert(currentBuilderContext);
- return *currentBuilderContext;
+ assert(currBldrCtx);
+ return *currBldrCtx;
}
bool isObjCGCEnabled() { return ObjCGCEnabled; }
@@ -165,8 +167,12 @@ public:
/// are usually reported here).
/// \param K - In some cases it is possible to use PreStmt kind. (Do
/// not use it unless you know what you are doing.)
+ /// If the ReferenceStmt is NULL, everything in this and parent contexts is
+ /// considered live.
+ /// If the stack frame context is NULL, everything on the stack is considered
+ /// dead.
void removeDead(ExplodedNode *Node, ExplodedNodeSet &Out,
- const Stmt *ReferenceStmt, const LocationContext *LC,
+ const Stmt *ReferenceStmt, const StackFrameContext *LC,
const Stmt *DiagnosticStmt,
ProgramPoint::Kind K = ProgramPoint::PreStmtPurgeDeadSymbolsKind);
@@ -192,7 +198,8 @@ public:
/// Called by CoreEngine when processing the entrance of a CFGBlock.
virtual void processCFGBlockEntrance(const BlockEdge &L,
- NodeBuilderWithSinks &nodeBuilder);
+ NodeBuilderWithSinks &nodeBuilder,
+ ExplodedNode *Pred);
/// ProcessBranch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a branch condition.
@@ -213,7 +220,13 @@ public:
/// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
- void processEndOfFunction(NodeBuilderContext& BC);
+ void processEndOfFunction(NodeBuilderContext& BC,
+ ExplodedNode *Pred);
+
+ /// Remove dead bindings/symbols before exiting a function.
+ void removeDeadOnEndOfFunction(NodeBuilderContext& BC,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
/// Generate the entry node of the callee.
void processCallEnter(CallEnter CE, ExplodedNode *Pred);
@@ -258,9 +271,6 @@ public:
BasicValueFactory& getBasicVals() {
return StateMgr.getBasicVals();
}
- const BasicValueFactory& getBasicVals() const {
- return StateMgr.getBasicVals();
- }
// FIXME: Remove when we migrate over to just using ValueManager.
SymbolManager& getSymbolManager() { return SymMgr; }
@@ -283,13 +293,14 @@ public:
ExplodedNode *Pred,
ExplodedNodeSet &Dst);
- /// VisitAsmStmt - Transfer function logic for inline asm.
- void VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ /// VisitGCCAsmStmt - Transfer function logic for inline asm.
+ void VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
/// VisitMSAsmStmt - Transfer function logic for MS inline asm.
void VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
-
+
/// VisitBlockExpr - Transfer function logic for BlockExprs.
void VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
@@ -380,8 +391,8 @@ public:
void VisitCXXConstructExpr(const CXXConstructExpr *E, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
- void VisitCXXDestructor(QualType ObjectType,
- const MemRegion *Dest, const Stmt *S,
+ void VisitCXXDestructor(QualType ObjectType, const MemRegion *Dest,
+ const Stmt *S, bool IsBaseDtor,
ExplodedNode *Pred, ExplodedNodeSet &Dst);
void VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
@@ -395,14 +406,14 @@ public:
ExplodedNode *Pred,
ExplodedNodeSet &Dst);
- /// evalEagerlyAssume - Given the nodes in 'Src', eagerly assume symbolic
+ /// evalEagerlyAssumeBinOpBifurcation - Given the nodes in 'Src', eagerly assume symbolic
/// expressions of the form 'x != 0' and generate new nodes (stored in Dst)
/// with those assumptions.
- void evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
+ void evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
const Expr *Ex);
std::pair<const ProgramPointTag *, const ProgramPointTag*>
- getEagerlyAssumeTags();
+ geteagerlyAssumeBinOpBifurcationTags();
SVal evalMinus(SVal X) {
return X.isValid() ? svalBuilder.evalMinus(cast<NonLoc>(X)) : X;
@@ -433,7 +444,8 @@ protected:
/// evalBind - Handle the semantics of binding a value to a specific location.
/// This method is used by evalStore, VisitDeclStmt, and others.
void evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE, ExplodedNode *Pred,
- SVal location, SVal Val, bool atDeclInit = false);
+ SVal location, SVal Val, bool atDeclInit = false,
+ const ProgramPoint *PP = 0);
public:
// FIXME: 'tag' should be removed, and a LocationContext should be used
@@ -490,6 +502,10 @@ private:
ProgramStateRef St, SVal location,
const ProgramPointTag *tag, bool isLoad);
+ /// Count the stack depth and determine if the call is recursive.
+ void examineStackFrames(const Decl *D, const LocationContext *LCtx,
+ bool &IsRecursive, unsigned &StackDepth);
+
bool shouldInlineDecl(const Decl *D, ExplodedNode *Pred);
bool inlineCall(const CallEvent &Call, const Decl *D, NodeBuilder &Bldr,
ExplodedNode *Pred, ProgramStateRef State);
@@ -510,6 +526,8 @@ private:
/// Traits for storing the call processing policy inside GDM.
/// The GDM stores the corresponding CallExpr pointer.
+// FIXME: This does not use the nice trait macros because it must be accessible
+// from multiple translation units.
struct ReplayWithoutInlining{};
template <>
struct ProgramStateTrait<ReplayWithoutInlining> :
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
index 8044ed8..34fbc3c 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -99,11 +99,11 @@ public:
// Untyped regions.
SymbolicRegionKind,
AllocaRegionKind,
- BlockDataRegionKind,
// Typed regions.
BEG_TYPED_REGIONS,
FunctionTextRegionKind = BEG_TYPED_REGIONS,
BlockTextRegionKind,
+ BlockDataRegionKind,
BEG_TYPED_VALUE_REGIONS,
CompoundLiteralRegionKind = BEG_TYPED_VALUE_REGIONS,
CXXThisRegionKind,
@@ -140,6 +140,9 @@ public:
const MemRegion *getBaseRegion() const;
+ /// Check if the region is a subregion of the given region.
+ virtual bool isSubRegionOf(const MemRegion *R) const;
+
const MemRegion *StripCasts(bool StripBaseCasts = true) const;
bool hasGlobalsOrParametersStorage() const;
@@ -171,8 +174,6 @@ public:
template<typename RegionTy> const RegionTy* getAs() const;
virtual bool isBoundable() const { return false; }
-
- static bool classof(const MemRegion*) { return true; }
};
/// MemSpaceRegion - A memory region that represents a "memory space";
@@ -416,7 +417,7 @@ public:
MemRegionManager* getMemRegionManager() const;
- bool isSubRegionOf(const MemRegion* R) const;
+ virtual bool isSubRegionOf(const MemRegion* R) const;
static bool classof(const MemRegion* R) {
return R->getKind() > END_MEMSPACES;
@@ -530,16 +531,28 @@ public:
/// FunctionTextRegion - A region that represents the code text of a function.
class FunctionTextRegion : public CodeTextRegion {
- const FunctionDecl *FD;
+ const NamedDecl *FD;
public:
- FunctionTextRegion(const FunctionDecl *fd, const MemRegion* sreg)
- : CodeTextRegion(sreg, FunctionTextRegionKind), FD(fd) {}
+ FunctionTextRegion(const NamedDecl *fd, const MemRegion* sreg)
+ : CodeTextRegion(sreg, FunctionTextRegionKind), FD(fd) {
+ assert(isa<ObjCMethodDecl>(fd) || isa<FunctionDecl>(fd));
+ }
QualType getLocationType() const {
- return getContext().getPointerType(FD->getType());
+ const ASTContext &Ctx = getContext();
+ if (const FunctionDecl *D = dyn_cast<FunctionDecl>(FD)) {
+ return Ctx.getPointerType(D->getType());
+ }
+
+ assert(isa<ObjCMethodDecl>(FD));
+ assert(false && "Getting the type of ObjCMethod is not supported yet");
+
+ // TODO: We might want to return a different type here (ex: id (*ty)(...))
+ // depending on how it is used.
+ return QualType();
}
-
- const FunctionDecl *getDecl() const {
+
+ const NamedDecl *getDecl() const {
return FD;
}
@@ -547,7 +560,7 @@ public:
void Profile(llvm::FoldingSetNodeID& ID) const;
- static void ProfileRegion(llvm::FoldingSetNodeID& ID, const FunctionDecl *FD,
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, const NamedDecl *FD,
const MemRegion*);
static bool classof(const MemRegion* R) {
@@ -603,7 +616,7 @@ public:
/// which correspond to "code+data". The distinction is important, because
/// like a closure a block captures the values of externally referenced
/// variables.
-class BlockDataRegion : public SubRegion {
+class BlockDataRegion : public TypedRegion {
friend class MemRegionManager;
const BlockTextRegion *BC;
const LocationContext *LC; // Can be null */
@@ -612,13 +625,15 @@ class BlockDataRegion : public SubRegion {
BlockDataRegion(const BlockTextRegion *bc, const LocationContext *lc,
const MemRegion *sreg)
- : SubRegion(sreg, BlockDataRegionKind), BC(bc), LC(lc),
+ : TypedRegion(sreg, BlockDataRegionKind), BC(bc), LC(lc),
ReferencedVars(0), OriginalVars(0) {}
public:
const BlockTextRegion *getCodeRegion() const { return BC; }
const BlockDecl *getDecl() const { return BC->getDecl(); }
+
+ QualType getLocationType() const { return BC->getLocationType(); }
class referenced_vars_iterator {
const MemRegion * const *R;
@@ -1212,7 +1227,7 @@ public:
return getCXXBaseObjectRegion(baseReg->getDecl(), superRegion);
}
- const FunctionTextRegion *getFunctionTextRegion(const FunctionDecl *FD);
+ const FunctionTextRegion *getFunctionTextRegion(const NamedDecl *FD);
const BlockTextRegion *getBlockTextRegion(const BlockDecl *BD,
CanQualType locTy,
AnalysisDeclContext *AC);
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index b0c51dd..86c94de 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines SymbolRef, ExprBindKey, and ProgramState*.
+// This file defines the state of the program along the analysis path.
//
//===----------------------------------------------------------------------===//
@@ -16,6 +16,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Environment.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
@@ -39,7 +40,7 @@ class CallEvent;
class CallEventManager;
typedef ConstraintManager* (*ConstraintManagerCreator)(ProgramStateManager&,
- SubEngine&);
+ SubEngine*);
typedef StoreManager* (*StoreManagerCreator)(ProgramStateManager&);
//===----------------------------------------------------------------------===//
@@ -56,32 +57,6 @@ template <typename T> struct ProgramStateTrait {
}
};
-/// \class Stores the dynamic type information.
-/// Information about type of an object at runtime. This is used by dynamic
-/// dispatch implementation.
-class DynamicTypeInfo {
- QualType T;
- bool CanBeASubClass;
-
-public:
- DynamicTypeInfo() : T(QualType()) {}
- DynamicTypeInfo(QualType WithType, bool CanBeSub = true)
- : T(WithType), CanBeASubClass(CanBeSub) {}
-
- bool isValid() const { return !T.isNull(); }
-
- QualType getType() const { return T; }
- bool canBeASubClass() const { return CanBeASubClass; }
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- T.Profile(ID);
- ID.AddInteger((unsigned)CanBeASubClass);
- }
- bool operator==(const DynamicTypeInfo &X) const {
- return T == X.T && CanBeASubClass == X.CanBeASubClass;
- }
-};
-
/// \class ProgramState
/// ProgramState - This class encapsulates:
///
@@ -100,7 +75,7 @@ public:
typedef llvm::ImmutableMap<void*, void*> GenericDataMap;
private:
- void operator=(const ProgramState& R) const; // Do not implement.
+ void operator=(const ProgramState& R) LLVM_DELETED_FUNCTION;
friend class ProgramStateManager;
friend class ExplodedGraph;
@@ -130,7 +105,12 @@ public:
~ProgramState();
/// Return the ProgramStateManager associated with this state.
- ProgramStateManager &getStateManager() const { return *stateMgr; }
+ ProgramStateManager &getStateManager() const {
+ return *stateMgr;
+ }
+
+ /// Return the ConstraintManager.
+ ConstraintManager &getConstraintManager() const;
/// getEnvironment - Return the environment associated with this state.
/// The environment is the mapping from expressions to values.
@@ -210,11 +190,13 @@ public:
// Binding and retrieving values to/from the environment and symbolic store.
//==---------------------------------------------------------------------==//
- /// BindCompoundLiteral - Return the state that has the bindings currently
- /// in this state plus the bindings for the CompoundLiteral.
+ /// \brief Create a new state with the specified CompoundLiteral binding.
+ /// \param CL the compound literal expression (the binding key)
+ /// \param LC the LocationContext of the binding
+ /// \param V the value to bind.
ProgramStateRef bindCompoundLiteral(const CompoundLiteralExpr *CL,
- const LocationContext *LC,
- SVal V) const;
+ const LocationContext *LC,
+ SVal V) const;
/// Create a new state by binding the value 'V' to the statement 'S' in the
/// state's environment.
@@ -226,18 +208,16 @@ public:
ProgramStateRef bindExprAndLocation(const Stmt *S,
const LocationContext *LCtx,
SVal location, SVal V) const;
-
- ProgramStateRef bindDecl(const VarRegion *VR, SVal V) const;
- ProgramStateRef bindDeclWithNoInit(const VarRegion *VR) const;
-
- ProgramStateRef bindLoc(Loc location, SVal V) const;
+ ProgramStateRef bindLoc(Loc location,
+ SVal V,
+ bool notifyChanges = true) const;
ProgramStateRef bindLoc(SVal location, SVal V) const;
ProgramStateRef bindDefault(SVal loc, SVal V) const;
- ProgramStateRef unbindLoc(Loc LV) const;
+ ProgramStateRef killBinding(Loc LV) const;
/// invalidateRegions - Returns the state with bindings for the given regions
/// cleared from the store. The regions are provided as a contiguous array
@@ -271,11 +251,8 @@ public:
/// Get the lvalue for an array index.
SVal getLValue(QualType ElementType, SVal Idx, SVal Base) const;
- const llvm::APSInt *getSymVal(SymbolRef sym) const;
-
/// Returns the SVal bound to the statement 'S' in the state's environment.
- SVal getSVal(const Stmt *S, const LocationContext *LCtx,
- bool useOnlyDirectBindings = false) const;
+ SVal getSVal(const Stmt *S, const LocationContext *LCtx) const;
SVal getSValAsScalarOrLoc(const Stmt *Ex, const LocationContext *LCtx) const;
@@ -469,7 +446,7 @@ public:
StoreManagerCreator CreateStoreManager,
ConstraintManagerCreator CreateConstraintManager,
llvm::BumpPtrAllocator& alloc,
- SubEngine &subeng);
+ SubEngine *subeng);
~ProgramStateManager();
@@ -481,9 +458,6 @@ public:
BasicValueFactory &getBasicVals() {
return svalBuilder->getBasicValueFactory();
}
- const BasicValueFactory& getBasicVals() const {
- return svalBuilder->getBasicValueFactory();
- }
SValBuilder &getSValBuilder() {
return *svalBuilder;
@@ -515,10 +489,6 @@ public:
const StackFrameContext *LCtx,
SymbolReaper& SymReaper);
- /// Marshal a new state for the callee in another translation unit.
- /// 'state' is owned by the caller's engine.
- ProgramStateRef MarshalState(ProgramStateRef state, const StackFrameContext *L);
-
public:
SVal ArrayToPointer(Loc Array) {
@@ -617,10 +587,6 @@ public:
return ProgramStateTrait<T>::MakeContext(p);
}
- const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) {
- return ConstraintMgr->getSymVal(St, sym);
- }
-
void EndPath(ProgramStateRef St) {
ConstraintMgr->EndPath(St);
}
@@ -631,6 +597,10 @@ public:
// Out-of-line method definitions for ProgramState.
//===----------------------------------------------------------------------===//
+inline ConstraintManager &ProgramState::getConstraintManager() const {
+ return stateMgr->getConstraintManager();
+}
+
inline const VarRegion* ProgramState::getRegion(const VarDecl *D,
const LocationContext *LC) const
{
@@ -695,15 +665,10 @@ inline SVal ProgramState::getLValue(QualType ElementType, SVal Idx, SVal Base) c
return UnknownVal();
}
-inline const llvm::APSInt *ProgramState::getSymVal(SymbolRef sym) const {
- return getStateManager().getSymVal(this, sym);
-}
-
-inline SVal ProgramState::getSVal(const Stmt *Ex, const LocationContext *LCtx,
- bool useOnlyDirectBindings) const{
+inline SVal ProgramState::getSVal(const Stmt *Ex,
+ const LocationContext *LCtx) const{
return Env.getSVal(EnvironmentEntry(Ex, LCtx),
- *getStateManager().svalBuilder,
- useOnlyDirectBindings);
+ *getStateManager().svalBuilder);
}
inline SVal
@@ -821,7 +786,7 @@ public:
bool scan(const SymExpr *sym);
};
-} // end GR namespace
+} // end ento namespace
} // end clang namespace
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
index 1c7bedb..ea2a852 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
@@ -31,6 +31,26 @@ namespace clang {
namespace ento {
template <typename T> struct ProgramStatePartialTrait;
+ /// Declares a program state trait for type \p Type called \p Name, and
+ /// introduces a typedef named \c NameTy.
+ /// The macro should not be used inside namespaces, or for traits that must
+ /// be accessible from more than one translation unit.
+ #define REGISTER_TRAIT_WITH_PROGRAMSTATE(Name, Type) \
+ namespace { \
+ class Name {}; \
+ typedef Type Name ## Ty; \
+ } \
+ namespace clang { \
+ namespace ento { \
+ template <> \
+ struct ProgramStateTrait<Name> \
+ : public ProgramStatePartialTrait<Name ## Ty> { \
+ static void *GDMIndex() { static int Index; return &Index; } \
+ }; \
+ } \
+ }
+
+
// Partial-specialization for ImmutableMap.
template <typename Key, typename Data, typename Info>
@@ -71,6 +91,15 @@ namespace ento {
}
};
+ /// Helper for registering a map trait.
+ ///
+ /// If the map type were written directly in the invocation of
+ /// REGISTER_TRAIT_WITH_PROGRAMSTATE, the comma in the template arguments
+ /// would be treated as a macro argument separator, which is wrong.
+ /// This allows the user to specify a map type in a way that the preprocessor
+ /// can deal with.
+ #define CLANG_ENTO_PROGRAMSTATE_MAP(Key, Value) llvm::ImmutableMap<Key, Value>
+
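A sketch of the two macros together; VisitCounts is a hypothetical trait name:

    REGISTER_TRAIT_WITH_PROGRAMSTATE(VisitCounts,
                                     CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef,
                                                                 unsigned))

    // Later, inside a checker:
    //   const unsigned *Count = State->get<VisitCounts>(Sym);
    //   ProgramStateRef NewState =
    //       State->set<VisitCounts>(Sym, Count ? *Count + 1 : 1);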
// Partial-specialization for ImmutableSet.
@@ -113,6 +142,7 @@ namespace ento {
}
};
+
// Partial-specialization for ImmutableList.
template <typename T>
@@ -150,6 +180,7 @@ namespace ento {
delete (typename data_type::Factory*) Ctx;
}
};
+
// Partial specialization for bool.
template <> struct ProgramStatePartialTrait<bool> {
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index 83c3a56..5d72e73 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -72,7 +72,7 @@ public:
virtual ~SValBuilder() {}
bool haveSameType(const SymExpr *Sym1, const SymExpr *Sym2) {
- return haveSameType(Sym1->getType(Context), Sym2->getType(Context));
+ return haveSameType(Sym1->getType(), Sym2->getType());
}
bool haveSameType(QualType Ty1, QualType Ty2) {
@@ -142,19 +142,19 @@ public:
// Forwarding methods to SymbolManager.
- const SymbolConjured* getConjuredSymbol(const Stmt *stmt,
- const LocationContext *LCtx,
- QualType type,
- unsigned visitCount,
- const void *symbolTag = 0) {
- return SymMgr.getConjuredSymbol(stmt, LCtx, type, visitCount, symbolTag);
+ const SymbolConjured* conjureSymbol(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount,
+ const void *symbolTag = 0) {
+ return SymMgr.conjureSymbol(stmt, LCtx, type, visitCount, symbolTag);
}
- const SymbolConjured* getConjuredSymbol(const Expr *expr,
- const LocationContext *LCtx,
- unsigned visitCount,
- const void *symbolTag = 0) {
- return SymMgr.getConjuredSymbol(expr, LCtx, visitCount, symbolTag);
+ const SymbolConjured* conjureSymbol(const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned visitCount,
+ const void *symbolTag = 0) {
+ return SymMgr.conjureSymbol(expr, LCtx, visitCount, symbolTag);
}
/// Construct an SVal representing '0' for the specified type.
@@ -169,20 +169,20 @@ public:
/// The advantage of symbols derived/built from other symbols is that we
/// preserve the relation between related (or even equivalent) expressions, so
/// conjured symbols should be used sparingly.
- DefinedOrUnknownSVal getConjuredSymbolVal(const void *symbolTag,
- const Expr *expr,
- const LocationContext *LCtx,
- unsigned count);
- DefinedOrUnknownSVal getConjuredSymbolVal(const void *symbolTag,
- const Expr *expr,
- const LocationContext *LCtx,
- QualType type,
- unsigned count);
+ DefinedOrUnknownSVal conjureSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned count);
+ DefinedOrUnknownSVal conjureSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned count);
- DefinedOrUnknownSVal getConjuredSymbolVal(const Stmt *stmt,
- const LocationContext *LCtx,
- QualType type,
- unsigned visitCount);
+ DefinedOrUnknownSVal conjureSymbolVal(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount);
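A sketch of the renamed API in use; blockCount and CE are assumed to come from the surrounding engine code:

    DefinedOrUnknownSVal V =
        SVB.conjureSymbolVal(/*symbolTag=*/0, CE, LCtx, blockCount);
    ProgramStateRef NewState = State->BindExpr(CE, LCtx, V);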
/// \brief Conjure a symbol representing heap allocated memory region.
///
/// Note, the expression should represent a location.
@@ -227,7 +227,7 @@ public:
BasicVals.getValue(integer->getValue(),
integer->getType()->isUnsignedIntegerOrEnumerationType()));
}
-
+
nonloc::ConcreteInt makeBoolVal(const ObjCBoolLiteralExpr *boolean) {
return makeTruthVal(boolean->getValue(), boolean->getType());
}
@@ -262,11 +262,6 @@ public:
BasicVals.getIntWithPtrWidth(integer, isUnsigned));
}
- NonLoc makeIntVal(uint64_t integer, unsigned bitWidth, bool isUnsigned) {
- return nonloc::ConcreteInt(
- BasicVals.getValue(integer, bitWidth, isUnsigned));
- }
-
NonLoc makeLocAsInteger(Loc loc, unsigned bits) {
return nonloc::LocAsInteger(BasicVals.getPersistentSValWithData(loc, bits));
}
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index e0b5f64..c2134cf 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -154,9 +154,6 @@ public:
SymExpr::symbol_iterator symbol_end() const {
return SymExpr::symbol_end();
}
-
- // Implement isa<T> support.
- static inline bool classof(const SVal*) { return true; }
};
@@ -257,7 +254,7 @@ public:
namespace nonloc {
-enum Kind { ConcreteIntKind, SymbolValKind, SymExprValKind,
+enum Kind { ConcreteIntKind, SymbolValKind,
LocAsIntegerKind, CompoundValKind, LazyCompoundValKind };
/// \brief Represents symbolic expression.
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index 138a590..979546b 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -25,6 +25,7 @@ namespace clang {
class Stmt;
class Expr;
class ObjCIvarDecl;
+class CXXBasePath;
class StackFrameContext;
namespace ento {
@@ -67,15 +68,26 @@ public:
virtual StoreRef Bind(Store store, Loc loc, SVal val) = 0;
virtual StoreRef BindDefault(Store store, const MemRegion *R, SVal V);
- virtual StoreRef Remove(Store St, Loc L) = 0;
- /// BindCompoundLiteral - Return the store that has the bindings currently
- /// in 'store' plus the bindings for the CompoundLiteral. 'R' is the region
- /// for the compound literal and 'BegInit' and 'EndInit' represent an
- /// array of initializer values.
- virtual StoreRef BindCompoundLiteral(Store store,
- const CompoundLiteralExpr *cl,
- const LocationContext *LC, SVal v) = 0;
+ /// \brief Create a new store with the specified binding removed.
+ /// \param ST the original store that is the basis for the new store.
+ /// \param L the location whose binding should be removed.
+ virtual StoreRef killBinding(Store ST, Loc L) = 0;
+
+ /// \brief Create a new store that binds a value to a compound literal.
+ ///
+ /// \param ST The original store whose bindings are the basis for the new
+ /// store.
+ ///
+ /// \param CL The compound literal to bind (the binding key).
+ ///
+ /// \param LC The LocationContext for the binding.
+ ///
+ /// \param V The value to bind to the compound literal.
+ virtual StoreRef bindCompoundLiteral(Store ST,
+ const CompoundLiteralExpr *CL,
+ const LocationContext *LC,
+ SVal V) = 0;
/// getInitialStore - Returns the initial "empty" store representing the
/// value bindings upon entry to an analyzed function.
@@ -114,11 +126,15 @@ public:
/// conversions between arrays and pointers.
virtual SVal ArrayToPointer(Loc Array) = 0;
- /// Evaluates DerivedToBase casts.
- SVal evalDerivedToBase(SVal derived, const CastExpr *Cast);
+ /// Evaluates a chain of derived-to-base casts through the path specified in
+ /// \p Cast.
+ SVal evalDerivedToBase(SVal Derived, const CastExpr *Cast);
+
+ /// Evaluates a chain of derived-to-base casts through the specified path.
+ SVal evalDerivedToBase(SVal Derived, const CXXBasePath &CastPath);
/// Evaluates a derived-to-base cast through a single level of derivation.
- virtual SVal evalDerivedToBase(SVal derived, QualType derivedPtrType) = 0;
+ SVal evalDerivedToBase(SVal Derived, QualType DerivedPtrType);
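A sketch of the reworked interface: one call now walks the entire base path recorded on the cast (DerivedVal and CastE are hypothetical):

    SVal BaseVal = StoreMgr.evalDerivedToBase(DerivedVal, CastE);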
/// \brief Evaluates C++ dynamic_cast cast.
/// The callback may result in the following 3 scenarios:
@@ -128,8 +144,7 @@ public:
/// enough info to determine if the cast will succeed at run time).
/// The function returns an SVal representing the derived class; it's
/// valid only if Failed flag is set to false.
- virtual SVal evalDynamicCast(SVal base, QualType derivedPtrType,
- bool &Failed) = 0;
+ SVal evalDynamicCast(SVal Base, QualType DerivedPtrType, bool &Failed);
const ElementRegion *GetElementZeroRegion(const MemRegion *R, QualType T);
@@ -141,10 +156,6 @@ public:
virtual StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
SymbolReaper& SymReaper) = 0;
- virtual StoreRef BindDecl(Store store, const VarRegion *VR, SVal initVal) = 0;
-
- virtual StoreRef BindDeclWithNoInit(Store store, const VarRegion *VR) = 0;
-
virtual bool includedInBindings(Store store,
const MemRegion *region) const = 0;
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
index 68b81f1..1e71077 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
@@ -60,7 +60,8 @@ public:
/// SubEngine is expected to populate dstNodes with new nodes representing
/// updated analysis state, or generate no nodes at all if it doesn't.
virtual void processCFGBlockEntrance(const BlockEdge &L,
- NodeBuilderWithSinks &nodeBuilder) = 0;
+ NodeBuilderWithSinks &nodeBuilder,
+ ExplodedNode *Pred) = 0;
/// Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a branch condition.
@@ -81,7 +82,8 @@ public:
/// Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
- virtual void processEndOfFunction(NodeBuilderContext& BC) = 0;
+ virtual void processEndOfFunction(NodeBuilderContext& BC,
+ ExplodedNode *Pred) = 0;
// Generate the entry node of the callee.
virtual void processCallEnter(CallEnter CE, ExplodedNode *Pred) = 0;
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
index 5d27f86..873f773 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -65,12 +65,9 @@ public:
virtual void dumpToStream(raw_ostream &os) const {}
- virtual QualType getType(ASTContext&) const = 0;
+ virtual QualType getType() const = 0;
virtual void Profile(llvm::FoldingSetNodeID& profile) = 0;
- // Implement isa<T> support.
- static inline bool classof(const SymExpr*) { return true; }
-
/// \brief Iterator over symbols that the current symbol depends on.
///
/// For SymbolData, it's the symbol itself; for expressions, it's the
@@ -144,7 +141,7 @@ public:
virtual void dumpToStream(raw_ostream &os) const;
- QualType getType(ASTContext&) const;
+ QualType getType() const;
// Implement isa<T> support.
static inline bool classof(const SymExpr *SE) {
@@ -173,7 +170,7 @@ public:
unsigned getCount() const { return Count; }
const void *getTag() const { return SymbolTag; }
- QualType getType(ASTContext&) const;
+ QualType getType() const;
virtual void dumpToStream(raw_ostream &os) const;
@@ -211,7 +208,7 @@ public:
SymbolRef getParentSymbol() const { return parentSymbol; }
const TypedValueRegion *getRegion() const { return R; }
- QualType getType(ASTContext&) const;
+ QualType getType() const;
virtual void dumpToStream(raw_ostream &os) const;
@@ -244,7 +241,7 @@ public:
const SubRegion *getRegion() const { return R; }
- QualType getType(ASTContext&) const;
+ QualType getType() const;
virtual void dumpToStream(raw_ostream &os) const;
@@ -283,7 +280,7 @@ public:
unsigned getCount() const { return Count; }
const void *getTag() const { return Tag; }
- QualType getType(ASTContext&) const;
+ QualType getType() const;
virtual void dumpToStream(raw_ostream &os) const;
@@ -320,7 +317,7 @@ public:
SymbolCast(const SymExpr *In, QualType From, QualType To) :
SymExpr(CastSymbolKind), Operand(In), FromTy(From), ToTy(To) { }
- QualType getType(ASTContext &C) const { return ToTy; }
+ QualType getType() const { return ToTy; }
const SymExpr *getOperand() const { return Operand; }
@@ -358,7 +355,7 @@ public:
// FIXME: We probably need to make this out-of-line to avoid redundant
// generation of virtual functions.
- QualType getType(ASTContext &C) const { return T; }
+ QualType getType() const { return T; }
BinaryOperator::Opcode getOpcode() const { return Op; }
@@ -399,7 +396,7 @@ public:
const SymExpr *rhs, QualType t)
: SymExpr(IntSymKind), LHS(lhs), Op(op), RHS(rhs), T(t) {}
- QualType getType(ASTContext &C) const { return T; }
+ QualType getType() const { return T; }
BinaryOperator::Opcode getOpcode() const { return Op; }
@@ -446,7 +443,7 @@ public:
// FIXME: We probably need to make this out-of-line to avoid redundant
// generation of virtual functions.
- QualType getType(ASTContext &C) const { return T; }
+ QualType getType() const { return T; }
virtual void dumpToStream(raw_ostream &os) const;
@@ -495,18 +492,17 @@ public:
/// \brief Make a unique symbol for MemRegion R according to its kind.
const SymbolRegionValue* getRegionValueSymbol(const TypedValueRegion* R);
- const SymbolConjured* getConjuredSymbol(const Stmt *E,
- const LocationContext *LCtx,
- QualType T,
- unsigned VisitCount,
- const void *SymbolTag = 0);
+ const SymbolConjured* conjureSymbol(const Stmt *E,
+ const LocationContext *LCtx,
+ QualType T,
+ unsigned VisitCount,
+ const void *SymbolTag = 0);
- const SymbolConjured* getConjuredSymbol(const Expr *E,
- const LocationContext *LCtx,
- unsigned VisitCount,
- const void *SymbolTag = 0) {
- return getConjuredSymbol(E, LCtx, E->getType(),
- VisitCount, SymbolTag);
+ const SymbolConjured* conjureSymbol(const Expr *E,
+ const LocationContext *LCtx,
+ unsigned VisitCount,
+ const void *SymbolTag = 0) {
+ return conjureSymbol(E, LCtx, E->getType(), VisitCount, SymbolTag);
}
const SymbolDerived *getDerivedSymbol(SymbolRef parentSymbol,
@@ -541,7 +537,7 @@ public:
const SymExpr *rhs, QualType t);
QualType getType(const SymExpr *SE) const {
- return SE->getType(Ctx);
+ return SE->getType();
}
/// \brief Add artificial symbol dependency.
@@ -584,9 +580,11 @@ public:
///
/// If the statement is NULL, everything in this and parent contexts is
/// considered live.
- SymbolReaper(const LocationContext *ctx, const Stmt *s, SymbolManager& symmgr,
+ /// If the stack frame context is NULL, everything on the stack is considered
+ /// dead.
+ SymbolReaper(const StackFrameContext *Ctx, const Stmt *s, SymbolManager& symmgr,
StoreManager &storeMgr)
- : LCtx(ctx->getCurrentStackFrame()), Loc(s), SymMgr(symmgr),
+ : LCtx(Ctx), Loc(s), SymMgr(symmgr),
reapedStore(0, storeMgr) {}
~SymbolReaper() {}
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h
index 53205d3..c274cea 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h
@@ -22,6 +22,8 @@ namespace ento {
/// The GDM component containing the tainted root symbols. We lazily infer the
/// taint of the dependent symbols. Currently, this is a map from a symbol to
/// tag kind. TODO: Should support multiple tag kinds.
+// FIXME: This does not use the nice trait macros because it must be accessible
+// from multiple translation units.
struct TaintMap {};
typedef llvm::ImmutableMap<SymbolRef, TaintTagType> TaintMapImpl;
template<> struct ProgramStateTrait<TaintMap>
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/CommandLineClangTool.h b/contrib/llvm/tools/clang/include/clang/Tooling/CommandLineClangTool.h
deleted file mode 100644
index c29c302..0000000
--- a/contrib/llvm/tools/clang/include/clang/Tooling/CommandLineClangTool.h
+++ /dev/null
@@ -1,80 +0,0 @@
-//===- CommandLineClangTool.h - command-line clang tools driver -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the CommandLineClangTool class used to run clang
-// tools as separate command-line applications with a consistent common
-// interface for handling compilation database and input files.
-//
-// It provides a common subset of command-line options, common algorithm
-// for locating a compilation database and source files, and help messages
-// for the basic command-line interface.
-//
-// It creates a CompilationDatabase, initializes a ClangTool and runs a
-// user-specified FrontendAction over all TUs in which the given files are
-// compiled.
-//
-// This class uses the Clang Tooling infrastructure, see
-// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html
-// for details on setting it up with LLVM source tree.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMANDLINECLANGTOOL_H
-#define LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMANDLINECLANGTOOL_H
-
-#include "llvm/Support/CommandLine.h"
-#include "clang/Tooling/CompilationDatabase.h"
-
-namespace clang {
-
-namespace tooling {
-
-class CompilationDatabase;
-class FrontendActionFactory;
-
-/// \brief A common driver for command-line Clang tools.
-///
-/// Parses a common subset of command-line arguments, locates and loads a
-/// compilation commands database, runs a tool with user-specified action. It
-/// also contains a help message for the common command-line options.
-/// An example of usage:
-/// @code
-/// int main(int argc, const char **argv) {
-/// CommandLineClangTool Tool;
-/// cl::extrahelp MoreHelp("\nMore help text...");
-/// Tool.initialize(argc, argv);
-/// return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
-/// }
-/// @endcode
-///
-class CommandLineClangTool {
-public:
- /// Sets up command-line options and help messages.
- /// Add your own help messages after constructing this tool.
- CommandLineClangTool();
-
- /// Parses command-line, initializes a compilation database.
- /// This method exits program in case of error.
- void initialize(int argc, const char **argv);
-
- /// Runs a clang tool with an action created by \c ActionFactory.
- int run(FrontendActionFactory *ActionFactory);
-
-private:
- llvm::OwningPtr<CompilationDatabase> Compilations;
- llvm::cl::opt<std::string> BuildPath;
- llvm::cl::list<std::string> SourcePaths;
- llvm::cl::extrahelp MoreHelp;
-};
-
-} // namespace tooling
-
-} // namespace clang
-
-#endif // LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMANDLINECLANGTOOL_H
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/CommonOptionsParser.h b/contrib/llvm/tools/clang/include/clang/Tooling/CommonOptionsParser.h
new file mode 100644
index 0000000..a1bad12
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/CommonOptionsParser.h
@@ -0,0 +1,89 @@
+//===- CommonOptionsParser.h - common options for clang tools -*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CommonOptionsParser class used to parse common
+// command-line options for clang tools, so that they can be run as separate
+// command-line applications with a consistent common interface for handling
+// the compilation database and input files.
+//
+// It provides a common subset of command-line options, common algorithm
+// for locating a compilation database and source files, and help messages
+// for the basic command-line interface.
+//
+// It creates a CompilationDatabase and reads common command-line options.
+//
+// This class uses the Clang Tooling infrastructure, see
+// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html
+// for details on setting it up with LLVM source tree.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMONOPTIONSPARSER_H
+#define LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMONOPTIONSPARSER_H
+
+#include "clang/Tooling/CompilationDatabase.h"
+
+namespace clang {
+namespace tooling {
+/// \brief A parser for options common to all command-line Clang tools.
+///
+/// Parses a common subset of command-line arguments, locates and loads a
+/// compilation commands database and runs a tool with a user-specified action. It
+/// also contains a help message for the common command-line options.
+///
+/// An example of usage:
+/// \code
+/// #include "clang/Frontend/FrontendActions.h"
+/// #include "clang/Tooling/CommonOptionsParser.h"
+/// #include "llvm/Support/CommandLine.h"
+///
+/// using namespace clang::tooling;
+/// using namespace llvm;
+///
+/// static cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage);
+/// static cl::extrahelp MoreHelp("\nMore help text...");
+/// static cl::opt<bool> YourOwnOption(...);
+/// ...
+///
+/// int main(int argc, const char **argv) {
+/// CommonOptionsParser OptionsParser(argc, argv);
+/// ClangTool Tool(OptionsParser.GetCompilations(),
+/// OptionsParser.GetSourcePathList());
+/// return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
+/// }
+/// \endcode
+class CommonOptionsParser {
+public:
+ /// \brief Parses the command line and initializes a compilation database.
+ /// This constructor can change argc and argv contents, e.g. it consumes
+ /// command-line options used for creating a FixedCompilationDatabase.
+ /// It exits the program in case of error.
+ CommonOptionsParser(int &argc, const char **argv);
+
+ /// Returns a reference to the loaded compilations database.
+ CompilationDatabase &GetCompilations() {
+ return *Compilations;
+ }
+
+ /// Returns a list of source file paths to process.
+ std::vector<std::string> GetSourcePathList() {
+ return SourcePathList;
+ }
+
+ static const char *const HelpMessage;
+
+private:
+ llvm::OwningPtr<CompilationDatabase> Compilations;
+ std::vector<std::string> SourcePathList;
+};
+
+} // namespace tooling
+} // namespace clang
+
+#endif // LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMONOPTIONSPARSER_H
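
A complete minimal tool built on this parser, as a sketch against the headers
added in this patch (SyntaxOnlyAction stands in for any real action; the file
name is made up):

// minimal_tool.cpp - sketch of a driver built on CommonOptionsParser.
#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/CommonOptionsParser.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/CommandLine.h"

using namespace clang::tooling;

// Hook the common options help text into -help output.
static llvm::cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage);

int main(int argc, const char **argv) {
  CommonOptionsParser OptionsParser(argc, argv); // exits on error
  ClangTool Tool(OptionsParser.GetCompilations(),
                 OptionsParser.GetSourcePathList());
  // Run a syntax-only check over every requested source file.
  return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
}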
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
index f78ffae..a40bffe 100644
--- a/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
@@ -31,12 +31,9 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/YAMLParser.h"
+
#include <string>
#include <vector>
@@ -111,6 +108,27 @@ public:
virtual std::vector<std::string> getAllFiles() const = 0;
};
+/// \brief Interface for compilation database plugins.
+///
+/// A compilation database plugin allows the user to register custom compilation
+/// databases that are picked up as the compilation database if the corresponding
+/// library is linked in. To register a plugin, declare a static variable like:
+///
+/// \code
+/// static CompilationDatabasePluginRegistry::Add<MyDatabasePlugin>
+/// X("my-compilation-database", "Reads my own compilation database");
+/// \endcode
+class CompilationDatabasePlugin {
+public:
+ virtual ~CompilationDatabasePlugin();
+
+ /// \brief Loads a compilation database from a build directory.
+ ///
+ /// \see CompilationDatabase::loadFromDirectory().
+ virtual CompilationDatabase *loadFromDirectory(StringRef Directory,
+ std::string &ErrorMessage) = 0;
+};
+
/// \brief A compilation database that returns a single compile command line.
///
/// Useful when we want a tool to behave more like a compiler invocation.
@@ -169,75 +187,6 @@ private:
std::vector<CompileCommand> CompileCommands;
};
-/// \brief A JSON based compilation database.
-///
-/// JSON compilation database files must contain a list of JSON objects which
-/// provide the command lines in the attributes 'directory', 'command' and
-/// 'file':
-/// [
-/// { "directory": "<working directory of the compile>",
-/// "command": "<compile command line>",
-/// "file": "<path to source file>"
-/// },
-/// ...
-/// ]
-/// Each object entry defines one compile action. The specified file is
-/// considered to be the main source file for the translation unit.
-///
-/// JSON compilation databases can for example be generated in CMake projects
-/// by setting the flag -DCMAKE_EXPORT_COMPILE_COMMANDS.
-class JSONCompilationDatabase : public CompilationDatabase {
-public:
- /// \brief Loads a JSON compilation database from the specified file.
- ///
- /// Returns NULL and sets ErrorMessage if the database could not be
- /// loaded from the given file.
- static JSONCompilationDatabase *loadFromFile(StringRef FilePath,
- std::string &ErrorMessage);
-
- /// \brief Loads a JSON compilation database from a data buffer.
- ///
- /// Returns NULL and sets ErrorMessage if the database could not be loaded.
- static JSONCompilationDatabase *loadFromBuffer(StringRef DatabaseString,
- std::string &ErrorMessage);
-
- /// \brief Returns all compile comamnds in which the specified file was
- /// compiled.
- ///
- /// FIXME: Currently FilePath must be an absolute path inside the
- /// source directory which does not have symlinks resolved.
- virtual std::vector<CompileCommand> getCompileCommands(
- StringRef FilePath) const;
-
- /// \brief Returns the list of all files available in the compilation database.
- ///
- /// These are the 'file' entries of the JSON objects.
- virtual std::vector<std::string> getAllFiles() const;
-
-private:
- /// \brief Constructs a JSON compilation database on a memory buffer.
- JSONCompilationDatabase(llvm::MemoryBuffer *Database)
- : Database(Database), YAMLStream(Database->getBuffer(), SM) {}
-
- /// \brief Parses the database file and creates the index.
- ///
- /// Returns whether parsing succeeded. Sets ErrorMessage if parsing
- /// failed.
- bool parse(std::string &ErrorMessage);
-
- // Tuple (directory, commandline) where 'commandline' pointing to the
- // corresponding nodes in the YAML stream.
- typedef std::pair<llvm::yaml::ScalarNode*,
- llvm::yaml::ScalarNode*> CompileCommandRef;
-
- // Maps file paths to the compile command lines for that file.
- llvm::StringMap< std::vector<CompileCommandRef> > IndexByFile;
-
- llvm::OwningPtr<llvm::MemoryBuffer> Database;
- llvm::SourceMgr SM;
- llvm::yaml::Stream YAMLStream;
-};
-
} // end namespace tooling
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabasePluginRegistry.h b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabasePluginRegistry.h
new file mode 100644
index 0000000..84fcd24
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabasePluginRegistry.h
@@ -0,0 +1,27 @@
+//===--- CompilationDatabasePluginRegistry.h - ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_COMPILATION_DATABASE_PLUGIN_REGISTRY_H
+#define LLVM_CLANG_TOOLING_COMPILATION_DATABASE_PLUGIN_REGISTRY_H
+
+#include "clang/Tooling/CompilationDatabase.h"
+#include "llvm/Support/Registry.h"
+
+namespace clang {
+namespace tooling {
+
+class CompilationDatabasePlugin;
+
+typedef llvm::Registry<CompilationDatabasePlugin>
+ CompilationDatabasePluginRegistry;
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_COMPILATION_DATABASE_PLUGIN_REGISTRY_H
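
Registering a plugin with this registry follows the pattern promised in the
CompilationDatabasePlugin comment earlier in this patch. The sketch below is
hypothetical (MyDatabasePlugin exists nowhere in the tree) and only shows the
shape of the contract:

#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
#include <string>

namespace {
// Hypothetical plugin that declines every directory.
class MyDatabasePlugin : public clang::tooling::CompilationDatabasePlugin {
  virtual clang::tooling::CompilationDatabase *
  loadFromDirectory(clang::StringRef Directory, std::string &ErrorMessage) {
    ErrorMessage = "my-compilation-database: nothing to load";
    return 0; // NULL with ErrorMessage set means the load failed here.
  }
};
} // end anonymous namespace

// Linking in this object file makes the plugin discoverable.
static clang::tooling::CompilationDatabasePluginRegistry::Add<MyDatabasePlugin>
X("my-compilation-database", "Reads my own compilation database");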
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/FileMatchTrie.h b/contrib/llvm/tools/clang/include/clang/Tooling/FileMatchTrie.h
new file mode 100644
index 0000000..ff988be
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/FileMatchTrie.h
@@ -0,0 +1,90 @@
+//===--- FileMatchTrie.h - --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a match trie to find the matching file in a compilation
+// database based on a given path in the presence of symlinks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_FILE_MATCH_TRIE_H
+#define LLVM_CLANG_TOOLING_FILE_MATCH_TRIE_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringRef.h"
+
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace tooling {
+
+struct PathComparator {
+ virtual ~PathComparator() {}
+ virtual bool equivalent(StringRef FileA, StringRef FileB) const = 0;
+};
+class FileMatchTrieNode;
+
+/// \brief A trie to efficiently match against the entries of the compilation
+/// database in order of matching suffix length.
+///
+/// When a clang tool is supposed to operate on a specific file, we have to
+/// find the corresponding file in the compilation database. Although entries
+/// in the compilation database are keyed by filename, a simple string match
+/// is insufficient because of symlinks. Commonly, a project hierarchy looks
+/// like this:
+/// /<project-root>/src/<path>/<somefile>.cc (used as input for the tool)
+/// /<project-root>/build/<symlink-to-src>/<path>/<somefile>.cc (stored in DB)
+///
+/// Furthermore, there might be symlinks inside the source folder or inside the
+/// database, so that the same source file is translated with different build
+/// options.
+///
+/// For a given input file, the \c FileMatchTrie finds its entries in order
+/// of matching suffix length. For each suffix length, there might be one or
+/// more entries in the database. For each of those entries, it calls
+/// \c llvm::sys::fs::equivalent() (injected as \c PathComparator). There might
+/// be zero or more entries with the same matching suffix length that are
+/// equivalent to the input file. Three cases are distinguished:
+/// 0 equivalent files: Continue with the next suffix length.
+/// 1 equivalent file: Best match found, return it.
+/// >1 equivalent files: Match is ambiguous, return error.
+class FileMatchTrie {
+public:
+ FileMatchTrie();
+
+ /// \brief Construct a new \c FileMatchTrie with the given \c PathComparator.
+ ///
+ /// The \c FileMatchTrie takes ownership of 'Comparator'. Used for testing.
+ FileMatchTrie(PathComparator* Comparator);
+
+ ~FileMatchTrie();
+
+ /// \brief Insert a new absolute path. Relative paths are ignored.
+ void insert(StringRef NewPath);
+
+ /// \brief Finds the corresponding file in this trie.
+ ///
+ /// Returns the file name stored in this trie that is equivalent to
+ /// 'FileName' according to 'Comparator', if it can be uniquely identified.
+ /// If there are no matches, an empty \c StringRef is returned. If there are
+ /// ambiguous matches, an empty \c StringRef is returned and a corresponding
+ /// message is written to 'Error'.
+ StringRef findEquivalent(StringRef FileName,
+ llvm::raw_ostream &Error) const;
+private:
+ FileMatchTrieNode *Root;
+ OwningPtr<PathComparator> Comparator;
+};
+
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_FILE_MATCH_TRIE_H
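
A sketch of the lookup flow described in the class comment; the paths are
illustrative, and the default constructor is assumed (per that comment) to
inject llvm::sys::fs::equivalent() as the comparator:

#include "clang/Tooling/FileMatchTrie.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

void lookupExample() {
  clang::tooling::FileMatchTrie Trie;
  // Absolute path as spelled in the compilation database.
  Trie.insert("/project/build/src-link/lib/foo.cc");

  std::string Error;
  llvm::raw_string_ostream ES(Error);
  // Path as handed to the tool; a symlinked spelling of the same file.
  llvm::StringRef Match =
      Trie.findEquivalent("/project/src/lib/foo.cc", ES);
  // Match holds the database spelling on success; it is empty when there is
  // no match, and also when the match is ambiguous (Error then explains why).
}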
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/JSONCompilationDatabase.h b/contrib/llvm/tools/clang/include/clang/Tooling/JSONCompilationDatabase.h
new file mode 100644
index 0000000..d62ab5c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/JSONCompilationDatabase.h
@@ -0,0 +1,107 @@
+//===--- JSONCompilationDatabase.h - ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The JSONCompilationDatabase finds compilation databases supplied as a file
+// 'compile_commands.json'.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_JSON_COMPILATION_DATABASE_H
+#define LLVM_CLANG_TOOLING_JSON_COMPILATION_DATABASE_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/FileMatchTrie.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/YAMLParser.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace tooling {
+
+/// \brief A JSON based compilation database.
+///
+/// JSON compilation database files must contain a list of JSON objects which
+/// provide the command lines in the attributes 'directory', 'command' and
+/// 'file':
+/// [
+/// { "directory": "<working directory of the compile>",
+/// "command": "<compile command line>",
+/// "file": "<path to source file>"
+/// },
+/// ...
+/// ]
+/// Each object entry defines one compile action. The specified file is
+/// considered to be the main source file for the translation unit.
+///
+/// JSON compilation databases can for example be generated in CMake projects
+/// by setting the flag -DCMAKE_EXPORT_COMPILE_COMMANDS.
+class JSONCompilationDatabase : public CompilationDatabase {
+public:
+ /// \brief Loads a JSON compilation database from the specified file.
+ ///
+ /// Returns NULL and sets ErrorMessage if the database could not be
+ /// loaded from the given file.
+ static JSONCompilationDatabase *loadFromFile(StringRef FilePath,
+ std::string &ErrorMessage);
+
+ /// \brief Loads a JSON compilation database from a data buffer.
+ ///
+ /// Returns NULL and sets ErrorMessage if the database could not be loaded.
+ static JSONCompilationDatabase *loadFromBuffer(StringRef DatabaseString,
+ std::string &ErrorMessage);
+
+ /// \brief Returns all compile commands in which the specified file was
+ /// compiled.
+ ///
+ /// FIXME: Currently FilePath must be an absolute path inside the
+ /// source directory which does not have symlinks resolved.
+ virtual std::vector<CompileCommand> getCompileCommands(
+ StringRef FilePath) const;
+
+ /// \brief Returns the list of all files available in the compilation database.
+ ///
+ /// These are the 'file' entries of the JSON objects.
+ virtual std::vector<std::string> getAllFiles() const;
+
+private:
+ /// \brief Constructs a JSON compilation database on a memory buffer.
+ JSONCompilationDatabase(llvm::MemoryBuffer *Database)
+ : Database(Database), YAMLStream(Database->getBuffer(), SM) {}
+
+ /// \brief Parses the database file and creates the index.
+ ///
+ /// Returns whether parsing succeeded. Sets ErrorMessage if parsing
+ /// failed.
+ bool parse(std::string &ErrorMessage);
+
+ // Tuple (directory, commandline) where 'commandline' points to the
+ // corresponding nodes in the YAML stream.
+ typedef std::pair<llvm::yaml::ScalarNode*,
+ llvm::yaml::ScalarNode*> CompileCommandRef;
+
+ // Maps file paths to the compile command lines for that file.
+ llvm::StringMap< std::vector<CompileCommandRef> > IndexByFile;
+
+ FileMatchTrie MatchTrie;
+
+ llvm::OwningPtr<llvm::MemoryBuffer> Database;
+ llvm::SourceMgr SM;
+ llvm::yaml::Stream YAMLStream;
+};
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_JSON_COMPILATION_DATABASE_H
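
A sketch of loading the format shown above from memory; the JSON string and
paths are invented, and error handling is reduced to a bail-out:

#include "clang/Tooling/JSONCompilationDatabase.h"
#include "llvm/ADT/OwningPtr.h"
#include <string>
#include <vector>

void loadExample() {
  const char *JSON =
      "[{\"directory\": \"/project/build\","
      "  \"command\": \"clang++ -c /project/src/foo.cc\","
      "  \"file\": \"/project/src/foo.cc\"}]";

  std::string ErrorMessage;
  llvm::OwningPtr<clang::tooling::JSONCompilationDatabase> Database(
      clang::tooling::JSONCompilationDatabase::loadFromBuffer(JSON,
                                                              ErrorMessage));
  if (!Database)
    return; // ErrorMessage describes the parse failure.

  // The 'file' entries of the JSON objects.
  std::vector<std::string> Files = Database->getAllFiles();
}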
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h b/contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h
index 0e42a0e..aaffc1a 100644
--- a/contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h
@@ -74,6 +74,7 @@ public:
StringRef getFilePath() const { return FilePath; }
unsigned getOffset() const { return Offset; }
unsigned getLength() const { return Length; }
+ StringRef getReplacementText() const { return ReplacementText; }
/// @}
/// \brief Applies the replacement on the Rewriter.
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h b/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
index e06705f..a03bcb1 100644
--- a/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
@@ -74,6 +74,14 @@ public:
template <typename T>
FrontendActionFactory *newFrontendActionFactory();
+/// \brief Called at the end of each source file when used with
+/// \c newFrontendActionFactory.
+class EndOfSourceFileCallback {
+public:
+ virtual ~EndOfSourceFileCallback() {}
+ virtual void run() = 0;
+};
+
/// \brief Returns a new FrontendActionFactory for any type that provides an
/// implementation of newASTConsumer().
///
@@ -87,7 +95,7 @@ FrontendActionFactory *newFrontendActionFactory();
/// newFrontendActionFactory(&Factory);
template <typename FactoryT>
inline FrontendActionFactory *newFrontendActionFactory(
- FactoryT *ConsumerFactory);
+ FactoryT *ConsumerFactory, EndOfSourceFileCallback *EndCallback = NULL);
/// \brief Runs (and deletes) the tool on 'Code' with the -fsyntax-only flag.
///
@@ -99,6 +107,19 @@ inline FrontendActionFactory *newFrontendActionFactory(
bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
const Twine &FileName = "input.cc");
+/// \brief Runs (and deletes) the tool on 'Code' with the -fsyntax-only flag and
+/// with additional other flags.
+///
+/// \param ToolAction The action to run over the code.
+/// \param Code C++ code.
+/// \param Args Additional flags to pass on.
+/// \param FileName The file name which 'Code' will be mapped as.
+///
+/// \return - True if 'ToolAction' was successfully executed.
+bool runToolOnCodeWithArgs(clang::FrontendAction *ToolAction, const Twine &Code,
+ const std::vector<std::string> &Args,
+ const Twine &FileName = "input.cc");
+
/// \brief Utility to run a FrontendAction in a single clang invocation.
class ToolInvocation {
public:
@@ -204,34 +225,45 @@ FrontendActionFactory *newFrontendActionFactory() {
template <typename FactoryT>
inline FrontendActionFactory *newFrontendActionFactory(
- FactoryT *ConsumerFactory) {
+ FactoryT *ConsumerFactory, EndOfSourceFileCallback *EndCallback) {
class FrontendActionFactoryAdapter : public FrontendActionFactory {
public:
- explicit FrontendActionFactoryAdapter(FactoryT *ConsumerFactory)
- : ConsumerFactory(ConsumerFactory) {}
+ explicit FrontendActionFactoryAdapter(FactoryT *ConsumerFactory,
+ EndOfSourceFileCallback *EndCallback)
+ : ConsumerFactory(ConsumerFactory), EndCallback(EndCallback) {}
virtual clang::FrontendAction *create() {
- return new ConsumerFactoryAdaptor(ConsumerFactory);
+ return new ConsumerFactoryAdaptor(ConsumerFactory, EndCallback);
}
private:
class ConsumerFactoryAdaptor : public clang::ASTFrontendAction {
public:
- ConsumerFactoryAdaptor(FactoryT *ConsumerFactory)
- : ConsumerFactory(ConsumerFactory) {}
+ ConsumerFactoryAdaptor(FactoryT *ConsumerFactory,
+ EndOfSourceFileCallback *EndCallback)
+ : ConsumerFactory(ConsumerFactory), EndCallback(EndCallback) {}
clang::ASTConsumer *CreateASTConsumer(clang::CompilerInstance &,
llvm::StringRef) {
return ConsumerFactory->newASTConsumer();
}
+ protected:
+ virtual void EndSourceFileAction() {
+ if (EndCallback != NULL)
+ EndCallback->run();
+ clang::ASTFrontendAction::EndSourceFileAction();
+ }
+
private:
FactoryT *ConsumerFactory;
+ EndOfSourceFileCallback *EndCallback;
};
FactoryT *ConsumerFactory;
+ EndOfSourceFileCallback *EndCallback;
};
- return new FrontendActionFactoryAdapter(ConsumerFactory);
+ return new FrontendActionFactoryAdapter(ConsumerFactory, EndCallback);
}
/// \brief Returns the absolute path of \c File, by prepending it with
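
The net effect of the Tooling.h hunks above is an optional per-file hook
threaded through the consumer-factory adaptor and invoked from
EndSourceFileAction(). A sketch of its use; MyConsumerFactory is hypothetical
and only needs to provide newASTConsumer():

#include "clang/Tooling/Tooling.h"

class FlushResults : public clang::tooling::EndOfSourceFileCallback {
public:
  // Called once per translation unit, from the adaptor's
  // EndSourceFileAction(), before the base class action runs.
  virtual void run() {
    // e.g. emit or reset per-file state gathered by the consumer.
  }
};

// MyConsumerFactory Factory;   // hypothetical; provides newASTConsumer()
// FlushResults Callback;
// Tool.run(clang::tooling::newFrontendActionFactory(&Factory, &Callback));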
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
index f291dec..b57d996 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
@@ -14,7 +14,7 @@
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/Utils.h"
#include "clang/AST/ASTConsumer.h"
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Basic/DiagnosticCategories.h"
#include "clang/Lex/Preprocessor.h"
@@ -42,7 +42,7 @@ bool CapturedDiagList::clearDiagnostic(ArrayRef<unsigned> IDs,
while (I != List.end() && I->getLevel() == DiagnosticsEngine::Note)
++I;
// Clear the diagnostic and any notes following it.
- List.erase(eraseS, I);
+ I = List.erase(eraseS, I);
continue;
}
@@ -147,54 +147,10 @@ public:
} // end anonymous namespace
-static inline StringRef SimulatorVersionDefineName() {
- return "__IPHONE_OS_VERSION_MIN_REQUIRED=";
-}
-
-/// \brief Parse the simulator version define:
-/// __IPHONE_OS_VERSION_MIN_REQUIRED=([0-9])([0-9][0-9])([0-9][0-9])
-// and return the grouped values as integers, e.g:
-// __IPHONE_OS_VERSION_MIN_REQUIRED=40201
-// will return Major=4, Minor=2, Micro=1.
-static bool GetVersionFromSimulatorDefine(StringRef define,
- unsigned &Major, unsigned &Minor,
- unsigned &Micro) {
- assert(define.startswith(SimulatorVersionDefineName()));
- StringRef name, version;
- llvm::tie(name, version) = define.split('=');
- if (version.empty())
- return false;
- std::string verstr = version.str();
- char *end;
- unsigned num = (unsigned) strtol(verstr.c_str(), &end, 10);
- if (*end != '\0')
- return false;
- Major = num / 10000;
- num = num % 10000;
- Minor = num / 100;
- Micro = num % 100;
- return true;
-}
-
static bool HasARCRuntime(CompilerInvocation &origCI) {
// This duplicates some functionality from Darwin::AddDeploymentTarget
// but this function is well defined, so keep it decoupled from the driver
// and avoid unrelated complications.
-
- for (unsigned i = 0, e = origCI.getPreprocessorOpts().Macros.size();
- i != e; ++i) {
- StringRef define = origCI.getPreprocessorOpts().Macros[i].first;
- bool isUndef = origCI.getPreprocessorOpts().Macros[i].second;
- if (isUndef)
- continue;
- if (!define.startswith(SimulatorVersionDefineName()))
- continue;
- unsigned Major = 0, Minor = 0, Micro = 0;
- if (GetVersionFromSimulatorDefine(define, Major, Minor, Micro) &&
- Major < 10 && Minor < 100 && Micro < 100)
- return Major >= 5;
- }
-
llvm::Triple triple(origCI.getTargetOpts().Triple);
if (triple.getOS() == llvm::Triple::IOS)
@@ -237,18 +193,19 @@ createInvocationForMigration(CompilerInvocation &origCI) {
WarnOpts.push_back("error=arc-unsafe-retained-assign");
CInvok->getDiagnosticOpts().Warnings = llvm_move(WarnOpts);
- CInvok->getLangOpts()->ObjCRuntimeHasWeak = HasARCRuntime(origCI);
+ CInvok->getLangOpts()->ObjCARCWeak = HasARCRuntime(origCI);
return CInvok.take();
}
static void emitPremigrationErrors(const CapturedDiagList &arcDiags,
- const DiagnosticOptions &diagOpts,
+ DiagnosticOptions *diagOpts,
Preprocessor &PP) {
TextDiagnosticPrinter printer(llvm::errs(), diagOpts);
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, &printer, /*ShouldOwnClient=*/false));
+ new DiagnosticsEngine(DiagID, diagOpts, &printer,
+ /*ShouldOwnClient=*/false));
Diags->setSourceManager(&PP.getSourceManager());
printer.BeginSourceFile(PP.getLangOpts(), &PP);
@@ -286,7 +243,8 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
assert(DiagClient);
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+ new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(),
+ DiagClient, /*ShouldOwnClient=*/false));
// Filter of all diagnostics.
CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags);
@@ -314,7 +272,7 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
}
if (emitPremigrationARCErrors)
- emitPremigrationErrors(capturedDiags, origCI.getDiagnosticOpts(),
+ emitPremigrationErrors(capturedDiags, &origCI.getDiagnosticOpts(),
Unit->getPreprocessor());
if (!plistOut.empty()) {
SmallVector<StoredDiagnostic, 8> arcDiags;
@@ -395,7 +353,8 @@ static bool applyTransforms(CompilerInvocation &origCI,
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+ new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(),
+ DiagClient, /*ShouldOwnClient=*/false));
if (outputDir.empty()) {
origCI.getLangOpts()->ObjCAutoRefCount = true;
@@ -434,7 +393,8 @@ bool arcmt::getFileRemappings(std::vector<std::pair<std::string,std::string> > &
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+ new DiagnosticsEngine(DiagID, new DiagnosticOptions,
+ DiagClient, /*ShouldOwnClient=*/false));
FileRemapper remapper;
bool err = remapper.initFromDisk(outputDir, *Diags,
@@ -458,7 +418,8 @@ bool arcmt::getFileRemappingsFromFileList(
llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+ new DiagnosticsEngine(DiagID, new DiagnosticOptions,
+ DiagClient, /*ShouldOwnClient=*/false));
for (ArrayRef<StringRef>::iterator
I = remapFiles.begin(), E = remapFiles.end(); I != E; ++I) {
@@ -574,7 +535,8 @@ MigrationProcess::MigrationProcess(const CompilerInvocation &CI,
if (!outputDir.empty()) {
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+ new DiagnosticsEngine(DiagID, &CI.getDiagnosticOpts(),
+ DiagClient, /*ShouldOwnClient=*/false));
Remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanges=*/true);
}
}
@@ -593,7 +555,8 @@ bool MigrationProcess::applyTransform(TransformFn trans,
assert(DiagClient);
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+ new DiagnosticsEngine(DiagID, new DiagnosticOptions,
+ DiagClient, /*ShouldOwnClient=*/false));
// Filter of all diagnostics.
CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags);
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
index e9b49b3..28ca9a5 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
#include "clang/ARCMigrate/FileRemapper.h"
-#include "clang/Frontend/PreprocessorOptions.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/Support/MemoryBuffer.h"
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
index 935fc9b..1966a98 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
@@ -11,8 +11,10 @@
#define LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
#include "clang/ARCMigrate/ARCMT.h"
+#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
+#include <list>
namespace clang {
class Sema;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
index 0098f97..dfe14e2 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -18,7 +18,7 @@
#include "clang/Edit/EditedSource.h"
#include "clang/Edit/Commit.h"
#include "clang/Edit/EditsReceiver.h"
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/FileManager.h"
#include "llvm/ADT/SmallString.h"
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
index 1175c36..805a67d 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
@@ -40,7 +40,7 @@ bool MigrationPass::CFBridgingFunctionsDefined() {
bool trans::canApplyWeak(ASTContext &Ctx, QualType type,
bool AllowOnUnknownClass) {
- if (!Ctx.getLangOpts().ObjCRuntimeHasWeak)
+ if (!Ctx.getLangOpts().ObjCARCWeak)
return false;
QualType T = type;
@@ -59,7 +59,7 @@ bool trans::canApplyWeak(ASTContext &Ctx, QualType type,
return false; // id/NSObject is not safe for weak.
if (!AllowOnUnknownClass && !Class->hasDefinition())
return false; // forward classes are not verifiable, therefore not safe.
- if (Class->isArcWeakrefUnavailable())
+ if (Class && Class->isArcWeakrefUnavailable())
return false;
}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
index 1672bc8..a4e17c0 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
@@ -13,6 +13,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Decl.h"
using namespace clang;
bool ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) {
@@ -24,3 +25,7 @@ void ASTConsumer::HandleInterestingDecl(DeclGroupRef D) {
}
void ASTConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {}
+
+void ASTConsumer::HandleImplicitImportDecl(ImportDecl *D) {
+ HandleTopLevelDecl(DeclGroupRef(D));
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
index c021323..74c68ae 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Mangle.h"
+#include "clang/AST/Comment.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -72,6 +73,22 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
return NULL;
}
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->isStaticDataMember() &&
+ VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return NULL;
+ }
+
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
+ if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return NULL;
+ }
+
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return NULL;
+ }
+
// TODO: handle comments for function parameters properly.
if (isa<ParmVarDecl>(D))
return NULL;
@@ -196,15 +213,72 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
namespace {
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
+/// If we have an implicit instantiation, adjust 'D' to refer to the template.
const Decl *adjustDeclToTemplate(const Decl *D) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Is this function declaration part of a function template?
if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
- D = FTD;
- } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
- if (const ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
- D = CTD;
+ return FTD;
+
+ // Nothing to do if function is not an implicit instantiation.
+ if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
+ return D;
+
+ // Function is an implicit instantiation of a function template?
+ if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
+ return FTD;
+
+ // Function is instantiated from a member definition of a class template?
+ if (const FunctionDecl *MemberDecl =
+ FD->getInstantiatedFromMemberFunction())
+ return MemberDecl;
+
+ return D;
}
- // FIXME: Alias templates?
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // Static data member is instantiated from a member definition of a class
+ // template?
+ if (VD->isStaticDataMember())
+ if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
+ return MemberDecl;
+
+ return D;
+ }
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
+ // Is this class declaration part of a class template?
+ if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
+ return CTD;
+
+ // Class is an implicit instantiation of a class template or partial
+ // specialization?
+ if (const ClassTemplateSpecializationDecl *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
+ if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
+ return D;
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ PU = CTSD->getSpecializedTemplateOrPartial();
+ return PU.is<ClassTemplateDecl*>() ?
+ static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
+ static_cast<const Decl*>(
+ PU.get<ClassTemplatePartialSpecializationDecl *>());
+ }
+
+ // Class is instantiated from a member definition of a class template?
+ if (const MemberSpecializationInfo *Info =
+ CRD->getMemberSpecializationInfo())
+ return Info->getInstantiatedFrom();
+
+ return D;
+ }
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ // Enum is instantiated from a member definition of a class template?
+ if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
+ return MemberDecl;
+
+ return D;
+ }
+ // FIXME: Adjust alias templates?
return D;
}
} // unnamed namespace
@@ -282,23 +356,83 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(
return RC;
}
-comments::FullComment *ASTContext::getCommentForDecl(const Decl *D) const {
+static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
+ SmallVectorImpl<const NamedDecl *> &Redeclared) {
+ const DeclContext *DC = ObjCMethod->getDeclContext();
+ if (const ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(DC)) {
+ const ObjCInterfaceDecl *ID = IMD->getClassInterface();
+ if (!ID)
+ return;
+ // Add redeclared method here.
+ for (const ObjCCategoryDecl *ClsExtDecl = ID->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) {
+ if (ObjCMethodDecl *RedeclaredMethod =
+ ClsExtDecl->getMethod(ObjCMethod->getSelector(),
+ ObjCMethod->isInstanceMethod()))
+ Redeclared.push_back(RedeclaredMethod);
+ }
+ }
+}
+
+comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
+ const Decl *D) const {
+ comments::DeclInfo *ThisDeclInfo = new (*this) comments::DeclInfo;
+ ThisDeclInfo->CommentDecl = D;
+ ThisDeclInfo->IsFilled = false;
+ ThisDeclInfo->fill();
+ ThisDeclInfo->CommentDecl = FC->getDecl();
+ comments::FullComment *CFC =
+ new (*this) comments::FullComment(FC->getBlocks(),
+ ThisDeclInfo);
+ return CFC;
+
+}
+
+comments::FullComment *ASTContext::getCommentForDecl(
+ const Decl *D,
+ const Preprocessor *PP) const {
D = adjustDeclToTemplate(D);
+
const Decl *Canonical = D->getCanonicalDecl();
llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
ParsedComments.find(Canonical);
- if (Pos != ParsedComments.end())
+
+ if (Pos != ParsedComments.end()) {
+ if (Canonical != D) {
+ comments::FullComment *FC = Pos->second;
+ comments::FullComment *CFC = cloneFullComment(FC, D);
+ return CFC;
+ }
return Pos->second;
-
+ }
+
const Decl *OriginalDecl;
+
const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
- if (!RC)
+ if (!RC) {
+ if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
+ SmallVector<const NamedDecl*, 8> Overridden;
+ if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
+ addRedeclaredMethods(OMD, Overridden);
+ getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
+ for (unsigned i = 0, e = Overridden.size(); i < e; i++) {
+ if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP)) {
+ comments::FullComment *CFC = cloneFullComment(FC, D);
+ return CFC;
+ }
+ }
+ }
return NULL;
-
+ }
+
+ // If the RawComment was attached to another redeclaration of this Decl, we
+ // should parse the comment in the context of that other Decl. This is important
+ // because comments can contain references to parameter names which can be
+ // different across redeclarations.
if (D != OriginalDecl)
- return getCommentForDecl(OriginalDecl);
+ return getCommentForDecl(OriginalDecl, PP);
- comments::FullComment *FC = RC->parse(*this, D);
+ comments::FullComment *FC = RC->parse(*this, PP, D);
ParsedComments[Canonical] = FC;
return FC;
}
@@ -481,6 +615,7 @@ ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
Int128Decl(0), UInt128Decl(0),
BuiltinVaListDecl(0),
ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0), ObjCProtocolClassDecl(0),
+ BOOLDecl(0),
CFConstantStringTypeDecl(0), ObjCInstanceTypeDecl(0),
FILEDecl(0),
jmp_bufDecl(0), sigjmp_bufDecl(0), ucontext_tDecl(0),
@@ -495,6 +630,7 @@ ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
DeclarationNames(*this),
ExternalSource(0), Listener(0),
Comments(SM), CommentsLoaded(false),
+ CommentCommandTraits(BumpAlloc),
LastSDM(0, 0),
UniqueBlockByRefTypeID(0)
{
@@ -683,12 +819,12 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
InitBuiltinType(Int128Ty, BuiltinType::Int128);
InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
- if (LangOpts.CPlusPlus) { // C++ 3.9.1p5
+ if (LangOpts.CPlusPlus && LangOpts.WChar) { // C++ 3.9.1p5
if (TargetInfo::isTypeSigned(Target.getWCharType()))
InitBuiltinType(WCharTy, BuiltinType::WChar_S);
else // -fshort-wchar makes wchar_t be unsigned.
InitBuiltinType(WCharTy, BuiltinType::WChar_U);
- } else // C99
+ } else // C99 (or C++ using -fno-wchar)
WCharTy = getFromTargetType(Target.getWCharType());
WIntTy = getFromTargetType(Target.getWIntType());
@@ -725,6 +861,9 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
// Placeholder type for unbridged ARC casts.
InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
+ // Placeholder type for builtin functions.
+ InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
+
// C99 6.2.5p11.
FloatComplexTy = getComplexType(FloatTy);
DoubleComplexTy = getComplexType(DoubleTy);
@@ -909,7 +1048,7 @@ bool ASTContext::BitfieldFollowsNonBitfield(const FieldDecl *FD,
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
- = OverriddenMethods.find(Method);
+ = OverriddenMethods.find(Method->getCanonicalDecl());
if (Pos == OverriddenMethods.end())
return 0;
@@ -919,7 +1058,7 @@ ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
- = OverriddenMethods.find(Method);
+ = OverriddenMethods.find(Method->getCanonicalDecl());
if (Pos == OverriddenMethods.end())
return 0;
@@ -929,7 +1068,7 @@ ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
- = OverriddenMethods.find(Method);
+ = OverriddenMethods.find(Method->getCanonicalDecl());
if (Pos == OverriddenMethods.end())
return 0;
@@ -938,9 +1077,30 @@ ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
const CXXMethodDecl *Overridden) {
+ assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
OverriddenMethods[Method].push_back(Overridden);
}
+void ASTContext::getOverriddenMethods(
+ const NamedDecl *D,
+ SmallVectorImpl<const NamedDecl *> &Overridden) const {
+ assert(D);
+
+ if (const CXXMethodDecl *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
+ Overridden.append(CXXMethod->begin_overridden_methods(),
+ CXXMethod->end_overridden_methods());
+ return;
+ }
+
+ const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D);
+ if (!Method)
+ return;
+
+ SmallVector<const ObjCMethodDecl *, 8> OverDecls;
+ Method->getOverriddenMethods(OverDecls);
+ Overridden.append(OverDecls.begin(), OverDecls.end());
+}
+
void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
assert(!Import->NextLocalImport && "Import declaration already in the chain");
assert(!Import->isFromASTFile() && "Non-local import declaration");
@@ -1062,6 +1222,27 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) const {
return toCharUnitsFromBits(Align);
}
+// getTypeInfoDataSizeInChars - Return the size of a type, in
+// chars. If the type is a record, its data size is returned. This is
+// the size of the memcpy that's performed when assigning this type
+// using a trivial copy/move assignment operator.
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
+ std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
+
+ // In C++, objects can sometimes be allocated into the tail padding
+ // of a base-class subobject. We decide whether that's possible
+ // during class layout, so here we can just trust the layout results.
+ if (getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
+ sizeAndAlign.first = layout.getDataSize();
+ }
+ }
+
+ return sizeAndAlign;
+}
+
std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(const Type *T) const {
std::pair<uint64_t, unsigned> Info = getTypeInfo(T);
@@ -3353,6 +3534,12 @@ QualType ASTContext::getPointerDiffType() const {
return getFromTargetType(Target->getPtrDiffType(0));
}
+/// \brief Return the unique type for "pid_t" defined in
+/// <sys/types.h>. We need this to compute the correct type for vfork().
+QualType ASTContext::getProcessIDType() const {
+ return getFromTargetType(Target->getProcessIDType());
+}
+
//===----------------------------------------------------------------------===//
// Type Operators
//===----------------------------------------------------------------------===//
@@ -3581,11 +3768,14 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
return Arg;
case TemplateArgument::Declaration: {
- if (Decl *D = Arg.getAsDecl())
- return TemplateArgument(D->getCanonicalDecl());
- return TemplateArgument((Decl*)0);
+ ValueDecl *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
+ return TemplateArgument(D, Arg.isDeclForReferenceParam());
}
+ case TemplateArgument::NullPtr:
+ return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
+ /*isNullPtr*/true);
+
case TemplateArgument::Template:
return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
@@ -4297,7 +4487,13 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
QualType BlockTy =
Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
// Encode result type.
- getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(), S);
+ if (getLangOpts().EncodeExtendedBlockSig)
+ getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None,
+ BlockTy->getAs<FunctionType>()->getResultType(),
+ S, true /*Extended*/);
+ else
+ getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(),
+ S);
// Compute size of all parameters.
// Start with computing size of a pointer in number of bytes.
// FIXME: There might(should) be a better way of doing this computation!
@@ -4332,7 +4528,11 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
PType = PVDecl->getType();
} else if (PType->isFunctionType())
PType = PVDecl->getType();
- getObjCEncodingForType(PType, S);
+ if (getLangOpts().EncodeExtendedBlockSig)
+ getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType,
+ S, true /*Extended*/);
+ else
+ getObjCEncodingForType(PType, S);
S += charUnitsToString(ParmOffset);
ParmOffset += getObjCEncodingTypeSize(PType);
}
@@ -5393,6 +5593,65 @@ static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
return VaListTypedefDecl;
}
+static TypedefDecl *
+CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
+ RecordDecl *VaListDecl;
+ if (Context->getLangOpts().CPlusPlus) {
+ // namespace std { struct __va_list {
+ NamespaceDecl *NS;
+ NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ /*Inline*/false, SourceLocation(),
+ SourceLocation(), &Context->Idents.get("std"),
+ /*PrevDecl*/0);
+
+ VaListDecl = CXXRecordDecl::Create(*Context, TTK_Struct,
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__va_list"));
+
+ VaListDecl->setDeclContext(NS);
+
+ } else {
+ // struct __va_list {
+ VaListDecl = CreateRecordDecl(*Context, TTK_Struct,
+ Context->getTranslationUnitDecl(),
+ &Context->Idents.get("__va_list"));
+ }
+
+ VaListDecl->startDefinition();
+
+ // void * __ap;
+ FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
+ VaListDecl,
+ SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("__ap"),
+ Context->getPointerType(Context->VoidTy),
+ /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListDecl->addDecl(Field);
+
+ // };
+ VaListDecl->completeDefinition();
+
+ // typedef struct __va_list __builtin_va_list;
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(Context->getRecordType(VaListDecl));
+
+ TypedefDecl *VaListTypeDecl
+ = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__builtin_va_list"),
+ TInfo);
+
+ return VaListTypeDecl;
+}
+
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
TargetInfo::BuiltinVaListKind Kind) {
switch (Kind) {
@@ -5406,6 +5665,8 @@ static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
return CreateX86_64ABIBuiltinVaListDecl(Context);
case TargetInfo::PNaClABIBuiltinVaList:
return CreatePNaClABIBuiltinVaListDecl(Context);
+ case TargetInfo::AAPCSABIBuiltinVaList:
+ return CreateAAPCSABIBuiltinVaListDecl(Context);
}
llvm_unreachable("Unhandled __builtin_va_list type kind");
@@ -5702,7 +5963,7 @@ ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
return false;
}
-/// QualifiedIdConformsQualifiedId - compare id<p,...> with id<p1,...>
+/// QualifiedIdConformsQualifiedId - compare id<pr,...> with id<pr1,...>
/// return true if lhs's protocols conform to rhs's protocol; false
/// otherwise.
bool ASTContext::QualifiedIdConformsQualifiedId(QualType lhs, QualType rhs) {
@@ -5711,8 +5972,8 @@ bool ASTContext::QualifiedIdConformsQualifiedId(QualType lhs, QualType rhs) {
return false;
}
-/// ObjCQualifiedClassTypesAreCompatible - compare Class<p,...> and
-/// Class<p1, ...>.
+/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
+/// Class<pr1, ...>.
bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs,
QualType rhs) {
const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>();
@@ -6315,10 +6576,13 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
for (unsigned i = 0; i < proto_nargs; ++i) {
QualType argTy = proto->getArgType(i);
- // Look at the promotion type of enum types, since that is the type used
+ // Look at the converted type of enum types, since that is the type used
// to pass enum values.
- if (const EnumType *Enum = argTy->getAs<EnumType>())
- argTy = Enum->getDecl()->getPromotionType();
+ if (const EnumType *Enum = argTy->getAs<EnumType>()) {
+ argTy = Enum->getDecl()->getIntegerType();
+ if (argTy.isNull())
+ return QualType();
+ }
if (argTy->isPromotableIntegerType() ||
getCanonicalType(argTy).getUnqualifiedType() == FloatTy)
@@ -6725,7 +6989,7 @@ unsigned ASTContext::getIntWidth(QualType T) const {
return (unsigned)getTypeSize(T);
}
-QualType ASTContext::getCorrespondingUnsignedType(QualType T) {
+QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
assert(T->hasSignedIntegerRepresentation() && "Unexpected type");
// Turn <4 x signed int> -> <4 x unsigned int>
@@ -6959,6 +7223,9 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
return QualType();
}
break;
+ case 'p':
+ Type = Context.getProcessIDType();
+ break;
}
// If there are modifiers and if we're allowed to parse them, go for it.
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
index a605f1a..0b9c524 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
@@ -430,6 +430,15 @@ class TemplateDiff {
/// arguments or the type arguments that are templates.
TemplateDecl *FromTD, *ToTD;
+ /// FromQual, ToQual - Qualifiers for template types.
+ Qualifiers FromQual, ToQual;
+
+ /// FromInt, ToInt - APSInt's for integral arguments.
+ llvm::APSInt FromInt, ToInt;
+
+ /// IsValidFromInt, IsValidToInt - Whether the APSInt's are valid.
+ bool IsValidFromInt, IsValidToInt;
+
/// FromDefault, ToDefault - Whether the argument is a default argument.
bool FromDefault, ToDefault;
@@ -480,6 +489,21 @@ class TemplateDiff {
FlatTree[CurrentNode].ToExpr = ToExpr;
}
+ /// SetNode - Set FromInt and ToInt of the current node.
+ void SetNode(llvm::APSInt FromInt, llvm::APSInt ToInt,
+ bool IsValidFromInt, bool IsValidToInt) {
+ FlatTree[CurrentNode].FromInt = FromInt;
+ FlatTree[CurrentNode].ToInt = ToInt;
+ FlatTree[CurrentNode].IsValidFromInt = IsValidFromInt;
+ FlatTree[CurrentNode].IsValidToInt = IsValidToInt;
+ }
+
+ /// SetNode - Set FromQual and ToQual of the current node.
+ void SetNode(Qualifiers FromQual, Qualifiers ToQual) {
+ FlatTree[CurrentNode].FromQual = FromQual;
+ FlatTree[CurrentNode].ToQual = ToQual;
+ }
+
/// SetSame - Sets the same flag of the current node.
void SetSame(bool Same) {
FlatTree[CurrentNode].Same = Same;
@@ -557,6 +581,12 @@ class TemplateDiff {
(FlatTree[ReadNode].FromTD || FlatTree[ReadNode].ToTD);
}
+ /// NodeIsAPSInt - Returns true if the arguments are stored in APSInt's.
+ bool NodeIsAPSInt() {
+ return FlatTree[ReadNode].IsValidFromInt ||
+ FlatTree[ReadNode].IsValidToInt;
+ }
+
/// GetNode - Gets the FromType and ToType.
void GetNode(QualType &FromType, QualType &ToType) {
FromType = FlatTree[ReadNode].FromType;
@@ -575,6 +605,21 @@ class TemplateDiff {
ToTD = FlatTree[ReadNode].ToTD;
}
+ /// GetNode - Gets the FromInt and ToInt.
+ void GetNode(llvm::APSInt &FromInt, llvm::APSInt &ToInt,
+ bool &IsValidFromInt, bool &IsValidToInt) {
+ FromInt = FlatTree[ReadNode].FromInt;
+ ToInt = FlatTree[ReadNode].ToInt;
+ IsValidFromInt = FlatTree[ReadNode].IsValidFromInt;
+ IsValidToInt = FlatTree[ReadNode].IsValidToInt;
+ }
+
+ /// GetNode - Gets the FromQual and ToQual.
+ void GetNode(Qualifiers &FromQual, Qualifiers &ToQual) {
+ FromQual = FlatTree[ReadNode].FromQual;
+ ToQual = FlatTree[ReadNode].ToQual;
+ }
+
/// NodeIsSame - Returns true if the arguments are the same.
bool NodeIsSame() {
return FlatTree[ReadNode].Same;
@@ -778,18 +823,21 @@ class TemplateDiff {
if (Context.hasSameType(FromType, ToType)) {
Tree.SetSame(true);
} else {
+ Qualifiers FromQual = FromType.getQualifiers(),
+ ToQual = ToType.getQualifiers();
const TemplateSpecializationType *FromArgTST =
GetTemplateSpecializationType(Context, FromType);
const TemplateSpecializationType *ToArgTST =
GetTemplateSpecializationType(Context, ToType);
- if (FromArgTST && ToArgTST) {
- bool SameTemplate = hasSameTemplate(FromArgTST, ToArgTST);
- if (SameTemplate) {
- Tree.SetNode(FromArgTST->getTemplateName().getAsTemplateDecl(),
- ToArgTST->getTemplateName().getAsTemplateDecl());
- DiffTemplate(FromArgTST, ToArgTST);
- }
+ if (FromArgTST && ToArgTST &&
+ hasSameTemplate(FromArgTST, ToArgTST)) {
+ FromQual -= QualType(FromArgTST, 0).getQualifiers();
+ ToQual -= QualType(ToArgTST, 0).getQualifiers();
+ Tree.SetNode(FromArgTST->getTemplateName().getAsTemplateDecl(),
+ ToArgTST->getTemplateName().getAsTemplateDecl());
+ Tree.SetNode(FromQual, ToQual);
+ DiffTemplate(FromArgTST, ToArgTST);
}
}
}
@@ -799,12 +847,41 @@ class TemplateDiff {
if (NonTypeTemplateParmDecl *DefaultNTTPD =
dyn_cast<NonTypeTemplateParmDecl>(ParamND)) {
Expr *FromExpr, *ToExpr;
- GetExpr(FromIter, DefaultNTTPD, FromExpr);
- GetExpr(ToIter, DefaultNTTPD, ToExpr);
- Tree.SetNode(FromExpr, ToExpr);
- Tree.SetSame(IsEqualExpr(Context, FromExpr, ToExpr));
- Tree.SetDefault(FromIter.isEnd() && FromExpr,
- ToIter.isEnd() && ToExpr);
+ llvm::APSInt FromInt, ToInt;
+ bool HasFromInt = !FromIter.isEnd() &&
+ FromIter->getKind() == TemplateArgument::Integral;
+ bool HasToInt = !ToIter.isEnd() &&
+ ToIter->getKind() == TemplateArgument::Integral;
+ //bool IsValidFromInt = false, IsValidToInt = false;
+ if (HasFromInt)
+ FromInt = FromIter->getAsIntegral();
+ else
+ GetExpr(FromIter, DefaultNTTPD, FromExpr);
+
+ if (HasToInt)
+ ToInt = ToIter->getAsIntegral();
+ else
+ GetExpr(ToIter, DefaultNTTPD, ToExpr);
+
+ if (!HasFromInt && !HasToInt) {
+ Tree.SetNode(FromExpr, ToExpr);
+ Tree.SetSame(IsEqualExpr(Context, FromExpr, ToExpr));
+ Tree.SetDefault(FromIter.isEnd() && FromExpr,
+ ToIter.isEnd() && ToExpr);
+ } else {
+ if (!HasFromInt && FromExpr) {
+ FromInt = FromExpr->EvaluateKnownConstInt(Context);
+ HasFromInt = true;
+ }
+ if (!HasToInt && ToExpr) {
+ ToInt = ToExpr->EvaluateKnownConstInt(Context);
+ HasToInt = true;
+ }
+ Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
+ Tree.SetSame(llvm::APSInt::isSameValue(FromInt, ToInt));
+ Tree.SetDefault(FromIter.isEnd() && HasFromInt,
+ ToIter.isEnd() && HasToInt);
+ }
}
// Handle Templates
@@ -824,6 +901,26 @@ class TemplateDiff {
}
}
+ /// makeTemplateList - Dump every template alias into the vector.
+ static void makeTemplateList(
+ SmallVector<const TemplateSpecializationType*, 1> &TemplateList,
+ const TemplateSpecializationType *TST) {
+ while (TST) {
+ TemplateList.push_back(TST);
+ if (!TST->isTypeAlias())
+ return;
+ TST = TST->getAliasedType()->getAs<TemplateSpecializationType>();
+ }
+ }
+
+ /// hasSameBaseTemplate - Returns true when the base templates are the same,
+ /// even if the template arguments are not.
+ static bool hasSameBaseTemplate(const TemplateSpecializationType *FromTST,
+ const TemplateSpecializationType *ToTST) {
+ return FromTST->getTemplateName().getAsTemplateDecl()->getIdentifier() ==
+ ToTST->getTemplateName().getAsTemplateDecl()->getIdentifier();
+ }
+
/// hasSameTemplate - Returns true if both types are specialized from the
/// same template declaration. If they come from different template aliases,
/// do a parallel ascension search to determine the highest template alias in
@@ -831,49 +928,29 @@ class TemplateDiff {
static bool hasSameTemplate(const TemplateSpecializationType *&FromTST,
const TemplateSpecializationType *&ToTST) {
// Check the top templates if they are the same.
- if (FromTST->getTemplateName().getAsTemplateDecl()->getIdentifier() ==
- ToTST->getTemplateName().getAsTemplateDecl()->getIdentifier())
+ if (hasSameBaseTemplate(FromTST, ToTST))
return true;
// Create vectors of template aliases.
SmallVector<const TemplateSpecializationType*, 1> FromTemplateList,
ToTemplateList;
- const TemplateSpecializationType *TempToTST = ToTST, *TempFromTST = FromTST;
- FromTemplateList.push_back(FromTST);
- ToTemplateList.push_back(ToTST);
-
- // Dump every template alias into the vectors.
- while (TempFromTST->isTypeAlias()) {
- TempFromTST =
- TempFromTST->getAliasedType()->getAs<TemplateSpecializationType>();
- if (!TempFromTST)
- break;
- FromTemplateList.push_back(TempFromTST);
- }
- while (TempToTST->isTypeAlias()) {
- TempToTST =
- TempToTST->getAliasedType()->getAs<TemplateSpecializationType>();
- if (!TempToTST)
- break;
- ToTemplateList.push_back(TempToTST);
- }
+ makeTemplateList(FromTemplateList, FromTST);
+ makeTemplateList(ToTemplateList, ToTST);
SmallVector<const TemplateSpecializationType*, 1>::reverse_iterator
FromIter = FromTemplateList.rbegin(), FromEnd = FromTemplateList.rend(),
ToIter = ToTemplateList.rbegin(), ToEnd = ToTemplateList.rend();
// Check if the lowest template types are the same. If not, return.
- if ((*FromIter)->getTemplateName().getAsTemplateDecl()->getIdentifier() !=
- (*ToIter)->getTemplateName().getAsTemplateDecl()->getIdentifier())
+ if (!hasSameBaseTemplate(*FromIter, *ToIter))
return false;
    // Begin searching up the template aliases. The bottommost template
    // matches, so move up until one pair does not match. Use the template
    // right before that one.
for (; FromIter != FromEnd && ToIter != ToEnd; ++FromIter, ++ToIter) {
- if ((*FromIter)->getTemplateName().getAsTemplateDecl()->getIdentifier() !=
- (*ToIter)->getTemplateName().getAsTemplateDecl()->getIdentifier())
+ if (!hasSameBaseTemplate(*FromIter, *ToIter))
break;
}
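A sketch of the parallel ascension with hypothetical aliases: both chains are
flattened, reversed, and walked in lockstep until the names diverge.

    template <class T> struct vector {};
    template <class T> using Ptr = vector<T *>;
    template <class T> using PtrAlias = Ptr<T>;
    // Diffing PtrAlias<int> against Ptr<char>: makeTemplateList yields
    // {PtrAlias, Ptr, vector} and {Ptr, vector}; reversed, the walk matches
    // vector/vector and Ptr/Ptr, then stops, so the diff is reported in
    // terms of the highest common alias, Ptr.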
@@ -923,7 +1000,9 @@ class TemplateDiff {
bool isVariadic = DefaultTTPD->isParameterPack();
TemplateArgument TA = DefaultTTPD->getDefaultArgument().getArgument();
- TemplateDecl *DefaultTD = TA.getAsTemplate().getAsTemplateDecl();
+ TemplateDecl *DefaultTD = 0;
+ if (TA.getKind() != TemplateArgument::Null)
+ DefaultTD = TA.getAsTemplate().getAsTemplateDecl();
if (!Iter.isEnd())
ArgDecl = Iter->getAsTemplate().getAsTemplateDecl();
@@ -1018,6 +1097,15 @@ class TemplateDiff {
Tree.ToDefault(), Tree.NodeIsSame());
return;
}
+
+ if (Tree.NodeIsAPSInt()) {
+ llvm::APSInt FromInt, ToInt;
+ bool IsValidFromInt, IsValidToInt;
+ Tree.GetNode(FromInt, ToInt, IsValidFromInt, IsValidToInt);
+ PrintAPSInt(FromInt, ToInt, IsValidFromInt, IsValidToInt,
+ Tree.FromDefault(), Tree.ToDefault(), Tree.NodeIsSame());
+ return;
+ }
llvm_unreachable("Unable to deduce template difference.");
}
@@ -1027,6 +1115,10 @@ class TemplateDiff {
assert(Tree.HasChildren() && "Template difference not found in diff tree.");
+ Qualifiers FromQual, ToQual;
+ Tree.GetNode(FromQual, ToQual);
+ PrintQualifiers(FromQual, ToQual);
+
OS << FromTD->getNameAsString() << '<';
Tree.MoveToChild();
unsigned NumElideArgs = 0;
@@ -1088,6 +1180,17 @@ class TemplateDiff {
return;
}
+ if (!FromType.isNull() && !ToType.isNull() &&
+ FromType.getLocalUnqualifiedType() ==
+ ToType.getLocalUnqualifiedType()) {
+    Qualifiers FromQual = FromType.getLocalQualifiers(),
+               ToQual = ToType.getLocalQualifiers();
+ PrintQualifiers(FromQual, ToQual);
+ FromType.getLocalUnqualifiedType().print(OS, Policy);
+ return;
+ }
+
std::string FromTypeStr = FromType.isNull() ? "(no argument)"
: FromType.getAsString();
std::string ToTypeStr = ToType.isNull() ? "(no argument)"
@@ -1177,6 +1280,34 @@ class TemplateDiff {
}
}
+ /// PrintAPSInt - Handles printing of integral arguments, highlighting
+ /// argument differences.
+ void PrintAPSInt(llvm::APSInt FromInt, llvm::APSInt ToInt,
+ bool IsValidFromInt, bool IsValidToInt, bool FromDefault,
+ bool ToDefault, bool Same) {
+ assert((IsValidFromInt || IsValidToInt) &&
+ "Only one integral argument may be missing.");
+
+ if (Same) {
+ OS << FromInt.toString(10);
+ } else if (!PrintTree) {
+ OS << (FromDefault ? "(default) " : "");
+ Bold();
+ OS << (IsValidFromInt ? FromInt.toString(10) : "(no argument)");
+ Unbold();
+ } else {
+ OS << (FromDefault ? "[(default) " : "[");
+ Bold();
+ OS << (IsValidFromInt ? FromInt.toString(10) : "(no argument)");
+ Unbold();
+ OS << " != " << (ToDefault ? "(default) " : "");
+ Bold();
+ OS << (IsValidToInt ? ToInt.toString(10) : "(no argument)");
+ Unbold();
+ OS << ']';
+ }
+ }
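The three branches above render roughly as follows, with the Bold()/Unbold()
span shown here between asterisks (a sketch of intended output, not verbatim
compiler text):

    // Same:                  5
    // inline (!PrintTree):   (default) *5*
    // tree   (PrintTree):    [(default) *5* != *(no argument)*]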
+
// Prints the appropriate placeholder for elided template arguments.
void PrintElideArgs(unsigned NumElideArgs, unsigned Indent) {
if (PrintTree) {
@@ -1191,6 +1322,68 @@ class TemplateDiff {
OS << "[" << NumElideArgs << " * ...]";
}
+ // Prints and highlights differences in Qualifiers.
+ void PrintQualifiers(Qualifiers FromQual, Qualifiers ToQual) {
+    // Neither type has any qualifiers.
+ if (FromQual.empty() && ToQual.empty())
+ return;
+
+    // Both types have the same qualifiers.
+ if (FromQual == ToQual) {
+ PrintQualifier(FromQual, /*ApplyBold*/false);
+ return;
+ }
+
+ // Find common qualifiers and strip them from FromQual and ToQual.
+ Qualifiers CommonQual = Qualifiers::removeCommonQualifiers(FromQual,
+ ToQual);
+
+ // The qualifiers are printed before the template name.
+ // Inline printing:
+ // The common qualifiers are printed. Then, qualifiers only in this type
+ // are printed and highlighted. Finally, qualifiers only in the other
+ // type are printed and highlighted inside parentheses after "missing".
+ // Tree printing:
+ // Qualifiers are printed next to each other, inside brackets, and
+ // separated by "!=". The printing order is:
+ // common qualifiers, highlighted from qualifiers, "!=",
+ // common qualifiers, highlighted to qualifiers
+ if (PrintTree) {
+ OS << "[";
+ if (CommonQual.empty() && FromQual.empty()) {
+ Bold();
+ OS << "(no qualifiers) ";
+ Unbold();
+ } else {
+ PrintQualifier(CommonQual, /*ApplyBold*/false);
+ PrintQualifier(FromQual, /*ApplyBold*/true);
+ }
+ OS << "!= ";
+ if (CommonQual.empty() && ToQual.empty()) {
+ Bold();
+ OS << "(no qualifiers)";
+ Unbold();
+ } else {
+ PrintQualifier(CommonQual, /*ApplyBold*/false,
+ /*appendSpaceIfNonEmpty*/!ToQual.empty());
+ PrintQualifier(ToQual, /*ApplyBold*/true,
+ /*appendSpaceIfNonEmpty*/false);
+ }
+ OS << "] ";
+ } else {
+ PrintQualifier(CommonQual, /*ApplyBold*/false);
+ PrintQualifier(FromQual, /*ApplyBold*/true);
+ }
+ }
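For instance, diffing arguments qualified 'const volatile' and 'const' strips
the shared 'const' and highlights the remainder (a sketch of the rendering,
with the bolded span between asterisks):

    // inline: const *volatile* ...
    // tree:   [const *volatile* != const] ...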
+
+ void PrintQualifier(Qualifiers Q, bool ApplyBold,
+ bool AppendSpaceIfNonEmpty = true) {
+ if (Q.empty()) return;
+ if (ApplyBold) Bold();
+ Q.print(OS, Policy, AppendSpaceIfNonEmpty);
+ if (ApplyBold) Unbold();
+ }
+
public:
TemplateDiff(ASTContext &Context, QualType FromType, QualType ToType,
@@ -1210,6 +1403,9 @@ public:
/// DiffTemplate - Start the template type diffing.
void DiffTemplate() {
+ Qualifiers FromQual = FromType.getQualifiers(),
+ ToQual = ToType.getQualifiers();
+
const TemplateSpecializationType *FromOrigTST =
GetTemplateSpecializationType(Context, FromType);
const TemplateSpecializationType *ToOrigTST =
@@ -1224,7 +1420,10 @@ public:
return;
}
+ FromQual -= QualType(FromOrigTST, 0).getQualifiers();
+ ToQual -= QualType(ToOrigTST, 0).getQualifiers();
Tree.SetNode(FromType, ToType);
+ Tree.SetNode(FromQual, ToQual);
// Same base template, but different arguments.
Tree.SetNode(FromOrigTST->getTemplateName().getAsTemplateDecl(),
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
index 3e952ac..0d4f303 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
@@ -228,16 +228,12 @@ namespace {
public:
DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID) {
- if (!Complain)
- return DiagnosticBuilder::getEmpty();
-
+ assert(Complain && "Not allowed to complain");
return C1.getDiagnostics().Report(Loc, DiagID);
}
DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID) {
- if (!Complain)
- return DiagnosticBuilder::getEmpty();
-
+ assert(Complain && "Not allowed to complain");
return C2.getDiagnostics().Report(Loc, DiagID);
}
};
@@ -288,7 +284,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case TemplateArgument::Type:
return Context.IsStructurallyEquivalent(Arg1.getAsType(), Arg2.getAsType());
-
+
case TemplateArgument::Integral:
if (!Context.IsStructurallyEquivalent(Arg1.getIntegralType(),
Arg2.getIntegralType()))
@@ -297,10 +293,11 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return llvm::APSInt::isSameValue(Arg1.getAsIntegral(), Arg2.getAsIntegral());
case TemplateArgument::Declaration:
- if (!Arg1.getAsDecl() || !Arg2.getAsDecl())
- return !Arg1.getAsDecl() && !Arg2.getAsDecl();
return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl());
-
+
+ case TemplateArgument::NullPtr:
+ return true; // FIXME: Is this correct?
+
case TemplateArgument::Template:
return IsStructurallyEquivalent(Context,
Arg1.getAsTemplate(),
@@ -822,33 +819,47 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
FieldDecl *Field1, FieldDecl *Field2) {
RecordDecl *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
-
- if (!IsStructurallyEquivalent(Context,
+
+  // For anonymous structs/unions, match up the anonymous struct/union type
+  // declarations directly, so that we don't go off searching for anonymous
+  // types.
+ if (Field1->isAnonymousStructOrUnion() &&
+ Field2->isAnonymousStructOrUnion()) {
+ RecordDecl *D1 = Field1->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *D2 = Field2->getType()->castAs<RecordType>()->getDecl();
+ return IsStructurallyEquivalent(Context, D1, D2);
+ }
+
+ if (!IsStructurallyEquivalent(Context,
Field1->getType(), Field2->getType())) {
- Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(Owner2);
- Context.Diag2(Field2->getLocation(), diag::note_odr_field)
- << Field2->getDeclName() << Field2->getType();
- Context.Diag1(Field1->getLocation(), diag::note_odr_field)
- << Field1->getDeclName() << Field1->getType();
+ if (Context.Complain) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_field)
+ << Field2->getDeclName() << Field2->getType();
+ Context.Diag1(Field1->getLocation(), diag::note_odr_field)
+ << Field1->getDeclName() << Field1->getType();
+ }
return false;
}
if (Field1->isBitField() != Field2->isBitField()) {
- Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(Owner2);
- if (Field1->isBitField()) {
- Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
- << Field1->getDeclName() << Field1->getType()
- << Field1->getBitWidthValue(Context.C1);
- Context.Diag2(Field2->getLocation(), diag::note_odr_not_bit_field)
- << Field2->getDeclName();
- } else {
- Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
- << Field2->getDeclName() << Field2->getType()
- << Field2->getBitWidthValue(Context.C2);
- Context.Diag1(Field1->getLocation(), diag::note_odr_not_bit_field)
- << Field1->getDeclName();
+ if (Context.Complain) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ if (Field1->isBitField()) {
+ Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
+ << Field1->getDeclName() << Field1->getType()
+ << Field1->getBitWidthValue(Context.C1);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_not_bit_field)
+ << Field2->getDeclName();
+ } else {
+ Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
+ << Field2->getDeclName() << Field2->getType()
+ << Field2->getBitWidthValue(Context.C2);
+ Context.Diag1(Field1->getLocation(), diag::note_odr_not_bit_field)
+ << Field1->getDeclName();
+ }
}
return false;
}
@@ -859,12 +870,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
unsigned Bits2 = Field2->getBitWidthValue(Context.C2);
if (Bits1 != Bits2) {
- Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(Owner2);
- Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
- << Field2->getDeclName() << Field2->getType() << Bits2;
- Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
- << Field1->getDeclName() << Field1->getType() << Bits1;
+ if (Context.Complain) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
+ << Field2->getDeclName() << Field2->getType() << Bits2;
+ Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
+ << Field1->getDeclName() << Field1->getType() << Bits1;
+ }
return false;
}
}
@@ -872,17 +885,65 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+/// \brief Find the index of the given anonymous struct/union within its
+/// context.
+///
+/// \returns The index of this anonymous struct/union in its context, or the
+/// next assigned index if none of them match. Returns an empty Optional if
+/// the context is not a record, i.e., if the anonymous struct/union is at
+/// namespace or block scope.
+static llvm::Optional<unsigned>
+findAnonymousStructOrUnionIndex(RecordDecl *Anon) {
+ ASTContext &Context = Anon->getASTContext();
+ QualType AnonTy = Context.getRecordType(Anon);
+
+ RecordDecl *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext());
+ if (!Owner)
+ return llvm::Optional<unsigned>();
+
+ unsigned Index = 0;
+ for (DeclContext::decl_iterator D = Owner->noload_decls_begin(),
+ DEnd = Owner->noload_decls_end();
+ D != DEnd; ++D) {
+ FieldDecl *F = dyn_cast<FieldDecl>(*D);
+ if (!F || !F->isAnonymousStructOrUnion())
+ continue;
+
+ if (Context.hasSameType(F->getType(), AnonTy))
+ break;
+
+ ++Index;
+ }
+
+ return Index;
+}
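A sketch of what the index captures (standard anonymous unions used for
illustration):

    struct Outer {
      union { int a; };   // anonymous member, index 0
      int between;
      union { int b; };   // anonymous member, index 1
    };
    // Two definitions of Outer from different ASTs pair up their anonymous
    // members only when these indices agree; at namespace or block scope
    // the function returns an empty Optional instead.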
+
/// \brief Determine structural equivalence of two records.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
RecordDecl *D1, RecordDecl *D2) {
if (D1->isUnion() != D2->isUnion()) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
- << D1->getDeclName() << (unsigned)D1->getTagKind();
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
+ << D1->getDeclName() << (unsigned)D1->getTagKind();
+ }
return false;
}
-
+
+ if (D1->isAnonymousStructOrUnion() && D2->isAnonymousStructOrUnion()) {
+ // If both anonymous structs/unions are in a record context, make sure
+ // they occur in the same location in the context records.
+ if (llvm::Optional<unsigned> Index1
+ = findAnonymousStructOrUnionIndex(D1)) {
+ if (llvm::Optional<unsigned> Index2
+ = findAnonymousStructOrUnionIndex(D2)) {
+ if (*Index1 != *Index2)
+ return false;
+ }
+ }
+ }
+
// If both declarations are class template specializations, we know
// the ODR applies, so check the template and template arguments.
ClassTemplateSpecializationDecl *Spec1
@@ -920,12 +981,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
if (D1CXX->getNumBases() != D2CXX->getNumBases()) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
- << D2CXX->getNumBases();
- Context.Diag1(D1->getLocation(), diag::note_odr_number_of_bases)
- << D1CXX->getNumBases();
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
+ << D2CXX->getNumBases();
+ Context.Diag1(D1->getLocation(), diag::note_odr_number_of_bases)
+ << D1CXX->getNumBases();
+ }
return false;
}
@@ -937,38 +1000,44 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
++Base1, ++Base2) {
if (!IsStructurallyEquivalent(Context,
Base1->getType(), Base2->getType())) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
- << Base2->getType()
- << Base2->getSourceRange();
- Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
- << Base1->getType()
- << Base1->getSourceRange();
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
+ << Base2->getType()
+ << Base2->getSourceRange();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->getType()
+ << Base1->getSourceRange();
+ }
return false;
}
// Check virtual vs. non-virtual inheritance mismatch.
if (Base1->isVirtual() != Base2->isVirtual()) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- Context.Diag2(Base2->getLocStart(),
- diag::note_odr_virtual_base)
- << Base2->isVirtual() << Base2->getSourceRange();
- Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
- << Base1->isVirtual()
- << Base1->getSourceRange();
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Base2->getLocStart(),
+ diag::note_odr_virtual_base)
+ << Base2->isVirtual() << Base2->getSourceRange();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->isVirtual()
+ << Base1->getSourceRange();
+ }
return false;
}
}
} else if (D1CXX->getNumBases() > 0) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
- Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
- << Base1->getType()
- << Base1->getSourceRange();
- Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->getType()
+ << Base1->getSourceRange();
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
+ }
return false;
}
}
@@ -981,11 +1050,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Field1 != Field1End;
++Field1, ++Field2) {
if (Field2 == Field2End) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- Context.Diag1(Field1->getLocation(), diag::note_odr_field)
- << Field1->getDeclName() << Field1->getType();
- Context.Diag2(D2->getLocation(), diag::note_odr_missing_field);
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(Field1->getLocation(), diag::note_odr_field)
+ << Field1->getDeclName() << Field1->getType();
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_field);
+ }
return false;
}
@@ -994,11 +1065,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
if (Field2 != Field2End) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- Context.Diag2(Field2->getLocation(), diag::note_odr_field)
- << Field2->getDeclName() << Field2->getType();
- Context.Diag1(D1->getLocation(), diag::note_odr_missing_field);
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_field)
+ << Field2->getDeclName() << Field2->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_field);
+ }
return false;
}
@@ -1014,12 +1087,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
EC1End = D1->enumerator_end();
EC1 != EC1End; ++EC1, ++EC2) {
if (EC2 == EC2End) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
- << EC1->getDeclName()
- << EC1->getInitVal().toString(10);
- Context.Diag2(D2->getLocation(), diag::note_odr_missing_enumerator);
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
+ << EC1->getDeclName()
+ << EC1->getInitVal().toString(10);
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_enumerator);
+ }
return false;
}
@@ -1027,25 +1102,29 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
llvm::APSInt Val2 = EC2->getInitVal();
if (!llvm::APSInt::isSameValue(Val1, Val2) ||
!IsStructurallyEquivalent(EC1->getIdentifier(), EC2->getIdentifier())) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
+ << EC2->getDeclName()
+ << EC2->getInitVal().toString(10);
+ Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
+ << EC1->getDeclName()
+ << EC1->getInitVal().toString(10);
+ }
+ return false;
+ }
+ }
+
+ if (EC2 != EC2End) {
+ if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
<< EC2->getDeclName()
<< EC2->getInitVal().toString(10);
- Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
- << EC1->getDeclName()
- << EC1->getInitVal().toString(10);
- return false;
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_enumerator);
}
- }
-
- if (EC2 != EC2End) {
- Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
- Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
- << EC2->getDeclName()
- << EC2->getInitVal().toString(10);
- Context.Diag1(D1->getLocation(), diag::note_odr_missing_enumerator);
return false;
}
@@ -1056,20 +1135,24 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
TemplateParameterList *Params1,
TemplateParameterList *Params2) {
if (Params1->size() != Params2->size()) {
- Context.Diag2(Params2->getTemplateLoc(),
- diag::err_odr_different_num_template_parameters)
- << Params1->size() << Params2->size();
- Context.Diag1(Params1->getTemplateLoc(),
- diag::note_odr_template_parameter_list);
+ if (Context.Complain) {
+ Context.Diag2(Params2->getTemplateLoc(),
+ diag::err_odr_different_num_template_parameters)
+ << Params1->size() << Params2->size();
+ Context.Diag1(Params1->getTemplateLoc(),
+ diag::note_odr_template_parameter_list);
+ }
return false;
}
for (unsigned I = 0, N = Params1->size(); I != N; ++I) {
if (Params1->getParam(I)->getKind() != Params2->getParam(I)->getKind()) {
- Context.Diag2(Params2->getParam(I)->getLocation(),
- diag::err_odr_different_template_parameter_kind);
- Context.Diag1(Params1->getParam(I)->getLocation(),
- diag::note_odr_template_parameter_here);
+ if (Context.Complain) {
+ Context.Diag2(Params2->getParam(I)->getLocation(),
+ diag::err_odr_different_template_parameter_kind);
+ Context.Diag1(Params1->getParam(I)->getLocation(),
+ diag::note_odr_template_parameter_here);
+ }
return false;
}
@@ -1087,10 +1170,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
TemplateTypeParmDecl *D1,
TemplateTypeParmDecl *D2) {
if (D1->isParameterPack() != D2->isParameterPack()) {
- Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
- << D2->isParameterPack();
- Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
- << D1->isParameterPack();
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ }
return false;
}
@@ -1100,24 +1185,25 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
NonTypeTemplateParmDecl *D1,
NonTypeTemplateParmDecl *D2) {
- // FIXME: Enable once we have variadic templates.
-#if 0
if (D1->isParameterPack() != D2->isParameterPack()) {
- Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
- << D2->isParameterPack();
- Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
- << D1->isParameterPack();
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ }
return false;
}
-#endif
// Check types.
if (!Context.IsStructurallyEquivalent(D1->getType(), D2->getType())) {
- Context.Diag2(D2->getLocation(),
- diag::err_odr_non_type_parameter_type_inconsistent)
- << D2->getType() << D1->getType();
- Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
- << D1->getType();
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(),
+ diag::err_odr_non_type_parameter_type_inconsistent)
+ << D2->getType() << D1->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
+ << D1->getType();
+ }
return false;
}
@@ -1127,17 +1213,16 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
TemplateTemplateParmDecl *D1,
TemplateTemplateParmDecl *D2) {
- // FIXME: Enable once we have variadic templates.
-#if 0
if (D1->isParameterPack() != D2->isParameterPack()) {
- Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
- << D2->isParameterPack();
- Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
- << D1->isParameterPack();
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ }
return false;
}
-#endif
-
+
// Check template parameter lists.
return IsStructurallyEquivalent(Context, D1->getTemplateParameters(),
D2->getTemplateParameters());
@@ -1509,11 +1594,26 @@ QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
ExceptionTypes.push_back(ExceptionType);
}
- FunctionProtoType::ExtProtoInfo EPI = T->getExtProtoInfo();
- EPI.Exceptions = ExceptionTypes.data();
-
+ FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo();
+ FunctionProtoType::ExtProtoInfo ToEPI;
+
+ ToEPI.ExtInfo = FromEPI.ExtInfo;
+ ToEPI.Variadic = FromEPI.Variadic;
+ ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn;
+ ToEPI.TypeQuals = FromEPI.TypeQuals;
+ ToEPI.RefQualifier = FromEPI.RefQualifier;
+ ToEPI.NumExceptions = ExceptionTypes.size();
+ ToEPI.Exceptions = ExceptionTypes.data();
+ ToEPI.ConsumedArguments = FromEPI.ConsumedArguments;
+ ToEPI.ExceptionSpecType = FromEPI.ExceptionSpecType;
+ ToEPI.NoexceptExpr = Importer.Import(FromEPI.NoexceptExpr);
+ ToEPI.ExceptionSpecDecl = cast_or_null<FunctionDecl>(
+ Importer.Import(FromEPI.ExceptionSpecDecl));
+ ToEPI.ExceptionSpecTemplate = cast_or_null<FunctionDecl>(
+ Importer.Import(FromEPI.ExceptionSpecTemplate));
+
return Importer.getToContext().getFunctionType(ToResultType, ArgTypes.data(),
- ArgTypes.size(), EPI);
+ ArgTypes.size(), ToEPI);
}
QualType ASTNodeImporter::VisitParenType(const ParenType *T) {
@@ -1961,11 +2061,20 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
return TemplateArgument(From, ToType);
}
- case TemplateArgument::Declaration:
- if (Decl *To = Importer.Import(From.getAsDecl()))
- return TemplateArgument(To);
+ case TemplateArgument::Declaration: {
+ ValueDecl *FromD = From.getAsDecl();
+ if (ValueDecl *To = cast_or_null<ValueDecl>(Importer.Import(FromD)))
+ return TemplateArgument(To, From.isDeclForReferenceParam());
return TemplateArgument();
-
+ }
+
+ case TemplateArgument::NullPtr: {
+ QualType ToType = Importer.Import(From.getNullPtrType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(ToType, /*isNullPtr*/true);
+ }
+
case TemplateArgument::Template: {
TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
if (ToTemplate.isNull())
@@ -2318,6 +2427,20 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
}
if (RecordDecl *FoundRecord = dyn_cast<RecordDecl>(Found)) {
+ if (D->isAnonymousStructOrUnion() &&
+ FoundRecord->isAnonymousStructOrUnion()) {
+ // If both anonymous structs/unions are in a record context, make sure
+ // they occur in the same location in the context records.
+ if (llvm::Optional<unsigned> Index1
+ = findAnonymousStructOrUnionIndex(D)) {
+ if (llvm::Optional<unsigned> Index2
+ = findAnonymousStructOrUnionIndex(FoundRecord)) {
+ if (*Index1 != *Index2)
+ continue;
+ }
+ }
+ }
+
if (RecordDecl *FoundDef = FoundRecord->getDefinition()) {
if ((SearchName && !D->isCompleteDefinition())
|| (D->isCompleteDefinition() &&
@@ -2491,8 +2614,30 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Import additional name location/type info.
ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+ QualType FromTy = D->getType();
+ bool usedDifferentExceptionSpec = false;
+
+ if (const FunctionProtoType *
+ FromFPT = D->getType()->getAs<FunctionProtoType>()) {
+ FunctionProtoType::ExtProtoInfo FromEPI = FromFPT->getExtProtoInfo();
+ // FunctionProtoType::ExtProtoInfo's ExceptionSpecDecl can point to the
+ // FunctionDecl that we are importing the FunctionProtoType for.
+ // To avoid an infinite recursion when importing, create the FunctionDecl
+ // with a simplified function type and update it afterwards.
+ if (FromEPI.ExceptionSpecDecl || FromEPI.ExceptionSpecTemplate ||
+ FromEPI.NoexceptExpr) {
+ FunctionProtoType::ExtProtoInfo DefaultEPI;
+ FromTy = Importer.getFromContext().getFunctionType(
+ FromFPT->getResultType(),
+ FromFPT->arg_type_begin(),
+ FromFPT->arg_type_end() - FromFPT->arg_type_begin(),
+ DefaultEPI);
+ usedDifferentExceptionSpec = true;
+ }
+ }
+
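A minimal case of the self-reference this guards against (an assumed
illustration): the lazily-computed exception specification of a defaulted
member is described by an ExceptionSpecDecl that points back at the member
itself.

    struct S {
      S() = default;  // implicit exception spec; its FunctionProtoType may
                      // name this very constructor as its ExceptionSpecDecl
    };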
// Import the type.
- QualType T = Importer.Import(D->getType());
+ QualType T = Importer.Import(FromTy);
if (T.isNull())
return 0;
@@ -2572,6 +2717,14 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
ToFunction->setParams(Parameters);
+ if (usedDifferentExceptionSpec) {
+ // Update FunctionProtoType::ExtProtoInfo.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+ ToFunction->setType(T);
+ }
+
// FIXME: Other bits to merge?
// Add this function to the lexical context.
@@ -2596,6 +2749,25 @@ Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
return VisitCXXMethodDecl(D);
}
+static unsigned getFieldIndex(Decl *F) {
+ RecordDecl *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
+ if (!Owner)
+ return 0;
+
+ unsigned Index = 1;
+ for (DeclContext::decl_iterator D = Owner->noload_decls_begin(),
+ DEnd = Owner->noload_decls_end();
+ D != DEnd; ++D) {
+ if (*D == F)
+ return Index;
+
+ if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
+ ++Index;
+ }
+
+ return Index;
+}
+
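A sketch of the ambiguity the index resolves: unnamed fields all carry an
empty DeclarationName, so position is the only reliable way to tell them
apart during import.

    struct Pair {
      union { int i; float f; };  // unnamed field, getFieldIndex() == 1
      union { long l; };          // unnamed field, getFieldIndex() == 2
    };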
Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
@@ -2609,6 +2781,10 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
DC->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecls[I])) {
+ // For anonymous fields, match up by index.
+ if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
+ continue;
+
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundField->getType())) {
Importer.Imported(D, FoundField);
@@ -2642,6 +2818,7 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
ToField->setLexicalDeclContext(LexicalDC);
if (ToField->hasInClassInitializer())
ToField->setInClassInitializer(D->getInClassInitializer());
+ ToField->setImplicit(D->isImplicit());
Importer.Imported(D, ToField);
LexicalDC->addDeclInternal(ToField);
return ToField;
@@ -2661,6 +2838,10 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (IndirectFieldDecl *FoundField
= dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
+ // For anonymous indirect fields, match up by index.
+ if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
+ continue;
+
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundField->getType(),
Name)) {
@@ -3025,7 +3206,7 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
ResultTy, ResultTInfo, DC,
D->isInstanceMethod(),
D->isVariadic(),
- D->isSynthesized(),
+ D->isPropertyAccessor(),
D->isImplicit(),
D->isDefined(),
D->getImplementationControl(),
@@ -4027,7 +4208,8 @@ Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
T, E->getValueKind(),
E->getObjectKind(),
- Importer.Import(E->getOperatorLoc()));
+ Importer.Import(E->getOperatorLoc()),
+ E->isFPContractable());
}
Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
@@ -4056,7 +4238,8 @@ Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
T, E->getValueKind(),
E->getObjectKind(),
CompLHSType, CompResultType,
- Importer.Import(E->getOperatorLoc()));
+ Importer.Import(E->getOperatorLoc()),
+ E->isFPContractable());
}
static bool ImportCastPath(CastExpr *E, CXXCastPath &Path) {
@@ -4479,7 +4662,8 @@ FileID ASTImporter::Import(FileID FromID) {
llvm::MemoryBuffer *ToBuf
= llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
FromBuf->getBufferIdentifier());
- ToID = ToSM.createFileIDForMemBuffer(ToBuf);
+ ToID = ToSM.createFileIDForMemBuffer(ToBuf,
+ FromSLoc.getFile().getFileCharacteristic());
}
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
index cf3913b..213b214 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/SetVector.h"
#include <algorithm>
#include <set>
@@ -25,13 +26,9 @@ void CXXBasePaths::ComputeDeclsFound() {
assert(NumDeclsFound == 0 && !DeclsFound &&
"Already computed the set of declarations");
- SmallVector<NamedDecl *, 8> Decls;
+ llvm::SetVector<NamedDecl *, SmallVector<NamedDecl *, 8> > Decls;
for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
- Decls.push_back(*Path->Decls.first);
-
- // Eliminate duplicated decls.
- llvm::array_pod_sort(Decls.begin(), Decls.end());
- Decls.erase(std::unique(Decls.begin(), Decls.end()), Decls.end());
+ Decls.insert(*Path->Decls.first);
NumDeclsFound = Decls.size();
DeclsFound = new NamedDecl * [NumDeclsFound];
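The SetVector swap above drops the sort+unique pass because duplicates are
rejected at insertion while first-insertion order is kept. A self-contained
sketch, assuming the usual LLVM headers:

    #include "llvm/ADT/SetVector.h"
    #include "llvm/ADT/SmallVector.h"

    void demo() {
      llvm::SetVector<int, llvm::SmallVector<int, 8> > Seen;
      Seen.insert(1);
      Seen.insert(2);
      Seen.insert(1);  // duplicate: rejected, order preserved
      // Seen.size() == 2; iteration yields 1 then 2
    }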
@@ -258,7 +255,7 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
}
} else if (VisitBase) {
CXXRecordDecl *BaseRecord
- = cast<CXXRecordDecl>(BaseSpec->getType()->getAs<RecordType>()
+ = cast<CXXRecordDecl>(BaseSpec->getType()->castAs<RecordType>()
->getDecl());
if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) {
// C++ [class.member.lookup]p2:
@@ -365,8 +362,8 @@ bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
void *BaseRecord) {
assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
"User data for FindBaseClass is not canonical!");
- return Specifier->getType()->getAs<RecordType>()->getDecl()
- ->getCanonicalDecl() == BaseRecord;
+ return Specifier->getType()->castAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
}
bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
@@ -375,14 +372,15 @@ bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
"User data for FindBaseClass is not canonical!");
return Specifier->isVirtual() &&
- Specifier->getType()->getAs<RecordType>()->getDecl()
- ->getCanonicalDecl() == BaseRecord;
+ Specifier->getType()->castAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
}
bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *Name) {
- RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+ RecordDecl *BaseRecord =
+ Specifier->getType()->castAs<RecordType>()->getDecl();
DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
for (Path.Decls = BaseRecord->lookup(N);
@@ -398,7 +396,8 @@ bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *Name) {
- RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+ RecordDecl *BaseRecord =
+ Specifier->getType()->castAs<RecordType>()->getDecl();
const unsigned IDNS = IDNS_Ordinary | IDNS_Tag | IDNS_Member;
DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
@@ -416,7 +415,8 @@ bool CXXRecordDecl::
FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *Name) {
- RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+ RecordDecl *BaseRecord =
+ Specifier->getType()->castAs<RecordType>()->getDecl();
DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
for (Path.Decls = BaseRecord->lookup(N);
@@ -694,7 +694,7 @@ AddIndirectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
"Cannot get indirect primary bases for class with dependent bases.");
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
// Only bases with virtual bases participate in computing the
// indirect primary virtual base classes.
@@ -717,7 +717,7 @@ CXXRecordDecl::getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const {
"Cannot get indirect primary bases for class with dependent bases.");
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
// Only bases with virtual bases participate in computing the
// indirect primary virtual base classes.
diff --git a/contrib/llvm/tools/clang/lib/AST/Comment.cpp b/contrib/llvm/tools/clang/lib/AST/Comment.cpp
index 8a711f0..361f8ac 100644
--- a/contrib/llvm/tools/clang/lib/AST/Comment.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Comment.cpp
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -37,11 +38,12 @@ void Comment::dump() const {
// in CommentDumper.cpp, that object file would be removed by the linker because
// none of its functions are referenced by other object files, despite the
// LLVM_ATTRIBUTE_USED.
- dump(llvm::errs(), NULL);
+ dump(llvm::errs(), NULL, NULL);
}
-void Comment::dump(SourceManager &SM) const {
- dump(llvm::errs(), &SM);
+void Comment::dump(const ASTContext &Context) const {
+ dump(llvm::errs(), &Context.getCommentCommandTraits(),
+ &Context.getSourceManager());
}
namespace {
@@ -149,13 +151,14 @@ void DeclInfo::fill() {
ParamVars = ArrayRef<const ParmVarDecl *>();
TemplateParameters = NULL;
- if (!ThisDecl) {
+ if (!CommentDecl) {
    // If there is no declaration, the defaults are our only guess.
IsFilled = true;
return;
}
-
- Decl::Kind K = ThisDecl->getKind();
+ CurrentDecl = CommentDecl;
+
+ Decl::Kind K = CommentDecl->getKind();
switch (K) {
default:
      // Defaults should be good for declarations we don't handle explicitly.
@@ -165,7 +168,7 @@ void DeclInfo::fill() {
case Decl::CXXConstructor:
case Decl::CXXDestructor:
case Decl::CXXConversion: {
- const FunctionDecl *FD = cast<FunctionDecl>(ThisDecl);
+ const FunctionDecl *FD = cast<FunctionDecl>(CommentDecl);
Kind = FunctionKind;
ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
FD->getNumParams());
@@ -179,14 +182,14 @@ void DeclInfo::fill() {
if (K == Decl::CXXMethod || K == Decl::CXXConstructor ||
K == Decl::CXXDestructor || K == Decl::CXXConversion) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(ThisDecl);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CommentDecl);
IsInstanceMethod = MD->isInstance();
IsClassMethod = !IsInstanceMethod;
}
break;
}
case Decl::ObjCMethod: {
- const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(ThisDecl);
+ const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(CommentDecl);
Kind = FunctionKind;
ParamVars = ArrayRef<const ParmVarDecl *>(MD->param_begin(),
MD->param_size());
@@ -197,7 +200,7 @@ void DeclInfo::fill() {
break;
}
case Decl::FunctionTemplate: {
- const FunctionTemplateDecl *FTD = cast<FunctionTemplateDecl>(ThisDecl);
+ const FunctionTemplateDecl *FTD = cast<FunctionTemplateDecl>(CommentDecl);
Kind = FunctionKind;
TemplateKind = Template;
const FunctionDecl *FD = FTD->getTemplatedDecl();
@@ -208,7 +211,7 @@ void DeclInfo::fill() {
break;
}
case Decl::ClassTemplate: {
- const ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(ThisDecl);
+ const ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(CommentDecl);
Kind = ClassKind;
TemplateKind = Template;
TemplateParameters = CTD->getTemplateParameters();
@@ -216,7 +219,7 @@ void DeclInfo::fill() {
}
case Decl::ClassTemplatePartialSpecialization: {
const ClassTemplatePartialSpecializationDecl *CTPSD =
- cast<ClassTemplatePartialSpecializationDecl>(ThisDecl);
+ cast<ClassTemplatePartialSpecializationDecl>(CommentDecl);
Kind = ClassKind;
TemplateKind = TemplatePartialSpecialization;
TemplateParameters = CTPSD->getTemplateParameters();
@@ -240,12 +243,55 @@ void DeclInfo::fill() {
case Decl::Namespace:
Kind = NamespaceKind;
break;
- case Decl::Typedef:
+ case Decl::Typedef: {
+ Kind = TypedefKind;
+ // If this is a typedef to something we consider a function, extract
+ // arguments and return type.
+ const TypedefDecl *TD = cast<TypedefDecl>(CommentDecl);
+ const TypeSourceInfo *TSI = TD->getTypeSourceInfo();
+ if (!TSI)
+ break;
+ TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
+ while (true) {
+ TL = TL.IgnoreParens();
+ // Look through qualified types.
+ if (QualifiedTypeLoc *QualifiedTL = dyn_cast<QualifiedTypeLoc>(&TL)) {
+ TL = QualifiedTL->getUnqualifiedLoc();
+ continue;
+ }
+ // Look through pointer types.
+ if (PointerTypeLoc *PointerTL = dyn_cast<PointerTypeLoc>(&TL)) {
+ TL = PointerTL->getPointeeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ if (BlockPointerTypeLoc *BlockPointerTL =
+ dyn_cast<BlockPointerTypeLoc>(&TL)) {
+ TL = BlockPointerTL->getPointeeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ if (MemberPointerTypeLoc *MemberPointerTL =
+ dyn_cast<MemberPointerTypeLoc>(&TL)) {
+ TL = MemberPointerTL->getPointeeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ // Is this a typedef for a function type?
+ if (FunctionTypeLoc *FTL = dyn_cast<FunctionTypeLoc>(&TL)) {
+ Kind = FunctionKind;
+ ArrayRef<ParmVarDecl *> Params = FTL->getParams();
+ ParamVars = ArrayRef<const ParmVarDecl *>(Params.data(),
+ Params.size());
+ ResultType = FTL->getResultLoc().getType();
+ break;
+ }
+ break;
+ }
+ break;
+ }
case Decl::TypeAlias:
Kind = TypedefKind;
break;
case Decl::TypeAliasTemplate: {
- const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(ThisDecl);
+ const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(CommentDecl);
Kind = TypedefKind;
TemplateKind = Template;
TemplateParameters = TAT->getTemplateParameters();
@@ -259,6 +305,25 @@ void DeclInfo::fill() {
IsFilled = true;
}
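With the new Typedef case, a typedef that ultimately names a function type is
classified as FunctionKind, so \param comments on it can resolve (a sketch
with a hypothetical callback):

    /// \param ctx opaque user data forwarded to the callback
    typedef int (*Callback)(void *ctx);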
+StringRef ParamCommandComment::getParamName(const FullComment *FC) const {
+ assert(isParamIndexValid());
+ return FC->getThisDeclInfo()->ParamVars[getParamIndex()]->getName();
+}
+
+StringRef TParamCommandComment::getParamName(const FullComment *FC) const {
+ assert(isPositionValid());
+ const TemplateParameterList *TPL = FC->getThisDeclInfo()->TemplateParameters;
+ for (unsigned i = 0, e = getDepth(); i != e; ++i) {
+ if (i == e-1)
+ return TPL->getParam(getIndex(i))->getName();
+ const NamedDecl *Param = TPL->getParam(getIndex(i));
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(Param))
+ TPL = TTP->getTemplateParameters();
+ }
+ return "";
+}
+
} // end namespace comments
} // end namespace clang
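A sketch of the depth/index walk in TParamCommandComment::getParamName, using
a hypothetical template: position {0} names TT itself, while {0, 1} descends
into TT's own parameter list and names B.

    template <template <class A, class B> class TT> struct Holder {};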
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp b/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp
index 0aebc1e..95daa7e 100644
--- a/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp
@@ -15,6 +15,11 @@ namespace clang {
namespace comments {
namespace {
+inline bool isWhitespace(char C) {
+ return C == ' ' || C == '\n' || C == '\r' ||
+ C == '\t' || C == '\f' || C == '\v';
+}
+
/// Convert all whitespace into spaces, remove leading and trailing spaces,
/// compress multiple spaces into one.
void cleanupBrief(std::string &S) {
@@ -23,8 +28,7 @@ void cleanupBrief(std::string &S) {
for (std::string::iterator I = S.begin(), E = S.end();
I != E; ++I) {
const char C = *I;
- if (C == ' ' || C == '\n' || C == '\r' ||
- C == '\t' || C == '\v' || C == '\f') {
+ if (isWhitespace(C)) {
if (!PrevWasSpace) {
*O++ = ' ';
PrevWasSpace = true;
@@ -40,6 +44,15 @@ void cleanupBrief(std::string &S) {
S.resize(O - S.begin());
}
+
+bool isWhitespace(StringRef Text) {
+ for (StringRef::const_iterator I = Text.begin(), E = Text.end();
+ I != E; ++I) {
+ if (!isWhitespace(*I))
+ return false;
+ }
+ return true;
+}
} // unnamed namespace
BriefParser::BriefParser(Lexer &L, const CommandTraits &Traits) :
@@ -66,19 +79,23 @@ std::string BriefParser::Parse() {
}
if (Tok.is(tok::command)) {
- StringRef Name = Tok.getCommandName();
- if (Traits.isBriefCommand(Name)) {
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
+ if (Info->IsBriefCommand) {
FirstParagraphOrBrief.clear();
InBrief = true;
ConsumeToken();
continue;
}
- if (Traits.isReturnsCommand(Name)) {
+ if (Info->IsReturnsCommand) {
InReturns = true;
+ InBrief = false;
+ InFirstParagraph = false;
ReturnsParagraph += "Returns ";
+ ConsumeToken();
+ continue;
}
// Block commands implicitly start a new paragraph.
- if (Traits.isBlockCommand(Name)) {
+ if (Info->IsBlockCommand) {
// We found an implicit paragraph end.
InFirstParagraph = false;
if (InBrief)
@@ -93,13 +110,29 @@ std::string BriefParser::Parse() {
ReturnsParagraph += ' ';
ConsumeToken();
+    // If the next token is whitespace-only text, ignore it. Thus we allow
+    // two paragraphs to be separated by a line that contains only whitespace.
+ //
+ // We don't need to add a space to the parsed text because we just added
+ // a space for the newline.
+ if (Tok.is(tok::text)) {
+ if (isWhitespace(Tok.getText()))
+ ConsumeToken();
+ }
+
if (Tok.is(tok::newline)) {
ConsumeToken();
- // We found a paragraph end.
- InFirstParagraph = false;
- InReturns = false;
+      // We found a paragraph end. This ends the brief description if the
+      // \\brief command or its equivalent was used explicitly.
+      // Stop scanning text because an explicit \\brief paragraph is the
+      // preferred one.
if (InBrief)
break;
+ // End first paragraph if we found some non-whitespace text.
+ if (InFirstParagraph && !isWhitespace(FirstParagraphOrBrief))
+ InFirstParagraph = false;
+ // End the \\returns paragraph because we found the paragraph end.
+ InReturns = false;
}
continue;
}
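A doc comment exercising the new rules (a sketch): an explicit \brief
paragraph now ends at the first paragraph break, and a line containing only
whitespace counts as that break.

    /// \brief Allocates a zeroed buffer.
    ///
    /// This sentence lands in the main description, not the brief;
    /// \returns begins its own "Returns ..." paragraph.
    void *allocZeroed(unsigned Size);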
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp b/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
index dc7a0bd..e7e40fd 100644
--- a/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
@@ -8,125 +8,64 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/CommentCommandTraits.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/STLExtras.h"
namespace clang {
namespace comments {
-// TODO: tablegen
+#include "clang/AST/CommentCommandInfo.inc"
-bool CommandTraits::isVerbatimBlockCommand(StringRef StartName,
- StringRef &EndName) const {
- const char *Result = llvm::StringSwitch<const char *>(StartName)
- .Case("code", "endcode")
- .Case("verbatim", "endverbatim")
- .Case("htmlonly", "endhtmlonly")
- .Case("latexonly", "endlatexonly")
- .Case("xmlonly", "endxmlonly")
- .Case("manonly", "endmanonly")
- .Case("rtfonly", "endrtfonly")
+CommandTraits::CommandTraits(llvm::BumpPtrAllocator &Allocator) :
+ NextID(llvm::array_lengthof(Commands)), Allocator(Allocator)
+{ }
- .Case("dot", "enddot")
- .Case("msc", "endmsc")
-
- .Case("f$", "f$") // Inline LaTeX formula
- .Case("f[", "f]") // Displayed LaTeX formula
- .Case("f{", "f}") // LaTeX environment
-
- .Default(NULL);
-
- if (Result) {
- EndName = Result;
- return true;
- }
-
- for (VerbatimBlockCommandVector::const_iterator
- I = VerbatimBlockCommands.begin(),
- E = VerbatimBlockCommands.end();
- I != E; ++I)
- if (I->StartName == StartName) {
- EndName = I->EndName;
- return true;
- }
-
- return false;
+const CommandInfo *CommandTraits::getCommandInfoOrNULL(StringRef Name) const {
+ if (const CommandInfo *Info = getBuiltinCommandInfo(Name))
+ return Info;
+ return getRegisteredCommandInfo(Name);
}
-bool CommandTraits::isVerbatimLineCommand(StringRef Name) const {
- bool Result = isDeclarationCommand(Name) || llvm::StringSwitch<bool>(Name)
- .Case("defgroup", true)
- .Case("ingroup", true)
- .Case("addtogroup", true)
- .Case("weakgroup", true)
- .Case("name", true)
-
- .Case("section", true)
- .Case("subsection", true)
- .Case("subsubsection", true)
- .Case("paragraph", true)
-
- .Case("mainpage", true)
- .Case("subpage", true)
- .Case("ref", true)
+const CommandInfo *CommandTraits::getCommandInfo(unsigned CommandID) const {
+ if (const CommandInfo *Info = getBuiltinCommandInfo(CommandID))
+ return Info;
+ return getRegisteredCommandInfo(CommandID);
+}
- .Default(false);
+const CommandInfo *CommandTraits::registerUnknownCommand(StringRef CommandName) {
+ char *Name = Allocator.Allocate<char>(CommandName.size() + 1);
+ memcpy(Name, CommandName.data(), CommandName.size());
+ Name[CommandName.size()] = '\0';
- if (Result)
- return true;
+ // Value-initialize (=zero-initialize in this case) a new CommandInfo.
+ CommandInfo *Info = new (Allocator) CommandInfo();
+ Info->Name = Name;
+ Info->ID = NextID++;
+ Info->IsUnknownCommand = true;
- for (VerbatimLineCommandVector::const_iterator
- I = VerbatimLineCommands.begin(),
- E = VerbatimLineCommands.end();
- I != E; ++I)
- if (I->Name == Name)
- return true;
+ RegisteredCommands.push_back(Info);
- return false;
+ return Info;
}
-bool CommandTraits::isDeclarationCommand(StringRef Name) const {
- return llvm::StringSwitch<bool>(Name)
- // Doxygen commands.
- .Case("fn", true)
- .Case("var", true)
- .Case("property", true)
- .Case("typedef", true)
-
- .Case("overload", true)
-
- // HeaderDoc commands.
- .Case("class", true)
- .Case("interface", true)
- .Case("protocol", true)
- .Case("category", true)
- .Case("template", true)
- .Case("function", true)
- .Case("method", true)
- .Case("callback", true)
- .Case("var", true)
- .Case("const", true)
- .Case("constant", true)
- .Case("property", true)
- .Case("struct", true)
- .Case("union", true)
- .Case("typedef", true)
- .Case("enum", true)
-
- .Default(false);
+const CommandInfo *CommandTraits::getBuiltinCommandInfo(
+ unsigned CommandID) {
+ if (CommandID < llvm::array_lengthof(Commands))
+ return &Commands[CommandID];
+ return NULL;
}
-void CommandTraits::addVerbatimBlockCommand(StringRef StartName,
- StringRef EndName) {
- VerbatimBlockCommand VBC;
- VBC.StartName = StartName;
- VBC.EndName = EndName;
- VerbatimBlockCommands.push_back(VBC);
+const CommandInfo *CommandTraits::getRegisteredCommandInfo(
+ StringRef Name) const {
+ for (unsigned i = 0, e = RegisteredCommands.size(); i != e; ++i) {
+ if (RegisteredCommands[i]->Name == Name)
+ return RegisteredCommands[i];
+ }
+ return NULL;
}
-void CommandTraits::addVerbatimLineCommand(StringRef Name) {
- VerbatimLineCommand VLC;
- VLC.Name = Name;
- VerbatimLineCommands.push_back(VLC);
+const CommandInfo *CommandTraits::getRegisteredCommandInfo(
+ unsigned CommandID) const {
+ return RegisteredCommands[CommandID - llvm::array_lengthof(Commands)];
}
} // end namespace comments
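A sketch of the table-driven lookup and registration flow (assuming the usual
headers and an allocator at hand): unknown names receive IDs past the builtin
table, and getCommandInfo(ID) round-trips to the same record.

    #include "clang/AST/CommentCommandTraits.h"
    #include "llvm/Support/Allocator.h"

    void demo() {
      llvm::BumpPtrAllocator Alloc;
      clang::comments::CommandTraits Traits(Alloc);
      const clang::comments::CommandInfo *Info =
          Traits.getCommandInfoOrNULL("madeup");  // NULL: not a builtin
      if (!Info)
        Info = Traits.registerUnknownCommand("madeup");
      // Info->IsUnknownCommand is set; Traits.getCommandInfo(Info->ID)
      // returns the same record.
    }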
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp b/contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp
index dffc823..19d24b2 100644
--- a/contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp
@@ -16,12 +16,20 @@ namespace comments {
namespace {
class CommentDumper: public comments::ConstCommentVisitor<CommentDumper> {
raw_ostream &OS;
- SourceManager *SM;
+ const CommandTraits *Traits;
+ const SourceManager *SM;
+
+ /// The \c FullComment parent of the comment being dumped.
+ const FullComment *FC;
+
unsigned IndentLevel;
public:
- CommentDumper(raw_ostream &OS, SourceManager *SM) :
- OS(OS), SM(SM), IndentLevel(0)
+ CommentDumper(raw_ostream &OS,
+ const CommandTraits *Traits,
+ const SourceManager *SM,
+ const FullComment *FC) :
+ OS(OS), Traits(Traits), SM(SM), FC(FC), IndentLevel(0)
{ }
void dumpIndent() const {
@@ -56,6 +64,15 @@ public:
void visitVerbatimLineComment(const VerbatimLineComment *C);
void visitFullComment(const FullComment *C);
+
+ const char *getCommandName(unsigned CommandID) {
+ if (Traits)
+ return Traits->getCommandInfo(CommandID)->Name;
+ const CommandInfo *Info = CommandTraits::getBuiltinCommandInfo(CommandID);
+ if (Info)
+ return Info->Name;
+ return "<not a builtin command>";
+ }
};
void CommentDumper::dumpSourceRange(const Comment *C) {
@@ -107,7 +124,7 @@ void CommentDumper::visitTextComment(const TextComment *C) {
void CommentDumper::visitInlineCommandComment(const InlineCommandComment *C) {
dumpComment(C);
- OS << " Name=\"" << C->getCommandName() << "\"";
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
switch (C->getRenderKind()) {
case InlineCommandComment::RenderNormal:
OS << " RenderNormal";
@@ -155,7 +172,7 @@ void CommentDumper::visitParagraphComment(const ParagraphComment *C) {
void CommentDumper::visitBlockCommandComment(const BlockCommandComment *C) {
dumpComment(C);
- OS << " Name=\"" << C->getCommandName() << "\"";
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
}
@@ -170,8 +187,12 @@ void CommentDumper::visitParamCommandComment(const ParamCommandComment *C) {
else
OS << " implicitly";
- if (C->hasParamName())
- OS << " Param=\"" << C->getParamName() << "\"";
+ if (C->hasParamName()) {
+ if (C->isParamIndexValid())
+ OS << " Param=\"" << C->getParamName(FC) << "\"";
+ else
+ OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
+ }
if (C->isParamIndexValid())
OS << " ParamIndex=" << C->getParamIndex();
@@ -181,7 +202,10 @@ void CommentDumper::visitTParamCommandComment(const TParamCommandComment *C) {
dumpComment(C);
if (C->hasParamName()) {
- OS << " Param=\"" << C->getParamName() << "\"";
+ if (C->isPositionValid())
+ OS << " Param=\"" << C->getParamName(FC) << "\"";
+ else
+ OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
}
if (C->isPositionValid()) {
@@ -198,7 +222,7 @@ void CommentDumper::visitTParamCommandComment(const TParamCommandComment *C) {
void CommentDumper::visitVerbatimBlockComment(const VerbatimBlockComment *C) {
dumpComment(C);
- OS << " Name=\"" << C->getCommandName() << "\""
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\""
" CloseName=\"" << C->getCloseName() << "\"";
}
@@ -220,8 +244,10 @@ void CommentDumper::visitFullComment(const FullComment *C) {
} // unnamed namespace
-void Comment::dump(llvm::raw_ostream &OS, SourceManager *SM) const {
- CommentDumper D(llvm::errs(), SM);
+void Comment::dump(llvm::raw_ostream &OS, const CommandTraits *Traits,
+ const SourceManager *SM) const {
+ const FullComment *FC = dyn_cast<FullComment>(this);
+ CommentDumper D(llvm::errs(), Traits, SM, FC);
D.dumpSubtree(this);
llvm::errs() << '\n';
}
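
The `getCommandName` helper added above degrades gracefully when the dumper is invoked without a `CommandTraits` object: full traits first (which also knows runtime-registered commands), the static builtin table second, and a placeholder string last. A compressed sketch of that fallback chain, using stand-in types rather than the clang API:

```cpp
#include <cstdio>

struct Info { const char *Name; };

static const Info Builtins[] = {{"brief"}, {"returns"}};

static const Info *lookupBuiltin(unsigned ID) {
  return ID < sizeof(Builtins) / sizeof(Builtins[0]) ? &Builtins[ID] : 0;
}

// Prefer full traits when available; fall back to builtins, then a marker.
static const char *commandName(const Info *(*Traits)(unsigned), unsigned ID) {
  if (Traits)
    return Traits(ID)->Name;
  if (const Info *I = lookupBuiltin(ID))
    return I->Name;
  return "<not a builtin command>";
}

int main() {
  std::printf("%s\n", commandName(0, 1)); // prints "returns"
  std::printf("%s\n", commandName(0, 7)); // prints "<not a builtin command>"
}
```
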
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
index b6516ec..31a09f7 100644
--- a/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
@@ -28,6 +28,9 @@ bool isHTMLHexCharacterReferenceCharacter(char C) {
(C >= 'a' && C <= 'f') ||
(C >= 'A' && C <= 'F');
}
+
+#include "clang/AST/CommentHTMLTags.inc"
+
} // unnamed namespace
StringRef Lexer::resolveHTMLNamedCharacterReference(StringRef Name) const {
@@ -223,6 +226,11 @@ bool isWhitespace(const char *BufferPtr, const char *BufferEnd) {
return skipWhitespace(BufferPtr, BufferEnd) == BufferEnd;
}
+bool isCommandNameStartCharacter(char C) {
+ return (C >= 'a' && C <= 'z') ||
+ (C >= 'A' && C <= 'Z');
+}
+
bool isCommandNameCharacter(char C) {
return (C >= 'a' && C <= 'z') ||
(C >= 'A' && C <= 'Z') ||
@@ -337,7 +345,7 @@ void Lexer::lexCommentText(Token &T) {
}
// Don't make zero-length commands.
- if (!isCommandNameCharacter(*TokenPtr)) {
+ if (!isCommandNameStartCharacter(*TokenPtr)) {
formTextToken(T, TokenPtr);
return;
}
@@ -356,18 +364,23 @@ void Lexer::lexCommentText(Token &T) {
}
const StringRef CommandName(BufferPtr + 1, Length);
- StringRef EndName;
- if (Traits.isVerbatimBlockCommand(CommandName, EndName)) {
- setupAndLexVerbatimBlock(T, TokenPtr, *BufferPtr, EndName);
+ const CommandInfo *Info = Traits.getCommandInfoOrNULL(CommandName);
+ if (!Info) {
+ formTokenWithChars(T, TokenPtr, tok::unknown_command);
+ T.setUnknownCommandName(CommandName);
+ return;
+ }
+ if (Info->IsVerbatimBlockCommand) {
+ setupAndLexVerbatimBlock(T, TokenPtr, *BufferPtr, Info);
return;
}
- if (Traits.isVerbatimLineCommand(CommandName)) {
- setupAndLexVerbatimLine(T, TokenPtr);
+ if (Info->IsVerbatimLineCommand) {
+ setupAndLexVerbatimLine(T, TokenPtr, Info);
return;
}
formTokenWithChars(T, TokenPtr, tok::command);
- T.setCommandName(CommandName);
+ T.setCommandID(Info->getID());
return;
}
@@ -420,14 +433,15 @@ void Lexer::lexCommentText(Token &T) {
void Lexer::setupAndLexVerbatimBlock(Token &T,
const char *TextBegin,
- char Marker, StringRef EndName) {
+ char Marker, const CommandInfo *Info) {
+ assert(Info->IsVerbatimBlockCommand);
+
VerbatimBlockEndCommandName.clear();
VerbatimBlockEndCommandName.append(Marker == '\\' ? "\\" : "@");
- VerbatimBlockEndCommandName.append(EndName);
+ VerbatimBlockEndCommandName.append(Info->EndCommandName);
- StringRef Name(BufferPtr + 1, TextBegin - (BufferPtr + 1));
formTokenWithChars(T, TextBegin, tok::verbatim_block_begin);
- T.setVerbatimBlockName(Name);
+ T.setVerbatimBlockID(Info->getID());
// If there is a newline following the verbatim opening command, skip the
  // newline so that we don't create a tok::verbatim_block_line with empty
@@ -468,7 +482,7 @@ again:
const char *End = BufferPtr + VerbatimBlockEndCommandName.size();
StringRef Name(BufferPtr + 1, End - (BufferPtr + 1));
formTokenWithChars(T, End, tok::verbatim_block_end);
- T.setVerbatimBlockName(Name);
+ T.setVerbatimBlockID(Traits.getCommandInfo(Name)->getID());
State = LS_Normal;
return;
} else {
@@ -498,10 +512,11 @@ void Lexer::lexVerbatimBlockBody(Token &T) {
lexVerbatimBlockFirstLine(T);
}
-void Lexer::setupAndLexVerbatimLine(Token &T, const char *TextBegin) {
- const StringRef Name(BufferPtr + 1, TextBegin - BufferPtr - 1);
+void Lexer::setupAndLexVerbatimLine(Token &T, const char *TextBegin,
+ const CommandInfo *Info) {
+ assert(Info->IsVerbatimLineCommand);
formTokenWithChars(T, TextBegin, tok::verbatim_line_name);
- T.setVerbatimLineName(Name);
+ T.setVerbatimLineID(Info->getID());
State = LS_VerbatimLineText;
}
@@ -585,8 +600,12 @@ void Lexer::setupAndLexHTMLStartTag(Token &T) {
assert(BufferPtr[0] == '<' &&
isHTMLIdentifierStartingCharacter(BufferPtr[1]));
const char *TagNameEnd = skipHTMLIdentifier(BufferPtr + 2, CommentEnd);
-
StringRef Name(BufferPtr + 1, TagNameEnd - (BufferPtr + 1));
+ if (!isHTMLTagName(Name)) {
+ formTextToken(T, TagNameEnd);
+ return;
+ }
+
formTokenWithChars(T, TagNameEnd, tok::html_start_tag);
T.setHTMLTagStartName(Name);
@@ -665,11 +684,16 @@ void Lexer::setupAndLexHTMLEndTag(Token &T) {
const char *TagNameBegin = skipWhitespace(BufferPtr + 2, CommentEnd);
const char *TagNameEnd = skipHTMLIdentifier(TagNameBegin, CommentEnd);
+ StringRef Name(TagNameBegin, TagNameEnd - TagNameBegin);
+ if (!isHTMLTagName(Name)) {
+ formTextToken(T, TagNameEnd);
+ return;
+ }
const char *End = skipWhitespace(TagNameEnd, CommentEnd);
formTokenWithChars(T, End, tok::html_end_tag);
- T.setHTMLTagEndName(StringRef(TagNameBegin, TagNameEnd - TagNameBegin));
+ T.setHTMLTagEndName(Name);
if (BufferPtr != CommentEnd && *BufferPtr == '>')
State = LS_HTMLEndTag;
@@ -683,11 +707,11 @@ void Lexer::lexHTMLEndTag(Token &T) {
}
Lexer::Lexer(llvm::BumpPtrAllocator &Allocator, const CommandTraits &Traits,
- SourceLocation FileLoc, const CommentOptions &CommOpts,
+ SourceLocation FileLoc,
const char *BufferStart, const char *BufferEnd):
Allocator(Allocator), Traits(Traits),
BufferStart(BufferStart), BufferEnd(BufferEnd),
- FileLoc(FileLoc), CommOpts(CommOpts), BufferPtr(BufferStart),
+ FileLoc(FileLoc), BufferPtr(BufferStart),
CommentState(LCS_BeforeComment), State(LS_Normal) {
}
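
The new `isCommandNameStartCharacter` predicate tightens what counts as a command: a digit may continue a name but can no longer begin one, so input like `\3` now falls back to a plain text token instead of forming a digit-led command. A standalone sketch of that scan, assuming a raw character buffer rather than the clang `Lexer`:

```cpp
#include <cassert>
#include <string>

static bool isCmdStart(char C) {
  return (C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z');
}

static bool isCmdChar(char C) {
  return isCmdStart(C) || (C >= '0' && C <= '9');
}

// Returns the command name after the '\' or '@' introducer, or an empty
// string when the input should be lexed as plain text instead.
static std::string lexCommand(const char *P, const char *End) {
  assert(P != End && (*P == '\\' || *P == '@'));
  ++P;
  if (P == End || !isCmdStart(*P))
    return std::string(); // no zero-length or digit-led commands
  const char *Begin = P;
  while (P != End && isCmdChar(*P))
    ++P;
  return std::string(Begin, P);
}

int main() {
  const char Buf[] = "\\param1";
  assert(lexCommand(Buf, Buf + sizeof(Buf) - 1) == "param1");
  const char Bad[] = "\\3";
  assert(lexCommand(Bad, Bad + sizeof(Bad) - 1).empty());
}
```
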
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp b/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp
index 43abf6a..d0a8474 100644
--- a/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp
@@ -132,8 +132,8 @@ class TextTokenRetokenizer {
Result.setKind(tok::text);
Result.setLength(TokLength);
#ifndef NDEBUG
- Result.TextPtr1 = "<UNSET>";
- Result.TextLen1 = 7;
+ Result.TextPtr = "<UNSET>";
+ Result.IntVal = 7;
#endif
Result.setText(Text);
}
@@ -312,26 +312,26 @@ BlockCommandComment *Parser::parseBlockCommand() {
BlockCommandComment *BC;
bool IsParam = false;
bool IsTParam = false;
- unsigned NumArgs = 0;
- if (Traits.isParamCommand(Tok.getCommandName())) {
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
+ if (Info->IsParamCommand) {
IsParam = true;
PC = S.actOnParamCommandStart(Tok.getLocation(),
Tok.getEndLocation(),
- Tok.getCommandName());
- } if (Traits.isTParamCommand(Tok.getCommandName())) {
+ Tok.getCommandID());
+ } if (Info->IsTParamCommand) {
IsTParam = true;
TPC = S.actOnTParamCommandStart(Tok.getLocation(),
Tok.getEndLocation(),
- Tok.getCommandName());
+ Tok.getCommandID());
} else {
- NumArgs = Traits.getBlockCommandNumArgs(Tok.getCommandName());
BC = S.actOnBlockCommandStart(Tok.getLocation(),
Tok.getEndLocation(),
- Tok.getCommandName());
+ Tok.getCommandID());
}
consumeToken();
- if (Tok.is(tok::command) && Traits.isBlockCommand(Tok.getCommandName())) {
+ if (Tok.is(tok::command) &&
+ Traits.getCommandInfo(Tok.getCommandID())->IsBlockCommand) {
// Block command ahead. We can't nest block commands, so pretend that this
// command has an empty argument.
ParagraphComment *Paragraph = S.actOnParagraphComment(
@@ -348,7 +348,7 @@ BlockCommandComment *Parser::parseBlockCommand() {
}
}
- if (IsParam || IsTParam || NumArgs > 0) {
+ if (IsParam || IsTParam || Info->NumArgs > 0) {
// In order to parse command arguments we need to retokenize a few
// following text tokens.
TextTokenRetokenizer Retokenizer(Allocator, *this);
@@ -358,7 +358,7 @@ BlockCommandComment *Parser::parseBlockCommand() {
else if (IsTParam)
parseTParamCommandArgs(TPC, Retokenizer);
else
- parseBlockCommandArgs(BC, Retokenizer, NumArgs);
+ parseBlockCommandArgs(BC, Retokenizer, Info->NumArgs);
Retokenizer.putBackLeftoverTokens();
}
@@ -394,14 +394,14 @@ InlineCommandComment *Parser::parseInlineCommand() {
if (ArgTokValid) {
IC = S.actOnInlineCommand(CommandTok.getLocation(),
CommandTok.getEndLocation(),
- CommandTok.getCommandName(),
+ CommandTok.getCommandID(),
ArgTok.getLocation(),
ArgTok.getEndLocation(),
ArgTok.getText());
} else {
IC = S.actOnInlineCommand(CommandTok.getLocation(),
CommandTok.getEndLocation(),
- CommandTok.getCommandName());
+ CommandTok.getCommandID());
}
Retokenizer.putBackLeftoverTokens();
@@ -540,23 +540,39 @@ BlockContentComment *Parser::parseParagraphOrBlockCommand() {
assert(Content.size() != 0);
break; // Block content or EOF ahead, finish this paragraph.
- case tok::command:
- if (Traits.isBlockCommand(Tok.getCommandName())) {
+ case tok::unknown_command:
+ Content.push_back(S.actOnUnknownCommand(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getUnknownCommandName()));
+ consumeToken();
+ continue;
+
+ case tok::command: {
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
+ if (Info->IsBlockCommand) {
if (Content.size() == 0)
return parseBlockCommand();
break; // Block command ahead, finish this paragraph.
}
- if (Traits.isInlineCommand(Tok.getCommandName())) {
- Content.push_back(parseInlineCommand());
+ if (Info->IsVerbatimBlockEndCommand) {
+ Diag(Tok.getLocation(),
+ diag::warn_verbatim_block_end_without_start)
+ << Info->Name
+ << SourceRange(Tok.getLocation(), Tok.getEndLocation());
+ consumeToken();
continue;
}
-
- // Not a block command, not an inline command ==> an unknown command.
- Content.push_back(S.actOnUnknownCommand(Tok.getLocation(),
- Tok.getEndLocation(),
- Tok.getCommandName()));
- consumeToken();
+ if (Info->IsUnknownCommand) {
+ Content.push_back(S.actOnUnknownCommand(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Info->getID()));
+ consumeToken();
+ continue;
+ }
+ assert(Info->IsInlineCommand);
+ Content.push_back(parseInlineCommand());
continue;
+ }
case tok::newline: {
consumeToken();
@@ -606,7 +622,7 @@ VerbatimBlockComment *Parser::parseVerbatimBlock() {
VerbatimBlockComment *VB =
S.actOnVerbatimBlockStart(Tok.getLocation(),
- Tok.getVerbatimBlockName());
+ Tok.getVerbatimBlockID());
consumeToken();
// Don't create an empty line if the verbatim opening command is followed
@@ -634,8 +650,9 @@ VerbatimBlockComment *Parser::parseVerbatimBlock() {
}
if (Tok.is(tok::verbatim_block_end)) {
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getVerbatimBlockID());
S.actOnVerbatimBlockFinish(VB, Tok.getLocation(),
- Tok.getVerbatimBlockName(),
+ Info->Name,
S.copyArray(llvm::makeArrayRef(Lines)));
consumeToken();
} else {
@@ -666,7 +683,7 @@ VerbatimLineComment *Parser::parseVerbatimLine() {
}
VerbatimLineComment *VL = S.actOnVerbatimLine(NameTok.getLocation(),
- NameTok.getVerbatimLineName(),
+ NameTok.getVerbatimLineID(),
TextBegin,
Text);
consumeToken();
@@ -676,6 +693,7 @@ VerbatimLineComment *Parser::parseVerbatimLine() {
BlockContentComment *Parser::parseBlockContent() {
switch (Tok.getKind()) {
case tok::text:
+ case tok::unknown_command:
case tok::command:
case tok::html_start_tag:
case tok::html_end_tag:
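
The net effect on the parser is that each command token costs one `getCommandInfo` lookup, after which classification is a matter of testing bits on the returned `CommandInfo`, where the old code issued a separate string query (`isBlockCommand`, `isInlineCommand`, ...) per category. A toy dispatcher showing the shape of that control flow; the flag set is illustrative, not the real generated bitfields:

```cpp
#include <cstdio>

struct CommandInfo {
  const char *Name;
  unsigned IsBlockCommand : 1;
  unsigned IsInlineCommand : 1;
  unsigned IsVerbatimBlockEndCommand : 1;
  unsigned IsUnknownCommand : 1;
};

// One lookup, then branch on flags -- mirrors parseParagraphOrBlockCommand().
static void dispatch(const CommandInfo &Info) {
  if (Info.IsBlockCommand)
    std::puts("parse block command");
  else if (Info.IsVerbatimBlockEndCommand)
    std::puts("warn: verbatim block end without a start");
  else if (Info.IsUnknownCommand)
    std::puts("emit unknown-command node");
  else if (Info.IsInlineCommand)
    std::puts("parse inline command");
}

int main() {
  CommandInfo Brief = {"brief", 1, 0, 0, 0};
  CommandInfo Bold = {"b", 0, 1, 0, 0};
  dispatch(Brief); // parse block command
  dispatch(Bold);  // parse inline command
}
```
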
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp b/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp
index c39ee57..08ecb3a 100644
--- a/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp
@@ -13,15 +13,22 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/SmallString.h"
namespace clang {
namespace comments {
+namespace {
+#include "clang/AST/CommentHTMLTagsProperties.inc"
+} // unnamed namespace
+
Sema::Sema(llvm::BumpPtrAllocator &Allocator, const SourceManager &SourceMgr,
- DiagnosticsEngine &Diags, const CommandTraits &Traits) :
+ DiagnosticsEngine &Diags, CommandTraits &Traits,
+ const Preprocessor *PP) :
Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags), Traits(Traits),
- ThisDeclInfo(NULL), BriefCommand(NULL), ReturnsCommand(NULL) {
+ PP(PP), ThisDeclInfo(NULL), BriefCommand(NULL), ReturnsCommand(NULL) {
}
void Sema::setDecl(const Decl *D) {
@@ -29,7 +36,7 @@ void Sema::setDecl(const Decl *D) {
return;
ThisDeclInfo = new (Allocator) DeclInfo;
- ThisDeclInfo->ThisDecl = D;
+ ThisDeclInfo->CommentDecl = D;
ThisDeclInfo->IsFilled = false;
}
@@ -40,8 +47,8 @@ ParagraphComment *Sema::actOnParagraphComment(
BlockCommandComment *Sema::actOnBlockCommandStart(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) {
- return new (Allocator) BlockCommandComment(LocBegin, LocEnd, Name);
+ unsigned CommandID) {
+ return new (Allocator) BlockCommandComment(LocBegin, LocEnd, CommandID);
}
void Sema::actOnBlockCommandArgs(BlockCommandComment *Command,
@@ -55,18 +62,19 @@ void Sema::actOnBlockCommandFinish(BlockCommandComment *Command,
checkBlockCommandEmptyParagraph(Command);
checkBlockCommandDuplicate(Command);
checkReturnsCommand(Command);
+ checkDeprecatedCommand(Command);
}
ParamCommandComment *Sema::actOnParamCommandStart(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) {
+ unsigned CommandID) {
ParamCommandComment *Command =
- new (Allocator) ParamCommandComment(LocBegin, LocEnd, Name);
+ new (Allocator) ParamCommandComment(LocBegin, LocEnd, CommandID);
if (!isFunctionDecl())
Diag(Command->getLocation(),
diag::warn_doc_param_not_attached_to_a_function_decl)
- << Command->getCommandNameRange();
+ << Command->getCommandNameRange(Traits);
return Command;
}
@@ -142,56 +150,6 @@ void Sema::actOnParamCommandParamNameArg(ParamCommandComment *Command,
ArgLocEnd),
Arg);
Command->setArgs(llvm::makeArrayRef(A, 1));
-
- if (!isFunctionDecl()) {
- // We already warned that this \\param is not attached to a function decl.
- return;
- }
-
- ArrayRef<const ParmVarDecl *> ParamVars = getParamVars();
-
- // Check that referenced parameter name is in the function decl.
- const unsigned ResolvedParamIndex = resolveParmVarReference(Arg, ParamVars);
- if (ResolvedParamIndex != ParamCommandComment::InvalidParamIndex) {
- Command->setParamIndex(ResolvedParamIndex);
- if (ParamVarDocs[ResolvedParamIndex]) {
- SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
- Diag(ArgLocBegin, diag::warn_doc_param_duplicate)
- << Arg << ArgRange;
- ParamCommandComment *PrevCommand = ParamVarDocs[ResolvedParamIndex];
- Diag(PrevCommand->getLocation(), diag::note_doc_param_previous)
- << PrevCommand->getParamNameRange();
- }
- ParamVarDocs[ResolvedParamIndex] = Command;
- return;
- }
-
- SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
- Diag(ArgLocBegin, diag::warn_doc_param_not_found)
- << Arg << ArgRange;
-
- // No parameters -- can't suggest a correction.
- if (ParamVars.size() == 0)
- return;
-
- unsigned CorrectedParamIndex = ParamCommandComment::InvalidParamIndex;
- if (ParamVars.size() == 1) {
- // If function has only one parameter then only that parameter
- // can be documented.
- CorrectedParamIndex = 0;
- } else {
- // Do typo correction.
- CorrectedParamIndex = correctTypoInParmVarReference(Arg, ParamVars);
- }
- if (CorrectedParamIndex != ParamCommandComment::InvalidParamIndex) {
- const ParmVarDecl *CorrectedPVD = ParamVars[CorrectedParamIndex];
- if (const IdentifierInfo *CorrectedII = CorrectedPVD->getIdentifier())
- Diag(ArgLocBegin, diag::note_doc_param_name_suggestion)
- << CorrectedII->getName()
- << FixItHint::CreateReplacement(ArgRange, CorrectedII->getName());
- }
-
- return;
}
void Sema::actOnParamCommandFinish(ParamCommandComment *Command,
@@ -202,14 +160,14 @@ void Sema::actOnParamCommandFinish(ParamCommandComment *Command,
TParamCommandComment *Sema::actOnTParamCommandStart(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) {
+ unsigned CommandID) {
TParamCommandComment *Command =
- new (Allocator) TParamCommandComment(LocBegin, LocEnd, Name);
+ new (Allocator) TParamCommandComment(LocBegin, LocEnd, CommandID);
if (!isTemplateOrSpecialization())
Diag(Command->getLocation(),
diag::warn_doc_tparam_not_attached_to_a_template_decl)
- << Command->getCommandNameRange();
+ << Command->getCommandNameRange(Traits);
return Command;
}
@@ -285,19 +243,20 @@ void Sema::actOnTParamCommandFinish(TParamCommandComment *Command,
InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
- StringRef CommandName) {
+ unsigned CommandID) {
ArrayRef<InlineCommandComment::Argument> Args;
+ StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
return new (Allocator) InlineCommandComment(
CommandLocBegin,
CommandLocEnd,
- CommandName,
+ CommandID,
getInlineCommandRenderKind(CommandName),
Args);
}
InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
- StringRef CommandName,
+ unsigned CommandID,
SourceLocation ArgLocBegin,
SourceLocation ArgLocEnd,
StringRef Arg) {
@@ -305,21 +264,29 @@ InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
ArgLocEnd),
Arg);
+ StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
return new (Allocator) InlineCommandComment(
CommandLocBegin,
CommandLocEnd,
- CommandName,
+ CommandID,
getInlineCommandRenderKind(CommandName),
llvm::makeArrayRef(A, 1));
}
InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
SourceLocation LocEnd,
- StringRef Name) {
+ StringRef CommandName) {
+ unsigned CommandID = Traits.registerUnknownCommand(CommandName)->getID();
+ return actOnUnknownCommand(LocBegin, LocEnd, CommandID);
+}
+
+InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ unsigned CommandID) {
ArrayRef<InlineCommandComment::Argument> Args;
return new (Allocator) InlineCommandComment(
- LocBegin, LocEnd, Name,
+ LocBegin, LocEnd, CommandID,
InlineCommandComment::RenderNormal,
Args);
}
@@ -331,11 +298,12 @@ TextComment *Sema::actOnText(SourceLocation LocBegin,
}
VerbatimBlockComment *Sema::actOnVerbatimBlockStart(SourceLocation Loc,
- StringRef Name) {
+ unsigned CommandID) {
+ StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
return new (Allocator) VerbatimBlockComment(
Loc,
- Loc.getLocWithOffset(1 + Name.size()),
- Name);
+ Loc.getLocWithOffset(1 + CommandName.size()),
+ CommandID);
}
VerbatimBlockLineComment *Sema::actOnVerbatimBlockLine(SourceLocation Loc,
@@ -353,13 +321,13 @@ void Sema::actOnVerbatimBlockFinish(
}
VerbatimLineComment *Sema::actOnVerbatimLine(SourceLocation LocBegin,
- StringRef Name,
+ unsigned CommandID,
SourceLocation TextBegin,
StringRef Text) {
return new (Allocator) VerbatimLineComment(
LocBegin,
TextBegin.getLocWithOffset(Text.size()),
- Name,
+ CommandID,
TextBegin,
Text);
}
@@ -445,30 +413,35 @@ HTMLEndTagComment *Sema::actOnHTMLEndTag(SourceLocation LocBegin,
FullComment *Sema::actOnFullComment(
ArrayRef<BlockContentComment *> Blocks) {
- return new (Allocator) FullComment(Blocks, ThisDeclInfo);
+ FullComment *FC = new (Allocator) FullComment(Blocks, ThisDeclInfo);
+ resolveParamCommandIndexes(FC);
+ return FC;
}
void Sema::checkBlockCommandEmptyParagraph(BlockCommandComment *Command) {
+ if (Traits.getCommandInfo(Command->getCommandID())->IsEmptyParagraphAllowed)
+ return;
+
ParagraphComment *Paragraph = Command->getParagraph();
if (Paragraph->isWhitespace()) {
SourceLocation DiagLoc;
if (Command->getNumArgs() > 0)
DiagLoc = Command->getArgRange(Command->getNumArgs() - 1).getEnd();
if (!DiagLoc.isValid())
- DiagLoc = Command->getCommandNameRange().getEnd();
+ DiagLoc = Command->getCommandNameRange(Traits).getEnd();
Diag(DiagLoc, diag::warn_doc_block_command_empty_paragraph)
- << Command->getCommandName()
+ << Command->getCommandName(Traits)
<< Command->getSourceRange();
}
}
void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
- if (!Traits.isReturnsCommand(Command->getCommandName()))
+ if (!Traits.getCommandInfo(Command->getCommandID())->IsReturnsCommand)
return;
if (isFunctionDecl()) {
if (ThisDeclInfo->ResultType->isVoidType()) {
unsigned DiagKind;
- switch (ThisDeclInfo->ThisDecl->getKind()) {
+ switch (ThisDeclInfo->CommentDecl->getKind()) {
default:
if (ThisDeclInfo->IsObjCMethod)
DiagKind = 3;
@@ -484,7 +457,7 @@ void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
}
Diag(Command->getLocation(),
diag::warn_doc_returns_attached_to_a_void_function)
- << Command->getCommandName()
+ << Command->getCommandName(Traits)
<< DiagKind
<< Command->getSourceRange();
}
@@ -492,20 +465,20 @@ void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
}
Diag(Command->getLocation(),
diag::warn_doc_returns_not_attached_to_a_function_decl)
- << Command->getCommandName()
+ << Command->getCommandName(Traits)
<< Command->getSourceRange();
}
void Sema::checkBlockCommandDuplicate(const BlockCommandComment *Command) {
- StringRef Name = Command->getCommandName();
+ const CommandInfo *Info = Traits.getCommandInfo(Command->getCommandID());
const BlockCommandComment *PrevCommand = NULL;
- if (Traits.isBriefCommand(Name)) {
+ if (Info->IsBriefCommand) {
if (!BriefCommand) {
BriefCommand = Command;
return;
}
PrevCommand = BriefCommand;
- } else if (Traits.isReturnsCommand(Name)) {
+ } else if (Info->IsReturnsCommand) {
if (!ReturnsCommand) {
ReturnsCommand = Command;
return;
@@ -515,18 +488,153 @@ void Sema::checkBlockCommandDuplicate(const BlockCommandComment *Command) {
// We don't want to check this command for duplicates.
return;
}
+ StringRef CommandName = Command->getCommandName(Traits);
+ StringRef PrevCommandName = PrevCommand->getCommandName(Traits);
Diag(Command->getLocation(), diag::warn_doc_block_command_duplicate)
- << Name
+ << CommandName
<< Command->getSourceRange();
- if (Name == PrevCommand->getCommandName())
+ if (CommandName == PrevCommandName)
Diag(PrevCommand->getLocation(), diag::note_doc_block_command_previous)
- << PrevCommand->getCommandName()
- << Command->getSourceRange();
+ << PrevCommandName
+ << PrevCommand->getSourceRange();
else
Diag(PrevCommand->getLocation(),
diag::note_doc_block_command_previous_alias)
- << PrevCommand->getCommandName()
- << Name;
+ << PrevCommandName
+ << CommandName;
+}
+
+void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) {
+ if (!Traits.getCommandInfo(Command->getCommandID())->IsDeprecatedCommand)
+ return;
+
+ const Decl *D = ThisDeclInfo->CommentDecl;
+ if (!D)
+ return;
+
+ if (D->hasAttr<DeprecatedAttr>() ||
+ D->hasAttr<AvailabilityAttr>() ||
+ D->hasAttr<UnavailableAttr>())
+ return;
+
+ Diag(Command->getLocation(),
+ diag::warn_doc_deprecated_not_sync)
+ << Command->getSourceRange();
+
+ // Try to emit a fixit with a deprecation attribute.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Don't emit a Fix-It for non-member function definitions. GCC does not
+ // accept attributes on them.
+ const DeclContext *Ctx = FD->getDeclContext();
+ if ((!Ctx || !Ctx->isRecord()) &&
+ FD->doesThisDeclarationHaveABody())
+ return;
+
+ StringRef AttributeSpelling = "__attribute__((deprecated))";
+ if (PP) {
+ TokenValue Tokens[] = {
+ tok::kw___attribute, tok::l_paren, tok::l_paren,
+ PP->getIdentifierInfo("deprecated"),
+ tok::r_paren, tok::r_paren
+ };
+ StringRef MacroName = PP->getLastMacroWithSpelling(FD->getLocation(),
+ Tokens);
+ if (!MacroName.empty())
+ AttributeSpelling = MacroName;
+ }
+
+ SmallString<64> TextToInsert(" ");
+ TextToInsert += AttributeSpelling;
+ Diag(FD->getLocEnd(),
+ diag::note_add_deprecation_attr)
+ << FixItHint::CreateInsertion(FD->getLocEnd().getLocWithOffset(1),
+ TextToInsert);
+ }
+}
+
+void Sema::resolveParamCommandIndexes(const FullComment *FC) {
+ if (!isFunctionDecl()) {
+ // We already warned that \\param commands are not attached to a function
+ // decl.
+ return;
+ }
+
+ llvm::SmallVector<ParamCommandComment *, 8> UnresolvedParamCommands;
+
+ // Comment AST nodes that correspond to \c ParamVars for which we have
+ // found a \\param command or NULL if no documentation was found so far.
+ llvm::SmallVector<ParamCommandComment *, 8> ParamVarDocs;
+
+ ArrayRef<const ParmVarDecl *> ParamVars = getParamVars();
+ ParamVarDocs.resize(ParamVars.size(), NULL);
+
+ // First pass over all \\param commands: resolve all parameter names.
+ for (Comment::child_iterator I = FC->child_begin(), E = FC->child_end();
+ I != E; ++I) {
+ ParamCommandComment *PCC = dyn_cast<ParamCommandComment>(*I);
+ if (!PCC || !PCC->hasParamName())
+ continue;
+ StringRef ParamName = PCC->getParamNameAsWritten();
+
+ // Check that referenced parameter name is in the function decl.
+ const unsigned ResolvedParamIndex = resolveParmVarReference(ParamName,
+ ParamVars);
+ if (ResolvedParamIndex == ParamCommandComment::InvalidParamIndex) {
+ UnresolvedParamCommands.push_back(PCC);
+ continue;
+ }
+ PCC->setParamIndex(ResolvedParamIndex);
+ if (ParamVarDocs[ResolvedParamIndex]) {
+ SourceRange ArgRange = PCC->getParamNameRange();
+ Diag(ArgRange.getBegin(), diag::warn_doc_param_duplicate)
+ << ParamName << ArgRange;
+ ParamCommandComment *PrevCommand = ParamVarDocs[ResolvedParamIndex];
+ Diag(PrevCommand->getLocation(), diag::note_doc_param_previous)
+ << PrevCommand->getParamNameRange();
+ }
+ ParamVarDocs[ResolvedParamIndex] = PCC;
+ }
+
+ // Find parameter declarations that have no corresponding \\param.
+ llvm::SmallVector<const ParmVarDecl *, 8> OrphanedParamDecls;
+ for (unsigned i = 0, e = ParamVarDocs.size(); i != e; ++i) {
+ if (!ParamVarDocs[i])
+ OrphanedParamDecls.push_back(ParamVars[i]);
+ }
+
+ // Second pass over unresolved \\param commands: do typo correction.
+ // Suggest corrections from a set of parameter declarations that have no
+ // corresponding \\param.
+ for (unsigned i = 0, e = UnresolvedParamCommands.size(); i != e; ++i) {
+ const ParamCommandComment *PCC = UnresolvedParamCommands[i];
+
+ SourceRange ArgRange = PCC->getParamNameRange();
+ StringRef ParamName = PCC->getParamNameAsWritten();
+ Diag(ArgRange.getBegin(), diag::warn_doc_param_not_found)
+ << ParamName << ArgRange;
+
+ // All parameters documented -- can't suggest a correction.
+ if (OrphanedParamDecls.size() == 0)
+ continue;
+
+ unsigned CorrectedParamIndex = ParamCommandComment::InvalidParamIndex;
+ if (OrphanedParamDecls.size() == 1) {
+ // If one parameter is not documented then that parameter is the only
+ // possible suggestion.
+ CorrectedParamIndex = 0;
+ } else {
+ // Do typo correction.
+ CorrectedParamIndex = correctTypoInParmVarReference(ParamName,
+ OrphanedParamDecls);
+ }
+ if (CorrectedParamIndex != ParamCommandComment::InvalidParamIndex) {
+ const ParmVarDecl *CorrectedPVD = OrphanedParamDecls[CorrectedParamIndex];
+ if (const IdentifierInfo *CorrectedII = CorrectedPVD->getIdentifier())
+ Diag(ArgRange.getBegin(), diag::note_doc_param_name_suggestion)
+ << CorrectedII->getName()
+ << FixItHint::CreateReplacement(ArgRange, CorrectedII->getName());
+ }
+ }
}
bool Sema::isFunctionDecl() {
@@ -553,7 +661,6 @@ ArrayRef<const ParmVarDecl *> Sema::getParamVars() {
void Sema::inspectThisDecl() {
ThisDeclInfo->fill();
- ParamVarDocs.resize(ThisDeclInfo->ParamVars.size(), NULL);
}
unsigned Sema::resolveParmVarReference(StringRef Name,
@@ -629,7 +736,7 @@ unsigned Sema::correctTypoInParmVarReference(
if (Corrector.getBestDecl())
return Corrector.getBestDeclIndex();
else
- return ParamCommandComment::InvalidParamIndex;;
+ return ParamCommandComment::InvalidParamIndex;
}
namespace {
@@ -700,7 +807,7 @@ StringRef Sema::correctTypoInTParamReference(
InlineCommandComment::RenderKind
Sema::getInlineCommandRenderKind(StringRef Name) const {
- assert(Traits.isInlineCommand(Name));
+ assert(Traits.getCommandInfo(Name)->IsInlineCommand);
return llvm::StringSwitch<InlineCommandComment::RenderKind>(Name)
.Case("b", InlineCommandComment::RenderBold)
@@ -709,31 +816,6 @@ Sema::getInlineCommandRenderKind(StringRef Name) const {
.Default(InlineCommandComment::RenderNormal);
}
-bool Sema::isHTMLEndTagOptional(StringRef Name) {
- return llvm::StringSwitch<bool>(Name)
- .Case("p", true)
- .Case("li", true)
- .Case("dt", true)
- .Case("dd", true)
- .Case("tr", true)
- .Case("th", true)
- .Case("td", true)
- .Case("thead", true)
- .Case("tfoot", true)
- .Case("tbody", true)
- .Case("colgroup", true)
- .Default(false);
-}
-
-bool Sema::isHTMLEndTagForbidden(StringRef Name) {
- return llvm::StringSwitch<bool>(Name)
- .Case("br", true)
- .Case("hr", true)
- .Case("img", true)
- .Case("col", true)
- .Default(false);
-}
-
} // end namespace comments
} // end namespace clang
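
Moving the checks from `actOnParamCommandParamNameArg` into `resolveParamCommandIndexes` also improves the typo correction itself: suggestions are now drawn only from parameters that ended up with no `\param` of their own (the orphaned set), not from the whole parameter list. An illustrative two-pass resolver in the same spirit, using plain strings and a textbook edit distance rather than Sema's machinery:

```cpp
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

static unsigned editDistance(const std::string &A, const std::string &B) {
  std::vector<unsigned> Row(B.size() + 1);
  for (unsigned j = 0; j <= B.size(); ++j) Row[j] = j;
  for (unsigned i = 1; i <= A.size(); ++i) {
    unsigned Prev = Row[0];
    Row[0] = i;
    for (unsigned j = 1; j <= B.size(); ++j) {
      unsigned Cur = Row[j];
      Row[j] = std::min(std::min(Row[j] + 1, Row[j - 1] + 1),
                        Prev + (A[i - 1] != B[j - 1]));
      Prev = Cur;
    }
  }
  return Row[B.size()];
}

int main() {
  std::vector<std::string> Params = {"count", "stride", "flags"};
  std::vector<std::string> Docs = {"count", "strde"}; // one typo
  std::vector<bool> Documented(Params.size(), false);
  std::vector<std::string> Unresolved;

  // Pass 1: resolve exact parameter names.
  for (size_t d = 0; d != Docs.size(); ++d) {
    std::vector<std::string>::iterator It =
        std::find(Params.begin(), Params.end(), Docs[d]);
    if (It != Params.end())
      Documented[It - Params.begin()] = true;
    else
      Unresolved.push_back(Docs[d]);
  }

  // Pass 2: suggest corrections only among undocumented parameters.
  for (size_t d = 0; d != Unresolved.size(); ++d) {
    unsigned Best = ~0u;
    std::string Suggest;
    for (size_t i = 0; i != Params.size(); ++i) {
      if (Documented[i])
        continue;
      unsigned Dist = editDistance(Unresolved[d], Params[i]);
      if (Dist < Best) { Best = Dist; Suggest = Params[i]; }
    }
    std::printf("'%s' not found; did you mean '%s'?\n",
                Unresolved[d].c_str(), Suggest.c_str());
  }
}
```
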
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
index d5b0be3..7b13755 100644
--- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -126,12 +126,12 @@ static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
break;
case TemplateArgument::Declaration:
- // The decl can validly be null as the representation of nullptr
- // arguments, valid only in C++0x.
- if (Decl *D = Args[I].getAsDecl()) {
- if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
- LV.mergeWithMin(getLVForDecl(ND, OnlyTemplate));
- }
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(Args[I].getAsDecl()))
+ LV.mergeWithMin(getLVForDecl(ND, OnlyTemplate));
+ break;
+
+ case TemplateArgument::NullPtr:
+ LV.mergeWithMin(getLVForType(Args[I].getNullPtrType()));
break;
case TemplateArgument::Template:
@@ -193,7 +193,7 @@ static bool useInlineVisibilityHidden(const NamedDecl *D) {
// anyway.
return TSK != TSK_ExplicitInstantiationDeclaration &&
TSK != TSK_ExplicitInstantiationDefinition &&
- FD->hasBody(Def) && Def->isInlined();
+ FD->hasBody(Def) && Def->isInlined() && !Def->hasAttr<GNUInlineAttr>();
}
static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
@@ -213,12 +213,12 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
if (Var->getStorageClass() == SC_Static)
return LinkageInfo::internal();
- // - an object or reference that is explicitly declared const
- // and neither explicitly declared extern nor previously
- // declared to have external linkage; or
- // (there is no equivalent in C99)
+ // - a non-volatile object or reference that is explicitly declared const
+ // or constexpr and neither explicitly declared extern nor previously
+ // declared to have external linkage; or (there is no equivalent in C99)
if (Context.getLangOpts().CPlusPlus &&
- Var->getType().isConstant(Context) &&
+ Var->getType().isConstQualified() &&
+ !Var->getType().isVolatileQualified() &&
Var->getStorageClass() != SC_Extern &&
Var->getStorageClass() != SC_PrivateExtern) {
bool FoundExtern = false;
@@ -236,8 +236,8 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
for (; PrevVar; PrevVar = PrevVar->getPreviousDecl())
if (PrevVar->getStorageClass() == SC_PrivateExtern)
break;
- if (PrevVar)
- return PrevVar->getLinkageAndVisibility();
+ if (PrevVar)
+ return PrevVar->getLinkageAndVisibility();
}
} else if (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)) {
// C++ [temp]p4:
@@ -341,25 +341,9 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
if (Var->getStorageClass() == SC_PrivateExtern)
LV.mergeVisibility(HiddenVisibility, true);
- if (!Context.getLangOpts().CPlusPlus &&
- (Var->getStorageClass() == SC_Extern ||
- Var->getStorageClass() == SC_PrivateExtern)) {
-
- // C99 6.2.2p4:
- // For an identifier declared with the storage-class specifier
- // extern in a scope in which a prior declaration of that
- // identifier is visible, if the prior declaration specifies
- // internal or external linkage, the linkage of the identifier
- // at the later declaration is the same as the linkage
- // specified at the prior declaration. If no prior declaration
- // is visible, or if the prior declaration specifies no
- // linkage, then the identifier has external linkage.
- if (const VarDecl *PrevVar = Var->getPreviousDecl()) {
- LinkageInfo PrevLV = getLVForDecl(PrevVar, OnlyTemplate);
- if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
- LV.mergeVisibility(PrevLV);
- }
- }
+ // Note that Sema::MergeVarDecl already takes care of implementing
+ // C99 6.2.2p4 and propagating the visibility attribute, so we don't have
+ // to do it here.
// - a function, unless it has internal linkage; or
} else if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
@@ -841,13 +825,10 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, bool OnlyTemplate) {
if (llvm::Optional<Visibility> Vis = Var->getExplicitVisibility())
LV.mergeVisibility(*Vis, true);
}
-
- if (const VarDecl *Prev = Var->getPreviousDecl()) {
- LinkageInfo PrevLV = getLVForDecl(Prev, OnlyTemplate);
- if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
- LV.mergeVisibility(PrevLV);
- }
+ // Note that Sema::MergeVarDecl already takes care of implementing
+ // C99 6.2.2p4 and propagating the visibility attribute, so we don't
+ // have to do it here.
return LV;
}
}
@@ -903,7 +884,7 @@ std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const {
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
const FunctionProtoType *FT = 0;
if (FD->hasWrittenPrototype())
- FT = dyn_cast<FunctionProtoType>(FD->getType()->getAs<FunctionType>());
+ FT = dyn_cast<FunctionProtoType>(FD->getType()->castAs<FunctionType>());
OS << *FD << '(';
if (FT) {
@@ -1204,8 +1185,11 @@ void VarDecl::setStorageClass(StorageClass SC) {
}
SourceRange VarDecl::getSourceRange() const {
- if (getInit())
- return SourceRange(getOuterLocStart(), getInit()->getLocEnd());
+ if (const Expr *Init = getInit()) {
+ SourceLocation InitEnd = Init->getLocEnd();
+ if (InitEnd.isValid())
+ return SourceRange(getOuterLocStart(), InitEnd);
+ }
return DeclaratorDecl::getSourceRange();
}
@@ -1859,7 +1843,7 @@ unsigned FunctionDecl::getBuiltinID() const {
/// based on its FunctionType. This is the length of the ParamInfo array
/// after it has been created.
unsigned FunctionDecl::getNumParams() const {
- const FunctionType *FT = getType()->getAs<FunctionType>();
+ const FunctionType *FT = getType()->castAs<FunctionType>();
if (isa<FunctionNoProtoType>(FT))
return 0;
return cast<FunctionProtoType>(FT)->getNumArgs();
@@ -2514,7 +2498,7 @@ unsigned FieldDecl::getFieldIndex() const {
unsigned Index = 0;
const RecordDecl *RD = getParent();
const FieldDecl *LastFD = 0;
- bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ bool IsMsStruct = RD->isMsStruct(getASTContext());
for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
I != E; ++I, ++Index) {
@@ -2762,6 +2746,17 @@ void RecordDecl::completeDefinition() {
TagDecl::completeDefinition();
}
+/// isMsStruct - Get whether or not this record uses ms_struct layout.
+/// This can be turned on with an attribute, pragma, or the
+/// -mms-bitfields command-line option.
+bool RecordDecl::isMsStruct(const ASTContext &C) const {
+ return hasAttr<MsStructAttr>() || C.getLangOpts().MSBitfields == 1;
+}
+
+static bool isFieldOrIndirectField(Decl::Kind K) {
+ return FieldDecl::classofKind(K) || IndirectFieldDecl::classofKind(K);
+}
+
void RecordDecl::LoadFieldsFromExternalStorage() const {
ExternalASTSource *Source = getASTContext().getExternalSource();
assert(hasExternalLexicalStorage() && Source && "No external storage?");
@@ -2771,7 +2766,8 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
SmallVector<Decl*, 64> Decls;
LoadedFieldsFromExternalStorage = true;
- switch (Source->FindExternalLexicalDeclsBy<FieldDecl>(this, Decls)) {
+ switch (Source->FindExternalLexicalDecls(this, isFieldOrIndirectField,
+ Decls)) {
case ELR_Success:
break;
@@ -2783,7 +2779,7 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
#ifndef NDEBUG
// Check that all decls we got were FieldDecls.
for (unsigned i=0, e=Decls.size(); i != e; ++i)
- assert(isa<FieldDecl>(Decls[i]));
+ assert(isa<FieldDecl>(Decls[i]) || isa<IndirectFieldDecl>(Decls[i]));
#endif
if (Decls.empty())
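
`LoadFieldsFromExternalStorage` now passes a predicate over decl kinds (`isFieldOrIndirectField`) instead of instantiating `FindExternalLexicalDeclsBy<FieldDecl>`, so a single query can accept both fields and indirect fields. A minimal sketch of the predicate-over-kinds pattern with stand-in types:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

enum Kind { Field, IndirectField, Function };

struct Decl { Kind K; };

static bool isFieldOrIndirectField(Kind K) {
  return K == Field || K == IndirectField;
}

// A null predicate means "accept everything" -- one entry point serves both
// the filtered and the unfiltered case.
static void findDecls(const std::vector<Decl> &All, bool (*Pred)(Kind),
                      std::vector<Decl> &Out) {
  for (std::size_t i = 0; i != All.size(); ++i)
    if (!Pred || Pred(All[i].K))
      Out.push_back(All[i]);
}

int main() {
  std::vector<Decl> All;
  All.push_back(Decl{Field});
  All.push_back(Decl{Function});
  All.push_back(Decl{IndirectField});
  std::vector<Decl> Fields;
  findDecls(All, isFieldOrIndirectField, Fields);
  std::printf("%zu field-like decls\n", Fields.size()); // 2
}
```
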
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
index f9ce46d..4400d50 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
@@ -961,7 +961,7 @@ DeclContext::lookup_result
ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
DeclarationName Name,
ArrayRef<NamedDecl*> Decls) {
- ASTContext &Context = DC->getParentASTContext();;
+ ASTContext &Context = DC->getParentASTContext();
StoredDeclsMap *Map;
if (!(Map = DC->LookupPtr.getPointer()))
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
index 2f21e4c..82e630a 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
@@ -90,11 +90,12 @@ CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
}
CXXRecordDecl *CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
- SourceLocation Loc, bool Dependent) {
+ TypeSourceInfo *Info, SourceLocation Loc,
+ bool Dependent) {
CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc,
0, 0);
R->IsBeingDefined = true;
- R->DefinitionData = new (C) struct LambdaDefinitionData(R, Dependent);
+ R->DefinitionData = new (C) struct LambdaDefinitionData(R, Info, Dependent);
C.getTypeDeclType(R, /*PrevDecl=*/0);
return R;
}
@@ -239,10 +240,13 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// -- the constructor selected to copy/move each direct base class
// subobject is trivial, and
// FIXME: C++0x: We need to only consider the selected constructor
- // instead of all of them.
+ // instead of all of them. For now, we treat a move constructor as being
+ // non-trivial if it calls anything other than a trivial move constructor.
if (!BaseClassDecl->hasTrivialCopyConstructor())
data().HasTrivialCopyConstructor = false;
- if (!BaseClassDecl->hasTrivialMoveConstructor())
+ if (!BaseClassDecl->hasTrivialMoveConstructor() ||
+ !(BaseClassDecl->hasDeclaredMoveConstructor() ||
+ BaseClassDecl->needsImplicitMoveConstructor()))
data().HasTrivialMoveConstructor = false;
// C++0x [class.copy]p27:
@@ -254,7 +258,9 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// of all of them.
if (!BaseClassDecl->hasTrivialCopyAssignment())
data().HasTrivialCopyAssignment = false;
- if (!BaseClassDecl->hasTrivialMoveAssignment())
+ if (!BaseClassDecl->hasTrivialMoveAssignment() ||
+ !(BaseClassDecl->hasDeclaredMoveAssignment() ||
+ BaseClassDecl->needsImplicitMoveAssignment()))
data().HasTrivialMoveAssignment = false;
// C++11 [class.ctor]p6:
@@ -466,7 +472,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (!D->isImplicit() &&
!isa<FieldDecl>(D) &&
!isa<IndirectFieldDecl>(D) &&
- (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class))
+ (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class ||
+ cast<TagDecl>(D)->getTagKind() == TTK_Interface))
data().HasOnlyCMembers = false;
// Ignore friends and invalid declarations.
@@ -828,7 +835,9 @@ NotASpecialMember:;
// FIXME: C++0x: We don't correctly model 'selected' constructors.
if (!FieldRec->hasTrivialCopyConstructor())
data().HasTrivialCopyConstructor = false;
- if (!FieldRec->hasTrivialMoveConstructor())
+ if (!FieldRec->hasTrivialMoveConstructor() ||
+ !(FieldRec->hasDeclaredMoveConstructor() ||
+ FieldRec->needsImplicitMoveConstructor()))
data().HasTrivialMoveConstructor = false;
// C++0x [class.copy]p27:
@@ -840,7 +849,9 @@ NotASpecialMember:;
// FIXME: C++0x: We don't correctly model 'selected' operators.
if (!FieldRec->hasTrivialCopyAssignment())
data().HasTrivialCopyAssignment = false;
- if (!FieldRec->hasTrivialMoveAssignment())
+ if (!FieldRec->hasTrivialMoveAssignment() ||
+ !(FieldRec->hasDeclaredMoveAssignment() ||
+ FieldRec->needsImplicitMoveAssignment()))
data().HasTrivialMoveAssignment = false;
if (!FieldRec->hasTrivialDestructor())
@@ -936,7 +947,8 @@ NotASpecialMember:;
}
bool CXXRecordDecl::isCLike() const {
- if (getTagKind() == TTK_Class || !TemplateOrInstantiation.isNull())
+ if (getTagKind() == TTK_Class || getTagKind() == TTK_Interface ||
+ !TemplateOrInstantiation.isNull())
return false;
if (!hasDefinition())
return true;
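
The recurring pattern in the `setBases`/`addedMember` hunks: the trivial-move flags are now cleared unless the base or member both has a trivial move constructor/assignment and has one that is declared or implicitly needed. That approximates "selected constructor" semantics, as the FIXMEs note: if a subobject will not actually use its move constructor, overload resolution may select a copy, and triviality of the move cannot be assumed. Boiled down to a sketch (stand-in flags, not `CXXRecordDecl`):

```cpp
#include <cassert>

struct Subobject {
  bool HasTrivialMove;
  bool HasDeclaredMove;
  bool NeedsImplicitMove;
};

struct RecordFlags {
  bool HasTrivialMoveConstructor;
  RecordFlags() : HasTrivialMoveConstructor(true) {}

  void addSubobject(const Subobject &S) {
    // Treat the move as non-trivial unless a (possibly implicit) move
    // constructor exists and would actually be used.
    if (!S.HasTrivialMove || !(S.HasDeclaredMove || S.NeedsImplicitMove))
      HasTrivialMoveConstructor = false;
  }
};

int main() {
  RecordFlags R;
  Subobject S = {true, false, false}; // trivial move exists but is unused
  R.addSubobject(S);
  assert(!R.HasTrivialMoveConstructor);
}
```
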
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
index 4d48ad8..65a9878 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/ASTMutationListener.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -93,6 +94,16 @@ ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
return 0;
}
+IdentifierInfo *
+ObjCPropertyDecl::getDefaultSynthIvarName(ASTContext &Ctx) const {
+ SmallString<128> ivarName;
+ {
+ llvm::raw_svector_ostream os(ivarName);
+ os << '_' << getIdentifier()->getName();
+ }
+ return &Ctx.Idents.get(ivarName.str());
+}
+
/// FindPropertyDeclaration - Finds declaration of the property given its name
/// in 'PropertyId' and returns it. It returns 0, if not found.
ObjCPropertyDecl *
@@ -179,6 +190,21 @@ ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
return 0;
}
+void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM) const {
+ for (ObjCContainerDecl::prop_iterator P = prop_begin(),
+ E = prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = *P;
+ PM[Prop->getIdentifier()] = Prop;
+ }
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ PI = all_referenced_protocol_begin(),
+ E = all_referenced_protocol_end(); PI != E; ++PI)
+ (*PI)->collectPropertiesToImplement(PM);
+  // Note that properties declared only in class extensions are still copied
+  // into the main @interface's property list, and therefore we don't have
+  // to explicitly search class extension properties.
+}
+
void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
ObjCProtocolDecl *const* ExtList, unsigned ExtNum,
ASTContext &C)
@@ -414,16 +440,15 @@ ObjCMethodDecl *ObjCMethodDecl::Create(ASTContext &C,
DeclContext *contextDecl,
bool isInstance,
bool isVariadic,
- bool isSynthesized,
+ bool isPropertyAccessor,
bool isImplicitlyDeclared,
bool isDefined,
ImplementationControl impControl,
bool HasRelatedResultType) {
return new (C) ObjCMethodDecl(beginLoc, endLoc,
SelInfo, T, ResultTInfo, contextDecl,
- isInstance,
- isVariadic, isSynthesized, isImplicitlyDeclared,
- isDefined,
+ isInstance, isVariadic, isPropertyAccessor,
+ isImplicitlyDeclared, isDefined,
impControl,
HasRelatedResultType);
}
@@ -434,6 +459,10 @@ ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
Selector(), QualType(), 0, 0);
}
+Stmt *ObjCMethodDecl::getBody() const {
+ return Body.get(getASTContext().getExternalSource());
+}
+
void ObjCMethodDecl::setAsRedeclaration(const ObjCMethodDecl *PrevMethod) {
assert(PrevMethod);
getASTContext().setObjCMethodRedeclaration(PrevMethod, this);
@@ -707,6 +736,224 @@ ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
llvm_unreachable("unknown method context");
}
+static void CollectOverriddenMethodsRecurse(const ObjCContainerDecl *Container,
+ const ObjCMethodDecl *Method,
+ SmallVectorImpl<const ObjCMethodDecl *> &Methods,
+ bool MovedToSuper) {
+ if (!Container)
+ return;
+
+  // In categories look for overridden methods from protocols. A method from
+  // a category is not "overridden" since it is considered the "same" method
+ // (same USR) as the one from the interface.
+ if (const ObjCCategoryDecl *
+ Category = dyn_cast<ObjCCategoryDecl>(Container)) {
+ // Check whether we have a matching method at this category but only if we
+ // are at the super class level.
+ if (MovedToSuper)
+ if (ObjCMethodDecl *
+ Overridden = Container->getMethod(Method->getSelector(),
+ Method->isInstanceMethod()))
+ if (Method != Overridden) {
+ // We found an override at this category; there is no need to look
+ // into its protocols.
+ Methods.push_back(Overridden);
+ return;
+ }
+
+ for (ObjCCategoryDecl::protocol_iterator P = Category->protocol_begin(),
+ PEnd = Category->protocol_end();
+ P != PEnd; ++P)
+ CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+ return;
+ }
+
+ // Check whether we have a matching method at this level.
+ if (const ObjCMethodDecl *
+ Overridden = Container->getMethod(Method->getSelector(),
+ Method->isInstanceMethod()))
+ if (Method != Overridden) {
+ // We found an override at this level; there is no need to look
+ // into other protocols or categories.
+ Methods.push_back(Overridden);
+ return;
+ }
+
+ if (const ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)){
+ for (ObjCProtocolDecl::protocol_iterator P = Protocol->protocol_begin(),
+ PEnd = Protocol->protocol_end();
+ P != PEnd; ++P)
+ CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+ }
+
+ if (const ObjCInterfaceDecl *
+ Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
+ for (ObjCInterfaceDecl::protocol_iterator P = Interface->protocol_begin(),
+ PEnd = Interface->protocol_end();
+ P != PEnd; ++P)
+ CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+
+ for (const ObjCCategoryDecl *Category = Interface->getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ CollectOverriddenMethodsRecurse(Category, Method, Methods,
+ MovedToSuper);
+
+ if (const ObjCInterfaceDecl *Super = Interface->getSuperClass())
+ return CollectOverriddenMethodsRecurse(Super, Method, Methods,
+ /*MovedToSuper=*/true);
+ }
+}
+
+static inline void CollectOverriddenMethods(const ObjCContainerDecl *Container,
+ const ObjCMethodDecl *Method,
+ SmallVectorImpl<const ObjCMethodDecl *> &Methods) {
+ CollectOverriddenMethodsRecurse(Container, Method, Methods,
+ /*MovedToSuper=*/false);
+}
+
+static void collectOverriddenMethodsSlow(const ObjCMethodDecl *Method,
+ SmallVectorImpl<const ObjCMethodDecl *> &overridden) {
+ assert(Method->isOverriding());
+
+ if (const ObjCProtocolDecl *
+ ProtD = dyn_cast<ObjCProtocolDecl>(Method->getDeclContext())) {
+ CollectOverriddenMethods(ProtD, Method, overridden);
+
+ } else if (const ObjCImplDecl *
+ IMD = dyn_cast<ObjCImplDecl>(Method->getDeclContext())) {
+ const ObjCInterfaceDecl *ID = IMD->getClassInterface();
+ if (!ID)
+ return;
+ // Start searching for overridden methods using the method from the
+ // interface as starting point.
+ if (const ObjCMethodDecl *IFaceMeth = ID->getMethod(Method->getSelector(),
+ Method->isInstanceMethod()))
+ Method = IFaceMeth;
+ CollectOverriddenMethods(ID, Method, overridden);
+
+ } else if (const ObjCCategoryDecl *
+ CatD = dyn_cast<ObjCCategoryDecl>(Method->getDeclContext())) {
+ const ObjCInterfaceDecl *ID = CatD->getClassInterface();
+ if (!ID)
+ return;
+ // Start searching for overridden methods using the method from the
+ // interface as starting point.
+ if (const ObjCMethodDecl *IFaceMeth = ID->getMethod(Method->getSelector(),
+ Method->isInstanceMethod()))
+ Method = IFaceMeth;
+ CollectOverriddenMethods(ID, Method, overridden);
+
+ } else {
+ CollectOverriddenMethods(
+ dyn_cast_or_null<ObjCContainerDecl>(Method->getDeclContext()),
+ Method, overridden);
+ }
+}
+
+static void collectOnCategoriesAfterLocation(SourceLocation Loc,
+ const ObjCInterfaceDecl *Class,
+ SourceManager &SM,
+ const ObjCMethodDecl *Method,
+ SmallVectorImpl<const ObjCMethodDecl *> &Methods) {
+ if (!Class)
+ return;
+
+ for (const ObjCCategoryDecl *Category = Class->getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ if (SM.isBeforeInTranslationUnit(Loc, Category->getLocation()))
+ CollectOverriddenMethodsRecurse(Category, Method, Methods, true);
+
+ collectOnCategoriesAfterLocation(Loc, Class->getSuperClass(), SM,
+ Method, Methods);
+}
+
+/// \brief Faster collection that is enabled when ObjCMethodDecl::isOverriding()
+/// returns false.
+/// You'd think that in that case there are no overrides but categories can
+/// "introduce" new overridden methods that are missed by Sema because the
+/// overrides lookup that it does for methods, inside implementations, will
+/// stop at the interface level (if there is a method there) and not look
+/// further in super classes.
+/// Methods in an implementation can override methods in a super class's
+/// category but not in the current class's category.
+static void collectOverriddenMethodsFast(SourceManager &SM,
+ const ObjCMethodDecl *Method,
+ SmallVectorImpl<const ObjCMethodDecl *> &Methods) {
+ assert(!Method->isOverriding());
+
+ const ObjCContainerDecl *
+ ContD = cast<ObjCContainerDecl>(Method->getDeclContext());
+ if (isa<ObjCInterfaceDecl>(ContD) || isa<ObjCProtocolDecl>(ContD))
+ return;
+ const ObjCInterfaceDecl *Class = Method->getClassInterface();
+ if (!Class)
+ return;
+
+ collectOnCategoriesAfterLocation(Class->getLocation(), Class->getSuperClass(),
+ SM, Method, Methods);
+}
+
+void ObjCMethodDecl::getOverriddenMethods(
+ SmallVectorImpl<const ObjCMethodDecl *> &Overridden) const {
+ const ObjCMethodDecl *Method = this;
+
+ if (Method->isRedeclaration()) {
+ Method = cast<ObjCContainerDecl>(Method->getDeclContext())->
+ getMethod(Method->getSelector(), Method->isInstanceMethod());
+ }
+
+ if (!Method->isOverriding()) {
+ collectOverriddenMethodsFast(getASTContext().getSourceManager(),
+ Method, Overridden);
+ } else {
+ collectOverriddenMethodsSlow(Method, Overridden);
+ assert(!Overridden.empty() &&
+ "ObjCMethodDecl's overriding bit is not as expected");
+ }
+}
+
+const ObjCPropertyDecl *
+ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const {
+ Selector Sel = getSelector();
+ unsigned NumArgs = Sel.getNumArgs();
+ if (NumArgs > 1)
+ return 0;
+
+ if (!isInstanceMethod() || getMethodFamily() != OMF_None)
+ return 0;
+
+ if (isPropertyAccessor()) {
+ const ObjCContainerDecl *Container = cast<ObjCContainerDecl>(getParent());
+ bool IsGetter = (NumArgs == 0);
+
+ for (ObjCContainerDecl::prop_iterator I = Container->prop_begin(),
+ E = Container->prop_end();
+ I != E; ++I) {
+ Selector NextSel = IsGetter ? (*I)->getGetterName()
+ : (*I)->getSetterName();
+ if (NextSel == Sel)
+ return *I;
+ }
+
+ llvm_unreachable("Marked as a property accessor but no property found!");
+ }
+
+ if (!CheckOverrides)
+ return 0;
+
+ typedef SmallVector<const ObjCMethodDecl *, 8> OverridesTy;
+ OverridesTy Overrides;
+ getOverriddenMethods(Overrides);
+ for (OverridesTy::const_iterator I = Overrides.begin(), E = Overrides.end();
+ I != E; ++I) {
+ if (const ObjCPropertyDecl *Prop = (*I)->findPropertyDecl(false))
+ return Prop;
+ }
+
+  return 0;
+}
+
//===----------------------------------------------------------------------===//
// ObjCInterfaceDecl
//===----------------------------------------------------------------------===//
@@ -1081,6 +1328,20 @@ void ObjCProtocolDecl::startDefinition() {
RD->Data = this->Data;
}
+void ObjCProtocolDecl::collectPropertiesToImplement(PropertyMap &PM) const {
+ for (ObjCProtocolDecl::prop_iterator P = prop_begin(),
+ E = prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = *P;
+ // Insert into PM if not there already.
+ PM.insert(std::make_pair(Prop->getIdentifier(), Prop));
+ }
+ // Scan through protocol's protocols.
+ for (ObjCProtocolDecl::protocol_iterator PI = protocol_begin(),
+ E = protocol_end(); PI != E; ++PI)
+ (*PI)->collectPropertiesToImplement(PM);
+}
+
//===----------------------------------------------------------------------===//
// ObjCCategoryDecl
//===----------------------------------------------------------------------===//
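
Both `collectPropertiesToImplement` implementations lean on map-insertion semantics for precedence: a container records its own properties first, then recurses into protocols, and an `insert` is a no-op for a key that is already present, so the declaration closest to the class wins. A small stand-alone model of the protocol walk, with plain containers in place of the ObjC AST:

```cpp
#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct Protocol {
  std::vector<std::string> Props;
  std::vector<const Protocol *> Protocols;

  void collect(std::map<std::string, const Protocol *> &PM) const {
    for (std::size_t i = 0; i != Props.size(); ++i)
      PM.insert(std::make_pair(Props[i], this)); // keeps an existing entry
    for (std::size_t i = 0; i != Protocols.size(); ++i)
      Protocols[i]->collect(PM);
  }
};

int main() {
  Protocol Base, Derived;
  Base.Props.push_back("name");
  Derived.Props.push_back("name"); // redeclares the inherited property
  Derived.Protocols.push_back(&Base);

  std::map<std::string, const Protocol *> PM;
  Derived.collect(PM);
  return PM["name"] == &Derived ? 0 : 1; // closest declaration wins
}
```
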
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
index 7f47604..386ad66 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
@@ -105,6 +105,8 @@ static QualType GetBaseType(QualType T) {
break;
else if (const PointerType* PTy = BaseType->getAs<PointerType>())
BaseType = PTy->getPointeeType();
+ else if (const BlockPointerType *BPy = BaseType->getAs<BlockPointerType>())
+ BaseType = BPy->getPointeeType();
else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType))
BaseType = ATy->getElementType();
else if (const FunctionType* FTy = BaseType->getAs<FunctionType>())
@@ -189,6 +191,9 @@ raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
}
void DeclPrinter::prettyPrintAttributes(Decl *D) {
+ if (Policy.SuppressAttributes)
+ return;
+
if (D->hasAttrs()) {
AttrVec &Attrs = D->getAttrs();
for (AttrVec::const_iterator i=Attrs.begin(), e=Attrs.end(); i!=e; ++i) {
@@ -220,6 +225,9 @@ void DeclPrinter::Print(AccessSpecifier AS) {
//----------------------------------------------------------------------------
void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
+ if (Policy.TerseOutput)
+ return;
+
if (Indent)
Indentation += Policy.Indentation;
@@ -459,7 +467,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (I)
Proto += ", ";
- Proto += FT->getExceptionType(I).getAsString(SubPolicy);;
+ Proto += FT->getExceptionType(I).getAsString(SubPolicy);
}
Proto += ")";
} else if (FT && isNoexceptExceptionSpec(FT->getExceptionSpecType())) {
@@ -550,7 +558,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Out << " = 0";
else if (D->isDeletedAsWritten())
Out << " = delete";
- else if (D->doesThisDeclarationHaveABody()) {
+ else if (D->doesThisDeclarationHaveABody() && !Policy.TerseOutput) {
if (!D->hasPrototype() && D->getNumParams()) {
// This is a K&R function definition, so we need to print the
// parameters.
@@ -621,13 +629,13 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
ImplicitInit = D->getInitStyle() == VarDecl::CallInit &&
Construct->getNumArgs() == 0 && !Construct->isListInitialization();
if (!ImplicitInit) {
- if (D->getInitStyle() == VarDecl::CallInit)
+ if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
Out << "(";
else if (D->getInitStyle() == VarDecl::CInit) {
Out << " = ";
}
Init->printPretty(Out, 0, Policy, Indentation);
- if (D->getInitStyle() == VarDecl::CallInit)
+ if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
Out << ")";
}
}
@@ -869,7 +877,7 @@ void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
if (OMD->isVariadic())
Out << ", ...";
- if (OMD->getBody()) {
+ if (OMD->getBody() && !Policy.TerseOutput) {
Out << ' ';
OMD->getBody()->printPretty(Out, 0, Policy);
Out << '\n';
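
All of the `DeclPrinter` hunks follow one pattern: consult the `PrintingPolicy` before emitting an optional piece of output, so `TerseOutput` produces declarations without bodies or nested declaration contexts and `SuppressAttributes` drops attribute spellings. A sketch of that gating; the policy field names match the diff, everything else is a stand-in printer:

```cpp
#include <cstdio>

struct PrintingPolicy {
  bool TerseOutput;
  bool SuppressAttributes;
  PrintingPolicy() : TerseOutput(false), SuppressAttributes(false) {}
};

static void printFunction(const PrintingPolicy &Policy, bool HasBody) {
  std::printf("void f()");
  if (!Policy.SuppressAttributes)
    std::printf(" __attribute__((unused))");
  if (HasBody && !Policy.TerseOutput)
    std::printf(" { /* body */ }");
  std::printf("\n");
}

int main() {
  PrintingPolicy Full;
  PrintingPolicy Terse;
  Terse.TerseOutput = true;
  printFunction(Full, true);  // full definition
  printFunction(Terse, true); // declaration only
}
```
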
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
index a7e8999..a70983f 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
@@ -32,9 +32,25 @@ TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
NamedDecl **Params, unsigned NumParams,
SourceLocation RAngleLoc)
: TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
- NumParams(NumParams) {
- for (unsigned Idx = 0; Idx < NumParams; ++Idx)
- begin()[Idx] = Params[Idx];
+ NumParams(NumParams), ContainsUnexpandedParameterPack(false) {
+ assert(this->NumParams == NumParams && "Too many template parameters");
+ for (unsigned Idx = 0; Idx < NumParams; ++Idx) {
+ NamedDecl *P = Params[Idx];
+ begin()[Idx] = P;
+
+ if (!P->isTemplateParameterPack()) {
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P))
+ if (NTTP->getType()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
+ if (TTP->getTemplateParameters()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+
+ // FIXME: If a default argument contains an unexpanded parameter pack, the
+ // template parameter list does too.
+ }
+ }
}
TemplateParameterList *
@@ -577,6 +593,19 @@ SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
void TemplateTemplateParmDecl::anchor() { }
+TemplateTemplateParmDecl::TemplateTemplateParmDecl(
+ DeclContext *DC, SourceLocation L, unsigned D, unsigned P,
+ IdentifierInfo *Id, TemplateParameterList *Params,
+ unsigned NumExpansions, TemplateParameterList * const *Expansions)
+ : TemplateDecl(TemplateTemplateParm, DC, L, Id, Params),
+ TemplateParmPosition(D, P), DefaultArgument(),
+ DefaultArgumentWasInherited(false), ParameterPack(true),
+ ExpandedParameterPack(true), NumExpandedParams(NumExpansions) {
+ if (Expansions)
+ std::memcpy(reinterpret_cast<void*>(this + 1), Expansions,
+ sizeof(TemplateParameterList*) * NumExpandedParams);
+}
+
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
@@ -587,12 +616,35 @@ TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
}
TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ IdentifierInfo *Id,
+ TemplateParameterList *Params,
+ llvm::ArrayRef<TemplateParameterList*> Expansions) {
+ void *Mem = C.Allocate(sizeof(TemplateTemplateParmDecl) +
+ sizeof(TemplateParameterList*) * Expansions.size());
+ return new (Mem) TemplateTemplateParmDecl(DC, L, D, P, Id, Params,
+ Expansions.size(),
+ Expansions.data());
+}
+
+TemplateTemplateParmDecl *
TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTemplateParmDecl));
return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, false,
0, 0);
}
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumExpansions) {
+ unsigned Size = sizeof(TemplateTemplateParmDecl) +
+ sizeof(TemplateParameterList*) * NumExpansions;
+ void *Mem = AllocateDeserializedDecl(C, ID, Size);
+ return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, 0, 0,
+ NumExpansions, 0);
+}
+
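Both new factory functions use the same allocation pattern: over-allocate one buffer, placement-new the object at its start, and memcpy the expansion list into the trailing bytes at this + 1. A toy version of the idiom (names invented; real code must also keep the trailing type's alignment in mind):

#include <cstring>
#include <new>

struct Node {
  unsigned NumElems;
  Node(unsigned N, const int *Elems) : NumElems(N) {
    if (Elems)
      std::memcpy(reinterpret_cast<void *>(this + 1), Elems,
                  sizeof(int) * N);           // trailing storage after *this
  }
  const int *elems() const { return reinterpret_cast<const int *>(this + 1); }
};

Node *createNode(unsigned N, const int *Elems) {
  void *Mem = ::operator new(sizeof(Node) + sizeof(int) * N);
  return new (Mem) Node(N, Elems);            // placement-new into the buffer
}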
//===----------------------------------------------------------------------===//
// TemplateArgumentList Implementation
//===----------------------------------------------------------------------===//
@@ -712,13 +764,27 @@ ClassTemplateSpecializationDecl::getSpecializedTemplate() const {
SourceRange
ClassTemplateSpecializationDecl::getSourceRange() const {
if (ExplicitInfo) {
- SourceLocation Begin = getExternLoc();
- if (Begin.isInvalid())
- Begin = getTemplateKeywordLoc();
- SourceLocation End = getRBraceLoc();
- if (End.isInvalid())
- End = getTypeAsWritten()->getTypeLoc().getEndLoc();
- return SourceRange(Begin, End);
+ SourceLocation Begin = getTemplateKeywordLoc();
+ if (Begin.isValid()) {
+ // Here we have an explicit (partial) specialization or instantiation.
+ assert(getSpecializationKind() == TSK_ExplicitSpecialization ||
+ getSpecializationKind() == TSK_ExplicitInstantiationDeclaration ||
+ getSpecializationKind() == TSK_ExplicitInstantiationDefinition);
+ if (getExternLoc().isValid())
+ Begin = getExternLoc();
+ SourceLocation End = getRBraceLoc();
+ if (End.isInvalid())
+ End = getTypeAsWritten()->getTypeLoc().getEndLoc();
+ return SourceRange(Begin, End);
+ }
+ // An implicit instantiation of a class template partial specialization
+ // uses ExplicitInfo to record the TypeAsWritten, but the source
+ // locations should be retrieved from the instantiation pattern.
+ typedef ClassTemplatePartialSpecializationDecl CTPSDecl;
+ CTPSDecl *ctpsd = const_cast<CTPSDecl*>(cast<CTPSDecl>(this));
+ CTPSDecl *inst_from = ctpsd->getInstantiatedFromMember();
+ assert(inst_from != 0);
+ return inst_from->getSourceRange();
}
else {
// No explicit info available.
diff --git a/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
index 84f3fc4..5f43fbc 100644
--- a/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
@@ -1,4 +1,4 @@
-//===--- DumpXML.cpp - Detailed XML dumping ---------------------*- C++ -*-===//
+//===--- DumpXML.cpp - Detailed XML dumping -------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -64,6 +64,8 @@ template <class Impl> struct XMLDeclVisitor {
static_cast<Impl*>(this)->NAME(static_cast<CLASS*>(D))
void dispatch(Decl *D) {
+ if (D->isUsed())
+ static_cast<Impl*>(this)->set("used", "1");
switch (D->getKind()) {
#define DECL(DERIVED, BASE) \
case Decl::DERIVED: \
@@ -316,12 +318,12 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::NullPtr:
// FIXME: Implement!
break;
case TemplateArgument::Declaration: {
- if (Decl *D = A.getAsDecl())
- visitDeclRef(D);
+ visitDeclRef(A.getAsDecl());
break;
}
case TemplateArgument::Integral: {
@@ -841,7 +843,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
setFlag("instance", D->isInstanceMethod());
setFlag("variadic", D->isVariadic());
- setFlag("synthesized", D->isSynthesized());
+ setFlag("property_accessor", D->isPropertyAccessor());
setFlag("defined", D->isDefined());
setFlag("related_result_type", D->hasRelatedResultType());
}
@@ -920,6 +922,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
case CC_X86Pascal: return set("cc", "x86_pascal");
case CC_AAPCS: return set("cc", "aapcs");
case CC_AAPCS_VFP: return set("cc", "aapcs_vfp");
+ case CC_PnaclCall: return set("cc", "pnaclcall");
}
}
@@ -974,6 +977,16 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
setFlag("const", T->isConst());
setFlag("volatile", T->isVolatile());
setFlag("restrict", T->isRestrict());
+ switch (T->getExceptionSpecType()) {
+ case EST_None: break;
+ case EST_DynamicNone: set("exception_spec", "throw()"); break;
+ case EST_Dynamic: set("exception_spec", "throw(T)"); break;
+ case EST_MSAny: set("exception_spec", "throw(...)"); break;
+ case EST_BasicNoexcept: set("exception_spec", "noexcept"); break;
+ case EST_ComputedNoexcept: set("exception_spec", "noexcept(expr)"); break;
+ case EST_Unevaluated: set("exception_spec", "unevaluated"); break;
+ case EST_Uninstantiated: set("exception_spec", "uninstantiated"); break;
+ }
}
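The added switch maps the exception-spec enum to its source spelling and deliberately has no default case, so -Wswitch reports this spot whenever a new enumerator appears. The same shape in isolation (enumerator list abbreviated for the sketch):

enum class ExceptionSpec { None, DynamicNone, Dynamic, MSAny, BasicNoexcept };

const char *spelling(ExceptionSpec ES) {
  switch (ES) {                                // no default: a new enumerator
  case ExceptionSpec::None:          return ""; // trips -Wswitch right here
  case ExceptionSpec::DynamicNone:   return "throw()";
  case ExceptionSpec::Dynamic:       return "throw(T)";
  case ExceptionSpec::MSAny:         return "throw(...)";
  case ExceptionSpec::BasicNoexcept: return "noexcept";
  }
  return "";                                   // unreachable for valid input
}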
void visitFunctionProtoTypeChildren(FunctionProtoType *T) {
push("parameters");
@@ -1023,7 +1036,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
void Decl::dumpXML() const {
- dump(llvm::errs());
+ dumpXML(llvm::errs());
}
void Decl::dumpXML(raw_ostream &out) const {
diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
index 24361ef..f3a2e05 100644
--- a/contrib/llvm/tools/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
@@ -48,6 +48,75 @@ const CXXRecordDecl *Expr::getBestDynamicClassType() const {
return cast<CXXRecordDecl>(D);
}
+const Expr *
+Expr::skipRValueSubobjectAdjustments(
+ SmallVectorImpl<SubobjectAdjustment> &Adjustments) const {
+ const Expr *E = this;
+ while (true) {
+ E = E->IgnoreParens();
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase) &&
+ E->getType()->isRecordType()) {
+ E = CE->getSubExpr();
+ CXXRecordDecl *Derived
+ = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ Adjustments.push_back(SubobjectAdjustment(CE, Derived));
+ continue;
+ }
+
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (!ME->isArrow() && ME->getBase()->isRValue()) {
+ assert(ME->getBase()->getType()->isRecordType());
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ E = ME->getBase();
+ Adjustments.push_back(SubobjectAdjustment(Field));
+ continue;
+ }
+ }
+ } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->isPtrMemOp()) {
+ assert(BO->getRHS()->isRValue());
+ E = BO->getLHS();
+ const MemberPointerType *MPT =
+ BO->getRHS()->getType()->getAs<MemberPointerType>();
+ Adjustments.push_back(SubobjectAdjustment(MPT, BO->getRHS()));
+ }
+ }
+
+ // Nothing changed.
+ break;
+ }
+ return E;
+}
+
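skipRValueSubobjectAdjustments peels wrappers in a loop, recording each adjustment, until a pass changes nothing. Its control-flow skeleton, reduced to a toy expression type:

struct Expr { const Expr *Inner = nullptr; bool IsParen = false; };

const Expr *skipParens(const Expr *E) {
  while (true) {
    if (E->IsParen && E->Inner) {   // strip one wrapper, then loop again
      E = E->Inner;
      continue;
    }
    break;                          // nothing changed: fixed point reached
  }
  return E;
}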
+const Expr *
+Expr::findMaterializedTemporary(const MaterializeTemporaryExpr *&MTE) const {
+ const Expr *E = this;
+ // Look through single-element init lists that claim to be lvalues. They're
+ // just syntactic wrappers in this case.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->getNumInits() == 1 && ILE->isGLValue())
+ E = ILE->getInit(0);
+ }
+
+ // Look through expressions for materialized temporaries (for now).
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ MTE = M;
+ E = M->GetTemporaryExpr();
+ }
+
+ if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
+ E = DAE->getExpr();
+ return E;
+}
+
/// isKnownToHaveBooleanValue - Return true if this is an integer expression
/// that is known to return 0 or 1. This happens for _Bool/bool expressions
/// but also int expressions which are produced by things like comparisons in
@@ -784,19 +853,19 @@ void StringLiteral::setString(ASTContext &C, StringRef Str,
switch(CharByteWidth) {
case 1: {
char *AStrData = new (C) char[Length];
- std::memcpy(AStrData,Str.data(),Str.size());
+ std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
StrData.asChar = AStrData;
break;
}
case 2: {
uint16_t *AStrData = new (C) uint16_t[Length];
- std::memcpy(AStrData,Str.data(),Str.size());
+ std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
StrData.asUInt16 = AStrData;
break;
}
case 4: {
uint32_t *AStrData = new (C) uint32_t[Length];
- std::memcpy(AStrData,Str.data(),Str.size());
+ std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
StrData.asUInt32 = AStrData;
break;
}
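The three memcpy calls now size the copy from the destination's element count and element width instead of the source's byte count, which makes the intent explicit for the 2- and 4-byte literal encodings. A one-function illustration of the corrected sizing:

#include <cstdint>
#include <cstring>

// Copy Length UTF-16 code units; scaling by the element size keeps the
// byte count right even when the caller's length is counted in units.
void copyUnits(uint16_t *Dst, const uint16_t *Src, unsigned Length) {
  std::memcpy(Dst, Src, Length * sizeof(*Dst));
}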
@@ -869,7 +938,7 @@ getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "sizeof" or "[pre]++".
-const char *UnaryOperator::getOpcodeStr(Opcode Op) {
+StringRef UnaryOperator::getOpcodeStr(Opcode Op) {
switch (Op) {
case UO_PostInc: return "++";
case UO_PostDec: return "--";
@@ -923,18 +992,18 @@ OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
//===----------------------------------------------------------------------===//
CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
- Expr **args, unsigned numargs, QualType t, ExprValueKind VK,
+ ArrayRef<Expr*> args, QualType t, ExprValueKind VK,
SourceLocation rparenloc)
: Expr(SC, t, VK, OK_Ordinary,
fn->isTypeDependent(),
fn->isValueDependent(),
fn->isInstantiationDependent(),
fn->containsUnexpandedParameterPack()),
- NumArgs(numargs) {
+ NumArgs(args.size()) {
- SubExprs = new (C) Stmt*[numargs+PREARGS_START+NumPreArgs];
+ SubExprs = new (C) Stmt*[args.size()+PREARGS_START+NumPreArgs];
SubExprs[FN] = fn;
- for (unsigned i = 0; i != numargs; ++i) {
+ for (unsigned i = 0; i != args.size(); ++i) {
if (args[i]->isTypeDependent())
ExprBits.TypeDependent = true;
if (args[i]->isValueDependent())
@@ -951,18 +1020,18 @@ CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
RParenLoc = rparenloc;
}
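This file repeatedly swaps (Expr **args, unsigned numargs) pairs for a single ArrayRef<Expr*> parameter, so the pointer and the length can never disagree at a call site. A minimal ArrayRef-like view, standing in for llvm::ArrayRef rather than reproducing it:

#include <cstddef>
#include <vector>

template <typename T> class Span {
  const T *Data;
  size_t Len;
public:
  Span(const std::vector<T> &V) : Data(V.data()), Len(V.size()) {}
  Span(const T *D, size_t N) : Data(D), Len(N) {}
  size_t size() const { return Len; }
  const T &operator[](size_t I) const { return Data[I]; }
};

int sumAll(Span<int> Args) {               // one parameter instead of two
  int S = 0;
  for (size_t I = 0; I != Args.size(); ++I)
    S += Args[I];
  return S;
}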
-CallExpr::CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs,
+CallExpr::CallExpr(ASTContext& C, Expr *fn, ArrayRef<Expr*> args,
QualType t, ExprValueKind VK, SourceLocation rparenloc)
: Expr(CallExprClass, t, VK, OK_Ordinary,
fn->isTypeDependent(),
fn->isValueDependent(),
fn->isInstantiationDependent(),
fn->containsUnexpandedParameterPack()),
- NumArgs(numargs) {
+ NumArgs(args.size()) {
- SubExprs = new (C) Stmt*[numargs+PREARGS_START];
+ SubExprs = new (C) Stmt*[args.size()+PREARGS_START];
SubExprs[FN] = fn;
- for (unsigned i = 0; i != numargs; ++i) {
+ for (unsigned i = 0; i != args.size(); ++i) {
if (args[i]->isTypeDependent())
ExprBits.TypeDependent = true;
if (args[i]->isValueDependent())
@@ -1123,15 +1192,15 @@ SourceLocation CallExpr::getLocEnd() const {
OffsetOfExpr *OffsetOfExpr::Create(ASTContext &C, QualType type,
SourceLocation OperatorLoc,
TypeSourceInfo *tsi,
- OffsetOfNode* compsPtr, unsigned numComps,
- Expr** exprsPtr, unsigned numExprs,
+ ArrayRef<OffsetOfNode> comps,
+ ArrayRef<Expr*> exprs,
SourceLocation RParenLoc) {
void *Mem = C.Allocate(sizeof(OffsetOfExpr) +
- sizeof(OffsetOfNode) * numComps +
- sizeof(Expr*) * numExprs);
+ sizeof(OffsetOfNode) * comps.size() +
+ sizeof(Expr*) * exprs.size());
- return new (Mem) OffsetOfExpr(C, type, OperatorLoc, tsi, compsPtr, numComps,
- exprsPtr, numExprs, RParenLoc);
+ return new (Mem) OffsetOfExpr(C, type, OperatorLoc, tsi, comps, exprs,
+ RParenLoc);
}
OffsetOfExpr *OffsetOfExpr::CreateEmpty(ASTContext &C,
@@ -1144,8 +1213,7 @@ OffsetOfExpr *OffsetOfExpr::CreateEmpty(ASTContext &C,
OffsetOfExpr::OffsetOfExpr(ASTContext &C, QualType type,
SourceLocation OperatorLoc, TypeSourceInfo *tsi,
- OffsetOfNode* compsPtr, unsigned numComps,
- Expr** exprsPtr, unsigned numExprs,
+ ArrayRef<OffsetOfNode> comps, ArrayRef<Expr*> exprs,
SourceLocation RParenLoc)
: Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
/*TypeDependent=*/false,
@@ -1153,19 +1221,19 @@ OffsetOfExpr::OffsetOfExpr(ASTContext &C, QualType type,
tsi->getType()->isInstantiationDependentType(),
tsi->getType()->containsUnexpandedParameterPack()),
OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
- NumComps(numComps), NumExprs(numExprs)
+ NumComps(comps.size()), NumExprs(exprs.size())
{
- for(unsigned i = 0; i < numComps; ++i) {
- setComponent(i, compsPtr[i]);
+ for (unsigned i = 0; i != comps.size(); ++i) {
+ setComponent(i, comps[i]);
}
- for(unsigned i = 0; i < numExprs; ++i) {
- if (exprsPtr[i]->isTypeDependent() || exprsPtr[i]->isValueDependent())
+ for (unsigned i = 0; i != exprs.size(); ++i) {
+ if (exprs[i]->isTypeDependent() || exprs[i]->isValueDependent())
ExprBits.ValueDependent = true;
- if (exprsPtr[i]->containsUnexpandedParameterPack())
+ if (exprs[i]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
- setIndexExpr(i, exprsPtr[i]);
+ setIndexExpr(i, exprs[i]);
}
}
@@ -1259,9 +1327,12 @@ SourceLocation MemberExpr::getLocStart() const {
return MemberLoc;
}
SourceLocation MemberExpr::getLocEnd() const {
+ SourceLocation EndLoc = getMemberNameInfo().getEndLoc();
if (hasExplicitTemplateArgs())
- return getRAngleLoc();
- return getMemberNameInfo().getEndLoc();
+ EndLoc = getRAngleLoc();
+ else if (EndLoc.isInvalid())
+ EndLoc = getBase()->getLocEnd();
+ return EndLoc;
}
void CastExpr::CheckCastConsistency() const {
@@ -1311,12 +1382,16 @@ void CastExpr::CheckCastConsistency() const {
assert(getType()->isBlockPointerType());
assert(getSubExpr()->getType()->isBlockPointerType());
goto CheckNoBasePath;
-
+
+ case CK_FunctionToPointerDecay:
+ assert(getType()->isPointerType());
+ assert(getSubExpr()->getType()->isFunctionType());
+ goto CheckNoBasePath;
+
// These should not have an inheritance path.
case CK_Dynamic:
case CK_ToUnion:
case CK_ArrayToPointerDecay:
- case CK_FunctionToPointerDecay:
case CK_NullToMemberPointer:
case CK_NullToPointer:
case CK_ConstructorConversion:
@@ -1357,6 +1432,7 @@ void CastExpr::CheckCastConsistency() const {
case CK_IntegralComplexToBoolean:
case CK_LValueBitCast: // -> bool&
case CK_UserDefinedConversion: // operator bool()
+ case CK_BuiltinFnToFnPtr:
CheckNoBasePath:
assert(path_empty() && "Cast kind should not have a base path!");
break;
@@ -1469,6 +1545,8 @@ const char *CastExpr::getCastKindName() const {
return "NonAtomicToAtomic";
case CK_CopyAndAutoreleaseBlockObject:
return "CopyAndAutoreleaseBlockObject";
+ case CK_BuiltinFnToFnPtr:
+ return "BuiltinFnToFnPtr";
}
llvm_unreachable("Unhandled cast kind!");
@@ -1564,7 +1642,7 @@ CStyleCastExpr *CStyleCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "<<=".
-const char *BinaryOperator::getOpcodeStr(Opcode Op) {
+StringRef BinaryOperator::getOpcodeStr(Opcode Op) {
switch (Op) {
case BO_PtrMemD: return ".*";
case BO_PtrMemI: return "->*";
@@ -1666,16 +1744,15 @@ OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
}
InitListExpr::InitListExpr(ASTContext &C, SourceLocation lbraceloc,
- Expr **initExprs, unsigned numInits,
- SourceLocation rbraceloc)
+ ArrayRef<Expr*> initExprs, SourceLocation rbraceloc)
: Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
false, false),
- InitExprs(C, numInits),
- LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), SyntacticForm(0)
+ InitExprs(C, initExprs.size()),
+ LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), AltForm(0, true)
{
sawArrayRangeDesignator(false);
setInitializesStdInitializerList(false);
- for (unsigned I = 0; I != numInits; ++I) {
+ for (unsigned I = 0; I != initExprs.size(); ++I) {
if (initExprs[I]->isTypeDependent())
ExprBits.TypeDependent = true;
if (initExprs[I]->isValueDependent())
@@ -1686,7 +1763,7 @@ InitListExpr::InitListExpr(ASTContext &C, SourceLocation lbraceloc,
ExprBits.ContainsUnexpandedParameterPack = true;
}
- InitExprs.insert(C, InitExprs.end(), initExprs, initExprs+numInits);
+ InitExprs.insert(C, InitExprs.end(), initExprs.begin(), initExprs.end());
}
void InitListExpr::reserveInits(ASTContext &C, unsigned NumInits) {
@@ -1723,15 +1800,15 @@ void InitListExpr::setArrayFiller(Expr *filler) {
bool InitListExpr::isStringLiteralInit() const {
if (getNumInits() != 1)
return false;
- const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(getType());
- if (!CAT || !CAT->getElementType()->isIntegerType())
+ const ArrayType *AT = getType()->getAsArrayTypeUnsafe();
+ if (!AT || !AT->getElementType()->isIntegerType())
return false;
- const Expr *Init = getInit(0)->IgnoreParenImpCasts();
+ const Expr *Init = getInit(0)->IgnoreParens();
return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
}
SourceRange InitListExpr::getSourceRange() const {
- if (SyntacticForm)
+ if (InitListExpr *SyntacticForm = getSyntacticForm())
return SyntacticForm->getSourceRange();
SourceLocation Beg = LBraceLoc, End = RBraceLoc;
if (Beg.isInvalid()) {
@@ -1945,6 +2022,11 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
return false;
}
+ // If we don't know precisely what we're looking at, let's not warn.
+ case UnresolvedLookupExprClass:
+ case CXXUnresolvedConstructExprClass:
+ return false;
+
case CXXTemporaryObjectExprClass:
case CXXConstructExprClass:
return false;
@@ -2014,6 +2096,7 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
R1 = getSourceRange();
return true;
}
+ case CXXFunctionalCastExprClass:
case CStyleCastExprClass: {
// Ignore an explicit cast to void unless the operand is a non-trivial
// volatile lvalue.
@@ -2032,6 +2115,10 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
return false;
}
+ // Ignore casts within macro expansions.
+ if (getExprLoc().isMacroID())
+ return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+
// If this is a cast to a constructor conversion, check the operand.
// Otherwise, the result of the cast is unused.
if (CE->getCastKind() == CK_ConstructorConversion)
@@ -2641,6 +2728,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx) const {
case UnresolvedMemberExprClass:
case PackExpansionExprClass:
case SubstNonTypeTemplateParmPackExprClass:
+ case FunctionParmPackExprClass:
llvm_unreachable("shouldn't see dependent / unresolved nodes here");
case DeclRefExprClass:
@@ -2995,6 +3083,24 @@ const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
return cast<ObjCPropertyRefExpr>(E);
}
+bool Expr::isObjCSelfExpr() const {
+ const Expr *E = IgnoreParenImpCasts();
+
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE)
+ return false;
+
+ const ImplicitParamDecl *Param = dyn_cast<ImplicitParamDecl>(DRE->getDecl());
+ if (!Param)
+ return false;
+
+ const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(Param->getDeclContext());
+ if (!M)
+ return false;
+
+ return M->getSelfDecl() == Param;
+}
+
FieldDecl *Expr::getBitField() {
Expr *E = this->IgnoreParens();
@@ -3339,33 +3445,29 @@ Selector ObjCMessageExpr::getSelector() const {
return Selector(SelectorOrMethod);
}
-ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
+QualType ObjCMessageExpr::getReceiverType() const {
switch (getReceiverKind()) {
case Instance:
- if (const ObjCObjectPointerType *Ptr
- = getInstanceReceiver()->getType()->getAs<ObjCObjectPointerType>())
- return Ptr->getInterfaceDecl();
- break;
-
+ return getInstanceReceiver()->getType();
case Class:
- if (const ObjCObjectType *Ty
- = getClassReceiver()->getAs<ObjCObjectType>())
- return Ty->getInterface();
- break;
-
+ return getClassReceiver();
case SuperInstance:
- if (const ObjCObjectPointerType *Ptr
- = getSuperType()->getAs<ObjCObjectPointerType>())
- return Ptr->getInterfaceDecl();
- break;
-
case SuperClass:
- if (const ObjCObjectType *Iface
- = getSuperType()->getAs<ObjCObjectType>())
- return Iface->getInterface();
- break;
+ return getSuperType();
}
+ llvm_unreachable("unexpected receiver kind");
+}
+
+ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
+ QualType T = getReceiverType();
+
+ if (const ObjCObjectPointerType *Ptr = T->getAs<ObjCObjectPointerType>())
+ return Ptr->getInterfaceDecl();
+
+ if (const ObjCObjectType *Ty = T->getAs<ObjCObjectType>())
+ return Ty->getInterface();
+
return 0;
}
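The refactor hoists the per-receiver-kind switch into getReceiverType, so getReceiverInterface applies the two type checks once to its result instead of repeating them per case. The same factoring on a toy message type (all names invented):

#include <string>

enum class Kind { Instance, Class, SuperInstance, SuperClass };

struct Msg {
  Kind K;
  std::string InstanceTy, ClassTy, SuperTy;

  const std::string &receiverType() const {  // shared by every caller now
    switch (K) {
    case Kind::Instance:      return InstanceTy;
    case Kind::Class:         return ClassTy;
    case Kind::SuperInstance:
    case Kind::SuperClass:    return SuperTy;
    }
    return SuperTy;                          // unreachable for valid kinds
  }
};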
@@ -3386,17 +3488,17 @@ bool ChooseExpr::isConditionTrue(const ASTContext &C) const {
return getCond()->EvaluateKnownConstInt(C) != 0;
}
-ShuffleVectorExpr::ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
+ShuffleVectorExpr::ShuffleVectorExpr(ASTContext &C, ArrayRef<Expr*> args,
QualType Type, SourceLocation BLoc,
SourceLocation RP)
: Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
Type->isDependentType(), Type->isDependentType(),
Type->isInstantiationDependentType(),
Type->containsUnexpandedParameterPack()),
- BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(nexpr)
+ BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size())
{
- SubExprs = new (C) Stmt*[nexpr];
- for (unsigned i = 0; i < nexpr; i++) {
+ SubExprs = new (C) Stmt*[args.size()];
+ for (unsigned i = 0; i != args.size(); i++) {
if (args[i]->isTypeDependent())
ExprBits.TypeDependent = true;
if (args[i]->isValueDependent())
@@ -3421,8 +3523,9 @@ void ShuffleVectorExpr::setExprs(ASTContext &C, Expr ** Exprs,
GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
SourceLocation GenericLoc, Expr *ControllingExpr,
- TypeSourceInfo **AssocTypes, Expr **AssocExprs,
- unsigned NumAssocs, SourceLocation DefaultLoc,
+ ArrayRef<TypeSourceInfo*> AssocTypes,
+ ArrayRef<Expr*> AssocExprs,
+ SourceLocation DefaultLoc,
SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack,
unsigned ResultIndex)
@@ -3434,19 +3537,21 @@ GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
AssocExprs[ResultIndex]->isValueDependent(),
AssocExprs[ResultIndex]->isInstantiationDependent(),
ContainsUnexpandedParameterPack),
- AssocTypes(new (Context) TypeSourceInfo*[NumAssocs]),
- SubExprs(new (Context) Stmt*[END_EXPR+NumAssocs]), NumAssocs(NumAssocs),
- ResultIndex(ResultIndex), GenericLoc(GenericLoc), DefaultLoc(DefaultLoc),
- RParenLoc(RParenLoc) {
+ AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
+ SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
+ NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
+ GenericLoc(GenericLoc), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
SubExprs[CONTROLLING] = ControllingExpr;
- std::copy(AssocTypes, AssocTypes+NumAssocs, this->AssocTypes);
- std::copy(AssocExprs, AssocExprs+NumAssocs, SubExprs+END_EXPR);
+ assert(AssocTypes.size() == AssocExprs.size());
+ std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
+ std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
}
GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
SourceLocation GenericLoc, Expr *ControllingExpr,
- TypeSourceInfo **AssocTypes, Expr **AssocExprs,
- unsigned NumAssocs, SourceLocation DefaultLoc,
+ ArrayRef<TypeSourceInfo*> AssocTypes,
+ ArrayRef<Expr*> AssocExprs,
+ SourceLocation DefaultLoc,
SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack)
: Expr(GenericSelectionExprClass,
@@ -3457,13 +3562,14 @@ GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
/*isValueDependent=*/true,
/*isInstantiationDependent=*/true,
ContainsUnexpandedParameterPack),
- AssocTypes(new (Context) TypeSourceInfo*[NumAssocs]),
- SubExprs(new (Context) Stmt*[END_EXPR+NumAssocs]), NumAssocs(NumAssocs),
- ResultIndex(-1U), GenericLoc(GenericLoc), DefaultLoc(DefaultLoc),
- RParenLoc(RParenLoc) {
+ AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
+ SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
+ NumAssocs(AssocExprs.size()), ResultIndex(-1U), GenericLoc(GenericLoc),
+ DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
SubExprs[CONTROLLING] = ControllingExpr;
- std::copy(AssocTypes, AssocTypes+NumAssocs, this->AssocTypes);
- std::copy(AssocExprs, AssocExprs+NumAssocs, SubExprs+END_EXPR);
+ assert(AssocTypes.size() == AssocExprs.size());
+ std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
+ std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
}
//===----------------------------------------------------------------------===//
@@ -3483,8 +3589,7 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
const Designator *Designators,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
- Expr **IndexExprs,
- unsigned NumIndexExprs,
+ ArrayRef<Expr*> IndexExprs,
Expr *Init)
: Expr(DesignatedInitExprClass, Ty,
Init->getValueKind(), Init->getObjectKind(),
@@ -3492,7 +3597,7 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
Init->isInstantiationDependent(),
Init->containsUnexpandedParameterPack()),
EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
- NumDesignators(NumDesignators), NumSubExprs(NumIndexExprs + 1) {
+ NumDesignators(NumDesignators), NumSubExprs(IndexExprs.size() + 1) {
this->Designators = new (C) Designator[NumDesignators];
// Record the initializer itself.
@@ -3542,20 +3647,20 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
}
}
- assert(IndexIdx == NumIndexExprs && "Wrong number of index expressions");
+ assert(IndexIdx == IndexExprs.size() && "Wrong number of index expressions");
}
DesignatedInitExpr *
DesignatedInitExpr::Create(ASTContext &C, Designator *Designators,
unsigned NumDesignators,
- Expr **IndexExprs, unsigned NumIndexExprs,
+ ArrayRef<Expr*> IndexExprs,
SourceLocation ColonOrEqualLoc,
bool UsesColonSyntax, Expr *Init) {
void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
- sizeof(Stmt *) * (NumIndexExprs + 1), 8);
+ sizeof(Stmt *) * (IndexExprs.size() + 1), 8);
return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
ColonOrEqualLoc, UsesColonSyntax,
- IndexExprs, NumIndexExprs, Init);
+ IndexExprs, Init);
}
DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(ASTContext &C,
@@ -3651,13 +3756,13 @@ void DesignatedInitExpr::ExpandDesignator(ASTContext &C, unsigned Idx,
}
ParenListExpr::ParenListExpr(ASTContext& C, SourceLocation lparenloc,
- Expr **exprs, unsigned nexprs,
+ ArrayRef<Expr*> exprs,
SourceLocation rparenloc)
: Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
false, false, false, false),
- NumExprs(nexprs), LParenLoc(lparenloc), RParenLoc(rparenloc) {
- Exprs = new (C) Stmt*[nexprs];
- for (unsigned i = 0; i != nexprs; ++i) {
+ NumExprs(exprs.size()), LParenLoc(lparenloc), RParenLoc(rparenloc) {
+ Exprs = new (C) Stmt*[exprs.size()];
+ for (unsigned i = 0; i != exprs.size(); ++i) {
if (exprs[i]->isTypeDependent())
ExprBits.TypeDependent = true;
if (exprs[i]->isValueDependent())
@@ -3902,14 +4007,14 @@ ObjCSubscriptRefExpr *ObjCSubscriptRefExpr::Create(ASTContext &C,
getMethod, setMethod, RB);
}
-AtomicExpr::AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr,
+AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args,
QualType t, AtomicOp op, SourceLocation RP)
: Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
false, false, false, false),
- NumSubExprs(nexpr), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
+ NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
{
- assert(nexpr == getNumSubExprs(op) && "wrong number of subexpressions");
- for (unsigned i = 0; i < nexpr; i++) {
+ assert(args.size() == getNumSubExprs(op) && "wrong number of subexpressions");
+ for (unsigned i = 0; i != args.size(); i++) {
if (args[i]->isTypeDependent())
ExprBits.TypeDependent = true;
if (args[i]->isValueDependent())
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
index 3fa49e0..55722a2 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
@@ -51,6 +51,26 @@ QualType CXXUuidofExpr::getTypeOperand() const {
.getUnqualifiedType();
}
+// static
+UuidAttr *CXXUuidofExpr::GetUuidAttrOfType(QualType QT) {
+ // Optionally remove one level of pointer, reference or array indirection.
+ const Type *Ty = QT.getTypePtr();
+ if (QT->isPointerType() || QT->isReferenceType())
+ Ty = QT->getPointeeType().getTypePtr();
+ else if (QT->isArrayType())
+ Ty = cast<ArrayType>(QT)->getElementType().getTypePtr();
+
+ // Loop over all record redeclarations looking for a uuid attribute.
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ for (CXXRecordDecl::redecl_iterator I = RD->redecls_begin(),
+ E = RD->redecls_end(); I != E; ++I) {
+ if (UuidAttr *Uuid = I->getAttr<UuidAttr>())
+ return Uuid;
+ }
+
+ return 0;
+}
+
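GetUuidAttrOfType strips one level of indirection, then walks the record's redeclaration chain until some declaration carries the attribute. The walk itself, on a toy declaration list:

struct Decl {
  bool HasUuid = false;
  const Decl *Next = nullptr;   // next redeclaration; null ends the chain
};

const Decl *findUuid(const Decl *D) {
  for (const Decl *I = D; I; I = I->Next)
    if (I->HasUuid)
      return I;                 // first redeclaration with the attribute
  return nullptr;
}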
// CXXScalarValueInitExpr
SourceRange CXXScalarValueInitExpr::getSourceRange() const {
SourceLocation Start = RParenLoc;
@@ -63,24 +83,24 @@ SourceRange CXXScalarValueInitExpr::getSourceRange() const {
CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
FunctionDecl *operatorDelete,
bool usualArrayDeleteWantsSize,
- Expr **placementArgs, unsigned numPlaceArgs,
+ ArrayRef<Expr*> placementArgs,
SourceRange typeIdParens, Expr *arraySize,
InitializationStyle initializationStyle,
Expr *initializer, QualType ty,
TypeSourceInfo *allocatedTypeInfo,
- SourceLocation startLoc, SourceRange directInitRange)
+ SourceRange Range, SourceRange directInitRange)
: Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary,
ty->isDependentType(), ty->isDependentType(),
ty->isInstantiationDependentType(),
ty->containsUnexpandedParameterPack()),
SubExprs(0), OperatorNew(operatorNew), OperatorDelete(operatorDelete),
AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens),
- StartLoc(startLoc), DirectInitRange(directInitRange),
+ Range(Range), DirectInitRange(directInitRange),
GlobalNew(globalNew), UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
assert((initializer != 0 || initializationStyle == NoInit) &&
"Only NoInit can have no initializer.");
StoredInitializationStyle = initializer ? initializationStyle + 1 : 0;
- AllocateArgsArray(C, arraySize != 0, numPlaceArgs, initializer != 0);
+ AllocateArgsArray(C, arraySize != 0, placementArgs.size(), initializer != 0);
unsigned i = 0;
if (Array) {
if (arraySize->isInstantiationDependent())
@@ -102,7 +122,7 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
SubExprs[i++] = initializer;
}
- for (unsigned j = 0; j < NumPlacementArgs; ++j) {
+ for (unsigned j = 0; j != placementArgs.size(); ++j) {
if (placementArgs[j]->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
if (placementArgs[j]->containsUnexpandedParameterPack())
@@ -110,6 +130,14 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
SubExprs[i++] = placementArgs[j];
}
+
+ switch (getInitializationStyle()) {
+ case CallInit:
+ this->Range.setEnd(DirectInitRange.getEnd()); break;
+ case ListInit:
+ this->Range.setEnd(getInitializer()->getSourceRange().getEnd()); break;
+ default: break;
+ }
}
void CXXNewExpr::AllocateArgsArray(ASTContext &C, bool isArray,
@@ -127,18 +155,6 @@ bool CXXNewExpr::shouldNullCheckAllocation(ASTContext &Ctx) const {
castAs<FunctionProtoType>()->isNothrow(Ctx);
}
-SourceLocation CXXNewExpr::getEndLoc() const {
- switch (getInitializationStyle()) {
- case NoInit:
- return AllocatedTypeInfo->getTypeLoc().getEndLoc();
- case CallInit:
- return DirectInitRange.getEnd();
- case ListInit:
- return getInitializer()->getSourceRange().getEnd();
- }
- llvm_unreachable("bogus initialization style");
-}
-
// CXXDeleteExpr
QualType CXXDeleteExpr::getDestroyedType() const {
const Expr *Arg = getArgument();
@@ -227,7 +243,7 @@ UnresolvedLookupExpr::Create(ASTContext &C,
return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
TemplateKWLoc, NameInfo,
ADL, /*Overload*/ true, Args,
- Begin, End, /*StdIsAssociated=*/false);
+ Begin, End);
}
UnresolvedLookupExpr *
@@ -697,15 +713,14 @@ CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(ASTContext &C,
CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(ASTContext &C,
CXXConstructorDecl *Cons,
TypeSourceInfo *Type,
- Expr **Args,
- unsigned NumArgs,
+ ArrayRef<Expr*> Args,
SourceRange parenRange,
bool HadMultipleCandidates,
bool ZeroInitialization)
: CXXConstructExpr(C, CXXTemporaryObjectExprClass,
Type->getType().getNonReferenceType(),
Type->getTypeLoc().getBeginLoc(),
- Cons, false, Args, NumArgs,
+ Cons, false, Args,
HadMultipleCandidates, /*FIXME*/false, ZeroInitialization,
CXXConstructExpr::CK_Complete, parenRange),
Type(Type) {
@@ -719,14 +734,14 @@ SourceRange CXXTemporaryObjectExpr::getSourceRange() const {
CXXConstructExpr *CXXConstructExpr::Create(ASTContext &C, QualType T,
SourceLocation Loc,
CXXConstructorDecl *D, bool Elidable,
- Expr **Args, unsigned NumArgs,
+ ArrayRef<Expr*> Args,
bool HadMultipleCandidates,
bool ListInitialization,
bool ZeroInitialization,
ConstructionKind ConstructKind,
SourceRange ParenRange) {
return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc, D,
- Elidable, Args, NumArgs,
+ Elidable, Args,
HadMultipleCandidates, ListInitialization,
ZeroInitialization, ConstructKind,
ParenRange);
@@ -735,7 +750,7 @@ CXXConstructExpr *CXXConstructExpr::Create(ASTContext &C, QualType T,
CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
SourceLocation Loc,
CXXConstructorDecl *D, bool elidable,
- Expr **args, unsigned numargs,
+ ArrayRef<Expr*> args,
bool HadMultipleCandidates,
bool ListInitialization,
bool ZeroInitialization,
@@ -745,16 +760,16 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
T->isDependentType(), T->isDependentType(),
T->isInstantiationDependentType(),
T->containsUnexpandedParameterPack()),
- Constructor(D), Loc(Loc), ParenRange(ParenRange), NumArgs(numargs),
+ Constructor(D), Loc(Loc), ParenRange(ParenRange), NumArgs(args.size()),
Elidable(elidable), HadMultipleCandidates(HadMultipleCandidates),
ListInitialization(ListInitialization),
ZeroInitialization(ZeroInitialization),
ConstructKind(ConstructKind), Args(0)
{
if (NumArgs) {
- Args = new (C) Stmt*[NumArgs];
+ Args = new (C) Stmt*[args.size()];
- for (unsigned i = 0; i != NumArgs; ++i) {
+ for (unsigned i = 0; i != args.size(); ++i) {
assert(args[i] && "NULL argument in CXXConstructExpr");
if (args[i]->isValueDependent())
@@ -877,9 +892,12 @@ LambdaExpr *LambdaExpr::Create(ASTContext &Context,
QualType T = Context.getTypeDeclType(Class);
unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (Captures.size() + 1);
- if (!ArrayIndexVars.empty())
- Size += sizeof(VarDecl *) * ArrayIndexVars.size()
- + sizeof(unsigned) * (Captures.size() + 1);
+ if (!ArrayIndexVars.empty()) {
+ Size += sizeof(unsigned) * (Captures.size() + 1);
+ // Realign for following VarDecl array.
+ Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<VarDecl*>());
+ Size += sizeof(VarDecl *) * ArrayIndexVars.size();
+ }
void *Mem = Context.Allocate(Size);
return new (Mem) LambdaExpr(T, IntroducerRange, CaptureDefault,
Captures, ExplicitParams, ExplicitResultType,
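The fixed size computation inserts padding so the VarDecl* array starts at pointer alignment after the unsigned array. What llvm::RoundUpToAlignment contributes, written as plain arithmetic (the layout constants are illustrative):

#include <cstddef>

size_t roundUp(size_t Size, size_t Align) {   // Align must be a power of two
  return (Size + Align - 1) & ~(Align - 1);
}

// Layout: [header][unsigned array][padding][pointer array]
size_t totalSize(size_t Header, size_t NumUnsigned, size_t NumPtrs) {
  size_t Size = Header + sizeof(unsigned) * NumUnsigned;
  Size = roundUp(Size, alignof(void *));      // realign for the pointers
  return Size + sizeof(void *) * NumPtrs;
}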
@@ -997,8 +1015,7 @@ ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C, EmptyShell empty,
CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
- Expr **Args,
- unsigned NumArgs,
+ ArrayRef<Expr*> Args,
SourceLocation RParenLoc)
: Expr(CXXUnresolvedConstructExprClass,
Type->getType().getNonReferenceType(),
@@ -1011,9 +1028,9 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
Type(Type),
LParenLoc(LParenLoc),
RParenLoc(RParenLoc),
- NumArgs(NumArgs) {
+ NumArgs(Args.size()) {
Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1);
- for (unsigned I = 0; I != NumArgs; ++I) {
+ for (unsigned I = 0; I != Args.size(); ++I) {
if (Args[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -1025,13 +1042,11 @@ CXXUnresolvedConstructExpr *
CXXUnresolvedConstructExpr::Create(ASTContext &C,
TypeSourceInfo *Type,
SourceLocation LParenLoc,
- Expr **Args,
- unsigned NumArgs,
+ ArrayRef<Expr*> Args,
SourceLocation RParenLoc) {
void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
- sizeof(Expr *) * NumArgs);
- return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc,
- Args, NumArgs, RParenLoc);
+ sizeof(Expr *) * Args.size());
+ return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc, Args, RParenLoc);
}
CXXUnresolvedConstructExpr *
@@ -1300,6 +1315,34 @@ TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
return TemplateArgument(Arguments, NumArguments);
}
+FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack,
+ SourceLocation NameLoc,
+ unsigned NumParams,
+ Decl * const *Params)
+ : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary,
+ true, true, true, true),
+ ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
+ if (Params)
+ std::uninitialized_copy(Params, Params + NumParams,
+ reinterpret_cast<Decl**>(this+1));
+}
+
+FunctionParmPackExpr *
+FunctionParmPackExpr::Create(ASTContext &Context, QualType T,
+ ParmVarDecl *ParamPack, SourceLocation NameLoc,
+ llvm::ArrayRef<Decl*> Params) {
+ return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
+ sizeof(ParmVarDecl*) * Params.size()))
+ FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
+}
+
+FunctionParmPackExpr *
+FunctionParmPackExpr::CreateEmpty(ASTContext &Context, unsigned NumParams) {
+ return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
+ sizeof(ParmVarDecl*) * NumParams))
+ FunctionParmPackExpr(QualType(), 0, SourceLocation(), 0, 0);
+}
+
TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc,
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
index f16d70b..24ec6bb 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
@@ -134,6 +134,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
// ObjC instance variables are lvalues
// FIXME: ObjC++0x might have different rules
case Expr::ObjCIvarRefExprClass:
+ case Expr::FunctionParmPackExprClass:
return Cl::CL_LValue;
// C99 6.5.2.5p5 says that compound literals are lvalues.
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
index 06c41a2..6e0b5fc 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
@@ -953,7 +953,7 @@ static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
if (VD)
Info.Note(VD->getLocation(), diag::note_declared_at);
else
- Info.Note(Base.dyn_cast<const Expr*>()->getExprLoc(),
+ Info.Note(Base.get<const Expr*>()->getExprLoc(),
diag::note_constexpr_temporary_here);
}
@@ -987,6 +987,14 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
LVal.getLValueCallIndex() == 0) &&
"have call index for global lvalue");
+ // Check if this is a thread-local variable.
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
+ if (const VarDecl *Var = dyn_cast<const VarDecl>(VD)) {
+ if (Var->isThreadSpecified())
+ return false;
+ }
+ }
+
// Allow address constant expressions to be past-the-end pointers. This is
// an extension: the standard requires them to point to an object.
if (!IsReferenceType)
@@ -2586,7 +2594,7 @@ public:
const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
if (!FD) return Error(E);
assert(!FD->getType()->isReferenceType() && "prvalue reference?");
- assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
FD->getParent()->getCanonicalDecl() && "record / field mismatch");
SubobjectDesignator Designator(BaseTy);
@@ -2665,7 +2673,7 @@ public:
if (E->isArrow()) {
if (!EvaluatePointer(E->getBase(), Result, this->Info))
return false;
- BaseTy = E->getBase()->getType()->getAs<PointerType>()->getPointeeType();
+ BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
} else if (E->getBase()->isRValue()) {
assert(E->getBase()->getType()->isRecordType());
if (!EvaluateTemporary(E->getBase(), Result, this->Info))
@@ -2878,19 +2886,13 @@ LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
}
bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
- if (E->isTypeOperand())
+ if (!E->isPotentiallyEvaluated())
return Success(E);
- CXXRecordDecl *RD = E->getExprOperand()->getType()->getAsCXXRecordDecl();
- // FIXME: The standard says "a typeid expression whose operand is of a
- // polymorphic class type" is not a constant expression, but it probably
- // means "a typeid expression whose operand is potentially evaluated".
- if (RD && RD->isPolymorphic()) {
- Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
- << E->getExprOperand()->getType()
- << E->getExprOperand()->getSourceRange();
- return false;
- }
- return Success(E);
+
+ Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
+ << E->getExprOperand()->getType()
+ << E->getExprOperand()->getSourceRange();
+ return false;
}
bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
@@ -3036,7 +3038,7 @@ bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->getOpcode() == BO_Sub)
AdditionalOffset = -AdditionalOffset;
- QualType Pointee = PExp->getType()->getAs<PointerType>()->getPointeeType();
+ QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
AdditionalOffset);
}
@@ -4288,6 +4290,16 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Error(E);
}
+ case Builtin::BI__builtin_bswap16:
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+
+ return Success(Val.byteSwap(), E);
+ }
+
case Builtin::BI__builtin_classify_type:
return Success(EvaluateBuiltinClassifyType(E), E);
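The new cases constant-fold the __builtin_bswap family by evaluating the argument and byte-swapping it. For reference, the byte reversal that APInt::byteSwap performs, written out by hand for the 32-bit case:

#include <cstdint>

uint32_t bswap32(uint32_t V) {
  return (V >> 24) | ((V >> 8) & 0x0000FF00u) |
         ((V << 8) & 0x00FF0000u) | (V << 24);
}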
@@ -4902,7 +4914,7 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
return false;
const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
- const Expr *RHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
+ const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>();
if (!LHSExpr || !RHSExpr)
return false;
const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
@@ -5176,7 +5188,7 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
QualType Ty = E->getTypeOfArgument();
if (Ty->isVectorType()) {
- unsigned n = Ty->getAs<VectorType>()->getNumElements();
+ unsigned n = Ty->castAs<VectorType>()->getNumElements();
// The vec_step built-in functions that take a 3-component
// vector return 4. (OpenCL 1.1 spec 6.11.12)
@@ -5349,6 +5361,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_IntegralRealToComplex:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
+ case CK_BuiltinFnToFnPtr:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
@@ -5753,7 +5766,7 @@ static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
}
bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
- QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
if (ElemTy->isRealFloatingType()) {
Result.makeComplexFloat();
APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy));
@@ -5835,6 +5848,7 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
@@ -5911,9 +5925,9 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (!Visit(E->getSubExpr()))
return false;
- QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType To = E->getType()->castAs<ComplexType>()->getElementType();
QualType From
- = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+ = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
Result.makeComplexFloat();
return HandleIntToFloatCast(Info, E, From, Result.IntReal,
To, Result.FloatReal) &&
@@ -6177,11 +6191,9 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
return false;
Result = Info.CurrentCall->Temporaries[E];
} else if (E->getType()->isVoidType()) {
- if (Info.getLangOpts().CPlusPlus0x)
+ if (!Info.getLangOpts().CPlusPlus0x)
Info.CCEDiag(E, diag::note_constexpr_nonliteral)
<< E->getType();
- else
- Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
if (!EvaluateVoid(E, Info))
return false;
} else if (Info.getLangOpts().CPlusPlus0x) {
@@ -6470,6 +6482,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::OpaqueValueExprClass:
case Expr::PackExpansionExprClass:
case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::FunctionParmPackExprClass:
case Expr::AsTypeExprClass:
case Expr::ObjCIndirectCopyRestoreExprClass:
case Expr::MaterializeTemporaryExprClass:
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
index 7c7a5e5..851944a 100644
--- a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
@@ -343,17 +343,10 @@ private:
void mangleCXXDtorType(CXXDtorType T);
void mangleTemplateArgs(const ASTTemplateArgumentListInfo &TemplateArgs);
- void mangleTemplateArgs(TemplateName Template,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs);
- void mangleTemplateArgs(const TemplateParameterList &PL,
- const TemplateArgument *TemplateArgs,
+ void mangleTemplateArgs(const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
- void mangleTemplateArgs(const TemplateParameterList &PL,
- const TemplateArgumentList &AL);
- void mangleTemplateArg(const NamedDecl *P, TemplateArgument A);
- void mangleUnresolvedTemplateArgs(const TemplateArgument *args,
- unsigned numArgs);
+ void mangleTemplateArgs(const TemplateArgumentList &AL);
+ void mangleTemplateArg(TemplateArgument A);
void mangleTemplateParameter(unsigned Index);
@@ -570,8 +563,7 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) {
const TemplateArgumentList *TemplateArgs = 0;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
mangleUnscopedTemplateName(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ mangleTemplateArgs(*TemplateArgs);
return;
}
@@ -593,8 +585,7 @@ void CXXNameMangler::mangleName(const TemplateDecl *TD,
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
mangleUnscopedTemplateName(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+ mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
} else {
mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
}
@@ -693,9 +684,10 @@ void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
if (Value.isSigned() && Value.isNegative()) {
Out << 'n';
- Value.abs().print(Out, true);
- } else
- Value.print(Out, Value.isSigned());
+ Value.abs().print(Out, /*signed*/ false);
+ } else {
+ Value.print(Out, /*signed*/ false);
+ }
}
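The corrected mangleNumber prints a negative value as an 'n' prefix followed by the unsigned magnitude, per the Itanium <number> grammar. A sketch over int64_t (the real code operates on APSInt):

#include <cstdint>
#include <string>

std::string mangleNumber(int64_t N) {
  if (N >= 0)
    return std::to_string(N);
  // 'n' prefix, then the magnitude printed unsigned; the -(N + 1) + 1
  // dance avoids signed overflow for the most negative value.
  return "n" + std::to_string(static_cast<uint64_t>(-(N + 1)) + 1);
}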
void CXXNameMangler::mangleNumber(int64_t Number) {
@@ -737,8 +729,7 @@ void CXXNameMangler::manglePrefix(QualType type) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
- TST->getNumArgs());
+ mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
addSubstitution(QualType(TST, 0));
}
} else if (const DependentTemplateSpecializationType *DTST
@@ -751,7 +742,7 @@ void CXXNameMangler::manglePrefix(QualType type) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs());
+ mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs());
} else {
// We use the QualType mangle type variant here because it handles
// substitutions.
@@ -942,7 +933,7 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
}
}
- mangleUnresolvedTemplateArgs(tst->getArgs(), tst->getNumArgs());
+ mangleTemplateArgs(tst->getArgs(), tst->getNumArgs());
break;
}
@@ -959,7 +950,7 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
const DependentTemplateSpecializationType *tst
= cast<DependentTemplateSpecializationType>(type);
mangleSourceName(tst->getIdentifier());
- mangleUnresolvedTemplateArgs(tst->getArgs(), tst->getNumArgs());
+ mangleTemplateArgs(tst->getArgs(), tst->getNumArgs());
break;
}
}
@@ -1228,8 +1219,7 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
const TemplateArgumentList *TemplateArgs = 0;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ mangleTemplateArgs(*TemplateArgs);
}
else {
manglePrefix(DC, NoFunction);
@@ -1246,8 +1236,7 @@ void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
Out << 'N';
mangleTemplatePrefix(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+ mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
Out << 'E';
}
@@ -1341,11 +1330,8 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
}
Out << "Ul";
- DeclarationName Name
- = getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
- const FunctionProtoType *Proto
- = cast<CXXMethodDecl>(*Lambda->lookup(Name).first)->getType()->
- getAs<FunctionProtoType>();
+ const FunctionProtoType *Proto = Lambda->getLambdaTypeInfo()->getType()->
+ getAs<FunctionProtoType>();
mangleBareFunctionType(Proto, /*MangleReturnType=*/false);
Out << "E";
@@ -1423,8 +1409,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
const TemplateArgumentList *TemplateArgs = 0;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ mangleTemplateArgs(*TemplateArgs);
}
else if(NoFunction && (isa<FunctionDecl>(ND) || isa<ObjCMethodDecl>(ND)))
return;
@@ -2110,6 +2095,7 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
// ::= Dv [<dimension expression>] _ <element type>
// <extended element type> ::= <element type>
// ::= p # AltiVec vector pixel
+// ::= b # AltiVec vector bool
void CXXNameMangler::mangleType(const VectorType *T) {
if ((T->getVectorKind() == VectorType::NeonVector ||
T->getVectorKind() == VectorType::NeonPolyVector)) {
@@ -2174,7 +2160,7 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
+ mangleTemplateArgs(T->getArgs(), T->getNumArgs());
addSubstitution(QualType(T, 0));
}
}
@@ -2200,7 +2186,7 @@ void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
+ mangleTemplateArgs(T->getArgs(), T->getNumArgs());
Out << 'E';
}
@@ -2414,7 +2400,6 @@ recurse:
case Expr::ExpressionTraitExprClass:
case Expr::VAArgExprClass:
case Expr::CXXUuidofExprClass:
- case Expr::CXXNoexceptExprClass:
case Expr::CUDAKernelCallExprClass:
case Expr::AsTypeExprClass:
case Expr::PseudoObjectExprClass:
@@ -2606,6 +2591,11 @@ recurse:
Out <<"_E";
break;
+ case Expr::CXXNoexceptExprClass:
+ Out << "nx";
+ mangleExpression(cast<CXXNoexceptExpr>(E)->getOperand());
+ break;
+
case Expr::UnaryExprOrTypeTraitExprClass: {
const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E);
@@ -2808,7 +2798,15 @@ recurse:
// };
Out << "_SUBSTPACK_";
break;
-
+
+ case Expr::FunctionParmPackExprClass: {
+ // FIXME: not clear how to mangle this!
+ const FunctionParmPackExpr *FPPE = cast<FunctionParmPackExpr>(E);
+ Out << "v110_SUBSTPACK";
+ mangleFunctionParam(FPPE->getParameterPack());
+ break;
+ }
+
case Expr::DependentScopeDeclRefExprClass: {
const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
mangleUnresolvedName(DRE->getQualifier(), 0, DRE->getDeclName(), Arity);
@@ -3043,50 +3041,28 @@ void CXXNameMangler::mangleTemplateArgs(
// <template-args> ::= I <template-arg>+ E
Out << 'I';
for (unsigned i = 0, e = TemplateArgs.NumTemplateArgs; i != e; ++i)
- mangleTemplateArg(0, TemplateArgs.getTemplateArgs()[i].getArgument());
- Out << 'E';
-}
-
-void CXXNameMangler::mangleTemplateArgs(TemplateName Template,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
- if (TemplateDecl *TD = Template.getAsTemplateDecl())
- return mangleTemplateArgs(*TD->getTemplateParameters(), TemplateArgs,
- NumTemplateArgs);
-
- mangleUnresolvedTemplateArgs(TemplateArgs, NumTemplateArgs);
-}
-
-void CXXNameMangler::mangleUnresolvedTemplateArgs(const TemplateArgument *args,
- unsigned numArgs) {
- // <template-args> ::= I <template-arg>+ E
- Out << 'I';
- for (unsigned i = 0; i != numArgs; ++i)
- mangleTemplateArg(0, args[i]);
+ mangleTemplateArg(TemplateArgs.getTemplateArgs()[i].getArgument());
Out << 'E';
}
-void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
- const TemplateArgumentList &AL) {
+void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentList &AL) {
// <template-args> ::= I <template-arg>+ E
Out << 'I';
for (unsigned i = 0, e = AL.size(); i != e; ++i)
- mangleTemplateArg(PL.getParam(i), AL[i]);
+ mangleTemplateArg(AL[i]);
Out << 'E';
}
-void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
- const TemplateArgument *TemplateArgs,
+void CXXNameMangler::mangleTemplateArgs(const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs) {
// <template-args> ::= I <template-arg>+ E
Out << 'I';
for (unsigned i = 0; i != NumTemplateArgs; ++i)
- mangleTemplateArg(PL.getParam(i), TemplateArgs[i]);
+ mangleTemplateArg(TemplateArgs[i]);
Out << 'E';
}
-void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
- TemplateArgument A) {
+void CXXNameMangler::mangleTemplateArg(TemplateArgument A) {
// <template-arg> ::= <type> # type or template
// ::= X <expression> E # expression
// ::= <expr-primary> # simple expressions
@@ -3135,25 +3111,12 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
mangleIntegerLiteral(A.getIntegralType(), A.getAsIntegral());
break;
case TemplateArgument::Declaration: {
- assert(P && "Missing template parameter for declaration argument");
// <expr-primary> ::= L <mangled-name> E # external name
- // <expr-primary> ::= L <type> 0 E
// Clang produces AST's where pointer-to-member-function expressions
// and pointer-to-function expressions are represented as a declaration not
// an expression. We compensate for it here to produce the correct mangling.
- const NonTypeTemplateParmDecl *Parameter = cast<NonTypeTemplateParmDecl>(P);
-
- // Handle NULL pointer arguments.
- if (!A.getAsDecl()) {
- Out << "L";
- mangleType(Parameter->getType());
- Out << "0E";
- break;
- }
-
-
- NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
- bool compensateMangling = !Parameter->getType()->isReferenceType();
+ ValueDecl *D = A.getAsDecl();
+ bool compensateMangling = !A.isDeclForReferenceParam();
if (compensateMangling) {
Out << 'X';
mangleOperatorName(OO_Amp, 1);
@@ -3176,14 +3139,20 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
break;
}
-
+ case TemplateArgument::NullPtr: {
+ // <expr-primary> ::= L <type> 0 E
+ Out << 'L';
+ mangleType(A.getNullPtrType());
+ Out << "0E";
+ break;
+ }
case TemplateArgument::Pack: {
// Note: proposal by Mike Herrick on 12/20/10
Out << 'J';
for (TemplateArgument::pack_iterator PA = A.pack_begin(),
PAEnd = A.pack_end();
PA != PAEnd; ++PA)
- mangleTemplateArg(P, *PA);
+ mangleTemplateArg(*PA);
Out << 'E';
}
}
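
Note on the Itanium hunks above: noexcept mangling moves to a real "nx" production, mangleTemplateArg loses its template-parameter argument, and null pointer arguments become a dedicated NullPtr kind mangled as L <type> 0 E. A hedged source-level sketch of constructs that reach the new paths (identifiers and manglings below are illustrative readings of the Itanium rules, not output captured from this compiler):

    #include <type_traits>

    // TemplateArgument::NullPtr: a null pointer argument now mangles as
    // L <type> 0 E (e.g. LPi0E for an int* parameter) without consulting
    // the parameter declaration that mangleTemplateArg used to require.
    template <int *P> struct Holder {};
    Holder<nullptr> H;

    // Expr::CXXNoexceptExprClass: a dependent noexcept(expr) reached via
    // mangleExpression now emits "nx" <expression> instead of falling
    // into the unsupported-expression path.
    template <class T>
    auto probe(T t) -> std::integral_constant<bool, noexcept(t + t)>;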
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
index e2cee7f..5d5b83d 100644
--- a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/ABI.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include <map>
@@ -56,7 +57,7 @@ public:
void mangleVariableEncoding(const VarDecl *VD);
void mangleNumber(int64_t Number);
void mangleNumber(const llvm::APSInt &Value);
- void mangleType(QualType T, SourceRange Range);
+ void mangleType(QualType T, SourceRange Range, bool MangleQualifiers = true);
private:
void disableBackReferences() { UseNameBackReferences = false; }
@@ -68,6 +69,7 @@ private:
void manglePostfix(const DeclContext *DC, bool NoFunction=false);
void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
void mangleQualifiers(Qualifiers Quals, bool IsMember);
+ void manglePointerQualifiers(Qualifiers Quals);
void mangleUnscopedTemplateName(const TemplateDecl *ND);
void mangleTemplateInstantiationName(const TemplateDecl *TD,
@@ -75,7 +77,7 @@ private:
void mangleObjCMethodName(const ObjCMethodDecl *MD);
void mangleLocalName(const FunctionDecl *FD);
- void mangleTypeRepeated(QualType T, SourceRange Range);
+ void mangleArgumentType(QualType T, SourceRange Range);
// Declare manglers for every type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
@@ -95,6 +97,7 @@ private:
void mangleFunctionClass(const FunctionDecl *FD);
void mangleCallingConvention(const FunctionType *T, bool IsInstMethod = false);
void mangleIntegerLiteral(QualType T, const llvm::APSInt &Number);
+ void mangleExpression(const Expr *E);
void mangleThrowSpecification(const FunctionProtoType *T);
void mangleTemplateArgs(
@@ -266,18 +269,18 @@ void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
Out << '4';
// Now mangle the type.
// <variable-type> ::= <type> <cvr-qualifiers>
- // ::= <type> A # pointers, references, arrays
+ // ::= <type> <pointee-cvr-qualifiers> # pointers, references
// Pointers and references are odd. The type of 'int * const foo;' gets
// mangled as 'QAHA' instead of 'PAHB', for example.
TypeLoc TL = VD->getTypeSourceInfo()->getTypeLoc();
QualType Ty = TL.getType();
if (Ty->isPointerType() || Ty->isReferenceType()) {
mangleType(Ty, TL.getSourceRange());
- Out << 'A';
+ mangleQualifiers(Ty->getPointeeType().getQualifiers(), false);
} else if (const ArrayType *AT = getASTContext().getAsArrayType(Ty)) {
// Global arrays are funny, too.
mangleType(AT, true);
- Out << 'A';
+ mangleQualifiers(Ty.getQualifiers(), false);
} else {
mangleType(Ty.getLocalUnqualifiedType(), TL.getSourceRange());
mangleQualifiers(Ty.getLocalQualifiers(), false);
@@ -304,39 +307,23 @@ void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
}
void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
- // <number> ::= [?] <decimal digit> # 1 <= Number <= 10
- // ::= [?] <hex digit>+ @ # 0 or > 9; A = 0, B = 1, etc...
- // ::= [?] @ # 0 (alternate mangling, not emitted by VC)
- if (Number < 0) {
- Out << '?';
- Number = -Number;
- }
- // There's a special shorter mangling for 0, but Microsoft
- // chose not to use it. Instead, 0 gets mangled as "A@". Oh well...
- if (Number >= 1 && Number <= 10)
- Out << Number-1;
- else {
- // We have to build up the encoding in reverse order, so it will come
- // out right when we write it out.
- char Encoding[16];
- char *EndPtr = Encoding+sizeof(Encoding);
- char *CurPtr = EndPtr;
- do {
- *--CurPtr = 'A' + (Number % 16);
- Number /= 16;
- } while (Number);
- Out.write(CurPtr, EndPtr-CurPtr);
- Out << '@';
- }
+ llvm::APSInt APSNumber(/*BitWidth=*/64, /*isUnsigned=*/false);
+ APSNumber = Number;
+ mangleNumber(APSNumber);
}
void MicrosoftCXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ // <number> ::= [?] <decimal digit> # 1 <= Number <= 10
+ // ::= [?] <hex digit>+ @ # 0 or > 9; A = 0, B = 1, etc...
+ // ::= [?] @ # 0 (alternate mangling, not emitted by VC)
if (Value.isSigned() && Value.isNegative()) {
Out << '?';
mangleNumber(llvm::APSInt(Value.abs()));
return;
}
llvm::APSInt Temp(Value);
+ // There's a special shorter mangling for 0, but Microsoft
+ // chose not to use it. Instead, 0 gets mangled as "A@". Oh well...
if (Value.uge(1) && Value.ule(10)) {
--Temp;
Temp.print(Out, false);
@@ -348,10 +335,10 @@ void MicrosoftCXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
char *CurPtr = EndPtr;
llvm::APSInt NibbleMask(Value.getBitWidth(), Value.isUnsigned());
NibbleMask = 0xf;
- for (int i = 0, e = Value.getActiveBits() / 4; i != e; ++i) {
+ do {
*--CurPtr = 'A' + Temp.And(NibbleMask).getLimitedValue(0xf);
Temp = Temp.lshr(4);
- }
+ } while (Temp != 0);
Out.write(CurPtr, EndPtr-CurPtr);
Out << '@';
}
@@ -386,7 +373,7 @@ isTemplate(const NamedDecl *ND,
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
TypeSourceInfo *TSI = Spec->getTypeAsWritten();
if (TSI) {
- TemplateSpecializationTypeLoc &TSTL =
+ TemplateSpecializationTypeLoc TSTL =
cast<TemplateSpecializationTypeLoc>(TSI->getTypeLoc());
TemplateArgumentListInfo LI(TSTL.getLAngleLoc(), TSTL.getRAngleLoc());
for (unsigned i = 0, e = TSTL.getNumArgs(); i != e; ++i)
@@ -784,6 +771,23 @@ MicrosoftCXXNameMangler::mangleIntegerLiteral(QualType T,
}
void
+MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
+ // See if this is a constant expression.
+ llvm::APSInt Value;
+ if (E->isIntegerConstantExpr(Value, Context.getASTContext())) {
+ mangleIntegerLiteral(E->getType(), Value);
+ return;
+ }
+
+ // As bad as this diagnostic is, it's better than crashing.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet mangle expression type %0");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+}
+
+void
MicrosoftCXXNameMangler::mangleTemplateArgs(
const SmallVectorImpl<TemplateArgumentLoc> &TemplateArgs) {
// <template-args> ::= {<type> | <integer-literal>}+ @
@@ -800,21 +804,19 @@ MicrosoftCXXNameMangler::mangleTemplateArgs(
case TemplateArgument::Integral:
mangleIntegerLiteral(TA.getIntegralType(), TA.getAsIntegral());
break;
- case TemplateArgument::Expression: {
- // See if this is a constant expression.
- Expr *TAE = TA.getAsExpr();
- llvm::APSInt Value;
- if (TAE->isIntegerConstantExpr(Value, Context.getASTContext())) {
- mangleIntegerLiteral(TAE->getType(), Value);
- break;
- }
- /* fallthrough */
- } default: {
+ case TemplateArgument::Expression:
+ mangleExpression(TA.getAsExpr());
+ break;
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
+ case TemplateArgument::Pack: {
// Issue a diagnostic.
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this %select{ERROR|ERROR|pointer/reference|ERROR|"
- "template|template pack expansion|expression|parameter pack}0 "
+ "cannot mangle this %select{ERROR|ERROR|pointer/reference|nullptr|"
+ "integral|template|template pack expansion|ERROR|parameter pack}0 "
"template argument yet");
Diags.Report(TAL.getLocation(), DiagID)
<< TA.getKind()
@@ -879,43 +881,60 @@ void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
// ::= 3 # ?
// ::= 4 # ?
// ::= 5 # not really based
+ bool HasConst = Quals.hasConst(),
+ HasVolatile = Quals.hasVolatile();
if (!IsMember) {
- if (!Quals.hasVolatile()) {
- if (!Quals.hasConst())
- Out << 'A';
- else
- Out << 'B';
+ if (HasConst && HasVolatile) {
+ Out << 'D';
+ } else if (HasVolatile) {
+ Out << 'C';
+ } else if (HasConst) {
+ Out << 'B';
} else {
- if (!Quals.hasConst())
- Out << 'C';
- else
- Out << 'D';
+ Out << 'A';
}
} else {
- if (!Quals.hasVolatile()) {
- if (!Quals.hasConst())
- Out << 'Q';
- else
- Out << 'R';
+ if (HasConst && HasVolatile) {
+ Out << 'T';
+ } else if (HasVolatile) {
+ Out << 'S';
+ } else if (HasConst) {
+ Out << 'R';
} else {
- if (!Quals.hasConst())
- Out << 'S';
- else
- Out << 'T';
+ Out << 'Q';
}
}
// FIXME: For now, just drop all extension qualifiers on the floor.
}
-void MicrosoftCXXNameMangler::mangleTypeRepeated(QualType T, SourceRange Range) {
+void MicrosoftCXXNameMangler::manglePointerQualifiers(Qualifiers Quals) {
+ // <pointer-cvr-qualifiers> ::= P # no qualifiers
+ // ::= Q # const
+ // ::= R # volatile
+ // ::= S # const volatile
+ bool HasConst = Quals.hasConst(),
+ HasVolatile = Quals.hasVolatile();
+ if (HasConst && HasVolatile) {
+ Out << 'S';
+ } else if (HasVolatile) {
+ Out << 'R';
+ } else if (HasConst) {
+ Out << 'Q';
+ } else {
+ Out << 'P';
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleArgumentType(QualType T,
+ SourceRange Range) {
void *TypePtr = getASTContext().getCanonicalType(T).getAsOpaquePtr();
ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
if (Found == TypeBackReferences.end()) {
size_t OutSizeBefore = Out.GetNumBytesInBuffer();
- mangleType(T,Range);
+ mangleType(T, Range, false);
// See if it's worth creating a back reference.
// Only types longer than 1 character are considered
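
Hoisting the HasConst/HasVolatile flags makes the qualifier letter table explicit. Read as a table, the cases the two routines above emit are (my tabulation of the same content):

                          none   const   volatile   const volatile
        non-member <cvr>   A       B        C            D
        member <cvr>       Q       R        S            T
        <pointer-cvr>      P       Q        R            S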
@@ -930,38 +949,30 @@ void MicrosoftCXXNameMangler::mangleTypeRepeated(QualType T, SourceRange Range)
}
}
-void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range) {
+void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range,
+ bool MangleQualifiers) {
// Only operate on the canonical type!
T = getASTContext().getCanonicalType(T);
-
+
Qualifiers Quals = T.getLocalQualifiers();
- if (Quals) {
- // We have to mangle these now, while we still have enough information.
- // <pointer-cvr-qualifiers> ::= P # pointer
- // ::= Q # const pointer
- // ::= R # volatile pointer
- // ::= S # const volatile pointer
- if (T->isAnyPointerType() || T->isMemberPointerType() ||
- T->isBlockPointerType()) {
- if (!Quals.hasVolatile())
- Out << 'Q';
- else {
- if (!Quals.hasConst())
- Out << 'R';
- else
- Out << 'S';
- }
- } else
- // Just emit qualifiers like normal.
- // NB: When we mangle a pointer/reference type, and the pointee
- // type has no qualifiers, the lack of qualifier gets mangled
- // in there.
- mangleQualifiers(Quals, false);
- } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
- T->isBlockPointerType()) {
- Out << 'P';
+ // We have to mangle these now, while we still have enough information.
+ if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ manglePointerQualifiers(Quals);
+ } else if (Quals && MangleQualifiers) {
+ mangleQualifiers(Quals, false);
}
- switch (T->getTypeClass()) {
+
+ SplitQualType split = T.split();
+ const Type *ty = split.Ty;
+
+ // If we're mangling a qualified array type, push the qualifiers to
+ // the element type.
+ if (split.Quals && isa<ArrayType>(T)) {
+ ty = Context.getASTContext().getAsArrayType(T);
+ }
+
+ switch (ty->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT) \
case Type::CLASS: \
@@ -969,7 +980,7 @@ void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range) {
return;
#define TYPE(CLASS, PARENT) \
case Type::CLASS: \
- mangleType(static_cast<const CLASS##Type*>(T.getTypePtr()), Range); \
+ mangleType(cast<CLASS##Type>(ty), Range); \
break;
#include "clang/AST/TypeNodes.def"
#undef ABSTRACT_TYPE
@@ -1059,6 +1070,8 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T,
SourceRange) {
// Structors only appear in decls, so at this point we know it's not a
// structor type.
+ // FIXME: This may not be lambda-friendly.
+ Out << "$$A6";
mangleType(T, NULL, false, false);
}
void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T,
@@ -1117,14 +1130,14 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
ParmEnd = D->param_end(); Parm != ParmEnd; ++Parm) {
TypeSourceInfo *TSI = (*Parm)->getTypeSourceInfo();
QualType Type = TSI ? TSI->getType() : (*Parm)->getType();
- mangleTypeRepeated(Type, (*Parm)->getSourceRange());
+ mangleArgumentType(Type, (*Parm)->getSourceRange());
}
} else {
// Happens for function pointer type arguments for example.
for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
ArgEnd = Proto->arg_type_end();
Arg != ArgEnd; ++Arg)
- mangleTypeRepeated(*Arg, SourceRange());
+ mangleArgumentType(*Arg, SourceRange());
}
// <builtin-type> ::= Z # ellipsis
if (Proto->isVariadic())
@@ -1214,7 +1227,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T,
if (CC == CC_Default) {
if (IsInstMethod) {
const FunctionProtoType *FPT =
- T->getCanonicalTypeUnqualified().getAs<FunctionProtoType>();
+ T->getCanonicalTypeUnqualified().castAs<FunctionProtoType>();
bool isVariadic = FPT->isVariadic();
CC = getASTContext().getDefaultCXXMethodCallConv(isVariadic);
} else {
@@ -1260,10 +1273,10 @@ void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T,
// <class-type> ::= V <name>
// <enum-type> ::= W <size> <name>
void MicrosoftCXXNameMangler::mangleType(const EnumType *T, SourceRange) {
- mangleType(static_cast<const TagType*>(T));
+ mangleType(cast<TagType>(T));
}
void MicrosoftCXXNameMangler::mangleType(const RecordType *T, SourceRange) {
- mangleType(static_cast<const TagType*>(T));
+ mangleType(cast<TagType>(T));
}
void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
switch (T->getDecl()->getTagKind()) {
@@ -1271,6 +1284,7 @@ void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
Out << 'T';
break;
case TTK_Struct:
+ case TTK_Interface:
Out << 'U';
break;
case TTK_Class:
@@ -1286,37 +1300,39 @@ void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
}
// <type> ::= <array-type>
-// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
-// <element-type> # as global
+// <array-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// [Y <dimension-count> <dimension>+]
+// <element-type> # as global
// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
-// <element-type> # as param
+// <element-type> # as param
// It's supposed to be the other way around, but for some strange reason, it
// isn't. Today this behavior is retained for the sole purpose of backwards
// compatibility.
void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
// This isn't a recursive mangling, so now we have to do it all in this
// one call.
- if (IsGlobal)
- Out << 'P';
- else
+ if (IsGlobal) {
+ manglePointerQualifiers(T->getElementType().getQualifiers());
+ } else {
Out << 'Q';
+ }
mangleExtraDimensions(T->getElementType());
}
void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T,
SourceRange) {
- mangleType(static_cast<const ArrayType *>(T), false);
+ mangleType(cast<ArrayType>(T), false);
}
void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T,
SourceRange) {
- mangleType(static_cast<const ArrayType *>(T), false);
+ mangleType(cast<ArrayType>(T), false);
}
void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T,
SourceRange) {
- mangleType(static_cast<const ArrayType *>(T), false);
+ mangleType(cast<ArrayType>(T), false);
}
void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T,
SourceRange) {
- mangleType(static_cast<const ArrayType *>(T), false);
+ mangleType(cast<ArrayType>(T), false);
}
void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
SmallVector<llvm::APInt, 3> Dimensions;
@@ -1409,10 +1425,8 @@ void MicrosoftCXXNameMangler::mangleType(const PointerType *T,
Out << '6';
mangleType(FT, NULL, false, false);
} else {
- if (!PointeeTy.hasQualifiers())
- // Lack of qualifiers is mangled as 'A'.
- Out << 'A';
- mangleType(PointeeTy, Range);
+ mangleQualifiers(PointeeTy.getQualifiers(), false);
+ mangleType(PointeeTy, Range, false);
}
}
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
@@ -1497,7 +1511,9 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T,
void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T,
SourceRange Range) {
Out << "_E";
- mangleType(T->getPointeeType(), Range);
+
+ QualType pointee = T->getPointeeType();
+ mangleType(pointee->castAs<FunctionProtoType>(), NULL, false, false);
}
void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T,
diff --git a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
index 39077d1..0837509 100644
--- a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
@@ -344,6 +344,7 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::ARCUnbridgedCast:
case BuiltinType::Half:
case BuiltinType::PseudoObject:
+ case BuiltinType::BuiltinFn:
break;
}
diff --git a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
index fa87afd..1135928 100644
--- a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
@@ -20,22 +20,63 @@ using namespace clang;
typedef llvm::DenseMap<Stmt*, Stmt*> MapTy;
-static void BuildParentMap(MapTy& M, Stmt* S) {
- for (Stmt::child_range I = S->children(); I; ++I)
- if (*I) {
- // Prefer the first time we see this statement in the traversal.
- // This is important for PseudoObjectExprs.
- Stmt *&Parent = M[*I];
- if (!Parent) {
- Parent = S;
- BuildParentMap(M, *I);
+enum OpaqueValueMode {
+ OV_Transparent,
+ OV_Opaque
+};
+
+static void BuildParentMap(MapTy& M, Stmt* S,
+ OpaqueValueMode OVMode = OV_Transparent) {
+
+ switch (S->getStmtClass()) {
+ case Stmt::PseudoObjectExprClass: {
+ assert(OVMode == OV_Transparent && "Should not appear alongside OVEs");
+ PseudoObjectExpr *POE = cast<PseudoObjectExpr>(S);
+
+ M[POE->getSyntacticForm()] = S;
+ BuildParentMap(M, POE->getSyntacticForm(), OV_Transparent);
+
+ for (PseudoObjectExpr::semantics_iterator I = POE->semantics_begin(),
+ E = POE->semantics_end();
+ I != E; ++I) {
+ M[*I] = S;
+ BuildParentMap(M, *I, OV_Opaque);
+ }
+ break;
+ }
+ case Stmt::BinaryConditionalOperatorClass: {
+ assert(OVMode == OV_Transparent && "Should not appear alongside OVEs");
+ BinaryConditionalOperator *BCO = cast<BinaryConditionalOperator>(S);
+
+ M[BCO->getCommon()] = S;
+ BuildParentMap(M, BCO->getCommon(), OV_Transparent);
+
+ M[BCO->getCond()] = S;
+ BuildParentMap(M, BCO->getCond(), OV_Opaque);
+
+ M[BCO->getTrueExpr()] = S;
+ BuildParentMap(M, BCO->getTrueExpr(), OV_Opaque);
+
+ M[BCO->getFalseExpr()] = S;
+ BuildParentMap(M, BCO->getFalseExpr(), OV_Transparent);
+
+ break;
+ }
+ case Stmt::OpaqueValueExprClass:
+ if (OVMode == OV_Transparent) {
+ OpaqueValueExpr *OVE = cast<OpaqueValueExpr>(S);
+ M[OVE->getSourceExpr()] = S;
+ BuildParentMap(M, OVE->getSourceExpr(), OV_Transparent);
+ }
+ break;
+ default:
+ for (Stmt::child_range I = S->children(); I; ++I) {
+ if (*I) {
+ M[*I] = S;
+ BuildParentMap(M, *I, OVMode);
}
}
-
- // Also include the source expr tree of an OpaqueValueExpr in the map.
- if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
- M[OVE->getSourceExpr()] = S;
- BuildParentMap(M, OVE->getSourceExpr());
+ break;
}
}
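
The ParentMap rewrite replaces "prefer the first parent seen" with explicit modes: while walking the semantic form of a PseudoObjectExpr (or the non-common arms of a BinaryConditionalOperator), opaque values are not expanded, so a source expression keeps the parent from its syntactic occurrence. A toy model of the invariant, not clang's real AST:

    #include <map>
    #include <vector>

    struct Node {
      std::vector<Node *> Children;
      Node *Source = nullptr;          // set only on opaque-value nodes
    };

    enum Mode { Transparent, Opaque };

    void buildParents(std::map<Node *, Node *> &M, Node *S, Mode Mo) {
      if (S->Source) {                 // opaque-value node
        if (Mo == Transparent) {       // expand only at syntactic occurrence
          M[S->Source] = S;
          buildParents(M, S->Source, Transparent);
        }
        return;
      }
      for (Node *C : S->Children) {
        M[C] = S;
        buildParents(M, C, Mo);
      }
    }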
diff --git a/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp b/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp
index a5a3287..80b6272 100644
--- a/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp
@@ -143,11 +143,10 @@ const char *RawComment::extractBriefText(const ASTContext &Context) const {
// a separate allocator for all temporary stuff.
llvm::BumpPtrAllocator Allocator;
- comments::CommandTraits Traits;
- comments::Lexer L(Allocator, Traits,
- Range.getBegin(), comments::CommentOptions(),
+ comments::Lexer L(Allocator, Context.getCommentCommandTraits(),
+ Range.getBegin(),
RawText.begin(), RawText.end());
- comments::BriefParser P(L, Traits);
+ comments::BriefParser P(L, Context.getCommentCommandTraits());
const std::string Result = P.Parse();
const unsigned BriefTextLength = Result.size();
@@ -160,19 +159,22 @@ const char *RawComment::extractBriefText(const ASTContext &Context) const {
}
comments::FullComment *RawComment::parse(const ASTContext &Context,
+ const Preprocessor *PP,
const Decl *D) const {
// Make sure that RawText is valid.
getRawText(Context.getSourceManager());
- comments::CommandTraits Traits;
- comments::Lexer L(Context.getAllocator(), Traits,
- getSourceRange().getBegin(), comments::CommentOptions(),
+ comments::Lexer L(Context.getAllocator(), Context.getCommentCommandTraits(),
+ getSourceRange().getBegin(),
RawText.begin(), RawText.end());
comments::Sema S(Context.getAllocator(), Context.getSourceManager(),
- Context.getDiagnostics(), Traits);
+ Context.getDiagnostics(),
+ Context.getCommentCommandTraits(),
+ PP);
S.setDecl(D);
comments::Parser P(L, S, Context.getAllocator(), Context.getSourceManager(),
- Context.getDiagnostics(), Traits);
+ Context.getDiagnostics(),
+ Context.getCommentCommandTraits());
return P.parseFullComment();
}
@@ -182,26 +184,22 @@ bool containsOnlyWhitespace(StringRef Str) {
return Str.find_first_not_of(" \t\f\v\r\n") == StringRef::npos;
}
-bool onlyWhitespaceBetweenComments(SourceManager &SM,
- const RawComment &C1, const RawComment &C2) {
- std::pair<FileID, unsigned> C1EndLocInfo = SM.getDecomposedLoc(
- C1.getSourceRange().getEnd());
- std::pair<FileID, unsigned> C2BeginLocInfo = SM.getDecomposedLoc(
- C2.getSourceRange().getBegin());
+bool onlyWhitespaceBetween(SourceManager &SM,
+ SourceLocation Loc1, SourceLocation Loc2) {
+ std::pair<FileID, unsigned> Loc1Info = SM.getDecomposedLoc(Loc1);
+ std::pair<FileID, unsigned> Loc2Info = SM.getDecomposedLoc(Loc2);
- // Question does not make sense if comments are located in different files.
- if (C1EndLocInfo.first != C2BeginLocInfo.first)
+ // Question does not make sense if locations are in different files.
+ if (Loc1Info.first != Loc2Info.first)
return false;
bool Invalid = false;
- const char *Buffer = SM.getBufferData(C1EndLocInfo.first, &Invalid).data();
+ const char *Buffer = SM.getBufferData(Loc1Info.first, &Invalid).data();
if (Invalid)
return false;
- StringRef TextBetweenComments(Buffer + C1EndLocInfo.second,
- C2BeginLocInfo.second - C1EndLocInfo.second);
-
- return containsOnlyWhitespace(TextBetweenComments);
+ StringRef Text(Buffer + Loc1Info.second, Loc2Info.second - Loc1Info.second);
+ return containsOnlyWhitespace(Text);
}
} // unnamed namespace
@@ -221,11 +219,13 @@ void RawCommentList::addComment(const RawComment &RC,
}
if (OnlyWhitespaceSeen) {
- if (!onlyWhitespaceBetweenComments(SourceMgr, LastComment, RC))
+ if (!onlyWhitespaceBetween(SourceMgr,
+ PrevCommentEndLoc,
+ RC.getSourceRange().getBegin()))
OnlyWhitespaceSeen = false;
}
- LastComment = RC;
+ PrevCommentEndLoc = RC.getSourceRange().getEnd();
// Ordinary comments are not interesting for us.
if (RC.isOrdinary())
@@ -244,15 +244,20 @@ void RawCommentList::addComment(const RawComment &RC,
// Merge comments only if there is only whitespace between them.
// Can't merge trailing and non-trailing comments.
- // Merge trailing comments if they are on same or consecutive lines.
+ // Merge comments if they are on same or consecutive lines.
+ bool Merged = false;
if (OnlyWhitespaceSeen &&
- (C1.isTrailingComment() == C2.isTrailingComment()) &&
- (!C1.isTrailingComment() ||
- C1.getEndLine(SourceMgr) + 1 >= C2.getBeginLine(SourceMgr))) {
- SourceRange MergedRange(C1.getSourceRange().getBegin(),
- C2.getSourceRange().getEnd());
- *Comments.back() = RawComment(SourceMgr, MergedRange, true);
- } else
+ (C1.isTrailingComment() == C2.isTrailingComment())) {
+ unsigned C1EndLine = C1.getEndLine(SourceMgr);
+ unsigned C2BeginLine = C2.getBeginLine(SourceMgr);
+ if (C1EndLine + 1 == C2BeginLine || C1EndLine == C2BeginLine) {
+ SourceRange MergedRange(C1.getSourceRange().getBegin(),
+ C2.getSourceRange().getEnd());
+ *Comments.back() = RawComment(SourceMgr, MergedRange, true);
+ Merged = true;
+ }
+ }
+ if (!Merged)
Comments.push_back(new (Allocator) RawComment(RC));
OnlyWhitespaceSeen = true;
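
After this change the merge condition is symmetric: two adjacent doc comments of the same trailing-ness merge when separated only by whitespace and at most one line break. The predicate applied above reduces to this sketch (line numbers as returned by getEndLine/getBeginLine):

    bool shouldMerge(bool OnlyWhitespaceSeen, bool C1Trailing, bool C2Trailing,
                     unsigned C1EndLine, unsigned C2BeginLine) {
      return OnlyWhitespaceSeen && C1Trailing == C2Trailing &&
             (C1EndLine == C2BeginLine || C1EndLine + 1 == C2BeginLine);
    }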
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
index d5df63f..4dfffc4 100644
--- a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -789,8 +789,8 @@ protected:
void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); }
void setDataSize(uint64_t NewSize) { DataSize = NewSize; }
- RecordLayoutBuilder(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
- void operator=(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
+ RecordLayoutBuilder(const RecordLayoutBuilder &) LLVM_DELETED_FUNCTION;
+ void operator=(const RecordLayoutBuilder &) LLVM_DELETED_FUNCTION;
public:
static const CXXMethodDecl *ComputeKeyFunction(const CXXRecordDecl *RD);
};
@@ -1557,6 +1557,13 @@ CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
bool Allowed = EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset);
(void)Allowed;
assert(Allowed && "Base subobject externally placed at overlapping offset");
+
+ if (InferAlignment && Offset < getDataSize().RoundUpToAlignment(BaseAlign)){
+ // The externally-supplied base offset is before the base offset we
+ // computed. Assume that the structure is packed.
+ Alignment = CharUnits::One();
+ InferAlignment = false;
+ }
}
if (!Base->Class->isEmpty()) {
@@ -1574,12 +1581,12 @@ CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
}
void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
IsUnion = RD->isUnion();
+ IsMsStruct = RD->isMsStruct(Context);
+ }
- Packed = D->hasAttr<PackedAttr>();
-
- IsMsStruct = D->hasAttr<MsStructAttr>();
+ Packed = D->hasAttr<PackedAttr>();
// Honor the default struct packing maximum alignment flag.
if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) {
@@ -1616,7 +1623,6 @@ void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
if (ExternalLayout) {
if (ExternalAlign > 0) {
Alignment = Context.toCharUnitsFromBits(ExternalAlign);
- UnpackedAlignment = Alignment;
} else {
// The external source didn't have alignment information; infer it.
InferAlignment = true;
@@ -2085,7 +2091,7 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
ZeroLengthBitfield = 0;
}
- if (Context.getLangOpts().MSBitfields || IsMsStruct) {
+ if (IsMsStruct) {
// If MS bitfield layout is required, figure out what type is being
// laid out and align the field to the width of that type.
@@ -2166,11 +2172,6 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
}
void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
- if (ExternalLayout) {
- setSize(ExternalSize);
- return;
- }
-
// In C++, records cannot be of size 0.
if (Context.getLangOpts().CPlusPlus && getSizeInBits() == 0) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
@@ -2184,20 +2185,37 @@ void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
setSize(CharUnits::One());
}
+ // Finally, round the size of the record up to the alignment of the
+ // record itself.
+ uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t UnpackedSizeInBits =
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.toBits(UnpackedAlignment));
+ CharUnits UnpackedSize = Context.toCharUnitsFromBits(UnpackedSizeInBits);
+ uint64_t RoundedSize
+ = llvm::RoundUpToAlignment(getSizeInBits(), Context.toBits(Alignment));
+
+ if (ExternalLayout) {
+ // If we're inferring alignment, and the external size is smaller than
+ // our size after we've rounded up to alignment, conservatively set the
+ // alignment to 1.
+ if (InferAlignment && ExternalSize < RoundedSize) {
+ Alignment = CharUnits::One();
+ InferAlignment = false;
+ }
+ setSize(ExternalSize);
+ return;
+ }
+
+
// MSVC doesn't round up to the alignment of the record with virtual bases.
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
if (isMicrosoftCXXABI() && RD->getNumVBases())
return;
}
- // Finally, round the size of the record up to the alignment of the
- // record itself.
- uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastByte;
- uint64_t UnpackedSizeInBits =
- llvm::RoundUpToAlignment(getSizeInBits(),
- Context.toBits(UnpackedAlignment));
- CharUnits UnpackedSize = Context.toCharUnitsFromBits(UnpackedSizeInBits);
- setSize(llvm::RoundUpToAlignment(getSizeInBits(), Context.toBits(Alignment)));
+ // Set the size to the final size.
+ setSize(RoundedSize);
unsigned CharBitNum = Context.getTargetInfo().getCharWidth();
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
@@ -2255,7 +2273,7 @@ RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
if (InferAlignment && ExternalFieldOffset < ComputedOffset) {
// The externally-supplied field offset is before the field offset we
// computed. Assume that the structure is packed.
- Alignment = CharUnits::fromQuantity(1);
+ Alignment = CharUnits::One();
InferAlignment = false;
}
@@ -2263,6 +2281,20 @@ RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
return ExternalFieldOffset;
}
+/// \brief Get diagnostic %select index for tag kind for
+/// field padding diagnostic message.
+/// WARNING: Indexes apply to particular diagnostics only!
+///
+/// \returns diagnostic %select index.
+static unsigned getPaddingDiagFromTagKind(TagTypeKind Tag) {
+ switch (Tag) {
+ case TTK_Struct: return 0;
+ case TTK_Interface: return 1;
+ case TTK_Class: return 2;
+ default: llvm_unreachable("Invalid tag kind for field padding diagnostic!");
+ }
+}
+
void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
uint64_t UnpaddedOffset,
uint64_t UnpackedOffset,
@@ -2291,14 +2323,14 @@ void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
}
if (D->getIdentifier())
Diag(D->getLocation(), diag::warn_padded_struct_field)
- << (D->getParent()->isStruct() ? 0 : 1) // struct|class
+ << getPaddingDiagFromTagKind(D->getParent()->getTagKind())
<< Context.getTypeDeclType(D->getParent())
<< PadSize
<< (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1) // plural or not
<< D->getIdentifier();
else
Diag(D->getLocation(), diag::warn_padded_struct_anon_field)
- << (D->getParent()->isStruct() ? 0 : 1) // struct|class
+ << getPaddingDiagFromTagKind(D->getParent()->getTagKind())
<< Context.getTypeDeclType(D->getParent())
<< PadSize
<< (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
@@ -2508,8 +2540,8 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!");
// Look up this layout, if already laid out, return what we have.
- ObjCContainerDecl *Key =
- Impl ? (ObjCContainerDecl*) Impl : (ObjCContainerDecl*) D;
+ const ObjCContainerDecl *Key =
+ Impl ? (const ObjCContainerDecl*) Impl : (const ObjCContainerDecl*) D;
if (const ASTRecordLayout *Entry = ObjCLayouts[Key])
return *Entry;
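
Both new InferAlignment blocks in this file (LayoutBase and FinishLayout) apply the same conservative rule: if an externally supplied offset or size is smaller than what the computed alignment would require, the original translation unit must have packed the record, so fall back to byte alignment. In sketch form:

    // Sketch of the shared inference rule; the real code uses CharUnits.
    void maybeInferPacked(bool &InferAlignment, long &AlignmentInChars,
                          long ExternalValue, long ComputedMinimum) {
      if (InferAlignment && ExternalValue < ComputedMinimum) {
        AlignmentInChars = 1;     // CharUnits::One() in the real code
        InferAlignment = false;   // one contradiction settles it
      }
    }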
diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
index 77452c9..eafcf92 100644
--- a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -320,15 +321,52 @@ bool Stmt::hasImplicitControlFlow() const {
}
}
-Expr *AsmStmt::getOutputExpr(unsigned i) {
- return cast<Expr>(Exprs[i]);
+std::string AsmStmt::generateAsmString(ASTContext &C) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->generateAsmString(C);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->generateAsmString(C);
+ llvm_unreachable("unknown asm statement kind!");
}
-/// getOutputConstraint - Return the constraint string for the specified
-/// output operand. All output constraints are known to be non-empty (either
-/// '=' or '+').
StringRef AsmStmt::getOutputConstraint(unsigned i) const {
- return getOutputConstraintLiteral(i)->getString();
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getOutputConstraint(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getOutputConstraint(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+const Expr *AsmStmt::getOutputExpr(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getOutputExpr(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getOutputExpr(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+StringRef AsmStmt::getInputConstraint(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getInputConstraint(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getInputConstraint(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+const Expr *AsmStmt::getInputExpr(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getInputExpr(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getInputExpr(i);
+ llvm_unreachable("unknown asm statement kind!");
+}
+
+StringRef AsmStmt::getClobber(unsigned i) const {
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ return gccAsmStmt->getClobber(i);
+ if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ return msAsmStmt->getClobber(i);
+ llvm_unreachable("unknown asm statement kind!");
}
/// getNumPlusOperands - Return the number of output operands that have a "+"
@@ -341,22 +379,35 @@ unsigned AsmStmt::getNumPlusOperands() const {
return Res;
}
-Expr *AsmStmt::getInputExpr(unsigned i) {
+StringRef GCCAsmStmt::getClobber(unsigned i) const {
+ return getClobberStringLiteral(i)->getString();
+}
+
+Expr *GCCAsmStmt::getOutputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i]);
+}
+
+/// getOutputConstraint - Return the constraint string for the specified
+/// output operand. All output constraints are known to be non-empty (either
+/// '=' or '+').
+StringRef GCCAsmStmt::getOutputConstraint(unsigned i) const {
+ return getOutputConstraintLiteral(i)->getString();
+}
+
+Expr *GCCAsmStmt::getInputExpr(unsigned i) {
return cast<Expr>(Exprs[i + NumOutputs]);
}
-void AsmStmt::setInputExpr(unsigned i, Expr *E) {
+void GCCAsmStmt::setInputExpr(unsigned i, Expr *E) {
Exprs[i + NumOutputs] = E;
}
-
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
-StringRef AsmStmt::getInputConstraint(unsigned i) const {
+StringRef GCCAsmStmt::getInputConstraint(unsigned i) const {
return getInputConstraintLiteral(i)->getString();
}
-
-void AsmStmt::setOutputsAndInputsAndClobbers(ASTContext &C,
+void GCCAsmStmt::setOutputsAndInputsAndClobbers(ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
@@ -390,7 +441,7 @@ void AsmStmt::setOutputsAndInputsAndClobbers(ASTContext &C,
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
-int AsmStmt::getNamedOperand(StringRef SymbolicName) const {
+int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const {
unsigned NumPlusOperands = 0;
// Check if this is an output operand.
@@ -410,7 +461,7 @@ int AsmStmt::getNamedOperand(StringRef SymbolicName) const {
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false.
-unsigned AsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
+unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
ASTContext &C, unsigned &DiagOffs) const {
StringRef Str = getAsmString()->getString();
const char *StrStart = Str.begin();
@@ -548,6 +599,44 @@ unsigned AsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
}
}
+/// Assemble final IR asm string (GCC-style).
+std::string GCCAsmStmt::generateAsmString(ASTContext &C) const {
+ // Analyze the asm string to decompose it into its pieces. We know that Sema
+ // has already done this, so it is guaranteed to be successful.
+ SmallVector<GCCAsmStmt::AsmStringPiece, 4> Pieces;
+ unsigned DiagOffs;
+ AnalyzeAsmString(Pieces, C, DiagOffs);
+
+ std::string AsmString;
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ if (Pieces[i].isString())
+ AsmString += Pieces[i].getString();
+ else if (Pieces[i].getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ else
+ AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+ Pieces[i].getModifier() + '}';
+ }
+ return AsmString;
+}
+
+/// Assemble final IR asm string (MS-style).
+std::string MSAsmStmt::generateAsmString(ASTContext &C) const {
+ // FIXME: This needs to be translated into the IR string representation.
+ return AsmStr;
+}
+
+Expr *MSAsmStmt::getOutputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i]);
+}
+
+Expr *MSAsmStmt::getInputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i + NumOutputs]);
+}
+void MSAsmStmt::setInputExpr(unsigned i, Expr *E) {
+ Exprs[i + NumOutputs] = E;
+}
+
QualType CXXCatchStmt::getCaughtType() const {
if (ExceptionDecl)
return ExceptionDecl->getType();
@@ -558,15 +647,14 @@ QualType CXXCatchStmt::getCaughtType() const {
// Constructors
//===----------------------------------------------------------------------===//
-AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
- bool isvolatile, bool msasm,
- unsigned numoutputs, unsigned numinputs,
- IdentifierInfo **names, StringLiteral **constraints,
- Expr **exprs, StringLiteral *asmstr, unsigned numclobbers,
- StringLiteral **clobbers, SourceLocation rparenloc)
- : Stmt(AsmStmtClass), AsmLoc(asmloc), RParenLoc(rparenloc), AsmStr(asmstr)
- , IsSimple(issimple), IsVolatile(isvolatile), MSAsm(msasm)
- , NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {
+GCCAsmStmt::GCCAsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
+ bool isvolatile, unsigned numoutputs, unsigned numinputs,
+ IdentifierInfo **names, StringLiteral **constraints,
+ Expr **exprs, StringLiteral *asmstr,
+ unsigned numclobbers, StringLiteral **clobbers,
+ SourceLocation rparenloc)
+ : AsmStmt(GCCAsmStmtClass, asmloc, issimple, isvolatile, numoutputs,
+ numinputs, numclobbers), RParenLoc(rparenloc), AsmStr(asmstr) {
unsigned NumExprs = NumOutputs + NumInputs;
@@ -585,26 +673,37 @@ AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
MSAsmStmt::MSAsmStmt(ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
- ArrayRef<Token> asmtoks, ArrayRef<IdentifierInfo*> inputs,
- ArrayRef<IdentifierInfo*> outputs, StringRef asmstr,
- ArrayRef<StringRef> clobbers, SourceLocation endloc)
- : Stmt(MSAsmStmtClass), AsmLoc(asmloc), LBraceLoc(lbraceloc), EndLoc(endloc),
- AsmStr(asmstr.str()), IsSimple(issimple), IsVolatile(isvolatile),
- NumAsmToks(asmtoks.size()), NumInputs(inputs.size()),
- NumOutputs(outputs.size()), NumClobbers(clobbers.size()) {
+ ArrayRef<Token> asmtoks, unsigned numoutputs,
+ unsigned numinputs, ArrayRef<IdentifierInfo*> names,
+ ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs,
+ StringRef asmstr, ArrayRef<StringRef> clobbers,
+ SourceLocation endloc)
+ : AsmStmt(MSAsmStmtClass, asmloc, issimple, isvolatile, numoutputs,
+ numinputs, clobbers.size()), LBraceLoc(lbraceloc),
+ EndLoc(endloc), AsmStr(asmstr.str()), NumAsmToks(asmtoks.size()) {
unsigned NumExprs = NumOutputs + NumInputs;
Names = new (C) IdentifierInfo*[NumExprs];
- for (unsigned i = 0, e = NumOutputs; i != e; ++i)
- Names[i] = outputs[i];
- for (unsigned i = NumOutputs, e = NumExprs; i != e; ++i)
- Names[i] = inputs[i];
+ for (unsigned i = 0, e = NumExprs; i != e; ++i)
+ Names[i] = names[i];
+
+ Exprs = new (C) Stmt*[NumExprs];
+ for (unsigned i = 0, e = NumExprs; i != e; ++i)
+ Exprs[i] = exprs[i];
AsmToks = new (C) Token[NumAsmToks];
for (unsigned i = 0, e = NumAsmToks; i != e; ++i)
AsmToks[i] = asmtoks[i];
+ Constraints = new (C) StringRef[NumExprs];
+ for (unsigned i = 0, e = NumExprs; i != e; ++i) {
+ size_t size = constraints[i].size();
+ char *dest = new (C) char[size];
+ std::strncpy(dest, constraints[i].data(), size);
+ Constraints[i] = StringRef(dest, size);
+ }
+
Clobbers = new (C) StringRef[NumClobbers];
for (unsigned i = 0, e = NumClobbers; i != e; ++i) {
// FIXME: Avoid the allocation/copy if at all possible.
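
With AsmStmt now a common base, the shared accessors dispatch manually via dyn_cast rather than virtually; GCC-style statements additionally gain generateAsmString, which re-renders the already-analyzed pieces in LLVM IR's inline-asm operand syntax. A hedged example of the transformation, following the piece handling in the code above:

    // GCC-style statement in the source:
    //     asm volatile("mov %1, %0" : "=r"(dst) : "r"(src));
    // AnalyzeAsmString (already validated by Sema) splits the template into
    // string and operand pieces; generateAsmString re-emits operands as
    //     "mov $1, $0"
    // and a modified reference such as %w2 would come back as "${2:w}".
    // The MS-style overload still returns AsmStr untranslated (see FIXME).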
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
index 962e352..fbc990f 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the Stmt::dump/Stmt::print methods, which dump out the
+// This file implements the Stmt::dump method, which dumps out the
// AST in a form that exposes type details and other fields.
//
//===----------------------------------------------------------------------===//
@@ -30,6 +30,7 @@ namespace {
SourceManager *SM;
raw_ostream &OS;
unsigned IndentLevel;
+ bool IsFirstLine;
/// MaxDepth - When doing a normal dump (not dumpAll) we only want to dump
/// the first few levels of an AST. This keeps track of how many ast levels
@@ -41,46 +42,64 @@ namespace {
const char *LastLocFilename;
unsigned LastLocLine;
+ class IndentScope {
+ StmtDumper &Dumper;
+ public:
+ IndentScope(StmtDumper &Dumper) : Dumper(Dumper) {
+ Dumper.indent();
+ }
+ ~IndentScope() {
+ Dumper.unindent();
+ }
+ };
+
public:
StmtDumper(SourceManager *sm, raw_ostream &os, unsigned maxDepth)
- : SM(sm), OS(os), IndentLevel(0-1), MaxDepth(maxDepth) {
+ : SM(sm), OS(os), IndentLevel(0), IsFirstLine(true), MaxDepth(maxDepth) {
LastLocFilename = "";
LastLocLine = ~0U;
}
+ ~StmtDumper() {
+ OS << "\n";
+ }
+
void DumpSubTree(Stmt *S) {
// Prune the recursion if not using dump all.
if (MaxDepth == 0) return;
- ++IndentLevel;
- if (S) {
- if (DeclStmt* DS = dyn_cast<DeclStmt>(S))
- VisitDeclStmt(DS);
- else {
- Visit(S);
-
- // Print out children.
- Stmt::child_range CI = S->children();
- if (CI) {
- while (CI) {
- OS << '\n';
- DumpSubTree(*CI++);
- }
- }
- }
- OS << ')';
- } else {
- Indent();
+ IndentScope Indent(*this);
+
+ if (!S) {
OS << "<<<NULL>>>";
+ return;
+ }
+
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(S)) {
+ VisitDeclStmt(DS);
+ return;
}
- --IndentLevel;
+
+ Visit(S);
+ for (Stmt::child_range CI = S->children(); CI; CI++)
+ DumpSubTree(*CI);
}
void DumpDeclarator(Decl *D);
- void Indent() const {
- for (int i = 0, e = IndentLevel; i < e; ++i)
- OS << " ";
+ void indent() {
+ if (IsFirstLine)
+ IsFirstLine = false;
+ else
+ OS << "\n";
+ OS.indent(IndentLevel * 2);
+ OS << "(";
+ IndentLevel++;
+ }
+
+ void unindent() {
+ OS << ")";
+ IndentLevel--;
}
void DumpType(QualType T) {
@@ -96,9 +115,8 @@ namespace {
}
void DumpDeclRef(Decl *node);
void DumpStmt(const Stmt *Node) {
- Indent();
- OS << "(" << Node->getStmtClassName()
- << " " << (void*)Node;
+ OS << Node->getStmtClassName()
+ << " " << (const void*)Node;
DumpSourceRange(Node);
}
void DumpValueKind(ExprValueKind K) {
@@ -262,7 +280,7 @@ void StmtDumper::DumpDeclarator(Decl *D) {
// If this is a vardecl with an initializer, emit it.
if (VarDecl *V = dyn_cast<VarDecl>(VD)) {
if (V->getInit()) {
- OS << " =\n";
+ OS << " =";
DumpSubTree(V->getInit());
}
}
@@ -294,9 +312,9 @@ void StmtDumper::DumpDeclarator(Decl *D) {
} else if (LabelDecl *LD = dyn_cast<LabelDecl>(D)) {
OS << "label " << *LD;
} else if (StaticAssertDecl *SAD = dyn_cast<StaticAssertDecl>(D)) {
- OS << "\"static_assert(\n";
+ OS << "\"static_assert(";
DumpSubTree(SAD->getAssertExpr());
- OS << ",\n";
+ OS << ",";
DumpSubTree(SAD->getMessage());
OS << ");\"";
} else {
@@ -306,17 +324,12 @@ void StmtDumper::DumpDeclarator(Decl *D) {
void StmtDumper::VisitDeclStmt(DeclStmt *Node) {
DumpStmt(Node);
- OS << "\n";
for (DeclStmt::decl_iterator DI = Node->decl_begin(), DE = Node->decl_end();
DI != DE; ++DI) {
+ IndentScope Indent(*this);
Decl* D = *DI;
- ++IndentLevel;
- Indent();
OS << (void*) D << " ";
DumpDeclarator(D);
- if (DI+1 != DE)
- OS << "\n";
- --IndentLevel;
}
}
@@ -503,35 +516,29 @@ void StmtDumper::VisitBlockExpr(BlockExpr *Node) {
BlockDecl *block = Node->getBlockDecl();
OS << " decl=" << block;
- IndentLevel++;
if (block->capturesCXXThis()) {
- OS << '\n'; Indent(); OS << "(capture this)";
+ IndentScope Indent(*this);
+ OS << "capture this";
}
for (BlockDecl::capture_iterator
i = block->capture_begin(), e = block->capture_end(); i != e; ++i) {
- OS << '\n';
- Indent();
- OS << "(capture ";
+ IndentScope Indent(*this);
+ OS << "capture ";
if (i->isByRef()) OS << "byref ";
if (i->isNested()) OS << "nested ";
if (i->getVariable())
DumpDeclRef(i->getVariable());
if (i->hasCopyExpr()) DumpSubTree(i->getCopyExpr());
- OS << ")";
}
- IndentLevel--;
- OS << '\n';
DumpSubTree(block->getBody());
}
void StmtDumper::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
DumpExpr(Node);
- if (Expr *Source = Node->getSourceExpr()) {
- OS << '\n';
+ if (Expr *Source = Node->getSourceExpr())
DumpSubTree(Source);
- }
}
// GNU extensions.
@@ -589,15 +596,11 @@ void StmtDumper::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
void StmtDumper::VisitExprWithCleanups(ExprWithCleanups *Node) {
DumpExpr(Node);
- ++IndentLevel;
for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i) {
- OS << "\n";
- Indent();
- OS << "(cleanup ";
+ IndentScope Indent(*this);
+ OS << "cleanup ";
DumpDeclRef(Node->getObject(i));
- OS << ")";
}
- --IndentLevel;
}
void StmtDumper::DumpCXXTemporary(CXXTemporary *Temporary) {
@@ -734,7 +737,6 @@ void Stmt::dump(SourceManager &SM) const {
void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
StmtDumper P(&SM, OS, 4);
P.DumpSubTree(const_cast<Stmt*>(this));
- OS << "\n";
}
/// dump - This does a local dump of the specified AST fragment. It dumps the
@@ -743,19 +745,16 @@ void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
void Stmt::dump() const {
StmtDumper P(0, llvm::errs(), 4);
P.DumpSubTree(const_cast<Stmt*>(this));
- llvm::errs() << "\n";
}
/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
void Stmt::dumpAll(SourceManager &SM) const {
StmtDumper P(&SM, llvm::errs(), ~0U);
P.DumpSubTree(const_cast<Stmt*>(this));
- llvm::errs() << "\n";
}
/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
void Stmt::dumpAll() const {
StmtDumper P(0, llvm::errs(), ~0U);
P.DumpSubTree(const_cast<Stmt*>(this));
- llvm::errs() << "\n";
}
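
The dumper's manual IndentLevel bookkeeping is replaced by an RAII IndentScope that opens "(" on construction and closes ")" on destruction; that is what lets the explicit '\n' writes and ++/--IndentLevel pairs above disappear, and why the trailing newline moves into the destructor. The pattern in isolation (the real class also suppresses the very first newline via IsFirstLine):

    #include <iostream>
    #include <string>

    class IndentScope {
      std::ostream &OS;
      unsigned &Level;
    public:
      IndentScope(std::ostream &OS, unsigned &Level) : OS(OS), Level(Level) {
        OS << '\n' << std::string(Level * 2, ' ') << '(';
        ++Level;
      }
      ~IndentScope() {  // closes the paren even on early returns
        OS << ')';
        --Level;
      }
    };

    int main() {
      unsigned Level = 0;
      IndentScope Outer(std::cout, Level);
      std::cout << "CompoundStmt";
      {
        IndentScope Inner(std::cout, Level);
        std::cout << "ReturnStmt";
      }                 // prints: (CompoundStmt followed by (ReturnStmt))
    }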
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
index c0960ce..57eb1a9 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
@@ -61,7 +61,7 @@ namespace {
void PrintRawCompoundStmt(CompoundStmt *S);
void PrintRawDecl(Decl *D);
- void PrintRawDeclStmt(DeclStmt *S);
+ void PrintRawDeclStmt(const DeclStmt *S);
void PrintRawIfStmt(IfStmt *If);
void PrintRawCXXCatchStmt(CXXCatchStmt *Catch);
void PrintCallArgs(CallExpr *E);
@@ -121,8 +121,8 @@ void StmtPrinter::PrintRawDecl(Decl *D) {
D->print(OS, Policy, IndentLevel);
}
-void StmtPrinter::PrintRawDeclStmt(DeclStmt *S) {
- DeclStmt::decl_iterator Begin = S->decl_begin(), End = S->decl_end();
+void StmtPrinter::PrintRawDeclStmt(const DeclStmt *S) {
+ DeclStmt::const_decl_iterator Begin = S->decl_begin(), End = S->decl_end();
SmallVector<Decl*, 2> Decls;
for ( ; Begin != End; ++Begin)
Decls.push_back(*Begin);
@@ -187,7 +187,10 @@ void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) {
void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
OS << "if (";
- PrintExpr(If->getCond());
+ if (const DeclStmt *DS = If->getConditionVariableDeclStmt())
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(If->getCond());
OS << ')';
if (CompoundStmt *CS = dyn_cast<CompoundStmt>(If->getThen())) {
@@ -224,7 +227,10 @@ void StmtPrinter::VisitIfStmt(IfStmt *If) {
void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
Indent() << "switch (";
- PrintExpr(Node->getCond());
+ if (const DeclStmt *DS = Node->getConditionVariableDeclStmt())
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(Node->getCond());
OS << ")";
// Pretty print compoundstmt bodies (very common).
@@ -240,7 +246,10 @@ void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
Indent() << "while (";
- PrintExpr(Node->getCond());
+ if (const DeclStmt *DS = Node->getConditionVariableDeclStmt())
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(Node->getCond());
OS << ")\n";
PrintStmt(Node->getBody());
}
@@ -366,7 +375,7 @@ void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
}
-void StmtPrinter::VisitAsmStmt(AsmStmt *Node) {
+void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
Indent() << "asm ";
if (Node->isVolatile())
@@ -422,7 +431,7 @@ void StmtPrinter::VisitAsmStmt(AsmStmt *Node) {
if (i != 0)
OS << ", ";
- VisitStringLiteral(Node->getClobber(i));
+ VisitStringLiteral(Node->getClobberStringLiteral(i));
}
OS << ");\n";
@@ -734,10 +743,30 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
case BuiltinType::UInt128: OS << "Ui128"; break;
}
}
-void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) {
+
+static void PrintFloatingLiteral(raw_ostream &OS, FloatingLiteral *Node,
+ bool PrintSuffix) {
SmallString<16> Str;
Node->getValue().toString(Str);
OS << Str;
+ if (Str.find_first_not_of("-0123456789") == StringRef::npos)
+ OS << '.'; // Trailing dot in order to separate from ints.
+
+ if (!PrintSuffix)
+ return;
+
+ // Emit suffixes. Float literals are always a builtin float type.
+ switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("Unexpected type for float literal!");
+ case BuiltinType::Half: break; // FIXME: suffix?
+ case BuiltinType::Double: break; // no suffix.
+ case BuiltinType::Float: OS << 'F'; break;
+ case BuiltinType::LongDouble: OS << 'L'; break;
+ }
+}
+
+void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) {
+ PrintFloatingLiteral(OS, Node, /*PrintSuffix=*/true);
}
void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) {
@@ -907,7 +936,7 @@ void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
OS << Node->getAccessor().getName();
}
void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
- OS << "(" << Node->getType().getAsString(Policy) << ")";
+ OS << "(" << Node->getTypeAsWritten().getAsString(Policy) << ")";
PrintExpr(Node->getSubExpr());
}
void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
@@ -1110,6 +1139,8 @@ void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
PrintExpr(Node->getArg(0));
OS << ' ' << OpStrings[Kind];
}
+ } else if (Kind == OO_Arrow) {
+ PrintExpr(Node->getArg(0));
} else if (Kind == OO_Call) {
PrintExpr(Node->getArg(0));
OS << '(';
@@ -1217,7 +1248,12 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
OS << Int->getValue().toString(10, /*isSigned*/false);
break;
}
- case UserDefinedLiteral::LOK_Floating:
+ case UserDefinedLiteral::LOK_Floating: {
+ // Print floating literal without suffix.
+ FloatingLiteral *Float = cast<FloatingLiteral>(Node->getCookedLiteral());
+ PrintFloatingLiteral(OS, Float, /*PrintSuffix=*/false);
+ break;
+ }
case UserDefinedLiteral::LOK_String:
case UserDefinedLiteral::LOK_Character:
PrintExpr(Node->getCookedLiteral());
@@ -1379,10 +1415,12 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
OS << "::";
OS << "new ";
unsigned NumPlace = E->getNumPlacementArgs();
- if (NumPlace > 0) {
+ if (NumPlace > 0 && !isa<CXXDefaultArgExpr>(E->getPlacementArg(0))) {
OS << "(";
PrintExpr(E->getPlacementArg(0));
for (unsigned i = 1; i < NumPlace; ++i) {
+ if (isa<CXXDefaultArgExpr>(E->getPlacementArg(i)))
+ break;
OS << ", ";
PrintExpr(E->getPlacementArg(i));
}
@@ -1429,6 +1467,7 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
OS << '.';
if (E->getQualifier())
E->getQualifier()->print(OS, Policy);
+ OS << "~";
std::string TypeS;
if (IdentifierInfo *II = E->getDestroyedTypeIdentifier())
@@ -1531,6 +1570,7 @@ static const char *getTypeTraitName(UnaryTypeTrait UTT) {
case UTT_IsFunction: return "__is_function";
case UTT_IsFundamental: return "__is_fundamental";
case UTT_IsIntegral: return "__is_integral";
+ case UTT_IsInterfaceClass: return "__is_interface_class";
case UTT_IsLiteral: return "__is_literal";
case UTT_IsLvalueReference: return "__is_lvalue_reference";
case UTT_IsMemberFunctionPointer: return "__is_member_function_pointer";
@@ -1647,6 +1687,10 @@ void StmtPrinter::VisitSubstNonTypeTemplateParmExpr(
Visit(Node->getReplacement());
}
+void StmtPrinter::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ OS << *E->getParameterPack();
+}
+
void StmtPrinter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *Node){
PrintExpr(Node->GetTemporaryExpr());
}
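
PrintFloatingLiteral centralizes two printer fixes: a trailing '.' is appended when the value renders as bare digits, so a double round-trips as a floating literal rather than an int, and the type suffix can be suppressed, which the user-defined-literal path needs because the UDL suffix follows the cooked value. Expected output, assuming APFloat::toString renders the values as shown:

    //   FloatingLiteral, double 1.0    ->  "1."    (dot separates from ints)
    //   FloatingLiteral, float  1.0f   ->  "1.F"   (suffix from builtin kind)
    //   UserDefinedLiteral 1.5_km      ->  "1.5"   (PrintSuffix=false; the
    //                                       _km suffix is printed elsewhere)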
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
index 2168b64..bfd3132 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
@@ -158,7 +158,7 @@ void StmtProfiler::VisitReturnStmt(const ReturnStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitAsmStmt(const AsmStmt *S) {
+void StmtProfiler::VisitGCCAsmStmt(const GCCAsmStmt *S) {
VisitStmt(S);
ID.AddBoolean(S->isVolatile());
ID.AddBoolean(S->isSimple());
@@ -175,7 +175,7 @@ void StmtProfiler::VisitAsmStmt(const AsmStmt *S) {
}
ID.AddInteger(S->getNumClobbers());
for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
- VisitStringLiteral(S->getClobber(I));
+ VisitStringLiteral(S->getClobberStringLiteral(I));
}
void StmtProfiler::VisitMSAsmStmt(const MSAsmStmt *S) {
@@ -973,6 +973,14 @@ void StmtProfiler::VisitSubstNonTypeTemplateParmExpr(
Visit(E->getReplacement());
}
+void StmtProfiler::VisitFunctionParmPackExpr(const FunctionParmPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParameterPack());
+ ID.AddInteger(S->getNumExpansions());
+ for (FunctionParmPackExpr::iterator I = S->begin(), E = S->end(); I != E; ++I)
+ VisitDecl(*I);
+}
+
void StmtProfiler::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *S) {
VisitExpr(S);
@@ -1165,6 +1173,10 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
VisitDecl(Arg.getAsDecl());
break;
+ case TemplateArgument::NullPtr:
+ VisitType(Arg.getNullPtrType());
+ break;
+
case TemplateArgument::Integral:
Arg.getAsIntegral().Profile(ID);
VisitType(Arg.getIntegralType());
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
index 95ff4ed..e9ee385 100644
--- a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
@@ -77,7 +77,7 @@ TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context,
const TemplateArgument *Args,
unsigned NumArgs) {
if (NumArgs == 0)
- return TemplateArgument(0, 0);
+ return getEmptyPack();
TemplateArgument *Storage = new (Context) TemplateArgument [NumArgs];
std::copy(Args, Args + NumArgs, Storage);
@@ -99,12 +99,11 @@ bool TemplateArgument::isDependent() const {
return true;
case Declaration:
- if (Decl *D = getAsDecl()) {
- if (DeclContext *DC = dyn_cast<DeclContext>(D))
- return DC->isDependentContext();
- return D->getDeclContext()->isDependentContext();
- }
-
+ if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
+ return DC->isDependentContext();
+ return getAsDecl()->getDeclContext()->isDependentContext();
+
+ case NullPtr:
return false;
case Integral:
@@ -141,11 +140,11 @@ bool TemplateArgument::isInstantiationDependent() const {
return true;
case Declaration:
- if (Decl *D = getAsDecl()) {
- if (DeclContext *DC = dyn_cast<DeclContext>(D))
- return DC->isDependentContext();
- return D->getDeclContext()->isDependentContext();
- }
+ if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
+ return DC->isDependentContext();
+ return getAsDecl()->getDeclContext()->isDependentContext();
+
+ case NullPtr:
return false;
case Integral:
@@ -174,6 +173,7 @@ bool TemplateArgument::isPackExpansion() const {
case Integral:
case Pack:
case Template:
+ case NullPtr:
return false;
case TemplateExpansion:
@@ -195,6 +195,7 @@ bool TemplateArgument::containsUnexpandedParameterPack() const {
case Declaration:
case Integral:
case TemplateExpansion:
+ case NullPtr:
break;
case Type:
@@ -286,12 +287,16 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
switch (getKind()) {
case Null:
case Type:
- case Declaration:
case Expression:
case Template:
case TemplateExpansion:
+ case NullPtr:
return TypeOrValue == Other.TypeOrValue;
+ case Declaration:
+ return getAsDecl() == Other.getAsDecl() &&
+ isDeclForReferenceParam() && Other.isDeclForReferenceParam();
+
case Integral:
return getIntegralType() == Other.getIntegralType() &&
getAsIntegral() == Other.getAsIntegral();
@@ -319,12 +324,13 @@ TemplateArgument TemplateArgument::getPackExpansionPattern() const {
case TemplateExpansion:
return TemplateArgument(getAsTemplateOrTemplatePattern());
-
+
case Declaration:
case Integral:
case Pack:
case Null:
case Template:
+ case NullPtr:
return TemplateArgument();
}
@@ -348,18 +354,20 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
}
case Declaration: {
- if (NamedDecl *ND = dyn_cast_or_null<NamedDecl>(getAsDecl())) {
- if (ND->getDeclName()) {
- Out << *ND;
- } else {
- Out << "<anonymous>";
- }
+ NamedDecl *ND = cast<NamedDecl>(getAsDecl());
+ if (ND->getDeclName()) {
+ // FIXME: distinguish between pointer and reference args?
+ Out << *ND;
} else {
- Out << "nullptr";
+ Out << "<anonymous>";
}
break;
}
-
+
+ case NullPtr:
+ Out << "nullptr";
+ break;
+
case Template:
getAsTemplate().print(Out, Policy);
break;
@@ -411,6 +419,9 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
case TemplateArgument::Declaration:
return getSourceDeclExpression()->getSourceRange();
+ case TemplateArgument::NullPtr:
+ return getSourceNullPtrExpression()->getSourceRange();
+
case TemplateArgument::Type:
if (TypeSourceInfo *TSI = getTypeSourceInfo())
return TSI->getTypeLoc().getSourceRange();
@@ -430,6 +441,8 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
return SourceRange(getTemplateNameLoc(), getTemplateEllipsisLoc());
case TemplateArgument::Integral:
+ return getSourceIntegralExpression()->getSourceRange();
+
case TemplateArgument::Pack:
case TemplateArgument::Null:
return SourceRange();
@@ -490,6 +503,7 @@ TemplateArgumentLoc::getPackExpansionPattern(SourceLocation &Ellipsis,
getTemplateNameLoc());
case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
case TemplateArgument::Template:
case TemplateArgument::Integral:
case TemplateArgument::Pack:
@@ -512,8 +526,9 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB << Arg.getAsType();
case TemplateArgument::Declaration:
- if (Decl *D = Arg.getAsDecl())
- return DB << D;
+ return DB << Arg.getAsDecl();
+
+ case TemplateArgument::NullPtr:
return DB << "nullptr";
case TemplateArgument::Integral:
diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp
index abefae4..580ec50 100644
--- a/contrib/llvm/tools/clang/lib/AST/Type.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp
@@ -288,18 +288,17 @@ QualType QualType::IgnoreParens(QualType T) {
return T;
}
-/// \brief This will check for a TypedefType by removing any existing sugar
-/// until it reaches a TypedefType or a non-sugared type.
-template <> const TypedefType *Type::getAs() const {
- const Type *Cur = this;
-
+/// \brief This will check for a T (which should be a Type which can act as
+/// sugar, such as a TypedefType) by removing any existing sugar until it
+/// reaches a T or a non-sugared type.
+template<typename T> static const T *getAsSugar(const Type *Cur) {
while (true) {
- if (const TypedefType *TDT = dyn_cast<TypedefType>(Cur))
- return TDT;
+ if (const T *Sugar = dyn_cast<T>(Cur))
+ return Sugar;
switch (Cur->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
- case Class: { \
+ case Type::Class: { \
const Class##Type *Ty = cast<Class##Type>(Cur); \
if (!Ty->isSugared()) return 0; \
Cur = Ty->desugar().getTypePtr(); \
@@ -310,6 +309,14 @@ template <> const TypedefType *Type::getAs() const {
}
}
+template <> const TypedefType *Type::getAs() const {
+ return getAsSugar<TypedefType>(this);
+}
+
+template <> const TemplateSpecializationType *Type::getAs() const {
+ return getAsSugar<TemplateSpecializationType>(this);
+}
+
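The refactor above generalizes the old TypedefType-only walk into a peel-one-layer-at-a-time loop over sugared types. A minimal standalone sketch of that loop shape, using toy types rather than Clang's real Type API:

    #include <cstring>

    // Toy "sugared type" chain: each node either is the kind we want or
    // exposes the layer beneath it (Inner == 0 once fully desugared).
    struct Node { const char *Kind; const Node *Inner; };

    const Node *getAsKind(const Node *Cur, const char *Kind) {
      while (Cur) {
        if (std::strcmp(Cur->Kind, Kind) == 0)
          return Cur;      // found the requested sugar node
        Cur = Cur->Inner;  // peel one layer and keep scanning
      }
      return 0;            // hit a non-sugared type without a match
    }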
/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
/// sugar off the given type. This should produce an object of the
/// same dynamic type as the canonical type.
@@ -357,9 +364,15 @@ bool Type::isStructureType() const {
return RT->getDecl()->isStruct();
return false;
}
+bool Type::isInterfaceType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isInterface();
+ return false;
+}
bool Type::isStructureOrClassType() const {
if (const RecordType *RT = getAs<RecordType>())
- return RT->getDecl()->isStruct() || RT->getDecl()->isClass();
+ return RT->getDecl()->isStruct() || RT->getDecl()->isClass() ||
+ RT->getDecl()->isInterface();
return false;
}
bool Type::isVoidPointerType() const {
@@ -499,10 +512,18 @@ const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
return 0;
}
-const CXXRecordDecl *Type::getCXXRecordDeclForPointerType() const {
+const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const {
+ QualType PointeeType;
if (const PointerType *PT = getAs<PointerType>())
- if (const RecordType *RT = PT->getPointeeType()->getAs<RecordType>())
- return dyn_cast<CXXRecordDecl>(RT->getDecl());
+ PointeeType = PT->getPointeeType();
+ else if (const ReferenceType *RT = getAs<ReferenceType>())
+ PointeeType = RT->getPointeeType();
+ else
+ return 0;
+
+ if (const RecordType *RT = PointeeType->getAs<RecordType>())
+ return dyn_cast<CXXRecordDecl>(RT->getDecl());
+
return 0;
}
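getPointeeCXXRecordDecl now looks through references as well as pointers. A hedged usage sketch — assumes clang headers and a QualType QT in scope; both 'S *' and 'S &' yield the record:

    // Sketch only: behaves identically for pointer and reference types.
    if (const CXXRecordDecl *RD = QT->getPointeeCXXRecordDecl())
      llvm::errs() << "pointee record: " << RD->getName() << "\n";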
@@ -1205,8 +1226,6 @@ bool QualType::isCXX11PODType(ASTContext &Context) const {
return false;
case Qualifiers::OCL_None:
- if (ty->isObjCLifetimeType())
- return false;
break;
}
}
@@ -1317,6 +1336,7 @@ TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
case TST_typename: return ETK_Typename;
case TST_class: return ETK_Class;
case TST_struct: return ETK_Struct;
+ case TST_interface: return ETK_Interface;
case TST_union: return ETK_Union;
case TST_enum: return ETK_Enum;
}
@@ -1327,6 +1347,7 @@ TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
switch(TypeSpec) {
case TST_class: return TTK_Class;
case TST_struct: return TTK_Struct;
+ case TST_interface: return TTK_Interface;
case TST_union: return TTK_Union;
case TST_enum: return TTK_Enum;
}
@@ -1339,6 +1360,7 @@ TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
switch (Kind) {
case TTK_Class: return ETK_Class;
case TTK_Struct: return ETK_Struct;
+ case TTK_Interface: return ETK_Interface;
case TTK_Union: return ETK_Union;
case TTK_Enum: return ETK_Enum;
}
@@ -1350,6 +1372,7 @@ TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
case ETK_Class: return TTK_Class;
case ETK_Struct: return TTK_Struct;
+ case ETK_Interface: return TTK_Interface;
case ETK_Union: return TTK_Union;
case ETK_Enum: return TTK_Enum;
case ETK_None: // Fall through.
@@ -1367,6 +1390,7 @@ TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
return false;
case ETK_Class:
case ETK_Struct:
+ case ETK_Interface:
case ETK_Union:
case ETK_Enum:
return true;
@@ -1381,6 +1405,7 @@ TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
case ETK_Typename: return "typename";
case ETK_Class: return "class";
case ETK_Struct: return "struct";
+ case ETK_Interface: return "__interface";
case ETK_Union: return "union";
case ETK_Enum: return "enum";
}
@@ -1480,6 +1505,7 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
case Dependent: return "<dependent type>";
case UnknownAny: return "<unknown type>";
case ARCUnbridgedCast: return "<ARC unbridged cast type>";
+ case BuiltinFn: return "<builtin fn type>";
case ObjCId: return "id";
case ObjCClass: return "Class";
case ObjCSel: return "SEL";
@@ -1516,6 +1542,7 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_X86Pascal: return "pascal";
case CC_AAPCS: return "aapcs";
case CC_AAPCS_VFP: return "aapcs-vfp";
+ case CC_PnaclCall: return "pnaclcall";
}
llvm_unreachable("Invalid calling convention.");
diff --git a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
index c7bb7da..58c4cbd 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
@@ -98,23 +98,38 @@ void TypeLoc::initializeImpl(ASTContext &Context, TypeLoc TL,
SourceLocation TypeLoc::getBeginLoc() const {
TypeLoc Cur = *this;
+ TypeLoc LeftMost = Cur;
while (true) {
switch (Cur.getTypeLocClass()) {
- // FIXME: Currently QualifiedTypeLoc does not have a source range
- // case Qualified:
case Elaborated:
- case DependentName:
- case DependentTemplateSpecialization:
+ LeftMost = Cur;
break;
+ case FunctionProto:
+ if (cast<FunctionProtoTypeLoc>(&Cur)->getTypePtr()->hasTrailingReturn()) {
+ LeftMost = Cur;
+ break;
+ }
+ /* Fall through */
+ case FunctionNoProto:
+ case ConstantArray:
+ case DependentSizedArray:
+ case IncompleteArray:
+ case VariableArray:
+ // FIXME: Currently QualifiedTypeLoc does not have a source range
+ case Qualified:
+ Cur = Cur.getNextTypeLoc();
+ continue;
default:
- TypeLoc Next = Cur.getNextTypeLoc();
- if (Next.isNull()) break;
- Cur = Next;
+ if (!Cur.getLocalSourceRange().getBegin().isInvalid())
+ LeftMost = Cur;
+ Cur = Cur.getNextTypeLoc();
+ if (Cur.isNull())
+ break;
continue;
- }
+ } // switch
break;
- }
- return Cur.getLocalSourceRange().getBegin();
+ } // while
+ return LeftMost.getLocalSourceRange().getBegin();
}
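The getBeginLoc rewrite above scans for the leftmost TypeLoc that carries a valid location, skipping declarator chunks that print to the right of the base type; a function type with a trailing return is the exception, since there the written type starts at the function chunk. Illustrative cases (sketch, not from the commit):

    // int (*f(char))[4];  // begin loc comes from 'int'; the function and
    //                     // array chunks are skipped while scanning left
    // auto g() -> int;    // trailing return: the function chunk itself is
    //                     // the leftmost written piece of the type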
SourceLocation TypeLoc::getEndLoc() const {
@@ -131,10 +146,15 @@ SourceLocation TypeLoc::getEndLoc() const {
case DependentSizedArray:
case IncompleteArray:
case VariableArray:
- case FunctionProto:
case FunctionNoProto:
Last = Cur;
break;
+ case FunctionProto:
+ if (cast<FunctionProtoTypeLoc>(&Cur)->getTypePtr()->hasTrailingReturn())
+ Last = TypeLoc();
+ else
+ Last = Cur;
+ break;
case Pointer:
case BlockPointer:
case MemberPointer:
@@ -241,6 +261,7 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
+ case BuiltinType::BuiltinFn:
return TST_unspecified;
}
@@ -300,7 +321,9 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
case TemplateArgument::Null:
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
- case TemplateArgument::Pack:
+ case TemplateArgument::NullPtr:
+ llvm_unreachable("Impossible TemplateArgument");
+
case TemplateArgument::Expression:
ArgInfos[i] = TemplateArgumentLocInfo(Args[i].getAsExpr());
break;
@@ -310,7 +333,7 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
Context.getTrivialTypeSourceInfo(Args[i].getAsType(),
Loc));
break;
-
+
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion: {
NestedNameSpecifierLocBuilder Builder;
@@ -327,7 +350,11 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
? SourceLocation()
: Loc);
break;
- }
+ }
+
+ case TemplateArgument::Pack:
+ ArgInfos[i] = TemplateArgumentLocInfo();
+ break;
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
index c42117c..90b2ca9 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
@@ -141,9 +141,6 @@ void TypePrinter::print(const Type *T, Qualifiers Quals, raw_ostream &OS,
OS << "NULL TYPE";
return;
}
-
- if (Policy.SuppressSpecifiers && T->isSpecifierType())
- return;
SaveAndRestore<bool> PHVal(HasEmptyPlaceHolder, PlaceHolder.empty());
@@ -556,7 +553,8 @@ void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) {
void
FunctionProtoType::printExceptionSpecification(raw_ostream &OS,
- PrintingPolicy Policy) const {
+ const PrintingPolicy &Policy)
+ const {
if (hasDynamicExceptionSpec()) {
OS << " throw(";
@@ -646,6 +644,9 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
case CC_AAPCS_VFP:
OS << " __attribute__((pcs(\"aapcs-vfp\")))";
break;
+ case CC_PnaclCall:
+ OS << " __attribute__((pnaclcall))";
+ break;
}
if (Info.getNoReturn())
OS << " __attribute__((noreturn))";
@@ -798,6 +799,7 @@ void TypePrinter::printAtomicAfter(const AtomicType *T, raw_ostream &OS) { }
/// Appends the given scope to the end of a string.
void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
if (DC->isTranslationUnit()) return;
+ if (DC->isFunctionOrMethod()) return;
AppendScope(DC->getParent(), OS);
if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
@@ -1165,6 +1167,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
OS << ')';
break;
}
+ case AttributedType::attr_pnaclcall: OS << "pnaclcall"; break;
}
OS << "))";
}
@@ -1343,7 +1346,8 @@ PrintTemplateArgumentList(raw_ostream &OS,
void
FunctionProtoType::printExceptionSpecification(std::string &S,
- PrintingPolicy Policy) const {
+ const PrintingPolicy &Policy)
+ const {
if (hasDynamicExceptionSpec()) {
S += " throw(";
diff --git a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
index 104530f..33dad40 100644
--- a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
@@ -1891,6 +1891,9 @@ void VTableBuilder::dumpLayout(raw_ostream& Out) {
if (MD->isPure())
Out << " [pure]";
+ if (MD->isDeleted())
+ Out << " [deleted]";
+
ThunkInfo Thunk = VTableThunks.lookup(I);
if (!Thunk.isEmpty()) {
// If this function pointer has a return adjustment, dump it.
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index 085049d..8ecb26e 100644
--- a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -27,10 +27,75 @@ namespace ast_matchers {
namespace internal {
namespace {
+typedef MatchFinder::MatchCallback MatchCallback;
+
+/// \brief A \c RecursiveASTVisitor that builds a map from nodes to their
+/// parents as defined by the \c RecursiveASTVisitor.
+///
+/// Note that the relationship described here is purely in terms of AST
+/// traversal - there are other relationships (for example declaration context)
+/// in the AST that are better modeled by special matchers.
+///
+/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
+class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> {
+public:
+ /// \brief Maps from a node to its parent.
+ typedef llvm::DenseMap<const void*, ast_type_traits::DynTypedNode> ParentMap;
+
+ /// \brief Builds and returns the translation unit's parent map.
+ ///
+ /// The caller takes ownership of the returned \c ParentMap.
+ static ParentMap *buildMap(TranslationUnitDecl &TU) {
+ ParentMapASTVisitor Visitor(new ParentMap);
+ Visitor.TraverseDecl(&TU);
+ return Visitor.Parents;
+ }
+
+private:
+ typedef RecursiveASTVisitor<ParentMapASTVisitor> VisitorBase;
+
+ ParentMapASTVisitor(ParentMap *Parents) : Parents(Parents) {}
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return true; }
+
+ template <typename T>
+ bool TraverseNode(T *Node, bool (VisitorBase::*traverse)(T*)) {
+ if (Node == NULL)
+ return true;
+ if (ParentStack.size() > 0)
+ (*Parents)[Node] = ParentStack.back();
+ ParentStack.push_back(ast_type_traits::DynTypedNode::create(*Node));
+ bool Result = (this->*traverse)(Node);
+ ParentStack.pop_back();
+ return Result;
+ }
+
+ bool TraverseDecl(Decl *DeclNode) {
+ return TraverseNode(DeclNode, &VisitorBase::TraverseDecl);
+ }
+
+ bool TraverseStmt(Stmt *StmtNode) {
+ return TraverseNode(StmtNode, &VisitorBase::TraverseStmt);
+ }
+
+ ParentMap *Parents;
+ llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
+
+ friend class RecursiveASTVisitor<ParentMapASTVisitor>;
+};
+
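ParentMapASTVisitor records, for each node it enters, whatever is on top of the traversal stack as that node's parent. The same bookkeeping in miniature, on a plain tree instead of the Clang AST (all names invented):

    #include <map>
    #include <vector>

    struct TreeNode { std::vector<TreeNode *> Children; };

    // Whoever is on top of the stack when we enter a node is its parent.
    void buildParentMap(TreeNode *N, std::vector<TreeNode *> &Stack,
                        std::map<TreeNode *, TreeNode *> &Parents) {
      if (!Stack.empty())
        Parents[N] = Stack.back();
      Stack.push_back(N);
      for (TreeNode *Child : N->Children)
        buildParentMap(Child, Stack, Parents);
      Stack.pop_back();
    }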
// We use memoization to avoid running the same matcher on the same
// AST node twice. This pair is the key for looking up match
// result. It consists of an ID of the MatcherInterface (for
// identifying the matcher) and a pointer to the AST node.
+//
+// We currently only memoize on nodes whose pointers identify the
+// nodes (\c Stmt and \c Decl, but not \c QualType or \c TypeLoc).
+// For \c QualType and \c TypeLoc it would be possible to generate
+// such keys as well.
+// FIXME: Benchmark whether memoization of non-pointer typed nodes
+// provides enough benefit for the additional amount of code.
typedef std::pair<uint64_t, const void*> UntypedMatchInput;
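A cache keyed on (matcher identity, node pointer) then short-circuits repeated matcher runs. Schematically, with a toy std::map standing in for the DenseMap used below:

    #include <cstdint>
    #include <map>
    #include <utility>

    typedef std::pair<uint64_t, const void *> Key;

    // Reuse the stored verdict when the same matcher meets the same node.
    bool memoizedMatch(std::map<Key, bool> &Cache, uint64_t MatcherID,
                       const void *Node, bool (*Run)(const void *)) {
      Key K(MatcherID, Node);
      std::map<Key, bool>::iterator It = Cache.find(K);
      if (It != Cache.end())
        return It->second;
      bool Result = Run(Node);
      Cache[K] = Result;
      return Result;
    }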
// Used to store the result of a match and possibly bound nodes.
@@ -50,16 +115,16 @@ public:
// descendants of a traversed node. max_depth is the maximum depth
// to traverse: use 1 for matching the children and INT_MAX for
// matching the descendants.
- MatchChildASTVisitor(const UntypedBaseMatcher *BaseMatcher,
+ MatchChildASTVisitor(const DynTypedMatcher *Matcher,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
int MaxDepth,
ASTMatchFinder::TraversalKind Traversal,
ASTMatchFinder::BindKind Bind)
- : BaseMatcher(BaseMatcher),
+ : Matcher(Matcher),
Finder(Finder),
Builder(Builder),
- CurrentDepth(-1),
+ CurrentDepth(0),
MaxDepth(MaxDepth),
Traversal(Traversal),
Bind(Bind),
@@ -76,10 +141,23 @@ public:
// Traverse*(c) for each child c of 'node'.
// - Traverse*(c) in turn calls Traverse(c), completing the
// recursion.
- template <typename T>
- bool findMatch(const T &Node) {
+ bool findMatch(const ast_type_traits::DynTypedNode &DynNode) {
reset();
- traverse(Node);
+ if (const Decl *D = DynNode.get<Decl>())
+ traverse(*D);
+ else if (const Stmt *S = DynNode.get<Stmt>())
+ traverse(*S);
+ else if (const NestedNameSpecifier *NNS =
+ DynNode.get<NestedNameSpecifier>())
+ traverse(*NNS);
+ else if (const NestedNameSpecifierLoc *NNSLoc =
+ DynNode.get<NestedNameSpecifierLoc>())
+ traverse(*NNSLoc);
+ else if (const QualType *Q = DynNode.get<QualType>())
+ traverse(*Q);
+ else if (const TypeLoc *T = DynNode.get<TypeLoc>())
+ traverse(*T);
+ // FIXME: Add other base types after adding tests.
return Matches;
}
@@ -87,9 +165,11 @@ public:
// They are public only to allow CRTP to work. They are *not *part
// of the public API of this class.
bool TraverseDecl(Decl *DeclNode) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
return (DeclNode == NULL) || traverse(*DeclNode);
}
bool TraverseStmt(Stmt *StmtNode) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
const Stmt *StmtToTraverse = StmtNode;
if (Traversal ==
ASTMatchFinder::TK_IgnoreImplicitCastsAndParentheses) {
@@ -100,9 +180,39 @@ public:
}
return (StmtToTraverse == NULL) || traverse(*StmtToTraverse);
}
+ // We assume that the QualType and the contained type are on the same
+ // hierarchy level. Thus, we try to match either of them.
bool TraverseType(QualType TypeNode) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ // Match the Type.
+ if (!match(*TypeNode))
+ return false;
+ // The QualType is matched inside traverse.
return traverse(TypeNode);
}
+ // We assume that the TypeLoc, contained QualType and contained Type all are
+ // on the same hierarchy level. Thus, we try to match all of them.
+ bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ // Match the Type.
+ if (!match(*TypeLocNode.getType()))
+ return false;
+ // Match the QualType.
+ if (!match(TypeLocNode.getType()))
+ return false;
+ // The TypeLoc is matched inside traverse.
+ return traverse(TypeLocNode);
+ }
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ return (NNS == NULL) || traverse(*NNS);
+ }
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ if (!match(*NNS.getNestedNameSpecifier()))
+ return false;
+ return !NNS || traverse(NNS);
+ }
bool shouldVisitTemplateInstantiations() const { return true; }
bool shouldVisitImplicitCode() const { return true; }
@@ -120,7 +230,7 @@ private:
// Resets the state of this object.
void reset() {
Matches = false;
- CurrentDepth = -1;
+ CurrentDepth = 0;
}
// Forwards the call to the corresponding Traverse*() method in the
@@ -134,49 +244,57 @@ private:
bool baseTraverse(QualType TypeNode) {
return VisitorBase::TraverseType(TypeNode);
}
+ bool baseTraverse(TypeLoc TypeLocNode) {
+ return VisitorBase::TraverseTypeLoc(TypeLocNode);
+ }
+ bool baseTraverse(const NestedNameSpecifier &NNS) {
+ return VisitorBase::TraverseNestedNameSpecifier(
+ const_cast<NestedNameSpecifier*>(&NNS));
+ }
+ bool baseTraverse(NestedNameSpecifierLoc NNS) {
+ return VisitorBase::TraverseNestedNameSpecifierLoc(NNS);
+ }
- // Traverses the subtree rooted at 'node'; returns true if the
- // traversal should continue after this function returns; also sets
- // matched_ to true if a match is found during the traversal.
+ // Sets 'Matched' to true if 'Matcher' matches 'Node' and:
+ // 0 < CurrentDepth <= MaxDepth.
+ //
+ // Returns 'true' if traversal should continue after this function
+ // returns, i.e. if no match is found or 'Bind' is 'BK_All'.
template <typename T>
- bool traverse(const T &Node) {
- TOOLING_COMPILE_ASSERT(IsBaseType<T>::value,
- traverse_can_only_be_instantiated_with_base_type);
- ScopedIncrement ScopedDepth(&CurrentDepth);
- if (CurrentDepth == 0) {
- // We don't want to match the root node, so just recurse.
- return baseTraverse(Node);
+ bool match(const T &Node) {
+ if (CurrentDepth == 0 || CurrentDepth > MaxDepth) {
+ return true;
}
if (Bind != ASTMatchFinder::BK_All) {
- if (BaseMatcher->matches(Node, Finder, Builder)) {
+ if (Matcher->matches(ast_type_traits::DynTypedNode::create(Node),
+ Finder, Builder)) {
Matches = true;
return false; // Abort as soon as a match is found.
}
- if (CurrentDepth < MaxDepth) {
- // The current node doesn't match, and we haven't reached the
- // maximum depth yet, so recurse.
- return baseTraverse(Node);
- }
- // The current node doesn't match, and we have reached the
- // maximum depth, so don't recurse (but continue the traversal
- // such that other nodes at the current level can be visited).
- return true;
} else {
BoundNodesTreeBuilder RecursiveBuilder;
- if (BaseMatcher->matches(Node, Finder, &RecursiveBuilder)) {
+ if (Matcher->matches(ast_type_traits::DynTypedNode::create(Node),
+ Finder, &RecursiveBuilder)) {
// After the first match the matcher succeeds.
Matches = true;
Builder->addMatch(RecursiveBuilder.build());
}
- if (CurrentDepth < MaxDepth) {
- baseTraverse(Node);
- }
- // In kBindAll mode we always search for more matches.
- return true;
}
+ return true;
}
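The depth window in match() is what separates child matching from descendant matching: the root sits at depth 0 and never matches, and MaxDepth caps how deep a hit may occur. Per the methods further below:

    // Depth window enforced above: 0 < CurrentDepth <= MaxDepth
    //   matchesChildOf       -> MaxDepth == 1        (direct children only)
    //   matchesDescendantOf  -> MaxDepth == INT_MAX  (any strictly deeper node)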
- const UntypedBaseMatcher *const BaseMatcher;
+ // Traverses the subtree rooted at 'Node'; returns true if the
+ // traversal should continue after this function returns.
+ template <typename T>
+ bool traverse(const T &Node) {
+ TOOLING_COMPILE_ASSERT(IsBaseType<T>::value,
+ traverse_can_only_be_instantiated_with_base_type);
+ if (!match(Node))
+ return false;
+ return baseTraverse(Node);
+ }
+
+ const DynTypedMatcher *const Matcher;
ASTMatchFinder *const Finder;
BoundNodesTreeBuilder *const Builder;
int CurrentDepth;
@@ -191,12 +309,21 @@ private:
class MatchASTVisitor : public RecursiveASTVisitor<MatchASTVisitor>,
public ASTMatchFinder {
public:
- MatchASTVisitor(std::vector< std::pair<const UntypedBaseMatcher*,
- MatchFinder::MatchCallback*> > *Triggers)
- : Triggers(Triggers),
+ MatchASTVisitor(std::vector<std::pair<const internal::DynTypedMatcher*,
+ MatchCallback*> > *MatcherCallbackPairs)
+ : MatcherCallbackPairs(MatcherCallbackPairs),
ActiveASTContext(NULL) {
}
+ void onStartOfTranslationUnit() {
+ for (std::vector<std::pair<const internal::DynTypedMatcher*,
+ MatchCallback*> >::const_iterator
+ I = MatcherCallbackPairs->begin(), E = MatcherCallbackPairs->end();
+ I != E; ++I) {
+ I->second->onStartOfTranslationUnit();
+ }
+ }
+
void set_active_ast_context(ASTContext *NewActiveASTContext) {
ActiveASTContext = NewActiveASTContext;
}
@@ -207,7 +334,7 @@ public:
bool VisitTypedefDecl(TypedefDecl *DeclNode) {
// When we see 'typedef A B', we add name 'B' to the set of names
// A's canonical type maps to. This is necessary for implementing
- // IsDerivedFrom(x) properly, where x can be the name of the base
+ // isDerivedFrom(x) properly, where x can be the name of the base
// class or any of its aliases.
//
// In general, the is-alias-of (as defined by typedefs) relation
@@ -228,7 +355,7 @@ public:
// `- E
//
// It is wrong to assume that the relation is a chain. A correct
- // implementation of IsDerivedFrom() needs to recognize that B and
+ // implementation of isDerivedFrom() needs to recognize that B and
// E are aliases, even though neither is a typedef of the other.
// Therefore, we cannot simply walk through one typedef chain to
// find out whether the type name matches.
@@ -243,23 +370,27 @@ public:
bool TraverseStmt(Stmt *StmtNode);
bool TraverseType(QualType TypeNode);
bool TraverseTypeLoc(TypeLoc TypeNode);
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
// Matches children or descendants of 'Node' with 'BaseMatcher'.
- template <typename T>
- bool memoizedMatchesRecursively(const T &Node,
- const UntypedBaseMatcher &BaseMatcher,
+ bool memoizedMatchesRecursively(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
TraversalKind Traversal, BindKind Bind) {
- TOOLING_COMPILE_ASSERT((llvm::is_same<T, Decl>::value) ||
- (llvm::is_same<T, Stmt>::value),
- type_does_not_support_memoization);
- const UntypedMatchInput input(BaseMatcher.getID(), &Node);
+ const UntypedMatchInput input(Matcher.getID(), Node.getMemoizationData());
+
+ // For AST-nodes that don't have an identity, we can't memoize.
+ if (!input.second)
+ return matchesRecursively(Node, Matcher, Builder, MaxDepth, Traversal,
+ Bind);
+
std::pair<MemoizationMap::iterator, bool> InsertResult
= ResultCache.insert(std::make_pair(input, MemoizedMatchResult()));
if (InsertResult.second) {
BoundNodesTreeBuilder DescendantBoundNodesBuilder;
InsertResult.first->second.ResultOfMatch =
- matchesRecursively(Node, BaseMatcher, &DescendantBoundNodesBuilder,
+ matchesRecursively(Node, Matcher, &DescendantBoundNodesBuilder,
MaxDepth, Traversal, Bind);
InsertResult.first->second.Nodes =
DescendantBoundNodesBuilder.build();
@@ -269,12 +400,12 @@ public:
}
// Matches children or descendants of 'Node' with 'BaseMatcher'.
- template <typename T>
- bool matchesRecursively(const T &Node, const UntypedBaseMatcher &BaseMatcher,
+ bool matchesRecursively(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
TraversalKind Traversal, BindKind Bind) {
MatchChildASTVisitor Visitor(
- &BaseMatcher, this, Builder, MaxDepth, Traversal, Bind);
+ &Matcher, this, Builder, MaxDepth, Traversal, Bind);
return Visitor.findMatch(Node);
}
@@ -282,38 +413,54 @@ public:
const Matcher<NamedDecl> &Base,
BoundNodesTreeBuilder *Builder);
- // Implements ASTMatchFinder::MatchesChildOf.
- virtual bool matchesChildOf(const Decl &DeclNode,
- const UntypedBaseMatcher &BaseMatcher,
+ // Implements ASTMatchFinder::matchesChildOf.
+ virtual bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
TraversalKind Traversal,
BindKind Bind) {
- return matchesRecursively(DeclNode, BaseMatcher, Builder, 1, Traversal,
+ return matchesRecursively(Node, Matcher, Builder, 1, Traversal,
Bind);
}
- virtual bool matchesChildOf(const Stmt &StmtNode,
- const UntypedBaseMatcher &BaseMatcher,
- BoundNodesTreeBuilder *Builder,
- TraversalKind Traversal,
- BindKind Bind) {
- return matchesRecursively(StmtNode, BaseMatcher, Builder, 1, Traversal,
- Bind);
- }
-
- // Implements ASTMatchFinder::MatchesDescendantOf.
- virtual bool matchesDescendantOf(const Decl &DeclNode,
- const UntypedBaseMatcher &BaseMatcher,
+ // Implements ASTMatchFinder::matchesDescendantOf.
+ virtual bool matchesDescendantOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
BindKind Bind) {
- return memoizedMatchesRecursively(DeclNode, BaseMatcher, Builder, INT_MAX,
+ return memoizedMatchesRecursively(Node, Matcher, Builder, INT_MAX,
TK_AsIs, Bind);
}
- virtual bool matchesDescendantOf(const Stmt &StmtNode,
- const UntypedBaseMatcher &BaseMatcher,
- BoundNodesTreeBuilder *Builder,
- BindKind Bind) {
- return memoizedMatchesRecursively(StmtNode, BaseMatcher, Builder, INT_MAX,
- TK_AsIs, Bind);
+ // Implements ASTMatchFinder::matchesAncestorOf.
+ virtual bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ AncestorMatchMode MatchMode) {
+ if (!Parents) {
+ // We always need to run over the whole translation unit, as
+ // \c hasAncestor can escape any subtree.
+ Parents.reset(ParentMapASTVisitor::buildMap(
+ *ActiveASTContext->getTranslationUnitDecl()));
+ }
+ ast_type_traits::DynTypedNode Ancestor = Node;
+ while (Ancestor.get<TranslationUnitDecl>() !=
+ ActiveASTContext->getTranslationUnitDecl()) {
+ assert(Ancestor.getMemoizationData() &&
+ "Invariant broken: only nodes that support memoization may be "
+ "used in the parent map.");
+ ParentMapASTVisitor::ParentMap::const_iterator I =
+ Parents->find(Ancestor.getMemoizationData());
+ if (I == Parents->end()) {
+ assert(false &&
+ "Found node that is not in the parent map.");
+ return false;
+ }
+ Ancestor = I->second;
+ if (Matcher.matches(Ancestor, this, Builder))
+ return true;
+ if (MatchMode == ASTMatchFinder::AMM_ParentOnly)
+ return false;
+ }
+ return false;
}
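matchesAncestorOf climbs the lazily built parent map from the node toward the TranslationUnitDecl, testing the matcher at each hop (exactly one hop for AMM_ParentOnly). The loop stripped to a toy analogue:

    #include <map>

    struct N; // toy node type
    typedef std::map<const N *, const N *> ParentMap;

    // Climb parent links to the root; true on the first matching ancestor.
    bool anyAncestorMatches(const N *Node, const N *Root,
                            const ParentMap &Parents,
                            bool (*Matches)(const N *), bool ParentOnly) {
      while (Node != Root) {
        ParentMap::const_iterator I = Parents.find(Node);
        if (I == Parents.end())
          return false;   // node not in the map: give up
        Node = I->second;
        if (Matches(Node))
          return true;
        if (ParentOnly)
          return false;   // only the direct parent was requested
      }
      return false;
    }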
bool shouldVisitTemplateInstantiations() const { return true; }
@@ -358,21 +505,22 @@ private:
// result callback for every node that matches.
template <typename T>
void match(const T &node) {
- for (std::vector< std::pair<const UntypedBaseMatcher*,
- MatchFinder::MatchCallback*> >::const_iterator
- It = Triggers->begin(), End = Triggers->end();
- It != End; ++It) {
+ for (std::vector<std::pair<const internal::DynTypedMatcher*,
+ MatchCallback*> >::const_iterator
+ I = MatcherCallbackPairs->begin(), E = MatcherCallbackPairs->end();
+ I != E; ++I) {
BoundNodesTreeBuilder Builder;
- if (It->first->matches(node, this, &Builder)) {
+ if (I->first->matches(ast_type_traits::DynTypedNode::create(node),
+ this, &Builder)) {
BoundNodesTree BoundNodes = Builder.build();
- MatchVisitor Visitor(ActiveASTContext, It->second);
+ MatchVisitor Visitor(ActiveASTContext, I->second);
BoundNodes.visitMatches(&Visitor);
}
}
}
- std::vector< std::pair<const UntypedBaseMatcher*,
- MatchFinder::MatchCallback*> > *const Triggers;
+ std::vector<std::pair<const internal::DynTypedMatcher*,
+ MatchCallback*> > *const MatcherCallbackPairs;
ASTContext *ActiveASTContext;
// Maps a canonical type to its TypedefDecls.
@@ -381,16 +529,16 @@ private:
// Maps (matcher, node) -> the match result for memoization.
typedef llvm::DenseMap<UntypedMatchInput, MemoizedMatchResult> MemoizationMap;
MemoizationMap ResultCache;
+
+ llvm::OwningPtr<ParentMapASTVisitor::ParentMap> Parents;
};
// Returns true if the given class is directly or indirectly derived
-// from a base type with the given name. A class is considered to be
-// also derived from itself.
+// from a base type with the given name. A class is not considered to be
+// derived from itself.
bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
const Matcher<NamedDecl> &Base,
BoundNodesTreeBuilder *Builder) {
- if (Base.matches(*Declaration, this, Builder))
- return true;
if (!Declaration->hasDefinition())
return false;
typedef CXXRecordDecl::base_class_const_iterator BaseIterator;
@@ -403,6 +551,7 @@ bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
// Type::getAs<...>() drills through typedefs.
if (TypeNode->getAs<DependentNameType>() != NULL ||
+ TypeNode->getAs<DependentTemplateSpecializationType>() != NULL ||
TypeNode->getAs<TemplateTypeParmType>() != NULL)
// Dependent names and template TypeNode parameters will be matched when
// the template is instantiated.
@@ -439,6 +588,8 @@ bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
}
assert(ClassDecl != NULL);
assert(ClassDecl != Declaration);
+ if (Base.matches(*ClassDecl, this, Builder))
+ return true;
if (classIsDerivedFrom(ClassDecl, Base, Builder))
return true;
}
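The semantic change here: Base.matches moved inside the base-class loop, so a class no longer counts as derived from itself. Illustration, assuming the clang-3.2-era matchers recordDecl, isDerivedFrom and hasName:

    // class Base {};
    // class Derived : public Base {};
    // recordDecl(isDerivedFrom(hasName("Base"))) now matches Derived only;
    // under the old semantics it also matched Base itself.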
@@ -466,19 +617,40 @@ bool MatchASTVisitor::TraverseType(QualType TypeNode) {
return RecursiveASTVisitor<MatchASTVisitor>::TraverseType(TypeNode);
}
-bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLoc) {
- match(TypeLoc.getType());
- return RecursiveASTVisitor<MatchASTVisitor>::
- TraverseTypeLoc(TypeLoc);
+bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLocNode) {
+ // The RecursiveASTVisitor only visits types if they're not within TypeLocs.
+ // We still want to find those types via matchers, so we match them here. Note
+ // that the TypeLocs are structurally a shadow hierarchy of the expressed
+ // type, so we visit all involved parts of a compound type when matching on
+ // each TypeLoc.
+ match(TypeLocNode);
+ match(TypeLocNode.getType());
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseTypeLoc(TypeLocNode);
+}
+
+bool MatchASTVisitor::TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ match(*NNS);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifier(NNS);
+}
+
+bool MatchASTVisitor::TraverseNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc NNS) {
+ match(NNS);
+ // We only match the nested name specifier here (as opposed to traversing it)
+ // because the traversal is already done in the parallel "Loc"-hierarchy.
+ match(*NNS.getNestedNameSpecifier());
+ return
+ RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifierLoc(NNS);
}
class MatchASTConsumer : public ASTConsumer {
public:
- MatchASTConsumer(std::vector< std::pair<const UntypedBaseMatcher*,
- MatchFinder::MatchCallback*> > *Triggers,
- MatchFinder::ParsingDoneTestCallback *ParsingDone)
- : Visitor(Triggers),
- ParsingDone(ParsingDone) {}
+ MatchASTConsumer(
+ std::vector<std::pair<const internal::DynTypedMatcher*,
+ MatchCallback*> > *MatcherCallbackPairs,
+ MatchFinder::ParsingDoneTestCallback *ParsingDone)
+ : Visitor(MatcherCallbackPairs),
+ ParsingDone(ParsingDone) {}
private:
virtual void HandleTranslationUnit(ASTContext &Context) {
@@ -486,6 +658,7 @@ private:
ParsingDone->run();
}
Visitor.set_active_ast_context(&Context);
+ Visitor.onStartOfTranslationUnit();
Visitor.TraverseDecl(Context.getTranslationUnitDecl());
Visitor.set_active_ast_context(NULL);
}
@@ -508,9 +681,9 @@ MatchFinder::ParsingDoneTestCallback::~ParsingDoneTestCallback() {}
MatchFinder::MatchFinder() : ParsingDone(NULL) {}
MatchFinder::~MatchFinder() {
- for (std::vector< std::pair<const internal::UntypedBaseMatcher*,
- MatchFinder::MatchCallback*> >::const_iterator
- It = Triggers.begin(), End = Triggers.end();
+ for (std::vector<std::pair<const internal::DynTypedMatcher*,
+ MatchCallback*> >::const_iterator
+ It = MatcherCallbackPairs.begin(), End = MatcherCallbackPairs.end();
It != End; ++It) {
delete It->first;
}
@@ -518,24 +691,54 @@ MatchFinder::~MatchFinder() {
void MatchFinder::addMatcher(const DeclarationMatcher &NodeMatch,
MatchCallback *Action) {
- Triggers.push_back(std::make_pair(
- new internal::TypedBaseMatcher<Decl>(NodeMatch), Action));
+ MatcherCallbackPairs.push_back(std::make_pair(
+ new internal::Matcher<Decl>(NodeMatch), Action));
}
void MatchFinder::addMatcher(const TypeMatcher &NodeMatch,
MatchCallback *Action) {
- Triggers.push_back(std::make_pair(
- new internal::TypedBaseMatcher<QualType>(NodeMatch), Action));
+ MatcherCallbackPairs.push_back(std::make_pair(
+ new internal::Matcher<QualType>(NodeMatch), Action));
}
void MatchFinder::addMatcher(const StatementMatcher &NodeMatch,
MatchCallback *Action) {
- Triggers.push_back(std::make_pair(
- new internal::TypedBaseMatcher<Stmt>(NodeMatch), Action));
+ MatcherCallbackPairs.push_back(std::make_pair(
+ new internal::Matcher<Stmt>(NodeMatch), Action));
+}
+
+void MatchFinder::addMatcher(const NestedNameSpecifierMatcher &NodeMatch,
+ MatchCallback *Action) {
+ MatcherCallbackPairs.push_back(std::make_pair(
+ new NestedNameSpecifierMatcher(NodeMatch), Action));
+}
+
+void MatchFinder::addMatcher(const NestedNameSpecifierLocMatcher &NodeMatch,
+ MatchCallback *Action) {
+ MatcherCallbackPairs.push_back(std::make_pair(
+ new NestedNameSpecifierLocMatcher(NodeMatch), Action));
+}
+
+void MatchFinder::addMatcher(const TypeLocMatcher &NodeMatch,
+ MatchCallback *Action) {
+ MatcherCallbackPairs.push_back(std::make_pair(
+ new TypeLocMatcher(NodeMatch), Action));
}
ASTConsumer *MatchFinder::newASTConsumer() {
- return new internal::MatchASTConsumer(&Triggers, ParsingDone);
+ return new internal::MatchASTConsumer(&MatcherCallbackPairs, ParsingDone);
+}
+
+void MatchFinder::findAll(const Decl &Node, ASTContext &Context) {
+ internal::MatchASTVisitor Visitor(&MatcherCallbackPairs);
+ Visitor.set_active_ast_context(&Context);
+ Visitor.TraverseDecl(const_cast<Decl*>(&Node));
+}
+
+void MatchFinder::findAll(const Stmt &Node, ASTContext &Context) {
+ internal::MatchASTVisitor Visitor(&MatcherCallbackPairs);
+ Visitor.set_active_ast_context(&Context);
+ Visitor.TraverseStmt(const_cast<Stmt*>(&Node));
}
void MatchFinder::registerTestCallbackAfterParsing(
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 69c5190..408195d 100644
--- a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -18,18 +18,29 @@ namespace clang {
namespace ast_matchers {
namespace internal {
+void BoundNodesMap::copyTo(BoundNodesTreeBuilder *Builder) const {
+ for (IDToNodeMap::const_iterator It = NodeMap.begin();
+ It != NodeMap.end();
+ ++It) {
+ Builder->setBinding(It->first, It->second);
+ }
+}
+
+void BoundNodesMap::copyTo(BoundNodesMap *Other) const {
+ copy(NodeMap.begin(), NodeMap.end(),
+ inserter(Other->NodeMap, Other->NodeMap.begin()));
+}
+
BoundNodesTree::BoundNodesTree() {}
BoundNodesTree::BoundNodesTree(
- const std::map<std::string, const Decl*>& DeclBindings,
- const std::map<std::string, const Stmt*>& StmtBindings,
+ const BoundNodesMap& Bindings,
const std::vector<BoundNodesTree> RecursiveBindings)
- : DeclBindings(DeclBindings), StmtBindings(StmtBindings),
+ : Bindings(Bindings),
RecursiveBindings(RecursiveBindings) {}
void BoundNodesTree::copyTo(BoundNodesTreeBuilder* Builder) const {
- copyBindingsTo(DeclBindings, Builder);
- copyBindingsTo(StmtBindings, Builder);
+ Bindings.copyTo(Builder);
for (std::vector<BoundNodesTree>::const_iterator
I = RecursiveBindings.begin(),
E = RecursiveBindings.end();
@@ -38,63 +49,34 @@ void BoundNodesTree::copyTo(BoundNodesTreeBuilder* Builder) const {
}
}
-template <typename T>
-void BoundNodesTree::copyBindingsTo(
- const T& Bindings, BoundNodesTreeBuilder* Builder) const {
- for (typename T::const_iterator I = Bindings.begin(),
- E = Bindings.end();
- I != E; ++I) {
- Builder->setBinding(I->first, I->second);
- }
-}
-
void BoundNodesTree::visitMatches(Visitor* ResultVisitor) {
- std::map<std::string, const Decl*> AggregatedDeclBindings;
- std::map<std::string, const Stmt*> AggregatedStmtBindings;
- visitMatchesRecursively(ResultVisitor, AggregatedDeclBindings,
- AggregatedStmtBindings);
+ BoundNodesMap AggregatedBindings;
+ visitMatchesRecursively(ResultVisitor, AggregatedBindings);
}
void BoundNodesTree::
visitMatchesRecursively(Visitor* ResultVisitor,
- std::map<std::string, const Decl*>
- AggregatedDeclBindings,
- std::map<std::string, const Stmt*>
- AggregatedStmtBindings) {
- copy(DeclBindings.begin(), DeclBindings.end(),
- inserter(AggregatedDeclBindings, AggregatedDeclBindings.begin()));
- copy(StmtBindings.begin(), StmtBindings.end(),
- inserter(AggregatedStmtBindings, AggregatedStmtBindings.begin()));
+ const BoundNodesMap& AggregatedBindings) {
+ BoundNodesMap CombinedBindings(AggregatedBindings);
+ Bindings.copyTo(&CombinedBindings);
if (RecursiveBindings.empty()) {
- ResultVisitor->visitMatch(BoundNodes(AggregatedDeclBindings,
- AggregatedStmtBindings));
+ ResultVisitor->visitMatch(BoundNodes(CombinedBindings));
} else {
for (unsigned I = 0; I < RecursiveBindings.size(); ++I) {
RecursiveBindings[I].visitMatchesRecursively(ResultVisitor,
- AggregatedDeclBindings,
- AggregatedStmtBindings);
+ CombinedBindings);
}
}
}
BoundNodesTreeBuilder::BoundNodesTreeBuilder() {}
-void BoundNodesTreeBuilder::setBinding(const std::string &Id,
- const Decl *Node) {
- DeclBindings[Id] = Node;
-}
-
-void BoundNodesTreeBuilder::setBinding(const std::string &Id,
- const Stmt *Node) {
- StmtBindings[Id] = Node;
-}
-
void BoundNodesTreeBuilder::addMatch(const BoundNodesTree& Bindings) {
RecursiveBindings.push_back(Bindings);
}
BoundNodesTree BoundNodesTreeBuilder::build() const {
- return BoundNodesTree(DeclBindings, StmtBindings, RecursiveBindings);
+ return BoundNodesTree(Bindings, RecursiveBindings);
}
} // end namespace internal
diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
index 7de7f39..e7df0a8 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -29,13 +29,15 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
+#include "BodyFarm.h"
+
using namespace clang;
typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *d,
- const CFG::BuildOptions &buildOptions)
+ const Decl *d,
+ const CFG::BuildOptions &buildOptions)
: Manager(Mgr),
D(d),
cfgBuildOptions(buildOptions),
@@ -49,7 +51,7 @@ AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
}
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *d)
+ const Decl *d)
: Manager(Mgr),
D(d),
forcedBlkExprs(0),
@@ -62,11 +64,16 @@ AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
}
AnalysisDeclContextManager::AnalysisDeclContextManager(bool useUnoptimizedCFG,
- bool addImplicitDtors,
- bool addInitializers) {
+ bool addImplicitDtors,
+ bool addInitializers,
+ bool addTemporaryDtors,
+ bool synthesizeBodies)
+ : SynthesizeBodies(synthesizeBodies)
+{
cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
cfgBuildOptions.AddImplicitDtors = addImplicitDtors;
cfgBuildOptions.AddInitializers = addInitializers;
+ cfgBuildOptions.AddTemporaryDtors = addTemporaryDtors;
}
void AnalysisDeclContextManager::clear() {
@@ -75,9 +82,18 @@ void AnalysisDeclContextManager::clear() {
Contexts.clear();
}
+static BodyFarm &getBodyFarm(ASTContext &C) {
+ static BodyFarm *BF = new BodyFarm(C);
+ return *BF;
+}
+
Stmt *AnalysisDeclContext::getBody() const {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- return FD->getBody();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ Stmt *Body = FD->getBody();
+ if (!Body && Manager && Manager->synthesizeBodies())
+ return getBodyFarm(getASTContext()).getBody(FD);
+ return Body;
+ }
else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getBody();
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
@@ -201,6 +217,13 @@ PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
}
AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Calling 'hasBody' replaces 'FD' in place with the FunctionDecl
+ // that has the body.
+ FD->hasBody(FD);
+ D = FD;
+ }
+
AnalysisDeclContext *&AC = Contexts[D];
if (!AC)
AC = new AnalysisDeclContext(this, D, cfgBuildOptions);
@@ -332,6 +355,10 @@ const StackFrameContext *LocationContext::getCurrentStackFrame() const {
return NULL;
}
+bool LocationContext::inTopFrame() const {
+ return getCurrentStackFrame()->inTopFrame();
+}
+
bool LocationContext::isParentOf(const LocationContext *LC) const {
do {
const LocationContext *Parent = LC->getParent();
diff --git a/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.cpp b/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.cpp
new file mode 100644
index 0000000..794ff9c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.cpp
@@ -0,0 +1,374 @@
+//== BodyFarm.cpp - Factory for conjuring up fake bodies ----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// BodyFarm is a factory for creating faux implementations for functions/methods
+// for analysis purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/StringSwitch.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprObjC.h"
+#include "BodyFarm.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Helper creation functions for constructing faux ASTs.
+//===----------------------------------------------------------------------===//
+
+static bool isDispatchBlock(QualType Ty) {
+ // Is it a block pointer?
+ const BlockPointerType *BPT = Ty->getAs<BlockPointerType>();
+ if (!BPT)
+ return false;
+
+ // Check if the block pointer type takes no arguments and
+ // returns void.
+ const FunctionProtoType *FT =
+ BPT->getPointeeType()->getAs<FunctionProtoType>();
+ if (!FT || !FT->getResultType()->isVoidType() ||
+ FT->getNumArgs() != 0)
+ return false;
+
+ return true;
+}
+
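isDispatchBlock above accepts exactly the shape of dispatch_block_t — a block pointer taking no arguments and returning void:

    // The one block type the check admits (blocks syntax):
    typedef void (^dispatch_block_t)(void);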
+namespace {
+class ASTMaker {
+public:
+ ASTMaker(ASTContext &C) : C(C) {}
+
+ /// Create a new BinaryOperator representing a simple assignment.
+ BinaryOperator *makeAssignment(const Expr *LHS, const Expr *RHS, QualType Ty);
+
+ /// Create a new BinaryOperator representing a comparison.
+ BinaryOperator *makeComparison(const Expr *LHS, const Expr *RHS,
+ BinaryOperator::Opcode Op);
+
+ /// Create a new compound stmt using the provided statements.
+ CompoundStmt *makeCompound(ArrayRef<Stmt*>);
+
+ /// Create a new DeclRefExpr for the referenced variable.
+ DeclRefExpr *makeDeclRefExpr(const VarDecl *D);
+
+ /// Create a new UnaryOperator representing a dereference.
+ UnaryOperator *makeDereference(const Expr *Arg, QualType Ty);
+
+ /// Create an implicit cast for an integer conversion.
+ Expr *makeIntegralCast(const Expr *Arg, QualType Ty);
+
+ /// Create an implicit cast to a builtin boolean type.
+ ImplicitCastExpr *makeIntegralCastToBoolean(const Expr *Arg);
+
+ /// Create an implicit cast for lvalue-to-rvalue conversions.
+ ImplicitCastExpr *makeLvalueToRvalue(const Expr *Arg, QualType Ty);
+
+ /// Create an Objective-C bool literal.
+ ObjCBoolLiteralExpr *makeObjCBool(bool Val);
+
+ /// Create a Return statement.
+ ReturnStmt *makeReturn(const Expr *RetVal);
+
+private:
+ ASTContext &C;
+};
+}
+
+BinaryOperator *ASTMaker::makeAssignment(const Expr *LHS, const Expr *RHS,
+ QualType Ty) {
+ return new (C) BinaryOperator(const_cast<Expr*>(LHS), const_cast<Expr*>(RHS),
+ BO_Assign, Ty, VK_RValue,
+ OK_Ordinary, SourceLocation(), false);
+}
+
+BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
+ BinaryOperator::Opcode Op) {
+ assert(BinaryOperator::isLogicalOp(Op) ||
+ BinaryOperator::isComparisonOp(Op));
+ return new (C) BinaryOperator(const_cast<Expr*>(LHS),
+ const_cast<Expr*>(RHS),
+ Op,
+ C.getLogicalOperationType(),
+ VK_RValue,
+ OK_Ordinary, SourceLocation(), false);
+}
+
+CompoundStmt *ASTMaker::makeCompound(ArrayRef<Stmt *> Stmts) {
+ return new (C) CompoundStmt(C, const_cast<Stmt**>(Stmts.data()),
+ Stmts.size(),
+ SourceLocation(), SourceLocation());
+}
+
+DeclRefExpr *ASTMaker::makeDeclRefExpr(const VarDecl *D) {
+ DeclRefExpr *DR =
+ DeclRefExpr::Create(/* Ctx = */ C,
+ /* QualifierLoc = */ NestedNameSpecifierLoc(),
+ /* TemplateKWLoc = */ SourceLocation(),
+ /* D = */ const_cast<VarDecl*>(D),
+ /* isEnclosingLocal = */ false,
+ /* NameLoc = */ SourceLocation(),
+ /* T = */ D->getType(),
+ /* VK = */ VK_LValue);
+ return DR;
+}
+
+UnaryOperator *ASTMaker::makeDereference(const Expr *Arg, QualType Ty) {
+ return new (C) UnaryOperator(const_cast<Expr*>(Arg), UO_Deref, Ty,
+ VK_LValue, OK_Ordinary, SourceLocation());
+}
+
+ImplicitCastExpr *ASTMaker::makeLvalueToRvalue(const Expr *Arg, QualType Ty) {
+ return ImplicitCastExpr::Create(C, Ty, CK_LValueToRValue,
+ const_cast<Expr*>(Arg), 0, VK_RValue);
+}
+
+Expr *ASTMaker::makeIntegralCast(const Expr *Arg, QualType Ty) {
+ if (Arg->getType() == Ty)
+ return const_cast<Expr*>(Arg);
+
+ return ImplicitCastExpr::Create(C, Ty, CK_IntegralCast,
+ const_cast<Expr*>(Arg), 0, VK_RValue);
+}
+
+ImplicitCastExpr *ASTMaker::makeIntegralCastToBoolean(const Expr *Arg) {
+ return ImplicitCastExpr::Create(C, C.BoolTy, CK_IntegralToBoolean,
+ const_cast<Expr*>(Arg), 0, VK_RValue);
+}
+
+ObjCBoolLiteralExpr *ASTMaker::makeObjCBool(bool Val) {
+ QualType Ty = C.getBOOLDecl() ? C.getBOOLType() : C.ObjCBuiltinBoolTy;
+ return new (C) ObjCBoolLiteralExpr(Val, Ty, SourceLocation());
+}
+
+ReturnStmt *ASTMaker::makeReturn(const Expr *RetVal) {
+ return new (C) ReturnStmt(SourceLocation(), const_cast<Expr*>(RetVal), 0);
+}
+
+//===----------------------------------------------------------------------===//
+// Creation functions for faux ASTs.
+//===----------------------------------------------------------------------===//
+
+typedef Stmt *(*FunctionFarmer)(ASTContext &C, const FunctionDecl *D);
+
+/// Create a fake body for dispatch_once.
+static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
+ // Check if we have exactly two parameters.
+ if (D->param_size() != 2)
+ return 0;
+
+ // Check if the first parameter is a pointer to integer type.
+ const ParmVarDecl *Predicate = D->getParamDecl(0);
+ QualType PredicateQPtrTy = Predicate->getType();
+ const PointerType *PredicatePtrTy = PredicateQPtrTy->getAs<PointerType>();
+ if (!PredicatePtrTy)
+ return 0;
+ QualType PredicateTy = PredicatePtrTy->getPointeeType();
+ if (!PredicateTy->isIntegerType())
+ return 0;
+
+ // Check if the second parameter is the proper block type.
+ const ParmVarDecl *Block = D->getParamDecl(1);
+ QualType Ty = Block->getType();
+ if (!isDispatchBlock(Ty))
+ return 0;
+
+ // Everything checks out. Create a fake body that checks the predicate,
+ // sets it, and calls the block. Basically, an AST dump of:
+ //
+ // void dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) {
+ // if (!*predicate) {
+ // *predicate = 1;
+ // block();
+ // }
+ // }
+
+ ASTMaker M(C);
+
+ // (1) Create the call.
+ DeclRefExpr *DR = M.makeDeclRefExpr(Block);
+ ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
+ CallExpr *CE = new (C) CallExpr(C, ICE, ArrayRef<Expr*>(), C.VoidTy,
+ VK_RValue, SourceLocation());
+
+ // (2) Create the assignment to the predicate.
+ IntegerLiteral *IL =
+ IntegerLiteral::Create(C, llvm::APInt(C.getTypeSize(C.IntTy), (uint64_t) 1),
+ C.IntTy, SourceLocation());
+ BinaryOperator *B =
+ M.makeAssignment(
+ M.makeDereference(
+ M.makeLvalueToRvalue(
+ M.makeDeclRefExpr(Predicate), PredicateQPtrTy),
+ PredicateTy),
+ M.makeIntegralCast(IL, PredicateTy),
+ PredicateTy);
+
+ // (3) Create the compound statement.
+ Stmt *Stmts[2];
+ Stmts[0] = B;
+ Stmts[1] = CE;
+ CompoundStmt *CS = M.makeCompound(ArrayRef<Stmt*>(Stmts, 2));
+
+ // (4) Create the 'if' condition.
+ ImplicitCastExpr *LValToRval =
+ M.makeLvalueToRvalue(
+ M.makeDereference(
+ M.makeLvalueToRvalue(
+ M.makeDeclRefExpr(Predicate),
+ PredicateQPtrTy),
+ PredicateTy),
+ PredicateTy);
+
+ UnaryOperator *UO = new (C) UnaryOperator(LValToRval, UO_LNot, C.IntTy,
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+
+ // (5) Create the 'if' statement.
+ IfStmt *If = new (C) IfStmt(C, SourceLocation(), 0, UO, CS);
+ return If;
+}
+
+/// Create a fake body for dispatch_sync.
+static Stmt *create_dispatch_sync(ASTContext &C, const FunctionDecl *D) {
+ // Check if we have at least two parameters.
+ if (D->param_size() != 2)
+ return 0;
+
+ // Check if the second parameter is a block.
+ const ParmVarDecl *PV = D->getParamDecl(1);
+ QualType Ty = PV->getType();
+ if (!isDispatchBlock(Ty))
+ return 0;
+
+ // Everything checks out. Create a fake body that just calls the block.
+ // This is basically just an AST dump of:
+ //
+ // void dispatch_sync(dispatch_queue_t queue, void (^block)(void)) {
+ // block();
+ // }
+ //
+ ASTMaker M(C);
+ DeclRefExpr *DR = M.makeDeclRefExpr(PV);
+ ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
+ CallExpr *CE = new (C) CallExpr(C, ICE, ArrayRef<Expr*>(), C.VoidTy,
+ VK_RValue, SourceLocation());
+ return CE;
+}
+
+static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
+{
+ // There must be exactly 3 parameters.
+ if (D->param_size() != 3)
+ return 0;
+
+ // Body for:
+ // if (oldValue == *theValue) {
+ // *theValue = newValue;
+ // return YES;
+ // }
+ // else return NO;
+
+ QualType ResultTy = D->getResultType();
+ bool isBoolean = ResultTy->isBooleanType();
+ if (!isBoolean && !ResultTy->isIntegralType(C))
+ return 0;
+
+ const ParmVarDecl *OldValue = D->getParamDecl(0);
+ QualType OldValueTy = OldValue->getType();
+
+ const ParmVarDecl *NewValue = D->getParamDecl(1);
+ QualType NewValueTy = NewValue->getType();
+
+ assert(OldValueTy == NewValueTy);
+
+ const ParmVarDecl *TheValue = D->getParamDecl(2);
+ QualType TheValueTy = TheValue->getType();
+ const PointerType *PT = TheValueTy->getAs<PointerType>();
+ if (!PT)
+ return 0;
+ QualType PointeeTy = PT->getPointeeType();
+
+ ASTMaker M(C);
+ // Construct the comparison.
+ Expr *Comparison =
+ M.makeComparison(
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(OldValue), OldValueTy),
+ M.makeLvalueToRvalue(
+ M.makeDereference(
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(TheValue), TheValueTy),
+ PointeeTy),
+ PointeeTy),
+ BO_EQ);
+
+ // Construct the body of the IfStmt.
+ Stmt *Stmts[2];
+ Stmts[0] =
+ M.makeAssignment(
+ M.makeDereference(
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(TheValue), TheValueTy),
+ PointeeTy),
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(NewValue), NewValueTy),
+ NewValueTy);
+
+ Expr *BoolVal = M.makeObjCBool(true);
+ Expr *RetVal = isBoolean ? M.makeIntegralCastToBoolean(BoolVal)
+ : M.makeIntegralCast(BoolVal, ResultTy);
+ Stmts[1] = M.makeReturn(RetVal);
+ CompoundStmt *Body = M.makeCompound(ArrayRef<Stmt*>(Stmts, 2));
+
+ // Construct the else clause.
+ BoolVal = M.makeObjCBool(false);
+ RetVal = isBoolean ? M.makeIntegralCastToBoolean(BoolVal)
+ : M.makeIntegralCast(BoolVal, ResultTy);
+ Stmt *Else = M.makeReturn(RetVal);
+
+ // Construct the If.
+ Stmt *If =
+ new (C) IfStmt(C, SourceLocation(), 0, Comparison, Body,
+ SourceLocation(), Else);
+
+ return If;
+}
+
+Stmt *BodyFarm::getBody(const FunctionDecl *D) {
+ D = D->getCanonicalDecl();
+
+ llvm::Optional<Stmt *> &Val = Bodies[D];
+ if (Val.hasValue())
+ return Val.getValue();
+
+ Val = 0;
+
+ if (D->getIdentifier() == 0)
+ return 0;
+
+ StringRef Name = D->getName();
+ if (Name.empty())
+ return 0;
+
+ FunctionFarmer FF;
+
+ if (Name.startswith("OSAtomicCompareAndSwap") ||
+ Name.startswith("objc_atomicCompareAndSwap")) {
+ FF = create_OSAtomicCompareAndSwap;
+ }
+ else {
+ FF = llvm::StringSwitch<FunctionFarmer>(Name)
+ .Case("dispatch_sync", create_dispatch_sync)
+ .Case("dispatch_once", create_dispatch_once)
+ .Default(NULL);
+ }
+
+ if (FF) { Val = FF(C, D); }
+ return Val.getValue();
+}
+
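Note how getBody caches negative results as well as positive ones: the DenseMap value is an Optional, so "never looked up" and "looked up, no fake body" are distinct states. A minimal sketch of the idiom, where computeBody() is a hypothetical stand-in for the farmer call:

// Sketch of the caching idiom used above; computeBody() is hypothetical.
llvm::DenseMap<const Decl *, llvm::Optional<Stmt *> > Bodies;

Stmt *cachedBody(const Decl *D) {
  llvm::Optional<Stmt *> &Val = Bodies[D]; // default-constructs an empty slot
  if (Val.hasValue())
    return Val.getValue();                 // hit: may be a cached null
  Val = computeBody(D);                    // miss: compute once, even if null
  return Val.getValue();
}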
diff --git a/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.h b/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.h
new file mode 100644
index 0000000..d503cc1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/BodyFarm.h
@@ -0,0 +1,43 @@
+//== BodyFarm.h - Factory for conjuring up fake bodies -------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// BodyFarm is a factory for creating faux implementations of functions and
+// methods for analysis purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_BODYFARM_H
+#define LLVM_CLANG_ANALYSIS_BODYFARM_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+
+class ASTContext;
+class Decl;
+class FunctionDecl;
+class Stmt;
+
+class BodyFarm {
+public:
+ BodyFarm(ASTContext &C) : C(C) {}
+
+ /// Factory method for creating bodies for ordinary functions.
+ Stmt *getBody(const FunctionDecl *D);
+
+private:
+ typedef llvm::DenseMap<const Decl *, llvm::Optional<Stmt *> > BodyMap;
+
+ ASTContext &C;
+ BodyMap Bodies;
+};
+}
+
+#endif
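A hypothetical call site, to show the intended flow; getBodyOrSynthesize and its caller are illustrative and not part of this patch:

// Illustrative only: prefer a real body from the TU, fall back to the farm.
Stmt *getBodyOrSynthesize(BodyFarm &Farm, const FunctionDecl *FD) {
  if (Stmt *Body = FD->getBody())
    return Body;             // the translation unit provides a real body
  return Farm.getBody(FD);   // may synthesize a fake body, or return null
}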
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
index 05c5385..315e543 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
@@ -467,6 +467,30 @@ private:
CachedBoolEvals[S] = Result; // update or insert
return Result;
}
+ else {
+ switch (Bop->getOpcode()) {
+ default: break;
+ // For 'x & 0' and 'x * 0', we can determine that
+ // the value is always false.
+ case BO_Mul:
+ case BO_And: {
+ // If either operand is zero, we know the value
+ // must be false.
+ llvm::APSInt IntVal;
+ if (Bop->getLHS()->EvaluateAsInt(IntVal, *Context)) {
+ if (IntVal.getBoolValue() == false) {
+ return TryResult(false);
+ }
+ }
+ if (Bop->getRHS()->EvaluateAsInt(IntVal, *Context)) {
+ if (IntVal.getBoolValue() == false) {
+ return TryResult(false);
+ }
+ }
+ }
+ break;
+ }
+ }
}
return evaluateAsBooleanConditionNoCache(S);
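The effect of the new branch, in source terms (illustrative C; any operand that evaluates to the integer constant zero triggers the fold):

/* Both conditions are known false regardless of x, so the CFG builder
   can mark the true branch unreachable: */
if (x & 0) { /* dead */ }   /* bitwise AND with zero is always zero  */
if (0 * x) { /* dead */ }   /* multiplication by zero is always zero */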
@@ -682,7 +706,7 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
IsReference = FD->getType()->isReferenceType();
HasTemporaries = isa<ExprWithCleanups>(Init);
- if (BuildOpts.AddImplicitDtors && HasTemporaries) {
+ if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
// Generate destructors for temporaries in initialization expression.
VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
IsReference);
@@ -1022,6 +1046,14 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::ExprWithCleanupsClass:
return VisitExprWithCleanups(cast<ExprWithCleanups>(S), asc);
+ case Stmt::CXXDefaultArgExprClass:
+ // FIXME: The expression inside a CXXDefaultArgExpr is owned by the
+ // called function's declaration, not by the caller. If we simply add
+ // this expression to the CFG, we could end up with the same Expr
+ // appearing multiple times.
+ // PR13385 / <rdar://problem/12156507>
+ return VisitStmt(S, asc);
+
case Stmt::CXXBindTemporaryExprClass:
return VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), asc);
@@ -1585,7 +1617,7 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
IsReference = VD->getType()->isReferenceType();
HasTemporaries = isa<ExprWithCleanups>(Init);
- if (BuildOpts.AddImplicitDtors && HasTemporaries) {
+ if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
// Generate destructors for temporaries in initialization expression.
VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
IsReference);
@@ -1616,8 +1648,10 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
// If the type of VD is a VLA, then we must process its size expressions.
for (const VariableArrayType* VA = FindVA(VD->getType().getTypePtr());
- VA != 0; VA = FindVA(VA->getElementType().getTypePtr()))
- Block = addStmt(VA->getSizeExpr());
+ VA != 0; VA = FindVA(VA->getElementType().getTypePtr())) {
+ if (CFGBlock *newBlock = addStmt(VA->getSizeExpr()))
+ LastBlock = newBlock;
+ }
// Remove variable from local scope.
if (ScopePos && VD == *ScopePos)
@@ -1735,7 +1769,7 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
// Add the condition as the last statement in the new block. This may create
// new blocks as the condition may contain control-flow. Any newly created
// blocks will be pointed to be "Block".
- Block = addStmt(I->getCond());
+ CFGBlock *LastBlock = addStmt(I->getCond());
// Finally, if the IfStmt contains a condition variable, add both the IfStmt
// and the condition variable initialization to the CFG.
@@ -1743,11 +1777,11 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
appendStmt(Block, I->getConditionVariableDeclStmt());
- addStmt(Init);
+ LastBlock = addStmt(Init);
}
}
- return Block;
+ return LastBlock;
}
@@ -2254,7 +2288,7 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
}
// The default case when not handling logical operators.
- EntryConditionBlock = ExitConditionBlock = createBlock(false);
+ ExitConditionBlock = createBlock(false);
ExitConditionBlock->setTerminator(W);
// Now add the actual condition to the condition block.
@@ -2579,7 +2613,7 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
// Add the terminator and condition in the switch block.
SwitchTerminatedBlock->setTerminator(Terminator);
Block = SwitchTerminatedBlock;
- Block = addStmt(Terminator->getCond());
+ CFGBlock *LastBlock = addStmt(Terminator->getCond());
// Finally, if the SwitchStmt contains a condition variable, add both the
// SwitchStmt and the condition variable initialization to the CFG.
@@ -2587,11 +2621,11 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
appendStmt(Block, Terminator->getConditionVariableDeclStmt());
- addStmt(Init);
+ LastBlock = addStmt(Init);
}
}
- return Block;
+ return LastBlock;
}
static bool shouldAddCase(bool &switchExclusivelyCovered,
@@ -2775,8 +2809,7 @@ CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
assert(Terminator->getTryBlock() && "try must contain a non-NULL body");
Block = NULL;
- Block = addStmt(Terminator->getTryBlock());
- return Block;
+ return addStmt(Terminator->getTryBlock());
}
CFGBlock *CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt *CS) {
@@ -2917,15 +2950,15 @@ CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
addLocalScopeAndDtors(S->getLoopVarStmt());
// Populate a new block to contain the loop body and loop variable.
- Block = addStmt(S->getBody());
+ addStmt(S->getBody());
if (badCFG)
return 0;
- Block = addStmt(S->getLoopVarStmt());
+ CFGBlock *LoopVarStmtBlock = addStmt(S->getLoopVarStmt());
if (badCFG)
return 0;
// This new body block is a successor to our condition block.
- addSuccessor(ConditionBlock, KnownVal.isFalse() ? 0 : Block);
+ addSuccessor(ConditionBlock, KnownVal.isFalse() ? 0 : LoopVarStmtBlock);
}
// Link up the condition block with the code that follows the loop (the
@@ -2940,7 +2973,7 @@ CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
CFGBlock *CFGBuilder::VisitExprWithCleanups(ExprWithCleanups *E,
AddStmtChoice asc) {
- if (BuildOpts.AddImplicitDtors) {
+ if (BuildOpts.AddTemporaryDtors) {
// If adding implicit destructors visit the full expression for adding
// destructors of temporaries.
VisitForTemporaryDtors(E->getSubExpr());
@@ -3020,6 +3053,8 @@ CFGBlock *CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt *I) {
}
CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary) {
+ assert(BuildOpts.AddImplicitDtors && BuildOpts.AddTemporaryDtors);
+
tryAgain:
if (!E) {
badCFG = true;
@@ -3449,12 +3484,12 @@ class StmtPrinterHelper : public PrinterHelper {
StmtMapTy StmtMap;
DeclMapTy DeclMap;
signed currentBlock;
- unsigned currentStmt;
+ unsigned currStmt;
const LangOptions &LangOpts;
public:
StmtPrinterHelper(const CFG* cfg, const LangOptions &LO)
- : currentBlock(0), currentStmt(0), LangOpts(LO)
+ : currentBlock(0), currStmt(0), LangOpts(LO)
{
for (CFG::const_iterator I = cfg->begin(), E = cfg->end(); I != E; ++I ) {
unsigned j = 1;
@@ -3515,7 +3550,7 @@ public:
const LangOptions &getLangOpts() const { return LangOpts; }
void setBlockID(signed i) { currentBlock = i; }
- void setStmtID(unsigned i) { currentStmt = i; }
+ void setStmtID(unsigned i) { currStmt = i; }
virtual bool handledStmt(Stmt *S, raw_ostream &OS) {
StmtMapTy::iterator I = StmtMap.find(S);
@@ -3524,7 +3559,7 @@ public:
return false;
if (currentBlock >= 0 && I->second.first == (unsigned) currentBlock
- && I->second.second == currentStmt) {
+ && I->second.second == currStmt) {
return false;
}
@@ -3539,7 +3574,7 @@ public:
return false;
if (currentBlock >= 0 && I->second.first == (unsigned) currentBlock
- && I->second.second == currentStmt) {
+ && I->second.second == currStmt) {
return false;
}
@@ -3831,8 +3866,8 @@ static void print_block(raw_ostream &OS, const CFG* cfg,
if (Helper) Helper->setBlockID(-1);
- CFGBlockTerminatorPrint TPrinter(OS, Helper,
- PrintingPolicy(Helper->getLangOpts()));
+ PrintingPolicy PP(Helper ? Helper->getLangOpts() : LangOptions());
+ CFGBlockTerminatorPrint TPrinter(OS, Helper, PP);
TPrinter.Visit(const_cast<Stmt*>(B.getTerminator().getStmt()));
OS << '\n';
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
index ff2f777..2d1ca0e 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
@@ -14,6 +14,7 @@
#include "FormatStringParsing.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
@@ -489,9 +490,12 @@ analyze_format_string::LengthModifier::toString() const {
const char *ConversionSpecifier::toString() const {
switch (kind) {
case dArg: return "d";
+ case DArg: return "D";
case iArg: return "i";
case oArg: return "o";
+ case OArg: return "O";
case uArg: return "u";
+ case UArg: return "U";
case xArg: return "x";
case XArg: return "X";
case fArg: return "f";
@@ -518,9 +522,9 @@ const char *ConversionSpecifier::toString() const {
case ObjCObjArg: return "@";
// FreeBSD specific specifiers.
- case bArg: return "b";
- case DArg: return "D";
- case rArg: return "r";
+ case FreeBSDbArg: return "b";
+ case FreeBSDDArg: return "D";
+ case FreeBSDrArg: return "r";
// GlibC specific specifiers.
case PrintErrno: return "m";
@@ -528,6 +532,29 @@ const char *ConversionSpecifier::toString() const {
return NULL;
}
+llvm::Optional<ConversionSpecifier>
+ConversionSpecifier::getStandardSpecifier() const {
+ ConversionSpecifier::Kind NewKind;
+
+ switch (getKind()) {
+ default:
+ return llvm::Optional<ConversionSpecifier>();
+ case DArg:
+ NewKind = dArg;
+ break;
+ case UArg:
+ NewKind = uArg;
+ break;
+ case OArg:
+ NewKind = oArg;
+ break;
+ }
+
+ ConversionSpecifier FixedCS(*this);
+ FixedCS.setKind(NewKind);
+ return FixedCS;
+}
+
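In fix-it terms, getStandardSpecifier() maps the Darwin-only conversions onto their standard spellings and declines everything else:

/* Mapping implemented above:
 *   %D -> %d   (signed decimal)
 *   %O -> %o   (unsigned octal)
 *   %U -> %u   (unsigned decimal)
 * Any other kind returns an empty Optional, i.e. no suggested rewrite. */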
//===----------------------------------------------------------------------===//
// Methods on OptionalAmount.
//===----------------------------------------------------------------------===//
@@ -553,7 +580,7 @@ void OptionalAmount::toString(raw_ostream &os) const {
}
}
-bool FormatSpecifier::hasValidLengthModifier() const {
+bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
switch (LM.getKind()) {
case LengthModifier::None:
return true;
@@ -568,13 +595,16 @@ bool FormatSpecifier::hasValidLengthModifier() const {
case LengthModifier::AsPtrDiff:
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
case ConversionSpecifier::nArg:
- case ConversionSpecifier::rArg:
+ case ConversionSpecifier::FreeBSDrArg:
return true;
default:
return false;
@@ -584,9 +614,12 @@ bool FormatSpecifier::hasValidLengthModifier() const {
case LengthModifier::AsLong:
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
case ConversionSpecifier::aArg:
@@ -600,7 +633,7 @@ bool FormatSpecifier::hasValidLengthModifier() const {
case ConversionSpecifier::nArg:
case ConversionSpecifier::cArg:
case ConversionSpecifier::sArg:
- case ConversionSpecifier::rArg:
+ case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::ScanListArg:
return true;
default:
@@ -618,14 +651,15 @@ bool FormatSpecifier::hasValidLengthModifier() const {
case ConversionSpecifier::gArg:
case ConversionSpecifier::GArg:
return true;
- // GNU extension.
+ // GNU libc extension.
case ConversionSpecifier::dArg:
case ConversionSpecifier::iArg:
case ConversionSpecifier::oArg:
case ConversionSpecifier::uArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
- return true;
+ return !Target.getTriple().isOSDarwin() &&
+ !Target.getTriple().isOSWindows();
default:
return false;
}
@@ -703,10 +737,13 @@ bool FormatSpecifier::hasStandardConversionSpecifier(const LangOptions &LangOpt)
case ConversionSpecifier::SArg:
return LangOpt.ObjC1 || LangOpt.ObjC2;
case ConversionSpecifier::InvalidSpecifier:
- case ConversionSpecifier::bArg:
- case ConversionSpecifier::DArg:
- case ConversionSpecifier::rArg:
+ case ConversionSpecifier::FreeBSDbArg:
+ case ConversionSpecifier::FreeBSDDArg:
+ case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::PrintErrno:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::OArg:
+ case ConversionSpecifier::UArg:
return false;
}
llvm_unreachable("Invalid ConversionSpecifier Kind!");
@@ -729,6 +766,20 @@ bool FormatSpecifier::hasStandardLengthConversionCombination() const {
return true;
}
+llvm::Optional<LengthModifier>
+FormatSpecifier::getCorrectedLengthModifier() const {
+ if (CS.isAnyIntArg() || CS.getKind() == ConversionSpecifier::nArg) {
+ if (LM.getKind() == LengthModifier::AsLongDouble ||
+ LM.getKind() == LengthModifier::AsQuad) {
+ LengthModifier FixedLM(LM);
+ FixedLM.setKind(LengthModifier::AsLongLong);
+ return FixedLM;
+ }
+ }
+
+ return llvm::Optional<LengthModifier>();
+}
+
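For integer conversions (and %n), the L and q length modifiers are corrected to ll; for example (illustrative format strings):

/* Corrections implied by getCorrectedLengthModifier():
 *   "%Ld"  ->  "%lld"   (L is a floating-point length modifier)
 *   "%qu"  ->  "%llu"   (q is a legacy BSD/Darwin spelling)
 * Floating-point conversions such as "%Lf" are left untouched. */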
bool FormatSpecifier::namedTypeToLengthModifier(QualType QT,
LengthModifier &LM) {
assert(isa<TypedefType>(QT) && "Expected a TypedefType");
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ObjCNoReturn.cpp b/contrib/llvm/tools/clang/lib/Analysis/ObjCNoReturn.cpp
new file mode 100644
index 0000000..52d844b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ObjCNoReturn.cpp
@@ -0,0 +1,67 @@
+//= ObjCNoReturn.cpp - Handling of Cocoa APIs known not to return --*- C++ -*---
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements special handling of recognizing ObjC API hooks that
+// do not return but aren't marked as such in API headers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
+
+using namespace clang;
+
+static bool isSubclass(const ObjCInterfaceDecl *Class, IdentifierInfo *II) {
+ if (!Class)
+ return false;
+ if (Class->getIdentifier() == II)
+ return true;
+ return isSubclass(Class->getSuperClass(), II);
+}
+
+ObjCNoReturn::ObjCNoReturn(ASTContext &C)
+ : RaiseSel(GetNullarySelector("raise", C)),
+ NSExceptionII(&C.Idents.get("NSException"))
+{
+ // Generate selectors.
+ SmallVector<IdentifierInfo*, 3> II;
+
+ // raise:format:
+ II.push_back(&C.Idents.get("raise"));
+ II.push_back(&C.Idents.get("format"));
+ NSExceptionInstanceRaiseSelectors[0] =
+ C.Selectors.getSelector(II.size(), &II[0]);
+
+ // raise:format:arguments:
+ II.push_back(&C.Idents.get("arguments"));
+ NSExceptionInstanceRaiseSelectors[1] =
+ C.Selectors.getSelector(II.size(), &II[0]);
+}
+
+
+bool ObjCNoReturn::isImplicitNoReturn(const ObjCMessageExpr *ME) {
+ Selector S = ME->getSelector();
+
+ if (ME->isInstanceMessage()) {
+ // Check for the "raise" message.
+ return S == RaiseSel;
+ }
+
+ if (const ObjCInterfaceDecl *ID = ME->getReceiverInterface()) {
+ if (isSubclass(ID, NSExceptionII)) {
+ for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i) {
+ if (S == NSExceptionInstanceRaiseSelectors[i])
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
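Concretely, the messages recognized as implicitly non-returning take the following forms (illustrative Objective-C, shown in comments):

// Recognized as non-returning even though the headers do not say so:
//   [exc raise];                                         // any instance 'raise'
//   [NSException raise:name format:fmt];                 // raise:format:
//   [NSException raise:name format:fmt arguments:args];  // raise:format:arguments:
// The class-message forms also apply to any subclass of NSException.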
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
index 2b350ce..cacb6cb 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/Basic/TargetInfo.h"
#include "FormatStringParsing.h"
using clang::analyze_format_string::ArgType;
@@ -52,7 +53,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
const char *&Beg,
const char *E,
unsigned &argIndex,
- const LangOptions &LO) {
+ const LangOptions &LO,
+ const TargetInfo &Target) {
using namespace clang::analyze_format_string;
using namespace clang::analyze_printf;
@@ -197,17 +199,41 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
// Glibc specific.
case 'm': k = ConversionSpecifier::PrintErrno; break;
// FreeBSD format extensions
- case 'b': if (LO.FormatExtensions) k = ConversionSpecifier::bArg; break; /* check for int and then char * */
- case 'r': if (LO.FormatExtensions) k = ConversionSpecifier::rArg; break;
- case 'y': if (LO.FormatExtensions) k = ConversionSpecifier::iArg; break;
- case 'D': if (LO.FormatExtensions) k = ConversionSpecifier::DArg; break; /* check for u_char * pointer and a char * string */
+ case 'b':
+ if (LO.FormatExtensions)
+ k = ConversionSpecifier::FreeBSDbArg; // int followed by char *
+ break;
+ case 'r':
+ if (LO.FormatExtensions)
+ k = ConversionSpecifier::FreeBSDrArg;
+ break;
+ case 'y':
+ if (LO.FormatExtensions)
+ k = ConversionSpecifier::iArg;
+ break;
+ // Apple-specific
+ case 'D':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::DArg;
+ else if (LO.FormatExtensions)
+ k = ConversionSpecifier::FreeBSDDArg; // u_char * followed by char *
+ break;
+ case 'O':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::OArg;
+ break;
+ case 'U':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::UArg;
+ break;
}
PrintfConversionSpecifier CS(conversionPosition, k);
FS.setConversionSpecifier(CS);
if (CS.consumesDataArgument() && !FS.usesPositionalArg())
FS.setArgIndex(argIndex++);
// FreeBSD extension
- if (k == ConversionSpecifier::bArg || k == ConversionSpecifier::DArg)
+ if (k == ConversionSpecifier::FreeBSDbArg ||
+ k == ConversionSpecifier::FreeBSDDArg)
argIndex++;
if (k == ConversionSpecifier::InvalidSpecifier) {
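The extra increment accounts for FreeBSD conversions that consume two data arguments; roughly, following the FreeBSD kernel printf conventions (illustrative calls):

// FreeBSD extensions that consume two data arguments each:
//   printf("flags=%b\n", val, "\10\2FEATB\1FEATA");  // int + bit-decode string
//   printf("%D", buf, ":");                          // u_char * + separator
// Hence argIndex is advanced once more for %b and %D.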
@@ -220,18 +246,19 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
const char *I,
const char *E,
- const LangOptions &LO) {
+ const LangOptions &LO,
+ const TargetInfo &Target) {
unsigned argIndex = 0;
// Keep looking for a format specifier until we have exhausted the string.
while (I != E) {
const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
- LO);
+ LO, Target);
// Did a fail-stop error of any kind occur when parsing the specifier?
// If so, don't do any more processing.
if (FSR.shouldStop())
- return true;;
+ return true;
// Did we exhaust the string or encounter an error that
// we can recover from?
if (!FSR.hasValue())
@@ -490,9 +517,11 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
namedTypeToLengthModifier(QT, LM);
// If fixing the length modifier was enough, we are done.
- const analyze_printf::ArgType &ATR = getArgType(Ctx, IsObjCLiteral);
- if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
- return true;
+ if (hasValidLengthModifier(Ctx.getTargetInfo())) {
+ const analyze_printf::ArgType &ATR = getArgType(Ctx, IsObjCLiteral);
+ if (ATR.isValid() && ATR.matchesType(Ctx, QT))
+ return true;
+ }
// Set conversion specifier and disable any flags which do not apply to it.
// Let typedefs to char fall through to int, as %c is silly for uint8_t.
@@ -557,6 +586,7 @@ bool PrintfSpecifier::hasValidPlusPrefix() const {
// The plus prefix only makes sense for signed conversions
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
case ConversionSpecifier::fArg:
case ConversionSpecifier::FArg:
@@ -566,7 +596,7 @@ bool PrintfSpecifier::hasValidPlusPrefix() const {
case ConversionSpecifier::GArg:
case ConversionSpecifier::aArg:
case ConversionSpecifier::AArg:
- case ConversionSpecifier::rArg:
+ case ConversionSpecifier::FreeBSDrArg:
return true;
default:
@@ -581,6 +611,7 @@ bool PrintfSpecifier::hasValidAlternativeForm() const {
// Alternate form flag only valid with the oxXaAeEfFgG conversions
switch (CS.getKind()) {
case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
case ConversionSpecifier::aArg:
@@ -591,7 +622,7 @@ bool PrintfSpecifier::hasValidAlternativeForm() const {
case ConversionSpecifier::FArg:
case ConversionSpecifier::gArg:
case ConversionSpecifier::GArg:
- case ConversionSpecifier::rArg:
+ case ConversionSpecifier::FreeBSDrArg:
return true;
default:
@@ -606,9 +637,12 @@ bool PrintfSpecifier::hasValidLeadingZeros() const {
// Leading zeroes flag only valid with the diouxXaAeEfFgG conversions
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
case ConversionSpecifier::aArg:
@@ -633,6 +667,7 @@ bool PrintfSpecifier::hasValidSpacePrefix() const {
// The space prefix only makes sense for signed conversions
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
case ConversionSpecifier::fArg:
case ConversionSpecifier::FArg:
@@ -669,8 +704,10 @@ bool PrintfSpecifier::hasValidThousandsGroupingPrefix() const {
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
case ConversionSpecifier::fArg:
case ConversionSpecifier::FArg:
case ConversionSpecifier::gArg:
@@ -688,9 +725,12 @@ bool PrintfSpecifier::hasValidPrecision() const {
// Precision is only valid with the diouxXaAeEfFgGs conversions
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
case ConversionSpecifier::aArg:
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
index bb63e2c..11f2ebe 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
@@ -112,8 +112,8 @@ const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) {
static int SrcCmp(const void *p1, const void *p2) {
return
- ((std::pair<const CFGBlock *, const Stmt *>*) p2)->second->getLocStart() <
- ((std::pair<const CFGBlock *, const Stmt *>*) p1)->second->getLocStart();
+ ((const std::pair<const CFGBlock *, const Stmt *>*) p2)->second->getLocStart() <
+ ((const std::pair<const CFGBlock *, const Stmt *>*) p1)->second->getLocStart();
}
unsigned DeadCodeScan::scanBackwards(const clang::CFGBlock *Start,
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
index 2942400..574e56a 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/Basic/TargetInfo.h"
#include "FormatStringParsing.h"
using clang::analyze_format_string::ArgType;
@@ -67,7 +68,8 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
const char *&Beg,
const char *E,
unsigned &argIndex,
- const LangOptions &LO) {
+ const LangOptions &LO,
+ const TargetInfo &Target) {
using namespace clang::analyze_scanf;
const char *I = Beg;
@@ -172,6 +174,20 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
case 'o': k = ConversionSpecifier::oArg; break;
case 's': k = ConversionSpecifier::sArg; break;
case 'p': k = ConversionSpecifier::pArg; break;
+ // Apple-specific
+ case 'D':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::DArg;
+ break;
+ case 'O':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::OArg;
+ break;
+ case 'U':
+ if (Target.getTriple().isOSDarwin())
+ k = ConversionSpecifier::UArg;
+ break;
}
ScanfConversionSpecifier CS(conversionPosition, k);
if (k == ScanfConversionSpecifier::ScanListArg) {
@@ -202,6 +218,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
switch(CS.getKind()) {
// Signed int.
case ConversionSpecifier::dArg:
+ case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
switch (LM.getKind()) {
case LengthModifier::None:
@@ -233,7 +250,9 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
// Unsigned int.
case ConversionSpecifier::oArg:
+ case ConversionSpecifier::OArg:
case ConversionSpecifier::uArg:
+ case ConversionSpecifier::UArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
switch (LM.getKind()) {
@@ -430,9 +449,11 @@ bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
namedTypeToLengthModifier(PT, LM);
// If fixing the length modifier was enough, we are done.
- const analyze_scanf::ArgType &AT = getArgType(Ctx);
- if (hasValidLengthModifier() && AT.isValid() && AT.matchesType(Ctx, QT))
- return true;
+ if (hasValidLengthModifier(Ctx.getTargetInfo())) {
+ const analyze_scanf::ArgType &AT = getArgType(Ctx);
+ if (AT.isValid() && AT.matchesType(Ctx, QT))
+ return true;
+ }
// Figure out the conversion specifier.
if (PT->isRealFloatingType())
@@ -463,18 +484,19 @@ void ScanfSpecifier::toString(raw_ostream &os) const {
bool clang::analyze_format_string::ParseScanfString(FormatStringHandler &H,
const char *I,
const char *E,
- const LangOptions &LO) {
+ const LangOptions &LO,
+ const TargetInfo &Target) {
unsigned argIndex = 0;
// Keep looking for a format specifier until we have exhausted the string.
while (I != E) {
const ScanfSpecifierResult &FSR = ParseScanfSpecifier(H, I, E, argIndex,
- LO);
+ LO, Target);
// Did a fail-stop error of any kind occur when parsing the specifier?
// If so, don't do any more processing.
if (FSR.shouldStop())
- return true;;
+ return true;
// Did we exhaust the string or encounter an error that
// we can recover from?
if (!FSR.hasValue())
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
index 5954682..c7f1f62 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
@@ -70,27 +70,28 @@ namespace {
class SExpr {
private:
enum ExprOp {
- EOP_Nop, //< No-op
- EOP_Wildcard, //< Matches anything.
- EOP_This, //< This keyword.
- EOP_NVar, //< Named variable.
- EOP_LVar, //< Local variable.
- EOP_Dot, //< Field access
- EOP_Call, //< Function call
- EOP_MCall, //< Method call
- EOP_Index, //< Array index
- EOP_Unary, //< Unary operation
- EOP_Binary, //< Binary operation
- EOP_Unknown //< Catchall for everything else
+ EOP_Nop, ///< No-op
+ EOP_Wildcard, ///< Matches anything.
+ EOP_Universal, ///< Universal lock.
+ EOP_This, ///< This keyword.
+ EOP_NVar, ///< Named variable.
+ EOP_LVar, ///< Local variable.
+ EOP_Dot, ///< Field access
+ EOP_Call, ///< Function call
+ EOP_MCall, ///< Method call
+ EOP_Index, ///< Array index
+ EOP_Unary, ///< Unary operation
+ EOP_Binary, ///< Binary operation
+ EOP_Unknown ///< Catchall for everything else
};
class SExprNode {
private:
- unsigned char Op; //< Opcode of the root node
- unsigned char Flags; //< Additional opcode-specific data
- unsigned short Sz; //< Number of child nodes
- const void* Data; //< Additional opcode-specific data
+ unsigned char Op; ///< Opcode of the root node
+ unsigned char Flags; ///< Additional opcode-specific data
+ unsigned short Sz; ///< Number of child nodes
+ const void* Data; ///< Additional opcode-specific data
public:
SExprNode(ExprOp O, unsigned F, const void* D)
@@ -118,18 +119,19 @@ private:
unsigned arity() const {
switch (Op) {
- case EOP_Nop: return 0;
- case EOP_Wildcard: return 0;
- case EOP_NVar: return 0;
- case EOP_LVar: return 0;
- case EOP_This: return 0;
- case EOP_Dot: return 1;
- case EOP_Call: return Flags+1; // First arg is function.
- case EOP_MCall: return Flags+1; // First arg is implicit obj.
- case EOP_Index: return 2;
- case EOP_Unary: return 1;
- case EOP_Binary: return 2;
- case EOP_Unknown: return Flags;
+ case EOP_Nop: return 0;
+ case EOP_Wildcard: return 0;
+ case EOP_Universal: return 0;
+ case EOP_NVar: return 0;
+ case EOP_LVar: return 0;
+ case EOP_This: return 0;
+ case EOP_Dot: return 1;
+ case EOP_Call: return Flags+1; // First arg is function.
+ case EOP_MCall: return Flags+1; // First arg is implicit obj.
+ case EOP_Index: return 2;
+ case EOP_Unary: return 1;
+ case EOP_Binary: return 2;
+ case EOP_Unknown: return Flags;
}
return 0;
}
@@ -194,6 +196,11 @@ private:
return NodeVec.size()-1;
}
+ unsigned makeUniversal() {
+ NodeVec.push_back(SExprNode(EOP_Universal, 0, 0));
+ return NodeVec.size()-1;
+ }
+
unsigned makeNamedVar(const NamedDecl *D) {
NodeVec.push_back(SExprNode(EOP_NVar, 0, D));
return NodeVec.size()-1;
@@ -219,8 +226,21 @@ private:
return NodeVec.size()-1;
}
- unsigned makeMCall(unsigned NumArgs, const NamedDecl *D) {
- NodeVec.push_back(SExprNode(EOP_MCall, NumArgs, D));
+ // Grab the very first declaration of virtual method D
+ const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) {
+ while (true) {
+ D = D->getCanonicalDecl();
+ CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
+ E = D->end_overridden_methods();
+ if (I == E)
+ return D; // Method does not override anything
+ D = *I; // FIXME: this does not work with multiple inheritance.
+ }
+ return 0;
+ }
+
+ unsigned makeMCall(unsigned NumArgs, const CXXMethodDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_MCall, NumArgs, getFirstVirtualDecl(D)));
return NodeVec.size()-1;
}
@@ -300,8 +320,9 @@ private:
} else if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
// When calling a function with a lock_returned attribute, replace
// the function call with the expression in lock_returned.
- if (LockReturnedAttr* At =
- CMCE->getMethodDecl()->getAttr<LockReturnedAttr>()) {
+ CXXMethodDecl* MD =
+ cast<CXXMethodDecl>(CMCE->getMethodDecl()->getMostRecentDecl());
+ if (LockReturnedAttr* At = MD->getAttr<LockReturnedAttr>()) {
CallingContext LRCallCtx(CMCE->getMethodDecl());
LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
LRCallCtx.SelfArrow =
@@ -320,8 +341,7 @@ private:
return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref);
}
unsigned NumCallArgs = CMCE->getNumArgs();
- unsigned Root =
- makeMCall(NumCallArgs, CMCE->getMethodDecl()->getCanonicalDecl());
+ unsigned Root = makeMCall(NumCallArgs, CMCE->getMethodDecl());
unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx);
Expr** CallArgs = CMCE->getArgs();
for (unsigned i = 0; i < NumCallArgs; ++i) {
@@ -330,8 +350,9 @@ private:
NodeVec[Root].setSize(Sz + 1);
return Sz + 1;
} else if (CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
- if (LockReturnedAttr* At =
- CE->getDirectCallee()->getAttr<LockReturnedAttr>()) {
+ FunctionDecl* FD =
+ cast<FunctionDecl>(CE->getDirectCallee()->getMostRecentDecl());
+ if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
CallingContext LRCallCtx(CE->getDirectCallee());
LRCallCtx.NumArgs = CE->getNumArgs();
LRCallCtx.FunArgs = CE->getArgs();
@@ -442,9 +463,23 @@ private:
/// \param DeclExp An expression involving the Decl on which the attribute
/// occurs.
/// \param D The declaration to which the lock/unlock attribute is attached.
- void buildSExprFromExpr(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
+ void buildSExprFromExpr(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D,
+ VarDecl *SelfDecl = 0) {
CallingContext CallCtx(D);
+ if (MutexExp) {
+ if (StringLiteral* SLit = dyn_cast<StringLiteral>(MutexExp)) {
+ if (SLit->getString() == StringRef("*"))
+ // The "*" expr is a universal lock, which essentially turns off
+ // checks until it is removed from the lockset.
+ makeUniversal();
+ else
+ // Ignore other string literals for now.
+ makeNop();
+ return;
+ }
+ }
+
// If we are processing a raw attribute expression with no substitutions.
if (DeclExp == 0) {
buildSExpr(MutexExp, 0);
@@ -465,7 +500,7 @@ private:
CallCtx.NumArgs = CE->getNumArgs();
CallCtx.FunArgs = CE->getArgs();
} else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
- CallCtx.SelfArg = 0; // FIXME -- get the parent from DeclStmt
+ CallCtx.SelfArg = 0; // Will be set below
CallCtx.NumArgs = CE->getNumArgs();
CallCtx.FunArgs = CE->getArgs();
} else if (D && isa<CXXDestructorDecl>(D)) {
@@ -473,14 +508,26 @@ private:
CallCtx.SelfArg = DeclExp;
}
- // If the attribute has no arguments, then assume the argument is "this".
- if (MutexExp == 0) {
- buildSExpr(CallCtx.SelfArg, 0);
+ // Hack to handle constructors, where self cannot be recovered from
+ // the expression.
+ if (SelfDecl && !CallCtx.SelfArg) {
+ DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
+ SelfDecl->getLocation());
+ CallCtx.SelfArg = &SelfDRE;
+
+ // If the attribute has no arguments, then assume the argument is "this".
+ if (MutexExp == 0)
+ buildSExpr(CallCtx.SelfArg, 0);
+ else // For most attributes.
+ buildSExpr(MutexExp, &CallCtx);
return;
}
- // For most attributes.
- buildSExpr(MutexExp, &CallCtx);
+ // If the attribute has no arguments, then assume the argument is "this".
+ if (MutexExp == 0)
+ buildSExpr(CallCtx.SelfArg, 0);
+ else // For most attributes.
+ buildSExpr(MutexExp, &CallCtx);
}
/// \brief Get index of next sibling of node i.
@@ -496,8 +543,9 @@ public:
/// occurs.
/// \param D The declaration to which the lock/unlock attribute is attached.
/// Caller must check isValid() after construction.
- SExpr(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
- buildSExprFromExpr(MutexExp, DeclExp, D);
+ SExpr(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D,
+ VarDecl *SelfDecl=0) {
+ buildSExprFromExpr(MutexExp, DeclExp, D, SelfDecl);
}
/// Return true if this is a valid decl sequence.
@@ -506,6 +554,17 @@ public:
return !NodeVec.empty();
}
+ bool shouldIgnore() const {
+ // Nop is a mutex that we have decided to deliberately ignore.
+ assert(NodeVec.size() > 0 && "Invalid Mutex");
+ return NodeVec[0].kind() == EOP_Nop;
+ }
+
+ bool isUniversal() const {
+ assert(NodeVec.size() > 0 && "Invalid Mutex");
+ return NodeVec[0].kind() == EOP_Universal;
+ }
+
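Usage sketch for the universal lock; the wrapper macros below are hypothetical spellings of the thread-safety attributes:

// Hypothetical annotations: acquiring the literal "*" suppresses all
// "mutex not held" checks until it is released again.
void beginUnchecked() EXCLUSIVE_LOCK_FUNCTION("*");
void endUnchecked()   UNLOCK_FUNCTION("*");
// While "*" is in the lockset, findLockUniv() matches every queried mutex.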
/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler, Expr* MutexExp,
Expr *DeclExp, const NamedDecl* D) {
@@ -528,7 +587,9 @@ public:
bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const {
if (NodeVec[i].matches(Other.NodeVec[j])) {
- unsigned n = NodeVec[i].arity();
+ unsigned ni = NodeVec[i].arity();
+ unsigned nj = Other.NodeVec[j].arity();
+ unsigned n = (ni < nj) ? ni : nj;
bool Result = true;
unsigned ci = i+1; // first child of i
unsigned cj = j+1; // first child of j
@@ -541,6 +602,15 @@ public:
return false;
}
+ // A partial match between a.mu and b.mu returns true if a and b have the
+ // same type (and thus mu refers to the same mutex declaration), regardless
+ // of whether a and b are different objects.
+ bool partiallyMatches(const SExpr &Other) const {
+ if (NodeVec[0].kind() == EOP_Dot)
+ return NodeVec[0].matches(Other.NodeVec[0]);
+ return false;
+ }
+
/// \brief Pretty print a lock expression for use in error messages.
std::string toString(unsigned i = 0) const {
assert(isValid());
@@ -553,6 +623,8 @@ public:
return "_";
case EOP_Wildcard:
return "(?)";
+ case EOP_Universal:
+ return "*";
case EOP_This:
return "this";
case EOP_NVar:
@@ -695,6 +767,10 @@ struct LockData {
ID.AddInteger(AcquireLoc.getRawEncoding());
ID.AddInteger(LKind);
}
+
+ bool isAtLeast(LockKind LK) {
+ return (LK == LK_Shared) || (LKind == LK_Exclusive);
+ }
};
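The semantics of isAtLeast(), spelled out; LKind is the kind of lock held, LK the kind required:

// held \ required |  LK_Shared | LK_Exclusive
// -----------------+------------+-------------
// LK_Shared        |    yes     |     no
// LK_Exclusive     |    yes     |     yes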
@@ -780,9 +856,28 @@ public:
return false;
}
- LockData* findLock(FactManager& FM, const SExpr& M) const {
+ LockData* findLock(FactManager &FM, const SExpr &M) const {
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ const SExpr &Exp = FM[*I].MutID;
+ if (Exp.matches(M))
+ return &FM[*I].LDat;
+ }
+ return 0;
+ }
+
+ LockData* findLockUniv(FactManager &FM, const SExpr &M) const {
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ const SExpr &Exp = FM[*I].MutID;
+ if (Exp.matches(M) || Exp.isUniversal())
+ return &FM[*I].LDat;
+ }
+ return 0;
+ }
+
+ FactEntry* findPartialMatch(FactManager &FM, const SExpr &M) const {
for (const_iterator I=begin(), E=end(); I != E; ++I) {
- if (FM[*I].MutID.matches(M)) return &FM[*I].LDat;
+ const SExpr& Exp = FM[*I].MutID;
+ if (Exp.partiallyMatches(M)) return &FM[*I];
}
return 0;
}
@@ -811,6 +906,7 @@ struct CFGBlockInfo {
SourceLocation EntryLoc; // Location of first statement in block
SourceLocation ExitLoc; // Location of last statement in block.
unsigned EntryIndex; // Used to replay contexts later
+ bool Reachable; // Is this block reachable?
const FactSet &getSet(CFGBlockSide Side) const {
return Side == CBS_Entry ? EntrySet : ExitSet;
@@ -821,7 +917,7 @@ struct CFGBlockInfo {
private:
CFGBlockInfo(LocalVarContext EmptyCtx)
- : EntryContext(EmptyCtx), ExitContext(EmptyCtx)
+ : EntryContext(EmptyCtx), ExitContext(EmptyCtx), Reachable(false)
{ }
public:
@@ -939,7 +1035,7 @@ public:
return;
}
Dec->printName(llvm::errs());
- llvm::errs() << "." << i << " " << ((void*) Dec);
+ llvm::errs() << "." << i << " " << ((const void*) Dec);
}
/// Dumps an ASCII representation of the variable map to llvm::errs()
@@ -1339,7 +1435,7 @@ public:
template <typename AttrType>
void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
- const NamedDecl *D);
+ const NamedDecl *D, VarDecl *SelfDecl=0);
template <class AttrType>
void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
@@ -1376,6 +1472,9 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
const LockData &LDat) {
// FIXME: deal with acquired before/after annotations.
// FIXME: Don't always warn when we have support for reentrant locks.
+ if (Mutex.shouldIgnore())
+ return;
+
if (FSet.findLock(FactMan, Mutex)) {
Handler.handleDoubleLock(Mutex.toString(), LDat.AcquireLoc);
} else {
@@ -1385,12 +1484,15 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
/// \brief Remove a lock from the lockset, warning if the lock is not there.
-/// \param LockExp The lock expression corresponding to the lock to be removed
+/// \param Mutex The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet,
const SExpr &Mutex,
SourceLocation UnlockLoc,
bool FullyRemove) {
+ if (Mutex.shouldIgnore())
+ return;
+
const LockData *LDat = FSet.findLock(FactMan, Mutex);
if (!LDat) {
Handler.handleUnmatchedUnlock(Mutex.toString(), UnlockLoc);
@@ -1423,12 +1525,13 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
- Expr *Exp, const NamedDecl *D) {
+ Expr *Exp, const NamedDecl *D,
+ VarDecl *SelfDecl) {
typedef typename AttrType::args_iterator iterator_type;
if (Attr->args_size() == 0) {
// The mutex held is the "this" object.
- SExpr Mu(0, Exp, D);
+ SExpr Mu(0, Exp, D, SelfDecl);
if (!Mu.isValid())
SExpr::warnInvalidLock(Handler, 0, Exp, D);
else
@@ -1437,7 +1540,7 @@ void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
}
for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
- SExpr Mu(*I, Exp, D);
+ SExpr Mu(*I, Exp, D, SelfDecl);
if (!Mu.isValid())
SExpr::warnInvalidLock(Handler, *I, Exp, D);
else
@@ -1512,6 +1615,9 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
}
+ else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
+ return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
+ }
else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
return getTrylockCallExpr(E, C, Negate);
@@ -1591,7 +1697,7 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
case attr::SharedTrylockFunction: {
SharedTrylockFunctionAttr *A =
cast<SharedTrylockFunctionAttr>(Attr);
- getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
+ getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
PredBlock, CurrBlock, A->getSuccessValue(), Negate);
break;
}
@@ -1631,39 +1737,12 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
void warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, AccessKind AK,
Expr *MutexExp, ProtectedOperationKind POK);
+ void warnIfMutexHeld(const NamedDecl *D, Expr *Exp, Expr *MutexExp);
void checkAccess(Expr *Exp, AccessKind AK);
void checkDereference(Expr *Exp, AccessKind AK);
void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = 0);
- /// \brief Returns true if the lockset contains a lock, regardless of whether
- /// the lock is held exclusively or shared.
- bool locksetContains(const SExpr &Mu) const {
- return FSet.findLock(Analyzer->FactMan, Mu);
- }
-
- /// \brief Returns true if the lockset contains a lock with the passed in
- /// locktype.
- bool locksetContains(const SExpr &Mu, LockKind KindRequested) const {
- const LockData *LockHeld = FSet.findLock(Analyzer->FactMan, Mu);
- return (LockHeld && KindRequested == LockHeld->LKind);
- }
-
- /// \brief Returns true if the lockset contains a lock with at least the
- /// passed in locktype. So for example, if we pass in LK_Shared, this function
- /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
- /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
- bool locksetContainsAtLeast(const SExpr &Lock,
- LockKind KindRequested) const {
- switch (KindRequested) {
- case LK_Shared:
- return locksetContains(Lock);
- case LK_Exclusive:
- return locksetContains(Lock, KindRequested);
- }
- llvm_unreachable("Unknown LockKind");
- }
-
public:
BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
: StmtVisitor<BuildLockset>(),
@@ -1701,13 +1780,57 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
LockKind LK = getLockKindFromAccessKind(AK);
SExpr Mutex(MutexExp, Exp, D);
- if (!Mutex.isValid())
+ if (!Mutex.isValid()) {
SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
- else if (!locksetContainsAtLeast(Mutex, LK))
+ return;
+ } else if (Mutex.shouldIgnore()) {
+ return;
+ }
+
+ LockData* LDat = FSet.findLockUniv(Analyzer->FactMan, Mutex);
+ bool NoError = true;
+ if (!LDat) {
+ // No exact match found. Look for a partial match.
+ FactEntry* FEntry = FSet.findPartialMatch(Analyzer->FactMan, Mutex);
+ if (FEntry) {
+ // Warn that there's no precise match.
+ LDat = &FEntry->LDat;
+ std::string PartMatchStr = FEntry->MutID.toString();
+ StringRef PartMatchName(PartMatchStr);
+ Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
+ Exp->getExprLoc(), &PartMatchName);
+ } else {
+ // Warn that there's no match at all.
+ Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
+ Exp->getExprLoc());
+ }
+ NoError = false;
+ }
+ // Make sure the mutex we found is the right kind.
+ if (NoError && LDat && !LDat->isAtLeast(LK))
Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
Exp->getExprLoc());
}
+/// \brief Warn if the LSet contains the given lock.
+void BuildLockset::warnIfMutexHeld(const NamedDecl *D, Expr* Exp,
+ Expr *MutexExp) {
+ SExpr Mutex(MutexExp, Exp, D);
+ if (!Mutex.isValid()) {
+ SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
+ return;
+ }
+
+ LockData* LDat = FSet.findLock(Analyzer->FactMan, Mutex);
+ if (LDat) {
+ std::string DeclName = D->getNameAsString();
+ StringRef DeclNameSR (DeclName);
+ Analyzer->Handler.handleFunExcludesLock(DeclNameSR, Mutex.toString(),
+ Exp->getExprLoc());
+ }
+}
+
+
/// \brief This method identifies variable dereferences and checks pt_guarded_by
/// and pt_guarded_var annotations. Note that we only check these annotations
/// at the time a pointer is dereferenced.
@@ -1776,7 +1899,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
// to our lockset with kind exclusive.
case attr::ExclusiveLockFunction: {
ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(At);
- Analyzer->getMutexIDs(ExclusiveLocksToAdd, A, Exp, D);
+ Analyzer->getMutexIDs(ExclusiveLocksToAdd, A, Exp, D, VD);
break;
}
@@ -1784,7 +1907,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
// to our lockset with kind shared.
case attr::SharedLockFunction: {
SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(At);
- Analyzer->getMutexIDs(SharedLocksToAdd, A, Exp, D);
+ Analyzer->getMutexIDs(SharedLocksToAdd, A, Exp, D, VD);
break;
}
@@ -1792,7 +1915,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
// mutexes from the lockset, and flag a warning if they are not there.
case attr::UnlockFunction: {
UnlockFunctionAttr *A = cast<UnlockFunctionAttr>(At);
- Analyzer->getMutexIDs(LocksToRemove, A, Exp, D);
+ Analyzer->getMutexIDs(LocksToRemove, A, Exp, D, VD);
break;
}
@@ -1816,15 +1939,10 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
case attr::LocksExcluded: {
LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
+
for (LocksExcludedAttr::args_iterator I = A->args_begin(),
E = A->args_end(); I != E; ++I) {
- SExpr Mutex(*I, Exp, D);
- if (!Mutex.isValid())
- SExpr::warnInvalidLock(Analyzer->Handler, *I, Exp, D);
- else if (locksetContains(Mutex))
- Analyzer->Handler.handleFunExcludesLock(D->getName(),
- Mutex.toString(),
- Exp->getExprLoc());
+ warnIfMutexHeld(D, Exp, *I);
}
break;
}
@@ -1973,8 +2091,8 @@ void BuildLockset::VisitDeclStmt(DeclStmt *S) {
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
///
-/// \param LSet1 The first lockset.
-/// \param LSet2 The second lockset.
+/// \param FSet1 The first lockset.
+/// \param FSet2 The second lockset.
/// \param JoinLoc The location of the join point for error reporting
/// \param LEK1 The error message to report if a mutex is missing from LSet1
/// \param LEK2 The error message to report if a mutex is missing from Lset2
@@ -2012,7 +2130,7 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
JoinLoc, LEK1);
}
}
- else if (!LDat2.Managed)
+ else if (!LDat2.Managed && !FSet2Mutex.isUniversal())
Handler.handleMutexHeldEndOfScope(FSet2Mutex.toString(),
LDat2.AcquireLoc,
JoinLoc, LEK1);
@@ -2035,7 +2153,7 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
JoinLoc, LEK1);
}
}
- else if (!LDat1.Managed)
+ else if (!LDat1.Managed && !FSet1Mutex.isUniversal())
Handler.handleMutexHeldEndOfScope(FSet1Mutex.toString(),
LDat1.AcquireLoc,
JoinLoc, LEK2);
@@ -2081,6 +2199,9 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
+ // Mark entry block as reachable
+ BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;
+
// Compute SSA names for local variables
LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
@@ -2168,10 +2289,16 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
continue;
+ int PrevBlockID = (*PI)->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
// Ignore edges from blocks that can't return.
- if ((*PI)->hasNoReturnElement())
+ if ((*PI)->hasNoReturnElement() || !PrevBlockInfo->Reachable)
continue;
+ // Okay, we can reach this block from the entry.
+ CurrBlockInfo->Reachable = true;
+
// If the previous block ended in a 'continue' or 'break' statement, then
// a difference in locksets is probably due to a bug in that block, rather
// than in some other predecessor. In that case, keep the other
@@ -2183,8 +2310,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
}
}
- int PrevBlockID = (*PI)->getBlockID();
- CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
FactSet PrevLockset;
getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
@@ -2198,6 +2324,10 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
}
}
+ // Skip rest of block if it's not reachable.
+ if (!CurrBlockInfo->Reachable)
+ continue;
+
// Process continue and break blocks. Assume that the lockset for the
// resulting block is unaffected by any discrepancies in them.
for (unsigned SpecialI = 0, SpecialN = SpecialBlocks.size();
@@ -2287,6 +2417,10 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];
+ // Skip the final check if the exit block is unreachable.
+ if (!Final->Reachable)
+ return;
+
// FIXME: Should we call this function for all blocks which exit the function?
intersectAndWarn(Initial->EntrySet, Final->ExitSet,
Final->ExitLoc,
diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
index 858be45..b2e27ca 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
@@ -13,6 +13,7 @@
#include <utility>
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/PackedVector.h"
#include "llvm/ADT/DenseMap.h"
@@ -22,6 +23,7 @@
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
+#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -97,22 +99,21 @@ static bool isAlwaysUninit(const Value v) {
namespace {
-typedef llvm::PackedVector<Value, 2> ValueVector;
+typedef llvm::PackedVector<Value, 2, llvm::SmallBitVector> ValueVector;
class CFGBlockValues {
const CFG &cfg;
- std::vector<ValueVector*> vals;
+ SmallVector<ValueVector, 8> vals;
ValueVector scratch;
DeclToIndex declToIndex;
public:
CFGBlockValues(const CFG &cfg);
- ~CFGBlockValues();
unsigned getNumEntries() const { return declToIndex.size(); }
void computeSetOfDeclarations(const DeclContext &dc);
ValueVector &getValueVector(const CFGBlock *block) {
- return *vals[block->getBlockID()];
+ return vals[block->getBlockID()];
}
void setAllScratchValues(Value V);
@@ -138,12 +139,6 @@ public:
CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {}
-CFGBlockValues::~CFGBlockValues() {
- for (std::vector<ValueVector*>::iterator I = vals.begin(), E = vals.end();
- I != E; ++I)
- delete *I;
-}
-
void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
declToIndex.computeMap(dc);
unsigned decls = declToIndex.size();
@@ -153,7 +148,7 @@ void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
return;
vals.resize(n);
for (unsigned i = 0; i < n; ++i)
- vals[i] = new ValueVector(decls);
+ vals[i].resize(decls);
}
#if DEBUG_LOGGING
@@ -412,6 +407,7 @@ class TransferFunctions : public StmtVisitor<TransferFunctions> {
const CFGBlock *block;
AnalysisDeclContext &ac;
const ClassifyRefs &classification;
+ ObjCNoReturn objCNoRet;
UninitVariablesHandler *handler;
public:
@@ -420,16 +416,18 @@ public:
const ClassifyRefs &classification,
UninitVariablesHandler *handler)
: vals(vals), cfg(cfg), block(block), ac(ac),
- classification(classification), handler(handler) {}
+ classification(classification), objCNoRet(ac.getASTContext()),
+ handler(handler) {}
void reportUse(const Expr *ex, const VarDecl *vd);
- void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
+ void VisitBinaryOperator(BinaryOperator *bo);
void VisitBlockExpr(BlockExpr *be);
void VisitCallExpr(CallExpr *ce);
- void VisitDeclStmt(DeclStmt *ds);
void VisitDeclRefExpr(DeclRefExpr *dr);
- void VisitBinaryOperator(BinaryOperator *bo);
+ void VisitDeclStmt(DeclStmt *ds);
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
+ void VisitObjCMessageExpr(ObjCMessageExpr *ME);
bool isTrackedVar(const VarDecl *vd) {
return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl()));
@@ -605,14 +603,26 @@ void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
}
void TransferFunctions::VisitCallExpr(CallExpr *ce) {
- // After a call to a function like setjmp or vfork, any variable which is
- // initialized anywhere within this function may now be initialized. For now,
- // just assume such a call initializes all variables.
- // FIXME: Only mark variables as initialized if they have an initializer which
- // is reachable from here.
- Decl *Callee = ce->getCalleeDecl();
- if (Callee && Callee->hasAttr<ReturnsTwiceAttr>())
- vals.setAllScratchValues(Initialized);
+ if (Decl *Callee = ce->getCalleeDecl()) {
+ if (Callee->hasAttr<ReturnsTwiceAttr>()) {
+ // After a call to a function like setjmp or vfork, any variable which is
+ // initialized anywhere within this function may now be initialized.
+ // For now, just assume such a call initializes all variables.
+ // FIXME: Only mark variables as initialized if they have an
+ // initializer which is reachable from here.
+ vals.setAllScratchValues(Initialized);
+ }
+ else if (Callee->hasAttr<AnalyzerNoReturnAttr>()) {
+ // Functions labeled "analyzer_noreturn" are often used to denote
+ // "panic" functions that in special debug situations can still return,
+ // but for the most part should not be treated as returning. This
+ // annotation, borrowed from the static analyzer, is useful for
+ // suppressing branch-specific false positives when we call one of these
+ // functions but keep pretending the path continues (when in reality the
+ // user doesn't care).
+ vals.setAllScratchValues(Unknown);
+ }
+ }
}
void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
@@ -677,6 +687,14 @@ void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
}
}
+void TransferFunctions::VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ // If the Objective-C message expression is an implicit no-return that
+ // is not modeled in the CFG, set the tracked dataflow values to Unknown.
+ if (objCNoRet.isImplicitNoReturn(ME)) {
+ vals.setAllScratchValues(Unknown);
+ }
+}
+
//------------------------------------------------------------------------====//
// High-level "driver" logic for uninitialized values analysis.
//====------------------------------------------------------------------------//
diff --git a/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c b/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
index 4793b25..d16965d 100644
--- a/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
+++ b/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
@@ -111,7 +111,6 @@ static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC
* into an inline function.
*/
-#ifdef CLANG_NEEDS_THESE_ONE_DAY
/* --------------------------------------------------------------------- */
@@ -285,7 +284,6 @@ ConversionResult ConvertUTF16toUTF8 (
*targetStart = target;
return result;
}
-#endif
/* --------------------------------------------------------------------- */
@@ -361,7 +359,7 @@ static Boolean isLegalUTF8(const UTF8 *source, int length) {
/* Everything else falls through when "true"... */
case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- case 2: if ((a = (*--srcptr)) > 0xBF) return false;
+ case 2: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
switch (*source) {
/* no fall-through in this inner switch */
@@ -395,15 +393,25 @@ Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) {
/* --------------------------------------------------------------------- */
/*
+ * Exported function to return the total number of bytes in a codepoint
+ * represented in UTF-8, given the value of the first byte.
+ */
+unsigned getNumBytesForUTF8(UTF8 first) {
+ return trailingBytesForUTF8[first] + 1;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
* Exported function to return whether a UTF-8 string is legal or not.
* This is not used here; it's just exported.
*/
-Boolean isLegalUTF8String(const UTF8 *source, const UTF8 *sourceEnd) {
- while (source != sourceEnd) {
- int length = trailingBytesForUTF8[*source] + 1;
- if (length > sourceEnd - source || !isLegalUTF8(source, length))
+Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd) {
+ while (*source != sourceEnd) {
+ int length = trailingBytesForUTF8[**source] + 1;
+ if (length > sourceEnd - *source || !isLegalUTF8(*source, length))
return false;
- source += length;
+ *source += length;
}
return true;
}
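Taken together, the ConvertUTF.c hunks re-enable the UTF-16 to UTF-8 path, tighten the two-byte continuation check (a trail byte below 0x80 previously slipped through), export getNumBytesForUTF8 for callers that need a sequence length up front, and turn isLegalUTF8String into an in-out API that leaves the cursor at the first bad sequence. A hedged C sketch of the new caller pattern (buffer contents invented):

    const UTF8 buf[] = { 'a', 0xC3, 0x28, 'b' };  /* 0x28 is not a legal trail byte */
    const UTF8 *pos = buf;
    if (!isLegalUTF8String(&pos, buf + sizeof(buf))) {
      /* pos now points at the offending lead byte (index 1 here), so a
         caller such as the lexer can attach a precise diagnostic, and
         getNumBytesForUTF8(*pos) says how long the sequence claimed to be. */
    }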
diff --git a/contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp b/contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp
index a1b3f7f..6be3828 100644
--- a/contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp
@@ -13,16 +13,19 @@
namespace clang {
bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
- char *&ResultPtr) {
+ char *&ResultPtr, const UTF8 *&ErrorPtr) {
assert(WideCharWidth == 1 || WideCharWidth == 2 || WideCharWidth == 4);
ConversionResult result = conversionOK;
// Copy the character span over.
if (WideCharWidth == 1) {
- if (!isLegalUTF8String(reinterpret_cast<const UTF8*>(Source.begin()),
- reinterpret_cast<const UTF8*>(Source.end())))
+ const UTF8 *Pos = reinterpret_cast<const UTF8*>(Source.begin());
+ if (!isLegalUTF8String(&Pos, reinterpret_cast<const UTF8*>(Source.end()))) {
result = sourceIllegal;
- memcpy(ResultPtr, Source.data(), Source.size());
- ResultPtr += Source.size();
+ ErrorPtr = Pos;
+ } else {
+ memcpy(ResultPtr, Source.data(), Source.size());
+ ResultPtr += Source.size();
+ }
} else if (WideCharWidth == 2) {
const UTF8 *sourceStart = (const UTF8*)Source.data();
// FIXME: Make the type of the result buffer correct instead of
@@ -34,6 +37,8 @@ bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
&targetStart, targetStart + 2*Source.size(), flags);
if (result == conversionOK)
ResultPtr = reinterpret_cast<char*>(targetStart);
+ else
+ ErrorPtr = sourceStart;
} else if (WideCharWidth == 4) {
const UTF8 *sourceStart = (const UTF8*)Source.data();
// FIXME: Make the type of the result buffer correct instead of
@@ -45,6 +50,8 @@ bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
&targetStart, targetStart + 4*Source.size(), flags);
if (result == conversionOK)
ResultPtr = reinterpret_cast<char*>(targetStart);
+ else
+ ErrorPtr = sourceStart;
}
assert((result != targetExhausted)
&& "ConvertUTF8toUTFXX exhausted target buffer");
@@ -67,4 +74,3 @@ bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr) {
}
} // end namespace clang
-
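A hedged sketch of a caller using the new out-parameter (setup invented; note that on the UTF-16/UTF-32 paths the patch records only the start of the source, not the exact failure point):

    llvm::StringRef Src = getLiteralBytes();         /* hypothetical helper */
    llvm::SmallVector<char, 64> Buf(Src.size() * 4); /* worst-case UTF-32 size */
    char *Out = Buf.data();
    const UTF8 *Err = 0;
    if (!clang::ConvertUTF8toWide(/*WideCharWidth=*/4, Src, Out, Err)) {
      /* Err identifies where the UTF-8 input went wrong, enabling a
         caret diagnostic instead of a vague "invalid string" error. */
    }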
diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
index 8065b2d..854c4c5 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
@@ -12,9 +12,11 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include <cctype>
@@ -36,9 +38,10 @@ static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
DiagnosticsEngine::DiagnosticsEngine(
const IntrusiveRefCntPtr<DiagnosticIDs> &diags,
+ DiagnosticOptions *DiagOpts,
DiagnosticConsumer *client, bool ShouldOwnClient)
- : Diags(diags), Client(client), OwnsDiagClient(ShouldOwnClient),
- SourceMgr(0) {
+ : Diags(diags), DiagOpts(DiagOpts), Client(client),
+ OwnsDiagClient(ShouldOwnClient), SourceMgr(0) {
ArgToStringFn = DummyArgToStringFn;
ArgToStringCookie = 0;
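Construction sites must now hand the engine its DiagnosticOptions up front; a hedged sketch of the updated instantiation (ownership conventions assumed):

    using namespace clang;
    IntrusiveRefCntPtr<DiagnosticIDs> IDs(new DiagnosticIDs());
    DiagnosticOptions *Opts = new DiagnosticOptions();
    DiagnosticsEngine Diags(IDs, Opts, new IgnoringDiagConsumer(),
                            /*ShouldOwnClient=*/true);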
@@ -515,23 +518,7 @@ static void HandleOrdinalModifier(unsigned ValNo,
// We could use text forms for the first N ordinals, but the numeric
// forms are actually nicer in diagnostics because they stand out.
- Out << ValNo;
-
- // It is critically important that we do this perfectly for
- // user-written sequences with over 100 elements.
- switch (ValNo % 100) {
- case 11:
- case 12:
- case 13:
- Out << "th"; return;
- default:
- switch (ValNo % 10) {
- case 1: Out << "st"; return;
- case 2: Out << "nd"; return;
- case 3: Out << "rd"; return;
- default: Out << "th"; return;
- }
- }
+ Out << ValNo << llvm::getOrdinalSuffix(ValNo);
}
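The deleted switch now lives behind llvm::getOrdinalSuffix from StringExtras.h (included above). For reference, the English ordinal rule it centralizes, as a standalone sketch rather than the LLVM source:

    const char *ordinalSuffix(unsigned n) {
      /* 11, 12 and 13 always take "th" (111th, not 111st). */
      switch (n % 100) {
      case 11: case 12: case 13: return "th";
      default:
        switch (n % 10) {
        case 1:  return "st";
        case 2:  return "nd";
        case 3:  return "rd";
        default: return "th";
        }
      }
    }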
diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
index ca96fd2..ed97643 100644
--- a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
@@ -628,9 +628,9 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
if (DiagLevel >= DiagnosticIDs::Error) {
if (isUnrecoverable(DiagID))
Diag.UnrecoverableErrorOccurred = true;
-
+
+ Diag.ErrorOccurred = true;
if (Diag.Client->IncludeInDiagnosticCounts()) {
- Diag.ErrorOccurred = true;
++Diag.NumErrors;
}
@@ -686,4 +686,3 @@ bool DiagnosticIDs::isARCDiagnostic(unsigned DiagID) {
unsigned cat = getCategoryNumberForDiag(DiagID);
return DiagnosticIDs::getCategoryNameFromID(cat).startswith("ARC ");
}
-
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
index c6b894c..a816969 100644
--- a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
@@ -36,6 +36,9 @@
#include <sys/uio.h>
#else
#include <io.h>
+#ifndef S_ISFIFO
+#define S_ISFIFO(x) (0)
+#endif
#endif
using namespace clang;
@@ -57,6 +60,10 @@ FileEntry::~FileEntry() {
if (FD != -1) ::close(FD);
}
+bool FileEntry::isNamedPipe() const {
+ return S_ISFIFO(FileMode);
+}
+
//===----------------------------------------------------------------------===//
// Windows.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
index 4869ae1..1965bf9 100644
--- a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
@@ -17,7 +17,6 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
#include <cctype>
@@ -33,6 +32,7 @@ IdentifierInfo::IdentifierInfo() {
TokenID = tok::identifier;
ObjCOrBuiltinID = 0;
HasMacro = false;
+ HadMacro = false;
IsExtension = false;
IsCXX11CompatKeyword = false;
IsPoisoned = false;
@@ -105,6 +105,7 @@ namespace {
KEYC11 = 0x400,
KEYARC = 0x800,
KEYNOMS = 0x01000,
+ WCHARSUPPORT = 0x02000,
KEYALL = (0xffff & ~KEYNOMS) // Because KEYNOMS is used to exclude.
};
}
@@ -129,6 +130,7 @@ static void AddKeyword(StringRef Keyword,
else if (LangOpts.MicrosoftExt && (Flags & KEYMS)) AddResult = 1;
else if (LangOpts.Borland && (Flags & KEYBORLAND)) AddResult = 1;
else if (LangOpts.Bool && (Flags & BOOLSUPPORT)) AddResult = 2;
+ else if (LangOpts.WChar && (Flags & WCHARSUPPORT)) AddResult = 2;
else if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) AddResult = 2;
else if (LangOpts.OpenCL && (Flags & KEYOPENCL)) AddResult = 2;
else if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) AddResult = 2;
diff --git a/contrib/llvm/tools/clang/lib/Basic/Module.cpp b/contrib/llvm/tools/clang/lib/Basic/Module.cpp
index 6348840..76c7f8b 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Module.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Module.cpp
@@ -23,8 +23,8 @@ using namespace clang;
Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
bool IsFramework, bool IsExplicit)
- : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent),
- Umbrella(), IsAvailable(true), IsFromModuleFile(false),
+ : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent),
+ Umbrella(), ASTFile(0), IsAvailable(true), IsFromModuleFile(false),
IsFramework(IsFramework), IsExplicit(IsExplicit), IsSystem(false),
InferSubmodules(false), InferExplicitSubmodules(false),
InferExportWildcard(false), NameVisibility(Hidden)
@@ -219,6 +219,13 @@ void Module::print(llvm::raw_ostream &OS, unsigned Indent) const {
OS.write_escaped(Headers[I]->getName());
OS << "\"\n";
}
+
+ for (unsigned I = 0, N = ExcludedHeaders.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "exclude header \"";
+ OS.write_escaped(ExcludedHeaders[I]->getName());
+ OS << "\"\n";
+ }
for (submodule_const_iterator MI = submodule_begin(), MIEnd = submodule_end();
MI != MIEnd; ++MI)
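The printer now round-trips exclusion declarations as well; a hedged example of the module-map syntax the new loop emits (module and header names invented):

    module MyLib {
      header "MyLib.h"
      exclude header "MyLib_Internal.h"
      export *
    }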
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
index bb5a10a..0d62f7b 100644
--- a/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
@@ -61,6 +61,13 @@ void SourceLocation::print(raw_ostream &OS, const SourceManager &SM)const{
OS << '>';
}
+std::string SourceLocation::printToString(const SourceManager &SM) const {
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+ print(OS, SM);
+ return S;
+}
+
void SourceLocation::dump(const SourceManager &SM) const {
print(llvm::errs(), SM);
}
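printToString is a convenience wrapper over the existing print; a hedged usage sketch (variable names invented):

    std::string Where = Loc.printToString(SM);
    llvm::errs() << "expanded at " << Where << "\n";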
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
index 9ec2474..cd0284a 100644
--- a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
@@ -1029,6 +1029,17 @@ unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
return 1;
}
+ // See if we just calculated the line number for this FilePos and can use
+ // that to look up the start of the line instead of searching for it.
+ if (LastLineNoFileIDQuery == FID &&
+ LastLineNoContentCache->SourceLineCache != 0) {
+ unsigned *SourceLineCache = LastLineNoContentCache->SourceLineCache;
+ unsigned LineStart = SourceLineCache[LastLineNoResult - 1];
+ unsigned LineEnd = SourceLineCache[LastLineNoResult];
+ if (FilePos >= LineStart && FilePos < LineEnd)
+ return FilePos - LineStart + 1;
+ }
+
const char *Buf = MemBuf->getBufferStart();
unsigned LineStart = FilePos;
while (LineStart && Buf[LineStart-1] != '\n' && Buf[LineStart-1] != '\r')
@@ -1112,7 +1123,7 @@ static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
// Scan 16 byte chunks for '\r' and '\n'. Ignore '\0'.
while (NextBuf+16 <= End) {
- __m128i Chunk = *(__m128i*)NextBuf;
+ const __m128i Chunk = *(const __m128i*)NextBuf;
__m128i Cmp = _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs),
_mm_cmpeq_epi8(Chunk, LFs));
unsigned Mask = _mm_movemask_epi8(Cmp);
@@ -1577,6 +1588,7 @@ FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
}
}
+ (void) SourceFile;
return FirstFID;
}
@@ -1693,46 +1705,91 @@ void SourceManager::computeMacroArgsCache(MacroArgsMap *&CachePtr,
if (!ExpInfo.isMacroArgExpansion())
continue;
- SourceLocation SpellLoc = ExpInfo.getSpellingLoc();
- while (!SpellLoc.isFileID()) {
- std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(SpellLoc);
- const ExpansionInfo &Info = getSLocEntry(LocInfo.first).getExpansion();
- if (!Info.isMacroArgExpansion())
- break;
- SpellLoc = Info.getSpellingLoc().getLocWithOffset(LocInfo.second);
+ associateFileChunkWithMacroArgExp(MacroArgsCache, FID,
+ ExpInfo.getSpellingLoc(),
+ SourceLocation::getMacroLoc(Entry.getOffset()),
+ getFileIDSize(FileID::get(ID)));
+ }
+}
+
+void SourceManager::associateFileChunkWithMacroArgExp(
+ MacroArgsMap &MacroArgsCache,
+ FileID FID,
+ SourceLocation SpellLoc,
+ SourceLocation ExpansionLoc,
+ unsigned ExpansionLength) const {
+ if (!SpellLoc.isFileID()) {
+ unsigned SpellBeginOffs = SpellLoc.getOffset();
+ unsigned SpellEndOffs = SpellBeginOffs + ExpansionLength;
+
+ // The spelling range for this macro argument expansion can span multiple
+ // consecutive FileID entries. Go through each entry contained in the
+ // spelling range and if one is itself a macro argument expansion, recurse
+ // and associate the file chunk that it represents.
+
+ FileID SpellFID; // Current FileID in the spelling range.
+ unsigned SpellRelativeOffs;
+ llvm::tie(SpellFID, SpellRelativeOffs) = getDecomposedLoc(SpellLoc);
+ while (1) {
+ const SLocEntry &Entry = getSLocEntry(SpellFID);
+ unsigned SpellFIDBeginOffs = Entry.getOffset();
+ unsigned SpellFIDSize = getFileIDSize(SpellFID);
+ unsigned SpellFIDEndOffs = SpellFIDBeginOffs + SpellFIDSize;
+ const ExpansionInfo &Info = Entry.getExpansion();
+ if (Info.isMacroArgExpansion()) {
+ unsigned CurrSpellLength;
+ if (SpellFIDEndOffs < SpellEndOffs)
+ CurrSpellLength = SpellFIDSize - SpellRelativeOffs;
+ else
+ CurrSpellLength = ExpansionLength;
+ associateFileChunkWithMacroArgExp(MacroArgsCache, FID,
+ Info.getSpellingLoc().getLocWithOffset(SpellRelativeOffs),
+ ExpansionLoc, CurrSpellLength);
+ }
+
+ if (SpellFIDEndOffs >= SpellEndOffs)
+ return; // we covered all FileID entries in the spelling range.
+
+ // Move to the next FileID entry in the spelling range.
+ unsigned advance = SpellFIDSize - SpellRelativeOffs + 1;
+ ExpansionLoc = ExpansionLoc.getLocWithOffset(advance);
+ ExpansionLength -= advance;
+ ++SpellFID.ID;
+ SpellRelativeOffs = 0;
}
- if (!SpellLoc.isFileID())
- continue;
-
- unsigned BeginOffs;
- if (!isInFileID(SpellLoc, FID, &BeginOffs))
- continue;
- unsigned EndOffs = BeginOffs + getFileIDSize(FileID::get(ID));
-
- // Add a new chunk for this macro argument. A previous macro argument chunk
- // may have been lexed again, so e.g. if the map is
- // 0 -> SourceLocation()
- // 100 -> Expanded loc #1
- // 110 -> SourceLocation()
- // and we found a new macro FileID that lexed from offet 105 with length 3,
- // the new map will be:
- // 0 -> SourceLocation()
- // 100 -> Expanded loc #1
- // 105 -> Expanded loc #2
- // 108 -> Expanded loc #1
- // 110 -> SourceLocation()
- //
- // Since re-lexed macro chunks will always be the same size or less of
- // previous chunks, we only need to find where the ending of the new macro
- // chunk is mapped to and update the map with new begin/end mappings.
-
- MacroArgsMap::iterator I = MacroArgsCache.upper_bound(EndOffs);
- --I;
- SourceLocation EndOffsMappedLoc = I->second;
- MacroArgsCache[BeginOffs] = SourceLocation::getMacroLoc(Entry.getOffset());
- MacroArgsCache[EndOffs] = EndOffsMappedLoc;
}
+
+ assert(SpellLoc.isFileID());
+
+ unsigned BeginOffs;
+ if (!isInFileID(SpellLoc, FID, &BeginOffs))
+ return;
+
+ unsigned EndOffs = BeginOffs + ExpansionLength;
+
+ // Add a new chunk for this macro argument. A previous macro argument chunk
+ // may have been lexed again, so e.g. if the map is
+ // 0 -> SourceLocation()
+ // 100 -> Expanded loc #1
+ // 110 -> SourceLocation()
+ // and we found a new macro FileID that lexed from offset 105 with length 3,
+ // the new map will be:
+ // 0 -> SourceLocation()
+ // 100 -> Expanded loc #1
+ // 105 -> Expanded loc #2
+ // 108 -> Expanded loc #1
+ // 110 -> SourceLocation()
+ //
+ // Since re-lexed macro chunks will always be the same size as or smaller
+ // than previous chunks, we only need to find where the ending of the new
+ // macro chunk is mapped to and update the map with new begin/end mappings.
+
+ MacroArgsMap::iterator I = MacroArgsCache.upper_bound(EndOffs);
+ --I;
+ SourceLocation EndOffsMappedLoc = I->second;
+ MacroArgsCache[BeginOffs] = ExpansionLoc;
+ MacroArgsCache[EndOffs] = EndOffsMappedLoc;
}
/// \brief If \arg Loc points inside a function macro argument, the returned
diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
index db5941a..83d4e2b 100644
--- a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
@@ -24,7 +24,8 @@ using namespace clang;
static const LangAS::Map DefaultAddrSpaceMap = { 0 };
// TargetInfo Constructor.
-TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
+TargetInfo::TargetInfo(const std::string &T) : TargetOpts(), Triple(T)
+{
// Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
// SPARC. These should be overridden by concrete targets as needed.
BigEndian = true;
@@ -59,6 +60,7 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
Char32Type = UnsignedInt;
Int64Type = SignedLongLong;
SigAtomicType = SignedInt;
+ ProcessIDType = SignedInt;
UseSignedCharForObjCBool = true;
UseBitFieldTypeAlignment = true;
UseZeroLengthBitfieldAlignment = false;
@@ -363,6 +365,8 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
break;
case '?': // Disparage slightly code.
case '!': // Disparage severely.
+ case '#': // Ignore as constraint.
+ case '*': // Ignore for choosing register preferences.
break; // Pass them.
}
@@ -482,6 +486,8 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
break;
case '?': // Disparage slightly code.
case '!': // Disparage severely.
+ case '#': // Ignore as constraint.
+ case '*': // Ignore for choosing register preferences.
break; // Pass them.
}
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
index 1d495f1..f36ef82 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -92,6 +92,9 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("__APPLE__");
Builder.defineMacro("__MACH__");
Builder.defineMacro("OBJC_NEW_PROPERTIES");
+ // AddressSanitizer doesn't play well with source fortification, which is on
+ // by default on Darwin.
+ if (Opts.SanitizeAddress) Builder.defineMacro("_FORTIFY_SOURCE", "0");
if (!Opts.ObjCAutoRefCount) {
// __weak is always defined, for use in blocks and with objc pointers.
@@ -316,7 +319,7 @@ protected:
DefineStd(Builder, "linux", Opts);
Builder.defineMacro("__gnu_linux__");
Builder.defineMacro("__ELF__");
- if (Triple.getEnvironment() == llvm::Triple::ANDROIDEABI)
+ if (Triple.getEnvironment() == llvm::Triple::Android)
Builder.defineMacro("__ANDROID__", "1");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
@@ -587,6 +590,48 @@ public:
: OSTargetInfo<Target>(triple) {}
};
+template <typename Target>
+class NaClTargetInfo : public OSTargetInfo<Target> {
+ protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__native_client__");
+ }
+ public:
+ NaClTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ this->LongAlign = 32;
+ this->LongWidth = 32;
+ this->PointerAlign = 32;
+ this->PointerWidth = 32;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->UIntMaxType = TargetInfo::UnsignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
+ this->DoubleAlign = 64;
+ this->LongDoubleWidth = 64;
+ this->LongDoubleAlign = 64;
+ this->SizeType = TargetInfo::UnsignedInt;
+ this->PtrDiffType = TargetInfo::SignedInt;
+ this->IntPtrType = TargetInfo::SignedInt;
+ this->RegParmMax = 2;
+ this->LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ this->DescriptionString = "e-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
+ "f32:32:32-f64:64:64-p:32:32:32-v128:32:32";
+ }
+ virtual typename Target::CallingConvCheckResult checkCallingConvention(
+ CallingConv CC) const {
+ return CC == CC_PnaclCall ? Target::CCCR_OK :
+ Target::checkCallingConvention(CC);
+ }
+};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
@@ -641,6 +686,8 @@ public:
.Case("970", true)
.Case("g5", true)
.Case("a2", true)
+ .Case("e500mc", true)
+ .Case("e5500", true)
.Case("pwr6", true)
.Case("pwr7", true)
.Case("ppc", true)
@@ -990,6 +1037,9 @@ public:
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble;
}
+
+ // PPC32 supports atomics up to 4 bytes.
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
}
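MaxAtomicInlineWidth decides whether a C11 atomic operation is inlined or lowered to a runtime library call; a hedged C illustration of the distinction these two PPC hunks encode:

    #include <stdatomic.h>

    _Atomic long long v;          /* an 8-byte atomic */
    void bump(void) {
      atomic_fetch_add(&v, 1);    /* PPC64 (width 64): inline ldarx/stdcx. loop;
                                     PPC32 (width 32): call into the runtime */
    }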
virtual BuiltinVaListKind getBuiltinVaListKind() const {
@@ -1007,13 +1057,20 @@ public:
IntMaxType = SignedLong;
UIntMaxType = UnsignedLong;
Int64Type = SignedLong;
- DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64";
if (getTriple().getOS() == llvm::Triple::FreeBSD) {
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble;
- }
+ DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f128:64:64-"
+ "v128:128:128-n32:64";
+ } else
+ DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v128:128:128-n32:64";
+
+ // PPC64 supports atomics up to 8 bytes.
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
virtual BuiltinVaListKind getBuiltinVaListKind() const {
return TargetInfo::CharPtrBuiltinVaList;
@@ -1047,6 +1104,8 @@ public:
: DarwinTargetInfo<PPC64TargetInfo>(triple) {
HasAlignMac68kSupport = true;
SuitableAlign = 128;
+ DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64";
}
};
} // end anonymous namespace.
@@ -1172,6 +1231,71 @@ namespace {
}
namespace {
+
+static const unsigned R600AddrSpaceMap[] = {
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 1, // cuda_device
+ 2, // cuda_constant
+ 3 // cuda_shared
+};
+
+class R600TargetInfo : public TargetInfo {
+public:
+ R600TargetInfo(const std::string& triple) : TargetInfo(triple) {
+ DescriptionString =
+ "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16"
+ "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
+ "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
+ "-v96:128:128-v128:128:128-v192:256:256-v256:256:256"
+ "-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+ "-n8:16:32:64";
+ AddrSpaceMap = &R600AddrSpaceMap;
+ }
+
+ virtual const char * getClobbers() const {
+ return "";
+ }
+
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &numNames) const {
+ Names = NULL;
+ numNames = 0;
+ }
+
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = NULL;
+ NumAliases = 0;
+ }
+
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const {
+ return true;
+ }
+
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = NULL;
+ NumRecords = 0;
+ }
+
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__R600__");
+ }
+
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+
+};
+
+} // end anonymous namespace
+
+namespace {
// MBlaze abstract base class
class MBlazeTargetInfo : public TargetInfo {
static const char * const GCCRegNames[];
@@ -1351,10 +1475,12 @@ class X86TargetInfo : public TargetInfo {
bool HasBMI;
bool HasBMI2;
bool HasPOPCNT;
+ bool HasRTM;
bool HasSSE4a;
bool HasFMA4;
bool HasFMA;
bool HasXOP;
+ bool HasF16C;
/// \brief Enumeration of all of the X86 CPUs supported by Clang.
///
@@ -1500,8 +1626,9 @@ public:
X86TargetInfo(const std::string& triple)
: TargetInfo(triple), SSELevel(NoSSE), MMX3DNowLevel(NoMMX3DNow),
HasAES(false), HasPCLMUL(false), HasLZCNT(false), HasRDRND(false),
- HasBMI(false), HasBMI2(false), HasPOPCNT(false), HasSSE4a(false),
- HasFMA4(false), HasFMA(false), HasXOP(false), CPU(CK_Generic) {
+ HasBMI(false), HasBMI2(false), HasPOPCNT(false), HasRTM(false),
+ HasSSE4a(false), HasFMA4(false), HasFMA(false), HasXOP(false),
+ HasF16C(false), CPU(CK_Generic) {
BigEndian = false;
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
}
@@ -1544,9 +1671,10 @@ public:
virtual bool hasFeature(StringRef Feature) const;
virtual void HandleTargetFeatures(std::vector<std::string> &Features);
virtual const char* getABI() const {
- if (PointerWidth == 64 && SSELevel >= AVX)
+ if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX)
return "avx";
- else if (PointerWidth == 32 && MMX3DNowLevel == NoMMX3DNow)
+ else if (getTriple().getArch() == llvm::Triple::x86 &&
+ MMX3DNowLevel == NoMMX3DNow)
return "no-mmx";
return "";
}
@@ -1640,7 +1768,7 @@ public:
case CK_AthlonMP:
case CK_Geode:
// Only accept certain architectures when compiling in 32-bit mode.
- if (PointerWidth != 32)
+ if (getTriple().getArch() != llvm::Triple::x86)
return false;
// Fallthrough
@@ -1668,6 +1796,19 @@ public:
}
llvm_unreachable("Unhandled CPU kind");
}
+
+ virtual CallingConvCheckResult checkCallingConvention(CallingConv CC) const {
+ // We accept all non-ARM calling conventions
+ return (CC == CC_X86ThisCall ||
+ CC == CC_X86FastCall ||
+ CC == CC_X86StdCall ||
+ CC == CC_C ||
+ CC == CC_X86Pascal) ? CCCR_OK : CCCR_Warning;
+ }
+
+ virtual CallingConv getDefaultCallingConv() const {
+ return CC_C;
+ }
};
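checkCallingConvention is what lets Sema warn about convention attributes that do not fit the target; a hedged C illustration of the user-visible effect:

    /* Accepted silently when the x86 hook answers CCCR_OK: */
    void __attribute__((fastcall)) f(int a, int b);

    /* A target whose hook answers CCCR_Warning for CC_X86FastCall instead
       diagnoses the attribute as ignored rather than miscompiling. */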
void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
@@ -1691,14 +1832,16 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
Features["bmi"] = false;
Features["bmi2"] = false;
Features["popcnt"] = false;
+ Features["rtm"] = false;
Features["fma4"] = false;
Features["fma"] = false;
Features["xop"] = false;
+ Features["f16c"] = false;
// FIXME: This *really* should not be here.
// X86_64 always has SSE2.
- if (PointerWidth == 64)
+ if (getTriple().getArch() == llvm::Triple::x86_64)
Features["sse2"] = Features["sse"] = Features["mmx"] = true;
switch (CPU) {
@@ -1770,6 +1913,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabled(Features, "rdrnd", true);
setFeatureEnabled(Features, "bmi", true);
setFeatureEnabled(Features, "bmi2", true);
+ setFeatureEnabled(Features, "rtm", true);
setFeatureEnabled(Features, "fma", true);
break;
case CK_K6:
@@ -1904,6 +2048,10 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
Features["bmi2"] = true;
else if (Name == "popcnt")
Features["popcnt"] = true;
+ else if (Name == "f16c")
+ Features["f16c"] = true;
+ else if (Name == "rtm")
+ Features["rtm"] = true;
} else {
if (Name == "mmx")
Features["mmx"] = Features["3dnow"] = Features["3dnowa"] = false;
@@ -1964,6 +2112,10 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
Features["fma4"] = Features["xop"] = false;
else if (Name == "xop")
Features["xop"] = false;
+ else if (Name == "f16c")
+ Features["f16c"] = false;
+ else if (Name == "rtm")
+ Features["rtm"] = false;
}
return true;
@@ -2015,6 +2167,11 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
continue;
}
+ if (Feature == "rtm") {
+ HasRTM = true;
+ continue;
+ }
+
if (Feature == "sse4a") {
HasSSE4a = true;
continue;
@@ -2035,6 +2192,11 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
continue;
}
+ if (Feature == "f16c") {
+ HasF16C = true;
+ continue;
+ }
+
assert(Features[i][0] == '+' && "Invalid target feature!");
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
.Case("avx2", AVX2)
@@ -2071,7 +2233,7 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
- if (PointerWidth == 64) {
+ if (getTriple().getArch() == llvm::Triple::x86_64) {
Builder.defineMacro("__amd64__");
Builder.defineMacro("__amd64");
Builder.defineMacro("__x86_64");
@@ -2231,6 +2393,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasPOPCNT)
Builder.defineMacro("__POPCNT__");
+ if (HasRTM)
+ Builder.defineMacro("__RTM__");
+
if (HasSSE4a)
Builder.defineMacro("__SSE4A__");
@@ -2243,6 +2408,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasXOP)
Builder.defineMacro("__XOP__");
+ if (HasF16C)
+ Builder.defineMacro("__F16C__");
+
// Each case falls through to the previous one here.
switch (SSELevel) {
case AVX2:
@@ -2267,7 +2435,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
break;
}
- if (Opts.MicrosoftExt && PointerWidth == 32) {
+ if (Opts.MicrosoftExt && getTriple().getArch() == llvm::Triple::x86) {
switch (SSELevel) {
case AVX2:
case AVX:
@@ -2315,6 +2483,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("mmx", MMX3DNowLevel >= MMX)
.Case("pclmul", HasPCLMUL)
.Case("popcnt", HasPOPCNT)
+ .Case("rtm", HasRTM)
.Case("sse", SSELevel >= SSE1)
.Case("sse2", SSELevel >= SSE2)
.Case("sse3", SSELevel >= SSE3)
@@ -2323,9 +2492,10 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("sse42", SSELevel >= SSE42)
.Case("sse4a", HasSSE4a)
.Case("x86", true)
- .Case("x86_32", PointerWidth == 32)
- .Case("x86_64", PointerWidth == 64)
+ .Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
+ .Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
.Case("xop", HasXOP)
+ .Case("f16c", HasF16C)
.Default(false);
}
@@ -2595,7 +2765,9 @@ public:
SizeType = UnsignedLong;
IntPtrType = SignedLong;
PtrDiffType = SignedLong;
+ ProcessIDType = SignedLong;
this->UserLabelPrefix = "";
+ this->TLSSupported = false;
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
@@ -2703,6 +2875,15 @@ public:
if (RegNo == 1) return 1;
return -1;
}
+
+ virtual CallingConvCheckResult checkCallingConvention(CallingConv CC) const {
+ return TargetInfo::checkCallingConvention(CC);
+ }
+
+ virtual CallingConv getDefaultCallingConv() const {
+ return CC_Default;
+ }
+
};
} // end anonymous namespace
@@ -2820,14 +3001,14 @@ namespace {
class ARMTargetInfo : public TargetInfo {
// Possible FPU choices.
enum FPUMode {
- NoFPU,
- VFP2FPU,
- VFP3FPU,
- NeonFPU
+ VFP2FPU = (1 << 0),
+ VFP3FPU = (1 << 1),
+ VFP4FPU = (1 << 2),
+ NeonFPU = (1 << 3)
};
static bool FPUModeIsVFP(FPUMode Mode) {
- return Mode >= VFP2FPU && Mode <= NeonFPU;
+ return Mode & (VFP2FPU | VFP3FPU | VFP4FPU | NeonFPU);
}
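Turning FPUMode into a bitmask lets one CPU advertise several FPU generations at once (Swift, added below, has both VFP4 and NEON), which the old ordered enum could not express. A generic C++ sketch of the pattern:

    enum Feature { VFP2 = 1 << 0, VFP3 = 1 << 1, VFP4 = 1 << 2, Neon = 1 << 3 };

    unsigned FPU = 0;
    FPU |= VFP4;  // "+vfp4"
    FPU |= Neon;  // "+neon" -- both facts are now recorded
    bool AnyVFP = FPU & (VFP2 | VFP3 | VFP4 | Neon);  // replaces the range test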
static const TargetInfo::GCCRegAlias GCCRegAliases[];
@@ -2835,8 +3016,9 @@ class ARMTargetInfo : public TargetInfo {
std::string ABI, CPU;
- unsigned FPU : 3;
+ unsigned FPU : 4;
+ unsigned IsAAPCS : 1;
unsigned IsThumb : 1;
// Initialized via features.
@@ -2847,7 +3029,7 @@ class ARMTargetInfo : public TargetInfo {
public:
ARMTargetInfo(const std::string &TripleStr)
- : TargetInfo(TripleStr), ABI("aapcs-linux"), CPU("arm1136j-s")
+ : TargetInfo(TripleStr), ABI("aapcs-linux"), CPU("arm1136j-s"), IsAAPCS(true)
{
BigEndian = false;
SizeType = UnsignedInt;
@@ -2910,6 +3092,8 @@ public:
/// gcc.
ZeroLengthBitfieldBoundary = 32;
+ IsAAPCS = false;
+
if (IsThumb) {
// Thumb1 add sp, #imm requires the immediate value be multiple of 4,
// so set preferred for small types to 32.
@@ -2923,10 +3107,11 @@ public:
}
// FIXME: Override "preferred align" for double and long long.
- } else if (Name == "aapcs") {
+ } else if (Name == "aapcs" || Name == "aapcs-vfp") {
+ IsAAPCS = true;
// FIXME: Enumerated types are variable width in straight AAPCS.
} else if (Name == "aapcs-linux") {
- ;
+ IsAAPCS = true;
} else
return false;
@@ -2936,16 +3121,21 @@ public:
void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
if (CPU == "arm1136jf-s" || CPU == "arm1176jzf-s" || CPU == "mpcore")
Features["vfp2"] = true;
- else if (CPU == "cortex-a8" || CPU == "cortex-a9")
+ else if (CPU == "cortex-a8" || CPU == "cortex-a15" ||
+ CPU == "cortex-a9" || CPU == "cortex-a9-mp")
+ Features["neon"] = true;
+ else if (CPU == "swift") {
+ Features["vfp4"] = true;
Features["neon"] = true;
+ }
}
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
StringRef Name,
bool Enabled) const {
if (Name == "soft-float" || Name == "soft-float-abi" ||
- Name == "vfp2" || Name == "vfp3" || Name == "neon" || Name == "d16" ||
- Name == "neonfp") {
+ Name == "vfp2" || Name == "vfp3" || Name == "vfp4" || Name == "neon" ||
+ Name == "d16" || Name == "neonfp") {
Features[Name] = Enabled;
} else
return false;
@@ -2954,7 +3144,7 @@ public:
}
virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
- FPU = NoFPU;
+ FPU = 0;
SoftFloat = SoftFloatABI = false;
for (unsigned i = 0, e = Features.size(); i != e; ++i) {
if (Features[i] == "+soft-float")
@@ -2962,11 +3152,13 @@ public:
else if (Features[i] == "+soft-float-abi")
SoftFloatABI = true;
else if (Features[i] == "+vfp2")
- FPU = VFP2FPU;
+ FPU |= VFP2FPU;
else if (Features[i] == "+vfp3")
- FPU = VFP3FPU;
+ FPU |= VFP3FPU;
+ else if (Features[i] == "+vfp4")
+ FPU |= VFP4FPU;
else if (Features[i] == "+neon")
- FPU = NeonFPU;
+ FPU |= NeonFPU;
}
// Remove front-end specific options which the backend handles differently.
@@ -2988,6 +3180,7 @@ public:
StringRef(getCPUDefineSuffix(CPU)).startswith("7"))
.Default(false);
}
+ // FIXME: Should we actually have some table instead of these switches?
static const char *getCPUDefineSuffix(StringRef Name) {
return llvm::StringSwitch<const char*>(Name)
.Cases("arm8", "arm810", "4")
@@ -3004,12 +3197,19 @@ public:
.Cases("arm1176jz-s", "arm1176jzf-s", "6ZK")
.Cases("arm1136jf-s", "mpcorenovfp", "mpcore", "6K")
.Cases("arm1156t2-s", "arm1156t2f-s", "6T2")
- .Cases("cortex-a8", "cortex-a9", "7A")
- .Case("cortex-m3", "7M")
- .Case("cortex-m4", "7M")
+ .Cases("cortex-a8", "cortex-a9", "cortex-a15", "7A")
+ .Case("cortex-a9-mp", "7F")
+ .Case("swift", "7S")
+ .Cases("cortex-m3", "cortex-m4", "7M")
.Case("cortex-m0", "6M")
.Default(0);
}
+ static const char *getCPUProfile(StringRef Name) {
+ return llvm::StringSwitch<const char*>(Name)
+ .Cases("cortex-a8", "cortex-a9", "A")
+ .Cases("cortex-m3", "cortex-m4", "cortex-m0", "M")
+ .Default("");
+ }
virtual bool setCPU(const std::string &Name) {
if (!getCPUDefineSuffix(Name))
return false;
@@ -3030,7 +3230,11 @@ public:
StringRef CPUArch = getCPUDefineSuffix(CPU);
Builder.defineMacro("__ARM_ARCH_" + CPUArch + "__");
-
+ Builder.defineMacro("__ARM_ARCH", CPUArch.substr(0, 1));
+ StringRef CPUProfile = getCPUProfile(CPU);
+ if (!CPUProfile.empty())
+ Builder.defineMacro("__ARM_ARCH_PROFILE", CPUProfile);
+
// Subtarget options.
// FIXME: It's more complicated than this and we don't really support
@@ -3038,8 +3242,15 @@ public:
if ('5' <= CPUArch[0] && CPUArch[0] <= '7')
Builder.defineMacro("__THUMB_INTERWORK__");
- if (ABI == "aapcs" || ABI == "aapcs-linux")
- Builder.defineMacro("__ARM_EABI__");
+ if (ABI == "aapcs" || ABI == "aapcs-linux" || ABI == "aapcs-vfp") {
+ // M-class CPUs on Darwin follow AAPCS, but not EABI.
+ if (!(getTriple().isOSDarwin() && CPUProfile == "M"))
+ Builder.defineMacro("__ARM_EABI__");
+ Builder.defineMacro("__ARM_PCS", "1");
+
+ if ((!SoftFloat && !SoftFloatABI) || ABI == "aapcs-vfp")
+ Builder.defineMacro("__ARM_PCS_VFP", "1");
+ }
if (SoftFloat)
Builder.defineMacro("__SOFTFP__");
@@ -3058,14 +3269,21 @@ public:
// Note, this is always on in gcc, even though it doesn't make sense.
Builder.defineMacro("__APCS_32__");
- if (FPUModeIsVFP((FPUMode) FPU))
+ if (FPUModeIsVFP((FPUMode) FPU)) {
Builder.defineMacro("__VFP_FP__");
-
+ if (FPU & VFP2FPU)
+ Builder.defineMacro("__ARM_VFPV2__");
+ if (FPU & VFP3FPU)
+ Builder.defineMacro("__ARM_VFPV3__");
+ if (FPU & VFP4FPU)
+ Builder.defineMacro("__ARM_VFPV4__");
+ }
+
// This only gets set when Neon instructions are actually available, unlike
// the VFP define, hence the soft float and arch check. This is subtly
// different from gcc, we follow the intent which was that it should be set
// when Neon instructions are actually available.
- if (FPU == NeonFPU && !SoftFloat && IsARMv7)
+ if ((FPU & NeonFPU) && !SoftFloat && IsARMv7)
Builder.defineMacro("__ARM_NEON__");
}
virtual void getTargetBuiltins(const Builtin::Info *&Records,
@@ -3075,7 +3293,7 @@ public:
}
virtual bool isCLZForZeroUndef() const { return false; }
virtual BuiltinVaListKind getBuiltinVaListKind() const {
- return TargetInfo::VoidPtrBuiltinVaList;
+ return IsAAPCS ? AAPCSABIBuiltinVaList : TargetInfo::VoidPtrBuiltinVaList;
}
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
@@ -3127,10 +3345,38 @@ public:
}
return R;
}
+ virtual bool validateConstraintModifier(StringRef Constraint,
+ const char Modifier,
+ unsigned Size) const {
+ // Strip off constraint modifiers.
+ while (Constraint[0] == '=' ||
+ Constraint[0] == '+' ||
+ Constraint[0] == '&')
+ Constraint = Constraint.substr(1);
+
+ switch (Constraint[0]) {
+ default: break;
+ case 'r': {
+ switch (Modifier) {
+ default:
+ return Size == 32;
+ case 'q':
+ // A register of size 32 cannot fit a vector type.
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
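A hedged illustration of what the new hook rejects (diagnostic wording assumed): constraint "r" supplies a 32-bit core register, so a NEON quad view of the operand cannot be satisfied:

    int x;
    /* The 'q' modifier asks for a 128-bit register; with "r" the hook
       returns false and the asm operand is diagnosed as invalid. */
    asm("vadd.i32 %q0, %q1, %q1" : "=r"(x) : "r"(x));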
virtual const char *getClobbers() const {
// FIXME: Is this really right?
return "";
}
+
+ virtual CallingConvCheckResult checkCallingConvention(CallingConv CC) const {
+ return (CC == CC_AAPCS || CC == CC_AAPCS_VFP) ? CCCR_OK : CCCR_Warning;
+ }
};
const char * const ARMTargetInfo::GCCRegNames[] = {
@@ -3701,8 +3947,12 @@ public:
Features[CPU] = true;
}
- virtual void getArchDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "mips", Opts);
+ Builder.defineMacro("_mips");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
switch (FloatABI) {
case HardFloat:
Builder.defineMacro("__mips_hard_float", Twine(1));
@@ -3736,10 +3986,11 @@ public:
Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0)));
Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth()));
Builder.defineMacro("_MIPS_SZLONG", Twine(getLongWidth()));
+
+ Builder.defineMacro("_MIPS_ARCH", "\"" + CPU + "\"");
+ Builder.defineMacro("_MIPS_ARCH_" + StringRef(CPU).upper());
}
- virtual void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const = 0;
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
Records = BuiltinInfo;
@@ -3859,9 +4110,9 @@ public:
} else
return false;
}
- virtual void getArchDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- MipsTargetInfoBase::getArchDefines(Opts, Builder);
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ MipsTargetInfoBase::getTargetDefines(Opts, Builder);
if (ABI == "o32") {
Builder.defineMacro("__mips_o32");
@@ -3921,12 +4172,9 @@ public:
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- DefineStd(Builder, "mips", Opts);
- Builder.defineMacro("_mips");
DefineStd(Builder, "MIPSEB", Opts);
Builder.defineMacro("_MIPSEB");
- Builder.defineMacro("__REGISTER_PREFIX__", "");
- getArchDefines(Opts, Builder);
+ Mips32TargetInfoBase::getTargetDefines(Opts, Builder);
}
};
@@ -3939,12 +4187,9 @@ public:
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- DefineStd(Builder, "mips", Opts);
- Builder.defineMacro("_mips");
DefineStd(Builder, "MIPSEL", Opts);
Builder.defineMacro("_MIPSEL");
- Builder.defineMacro("__REGISTER_PREFIX__", "");
- getArchDefines(Opts, Builder);
+ Mips32TargetInfoBase::getTargetDefines(Opts, Builder);
}
};
@@ -3974,9 +4219,12 @@ public:
return true;
}
- virtual void getArchDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- MipsTargetInfoBase::getArchDefines(Opts, Builder);
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ MipsTargetInfoBase::getTargetDefines(Opts, Builder);
+
+ Builder.defineMacro("__mips64");
+ Builder.defineMacro("__mips64__");
if (ABI == "n32") {
Builder.defineMacro("__mips_n32");
@@ -4048,12 +4296,9 @@ public:
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- DefineStd(Builder, "mips", Opts);
- Builder.defineMacro("_mips");
DefineStd(Builder, "MIPSEB", Opts);
Builder.defineMacro("_MIPSEB");
- Builder.defineMacro("__REGISTER_PREFIX__", "");
- getArchDefines(Opts, Builder);
+ Mips64TargetInfoBase::getTargetDefines(Opts, Builder);
}
};
@@ -4075,12 +4320,9 @@ public:
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- DefineStd(Builder, "mips", Opts);
- Builder.defineMacro("_mips");
DefineStd(Builder, "MIPSEL", Opts);
Builder.defineMacro("_MIPSEL");
- Builder.defineMacro("__REGISTER_PREFIX__", "");
- getArchDefines(Opts, Builder);
+ Mips64TargetInfoBase::getTargetDefines(Opts, Builder);
}
};
} // end anonymous namespace.
@@ -4118,15 +4360,7 @@ public:
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
- if (Opts.POSIXThreads)
- Builder.defineMacro("_REENTRANT");
- if (Opts.CPlusPlus)
- Builder.defineMacro("_GNU_SOURCE");
-
Builder.defineMacro("__LITTLE_ENDIAN__");
- Builder.defineMacro("__native_client__");
getArchDefines(Opts, Builder);
}
virtual bool hasFeature(StringRef Feature) const {
@@ -4199,6 +4433,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new BitrigTargetInfo<ARMTargetInfo>(T);
case llvm::Triple::RTEMS:
return new RTEMSTargetInfo<ARMTargetInfo>(T);
+ case llvm::Triple::NativeClient:
+ return new NaClTargetInfo<ARMTargetInfo>(T);
default:
return new ARMTargetInfo(T);
}
@@ -4269,7 +4505,7 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::le32:
switch (os) {
case llvm::Triple::NativeClient:
- return new PNaClTargetInfo(T);
+ return new NaClTargetInfo<PNaClTargetInfo>(T);
default:
return NULL;
}
@@ -4316,6 +4552,9 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::mblaze:
return new MBlazeTargetInfo(T);
+ case llvm::Triple::r600:
+ return new R600TargetInfo(T);
+
case llvm::Triple::sparc:
switch (os) {
case llvm::Triple::Linux:
@@ -4374,6 +4613,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new HaikuX86_32TargetInfo(T);
case llvm::Triple::RTEMS:
return new RTEMSX86_32TargetInfo(T);
+ case llvm::Triple::NativeClient:
+ return new NaClTargetInfo<X86_32TargetInfo>(T);
default:
return new X86_32TargetInfo(T);
}
@@ -4403,6 +4644,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new MinGWX86_64TargetInfo(T);
case llvm::Triple::Win32: // This is what Triple.h supports now.
return new VisualStudioWindowsX86_64TargetInfo(T);
+ case llvm::Triple::NativeClient:
+ return new NaClTargetInfo<X86_64TargetInfo>(T);
default:
return new X86_64TargetInfo(T);
}
@@ -4421,6 +4664,7 @@ TargetInfo *TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
Diags.Report(diag::err_target_unknown_triple) << Triple.str();
return 0;
}
+ Target->setTargetOpts(Opts);
// Set the target CPU if specified.
if (!Opts.CPU.empty() && !Target->setCPU(Opts.CPU)) {
@@ -4447,8 +4691,10 @@ TargetInfo *TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
// Apply the user specified deltas.
// First the enables.
- for (std::vector<std::string>::const_iterator it = Opts.Features.begin(),
- ie = Opts.Features.end(); it != ie; ++it) {
+ for (std::vector<std::string>::const_iterator
+ it = Opts.FeaturesAsWritten.begin(),
+ ie = Opts.FeaturesAsWritten.end();
+ it != ie; ++it) {
const char *Name = it->c_str();
if (Name[0] != '+')
@@ -4462,8 +4708,10 @@ TargetInfo *TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
}
// Then the disables.
- for (std::vector<std::string>::const_iterator it = Opts.Features.begin(),
- ie = Opts.Features.end(); it != ie; ++it) {
+ for (std::vector<std::string>::const_iterator
+ it = Opts.FeaturesAsWritten.begin(),
+ ie = Opts.FeaturesAsWritten.end();
+ it != ie; ++it) {
const char *Name = it->c_str();
if (Name[0] == '+')
diff --git a/contrib/llvm/tools/clang/lib/Basic/Version.cpp b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
index 0d1dd31..a04ab93 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Version.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
@@ -32,7 +32,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- static StringRef SVNRepository("$URL: http://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
+ static StringRef SVNRepository("$URL: http://llvm.org/svn/llvm-project/cfe/branches/release_32/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
index 86f5380..da6d035 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -16,7 +16,7 @@
namespace llvm {
class Value;
class LLVMContext;
- class TargetData;
+ class DataLayout;
}
namespace clang {
@@ -70,46 +70,52 @@ namespace clang {
private:
Kind TheKind;
llvm::Type *TypeData;
- llvm::Type *PaddingType; // Currently allowed only for Direct.
+ llvm::Type *PaddingType;
unsigned UIntData;
bool BoolData0;
bool BoolData1;
bool InReg;
+ bool PaddingInReg;
ABIArgInfo(Kind K, llvm::Type *TD, unsigned UI, bool B0, bool B1, bool IR,
- llvm::Type* P)
+ bool PIR, llvm::Type* P)
: TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
- BoolData1(B1), InReg(IR) {}
+ BoolData1(B1), InReg(IR), PaddingInReg(PIR) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
llvm::Type *Padding = 0) {
- return ABIArgInfo(Direct, T, Offset, false, false, false, Padding);
+ return ABIArgInfo(Direct, T, Offset, false, false, false, false, Padding);
}
- static ABIArgInfo getDirectInReg(llvm::Type *T) {
- return ABIArgInfo(Direct, T, 0, false, false, true, 0);
+ static ABIArgInfo getDirectInReg(llvm::Type *T = 0) {
+ return ABIArgInfo(Direct, T, 0, false, false, true, false, 0);
}
static ABIArgInfo getExtend(llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T, 0, false, false, false, 0);
+ return ABIArgInfo(Extend, T, 0, false, false, false, false, 0);
}
static ABIArgInfo getExtendInReg(llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T, 0, false, false, true, 0);
+ return ABIArgInfo(Extend, T, 0, false, false, true, false, 0);
}
static ABIArgInfo getIgnore() {
- return ABIArgInfo(Ignore, 0, 0, false, false, false, 0);
+ return ABIArgInfo(Ignore, 0, 0, false, false, false, false, 0);
}
static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true
, bool Realign = false) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, false, 0);
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, false, false, 0);
}
static ABIArgInfo getIndirectInReg(unsigned Alignment, bool ByVal = true
, bool Realign = false) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, true, 0);
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, true, false, 0);
}
static ABIArgInfo getExpand() {
- return ABIArgInfo(Expand, 0, 0, false, false, false, 0);
+ return ABIArgInfo(Expand, 0, 0, false, false, false, false, 0);
+ }
+ static ABIArgInfo getExpandWithPadding(bool PaddingInReg,
+ llvm::Type *Padding) {
+ return ABIArgInfo(Expand, 0, 0, false, false, false, PaddingInReg,
+ Padding);
}
Kind getKind() const { return TheKind; }
@@ -133,6 +139,10 @@ namespace clang {
return PaddingType;
}
+ bool getPaddingInReg() const {
+ return PaddingInReg;
+ }
+
llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
@@ -178,7 +188,7 @@ namespace clang {
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
- const llvm::TargetData &getTargetData() const;
+ const llvm::DataLayout &getDataLayout() const;
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
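The widened constructor threads a PaddingInReg flag through every factory; a hedged sketch of a target ABI using the new variant (surrounding context invented; getVMContext is the accessor declared in this header):

    // Hypothetical classify hook: expand a small aggregate but spend one
    // integer register on padding so the remaining pieces stay aligned.
    llvm::Type *PadTy = llvm::Type::getInt32Ty(getVMContext());
    ABIArgInfo AI = ABIArgInfo::getExpandWithPadding(/*PaddingInReg=*/true, PadTy);
    assert(AI.getPaddingInReg() && AI.getPaddingType() == PadTy);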
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
index 0a1915b..62f87c9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -27,7 +27,7 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -54,36 +54,67 @@ class EmitAssemblyHelper {
mutable FunctionPassManager *PerFunctionPasses;
private:
- PassManager *getCodeGenPasses() const {
+ PassManager *getCodeGenPasses(TargetMachine *TM) const {
if (!CodeGenPasses) {
CodeGenPasses = new PassManager();
- CodeGenPasses->add(new TargetData(TheModule));
+ CodeGenPasses->add(new DataLayout(TheModule));
+ // Add TargetTransformInfo.
+ if (TM) {
+ TargetTransformInfo *TTI =
+ new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo());
+ CodeGenPasses->add(TTI);
+ }
}
return CodeGenPasses;
}
- PassManager *getPerModulePasses() const {
+ PassManager *getPerModulePasses(TargetMachine *TM) const {
if (!PerModulePasses) {
PerModulePasses = new PassManager();
- PerModulePasses->add(new TargetData(TheModule));
+ PerModulePasses->add(new DataLayout(TheModule));
+ if (TM) {
+ TargetTransformInfo *TTI =
+ new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo());
+ PerModulePasses->add(TTI);
+ }
}
return PerModulePasses;
}
- FunctionPassManager *getPerFunctionPasses() const {
+ FunctionPassManager *getPerFunctionPasses(TargetMachine *TM) const {
if (!PerFunctionPasses) {
PerFunctionPasses = new FunctionPassManager(TheModule);
- PerFunctionPasses->add(new TargetData(TheModule));
+ PerFunctionPasses->add(new DataLayout(TheModule));
+ if (TM) {
+ TargetTransformInfo *TTI =
+ new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo());
+ PerFunctionPasses->add(TTI);
+ }
}
return PerFunctionPasses;
}
- void CreatePasses();
+
+ void CreatePasses(TargetMachine *TM);
+
+ /// CreateTargetMachine - Generates the TargetMachine.
+ /// Returns null if it is unable to create the target machine.
+ /// Some of our clang tests specify triples which are not built
+ /// into clang. This is okay because these tests check the generated
+ /// IR, and they require DataLayout which depends on the triple.
+ /// In this case, we allow this method to fail and not report an error.
+ /// When MustCreateTM is used, we print an error if we are unable to load
+ /// the requested target.
+ TargetMachine *CreateTargetMachine(bool MustCreateTM);
/// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
///
/// \return True on success.
- bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
+ bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS,
+ TargetMachine *TM);
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
@@ -137,9 +168,9 @@ static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
PM.add(createThreadSanitizerPass());
}
-void EmitAssemblyHelper::CreatePasses() {
+void EmitAssemblyHelper::CreatePasses(TargetMachine *TM) {
unsigned OptLevel = CodeGenOpts.OptimizationLevel;
- CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining();
// Handle disabling of LLVM optimization, where we want to preserve the
// internal module before any optimization.
@@ -174,14 +205,14 @@ void EmitAssemblyHelper::CreatePasses() {
addBoundsCheckingPass);
}
- if (LangOpts.AddressSanitizer) {
- PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ if (LangOpts.SanitizeAddress) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addAddressSanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addAddressSanitizerPass);
}
- if (LangOpts.ThreadSanitizer) {
+ if (LangOpts.SanitizeThread) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addThreadSanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
@@ -218,38 +249,36 @@ void EmitAssemblyHelper::CreatePasses() {
break;
}
-
// Set up the per-function pass manager.
- FunctionPassManager *FPM = getPerFunctionPasses();
+ FunctionPassManager *FPM = getPerFunctionPasses(TM);
if (CodeGenOpts.VerifyModule)
FPM->add(createVerifierPass());
PMBuilder.populateFunctionPassManager(*FPM);
// Set up the per-module pass manager.
- PassManager *MPM = getPerModulePasses();
+ PassManager *MPM = getPerModulePasses(TM);
if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
CodeGenOpts.EmitGcovArcs,
TargetTriple.isMacOSX()));
- if (CodeGenOpts.DebugInfo == CodeGenOptions::NoDebugInfo)
+ if (CodeGenOpts.getDebugInfo() == CodeGenOptions::NoDebugInfo)
MPM->add(createStripSymbolsPass(true));
}
-
-
+
PMBuilder.populateModulePassManager(*MPM);
}
-bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
- formatted_raw_ostream &OS) {
+TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
// Create the TargetMachine for generating code.
std::string Error;
std::string Triple = TheModule->getTargetTriple();
const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
if (!TheTarget) {
- Diags.Report(diag::err_fe_unable_to_create_target) << Error;
- return false;
+ if (MustCreateTM)
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return 0;
}
// FIXME: Expose these capabilities via actual APIs!!!! Aside from just
@@ -361,7 +390,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
break;
case LangOptions::FPC_Fast:
Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
- break;
+ break;
}
Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
@@ -375,6 +404,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
Options.DisableTailCalls = CodeGenOpts.DisableTailCalls;
Options.TrapFuncName = CodeGenOpts.TrapFuncName;
Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
+ Options.SSPBufferSize = CodeGenOpts.SSPBufferSize;
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
FeaturesStr, Options,
@@ -391,15 +421,27 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
if (CodeGenOpts.NoExecStack)
TM->setMCNoExecStack(true);
+ return TM;
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ formatted_raw_ostream &OS,
+ TargetMachine *TM) {
+
// Create the code generator passes.
- PassManager *PM = getCodeGenPasses();
+ PassManager *PM = getCodeGenPasses(TM);
// Add LibraryInfo.
- TargetLibraryInfo *TLI = new TargetLibraryInfo();
+ llvm::Triple TargetTriple(TheModule->getTargetTriple());
+ TargetLibraryInfo *TLI = new TargetLibraryInfo(TargetTriple);
if (!CodeGenOpts.SimplifyLibCalls)
TLI->disableAllFunctions();
PM->add(TLI);
+ // Add TargetTransformInfo.
+ PM->add(new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo()));
+
// Normal mode, emit a .s or .o file by running the code generator. Note,
// this also adds codegenerator level optimization passes.
TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
@@ -430,23 +472,28 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
llvm::formatted_raw_ostream FormattedOS;
- CreatePasses();
+ bool UsesCodeGen = (Action != Backend_EmitNothing &&
+ Action != Backend_EmitBC &&
+ Action != Backend_EmitLL);
+ TargetMachine *TM = CreateTargetMachine(UsesCodeGen);
+ CreatePasses(TM);
+
switch (Action) {
case Backend_EmitNothing:
break;
case Backend_EmitBC:
- getPerModulePasses()->add(createBitcodeWriterPass(*OS));
+ getPerModulePasses(TM)->add(createBitcodeWriterPass(*OS));
break;
case Backend_EmitLL:
FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
- getPerModulePasses()->add(createPrintModulePass(&FormattedOS));
+ getPerModulePasses(TM)->add(createPrintModulePass(&FormattedOS));
break;
default:
FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
- if (!AddEmitPasses(Action, FormattedOS))
+ if (!AddEmitPasses(Action, FormattedOS, TM))
return;
}
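
A minimal sketch of the control flow introduced above, with names taken from the diff but the helper itself hypothetical: EmitAssembly now decides up front whether the requested action ever reaches the code generator, and only demands a TargetMachine in that case, which is why CreateTargetMachine may return 0 without a diagnostic when MustCreateTM is false.

// Sketch only; the enum values appear in the diff, usesCodeGen() does not.
static bool usesCodeGen(BackendAction Action) {
  // IR-only outputs (nothing, bitcode, textual IR) tolerate a null TM.
  return Action != Backend_EmitNothing &&
         Action != Backend_EmitBC &&
         Action != Backend_EmitLL;
}
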
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
index 37ef4af..6742f36 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -19,7 +19,7 @@
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallSet.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <algorithm>
using namespace clang;
@@ -27,7 +27,8 @@ using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
- HasCXXObject(false), UsesStret(false), StructureType(0), Block(block),
+ HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
+ StructureType(0), Block(block),
DominatingIP(0) {
// Skip asm prefix, if any. 'name' is usually taken directly from
@@ -56,7 +57,18 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
}
-/// Build the block descriptor constant for a block.
+/// buildBlockDescriptor - Build the block descriptor meta-data for a block.
+/// buildBlockDescriptor is accessed from the 5th field of the Block_literal
+/// meta-data and contains stationary information about the block literal.
+/// Its definition will have 4 (or optionally 6) words.
+/// struct Block_descriptor {
+/// unsigned long reserved;
+/// unsigned long size; // size of Block_literal metadata in bytes.
+/// void *copy_func_helper_decl; // optional copy helper.
+/// void *destroy_func_decl; // optional destructor helper.
+/// void *block_method_encoding_address; // @encode for block literal signature.
+/// void *block_layout_info; // encoding of captured block variables.
+/// };
static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
ASTContext &C = CGM.getContext();
@@ -92,8 +104,12 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
// GC layout.
- if (C.getLangOpts().ObjC1)
- elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ if (C.getLangOpts().ObjC1) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
+ elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ else
+ elements.push_back(CGM.getObjCRuntime().BuildRCBlockLayout(CGM, blockInfo));
+ }
else
elements.push_back(llvm::Constant::getNullValue(i8p));
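
For reference, the layout documented in the new comment above corresponds to a struct like the following. This is an illustration of that comment only, not a header shipped by the blocks runtime.

// Illustrative only; mirrors the doc comment in this hunk.
struct Block_descriptor {
  unsigned long reserved;
  unsigned long size;                  // size of the Block_literal in bytes
  // The two helpers below are present only when the literal carries
  // BLOCK_HAS_COPY_DISPOSE.
  void *copy_func_helper_decl;
  void *destroy_func_decl;
  void *block_method_encoding_address; // @encode of the invoke signature
  void *block_layout_info;             // captured-variable layout encoding
};
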
@@ -293,7 +309,10 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.CanBeGlobal = true;
return;
}
-
+ else if (C.getLangOpts().ObjC1 &&
+ CGM.getLangOpts().getGC() == LangOptions::NonGC)
+ info.HasCapturedVariableLayout = true;
+
// Collect the layout chunks.
SmallVector<BlockLayoutChunk, 16> layout;
layout.reserve(block->capturesCXXThis() +
@@ -652,6 +671,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Compute the initial on-stack block flags.
BlockFlags flags = BLOCK_HAS_SIGNATURE;
+ if (blockInfo.HasCapturedVariableLayout) flags |= BLOCK_HAS_EXTENDED_LAYOUT;
if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
@@ -1001,8 +1021,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
// Check if we should generate debug info for this block function.
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
+ maybeInitializeDebugInfo();
CurGD = GD;
BlockInfo = &blockInfo;
@@ -1135,7 +1154,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const VarDecl *variable = ci->getVariable();
DI->EmitLocation(Builder, variable->getLocation());
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) {
DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
@@ -1207,8 +1227,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
= &CGM.getContext().Idents.get("__copy_helper_block_");
// Check if we should generate debug info for this block helper function.
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
+ maybeInitializeDebugInfo();
FunctionDecl *FD = FunctionDecl::Create(C,
C.getTranslationUnitDecl(),
@@ -1243,7 +1262,8 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
const Expr *copyExpr = ci->getCopyExpr();
BlockFieldFlags flags;
- bool isARCWeakCapture = false;
+ bool useARCWeakCopy = false;
+ bool useARCStrongCopy = false;
if (copyExpr) {
assert(!ci->isByRef());
@@ -1256,21 +1276,35 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
} else if (type->isObjCRetainableType()) {
flags = BLOCK_FIELD_IS_OBJECT;
- if (type->isBlockPointerType())
+ bool isBlockPointer = type->isBlockPointerType();
+ if (isBlockPointer)
flags = BLOCK_FIELD_IS_BLOCK;
// Special rules for ARC captures:
if (getLangOpts().ObjCAutoRefCount) {
Qualifiers qs = type.getQualifiers();
- // Don't generate special copy logic for a captured object
- // unless it's __strong or __weak.
- if (!qs.hasStrongOrWeakObjCLifetime())
+ // We need to register __weak direct captures with the runtime.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ useARCWeakCopy = true;
+
+ // We need to retain the copied value for __strong direct captures.
+ } else if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // If it's a block pointer, we have to copy the block and
+ // assign that to the destination pointer, so we might as
+ // well use _Block_object_assign. Otherwise we can avoid that.
+ if (!isBlockPointer)
+ useARCStrongCopy = true;
+
+ // Otherwise the memcpy is fine.
+ } else {
continue;
+ }
- // Support __weak direct captures.
- if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
- isARCWeakCapture = true;
+ // Non-ARC captures of retainable pointers are strong and
+ // therefore require a call to _Block_object_assign.
+ } else {
+ // fall through
}
} else {
continue;
@@ -1283,14 +1317,36 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// If there's an explicit copy expression, we do that.
if (copyExpr) {
EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
- } else if (isARCWeakCapture) {
+ } else if (useARCWeakCopy) {
EmitARCCopyWeak(dstField, srcField);
} else {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
- srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
- llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
- Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
- llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
+ if (useARCStrongCopy) {
+ // At -O0, store null into the destination field (so that the
+ // storeStrong doesn't over-release) and then call storeStrong.
+ // This is a workaround for not having an initStrong call.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::PointerType *ty = cast<llvm::PointerType>(srcValue->getType());
+ llvm::Value *null = llvm::ConstantPointerNull::get(ty);
+ Builder.CreateStore(null, dstField);
+ EmitARCStoreStrongCall(dstField, srcValue, true);
+
+ // With optimization enabled, take advantage of the fact that
+ // the blocks runtime guarantees a memcpy of the block data, and
+ // just emit a retain of the src field.
+ } else {
+ EmitARCRetainNonBlock(srcValue);
+
+ // We don't need this anymore, so kill it. It's not quite
+ // worth the annoyance to avoid creating it in the first place.
+ cast<llvm::Instruction>(dstField)->eraseFromParent();
+ }
+ } else {
+ srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
+ llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
+ Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
+ }
}
}
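
The -O0 workaround in this hunk is easier to see in runtime terms. A rough model using the public ARC entry points (objc_storeStrong, objc_retain) rather than clang's emission helpers; the wrapper function and its parameters are hypothetical:

// Rough model of the copy-helper emission for a __strong, non-block capture;
// dst/src are the capture slots in the destination and source block literals.
extern "C" void objc_storeStrong(void **addr, void *value);
extern "C" void *objc_retain(void *value);

static void copyStrongCapture(void **dst, void *srcValue, bool optimized) {
  if (!optimized) {
    // objc_storeStrong releases the old value first; seed the destination
    // with null so nothing is over-released.
    *dst = nullptr;
    objc_storeStrong(dst, srcValue);
  } else {
    // The blocks runtime has already memcpy'd the literal, so the copied
    // slot holds srcValue; a bare retain balances the books.
    objc_retain(srcValue);
  }
}
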
@@ -1321,8 +1377,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
"__destroy_helper_block_", &CGM.getModule());
// Check if we should generate debug info for this block destroy function.
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
+ maybeInitializeDebugInfo();
IdentifierInfo *II
= &CGM.getContext().Idents.get("__destroy_helper_block_");
@@ -1356,7 +1411,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
BlockFieldFlags flags;
const CXXDestructorDecl *dtor = 0;
- bool isARCWeakCapture = false;
+ bool useARCWeakDestroy = false;
+ bool useARCStrongDestroy = false;
if (ci->isByRef()) {
flags = BLOCK_FIELD_IS_BYREF;
@@ -1382,7 +1438,11 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
// Support __weak direct captures.
if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
- isARCWeakCapture = true;
+ useARCWeakDestroy = true;
+
+ // Tools really want us to use objc_storeStrong here.
+ else
+ useARCStrongDestroy = true;
}
} else {
continue;
@@ -1396,9 +1456,13 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
PushDestructorCleanup(dtor, srcField);
// If this is a __weak capture, emit the release directly.
- } else if (isARCWeakCapture) {
+ } else if (useARCWeakDestroy) {
EmitARCDestroyWeak(srcField);
+ // Destroy strong objects with a call if requested.
+ } else if (useARCStrongDestroy) {
+ EmitARCDestroyStrong(srcField, /*precise*/ false);
+
// Otherwise we call _Block_object_dispose. It wouldn't be too
// hard to just emit this as a cleanup if we wanted to make sure
// that things were done in reverse.
@@ -1497,10 +1561,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
- llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
- value->setAlignment(Alignment.getQuantity());
-
- CGF.EmitARCRelease(value, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(field, /*precise*/ false);
}
void profileImpl(llvm::FoldingSetNodeID &id) const {
@@ -1530,10 +1591,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
- llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
- value->setAlignment(Alignment.getQuantity());
-
- CGF.EmitARCRelease(value, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(field, /*precise*/ false);
}
void profileImpl(llvm::FoldingSetNodeID &id) const {
@@ -1612,6 +1670,8 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SC_None,
false, false);
+ // Initialize debug info if necessary.
+ CGF.maybeInitializeDebugInfo();
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsCopy()) {
@@ -1682,6 +1742,8 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
SC_Static,
SC_None,
false, false);
+ // Initialize debug info if necessary.
+ CGF.maybeInitializeDebugInfo();
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsDispose()) {
@@ -1879,7 +1941,7 @@ llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
// And either 2 or 4 pointers.
CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
- CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+ CGM.getDataLayout().getTypeAllocSize(Int8PtrTy);
// Align the offset.
unsigned AlignedOffsetInBytes =
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
index 095cfdb..f85701a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -33,7 +33,7 @@ namespace llvm {
class Constant;
class Function;
class GlobalValue;
- class TargetData;
+ class DataLayout;
class FunctionType;
class PointerType;
class Value;
@@ -47,12 +47,24 @@ namespace CodeGen {
class CodeGenModule;
class CGBlockInfo;
-enum BlockFlag_t {
+// Flags stored in __block variables.
+enum BlockByrefFlags {
+ BLOCK_BYREF_HAS_COPY_DISPOSE = (1 << 25), // compiler
+ BLOCK_BYREF_LAYOUT_MASK = (0xF << 28), // compiler
+ BLOCK_BYREF_LAYOUT_EXTENDED = (1 << 28),
+ BLOCK_BYREF_LAYOUT_NON_OBJECT = (2 << 28),
+ BLOCK_BYREF_LAYOUT_STRONG = (3 << 28),
+ BLOCK_BYREF_LAYOUT_WEAK = (4 << 28),
+ BLOCK_BYREF_LAYOUT_UNRETAINED = (5 << 28)
+};
+
+enum BlockLiteralFlags {
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CXX_OBJ = (1 << 26),
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_USE_STRET = (1 << 29),
- BLOCK_HAS_SIGNATURE = (1 << 30)
+ BLOCK_HAS_SIGNATURE = (1 << 30),
+ BLOCK_HAS_EXTENDED_LAYOUT = (1 << 31)
};
class BlockFlags {
uint32_t flags;
@@ -60,7 +72,7 @@ class BlockFlags {
BlockFlags(uint32_t flags) : flags(flags) {}
public:
BlockFlags() : flags(0) {}
- BlockFlags(BlockFlag_t flag) : flags(flag) {}
+ BlockFlags(BlockLiteralFlags flag) : flags(flag) {}
uint32_t getBitMask() const { return flags; }
bool empty() const { return flags == 0; }
@@ -76,7 +88,7 @@ public:
return (l.flags & r.flags);
}
};
-inline BlockFlags operator|(BlockFlag_t l, BlockFlag_t r) {
+inline BlockFlags operator|(BlockLiteralFlags l, BlockLiteralFlags r) {
return BlockFlags(l) | BlockFlags(r);
}
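
Usage is unchanged by the rename. The new bit composes through BlockFlags exactly as in the EmitBlockLiteral hunk earlier in this diff:

// Mirrors the flag construction in EmitBlockLiteral (CGBlocks.cpp above).
BlockFlags flags = BLOCK_HAS_SIGNATURE;
if (blockInfo.HasCapturedVariableLayout)
  flags |= BLOCK_HAS_EXTENDED_LAYOUT;   // new (1 << 31) bit in this change
if (blockInfo.NeedsCopyDispose)
  flags |= BLOCK_HAS_COPY_DISPOSE;
uint32_t bits = flags.getBitMask();     // raw value stored into the literal
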
@@ -182,6 +194,10 @@ public:
/// UsesStret : True if the block uses an stret return. Mutable
/// because it gets set later in the block-creation process.
mutable bool UsesStret : 1;
+
+ /// HasCapturedVariableLayout : True if the block has captured variables
+ /// and their layout meta-data has been generated.
+ bool HasCapturedVariableLayout : 1;
/// The mapping of allocated indexes within the block.
llvm::DenseMap<const VarDecl*, Capture> Captures;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
index 59ed313..e8c05d3 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -86,8 +86,7 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
@@ -121,8 +120,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
@@ -148,7 +146,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
assert(ValTyP && "isn't scalar fp type!");
-
+
StringRef FnName;
switch (ValTyP->getKind()) {
default: llvm_unreachable("Isn't a scalar fp type!");
@@ -156,7 +154,7 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
case BuiltinType::Double: FnName = "fabs"; break;
case BuiltinType::LongDouble: FnName = "fabsl"; break;
}
-
+
// The prototype is something that takes and returns whatever V's type is.
llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
false);
@@ -214,7 +212,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
DstPtr, SrcPtr));
}
- case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_abs:
case Builtin::BI__builtin_labs:
case Builtin::BI__builtin_llabs: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
@@ -229,18 +227,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
-
+
case Builtin::BI__builtin_conj:
case Builtin::BI__builtin_conjf:
case Builtin::BI__builtin_conjl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
Value *Real = ComplexVal.first;
Value *Imag = ComplexVal.second;
- Value *Zero =
- Imag->getType()->isFPOrFPVectorTy()
+ Value *Zero =
+ Imag->getType()->isFPOrFPVectorTy()
? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
: llvm::Constant::getNullValue(Imag->getType());
-
+
Imag = Builder.CreateFSub(Zero, Imag, "sub");
return RValue::getComplex(std::make_pair(Real, Imag));
}
@@ -250,14 +248,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.first);
}
-
+
case Builtin::BI__builtin_cimag:
case Builtin::BI__builtin_cimagf:
case Builtin::BI__builtin_cimagl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.second);
}
-
+
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
@@ -356,6 +354,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"expval");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
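
The newly handled __builtin_bswap16 behaves like its wider siblings. A quick self-check, assuming (as holds in clang) that the builtin constant-folds:

// Both asserts fold at compile time in clang.
static_assert(__builtin_bswap16(0x1234) == 0x3412, "bswap16 swaps the bytes");
static_assert(__builtin_bswap32(0x12345678u) == 0x78563412u, "bswap32");
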
@@ -371,15 +370,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// We pass this builtin onto the optimizer so that it can
// figure out the object size in more complex cases.
llvm::Type *ResType = ConvertType(E->getType());
-
+
// LLVM only supports 0 and 2, make sure that we pass along that
// as a boolean.
Value *Ty = EmitScalarExpr(E->getArg(1));
ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
assert(CI);
uint64_t val = CI->getZExtValue();
- CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
-
+ CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
+
Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)),CI));
}
@@ -402,9 +401,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::trap);
return RValue::get(Builder.CreateCall(F));
}
+ case Builtin::BI__debugbreak: {
+ Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
+ return RValue::get(Builder.CreateCall(F));
+ }
case Builtin::BI__builtin_unreachable: {
- if (CatchUndefined)
- EmitBranch(getTrapBB());
+ if (getLangOpts().SanitizeUnreachable)
+ EmitCheck(Builder.getFalse(), "builtin_unreachable",
+ EmitCheckSourceLocation(E->getExprLoc()),
+ llvm::ArrayRef<llvm::Value *>());
else
Builder.CreateUnreachable();
@@ -413,7 +418,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
-
+
case Builtin::BI__builtin_powi:
case Builtin::BI__builtin_powif:
case Builtin::BI__builtin_powil: {
@@ -464,16 +469,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
V = Builder.CreateFCmpUNO(V, V, "cmp");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
-
+
case Builtin::BI__builtin_isinf: {
// isinf(x) --> fabs(x) == infinity
Value *V = EmitScalarExpr(E->getArg(0));
V = EmitFAbs(*this, V, E->getArg(0)->getType());
-
+
V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
-
+
// TODO: BI__builtin_isinf_sign
// isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0
@@ -499,11 +504,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// isfinite(x) --> x == x && fabs(x) != infinity;
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
-
+
Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
Value *IsNotInf =
Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
-
+
V = Builder.CreateAnd(Eq, IsNotInf, "and");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
@@ -565,7 +570,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.SetInsertPoint(End);
return RValue::get(Result);
}
-
+
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
@@ -573,85 +578,90 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
- Value *Address = EmitScalarExpr(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- unsigned Align = GetPointeeAlignment(E->getArg(0));
- Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, Align, false);
- return RValue::get(Address);
+ Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
+ Dest.second, false);
+ return RValue::get(Dest.first);
}
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
- GetPointeeAlignment(E->getArg(1)));
- Builder.CreateMemCpy(Address, SrcAddr, SizeVal, Align, false);
- return RValue::get(Address);
+ unsigned Align = std::min(Dest.second, Src.second);
+ Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
+ return RValue::get(Dest.first);
}
-
+
case Builtin::BI__builtin___memcpy_chk: {
- // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
+ // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
- Value *Dest = EmitScalarExpr(E->getArg(0));
- Value *Src = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
- GetPointeeAlignment(E->getArg(1)));
- Builder.CreateMemCpy(Dest, Src, SizeVal, Align, false);
- return RValue::get(Dest);
+ unsigned Align = std::min(Dest.second, Src.second);
+ Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
+ return RValue::get(Dest.first);
}
-
+
case Builtin::BI__builtin_objc_memmove_collectable: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
Address, SrcAddr, SizeVal);
return RValue::get(Address);
}
case Builtin::BI__builtin___memmove_chk: {
- // fold __builtin_memmove_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
+ // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
- Value *Dest = EmitScalarExpr(E->getArg(0));
- Value *Src = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
- GetPointeeAlignment(E->getArg(1)));
- Builder.CreateMemMove(Dest, Src, SizeVal, Align, false);
- return RValue::get(Dest);
+ unsigned Align = std::min(Dest.second, Src.second);
+ Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
+ return RValue::get(Dest.first);
}
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
- GetPointeeAlignment(E->getArg(1)));
- Builder.CreateMemMove(Address, SrcAddr, SizeVal, Align, false);
- return RValue::get(Address);
+ unsigned Align = std::min(Dest.second, Src.second);
+ Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
+ return RValue::get(Dest.first);
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
- Value *Address = EmitScalarExpr(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = GetPointeeAlignment(E->getArg(0));
- Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
- return RValue::get(Address);
+ Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
+ return RValue::get(Dest.first);
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
@@ -661,14 +671,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
if (Size.ugt(DstSize))
break;
- Value *Address = EmitScalarExpr(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = GetPointeeAlignment(E->getArg(0));
- Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
-
- return RValue::get(Address);
+ Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
+ return RValue::get(Dest.first);
}
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
@@ -682,7 +691,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
int32_t Offset = 0;
Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
- return RValue::get(Builder.CreateCall(F,
+ return RValue::get(Builder.CreateCall(F,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
@@ -907,9 +916,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_val_compare_and_swap_16: {
QualType T = E->getType();
llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
-
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+
llvm::IntegerType *IntType =
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
@@ -935,9 +943,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_bool_compare_and_swap_16: {
QualType T = E->getArg(1)->getType();
llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
-
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+
llvm::IntegerType *IntType =
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
@@ -982,7 +989,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
StoreSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::StoreInst *Store =
+ llvm::StoreInst *Store =
Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
Store->setAlignment(StoreSize.getQuantity());
Store->setAtomic(llvm::Release);
@@ -993,7 +1000,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// We assume this is supposed to correspond to a C++0x-style
// sequentially-consistent fence (i.e. this is only usable for
// synchronization, not device I/O or anything like that). This intrinsic
- // is really badly designed in the sense that in theory, there isn't
+ // is really badly designed in the sense that in theory, there isn't
// any way to safely use it... but in practice, it mostly works
// to use it with non-atomic loads and stores to get acquire/release
// semantics.
@@ -1033,8 +1040,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(1);
Value *Order = EmitScalarExpr(E->getArg(1));
@@ -1120,8 +1126,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
@@ -1310,6 +1315,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
}
+ case Builtin::BI__noop:
+ return RValue::get(0);
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -1318,7 +1325,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E,
CGM.getBuiltinLibFunction(FD, BuiltinID));
-
+
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
@@ -1350,7 +1357,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if ((ICEArguments & (1 << i)) == 0) {
ArgValue = EmitScalarExpr(E->getArg(i));
} else {
- // If this is required to be a constant, constant fold it so that we
+ // If this is required to be a constant, constant fold it so that we
// know that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
@@ -1375,7 +1382,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
QualType BuiltinRetType = E->getType();
llvm::Type *RetTy = VoidTy;
- if (!BuiltinRetType->isVoidType())
+ if (!BuiltinRetType->isVoidType())
RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
@@ -1457,10 +1464,10 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
return Builder.CreateCall(F, Ops, name);
}
-Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
int SV = cast<ConstantInt>(V)->getSExtValue();
-
+
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
@@ -1469,34 +1476,56 @@ Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer. Skip over implicit
/// casts.
-unsigned CodeGenFunction::GetPointeeAlignment(const Expr *Addr) {
- unsigned Align = 1;
- // Check if the type is a pointer. The implicit cast operand might not be.
- while (Addr->getType()->isPointerType()) {
- QualType PtTy = Addr->getType()->getPointeeType();
-
- // Can't get alignment of incomplete types.
- if (!PtTy->isIncompleteType()) {
- unsigned NewA = getContext().getTypeAlignInChars(PtTy).getQuantity();
- if (NewA > Align)
- Align = NewA;
+std::pair<llvm::Value*, unsigned>
+CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
+ assert(Addr->getType()->isPointerType());
+ Addr = Addr->IgnoreParens();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
+ if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
+ ICE->getSubExpr()->getType()->isPointerType()) {
+ std::pair<llvm::Value*, unsigned> Ptr =
+ EmitPointerWithAlignment(ICE->getSubExpr());
+ Ptr.first = Builder.CreateBitCast(Ptr.first,
+ ConvertType(Addr->getType()));
+ return Ptr;
+ } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
+ LValue LV = EmitLValue(ICE->getSubExpr());
+ unsigned Align = LV.getAlignment().getQuantity();
+ if (!Align) {
+ // FIXME: Once LValues are fixed to always set alignment,
+ // zap this code.
+ QualType PtTy = ICE->getSubExpr()->getType();
+ if (!PtTy->isIncompleteType())
+ Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
+ else
+ Align = 1;
+ }
+ return std::make_pair(LV.getAddress(), Align);
}
-
- // If the address is an implicit cast, repeat with the cast operand.
- if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
- Addr = CastAddr->getSubExpr();
- continue;
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ LValue LV = EmitLValue(UO->getSubExpr());
+ unsigned Align = LV.getAlignment().getQuantity();
+ if (!Align) {
+ // FIXME: Once LValues are fixed to always set alignment,
+ // zap this code.
+ QualType PtTy = UO->getSubExpr()->getType();
+ if (!PtTy->isIncompleteType())
+ Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
+ else
+ Align = 1;
+ }
+ return std::make_pair(LV.getAddress(), Align);
}
- break;
}
- return Align;
-}
-/// GetPointeeAlignmentValue - Given an expression with a pointer type, find
-/// the alignment of the type referenced by the pointer. Skip over implicit
-/// casts. Return the alignment as an llvm::Value.
-Value *CodeGenFunction::GetPointeeAlignmentValue(const Expr *Addr) {
- return llvm::ConstantInt::get(Int32Ty, GetPointeeAlignment(Addr));
+ unsigned Align = 1;
+ QualType PtTy = Addr->getType()->getPointeeType();
+ if (!PtTy->isIncompleteType())
+ Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
+
+ return std::make_pair(EmitScalarExpr(Addr), Align);
}
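
A worked source-level example of what the pointer/alignment pair buys the memcpy family above; the declarations are hypothetical:

// Hypothetical inputs: both sides now contribute a real alignment, and the
// emitted llvm.memcpy gets the smaller of the two instead of a single
// pointee-type guess.
char dst[64] __attribute__((aligned(16)));   // EmitPointerWithAlignment -> 16
double src[8];                               // EmitPointerWithAlignment -> 8
void copy() {
  __builtin_memcpy(dst, src, 32);            // llvm.memcpy align = min(16, 8)
}
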
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
@@ -1549,8 +1578,69 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
SmallVector<Value*, 4> Ops;
- for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
+ llvm::Value *Align = 0;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
+ if (i == 0) {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld1_v:
+ case ARM::BI__builtin_neon_vld1q_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v:
+ case ARM::BI__builtin_neon_vld1_lane_v:
+ case ARM::BI__builtin_neon_vld1_dup_v:
+ case ARM::BI__builtin_neon_vld1q_dup_v:
+ case ARM::BI__builtin_neon_vst1_v:
+ case ARM::BI__builtin_neon_vst1q_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v:
+ case ARM::BI__builtin_neon_vst1_lane_v:
+ case ARM::BI__builtin_neon_vst2_v:
+ case ARM::BI__builtin_neon_vst2q_v:
+ case ARM::BI__builtin_neon_vst2_lane_v:
+ case ARM::BI__builtin_neon_vst2q_lane_v:
+ case ARM::BI__builtin_neon_vst3_v:
+ case ARM::BI__builtin_neon_vst3q_v:
+ case ARM::BI__builtin_neon_vst3_lane_v:
+ case ARM::BI__builtin_neon_vst3q_lane_v:
+ case ARM::BI__builtin_neon_vst4_v:
+ case ARM::BI__builtin_neon_vst4q_v:
+ case ARM::BI__builtin_neon_vst4_lane_v:
+ case ARM::BI__builtin_neon_vst4q_lane_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(0));
+ Ops.push_back(Src.first);
+ Align = Builder.getInt32(Src.second);
+ continue;
+ }
+ }
+ if (i == 1) {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_v:
+ case ARM::BI__builtin_neon_vld2q_v:
+ case ARM::BI__builtin_neon_vld3_v:
+ case ARM::BI__builtin_neon_vld3q_v:
+ case ARM::BI__builtin_neon_vld4_v:
+ case ARM::BI__builtin_neon_vld4q_v:
+ case ARM::BI__builtin_neon_vld2_lane_v:
+ case ARM::BI__builtin_neon_vld2q_lane_v:
+ case ARM::BI__builtin_neon_vld3_lane_v:
+ case ARM::BI__builtin_neon_vld3q_lane_v:
+ case ARM::BI__builtin_neon_vld4_lane_v:
+ case ARM::BI__builtin_neon_vld4q_lane_v:
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ case ARM::BI__builtin_neon_vld4_dup_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
+ Ops.push_back(Src.first);
+ Align = Builder.getInt32(Src.second);
+ continue;
+ }
+ }
Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ }
// vget_lane and vset_lane are not overloaded and do not have an extra
// argument that specifies the vector type.
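
The two switches above encode a single rule. A condensed restatement, as a sketch with the case lists elided:

// Sketch: which argument carries the address whose alignment we record,
// for the NEON memory builtins listed in the switches above.
static unsigned neonPointerArgIndex(unsigned BuiltinID) {
  switch (BuiltinID) {
  // vld2/vld3/vld4 (and their lane/dup forms) return through a sret
  // aggregate in arg 0, so the address is arg 1.
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld4_v:
    // ... remaining vldN/vldN_lane/vldN_dup cases elided ...
    return 1;
  // vld1* and all vst* take the address directly as arg 0.
  default:
    return 0;
  }
}
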
@@ -1596,7 +1686,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = FloatTy;
else
Ty = DoubleTy;
-
+
// Determine whether this is an unsigned conversion or not.
bool usgn = Result.getZExtValue() == 1;
unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
@@ -1605,7 +1695,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Int, Ty);
return Builder.CreateCall(F, Ops, "vcvtr");
}
-
+
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
bool usgn = Type.isUnsigned();
@@ -1620,6 +1710,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned Int;
switch (BuiltinID) {
default: return 0;
+ case ARM::BI__builtin_neon_vbsl_v:
+ case ARM::BI__builtin_neon_vbslq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty),
+ Ops, "vbsl");
case ARM::BI__builtin_neon_vabd_v:
case ARM::BI__builtin_neon_vabdq_v:
Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
@@ -1690,7 +1784,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
- return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case ARM::BI__builtin_neon_vcvt_s32_v:
case ARM::BI__builtin_neon_vcvt_u32_v:
@@ -1699,7 +1793,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Type *FloatTy =
GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
- return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_n_f32_v:
@@ -1730,7 +1824,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
-
+
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(Indices);
@@ -1746,7 +1840,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
case ARM::BI__builtin_neon_vld1_v:
case ARM::BI__builtin_neon_vld1q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
Ops, "vld1");
case ARM::BI__builtin_neon_vld1q_lane_v:
@@ -1761,8 +1855,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Load the value as a one-element vector.
Ty = llvm::VectorType::get(VTy->getElementType(), 1);
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
- Value *Ld = Builder.CreateCall2(F, Ops[0],
- GetPointeeAlignmentValue(E->getArg(0)));
+ Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
// Combine them.
SmallVector<Constant*, 2> Indices;
Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
@@ -1776,7 +1869,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
LoadInst *Ld = Builder.CreateLoad(Ops[0]);
- Value *Align = GetPointeeAlignmentValue(E->getArg(0));
Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
@@ -1786,7 +1878,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
LoadInst *Ld = Builder.CreateLoad(Ops[0]);
- Value *Align = GetPointeeAlignmentValue(E->getArg(0));
Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
@@ -1795,7 +1886,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld2_v:
case ARM::BI__builtin_neon_vld2q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
- Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1804,7 +1894,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld3_v:
case ARM::BI__builtin_neon_vld3q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
- Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1813,7 +1902,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_v:
case ARM::BI__builtin_neon_vld4q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
- Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1824,7 +1912,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops.push_back(Align);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1836,7 +1924,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops.push_back(Align);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1849,7 +1937,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops.push_back(Align);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1861,47 +1949,46 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Handle 64-bit elements as a special-case. There is no "dup" needed.
if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
switch (BuiltinID) {
- case ARM::BI__builtin_neon_vld2_dup_v:
- Int = Intrinsic::arm_neon_vld2;
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2;
break;
case ARM::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld3;
+ Int = Intrinsic::arm_neon_vld3;
break;
case ARM::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld4;
+ Int = Intrinsic::arm_neon_vld4;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
- Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
switch (BuiltinID) {
- case ARM::BI__builtin_neon_vld2_dup_v:
- Int = Intrinsic::arm_neon_vld2lane;
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
break;
case ARM::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld3lane;
+ Int = Intrinsic::arm_neon_vld3lane;
break;
case ARM::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld4lane;
+ Int = Intrinsic::arm_neon_vld4lane;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
-
+
SmallVector<Value*, 6> Args;
Args.push_back(Ops[1]);
Args.append(STy->getNumElements(), UndefValue::get(Ty));
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Args.push_back(CI);
- Args.push_back(GetPointeeAlignmentValue(E->getArg(1)));
-
+ Args.push_back(Align);
+
Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// splat lane 0 to all elts in each vector of the result.
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -1944,6 +2031,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
+ case ARM::BI__builtin_neon_vfma_v:
+ case ARM::BI__builtin_neon_vfmaq_v: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ return Builder.CreateCall3(F, Ops[0], Ops[1], Ops[2]);
+ }
case ARM::BI__builtin_neon_vpadal_v:
case ARM::BI__builtin_neon_vpadalq_v: {
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
@@ -2016,7 +2111,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
case ARM::BI__builtin_neon_vqrshrn_n_v:
- Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ Int =
+ usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
1, true);
case ARM::BI__builtin_neon_vqrshrun_n_v:
@@ -2086,7 +2182,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
case ARM::BI__builtin_neon_vrsubhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
@@ -2101,7 +2197,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vshl_n_v:
case ARM::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
- return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
+ return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
+ "vshl_n");
case ARM::BI__builtin_neon_vshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
Ops, "vshrn_n", 1, true);
@@ -2133,7 +2230,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateAdd(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vst1_v:
case ARM::BI__builtin_neon_vst1q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst1q_lane_v:
@@ -2143,7 +2240,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
- Ops[2] = GetPointeeAlignmentValue(E->getArg(0));
+ Ops[2] = Align;
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Ops[1]->getType()), Ops);
}
@@ -2154,38 +2251,37 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
StoreInst *St = Builder.CreateStore(Ops[1],
Builder.CreateBitCast(Ops[0], Ty));
- Value *Align = GetPointeeAlignmentValue(E->getArg(0));
St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
return St;
}
case ARM::BI__builtin_neon_vst2_v:
case ARM::BI__builtin_neon_vst2q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst2_lane_v:
case ARM::BI__builtin_neon_vst2q_lane_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_v:
case ARM::BI__builtin_neon_vst3q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_lane_v:
case ARM::BI__builtin_neon_vst3q_lane_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_v:
case ARM::BI__builtin_neon_vst4q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_lane_v:
case ARM::BI__builtin_neon_vst4q_lane_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vsubhn_v:
@@ -2220,7 +2316,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
- Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
ConstantAggregateZero::get(Ty));
return Builder.CreateSExt(Ops[0], Ty, "vtst");
}
@@ -2250,7 +2346,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = 0;
-
+
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
@@ -2263,13 +2359,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
return SV;
}
- case ARM::BI__builtin_neon_vzip_v:
+ case ARM::BI__builtin_neon_vzip_v:
case ARM::BI__builtin_neon_vzipq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = 0;
-
+
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
@@ -2382,62 +2478,62 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_palignr: {
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-
+
// If palignr is shifting the pair of input vectors less than 9 bytes,
// emit a shuffle instruction.
if (shiftVal <= 8) {
SmallVector<llvm::Constant*, 8> Indices;
for (unsigned i = 0; i != 8; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
-
+
Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
-
+
// If palignr is shifting the pair of input vectors more than 8 but less
// than 16 bytes, emit a logical right shift of the destination.
if (shiftVal < 16) {
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
-
+
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
-
+
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
}
-
+
// If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_palignr128: {
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-
+
// If palignr is shifting the pair of input vectors less than 17 bytes,
// emit a shuffle instruction.
if (shiftVal <= 16) {
SmallVector<llvm::Constant*, 16> Indices;
for (unsigned i = 0; i != 16; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
-
+
Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
-
+
// If palignr is shifting the pair of input vectors more than 16 but less
// than 32 bytes, emit a logical right shift of the destination.
if (shiftVal < 32) {
llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
-
+
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
-
+
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
}
-
+
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
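
The three palignr128 regimes above match a simple byte-level model. The following is an assumption about PALIGNR semantics, useful only for sanity-checking the shuffle indices and shift amounts:

#include <cstdint>
#include <cstring>

// Byte-level model: concatenate {lo, hi} (lo = second operand), shift right
// by 'shift' bytes, keep the low 16 bytes; bytes past the pair read as zero.
static void palignr128(uint8_t out[16], const uint8_t hi[16],
                       const uint8_t lo[16], unsigned shift) {
  uint8_t pair[32];
  std::memcpy(pair, lo, 16);
  std::memcpy(pair + 16, hi, 16);
  for (unsigned i = 0; i != 16; ++i)
    out[i] = (shift + i < 32) ? pair[shift + i] : 0;
}
// shift <= 16 lands in the shuffle branch, 16 < shift < 32 in the psrl.dq
// branch, and shift >= 32 yields all zeros -- matching the three cases.
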
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
index aba5d75..91795b9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
@@ -189,7 +189,7 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *ptr,
llvm::Value *&numElements,
llvm::Value *&allocPtr, CharUnits &cookieSize) {
// Derive a char* in the same address space as the pointer.
- unsigned AS = cast<llvm::PointerType>(ptr->getType())->getAddressSpace();
+ unsigned AS = ptr->getType()->getPointerAddressSpace();
llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
ptr = CGF.Builder.CreateBitCast(ptr, charPtrTy);
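This is a behavior-preserving cleanup: getPointerAddressSpace() performs the same cast-and-query internally. Equivalence sketch (illustration only, not part of the patch):

    // Both forms require ptr->getType() to be an llvm::PointerType.
    unsigned AS1 = cast<llvm::PointerType>(ptr->getType())->getAddressSpace();
    unsigned AS2 = ptr->getType()->getPointerAddressSpace(); // same value
    assert(AS1 == AS2 && "the two spellings agree");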
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
index a0dcdfd..570aeb0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -154,6 +154,15 @@ protected:
llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
public:
+ /// Adjust the given non-null pointer to an object of polymorphic
+ /// type to point to the complete object.
+ ///
+ /// The IR type of the result should be a pointer but is otherwise
+ /// irrelevant.
+ virtual llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type) = 0;
+
/// Build the signature of the given constructor variant by adding
/// any required parameters. For convenience, ResTy has been
/// initialized to 'void', and ArgTys has been initialized with the
@@ -196,6 +205,9 @@ public:
/// Gets the pure virtual member call function.
virtual StringRef GetPureVirtualCallName() = 0;
+ /// Gets the deleted virtual member call name.
+ virtual StringRef GetDeletedVirtualCallName() = 0;
+
/**************************** Array cookies ******************************/
/// Returns the extra size required in order to store the array
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 7d2b9d3..2d1d152 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -25,7 +25,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
@@ -148,6 +148,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
if (PcsAttr *PCS = D->getAttr<PcsAttr>())
return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
+ if (D->hasAttr<PnaclCallAttr>())
+ return CC_PnaclCall;
+
return CC_C;
}
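The added check extends the decl-attribute-to-calling-convention mapping in getCallingConventionForDecl; attributes are tested in order and the first match wins. Condensed sketch of the new tail of that chain (illustration only):

    if (D->hasAttr<PnaclCallAttr>())
      return CC_PnaclCall;  // new: PNaCl calling convention
    return CC_C;            // fall through to the default C convention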
@@ -588,9 +591,9 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
// If the first elt is at least as large as what we're looking for, or if the
// first element is the same size as the whole struct, we can enter it.
uint64_t FirstEltSize =
- CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
+ CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
if (FirstEltSize < DstSize &&
- FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
+ FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
return SrcPtr;
// GEP into the first element.
@@ -653,14 +656,14 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
if (SrcTy == Ty)
return CGF.Builder.CreateLoad(SrcPtr);
- uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+ uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
}
- uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
// If the source and destination are integer or pointer types, just do an
// extension or truncation to the desired type.
@@ -740,7 +743,7 @@ static void CreateCoercedStore(llvm::Value *Src,
return;
}
- uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
@@ -756,7 +759,7 @@ static void CreateCoercedStore(llvm::Value *Src,
return;
}
- uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+ uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
if (SrcSize <= DstSize) {
@@ -864,6 +867,10 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
ie = FI.arg_end(); it != ie; ++it) {
const ABIArgInfo &argAI = it->info;
+ // Insert a padding type to ensure proper alignment.
+ if (llvm::Type *PaddingType = argAI.getPaddingType())
+ argTypes.push_back(PaddingType);
+
switch (argAI.getKind()) {
case ABIArgInfo::Ignore:
break;
@@ -877,9 +884,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // Insert a padding type to ensure proper alignment.
- if (llvm::Type *PaddingType = argAI.getPaddingType())
- argTypes.push_back(PaddingType);
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
@@ -924,50 +928,52 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
AttributeListType &PAL,
unsigned &CallingConv) {
- llvm::Attributes FuncAttrs;
- llvm::Attributes RetAttrs;
+ llvm::AttrBuilder FuncAttrs;
+ llvm::AttrBuilder RetAttrs;
CallingConv = FI.getEffectiveCallingConvention();
if (FI.isNoReturn())
- FuncAttrs |= llvm::Attribute::NoReturn;
+ FuncAttrs.addAttribute(llvm::Attributes::NoReturn);
// FIXME: handle sseregparm someday...
if (TargetDecl) {
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
- FuncAttrs |= llvm::Attribute::ReturnsTwice;
+ FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
if (TargetDecl->hasAttr<NoThrowAttr>())
- FuncAttrs |= llvm::Attribute::NoUnwind;
+ FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
if (FPT && FPT->isNothrow(getContext()))
- FuncAttrs |= llvm::Attribute::NoUnwind;
+ FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
}
if (TargetDecl->hasAttr<NoReturnAttr>())
- FuncAttrs |= llvm::Attribute::NoReturn;
+ FuncAttrs.addAttribute(llvm::Attributes::NoReturn);
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
- FuncAttrs |= llvm::Attribute::ReturnsTwice;
+ FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
// 'const' and 'pure' attribute functions are also nounwind.
if (TargetDecl->hasAttr<ConstAttr>()) {
- FuncAttrs |= llvm::Attribute::ReadNone;
- FuncAttrs |= llvm::Attribute::NoUnwind;
+ FuncAttrs.addAttribute(llvm::Attributes::ReadNone);
+ FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
} else if (TargetDecl->hasAttr<PureAttr>()) {
- FuncAttrs |= llvm::Attribute::ReadOnly;
- FuncAttrs |= llvm::Attribute::NoUnwind;
+ FuncAttrs.addAttribute(llvm::Attributes::ReadOnly);
+ FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
}
if (TargetDecl->hasAttr<MallocAttr>())
- RetAttrs |= llvm::Attribute::NoAlias;
+ RetAttrs.addAttribute(llvm::Attributes::NoAlias);
}
if (CodeGenOpts.OptimizeSize)
- FuncAttrs |= llvm::Attribute::OptimizeForSize;
+ FuncAttrs.addAttribute(llvm::Attributes::OptimizeForSize);
+ if (CodeGenOpts.OptimizeSize == 2)
+ FuncAttrs.addAttribute(llvm::Attributes::MinSize);
if (CodeGenOpts.DisableRedZone)
- FuncAttrs |= llvm::Attribute::NoRedZone;
+ FuncAttrs.addAttribute(llvm::Attributes::NoRedZone);
if (CodeGenOpts.NoImplicitFloat)
- FuncAttrs |= llvm::Attribute::NoImplicitFloat;
+ FuncAttrs.addAttribute(llvm::Attributes::NoImplicitFloat);
QualType RetTy = FI.getReturnType();
unsigned Index = 1;
@@ -975,24 +981,28 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
if (RetTy->hasSignedIntegerRepresentation())
- RetAttrs |= llvm::Attribute::SExt;
+ RetAttrs.addAttribute(llvm::Attributes::SExt);
else if (RetTy->hasUnsignedIntegerRepresentation())
- RetAttrs |= llvm::Attribute::ZExt;
+ RetAttrs.addAttribute(llvm::Attributes::ZExt);
break;
case ABIArgInfo::Direct:
case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Indirect: {
- llvm::Attributes SRETAttrs = llvm::Attribute::StructRet;
+ llvm::AttrBuilder SRETAttrs;
+ SRETAttrs.addAttribute(llvm::Attributes::StructRet);
if (RetAI.getInReg())
- SRETAttrs |= llvm::Attribute::InReg;
- PAL.push_back(llvm::AttributeWithIndex::get(Index, SRETAttrs));
+ SRETAttrs.addAttribute(llvm::Attributes::InReg);
+ PAL.push_back(llvm::
+ AttributeWithIndex::get(Index,
+ llvm::Attributes::get(getLLVMContext(),
+ SRETAttrs)));
++Index;
// sret disables readnone and readonly
- FuncAttrs &= ~(llvm::Attribute::ReadOnly |
- llvm::Attribute::ReadNone);
+ FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
+ .removeAttribute(llvm::Attributes::ReadNone);
break;
}
@@ -1000,14 +1010,29 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
llvm_unreachable("Invalid ABI kind for return argument");
}
- if (RetAttrs)
- PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+ if (RetAttrs.hasAttributes())
+ PAL.push_back(llvm::
+ AttributeWithIndex::get(llvm::AttrListPtr::ReturnIndex,
+ llvm::Attributes::get(getLLVMContext(),
+ RetAttrs)));
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
const ABIArgInfo &AI = it->info;
- llvm::Attributes Attrs;
+ llvm::AttrBuilder Attrs;
+
+ if (AI.getPaddingType()) {
+ if (AI.getPaddingInReg()) {
+ llvm::AttrBuilder PadAttrs;
+ PadAttrs.addAttribute(llvm::Attributes::InReg);
+
+ llvm::Attributes A = llvm::Attributes::get(getLLVMContext(), PadAttrs);
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, A));
+ }
+ // Increment Index if there is padding.
+ ++Index;
+ }
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
@@ -1015,38 +1040,40 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (AI.getKind()) {
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerOrEnumerationType())
- Attrs |= llvm::Attribute::SExt;
+ Attrs.addAttribute(llvm::Attributes::SExt);
else if (ParamType->isUnsignedIntegerOrEnumerationType())
- Attrs |= llvm::Attribute::ZExt;
+ Attrs.addAttribute(llvm::Attributes::ZExt);
// FALL THROUGH
case ABIArgInfo::Direct:
if (AI.getInReg())
- Attrs |= llvm::Attribute::InReg;
+ Attrs.addAttribute(llvm::Attributes::InReg);
// FIXME: handle sseregparm someday...
- // Increment Index if there is padding.
- Index += (AI.getPaddingType() != 0);
-
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
- if (Attrs != llvm::Attribute::None)
+ if (Attrs.hasAttributes())
for (unsigned I = 0; I < Extra; ++I)
- PAL.push_back(llvm::AttributeWithIndex::get(Index + I, Attrs));
+ PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
+ llvm::Attributes::get(getLLVMContext(),
+ Attrs)));
Index += Extra;
}
break;
case ABIArgInfo::Indirect:
+ if (AI.getInReg())
+ Attrs.addAttribute(llvm::Attributes::InReg);
+
if (AI.getIndirectByVal())
- Attrs |= llvm::Attribute::ByVal;
+ Attrs.addAttribute(llvm::Attributes::ByVal);
+
+ Attrs.addAlignmentAttr(AI.getIndirectAlign());
- Attrs |=
- llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
// byval disables readnone and readonly.
- FuncAttrs &= ~(llvm::Attribute::ReadOnly |
- llvm::Attribute::ReadNone);
+ FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
+ .removeAttribute(llvm::Attributes::ReadNone);
break;
case ABIArgInfo::Ignore:
@@ -1064,12 +1091,17 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
}
}
- if (Attrs)
- PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
+ if (Attrs.hasAttributes())
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ llvm::Attributes::get(getLLVMContext(),
+ Attrs)));
++Index;
}
- if (FuncAttrs)
- PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+ if (FuncAttrs.hasAttributes())
+ PAL.push_back(llvm::
+ AttributeWithIndex::get(llvm::AttrListPtr::FunctionIndex,
+ llvm::Attributes::get(getLLVMContext(),
+ FuncAttrs)));
}
/// An argument came in as a promoted argument; demote it back to its
@@ -1117,7 +1149,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Name the struct return argument.
if (CGM.ReturnTypeUsesSRet(FI)) {
AI->setName("agg.result");
- AI->addAttr(llvm::Attribute::NoAlias);
+ AI->addAttr(llvm::Attributes::get(getLLVMContext(),
+ llvm::Attributes::NoAlias));
++AI;
}
@@ -1134,6 +1167,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
bool isPromoted =
isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
+ // Skip the dummy padding argument.
+ if (ArgI.getPaddingType())
+ ++AI;
+
switch (ArgI.getKind()) {
case ABIArgInfo::Indirect: {
llvm::Value *V = AI;
@@ -1175,9 +1212,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // Skip the dummy padding argument.
- if (ArgI.getPaddingType())
- ++AI;
// If we have the trivial case, handle it with no muss and fuss.
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
@@ -1187,7 +1221,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Value *V = AI;
if (Arg->getType().isRestrictQualified())
- AI->addAttr(llvm::Attribute::NoAlias);
+ AI->addAttr(llvm::Attributes::get(getLLVMContext(),
+ llvm::Attributes::NoAlias));
// Ensure the argument is the correct type.
if (V->getType() != ArgI.getCoerceToType())
@@ -1205,7 +1240,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// The alignment we need to use is the max of the requested alignment for
// the argument plus the alignment required by our access code below.
unsigned AlignmentToUse =
- CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
AlignmentToUse = std::max(AlignmentToUse,
(unsigned)getContext().getDeclAlign(Arg).getQuantity());
@@ -1226,10 +1261,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// and the optimizer generally likes scalar values better than FCAs.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (STy && STy->getNumElements() > 1) {
- uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
+ uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
- uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
+ uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
if (SrcSize <= DstSize) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
@@ -1363,12 +1398,23 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
.objc_retainAutoreleasedReturnValue) {
doRetainAutorelease = false;
- // Look for an inline asm immediately preceding the call and kill it, too.
- llvm::Instruction *prev = call->getPrevNode();
- if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
- if (asmCall->getCalledValue()
- == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
- insnsToKill.push_back(prev);
+ // If we emitted an assembly marker for this call (in which case the
+ // ARCEntrypoints field will have been set), go looking for that call.
+ // If we can't find it, we can't do this optimization. It should always
+ // be the immediately previous instruction, unless we needed bitcasts
+ // around the call.
+ if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
+ llvm::Instruction *prev = call->getPrevNode();
+ assert(prev);
+ if (isa<llvm::BitCastInst>(prev)) {
+ prev = prev->getPrevNode();
+ assert(prev);
+ }
+ assert(isa<llvm::CallInst>(prev));
+ assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
+ CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
+ insnsToKill.push_back(prev);
+ }
} else {
return 0;
}
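The rewrite makes the marker search conditional on a marker having been emitted at all, and tolerates bitcasts that may sit between the marker and the call. Condensed sketch of the walk-back (illustration only):

    // Walk back from the retainAutoreleasedReturnValue call, skipping at
    // most one bitcast; the inline-asm marker call must be found there.
    llvm::Instruction *prev = call->getPrevNode();
    if (isa<llvm::BitCastInst>(prev))
      prev = prev->getPrevNode();
    assert(isa<llvm::CallInst>(prev) && "expected the asm marker call");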
@@ -1755,7 +1801,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
if (const ObjCIndirectCopyRestoreExpr *CRE
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
- assert(getContext().getLangOpts().ObjCAutoRefCount);
+ assert(getLangOpts().ObjCAutoRefCount);
assert(getContext().hasSameType(E->getType(), type));
return emitWritebackArg(*this, args, CRE);
}
@@ -1943,6 +1989,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned TypeAlign =
getContext().getTypeAlignInChars(I->Ty).getQuantity();
+
+ // Insert a padding argument to ensure proper alignment.
+ if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
+ Args.push_back(llvm::UndefValue::get(PaddingType));
+ ++IRArgNo;
+ }
+
switch (ArgInfo.getKind()) {
case ABIArgInfo::Indirect: {
if (RV.isScalar() || RV.isComplex()) {
@@ -1969,7 +2022,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// we cannot force it to be sufficiently aligned.
llvm::Value *Addr = RV.getAggregateAddr();
unsigned Align = ArgInfo.getIndirectAlign();
- const llvm::TargetData *TD = &CGM.getTargetData();
+ const llvm::DataLayout *TD = &CGM.getDataLayout();
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
(ArgInfo.getIndirectByVal() && TypeAlign < Align &&
llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
@@ -1998,12 +2051,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // Insert a padding argument to ensure proper alignment.
- if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
- Args.push_back(llvm::UndefValue::get(PaddingType));
- ++IRArgNo;
- }
-
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
@@ -2049,8 +2096,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// and the optimizer generally likes scalar values better than FCAs.
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
- SrcPtr = Builder.CreateBitCast(SrcPtr,
- llvm::PointerType::getUnqual(STy));
+ llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
+
+ // If the source type is smaller than the destination type of the
+ // coerce-to logic, copy the source value into a temp alloca the size
+ // of the destination type to allow loading all of it. The bits past
+ // the source value are left undef.
+ if (SrcSize < DstSize) {
+ llvm::AllocaInst *TempAlloca
+ = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
+ Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
+ SrcPtr = TempAlloca;
+ } else {
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(STy));
+ }
+
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
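The temp-alloca path exists because flattening a first-class aggregate loads DstSize bytes element by element; when the source object is smaller, loading through a plain bitcast pointer would read past the allocation. Worked example (illustration only):

    // Coercing a 12-byte struct to { i64, i64 } (DstSize = 16): without the
    // copy, loading the second i64 reads 4 bytes past the source object;
    // with the 16-byte temporary, bytes 12..15 are merely undef.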
@@ -2113,10 +2177,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
- llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList);
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(getLLVMContext(),
+ AttributeList);
llvm::BasicBlock *InvokeDest = 0;
- if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
+ if (!Attrs.getFnAttributes().hasAttribute(llvm::Attributes::NoUnwind))
InvokeDest = getInvokeDest();
llvm::CallSite CS;
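Most of the CGCall.cpp churn tracks LLVM's replacement of the bitmask-style llvm::Attributes value with the AttrBuilder API: attribute bits are no longer OR-ed and AND-ed together but accumulated in a builder and then materialized in an LLVMContext. Condensed before/after (illustration only; useAttrs is a made-up stand-in for whatever consumes the result):

    // Before: llvm::Attributes FA;  FA |= llvm::Attribute::NoUnwind;
    llvm::AttrBuilder FA;
    FA.addAttribute(llvm::Attributes::NoUnwind);     // set a bit
    FA.removeAttribute(llvm::Attributes::ReadOnly);  // clear a bit
    if (FA.hasAttributes())                          // old: if (FA)
      useAttrs(llvm::Attributes::get(getLLVMContext(), FA));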
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
index e37fa3a..b2225e4 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -542,12 +542,6 @@ namespace {
};
}
-static bool hasTrivialCopyOrMoveConstructor(const CXXRecordDecl *Record,
- bool Moving) {
- return Moving ? Record->hasTrivialMoveConstructor() :
- Record->hasTrivialCopyConstructor();
-}
-
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
CXXCtorInitializer *MemberInit,
@@ -588,12 +582,11 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
if (Array && Constructor->isImplicitlyDefined() &&
Constructor->isCopyOrMoveConstructor()) {
QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
- const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
if (BaseElementTy.isPODType(CGF.getContext()) ||
- (Record && hasTrivialCopyOrMoveConstructor(Record,
- Constructor->isMoveConstructor()))) {
- // Find the source pointer. We knows it's the last argument because
- // we know we're in a copy constructor.
+ (CE && CE->getConstructor()->isTrivial())) {
+ // Find the source pointer. We know it's the last argument because
+ // we know we're in an implicit copy constructor.
unsigned SrcArgIndex = Args.size() - 1;
llvm::Value *SrcPtr
= CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
@@ -952,8 +945,8 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
}
// -fapple-kext must inline any call to this dtor into
// the caller's body.
- if (getContext().getLangOpts().AppleKext)
- CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
+ if (getLangOpts().AppleKext)
+ CurFn->addFnAttr(llvm::Attributes::AlwaysInline);
break;
}
@@ -1238,7 +1231,7 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CGDebugInfo *DI = getDebugInfo();
if (DI &&
- CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
+ CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo) {
// If debug info for this class has not been emitted then this is the
// right time to do so.
const CXXRecordDecl *Parent = D->getParent();
@@ -1268,7 +1261,9 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type), ForVirtualBase);
llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
- EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
+ // FIXME: Provide a source location here.
+ EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), This,
+ VTT, ArgBeg, ArgEnd);
}
void
@@ -1413,14 +1408,16 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
ForVirtualBase);
llvm::Value *Callee = 0;
- if (getContext().getLangOpts().AppleKext)
+ if (getLangOpts().AppleKext)
Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
DD->getParent());
if (!Callee)
Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
- EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
+ // FIXME: Provide a source location here.
+ EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
+ VTT, 0, 0);
}
namespace {
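The EmitMemberInitializer change narrows the memcpy fast path: instead of asking the record whether it has a trivial copy or move constructor, it inspects the constructor actually selected for the member's CXXConstructExpr, i.e. the one that would run. Core of the new test (condensed from the hunk, illustration only):

    if (CXXConstructExpr *CE =
            dyn_cast<CXXConstructExpr>(MemberInit->getInit()))
      if (CE->getConstructor()->isTrivial())
        ;  // safe to copy the array member with memcpy instead of a ctor loop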
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
index fd1c7a3..80fa09b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -15,6 +15,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGBlocks.h"
+#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
@@ -34,7 +35,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace clang::CodeGen;
@@ -157,7 +158,7 @@ StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
OS << OID->getName();
} else if (const ObjCCategoryImplDecl *OCD =
dyn_cast<const ObjCCategoryImplDecl>(DC)){
- OS << ((NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
+ OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
OCD->getIdentifier()->getNameStart() << ')';
}
OS << ' ' << OMD->getSelector().getAsString() << ']';
@@ -254,9 +255,13 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
return PLoc.isValid()? PLoc.getLine() : 0;
}
-/// getColumnNumber - Get column number for the location. If location is
-/// invalid then use current location.
+/// getColumnNumber - Get column number for the location.
unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
+ // We may not want column information at all.
+ if (!CGM.getCodeGenOpts().DebugColumnInfo)
+ return 0;
+
+ // If the location is invalid then use the current column.
if (Loc.isInvalid() && CurLoc.isInvalid())
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
@@ -347,44 +352,60 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
llvm_unreachable("Unexpected builtin type");
case BuiltinType::NullPtr:
return DBuilder.
- createNullPtrType(BT->getName(CGM.getContext().getLangOpts()));
+ createNullPtrType(BT->getName(CGM.getLangOpts()));
case BuiltinType::Void:
return llvm::DIType();
case BuiltinType::ObjCClass:
- return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", TheCU,
- getOrCreateMainFile(), 0);
+ if (ClassTy.Verify())
+ return ClassTy;
+ ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU,
+ getOrCreateMainFile(), 0);
+ return ClassTy;
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
// typedef struct objc_object {
// Class isa;
// } *id;
- // TODO: Cache these two types to avoid duplicates.
- llvm::DIType OCTy =
- DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", TheCU, getOrCreateMainFile(), 0);
+ if (ObjTy.Verify())
+ return ObjTy;
+
+ if (!ClassTy.Verify())
+ ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU,
+ getOrCreateMainFile(), 0);
+
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
+ llvm::DIType ISATy = DBuilder.createPointerType(ClassTy, Size);
+
+ llvm::DIType FwdTy = DBuilder.createStructType(TheCU, "objc_object",
+ getOrCreateMainFile(),
+ 0, 0, 0, 0,
+ llvm::DIArray());
- SmallVector<llvm::Value *, 16> EltTys;
+ llvm::TrackingVH<llvm::MDNode> ObjNode(FwdTy);
+ SmallVector<llvm::Value *, 1> EltTys;
llvm::DIType FieldTy =
- DBuilder.createMemberType(getOrCreateMainFile(), "isa",
+ DBuilder.createMemberType(llvm::DIDescriptor(ObjNode), "isa",
getOrCreateMainFile(), 0, Size,
0, 0, 0, ISATy);
EltTys.push_back(FieldTy);
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
-
- return DBuilder.createStructType(TheCU, "objc_object",
- getOrCreateMainFile(),
- 0, 0, 0, 0, Elements);
+
+ ObjNode->replaceOperandWith(10, Elements);
+ ObjTy = llvm::DIType(ObjNode);
+ return ObjTy;
}
case BuiltinType::ObjCSel: {
- return
+ if (SelTy.Verify())
+ return SelTy;
+ SelTy =
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
"objc_selector", TheCU, getOrCreateMainFile(),
0);
+ return SelTy;
}
case BuiltinType::UChar:
case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
@@ -417,7 +438,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::ULong: BTName = "long unsigned int"; break;
case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
default:
- BTName = BT->getName(CGM.getContext().getLangOpts());
+ BTName = BT->getName(CGM.getLangOpts());
break;
}
// Bit size, align and offset of the type.
@@ -498,21 +519,17 @@ llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
llvm::DIDescriptor Ctx) {
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
unsigned Line = getLineNumber(RD->getLocation());
- StringRef RDName = RD->getName();
+ StringRef RDName = getClassName(RD);
- // Get the tag.
- const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
unsigned Tag = 0;
- if (CXXDecl) {
- RDName = getClassName(RD);
- Tag = llvm::dwarf::DW_TAG_class_type;
- }
- else if (RD->isStruct())
+ if (RD->isStruct() || RD->isInterface())
Tag = llvm::dwarf::DW_TAG_structure_type;
else if (RD->isUnion())
Tag = llvm::dwarf::DW_TAG_union_type;
- else
- llvm_unreachable("Unknown RecordDecl type!");
+ else {
+ assert(RD->isClass());
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
// Create the type.
return DBuilder.createForwardDecl(Tag, RDName, Ctx, DefUnit, Line);
@@ -550,7 +567,7 @@ llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
/// then emit record's fwd if debug info size reduction is enabled.
llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
llvm::DIFile Unit) {
- if (CGM.getCodeGenOpts().DebugInfo != CodeGenOptions::LimitedDebugInfo)
+ if (CGM.getCodeGenOpts().getDebugInfo() != CodeGenOptions::LimitedDebugInfo)
return getOrCreateType(PointeeTy, Unit);
// Limit debug info for the pointee type.
@@ -777,8 +794,6 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) {
const LambdaExpr::Capture C = *I;
- // TODO: Need to handle 'this' in some way by probably renaming the
- // this of the lambda class and having a field member of 'this'.
if (C.capturesVariable()) {
VarDecl *V = C.getCapturedVar();
llvm::DIFile VUnit = getOrCreateFile(C.getLocation());
@@ -793,10 +808,24 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
Field->getAccess(), layout.getFieldOffset(fieldno),
VUnit, RecordTy);
elements.push_back(fieldType);
+ } else {
+ // TODO: Need to handle 'this' in some way by probably renaming the
+ // this of the lambda class and having a field member of 'this' or
+ // by using AT_object_pointer for the function and having that be
+ // used as 'this' for semantic references.
+ assert(C.capturesThis() && "Field that isn't captured and isn't this?");
+ FieldDecl *f = *Field;
+ llvm::DIFile VUnit = getOrCreateFile(f->getLocation());
+ QualType type = f->getType();
+ llvm::DIType fieldType
+ = createFieldType("this", type, 0, f->getLocation(), f->getAccess(),
+ layout.getFieldOffset(fieldNo), VUnit, RecordTy);
+
+ elements.push_back(fieldType);
}
}
} else {
- bool IsMsStruct = record->hasAttr<MsStructAttr>();
+ bool IsMsStruct = record->isMsStruct(CGM.getContext());
const FieldDecl *LastFD = 0;
for (RecordDecl::field_iterator I = record->field_begin(),
E = record->field_end();
@@ -875,12 +904,12 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
// TODO: This and the artificial type below are misleading, the
// types aren't artificial the argument is, but the current
// metadata doesn't represent that.
- ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
Elts.push_back(ThisPtrType);
} else {
llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit);
TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
- ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
Elts.push_back(ThisPtrType);
}
}
@@ -995,12 +1024,8 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
if (D->isImplicit() && !D->isUsed())
continue;
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
- // Only emit debug information for user provided functions, we're
- // unlikely to want info for artificial functions.
- if (Method->isUserProvided())
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
- }
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
SE = FTD->spec_end(); SI != SE; ++SI)
@@ -1182,7 +1207,7 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
SourceLocation Loc) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
return T;
}
@@ -1191,7 +1216,7 @@ llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
/// debug info.
llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
SourceLocation Loc) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
DBuilder.retainType(T);
return T;
@@ -1388,12 +1413,21 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
FieldAlign = CGM.getContext().getTypeAlign(FType);
}
- // We can't know the offset of our ivar in the structure if we're using
- // the non-fragile abi and the debugger should ignore the value anyways.
- // Call it the FieldNo+1 due to how debuggers use the information,
- // e.g. negating the value when it needs a lookup in the dynamic table.
- uint64_t FieldOffset = CGM.getLangOpts().ObjCRuntime.isNonFragile()
- ? FieldNo+1 : RL.getFieldOffset(FieldNo);
+ uint64_t FieldOffset;
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ // We don't know the runtime offset of an ivar if we're using the
+ // non-fragile ABI. For bitfields, use the bit offset into the first
+ // byte of storage of the bitfield. For other fields, use zero.
+ if (Field->isBitField()) {
+ FieldOffset = CGM.getObjCRuntime().ComputeBitfieldBitOffset(
+ CGM, ID, Field);
+ FieldOffset %= CGM.getContext().getCharWidth();
+ } else {
+ FieldOffset = 0;
+ }
+ } else {
+ FieldOffset = RL.getFieldOffset(FieldNo);
+ }
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
@@ -1570,9 +1604,29 @@ llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
/// CreateEnumType - get enumeration type.
llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
- SmallVector<llvm::Value *, 16> Enumerators;
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+ if (!ED->getTypeForDecl()->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ }
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and return it.
+ if (!ED->getDefinition()) {
+ llvm::DIDescriptor EDContext;
+ EDContext = getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+ StringRef EDName = ED->getName();
+ return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_enumeration_type,
+ EDName, EDContext, DefUnit, Line, 0,
+ Size, Align);
+ }
// Create DIEnumerator elements for each enumerator.
+ SmallVector<llvm::Value *, 16> Enumerators;
+ ED = ED->getDefinition();
for (EnumDecl::enumerator_iterator
Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
Enum != EnumEnd; ++Enum) {
@@ -1586,21 +1640,14 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
- uint64_t Size = 0;
- uint64_t Align = 0;
- if (!ED->getTypeForDecl()->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
- }
llvm::DIDescriptor EnumContext =
getContextDescriptor(cast<Decl>(ED->getDeclContext()));
llvm::DIType ClassTy = ED->isScopedUsingClassTag() ?
getOrCreateType(ED->getIntegerType(), DefUnit) : llvm::DIType();
- unsigned Flags = !ED->isCompleteDefinition() ? llvm::DIDescriptor::FlagFwdDecl : 0;
llvm::DIType DbgTy =
DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
Size, Align, EltArray,
- ClassTy, Flags);
+ ClassTy);
return DbgTy;
}
@@ -1838,10 +1885,10 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
unsigned Line = getLineNumber(RD->getLocation());
- StringRef RDName = RD->getName();
+ StringRef RDName = getClassName(RD);
llvm::DIDescriptor RDContext;
- if (CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo)
+ if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo)
RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
else
RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
@@ -1859,9 +1906,7 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
if (RD->isUnion())
RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
Size, Align, 0, llvm::DIArray());
- else if (CXXDecl) {
- RDName = getClassName(RD);
-
+ else if (RD->isClass()) {
// FIXME: This could be a struct type giving a default visibility different
// than C++ class type, but needs llvm metadata changes first.
RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
@@ -1969,7 +2014,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
// getOrCreateFunctionType - Construct DIType. If it is a c++ method, include
// implicit parameter "this".
-llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
+llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
QualType FnType,
llvm::DIFile F) {
@@ -1982,9 +2027,11 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(getOrCreateType(OMethod->getResultType(), F));
// "self" pointer is always first argument.
- Elts.push_back(getOrCreateType(OMethod->getSelfDecl()->getType(), F));
- // "cmd" pointer is always second argument.
- Elts.push_back(getOrCreateType(OMethod->getCmdDecl()->getType(), F));
+ llvm::DIType SelfTy = getOrCreateType(OMethod->getSelfDecl()->getType(), F);
+ Elts.push_back(DBuilder.createObjectPointerType(SelfTy));
+ // "_cmd" pointer is always second argument.
+ llvm::DIType CmdTy = getOrCreateType(OMethod->getCmdDecl()->getType(), F);
+ Elts.push_back(DBuilder.createArtificialType(CmdTy));
// Get rest of the arguments.
for (ObjCMethodDecl::param_const_iterator PI = OMethod->param_begin(),
PE = OMethod->param_end(); PI != PE; ++PI)
@@ -2007,14 +2054,22 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
FnBeginRegionCount.push_back(LexicalBlockStack.size());
const Decl *D = GD.getDecl();
+ // The function may lack a declaration in the source code if it was
+ // created by Clang CodeGen (examples: _GLOBAL__I_a,
+ // __cxx_global_array_dtor, thunks).
+ bool HasDecl = (D != 0);
// Use the location of the declaration.
- SourceLocation Loc = D->getLocation();
-
+ SourceLocation Loc;
+ if (HasDecl)
+ Loc = D->getLocation();
+
unsigned Flags = 0;
llvm::DIFile Unit = getOrCreateFile(Loc);
llvm::DIDescriptor FDContext(Unit);
llvm::DIArray TParamsArray;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (!HasDecl) {
+ // Use the LLVM function name.
+ Name = Fn->getName();
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
FI = SPCache.find(FD->getCanonicalDecl());
@@ -2035,10 +2090,10 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
Flags |= llvm::DIDescriptor::FlagPrototyped;
}
if (LinkageName == Name ||
- CGM.getCodeGenOpts().DebugInfo <= CodeGenOptions::DebugLineTablesOnly)
+ CGM.getCodeGenOpts().getDebugInfo() <= CodeGenOptions::DebugLineTablesOnly)
LinkageName = StringRef();
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNameSpace(NSDecl);
@@ -2061,12 +2116,13 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
Name = Name.substr(1);
unsigned LineNo = getLineNumber(Loc);
- if (D->isImplicit())
+ if (!HasDecl || D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
llvm::DIType DIFnType;
llvm::DISubprogram SPDecl;
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (HasDecl &&
+ CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
DIFnType = getOrCreateFunctionType(D, FnType, Unit);
SPDecl = getFunctionDeclaration(D);
} else {
@@ -2089,7 +2145,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
// Push function on region stack.
llvm::MDNode *SPN = SP;
LexicalBlockStack.push_back(SPN);
- RegionMap[D] = llvm::WeakVH(SP);
+ if (HasDecl)
+ RegionMap[D] = llvm::WeakVH(SP);
}
/// EmitLocation - Emit metadata to indicate a change in line/column
@@ -2242,7 +2299,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage,
unsigned ArgNo, CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
@@ -2253,7 +2310,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
else
Ty = getOrCreateType(VD->getType(), Unit);
- // If there is not any debug info for type then do not emit debug info
+ // If there is no debug info for this type then do not emit debug info
// for this variable.
if (!Ty)
return;
@@ -2279,8 +2336,16 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
unsigned Flags = 0;
if (VD->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
+ // If this is the first argument and it is implicit then
+ // give it an object pointer flag.
+ // FIXME: There has to be a better way to do this, but for static
+ // functions there won't be an implicit param at arg1 and
+ // otherwise it is 'self' or 'this'.
+ if (isa<ImplicitParamDecl>(VD) && ArgNo == 1)
+ Flags |= llvm::DIDescriptor::FlagObjectPointer;
+
llvm::MDNode *Scope = LexicalBlockStack.back();
-
+
StringRef Name = VD->getName();
if (!Name.empty()) {
if (VD->hasAttr<BlocksAttr>()) {
@@ -2376,14 +2441,15 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
llvm::Value *Storage,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
}
-void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
- const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo) {
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
if (Builder.GetInsertBlock() == 0)
@@ -2399,11 +2465,16 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
else
Ty = getOrCreateType(VD->getType(), Unit);
+ // Self is passed along as an implicit non-arg variable in a
+ // block. Mark it as the object pointer.
+ if (isa<ImplicitParamDecl>(VD) && VD->getName() == "self")
+ Ty = DBuilder.createObjectPointerType(Ty);
+
// Get location information.
unsigned Line = getLineNumber(VD->getLocation());
unsigned Column = getColumnNumber(VD->getLocation());
- const llvm::TargetData &target = CGM.getTargetData();
+ const llvm::DataLayout &target = CGM.getDataLayout();
CharUnits offset = CharUnits::fromQuantity(
target.getStructLayout(blockInfo.StructureType)
@@ -2418,7 +2489,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
offset = CGM.getContext()
- .toCharUnitsFromBits(target.getPointerSizeInBits());
+ .toCharUnitsFromBits(target.getPointerSizeInBits(0));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
@@ -2444,7 +2515,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
unsigned ArgNo,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
}
@@ -2461,7 +2532,7 @@ namespace {
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::Value *addr,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
ASTContext &C = CGM.getContext();
const BlockDecl *blockDecl = block.getBlockDecl();
@@ -2475,7 +2546,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
const llvm::StructLayout *blockLayout =
- CGM.getTargetData().getStructLayout(block.StructureType);
+ CGM.getDataLayout().getStructLayout(block.StructureType);
SmallVector<llvm::Value*, 16> fields;
fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
@@ -2606,7 +2677,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
/// EmitGlobalVariable - Emit information about a global variable.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
unsigned LineNo = getLineNumber(D->getLocation());
@@ -2640,7 +2711,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
/// EmitGlobalVariable - Emit information about an objective-c interface.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ObjCInterfaceDecl *ID) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
unsigned LineNo = getLineNumber(ID->getLocation());
@@ -2666,7 +2737,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
/// EmitGlobalVariable - Emit global variable's debug info.
void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
llvm::Constant *Init) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
// Create the descriptor for the variable.
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
StringRef Name = VD->getName();
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
index 44cc49a..2e88a73 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
@@ -50,6 +50,9 @@ class CGDebugInfo {
llvm::DICompileUnit TheCU;
SourceLocation CurLoc, PrevLoc;
llvm::DIType VTablePtrType;
+ llvm::DIType ClassTy;
+ llvm::DIType ObjTy;
+ llvm::DIType SelTy;
/// TypeCache - Cache of previously constructed Types.
llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
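ClassTy, ObjTy, and SelTy back the new caching in CreateType(const BuiltinType*) above: each Objective-C builtin debug type is built once and the cached llvm::DIType is returned on later requests, replacing the old TODO about duplicated forward declarations. The pattern, as used for objc_class (condensed from the hunk above):

    if (ClassTy.Verify())            // cache hit: node already created
      return ClassTy;
    ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
                                         "objc_class", TheCU,
                                         getOrCreateMainFile(), 0);
    return ClassTy;                  // created once, reused thereafter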
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
index be6638e..8870587 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -24,7 +24,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Type.h"
using namespace clang;
using namespace CodeGen;
@@ -121,7 +121,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
// uniqued. We can't do this in C, though, because there's no
// standard way to agree on which variables are the same (i.e.
// there's no mangling).
- if (getContext().getLangOpts().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
Linkage = CurFn->getLinkage();
@@ -141,7 +141,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
const char *Separator) {
CodeGenModule &CGM = CGF.CGM;
- if (CGF.getContext().getLangOpts().CPlusPlus) {
+ if (CGF.getLangOpts().CPlusPlus) {
StringRef Name = CGM.getMangledName(&D);
return Name.str();
}
@@ -184,12 +184,14 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
Name = GetStaticDeclName(*this, D, Separator);
llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ unsigned AddrSpace =
+ CGM.GetGlobalVarAddressSpace(&D, CGM.getContext().getTargetAddressSpace(Ty));
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
CGM.EmitNullConstant(D.getType()), Name, 0,
llvm::GlobalVariable::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(Ty));
+ AddrSpace);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
if (Linkage != llvm::GlobalValue::InternalLinkage)
GV->setVisibility(CurFn->getVisibility());
@@ -220,7 +222,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
// If constant emission failed, then this should be a C++ static
// initializer.
if (!Init) {
- if (!getContext().getLangOpts().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
else if (Builder.GetInsertBlock()) {
// Since we have a static initializer, this global variable can't
@@ -331,7 +333,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
if (DI &&
- CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitGlobalVariable(var, &D);
}
@@ -704,9 +706,8 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
/// stores that would be required.
static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
bool isVolatile, CGBuilderTy &Builder) {
- // Zero doesn't require a store.
- if (Init->isNullValue() || isa<llvm::UndefValue>(Init))
- return;
+ assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
+ "called emitStoresForInitAfterMemset for zero or undef value.");
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
@@ -719,10 +720,11 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
dyn_cast<llvm::ConstantDataSequential>(Init)) {
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
llvm::Constant *Elt = CDS->getElementAsConstant(i);
-
- // Get a pointer to the element and emit it.
- emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
- isVolatile, Builder);
+
+ // If necessary, get a pointer to the element and emit it.
+ if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
}
return;
}
@@ -732,9 +734,11 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
- // Get a pointer to the element and emit it.
- emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
- isVolatile, Builder);
+
+ // If necessary, get a pointer to the element and emit it.
+ if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
}
}
@@ -791,7 +795,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
- bool NRVO = getContext().getLangOpts().ElideConstructors &&
+ bool NRVO = getLangOpts().ElideConstructors &&
D.isNRVOVariable();
// If this value is a POD array or struct with a statically
@@ -910,7 +914,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Emit debug info for local var declaration.
if (HaveInsertPoint())
if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
if (Target.useGlobalsForAutomaticVariables()) {
DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr),
@@ -1056,10 +1061,11 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
// If the initializer is all or mostly zeros, codegen with memset then do
// a few stores afterward.
if (shouldUseMemSetPlusStoresToInitialize(constant,
- CGM.getTargetData().getTypeAllocSize(constant->getType()))) {
+ CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
alignment.getQuantity(), isVolatile);
- if (!constant->isNullValue()) {
+ // Zero and undef don't require stores.
+ if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
}
@@ -1493,8 +1499,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
LocalDeclMap[&D] = Arg;
if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().DebugInfo >=
- CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
}
@@ -1576,7 +1582,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Emit debug info for param declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
}
}
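The emitStoresForInitAfterMemset hunks move the zero/undef filtering from the callee into its recursive call sites, so the helper can assert that it is never handed a value the surrounding memset already covered. The new contract at a call site (condensed from the hunk above):

    // Skip elements the memset already initialized; recurse otherwise.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
                                   isVolatile, Builder);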
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
index 492b95a..65be3c1 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -16,6 +16,7 @@
#include "CGCXXABI.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
@@ -163,6 +164,9 @@ static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
CodeGenFunction CGF(CGM);
+ // Initialize debug info if needed.
+ CGF.maybeInitializeDebugInfo();
+
CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, fn,
CGM.getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
@@ -218,7 +222,7 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
- if (!CGM.getContext().getLangOpts().AppleKext) {
+ if (!CGM.getLangOpts().AppleKext) {
// Set the section if needed.
if (const char *Section =
CGM.getContext().getTargetInfo().getStaticInitSectionSpecifier())
@@ -228,8 +232,8 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
if (!CGM.getLangOpts().Exceptions)
Fn->setDoesNotThrow();
- if (CGM.getLangOpts().AddressSanitizer)
- Fn->addFnAttr(llvm::Attribute::AddressSafety);
+ if (CGM.getLangOpts().SanitizeAddress)
+ Fn->addFnAttr(llvm::Attributes::AddressSafety);
return Fn;
}
@@ -252,8 +256,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
DelayedCXXInitPosition.erase(D);
- }
- else {
+ } else {
llvm::DenseMap<const Decl *, unsigned>::iterator I =
DelayedCXXInitPosition.find(D);
if (I == DelayedCXXInitPosition.end()) {
@@ -276,28 +279,50 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- // Create our global initialization function.
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+ // Create our global initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
- PrioritizedCXXGlobalInits.end());
- for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) {
- llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second;
- LocalCXXGlobalInits.push_back(Fn);
- }
- LocalCXXGlobalInits.append(CXXGlobalInits.begin(), CXXGlobalInits.end());
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ PrioritizedCXXGlobalInits.end());
+ // Iterate over "chunks" of ctors with the same priority and emit each
+ // chunk into a separate function. Note: everything is sorted first by
+ // priority and second by lexical order, so we emit the ctor functions
+ // in the proper order.
+ for (SmallVectorImpl<GlobalInitData >::iterator
+ I = PrioritizedCXXGlobalInits.begin(),
+ E = PrioritizedCXXGlobalInits.end(); I != E; ) {
+ SmallVectorImpl<GlobalInitData >::iterator
+ PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());
+
+ LocalCXXGlobalInits.clear();
+ unsigned Priority = I->first.priority;
+ // Compute the function suffix from the priority. Prepend it with zeroes
+ // so that the function names sort in the same order as the priorities.
+ std::string PrioritySuffix = llvm::utostr(Priority);
+ // Priority is always <= 65535 (enforced by Sema).
+ PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy,
+ "_GLOBAL__I_" + PrioritySuffix);
+
+ for (; I < PrioE; ++I)
+ LocalCXXGlobalInits.push_back(I->second);
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
&LocalCXXGlobalInits[0],
LocalCXXGlobalInits.size());
+ AddGlobalCtor(Fn, Priority);
+ }
}
- else
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
- &CXXGlobalInits[0],
- CXXGlobalInits.size());
+
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
AddGlobalCtor(Fn);
+
CXXGlobalInits.clear();
PrioritizedCXXGlobalInits.clear();
}
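
A sketch of the suffix computation above for an assumed priority of 101:

    std::string S = llvm::utostr(101);        // "101"
    S = std::string(6 - S.size(), '0') + S;   // "000101"

Zero-padding to six digits makes the prioritized functions (_GLOBAL__I_000101,
_GLOBAL__I_065535, ...) and the final _GLOBAL__I_a sort lexically in the same
order as their priorities.
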
@@ -321,8 +346,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
llvm::GlobalVariable *Addr,
bool PerformInit) {
- if (CGM.getModuleDebugInfo() && !D->hasAttr<NoDebugAttr>())
- DebugInfo = CGM.getModuleDebugInfo();
+ // Check if we need to emit debug info for the variable initializer.
+ if (!D->hasAttr<NoDebugAttr>())
+ maybeInitializeDebugInfo();
StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
@@ -344,6 +370,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
llvm::Constant **Decls,
unsigned NumDecls) {
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
+
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
@@ -369,6 +398,9 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
+
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
@@ -405,6 +437,9 @@ CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
+
StartFunction(GlobalDecl(), getContext().VoidTy, fn, FI, args,
SourceLocation());
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
index ba9c296..86dee5a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -307,14 +307,15 @@ static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
/// aggressive about only using the ObjC++ personality in a function
/// when it really needs it.
void CodeGenModule::SimplifyPersonality() {
- // For now, this is really a Darwin-specific operation.
- if (!Context.getTargetInfo().getTriple().isOSDarwin())
- return;
-
// If we're not in ObjC++ -fexceptions, there's nothing to do.
if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
return;
+ // Both the problem this endeavors to fix and the way the logic
+ // above works are specific to the NeXT runtime.
+ if (!LangOpts.ObjCRuntime.isNeXTFamily())
+ return;
+
const EHPersonality &ObjCXX = EHPersonality::get(LangOpts);
const EHPersonality &CXX = getCXXPersonality(LangOpts);
if (&ObjCXX == &CXX)
@@ -534,7 +535,7 @@ static void emitFilterDispatchBlock(CodeGenFunction &CGF,
llvm::Value *zero = CGF.Builder.getInt32(0);
llvm::Value *failsFilter =
CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails");
- CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, CGF.getEHResumeBlock());
+ CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, CGF.getEHResumeBlock(false));
CGF.EmitBlock(unexpectedBB);
}
@@ -614,7 +615,7 @@ CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
// The dispatch block for the end of the scope chain is a block that
// just resumes unwinding.
if (si == EHStack.stable_end())
- return getEHResumeBlock();
+ return getEHResumeBlock(true);
// Otherwise, we should look at the actual scope.
EHScope &scope = *EHStack.find(si);
@@ -1546,7 +1547,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
return TerminateHandler;
}
-llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
+llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
if (EHResumeBlock) return EHResumeBlock;
CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
@@ -1560,7 +1561,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
const char *RethrowName = Personality.CatchallRethrowFn;
- if (RethrowName != 0) {
+ if (RethrowName != 0 && !isCleanup) {
Builder.CreateCall(getCatchallRethrowFn(*this, RethrowName),
getExceptionFromSlot())
->setDoesNotReturn();
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
index 1fe4c18..63cc5b5 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -26,7 +26,8 @@
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/MDBuilder.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/ADT/Hashing.h"
using namespace clang;
using namespace CodeGen;
@@ -156,50 +157,6 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
}
}
-namespace {
-/// \brief An adjustment to be made to the temporary created when emitting a
-/// reference binding, which accesses a particular subobject of that temporary.
- struct SubobjectAdjustment {
- enum {
- DerivedToBaseAdjustment,
- FieldAdjustment,
- MemberPointerAdjustment
- } Kind;
-
- union {
- struct {
- const CastExpr *BasePath;
- const CXXRecordDecl *DerivedClass;
- } DerivedToBase;
-
- FieldDecl *Field;
-
- struct {
- const MemberPointerType *MPT;
- llvm::Value *Ptr;
- } Ptr;
- };
-
- SubobjectAdjustment(const CastExpr *BasePath,
- const CXXRecordDecl *DerivedClass)
- : Kind(DerivedToBaseAdjustment) {
- DerivedToBase.BasePath = BasePath;
- DerivedToBase.DerivedClass = DerivedClass;
- }
-
- SubobjectAdjustment(FieldDecl *Field)
- : Kind(FieldAdjustment) {
- this->Field = Field;
- }
-
- SubobjectAdjustment(const MemberPointerType *MPT, llvm::Value *Ptr)
- : Kind(MemberPointerAdjustment) {
- this->Ptr.MPT = MPT;
- this->Ptr.Ptr = Ptr;
- }
- };
-}
-
static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
const NamedDecl *InitializedDecl) {
@@ -232,32 +189,18 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
- // Look through single-element init lists that claim to be lvalues. They're
- // just syntactic wrappers in this case.
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
- if (ILE->getNumInits() == 1 && ILE->isGLValue())
- E = ILE->getInit(0);
- }
-
- // Look through expressions for materialized temporaries (for now).
- if (const MaterializeTemporaryExpr *M
- = dyn_cast<MaterializeTemporaryExpr>(E)) {
- // Objective-C++ ARC:
- // If we are binding a reference to a temporary that has ownership, we
- // need to perform retain/release operations on the temporary.
- if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
- E->getType()->isObjCLifetimeType() &&
- (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
- E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
- E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
- ObjCARCReferenceLifetimeType = E->getType();
-
- E = M->GetTemporaryExpr();
- }
+ const MaterializeTemporaryExpr *M = NULL;
+ E = E->findMaterializedTemporary(M);
+ // Objective-C++ ARC:
+ // If we are binding a reference to a temporary that has ownership, we
+ // need to perform retain/release operations on the temporary.
+ if (M && CGF.getLangOpts().ObjCAutoRefCount &&
+ M->getType()->isObjCLifetimeType() &&
+ (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
+ M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
+ M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
+ ObjCARCReferenceLifetimeType = M->getType();
- if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
- E = DAE->getExpr();
-
if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
CGF.enterFullExpression(EWC);
CodeGenFunction::RunCleanupsScope Scope(CGF);
@@ -335,54 +278,13 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
return ReferenceTemporary;
}
-
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- while (true) {
- E = E->IgnoreParens();
-
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if ((CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase) &&
- E->getType()->isRecordType()) {
- E = CE->getSubExpr();
- CXXRecordDecl *Derived
- = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
- Adjustments.push_back(SubobjectAdjustment(CE, Derived));
- continue;
- }
-
- if (CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (!ME->isArrow() && ME->getBase()->isRValue()) {
- assert(ME->getBase()->getType()->isRecordType());
- if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
- E = ME->getBase();
- Adjustments.push_back(SubobjectAdjustment(Field));
- continue;
- }
- }
- } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
- if (BO->isPtrMemOp()) {
- assert(BO->getLHS()->isRValue());
- E = BO->getLHS();
- const MemberPointerType *MPT =
- BO->getRHS()->getType()->getAs<MemberPointerType>();
- llvm::Value *Ptr = CGF.EmitScalarExpr(BO->getRHS());
- Adjustments.push_back(SubobjectAdjustment(MPT, Ptr));
- }
- }
- if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
- if (opaque->getType()->isRecordType())
- return CGF.EmitOpaqueValueLValue(opaque).getAddress();
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ E = E->skipRValueSubobjectAdjustments(Adjustments);
+ if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
+ if (opaque->getType()->isRecordType())
+ return CGF.EmitOpaqueValueLValue(opaque).getAddress();
- // Nothing changed.
- break;
- }
-
// Create a reference temporary if necessary.
AggValueSlot AggSlot = AggValueSlot::ignored();
if (CGF.hasAggregateLLVMType(E->getType()) &&
@@ -446,8 +348,9 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
}
case SubobjectAdjustment::MemberPointerAdjustment: {
+ llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
- CGF, Object, Adjustment.Ptr.Ptr, Adjustment.Ptr.MPT);
+ CGF, Object, Ptr, Adjustment.Ptr.MPT);
break;
}
}
@@ -486,6 +389,15 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
ReferenceTemporaryDtor,
ObjCARCReferenceLifetimeType,
InitializedDecl);
+ if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
+ // C++11 [dcl.ref]p5 (as amended by core issue 453):
+ // If a glvalue to which a reference is directly bound designates neither
+ // an existing object or function of an appropriate type nor a region of
+ // storage of suitable size and alignment to contain an object of the
+ // reference's type, the behavior is undefined.
+ QualType Ty = E->getType();
+ EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
+ }
if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
return RValue::get(Value);
@@ -549,22 +461,133 @@ unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
->getZExtValue();
}
-void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
- if (!CatchUndefined)
+/// Emit IR computing hash_16_bytes from include/llvm/ADT/Hashing.h inline.
+static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
+ llvm::Value *High) {
+ llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
+ llvm::Value *K47 = Builder.getInt64(47);
+ llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
+ llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
+ llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
+ llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
+ return Builder.CreateMul(B1, KMul);
+}
+
+void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
+ llvm::Value *Address,
+ QualType Ty, CharUnits Alignment) {
+ if (!SanitizePerformTypeCheck)
return;
- // This needs to be to the standard address space.
- Address = Builder.CreateBitCast(Address, Int8PtrTy);
+ // Don't check pointers outside the default address space. The null check
+ // isn't correct, the object-size check isn't supported by LLVM, and we can't
+ // communicate the addresses to the runtime handler for the vptr check.
+ if (Address->getType()->getPointerAddressSpace())
+ return;
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
+ llvm::Value *Cond = 0;
- llvm::Value *Min = Builder.getFalse();
- llvm::Value *C = Builder.CreateCall2(F, Address, Min);
- llvm::BasicBlock *Cont = createBasicBlock();
- Builder.CreateCondBr(Builder.CreateICmpUGE(C,
- llvm::ConstantInt::get(IntPtrTy, Size)),
- Cont, getTrapBB());
- EmitBlock(Cont);
+ if (getLangOpts().SanitizeNull) {
+ // The glvalue must not be an empty glvalue.
+ Cond = Builder.CreateICmpNE(
+ Address, llvm::Constant::getNullValue(Address->getType()));
+ }
+
+ if (getLangOpts().SanitizeObjectSize && !Ty->isIncompleteType()) {
+ uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
+
+ // The glvalue must refer to a large enough storage region.
+ // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
+ // to check this.
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
+ llvm::Value *Min = Builder.getFalse();
+ llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
+ llvm::Value *LargeEnough =
+ Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
+ llvm::ConstantInt::get(IntPtrTy, Size));
+ Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
+ }
+
+ uint64_t AlignVal = 0;
+
+ if (getLangOpts().SanitizeAlignment) {
+ AlignVal = Alignment.getQuantity();
+ if (!Ty->isIncompleteType() && !AlignVal)
+ AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
+
+ // The glvalue must be suitably aligned.
+ if (AlignVal) {
+ llvm::Value *Align =
+ Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
+ llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
+ llvm::Value *Aligned =
+ Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
+ Cond = Cond ? Builder.CreateAnd(Cond, Aligned) : Aligned;
+ }
+ }
+
+ if (Cond) {
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty),
+ llvm::ConstantInt::get(SizeTy, AlignVal),
+ llvm::ConstantInt::get(Int8Ty, TCK)
+ };
+ EmitCheck(Cond, "type_mismatch", StaticData, Address);
+ }
+
+ // If possible, check that the vptr indicates that there is a subobject of
+ // type Ty at offset zero within this object.
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (getLangOpts().SanitizeVptr && TCK != TCK_ConstructorCall &&
+ RD && RD->hasDefinition() && RD->isDynamicClass()) {
+ // Compute a hash of the mangled name of the type.
+ //
+ // FIXME: This is not guaranteed to be deterministic! Move to a
+ // fingerprinting mechanism once LLVM provides one. For the time
+ // being the implementation happens to be deterministic.
+ llvm::SmallString<64> MangledName;
+ llvm::raw_svector_ostream Out(MangledName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
+ Out);
+ llvm::hash_code TypeHash = hash_value(Out.str());
+
+ // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
+ llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
+ llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
+ llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
+ llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
+ llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
+
+ llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
+ Hash = Builder.CreateTrunc(Hash, IntPtrTy);
+
+ // Look the hash up in our cache.
+ const int CacheSize = 128;
+ llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
+ llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
+ "__ubsan_vptr_type_cache");
+ llvm::Value *Slot = Builder.CreateAnd(Hash,
+ llvm::ConstantInt::get(IntPtrTy,
+ CacheSize-1));
+ llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
+ llvm::Value *CacheVal =
+ Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));
+
+ // If the hash isn't in the cache, call a runtime handler to perform the
+ // hard work of checking whether the vptr is for an object of the right
+ // type. This will either fill in the cache and return, or produce a
+ // diagnostic.
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty),
+ CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
+ llvm::ConstantInt::get(Int8Ty, TCK)
+ };
+ llvm::Value *DynamicData[] = { Address, Hash };
+ EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
+ "dynamic_type_cache_miss", StaticData, DynamicData, true);
+ }
}
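
A plain-C++ sketch of the fast path built above; hash_16_bytes mirrors the
function in llvm/ADT/Hashing.h, the cache symbol is the one referenced in the
code, and the remaining names are assumptions for illustration:

    #include <cstdint>
    extern "C" uintptr_t __ubsan_vptr_type_cache[128];

    static uint64_t hash_16_bytes(uint64_t Low, uint64_t High) {
      const uint64_t KMul = 0x9ddfea08eb382d69ULL;
      uint64_t A = (Low ^ High) * KMul;
      A ^= A >> 47;
      uint64_t B = (High ^ A) * KMul;
      B ^= B >> 47;
      return B * KMul;
    }

    static bool vptrHashInCache(void *Object, uint64_t TypeNameHash) {
      uintptr_t VPtr = *reinterpret_cast<uintptr_t *>(Object);
      uintptr_t Hash = (uintptr_t)hash_16_bytes(TypeNameHash, VPtr);
      return __ubsan_vptr_type_cache[Hash & 127] == Hash; // miss => handler
    }
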
@@ -641,11 +664,11 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}
-LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
LValue LV = EmitLValue(E);
if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
- EmitCheck(LV.getAddress(),
- getContext().getTypeSizeInChars(E->getType()).getQuantity());
+ EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
+ E->getType(), LV.getAlignment());
return LV;
}
@@ -672,7 +695,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
llvm_unreachable("cannot emit a property reference directly");
case Expr::ObjCSelectorExprClass:
- return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
+ return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
case Expr::ObjCIsaExprClass:
return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
case Expr::BinaryOperatorClass:
@@ -709,6 +732,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+ case Expr::CXXUuidofExprClass:
+ return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
case Expr::LambdaExprClass:
return EmitLambdaLValue(cast<LambdaExpr>(E));
@@ -1124,7 +1149,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// Get the output type.
llvm::Type *ResLTy = ConvertType(LV.getType());
- unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+ unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
// Compute the result as an OR of all of the individual component accesses.
llvm::Value *Res = 0;
@@ -1322,7 +1347,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Get the output type.
llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
- unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+ unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
@@ -1645,6 +1670,21 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
CharUnits Alignment = getContext().getDeclAlign(ND);
QualType T = E->getType();
+ // A DeclRefExpr for a reference initialized by a constant expression can
+ // appear without being odr-used. Directly emit the constant initializer.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ const Expr *Init = VD->getAnyInitializer(VD);
+ if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
+ VD->isUsableInConstantExpressions(getContext()) &&
+ VD->checkInitIsICE()) {
+ llvm::Constant *Val =
+ CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
+ assert(Val && "failed to emit reference constant expression");
+ // FIXME: Eventually we will want to emit vector element references.
+ return MakeAddrLValue(Val, T, Alignment);
+ }
+ }
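
An assumed example of the case handled above:

    constexpr int N = 42;
    constexpr const int &R = N;  // reference with a constant initializer
    int f() { return R; }        // R may appear here without being odr-used,
                                 // so the known initializer is emitted
                                 // directly rather than requiring a
                                 // definition of R
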
+
// FIXME: We should be able to assert this for FunctionDecls as well!
// FIXME: We should be able to assert this for all DeclRefExprs, not just
// those with a valid source location.
@@ -1655,7 +1695,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (ND->hasAttr<WeakRefAttr>()) {
const ValueDecl *VD = cast<ValueDecl>(ND);
llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
- return MakeAddrLValue(Aliasee, E->getType(), Alignment);
+ return MakeAddrLValue(Aliasee, T, Alignment);
}
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
@@ -1683,9 +1723,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
- CharUnits alignment = getContext().getDeclAlign(VD);
return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
- E->getType(), alignment);
+ T, Alignment);
}
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
@@ -1736,8 +1775,8 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But, we continue to generate __strong write barrier on indirect write
// into a pointer to object.
- if (getContext().getLangOpts().ObjC1 &&
- getContext().getLangOpts().getGC() != LangOptions::NonGC &&
+ if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
return LV;
@@ -1815,8 +1854,9 @@ GetAddrOfConstantWideString(StringRef Str,
static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
SmallString<32>& Target) {
Target.resize(CharByteWidth * (Source.size() + 1));
- char* ResultPtr = &Target[0];
- bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr);
+ char *ResultPtr = &Target[0];
+ const UTF8 *ErrorPtr;
+ bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
(void)success;
assert(success);
Target.resize(ResultPtr - &Target[0]);
@@ -1888,33 +1928,167 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
}
}
-llvm::BasicBlock *CodeGenFunction::getTrapBB() {
- const CodeGenOptions &GCO = CGM.getCodeGenOpts();
+/// Emit a type description suitable for use by a runtime sanitizer library. The
+/// format of a type descriptor is
+///
+/// \code
+/// { i16 TypeKind, i16 TypeInfo }
+/// \endcode
+///
+/// followed by an array of i8 containing the type name. TypeKind is 0 for an
+/// integer, 1 for a floating point value, and -1 for anything else.
+llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
+ // FIXME: Only emit each type's descriptor once.
+ uint16_t TypeKind = -1;
+ uint16_t TypeInfo = 0;
+
+ if (T->isIntegerType()) {
+ TypeKind = 0;
+ TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
+ T->isSignedIntegerType();
+ } else if (T->isFloatingType()) {
+ TypeKind = 1;
+ TypeInfo = getContext().getTypeSize(T);
+ }
+
+ // Format the type name as if for a diagnostic, including quotes and
+ // optionally an 'aka'.
+ llvm::SmallString<32> Buffer;
+ CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
+ (intptr_t)T.getAsOpaquePtr(),
+ 0, 0, 0, 0, 0, 0, Buffer,
+ ArrayRef<intptr_t>());
+
+ llvm::Constant *Components[] = {
+ Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
+ llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
+ };
+ llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ Descriptor);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
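
For reference, an assumed C++ mirror of the descriptor constant built above,
as a sanitizer runtime might declare it:

    struct TypeDescriptor {
      uint16_t TypeKind;   // 0 = integer, 1 = floating point, 0xffff = other
      uint16_t TypeInfo;   // integer: (log2(bit width) << 1) | is_signed;
                           // float: bit width
      char TypeName[1];    // NUL-terminated diagnostic name, e.g. "'int'"
    };
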
- // If we are not optimzing, don't collapse all calls to trap in the function
- // to the same call, that way, in the debugger they can see which operation
- // did in fact fail. If we are optimizing, we collapse all calls to trap down
- // to just one per function to save on codesize.
- if (GCO.OptimizationLevel && TrapBB)
- return TrapBB;
+llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
+ llvm::Type *TargetTy = IntPtrTy;
+
+ // Integers which fit in intptr_t are zero-extended and passed directly.
+ if (V->getType()->isIntegerTy() &&
+ V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
+ return Builder.CreateZExt(V, TargetTy);
+
+ // Pointers are passed directly; everything else is passed by address.
+ if (!V->getType()->isPointerTy()) {
+ llvm::Value *Ptr = Builder.CreateAlloca(V->getType());
+ Builder.CreateStore(V, Ptr);
+ V = Ptr;
+ }
+ return Builder.CreatePtrToInt(V, TargetTy);
+}
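
On an assumed 64-bit target (IntPtrTy == i64), the three cases above come out
as:

    // i32 %v    ->  zext i32 %v to i64              (integer that fits)
    // i8* %p    ->  ptrtoint i8* %p to i64          (pointer)
    // double %d ->  stored to an alloca whose
    //               address is passed via ptrtoint  (everything else)
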
+
+/// \brief Emit a representation of a SourceLocation for passing to a handler
+/// in a sanitizer runtime library. The format for this data is:
+/// \code
+/// struct SourceLocation {
+/// const char *Filename;
+/// int32_t Line, Column;
+/// };
+/// \endcode
+/// For an invalid SourceLocation, the Filename pointer is null.
+llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
+ PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
+
+ llvm::Constant *Data[] = {
+ // FIXME: Only emit each file name once.
+ PLoc.isValid() ? cast<llvm::Constant>(
+ Builder.CreateGlobalStringPtr(PLoc.getFilename()))
+ : llvm::Constant::getNullValue(Int8PtrTy),
+ Builder.getInt32(PLoc.getLine()),
+ Builder.getInt32(PLoc.getColumn())
+ };
- llvm::BasicBlock *Cont = 0;
- if (HaveInsertPoint()) {
- Cont = createBasicBlock("cont");
- EmitBranch(Cont);
+ return llvm::ConstantStruct::getAnon(Data);
+}
+
+void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
+ llvm::ArrayRef<llvm::Constant *> StaticArgs,
+ llvm::ArrayRef<llvm::Value *> DynamicArgs,
+ bool Recoverable) {
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+
+ llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName);
+ Builder.CreateCondBr(Checked, Cont, Handler);
+ EmitBlock(Handler);
+
+ llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
+ llvm::GlobalValue *InfoPtr =
+ new llvm::GlobalVariable(CGM.getModule(), Info->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, Info);
+ InfoPtr->setUnnamedAddr(true);
+
+ llvm::SmallVector<llvm::Value *, 4> Args;
+ llvm::SmallVector<llvm::Type *, 4> ArgTypes;
+ Args.reserve(DynamicArgs.size() + 1);
+ ArgTypes.reserve(DynamicArgs.size() + 1);
+
+ // Handler functions take an i8* pointing to the (handler-specific) static
+ // information block, followed by a sequence of intptr_t arguments
+ // representing operand values.
+ Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
+ ArgTypes.push_back(Int8PtrTy);
+ for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
+ Args.push_back(EmitCheckValue(DynamicArgs[i]));
+ ArgTypes.push_back(IntPtrTy);
+ }
+
+ llvm::FunctionType *FnType =
+ llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
+ llvm::AttrBuilder B;
+ if (!Recoverable) {
+ B.addAttribute(llvm::Attributes::NoReturn)
+ .addAttribute(llvm::Attributes::NoUnwind);
+ }
+ B.addAttribute(llvm::Attributes::UWTable);
+ llvm::Value *Fn = CGM.CreateRuntimeFunction(FnType,
+ ("__ubsan_handle_" + CheckName).str(),
+ llvm::Attributes::get(getLLVMContext(),
+ B));
+ llvm::CallInst *HandlerCall = Builder.CreateCall(Fn, Args);
+ if (Recoverable) {
+ Builder.CreateBr(Cont);
+ } else {
+ HandlerCall->setDoesNotReturn();
+ HandlerCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
}
- TrapBB = createBasicBlock("trap");
- EmitBlock(TrapBB);
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
- llvm::CallInst *TrapCall = Builder.CreateCall(F);
- TrapCall->setDoesNotReturn();
- TrapCall->setDoesNotThrow();
- Builder.CreateUnreachable();
+ EmitBlock(Cont);
+}
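
Under this convention, a handler for the "type_mismatch" check emitted
earlier would be declared roughly as follows (hypothetical prototype; the
actual runtime uses a typed static-data structure):

    extern "C" void __ubsan_handle_type_mismatch(void *StaticData,
                                                 uintptr_t Pointer);
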
- if (Cont)
- EmitBlock(Cont);
- return TrapBB;
+void CodeGenFunction::EmitTrapvCheck(llvm::Value *Checked) {
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+
+ // If we're optimizing, collapse all calls to trap down to just one per
+ // function to save on code size.
+ if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
+ TrapBB = createBasicBlock("trap");
+ Builder.CreateCondBr(Checked, Cont, TrapBB);
+ EmitBlock(TrapBB);
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
+ llvm::CallInst *TrapCall = Builder.CreateCall(F);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+ } else {
+ Builder.CreateCondBr(Checked, Cont, TrapBB);
+ }
+
+ EmitBlock(Cont);
}
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
@@ -2007,14 +2181,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Propagate the alignment from the array itself to the result.
ArrayAlignment = ArrayLV.getAlignment();
- if (getContext().getLangOpts().isSignedOverflowDefined())
+ if (getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
} else {
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
- if (getContext().getLangOpts().isSignedOverflowDefined())
+ if (getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(Base, Idx, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
@@ -2037,8 +2211,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
- if (getContext().getLangOpts().ObjC1 &&
- getContext().getLangOpts().getGC() != LangOptions::NonGC) {
+ if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
}
@@ -2114,11 +2288,13 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
LValue BaseLV;
- if (E->isArrow())
- BaseLV = MakeNaturalAlignAddrLValue(EmitScalarExpr(BaseExpr),
- BaseExpr->getType()->getPointeeType());
- else
- BaseLV = EmitLValue(BaseExpr);
+ if (E->isArrow()) {
+ llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
+ QualType PtrTy = BaseExpr->getType()->getPointeeType();
+ EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
+ BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
+ } else
+ BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
NamedDecl *ND = E->getMemberDecl();
if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
@@ -2355,7 +2531,10 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dependent:
llvm_unreachable("dependent cast kind in IR gen!");
-
+
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
// These two casts are currently treated as no-ops, although they could
// potentially be real operations depending on the target's ABI.
case CK_NonAtomicToAtomic:
@@ -2546,7 +2725,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXPseudoDestructorExpr *PseudoDtor
= dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
QualType DestroyedType = PseudoDtor->getDestroyedType();
- if (getContext().getLangOpts().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
DestroyedType->isObjCLifetimeType() &&
(DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
@@ -2635,7 +2814,7 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
}
RValue RV = EmitAnyExpr(E->getRHS());
- LValue LV = EmitLValue(E->getLHS());
+ LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
EmitStoreThroughLValue(RV, LV);
return LV;
}
@@ -2677,6 +2856,14 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
+llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
+ return CGM.GetAddrOfUuidDescriptor(E);
+}
+
+LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
+ return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
+}
+
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
@@ -2977,11 +3164,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
uint64_t Size = sizeChars.getQuantity();
CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
unsigned Align = alignChars.getQuantity();
- unsigned MaxInlineWidth =
- getContext().getTargetInfo().getMaxAtomicInlineWidth();
- bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
-
-
+ unsigned MaxInlineWidthInBits =
+ getContext().getTargetInfo().getMaxAtomicInlineWidth();
+ bool UseLibcall = (Size != Align ||
+ getContext().toBits(sizeChars) > MaxInlineWidthInBits);
llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
Ptr = EmitScalarExpr(E->getPtr());
@@ -3177,6 +3363,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
return ConvertTempToRValue(*this, E->getType(), Dest);
}
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
llvm::Type *IPtrTy =
llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
llvm::Value *OrigDest = Dest;
@@ -3194,14 +3387,20 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
+ if (IsStore)
+ break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Acquire);
break;
case 3: // memory_order_release
+ if (IsLoad)
+ break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Release);
break;
case 4: // memory_order_acq_rel
+ if (IsLoad || IsStore)
+ break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::AcquireRelease);
break;
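
An example of the undefined behavior these guards tolerate; previously such
code could crash codegen, now the invalid operation is simply not emitted:

    _Atomic(int) a;
    __c11_atomic_store(&a, 1, __ATOMIC_ACQUIRE);  // invalid order for a store
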
@@ -3221,13 +3420,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Long case, when Order isn't obviously constant.
- bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
- E->getOp() == AtomicExpr::AO__atomic_store ||
- E->getOp() == AtomicExpr::AO__atomic_store_n;
- bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
- E->getOp() == AtomicExpr::AO__atomic_load ||
- E->getOp() == AtomicExpr::AO__atomic_load_n;
-
// Create all the relevant BB's
llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
*AcqRelBB = 0, *SeqCstBB = 0;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
index 61f7362..718e8f9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -549,8 +549,10 @@ AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
switch (E->getCastKind()) {
case CK_Dynamic: {
+ // FIXME: Can this actually happen? We have no test coverage for it.
assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
- LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
+ LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
+ CodeGenFunction::TCK_Load);
// FIXME: Do we also need to handle property references here?
if (LV.isSimple())
CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
@@ -645,6 +647,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -771,7 +774,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
Visit(E->getRHS());
// Now emit the LHS and copy into it.
- LValue LHS = CGF.EmitLValue(E->getLHS());
+ LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
EmitCopy(E->getLHS()->getType(),
AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
@@ -1205,7 +1208,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
// C++ objects with a user-declared constructor don't need zero'ing.
- if (CGF.getContext().getLangOpts().CPlusPlus)
+ if (CGF.getLangOpts().CPlusPlus)
if (const RecordType *RT = CGF.getContext()
.getBaseElementType(E->getType())->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -1271,10 +1274,11 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
bool isVolatile,
- CharUnits alignment) {
+ CharUnits alignment,
+ bool isAssignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- if (getContext().getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
assert((Record->hasTrivialCopyConstructor() ||
@@ -1300,9 +1304,13 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
- // Get size and alignment info for this aggregate.
- std::pair<CharUnits, CharUnits> TypeInfo =
- getContext().getTypeInfoInChars(Ty);
+ // Get data size and alignment info for this aggregate. If this is an
+ // assignment, don't copy the tail padding. Otherwise copying it is fine.
+ std::pair<CharUnits, CharUnits> TypeInfo;
+ if (isAssignment)
+ TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
+ else
+ TypeInfo = getContext().getTypeInfoInChars(Ty);
if (alignment.isZero())
alignment = TypeInfo.second;
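
Why assignment copies only the data size (a sketch; under the Itanium ABI a
derived class may reuse a non-POD base's tail padding):

    struct Base { Base(); int i; char c; };  // sizeof == 8, data size == 5
    struct D : Base { char x, y, z; };       // x,y,z may sit in Base's padding
    // Copying sizeof(Base) bytes when assigning to the Base subobject of a D
    // would clobber x, y and z; copying only the data size does not.
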
@@ -1359,11 +1367,17 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
}
}
}
+
+ // Determine the metadata to describe the position of any padding in this
+ // memcpy, as well as the TBAA tags for the members of the struct, in case
+ // the optimizer wishes to expand it into scalar memory operations.
+ llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
Builder.CreateMemCpy(DestPtr, SrcPtr,
llvm::ConstantInt::get(IntPtrTy,
TypeInfo.first.getQuantity()),
- alignment.getQuantity(), isVolatile);
+ alignment.getQuantity(), isVolatile,
+ /*TBAATag=*/0, TBAAStructTag);
}
void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
index 31ea1b5..7f640f6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -24,6 +24,7 @@ using namespace clang;
using namespace CodeGen;
RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ SourceLocation CallLoc,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
llvm::Value *This,
@@ -33,6 +34,13 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
+ // C++11 [class.mfct.non-static]p2:
+ // If a non-static member function of a class X is called for an object that
+ // is not of type X, or of a type derived from X, the behavior is undefined.
+ EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
+ : TCK_MemberCall,
+ CallLoc, This, getContext().getRecordType(MD->getParent()));
+
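
An illustrative instance of the undefined behavior this check diagnoses:

    struct X { void f() {} };
    void g() {
      X *p = 0;
      p->f();   // called on something that is not an X: undefined behavior
    }
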
CallArgList Args;
// Push the this ptr.
@@ -168,8 +176,9 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
CGDebugInfo *DI = getDebugInfo();
- if (DI && CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo
- && !isa<CallExpr>(ME->getBase())) {
+ if (DI &&
+ CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo &&
+ !isa<CallExpr>(ME->getBase())) {
QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
DI->getOrCreateRecordType(PTy->getPointeeType(),
@@ -235,7 +244,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
// We don't like to generate the trivial copy/move assignment operator
// when it isn't necessary; just produce the proper effect here.
llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateCopy(This, RHS, CE->getType());
+ EmitAggregateAssign(This, RHS, CE->getType());
return RValue::get(This);
}
@@ -251,16 +260,16 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
// Compute the function type we're calling.
+ const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ? DevirtualizedMethod : MD;
const CGFunctionInfo *FInfo = 0;
- if (isa<CXXDestructorDecl>(MD))
- FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor,
Dtor_Complete);
- else if (isa<CXXConstructorDecl>(MD))
- FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
- cast<CXXConstructorDecl>(MD),
- Ctor_Complete);
+ else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
+ Ctor_Complete);
else
- FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
+ FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
@@ -277,7 +286,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (UseVirtualCall) {
Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
} else {
- if (getContext().getLangOpts().AppleKext &&
+ if (getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
@@ -295,7 +304,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
} else if (UseVirtualCall) {
Callee = BuildVirtualCall(MD, This, Ty);
} else {
- if (getContext().getLangOpts().AppleKext &&
+ if (getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
@@ -306,8 +315,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
}
- return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
- CE->arg_begin(), CE->arg_end());
+ return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
+ /*VTT=*/0, CE->arg_begin(), CE->arg_end());
}
RValue
@@ -337,6 +346,9 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
else
This = EmitLValue(BaseExpr).getAddress();
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
+ QualType(MPT->getClass(), 0));
+
// Ask the ABI to load the callee. Note that This is modified.
llvm::Value *Callee =
CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
@@ -370,13 +382,13 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
MD->isTrivial()) {
llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
QualType Ty = E->getType();
- EmitAggregateCopy(This, Src, Ty);
+ EmitAggregateAssign(This, Src, Ty);
return RValue::get(This);
}
llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
- return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
- E->arg_begin() + 1, E->arg_end());
+ return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
+ /*VTT=*/0, E->arg_begin() + 1, E->arg_end());
}
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
@@ -457,7 +469,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// Elide the constructor if we're constructing from a temporary.
// The temporary check is required because Sema sets this on NRVO
// returns.
- if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
+ if (getLangOpts().ElideConstructors && E->isElidable()) {
assert(getContext().hasSameUnqualifiedType(E->getType(),
E->getArg(0)->getType()));
if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
@@ -878,7 +890,7 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
if (constNum->getZExtValue() <= initializerElements) {
// If there was a cleanup, deactivate it.
if (cleanupDominator)
- DeactivateCleanupBlock(cleanup, cleanupDominator);;
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
return;
}
} else {
@@ -949,7 +961,6 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
if (E->isArray()) {
if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
CXXConstructorDecl *Ctor = CCE->getConstructor();
- bool RequiresZeroInitialization = false;
if (Ctor->isTrivial()) {
// If new expression did not specify value-initialization, then there
// is no initialization.
@@ -962,13 +973,11 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
return;
}
-
- RequiresZeroInitialization = true;
}
CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
CCE->arg_begin(), CCE->arg_end(),
- RequiresZeroInitialization);
+ CCE->requiresZeroInitialization());
return;
} else if (Init && isa<ImplicitValueInitExpr>(Init) &&
CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
@@ -1230,8 +1239,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::BasicBlock *contBB = 0;
llvm::Value *allocation = RV.getScalarVal();
- unsigned AS =
- cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
+ unsigned AS = allocation->getType()->getPointerAddressSpace();
// The null-check means that the initializer is conditionally
// evaluated.
@@ -1377,8 +1385,14 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
if (UseGlobalDelete) {
// If we're supposed to call the global delete, make sure we do so
// even if the destructor throws.
+
+ // Derive the complete-object pointer, which is what we need
+ // to pass to the deallocation function.
+ llvm::Value *completePtr =
+ CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);
+
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- Ptr, OperatorDelete,
+ completePtr, OperatorDelete,
ElementType);
}
@@ -1390,8 +1404,9 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
= CGF.BuildVirtualCall(Dtor,
UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
Ptr, Ty);
- CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
- 0, 0);
+ // FIXME: Provide a source location here.
+ CGF.EmitCXXMemberCall(Dtor, SourceLocation(), Callee, ReturnValueSlot(),
+ Ptr, /*VTT=*/0, 0, 0);
if (UseGlobalDelete) {
CGF.PopCleanupBlock();
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
index 0233745..66b6f86 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -427,6 +427,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -640,7 +641,7 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
LValue LV = EmitCompoundAssignLValue(E, Func, Val);
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -675,7 +676,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
LValue LV = EmitBinAssignLValue(E, Val);
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
index a17a436..206f74a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -24,7 +24,7 @@
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -79,12 +79,12 @@ private:
CharUnits getAlignment(const llvm::Constant *C) const {
if (Packed) return CharUnits::One();
return CharUnits::fromQuantity(
- CGM.getTargetData().getABITypeAlignment(C->getType()));
+ CGM.getDataLayout().getABITypeAlignment(C->getType()));
}
CharUnits getSizeInChars(const llvm::Constant *C) const {
return CharUnits::fromQuantity(
- CGM.getTargetData().getTypeAllocSize(C->getType()));
+ CGM.getDataLayout().getTypeAllocSize(C->getType()));
}
};
@@ -204,7 +204,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (!FitsCompletelyInPreviousByte) {
unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
- if (CGM.getTargetData().isBigEndian()) {
+ if (CGM.getDataLayout().isBigEndian()) {
Tmp = Tmp.lshr(NewFieldWidth);
Tmp = Tmp.trunc(BitsInPreviousByte);
@@ -220,7 +220,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
}
Tmp = Tmp.zext(CharWidth);
- if (CGM.getTargetData().isBigEndian()) {
+ if (CGM.getDataLayout().isBigEndian()) {
if (FitsCompletelyInPreviousByte)
Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
} else {
@@ -269,7 +269,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
while (FieldValue.getBitWidth() > CharWidth) {
llvm::APInt Tmp;
- if (CGM.getTargetData().isBigEndian()) {
+ if (CGM.getDataLayout().isBigEndian()) {
// We want the high bits.
Tmp =
FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
@@ -292,7 +292,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
"Should not have more than a byte left!");
if (FieldValue.getBitWidth() < CharWidth) {
- if (CGM.getTargetData().isBigEndian()) {
+ if (CGM.getDataLayout().isBigEndian()) {
unsigned BitWidth = FieldValue.getBitWidth();
FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
@@ -337,7 +337,7 @@ void ConstStructBuilder::ConvertStructToPacked() {
llvm::Constant *C = Elements[i];
CharUnits ElementAlign = CharUnits::fromQuantity(
- CGM.getTargetData().getABITypeAlignment(C->getType()));
+ CGM.getDataLayout().getABITypeAlignment(C->getType()));
CharUnits AlignedElementOffsetInChars =
ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
@@ -379,7 +379,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
unsigned FieldNo = 0;
unsigned ElementNo = 0;
const FieldDecl *LastFD = 0;
- bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ bool IsMsStruct = RD->isMsStruct(CGM.getContext());
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
@@ -478,7 +478,7 @@ void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
unsigned FieldNo = 0;
const FieldDecl *LastFD = 0;
- bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ bool IsMsStruct = RD->isMsStruct(CGM.getContext());
uint64_t OffsetBits = CGM.getContext().toBits(Offset);
for (RecordDecl::field_iterator Field = RD->field_begin(),
@@ -665,8 +665,8 @@ public:
SmallVector<llvm::Type*, 2> Types;
Elts.push_back(C);
Types.push_back(C->getType());
- unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
- unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(destType);
+ unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType);
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
@@ -691,6 +691,9 @@ public:
case CK_Dependent: llvm_unreachable("saw dependent cast!");
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
case CK_ReinterpretMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
@@ -811,11 +814,7 @@ public:
return llvm::ConstantArray::get(AType, Elts);
}
- llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
- return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
- }
-
- llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+ llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) {
return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
}
@@ -828,10 +827,7 @@ public:
return EmitArrayInitialization(ILE);
if (ILE->getType()->isRecordType())
- return EmitStructInitialization(ILE);
-
- if (ILE->getType()->isUnionType())
- return EmitUnionInitialization(ILE);
+ return EmitRecordInitialization(ILE);
return 0;
}
@@ -999,6 +995,9 @@ public:
T = Typeid->getExprOperand()->getType();
return CGM.GetAddrOfRTTIDescriptor(T);
}
+ case Expr::CXXUuidofExprClass: {
+ return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
+ }
}
return 0;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
index 1cccafe..b429b1d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -28,7 +28,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <cstdarg>
using namespace clang;
@@ -45,6 +45,7 @@ struct BinOpInfo {
Value *RHS;
QualType Ty; // Computation Type.
BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
+ bool FPContractable;
const Expr *E; // Entire expr, for error unsupported. May not be binop.
};
@@ -80,7 +81,11 @@ public:
llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
- LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
+ LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
+ return CGF.EmitCheckedLValue(E, TCK);
+ }
+
+ void EmitBinOpCheck(Value *Check, const BinOpInfo &Info);
Value *EmitLoadOfLValue(LValue LV) {
return CGF.EmitLoadOfLValue(LV).getScalarVal();
@@ -90,13 +95,19 @@ public:
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E));
+ return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load));
}
/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *EmitConversionToBool(Value *Src, QualType DstTy);
+ /// \brief Emit a check that a conversion to or from a floating-point type
+ /// does not overflow.
+ void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
+ Value *Src, QualType SrcType,
+ QualType DstType, llvm::Type *DstTy);
+
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
@@ -391,34 +402,26 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(Ops);
}
}
-
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
- bool isTrapvOverflowBehavior() {
- return CGF.getContext().getLangOpts().getSignedOverflowBehavior()
- == LangOptions::SOB_Trapping;
- }
/// Create a binary op that checks for overflow.
/// Currently only supports +, - and *.
Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
- // Emit the overflow BB when -ftrapv option is activated.
- void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
- Builder.SetInsertPoint(overflowBB);
- llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
- Builder.CreateCall(Trap);
- Builder.CreateUnreachable();
- }
+
// Check for undefined division and modulus behaviors.
void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
llvm::Value *Zero,bool isDiv);
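The reordered switch above is the heart of this hunk: SOB_Undefined now falls through to the checked lowering whenever -fsanitize=signed-integer-overflow is enabled, instead of unconditionally emitting an NSW multiply. A compact sketch of the three-way decision (the enum and function names are illustrative, not Clang API):

    #include <cassert>

    enum class SignedOverflow { Defined, Undefined, Trapping };
    enum class MulLowering { PlainMul, NSWMul, CheckedMul };

    // -fwrapv gives a plain wrapping mul; plain UB gives an NSW mul;
    // either -ftrapv or the overflow sanitizer routes to the checked
    // lowering.
    MulLowering chooseMulLowering(SignedOverflow behavior, bool sanitize) {
      switch (behavior) {
      case SignedOverflow::Defined:
        return MulLowering::PlainMul;
      case SignedOverflow::Undefined:
        if (!sanitize)
          return MulLowering::NSWMul;
        [[fallthrough]]; // same fall-through as the patch
      case SignedOverflow::Trapping:
        return MulLowering::CheckedMul;
      }
      return MulLowering::PlainMul; // unreachable
    }

    int main() {
      assert(chooseMulLowering(SignedOverflow::Undefined, false) ==
             MulLowering::NSWMul);
      assert(chooseMulLowering(SignedOverflow::Undefined, true) ==
             MulLowering::CheckedMul);
    }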
@@ -537,6 +540,110 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
return EmitPointerToBoolConversion(Src);
}
+void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
+ QualType OrigSrcType,
+ Value *Src, QualType SrcType,
+ QualType DstType,
+ llvm::Type *DstTy) {
+ using llvm::APFloat;
+ using llvm::APSInt;
+
+ llvm::Type *SrcTy = Src->getType();
+
+ llvm::Value *Check = 0;
+ if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
+ // Integer to floating-point. This can fail for unsigned short -> __half
+ // or unsigned __int128 -> float.
+ assert(DstType->isFloatingType());
+ bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
+
+ APFloat LargestFloat =
+ APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
+ APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
+
+ bool IsExact;
+ if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
+ &IsExact) != APFloat::opOK)
+ // The range of representable values of this floating point type includes
+ // all values of this integer type. Don't need an overflow check.
+ return;
+
+ llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
+ if (SrcIsUnsigned)
+ Check = Builder.CreateICmpULE(Src, Max);
+ else {
+ llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
+ llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
+ llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
+ Check = Builder.CreateAnd(GE, LE);
+ }
+ } else {
+ // Floating-point to integer or floating-point to floating-point. This has
+ // undefined behavior if the source is +-Inf, NaN, or doesn't fit into the
+ // destination type.
+ const llvm::fltSemantics &SrcSema =
+ CGF.getContext().getFloatTypeSemantics(OrigSrcType);
+ APFloat MaxSrc(SrcSema, APFloat::uninitialized);
+ APFloat MinSrc(SrcSema, APFloat::uninitialized);
+
+ if (isa<llvm::IntegerType>(DstTy)) {
+ unsigned Width = CGF.getContext().getIntWidth(DstType);
+ bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
+
+ APSInt Min = APSInt::getMinValue(Width, Unsigned);
+ if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for lower bound. Just check for
+ // -Inf/NaN.
+ MinSrc = APFloat::getLargest(SrcSema, true);
+
+ APSInt Max = APSInt::getMaxValue(Width, Unsigned);
+ if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for upper bound. Just check for
+ // +Inf/NaN.
+ MaxSrc = APFloat::getLargest(SrcSema, false);
+ } else {
+ const llvm::fltSemantics &DstSema =
+ CGF.getContext().getFloatTypeSemantics(DstType);
+ bool IsInexact;
+
+ MinSrc = APFloat::getLargest(DstSema, true);
+ if (MinSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) &
+ APFloat::opOverflow)
+ MinSrc = APFloat::getLargest(SrcSema, true);
+
+ MaxSrc = APFloat::getLargest(DstSema, false);
+ if (MaxSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) &
+ APFloat::opOverflow)
+ MaxSrc = APFloat::getLargest(SrcSema, false);
+ }
+
+ // If we're converting from __half, convert the range to float to match
+ // the type of src.
+ if (OrigSrcType->isHalfType()) {
+ const llvm::fltSemantics &Sema =
+ CGF.getContext().getFloatTypeSemantics(SrcType);
+ bool IsInexact;
+ MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ }
+
+ llvm::Value *GE =
+ Builder.CreateFCmpOGE(Src, llvm::ConstantFP::get(VMContext, MinSrc));
+ llvm::Value *LE =
+ Builder.CreateFCmpOLE(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
+ Check = Builder.CreateAnd(GE, LE);
+ }
+
+ // FIXME: Provide a SourceLocation.
+ llvm::Constant *StaticArgs[] = {
+ CGF.EmitCheckTypeDescriptor(OrigSrcType),
+ CGF.EmitCheckTypeDescriptor(DstType)
+ };
+ CGF.EmitCheck(Check, "float_cast_overflow", StaticArgs, OrigSrc);
+}
+
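The notable property of EmitFloatConversionCheck is that the safe source range is computed entirely at compile time with APFloat/APSInt; the emitted IR is just two ordered compares. A host-side sketch of the bounds it derives for a float -> int32_t cast (the literal bounds below are the round-toward-zero conversions of INT_MIN/INT_MAX to float):

    #include <cassert>
    #include <cmath>

    // Mirror of the float -> int32_t case: INT_MIN converts to float
    // exactly (-2^31); INT_MAX rounded toward zero is 2147483520.0f, the
    // largest float not exceeding 2^31-1. NaN fails both ordered
    // compares, matching the FCmpOGE/FCmpOLE pair in the patch.
    bool floatFitsInInt32(float Src) {
      const float MinSrc = -2147483648.0f;
      const float MaxSrc = 2147483520.0f;
      return Src >= MinSrc && Src <= MaxSrc;
    }

    int main() {
      assert(floatFitsInInt32(2147483520.0f));   // largest passing value
      assert(!floatFitsInInt32(2147483648.0f));  // 2^31 would overflow
      assert(!floatFitsInInt32(std::nanf("")));  // NaN is rejected
    }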
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
@@ -547,6 +654,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (DstType->isVoidType()) return 0;
+ llvm::Value *OrigSrc = Src;
+ QualType OrigSrcType = SrcType;
llvm::Type *SrcTy = Src->getType();
// Floating casts might be a bit special: if we're doing casts to / from half
@@ -620,6 +729,12 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
Value *Res = NULL;
llvm::Type *ResTy = DstTy;
+ // An overflowing conversion has undefined behavior if either the source type
+ // or the destination type is a floating-point type.
+ if (CGF.getLangOpts().SanitizeFloatCastOverflow &&
+ (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
+ EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy);
+
// Cast to half via float
if (DstType->isHalfType())
DstTy = CGF.FloatTy;
@@ -686,6 +801,54 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
return llvm::Constant::getNullValue(ConvertType(Ty));
}
+/// \brief Emit a sanitization check for the given "binary" operation (which
+/// might actually be a unary increment which has been lowered to a binary
+/// operation). The check passes if \p Check, which is an \c i1, is \c true.
+void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
+ StringRef CheckName;
+ llvm::SmallVector<llvm::Constant *, 4> StaticData;
+ llvm::SmallVector<llvm::Value *, 2> DynamicData;
+
+ BinaryOperatorKind Opcode = Info.Opcode;
+ if (BinaryOperator::isCompoundAssignmentOp(Opcode))
+ Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
+
+ StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
+ const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
+ if (UO && UO->getOpcode() == UO_Minus) {
+ CheckName = "negate_overflow";
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
+ DynamicData.push_back(Info.RHS);
+ } else {
+ if (BinaryOperator::isShiftOp(Opcode)) {
+ // Shift LHS negative or too large, or RHS out of bounds.
+ CheckName = "shift_out_of_bounds";
+ const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
+ StaticData.push_back(
+ CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
+ StaticData.push_back(
+ CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
+ } else if (Opcode == BO_Div || Opcode == BO_Rem) {
+ // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
+ CheckName = "divrem_overflow";
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.E->getType()));
+ } else {
+ // Signed arithmetic overflow (+, -, *).
+ switch (Opcode) {
+ case BO_Add: CheckName = "add_overflow"; break;
+ case BO_Sub: CheckName = "sub_overflow"; break;
+ case BO_Mul: CheckName = "mul_overflow"; break;
+ default: llvm_unreachable("unexpected opcode for bin op check");
+ }
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.E->getType()));
+ }
+ DynamicData.push_back(Info.LHS);
+ DynamicData.push_back(Info.RHS);
+ }
+
+ CGF.EmitCheck(Check, CheckName, StaticData, DynamicData);
+}
+
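EmitBinOpCheck's split between StaticData (source location and type descriptors, emitted as constants) and DynamicData (the operand values) defines what a sanitizer runtime handler receives. Purely as an illustration — the real ubsan runtime ABI differs in naming and layout — a handler for a failed mul_overflow check might look like:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical static blob: everything here is known at compile time.
    struct StaticCheckData {
      const char *File;        // cf. EmitCheckSourceLocation
      unsigned Line, Column;
      const char *TypeName;    // cf. EmitCheckTypeDescriptor
    };

    // Hypothetical handler: the static blob arrives by address, the
    // dynamic operand values arrive directly.
    extern "C" void handle_mul_overflow(const StaticCheckData *Data,
                                        uint64_t LHS, uint64_t RHS) {
      std::fprintf(stderr, "%s:%u:%u: overflow in '%s' mul: %llu * %llu\n",
                   Data->File, Data->Line, Data->Column, Data->TypeName,
                   (unsigned long long)LHS, (unsigned long long)RHS);
    }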
//===----------------------------------------------------------------------===//
// Visitor Methods
//===----------------------------------------------------------------------===//
@@ -802,7 +965,8 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
// debug info size.
CGDebugInfo *DI = CGF.getDebugInfo();
if (DI &&
- CGF.CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
+ CGF.CGM.getCodeGenOpts().getDebugInfo()
+ == CodeGenOptions::LimitedDebugInfo) {
QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
@@ -1032,7 +1196,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// are in the same order as in the CastKind enum.
switch (Kind) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
-
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Value *V = EmitLValue(E).getAddress();
@@ -1055,19 +1221,18 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Visit(const_cast<Expr*>(E));
case CK_BaseToDerived: {
- const CXXRecordDecl *DerivedClassDecl =
- DestTy->getCXXRecordDeclForPointerType();
-
- return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
+ const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
+ assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
+
+ return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
CE->path_begin(), CE->path_end(),
ShouldNullCheckClassCastValue(CE));
}
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const RecordType *DerivedClassTy =
- E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
- CXXRecordDecl *DerivedClassDecl =
- cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+ const CXXRecordDecl *DerivedClassDecl =
+ E->getType()->getPointeeCXXRecordDecl();
+ assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
CE->path_begin(), CE->path_end(),
@@ -1248,17 +1413,20 @@ llvm::Value *ScalarExprEmitter::
EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
llvm::Value *InVal,
llvm::Value *NextVal, bool IsInc) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ // Fall through.
case LangOptions::SOB_Trapping:
BinOpInfo BinOp;
BinOp.LHS = InVal;
BinOp.RHS = NextVal;
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Add;
+ BinOp.FPContractable = false;
BinOp.E = E;
return EmitOverflowCheckedBinOp(BinOp);
}
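The SOB_Trapping case above lands in EmitOverflowCheckedBinOp, which lowers to llvm.sadd.with.overflow and friends: an aggregate of the wrapped result plus an i1 overflow flag. The same pair is reachable from C++ via the GCC/Clang checked-arithmetic builtins; a sketch:

    #include <cassert>
    #include <climits>

    // __builtin_add_overflow returns true on overflow and writes the
    // wrapped result either way -- the C-level twin of the {result, i1}
    // pair extracted from llvm.sadd.with.overflow above.
    bool checkedAdd(int a, int b, int &out) {
      return !__builtin_add_overflow(a, b, &out); // true == no overflow
    }

    int main() {
      int r;
      assert(checkedAdd(1, 2, r) && r == 3);
      assert(!checkedAdd(INT_MAX, 1, r)); // would trap under -ftrapv
    }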
@@ -1300,7 +1468,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Most common case by far: integer increment.
} else if (type->isIntegerType()) {
- llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
// Note that signed integer inc/dec with width less than int can't
// overflow because of promotion rules; we're just eliding a few steps here.
@@ -1320,7 +1488,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).first;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
else
value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
@@ -1330,7 +1498,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = Builder.getInt32(amount);
value = CGF.EmitCastToVoidPtr(value);
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.funcptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
@@ -1339,7 +1507,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For everything else, we can just do a simple increment.
} else {
llvm::Value *amt = Builder.getInt32(amount);
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.ptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
@@ -1400,7 +1568,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *sizeValue =
llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
else
value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
@@ -1444,6 +1612,7 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Sub;
+ BinOp.FPContractable = false;
BinOp.E = E;
return EmitSub(BinOp);
}
@@ -1652,6 +1821,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
Result.Opcode = E->getOpcode();
+ Result.FPContractable = E->isFPContractable();
Result.E = E;
return Result;
}
@@ -1678,9 +1848,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
OpInfo.Opcode = E->getOpcode();
+ OpInfo.FPContractable = false;
OpInfo.E = E;
// Load/convert the LHS.
- LValue LHSLV = EmitCheckedLValue(E->getLHS());
+ LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
OpInfo.LHS = EmitLoadOfLValue(LHSLV);
llvm::PHINode *atomicPHI = 0;
@@ -1740,7 +1911,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1752,56 +1923,44 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
}
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
- const BinOpInfo &Ops,
- llvm::Value *Zero, bool isDiv) {
- llvm::Function::iterator insertPt = Builder.GetInsertBlock();
- llvm::BasicBlock *contBB =
- CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn,
- llvm::next(insertPt));
- llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
+ llvm::Value *Cond = 0;
+
+ if (CGF.getLangOpts().SanitizeDivideByZero)
+ Cond = Builder.CreateICmpNE(Ops.RHS, Zero);
- llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+ if (CGF.getLangOpts().SanitizeSignedIntegerOverflow &&
+ Ops.Ty->hasSignedIntegerRepresentation()) {
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
- if (Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::Value *IntMin =
Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
- llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
- llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
- llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
- llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
- Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
- overflowBB, contBB);
- } else {
- CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
- overflowBB, contBB);
+ llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
+ llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
+ llvm::Value *Overflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
+ Cond = Cond ? Builder.CreateAnd(Cond, Overflow, "and") : Overflow;
}
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(contBB);
+
+ if (Cond)
+ EmitBinOpCheck(Cond, Ops);
}
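The rewrite above replaces the branch-to-trap scheme with a single i1 condition handed to EmitBinOpCheck: the divisor must be nonzero, and for signed types we must not be computing INT_MIN / -1, the one signed division whose quotient is unrepresentable. The combined predicate, as standalone C++:

    #include <cassert>
    #include <climits>

    // Safe iff the divisor is nonzero and we are not dividing INT_MIN by
    // -1 (whose mathematical quotient, 2^31, does not fit in int).
    bool divIsDefined(int lhs, int rhs) {
      bool nonZero    = (rhs != 0);
      bool noOverflow = (lhs != INT_MIN) || (rhs != -1);
      return nonZero && noOverflow;
    }

    int main() {
      assert(divIsDefined(10, 3));
      assert(!divIsDefined(10, 0));
      assert(!divIsDefined(INT_MIN, -1));
    }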
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
- if (isTrapvOverflowBehavior()) {
+ if (CGF.getLangOpts().SanitizeDivideByZero ||
+ CGF.getLangOpts().SanitizeSignedIntegerOverflow) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
if (Ops.Ty->isIntegerType())
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
- else if (Ops.Ty->isRealFloatingType()) {
- llvm::Function::iterator insertPt = Builder.GetInsertBlock();
- llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn,
- llvm::next(insertPt));
- llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
- CGF.CurFn);
- CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
- overflowBB, DivCont);
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(DivCont);
- }
+ else if (CGF.getLangOpts().SanitizeDivideByZero &&
+ Ops.Ty->isRealFloatingType())
+ EmitBinOpCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero), Ops);
}
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if (CGF.getContext().getLangOpts().OpenCL) {
+ if (CGF.getLangOpts().OpenCL) {
// OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
llvm::Type *ValTy = Val->getType();
if (ValTy->isFloatTy() ||
@@ -1819,7 +1978,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
- if (isTrapvOverflowBehavior()) {
+ if (CGF.getLangOpts().SanitizeDivideByZero) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
if (Ops.Ty->isIntegerType())
@@ -1866,6 +2025,19 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+ // Handle overflow with llvm.trap if no custom handler has been specified.
+ const std::string *handlerName =
+ &CGF.getLangOpts().OverflowHandler;
+ if (handlerName->empty()) {
+ // If the signed-integer-overflow sanitizer is enabled, emit a call to its
+ // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
+ if (CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ EmitBinOpCheck(Builder.CreateNot(overflow), Ops);
+ else
+ CGF.EmitTrapvCheck(Builder.CreateNot(overflow));
+ return result;
+ }
+
// Branch in case of overflow.
llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
llvm::Function::iterator insertPt = initialBB;
@@ -1875,15 +2047,6 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.CreateCondBr(overflow, overflowBB, continueBB);
- // Handle overflow with llvm.trap.
- const std::string *handlerName =
- &CGF.getContext().getLangOpts().OverflowHandler;
- if (handlerName->empty()) {
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(continueBB);
- return result;
- }
-
// If an overflow handler is set, then we want to call it and then use its
// result, if it returns.
Builder.SetInsertPoint(overflowBB);
@@ -2001,24 +2164,106 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
}
+// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
+// Addend. Use negMul and negAdd to negate the first operand of the Mul or
+// the add operand respectively. This allows fmuladd to represent a*b-c, or
+// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
+// efficient operations.
+static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
+ const CodeGenFunction &CGF, CGBuilderTy &Builder,
+ bool negMul, bool negAdd) {
+ assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
+
+ Value *MulOp0 = MulOp->getOperand(0);
+ Value *MulOp1 = MulOp->getOperand(1);
+ if (negMul) {
+ MulOp0 =
+ Builder.CreateFSub(
+ llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
+ "neg");
+ } else if (negAdd) {
+ Addend =
+ Builder.CreateFSub(
+ llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
+ "neg");
+ }
+
+ Value *FMulAdd =
+ Builder.CreateCall3(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
+ MulOp0, MulOp1, Addend);
+ MulOp->eraseFromParent();
+
+ return FMulAdd;
+}
+
+// Check whether it would be legal to emit an fmuladd intrinsic call to
+// represent op and if so, build the fmuladd.
+//
+// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
+// Does NOT check the type of the operation - it's assumed that this function
+// will be called from contexts where it's known that the type is contractable.
+static Value* tryEmitFMulAdd(const BinOpInfo &op,
+ const CodeGenFunction &CGF, CGBuilderTy &Builder,
+ bool isSub=false) {
+
+ assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
+ op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
+ "Only fadd/fsub can be the root of an fmuladd.");
+
+ // Check whether this op is marked as fusable.
+ if (!op.FPContractable)
+ return 0;
+
+ // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is
+ // either disabled, or handled entirely by the LLVM backend).
+ if (CGF.getLangOpts().getFPContractMode() != LangOptions::FPC_On)
+ return 0;
+
+ // We have a potentially fusable op. Look for a mul on one of the operands.
+ if (llvm::BinaryOperator* LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
+ if (LHSBinOp->getOpcode() == llvm::Instruction::FMul) {
+ assert(LHSBinOp->getNumUses() == 0 &&
+ "Operations with multiple uses shouldn't be contracted.");
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ }
+ } else if (llvm::BinaryOperator* RHSBinOp =
+ dyn_cast<llvm::BinaryOperator>(op.RHS)) {
+ if (RHSBinOp->getOpcode() == llvm::Instruction::FMul) {
+ assert(RHSBinOp->getNumUses() == 0 &&
+ "Operations with multiple uses shouldn't be contracted.");
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ }
+ }
+
+ return 0;
+}
+
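The payoff of tryEmitFMulAdd is numeric: llvm.fmuladd may round once where a separate multiply and add round twice, which is exactly what -ffp-contract=on licenses. The difference is observable from ordinary C++ via std::fma; a sketch (the volatile forces the intermediate rounding so the compiler cannot itself contract the expression):

    #include <cassert>
    #include <cmath>

    int main() {
      double a = 1.0 + 0x1p-52;          // 1 + one ulp
      double b = 1.0 - 0x1p-52;          // 1 - one ulp
      double c = -1.0;

      volatile double p = a * b;         // rounds to exactly 1.0
      double separate = p + c;           // second rounding: 0.0
      double fused = std::fma(a, b, c);  // one rounding: -0x1p-104

      assert(separate == 0.0);
      assert(fused != 0.0);
    }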
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
if (op.LHS->getType()->isPointerTy() ||
op.RHS->getType()->isPointerTy())
return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(op);
}
}
- if (op.LHS->getType()->isFPOrFPVectorTy())
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
+ return FMulAdd;
+
return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+ }
return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
@@ -2027,18 +2272,24 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
// The LHS is always a pointer if either side is.
if (!op.LHS->getType()->isPointerTy()) {
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(op);
}
}
- if (op.LHS->getType()->isFPOrFPVectorTy())
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
+ return FMulAdd;
return Builder.CreateFSub(op.LHS, op.RHS, "sub");
+ }
return Builder.CreateSub(op.LHS, op.RHS, "sub");
}
@@ -2108,14 +2359,34 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.CatchUndefined
- && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ if (CGF.getLangOpts().SanitizeShift &&
+ isa<llvm::IntegerType>(Ops.LHS->getType())) {
unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
- CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
- llvm::ConstantInt::get(RHS->getType(), Width)),
- Cont, CGF.getTrapBB());
- CGF.EmitBlock(Cont);
+ llvm::Value *WidthMinusOne =
+ llvm::ConstantInt::get(RHS->getType(), Width - 1);
+ // FIXME: Emit the branching explicitly rather than emitting the check
+ // twice.
+ EmitBinOpCheck(Builder.CreateICmpULE(RHS, WidthMinusOne), Ops);
+
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ // Check whether we are shifting any non-zero bits off the top of the
+ // integer.
+ llvm::Value *BitsShiftedOff =
+ Builder.CreateLShr(Ops.LHS,
+ Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
+ /*NUW*/true, /*NSW*/true),
+ "shl.check");
+ if (CGF.getLangOpts().CPlusPlus) {
+ // In C99, we are not permitted to shift a 1 bit into the sign bit.
+ // Under C++11's rules, shifting a 1 bit into the sign bit is
+ // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
+ // define signed left shifts, so we use the C99 and C++11 rules there).
+ llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
+ BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
+ }
+ llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
+ EmitBinOpCheck(Builder.CreateICmpEQ(BitsShiftedOff, Zero), Ops);
+ }
}
return Builder.CreateShl(Ops.LHS, RHS, "shl");
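The sanitized shift above checks two conditions: the shift amount is at most Width-1, and for signed types no set bit is shifted past the top. The extra LShr by one in the C++ branch encodes the language split the comment describes: C++11 tolerates a one bit landing exactly in the sign bit, C99 does not. The predicate for 32-bit operands, as standalone C++:

    #include <cassert>
    #include <cstdint>

    // Matches the check EmitShl builds for a signed 32-bit shift; cxx11
    // selects the rule that permits filling (but not vacating) the sign
    // bit.
    bool shlIsDefined(uint32_t lhs, uint32_t rhs, bool cxx11) {
      const unsigned Width = 32;
      if (rhs > Width - 1)
        return false;                 // shift amount out of range
      uint32_t bitsShiftedOff = lhs >> (Width - 1 - rhs);
      if (cxx11)
        bitsShiftedOff >>= 1;         // a bit landing in the sign bit is OK
      return bitsShiftedOff == 0;
    }

    int main() {
      assert(shlIsDefined(1, 30, /*cxx11=*/false));
      assert(!shlIsDefined(1, 31, false)); // fills the sign bit: UB in C99
      assert(shlIsDefined(1, 31, true));   // allowed under C++11
      assert(!shlIsDefined(3, 31, true));  // a bit is lost entirely
    }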
@@ -2128,14 +2399,11 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.CatchUndefined
- && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ if (CGF.getLangOpts().SanitizeShift &&
+ isa<llvm::IntegerType>(Ops.LHS->getType())) {
unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
- CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
- llvm::ConstantInt::get(RHS->getType(), Width)),
- Cont, CGF.getTrapBB());
- CGF.EmitBlock(Cont);
+ llvm::Value *WidthVal = llvm::ConstantInt::get(RHS->getType(), Width);
+ EmitBinOpCheck(Builder.CreateICmpULT(RHS, WidthVal), Ops);
}
if (Ops.Ty->hasUnsignedIntegerRepresentation())
@@ -2326,7 +2594,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
case Qualifiers::OCL_Weak:
RHS = Visit(E->getRHS());
- LHS = EmitCheckedLValue(E->getLHS());
+ LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
break;
@@ -2336,7 +2604,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// __block variables need to have the rhs evaluated first, plus
// this should improve codegen just a little.
RHS = Visit(E->getRHS());
- LHS = EmitCheckedLValue(E->getLHS());
+ LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
// Store the value into the LHS. Bit-fields are handled specially
// because the result is altered by the store, i.e., [C99 6.5.16p1]
@@ -2353,7 +2621,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -2567,7 +2835,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if (CGF.getContext().getLangOpts().OpenCL
+ if (CGF.getLangOpts().OpenCL
&& condExpr->getType()->isVectorType()) {
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
llvm::Value *LHS = Visit(lhsExpr);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
index 4ac172d..c90e4ec 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -21,7 +21,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
using namespace clang;
using namespace CodeGen;
@@ -440,8 +440,8 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
SourceLocation StartLoc) {
FunctionArgList args;
// Check if we should generate debug info for this method.
- if (CGM.getModuleDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
- DebugInfo = CGM.getModuleDebugInfo();
+ if (!OMD->hasAttr<NoDebugAttr>())
+ maybeInitializeDebugInfo();
llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
@@ -613,7 +613,16 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// which translates to objc_storeStrong. This isn't required, but
// it's slightly nicer.
} else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
- Kind = Expression;
+ // Using standard expression emission for the setter is only
+ // acceptable if the ivar is __strong, which won't be true if
+ // the property is annotated with __attribute__((NSObject)).
+ // TODO: falling all the way back to objc_setProperty here is
+ // just laziness, though; we could still use objc_storeStrong
+ // if we hacked it right.
+ if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
+ Kind = Expression;
+ else
+ Kind = SetPropertyAndExpressionGet;
return;
// Otherwise, we need to at least use setProperty. However, if
@@ -801,6 +810,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
case PropertyImplStrategy::Native: {
+ // We don't need to do anything for a zero-size struct.
+ if (strategy.getIvarSize().isZero())
+ return;
+
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
// Currently, all atomic accesses have to be through integer
@@ -1032,12 +1045,7 @@ static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
static bool UseOptimizedSetter(CodeGenModule &CGM) {
if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
return false;
- const TargetInfo &Target = CGM.getContext().getTargetInfo();
-
- if (Target.getPlatformName() != "macosx")
- return false;
-
- return Target.getPlatformMinVersion() >= VersionTuple(10, 8);
+ return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}
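UseOptimizedSetter now delegates to ObjCRuntime::hasOptimizedSetter() rather than testing the platform name and version at the call site, which is what admits the iOS 6.0 case noted below. A sketch of the shape such a predicate takes; the thresholds are inferred from the nearby comment, and the function is illustrative, not the Clang API:

    #include <cassert>

    struct Version { int Major, Minor; };

    // Each runtime flavor reports whether the objc_setProperty_atomic /
    // _nonatomic entry points exist, instead of callers probing platforms.
    bool hasOptimizedSetter(bool isMacOSX, Version target) {
      if (isMacOSX)
        return target.Major > 10 ||
               (target.Major == 10 && target.Minor >= 8); // OS X 10.8+
      return target.Major >= 6;                           // assumed iOS 6+
    }

    int main() {
      assert(hasOptimizedSetter(true, {10, 8}));
      assert(!hasOptimizedSetter(true, {10, 7}));
    }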
void
@@ -1064,6 +1072,10 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
case PropertyImplStrategy::Native: {
+ // We don't need to do anything for a zero-size struct.
+ if (strategy.getIvarSize().isZero())
+ return;
+
llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
LValue ivarLValue =
@@ -1097,7 +1109,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
llvm::Value *setOptimizedPropertyFn = 0;
llvm::Value *setPropertyFn = 0;
if (UseOptimizedSetter(CGM)) {
- // 10.8 code and GC is off
+ // OS X 10.8 / iOS 6.0 (or later) code, and GC is off
setOptimizedPropertyFn =
CGM.getObjCRuntime()
.GetOptimizedPropertySetFunction(strategy.isAtomic(),
@@ -1209,7 +1221,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
ivarRef.getType(), VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
EmitStmt(&assign);
}
@@ -1697,11 +1709,11 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
// references to the runtime support library. We don't really
// permit this to fail, but we need a particular relocation style.
if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
- if (!CGM.getLangOpts().ObjCRuntime.hasARC())
+ if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC())
f->setLinkage(llvm::Function::ExternalWeakLinkage);
// set nonlazybind attribute for these APIs for performance.
if (fnName == "objc_retain" || fnName == "objc_release")
- f->addFnAttr(llvm::Attribute::NonLazyBind);
+ f->addFnAttr(llvm::Attributes::NonLazyBind);
}
return fn;
@@ -1945,6 +1957,28 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
}
}
+/// Destroy a __strong variable.
+///
+/// At -O0, emit a call to store 'null' into the address;
+/// instrumenting tools prefer this because the address is exposed,
+/// but it's relatively cumbersome to optimize.
+///
+/// At -O1 and above, just load and call objc_release.
+///
+/// call void \@objc_storeStrong(i8** %addr, i8* null)
+void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr, bool precise) {
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::PointerType *addrTy = cast<llvm::PointerType>(addr->getType());
+ llvm::Value *null = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(addrTy->getElementType()));
+ EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
+ return;
+ }
+
+ llvm::Value *value = Builder.CreateLoad(addr);
+ EmitARCRelease(value, precise);
+}
+
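In source-level terms the two strategies the comment describes are equivalent ways to end a __strong slot's ownership. A sketch with stub runtime functions (the stubs stand in for the real ObjC runtime; the retain of the incoming value is omitted because only nil is ever stored here):

    #include <cstdio>

    using id = void *;

    void objc_release(id v) { if (v) std::printf("release %p\n", v); }
    void objc_storeStrong(id *addr, id value) {
      id old = *addr;   // the real runtime also retains 'value' first
      *addr = value;
      objc_release(old);
    }

    // The two lowerings EmitARCDestroyStrong selects between:
    void destroyStrongAtO0(id *addr) {
      objc_storeStrong(addr, nullptr); // address stays visible to tools
    }
    void destroyStrongAtO1(id *addr) {
      objc_release(*addr);             // plain load + release
    }

    int main() {
      int dummy;
      id slot = &dummy;
      destroyStrongAtO0(&slot);        // releases once, leaves slot null
    }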
/// Store into a strong object. Always calls this:
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
@@ -2218,15 +2252,13 @@ void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
- llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
- CGF.EmitARCRelease(ptr, /*precise*/ true);
+ CGF.EmitARCDestroyStrong(addr, /*precise*/ true);
}
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
- llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
- CGF.EmitARCRelease(ptr, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(addr, /*precise*/ false);
}
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
@@ -2730,7 +2762,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
// Keep track of the current cleanup stack depth.
RunCleanupsScope Scope(*this);
- if (CGM.getLangOpts().ObjCRuntime.hasARC()) {
+ if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
} else {
@@ -2826,9 +2858,8 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
"__assign_helper_atomic_property_",
&CGM.getModule());
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
-
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
@@ -2845,8 +2876,8 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
Expr *Args[2] = { &DST, &SRC };
CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
- Args, 2, DestTy->getPointeeType(),
- VK_LValue, SourceLocation());
+ Args, DestTy->getPointeeType(),
+ VK_LValue, SourceLocation(), false);
EmitStmt(&TheCall);
@@ -2912,9 +2943,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__copy_helper_atomic_property_", &CGM.getModule());
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
-
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
@@ -2940,7 +2970,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
CXXConstructExpr::Create(C, Ty, SourceLocation(),
CXXConstExpr->getConstructor(),
CXXConstExpr->isElidable(),
- &ConstructorArgs[0], ConstructorArgs.size(),
+ ConstructorArgs,
CXXConstExpr->hadMultipleCandidates(),
CXXConstExpr->isListInitialization(),
CXXConstExpr->requiresZeroInitialization(),
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
index 6d129d0..68d234d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -33,7 +33,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <cstdarg>
@@ -224,6 +224,25 @@ protected:
llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
return MakeGlobal(ArrayTy, V, Name, linkage);
}
+ /// Returns a property name and encoding string.
+ llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ ObjCRuntime R = CGM.getLangOpts().ObjCRuntime;
+ if ((R.getKind() == ObjCRuntime::GNUstep) &&
+ (R.getVersion() >= VersionTuple(1, 6))) {
+ std::string NameAndAttributes;
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ NameAndAttributes += '\0';
+ NameAndAttributes += TypeStr.length() + 3;
+ NameAndAttributes += TypeStr;
+ NameAndAttributes += '\0';
+ NameAndAttributes += PD->getNameAsString();
+ return llvm::ConstantExpr::getGetElementPtr(
+ CGM.GetAddrOfConstantString(NameAndAttributes), Zeros);
+ }
+ return MakeConstantString(PD->getNameAsString());
+ }
/// Ensures that the value has the required type, by inserting a bitcast if
/// required. This function lets us avoid inserting bitcasts that are
/// redundant.
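MakePropertyEncodingString above emits GNUstep's (>= 1.6) extended property string: a leading NUL, a length byte holding the type string's length plus three, the type encoding, another NUL, then the property name; note that `NameAndAttributes += TypeStr.length() + 3` relies on std::string's operator+=(char) to append that length as a single byte. A host-side sketch assembling the same bytes (makePropertyEncoding is a hypothetical helper):

    #include <cassert>
    #include <string>

    std::string makePropertyEncoding(const std::string &TypeStr,
                                     const std::string &Name) {
      std::string S;
      S += '\0';                                    // leading NUL
      S += static_cast<char>(TypeStr.length() + 3); // length byte
      S += TypeStr;                                 // type encoding
      S += '\0';                                    // separator
      S += Name;                                    // property name
      return S;
    }

    int main() {
      std::string S = makePropertyEncoding("T@\"NSString\",&,N", "title");
      assert(S[0] == '\0' && S[2] == 'T'); // NUL, length byte, encoding...
    }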
@@ -514,7 +533,10 @@ public:
const CGBlockInfo &blockInfo) {
return NULLPtr;
}
-
+ virtual llvm::Constant *BuildRCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return NULLPtr;
+ }
virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
return 0;
}
@@ -578,6 +600,8 @@ class CGObjCGNUstep : public CGObjCGNU {
/// Type of an slot structure pointer. This is returned by the various
/// lookup functions.
llvm::Type *SlotTy;
+ public:
+ virtual llvm::Constant *GetEHType(QualType T);
protected:
virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
llvm::Value *&Receiver,
@@ -653,11 +677,40 @@ class CGObjCGNUstep : public CGObjCGNU {
}
};
-/// The ObjFW runtime, which closely follows the GCC runtime's
-/// compiler ABI. Support here is due to Jonathan Schleifer, the
-/// ObjFW maintainer.
-class CGObjCObjFW : public CGObjCGCC {
- /// Emit class references unconditionally as direct symbol references.
+/// Support for the ObjFW runtime. Support here is due to
+/// Jonathan Schleifer <js@webkeks.org>, the ObjFW maintainer.
+class CGObjCObjFW: public CGObjCGNU {
+protected:
+ /// The GCC ABI message lookup function. Returns an IMP pointing to the
+ /// method implementation for this message.
+ LazyRuntimeFunction MsgLookupFn;
+ /// The GCC ABI superclass message lookup function. Takes a pointer to a
+ /// structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the IMP for the corresponding method.
+ LazyRuntimeFunction MsgLookupSuperFn;
+
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *args[] = {
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy) };
+ llvm::CallSite imp = CGF.EmitCallOrInvoke(MsgLookupFn, args);
+ imp->setMetadata(msgSendMDKind, node);
+ return imp.getInstruction();
+ }
+
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
+ PtrToObjCSuperTy), cmd};
+ return Builder.CreateCall(MsgLookupSuperFn, lookupArgs);
+ }
+
virtual llvm::Value *GetClassNamed(CGBuilderTy &Builder,
const std::string &Name, bool isWeak) {
if (isWeak)
@@ -678,7 +731,13 @@ class CGObjCObjFW : public CGObjCGCC {
}
public:
- CGObjCObjFW(CodeGenModule &Mod): CGObjCGCC(Mod) {}
+ CGObjCObjFW(CodeGenModule &Mod): CGObjCGNU(Mod, 9, 3) {
+ // IMP objc_msg_lookup(id, SEL);
+ MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, NULL);
+ // IMP objc_msg_lookup_super(struct objc_super*, SEL);
+ MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
+ }
};
} // end anonymous namespace
@@ -909,29 +968,30 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
}
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
- if (!CGM.getLangOpts().CPlusPlus) {
- if (T->isObjCIdType()
- || T->isObjCQualifiedIdType()) {
- // With the old ABI, there was only one kind of catchall, which broke
- // foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
- // a pointer indicating object catchalls, and NULL to indicate real
- // catchalls
- if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
- return MakeConstantString("@id");
- } else {
- return 0;
- }
- }
-
- // All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *OPT =
- T->getAs<ObjCObjectPointerType>();
- assert(OPT && "Invalid @catch type.");
- const ObjCInterfaceDecl *IDecl =
- OPT->getObjectType()->getInterface();
- assert(IDecl && "Invalid @catch type.");
- return MakeConstantString(IDecl->getIdentifier()->getName());
+ if (T->isObjCIdType() || T->isObjCQualifiedIdType()) {
+ // With the old ABI, there was only one kind of catchall, which broke
+ // foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
+ // a pointer indicating object catchalls, and NULL to indicate real
+ // catchalls
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ return MakeConstantString("@id");
+ } else {
+ return 0;
+ }
}
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl = OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ return MakeConstantString(IDecl->getIdentifier()->getName());
+}
+
+llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
+ if (!CGM.getLangOpts().CPlusPlus)
+ return CGObjCGNU::GetEHType(T);
+
// For Objective-C++, we want to provide the ability to catch both C++ and
// Objective-C objects in the same function.
@@ -1436,7 +1496,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.push_back(Zero);
Elements.push_back(llvm::ConstantInt::get(LongTy, info));
if (isMeta) {
- llvm::TargetData td(&TheModule);
+ llvm::DataLayout td(&TheModule);
Elements.push_back(
llvm::ConstantInt::get(LongTy,
td.getTypeSizeInBits(ClassTy) /
@@ -1595,13 +1655,13 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
std::string TypeStr;
Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
- InstanceMethodNames.push_back(
- MakeConstantString((*iter)->getSelector().getAsString()));
- InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
- } else {
OptionalInstanceMethodNames.push_back(
MakeConstantString((*iter)->getSelector().getAsString()));
OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ InstanceMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
}
// Collect information about class methods:
@@ -1615,13 +1675,13 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
std::string TypeStr;
Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
- ClassMethodNames.push_back(
- MakeConstantString((*iter)->getSelector().getAsString()));
- ClassMethodTypes.push_back(MakeConstantString(TypeStr));
- } else {
OptionalClassMethodNames.push_back(
MakeConstantString((*iter)->getSelector().getAsString()));
OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ ClassMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
}
@@ -1656,7 +1716,9 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
std::vector<llvm::Constant*> Fields;
ObjCPropertyDecl *property = *iter;
- Fields.push_back(MakeConstantString(property->getNameAsString()));
+
+ Fields.push_back(MakePropertyEncodingString(property, PD));
+
Fields.push_back(llvm::ConstantInt::get(Int8Ty,
property->getPropertyAttributes()));
Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
@@ -1909,7 +1971,7 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
ObjCPropertyImplDecl::Synthesize);
- Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(MakePropertyEncodingString(property, OID));
Fields.push_back(llvm::ConstantInt::get(Int8Ty,
property->getPropertyAttributes()));
Fields.push_back(llvm::ConstantInt::get(Int8Ty, isSynthesized));
@@ -2011,7 +2073,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
- if (CGM.getContext().getLangOpts().ObjCRuntime.isNonFragile()) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
instanceSize = 0 - (instanceSize - superInstanceSize);
}
@@ -2026,7 +2088,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Get the offset
uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
uint64_t Offset = BaseOffset;
- if (CGM.getContext().getLangOpts().ObjCRuntime.isNonFragile()) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
Offset = BaseOffset - superInstanceSize;
}
llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
@@ -2334,7 +2396,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Runtime version, used for ABI compatibility checking.
Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
// sizeof(ModuleTy)
- llvm::TargetData td(&TheModule);
+ llvm::DataLayout td(&TheModule);
Elements.push_back(
llvm::ConstantInt::get(LongTy,
td.getTypeSizeInBits(ModuleTy) /
@@ -2488,7 +2550,7 @@ void CGObjCGNU::EmitTryStmt(CodeGenFunction &CGF,
// Unlike the Apple non-fragile runtimes, which also uses
// unwind-based zero cost exceptions, the GNU Objective C runtime's
// EH support isn't a veneer over C++ EH. Instead, exception
- // objects are created by __objc_exception_throw and destroyed by
+ // objects are created by objc_exception_throw and destroyed by
// the personality function; this avoids the need for bracketing
// catch handlers with calls to __blah_begin_catch/__blah_end_catch
// (or even _Unwind_DeleteException), but probably doesn't
@@ -2513,7 +2575,9 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
- CGF.EmitCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
+ llvm::CallSite Throw =
+ CGF.EmitCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
+ Throw.setDoesNotReturn();
CGF.Builder.CreateUnreachable();
CGF.Builder.ClearInsertionPoint();
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
index ef802a3..2203f01 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -36,7 +36,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <cstdio>
using namespace clang;
@@ -66,7 +66,8 @@ private:
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSend",
- llvm::Attribute::NonLazyBind);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NonLazyBind));
}
/// void objc_msgSend_stret (id, SEL, ...)
@@ -433,19 +434,19 @@ public:
/// SyncEnterFn - LLVM object_sync_enter function.
llvm::Constant *getSyncEnterFn() {
- // void objc_sync_enter (id)
+ // int objc_sync_enter (id)
llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, args, false);
+ llvm::FunctionType::get(CGM.IntTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
}
/// SyncExitFn - LLVM object_sync_exit function.
llvm::Constant *getSyncExitFn() {
- // void objc_sync_exit (id)
+ // int objc_sync_exit (id)
llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, args, false);
+ llvm::FunctionType::get(CGM.IntTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
}
@@ -583,7 +584,8 @@ public:
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
params, false),
"_setjmp",
- llvm::Attribute::ReturnsTwice);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NonLazyBind));
}
public:
@@ -753,6 +755,74 @@ public:
: skip(_skip), scan(_scan) {}
};
+ /// Opcodes for the captured block variable layout 'instructions'.
+ /// In the following descriptions, 'I' is the value of the immediate field
+ /// (the field following the opcode).
+ ///
+ enum BLOCK_LAYOUT_OPCODE {
+ /// An operator which affects how the following layout should be
+ /// interpreted.
+ /// I == 0: Halt interpretation and treat everything else as
+ /// a non-pointer. Note that this instruction is equal
+ /// to '\0'.
+ /// I != 0: Currently unused.
+ BLOCK_LAYOUT_OPERATOR = 0,
+
+ /// The next I+1 bytes do not contain a value of object pointer type.
+ /// Note that this can leave the stream unaligned, meaning that
+ /// subsequent word-size instructions do not begin at a multiple of
+ /// the pointer size.
+ BLOCK_LAYOUT_NON_OBJECT_BYTES = 1,
+
+ /// The next I+1 words do not contain a value of object pointer type.
+ /// This is simply an optimized version of BLOCK_LAYOUT_NON_OBJECT_BYTES
+ /// for when the required skip quantity is a multiple of the pointer size.
+ BLOCK_LAYOUT_NON_OBJECT_WORDS = 2,
+
+ /// The next I+1 words are __strong pointers to Objective-C
+ /// objects or blocks.
+ BLOCK_LAYOUT_STRONG = 3,
+
+ /// The next I+1 words are pointers to __block variables.
+ BLOCK_LAYOUT_BYREF = 4,
+
+ /// The next I+1 words are __weak pointers to Objective-C
+ /// objects or blocks.
+ BLOCK_LAYOUT_WEAK = 5,
+
+ /// The next I+1 words are __unsafe_unretained pointers to
+ /// Objective-C objects or blocks.
+ BLOCK_LAYOUT_UNRETAINED = 6
+
+ /// The next I+1 words are block or object pointers with some
+ /// as-yet-unspecified ownership semantics. If we add more
+ /// flavors of ownership semantics, values will be taken from
+ /// this range.
+ ///
+ /// This is included so that older tools can at least continue
+ /// processing the layout past such things.
+ //BLOCK_LAYOUT_OWNERSHIP_UNKNOWN = 7..10,
+
+ /// All other opcodes are reserved. Halt interpretation and
+ /// treat everything else as opaque.
+ };
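The enum above reads naturally if each instruction is assumed to pack into one byte with the opcode in the high nibble and the immediate I in the low nibble; BLOCK_LAYOUT_OPERATOR with I == 0 is then the 0x00 terminator the first comment mentions. A sketch under that packing assumption:

    #include <cassert>
    #include <cstdint>

    // Assumed encoding: high nibble = opcode, low nibble = immediate, so
    // a run longer than 16 words needs more than one instruction.
    uint8_t packInstruction(uint8_t opcode, uint8_t immediate) {
      assert(opcode <= 0xF && immediate <= 0xF);
      return uint8_t((opcode << 4) | immediate);
    }

    int main() {
      enum { BLOCK_LAYOUT_OPERATOR = 0, BLOCK_LAYOUT_STRONG = 3 };
      assert(packInstruction(BLOCK_LAYOUT_OPERATOR, 0) == 0x00); // '\0' end
      // "The next 2 words are __strong": opcode 3 with I == 1 (I+1 words).
      assert(packInstruction(BLOCK_LAYOUT_STRONG, 1) == 0x31);
    }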
+
+ class RUN_SKIP {
+ public:
+ enum BLOCK_LAYOUT_OPCODE opcode;
+ CharUnits block_var_bytepos;
+ CharUnits block_var_size;
+ RUN_SKIP(enum BLOCK_LAYOUT_OPCODE Opcode = BLOCK_LAYOUT_OPERATOR,
+ CharUnits BytePos = CharUnits::Zero(),
+ CharUnits Size = CharUnits::Zero())
+ : opcode(Opcode), block_var_bytepos(BytePos), block_var_size(Size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const RUN_SKIP &b) const {
+ return block_var_bytepos < b.block_var_bytepos;
+ }
+ };
+
protected:
llvm::LLVMContext &VMContext;
// FIXME! May not be needing this after all.
@@ -761,6 +831,9 @@ protected:
// gc ivar layout bitmap calculation helper caches.
SmallVector<GC_IVAR, 16> SkipIvars;
SmallVector<GC_IVAR, 16> IvarsInfo;
+
+ // arc/mrr layout of captured block literal variables.
+ SmallVector<RUN_SKIP, 16> RunSkipBlockVars;
/// LazySymbols - Symbols to generate a lazy reference for. See
/// DefinedSymbols and FinishModule().
@@ -869,6 +942,24 @@ protected:
ArrayRef<const FieldDecl*> RecFields,
unsigned int BytePos, bool ForStrongLayout,
bool &HasUnion);
+
+ Qualifiers::ObjCLifetime getBlockCaptureLifetime(QualType QT);
+
+ void UpdateRunSkipBlockVars(bool IsByref,
+ Qualifiers::ObjCLifetime LifeTime,
+ CharUnits FieldOffset,
+ CharUnits FieldSize);
+
+ void BuildRCBlockVarRecordLayout(const RecordType *RT,
+ CharUnits BytePos, bool &HasUnion);
+
+ void BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ CharUnits BytePos, bool &HasUnion);
+
+ uint64_t InlineLayoutInstruction(SmallVectorImpl<unsigned char> &Layout);
+
/// GetIvarLayoutName - Returns a unique constant for the given
/// ivar layout bitmap.
@@ -959,6 +1050,8 @@ public:
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CGBlockInfo &blockInfo);
+ virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo);
};
@@ -1787,8 +1880,8 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
+
llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
-
if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
!CGM.getLangOpts().ObjCAutoRefCount)
return nullPtr;
@@ -1807,7 +1900,7 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
// Calculate the basic layout of the block structure.
const llvm::StructLayout *layout =
- CGM.getTargetData().getStructLayout(blockInfo.StructureType);
+ CGM.getDataLayout().getStructLayout(blockInfo.StructureType);
// Ignore the optional 'this' capture: C++ objects are not assumed
// to be GC'ed.
@@ -1860,7 +1953,7 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
if (CGM.getLangOpts().ObjCGCBitmapPrint) {
printf("\n block variable layout for block: ");
- const unsigned char *s = (unsigned char*)BitMap.c_str();
+ const unsigned char *s = (const unsigned char*)BitMap.c_str();
for (unsigned i = 0, e = BitMap.size(); i < e; i++)
if (!(s[i] & 0xf0))
printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
@@ -1872,6 +1965,476 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
return C;
}
+/// getBlockCaptureLifetime - This routine returns the lifetime of the captured
+/// block variable for the purpose of block layout metadata generation. FQT is
+/// the type of the variable captured in the block.
+Qualifiers::ObjCLifetime CGObjCCommonMac::getBlockCaptureLifetime(QualType FQT) {
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ return FQT.getObjCLifetime();
+
+ // MRR: treat object and block pointers as explicitly unretained.
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return Qualifiers::OCL_ExplicitNone;
+
+ return Qualifiers::OCL_None;
+}
+
+void CGObjCCommonMac::UpdateRunSkipBlockVars(bool IsByref,
+ Qualifiers::ObjCLifetime LifeTime,
+ CharUnits FieldOffset,
+ CharUnits FieldSize) {
+ // __block variables are passed by their descriptor address.
+ if (IsByref)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_BYREF, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_Strong)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_STRONG, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_Weak)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_WEAK, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_ExplicitNone)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_UNRETAINED, FieldOffset,
+ FieldSize));
+ else
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_NON_OBJECT_BYTES,
+ FieldOffset,
+ FieldSize));
+}
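+
+ // For illustration: a captured __block variable is recorded as
+ // BLOCK_LAYOUT_BYREF, a __strong id as BLOCK_LAYOUT_STRONG, and under MRR
+ // (see getBlockCaptureLifetime) a plain object pointer maps to
+ // OCL_ExplicitNone and hence BLOCK_LAYOUT_UNRETAINED.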
+
+void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ CharUnits BytePos, bool &HasUnion) {
+ bool IsUnion = (RD && RD->isUnion());
+ CharUnits MaxUnionSize = CharUnits::Zero();
+ const FieldDecl *MaxField = 0;
+ const FieldDecl *LastFieldBitfieldOrUnnamed = 0;
+ CharUnits MaxFieldOffset = CharUnits::Zero();
+ CharUnits LastBitfieldOrUnnamedOffset = CharUnits::Zero();
+
+ if (RecFields.empty())
+ return;
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ const FieldDecl *Field = RecFields[i];
+ // Note that 'i' here is actually the field index inside RD of Field,
+ // although this dependency is hidden.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ CharUnits FieldOffset =
+ CGM.getContext().toCharUnitsFromBits(RL.getFieldOffset(i));
+
+ // Skip over unnamed fields and bitfields.
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfieldOrUnnamed = Field;
+ LastBitfieldOrUnnamedOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfieldOrUnnamed = 0;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildRCBlockVarRecordLayout(FQT->getAs<RecordType>(),
+ BytePos + FieldOffset, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ // Check before dereferencing: the assert must precede the use of CArray.
+ assert(CArray && "only array with known element size is supported");
+ uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ assert(CArray && "only array with known element size is supported");
+ ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+
+ assert(!FQT->isUnionType() &&
+ "layout for array of unions not supported");
+ if (FQT->isRecordType() && ElCount) {
+ int OldIndex = RunSkipBlockVars.size() - 1;
+ const RecordType *RT = FQT->getAs<RecordType>();
+ BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset,
+ HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = RunSkipBlockVars.size() - 1; ElIx < ElCount; ElIx++) {
+ CharUnits Size = CGM.getContext().getTypeSizeInChars(RT);
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ RunSkipBlockVars.push_back(
+ RUN_SKIP(RunSkipBlockVars[i].opcode,
+ RunSkipBlockVars[i].block_var_bytepos + Size*ElIx,
+ RunSkipBlockVars[i].block_var_size));
+ }
+ continue;
+ }
+ }
+ CharUnits FieldSize = CGM.getContext().getTypeSizeInChars(Field->getType());
+ if (IsUnion) {
+ CharUnits UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxUnionSize) {
+ MaxUnionSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(FQT),
+ BytePos + FieldOffset,
+ FieldSize);
+ }
+ }
+
+ if (LastFieldBitfieldOrUnnamed) {
+ if (LastFieldBitfieldOrUnnamed->isBitField()) {
+ // Last field was a bitfield. Must update the info.
+ uint64_t BitFieldSize
+ = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext());
+ unsigned UnsSize = (BitFieldSize / ByteSizeInBits) +
+ ((BitFieldSize % ByteSizeInBits) != 0);
+ CharUnits Size = CharUnits::fromQuantity(UnsSize);
+ Size += LastBitfieldOrUnnamedOffset;
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(LastFieldBitfieldOrUnnamed->getType()),
+ BytePos + LastBitfieldOrUnnamedOffset,
+ Size);
+ } else {
+ assert(!LastFieldBitfieldOrUnnamed->getIdentifier() &&"Expected unnamed");
+ // Last field was unnamed. Must update skip info.
+ CharUnits FieldSize
+ = CGM.getContext().getTypeSizeInChars(LastFieldBitfieldOrUnnamed->getType());
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(LastFieldBitfieldOrUnnamed->getType()),
+ BytePos + LastBitfieldOrUnnamedOffset,
+ FieldSize);
+ }
+ }
+
+ if (MaxField)
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(MaxField->getType()),
+ BytePos + MaxFieldOffset,
+ MaxUnionSize);
+}
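+
+ // Note: for a union capture, only the largest member (MaxField) contributes
+ // a layout entry, and that member's lifetime determines the opcode; e.g. in
+ // 'union { id obj; char buf[32]; }' the 32-byte member wins and is emitted
+ // as a non-object run.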
+
+void CGObjCCommonMac::BuildRCBlockVarRecordLayout(const RecordType *RT,
+ CharUnits BytePos,
+ bool &HasUnion) {
+ const RecordDecl *RD = RT->getDecl();
+ SmallVector<const FieldDecl*, 16> Fields;
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i)
+ Fields.push_back(*i);
+ llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildRCRecordLayout(RecLayout, RD, Fields, BytePos, HasUnion);
+}
+
+/// InlineLayoutInstruction - This routine produces an inline instruction for
+/// the block variable layout if it can; if not, it returns 0. The rules are
+/// as follows: if ((uintptr_t) layout) < (1 << 12), the layout is inline. On
+/// 64-bit targets, an inline layout of value 0x0000000000000xyz is interpreted
+/// as:
+/// x captured object pointers of BLOCK_LAYOUT_STRONG, followed by
+/// y captured objects of BLOCK_LAYOUT_BYREF, followed by
+/// z captured objects of BLOCK_LAYOUT_WEAK. Any missing category is encoded
+/// as zero. For example, 0x00000x00 means x BLOCK_LAYOUT_STRONG and no
+/// BLOCK_LAYOUT_BYREF and no BLOCK_LAYOUT_WEAK objects are captured.
+uint64_t CGObjCCommonMac::InlineLayoutInstruction(
+ SmallVectorImpl<unsigned char> &Layout) {
+ uint64_t Result = 0;
+ if (Layout.size() <= 3) {
+ unsigned size = Layout.size();
+ unsigned strong_word_count = 0, byref_word_count = 0, weak_word_count = 0;
+ unsigned char inst;
+ enum BLOCK_LAYOUT_OPCODE opcode;
+ switch (size) {
+ case 3:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG)
+ strong_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ inst = Layout[2];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ break;
+
+ case 2:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG) {
+ strong_word_count = (inst & 0xF)+1;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ }
+ else if (opcode == BLOCK_LAYOUT_BYREF) {
+ byref_word_count = (inst & 0xF)+1;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ }
+ else
+ return 0;
+ break;
+
+ case 1:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG)
+ strong_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ // Cannot inline when any of the word counts is 16: the run-length imm. is
+ // one less than the actual word count (so an imm. of 15 means 16 words),
+ // but an inline nibble can only represent counts 0 through 15.
+ if (strong_word_count == 16 || byref_word_count == 16 || weak_word_count == 16)
+ return 0;
+
+ unsigned count =
+ (strong_word_count != 0) + (byref_word_count != 0) + (weak_word_count != 0);
+
+ if (size == count) {
+ if (strong_word_count)
+ Result = strong_word_count;
+ Result <<= 4;
+ if (byref_word_count)
+ Result += byref_word_count;
+ Result <<= 4;
+ if (weak_word_count)
+ Result += weak_word_count;
+ }
+ }
+ return Result;
+}
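+
+ // Worked example: a layout of 0x31 (STRONG, 2 words), 0x40 (BYREF, 1 word),
+ // 0x52 (WEAK, 3 words) is inlined as 0x213: one nibble per category, in
+ // strong/byref/weak order.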
+
+llvm::Constant *CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ assert(CGM.getLangOpts().getGC() == LangOptions::NonGC);
+
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+
+ RunSkipBlockVars.clear();
+ bool hasUnion = false;
+
+ unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits;
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Calculate the basic layout of the block structure.
+ const llvm::StructLayout *layout =
+ CGM.getDataLayout().getStructLayout(blockInfo.StructureType);
+
+ // Ignore the optional 'this' capture: C++ objects are not assumed
+ // to be GC'ed.
+
+ // Walk the captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ CharUnits fieldOffset =
+ CharUnits::fromQuantity(layout->getElementOffset(capture.getIndex()));
+
+ assert(!type->isArrayType() && "array variable should not be caught");
+ if (const RecordType *record = type->getAs<RecordType>()) {
+ BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion);
+ continue;
+ }
+ CharUnits fieldSize;
+ if (ci->isByRef())
+ fieldSize = CharUnits::fromQuantity(WordSizeInBytes);
+ else
+ fieldSize = CGM.getContext().getTypeSizeInChars(type);
+ UpdateRunSkipBlockVars(ci->isByRef(), getBlockCaptureLifetime(type),
+ fieldOffset, fieldSize);
+ }
+
+ if (RunSkipBlockVars.empty())
+ return nullPtr;
+
+ // Sort on byte position; captures might not be allocated in order,
+ // and unions can do funny things.
+ llvm::array_pod_sort(RunSkipBlockVars.begin(), RunSkipBlockVars.end());
+ SmallVector<unsigned char, 16> Layout;
+
+ unsigned size = RunSkipBlockVars.size();
+ for (unsigned i = 0; i < size; i++) {
+ enum BLOCK_LAYOUT_OPCODE opcode = RunSkipBlockVars[i].opcode;
+ CharUnits start_byte_pos = RunSkipBlockVars[i].block_var_bytepos;
+ CharUnits end_byte_pos = start_byte_pos;
+ unsigned j = i+1;
+ while (j < size) {
+ if (opcode == RunSkipBlockVars[j].opcode) {
+ end_byte_pos = RunSkipBlockVars[j++].block_var_bytepos;
+ i++;
+ }
+ else
+ break;
+ }
+ CharUnits size_in_bytes =
+ end_byte_pos - start_byte_pos + RunSkipBlockVars[j-1].block_var_size;
+ if (j < size) {
+ CharUnits gap =
+ RunSkipBlockVars[j].block_var_bytepos -
+ RunSkipBlockVars[j-1].block_var_bytepos - RunSkipBlockVars[j-1].block_var_size;
+ size_in_bytes += gap;
+ }
+ CharUnits residue_in_bytes = CharUnits::Zero();
+ if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES) {
+ residue_in_bytes = size_in_bytes % WordSizeInBytes;
+ size_in_bytes -= residue_in_bytes;
+ opcode = BLOCK_LAYOUT_NON_OBJECT_WORDS;
+ }
+
+ unsigned size_in_words = size_in_bytes.getQuantity() / WordSizeInBytes;
+ while (size_in_words >= 16) {
+ // Note that the value in the imm. field is one less than the actual
+ // value, so 0xf means 16 words follow.
+ unsigned char inst = (opcode << 4) | 0xf;
+ Layout.push_back(inst);
+ size_in_words -= 16;
+ }
+ if (size_in_words > 0) {
+ // Note that the value in the imm. field is one less than the actual
+ // value, so we subtract 1 before emitting.
+ unsigned char inst = (opcode << 4) | (size_in_words-1);
+ Layout.push_back(inst);
+ }
+ if (residue_in_bytes > CharUnits::Zero()) {
+ unsigned char inst =
+ (BLOCK_LAYOUT_NON_OBJECT_BYTES << 4) | (residue_in_bytes.getQuantity()-1);
+ Layout.push_back(inst);
+ }
+ }
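+
+ // For illustration: a 20-word __strong run is emitted as 0x3f (16 words,
+ // since the imm. field holds count minus one) followed by 0x33 (the
+ // remaining 4 words).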
+
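+ // Trailing skip opcodes carry no information for the runtime; strip them
+ // from the end of the layout before trying to inline it.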
+ int e = Layout.size()-1;
+ while (e >= 0) {
+ unsigned char inst = Layout[e--];
+ enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES || opcode == BLOCK_LAYOUT_NON_OBJECT_WORDS)
+ Layout.pop_back();
+ else
+ break;
+ }
+
+ uint64_t Result = InlineLayoutInstruction(Layout);
+ if (Result != 0) {
+ // Block variable layout instruction has been inlined.
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n Inline instruction for block variable layout: ");
+ printf("0x0%llx\n", (unsigned long long)Result);
+ }
+ if (WordSizeInBytes == 8) {
+ const llvm::APInt Instruction(64, Result);
+ return llvm::Constant::getIntegerValue(CGM.Int64Ty, Instruction);
+ }
+ else {
+ const llvm::APInt Instruction(32, Result);
+ return llvm::Constant::getIntegerValue(CGM.Int32Ty, Instruction);
+ }
+ }
+
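+ // Terminate the layout stream with a BLOCK_LAYOUT_OPERATOR byte (0x00).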
+ unsigned char inst = (BLOCK_LAYOUT_OPERATOR << 4) | 0;
+ Layout.push_back(inst);
+ std::string BitMap;
+ for (unsigned i = 0, e = Layout.size(); i != e; i++)
+ BitMap += Layout[i];
+
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n block variable layout: ");
+ for (unsigned i = 0, e = BitMap.size(); i != e; i++) {
+ unsigned char inst = BitMap[i];
+ enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ unsigned delta = 1;
+ switch (opcode) {
+ case BLOCK_LAYOUT_OPERATOR:
+ printf("BL_OPERATOR:");
+ delta = 0;
+ break;
+ case BLOCK_LAYOUT_NON_OBJECT_BYTES:
+ printf("BL_NON_OBJECT_BYTES:");
+ break;
+ case BLOCK_LAYOUT_NON_OBJECT_WORDS:
+ printf("BL_NON_OBJECT_WORD:");
+ break;
+ case BLOCK_LAYOUT_STRONG:
+ printf("BL_STRONG:");
+ break;
+ case BLOCK_LAYOUT_BYREF:
+ printf("BL_BYREF:");
+ break;
+ case BLOCK_LAYOUT_WEAK:
+ printf("BL_WEAK:");
+ break;
+ case BLOCK_LAYOUT_UNRETAINED:
+ printf("BL_UNRETAINED:");
+ break;
+ }
+ // The actual word count is one more than what is in the imm. field of
+ // the instruction.
+ printf("%d", (inst & 0xf) + delta);
+ if (i < e-1)
+ printf(", ");
+ else
+ printf("\n");
+ }
+ }
+
+ llvm::GlobalVariable *Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap, false),
+ "__TEXT,__objc_classname,cstring_literals", 1, true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD) {
// FIXME: I don't understand why gcc generates this, or where it is
@@ -2040,7 +2603,7 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
ArrayRef<llvm::Constant*> OptClassMethods,
ArrayRef<llvm::Constant*> MethodTypesExt) {
uint64_t Size =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
llvm::Constant *Values[] = {
llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
@@ -2180,7 +2743,7 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
unsigned PropertySize =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.PropertyTy);
llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
@@ -2269,7 +2832,7 @@ CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
};
*/
void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.CategoryTy);
// FIXME: This is poor design, the OCD should have a pointer to the category
// decl. Additionally, note that Category can be null for the @implementation
@@ -2338,15 +2901,37 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
MethodDefinitions.clear();
}
-// FIXME: Get from somewhere?
-enum ClassFlags {
- eClassFlags_Factory = 0x00001,
- eClassFlags_Meta = 0x00002,
- // <rdr://5142207>
- eClassFlags_HasCXXStructors = 0x02000,
- eClassFlags_Hidden = 0x20000,
- eClassFlags_ABI2_Hidden = 0x00010,
- eClassFlags_ABI2_HasCXXStructors = 0x00004 // <rdr://4923634>
+enum FragileClassFlags {
+ FragileABI_Class_Factory = 0x00001,
+ FragileABI_Class_Meta = 0x00002,
+ FragileABI_Class_HasCXXStructors = 0x02000,
+ FragileABI_Class_Hidden = 0x20000
+};
+
+enum NonFragileClassFlags {
+ /// Is a meta-class.
+ NonFragileABI_Class_Meta = 0x00001,
+
+ /// Is a root class.
+ NonFragileABI_Class_Root = 0x00002,
+
+ /// Has a C++ constructor and destructor.
+ NonFragileABI_Class_HasCXXStructors = 0x00004,
+
+ /// Has hidden visibility.
+ NonFragileABI_Class_Hidden = 0x00010,
+
+ /// Has the exception attribute.
+ NonFragileABI_Class_Exception = 0x00020,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ NonFragileABI_Class_HasIvarReleaser = 0x00040,
+
+ /// Class implementation was compiled under ARC.
+ NonFragileABI_Class_CompiledByARC = 0x00080,
+
+ /// Class has non-trivial destructors, but zero-initialization is okay.
+ NonFragileABI_Class_HasCXXDestructorOnly = 0x00100
};
/*
@@ -2379,15 +2964,15 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
Interface->all_referenced_protocol_begin(),
Interface->all_referenced_protocol_end());
- unsigned Flags = eClassFlags_Factory;
- if (ID->hasCXXStructors())
- Flags |= eClassFlags_HasCXXStructors;
+ unsigned Flags = FragileABI_Class_Factory;
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors())
+ Flags |= FragileABI_Class_HasCXXStructors;
unsigned Size =
CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
// FIXME: Set CXX-structors flag.
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
- Flags |= eClassFlags_Hidden;
+ Flags |= FragileABI_Class_Hidden;
llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
for (ObjCImplementationDecl::instmeth_iterator
@@ -2470,11 +3055,11 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
ArrayRef<llvm::Constant*> Methods) {
- unsigned Flags = eClassFlags_Meta;
- unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+ unsigned Flags = FragileABI_Class_Meta;
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassTy);
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
- Flags |= eClassFlags_Hidden;
+ Flags |= FragileABI_Class_Hidden;
llvm::Constant *Values[12];
// The isa for the metaclass is the root of the hierarchy.
@@ -2588,7 +3173,7 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
llvm::Constant *
CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
uint64_t Size =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
@@ -3481,7 +4066,7 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
@@ -3502,7 +4087,7 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
bool threadlocal) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
@@ -3528,7 +4113,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
@@ -3548,7 +4133,7 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
@@ -3679,7 +4264,7 @@ void CGObjCCommonMac::EmitImageInfo() {
static const int ModuleVersion = 7;
void CGObjCMac::EmitModuleInfo() {
- uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+ uint64_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ModuleTy);
llvm::Constant *Values[] = {
llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
@@ -3824,7 +4409,7 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
Fields.push_back(*i);
llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
- CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
+ CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty));
BuildAggrIvarLayout(0, RecLayout, RD, Fields, BytePos,
ForStrongLayout, HasUnion);
@@ -3951,7 +4536,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
if (IsUnion) {
// FIXME: Why the asymmetry? We divide by word size in bits on other
// side.
- uint64_t UnionIvarSize = FieldSize;
+ uint64_t UnionIvarSize = FieldSize / ByteSizeInBits;
if (UnionIvarSize > MaxSkippedUnionIvarSize) {
MaxSkippedUnionIvarSize = UnionIvarSize;
MaxSkippedField = Field;
@@ -4005,7 +4590,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
// Build the string of skip/scan nibbles
SmallVector<SKIP_SCAN, 32> SkipScanIvars;
unsigned int WordSize =
- CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+ CGM.getTypes().getDataLayout().getTypeAllocSize(PtrTy);
if (IvarsInfo[0].ivar_bytepos == 0) {
WordsToSkip = 0;
WordsToScan = IvarsInfo[0].ivar_size;
@@ -4187,7 +4772,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
printf("\n%s ivar layout for class '%s': ",
ForStrongLayout ? "strong" : "weak",
OMD->getClassInterface()->getName().data());
- const unsigned char *s = (unsigned char*)BitMap.c_str();
+ const unsigned char *s = (const unsigned char*)BitMap.c_str();
for (unsigned i = 0, e = BitMap.size(); i < e; i++)
if (!(s[i] & 0xf0))
printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
@@ -4835,7 +5420,7 @@ AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
llvm::GlobalValue::InternalLinkage,
Init,
SymbolName);
- GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(SectionName);
CGM.AddUsedGlobal(GV);
}
@@ -4941,19 +5526,6 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
return VTableDispatchMethods.count(Sel);
}
-// Metadata flags
-enum MetaDataDlags {
- CLS = 0x0,
- CLS_META = 0x1,
- CLS_ROOT = 0x2,
- OBJC2_CLS_HIDDEN = 0x10,
- CLS_EXCEPTION = 0x20,
-
- /// (Obsolete) ARC-specific: this class has a .release_ivars method
- CLS_HAS_IVAR_RELEASER = 0x40,
- /// class was compiled with -fobjc-arr
- CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
-};
/// BuildClassRoTInitializer - generate meta-data for:
/// struct _class_ro_t {
/// uint32_t const flags;
@@ -4978,19 +5550,20 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
llvm::Constant *Values[10]; // 11 for 64bit targets!
if (CGM.getLangOpts().ObjCAutoRefCount)
- flags |= CLS_COMPILED_BY_ARC;
+ flags |= NonFragileABI_Class_CompiledByARC;
Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
// FIXME. For 64bit targets add 0 here.
- Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ Values[ 3] = (flags & NonFragileABI_Class_Meta)
+ ? GetIvarLayoutName(0, ObjCTypes)
: BuildIvarLayout(ID, true);
Values[ 4] = GetClassName(ID->getIdentifier());
// const struct _method_list_t * const baseMethods;
std::vector<llvm::Constant*> Methods;
std::string MethodListName("\01l_OBJC_$_");
- if (flags & CLS_META) {
+ if (flags & NonFragileABI_Class_Meta) {
MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
for (ObjCImplementationDecl::classmeth_iterator
i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
@@ -5030,28 +5603,27 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
OID->all_referenced_protocol_begin(),
OID->all_referenced_protocol_end());
- if (flags & CLS_META)
+ if (flags & NonFragileABI_Class_Meta) {
Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
- else
- Values[ 7] = EmitIvarList(ID);
- Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
- : BuildIvarLayout(ID, false);
- if (flags & CLS_META)
+ Values[ 8] = GetIvarLayoutName(0, ObjCTypes);
Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
- else
+ } else {
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = BuildIvarLayout(ID, false);
Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
ID, ID->getClassInterface(), ObjCTypes);
+ }
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
Values);
llvm::GlobalVariable *CLASS_RO_GV =
new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
llvm::GlobalValue::InternalLinkage,
Init,
- (flags & CLS_META) ?
+ (flags & NonFragileABI_Class_Meta) ?
std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
CLASS_RO_GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
CLASS_RO_GV->setSection("__DATA, __objc_const");
return CLASS_RO_GV;
@@ -5088,7 +5660,7 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
GV->setInitializer(Init);
GV->setSection("__DATA, __objc_data");
GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy));
if (HiddenVisibility)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
return GV;
@@ -5138,23 +5710,31 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
"CGObjCNonFragileABIMac::GenerateClass - class is 0");
// FIXME: Is this correct (that meta class size is never computed)?
uint32_t InstanceStart =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassnfABITy);
uint32_t InstanceSize = InstanceStart;
- uint32_t flags = CLS_META;
+ uint32_t flags = NonFragileABI_Class_Meta;
std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
std::string ObjCClassName(getClassSymbolPrefix());
llvm::GlobalVariable *SuperClassGV, *IsAGV;
+ // Build the flags for the metaclass.
bool classIsHidden =
ID->getClassInterface()->getVisibility() == HiddenVisibility;
if (classIsHidden)
- flags |= OBJC2_CLS_HIDDEN;
- if (ID->hasCXXStructors())
- flags |= eClassFlags_ABI2_HasCXXStructors;
+ flags |= NonFragileABI_Class_Hidden;
+
+ // FIXME: why is this flag set on the metaclass?
+ // ObjC metaclasses have no fields and don't really get constructed.
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors()) {
+ flags |= NonFragileABI_Class_HasCXXStructors;
+ if (!ID->hasNonZeroConstructors())
+ flags |= NonFragileABI_Class_HasCXXDestructorOnly;
+ }
+
if (!ID->getClassInterface()->getSuperClass()) {
// class is root
- flags |= CLS_ROOT;
+ flags |= NonFragileABI_Class_Root;
SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
} else {
@@ -5183,17 +5763,28 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
DefinedMetaClasses.push_back(MetaTClass);
// Metadata for the class
- flags = CLS;
+ flags = 0;
if (classIsHidden)
- flags |= OBJC2_CLS_HIDDEN;
- if (ID->hasCXXStructors())
- flags |= eClassFlags_ABI2_HasCXXStructors;
+ flags |= NonFragileABI_Class_Hidden;
+
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors()) {
+ flags |= NonFragileABI_Class_HasCXXStructors;
+
+ // Set a flag to enable a runtime optimization when a class has
+ // fields that require destruction but which don't require
+ // anything except zero-initialization during construction. This
+ // is most notably true of __strong and __weak types, but you can
+ // also imagine there being C++ types with non-trivial default
+ // constructors that merely set all fields to null.
+ if (!ID->hasNonZeroConstructors())
+ flags |= NonFragileABI_Class_HasCXXDestructorOnly;
+ }
if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
- flags |= CLS_EXCEPTION;
+ flags |= NonFragileABI_Class_Exception;
if (!ID->getClassInterface()->getSuperClass()) {
- flags |= CLS_ROOT;
+ flags |= NonFragileABI_Class_Root;
SuperClassGV = 0;
} else {
// Has a root. Current class is not a root.
@@ -5220,7 +5811,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
DefinedNonLazyClasses.push_back(ClassMD);
// Force the definition of the EHType if necessary.
- if (flags & CLS_EXCEPTION)
+ if (flags & NonFragileABI_Class_Exception)
GetInterfaceEHType(ID->getClassInterface(), true);
// Make sure method definition entries are all clear for next implementation.
MethodDefinitions.clear();
@@ -5344,7 +5935,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Init,
ExtCatName);
GCATV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.CategorynfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy));
GCATV->setSection("__DATA, __objc_const");
CGM.AddUsedGlobal(GCATV);
DefinedCategories.push_back(GCATV);
@@ -5391,7 +5982,7 @@ CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
llvm::Constant *Values[3];
// sizeof(struct _objc_method)
- unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.MethodTy);
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
// method_count
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
@@ -5403,7 +5994,7 @@ CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::InternalLinkage, Init, Name);
- GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(Section);
CGM.AddUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
@@ -5437,7 +6028,7 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
Offset));
IvarOffsetGV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.LongTy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.LongTy));
// FIXME: This matches gcc, but shouldn't the visibility be set on the use as
// well (i.e., in ObjCIvarOffsetVariable).
@@ -5490,7 +6081,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
Ivar[2] = GetMethodVarType(IVD);
llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(IVD->getType());
- unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(FieldTy);
unsigned Align = CGM.getContext().getPreferredTypeAlign(
IVD->getType().getTypePtr()) >> 3;
Align = llvm::Log2_32(Align);
@@ -5508,7 +6099,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
llvm::Constant *Values[3];
- unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.IvarnfABITy);
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
@@ -5522,7 +6113,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
Init,
Prefix + OID->getName());
GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection("__DATA, __objc_const");
CGM.AddUsedGlobal(GV);
@@ -5644,7 +6235,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getName(),
0, PD, ObjCTypes);
uint32_t Size =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
@@ -5663,7 +6254,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
false, llvm::GlobalValue::WeakAnyLinkage, Init,
"\01l_OBJC_PROTOCOL_$_" + PD->getName());
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
Entry->setSection("__DATA,__datacoal_nt,coalesced");
Protocols[PD->getIdentifier()] = Entry;
@@ -5678,7 +6269,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
false, llvm::GlobalValue::WeakAnyLinkage, Entry,
"\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getName());
PTGV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.AddUsedGlobal(PTGV);
@@ -5732,7 +6323,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
Init, Name);
GV->setSection("__DATA, __objc_const");
GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ CGM.getDataLayout().getABITypeAlignment(Init->getType()));
CGM.AddUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV,
ObjCTypes.ProtocolListnfABIPtrTy);
@@ -5970,7 +6561,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CGBuilderTy &Builder,
ClassGV,
"\01L_OBJC_CLASSLIST_REFERENCES_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
+ CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
@@ -6004,7 +6595,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
ClassGV,
"\01L_OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
+ CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
@@ -6030,7 +6621,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
MetaClassGV,
"\01L_OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
+ CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
@@ -6079,16 +6670,9 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
- if (IsClassMessage) {
- if (isCategoryImpl) {
- // Message sent to "super' in a class method defined in
- // a category implementation.
- Target = EmitClassRef(CGF.Builder, Class);
- Target = CGF.Builder.CreateStructGEP(Target, 0);
- Target = CGF.Builder.CreateLoad(Target);
- } else
+ if (IsClassMessage)
Target = EmitMetaClassRef(CGF.Builder, Class);
- } else
+ else
Target = EmitSuperClassRef(CGF.Builder, Class);
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
@@ -6143,7 +6727,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *ivarOffset) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
@@ -6164,7 +6748,7 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
llvm::Value *src, llvm::Value *dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
@@ -6211,7 +6795,7 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
@@ -6232,7 +6816,7 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
bool threadlocal) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
@@ -6364,7 +6948,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
if (CGM.getLangOpts().getVisibilityMode() == HiddenVisibility)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
- Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
+ Entry->setAlignment(CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.EHTypeTy));
if (ForDefinition) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
index 9aa6837..6932dd7 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -78,6 +78,13 @@ uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
CGM.getContext().getCharWidth();
}
+unsigned CGObjCRuntime::ComputeBitfieldBitOffset(
+ CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, ID, ID->getImplementation(), Ivar);
+}
+
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *OID,
llvm::Value *BaseValue,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
index 219a3e4..3e77875 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -261,6 +261,8 @@ public:
llvm::Value *Size) = 0;
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
+ virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CodeGen::CGBlockInfo &blockInfo) = 0;
virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) = 0;
struct MessageSendInfo {
@@ -275,6 +277,12 @@ public:
MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method,
QualType resultType,
CallArgList &callArgs);
+
+ // FIXME: This probably shouldn't be here, but the code to compute
+ // it is here.
+ unsigned ComputeBitfieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
index d1b370a..7c83d39 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -105,7 +105,6 @@ public:
/// BuildTypeInfo - Build the RTTI type info struct for the given type.
///
/// \param Force - true to force the creation of this RTTI value
- /// \param ForEH - true if this is for exception handling
llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
};
}
@@ -779,28 +778,24 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
if (Base->isVirtual()) {
- if (Bases.VirtualBases.count(BaseDecl)) {
+ // Mark the virtual base as seen.
+ if (!Bases.VirtualBases.insert(BaseDecl)) {
// If this virtual base has been seen before, then the class is diamond
// shaped.
Flags |= RTTIBuilder::VMI_DiamondShaped;
} else {
if (Bases.NonVirtualBases.count(BaseDecl))
Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
-
- // Mark the virtual base as seen.
- Bases.VirtualBases.insert(BaseDecl);
}
} else {
- if (Bases.NonVirtualBases.count(BaseDecl)) {
+ // Mark the non-virtual base as seen.
+ if (!Bases.NonVirtualBases.insert(BaseDecl)) {
// If this non-virtual base has been seen before, then the class has non-
// diamond shaped repeated inheritance.
Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
} else {
if (Bases.VirtualBases.count(BaseDecl))
Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
-
- // Mark the non-virtual base as seen.
- Bases.NonVirtualBases.insert(BaseDecl);
}
}
@@ -891,7 +886,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
Offset = Layout.getBaseClassOffset(BaseDecl);
};
- OffsetFlags = Offset.getQuantity() << 8;
+ OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
// The low-order byte of __offset_flags contains flags, as given by the
// masks from the enumeration __offset_flags_masks.
@@ -982,7 +977,7 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if (!ForEH && !getContext().getLangOpts().RTTI)
+ if (!ForEH && !getLangOpts().RTTI)
return llvm::Constant::getNullValue(Int8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
index 94c822f..3db5e04 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
@@ -166,8 +166,8 @@ public:
class CGRecordLayout {
friend class CodeGenTypes;
- CGRecordLayout(const CGRecordLayout&); // DO NOT IMPLEMENT
- void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
+ CGRecordLayout(const CGRecordLayout &) LLVM_DELETED_FUNCTION;
+ void operator=(const CGRecordLayout &) LLVM_DELETED_FUNCTION;
private:
/// The LLVM type corresponding to this record layout; used when
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index d642ef8..26ef3ef 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -25,7 +25,7 @@
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -206,7 +206,7 @@ void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
Packed = D->hasAttr<PackedAttr>();
- IsMsStruct = D->hasAttr<MsStructAttr>();
+ IsMsStruct = D->isMsStruct(Types.getContext());
if (D->isUnion()) {
LayoutUnion(D);
@@ -239,7 +239,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
- CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
+ CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
@@ -259,7 +259,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// on big-endian machines the first fields are in higher bit positions,
// so revert the offset. The byte offsets are reversed back later.
- if (Types.getTargetData().isBigEndian()) {
+ if (Types.getDataLayout().isBigEndian()) {
FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
}
@@ -334,7 +334,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// on big-endian machines we reverted the bit offset because first fields are
// in higher bits. But this also reverts the bytes, so fix this here by reverting
// the byte offset on big-endian machines.
- if (Types.getTargetData().isBigEndian()) {
+ if (Types.getDataLayout().isBigEndian()) {
AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
ContainingTypeSizeInBits - AccessStart - AccessWidth);
} else {
@@ -553,9 +553,9 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
hasOnlyZeroSizedBitFields = false;
CharUnits fieldAlign = CharUnits::fromQuantity(
- Types.getTargetData().getABITypeAlignment(fieldType));
+ Types.getDataLayout().getABITypeAlignment(fieldType));
CharUnits fieldSize = CharUnits::fromQuantity(
- Types.getTargetData().getTypeAllocSize(fieldType));
+ Types.getDataLayout().getTypeAllocSize(fieldType));
if (fieldAlign < unionAlign)
continue;
@@ -884,7 +884,7 @@ void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
llvm::Type *fieldType) {
CharUnits fieldSize =
- CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
+ CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(fieldType));
FieldTypes.push_back(fieldType);
@@ -957,7 +957,7 @@ CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
if (Packed)
return CharUnits::One();
- return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
+ return CharUnits::fromQuantity(Types.getDataLayout().getABITypeAlignment(Ty));
}
CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
@@ -1036,7 +1036,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
- assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
+ assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
"Type size mismatch!");
if (BaseTy) {
@@ -1049,19 +1049,19 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
getContext().toBits(AlignedNonVirtualTypeSize);
assert(AlignedNonVirtualTypeSizeInBits ==
- getTargetData().getTypeAllocSizeInBits(BaseTy) &&
+ getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
"Type size mismatch!");
}
// Verify that the LLVM and AST field offsets agree.
llvm::StructType *ST =
dyn_cast<llvm::StructType>(RL->getLLVMType());
- const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
+ const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);
const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
RecordDecl::field_iterator it = D->field_begin();
const FieldDecl *LastFD = 0;
- bool IsMsStruct = D->hasAttr<MsStructAttr>();
+ bool IsMsStruct = D->isMsStruct(getContext());
for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
const FieldDecl *FD = *it;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
index d78908d..3548dba 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -21,7 +21,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/InlineAsm.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -132,8 +132,8 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
- case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
- case Stmt::MSAsmStmtClass: EmitMSAsmStmt(cast<MSAsmStmt>(*S)); break;
+ case Stmt::GCCAsmStmtClass: // Intentional fall-through.
+ case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
case Stmt::ObjCAtTryStmtClass:
EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
@@ -237,6 +237,10 @@ void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
if (!BI || !BI->isUnconditional())
return;
+ // Can only simplify empty blocks.
+ if (BI != BB->begin())
+ return;
+
BB->replaceAllUsesWith(BI->getSuccessor(0));
BI->eraseFromParent();
BB->eraseFromParent();
@@ -743,6 +747,17 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// Emit the result value, even if unused, to evaluate the side effects.
const Expr *RV = S.getRetValue();
+ // Treat block literals in a return expression as if they appeared
+ // in their own scope. This permits a small, easily-implemented
+ // exception to our over-conservative rules about not jumping to
+ // statements following block literals with non-trivial cleanups.
+ RunCleanupsScope cleanupScope(*this);
+ if (const ExprWithCleanups *cleanups =
+ dyn_cast_or_null<ExprWithCleanups>(RV)) {
+ enterFullExpression(cleanups);
+ RV = cleanups->getSubExpr();
+ }
+
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
@@ -779,6 +794,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
AggValueSlot::IsNotAliased));
}
+ cleanupScope.ForceCleanup();
EmitBranchThroughCleanup(ReturnBlock);
}
@@ -899,7 +915,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
// If the body of the case is just a 'break', and if there was no fallthrough,
// try to not emit an empty block.
- if ((CGM.getCodeGenOpts().OptimizationLevel > 0) && isa<BreakStmt>(S.getSubStmt())) {
+ if ((CGM.getCodeGenOpts().OptimizationLevel > 0) &&
+ isa<BreakStmt>(S.getSubStmt())) {
JumpDest Block = BreakContinueStack.back().BreakBlock;
// Only do this optimization if there are no cleanups that need emitting.
@@ -1263,6 +1280,10 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
case '=': // Will see this and the following in mult-alt constraints.
case '+':
break;
+ case '#': // Ignore the rest of the constraint alternative.
+ while (Constraint[1] && Constraint[1] != ',')
+ Constraint++;
+ break;
case ',':
Result += "|";
break;
@@ -1323,8 +1344,7 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
}
llvm::Value*
-CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
- const TargetInfo::ConstraintInfo &Info,
+CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
LValue InputValue, QualType InputType,
std::string &ConstraintStr) {
llvm::Value *Arg;
@@ -1333,7 +1353,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
Arg = EmitLoadOfLValue(InputValue).getScalarVal();
} else {
llvm::Type *Ty = ConvertType(InputType);
- uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+ uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
Ty = llvm::PointerType::getUnqual(Ty);
@@ -1353,7 +1373,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
return Arg;
}
-llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+llvm::Value* CodeGenFunction::EmitAsmInput(
const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr,
std::string &ConstraintStr) {
@@ -1363,7 +1383,7 @@ llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
LValue Dest = EmitLValue(InputExpr);
- return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
+ return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr);
}
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
@@ -1396,23 +1416,8 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
}
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
- // Analyze the asm string to decompose it into its pieces. We know that Sema
- // has already done this, so it is guaranteed to be successful.
- SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
- unsigned DiagOffs;
- S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
-
- // Assemble the pieces into the final asm string.
- std::string AsmString;
- for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
- if (Pieces[i].isString())
- AsmString += Pieces[i].getString();
- else if (Pieces[i].getModifier() == '\0')
- AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
- else
- AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
- Pieces[i].getModifier() + '}';
- }
+ // Assemble the final asm string.
+ std::string AsmString = S.generateAsmString(getContext());
// Get all the output and input constraints together.
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
@@ -1511,7 +1516,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += ',';
const Expr *InputExpr = S.getOutputExpr(i);
- llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
+ llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
InOutConstraints);
if (llvm::Type* AdjTy =
@@ -1549,7 +1554,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
*InputExpr->IgnoreParenNoopCasts(getContext()),
Target, CGM, S);
- llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+ llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
// input to be the same size as the output. The LLVM backend wants to see
@@ -1596,7 +1601,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Clobbers
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
- StringRef Clobber = S.getClobber(i)->getString();
+ StringRef Clobber = S.getClobber(i);
if (Clobber != "memory" && Clobber != "cc")
Clobber = Target.getNormalizedGCCRegisterName(Clobber);
@@ -1628,15 +1633,22 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(ResultType, ArgTypes, false);
+ bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
+ llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
+ llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, AsmString, Constraints,
- S.isVolatile() || S.getNumOutputs() == 0);
+ llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
+ /* IsAlignStack */ false, AsmDialect);
llvm::CallInst *Result = Builder.CreateCall(IA, Args);
- Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+ Result->addAttribute(llvm::AttrListPtr::FunctionIndex,
+ llvm::Attributes::get(getLLVMContext(),
+ llvm::Attributes::NoUnwind));
// Slap the source location of the inline asm into a !srcloc metadata on the
- // call.
- Result->setMetadata("srcloc", getAsmSrcLocInfo(S.getAsmString(), *this));
+ // call. FIXME: Handle metadata for MS-style inline asms.
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
+ Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
+ *this));
// Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
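
With the separate MS-asm path removed, the dialect now travels on the llvm::InlineAsm value itself instead of the old IANSDialect attribute. A minimal sketch against the LLVM 3.2-era C++ API this import targets:

    #include "llvm/DerivedTypes.h"
    #include "llvm/InlineAsm.h"
    #include "llvm/LLVMContext.h"

    // Build a void() inline-asm callee parsed with the Intel dialect,
    // mirroring the AD_Intel selection made above for MSAsmStmt.
    llvm::InlineAsm *makeIntelNop(llvm::LLVMContext &Ctx) {
      llvm::FunctionType *FTy =
          llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
                                  /*isVarArg=*/false);
      return llvm::InlineAsm::get(FTy, "nop", "",
                                  /*HasSideEffect=*/true,
                                  /*IsAlignStack=*/false,
                                  llvm::InlineAsm::AD_Intel);
    }
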
@@ -1662,12 +1674,12 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (TruncTy->isFloatingPointTy())
Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
- uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
Tmp = Builder.CreateTrunc(Tmp,
llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
} else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
- uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
+ uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
Tmp = Builder.CreatePtrToInt(Tmp,
llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
@@ -1681,47 +1693,3 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
}
}
-
-void CodeGenFunction::EmitMSAsmStmt(const MSAsmStmt &S) {
- // MS-style inline assembly is not fully supported, so sema emits a warning.
- if (!CGM.getCodeGenOpts().EmitMicrosoftInlineAsm)
- return;
-
- assert (S.isSimple() && "CodeGen can only handle simple MSAsmStmts.");
-
- std::vector<llvm::Value*> Args;
- std::vector<llvm::Type *> ArgTypes;
- std::string Constraints;
-
- // Clobbers
- for (unsigned i = 0, e = S.getNumClobbers(); i != e; ++i) {
- StringRef Clobber = S.getClobber(i);
-
- if (Clobber != "memory" && Clobber != "cc")
- Clobber = Target.getNormalizedGCCRegisterName(Clobber);
-
- if (i != 0)
- Constraints += ',';
-
- Constraints += "~{";
- Constraints += Clobber;
- Constraints += '}';
- }
-
- // Add machine specific clobbers
- std::string MachineClobbers = Target.getClobbers();
- if (!MachineClobbers.empty()) {
- if (!Constraints.empty())
- Constraints += ',';
- Constraints += MachineClobbers;
- }
-
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, ArgTypes, false);
-
- llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, *S.getAsmString(), Constraints, true);
- llvm::CallInst *Result = Builder.CreateCall(IA, Args);
- Result->addAttribute(~0, llvm::Attribute::NoUnwind);
- Result->addAttribute(~0, llvm::Attribute::IANSDialect);
-}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
index cdaa26a..5b37fe4 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -79,15 +79,16 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
llvm::Value *Ptr,
int64_t NonVirtualAdjustment,
- int64_t VirtualAdjustment) {
+ int64_t VirtualAdjustment,
+ bool IsReturnAdjustment) {
if (!NonVirtualAdjustment && !VirtualAdjustment)
return Ptr;
llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
- if (NonVirtualAdjustment) {
- // Do the non-virtual adjustment.
+ if (NonVirtualAdjustment && !IsReturnAdjustment) {
+ // Perform the non-virtual adjustment for a base-to-derived cast.
V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
}
@@ -95,7 +96,7 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- // Do the virtual adjustment.
+ // Perform the virtual adjustment.
llvm::Value *VTablePtrPtr =
CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
@@ -113,6 +114,11 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
V = CGF.Builder.CreateInBoundsGEP(V, Offset);
}
+ if (NonVirtualAdjustment && IsReturnAdjustment) {
+ // Perform the non-virtual adjustment for a derived-to-base cast.
+ V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ }
+
// Cast back to the original type.
return CGF.Builder.CreateBitCast(V, Ptr->getType());
}
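
The new IsReturnAdjustment flag fixes the ordering of the two deltas: a 'this' adjustment (base-to-derived) applies the non-virtual offset before the virtual one, while a return adjustment (derived-to-base) applies it after. A raw-pointer sketch of the two orders, assuming Itanium-style vtables:

    #include <cstddef>

    static char *adjustPointer(char *Ptr, std::ptrdiff_t NonVirtual,
                               std::ptrdiff_t VBaseOffsetOffset,
                               bool IsReturnAdjustment) {
      if (NonVirtual && !IsReturnAdjustment)
        Ptr += NonVirtual;            // base-to-derived: non-virtual first
      if (VBaseOffsetOffset) {        // virtual part: read delta from vtable
        char *VTable = *reinterpret_cast<char **>(Ptr);
        Ptr += *reinterpret_cast<std::ptrdiff_t *>(VTable + VBaseOffsetOffset);
      }
      if (NonVirtual && IsReturnAdjustment)
        Ptr += NonVirtual;            // derived-to-base: non-virtual last
      return Ptr;
    }
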
@@ -150,8 +156,7 @@ static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
case TSK_ExplicitSpecialization:
case TSK_ImplicitInstantiation:
- if (!CGM.getCodeGenOpts().HiddenWeakTemplateVTables)
- return;
+ return;
break;
}
@@ -199,7 +204,8 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
ReturnValue = PerformTypeAdjustment(CGF, ReturnValue,
Thunk.Return.NonVirtual,
- Thunk.Return.VBaseOffsetOffset);
+ Thunk.Return.VBaseOffsetOffset,
+ /*IsReturnAdjustment*/true);
if (NullCheckValue) {
CGF.Builder.CreateBr(AdjustEnd);
@@ -248,7 +254,9 @@ void CodeGenFunction::GenerateVarArgsThunk(
llvm::Function *BaseFn = cast<llvm::Function>(Callee);
// Clone to thunk.
- llvm::Function *NewFn = llvm::CloneFunction(BaseFn);
+ llvm::ValueToValueMapTy VMap;
+ llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap,
+ /*ModuleLevelChanges=*/false);
CGM.getModule().getFunctionList().push_back(NewFn);
Fn->replaceAllUsesWith(NewFn);
NewFn->takeName(Fn);
@@ -281,7 +289,8 @@ void CodeGenFunction::GenerateVarArgsThunk(
llvm::Value *AdjustedThisPtr =
PerformTypeAdjustment(*this, ThisPtr,
Thunk.This.NonVirtual,
- Thunk.This.VCallOffsetOffset);
+ Thunk.This.VCallOffsetOffset,
+ /*IsReturnAdjustment*/false);
ThisStore->setOperand(0, AdjustedThisPtr);
if (!Thunk.Return.isEmpty()) {
@@ -324,7 +333,10 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
FunctionArgs.push_back(Param);
}
-
+
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
+
StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
SourceLocation());
@@ -335,7 +347,8 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
llvm::Value *AdjustedThisPtr =
PerformTypeAdjustment(*this, LoadCXXThis(),
Thunk.This.NonVirtual,
- Thunk.This.VCallOffsetOffset);
+ Thunk.This.VCallOffsetOffset,
+ /*IsReturnAdjustment*/false);
CallArgList CallArgs;
@@ -455,6 +468,8 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
return;
}
+ CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);
+
if (ThunkFn->isVarArg()) {
// Varargs thunks are special; we can't just generate a call because
// we can't copy the varargs. Our implementation is rather
@@ -524,7 +539,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
unsigned NextVTableThunkIndex = 0;
- llvm::Constant* PureVirtualFn = 0;
+ llvm::Constant *PureVirtualFn = 0, *DeletedVirtualFn = 0;
for (unsigned I = 0; I != NumComponents; ++I) {
VTableComponent Component = Components[I];
@@ -573,14 +588,25 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
// We have a pure virtual member function.
if (!PureVirtualFn) {
- llvm::FunctionType *Ty =
- llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
- PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
- PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
+ PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
CGM.Int8PtrTy);
}
Init = PureVirtualFn;
+ } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
+ if (!DeletedVirtualFn) {
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ StringRef DeletedCallName =
+ CGM.getCXXABI().GetDeletedVirtualCallName();
+ DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
+ DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
+ CGM.Int8PtrTy);
+ }
+ Init = DeletedVirtualFn;
} else {
// Check if we should use a thunk.
if (NextVTableThunkIndex < NumVTableThunks &&
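
The new branch gives vtable slots for deleted virtual functions their own runtime entry point (GetDeletedVirtualCallName; __cxa_deleted_virtual under the Itanium ABI) rather than reusing the pure-virtual handler. Example source that exercises it:

    struct S {
      virtual void f() = delete;  // slot filled with __cxa_deleted_virtual
      virtual ~S() {}
    };
    S s;                          // forces emission of S's vtable
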
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
index dd32167..9d6d183 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
@@ -65,9 +65,11 @@ namespace clang {
TargetOpts(targetopts),
LangOpts(langopts),
AsmOutStream(OS),
+ Context(),
LLVMIRGeneration("LLVM IR Generation Time"),
Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)),
- LinkModule(LinkModule) {
+ LinkModule(LinkModule)
+ {
llvm::TimePassesIsEnabled = TimePasses;
}
@@ -379,7 +381,7 @@ void CodeGenAction::ExecuteAction() {
// FIXME: This is stupid, IRReader shouldn't take ownership.
llvm::MemoryBuffer *MainFileCopy =
llvm::MemoryBuffer::getMemBufferCopy(MainFile->getBuffer(),
- getCurrentFile().c_str());
+ getCurrentFile());
llvm::SMDiagnostic Err;
TheModule.reset(ParseIR(MainFileCopy, Err, *VMContext));
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
index 1d02861..18f1623 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -24,7 +24,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/MDBuilder.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -32,6 +32,10 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm),
Target(CGM.getContext().getTargetInfo()),
Builder(cgm.getModule().getContext()),
+ SanitizePerformTypeCheck(CGM.getLangOpts().SanitizeNull |
+ CGM.getLangOpts().SanitizeAlignment |
+ CGM.getLangOpts().SanitizeObjectSize |
+ CGM.getLangOpts().SanitizeVptr),
AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
@@ -40,8 +44,6 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
TerminateHandler(0), TrapBB(0) {
-
- CatchUndefined = getContext().getLangOpts().CatchUndefined;
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
}
@@ -348,11 +350,11 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
RE = FD->redecls_end(); RI != RE; ++RI)
if (RI->isInlineSpecified()) {
- Fn->addFnAttr(llvm::Attribute::InlineHint);
+ Fn->addFnAttr(llvm::Attributes::InlineHint);
break;
}
- if (getContext().getLangOpts().OpenCL) {
+ if (getLangOpts().OpenCL) {
// Add metadata for a kernel function.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
EmitOpenCLKernelMetadata(FD, Fn);
@@ -485,7 +487,7 @@ static void TryMarkNoThrow(llvm::Function *F) {
} else if (isa<llvm::ResumeInst>(&*BI)) {
return;
}
- F->setDoesNotThrow(true);
+ F->setDoesNotThrow();
}
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
@@ -493,8 +495,8 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// Check if we should generate debug info for this function.
- if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
- DebugInfo = CGM.getModuleDebugInfo();
+ if (!FD->hasAttr<NoDebugAttr>())
+ maybeInitializeDebugInfo();
FunctionArgList Args;
QualType ResTy = FD->getResultType();
@@ -517,7 +519,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
EmitDestructorBody(Args);
else if (isa<CXXConstructorDecl>(FD))
EmitConstructorBody(Args);
- else if (getContext().getLangOpts().CUDA &&
+ else if (getLangOpts().CUDA &&
!CGM.getCodeGenOpts().CUDAIsDevice &&
FD->hasAttr<CUDAGlobalAttr>())
CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
@@ -535,6 +537,24 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
else
EmitFunctionBody(Args);
+ // C++11 [stmt.return]p2:
+ // Flowing off the end of a function [...] results in undefined behavior in
+ // a value-returning function.
+ // C11 6.9.1p12:
+ // If the '}' that terminates a function is reached, and the value of the
+ // function call is used by the caller, the behavior is undefined.
+ if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() &&
+ !FD->getResultType()->isVoidType() && Builder.GetInsertBlock()) {
+ if (getLangOpts().SanitizeReturn)
+ EmitCheck(Builder.getFalse(), "missing_return",
+ EmitCheckSourceLocation(FD->getLocation()),
+ llvm::ArrayRef<llvm::Value*>());
+ else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ }
+
// Emit the standard function epilogue.
FinishFunction(BodyRange.getEnd());
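
The new epilogue code makes flowing off the end of a value-returning function fail fast instead of returning garbage: -fsanitize=return calls the missing_return handler, -O0 emits a trap, and both end the block with 'unreachable'. Example source that reaches it:

    int f(bool b) {
      if (b)
        return 1;
    }  // falling off the end here is UB (C++11 [stmt.return]p2); the
       // emitted IR now ends in a handler call or trap, then 'unreachable'
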
@@ -806,7 +826,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Ignore empty classes in C++.
- if (getContext().getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
return;
@@ -983,8 +1003,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
arrayType = getContext().getAsArrayType(eltType);
}
- unsigned AddressSpace =
- cast<llvm::PointerType>(addr->getType())->getAddressSpace();
+ unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
} else {
@@ -1027,6 +1046,7 @@ CodeGenFunction::getVLASize(const VariableArrayType *type) {
numElements = vlaSize;
} else {
// It's undefined behavior if this wraps around, so mark it that way.
+ // FIXME: Teach -fcatch-undefined-behavior to trap this.
numElements = Builder.CreateNUWMul(numElements, vlaSize);
}
} while ((type = getContext().getAsVariableArrayType(elementType)));
@@ -1104,10 +1124,26 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
// e.g. with a typedef and a pointer to it.
llvm::Value *&entry = VLASizeMap[size];
if (!entry) {
+ llvm::Value *Size = EmitScalarExpr(size);
+
+ // C11 6.7.6.2p5:
+ // If the size is an expression that is not an integer constant
+ // expression [...] each time it is evaluated it shall have a value
+ // greater than zero.
+ if (getLangOpts().SanitizeVLABound &&
+ size->getType()->isSignedIntegerType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
+ llvm::Constant *StaticArgs[] = {
+ EmitCheckSourceLocation(size->getLocStart()),
+ EmitCheckTypeDescriptor(size->getType())
+ };
+ EmitCheck(Builder.CreateICmpSGT(Size, Zero),
+ "vla_bound_not_positive", StaticArgs, Size);
+ }
+
// Always zexting here would be wrong if it weren't
// undefined behavior to have a negative bound.
- entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
- /*signed*/ false);
+ entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
}
}
type = vat->getElementType();
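
The new check enforces C11 6.7.6.2p5 for signed VLA bounds before the zero-extending cast below it. Example source (VLAs are a GNU extension in C++) diagnosed under -fsanitize=vla-bound:

    void g(int n) {
      int vla[n];   // n <= 0 triggers the vla_bound_not_positive handler
      (void)vla;
    }
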
@@ -1156,7 +1192,7 @@ void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
llvm::Constant *Init) {
assert (Init && "Invalid DeclRefExpr initializer!");
if (CGDebugInfo *Dbg = getDebugInfo())
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
+ if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
Dbg->EmitGlobalVariable(E->getDecl(), Init);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
index ed3e43b..f2ab226 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -222,8 +222,7 @@ public:
/// immediately-enclosing context of the cleanup scope. For
/// EH cleanups, this is run in a terminate context.
///
- // \param IsForEHCleanup true if this is for an EH cleanup, false
- /// if for a normal cleanup.
+ // \param flags Indicates the kind of cleanup being run.
virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
};
@@ -533,8 +532,8 @@ public:
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
- CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
- void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+ CodeGenFunction(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
+ void operator=(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
friend class CGCXXABI;
public:
@@ -595,8 +594,9 @@ public:
/// potentially higher performance penalties.
unsigned char BoundsChecking;
- /// CatchUndefined - Emit run-time checks to catch undefined behaviors.
- bool CatchUndefined;
+ /// \brief Whether any type-checking sanitizers are enabled. If \c false,
+ /// calls to EmitTypeCheck can be skipped.
+ bool SanitizePerformTypeCheck;
/// In ARC, whether we should autorelease the return value.
bool AutoreleaseResult;
@@ -795,8 +795,8 @@ public:
bool OldDidCallStackSave;
bool PerformCleanup;
- RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
- RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
+ void operator=(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
protected:
CodeGenFunction& CGF;
@@ -839,8 +839,8 @@ public:
SourceRange Range;
bool PopDebugStack;
- LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
- LexicalScope &operator=(const LexicalScope &);
+ LexicalScope(const LexicalScope &) LLVM_DELETED_FUNCTION;
+ void operator=(const LexicalScope &) LLVM_DELETED_FUNCTION;
public:
/// \brief Enter a new cleanup scope.
@@ -908,7 +908,7 @@ public:
/// themselves).
void popCatchScope();
- llvm::BasicBlock *getEHResumeBlock();
+ llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
/// An object to manage conditionally-evaluated expressions.
@@ -1213,6 +1213,14 @@ public:
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
ASTContext &getContext() const { return CGM.getContext(); }
+ /// Returns true if DebugInfo is actually initialized.
+ bool maybeInitializeDebugInfo() {
+ if (CGM.getModuleDebugInfo()) {
+ DebugInfo = CGM.getModuleDebugInfo();
+ return true;
+ }
+ return false;
+ }
CGDebugInfo *getDebugInfo() {
if (DisableDebugInfo)
return NULL;
@@ -1504,7 +1512,7 @@ public:
static bool hasAggregateLLVMType(QualType T);
/// createBasicBlock - Create an LLVM basic block.
- llvm::BasicBlock *createBasicBlock(StringRef name = "",
+ llvm::BasicBlock *createBasicBlock(const Twine &name = "",
llvm::Function *parent = 0,
llvm::BasicBlock *before = 0) {
#ifdef NDEBUG
@@ -1631,7 +1639,7 @@ public:
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
///
- /// \param IgnoreResult - True if the resulting value isn't used.
+ /// \param ignoreResult True if the resulting value isn't used.
RValue EmitAnyExpr(const Expr *E,
AggValueSlot aggSlot = AggValueSlot::ignored(),
bool ignoreResult = false);
@@ -1654,13 +1662,26 @@ public:
void EmitExprAsInit(const Expr *init, const ValueDecl *D,
LValue lvalue, bool capturedByInit);
+ /// EmitAggregateAssign - Emit an aggregate assignment.
+ ///
+ /// Unlike EmitAggregateCopy, tail padding is not copied. This is required
+ /// for correctness when assigning non-POD structures in C++.
+ void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType EltTy, bool isVolatile=false,
+ CharUnits Alignment = CharUnits::Zero()) {
+ EmitAggregateCopy(DestPtr, SrcPtr, EltTy, isVolatile, Alignment, true);
+ }
+
/// EmitAggregateCopy - Emit an aggregate copy.
///
/// \param isVolatile - True iff either the source or the destination is
/// volatile.
+ /// \param isAssignment - If false, allow padding to be copied. This often
+ /// yields more efficient code.
void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType EltTy, bool isVolatile=false,
- CharUnits Alignment = CharUnits::Zero());
+ CharUnits Alignment = CharUnits::Zero(),
+ bool isAssignment = false);
/// StartBlock - Start new block named N. If insert block is a dummy block
/// then reuse it.
@@ -1829,12 +1850,37 @@ public:
llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+ llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);
void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
void EmitStdInitializerListCleanup(llvm::Value *loc,
const InitListExpr *init);
- void EmitCheck(llvm::Value *, unsigned Size);
+ /// \brief Situations in which we might emit a check for the suitability of a
+ /// pointer or glvalue.
+ enum TypeCheckKind {
+ /// Checking the operand of a load. Must be suitably sized and aligned.
+ TCK_Load,
+ /// Checking the destination of a store. Must be suitably sized and aligned.
+ TCK_Store,
+ /// Checking the bound value in a reference binding. Must be suitably sized
+ /// and aligned, but is not required to refer to an object (until the
+ /// reference is used), per core issue 453.
+ TCK_ReferenceBinding,
+ /// Checking the object expression in a non-static data member access. Must
+ /// be an object within its lifetime.
+ TCK_MemberAccess,
+ /// Checking the 'this' pointer for a call to a non-static member function.
+ /// Must be an object within its lifetime.
+ TCK_MemberCall,
+ /// Checking the 'this' pointer for a constructor call.
+ TCK_ConstructorCall
+ };
+
+ /// \brief Emit a check that \p V is the address of storage of the
+ /// appropriate size and alignment for an object of type \p Type.
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
+ QualType Type, CharUnits Alignment = CharUnits::Zero());
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
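
An illustrative (not exhaustive) mapping from source constructs to the TypeCheckKind values above:

    struct A { int m; void f(); };

    void use(A *p, A *q) {
      int x = p->m;   // TCK_MemberAccess on 'p', TCK_Load on the read
      p->m = x;       // TCK_Store on the write
      p->f();         // TCK_MemberCall on the 'this' pointer
      A &r = *q;      // TCK_ReferenceBinding on the bound glvalue
      (void)r;
    }
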
@@ -1981,7 +2027,6 @@ public:
void EmitCaseStmt(const CaseStmt &S);
void EmitCaseStmtRange(const CaseStmt &S);
void EmitAsmStmt(const AsmStmt &S);
- void EmitMSAsmStmt(const MSAsmStmt &S);
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
@@ -2033,11 +2078,10 @@ public:
///
LValue EmitLValue(const Expr *E);
- /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
- /// checking code to guard against undefined behavior. This is only
- /// suitable when we know that the address will be used to access the
- /// object.
- LValue EmitCheckedLValue(const Expr *E);
+ /// \brief Same as EmitLValue but additionally we generate checking code to
+ /// guard against undefined behavior. This is only suitable when we know
+ /// that the address will be used to access the object.
+ LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
/// EmitToMemory - Change a scalar value from its value
/// representation to its in-memory representation.
@@ -2178,6 +2222,7 @@ public:
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
LValue EmitLambdaLValue(const LambdaExpr *E);
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
+ LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
@@ -2230,6 +2275,7 @@ public:
const CXXRecordDecl *RD);
RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+ SourceLocation CallLoc,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
llvm::Value *This,
@@ -2310,6 +2356,7 @@ public:
llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
+ void EmitARCDestroyStrong(llvm::Value *addr, bool precise);
void EmitARCRelease(llvm::Value *value, bool precise);
llvm::Value *EmitARCAutorelease(llvm::Value *value);
llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
@@ -2516,9 +2563,29 @@ public:
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock);
- /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
- /// generate a branch around the created basic block as necessary.
- llvm::BasicBlock *getTrapBB();
+ /// \brief Emit a description of a type in a format suitable for passing to
+ /// a runtime sanitizer handler.
+ llvm::Constant *EmitCheckTypeDescriptor(QualType T);
+
+ /// \brief Convert a value into a format suitable for passing to a runtime
+ /// sanitizer handler.
+ llvm::Value *EmitCheckValue(llvm::Value *V);
+
+ /// \brief Emit a description of a source location in a format suitable for
+ /// passing to a runtime sanitizer handler.
+ llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
+
+ /// \brief Create a basic block that will call a handler function in a
+ /// sanitizer runtime with the provided arguments, and create a conditional
+ /// branch to it.
+ void EmitCheck(llvm::Value *Checked, StringRef CheckName,
+ llvm::ArrayRef<llvm::Constant *> StaticArgs,
+ llvm::ArrayRef<llvm::Value *> DynamicArgs,
+ bool Recoverable = false);
+
+ /// \brief Create a basic block that will call the trap intrinsic, and emit a
+ /// conditional branch to it, for the -ftrapv checks.
+ void EmitTrapvCheck(llvm::Value *Checked);
/// EmitCallArg - Emit a single call argument.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
@@ -2553,12 +2620,10 @@ private:
SmallVector<llvm::Value*, 16> &Args,
llvm::FunctionType *IRFuncTy);
- llvm::Value* EmitAsmInput(const AsmStmt &S,
- const TargetInfo::ConstraintInfo &Info,
+ llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
- llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
- const TargetInfo::ConstraintInfo &Info,
+ llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
LValue InputValue, QualType InputType,
std::string &ConstraintStr);
@@ -2624,15 +2689,9 @@ private:
void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
- /// GetPointeeAlignment - Given an expression with a pointer type, find the
- /// alignment of the type referenced by the pointer. Skip over implicit
- /// casts.
- unsigned GetPointeeAlignment(const Expr *Addr);
-
- /// GetPointeeAlignmentValue - Given an expression with a pointer type, find
- /// the alignment of the type referenced by the pointer. Skip over implicit
- /// casts. Return the alignment as an llvm::Value.
- llvm::Value *GetPointeeAlignmentValue(const Expr *Addr);
+ /// EmitPointerWithAlignment - Given an expression with a pointer type, emit
+ /// the value and compute our best estimate of the alignment of the pointee.
+ std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
};
/// Helper class with most of the code for saving a value for a
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
index 3ae3c52..17972e2 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -42,7 +42,7 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -62,10 +62,10 @@ static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
- llvm::Module &M, const llvm::TargetData &TD,
+ llvm::Module &M, const llvm::DataLayout &TD,
DiagnosticsEngine &diags)
: Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TheModule(M),
- TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
+ TheDataLayout(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
Types(*this),
TBAA(0),
@@ -103,14 +103,14 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
createCUDARuntime();
// Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
- if (LangOpts.ThreadSanitizer ||
+ if (LangOpts.SanitizeThread ||
(!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(),
ABI.getMangleContext());
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
- if (CodeGenOpts.DebugInfo != CodeGenOptions::NoDebugInfo ||
+ if (CodeGenOpts.getDebugInfo() != CodeGenOptions::NoDebugInfo ||
CodeGenOpts.EmitGcovArcs ||
CodeGenOpts.EmitGcovNotes)
DebugInfo = new CGDebugInfo(*this);
@@ -202,6 +202,12 @@ llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() {
return TBAA->getTBAAInfoForVTablePtr();
}
+llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAStructInfo(QTy);
+}
+
void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo) {
Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
@@ -287,7 +293,7 @@ void CodeGenModule::setTLSMode(llvm::GlobalVariable *GV,
assert(D.isThreadSpecified() && "setting TLS mode on non-TLS var!");
llvm::GlobalVariable::ThreadLocalMode TLM;
- TLM = GetLLVMTLSModel(CodeGenOpts.DefaultTLSModel);
+ TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
// Override the TLS model if it is explicitly specified.
if (D.hasAttr<TLSModelAttr>()) {
@@ -347,9 +353,7 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
// to deal with mixed-visibility symbols.
case TSK_ExplicitSpecialization:
case TSK_ImplicitInstantiation:
- if (!CodeGenOpts.HiddenWeakTemplateVTables)
- return;
- break;
+ return;
}
// If there's a key function, there may be translation units
@@ -529,7 +533,7 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
unsigned CallingConv;
AttributeListType AttributeList;
ConstructAttributeList(Info, D, AttributeList, CallingConv);
- F->setAttributes(llvm::AttrListPtr::get(AttributeList));
+ F->setAttributes(llvm::AttrListPtr::get(getLLVMContext(), AttributeList));
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -559,39 +563,46 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
F->setHasUWTable();
if (!hasUnwindExceptions(LangOpts))
- F->addFnAttr(llvm::Attribute::NoUnwind);
+ F->addFnAttr(llvm::Attributes::NoUnwind);
if (D->hasAttr<NakedAttr>()) {
// Naked implies noinline: we should not be inlining such functions.
- F->addFnAttr(llvm::Attribute::Naked);
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::Naked);
+ F->addFnAttr(llvm::Attributes::NoInline);
}
if (D->hasAttr<NoInlineAttr>())
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
// (noinline wins over always_inline, and we can't specify both in IR)
if ((D->hasAttr<AlwaysInlineAttr>() || D->hasAttr<ForceInlineAttr>()) &&
- !F->hasFnAttr(llvm::Attribute::NoInline))
- F->addFnAttr(llvm::Attribute::AlwaysInline);
+ !F->getFnAttributes().hasAttribute(llvm::Attributes::NoInline))
+ F->addFnAttr(llvm::Attributes::AlwaysInline);
// FIXME: Communicate hot and cold attributes to LLVM more directly.
if (D->hasAttr<ColdAttr>())
- F->addFnAttr(llvm::Attribute::OptimizeForSize);
+ F->addFnAttr(llvm::Attributes::OptimizeForSize);
+
+ if (D->hasAttr<MinSizeAttr>())
+ F->addFnAttr(llvm::Attributes::MinSize);
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D))
+ if (MD->isVirtual())
+ F->setUnnamedAddr(true);
+
if (LangOpts.getStackProtector() == LangOptions::SSPOn)
- F->addFnAttr(llvm::Attribute::StackProtect);
+ F->addFnAttr(llvm::Attributes::StackProtect);
else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
- F->addFnAttr(llvm::Attribute::StackProtectReq);
+ F->addFnAttr(llvm::Attributes::StackProtectReq);
- if (LangOpts.AddressSanitizer) {
+ if (LangOpts.SanitizeAddress) {
// When AddressSanitizer is enabled, set AddressSafety attribute
// unless __attribute__((no_address_safety_analysis)) is used.
if (!D->hasAttr<NoAddressSafetyAnalysisAttr>())
- F->addFnAttr(llvm::Attribute::AddressSafety);
+ F->addFnAttr(llvm::Attributes::AddressSafety);
}
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
@@ -636,7 +647,8 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
if (unsigned IID = F->getIntrinsicID()) {
// If this is an intrinsic function, set the function's attributes
// to the intrinsic's attributes.
- F->setAttributes(llvm::Intrinsic::getAttributes((llvm::Intrinsic::ID)IID));
+ F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(),
+ (llvm::Intrinsic::ID)IID));
return;
}
@@ -822,6 +834,49 @@ bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
return !getContext().DeclMustBeEmitted(Global);
}
+llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
+ const CXXUuidofExpr* E) {
+ // Sema has verified that IIDSource has a __declspec(uuid()), and that it's
+ // well-formed.
+ StringRef Uuid;
+ if (E->isTypeOperand())
+ Uuid = CXXUuidofExpr::GetUuidAttrOfType(E->getTypeOperand())->getGuid();
+ else {
+ // Special case: __uuidof(0) means an all-zero GUID.
+ Expr *Op = E->getExprOperand();
+ if (!Op->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ Uuid = CXXUuidofExpr::GetUuidAttrOfType(Op->getType())->getGuid();
+ else
+ Uuid = "00000000-0000-0000-0000-000000000000";
+ }
+ std::string Name = "__uuid_" + Uuid.str();
+
+ // Look for an existing global.
+ if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
+ return GV;
+
+ llvm::Constant *Init = EmitUuidofInitializer(Uuid, E->getType());
+ assert(Init && "failed to initialize as constant");
+
+ // GUIDs are assumed to be 16 bytes, spread over 4-2-2-8 bytes. However, the
+ // first field is declared as "long", which for many targets is 8 bytes.
+ // Those architectures are not supported. (With the MS ABI, long is always 4
+ // bytes.)
+ llvm::Type *GuidType = getTypes().ConvertType(E->getType());
+ if (Init->getType() != GuidType) {
+ DiagnosticsEngine &Diags = getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "__uuidof codegen is not supported on this architecture");
+ Diags.Report(E->getExprLoc(), DiagID) << E->getSourceRange();
+ Init = llvm::UndefValue::get(GuidType);
+ }
+
+ llvm::GlobalVariable *GV = new llvm::GlobalVariable(getModule(), GuidType,
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Init, Name);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
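
Example MS-extensions source that reaches this function; the _GUID type is assumed to be declared, as it would be with -fms-extensions and the usual Windows headers:

    struct __declspec(uuid("12345678-1234-1234-1234-1234567890ab")) Widget;

    const _GUID &wid  = __uuidof(Widget);  // global named "__uuid_12345678-..."
    const _GUID &zero = __uuidof(0);       // all-zero GUID special case
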
llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
const AliasAttr *AA = VD->getAttr<AliasAttr>();
assert(AA && "No alias?");
@@ -830,19 +885,23 @@ llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
// See if there is already something with the target's name in the module.
llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
+ if (Entry) {
+ unsigned AS = getContext().getTargetAddressSpace(VD->getType());
+ return llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
+ }
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
- Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
+ GlobalDecl(cast<FunctionDecl>(VD)),
/*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy), 0);
- if (!Entry) {
- llvm::GlobalValue* F = cast<llvm::GlobalValue>(Aliasee);
- F->setLinkage(llvm::Function::ExternalWeakLinkage);
- WeakRefReferences.insert(F);
- }
+
+ llvm::GlobalValue* F = cast<llvm::GlobalValue>(Aliasee);
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ WeakRefReferences.insert(F);
return Aliasee;
}
@@ -1051,12 +1110,10 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
- if (WeakRefReferences.count(Entry)) {
+ if (WeakRefReferences.erase(Entry)) {
const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl());
if (FD && !FD->hasAttr<WeakAttr>())
Entry->setLinkage(llvm::Function::ExternalLinkage);
-
- WeakRefReferences.erase(Entry);
}
if (Entry->getType()->getElementType() == Ty)
@@ -1085,8 +1142,8 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
assert(F->getName() == MangledName && "name was uniqued!");
if (D.getDecl())
SetFunctionAttributes(D, F, IsIncompleteFunction);
- if (ExtraAttrs != llvm::Attribute::None)
- F->addFnAttr(ExtraAttrs);
+ if (ExtraAttrs.hasAttributes())
+ F->addAttribute(llvm::AttrListPtr::FunctionIndex, ExtraAttrs);
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
@@ -1197,11 +1254,9 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
- if (WeakRefReferences.count(Entry)) {
+ if (WeakRefReferences.erase(Entry)) {
if (D && !D->hasAttr<WeakAttr>())
Entry->setLinkage(llvm::Function::ExternalLinkage);
-
- WeakRefReferences.erase(Entry);
}
if (UnnamedAddr)
@@ -1279,7 +1334,7 @@ CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
// Because C++ name mangling, the only way we can end up with an already
// existing global with the same name is if it has been declared extern "C".
- assert(GV->isDeclaration() && "Declaration has wrong type!");
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
OldGV = GV;
}
@@ -1424,7 +1479,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
return Context.toCharUnitsFromBits(
- TheTargetData.getTypeStoreSizeInBits(Ty));
+ TheDataLayout.getTypeStoreSizeInBits(Ty));
}
llvm::Constant *
@@ -1473,10 +1528,10 @@ CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
// Now clone the InitListExpr to initialize the array instead.
// Incredible hack: we want to use the existing InitListExpr here, so we need
// to tell it that it no longer initializes a std::initializer_list.
- Expr *arrayInit = new (ctx) InitListExpr(ctx, init->getLBraceLoc(),
- const_cast<InitListExpr*>(init)->getInits(),
- init->getNumInits(),
- init->getRBraceLoc());
+ ArrayRef<Expr*> Inits(const_cast<InitListExpr*>(init)->getInits(),
+ init->getNumInits());
+ Expr *arrayInit = new (ctx) InitListExpr(ctx, init->getLBraceLoc(), Inits,
+ init->getRBraceLoc());
arrayInit->setType(arrayType);
if (!cleanups.empty())
@@ -1682,9 +1737,21 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
if (NeedsGlobalCtor || NeedsGlobalDtor)
EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
+ // If we are compiling with ASan, add metadata indicating dynamically
+ // initialized globals.
+ if (LangOpts.SanitizeAddress && NeedsGlobalCtor) {
+ llvm::Module &M = getModule();
+
+ llvm::NamedMDNode *DynamicInitializers =
+ M.getOrInsertNamedMetadata("llvm.asan.dynamically_initialized_globals");
+ llvm::Value *GlobalToAdd[] = { GV };
+ llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalToAdd);
+ DynamicInitializers->addOperand(ThisGlobal);
+ }
+
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
- if (getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
+ if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
DI->EmitGlobalVariable(GV, D);
}
@@ -1758,8 +1825,10 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
llvm::Attributes RAttrs = AttrList.getRetAttributes();
// Add the return attributes.
- if (RAttrs)
- AttrVec.push_back(llvm::AttributeWithIndex::get(0, RAttrs));
+ if (RAttrs.hasAttributes())
+ AttrVec.push_back(llvm::
+ AttributeWithIndex::get(llvm::AttrListPtr::ReturnIndex,
+ RAttrs));
// If the function was passed too few arguments, don't transform. If extra
// arguments were passed, we silently drop them. If any of the types
@@ -1775,14 +1844,18 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
}
// Add any parameter attributes.
- if (llvm::Attributes PAttrs = AttrList.getParamAttributes(ArgNo + 1))
+ llvm::Attributes PAttrs = AttrList.getParamAttributes(ArgNo + 1);
+ if (PAttrs.hasAttributes())
AttrVec.push_back(llvm::AttributeWithIndex::get(ArgNo + 1, PAttrs));
}
if (DontTransform)
continue;
- if (llvm::Attributes FnAttrs = AttrList.getFnAttributes())
- AttrVec.push_back(llvm::AttributeWithIndex::get(~0, FnAttrs));
+ llvm::Attributes FnAttrs = AttrList.getFnAttributes();
+ if (FnAttrs.hasAttributes())
+ AttrVec.push_back(llvm::
+ AttributeWithIndex::get(llvm::AttrListPtr::FunctionIndex,
+ FnAttrs));
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
@@ -1791,7 +1864,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
ArgList.clear();
if (!NewCall->getType()->isVoidTy())
NewCall->takeName(CI);
- NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec));
+ NewCall->setAttributes(llvm::AttrListPtr::get(OldFn->getContext(), AttrVec));
NewCall->setCallingConv(CI->getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
@@ -1911,7 +1984,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
// if a deferred decl.
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
- Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
/*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
@@ -1987,7 +2060,7 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
IsUTF16 = true;
SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
- const UTF8 *FromPtr = (UTF8 *)String.data();
+ const UTF8 *FromPtr = (const UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
(void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
@@ -2019,7 +2092,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
bool isUTF16 = false;
llvm::StringMapEntry<llvm::Constant*> &Entry =
GetConstantCFStringEntry(CFConstantStringMap, Literal,
- getTargetData().isLittleEndian(),
+ getDataLayout().isLittleEndian(),
isUTF16, StringLength);
if (llvm::Constant *C = Entry.getValue())
@@ -2429,7 +2502,7 @@ void CodeGenModule::EmitObjCPropertyImplementations(const
ObjCPropertyDecl *PD = PID->getPropertyDecl();
// Determine which methods need to be implemented, some may have
- // been overridden. Note that ::isSynthesized is not the method
+ // been overridden. Note that ::isPropertyAccessor is not the method
// we want, that just indicates if the decl came from a
// property. What we want to know is if the method is defined in
// this implementation.
@@ -2465,11 +2538,11 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
cxxSelector, getContext().VoidTy, 0, D,
/*isInstance=*/true, /*isVariadic=*/false,
- /*isSynthesized=*/true, /*isImplicitlyDeclared=*/true,
+ /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true,
/*isDefined=*/false, ObjCMethodDecl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
- D->setHasCXXStructors(true);
+ D->setHasDestructors(true);
}
// If the implementation doesn't have any ivar initializers, we don't need
@@ -2487,13 +2560,13 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
getContext().getObjCIdType(), 0,
D, /*isInstance=*/true,
/*isVariadic=*/false,
- /*isSynthesized=*/true,
+ /*isPropertyAccessor=*/true,
/*isImplicitlyDeclared=*/true,
/*isDefined=*/false,
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
- D->setHasCXXStructors(true);
+ D->setHasNonZeroConstructors(true);
}
/// EmitNamespace - Emit all declarations in a namespace.
@@ -2512,8 +2585,17 @@ void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
}
for (RecordDecl::decl_iterator I = LSD->decls_begin(), E = LSD->decls_end();
- I != E; ++I)
+ I != E; ++I) {
+ // Meta-data for ObjC class includes references to implemented methods.
+ // Generate class's method definitions first.
+ if (ObjCImplDecl *OID = dyn_cast<ObjCImplDecl>(*I)) {
+ for (ObjCContainerDecl::method_iterator M = OID->meth_begin(),
+ MEnd = OID->meth_end();
+ M != MEnd; ++M)
+ EmitTopLevelDecl(*M);
+ }
EmitTopLevelDecl(*I);
+ }
}
/// EmitTopLevelDecl - Emit code for a single top level declaration.
@@ -2737,3 +2819,32 @@ void CodeGenModule::EmitCoverageFile() {
}
}
}
+
+llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid,
+ QualType GuidType) {
+ // Sema has checked that all uuid strings are of the form
+ // "12345678-1234-1234-1234-1234567890ab".
+ assert(Uuid.size() == 36);
+ const char *Uuidstr = Uuid.data();
+ for (int i = 0; i < 36; ++i) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuidstr[i] == '-');
+ else assert(isxdigit(Uuidstr[i]));
+ }
+
+ llvm::APInt Field0(32, StringRef(Uuidstr , 8), 16);
+ llvm::APInt Field1(16, StringRef(Uuidstr + 9, 4), 16);
+ llvm::APInt Field2(16, StringRef(Uuidstr + 14, 4), 16);
+ static const int Field3ValueOffsets[] = { 19, 21, 24, 26, 28, 30, 32, 34 };
+
+ APValue InitStruct(APValue::UninitStruct(), /*NumBases=*/0, /*NumFields=*/4);
+ InitStruct.getStructField(0) = APValue(llvm::APSInt(Field0));
+ InitStruct.getStructField(1) = APValue(llvm::APSInt(Field1));
+ InitStruct.getStructField(2) = APValue(llvm::APSInt(Field2));
+ APValue& Arr = InitStruct.getStructField(3);
+ Arr = APValue(APValue::UninitArray(), 8, 8);
+ for (int t = 0; t < 8; ++t)
+ Arr.getArrayInitializedElt(t) = APValue(llvm::APSInt(
+ llvm::APInt(8, StringRef(Uuidstr + Field3ValueOffsets[t], 2), 16)));
+
+ return EmitConstantValue(InitStruct, GuidType);
+}
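
The 4-2-2-8 split above matches the conventional _GUID layout (a sketch; under the MS ABI 'long' is 4 bytes, so Data1 matches the 32-bit Field0):

    struct _GUID {
      unsigned long  Data1;     // "12345678"          -> Field0 (32 bits)
      unsigned short Data2;     // "1234"              -> Field1 (16 bits)
      unsigned short Data3;     // "1234"              -> Field2 (16 bits)
      unsigned char  Data4[8];  // "1234" + "567890ab" -> eight 8-bit values,
    };                          //   two hex digits each (Field3ValueOffsets)
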
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
index d6ff50d..1167c87 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -35,7 +35,7 @@ namespace llvm {
class ConstantInt;
class Function;
class GlobalValue;
- class TargetData;
+ class DataLayout;
class FunctionType;
class LLVMContext;
}
@@ -210,8 +210,8 @@ struct ARCEntrypoints {
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
class CodeGenModule : public CodeGenTypeCache {
- CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
- void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+ CodeGenModule(const CodeGenModule &) LLVM_DELETED_FUNCTION;
+ void operator=(const CodeGenModule &) LLVM_DELETED_FUNCTION;
typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
@@ -219,7 +219,7 @@ class CodeGenModule : public CodeGenTypeCache {
const LangOptions &LangOpts;
const CodeGenOptions &CodeGenOpts;
llvm::Module &TheModule;
- const llvm::TargetData &TheTargetData;
+ const llvm::DataLayout &TheDataLayout;
mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
DiagnosticsEngine &Diags;
CGCXXABI &ABI;
@@ -296,11 +296,18 @@ class CodeGenModule : public CodeGenTypeCache {
/// order.
llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;
+ typedef std::pair<OrderGlobalInits, llvm::Function*> GlobalInitData;
+
+ struct GlobalInitPriorityCmp {
+ bool operator()(const GlobalInitData &LHS,
+ const GlobalInitData &RHS) const {
+ return LHS.first.priority < RHS.first.priority;
+ }
+ };
+
/// - Global variables with initializers whose order of initialization
/// is set by init_priority attribute.
-
- SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
- PrioritizedCXXGlobalInits;
+ SmallVector<GlobalInitData, 8> PrioritizedCXXGlobalInits;
/// CXXGlobalDtors - Global destructor functions and arguments that need to
/// run on termination.
@@ -357,7 +364,7 @@ class CodeGenModule : public CodeGenTypeCache {
/// @}
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
- llvm::Module &M, const llvm::TargetData &TD,
+ llvm::Module &M, const llvm::DataLayout &TD,
DiagnosticsEngine &Diags);
~CodeGenModule();
@@ -451,7 +458,7 @@ public:
CodeGenVTables &getVTables() { return VTables; }
VTableContext &getVTableContext() { return VTables.getVTableContext(); }
DiagnosticsEngine &getDiags() const { return Diags; }
- const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
const TargetInfo &getTarget() const { return Context.getTargetInfo(); }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
const TargetCodeGenInfo &getTargetCodeGenInfo();
@@ -461,6 +468,7 @@ public:
llvm::MDNode *getTBAAInfo(QualType QTy);
llvm::MDNode *getTBAAInfoForVTablePtr();
+ llvm::MDNode *getTBAAStructInfo(QualType QTy);
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
@@ -548,6 +556,9 @@ public:
/// for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
+ /// GetAddrOfUuidDescriptor - Get the address of a uuid descriptor.
+ llvm::Constant *GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
+
/// GetAddrOfThunk - Get the address of the thunk for the given global decl.
llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
@@ -701,7 +712,7 @@ public:
llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty,
StringRef Name,
llvm::Attributes ExtraAttrs =
- llvm::Attribute::None);
+ llvm::Attributes());
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
@@ -880,7 +891,7 @@ private:
GlobalDecl D,
bool ForVTable,
llvm::Attributes ExtraAttrs =
- llvm::Attribute::None);
+ llvm::Attributes());
llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::PointerType *PTy,
const VarDecl *D,
@@ -984,6 +995,9 @@ private:
/// to emit the .gcno and .gcda files in a way that persists in .bc files.
void EmitCoverageFile();
+ /// Emits the initializer for a uuidof string.
+ llvm::Constant *EmitUuidofInitializer(StringRef uuidstr, QualType IIDType);
+
/// MayDeferGeneration - Determine if the given decl can be emitted
/// lazily; this is only relevant for definitions. The given decl
/// must be either a function or var decl.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
index bab60af..d9004a0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -17,6 +17,7 @@
#include "CodeGenTBAA.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/Mangle.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/LLVMContext.h"
@@ -167,3 +168,59 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() {
return MDHelper.createTBAANode("vtable pointer", getRoot());
}
+
+bool
+CodeGenTBAA::CollectFields(uint64_t BaseOffset,
+ QualType QTy,
+ SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
+ Fields,
+ bool MayAlias) {
+ /* Things not handled yet include: C++ base classes and bitfields. */
+
+ if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // TODO: Handle C++ base classes.
+ if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
+ if (Decl->bases_begin() != Decl->bases_end())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i, ++idx) {
+ uint64_t Offset = BaseOffset +
+ Layout.getFieldOffset(idx) / Context.getCharWidth();
+ QualType FieldQTy = i->getType();
+ if (!CollectFields(Offset, FieldQTy, Fields,
+ MayAlias || TypeHasMayAlias(FieldQTy)))
+ return false;
+ }
+ return true;
+ }
+
+ /* Otherwise, treat whatever it is as a field. */
+ uint64_t Offset = BaseOffset;
+ uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
+ llvm::MDNode *TBAAInfo = MayAlias ? getChar() : getTBAAInfo(QTy);
+ Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAAInfo));
+ return true;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+
+ if (llvm::MDNode *N = StructMetadataCache[Ty])
+ return N;
+
+ SmallVector<llvm::MDBuilder::TBAAStructField, 4> Fields;
+ if (CollectFields(0, QTy, Fields, TypeHasMayAlias(QTy)))
+ return MDHelper.createTBAAStructNode(Fields);
+
+ // For now, handle any other kind of type conservatively.
+ return StructMetadataCache[Ty] = NULL;
+}
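
For a simple aggregate, CollectFields flattens the record into (offset, size, TBAA tag) triples for !tbaa.struct. Roughly, assuming an x86-64 layout and an illustrative rendering of the tag nodes:

    struct P {
      int   x;   // (offset 0, size 4, int tag)
      float y;   // (offset 4, size 4, float tag)
      char  c;   // (offset 8, size 1, omnipotent-char tag)
    };
    // An aggregate copy of P gets !tbaa.struct metadata along the lines of:
    //   !{i64 0, i64 4, !<int>, i64 4, i64 4, !<float>, i64 8, i64 1, !<char>}
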
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
index c17a5cf..eedb996 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
@@ -49,6 +49,10 @@ class CodeGenTBAA {
/// MetadataCache - This maps clang::Types to llvm::MDNodes describing them.
llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
+ /// StructMetadataCache - This maps clang::Types to llvm::MDNodes describing
+ /// them for struct assignments.
+ llvm::DenseMap<const Type *, llvm::MDNode *> StructMetadataCache;
+
llvm::MDNode *Root;
llvm::MDNode *Char;
@@ -60,6 +64,13 @@ class CodeGenTBAA {
/// considered to be equivalent to it.
llvm::MDNode *getChar();
+ /// CollectFields - Collect information about the fields of a type for
+ /// !tbaa.struct metadata formation. Return false for an unsupported type.
+ bool CollectFields(uint64_t BaseOffset,
+ QualType Ty,
+ SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &Fields,
+ bool MayAlias);
+
public:
CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
const CodeGenOptions &CGO,
@@ -74,6 +85,10 @@ public:
/// getTBAAInfoForVTablePtr - Get the TBAA MDNode to be used for a
/// dereference of a vtable pointer.
llvm::MDNode *getTBAAInfoForVTablePtr();
+
+ /// getTBAAStructInfo - Get the TBAAStruct MDNode to be used for a memcpy of
+ /// the given type.
+ llvm::MDNode *getTBAAStructInfo(QualType QTy);
};
} // end namespace CodeGen
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
index 9a78dae..3c6c5c9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -23,13 +23,13 @@
#include "clang/AST/RecordLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
CodeGenTypes::CodeGenTypes(CodeGenModule &CGM)
: Context(CGM.getContext()), Target(Context.getTargetInfo()),
- TheModule(CGM.getModule()), TheTargetData(CGM.getTargetData()),
+ TheModule(CGM.getModule()), TheDataLayout(CGM.getDataLayout()),
TheABIInfo(CGM.getTargetCodeGenInfo().getABIInfo()),
TheCXXABI(CGM.getCXXABI()),
CodeGenOpts(CGM.getCodeGenOpts()), CGM(CGM) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
index 3c29d2d..0519911 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -23,7 +23,7 @@
namespace llvm {
class FunctionType;
class Module;
- class TargetData;
+ class DataLayout;
class Type;
class LLVMContext;
class StructType;
@@ -62,7 +62,7 @@ class CodeGenTypes {
ASTContext &Context;
const TargetInfo &Target;
llvm::Module &TheModule;
- const llvm::TargetData &TheTargetData;
+ const llvm::DataLayout &TheDataLayout;
const ABIInfo &TheABIInfo;
CGCXXABI &TheCXXABI;
const CodeGenOptions &CodeGenOpts;
@@ -108,7 +108,7 @@ public:
CodeGenTypes(CodeGenModule &CGM);
~CodeGenTypes();
- const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
const TargetInfo &getTarget() const { return Target; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 0b7ce36..245150c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -23,11 +23,11 @@
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include <clang/AST/Mangle.h>
-#include <clang/AST/Type.h>
-#include <llvm/Intrinsics.h>
-#include <llvm/Target/TargetData.h>
-#include <llvm/Value.h>
+#include "clang/AST/Mangle.h"
+#include "clang/AST/Type.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Value.h"
using namespace clang;
using namespace CodeGen;
@@ -92,6 +92,10 @@ public:
llvm::Value *Addr,
const MemberPointerType *MPT);
+ llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type);
+
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType T,
CanQualType &ResTy,
@@ -109,6 +113,7 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
StringRef GetPureVirtualCallName() { return "__cxa_pure_virtual"; }
+ StringRef GetDeletedVirtualCallName() { return "__cxa_deleted_virtual"; }
CharUnits getArrayCookieSizeImpl(QualType elementType);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
@@ -299,7 +304,7 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
CGBuilderTy &Builder = CGF.Builder;
- unsigned AS = cast<llvm::PointerType>(Base->getType())->getAddressSpace();
+ unsigned AS = Base->getType()->getPointerAddressSpace();
// Cast to char*.
Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
@@ -677,6 +682,25 @@ bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
return MPT->getPointeeType()->isFunctionType();
}
+/// The Itanium ABI always places an offset to the complete object
+/// at entry -2 in the vtable.
+llvm::Value *ItaniumCXXABI::adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type) {
+ // Grab the vtable pointer as an intptr_t*.
+ llvm::Value *vtable = CGF.GetVTablePtr(ptr, CGF.IntPtrTy->getPointerTo());
+
+ // Track back to entry -2 and pull out the offset there.
+ llvm::Value *offsetPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(vtable, -2, "complete-offset.ptr");
+ llvm::LoadInst *offset = CGF.Builder.CreateLoad(offsetPtr);
+ offset->setAlignment(CGF.PointerAlignInBytes);
+
+ // Apply the offset.
+ ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
+ return CGF.Builder.CreateInBoundsGEP(ptr, offset);
+}
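
Because the slot index is fixed by the ABI, the lookup can be mimicked in plain C++. A minimal sketch, valid only under the Itanium ABI and for illustration; it computes the same pointer as dynamic_cast<void*>:

    #include <cassert>
    #include <cstdint>

    struct A { virtual ~A() {} int a; };
    struct B { virtual ~B() {} int b; };
    struct C : A, B { int c; };

    // Read the offset-to-top slot at vtable entry -2, as the IR above does.
    static void *adjustToCompleteObject(void *p) {
      std::intptr_t *vtable = *reinterpret_cast<std::intptr_t **>(p);
      return static_cast<char *>(p) + vtable[-2];
    }

    int main() {
      C c;
      B *b = &c; // adjusted pointer into the middle of the complete object
      assert(adjustToCompleteObject(b) == dynamic_cast<void *>(b));
      assert(adjustToCompleteObject(b) == static_cast<void *>(&c));
    }
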
+
/// The generic ABI passes 'this', plus a VTT if it's initializing a
/// base subobject.
void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
@@ -810,7 +834,7 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
QualType ElementType) {
assert(requiresArrayCookie(expr));
- unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+ unsigned AS = NewPtr->getType()->getPointerAddressSpace();
ASTContext &Ctx = getContext();
QualType SizeTy = Ctx.getSizeType();
@@ -852,7 +876,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
numElementsOffset.getQuantity());
- unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ unsigned AS = allocPtr->getType()->getPointerAddressSpace();
numElementsPtr =
CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
return CGF.Builder.CreateLoad(numElementsPtr);
@@ -878,7 +902,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
// NewPtr is a char*.
- unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+ unsigned AS = NewPtr->getType()->getPointerAddressSpace();
ASTContext &Ctx = getContext();
CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
@@ -913,7 +937,7 @@ llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::Value *numElementsPtr
= CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);
- unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ unsigned AS = allocPtr->getType()->getPointerAddressSpace();
numElementsPtr =
CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
return CGF.Builder.CreateLoad(numElementsPtr);
@@ -927,9 +951,9 @@ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
GuardPtrTy, /*isVarArg=*/false);
-
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
- llvm::Attribute::NoUnwind);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NoUnwind));
}
static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
@@ -937,9 +961,9 @@ static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
// void __cxa_guard_release(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
-
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
- llvm::Attribute::NoUnwind);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NoUnwind));
}
static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
@@ -947,9 +971,9 @@ static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
// void __cxa_guard_abort(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
-
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
- llvm::Attribute::NoUnwind);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NoUnwind));
}
namespace {
@@ -1149,7 +1173,7 @@ void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
// In Apple kexts, we want to add a global destructor entry.
// FIXME: shouldn't this be guarded by some variable?
- if (CGM.getContext().getLangOpts().AppleKext) {
+ if (CGM.getLangOpts().AppleKext) {
// Generate a global destructor entry.
return CGM.AddCXXDtorEntry(dtor, addr);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 6a2925b..8d205c3 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -29,14 +29,18 @@ public:
MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
StringRef GetPureVirtualCallName() { return "_purecall"; }
+ // No known support for deleted functions in MSVC yet, so this choice is
+ // arbitrary.
+ StringRef GetDeletedVirtualCallName() { return "_purecall"; }
+
+ llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type);
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) {
- // 'this' is already in place
- // TODO: 'for base' flag
- }
+ SmallVectorImpl<CanQualType> &ArgTys);
void BuildDestructorSignature(const CXXDestructorDecl *Ctor,
CXXDtorType Type,
@@ -48,15 +52,9 @@ public:
void BuildInstanceFunctionParams(CodeGenFunction &CGF,
QualType &ResTy,
- FunctionArgList &Params) {
- BuildThisParam(CGF, Params);
- // TODO: 'for base' flag
- }
+ FunctionArgList &Params);
- void EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
- EmitThisParam(CGF);
- // TODO: 'for base' flag
- }
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
@@ -99,10 +97,49 @@ public:
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
llvm::Value *allocPtr,
CharUnits cookieSize);
+ static bool needThisReturn(GlobalDecl GD);
};
}
+llvm::Value *MicrosoftCXXABI::adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type) {
+ // FIXME: implement
+ return ptr;
+}
+
+bool MicrosoftCXXABI::needThisReturn(GlobalDecl GD) {
+ const CXXMethodDecl* MD = cast<CXXMethodDecl>(GD.getDecl());
+ return isa<CXXConstructorDecl>(MD);
+}
+
+void MicrosoftCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ // Ctor returns this ptr
+ ResTy = ArgTys[0];
+}
+
+void MicrosoftCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ BuildThisParam(CGF, Params);
+ if (needThisReturn(CGF.CurGD)) {
+ ResTy = Params[0]->getType();
+ }
+}
+
+void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ EmitThisParam(CGF);
+ if (needThisReturn(CGF.CurGD)) {
+ CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
+ }
+}
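
A hypothetical sketch of what this this-return convention means at the source level (the lowering shown in the comment is an approximation, not the exact IR):

    // Under the Microsoft ABI the constructor below is lowered roughly as
    // "S *S::S(S *this) { this->x = 0; return this; }"; storing the this
    // value into the return slot is what EmitInstanceFunctionProlog arranges.
    struct S {
      int x;
      S() : x(0) {}
    };
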
+
bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
QualType elementType) {
// Microsoft seems to completely ignore the possibility of a
@@ -127,7 +164,7 @@ CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::Value *allocPtr,
CharUnits cookieSize) {
- unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ unsigned AS = allocPtr->getType()->getPointerAddressSpace();
llvm::Value *numElementsPtr =
CGF.Builder.CreateBitCast(allocPtr, CGF.SizeTy->getPointerTo(AS));
return CGF.Builder.CreateLoad(numElementsPtr);
@@ -147,7 +184,7 @@ llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *cookiePtr = newPtr;
// Write the number of elements into the appropriate slot.
- unsigned AS = cast<llvm::PointerType>(newPtr->getType())->getAddressSpace();
+ unsigned AS = newPtr->getType()->getPointerAddressSpace();
llvm::Value *numElementsPtr
= CGF.Builder.CreateBitCast(cookiePtr, CGF.SizeTy->getPointerTo(AS));
CGF.Builder.CreateStore(numElements, numElementsPtr);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
index ea2389e..0125559 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -21,14 +21,14 @@
#include "clang/Basic/TargetInfo.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/OwningPtr.h"
using namespace clang;
namespace {
class CodeGeneratorImpl : public CodeGenerator {
DiagnosticsEngine &Diags;
- OwningPtr<const llvm::TargetData> TD;
+ OwningPtr<const llvm::DataLayout> TD;
ASTContext *Ctx;
const CodeGenOptions CodeGenOpts; // Intentionally copied in.
protected:
@@ -54,7 +54,7 @@ namespace {
M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
- TD.reset(new llvm::TargetData(Ctx->getTargetInfo().getTargetDescription()));
+ TD.reset(new llvm::DataLayout(Ctx->getTargetInfo().getTargetDescription()));
Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts,
*M, *TD, Diags));
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
index 9c23ed9..ffff0d0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -18,7 +18,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -51,8 +51,8 @@ llvm::LLVMContext &ABIInfo::getVMContext() const {
return CGT.getLLVMContext();
}
-const llvm::TargetData &ABIInfo::getTargetData() const {
- return CGT.getTargetData();
+const llvm::DataLayout &ABIInfo::getDataLayout() const {
+ return CGT.getDataLayout();
}
@@ -389,6 +389,90 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+//===----------------------------------------------------------------------===//
+// le32/PNaCl bitcode ABI Implementation
+//===----------------------------------------------------------------------===//
+
+class PNaClABIInfo : public ABIInfo {
+ public:
+ PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
+};
+
+void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, FreeRegs);
+ }
+
+llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
+ unsigned &FreeRegs) const {
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/constructors should not be passed
+ // by value.
+ FreeRegs = 0;
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+
+ // Regparm regs hold 32 bits.
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (SizeInRegs == 0) return BaseInfo;
+ if (SizeInRegs > FreeRegs) {
+ FreeRegs = 0;
+ return BaseInfo;
+ }
+ FreeRegs -= SizeInRegs;
+ return BaseInfo.isDirect() ?
+ ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
+ ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
+}
+
+ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
@@ -435,7 +519,8 @@ class X86_32ABIInfo : public ABIInfo {
/// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
+ unsigned &FreeRegs) const;
/// \brief Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
@@ -443,9 +528,10 @@ class X86_32ABIInfo : public ABIInfo {
Class classify(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy,
unsigned callingConvention) const;
- ABIArgInfo classifyArgumentTypeWithReg(QualType RetTy,
- unsigned &FreeRegs) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs,
+ bool IsFastCall) const;
+ bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
+ bool IsFastCall, bool &NeedsPadding) const;
public:
@@ -682,9 +768,15 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
return MinABIStackAlignInBytes;
}
-ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
- if (!ByVal)
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
+ unsigned &FreeRegs) const {
+ if (!ByVal) {
+ if (FreeRegs) {
+ --FreeRegs; // Non byval indirects just use one pointer.
+ return ABIArgInfo::getIndirectInReg(0, false);
+ }
return ABIArgInfo::getIndirect(0, false);
+ }
// Compute the byval alignment.
unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
@@ -714,45 +806,51 @@ X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
return Integer;
}
-ABIArgInfo
-X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
- unsigned &FreeRegs) const {
- // Common case first.
- if (FreeRegs == 0)
- return classifyArgumentType(Ty);
-
+bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
+ bool IsFastCall, bool &NeedsPadding) const {
+ NeedsPadding = false;
Class C = classify(Ty);
if (C == Float)
- return classifyArgumentType(Ty);
+ return false;
+
+ unsigned Size = getContext().getTypeSize(Ty);
+ unsigned SizeInRegs = (Size + 31) / 32;
- unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
if (SizeInRegs == 0)
- return classifyArgumentType(Ty);
+ return false;
if (SizeInRegs > FreeRegs) {
FreeRegs = 0;
- return classifyArgumentType(Ty);
+ return false;
}
- assert(SizeInRegs >= 1 && SizeInRegs <= 3);
+
FreeRegs -= SizeInRegs;
- // If it is a simple scalar, keep the type so that we produce a cleaner IR.
- ABIArgInfo Foo = classifyArgumentType(Ty);
- if (Foo.isDirect() && !Foo.getDirectOffset() && !Foo.getPaddingType())
- return ABIArgInfo::getDirectInReg(Foo.getCoerceToType());
- if (Foo.isExtend())
- return ABIArgInfo::getExtendInReg(Foo.getCoerceToType());
+ if (IsFastCall) {
+ if (Size > 32)
+ return false;
+
+ if (Ty->isIntegralOrEnumerationType())
+ return true;
+
+ if (Ty->isPointerType())
+ return true;
+
+ if (Ty->isReferenceType())
+ return true;
+
+ if (FreeRegs)
+ NeedsPadding = true;
- llvm::LLVMContext &LLVMContext = getVMContext();
- llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- SmallVector<llvm::Type*, 3> Elements;
- for (unsigned I = 0; I < SizeInRegs; ++I)
- Elements.push_back(Int32);
- llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
- return ABIArgInfo::getDirectInReg(Result);
+ return false;
+ }
+
+ return true;
}
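
A hypothetical declaration exercising this accounting: registers are consumed 32 bits at a time until an argument no longer fits.

    void __attribute__((regparm(3))) g(int a, long long b, int c);
    // a takes 1 register, b takes 2; c finds FreeRegs == 0 and goes on the stack.
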
-ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+ unsigned &FreeRegs,
+ bool IsFastCall) const {
// FIXME: Set alignment on indirect arguments.
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
@@ -760,25 +858,38 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (hasNonTrivialDestructorOrCopyConstructor(RT))
- return getIndirectResult(Ty, /*ByVal=*/false);
+ return getIndirectResult(Ty, false, FreeRegs);
if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectResult(Ty);
+ return getIndirectResult(Ty, true, FreeRegs);
}
// Ignore empty structs/unions.
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
+ llvm::LLVMContext &LLVMContext = getVMContext();
+ llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ bool NeedsPadding;
+ if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ SmallVector<llvm::Type*, 3> Elements;
+ for (unsigned I = 0; I < SizeInRegs; ++I)
+ Elements.push_back(Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+ return ABIArgInfo::getDirectInReg(Result);
+ }
+ llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;
+
// Expand small (<= 128-bit) record types when we know that the stack layout
// of those arguments will match the struct. This is important because the
// LLVM backend isn't smart enough to remove byval, which inhibits many
// optimizations.
if (getContext().getTypeSize(Ty) <= 4*32 &&
canExpandIndirectArgument(Ty, getContext()))
- return ABIArgInfo::getExpand();
+ return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);
- return getIndirectResult(Ty);
+ return getIndirectResult(Ty, true, FreeRegs);
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
@@ -809,16 +920,32 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ bool NeedsPadding;
+ bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);
+
+ if (Ty->isPromotableIntegerType()) {
+ if (InReg)
+ return ABIArgInfo::getExtendInReg();
+ return ABIArgInfo::getExtend();
+ }
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
}
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
FI.getCallingConvention());
- unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
- DefaultNumRegisterParameters;
+ unsigned CC = FI.getCallingConvention();
+ bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
+ unsigned FreeRegs;
+ if (IsFastCall)
+ FreeRegs = 2;
+ else if (FI.getHasRegParm())
+ FreeRegs = FI.getRegParm();
+ else
+ FreeRegs = DefaultNumRegisterParameters;
// If the return value is indirect, then the hidden argument is consuming one
// integer register.
@@ -832,7 +959,7 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
+ it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
}
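
The fastcall path above, in a hypothetical declaration: two free registers, and only integral or pointer arguments of at most 32 bits qualify for them.

    void __attribute__((fastcall)) f(int a /* ECX */, int b /* EDX */,
                                     int c /* stack */);
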
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -884,7 +1011,10 @@ void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
llvm::Function *Fn = cast<llvm::Function>(GV);
// Now add the 'alignstack' attribute with a value of 16.
- Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
+ llvm::AttrBuilder B;
+ B.addStackAlignmentAttr(16);
+ Fn->addAttribute(llvm::AttrListPtr::FunctionIndex,
+ llvm::Attributes::get(CGM.getLLVMContext(), B));
}
}
}
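
The attribute being rebuilt with the new AttrBuilder API is the one behind GCC's force_align_arg_pointer; a hypothetical source-level trigger:

    // The function's incoming stack gets realigned to 16 bytes, making SSE
    // locals safe even when the caller only guarantees 4-byte alignment.
    __attribute__((force_align_arg_pointer)) void callback(void);
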
@@ -1030,10 +1160,15 @@ class X86_64ABIInfo : public ABIInfo {
}
bool HasAVX;
+ // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
+ // 64-bit hardware.
+ bool Has64BitPointers;
public:
X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
- ABIInfo(CGT), HasAVX(hasavx) {}
+ ABIInfo(CGT), HasAVX(hasavx),
+ Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
+ }
bool isPassedUsingAVXType(QualType type) const {
unsigned neededInt, neededSSE;
@@ -1070,7 +1205,7 @@ public:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
- : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
const X86_64ABIInfo &getABIInfo() const {
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -1243,7 +1378,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Hi = Integer;
} else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
Current = Integer;
- } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
+ (k == BuiltinType::LongDouble &&
+ getContext().getTargetInfo().getTriple().getOS() ==
+ llvm::Triple::NativeClient)) {
Current = SSE;
} else if (k == BuiltinType::LongDouble) {
Lo = X87;
@@ -1266,7 +1404,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
}
if (Ty->isMemberPointerType()) {
- if (Ty->isMemberFunctionPointerType())
+ if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
Lo = Hi = Integer;
else
Current = Integer;
@@ -1329,7 +1467,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Lo = Hi = Integer;
} else if (ET == getContext().FloatTy)
Current = SSE;
- else if (ET == getContext().DoubleTy)
+ else if (ET == getContext().DoubleTy ||
+ (ET == getContext().LongDoubleTy &&
+ getContext().getTargetInfo().getTriple().getOS() ==
+ llvm::Triple::NativeClient))
Lo = Hi = SSE;
else if (ET == getContext().LongDoubleTy)
Current = ComplexX87;
@@ -1708,7 +1849,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
- const llvm::TargetData &TD) {
+ const llvm::DataLayout &TD) {
// Base case if we find a float.
if (IROffset == 0 && IRType->isFloatTy())
return true;
@@ -1748,8 +1889,8 @@ GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
// We want to pass as <2 x float> if the LLVM IR type contains a float at
// offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
// case.
- if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
- ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
+ if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
+ ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
return llvm::Type::getDoubleTy(getVMContext());
@@ -1777,7 +1918,8 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
// returning an 8-byte unit starting with it. See if we can safely use it.
if (IROffset == 0) {
// Pointers and int64's always fill the 8-byte unit.
- if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+ if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
+ IRType->isIntegerTy(64))
return IRType;
// If we have a 1/2/4-byte integer, we can use it only if the rest of the
@@ -1787,8 +1929,10 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
// have to do this analysis on the source type because we can't depend on
// unions being lowered a specific way etc.
if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
- IRType->isIntegerTy(32)) {
- unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+ IRType->isIntegerTy(32) ||
+ (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
+ unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
+ cast<llvm::IntegerType>(IRType)->getBitWidth();
if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
SourceOffset*8+64, getContext()))
@@ -1798,7 +1942,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
// If this is a struct, recurse into the field at the specified offset.
- const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
+ const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
if (IROffset < SL->getSizeInBytes()) {
unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
IROffset -= SL->getElementOffset(FieldIdx);
@@ -1810,7 +1954,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
llvm::Type *EltTy = ATy->getElementType();
- unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
+ unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
unsigned EltOffset = IROffset/EltSize*EltSize;
return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
SourceOffset);
@@ -1837,14 +1981,14 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
- const llvm::TargetData &TD) {
+ const llvm::DataLayout &TD) {
// In order to correctly satisfy the ABI, we need the high part to start
// at offset 8. If the high and low parts we inferred are both 4-byte types
// (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
// the second element at offset 8. Check for this:
unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
unsigned HiAlign = TD.getABITypeAlignment(Hi);
- unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
+ unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
// To handle this, we have to increase the size of the low part so that the
@@ -1996,7 +2140,7 @@ classifyReturnType(QualType RetTy) const {
// known to pass in the high eightbyte of the result. We do this by forming a
// first class struct aggregate with the high and low part: {low, high}
if (HighPart)
- ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
return ABIArgInfo::getDirect(ResType);
}
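
The pairing helper guards its offset math with RoundUpAlignment; a minimal sketch of that arithmetic, with a local stand-in for the LLVM helper:

    #include <cassert>
    #include <cstdint>

    // Local stand-in for llvm::DataLayout::RoundUpAlignment.
    static uint64_t roundUpAlignment(uint64_t Value, uint64_t Align) {
      return (Value + Align - 1) / Align * Align;
    }

    int main() {
      // {i32, i32}: the high part would start at offset 4, so the low part
      // must be widened to i64 to push it out to the required offset 8.
      assert(roundUpAlignment(4, 4) == 4);
      // {i64, i32}: the high part already starts at offset 8; no padding.
      assert(roundUpAlignment(8, 4) == 8);
    }
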
@@ -2122,7 +2266,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
// known to pass in the high eightbyte of the result. We do this by forming a
// first class struct aggregate with the high and low part: {low, high}
if (HighPart)
- ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
return ABIArgInfo::getDirect(ResType);
}
@@ -2435,6 +2579,43 @@ llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
+namespace {
+
+class NaClX86_64ABIInfo : public ABIInfo {
+ public:
+ NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+ private:
+ PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
+ X86_64ABIInfo NInfo; // Used for everything else.
+};
+
+class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
+};
+
+}
+
+void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (FI.getASTCallingConvention() == CC_PnaclCall)
+ PInfo.computeInfo(FI);
+ else
+ NInfo.computeInfo(FI);
+}
+
+llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Always use the native convention; calling pnacl-style varargs functions
+ // is unsupported.
+ return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
+}
+
+
// PowerPC-32
namespace {
@@ -2497,6 +2678,62 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// PowerPC-64
namespace {
+/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
+class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
+
+public:
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ // TODO: We can add more logic to computeInfo to improve performance.
+ // Example: For aggregate arguments that fit in a register, we could
+ // use getDirectInReg (as is done below for structs containing a single
+ // floating-point value) to avoid pushing them to memory on function
+ // entry. This would require changing the logic in PPCISelLowering
+ // when lowering the parameters in the caller and args in the callee.
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ // We rely on the default argument classification for the most part.
+ // One exception: An aggregate containing a single floating-point
+ // item must be passed in a register if one is available.
+ const Type *T = isSingleElementStruct(it->type, getContext());
+ if (T) {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if (BT && BT->isFloatingPoint()) {
+ QualType QT(T, 0);
+ it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
+ continue;
+ }
+ }
+ it->info = classifyArgumentType(it->type);
+ }
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
@@ -2512,9 +2749,94 @@ public:
}
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 64 bits.
bool
-PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
+PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ // In addition to the usual promotable integer types, we also need to
+ // extend all 32-bit types, since the ABI requires promotion to 64 bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
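
Spelled out for common C types (a sketch assuming LP64 PPC64), the rule behaves as the comments below indicate:

    //   short    -> extend (promotable integer type in plain C)
    //   int      -> extend (32 bits; PPC64-specific widening to 64)
    //   unsigned -> extend (same 32-bit rule)
    //   long     -> direct (already 64 bits)
    void f(short s, int i, unsigned u, long l);
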
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/constructors should not be passed
+ // by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return (isPromotableTypeForABI(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableTypeForABI(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
+llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+
+ // Update the va_list pointer.
+ unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
+ unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ // If the argument is smaller than 8 bytes, it is right-adjusted in
+ // its doubleword slot. Adjust the pointer to pick it up from the
+ // correct offset.
+ if (SizeInBytes < 8) {
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
+
+ llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ return Builder.CreateBitCast(Addr, PTy);
+}
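
The right-adjustment is simple pointer arithmetic; a minimal sketch of the rule, assuming a big-endian doubleword slot:

    #include <cassert>

    // An argument smaller than 8 bytes sits in the high-order end of its
    // doubleword slot, so the load address is bumped by (8 - size).
    static unsigned long argAddress(unsigned long slot, unsigned size) {
      return size < 8 ? slot + (8 - size) : slot;
    }

    int main() {
      assert(argAddress(0x1000, 4) == 0x1004); // int: read from bytes 4..7
      assert(argAddress(0x1000, 8) == 0x1000); // full doubleword: unadjusted
    }
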
+
+static bool
+PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) {
// This is calculated from the LLVM and GCC tables and verified
// against gcc output. AFAIK all ABIs use the same encoding.
@@ -2553,6 +2875,21 @@ PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return false;
}
+bool
+PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+
+ return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+
+ return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+}
+
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//
@@ -2576,14 +2913,18 @@ public:
bool isEABI() const {
StringRef Env =
getContext().getTargetInfo().getTriple().getEnvironmentName();
- return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi");
+ return (Env == "gnueabi" || Env == "eabi" ||
+ Env == "android" || Env == "androideabi");
}
private:
ABIKind getABIKind() const { return Kind; }
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs,
+ unsigned &AllocatedVFP,
+ bool &IsHA) const;
+ bool isIllegalVectorType(QualType Ty) const;
virtual void computeInfo(CGFunctionInfo &FI) const;
@@ -2626,10 +2967,33 @@ public:
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ // To correctly handle Homogeneous Aggregate, we need to keep track of the
+ // VFP registers allocated so far.
+ // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
+ // VFP registers of the appropriate type unallocated then the argument is
+ // allocated to the lowest-numbered sequence of such registers.
+ // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
+ // unallocated are marked as unavailable.
+ unsigned AllocatedVFP = 0;
+ int VFPRegs[16] = { 0 };
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ it != ie; ++it) {
+ unsigned PreAllocation = AllocatedVFP;
+ bool IsHA = false;
+ // 6.1.2.3 There is one VFP co-processor register class using registers
+ // s0-s15 (d0-d7) for passing arguments.
+ const unsigned NumVFPs = 16;
+ it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
+ // If we do not have enough VFP registers for the HA, any VFP registers
+ // that are unallocated are marked as unavailable. To achieve this, we add
+ // padding of (NumVFPs - PreAllocation) floats.
+ if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
+ llvm::Type *PaddingTy = llvm::ArrayType::get(
+ llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
+ it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
+ }
+ }
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -2637,7 +3001,9 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
// Calling convention as default by an ABI.
llvm::CallingConv::ID DefaultCC;
- if (isEABI())
+ if (getContext().getTargetInfo().getTriple().getEnvironmentName() == "gnueabihf")
+ DefaultCC = llvm::CallingConv::ARM_AAPCS_VFP;
+ else if (isEABI())
DefaultCC = llvm::CallingConv::ARM_AAPCS;
else
DefaultCC = llvm::CallingConv::ARM_APCS;
@@ -2729,7 +3095,88 @@ static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
return (Members > 0 && Members <= 4);
}
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
+/// markAllocatedVFPs - update VFPRegs according to the alignment and
+/// number of VFP registers (unit is S register) requested.
+static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
+ unsigned Alignment,
+ unsigned NumRequired) {
+ // Early Exit.
+ if (AllocatedVFP >= 16)
+ return;
+ // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
+ // VFP registers of the appropriate type unallocated then the argument is
+ // allocated to the lowest-numbered sequence of such registers.
+ for (unsigned I = 0; I < 16; I += Alignment) {
+ bool FoundSlot = true;
+ for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
+ if (J >= 16 || VFPRegs[J]) {
+ FoundSlot = false;
+ break;
+ }
+ if (FoundSlot) {
+ for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
+ VFPRegs[J] = 1;
+ AllocatedVFP += NumRequired;
+ return;
+ }
+ }
+ // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
+ // unallocated are marked as unavailable.
+ for (unsigned I = 0; I < 16; I++)
+ VFPRegs[I] = 1;
+ AllocatedVFP = 17; // We do not have enough VFP registers.
+}
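
A small standalone harness (the routine is copied verbatim from above) shows the lowest-numbered back-fill behavior that C.1.vfp describes:

    #include <cassert>

    static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
                                  unsigned Alignment, unsigned NumRequired) {
      if (AllocatedVFP >= 16)
        return;
      for (unsigned I = 0; I < 16; I += Alignment) {
        bool FoundSlot = true;
        for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
          if (J >= 16 || VFPRegs[J]) {
            FoundSlot = false;
            break;
          }
        if (FoundSlot) {
          for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
            VFPRegs[J] = 1;
          AllocatedVFP += NumRequired;
          return;
        }
      }
      for (unsigned I = 0; I < 16; I++)
        VFPRegs[I] = 1;
      AllocatedVFP = 17;
    }

    int main() {
      int VFPRegs[16] = { 0 };
      unsigned Allocated = 0;
      markAllocatedVFPs(VFPRegs, Allocated, 1, 1); // float  -> s0
      markAllocatedVFPs(VFPRegs, Allocated, 2, 2); // double -> s2,s3 (s1 skipped)
      markAllocatedVFPs(VFPRegs, Allocated, 1, 1); // float  -> s1 back-fills
      assert(Allocated == 4 && VFPRegs[0] && VFPRegs[1] &&
             VFPRegs[2] && VFPRegs[3]);
    }
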
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
+ unsigned &AllocatedVFP,
+ bool &IsHA) const {
+ // We update number of allocated VFPs according to
+ // 6.1.2.1 The following argument types are VFP CPRCs:
+ // A single-precision floating-point type (including promoted
+ // half-precision types); A double-precision floating-point type;
+ // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
+ // with a Base Type of a single- or double-precision floating-point type,
+ // 64-bit containerized vectors or 128-bit containerized vectors with one
+ // to four Elements.
+
+ // Handle illegal vector types here.
+ if (isIllegalVectorType(Ty)) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 32) {
+ llvm::Type *ResType =
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64) {
+ llvm::Type *ResType = llvm::VectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 2);
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 128) {
+ llvm::Type *ResType = llvm::VectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 4);
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ }
+ // Update VFPRegs for legal vector types.
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ // The size of a legal vector should be a power of 2 and at least 64 bits.
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32);
+ }
+ // Update VFPRegs for floating point types.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Half ||
+ BT->getKind() == BuiltinType::Float)
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1);
+ if (BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble)
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
+ }
+
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
@@ -2749,18 +3196,42 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
- // Homogeneous Aggregates need to be expanded.
+ // Homogeneous Aggregates need to be expanded when we can fit the aggregate
+ // into VFP registers.
const Type *Base = 0;
- if (isHomogeneousAggregate(Ty, Base, getContext())) {
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
assert(Base && "Base class should be set for homogeneous aggregate");
+ // Base can be a floating-point or a vector.
+ if (Base->isVectorType()) {
+ // ElementSize is in number of floats.
+ unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize,
+ Members * ElementSize);
+ } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members);
+ else {
+ assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
+ Base->isSpecificBuiltinType(BuiltinType::LongDouble));
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2);
+ }
+ IsHA = true;
return ABIArgInfo::getExpand();
}
}
// Support byval for ARM.
- if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) ||
- getContext().getTypeAlign(Ty) > 64) {
- return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ // The ABI alignment for APCS is 4 bytes; for AAPCS it is at least 4 and at
+ // most 8 bytes. We realign the indirect argument if the type alignment is
+ // bigger than the ABI alignment.
+ uint64_t ABIAlign = 4;
+ uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS)
+ ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+ if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
}
// Otherwise, pass by coercing to a structure of the appropriate size.
@@ -2946,6 +3417,21 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIndirect(0);
}
+/// isIllegalVector - check whether Ty is an illegal vector type.
+bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be a power of 2.
+ if ((NumElements & (NumElements - 1)) != 0)
+ return true;
+ // Size should be greater than 32 bits.
+ return Size <= 32;
+ }
+ return false;
+}
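
Concretely, assuming clang's ext_vector_type extension, the rule above sorts vector types like this (a sketch, not an exhaustive list):

    typedef float float2 __attribute__((ext_vector_type(2))); // 64 bits: legal
    typedef float float3 __attribute__((ext_vector_type(3))); // 3 elements: illegal (not a power of 2)
    typedef char  char2  __attribute__((ext_vector_type(2))); // 16 bits: illegal (<= 32 bits)
    typedef int   int4   __attribute__((ext_vector_type(4))); // 128 bits: legal
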
+
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BP = CGF.Int8PtrTy;
@@ -2954,30 +3440,104 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- // Handle address alignment for type alignment > 32 bits
+
+ uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ bool IsIndirect = false;
+
+ // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+ // APCS. For AAPCS, the ABI alignment is at least 4 and at most 8 bytes.
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS)
+ TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+ else
+ TyAlign = 4;
+ // Use indirect if the size of the illegal vector is bigger than 16 bytes.
+ if (isIllegalVectorType(Ty) && Size > 16) {
+ IsIndirect = true;
+ Size = 4;
+ TyAlign = 4;
+ }
+
+ // Handle address alignment for ABI alignment > 4 bytes.
if (TyAlign > 4) {
assert((TyAlign & (TyAlign - 1)) == 0 &&
"Alignment is not power of 2!");
llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
}
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::RoundUpToAlignment(Size, 4);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ if (IsIndirect)
+ Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
+ else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
+ // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
+ // may not be correctly aligned for the vector type. We create an aligned
+ // temporary space and copy the content over from ap.cur to the temporary
+ // space. This is necessary if the natural alignment of the type is greater
+ // than the ABI alignment.
+ llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
+ llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
+ "var.align");
+ llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
+ llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
+ Builder.CreateMemCpy(Dst, Src,
+ llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
+ TyAlign, false);
+ Addr = AlignedTemp; // The content is now in the aligned location.
+ }
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
return AddrTyped;
}
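
The clamp applied to TyAlign above is easy to check in isolation; a minimal sketch:

    #include <algorithm>
    #include <cassert>

    static unsigned aapcsStackAlign(unsigned TyAlign) {
      return std::min(std::max(TyAlign, 4u), 8u);
    }

    int main() {
      assert(aapcsStackAlign(1)  == 4); // char: rounded up to the word size
      assert(aapcsStackAlign(8)  == 8); // double / 64-bit vector: kept
      assert(aapcsStackAlign(16) == 8); // 128-bit vector: capped, hence the
                                        // aligned-temporary copy in EmitVAArg
    }
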
+namespace {
+
+class NaClARMABIInfo : public ABIInfo {
+ public:
+ NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
+ : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+ private:
+ PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
+ ARMABIInfo NInfo; // Used for everything else.
+};
+
+class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
+ : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
+};
+
+}
+
+void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (FI.getASTCallingConvention() == CC_PnaclCall)
+ PInfo.computeInfo(FI);
+ else
+ static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
+}
+
+llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Always use the native convention; calling pnacl-style varargs functions
+ // is unsupported.
+ return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
+}
+
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
@@ -3072,7 +3632,7 @@ SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
// OpenCL __kernel functions get a kernel calling convention
F->setCallingConv(llvm::CallingConv::PTX_Kernel);
// And kernel functions are not subject to inlining
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
}
}
@@ -3188,7 +3748,7 @@ void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
F->setCallingConv(CC);
// Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
}
// Step 3: Emit _interrupt_handler alias.
@@ -3226,7 +3786,7 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
F->setCallingConv(llvm::CallingConv::MSP430_INTR);
// Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
// Step 3: Emit ISR vector alias.
unsigned Num = attr->getNumber() + 0xffe0;
@@ -3583,7 +4143,7 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
if (M.getLangOpts().OpenCL) {
if (FD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL C Kernel functions are not subject to inlining
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
@@ -3767,6 +4327,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
default:
return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
+ case llvm::Triple::le32:
+ return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
case llvm::Triple::mips:
case llvm::Triple::mipsel:
return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
@@ -3779,19 +4341,29 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::thumb:
{
ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
-
if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
Kind = ARMABIInfo::APCS;
- else if (CodeGenOpts.FloatABI == "hard")
+ else if (CodeGenOpts.FloatABI == "hard" ||
+ (CodeGenOpts.FloatABI != "soft" && Triple.getEnvironment()==llvm::Triple::GNUEABIHF))
Kind = ARMABIInfo::AAPCS_VFP;
- return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
+ switch (Triple.getOS()) {
+ case llvm::Triple::NativeClient:
+ return *(TheTargetCodeGenInfo =
+ new NaClARMTargetCodeGenInfo(Types, Kind));
+ default:
+ return *(TheTargetCodeGenInfo =
+ new ARMTargetCodeGenInfo(Types, Kind));
+ }
}
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
case llvm::Triple::ppc64:
- return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
+ if (Triple.isOSBinFormatELF())
+ return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
+ else
+ return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
@@ -3848,6 +4420,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::MinGW32:
case llvm::Triple::Cygwin:
return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+ case llvm::Triple::NativeClient:
+ return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
default:
return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
HasAVX));
diff --git a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
index c0a2a50..93d70a9 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Arg.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Option.h"
#include "llvm/ADT/SmallString.h"
@@ -15,22 +16,23 @@
#include "llvm/Support/raw_ostream.h"
using namespace clang::driver;
+using clang::StringRef;
-Arg::Arg(const Option *_Opt, unsigned _Index, const Arg *_BaseArg)
- : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+Arg::Arg(const Option _Opt, StringRef S, unsigned _Index, const Arg *_BaseArg)
+ : Opt(_Opt), BaseArg(_BaseArg), Spelling(S), Index(_Index),
Claimed(false), OwnsValues(false) {
}
-Arg::Arg(const Option *_Opt, unsigned _Index,
+Arg::Arg(const Option _Opt, StringRef S, unsigned _Index,
const char *Value0, const Arg *_BaseArg)
- : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+ : Opt(_Opt), BaseArg(_BaseArg), Spelling(S), Index(_Index),
Claimed(false), OwnsValues(false) {
Values.push_back(Value0);
}
-Arg::Arg(const Option *_Opt, unsigned _Index,
+Arg::Arg(const Option _Opt, StringRef S, unsigned _Index,
const char *Value0, const char *Value1, const Arg *_BaseArg)
- : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+ : Opt(_Opt), BaseArg(_BaseArg), Spelling(S), Index(_Index),
Claimed(false), OwnsValues(false) {
Values.push_back(Value0);
Values.push_back(Value1);
@@ -47,7 +49,7 @@ void Arg::dump() const {
llvm::errs() << "<";
llvm::errs() << " Opt:";
- Opt->dump();
+ Opt.dump();
llvm::errs() << " Index:" << Index;
@@ -83,39 +85,39 @@ void Arg::renderAsInput(const ArgList &Args, ArgStringList &Output) const {
}
for (unsigned i = 0, e = getNumValues(); i != e; ++i)
- Output.push_back(getValue(Args, i));
+ Output.push_back(getValue(i));
}
void Arg::render(const ArgList &Args, ArgStringList &Output) const {
switch (getOption().getRenderStyle()) {
case Option::RenderValuesStyle:
for (unsigned i = 0, e = getNumValues(); i != e; ++i)
- Output.push_back(getValue(Args, i));
+ Output.push_back(getValue(i));
break;
case Option::RenderCommaJoinedStyle: {
SmallString<256> Res;
llvm::raw_svector_ostream OS(Res);
- OS << getOption().getName();
+ OS << getSpelling();
for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
if (i) OS << ',';
- OS << getValue(Args, i);
+ OS << getValue(i);
}
Output.push_back(Args.MakeArgString(OS.str()));
break;
}
-
+
case Option::RenderJoinedStyle:
Output.push_back(Args.GetOrMakeJoinedArgString(
- getIndex(), getOption().getName(), getValue(Args, 0)));
+ getIndex(), getSpelling(), getValue(0)));
for (unsigned i = 1, e = getNumValues(); i != e; ++i)
- Output.push_back(getValue(Args, i));
+ Output.push_back(getValue(i));
break;
case Option::RenderSeparateStyle:
- Output.push_back(getOption().getName().data());
+ Output.push_back(Args.MakeArgString(getSpelling()));
for (unsigned i = 0, e = getNumValues(); i != e; ++i)
- Output.push_back(getValue(Args, i));
+ Output.push_back(getValue(i));
break;
}
}
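
The Arg.cpp hunks above thread a precomputed Spelling through every constructor, so getValue() and the render styles no longer need the originating ArgList to recover the option's text. A minimal sketch of the idea under that reading (MiniArg is a hypothetical stand-in, not clang::driver::Arg):

#include <iostream>
#include <string>
#include <vector>

struct MiniArg {
  std::string Spelling;            // exactly as written, e.g. "-I"
  std::vector<std::string> Values; // e.g. {"include"}

  // Render as "-I include" with no access to the original parse state.
  void render(std::vector<std::string> &Out) const {
    Out.push_back(Spelling);
    for (std::size_t I = 0; I != Values.size(); ++I)
      Out.push_back(Values[I]);
  }
};

int main() {
  const MiniArg A = {"-I", {"include"}};
  std::vector<std::string> Out;
  A.render(Out);
  for (std::size_t I = 0; I != Out.size(); ++I)
    std::cout << Out[I] << '\n';
}

Caching the spelling at construction is also what lets aliased options render under their unaliased spelling, as the RenderSeparateStyle hunk above shows.
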
diff --git a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
index 7fd439e..b3a43df 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
@@ -211,7 +211,7 @@ bool ArgList::hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default) const {
StringRef ArgList::getLastArgValue(OptSpecifier Id,
StringRef Default) const {
if (Arg *A = getLastArg(Id))
- return A->getValue(*this);
+ return A->getValue();
return Default;
}
@@ -220,10 +220,10 @@ int ArgList::getLastArgIntValue(OptSpecifier Id, int Default,
int Res = Default;
if (Arg *A = getLastArg(Id)) {
- if (StringRef(A->getValue(*this)).getAsInteger(10, Res)) {
+ if (StringRef(A->getValue()).getAsInteger(10, Res)) {
if (Diags)
Diags->Report(diag::err_drv_invalid_int_value)
- << A->getAsString(*this) << A->getValue(*this);
+ << A->getAsString(*this) << A->getValue();
}
}
@@ -258,7 +258,7 @@ void ArgList::AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
ie = filtered_end(); it != ie; ++it) {
(*it)->claim();
for (unsigned i = 0, e = (*it)->getNumValues(); i != e; ++i)
- Output.push_back((*it)->getValue(*this, i));
+ Output.push_back((*it)->getValue(i));
}
}
@@ -271,10 +271,10 @@ void ArgList::AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
if (Joined) {
Output.push_back(MakeArgString(StringRef(Translation) +
- (*it)->getValue(*this, 0)));
+ (*it)->getValue(0)));
} else {
Output.push_back(Translation);
- Output.push_back((*it)->getValue(*this, 0));
+ Output.push_back((*it)->getValue(0));
}
}
}
@@ -362,33 +362,40 @@ const char *DerivedArgList::MakeArgString(StringRef Str) const {
return BaseArgs.MakeArgString(Str);
}
-Arg *DerivedArgList::MakeFlagArg(const Arg *BaseArg, const Option *Opt) const {
- Arg *A = new Arg(Opt, BaseArgs.MakeIndex(Opt->getName()), BaseArg);
+Arg *DerivedArgList::MakeFlagArg(const Arg *BaseArg, const Option Opt) const {
+ Arg *A = new Arg(Opt, ArgList::MakeArgString(Twine(Opt.getPrefix()) +
+ Twine(Opt.getName())),
+ BaseArgs.MakeIndex(Opt.getName()), BaseArg);
SynthesizedArgs.push_back(A);
return A;
}
-Arg *DerivedArgList::MakePositionalArg(const Arg *BaseArg, const Option *Opt,
+Arg *DerivedArgList::MakePositionalArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const {
unsigned Index = BaseArgs.MakeIndex(Value);
- Arg *A = new Arg(Opt, Index, BaseArgs.getArgString(Index), BaseArg);
+ Arg *A = new Arg(Opt, ArgList::MakeArgString(Twine(Opt.getPrefix()) +
+ Twine(Opt.getName())),
+ Index, BaseArgs.getArgString(Index), BaseArg);
SynthesizedArgs.push_back(A);
return A;
}
-Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option *Opt,
+Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const {
- unsigned Index = BaseArgs.MakeIndex(Opt->getName(), Value);
- Arg *A = new Arg(Opt, Index, BaseArgs.getArgString(Index + 1), BaseArg);
+ unsigned Index = BaseArgs.MakeIndex(Opt.getName(), Value);
+ Arg *A = new Arg(Opt, ArgList::MakeArgString(Twine(Opt.getPrefix()) +
+ Twine(Opt.getName())),
+ Index, BaseArgs.getArgString(Index + 1), BaseArg);
SynthesizedArgs.push_back(A);
return A;
}
-Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option *Opt,
+Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const {
- unsigned Index = BaseArgs.MakeIndex(Opt->getName().str() + Value.str());
- Arg *A = new Arg(Opt, Index,
- BaseArgs.getArgString(Index) + Opt->getName().size(),
+ unsigned Index = BaseArgs.MakeIndex(Opt.getName().str() + Value.str());
+ Arg *A = new Arg(Opt, ArgList::MakeArgString(Twine(Opt.getPrefix()) +
+ Twine(Opt.getName())), Index,
+ BaseArgs.getArgString(Index) + Opt.getName().size(),
BaseArg);
SynthesizedArgs.push_back(A);
return A;
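
ArgList.cpp drops the now-redundant ArgList parameter from the same accessors; getLastArgIntValue in the hunk above is the usual last-occurrence-wins lookup with a diagnosed fallback on malformed input. A self-contained plain-C++ sketch of that pattern (the pair-based argument list and helper name are hypothetical):

#include <cstdlib>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

typedef std::vector<std::pair<std::string, std::string> > ArgPairs;

// Last occurrence wins; malformed values keep the default and diagnose.
static int getLastArgIntValue(const ArgPairs &Args, const std::string &Id,
                              int Default) {
  int Res = Default;
  for (ArgPairs::const_reverse_iterator It = Args.rbegin(),
                                        Ie = Args.rend(); It != Ie; ++It) {
    if (It->first != Id)
      continue;
    char *End = 0;
    long V = std::strtol(It->second.c_str(), &End, 10);
    if (End != It->second.c_str() && *End == '\0')
      Res = static_cast<int>(V);
    else
      std::cerr << "invalid integer value '" << It->second
                << "' in '" << Id << "'\n";
    break;
  }
  return Res;
}

int main() {
  ArgPairs Args;
  Args.push_back(std::make_pair("-ftemplate-depth", "128"));
  Args.push_back(std::make_pair("-ftemplate-depth", "512"));
  std::cout << getLastArgIntValue(Args, "-ftemplate-depth", 1024) << '\n'; // 512
}
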
diff --git a/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp b/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp
index ea80f5a..4f89b73 100644
--- a/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp
@@ -15,11 +15,19 @@ using namespace clang::driver;
using namespace clang::driver::options;
using namespace clang::driver::cc1asoptions;
+#define PREFIX(NAME, VALUE) const char *const NAME[] = VALUE;
+#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR)
+#include "clang/Driver/CC1AsOptions.inc"
+#undef OPTION
+#undef PREFIX
+
static const OptTable::Info CC1AsInfoTable[] = {
-#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+#define PREFIX(NAME, VALUE)
+#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
HELPTEXT, METAVAR) \
- { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
- OPT_##GROUP, OPT_##ALIAS },
+ { PREFIX, NAME, HELPTEXT, METAVAR, OPT_##ID, Option::KIND##Class, PARAM, \
+ FLAGS, OPT_##GROUP, OPT_##ALIAS },
#include "clang/Driver/CC1AsOptions.inc"
};
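
CC1AsOptions.cpp above (and DriverOptions.cpp further down) now expand the generated .inc file twice: once with PREFIX defined to emit the per-option prefix arrays, and once with OPTION defined to populate the Info table. A minimal, self-contained sketch of that two-pass X-macro technique, where OPTION_LIST stands in for the generated include and all names are hypothetical:

#include <cstdio>

// Stand-in for the generated Options.inc / CC1AsOptions.inc file.
#define OPTION_LIST(PREFIX, OPTION)        \
  PREFIX(HelpPrefixes, "-", "--")          \
  OPTION(HelpPrefixes, "help", 1)          \
  PREFIX(OutputPrefixes, "-")              \
  OPTION(OutputPrefixes, "o", 2)

// Pass 1: emit the prefix arrays; OPTION expands to nothing.
#define PREFIX(NAME, ...) \
  static const char *const NAME[] = {__VA_ARGS__, nullptr};
#define OPTION(PREFIX, NAME, ID)
OPTION_LIST(PREFIX, OPTION)
#undef OPTION
#undef PREFIX

// Pass 2: fill the searchable table; PREFIX expands to nothing.
struct Info { const char *const *Prefixes; const char *Name; unsigned ID; };
#define PREFIX(NAME, ...)
#define OPTION(PREFIX, NAME, ID) { PREFIX, NAME, ID },
static const Info InfoTable[] = {
  OPTION_LIST(PREFIX, OPTION)
};
#undef OPTION
#undef PREFIX

int main() {
  for (const Info &I : InfoTable)
    std::printf("option '%s' (id %u), first prefix '%s'\n",
                I.Name, I.ID, I.Prefixes[0]);
  return 0;
}
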
diff --git a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
index c962fca..124e50c 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
@@ -17,6 +17,7 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Program.h"
#include <sys/stat.h>
@@ -101,6 +102,105 @@ void Compilation::PrintJob(raw_ostream &OS, const Job &J,
}
}
+static bool skipArg(const char *Flag, bool &SkipNextArg) {
+ StringRef FlagRef(Flag);
+
+ // Assume we're going to see -Flag <Arg>.
+ SkipNextArg = true;
+
+ // These flags are all of the form -Flag <Arg> and are treated as two
+ // arguments. Therefore, we need to skip the flag and the next argument.
+ bool Res = llvm::StringSwitch<bool>(Flag)
+ .Cases("-I", "-MF", "-MT", "-MQ", true)
+ .Cases("-o", "-coverage-file", "-dependency-file", true)
+ .Cases("-fdebug-compilation-dir", "-fmodule-cache-path", "-idirafter", true)
+ .Cases("-include", "-include-pch", "-internal-isystem", true)
+ .Cases("-internal-externc-isystem", "-iprefix", "-iwithprefix", true)
+ .Cases("-iwithprefixbefore", "-isysroot", "-isystem", "-iquote", true)
+ .Cases("-resource-dir", "-serialize-diagnostic-file", true)
+ .Case("-dwarf-debug-flags", true)
+ .Default(false);
+
+ // Match found.
+ if (Res)
+ return Res;
+
+ // The remaining flags are treated as a single argument.
+ SkipNextArg = false;
+
+ // These flags are all of the form -Flag and have no second argument.
+ Res = llvm::StringSwitch<bool>(Flag)
+ .Cases("-M", "-MM", "-MG", "-MP", "-MD", true)
+ .Case("-MMD", true)
+ .Default(false);
+
+ // Match found.
+ if (Res)
+ return Res;
+
+ // These flags are treated as a single argument (e.g., -F<Dir>).
+ if (FlagRef.startswith("-F") || FlagRef.startswith("-I"))
+ return true;
+
+ return false;
+}
+
+static bool quoteNextArg(const char *flag) {
+ return llvm::StringSwitch<bool>(flag)
+ .Case("-D", true)
+ .Default(false);
+}
+
+void Compilation::PrintDiagnosticJob(raw_ostream &OS, const Job &J) const {
+ if (const Command *C = dyn_cast<Command>(&J)) {
+ OS << C->getExecutable();
+ unsigned QuoteNextArg = 0;
+ for (ArgStringList::const_iterator it = C->getArguments().begin(),
+ ie = C->getArguments().end(); it != ie; ++it) {
+
+ bool SkipNext;
+ if (skipArg(*it, SkipNext)) {
+ if (SkipNext) ++it;
+ continue;
+ }
+
+ if (!QuoteNextArg)
+ QuoteNextArg = quoteNextArg(*it) ? 2 : 0;
+
+ OS << ' ';
+
+ if (QuoteNextArg == 1)
+ OS << '"';
+
+ if (!std::strpbrk(*it, " \"\\$")) {
+ OS << *it;
+ } else {
+ // Quote the argument and escape shell special characters; this isn't
+ // really complete but is good enough.
+ OS << '"';
+ for (const char *s = *it; *s; ++s) {
+ if (*s == '"' || *s == '\\' || *s == '$')
+ OS << '\\';
+ OS << *s;
+ }
+ OS << '"';
+ }
+
+ if (QuoteNextArg) {
+ if (QuoteNextArg == 1)
+ OS << '"';
+ --QuoteNextArg;
+ }
+ }
+ OS << '\n';
+ } else {
+ const JobList *Jobs = cast<JobList>(&J);
+ for (JobList::const_iterator
+ it = Jobs->begin(), ie = Jobs->end(); it != ie; ++it)
+ PrintDiagnosticJob(OS, **it);
+ }
+}
+
bool Compilation::CleanupFileList(const ArgStringList &Files,
bool IssueErrors) const {
bool Success = true;
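
PrintDiagnosticJob above rebuilds a crash-reproducer command line structurally, replacing the string surgery deleted from Driver.cpp below: path-bearing flags are skipped (with or without their separate value), and anything containing shell metacharacters is quoted and escaped. A plain-C++ sketch of the two helpers, covering only a subset of the flags listed above:

#include <cstddef>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Returns true if Flag should be dropped from the reproducer line;
// SkipNextArg additionally reports whether its separate value follows.
static bool skipArg(const std::string &Flag, bool &SkipNextArg) {
  // "-Flag <Arg>" forms: drop the flag and the argument after it.
  static const std::set<std::string> TwoArg = {
      "-I", "-MF", "-MT", "-MQ", "-o", "-include", "-isystem", "-isysroot"};
  SkipNextArg = TwoArg.count(Flag) != 0;
  if (SkipNextArg)
    return true;
  // Single-argument "-Flag" forms.
  static const std::set<std::string> OneArg = {"-M",  "-MM", "-MG",
                                               "-MP", "-MD", "-MMD"};
  if (OneArg.count(Flag))
    return true;
  // Joined forms such as -F<dir> and -I<dir>.
  return Flag.compare(0, 2, "-F") == 0 || Flag.compare(0, 2, "-I") == 0;
}

// Print Arg, quoting and escaping shell metacharacters when present.
static void printQuoted(const std::string &Arg) {
  if (Arg.find_first_of(" \"\\$") == std::string::npos) {
    std::cout << Arg;
    return;
  }
  std::cout << '"';
  for (char C : Arg) {
    if (C == '"' || C == '\\' || C == '$')
      std::cout << '\\';
    std::cout << C;
  }
  std::cout << '"';
}

int main() {
  const std::vector<std::string> Cmd = {"clang", "-cc1", "-I", "/tmp/include",
                                        "-D", "NAME=\"value\"", "main.c"};
  std::cout << Cmd[0];
  for (std::size_t I = 1; I < Cmd.size(); ++I) {
    bool SkipNext = false;
    if (skipArg(Cmd[I], SkipNext)) {
      if (SkipNext)
        ++I;
      continue;
    }
    std::cout << ' ';
    printQuoted(Cmd[I]);
  }
  std::cout << '\n'; // clang -cc1 -D "NAME=\"value\"" main.c
}
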
diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
index 57b3417..3c410bb 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
@@ -52,25 +52,13 @@ Driver::Driver(StringRef ClangExecutable,
ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT),
UseStdLib(true), DefaultTargetTriple(DefaultTargetTriple),
DefaultImageName(DefaultImageName),
- DriverTitle("clang \"gcc-compatible\" driver"),
+ DriverTitle("clang LLVM compiler"),
CCPrintOptionsFilename(0), CCPrintHeadersFilename(0),
CCLogDiagnosticsFilename(0), CCCIsCXX(false),
CCCIsCPP(false),CCCEcho(false), CCCPrintBindings(false),
CCPrintOptions(false), CCPrintHeaders(false), CCLogDiagnostics(false),
CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true),
- CCCUseClang(true), CCCUseClangCXX(true), CCCUseClangCPP(true),
- ForcedClangUse(false), CCCUsePCH(true), SuppressMissingInputWarning(false) {
- if (IsProduction) {
- // In a "production" build, only use clang on architectures we expect to
- // work.
- //
- // During development its more convenient to always have the driver use
- // clang, but we don't want users to be confused when things don't work, or
- // to file bugs for things we don't support.
- CCCClangArchs.insert(llvm::Triple::x86);
- CCCClangArchs.insert(llvm::Triple::x86_64);
- CCCClangArchs.insert(llvm::Triple::arm);
- }
+ CCCUsePCH(true), SuppressMissingInputWarning(false) {
Name = llvm::sys::path::stem(ClangExecutable);
Dir = llvm::sys::path::parent_path(ClangExecutable);
@@ -109,7 +97,7 @@ InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgList) {
for (ArgList::const_iterator it = Args->begin(), ie = Args->end();
it != ie; ++it) {
Arg *A = *it;
- if (A->getOption().isUnsupported()) {
+ if (A->getOption().hasFlag(options::Unsupported)) {
Diag(clang::diag::err_drv_unsupported_opt) << A->getAsString(*Args);
continue;
}
@@ -186,9 +174,9 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
// Add the remaining values as Xlinker arguments.
for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
- if (StringRef(A->getValue(Args, i)) != "--no-demangle")
+ if (StringRef(A->getValue(i)) != "--no-demangle")
DAL->AddSeparateArg(A, Opts->getOption(options::OPT_Xlinker),
- A->getValue(Args, i));
+ A->getValue(i));
continue;
}
@@ -197,22 +185,22 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
// some build systems. We don't try to be complete here because we don't
// care to encourage this usage model.
if (A->getOption().matches(options::OPT_Wp_COMMA) &&
- A->getNumValues() == 2 &&
- (A->getValue(Args, 0) == StringRef("-MD") ||
- A->getValue(Args, 0) == StringRef("-MMD"))) {
+ (A->getValue(0) == StringRef("-MD") ||
+ A->getValue(0) == StringRef("-MMD"))) {
// Rewrite to -MD/-MMD along with -MF.
- if (A->getValue(Args, 0) == StringRef("-MD"))
+ if (A->getValue(0) == StringRef("-MD"))
DAL->AddFlagArg(A, Opts->getOption(options::OPT_MD));
else
DAL->AddFlagArg(A, Opts->getOption(options::OPT_MMD));
- DAL->AddSeparateArg(A, Opts->getOption(options::OPT_MF),
- A->getValue(Args, 1));
+ if (A->getNumValues() == 2)
+ DAL->AddSeparateArg(A, Opts->getOption(options::OPT_MF),
+ A->getValue(1));
continue;
}
// Rewrite reserved library names.
if (A->getOption().matches(options::OPT_l)) {
- StringRef Value = A->getValue(Args);
+ StringRef Value = A->getValue();
// Rewrite unless -nostdlib is present.
if (!HasNostdlib && Value == "stdc++") {
@@ -285,48 +273,23 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
CCCIsCXX = Args->hasArg(options::OPT_ccc_cxx) || CCCIsCXX;
CCCEcho = Args->hasArg(options::OPT_ccc_echo);
if (const Arg *A = Args->getLastArg(options::OPT_ccc_gcc_name))
- CCCGenericGCCName = A->getValue(*Args);
- CCCUseClangCXX = Args->hasFlag(options::OPT_ccc_clang_cxx,
- options::OPT_ccc_no_clang_cxx,
- CCCUseClangCXX);
+ CCCGenericGCCName = A->getValue();
CCCUsePCH = Args->hasFlag(options::OPT_ccc_pch_is_pch,
options::OPT_ccc_pch_is_pth);
- CCCUseClang = !Args->hasArg(options::OPT_ccc_no_clang);
- CCCUseClangCPP = !Args->hasArg(options::OPT_ccc_no_clang_cpp);
- if (const Arg *A = Args->getLastArg(options::OPT_ccc_clang_archs)) {
- StringRef Cur = A->getValue(*Args);
-
- CCCClangArchs.clear();
- while (!Cur.empty()) {
- std::pair<StringRef, StringRef> Split = Cur.split(',');
-
- if (!Split.first.empty()) {
- llvm::Triple::ArchType Arch =
- llvm::Triple(Split.first, "", "").getArch();
-
- if (Arch == llvm::Triple::UnknownArch)
- Diag(clang::diag::err_drv_invalid_arch_name) << Split.first;
-
- CCCClangArchs.insert(Arch);
- }
-
- Cur = Split.second;
- }
- }
// FIXME: DefaultTargetTriple is used by the target-prefixed calls to as/ld
// and getToolChain is const.
if (const Arg *A = Args->getLastArg(options::OPT_target))
- DefaultTargetTriple = A->getValue(*Args);
+ DefaultTargetTriple = A->getValue();
if (const Arg *A = Args->getLastArg(options::OPT_ccc_install_dir))
- Dir = InstalledDir = A->getValue(*Args);
+ Dir = InstalledDir = A->getValue();
for (arg_iterator it = Args->filtered_begin(options::OPT_B),
ie = Args->filtered_end(); it != ie; ++it) {
const Arg *A = *it;
A->claim();
- PrefixDirs.push_back(A->getValue(*Args, 0));
+ PrefixDirs.push_back(A->getValue(0));
}
if (const Arg *A = Args->getLastArg(options::OPT__sysroot_EQ))
- SysRoot = A->getValue(*Args);
+ SysRoot = A->getValue();
if (Args->hasArg(options::OPT_nostdlib))
UseStdLib = false;
@@ -399,11 +362,11 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
std::string Cmd;
llvm::raw_string_ostream OS(Cmd);
if (FailingCommand)
- C.PrintJob(OS, *FailingCommand, "\n", false);
+ C.PrintDiagnosticJob(OS, *FailingCommand);
else
// Crash triggered by FORCE_CLANG_DIAGNOSTICS_CRASH, which doesn't have an
// associated FailingCommand, so just pass all jobs.
- C.PrintJob(OS, C.getJobs(), "\n", false);
+ C.PrintDiagnosticJob(OS, C.getJobs());
OS.flush();
// Clear stale state and suppress tool output.
@@ -418,7 +381,7 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
bool IgnoreInput = false;
// Ignore input from stdin or any inputs that cannot be preprocessed.
- if (!strcmp(it->second->getValue(C.getArgs()), "-")) {
+ if (!strcmp(it->second->getValue(), "-")) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating preprocessed source(s) - ignoring input from stdin"
".";
@@ -442,7 +405,7 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
it != ie; ++it) {
Arg *A = *it;
if (A->getOption().matches(options::OPT_arch)) {
- StringRef ArchName = A->getValue(C.getArgs());
+ StringRef ArchName = A->getValue();
ArchNames.insert(ArchName);
}
}
@@ -501,57 +464,6 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating run script: " + Script + " " + Err;
} else {
- // Strip away options not necessary to reproduce the crash.
- // FIXME: This doesn't work with quotes (e.g., -D "foo bar").
- SmallVector<std::string, 16> Flag;
- Flag.push_back("-D ");
- Flag.push_back("-F");
- Flag.push_back("-I ");
- Flag.push_back("-M ");
- Flag.push_back("-MD ");
- Flag.push_back("-MF ");
- Flag.push_back("-MG ");
- Flag.push_back("-MM ");
- Flag.push_back("-MMD ");
- Flag.push_back("-MP ");
- Flag.push_back("-MQ ");
- Flag.push_back("-MT ");
- Flag.push_back("-o ");
- Flag.push_back("-coverage-file ");
- Flag.push_back("-dependency-file ");
- Flag.push_back("-fdebug-compilation-dir ");
- Flag.push_back("-fmodule-cache-path ");
- Flag.push_back("-idirafter ");
- Flag.push_back("-include ");
- Flag.push_back("-include-pch ");
- Flag.push_back("-internal-isystem ");
- Flag.push_back("-internal-externc-isystem ");
- Flag.push_back("-iprefix ");
- Flag.push_back("-iwithprefix ");
- Flag.push_back("-iwithprefixbefore ");
- Flag.push_back("-isysroot ");
- Flag.push_back("-isystem ");
- Flag.push_back("-iquote ");
- Flag.push_back("-resource-dir ");
- Flag.push_back("-serialize-diagnostic-file ");
- for (unsigned i = 0, e = Flag.size(); i < e; ++i) {
- size_t I = 0, E = 0;
- do {
- I = Cmd.find(Flag[i], I);
- if (I == std::string::npos) break;
-
- E = Cmd.find(" ", I + Flag[i].length());
- if (E == std::string::npos) break;
- // The -D option is not removed. Instead, the argument is quoted.
- if (Flag[i] != "-D ") {
- Cmd.erase(I, E - I + 1);
- } else {
- Cmd.insert(I+3, "\"");
- Cmd.insert(++E, "\"");
- I = E;
- }
- } while(1);
- }
// Append the new filename with correct preprocessed suffix.
size_t I, E;
I = Cmd.find("-main-file-name ");
@@ -639,12 +551,12 @@ void Driver::PrintOptions(const ArgList &Args) const {
it != ie; ++it, ++i) {
Arg *A = *it;
llvm::errs() << "Option " << i << " - "
- << "Name: \"" << A->getOption().getName() << "\", "
+ << "Name: \"" << A->getOption().getPrefixedName() << "\", "
<< "Values: {";
for (unsigned j = 0; j < A->getNumValues(); ++j) {
if (j)
llvm::errs() << ", ";
- llvm::errs() << '"' << A->getValue(Args, j) << '"';
+ llvm::errs() << '"' << A->getValue(j) << '"';
}
llvm::errs() << "}\n";
}
@@ -652,7 +564,9 @@ void Driver::PrintOptions(const ArgList &Args) const {
void Driver::PrintHelp(bool ShowHidden) const {
getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
- ShowHidden);
+ /*Include*/0,
+ /*Exclude*/options::NoDriverOption |
+ (ShowHidden ? 0 : options::HelpHidden));
}
void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
@@ -750,12 +664,12 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
// FIXME: The following handlers should use a callback mechanism, we don't
// know what the client would like to do.
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
- llvm::outs() << GetFilePath(A->getValue(C.getArgs()), TC) << "\n";
+ llvm::outs() << GetFilePath(A->getValue(), TC) << "\n";
return false;
}
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_prog_name_EQ)) {
- llvm::outs() << GetProgramPath(A->getValue(C.getArgs()), TC) << "\n";
+ llvm::outs() << GetProgramPath(A->getValue(), TC) << "\n";
return false;
}
@@ -818,7 +732,7 @@ static unsigned PrintActions1(const Compilation &C, Action *A,
os << Action::getClassName(A->getKind()) << ", ";
if (InputAction *IA = dyn_cast<InputAction>(A)) {
- os << "\"" << IA->getInputArg().getValue(C.getArgs()) << "\"";
+ os << "\"" << IA->getInputArg().getValue() << "\"";
} else if (BindArchAction *BIA = dyn_cast<BindArchAction>(A)) {
os << '"' << BIA->getArchName() << '"'
<< ", {" << PrintActions1(C, *BIA->begin(), Ids) << "}";
@@ -878,7 +792,7 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
// Validate the option here; we don't save the type here because its
// particular spelling may participate in other driver choices.
llvm::Triple::ArchType Arch =
- llvm::Triple::getArchTypeForDarwinArchName(A->getValue(Args));
+ tools::darwin::getArchTypeForDarwinArchName(A->getValue());
if (Arch == llvm::Triple::UnknownArch) {
Diag(clang::diag::err_drv_invalid_arch_name)
<< A->getAsString(Args);
@@ -886,15 +800,15 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
}
A->claim();
- if (ArchNames.insert(A->getValue(Args)))
- Archs.push_back(A->getValue(Args));
+ if (ArchNames.insert(A->getValue()))
+ Archs.push_back(A->getValue());
}
}
// When there is no explicit arch for this platform, make sure we still bind
// the architecture (to the default) so that -Xarch_ is handled correctly.
if (!Archs.size())
- Archs.push_back(Args.MakeArgString(TC.getArchName()));
+ Archs.push_back(Args.MakeArgString(TC.getDefaultUniversalArchName()));
// FIXME: We killed off some others but these aren't yet detected in a
// functional manner. If we added information to jobs about which "auxiliary"
@@ -981,8 +895,8 @@ void Driver::BuildInputs(const ToolChain &TC, const DerivedArgList &Args,
it != ie; ++it) {
Arg *A = *it;
- if (isa<InputOption>(A->getOption())) {
- const char *Value = A->getValue(Args);
+ if (A->getOption().getKind() == Option::InputClass) {
+ const char *Value = A->getValue();
types::ID Ty = types::TY_INVALID;
// Infer the input type if necessary.
@@ -1049,8 +963,8 @@ void Driver::BuildInputs(const ToolChain &TC, const DerivedArgList &Args,
if (CheckInputsExist && memcmp(Value, "-", 2) != 0) {
SmallString<64> Path(Value);
if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory)) {
- SmallString<64> Directory(WorkDir->getValue(Args));
- if (llvm::sys::path::is_absolute(Directory.str())) {
+ if (!llvm::sys::path::is_absolute(Path.str())) {
+ SmallString<64> Directory(WorkDir->getValue());
llvm::sys::path::append(Directory, Value);
Path.assign(Directory);
}
@@ -1064,21 +978,21 @@ void Driver::BuildInputs(const ToolChain &TC, const DerivedArgList &Args,
} else
Inputs.push_back(std::make_pair(Ty, A));
- } else if (A->getOption().isLinkerInput()) {
+ } else if (A->getOption().hasFlag(options::LinkerInput)) {
// Just treat as object type, we could make a special type for this if
// necessary.
Inputs.push_back(std::make_pair(types::TY_Object, A));
} else if (A->getOption().matches(options::OPT_x)) {
InputTypeArg = A;
- InputType = types::lookupTypeForTypeSpecifier(A->getValue(Args));
+ InputType = types::lookupTypeForTypeSpecifier(A->getValue());
A->claim();
// Follow gcc behavior and treat as linker input for invalid -x
    // options. It's not clear why we shouldn't just revert to unknown; but
// this isn't very important, we might as well be bug compatible.
if (!InputType) {
- Diag(clang::diag::err_drv_unknown_language) << A->getValue(Args);
+ Diag(clang::diag::err_drv_unknown_language) << A->getValue();
InputType = types::TY_Object;
}
}
@@ -1301,7 +1215,7 @@ void Driver::BuildJobs(Compilation &C) const {
const char *LinkingOutput = 0;
if (isa<LipoJobAction>(A)) {
if (FinalOutput)
- LinkingOutput = FinalOutput->getValue(C.getArgs());
+ LinkingOutput = FinalOutput->getValue();
else
LinkingOutput = DefaultImageName.c_str();
}
@@ -1331,13 +1245,13 @@ void Driver::BuildJobs(Compilation &C) const {
// DiagnosticsEngine, so that extra values, position, and so on could be
// printed.
if (!A->isClaimed()) {
- if (A->getOption().hasNoArgumentUnused())
+ if (A->getOption().hasFlag(options::NoArgumentUnused))
continue;
// Suppress the warning automatically if this is just a flag, and it is an
// instance of an argument we already claimed.
const Option &Opt = A->getOption();
- if (isa<FlagOption>(Opt)) {
+ if (Opt.getKind() == Option::FlagClass) {
bool DuplicateClaimed = false;
for (arg_iterator it = C.getArgs().filtered_begin(&Opt),
@@ -1392,6 +1306,7 @@ static const Tool &SelectToolForJob(Compilation &C, const ToolChain *TC,
!C.getArgs().hasArg(options::OPT_no_integrated_cpp) &&
!C.getArgs().hasArg(options::OPT_traditional_cpp) &&
!C.getArgs().hasArg(options::OPT_save_temps) &&
+ !C.getArgs().hasArg(options::OPT_rewrite_objc) &&
ToolForJob->hasIntegratedCPP())
Inputs = &(*Inputs)[0]->getInputs();
@@ -1413,7 +1328,7 @@ void Driver::BuildJobsForAction(Compilation &C,
const Arg &Input = IA->getInputArg();
Input.claim();
if (Input.getOption().matches(options::OPT_INPUT)) {
- const char *Name = Input.getValue(C.getArgs());
+ const char *Name = Input.getValue();
Result = InputInfo(Name, A->getType(), Name);
} else
Result = InputInfo(&Input, A->getType(), "");
@@ -1502,7 +1417,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
if (AtTopLevel && !isa<DsymutilJobAction>(JA) &&
!isa<VerifyJobAction>(JA)) {
if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
- return C.addResultFile(FinalOutput->getValue(C.getArgs()));
+ return C.addResultFile(FinalOutput->getValue());
}
// Default to writing to stdout?
@@ -1580,7 +1495,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
// Respect a limited subset of the '-Bprefix' functionality in GCC by
- // attempting to use this prefix when lokup up program paths.
+ // attempting to use this prefix when looking for file paths.
for (Driver::prefix_list::const_iterator it = PrefixDirs.begin(),
ie = PrefixDirs.end(); it != ie; ++it) {
std::string Dir(*it);
@@ -1619,26 +1534,26 @@ std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
return Name;
}
-static bool isPathExecutable(llvm::sys::Path &P, bool WantFile) {
- bool Exists;
- return (WantFile ? !llvm::sys::fs::exists(P.str(), Exists) && Exists
- : P.canExecute());
-}
-
-std::string Driver::GetProgramPath(const char *Name, const ToolChain &TC,
- bool WantFile) const {
+std::string Driver::GetProgramPath(const char *Name,
+ const ToolChain &TC) const {
// FIXME: Needs a better variable than DefaultTargetTriple
std::string TargetSpecificExecutable(DefaultTargetTriple + "-" + Name);
// Respect a limited subset of the '-Bprefix' functionality in GCC by
- // attempting to use this prefix when lokup up program paths.
+ // attempting to use this prefix when looking for program paths.
for (Driver::prefix_list::const_iterator it = PrefixDirs.begin(),
ie = PrefixDirs.end(); it != ie; ++it) {
- llvm::sys::Path P(*it);
- P.appendComponent(TargetSpecificExecutable);
- if (isPathExecutable(P, WantFile)) return P.str();
- P.eraseComponent();
- P.appendComponent(Name);
- if (isPathExecutable(P, WantFile)) return P.str();
+ bool IsDirectory;
+ if (!llvm::sys::fs::is_directory(*it, IsDirectory) && IsDirectory) {
+ llvm::sys::Path P(*it);
+ P.appendComponent(TargetSpecificExecutable);
+ if (P.canExecute()) return P.str();
+ P.eraseComponent();
+ P.appendComponent(Name);
+ if (P.canExecute()) return P.str();
+ } else {
+ llvm::sys::Path P(*it + Name);
+ if (P.canExecute()) return P.str();
+ }
}
const ToolChain::path_list &List = TC.getProgramPaths();
@@ -1646,10 +1561,10 @@ std::string Driver::GetProgramPath(const char *Name, const ToolChain &TC,
it = List.begin(), ie = List.end(); it != ie; ++it) {
llvm::sys::Path P(*it);
P.appendComponent(TargetSpecificExecutable);
- if (isPathExecutable(P, WantFile)) return P.str();
+ if (P.canExecute()) return P.str();
P.eraseComponent();
P.appendComponent(Name);
- if (isPathExecutable(P, WantFile)) return P.str();
+ if (P.canExecute()) return P.str();
}
// If all else failed, search the path.
@@ -1701,7 +1616,7 @@ static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
StringRef DarwinArchName) {
// FIXME: Already done in Compilation *Driver::BuildCompilation
if (const Arg *A = Args.getLastArg(options::OPT_target))
- DefaultTargetTriple = A->getValue(Args);
+ DefaultTargetTriple = A->getValue();
llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
@@ -1710,14 +1625,14 @@ static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
    // If an explicit Darwin arch name is given, that trumps all.
if (!DarwinArchName.empty()) {
Target.setArch(
- llvm::Triple::getArchTypeForDarwinArchName(DarwinArchName));
+ tools::darwin::getArchTypeForDarwinArchName(DarwinArchName));
return Target;
}
// Handle the Darwin '-arch' flag.
if (Arg *A = Args.getLastArg(options::OPT_arch)) {
llvm::Triple::ArchType DarwinArch
- = llvm::Triple::getArchTypeForDarwinArchName(A->getValue(Args));
+ = tools::darwin::getArchTypeForDarwinArchName(A->getValue());
if (DarwinArch != llvm::Triple::UnknownArch)
Target.setArch(DarwinArch);
}
@@ -1820,37 +1735,14 @@ bool Driver::ShouldUseClangCompiler(const Compilation &C, const JobAction &JA,
const llvm::Triple &Triple) const {
// Check if user requested no clang, or clang doesn't understand this type (we
// only handle single inputs for now).
- if (!CCCUseClang || JA.size() != 1 ||
+ if (JA.size() != 1 ||
!types::isAcceptedByClang((*JA.begin())->getType()))
return false;
// Otherwise make sure this is an action clang understands.
- if (isa<PreprocessJobAction>(JA)) {
- if (!CCCUseClangCPP) {
- Diag(clang::diag::warn_drv_not_using_clang_cpp);
- return false;
- }
- } else if (!isa<PrecompileJobAction>(JA) && !isa<CompileJobAction>(JA))
- return false;
-
- // Use clang for C++?
- if (!CCCUseClangCXX && types::isCXX((*JA.begin())->getType())) {
- Diag(clang::diag::warn_drv_not_using_clang_cxx);
+ if (!isa<PreprocessJobAction>(JA) && !isa<PrecompileJobAction>(JA) &&
+ !isa<CompileJobAction>(JA))
return false;
- }
-
- // Always use clang for precompiling, AST generation, and rewriting,
- // regardless of archs.
- if (isa<PrecompileJobAction>(JA) ||
- types::isOnlyAcceptedByClang(JA.getType()))
- return true;
-
- // Finally, don't use clang if this isn't one of the user specified archs to
- // build.
- if (!CCCClangArchs.empty() && !CCCClangArchs.count(Triple.getArch())) {
- Diag(clang::diag::warn_drv_not_using_clang_arch) << Triple.getArchName();
- return false;
- }
return true;
}
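
Among the Driver.cpp changes above, the -working-directory hunk fixes an inverted test: the old code prepended the working directory only when that directory was absolute, whereas the corrected code prepends it only when the input path itself is relative. A minimal POSIX-flavored sketch of the corrected logic (helper names are hypothetical):

#include <iostream>
#include <string>

static bool isAbsolute(const std::string &P) {
  return !P.empty() && P[0] == '/'; // POSIX-only, for brevity
}

// Prepend WorkDir only when the input itself is relative.
static std::string resolveInput(const std::string &Input,
                                const std::string &WorkDir) {
  if (isAbsolute(Input) || WorkDir.empty())
    return Input;
  return WorkDir + "/" + Input;
}

int main() {
  std::cout << resolveInput("main.c", "/src/proj") << '\n';      // /src/proj/main.c
  std::cout << resolveInput("/abs/main.c", "/src/proj") << '\n'; // /abs/main.c
}
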
diff --git a/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
index 715819d..3925b8a 100644
--- a/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
@@ -14,11 +14,19 @@
using namespace clang::driver;
using namespace clang::driver::options;
+#define PREFIX(NAME, VALUE) const char *const NAME[] = VALUE;
+#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR)
+#include "clang/Driver/Options.inc"
+#undef OPTION
+#undef PREFIX
+
static const OptTable::Info InfoTable[] = {
-#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+#define PREFIX(NAME, VALUE)
+#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
HELPTEXT, METAVAR) \
- { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
- OPT_##GROUP, OPT_##ALIAS },
+ { PREFIX, NAME, HELPTEXT, METAVAR, OPT_##ID, Option::KIND##Class, PARAM, \
+ FLAGS, OPT_##GROUP, OPT_##ALIAS },
#include "clang/Driver/Options.inc"
};
diff --git a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
index a3e38b2..6e7b695 100644
--- a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
@@ -11,6 +11,7 @@
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Option.h"
+#include "clang/Driver/Options.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@@ -54,6 +55,13 @@ static inline bool operator<(const OptTable::Info &A, const OptTable::Info &B) {
if (int N = StrCmpOptionName(A.Name, B.Name))
return N == -1;
+ for (const char * const *APre = A.Prefixes,
+ * const *BPre = B.Prefixes;
+ *APre != 0 && *BPre != 0; ++APre, ++BPre) {
+ if (int N = StrCmpOptionName(*APre, *BPre))
+ return N == -1;
+ }
+
// Names are the same, check that classes are in order; exactly one
// should be joined, and it should succeed the other.
assert(((A.Kind == Option::JoinedClass) ^ (B.Kind == Option::JoinedClass)) &&
@@ -78,23 +86,24 @@ OptSpecifier::OptSpecifier(const Option *Opt) : ID(Opt->getID()) {}
//
OptTable::OptTable(const Info *_OptionInfos, unsigned _NumOptionInfos)
- : OptionInfos(_OptionInfos), NumOptionInfos(_NumOptionInfos),
- Options(new Option*[NumOptionInfos]),
- TheInputOption(0), TheUnknownOption(0), FirstSearchableIndex(0)
+ : OptionInfos(_OptionInfos),
+ NumOptionInfos(_NumOptionInfos),
+ TheInputOptionID(0),
+ TheUnknownOptionID(0),
+ FirstSearchableIndex(0)
{
// Explicitly zero initialize the error to work around a bug in array
// value-initialization on MinGW with gcc 4.3.5.
- memset(Options, 0, sizeof(*Options) * NumOptionInfos);
// Find start of normal options.
for (unsigned i = 0, e = getNumOptions(); i != e; ++i) {
unsigned Kind = getInfo(i + 1).Kind;
if (Kind == Option::InputClass) {
- assert(!TheInputOption && "Cannot have multiple input options!");
- TheInputOption = getOption(i + 1);
+ assert(!TheInputOptionID && "Cannot have multiple input options!");
+ TheInputOptionID = getInfo(i + 1).ID;
} else if (Kind == Option::UnknownClass) {
- assert(!TheUnknownOption && "Cannot have multiple input options!");
- TheUnknownOption = getOption(i + 1);
+ assert(!TheUnknownOptionID && "Cannot have multiple unknown options!");
+ TheUnknownOptionID = getInfo(i + 1).ID;
} else if (Kind != Option::GroupClass) {
FirstSearchableIndex = i;
break;
@@ -115,91 +124,80 @@ OptTable::OptTable(const Info *_OptionInfos, unsigned _NumOptionInfos)
// Check that options are in order.
for (unsigned i = FirstSearchableIndex+1, e = getNumOptions(); i != e; ++i) {
if (!(getInfo(i) < getInfo(i + 1))) {
- getOption(i)->dump();
- getOption(i + 1)->dump();
+ getOption(i).dump();
+ getOption(i + 1).dump();
llvm_unreachable("Options are not in order!");
}
}
#endif
+
+ // Build prefixes.
+ for (unsigned i = FirstSearchableIndex+1, e = getNumOptions(); i != e; ++i) {
+ if (const char *const *P = getInfo(i).Prefixes) {
+ for (; *P != 0; ++P) {
+ PrefixesUnion.insert(*P);
+ }
+ }
+ }
+
+ // Build prefix chars.
+ for (llvm::StringSet<>::const_iterator I = PrefixesUnion.begin(),
+ E = PrefixesUnion.end(); I != E; ++I) {
+ StringRef Prefix = I->getKey();
+ for (StringRef::const_iterator C = Prefix.begin(), CE = Prefix.end();
+ C != CE; ++C)
+ if (std::find(PrefixChars.begin(), PrefixChars.end(), *C)
+ == PrefixChars.end())
+ PrefixChars.push_back(*C);
+ }
}
OptTable::~OptTable() {
- for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
- delete Options[i];
- delete[] Options;
}
-Option *OptTable::CreateOption(unsigned id) const {
- const Info &info = getInfo(id);
- const OptionGroup *Group =
- cast_or_null<OptionGroup>(getOption(info.GroupID));
- const Option *Alias = getOption(info.AliasID);
-
- Option *Opt = 0;
- switch (info.Kind) {
- case Option::InputClass:
- Opt = new InputOption(id); break;
- case Option::UnknownClass:
- Opt = new UnknownOption(id); break;
- case Option::GroupClass:
- Opt = new OptionGroup(id, info.Name, Group); break;
- case Option::FlagClass:
- Opt = new FlagOption(id, info.Name, Group, Alias); break;
- case Option::JoinedClass:
- Opt = new JoinedOption(id, info.Name, Group, Alias); break;
- case Option::SeparateClass:
- Opt = new SeparateOption(id, info.Name, Group, Alias); break;
- case Option::CommaJoinedClass:
- Opt = new CommaJoinedOption(id, info.Name, Group, Alias); break;
- case Option::MultiArgClass:
- Opt = new MultiArgOption(id, info.Name, Group, Alias, info.Param); break;
- case Option::JoinedOrSeparateClass:
- Opt = new JoinedOrSeparateOption(id, info.Name, Group, Alias); break;
- case Option::JoinedAndSeparateClass:
- Opt = new JoinedAndSeparateOption(id, info.Name, Group, Alias); break;
- }
+const Option OptTable::getOption(OptSpecifier Opt) const {
+ unsigned id = Opt.getID();
+ if (id == 0)
+ return Option(0, 0);
+ assert((unsigned) (id - 1) < getNumOptions() && "Invalid ID.");
+ return Option(&getInfo(id), this);
+}
- if (info.Flags & DriverOption)
- Opt->setDriverOption(true);
- if (info.Flags & LinkerInput)
- Opt->setLinkerInput(true);
- if (info.Flags & NoArgumentUnused)
- Opt->setNoArgumentUnused(true);
- if (info.Flags & NoForward)
- Opt->setNoForward(true);
- if (info.Flags & RenderAsInput)
- Opt->setNoOptAsInput(true);
- if (info.Flags & RenderJoined) {
- assert((info.Kind == Option::JoinedOrSeparateClass ||
- info.Kind == Option::SeparateClass) && "Invalid option.");
- Opt->setRenderStyle(Option::RenderJoinedStyle);
- }
- if (info.Flags & RenderSeparate) {
- assert((info.Kind == Option::JoinedOrSeparateClass ||
- info.Kind == Option::JoinedClass) && "Invalid option.");
- Opt->setRenderStyle(Option::RenderSeparateStyle);
- }
- if (info.Flags & Unsupported)
- Opt->setUnsupported(true);
- if (info.Flags & CC1Option)
- Opt->setIsCC1Option(true);
+static bool isInput(const llvm::StringSet<> &Prefixes, StringRef Arg) {
+ if (Arg == "-")
+ return true;
+ for (llvm::StringSet<>::const_iterator I = Prefixes.begin(),
+ E = Prefixes.end(); I != E; ++I)
+ if (Arg.startswith(I->getKey()))
+ return false;
+ return true;
+}
- return Opt;
+/// \returns Matched size. 0 means no match.
+static unsigned matchOption(const OptTable::Info *I, StringRef Str) {
+ for (const char * const *Pre = I->Prefixes; *Pre != 0; ++Pre) {
+ StringRef Prefix(*Pre);
+ if (Str.startswith(Prefix) && Str.substr(Prefix.size()).startswith(I->Name))
+ return Prefix.size() + StringRef(I->Name).size();
+ }
+ return 0;
}
Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index) const {
unsigned Prev = Index;
const char *Str = Args.getArgString(Index);
- // Anything that doesn't start with '-' is an input, as is '-' itself.
- if (Str[0] != '-' || Str[1] == '\0')
- return new Arg(TheInputOption, Index++, Str);
+ // Anything that doesn't start with PrefixesUnion is an input, as is '-'
+ // itself.
+ if (isInput(PrefixesUnion, Str))
+ return new Arg(getOption(TheInputOptionID), Str, Index++, Str);
const Info *Start = OptionInfos + FirstSearchableIndex;
const Info *End = OptionInfos + getNumOptions();
+ StringRef Name = StringRef(Str).ltrim(PrefixChars);
// Search for the first next option which could be a prefix.
- Start = std::lower_bound(Start, End, Str);
+ Start = std::lower_bound(Start, End, Name.data());
// Options are stored in sorted order, with '\0' at the end of the
// alphabet. Since the only options which can accept a string must
@@ -210,15 +208,16 @@ Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index) const {
// blanking on the simplest way to make it fast. We can solve this
// problem when we move to TableGen.
for (; Start != End; ++Start) {
+ unsigned ArgSize = 0;
// Scan for first option which is a proper prefix.
for (; Start != End; ++Start)
- if (memcmp(Str, Start->Name, strlen(Start->Name)) == 0)
+ if ((ArgSize = matchOption(Start, Str)))
break;
if (Start == End)
break;
// See if this option matches.
- if (Arg *A = getOption(Start - OptionInfos + 1)->accept(Args, Index))
+ if (Arg *A = Option(Start, this).accept(Args, Index, ArgSize))
return A;
// Otherwise, see if this argument was missing values.
@@ -226,7 +225,7 @@ Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index) const {
return 0;
}
- return new Arg(TheUnknownOption, Index++, Str);
+ return new Arg(getOption(TheUnknownOptionID), Str, Index++, Str);
}
InputArgList *OptTable::ParseArgs(const char* const *ArgBegin,
@@ -266,10 +265,11 @@ InputArgList *OptTable::ParseArgs(const char* const *ArgBegin,
}
static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
- std::string Name = Opts.getOptionName(Id);
+ const Option O = Opts.getOption(Id);
+ std::string Name = O.getPrefixedName();
// Add metavar, if used.
- switch (Opts.getOptionKind(Id)) {
+ switch (O.getKind()) {
case Option::GroupClass: case Option::InputClass: case Option::UnknownClass:
llvm_unreachable("Invalid option with help text.");
@@ -346,7 +346,8 @@ static const char *getOptionHelpGroup(const OptTable &Opts, OptSpecifier Id) {
}
void OptTable::PrintHelp(raw_ostream &OS, const char *Name,
- const char *Title, bool ShowHidden) const {
+ const char *Title, unsigned short FlagsToInclude,
+ unsigned short FlagsToExclude) const {
OS << "OVERVIEW: " << Title << "\n";
OS << '\n';
OS << "USAGE: " << Name << " [options] <inputs>\n";
@@ -365,7 +366,8 @@ void OptTable::PrintHelp(raw_ostream &OS, const char *Name,
if (getOptionKind(Id) == Option::GroupClass)
continue;
- if (!ShowHidden && isOptionHelpHidden(Id))
+ if ((FlagsToInclude && !(getInfo(Id).Flags & FlagsToInclude)) ||
+ getInfo(Id).Flags & FlagsToExclude)
continue;
if (const char *Text = getOptionHelpText(Id)) {
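
The core of the OptTable.cpp rewrite is matchOption: each Info row now carries a null-terminated prefix list, and matching returns how many characters of the argument were consumed (0 meaning no match), a length that accept() later uses to locate joined values. A self-contained sketch of that matcher:

#include <cstring>
#include <iostream>

struct Info {
  const char *const *Prefixes; // null-terminated list, e.g. {"-", "--", 0}
  const char *Name;
};

// Returns the number of characters of Str consumed by prefix+name,
// or 0 if no prefix matches.
static unsigned matchOption(const Info &I, const char *Str) {
  for (const char *const *Pre = I.Prefixes; *Pre != 0; ++Pre) {
    std::size_t PreLen = std::strlen(*Pre);
    std::size_t NameLen = std::strlen(I.Name);
    if (std::strncmp(Str, *Pre, PreLen) == 0 &&
        std::strncmp(Str + PreLen, I.Name, NameLen) == 0)
      return static_cast<unsigned>(PreLen + NameLen);
  }
  return 0;
}

int main() {
  static const char *const Prefixes[] = {"-", "--", 0};
  const Info Help = {Prefixes, "help"};
  std::cout << matchOption(Help, "--help") << '\n'; // 6
  std::cout << matchOption(Help, "-help") << '\n';  // 5
  std::cout << matchOption(Help, "-o") << '\n';     // 0
}

The companion isInput() check above is the inverse: an argument that starts with none of the known prefixes (or is exactly "-") is treated as an input rather than an option.
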
diff --git a/contrib/llvm/tools/clang/lib/Driver/Option.cpp b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
index 03360ea..9a34df5 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Option.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
@@ -13,46 +13,20 @@
#include "clang/Driver/ArgList.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/Twine.h"
#include <cassert>
#include <algorithm>
using namespace clang::driver;
-Option::Option(OptionClass _Kind, OptSpecifier _ID, const char *_Name,
- const OptionGroup *_Group, const Option *_Alias)
- : Kind(_Kind), ID(_ID.getID()), Name(_Name), Group(_Group), Alias(_Alias),
- Unsupported(false), LinkerInput(false), NoOptAsInput(false),
- DriverOption(false), NoArgumentUnused(false), NoForward(false) {
+Option::Option(const OptTable::Info *info, const OptTable *owner)
+ : Info(info), Owner(owner) {
// Multi-level aliases are not supported, and alias options cannot
// have groups. This just simplifies option tracking, it is not an
// inherent limitation.
- assert((!Alias || (!Alias->Alias && !Group)) &&
+ assert((!Info || !getAlias().isValid() || (!getAlias().getAlias().isValid() &&
+ !getGroup().isValid())) &&
"Multi-level aliases and aliases with groups are unsupported.");
-
- // Initialize rendering options based on the class.
- switch (Kind) {
- case GroupClass:
- case InputClass:
- case UnknownClass:
- RenderStyle = RenderValuesStyle;
- break;
-
- case JoinedClass:
- case JoinedAndSeparateClass:
- RenderStyle = RenderJoinedStyle;
- break;
-
- case CommaJoinedClass:
- RenderStyle = RenderCommaJoinedStyle;
- break;
-
- case FlagClass:
- case SeparateClass:
- case MultiArgClass:
- case JoinedOrSeparateClass:
- RenderStyle = RenderSeparateStyle;
- break;
- }
}
Option::~Option() {
@@ -60,7 +34,7 @@ Option::~Option() {
void Option::dump() const {
llvm::errs() << "<";
- switch (Kind) {
+ switch (getKind()) {
#define P(N) case N: llvm::errs() << #N; break
P(GroupClass);
P(InputClass);
@@ -75,206 +49,153 @@ void Option::dump() const {
#undef P
}
- llvm::errs() << " Name:\"" << Name << '"';
+ llvm::errs() << " Prefixes:[";
+ for (const char * const *Pre = Info->Prefixes; *Pre != 0; ++Pre) {
+ llvm::errs() << '"' << *Pre << (*(Pre + 1) == 0 ? "\"" : "\", ");
+ }
+ llvm::errs() << ']';
- if (Group) {
+ llvm::errs() << " Name:\"" << getName() << '"';
+
+ const Option Group = getGroup();
+ if (Group.isValid()) {
llvm::errs() << " Group:";
- Group->dump();
+ Group.dump();
}
- if (Alias) {
+ const Option Alias = getAlias();
+ if (Alias.isValid()) {
llvm::errs() << " Alias:";
- Alias->dump();
+ Alias.dump();
}
- if (const MultiArgOption *MOA = dyn_cast<MultiArgOption>(this))
- llvm::errs() << " NumArgs:" << MOA->getNumArgs();
+ if (getKind() == MultiArgClass)
+ llvm::errs() << " NumArgs:" << getNumArgs();
llvm::errs() << ">\n";
}
bool Option::matches(OptSpecifier Opt) const {
// Aliases are never considered in matching, look through them.
- if (Alias)
- return Alias->matches(Opt);
+ const Option Alias = getAlias();
+ if (Alias.isValid())
+ return Alias.matches(Opt);
// Check exact match.
- if (ID == Opt)
+ if (getID() == Opt.getID())
return true;
- if (Group)
- return Group->matches(Opt);
+ const Option Group = getGroup();
+ if (Group.isValid())
+ return Group.matches(Opt);
return false;
}
-OptionGroup::OptionGroup(OptSpecifier ID, const char *Name,
- const OptionGroup *Group)
- : Option(Option::GroupClass, ID, Name, Group, 0) {
-}
-
-Arg *OptionGroup::accept(const ArgList &Args, unsigned &Index) const {
- llvm_unreachable("accept() should never be called on an OptionGroup");
-}
-
-InputOption::InputOption(OptSpecifier ID)
- : Option(Option::InputClass, ID, "<input>", 0, 0) {
-}
-
-Arg *InputOption::accept(const ArgList &Args, unsigned &Index) const {
- llvm_unreachable("accept() should never be called on an InputOption");
-}
-
-UnknownOption::UnknownOption(OptSpecifier ID)
- : Option(Option::UnknownClass, ID, "<unknown>", 0, 0) {
-}
-
-Arg *UnknownOption::accept(const ArgList &Args, unsigned &Index) const {
- llvm_unreachable("accept() should never be called on an UnknownOption");
-}
-
-FlagOption::FlagOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias)
- : Option(Option::FlagClass, ID, Name, Group, Alias) {
-}
-
-Arg *FlagOption::accept(const ArgList &Args, unsigned &Index) const {
- // Matches iff this is an exact match.
- // FIXME: Avoid strlen.
- if (getName().size() != strlen(Args.getArgString(Index)))
- return 0;
-
- return new Arg(getUnaliasedOption(), Index++);
-}
-
-JoinedOption::JoinedOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias)
- : Option(Option::JoinedClass, ID, Name, Group, Alias) {
-}
-
-Arg *JoinedOption::accept(const ArgList &Args, unsigned &Index) const {
- // Always matches.
- const char *Value = Args.getArgString(Index) + getName().size();
- return new Arg(getUnaliasedOption(), Index++, Value);
-}
-
-CommaJoinedOption::CommaJoinedOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group,
- const Option *Alias)
- : Option(Option::CommaJoinedClass, ID, Name, Group, Alias) {
-}
-
-Arg *CommaJoinedOption::accept(const ArgList &Args,
- unsigned &Index) const {
- // Always matches.
- const char *Str = Args.getArgString(Index) + getName().size();
- Arg *A = new Arg(getUnaliasedOption(), Index++);
+Arg *Option::accept(const ArgList &Args,
+ unsigned &Index,
+ unsigned ArgSize) const {
+ const Option &UnaliasedOption = getUnaliasedOption();
+ StringRef Spelling;
+ // If the option was an alias, get the spelling from the unaliased one.
+ if (getID() == UnaliasedOption.getID()) {
+ Spelling = StringRef(Args.getArgString(Index), ArgSize);
+ } else {
+ Spelling = Args.MakeArgString(Twine(UnaliasedOption.getPrefix()) +
+ Twine(UnaliasedOption.getName()));
+ }
- // Parse out the comma separated values.
- const char *Prev = Str;
- for (;; ++Str) {
- char c = *Str;
+ switch (getKind()) {
+ case FlagClass:
+ if (ArgSize != strlen(Args.getArgString(Index)))
+ return 0;
- if (!c || c == ',') {
- if (Prev != Str) {
- char *Value = new char[Str - Prev + 1];
- memcpy(Value, Prev, Str - Prev);
- Value[Str - Prev] = '\0';
- A->getValues().push_back(Value);
+ return new Arg(UnaliasedOption, Spelling, Index++);
+ case JoinedClass: {
+ const char *Value = Args.getArgString(Index) + ArgSize;
+ return new Arg(UnaliasedOption, Spelling, Index++, Value);
+ }
+ case CommaJoinedClass: {
+ // Always matches.
+ const char *Str = Args.getArgString(Index) + ArgSize;
+ Arg *A = new Arg(UnaliasedOption, Spelling, Index++);
+
+ // Parse out the comma separated values.
+ const char *Prev = Str;
+ for (;; ++Str) {
+ char c = *Str;
+
+ if (!c || c == ',') {
+ if (Prev != Str) {
+ char *Value = new char[Str - Prev + 1];
+ memcpy(Value, Prev, Str - Prev);
+ Value[Str - Prev] = '\0';
+ A->getValues().push_back(Value);
+ }
+
+ if (!c)
+ break;
+
+ Prev = Str + 1;
}
-
- if (!c)
- break;
-
- Prev = Str + 1;
}
- }
- A->setOwnsValues(true);
-
- return A;
-}
-
-SeparateOption::SeparateOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias)
- : Option(Option::SeparateClass, ID, Name, Group, Alias) {
-}
-
-Arg *SeparateOption::accept(const ArgList &Args, unsigned &Index) const {
- // Matches iff this is an exact match.
- // FIXME: Avoid strlen.
- if (getName().size() != strlen(Args.getArgString(Index)))
- return 0;
-
- Index += 2;
- if (Index > Args.getNumInputArgStrings())
- return 0;
-
- return new Arg(getUnaliasedOption(), Index - 2, Args.getArgString(Index - 1));
-}
-
-MultiArgOption::MultiArgOption(OptSpecifier ID, const char *Name,
- const OptionGroup *Group, const Option *Alias,
- unsigned _NumArgs)
- : Option(Option::MultiArgClass, ID, Name, Group, Alias), NumArgs(_NumArgs) {
- assert(NumArgs > 1 && "Invalid MultiArgOption!");
-}
-
-Arg *MultiArgOption::accept(const ArgList &Args, unsigned &Index) const {
- // Matches iff this is an exact match.
- // FIXME: Avoid strlen.
- if (getName().size() != strlen(Args.getArgString(Index)))
- return 0;
-
- Index += 1 + NumArgs;
- if (Index > Args.getNumInputArgStrings())
- return 0;
+ A->setOwnsValues(true);
- Arg *A = new Arg(getUnaliasedOption(), Index - 1 - NumArgs,
- Args.getArgString(Index - NumArgs));
- for (unsigned i = 1; i != NumArgs; ++i)
- A->getValues().push_back(Args.getArgString(Index - NumArgs + i));
- return A;
-}
-
-JoinedOrSeparateOption::JoinedOrSeparateOption(OptSpecifier ID,
- const char *Name,
- const OptionGroup *Group,
- const Option *Alias)
- : Option(Option::JoinedOrSeparateClass, ID, Name, Group, Alias) {
-}
-
-Arg *JoinedOrSeparateOption::accept(const ArgList &Args,
- unsigned &Index) const {
- // If this is not an exact match, it is a joined arg.
- // FIXME: Avoid strlen.
- if (getName().size() != strlen(Args.getArgString(Index))) {
- const char *Value = Args.getArgString(Index) + getName().size();
- return new Arg(this, Index++, Value);
+ return A;
}
+ case SeparateClass:
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (ArgSize != strlen(Args.getArgString(Index)))
+ return 0;
+
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new Arg(UnaliasedOption, Spelling,
+ Index - 2, Args.getArgString(Index - 1));
+ case MultiArgClass: {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (ArgSize != strlen(Args.getArgString(Index)))
+ return 0;
+
+ Index += 1 + getNumArgs();
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ Arg *A = new Arg(UnaliasedOption, Spelling, Index - 1 - getNumArgs(),
+ Args.getArgString(Index - getNumArgs()));
+ for (unsigned i = 1; i != getNumArgs(); ++i)
+ A->getValues().push_back(Args.getArgString(Index - getNumArgs() + i));
+ return A;
+ }
+ case JoinedOrSeparateClass: {
+ // If this is not an exact match, it is a joined arg.
+ // FIXME: Avoid strlen.
+ if (ArgSize != strlen(Args.getArgString(Index))) {
+ const char *Value = Args.getArgString(Index) + ArgSize;
+ return new Arg(*this, Spelling, Index++, Value);
+ }
- // Otherwise it must be separate.
- Index += 2;
- if (Index > Args.getNumInputArgStrings())
- return 0;
-
- return new Arg(getUnaliasedOption(), Index - 2, Args.getArgString(Index - 1));
-}
-
-JoinedAndSeparateOption::JoinedAndSeparateOption(OptSpecifier ID,
- const char *Name,
- const OptionGroup *Group,
- const Option *Alias)
- : Option(Option::JoinedAndSeparateClass, ID, Name, Group, Alias) {
-}
-
-Arg *JoinedAndSeparateOption::accept(const ArgList &Args,
- unsigned &Index) const {
- // Always matches.
-
- Index += 2;
- if (Index > Args.getNumInputArgStrings())
- return 0;
+ // Otherwise it must be separate.
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
- return new Arg(getUnaliasedOption(), Index - 2,
- Args.getArgString(Index-2)+getName().size(),
- Args.getArgString(Index-1));
+ return new Arg(UnaliasedOption, Spelling,
+ Index - 2, Args.getArgString(Index - 1));
+ }
+ case JoinedAndSeparateClass:
+ // Always matches.
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new Arg(UnaliasedOption, Spelling, Index - 2,
+ Args.getArgString(Index - 2) + ArgSize,
+ Args.getArgString(Index - 1));
+ default:
+ llvm_unreachable("Invalid option kind!");
+ }
}
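
Option.cpp collapses the per-kind Option subclasses into a single accept() switch keyed on the option class, with the matched ArgSize deciding between joined and separate forms. A sketch of just the JoinedOrSeparate rule under that reading (all names hypothetical):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct Parsed { std::string Spelling, Value; };

// ArgSize is the length matched for prefix+name (e.g. 2 for "-o").
static bool acceptJoinedOrSeparate(const std::vector<std::string> &Args,
                                   std::size_t &Index, unsigned ArgSize,
                                   Parsed &Out) {
  const std::string &S = Args[Index];
  Out.Spelling = S.substr(0, ArgSize);
  if (ArgSize != S.size()) {     // joined form: "-ofoo"
    Out.Value = S.substr(ArgSize);
    ++Index;
    return true;
  }
  if (Index + 1 >= Args.size())  // separate form, but the value is missing
    return false;
  Out.Value = Args[Index + 1];   // separate form: "-o" "foo"
  Index += 2;
  return true;
}

int main() {
  const std::vector<std::string> Args = {"-ofoo", "-o", "bar"};
  std::size_t Index = 0;
  Parsed P;
  while (Index < Args.size() && acceptJoinedOrSeparate(Args, Index, 2, P))
    std::cout << P.Spelling << " -> " << P.Value << '\n';
}
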
diff --git a/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.h b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.h
new file mode 100644
index 0000000..ecb396e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.h
@@ -0,0 +1,106 @@
+//===--- SanitizerArgs.h - Arguments for sanitizer tools -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef CLANG_LIB_DRIVER_SANITIZERARGS_H_
+#define CLANG_LIB_DRIVER_SANITIZERARGS_H_
+
+#include "clang/Driver/ArgList.h"
+
+namespace clang {
+namespace driver {
+
+class SanitizerArgs {
+ /// Assign ordinals to sanitizer flags. We'll use the ordinal values as
+ /// bit positions within \c Kind.
+ enum SanitizeOrdinal {
+#define SANITIZER(NAME, ID) SO_##ID,
+#include "clang/Basic/Sanitizers.def"
+ SO_Count
+ };
+
+ /// Bugs to catch at runtime.
+ enum SanitizeKind {
+#define SANITIZER(NAME, ID) ID = 1 << SO_##ID,
+#define SANITIZER_GROUP(NAME, ID, ALIAS) ID = ALIAS,
+#include "clang/Basic/Sanitizers.def"
+ NeedsAsanRt = Address,
+ NeedsTsanRt = Thread,
+ NeedsUbsanRt = Undefined
+ };
+ unsigned Kind;
+
+ public:
+ SanitizerArgs() : Kind(0) {}
+ /// Parses the sanitizer arguments from an argument list.
+ SanitizerArgs(const Driver &D, const ArgList &Args);
+
+ bool needsAsanRt() const { return Kind & NeedsAsanRt; }
+ bool needsTsanRt() const { return Kind & NeedsTsanRt; }
+ bool needsUbsanRt() const { return Kind & NeedsUbsanRt; }
+
+ bool sanitizesVptr() const { return Kind & Vptr; }
+
+ void addArgs(const ArgList &Args, ArgStringList &CmdArgs) const {
+ if (!Kind)
+ return;
+ llvm::SmallString<256> SanitizeOpt("-fsanitize=");
+#define SANITIZER(NAME, ID) \
+ if (Kind & ID) \
+ SanitizeOpt += NAME ",";
+#include "clang/Basic/Sanitizers.def"
+ SanitizeOpt.pop_back();
+ CmdArgs.push_back(Args.MakeArgString(SanitizeOpt));
+ }
+
+ private:
+ /// Parse a single value from a -fsanitize= or -fno-sanitize= value list.
+ /// Returns a member of the \c SanitizeKind enumeration, or \c 0 if \p Value
+ /// is not known.
+ static unsigned parse(const char *Value) {
+ return llvm::StringSwitch<SanitizeKind>(Value)
+#define SANITIZER(NAME, ID) .Case(NAME, ID)
+#define SANITIZER_GROUP(NAME, ID, ALIAS) .Case(NAME, ID)
+#include "clang/Basic/Sanitizers.def"
+ .Default(SanitizeKind());
+ }
+
+ /// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any
+ /// invalid components.
+ static unsigned parse(const Driver &D, const Arg *A) {
+ unsigned Kind = 0;
+ for (unsigned I = 0, N = A->getNumValues(); I != N; ++I) {
+ if (unsigned K = parse(A->getValue(I)))
+ Kind |= K;
+ else
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << A->getValue(I);
+ }
+ return Kind;
+ }
+
+ /// Produce an argument string from argument \p A, which shows how it provides
+ /// a value in \p Mask. For instance, the argument
+ /// "-fsanitize=address,alignment" with mask \c NeedsUbsanRt would produce
+ /// "-fsanitize=alignment".
+ static std::string describeSanitizeArg(const ArgList &Args, const Arg *A,
+ unsigned Mask) {
+ if (!A->getOption().matches(options::OPT_fsanitize_EQ))
+ return A->getAsString(Args);
+
+ for (unsigned I = 0, N = A->getNumValues(); I != N; ++I)
+ if (parse(A->getValue(I)) & Mask)
+ return std::string("-fsanitize=") + A->getValue(I);
+
+ llvm_unreachable("arg didn't provide expected value");
+ }
+};
+
+} // namespace driver
+} // namespace clang
+
+#endif // CLANG_LIB_DRIVER_SANITIZERARGS_H_
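
The new SanitizerArgs.h assigns each sanitizer an ordinal via Sanitizers.def and then re-expands the same list to give each one a distinct bit, so sanitizer sets combine and test with plain bitwise operators. A minimal sketch of the ordinal-to-bitmask scheme, with a hand-written list standing in for the .def file:

#include <iostream>

// Hand-written stand-in for clang/Basic/Sanitizers.def.
#define SANITIZERS(X) X(Address) X(Thread) X(Undefined)

// Pass 1: ordinals.
enum SanitizeOrdinal {
#define SANITIZER(ID) SO_##ID,
  SANITIZERS(SANITIZER)
#undef SANITIZER
  SO_Count
};

// Pass 2: one bit per ordinal, so kinds combine with '|' and test with '&'.
enum SanitizeKind {
#define SANITIZER(ID) ID = 1u << SO_##ID,
  SANITIZERS(SANITIZER)
#undef SANITIZER
};

int main() {
  unsigned Kind = Address | Undefined; // e.g. -fsanitize=address,undefined
  std::cout << "needs asan runtime: " << ((Kind & Address) != 0) << '\n'; // 1
  std::cout << "needs tsan runtime: " << ((Kind & Thread) != 0) << '\n';  // 0
}
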
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
index 48ed044..de8ed1d 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
@@ -14,6 +14,7 @@
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Option.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
@@ -32,13 +33,32 @@ const Driver &ToolChain::getDriver() const {
return D;
}
+std::string ToolChain::getDefaultUniversalArchName() const {
+ // In universal driver terms, the arch name accepted by -arch isn't exactly
+ // the same as the ones that appear in the triple. Roughly speaking, this is
+ // an inverse of the darwin::getArchTypeForDarwinArchName() function, but the
+ // only interesting special case is powerpc.
+ switch (Triple.getArch()) {
+ case llvm::Triple::ppc:
+ return "ppc";
+ case llvm::Triple::ppc64:
+ return "ppc64";
+ default:
+ return Triple.getArchName();
+ }
+}
+
+bool ToolChain::IsUnwindTablesDefault() const {
+ return false;
+}
+
std::string ToolChain::GetFilePath(const char *Name) const {
return D.GetFilePath(Name, *this);
}
-std::string ToolChain::GetProgramPath(const char *Name, bool WantFile) const {
- return D.GetProgramPath(Name, *this, WantFile);
+std::string ToolChain::GetProgramPath(const char *Name) const {
+ return D.GetProgramPath(Name, *this);
}
types::ID ToolChain::LookupTypeForExtension(const char *Ext) const {
@@ -66,13 +86,13 @@ static const char *getARMTargetCPU(const ArgList &Args,
// FIXME: Warn on inconsistent use of -mcpu and -march.
// If we have -mcpu=, use that.
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
- return A->getValue(Args);
+ return A->getValue();
}
StringRef MArch;
if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
// Otherwise, if we have -march= choose the base CPU for that arch.
- MArch = A->getValue(Args);
+ MArch = A->getValue();
} else {
// Otherwise, use the Arch from the triple.
MArch = Triple.getArchName();
@@ -91,6 +111,8 @@ static const char *getARMTargetCPU(const ArgList &Args,
.Cases("armv6z", "armv6zk", "arm1176jzf-s")
.Case("armv6t2", "arm1156t2-s")
.Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
+ .Cases("armv7f", "armv7-f", "cortex-a9-mp")
+ .Cases("armv7s", "armv7-s", "swift")
.Cases("armv7r", "armv7-r", "cortex-r4")
.Cases("armv7m", "armv7-m", "cortex-m3")
.Case("ep9312", "ep9312")
@@ -119,10 +141,12 @@ static const char *getLLVMArchSuffixForARM(StringRef CPU) {
.Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
.Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
.Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
- .Cases("cortex-a8", "cortex-a9", "v7")
+ .Cases("cortex-a8", "cortex-a9", "cortex-a15", "v7")
.Case("cortex-m3", "v7m")
.Case("cortex-m4", "v7m")
.Case("cortex-m0", "v6m")
+ .Case("cortex-a9-mp", "v7f")
+ .Case("swift", "v7s")
.Default("");
}
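Both CPU tables above are llvm::StringSwitch chains: the first matching .Case/.Cases latches the result, and .Default supplies the fallback. A small sketch of the pattern, restricted to the CPUs touched by this change:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    // Sketch only: mirrors a slice of getLLVMArchSuffixForARM above.
    static const char *archSuffixForCPU(llvm::StringRef CPU) {
      return llvm::StringSwitch<const char *>(CPU)
          .Cases("cortex-a8", "cortex-a9", "cortex-a15", "v7")
          .Case("cortex-a9-mp", "v7f")   // added by this change
          .Case("swift", "v7s")          // added by this change
          .Default("");                  // unknown CPU -> empty suffix
    }

archSuffixForCPU("swift") yields "v7s"; any "v7*" suffix is what now flips ThumbDefault on Darwin in ComputeLLVMTriple, since the test was loosened from == "v7" to startswith("v7").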
@@ -142,7 +166,7 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
// FIXME: Thumb should just be another -target-feature, not in the triple.
StringRef Suffix =
getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
- bool ThumbDefault = (Suffix == "v7" && getTriple().isOSDarwin());
+ bool ThumbDefault = (Suffix.startswith("v7") && getTriple().isOSDarwin());
std::string ArchName = "arm";
// Assembly files should start in ARM mode.
@@ -180,7 +204,7 @@ ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
const ArgList &Args) const
{
if (Arg *A = Args.getLastArg(options::OPT_rtlib_EQ)) {
- StringRef Value = A->getValue(Args);
+ StringRef Value = A->getValue();
if (Value == "compiler-rt")
return ToolChain::RLT_CompilerRT;
if (Value == "libgcc")
@@ -194,7 +218,7 @@ ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
- StringRef Value = A->getValue(Args);
+ StringRef Value = A->getValue();
if (Value == "libc++")
return ToolChain::CST_Libcxx;
if (Value == "libstdc++")
@@ -273,3 +297,24 @@ void ToolChain::AddCCKextLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CmdArgs.push_back("-lcc_kext");
}
+
+bool ToolChain::AddFastMathRuntimeIfAvailable(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Check if -ffast-math or -funsafe-math is enabled.
+ Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations);
+
+ if (!A || A->getOption().getID() == options::OPT_fno_fast_math ||
+ A->getOption().getID() == options::OPT_fno_unsafe_math_optimizations)
+ return false;
+
+ // If crtfastmath.o exists add it to the arguments.
+ std::string Path = GetFilePath("crtfastmath.o");
+ if (Path == "crtfastmath.o") // Not found.
+ return false;
+
+ CmdArgs.push_back(Args.MakeArgString(Path));
+ return true;
+}
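AddFastMathRuntimeIfAvailable combines two driver idioms worth spelling out: Args.getLastArg returns whichever of the listed spellings appears last on the command line, so a later -fno-fast-math overrides an earlier -ffast-math; and GetFilePath signals "not found" by returning its argument unchanged. A hedged sketch of the flow, with TC, Args, and CmdArgs assumed in scope as in the real method:

    // Last-arg-wins: "clang -ffast-math -fno-fast-math foo.c" makes A
    // the -fno- form, so no runtime is linked.
    Arg *A = Args.getLastArg(options::OPT_ffast_math,
                             options::OPT_fno_fast_math,
                             options::OPT_funsafe_math_optimizations,
                             options::OPT_fno_unsafe_math_optimizations);

    // Sentinel convention: an unchanged name means the lookup failed.
    std::string Path = TC.GetFilePath("crtfastmath.o");
    if (Path == "crtfastmath.o")
      return false;
    CmdArgs.push_back(Args.MakeArgString(Path));  // absolute path found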
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
index 01c6623..a2ccb35 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
@@ -31,6 +31,8 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/system_error.h"
+#include "SanitizerArgs.h"
+
#include <cstdlib> // ::getenv
#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
@@ -80,17 +82,11 @@ bool Darwin::HasNativeLLVMSupport() const {
/// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
- if (isTargetIPhoneOS()) {
+ if (isTargetIPhoneOS())
return ObjCRuntime(ObjCRuntime::iOS, TargetVersion);
- } else if (TargetSimulatorVersionFromDefines != VersionTuple()) {
- return ObjCRuntime(ObjCRuntime::iOS, TargetSimulatorVersionFromDefines);
- } else {
- if (isNonFragile) {
- return ObjCRuntime(ObjCRuntime::MacOSX, TargetVersion);
- } else {
- return ObjCRuntime(ObjCRuntime::FragileMacOSX, TargetVersion);
- }
- }
+ if (isNonFragile)
+ return ObjCRuntime(ObjCRuntime::MacOSX, TargetVersion);
+ return ObjCRuntime(ObjCRuntime::FragileMacOSX, TargetVersion);
}
/// Darwin provides a blocks runtime starting in MacOS X 10.6 and iOS 3.2.
@@ -111,6 +107,9 @@ static const char *GetArmArchForMArch(StringRef Value) {
.Cases("armv7a", "armv7-a", "armv7")
.Cases("armv7r", "armv7-r", "armv7")
.Cases("armv7m", "armv7-m", "armv7")
+ .Cases("armv7f", "armv7-f", "armv7f")
+ .Cases("armv7k", "armv7-k", "armv7k")
+ .Cases("armv7s", "armv7-s", "armv7s")
.Default(0);
}
@@ -122,7 +121,10 @@ static const char *GetArmArchForMCpu(StringRef Value) {
.Case("xscale", "xscale")
.Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s",
"arm1176jzf-s", "cortex-m0", "armv6")
- .Cases("cortex-a8", "cortex-r4", "cortex-m3", "cortex-a9", "armv7")
+ .Cases("cortex-a8", "cortex-r4", "cortex-m3", "cortex-a9", "cortex-a15",
+ "armv7")
+ .Case("cortex-a9-mp", "armv7f")
+ .Case("swift", "armv7s")
.Default(0);
}
@@ -134,11 +136,11 @@ StringRef Darwin::getDarwinArchName(const ArgList &Args) const {
case llvm::Triple::thumb:
case llvm::Triple::arm: {
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
- if (const char *Arch = GetArmArchForMArch(A->getValue(Args)))
+ if (const char *Arch = GetArmArchForMArch(A->getValue()))
return Arch;
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
- if (const char *Arch = GetArmArchForMCpu(A->getValue(Args)))
+ if (const char *Arch = GetArmArchForMCpu(A->getValue()))
return Arch;
return "arm";
@@ -175,24 +177,11 @@ void Generic_ELF::anchor() {}
Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const {
Action::ActionClass Key = JA.getKind();
- bool useClang = false;
if (getDriver().ShouldUseClangCompiler(C, JA, getTriple())) {
- useClang = true;
- // Fallback to llvm-gcc for i386 kext compiles, we don't support that ABI.
- if (!getDriver().shouldForceClangUse() &&
- Inputs.size() == 1 &&
- types::isCXX(Inputs[0]->getType()) &&
- getTriple().isOSDarwin() &&
- getTriple().getArch() == llvm::Triple::x86 &&
- (C.getArgs().getLastArg(options::OPT_fapple_kext) ||
- C.getArgs().getLastArg(options::OPT_mkernel)))
- useClang = false;
- }
-
- // FIXME: This seems like a hacky way to choose clang frontend.
- if (useClang)
+ // FIXME: This seems like a hacky way to choose clang frontend.
Key = Action::AnalyzeJobClass;
+ }
bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
options::OPT_no_integrated_as,
@@ -245,30 +234,6 @@ DarwinClang::DarwinClang(const Driver &D, const llvm::Triple& Triple)
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
-
- // For fallback, we need to know how to find the GCC cc1 executables, so we
- // also add the GCC libexec paths. This is legacy code that can be removed
- // once fallback is no longer useful.
- AddGCCLibexecPath(DarwinVersion[0]);
- AddGCCLibexecPath(DarwinVersion[0] - 2);
- AddGCCLibexecPath(DarwinVersion[0] - 1);
- AddGCCLibexecPath(DarwinVersion[0] + 1);
- AddGCCLibexecPath(DarwinVersion[0] + 2);
-}
-
-void DarwinClang::AddGCCLibexecPath(unsigned darwinVersion) {
- std::string ToolChainDir = "i686-apple-darwin";
- ToolChainDir += llvm::utostr(darwinVersion);
- ToolChainDir += "/4.2.1";
-
- std::string Path = getDriver().Dir;
- Path += "/../llvm-gcc-4.2/libexec/gcc/";
- Path += ToolChainDir;
- getProgramPaths().push_back(Path);
-
- Path = "/usr/llvm-gcc-4.2/libexec/gcc/";
- Path += ToolChainDir;
- getProgramPaths().push_back(Path);
}
void DarwinClang::AddLinkARCArgs(const ArgList &Args,
@@ -287,9 +252,6 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
s += "iphonesimulator";
else if (isTargetIPhoneOS())
s += "iphoneos";
- // FIXME: Remove this once we depend fully on -mios-simulator-version-min.
- else if (TargetSimulatorVersionFromDefines != VersionTuple())
- s += "iphonesimulator";
else
s += "macosx";
s += ".a";
@@ -320,13 +282,15 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
break;
default:
getDriver().Diag(diag::err_drv_unsupported_rtlib_for_platform)
- << Args.getLastArg(options::OPT_rtlib_EQ)->getValue(Args) << "darwin";
+ << Args.getLastArg(options::OPT_rtlib_EQ)->getValue() << "darwin";
return;
}
// Darwin doesn't support real static executables, don't link any runtime
// libraries with -static.
- if (Args.hasArg(options::OPT_static))
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_fapple_kext) ||
+ Args.hasArg(options::OPT_mkernel))
return;
// Reject -static-libgcc for now, we can deal with this when and if someone
@@ -351,15 +315,16 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
}
}
+ SanitizerArgs Sanitize(getDriver(), Args);
+
// Add ASAN runtime library, if required. Dynamic libraries and bundles
// should not be linked with the runtime library.
- if (Args.hasFlag(options::OPT_faddress_sanitizer,
- options::OPT_fno_address_sanitizer, false)) {
+ if (Sanitize.needsAsanRt()) {
if (Args.hasArg(options::OPT_dynamiclib) ||
Args.hasArg(options::OPT_bundle)) return;
if (isTargetIPhoneOS()) {
getDriver().Diag(diag::err_drv_clang_unsupported_per_platform)
- << "-faddress-sanitizer";
+ << "-fsanitize=address";
} else {
AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.asan_osx.a");
@@ -410,67 +375,28 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
}
}
-static inline StringRef SimulatorVersionDefineName() {
- return "__IPHONE_OS_VERSION_MIN_REQUIRED";
-}
-
-/// \brief Parse the simulator version define:
-/// __IPHONE_OS_VERSION_MIN_REQUIRED=([0-9])([0-9][0-9])([0-9][0-9])
-// and return the grouped values as integers, e.g:
-// __IPHONE_OS_VERSION_MIN_REQUIRED=40201
-// will return Major=4, Minor=2, Micro=1.
-static bool GetVersionFromSimulatorDefine(StringRef define,
- unsigned &Major, unsigned &Minor,
- unsigned &Micro) {
- assert(define.startswith(SimulatorVersionDefineName()));
- StringRef name, version;
- llvm::tie(name, version) = define.split('=');
- if (version.empty())
- return false;
- std::string verstr = version.str();
- char *end;
- unsigned num = (unsigned) strtol(verstr.c_str(), &end, 10);
- if (*end != '\0')
- return false;
- Major = num / 10000;
- num = num % 10000;
- Minor = num / 100;
- Micro = num % 100;
- return true;
-}
-
void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
const OptTable &Opts = getDriver().getOpts();
+ // Allow the SDKROOT environment variable used by xcrun and other Xcode
+ // tools to define the default sysroot, by making it the default for
+ // isysroot.
+ if (!Args.hasArg(options::OPT_isysroot)) {
+ if (char *env = ::getenv("SDKROOT")) {
+ // We only use this value as the default if it is an absolute path and
+ // exists.
+ if (llvm::sys::path::is_absolute(env) && llvm::sys::fs::exists(env)) {
+ Args.append(Args.MakeSeparateArg(
+ 0, Opts.getOption(options::OPT_isysroot), env));
+ }
+ }
+ }
+
Arg *OSXVersion = Args.getLastArg(options::OPT_mmacosx_version_min_EQ);
Arg *iOSVersion = Args.getLastArg(options::OPT_miphoneos_version_min_EQ);
Arg *iOSSimVersion = Args.getLastArg(
options::OPT_mios_simulator_version_min_EQ);
- // FIXME: HACK! When compiling for the simulator we don't get a
- // '-miphoneos-version-min' to help us know whether there is an ARC runtime
- // or not; try to parse a __IPHONE_OS_VERSION_MIN_REQUIRED
- // define passed in command-line.
- if (!iOSVersion && !iOSSimVersion) {
- for (arg_iterator it = Args.filtered_begin(options::OPT_D),
- ie = Args.filtered_end(); it != ie; ++it) {
- StringRef define = (*it)->getValue(Args);
- if (define.startswith(SimulatorVersionDefineName())) {
- unsigned Major = 0, Minor = 0, Micro = 0;
- if (GetVersionFromSimulatorDefine(define, Major, Minor, Micro) &&
- Major < 10 && Minor < 100 && Micro < 100) {
- TargetSimulatorVersionFromDefines = VersionTuple(Major, Minor, Micro);
- }
- // When using the define to indicate the simulator, we force
- // 10.6 macosx target.
- const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
- OSXVersion = Args.MakeJoinedArg(0, O, "10.6");
- Args.append(OSXVersion);
- break;
- }
- }
- }
-
if (OSXVersion && (iOSVersion || iOSSimVersion)) {
getDriver().Diag(diag::err_drv_argument_not_allowed_with)
<< OSXVersion->getAsString(Args)
@@ -500,7 +426,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
if (iOSTarget.empty()) {
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
StringRef first, second;
- StringRef isysroot = A->getValue(Args);
+ StringRef isysroot = A->getValue();
llvm::tie(first, second) = isysroot.split(StringRef("SDKs/iPhoneOS"));
if (second != "")
iOSTarget = second.substr(0,3);
@@ -510,7 +436,8 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
// If no OSX or iOS target has been specified and we're compiling for armv7,
// go ahead and assume we're targeting iOS.
if (OSXTarget.empty() && iOSTarget.empty() &&
- getDarwinArchName(Args) == "armv7")
+ (getDarwinArchName(Args) == "armv7" ||
+ getDarwinArchName(Args) == "armv7s"))
iOSTarget = iOSVersionMin;
// Handle conflicting deployment targets
@@ -536,21 +463,21 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
}
if (!OSXTarget.empty()) {
- const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
+ const Option O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
OSXVersion = Args.MakeJoinedArg(0, O, OSXTarget);
Args.append(OSXVersion);
} else if (!iOSTarget.empty()) {
- const Option *O = Opts.getOption(options::OPT_miphoneos_version_min_EQ);
+ const Option O = Opts.getOption(options::OPT_miphoneos_version_min_EQ);
iOSVersion = Args.MakeJoinedArg(0, O, iOSTarget);
Args.append(iOSVersion);
} else if (!iOSSimTarget.empty()) {
- const Option *O = Opts.getOption(
+ const Option O = Opts.getOption(
options::OPT_mios_simulator_version_min_EQ);
iOSSimVersion = Args.MakeJoinedArg(0, O, iOSSimTarget);
Args.append(iOSSimVersion);
} else {
// Otherwise, assume we are targeting OS X.
- const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
+ const Option O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
OSXVersion = Args.MakeJoinedArg(0, O, MacosxVersionMin);
Args.append(OSXVersion);
}
@@ -568,7 +495,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
bool HadExtra;
if (OSXVersion) {
assert((!iOSVersion && !iOSSimVersion) && "Unknown target platform!");
- if (!Driver::GetReleaseVersion(OSXVersion->getValue(Args), Major, Minor,
+ if (!Driver::GetReleaseVersion(OSXVersion->getValue(), Major, Minor,
Micro, HadExtra) || HadExtra ||
Major != 10 || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
@@ -576,7 +503,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
} else {
const Arg *Version = iOSVersion ? iOSVersion : iOSSimVersion;
assert(Version && "Unknown target platform!");
- if (!Driver::GetReleaseVersion(Version->getValue(Args), Major, Minor,
+ if (!Driver::GetReleaseVersion(Version->getValue(), Major, Minor,
Micro, HadExtra) || HadExtra ||
Major >= 10 || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
@@ -614,7 +541,7 @@ void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
// Check in the sysroot first.
bool Exists;
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
- llvm::sys::Path P(A->getValue(Args));
+ llvm::sys::Path P(A->getValue());
P.appendComponent("usr");
P.appendComponent("lib");
P.appendComponent("libstdc++.dylib");
@@ -655,7 +582,14 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
llvm::sys::Path P(getDriver().ResourceDir);
P.appendComponent("lib");
P.appendComponent("darwin");
- P.appendComponent("libclang_rt.cc_kext.a");
+
+ // Use the newer cc_kext for iOS ARM on 6.0 and later.
+ if (!isTargetIPhoneOS() || isTargetIOSSimulator() ||
+ !isIPhoneOSVersionLT(6, 0)) {
+ P.appendComponent("libclang_rt.cc_kext.a");
+ } else {
+ P.appendComponent("libclang_rt.cc_kext_ios5.a");
+ }
// For now, allow missing resource libraries to support developers who may
// not have compiler-rt checked out or integrated into their build.
@@ -683,15 +617,15 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
if (A->getOption().matches(options::OPT_Xarch__)) {
// Skip this argument unless the architecture matches either the toolchain
// triple arch, or the arch being bound.
- //
- // FIXME: Canonicalize name.
- StringRef XarchArch = A->getValue(Args, 0);
- if (!(XarchArch == getArchName() ||
- (BoundArch && XarchArch == BoundArch)))
+ llvm::Triple::ArchType XarchArch =
+ tools::darwin::getArchTypeForDarwinArchName(A->getValue(0));
+ if (!(XarchArch == getArch() ||
+ (BoundArch && XarchArch ==
+ tools::darwin::getArchTypeForDarwinArchName(BoundArch))))
continue;
Arg *OriginalArg = A;
- unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(Args, 1));
+ unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
unsigned Prev = Index;
Arg *XarchArg = Opts.ParseOneArg(Args, Index);
@@ -707,7 +641,7 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
<< A->getAsString(Args);
continue;
- } else if (XarchArg->getOption().isDriverOption()) {
+ } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
<< A->getAsString(Args);
continue;
@@ -721,12 +655,12 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
// Linker input arguments require custom handling. The problem is that we
// have already constructed the phase actions, so we can not treat them as
// "input arguments".
- if (A->getOption().isLinkerInput()) {
+ if (A->getOption().hasFlag(options::LinkerInput)) {
// Convert the argument into individual Zlinker_input_args.
for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) {
DAL->AddSeparateArg(OriginalArg,
Opts.getOption(options::OPT_Zlinker_input),
- A->getValue(Args, i));
+ A->getValue(i));
}
continue;
@@ -749,7 +683,7 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
case options::OPT_dependency_file:
DAL->AddSeparateArg(A, Opts.getOption(options::OPT_MF),
- A->getValue(Args));
+ A->getValue());
break;
case options::OPT_gfull:
@@ -805,8 +739,8 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
// how the driver driver works.
if (BoundArch) {
StringRef Name = BoundArch;
- const Option *MCpu = Opts.getOption(options::OPT_mcpu_EQ);
- const Option *MArch = Opts.getOption(options::OPT_march_EQ);
+ const Option MCpu = Opts.getOption(options::OPT_mcpu_EQ);
+ const Option MArch = Opts.getOption(options::OPT_march_EQ);
// This code must be kept in sync with LLVM's getArchTypeForDarwinArch,
// which defines the list of which architectures we accept.
@@ -864,6 +798,12 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
DAL->AddJoinedArg(0, MArch, "armv6k");
else if (Name == "armv7")
DAL->AddJoinedArg(0, MArch, "armv7a");
+ else if (Name == "armv7f")
+ DAL->AddJoinedArg(0, MArch, "armv7f");
+ else if (Name == "armv7k")
+ DAL->AddJoinedArg(0, MArch, "armv7k");
+ else if (Name == "armv7s")
+ DAL->AddJoinedArg(0, MArch, "armv7s");
else
llvm_unreachable("invalid Darwin arch");
@@ -875,6 +815,25 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
if (BoundArch)
AddDeploymentTarget(*DAL);
+ // For iOS 6, undo the translation to add -static for -mkernel/-fapple-kext.
+ // FIXME: It would be far better to avoid inserting those -static arguments,
+ // but we can't check the deployment target in the translation code until
+ // it is set here.
+ if (isTargetIPhoneOS() && !isIPhoneOSVersionLT(6, 0)) {
+ for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
+ Arg *A = *it;
+ ++it;
+ if (A->getOption().getID() != options::OPT_mkernel &&
+ A->getOption().getID() != options::OPT_fapple_kext)
+ continue;
+ assert(it != ie && "unexpected argument translation");
+ A = *it;
+ assert(A->getOption().getID() == options::OPT_static &&
+ "missing expected -static argument");
+ it = DAL->getArgs().erase(it);
+ }
+ }
+
// Validate the C++ standard library choice.
CXXStdlibType Type = GetCXXStdlibType(*DAL);
if (Type == ToolChain::CST_Libcxx) {
@@ -882,13 +841,8 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
StringRef where;
// Complain about targeting iOS < 5.0 in any way.
- if (TargetSimulatorVersionFromDefines != VersionTuple()) {
- if (TargetSimulatorVersionFromDefines < VersionTuple(5, 0))
- where = "iOS 5.0";
- } else if (isTargetIPhoneOS()) {
- if (isIPhoneOSVersionLT(5, 0))
- where = "iOS 5.0";
- }
+ if (isTargetIPhoneOS() && isIPhoneOSVersionLT(5, 0))
+ where = "iOS 5.0";
if (where != StringRef()) {
getDriver().Diag(clang::diag::err_drv_invalid_libcxx_deployment)
@@ -900,9 +854,7 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
}
bool Darwin::IsUnwindTablesDefault() const {
- // FIXME: Gross; we should probably have some separate target
- // definition, possibly even reusing the one in clang.
- return getArchName() == "x86_64";
+ return getArch() == llvm::Triple::x86_64;
}
bool Darwin::UseDwarfDebugFlags() const {
@@ -917,19 +869,17 @@ bool Darwin::UseSjLjExceptions() const {
getTriple().getArch() == llvm::Triple::thumb);
}
-const char *Darwin::GetDefaultRelocationModel() const {
- return "pic";
+bool Darwin::isPICDefault() const {
+ return true;
}
-const char *Darwin::GetForcedPicModel() const {
- if (getArchName() == "x86_64")
- return "pic";
- return 0;
+bool Darwin::isPICDefaultForced() const {
+ return getArch() == llvm::Triple::x86_64;
}
bool Darwin::SupportsProfiling() const {
// Profiling instrumentation is only supported on x86.
- return getArchName() == "i386" || getArchName() == "x86_64";
+ return getArch() == llvm::Triple::x86 || getArch() == llvm::Triple::x86_64;
}
bool Darwin::SupportsObjCGC() const {
@@ -937,8 +887,10 @@ bool Darwin::SupportsObjCGC() const {
return !isTargetIPhoneOS();
}
-bool Darwin::SupportsObjCARC() const {
- return isTargetIPhoneOS() || !isMacosxVersionLT(10, 6);
+void Darwin::CheckObjCARC() const {
+ if (isTargetIPhoneOS() || !isMacosxVersionLT(10, 6))
+ return;
+ getDriver().Diag(diag::err_arc_unsupported_on_toolchain);
}
std::string
@@ -1013,7 +965,7 @@ bool Generic_GCC::GCCVersion::operator<(const GCCVersion &RHS) const {
static StringRef getGCCToolchainDir(const ArgList &Args) {
const Arg *A = Args.getLastArg(options::OPT_gcc_toolchain);
if (A)
- return A->getValue(Args);
+ return A->getValue();
return GCC_INSTALL_PREFIX;
}
@@ -1072,7 +1024,8 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
if (!llvm::sys::fs::exists(LibDir))
continue;
for (unsigned k = 0, ke = CandidateTripleAliases.size(); k < ke; ++k)
- ScanLibDirForGCCTriple(TargetArch, LibDir, CandidateTripleAliases[k]);
+ ScanLibDirForGCCTriple(TargetArch, Args, LibDir,
+ CandidateTripleAliases[k]);
}
for (unsigned j = 0, je = CandidateMultiarchLibDirs.size(); j < je; ++j) {
const std::string LibDir
@@ -1081,7 +1034,7 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
continue;
for (unsigned k = 0, ke = CandidateMultiarchTripleAliases.size(); k < ke;
++k)
- ScanLibDirForGCCTriple(TargetArch, LibDir,
+ ScanLibDirForGCCTriple(TargetArch, Args, LibDir,
CandidateMultiarchTripleAliases[k],
/*NeedsMultiarchSuffix=*/true);
}
@@ -1136,7 +1089,10 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
static const char *const MIPSLibDirs[] = { "/lib" };
static const char *const MIPSTriples[] = { "mips-linux-gnu" };
static const char *const MIPSELLibDirs[] = { "/lib" };
- static const char *const MIPSELTriples[] = { "mipsel-linux-gnu" };
+ static const char *const MIPSELTriples[] = {
+ "mipsel-linux-gnu",
+ "mipsel-linux-android"
+ };
static const char *const MIPS64LibDirs[] = { "/lib64", "/lib" };
static const char *const MIPS64Triples[] = { "mips64-linux-gnu" };
@@ -1264,8 +1220,32 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
MultiarchTripleAliases.push_back(MultiarchTriple.str());
}
+// FIXME: The same routine exists in Tools.cpp.
+static bool hasMipsN32ABIArg(const ArgList &Args) {
+ Arg *A = Args.getLastArg(options::OPT_mabi_EQ);
+ return A && (A->getValue() == StringRef("n32"));
+}
+
+static StringRef getTargetMultiarchSuffix(llvm::Triple::ArchType TargetArch,
+ const ArgList &Args) {
+ if (TargetArch == llvm::Triple::x86_64 ||
+ TargetArch == llvm::Triple::ppc64)
+ return "/64";
+
+ if (TargetArch == llvm::Triple::mips64 ||
+ TargetArch == llvm::Triple::mips64el) {
+ if (hasMipsN32ABIArg(Args))
+ return "/n32";
+ else
+ return "/64";
+ }
+
+ return "/32";
+}
+
void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
- llvm::Triple::ArchType TargetArch, const std::string &LibDir,
+ llvm::Triple::ArchType TargetArch, const ArgList &Args,
+ const std::string &LibDir,
StringRef CandidateTriple, bool NeedsMultiarchSuffix) {
// There are various different suffixes involving the triple we
// check for. We also record what is necessary to walk from each back
@@ -1274,6 +1254,10 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
"/gcc/" + CandidateTriple.str(),
"/" + CandidateTriple.str() + "/gcc/" + CandidateTriple.str(),
+ // The Freescale PPC SDK has the gcc libraries in
+ // <sysroot>/usr/lib/<triple>/x.y.z so have a look there as well.
+ "/" + CandidateTriple.str(),
+
// Ubuntu has a strange mis-matched pair of triples that this happens to
// match.
// FIXME: It may be worthwhile to generalize this and look for a second
@@ -1283,6 +1267,7 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
const std::string InstallSuffixes[] = {
"/../../..",
"/../../../..",
+ "/../..",
"/../../../.."
};
// Only look at the final, weird Ubuntu suffix for i386-linux-gnu.
@@ -1307,11 +1292,7 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
// *if* there is a subdirectory of the right name with crtbegin.o in it,
// we use that. If not, and if not a multiarch triple, we look for
// crtbegin.o without the subdirectory.
- StringRef MultiarchSuffix
- = (TargetArch == llvm::Triple::x86_64 ||
- TargetArch == llvm::Triple::ppc64 ||
- TargetArch == llvm::Triple::mips64 ||
- TargetArch == llvm::Triple::mips64el) ? "/64" : "/32";
+ StringRef MultiarchSuffix = getTargetMultiarchSuffix(TargetArch, Args);
if (llvm::sys::fs::exists(LI->path() + MultiarchSuffix + "/crtbegin.o")) {
GCCMultiarchSuffix = MultiarchSuffix.str();
} else {
@@ -1392,18 +1373,17 @@ Tool &Generic_GCC::SelectTool(const Compilation &C,
}
bool Generic_GCC::IsUnwindTablesDefault() const {
- // FIXME: Gross; we should probably have some separate target
- // definition, possibly even reusing the one in clang.
- return getArchName() == "x86_64";
+ return getArch() == llvm::Triple::x86_64;
}
-const char *Generic_GCC::GetDefaultRelocationModel() const {
- return "static";
+bool Generic_GCC::isPICDefault() const {
+ return false;
}
-const char *Generic_GCC::GetForcedPicModel() const {
- return 0;
+bool Generic_GCC::isPICDefaultForced() const {
+ return false;
}
+
/// Hexagon Toolchain
Hexagon_TC::Hexagon_TC(const Driver &D, const llvm::Triple& Triple)
@@ -1457,21 +1437,14 @@ Tool &Hexagon_TC::SelectTool(const Compilation &C,
return *T;
}
-bool Hexagon_TC::IsUnwindTablesDefault() const {
- // FIXME: Gross; we should probably have some separate target
- // definition, possibly even reusing the one in clang.
- return getArchName() == "x86_64";
+bool Hexagon_TC::isPICDefault() const {
+ return false;
}
-const char *Hexagon_TC::GetDefaultRelocationModel() const {
- return "static";
+bool Hexagon_TC::isPICDefaultForced() const {
+ return false;
}
-const char *Hexagon_TC::GetForcedPicModel() const {
- return 0;
-} // End Hexagon
-
-
/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
/// Currently does not support anything else but compilation.
@@ -1495,16 +1468,12 @@ bool TCEToolChain::IsMathErrnoDefault() const {
return true;
}
-bool TCEToolChain::IsUnwindTablesDefault() const {
+bool TCEToolChain::isPICDefault() const {
return false;
}
-const char *TCEToolChain::GetDefaultRelocationModel() const {
- return "static";
-}
-
-const char *TCEToolChain::GetForcedPicModel() const {
- return 0;
+bool TCEToolChain::isPICDefaultForced() const {
+ return false;
}
Tool &TCEToolChain::SelectTool(const Compilation &C,
@@ -1613,19 +1582,43 @@ void Bitrig::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
DriverArgs.hasArg(options::OPT_nostdincxx))
return;
- std::string Triple = getTriple().str();
- if (Triple.substr(0, 5) == "amd64")
- Triple.replace(0, 5, "x86_64");
-
- addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/4.6.2");
- addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/4.6.2/backward");
- addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/4.6.2/" + Triple);
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/stdc++");
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/stdc++/backward");
+ StringRef Triple = getTriple().str();
+ if (Triple.startswith("amd64"))
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/stdc++/x86_64" +
+ Triple.substr(5));
+ else
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/stdc++/" +
+ Triple);
+ break;
+ }
}
void Bitrig::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- CmdArgs.push_back("-lstdc++");
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lcxxrt");
+ // Link libgcc to provide the unwind routines until libc++ supplies them.
+ CmdArgs.push_back("-lgcc");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
}
/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
@@ -2020,6 +2013,46 @@ static void addPathIfExists(Twine Path, ToolChain::path_list &Paths) {
if (llvm::sys::fs::exists(Path)) Paths.push_back(Path.str());
}
+static bool isMipsArch(llvm::Triple::ArchType Arch) {
+ return Arch == llvm::Triple::mips ||
+ Arch == llvm::Triple::mipsel ||
+ Arch == llvm::Triple::mips64 ||
+ Arch == llvm::Triple::mips64el;
+}
+
+static bool isMipsR2Arch(llvm::Triple::ArchType Arch,
+ const ArgList &Args) {
+ if (Arch != llvm::Triple::mips &&
+ Arch != llvm::Triple::mipsel)
+ return false;
+
+ Arg *A = Args.getLastArg(options::OPT_march_EQ,
+ options::OPT_mcpu_EQ,
+ options::OPT_mips_CPUs_Group);
+
+ if (!A)
+ return false;
+
+ if (A->getOption().matches(options::OPT_mips_CPUs_Group))
+ return A->getOption().matches(options::OPT_mips32r2);
+
+ return A->getValue() == StringRef("mips32r2");
+}
+
+static StringRef getMultilibDir(const llvm::Triple &Triple,
+ const ArgList &Args) {
+ if (!isMipsArch(Triple.getArch()))
+ return Triple.isArch32Bit() ? "lib32" : "lib64";
+
+ // The lib32 directory has a special meaning on MIPS targets: it holds
+ // N32 ABI binaries, so use this folder only when producing code for
+ // the N32 ABI.
+ if (hasMipsN32ABIArg(Args))
+ return "lib32";
+
+ return Triple.isArch32Bit() ? "lib" : "lib64";
+}
+
Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
llvm::Triple::ArchType Arch = Triple.getArch();
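getMultilibDir folds the MIPS special case into the usual lib32/lib64 split. Illustrative expectations, with hypothetical triples and assuming -mabi=n32 is what makes hasMipsN32ABIArg return true:

    //   triple / flags                    -> directory
    //   x86_64-linux-gnu                  -> "lib64"
    //   i686-linux-gnu                    -> "lib32"
    //   mipsel-linux-gnu                  -> "lib"    (32-bit MIPS)
    //   mips64el-linux-gnu                -> "lib64"
    //   mips64el-linux-gnu with -mabi=n32 -> "lib32"  (N32 binaries)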
@@ -2043,19 +2076,14 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)
ExtraOpts.push_back("-X");
- const bool IsMips = Arch == llvm::Triple::mips ||
- Arch == llvm::Triple::mipsel ||
- Arch == llvm::Triple::mips64 ||
- Arch == llvm::Triple::mips64el;
-
- const bool IsAndroid = Triple.getEnvironment() == llvm::Triple::ANDROIDEABI;
+ const bool IsAndroid = Triple.getEnvironment() == llvm::Triple::Android;
// Do not use 'gnu' hash style for Mips targets because .gnu.hash
// and the MIPS ABI require .dynsym to be sorted in different ways.
// .gnu.hash needs symbols to be grouped by hash code whereas the MIPS
// ABI requires a mapping between the GOT and the symbol table.
// Android loader does not support .gnu.hash.
- if (!IsMips && !IsAndroid) {
+ if (!isMipsArch(Arch) && !IsAndroid) {
if (IsRedhat(Distro) || IsOpenSuse(Distro) ||
(IsUbuntu(Distro) && Distro >= UbuntuMaverick))
ExtraOpts.push_back("--hash-style=gnu");
@@ -2084,16 +2112,23 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// to the link paths.
path_list &Paths = getFilePaths();
- const std::string Multilib = Triple.isArch32Bit() ? "lib32" : "lib64";
+ const std::string Multilib = getMultilibDir(Triple, Args);
const std::string MultiarchTriple = getMultiarchTriple(Triple, SysRoot);
// Add the multilib suffixed paths where they are available.
if (GCCInstallation.isValid()) {
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
const std::string &LibPath = GCCInstallation.getParentLibPath();
- addPathIfExists((GCCInstallation.getInstallPath() +
- GCCInstallation.getMultiarchSuffix()),
- Paths);
+
+ if (IsAndroid && isMipsR2Arch(Triple.getArch(), Args))
+ addPathIfExists(GCCInstallation.getInstallPath() +
+ GCCInstallation.getMultiarchSuffix() +
+ "/mips-r2",
+ Paths);
+ else
+ addPathIfExists((GCCInstallation.getInstallPath() +
+ GCCInstallation.getMultiarchSuffix()),
+ Paths);
// If the GCC installation we found is inside of the sysroot, we want to
// prefer libraries installed in the parent prefix of the GCC installation.
@@ -2108,6 +2143,11 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
addPathIfExists(LibPath + "/" + MultiarchTriple, Paths);
addPathIfExists(LibPath + "/../" + Multilib, Paths);
}
+ // On Android, libraries in the parent prefix of the GCC installation are
+ // preferred to the ones under sysroot.
+ if (IsAndroid) {
+ addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib", Paths);
+ }
}
addPathIfExists(SysRoot + "/lib/" + MultiarchTriple, Paths);
addPathIfExists(SysRoot + "/lib/../" + Multilib, Paths);
@@ -2326,16 +2366,25 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
StringRef LibDir = GCCInstallation.getParentLibPath();
StringRef InstallDir = GCCInstallation.getInstallPath();
StringRef Version = GCCInstallation.getVersion().Text;
- if (!addLibStdCXXIncludePaths(LibDir + "/../include/c++/" + Version,
- (GCCInstallation.getTriple().str() +
- GCCInstallation.getMultiarchSuffix()),
- DriverArgs, CC1Args)) {
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+
+ const std::string IncludePathCandidates[] = {
+ LibDir.str() + "/../include/c++/" + Version.str(),
// Gentoo is weird and places its headers inside the GCC install, so if the
// first attempt to find the headers fails, try this pattern.
- addLibStdCXXIncludePaths(InstallDir + "/include/g++-v4",
- (GCCInstallation.getTriple().str() +
- GCCInstallation.getMultiarchSuffix()),
- DriverArgs, CC1Args);
+ InstallDir.str() + "/include/g++-v4",
+ // Android standalone toolchain has C++ headers in yet another place.
+ LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.str(),
+ // Freescale SDK C++ headers are directly in <sysroot>/usr/include/c++,
+ // without a subdirectory corresponding to the gcc version.
+ LibDir.str() + "/../include/c++",
+ };
+
+ for (unsigned i = 0; i < llvm::array_lengthof(IncludePathCandidates); ++i) {
+ if (addLibStdCXXIncludePaths(IncludePathCandidates[i], (TripleStr +
+ GCCInstallation.getMultiarchSuffix()),
+ DriverArgs, CC1Args))
+ break;
}
}
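The loop above is a first-hit-wins probe over layout candidates, ordered from the common <libdir>/../include/c++/<version> layout down to the Gentoo, Android-standalone, and Freescale variants; addLibStdCXXIncludePaths is assumed to fail when the directory is missing, so later candidates are never consulted after a hit. A self-contained sketch of the same idiom:

    #include "llvm/Support/FileSystem.h"
    #include <string>
    #include <vector>

    // Hedged sketch: take the first candidate directory that exists.
    static bool addFirstExisting(const std::string *Cands, unsigned N,
                                 std::vector<std::string> &IncludeArgs) {
      for (unsigned i = 0; i != N; ++i) {
        if (!llvm::sys::fs::exists(Cands[i]))
          continue;
        IncludeArgs.push_back("-internal-isystem");  // cc1 include flag
        IncludeArgs.push_back(Cands[i]);
        return true;                  // first hit wins
      }
      return false;                   // no layout matched
    }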
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
index 95a11be..4c267e8 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
@@ -111,6 +111,7 @@ protected:
SmallVectorImpl<StringRef> &MultiarchTripleAliases);
void ScanLibDirForGCCTriple(llvm::Triple::ArchType TargetArch,
+ const ArgList &Args,
const std::string &LibDir,
StringRef CandidateTriple,
bool NeedsMultiarchSuffix = false);
@@ -128,8 +129,8 @@ public:
const ActionList &Inputs) const;
virtual bool IsUnwindTablesDefault() const;
- virtual const char *GetDefaultRelocationModel() const;
- virtual const char *GetForcedPicModel() const;
+ virtual bool isPICDefault() const;
+ virtual bool isPICDefaultForced() const;
protected:
/// \name ToolChain Implementation Helper Functions
@@ -155,9 +156,8 @@ public:
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
- virtual bool IsUnwindTablesDefault() const;
- virtual const char *GetDefaultRelocationModel() const;
- virtual const char *GetForcedPicModel() const;
+ virtual bool isPICDefault() const;
+ virtual bool isPICDefaultForced() const;
};
/// Darwin - The base Darwin tool chain.
@@ -185,11 +185,6 @@ private:
/// The OS version we are targeting.
mutable VersionTuple TargetVersion;
-protected:
- // FIXME: Remove this once there is a proper way to detect an ARC runtime
- // for the simulator.
- mutable VersionTuple TargetSimulatorVersionFromDefines;
-
private:
/// The default macosx-version-min of this tool chain; empty until
/// initialized.
@@ -243,9 +238,7 @@ public:
}
bool isTargetMacOS() const {
- return !isTargetIOSSimulator() &&
- !isTargetIPhoneOS() &&
- TargetSimulatorVersionFromDefines == VersionTuple();
+ return !isTargetIOSSimulator() && !isTargetIPhoneOS();
}
bool isTargetInitialized() const { return TargetInitialized; }
@@ -325,6 +318,10 @@ public:
return true;
}
+ virtual bool IsEncodeExtendedBlockSignatureDefault() const {
+ return true;
+ }
+
virtual bool IsObjCNonFragileABIDefault() const {
// Non-fragile ABI is default for everything but i386.
return getTriple().getArch() != llvm::Triple::x86;
@@ -347,14 +344,14 @@ public:
virtual RuntimeLibType GetDefaultRuntimeLibType() const {
return ToolChain::RLT_CompilerRT;
}
- virtual const char *GetDefaultRelocationModel() const;
- virtual const char *GetForcedPicModel() const;
+ virtual bool isPICDefault() const;
+ virtual bool isPICDefaultForced() const;
virtual bool SupportsProfiling() const;
virtual bool SupportsObjCGC() const;
- virtual bool SupportsObjCARC() const;
+ virtual void CheckObjCARC() const;
virtual bool UseDwarfDebugFlags() const;
@@ -365,9 +362,6 @@ public:
/// DarwinClang - The Darwin toolchain used by Clang.
class LLVM_LIBRARY_VISIBILITY DarwinClang : public Darwin {
-private:
- void AddGCCLibexecPath(unsigned darwinVersion);
-
public:
DarwinClang(const Driver &D, const llvm::Triple& Triple);
@@ -399,7 +393,7 @@ public:
std::string ComputeEffectiveClangTriple(const ArgList &Args,
types::ID InputType) const;
- virtual const char *GetDefaultRelocationModel() const { return "pic"; }
+ virtual bool isPICDefault() const { return false; }
};
class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC {
@@ -540,9 +534,8 @@ public:
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
bool IsMathErrnoDefault() const;
- bool IsUnwindTablesDefault() const;
- const char* GetDefaultRelocationModel() const;
- const char* GetForcedPicModel() const;
+ bool isPICDefault() const;
+ bool isPICDefaultForced() const;
private:
mutable llvm::DenseMap<unsigned, Tool*> Tools;
@@ -564,8 +557,8 @@ public:
virtual bool IsIntegratedAssemblerDefault() const;
virtual bool IsUnwindTablesDefault() const;
- virtual const char *GetDefaultRelocationModel() const;
- virtual const char *GetForcedPicModel() const;
+ virtual bool isPICDefault() const;
+ virtual bool isPICDefaultForced() const;
virtual void AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const;
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
index 936bde9..927ffe0 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
@@ -33,6 +33,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "InputInfo.h"
+#include "SanitizerArgs.h"
#include "ToolChains.h"
using namespace clang::driver;
@@ -93,9 +94,15 @@ static void addDirectoryList(const ArgList &Args,
const char *ArgName,
const char *EnvVar) {
const char *DirList = ::getenv(EnvVar);
+ bool CombinedArg = false;
+
if (!DirList)
return; // Nothing to do.
+ StringRef Name(ArgName);
+ if (Name.equals("-I") || Name.equals("-L"))
+ CombinedArg = true;
+
StringRef Dirs(DirList);
if (Dirs.empty()) // Empty string should not add '.'.
return;
@@ -103,21 +110,37 @@ static void addDirectoryList(const ArgList &Args,
StringRef::size_type Delim;
while ((Delim = Dirs.find(llvm::sys::PathSeparator)) != StringRef::npos) {
if (Delim == 0) { // Leading colon.
- CmdArgs.push_back(ArgName);
- CmdArgs.push_back(".");
+ if (CombinedArg) {
+ CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + "."));
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(".");
+ }
} else {
- CmdArgs.push_back(ArgName);
- CmdArgs.push_back(Args.MakeArgString(Dirs.substr(0, Delim)));
+ if (CombinedArg) {
+ CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + Dirs.substr(0, Delim)));
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(Args.MakeArgString(Dirs.substr(0, Delim)));
+ }
}
Dirs = Dirs.substr(Delim + 1);
}
if (Dirs.empty()) { // Trailing colon.
- CmdArgs.push_back(ArgName);
- CmdArgs.push_back(".");
+ if (CombinedArg) {
+ CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + "."));
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(".");
+ }
} else { // Add the last path.
- CmdArgs.push_back(ArgName);
- CmdArgs.push_back(Args.MakeArgString(Dirs));
+ if (CombinedArg) {
+ CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + Dirs));
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(Args.MakeArgString(Dirs));
+ }
}
}
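addDirectoryList mirrors the POSIX treatment of PATH-like variables: entries split on llvm::sys::PathSeparator, and an empty entry (leading, trailing, or doubled separator) stands for the current directory. With the new CombinedArg form for -I and -L, a sketch of the expected expansion, using a hypothetical emit() helper in place of CmdArgs.push_back:

    // CPATH="foo::bar:" with ArgName "-I" should emit, in order:
    //   "-Ifoo"  "-I."  "-Ibar"  "-I."
    llvm::StringRef Dirs("foo::bar:");        // hypothetical input
    llvm::StringRef::size_type Delim;
    while ((Delim = Dirs.find(':')) != llvm::StringRef::npos) {
      emit(Delim == 0 ? std::string(".")      // empty entry -> "."
                      : Dirs.substr(0, Delim).str());
      Dirs = Dirs.substr(Delim + 1);
    }
    emit(Dirs.empty() ? std::string(".") : Dirs.str());  // last entry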
@@ -200,6 +223,12 @@ static void addProfileRT(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(ProfileRT));
}
+static bool forwardToGCC(const Option &O) {
+ return !O.hasFlag(options::NoForward) &&
+ !O.hasFlag(options::DriverOption) &&
+ !O.hasFlag(options::LinkerInput);
+}
+
void Clang::AddPreprocessingOptions(Compilation &C,
const Driver &D,
const ArgList &Args,
@@ -220,7 +249,7 @@ void Clang::AddPreprocessingOptions(Compilation &C,
// Determine the output location.
const char *DepFile;
if (Arg *MF = Args.getLastArg(options::OPT_MF)) {
- DepFile = MF->getValue(Args);
+ DepFile = MF->getValue();
C.addFailureResultFile(DepFile);
} else if (Output.getType() == types::TY_Dependencies) {
DepFile = Output.getFilename();
@@ -242,7 +271,7 @@ void Clang::AddPreprocessingOptions(Compilation &C,
// when we are only generating a dependency file.
Arg *OutputOpt = Args.getLastArg(options::OPT_o);
if (OutputOpt && Output.getType() != types::TY_Dependencies) {
- DepTarget = OutputOpt->getValue(Args);
+ DepTarget = OutputOpt->getValue();
} else {
// Otherwise derive from the base input.
//
@@ -282,7 +311,7 @@ void Clang::AddPreprocessingOptions(Compilation &C,
if (A->getOption().matches(options::OPT_MQ)) {
CmdArgs.push_back("-MT");
SmallString<128> Quoted;
- QuoteTarget(A->getValue(Args), Quoted);
+ QuoteTarget(A->getValue(), Quoted);
CmdArgs.push_back(Args.MakeArgString(Quoted));
// -MT flag - no change
@@ -310,7 +339,7 @@ void Clang::AddPreprocessingOptions(Compilation &C,
bool FoundPTH = false;
bool FoundPCH = false;
- llvm::sys::Path P(A->getValue(Args));
+ llvm::sys::Path P(A->getValue());
bool Exists;
if (UsePCH) {
P.appendSuffix("pch");
@@ -442,10 +471,12 @@ static const char *getLLVMArchSuffixForARM(StringRef CPU) {
.Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
.Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
.Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
- .Cases("cortex-a8", "cortex-a9", "v7")
+ .Cases("cortex-a8", "cortex-a9", "cortex-a15", "v7")
.Case("cortex-m3", "v7m")
.Case("cortex-m4", "v7m")
.Case("cortex-m0", "v6m")
+ .Case("cortex-a9-mp", "v7f")
+ .Case("swift", "v7s")
.Default("");
}
@@ -458,7 +489,7 @@ static std::string getARMTargetCPU(const ArgList &Args,
// If we have -mcpu=, use that.
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
- StringRef MCPU = A->getValue(Args);
+ StringRef MCPU = A->getValue();
// Handle -mcpu=native.
if (MCPU == "native")
return llvm::sys::getHostCPUName();
@@ -469,7 +500,7 @@ static std::string getARMTargetCPU(const ArgList &Args,
StringRef MArch;
if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
// Otherwise, if we have -march= choose the base CPU for that arch.
- MArch = A->getValue(Args);
+ MArch = A->getValue();
} else {
// Otherwise, use the Arch from the triple.
MArch = Triple.getArchName();
@@ -500,6 +531,8 @@ static std::string getARMTargetCPU(const ArgList &Args,
.Cases("armv6z", "armv6zk", "arm1176jzf-s")
.Case("armv6t2", "arm1156t2-s")
.Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
+ .Cases("armv7f", "armv7-f", "cortex-a9-mp")
+ .Cases("armv7s", "armv7-s", "swift")
.Cases("armv7r", "armv7-r", "cortex-r4")
.Cases("armv7m", "armv7-m", "cortex-m3")
.Case("ep9312", "ep9312")
@@ -531,7 +564,7 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
// frontend target.
static void addFPUArgs(const Driver &D, const Arg *A, const ArgList &Args,
ArgStringList &CmdArgs) {
- StringRef FPU = A->getValue(Args);
+ StringRef FPU = A->getValue();
// Set the target features based on the FPU.
if (FPU == "fpa" || FPU == "fpe2" || FPU == "fpe3" || FPU == "maverick") {
@@ -569,14 +602,15 @@ static void addFPUArgs(const Driver &D, const Arg *A, const ArgList &Args,
// Handle -mfpmath=.
static void addFPMathArgs(const Driver &D, const Arg *A, const ArgList &Args,
ArgStringList &CmdArgs, StringRef CPU) {
- StringRef FPMath = A->getValue(Args);
+ StringRef FPMath = A->getValue();
// Set the target features based on the FPMath.
if (FPMath == "neon") {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("+neonfp");
- if (CPU != "cortex-a8" && CPU != "cortex-a9" && CPU != "cortex-a9-mp")
+ if (CPU != "cortex-a8" && CPU != "cortex-a9" && CPU != "cortex-a9-mp" &&
+ CPU != "cortex-a15")
D.Diag(diag::err_drv_invalid_feature) << "-mfpmath=neon" << CPU;
} else if (FPMath == "vfp" || FPMath == "vfp2" || FPMath == "vfp3" ||
@@ -603,7 +637,7 @@ static StringRef getARMFloatABI(const Driver &D,
else if (A->getOption().matches(options::OPT_mhard_float))
FloatABI = "hard";
else {
- FloatABI = A->getValue(Args);
+ FloatABI = A->getValue();
if (FloatABI != "soft" && FloatABI != "softfp" && FloatABI != "hard") {
D.Diag(diag::err_drv_invalid_mfloat_abi)
<< A->getAsString(Args);
@@ -643,7 +677,7 @@ static StringRef getARMFloatABI(const Driver &D,
// EABI is always AAPCS, and if it was not marked 'hard', it's softfp
FloatABI = "softfp";
break;
- case llvm::Triple::ANDROIDEABI: {
+ case llvm::Triple::Android: {
std::string ArchName =
getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
if (StringRef(ArchName).startswith("v7"))
@@ -669,18 +703,29 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs,
bool KernelOrKext) const {
const Driver &D = getToolChain().getDriver();
- llvm::Triple Triple = getToolChain().getTriple();
+ // Get the effective triple, which takes into account the deployment target.
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ llvm::Triple Triple(TripleStr);
+ std::string CPUName = getARMTargetCPU(Args, Triple);
// Select the ABI to use.
//
// FIXME: Support -meabi.
const char *ABIName = 0;
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
- ABIName = A->getValue(Args);
+ ABIName = A->getValue();
+ } else if (Triple.isOSDarwin()) {
+ // The backend is hardwired to assume AAPCS for M-class processors; ensure
+ // the frontend matches that.
+ if (StringRef(CPUName).startswith("cortex-m")) {
+ ABIName = "aapcs";
+ } else {
+ ABIName = "apcs-gnu";
+ }
} else {
// Select the default based on the platform.
switch(Triple.getEnvironment()) {
- case llvm::Triple::ANDROIDEABI:
+ case llvm::Triple::Android:
case llvm::Triple::GNUEABI:
case llvm::Triple::GNUEABIHF:
ABIName = "aapcs-linux";
@@ -697,7 +742,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// Set the CPU based on -march= and -mcpu=.
CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(Args.MakeArgString(getARMTargetCPU(Args, Triple)));
+ CmdArgs.push_back(Args.MakeArgString(CPUName));
// Determine floating point ABI from the options & target defaults.
StringRef FloatABI = getARMFloatABI(D, Args, Triple);
@@ -754,8 +799,10 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// Kernel code has more strict alignment requirements.
if (KernelOrKext) {
- CmdArgs.push_back("-backend-option");
- CmdArgs.push_back("-arm-long-calls");
+ if (Triple.getOS() != llvm::Triple::IOS || Triple.isOSVersionLT(6)) {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-arm-long-calls");
+ }
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-arm-strict-align");
@@ -777,44 +824,18 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
CmdArgs.push_back("-no-implicit-float");
}
-// Get default architecture.
-static const char* getMipsArchFromCPU(StringRef CPUName) {
- if (CPUName == "mips32" || CPUName == "mips32r2")
- return "mips";
-
- assert((CPUName == "mips64" || CPUName == "mips64r2") &&
- "Unexpected cpu name.");
-
- return "mips64";
-}
-
-// Check that ArchName is a known Mips architecture name.
-static bool checkMipsArchName(StringRef ArchName) {
- return ArchName == "mips" ||
- ArchName == "mipsel" ||
- ArchName == "mips64" ||
- ArchName == "mips64el";
-}
-
-// Get default target cpu.
-static const char* getMipsCPUFromArch(StringRef ArchName) {
- if (ArchName == "mips" || ArchName == "mipsel")
+// Translate MIPS CPU name alias option to CPU name.
+static StringRef getMipsCPUFromAlias(const Arg &A) {
+ if (A.getOption().matches(options::OPT_mips32))
return "mips32";
-
- assert((ArchName == "mips64" || ArchName == "mips64el") &&
- "Unexpected arch name.");
-
- return "mips64";
-}
-
-// Get default ABI.
-static const char* getMipsABIFromArch(StringRef ArchName) {
- if (ArchName == "mips" || ArchName == "mipsel")
- return "o32";
-
- assert((ArchName == "mips64" || ArchName == "mips64el") &&
- "Unexpected arch name.");
- return "n64";
+ if (A.getOption().matches(options::OPT_mips32r2))
+ return "mips32r2";
+ if (A.getOption().matches(options::OPT_mips64))
+ return "mips64";
+ if (A.getOption().matches(options::OPT_mips64r2))
+ return "mips64r2";
+ llvm_unreachable("Unexpected option");
+ return "";
}
// Get CPU and ABI names. They are not independent
@@ -823,26 +844,53 @@ static void getMipsCPUAndABI(const ArgList &Args,
const ToolChain &TC,
StringRef &CPUName,
StringRef &ABIName) {
- StringRef ArchName;
-
- // Select target cpu and architecture.
- if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
- CPUName = A->getValue(Args);
- ArchName = getMipsArchFromCPU(CPUName);
- }
- else {
- ArchName = Args.MakeArgString(TC.getArchName());
- if (!checkMipsArchName(ArchName))
- TC.getDriver().Diag(diag::err_drv_invalid_arch_name) << ArchName;
+ const char *DefMips32CPU = "mips32";
+ const char *DefMips64CPU = "mips64";
+
+ if (Arg *A = Args.getLastArg(options::OPT_march_EQ,
+ options::OPT_mcpu_EQ,
+ options::OPT_mips_CPUs_Group)) {
+ if (A->getOption().matches(options::OPT_mips_CPUs_Group))
+ CPUName = getMipsCPUFromAlias(*A);
else
- CPUName = getMipsCPUFromArch(ArchName);
+ CPUName = A->getValue();
}
-
- // Select the ABI to use.
+
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
- ABIName = A->getValue(Args);
- else
- ABIName = getMipsABIFromArch(ArchName);
+ ABIName = A->getValue();
+
+ // Setup default CPU and ABI names.
+ if (CPUName.empty() && ABIName.empty()) {
+ switch (TC.getTriple().getArch()) {
+ default:
+ llvm_unreachable("Unexpected triple arch name");
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ CPUName = DefMips32CPU;
+ break;
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ CPUName = DefMips64CPU;
+ break;
+ }
+ }
+
+ if (!ABIName.empty()) {
+ // Deduce CPU name from ABI name.
+ CPUName = llvm::StringSwitch<const char *>(ABIName)
+ .Cases("o32", "eabi", DefMips32CPU)
+ .Cases("n32", "n64", DefMips64CPU)
+ .Default("");
+ }
+ else if (!CPUName.empty()) {
+ // Deduce ABI name from CPU name.
+ ABIName = llvm::StringSwitch<const char *>(CPUName)
+ .Cases("mips32", "mips32r2", "o32")
+ .Cases("mips64", "mips64r2", "n64")
+ .Default("");
+ }
+
+ // FIXME: Warn on inconsistent cpu and abi usage.
}
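Worked examples of the CPU/ABI deduction above, assuming a mipsel-linux triple; note that, as written, a non-empty ABI always re-derives the CPU, which is presumably what the trailing FIXME about inconsistent usage refers to:

    //   flags                       CPUName        ABIName
    //   (none)                   -> "mips32"    -> "o32"   (defaults)
    //   -mabi=n64                -> "mips64"    -> "n64"   (CPU from ABI)
    //   -march=mips32r2          -> "mips32r2"  -> "o32"   (ABI from CPU)
    //   -mips64r2                -> "mips64r2"  -> "n64"   (alias, then ABI)
    //   -mcpu=mips32r2 -mabi=n64 -> CPU re-deduced to "mips64"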
// Select the MIPS float ABI as determined by -msoft-float, -mhard-float,
@@ -859,7 +907,7 @@ static StringRef getMipsFloatABI(const Driver &D, const ArgList &Args) {
else if (A->getOption().matches(options::OPT_mhard_float))
FloatABI = "hard";
else {
- FloatABI = A->getValue(Args);
+ FloatABI = A->getValue();
if (FloatABI != "soft" && FloatABI != "single" && FloatABI != "hard") {
D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
FloatABI = "hard";
@@ -941,12 +989,19 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
AddTargetFeature(Args, CmdArgs,
options::OPT_mdspr2, options::OPT_mno_dspr2,
"dspr2");
+
+ if (Arg *A = Args.getLastArg(options::OPT_G)) {
+ StringRef v = A->getValue();
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-mips-ssection-threshold=" + v));
+ A->claim();
+ }
}
/// getPPCTargetCPU - Get the (LLVM) name of the PowerPC cpu we are targeting.
static std::string getPPCTargetCPU(const ArgList &Args) {
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
- StringRef CPUName = A->getValue(Args);
+ StringRef CPUName = A->getValue();
if (CPUName == "native") {
std::string CPU = llvm::sys::getHostCPUName();
@@ -978,6 +1033,8 @@ static std::string getPPCTargetCPU(const ArgList &Args) {
.Case("970", "970")
.Case("G5", "g5")
.Case("a2", "a2")
+ .Case("e500mc", "e500mc")
+ .Case("e5500", "e5500")
.Case("power6", "pwr6")
.Case("power7", "pwr7")
.Case("powerpc", "ppc")
@@ -1015,7 +1072,7 @@ void Clang::AddSparcTargetArgs(const ArgList &Args,
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
// Select the float ABI as determined by -msoft-float, -mhard-float, and
@@ -1054,6 +1111,8 @@ void Clang::AddSparcTargetArgs(const ArgList &Args,
void Clang::AddX86TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
+ const bool isAndroid =
+ getToolChain().getTriple().getEnvironment() == llvm::Triple::Android;
if (!Args.hasFlag(options::OPT_mred_zone,
options::OPT_mno_red_zone,
true) ||
@@ -1068,7 +1127,7 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
const char *CPUName = 0;
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
- if (StringRef(A->getValue(Args)) == "native") {
+ if (StringRef(A->getValue()) == "native") {
// FIXME: Reject attempts to use -march=native unless the target matches
// the host.
//
@@ -1078,7 +1137,7 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
if (!CPU.empty() && CPU != "generic")
CPUName = Args.MakeArgString(CPU);
} else
- CPUName = A->getValue(Args);
+ CPUName = A->getValue();
}
// Select the default CPU if none was given (or detection failed).
@@ -1118,7 +1177,9 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
if (getToolChain().getArch() == llvm::Triple::x86_64)
CPUName = "x86-64";
else if (getToolChain().getArch() == llvm::Triple::x86)
- CPUName = "pentium4";
+ // All x86 devices running Android have core2 as their common
+ // denominator. This makes it a better choice than pentium4.
+ CPUName = isAndroid ? "core2" : "pentium4";
}
}
@@ -1141,8 +1202,8 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
(*it)->claim();
// Skip over "-m".
- assert(Name.startswith("-m") && "Invalid feature name.");
- Name = Name.substr(2);
+ assert(Name.startswith("m") && "Invalid feature name.");
+ Name = Name.substr(1);
bool IsNegative = Name.startswith("no-");
if (IsNegative)
@@ -1174,7 +1235,7 @@ static Arg* getLastHexagonArchArg (const ArgList &Args)
A->claim();
}
else if ((*it)->getOption().matches(options::OPT_m_Joined)){
- StringRef Value = (*it)->getValue(Args,0);
+ StringRef Value = (*it)->getValue(0);
if (Value.startswith("v")) {
A = *it;
A->claim();
@@ -1191,7 +1252,7 @@ static StringRef getHexagonTargetCPU(const ArgList &Args)
// Select the default CPU (v4) if none was given or detection failed.
if ((A = getLastHexagonArchArg (Args))) {
- WhichHexagon = A->getValue(Args);
+ WhichHexagon = A->getValue();
if (WhichHexagon == "")
return "v4";
else
@@ -1216,7 +1277,7 @@ void Clang::AddHexagonTargetArgs(const ArgList &Args,
if (Arg *A = Args.getLastArg(options::OPT_G,
options::OPT_msmall_data_threshold_EQ)) {
std::string SmallDataThreshold="-small-data-threshold=";
- SmallDataThreshold += A->getValue(Args);
+ SmallDataThreshold += A->getValue();
CmdArgs.push_back ("-mllvm");
CmdArgs.push_back(Args.MakeArgString(SmallDataThreshold));
A->claim();
@@ -1392,25 +1453,80 @@ static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
RelaxDefault);
}
+SanitizerArgs::SanitizerArgs(const Driver &D, const ArgList &Args) {
+ Kind = 0;
+
+ const Arg *AsanArg = 0, *TsanArg = 0, *UbsanArg = 0;
+ for (ArgList::const_iterator I = Args.begin(), E = Args.end(); I != E; ++I) {
+ unsigned Add = 0, Remove = 0;
+ const char *DeprecatedReplacement = 0;
+ if ((*I)->getOption().matches(options::OPT_faddress_sanitizer)) {
+ Add = Address;
+ DeprecatedReplacement = "-fsanitize=address";
+ } else if ((*I)->getOption().matches(options::OPT_fno_address_sanitizer)) {
+ Remove = Address;
+ DeprecatedReplacement = "-fno-sanitize=address";
+ } else if ((*I)->getOption().matches(options::OPT_fthread_sanitizer)) {
+ Add = Thread;
+ DeprecatedReplacement = "-fsanitize=thread";
+ } else if ((*I)->getOption().matches(options::OPT_fno_thread_sanitizer)) {
+ Remove = Thread;
+ DeprecatedReplacement = "-fno-sanitize=thread";
+ } else if ((*I)->getOption().matches(options::OPT_fcatch_undefined_behavior)) {
+ Add = Undefined;
+ DeprecatedReplacement = "-fsanitize=undefined";
+ } else if ((*I)->getOption().matches(options::OPT_fsanitize_EQ)) {
+ Add = parse(D, *I);
+ } else if ((*I)->getOption().matches(options::OPT_fno_sanitize_EQ)) {
+ Remove = parse(D, *I);
+ } else {
+ continue;
+ }
+
+ (*I)->claim();
+
+ Kind |= Add;
+ Kind &= ~Remove;
+
+ if (Add & NeedsAsanRt) AsanArg = *I;
+ if (Add & NeedsTsanRt) TsanArg = *I;
+ if (Add & NeedsUbsanRt) UbsanArg = *I;
+
+ // If this is a deprecated synonym, produce a warning directing users
+ // towards the new spelling.
+ if (DeprecatedReplacement)
+ D.Diag(diag::warn_drv_deprecated_arg)
+ << (*I)->getAsString(Args) << DeprecatedReplacement;
+ }
+
+ // Only one runtime library can be used at once.
+ // FIXME: Allow Ubsan to be combined with the other two.
+ bool NeedsAsan = needsAsanRt();
+ bool NeedsTsan = needsTsanRt();
+ bool NeedsUbsan = needsUbsanRt();
+ if (NeedsAsan + NeedsTsan + NeedsUbsan > 1)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << describeSanitizeArg(Args, NeedsAsan ? AsanArg : TsanArg,
+ NeedsAsan ? NeedsAsanRt : NeedsTsanRt)
+ << describeSanitizeArg(Args, NeedsUbsan ? UbsanArg : TsanArg,
+ NeedsUbsan ? NeedsUbsanRt : NeedsTsanRt);
+}
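
In the constructor above, each recognized flag contributes an Add or a Remove mask, and Kind is updated in argument order, so the last flag for a given sanitizer wins. A minimal sketch of that accumulation; the enum values are illustrative, not the driver's real bit assignments:

#include <cstdio>

// Illustrative sanitizer kind bits (assumed values).
enum { Address = 1u << 0, Thread = 1u << 1, Undefined = 1u << 2 };

// Apply one flag's effect; later flags win because they are applied last.
unsigned apply(unsigned Kind, unsigned Add, unsigned Remove) {
  Kind |= Add;
  Kind &= ~Remove;
  return Kind;
}

int main() {
  unsigned Kind = 0;
  Kind = apply(Kind, Address, 0);   // -fsanitize=address
  Kind = apply(Kind, 0, Address);   // -fno-sanitize=address
  Kind = apply(Kind, Undefined, 0); // -fsanitize=undefined
  std::printf("%u\n", Kind);        // prints 4: only Undefined survives
}
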
+
/// If AddressSanitizer is enabled, add appropriate linker flags (Linux).
/// This needs to be called before we add the C run-time (malloc, etc).
static void addAsanRTLinux(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
- if (!Args.hasFlag(options::OPT_faddress_sanitizer,
- options::OPT_fno_address_sanitizer, false))
- return;
- if(TC.getTriple().getEnvironment() == llvm::Triple::ANDROIDEABI) {
+ if(TC.getTriple().getEnvironment() == llvm::Triple::Android) {
if (!Args.hasArg(options::OPT_shared)) {
if (!Args.hasArg(options::OPT_pie))
TC.getDriver().Diag(diag::err_drv_asan_android_requires_pie);
- // For an executable, we add a .preinit_array stub.
- CmdArgs.push_back("-u");
- CmdArgs.push_back("__asan_preinit");
- CmdArgs.push_back("-lasan");
}
- CmdArgs.push_back("-lasan_preload");
- CmdArgs.push_back("-ldl");
+ SmallString<128> LibAsan(TC.getDriver().ResourceDir);
+ llvm::sys::path::append(LibAsan, "lib", "linux",
+ (Twine("libclang_rt.asan-") +
+ TC.getArchName() + "-android.so"));
+ CmdArgs.push_back(Args.MakeArgString(LibAsan));
} else {
if (!Args.hasArg(options::OPT_shared)) {
// LibAsan is "libclang_rt.asan-<ArchName>.a" in the Linux library
@@ -1431,9 +1547,6 @@ static void addAsanRTLinux(const ToolChain &TC, const ArgList &Args,
/// This needs to be called before we add the C run-time (malloc, etc).
static void addTsanRTLinux(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
- if (!Args.hasFlag(options::OPT_fthread_sanitizer,
- options::OPT_fno_thread_sanitizer, false))
- return;
if (!Args.hasArg(options::OPT_shared)) {
// LibTsan is "libclang_rt.tsan-<ArchName>.a" in the Linux library
// resource directory.
@@ -1448,6 +1561,22 @@ static void addTsanRTLinux(const ToolChain &TC, const ArgList &Args,
}
}
+/// If UndefinedBehaviorSanitizer is enabled, add appropriate linker flags
+/// (Linux).
+static void addUbsanRTLinux(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ // LibUbsan is "libclang_rt.ubsan-<ArchName>.a" in the Linux library
+ // resource directory.
+ SmallString<128> LibUbsan(TC.getDriver().ResourceDir);
+ llvm::sys::path::append(LibUbsan, "lib", "linux",
+ (Twine("libclang_rt.ubsan-") +
+ TC.getArchName() + ".a"));
+ CmdArgs.push_back(Args.MakeArgString(LibUbsan));
+ CmdArgs.push_back("-lpthread");
+ }
+}
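
All three runtimes are located the same way, as <ResourceDir>/lib/linux/libclang_rt.<name>-<arch>.a (plus the -android.so variant for ASan above). A sketch of the path construction using LLVM's SmallString and path::append; runtimePath is a hypothetical helper for illustration, not a driver API:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Path.h"

// Build e.g. "<ResourceDir>/lib/linux/libclang_rt.ubsan-x86_64.a".
llvm::SmallString<128> runtimePath(llvm::StringRef ResourceDir,
                                   llvm::StringRef Sanitizer,
                                   llvm::StringRef Arch) {
  llvm::SmallString<128> P(ResourceDir);
  llvm::sys::path::append(P, "lib", "linux",
                          (llvm::Twine("libclang_rt.") + Sanitizer +
                           "-" + Arch + ".a").str());
  return P;
}
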
+
static bool shouldUseFramePointer(const ArgList &Args,
const llvm::Triple &Triple) {
if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
@@ -1516,7 +1645,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) {
- StringRef Value = A->getValue(Args, i);
+ StringRef Value = A->getValue(i);
if (Value == "-force_cpusubtype_ALL") {
// Do nothing, this is the default and we don't support anything else.
@@ -1600,8 +1729,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-analyzer-eagerly-assume");
- CmdArgs.push_back("-analyzer-ipa=inlining");
-
// Add default argument set.
if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
CmdArgs.push_back("-analyzer-checker=core");
@@ -1627,7 +1754,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// reasons.
CmdArgs.push_back("-analyzer-output");
if (Arg *A = Args.getLastArg(options::OPT__analyzer_output))
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
else
CmdArgs.push_back("plist");
@@ -1642,67 +1769,90 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CheckCodeGenerationOptions(D, Args);
- // Perform argument translation for LLVM backend. This
- // takes some care in reconciling with llvm-gcc. The
- // issue is that llvm-gcc translates these options based on
- // the values in cc1, whereas we are processing based on
- // the driver arguments.
-
- // This comes from the default translation the driver + cc1
- // would do to enable flag_pic.
-
- Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
- options::OPT_fpic, options::OPT_fno_pic,
- options::OPT_fPIE, options::OPT_fno_PIE,
- options::OPT_fpie, options::OPT_fno_pie);
- bool PICDisabled = false;
- bool PICEnabled = false;
- bool PICForPIE = false;
- if (LastPICArg) {
- PICForPIE = (LastPICArg->getOption().matches(options::OPT_fPIE) ||
- LastPICArg->getOption().matches(options::OPT_fpie));
- PICEnabled = (PICForPIE ||
- LastPICArg->getOption().matches(options::OPT_fPIC) ||
- LastPICArg->getOption().matches(options::OPT_fpic));
- PICDisabled = !PICEnabled;
+ // For the PIC and PIE flag options, this logic is different from the legacy
+ // logic in very old versions of GCC, as that logic was just a bug no one had
+ // ever fixed. This logic is both more rational and consistent with GCC's new
+ // logic now that the bugs are fixed. The last argument relating to either
+ // PIC or PIE wins, and no other argument is used. If the last argument is
+ // any flavor of the '-fno-...' arguments, both PIC and PIE are disabled. Any
+ // PIE option implicitly enables PIC at the same level.
+ bool PIE = false;
+ bool PIC = getToolChain().isPICDefault();
+ bool IsPICLevelTwo = PIC;
+ if (Arg *A = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
+ options::OPT_fpic, options::OPT_fno_pic,
+ options::OPT_fPIE, options::OPT_fno_PIE,
+ options::OPT_fpie, options::OPT_fno_pie)) {
+ Option O = A->getOption();
+ if (O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic) ||
+ O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie)) {
+ PIE = O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie);
+ PIC = PIE || O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic);
+ IsPICLevelTwo = O.matches(options::OPT_fPIE) ||
+ O.matches(options::OPT_fPIC);
+ } else {
+ PIE = PIC = false;
+ }
+ }
+ // Check whether the tool chain trumps the PIC-ness decision. If the PIC-ness
+ // is forced, then neither the PIC nor the PIE flags have any effect.
+ if (getToolChain().isPICDefaultForced()) {
+ PIE = false;
+ PIC = getToolChain().isPICDefault();
+ IsPICLevelTwo = PIC;
}
+
+ // Introduce a Darwin-specific hack. If the default is PIC but the flags
+ // specified while enabling PIC enabled level 1 PIC, just force it back to
+ // level 2 PIC instead. This matches the behavior of Darwin GCC (based on my
+ // informal testing).
+ if (PIC && getToolChain().getTriple().isOSDarwin())
+ IsPICLevelTwo |= getToolChain().isPICDefault();
+
// Note that these flags are trump-cards. Regardless of the order w.r.t. the
// PIC or PIE options above, if these show up, PIC is disabled.
- if (Args.hasArg(options::OPT_mkernel))
- PICDisabled = true;
+ llvm::Triple Triple(TripleStr);
+ if ((Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext)) &&
+ (Triple.getOS() != llvm::Triple::IOS ||
+ Triple.isOSVersionLT(6)))
+ PIC = PIE = false;
if (Args.hasArg(options::OPT_static))
- PICDisabled = true;
- bool DynamicNoPIC = Args.hasArg(options::OPT_mdynamic_no_pic);
-
- // Select the relocation model.
- const char *Model = getToolChain().GetForcedPicModel();
- if (!Model) {
- if (DynamicNoPIC)
- Model = "dynamic-no-pic";
- else if (PICDisabled)
- Model = "static";
- else if (PICEnabled)
- Model = "pic";
- else
- Model = getToolChain().GetDefaultRelocationModel();
- }
- StringRef ModelStr = Model ? Model : "";
- if (Model && ModelStr != "pic") {
- CmdArgs.push_back("-mrelocation-model");
- CmdArgs.push_back(Model);
- }
+ PIC = PIE = false;
+
+ if (Arg *A = Args.getLastArg(options::OPT_mdynamic_no_pic)) {
+ // This is a very special mode. It trumps the other modes, almost no one
+ // uses it, and it isn't even valid on any OS but Darwin.
+ if (!getToolChain().getTriple().isOSDarwin())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << getToolChain().getTriple().str();
- // Infer the __PIC__ and __PIE__ values.
- if (ModelStr == "pic" && PICForPIE) {
- CmdArgs.push_back("-pie-level");
- CmdArgs.push_back((LastPICArg &&
- LastPICArg->getOption().matches(options::OPT_fPIE)) ?
- "2" : "1");
- } else if (ModelStr == "pic" || ModelStr == "dynamic-no-pic") {
- CmdArgs.push_back("-pic-level");
- CmdArgs.push_back(((ModelStr != "dynamic-no-pic" && LastPICArg &&
- LastPICArg->getOption().matches(options::OPT_fPIC)) ||
- getToolChain().getTriple().isOSDarwin()) ? "2" : "1");
+ // FIXME: Warn when this flag trumps some other PIC or PIE flag.
+
+ CmdArgs.push_back("-mrelocation-model");
+ CmdArgs.push_back("dynamic-no-pic");
+
+ // Only a forced PIC mode can cause the actual compile to have PIC defines
+ // etc.; no flags are sufficient. This behavior was selected to closely
+ // match that of llvm-gcc and Apple GCC before that.
+ if (getToolChain().isPICDefault() && getToolChain().isPICDefaultForced()) {
+ CmdArgs.push_back("-pic-level");
+ CmdArgs.push_back("2");
+ }
+ } else {
+ // Currently, LLVM only knows about PIC vs. static; the PIE differences are
+ // handled in Clang's IRGen by the -pie-level flag.
+ CmdArgs.push_back("-mrelocation-model");
+ CmdArgs.push_back(PIC ? "pic" : "static");
+
+ if (PIC) {
+ CmdArgs.push_back("-pic-level");
+ CmdArgs.push_back(IsPICLevelTwo ? "2" : "1");
+ if (PIE) {
+ CmdArgs.push_back("-pie-level");
+ CmdArgs.push_back(IsPICLevelTwo ? "2" : "1");
+ }
+ }
}
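
The rewritten PIC/PIE logic reduces to three booleans derived from the last flag in the PIC/PIE family. A standalone sketch of the same last-arg-wins resolution over a flat flag list; the spellings are the real driver options, everything else is scaffolding:

#include <string>
#include <vector>

struct PICState { bool PIC, PIE, LevelTwo; };

// Mirror the driver: any -fno-* flavor disables both PIC and PIE, any PIE
// flavor implies PIC, and the uppercase spellings select level 2. Later
// flags override earlier ones.
PICState resolvePIC(const std::vector<std::string> &Flags, bool PICDefault) {
  PICState S = { PICDefault, false, PICDefault };
  for (size_t i = 0, e = Flags.size(); i != e; ++i) {
    const std::string &F = Flags[i];
    if (F == "-fPIC" || F == "-fpic" || F == "-fPIE" || F == "-fpie") {
      S.PIE = (F == "-fPIE" || F == "-fpie");
      S.PIC = true;
      S.LevelTwo = (F == "-fPIC" || F == "-fPIE");
    } else if (F == "-fno-PIC" || F == "-fno-pic" ||
               F == "-fno-PIE" || F == "-fno-pie") {
      S.PIC = S.PIE = false;
    }
  }
  return S;
}
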
if (!Args.hasFlag(options::OPT_fmerge_all_constants,
@@ -1713,7 +1863,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_mregparm_EQ)) {
CmdArgs.push_back("-mregparm");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
@@ -1741,25 +1891,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// flag disables them after the flag enabling them, enable the codegen
// optimization. This is complicated by several "umbrella" flags.
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
options::OPT_ffinite_math_only,
options::OPT_fno_finite_math_only,
options::OPT_fhonor_infinities,
options::OPT_fno_honor_infinities))
- if (A->getOption().getID() != options::OPT_fno_finite_math_only &&
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_finite_math_only &&
A->getOption().getID() != options::OPT_fhonor_infinities)
CmdArgs.push_back("-menable-no-infs");
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
options::OPT_ffinite_math_only,
options::OPT_fno_finite_math_only,
options::OPT_fhonor_nans,
options::OPT_fno_honor_nans))
- if (A->getOption().getID() != options::OPT_fno_finite_math_only &&
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_finite_math_only &&
A->getOption().getID() != options::OPT_fhonor_nans)
CmdArgs.push_back("-menable-no-nans");
// -fmath-errno is the default on some platforms, e.g. BSD-derived OSes.
bool MathErrno = getToolChain().IsMathErrnoDefault();
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
options::OPT_fmath_errno,
options::OPT_fno_math_errno))
MathErrno = A->getOption().getID() == options::OPT_fmath_errno;
@@ -1772,38 +1927,46 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// madness.
bool AssociativeMath = false;
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
options::OPT_funsafe_math_optimizations,
options::OPT_fno_unsafe_math_optimizations,
options::OPT_fassociative_math,
options::OPT_fno_associative_math))
- if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
A->getOption().getID() != options::OPT_fno_associative_math)
AssociativeMath = true;
bool ReciprocalMath = false;
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
options::OPT_funsafe_math_optimizations,
options::OPT_fno_unsafe_math_optimizations,
options::OPT_freciprocal_math,
options::OPT_fno_reciprocal_math))
- if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
A->getOption().getID() != options::OPT_fno_reciprocal_math)
ReciprocalMath = true;
bool SignedZeros = true;
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
options::OPT_funsafe_math_optimizations,
options::OPT_fno_unsafe_math_optimizations,
options::OPT_fsigned_zeros,
options::OPT_fno_signed_zeros))
- if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
A->getOption().getID() != options::OPT_fsigned_zeros)
SignedZeros = false;
bool TrappingMath = true;
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
options::OPT_funsafe_math_optimizations,
options::OPT_fno_unsafe_math_optimizations,
options::OPT_ftrapping_math,
options::OPT_fno_trapping_math))
- if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
A->getOption().getID() != options::OPT_ftrapping_math)
TrappingMath = false;
if (!MathErrno && AssociativeMath && ReciprocalMath && !SignedZeros &&
@@ -1813,16 +1976,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Validate and pass through -fp-contract option.
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fno_fast_math,
options::OPT_ffp_contract)) {
if (A->getOption().getID() == options::OPT_ffp_contract) {
- StringRef Val = A->getValue(Args);
+ StringRef Val = A->getValue();
if (Val == "fast" || Val == "on" || Val == "off") {
CmdArgs.push_back(Args.MakeArgString("-ffp-contract=" + Val));
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Val;
}
- } else { // A is OPT_ffast_math
+ } else if (A->getOption().getID() == options::OPT_ffast_math) {
// If fast-math is set then set the fp-contract mode to fast.
CmdArgs.push_back(Args.MakeArgString("-ffp-contract=fast"));
}
@@ -1833,10 +1997,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// preprocessor macros. This is distinct from enabling any optimizations as
// these options induce language changes which must survive serialization
// and deserialization, etc.
- if (Args.hasArg(options::OPT_ffast_math))
- CmdArgs.push_back("-ffast-math");
- if (Args.hasArg(options::OPT_ffinite_math_only))
- CmdArgs.push_back("-ffinite-math-only");
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math, options::OPT_fno_fast_math))
+ if (A->getOption().matches(options::OPT_ffast_math))
+ CmdArgs.push_back("-ffast-math");
+ if (Arg *A = Args.getLastArg(options::OPT_ffinite_math_only, options::OPT_fno_fast_math))
+ if (A->getOption().matches(options::OPT_ffinite_math_only))
+ CmdArgs.push_back("-ffinite-math-only");
// Decide whether to use verbose asm. Verbose assembly is the default on
// toolchains which have the integrated assembler on by default.
@@ -1885,7 +2051,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
CmdArgs.push_back("-mlimit-float-precision");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
// FIXME: Handle -mtune=.
@@ -1893,7 +2059,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
CmdArgs.push_back("-mcode-model");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
// Add target specific cpu and features flags.
@@ -1937,7 +2103,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Pass the linker version in use.
if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
CmdArgs.push_back("-target-linker-version");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
// -mno-omit-leaf-frame-pointer is the default on Darwin.
@@ -1991,6 +2157,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// We ignore flags -gstrict-dwarf and -grecord-gcc-switches for now.
Args.ClaimAllArgs(options::OPT_g_flags_Group);
+ if (Args.hasArg(options::OPT_gcolumn_info))
+ CmdArgs.push_back("-dwarf-column-info");
Args.AddAllArgs(CmdArgs, options::OPT_ffunction_sections);
Args.AddAllArgs(CmdArgs, options::OPT_fdata_sections);
@@ -2008,7 +2176,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
C.getArgs().hasArg(options::OPT_S)) {
if (Output.isFilename()) {
CmdArgs.push_back("-coverage-file");
- CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
+ SmallString<128> absFilename(Output.getFilename());
+ llvm::sys::fs::make_absolute(absFilename);
+ CmdArgs.push_back(Args.MakeArgString(absFilename));
}
}
@@ -2047,7 +2217,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
case options::OPT_ccc_arcmt_migrate:
CmdArgs.push_back("-arcmt-migrate");
CmdArgs.push_back("-mt-migrate-directory");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_report_output);
Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_emit_arc_errors);
@@ -2062,7 +2232,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getAsString(Args) << "-ccc-arcmt-migrate";
}
CmdArgs.push_back("-mt-migrate-directory");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
if (!Args.hasArg(options::OPT_objcmt_migrate_literals,
options::OPT_objcmt_migrate_subscripting)) {
@@ -2094,7 +2264,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (A->getOption().matches(options::OPT_O4))
CmdArgs.push_back("-O3");
else if (A->getOption().matches(options::OPT_O) &&
- A->getValue(Args)[0] == '\0')
+ A->getValue()[0] == '\0')
CmdArgs.push_back("-O2");
else
A->render(Args, CmdArgs);
@@ -2132,8 +2302,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// eventually we want to do all the standard defaulting here instead of
// splitting it between the driver and clang -cc1.
if (!types::isCXX(InputType))
- Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
- "-std=", /*Joined=*/true);
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
+ "-std=", /*Joined=*/true);
+ else if (getToolChain().getTriple().getOS() == llvm::Triple::Win32)
+ CmdArgs.push_back("-std=c++11");
+
Args.AddLastArg(CmdArgs, options::OPT_trigraphs);
}
@@ -2182,18 +2355,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
options::OPT_ftemplate_depth_EQ)) {
CmdArgs.push_back("-ftemplate-depth");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_depth_EQ)) {
CmdArgs.push_back("-fconstexpr-depth");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
if (Arg *A = Args.getLastArg(options::OPT_Wlarge_by_value_copy_EQ,
options::OPT_Wlarge_by_value_copy_def)) {
if (A->getNumValues()) {
- StringRef bytes = A->getValue(Args);
+ StringRef bytes = A->getValue();
CmdArgs.push_back(Args.MakeArgString("-Wlarge-by-value-copy=" + bytes));
} else
CmdArgs.push_back("-Wlarge-by-value-copy=64"); // default value
@@ -2202,50 +2375,50 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_fbounds_checking,
options::OPT_fbounds_checking_EQ)) {
if (A->getNumValues()) {
- StringRef val = A->getValue(Args);
+ StringRef val = A->getValue();
CmdArgs.push_back(Args.MakeArgString("-fbounds-checking=" + val));
} else
CmdArgs.push_back("-fbounds-checking=1");
}
- if (Args.hasArg(options::OPT__relocatable_pch))
+ if (Args.hasArg(options::OPT_relocatable_pch))
CmdArgs.push_back("-relocatable-pch");
if (Arg *A = Args.getLastArg(options::OPT_fconstant_string_class_EQ)) {
CmdArgs.push_back("-fconstant-string-class");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
if (Arg *A = Args.getLastArg(options::OPT_ftabstop_EQ)) {
CmdArgs.push_back("-ftabstop");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
CmdArgs.push_back("-ferror-limit");
if (Arg *A = Args.getLastArg(options::OPT_ferror_limit_EQ))
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
else
CmdArgs.push_back("19");
if (Arg *A = Args.getLastArg(options::OPT_fmacro_backtrace_limit_EQ)) {
CmdArgs.push_back("-fmacro-backtrace-limit");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
if (Arg *A = Args.getLastArg(options::OPT_ftemplate_backtrace_limit_EQ)) {
CmdArgs.push_back("-ftemplate-backtrace-limit");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_backtrace_limit_EQ)) {
CmdArgs.push_back("-fconstexpr-backtrace-limit");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
// Pass -fmessage-length=.
CmdArgs.push_back("-fmessage-length");
if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
} else {
// If -fmessage-length=N was not specified, determine whether this is a
// terminal and, if so, implicitly define -fmessage-length appropriately.
@@ -2255,7 +2428,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ)) {
CmdArgs.push_back("-fvisibility");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
@@ -2268,7 +2441,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-ffreestanding");
// Forward -f (flag) options which we can pass directly.
- Args.AddLastArg(CmdArgs, options::OPT_fcatch_undefined_behavior);
Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
Args.AddLastArg(CmdArgs, options::OPT_fformat_extensions);
Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
@@ -2279,6 +2451,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_show_template_tree);
Args.AddLastArg(CmdArgs, options::OPT_fno_elide_type);
+ SanitizerArgs Sanitize(D, Args);
+ Sanitize.addArgs(Args, CmdArgs);
+
// Report an error for -faltivec on anything other than PowerPC.
if (const Arg *A = Args.getLastArg(options::OPT_faltivec))
if (!(getToolChain().getTriple().getArch() == llvm::Triple::ppc ||
@@ -2289,14 +2464,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (getToolChain().SupportsProfiling())
Args.AddLastArg(CmdArgs, options::OPT_pg);
- if (Args.hasFlag(options::OPT_faddress_sanitizer,
- options::OPT_fno_address_sanitizer, false))
- CmdArgs.push_back("-faddress-sanitizer");
-
- if (Args.hasFlag(options::OPT_fthread_sanitizer,
- options::OPT_fno_thread_sanitizer, false))
- CmdArgs.push_back("-fthread-sanitizer");
-
// -flax-vector-conversions is default.
if (!Args.hasFlag(options::OPT_flax_vector_conversions,
options::OPT_fno_lax_vector_conversions))
@@ -2317,7 +2484,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
CmdArgs.push_back("-ftrapv-handler");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
Args.AddLastArg(CmdArgs, options::OPT_ftrap_function_EQ);
@@ -2338,6 +2505,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_pthread);
+
// -stack-protector=0 is default.
unsigned StackProtectorLevel = 0;
if (Arg *A = Args.getLastArg(options::OPT_fno_stack_protector,
@@ -2356,6 +2524,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Twine(StackProtectorLevel)));
}
+ // --param ssp-buffer-size=
+ for (arg_iterator it = Args.filtered_begin(options::OPT__param),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ StringRef Str((*it)->getValue());
+ if (Str.startswith("ssp-buffer-size=")) {
+ if (StackProtectorLevel) {
+ CmdArgs.push_back("-stack-protector-buffer-size");
+ // FIXME: Verify the argument is a valid integer.
+ CmdArgs.push_back(Args.MakeArgString(Str.drop_front(16)));
+ }
+ (*it)->claim();
+ }
+ }
+
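The --param loop above recognizes a single spelling by prefix and forwards its numeric tail (drop_front(16) skips the length of "ssp-buffer-size="). A sketch of the same prefix split with std::string:

#include <string>

// Extract N from "ssp-buffer-size=N"; returns "" for any other --param.
std::string sspBufferSize(const std::string &Param) {
  const std::string Prefix = "ssp-buffer-size=";
  if (Param.compare(0, Prefix.size(), Prefix) == 0)
    return Param.substr(Prefix.size());
  return "";
}
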
// Translate -mstackrealign
if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign,
false)) {
@@ -2371,6 +2553,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
StringRef alignment = Args.getLastArgValue(options::OPT_mstack_alignment);
CmdArgs.push_back(Args.MakeArgString("-mstack-alignment=" + alignment));
}
+ if (Args.hasArg(options::OPT_mstrict_align)) {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-arm-strict-align");
+ }
// Forward -f options with positive and negative forms; we translate
// these by hand.
@@ -2428,9 +2614,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -frtti is default.
if (!Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti) ||
- KernelOrKext)
+ KernelOrKext) {
CmdArgs.push_back("-fno-rtti");
+ // -fno-rtti cannot usefully be combined with -fsanitize=vptr.
+ if (Sanitize.sanitizesVptr()) {
+ std::string NoRttiArg =
+ Args.getLastArg(options::OPT_mkernel,
+ options::OPT_fapple_kext,
+ options::OPT_fno_rtti)->getAsString(Args);
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize=vptr" << NoRttiArg;
+ }
+ }
+
// -fshort-enums=0 is default for all architectures except Hexagon.
if (Args.hasFlag(options::OPT_fshort_enums,
options::OPT_fno_short_enums,
@@ -2538,12 +2735,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fobjc-default-synthesize-properties");
}
+ // -fencode-extended-block-signature=1 is default.
+ if (getToolChain().IsEncodeExtendedBlockSignatureDefault()) {
+ CmdArgs.push_back("-fencode-extended-block-signature");
+ }
+
// Allow -fno-objc-arr to trump -fobjc-arr/-fobjc-arc.
// NOTE: This logic is duplicated in ToolChains.cpp.
bool ARC = isObjCAutoRefCount(Args);
if (ARC) {
- if (!getToolChain().SupportsObjCARC())
- D.Diag(diag::err_arc_unsupported);
+ getToolChain().CheckObjCARC();
CmdArgs.push_back("-fobjc-arc");
@@ -2630,7 +2831,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fno-pack-struct doesn't apply to -fpack-struct=.
if (Arg *A = Args.getLastArg(options::OPT_fpack_struct_EQ)) {
std::string PackStructStr = "-fpack-struct=";
- PackStructStr += A->getValue(Args);
+ PackStructStr += A->getValue();
CmdArgs.push_back(Args.MakeArgString(PackStructStr));
} else if (Args.hasFlag(options::OPT_fpack_struct,
options::OPT_fno_pack_struct, false)) {
@@ -2679,13 +2880,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) {
CmdArgs.push_back("-fdiagnostics-show-category");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_format_EQ)) {
CmdArgs.push_back("-fdiagnostics-format");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
if (Arg *A = Args.getLastArg(
@@ -2777,9 +2978,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Handle serialized diagnostics.
if (Arg *A = Args.getLastArg(options::OPT__serialize_diags)) {
CmdArgs.push_back("-serialize-diagnostic-file");
- CmdArgs.push_back(Args.MakeArgString(A->getValue(Args)));
+ CmdArgs.push_back(Args.MakeArgString(A->getValue()));
}
+ if (Args.hasArg(options::OPT_fretain_comments_from_system_headers))
+ CmdArgs.push_back("-fretain-comments-from-system-headers");
+
// Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
// parser.
Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
@@ -2789,7 +2993,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// We translate this by hand to the -cc1 argument, since nightly test uses
// it and developers have been trained to spell it with -mllvm.
- if (StringRef((*it)->getValue(Args, 0)) == "-disable-llvm-optzns")
+ if (StringRef((*it)->getValue(0)) == "-disable-llvm-optzns")
CmdArgs.push_back("-disable-llvm-optzns");
else
(*it)->render(Args, CmdArgs);
@@ -2808,7 +3012,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
const InputInfo &II = *it;
CmdArgs.push_back("-x");
- CmdArgs.push_back(types::getTypeName(II.getType()));
+ if (Args.hasArg(options::OPT_rewrite_objc))
+ CmdArgs.push_back(types::getTypeName(types::TY_PP_ObjCXX));
+ else
+ CmdArgs.push_back(types::getTypeName(II.getType()));
if (II.isFilename())
CmdArgs.push_back(II.getFilename());
else
@@ -2895,7 +3102,7 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
if (runtimeArg &&
runtimeArg->getOption().matches(options::OPT_fobjc_runtime_EQ)) {
ObjCRuntime runtime;
- StringRef value = runtimeArg->getValue(args);
+ StringRef value = runtimeArg->getValue();
if (runtime.tryParse(value)) {
getToolChain().getDriver().Diag(diag::err_drv_unknown_objc_runtime)
<< value;
@@ -2913,7 +3120,7 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
unsigned objcABIVersion = 1;
// If -fobjc-abi-version= is present, use that to set the version.
if (Arg *abiArg = args.getLastArg(options::OPT_fobjc_abi_version_EQ)) {
- StringRef value = abiArg->getValue(args);
+ StringRef value = abiArg->getValue();
if (value == "1")
objcABIVersion = 1;
else if (value == "2")
@@ -2941,7 +3148,7 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
if (Arg *abiArg = args.getLastArg(
options::OPT_fobjc_nonfragile_abi_version_EQ)) {
- StringRef value = abiArg->getValue(args);
+ StringRef value = abiArg->getValue();
if (value == "1")
nonFragileABIVersion = 1;
else if (value == "2")
@@ -2994,7 +3201,7 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
// Legacy behaviour is to target the gnustep runtime if we are in
// non-fragile mode or the GCC runtime in fragile mode.
if (isNonFragile)
- runtime = ObjCRuntime(ObjCRuntime::GNUstep, VersionTuple());
+ runtime = ObjCRuntime(ObjCRuntime::GNUstep, VersionTuple(1,6));
else
runtime = ObjCRuntime(ObjCRuntime::GCC, VersionTuple());
}
@@ -3117,7 +3324,7 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
for (ArgList::const_iterator
it = Args.begin(), ie = Args.end(); it != ie; ++it) {
Arg *A = *it;
- if (A->getOption().hasForwardToGCC()) {
+ if (forwardToGCC(A->getOption())) {
// Don't forward any -g arguments to assembly steps.
if (isa<AssembleJobAction>(JA) &&
A->getOption().matches(options::OPT_g_Group))
@@ -3135,17 +3342,17 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
RenderExtraToolArgs(JA, CmdArgs);
// If using a driver driver, force the arch.
- const std::string &Arch = getToolChain().getArchName();
+ llvm::Triple::ArchType Arch = getToolChain().getArch();
if (getToolChain().getTriple().isOSDarwin()) {
CmdArgs.push_back("-arch");
// FIXME: Remove these special cases.
- if (Arch == "powerpc")
+ if (Arch == llvm::Triple::ppc)
CmdArgs.push_back("ppc");
- else if (Arch == "powerpc64")
+ else if (Arch == llvm::Triple::ppc64)
CmdArgs.push_back("ppc64");
else
- CmdArgs.push_back(Args.MakeArgString(Arch));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getArchName()));
}
// Try to force gcc to match the tool chain we want, if we recognize
@@ -3153,9 +3360,9 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
//
// FIXME: The triple class should directly provide the information we want
// here.
- if (Arch == "i386" || Arch == "powerpc")
+ if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::ppc)
CmdArgs.push_back("-m32");
- else if (Arch == "x86_64" || Arch == "powerpc64")
+ else if (Arch == llvm::Triple::x86_64 || Arch == llvm::Triple::ppc64)
CmdArgs.push_back("-m64");
if (Output.isFilename()) {
@@ -3342,7 +3549,7 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA,
for (ArgList::const_iterator
it = Args.begin(), ie = Args.end(); it != ie; ++it) {
Arg *A = *it;
- if (A->getOption().hasForwardToGCC()) {
+ if (forwardToGCC(A->getOption())) {
// Don't forward any -g arguments to assembly steps.
if (isa<AssembleJobAction>(JA) &&
A->getOption().matches(options::OPT_g_Group))
@@ -3410,6 +3617,37 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
// Hexagon tools end.
+llvm::Triple::ArchType darwin::getArchTypeForDarwinArchName(StringRef Str) {
+ // See arch(3) and llvm-gcc's driver-driver.c. We don't implement support for
+ // archs which Darwin doesn't use.
+
+ // The matching this routine does is fairly pointless, since it is neither the
+ // complete architecture list nor a reasonable subset. The problem is that
+ // historically the driver driver accepts this and also ties its -march=
+ // handling to the architecture name, so we need to be careful before removing
+ // support for it.
+
+ // This code must be kept in sync with Clang's Darwin specific argument
+ // translation.
+
+ return llvm::StringSwitch<llvm::Triple::ArchType>(Str)
+ .Cases("ppc", "ppc601", "ppc603", "ppc604", "ppc604e", llvm::Triple::ppc)
+ .Cases("ppc750", "ppc7400", "ppc7450", "ppc970", llvm::Triple::ppc)
+ .Case("ppc64", llvm::Triple::ppc64)
+ .Cases("i386", "i486", "i486SX", "i586", "i686", llvm::Triple::x86)
+ .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4",
+ llvm::Triple::x86)
+ .Case("x86_64", llvm::Triple::x86_64)
+ // This is derived from the driver driver.
+ .Cases("arm", "armv4t", "armv5", "armv6", llvm::Triple::arm)
+ .Cases("armv7", "armv7f", "armv7k", "armv7s", "xscale", llvm::Triple::arm)
+ .Case("r600", llvm::Triple::r600)
+ .Case("nvptx", llvm::Triple::nvptx)
+ .Case("nvptx64", llvm::Triple::nvptx64)
+ .Case("amdil", llvm::Triple::amdil)
+ .Case("spir", llvm::Triple::spir)
+ .Default(llvm::Triple::UnknownArch);
+}
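
A usage sketch of the mapping above, reduced to a plain map with a few of the same entries; darwinArchToTriple and its string results are illustrative stand-ins for the real llvm::Triple::ArchType values:

#include <iostream>
#include <map>
#include <string>

// Several Darwin arch spellings fold onto one LLVM arch (subset shown).
std::string darwinArchToTriple(const std::string &Name) {
  static const std::map<std::string, std::string> M = {
    { "ppc", "ppc" },   { "ppc7400", "ppc" },  { "ppc64", "ppc64" },
    { "i386", "x86" },  { "pentium4", "x86" }, { "x86_64", "x86_64" },
    { "armv7", "arm" }, { "xscale", "arm" },
  };
  std::map<std::string, std::string>::const_iterator It = M.find(Name);
  return It == M.end() ? "unknown" : It->second;
}

int main() {
  std::cout << darwinArchToTriple("pentium4") << "\n"; // x86
}
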
const char *darwin::CC1::getCC1Name(types::ID Type) const {
switch (Type) {
@@ -3458,7 +3696,7 @@ darwin::CC1::getDependencyFileName(const ArgList &Args,
std::string Res;
if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) {
- std::string Str(OutputOpt->getValue(Args));
+ std::string Str(OutputOpt->getValue());
Res = Str.substr(0, Str.rfind('.'));
} else {
Res = darwin::CC1::getBaseInputStem(Args, Inputs);
@@ -3547,6 +3785,7 @@ void darwin::CC1::RemoveCC1UnsupportedArgs(ArgStringList &CmdArgs) const {
.Case("duplicate-method-arg", true)
.Case("dynamic-class-memaccess", true)
.Case("enum-compare", true)
+ .Case("enum-conversion", true)
.Case("exit-time-destructors", true)
.Case("gnu", true)
.Case("gnu-designator", true)
@@ -3556,6 +3795,7 @@ void darwin::CC1::RemoveCC1UnsupportedArgs(ArgStringList &CmdArgs) const {
.Case("implicit-atomic-properties", true)
.Case("incompatible-pointer-types", true)
.Case("incomplete-implementation", true)
+ .Case("int-conversion", true)
.Case("initializer-overrides", true)
.Case("invalid-noreturn", true)
.Case("invalid-token-paste", true)
@@ -3620,7 +3860,10 @@ void darwin::CC1::AddCC1Args(const ArgList &Args,
CheckCodeGenerationOptions(D, Args);
// Derived from cc1 spec.
- if (!Args.hasArg(options::OPT_mkernel) && !Args.hasArg(options::OPT_static) &&
+ if ((!Args.hasArg(options::OPT_mkernel) ||
+ (getDarwinToolChain().isTargetIPhoneOS() &&
+ !getDarwinToolChain().isIPhoneOSVersionLT(6, 0))) &&
+ !Args.hasArg(options::OPT_static) &&
!Args.hasArg(options::OPT_mdynamic_no_pic))
CmdArgs.push_back("-fPIC");
@@ -3673,7 +3916,7 @@ void darwin::CC1::AddCC1OptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
Args.hasArg(options::OPT_o)) {
Arg *OutputOpt = Args.getLastArg(options::OPT_o);
CmdArgs.push_back("-auxbase-strip");
- CmdArgs.push_back(OutputOpt->getValue(Args));
+ CmdArgs.push_back(OutputOpt->getValue());
} else {
CmdArgs.push_back("-auxbase");
CmdArgs.push_back(darwin::CC1::getBaseInputStem(Args, Inputs));
@@ -3812,7 +4055,7 @@ void darwin::CC1::AddCPPUniqueOptionsArgs(const ArgList &Args,
Args.AddLastArg(CmdArgs, options::OPT_P);
// FIXME: Handle %I properly.
- if (getToolChain().getArchName() == "x86_64") {
+ if (getToolChain().getArch() == llvm::Triple::x86_64) {
CmdArgs.push_back("-imultilib");
CmdArgs.push_back("x86_64");
}
@@ -3838,7 +4081,7 @@ void darwin::CC1::AddCPPUniqueOptionsArgs(const ArgList &Args,
(Args.hasArg(options::OPT_MD) || Args.hasArg(options::OPT_MMD))) {
if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) {
CmdArgs.push_back("-MQ");
- CmdArgs.push_back(OutputOpt->getValue(Args));
+ CmdArgs.push_back(OutputOpt->getValue());
}
}
@@ -4074,9 +4317,11 @@ void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-force_cpusubtype_ALL");
if (getToolChain().getTriple().getArch() != llvm::Triple::x86_64 &&
- (Args.hasArg(options::OPT_mkernel) ||
- Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_fapple_kext)))
+ (((Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext)) &&
+ (!getDarwinToolChain().isTargetIPhoneOS() ||
+ getDarwinToolChain().isIPhoneOSVersionLT(6, 0))) ||
+ Args.hasArg(options::OPT_static)))
CmdArgs.push_back("-static");
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
@@ -4111,16 +4356,29 @@ void darwin::DarwinTool::AddDarwinArch(const ArgList &Args,
CmdArgs.push_back("-force_cpusubtype_ALL");
}
+bool darwin::Link::NeedsTempPath(const InputInfoList &Inputs) const {
+ // We only need to generate a temp path for LTO if we aren't compiling object
+ // files. When compiling source files, we run 'dsymutil' after linking. We
+ // don't run 'dsymutil' when compiling object files.
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it)
+ if (it->getType() != types::TY_Object)
+ return true;
+
+ return false;
+}
+
void darwin::Link::AddLinkArgs(Compilation &C,
const ArgList &Args,
- ArgStringList &CmdArgs) const {
+ ArgStringList &CmdArgs,
+ const InputInfoList &Inputs) const {
const Driver &D = getToolChain().getDriver();
const toolchains::Darwin &DarwinTC = getDarwinToolChain();
unsigned Version[3] = { 0, 0, 0 };
if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
bool HadExtra;
- if (!Driver::GetReleaseVersion(A->getValue(Args), Version[0],
+ if (!Driver::GetReleaseVersion(A->getValue(), Version[0],
Version[1], Version[2], HadExtra) ||
HadExtra)
D.Diag(diag::err_drv_invalid_version_number)
@@ -4141,7 +4399,7 @@ void darwin::Link::AddLinkArgs(Compilation &C,
ie = Args.filtered_end(); it != ie; ++it) {
const Arg *A = *it;
for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
- if (StringRef(A->getValue(Args, i)) == "-kext")
+ if (StringRef(A->getValue(i)) == "-kext")
UsesLdClassic = true;
}
}
@@ -4152,7 +4410,7 @@ void darwin::Link::AddLinkArgs(Compilation &C,
// If we are using LTO, then automatically create a temporary file path for
// the linker to use, so that it's lifetime will extend past a possible
// dsymutil step.
- if (Version[0] >= 116 && D.IsUsingLTO(Args)) {
+ if (Version[0] >= 116 && D.IsUsingLTO(Args) && NeedsTempPath(Inputs)) {
const char *TmpPath = C.getArgs().MakeArgString(
D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object)));
C.addTempFile(TmpPath);
@@ -4287,7 +4545,7 @@ void darwin::Link::AddLinkArgs(Compilation &C,
CmdArgs.push_back(C.getArgs().MakeArgString(sysroot));
} else if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
CmdArgs.push_back("-syslibroot");
- CmdArgs.push_back(A->getValue(Args));
+ CmdArgs.push_back(A->getValue());
}
Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace);
@@ -4339,7 +4597,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
// I'm not sure why this particular decomposition exists in gcc, but
// we follow suite for ease of comparison.
- AddLinkArgs(C, Args, CmdArgs);
+ AddLinkArgs(C, Args, CmdArgs, Inputs);
Args.AddAllArgs(CmdArgs, options::OPT_d_Flag);
Args.AddAllArgs(CmdArgs, options::OPT_s);
@@ -4424,7 +4682,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
} else if (getDarwinToolChain().isTargetIPhoneOS()) {
if (getDarwinToolChain().isIPhoneOSVersionLT(3, 1))
CmdArgs.push_back("-lcrt1.o");
- else
+ else if (getDarwinToolChain().isIPhoneOSVersionLT(6, 0))
CmdArgs.push_back("-lcrt1.3.1.o");
} else {
if (getDarwinToolChain().isMacosxVersionLT(10, 5))
@@ -4452,11 +4710,12 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
- // If we're building a dynamic lib with -faddress-sanitizer, unresolved
- // symbols may appear. Mark all of them as dynamic_lookup.
- // Linking executables is handled in lib/Driver/ToolChains.cpp.
- if (Args.hasFlag(options::OPT_faddress_sanitizer,
- options::OPT_fno_address_sanitizer, false)) {
+ SanitizerArgs Sanitize(getToolChain().getDriver(), Args);
+ // If we're building a dynamic lib with -fsanitize=address, or
+ // -fsanitize=undefined, unresolved symbols may appear. Mark all
+ // of them as dynamic_lookup. Linking executables is handled in
+ // lib/Driver/ToolChains.cpp.
+ if (Sanitize.needsAsanRt() || Sanitize.needsUbsanRt()) {
if (Args.hasArg(options::OPT_dynamiclib) ||
Args.hasArg(options::OPT_bundle)) {
CmdArgs.push_back("-undefined");
@@ -4475,14 +4734,14 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
!Args.hasArg(options::OPT_nodefaultlibs)) {
// Avoid linking compatibility stubs on i386 mac.
if (!getDarwinToolChain().isTargetMacOS() ||
- getDarwinToolChain().getArchName() != "i386") {
+ getDarwinToolChain().getArch() != llvm::Triple::x86) {
// If we don't have ARC or subscripting runtime support, link in the
// runtime stubs. We have to do this *before* adding any of the normal
// linker inputs so that its initializer gets run first.
ObjCRuntime runtime =
getDarwinToolChain().getDefaultObjCRuntime(/*nonfragile*/ true);
// We use arclite library for both ARC and subscripting support.
- if ((!runtime.hasARC() && isObjCAutoRefCount(Args)) ||
+ if ((!runtime.hasNativeARC() && isObjCAutoRefCount(Args)) ||
!runtime.hasSubscripting())
getDarwinToolChain().AddLinkARCArgs(Args, CmdArgs);
}
@@ -4938,14 +5197,21 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
// the default system libraries. Just mimic this for now.
CmdArgs.push_back("-lgcc");
- if (Args.hasArg(options::OPT_pthread))
- CmdArgs.push_back("-lpthread");
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (!Args.hasArg(options::OPT_shared) &&
+ Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
if (!Args.hasArg(options::OPT_shared)) {
- if (Args.hasArg(options::OPT_pg))
+ if (Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-lc_p");
else
CmdArgs.push_back("-lc");
}
+
CmdArgs.push_back("-lgcc");
}
@@ -5057,8 +5323,14 @@ void bitrig::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lm");
}
- if (Args.hasArg(options::OPT_pthread))
- CmdArgs.push_back("-lpthread");
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (!Args.hasArg(options::OPT_shared) &&
+ Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-lc_p");
@@ -5109,17 +5381,48 @@ void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
// When building 32-bit code on FreeBSD/amd64, we have to explicitly
// instruct as in the base system to assemble 32-bit code.
- if (getToolChain().getArchName() == "i386")
+ if (getToolChain().getArch() == llvm::Triple::x86)
CmdArgs.push_back("--32");
-
- if (getToolChain().getArchName() == "powerpc")
+ else if (getToolChain().getArch() == llvm::Triple::ppc)
CmdArgs.push_back("-a32");
+ else if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mipsel ||
+ getToolChain().getArch() == llvm::Triple::mips64 ||
+ getToolChain().getArch() == llvm::Triple::mips64el) {
+ StringRef CPUName;
+ StringRef ABIName;
+ getMipsCPUAndABI(Args, getToolChain(), CPUName, ABIName);
- // Set byte order explicitly
- if (getToolChain().getArchName() == "mips")
- CmdArgs.push_back("-EB");
- else if (getToolChain().getArchName() == "mipsel")
- CmdArgs.push_back("-EL");
+ CmdArgs.push_back("-march");
+ CmdArgs.push_back(CPUName.data());
+
+ // Convert ABI name to the GNU tools acceptable variant.
+ if (ABIName == "o32")
+ ABIName = "32";
+ else if (ABIName == "n64")
+ ABIName = "64";
+
+ CmdArgs.push_back("-mabi");
+ CmdArgs.push_back(ABIName.data());
+
+ if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("-EB");
+ else
+ CmdArgs.push_back("-EL");
+
+ Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
+ options::OPT_fpic, options::OPT_fno_pic,
+ options::OPT_fPIE, options::OPT_fno_PIE,
+ options::OPT_fpie, options::OPT_fno_pie);
+ if (LastPICArg &&
+ (LastPICArg->getOption().matches(options::OPT_fPIC) ||
+ LastPICArg->getOption().matches(options::OPT_fpic) ||
+ LastPICArg->getOption().matches(options::OPT_fPIE) ||
+ LastPICArg->getOption().matches(options::OPT_fpie))) {
+ CmdArgs.push_back("-KPIC");
+ }
+ }
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
options::OPT_Xassembler);
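
GNU as on FreeBSD wants numeric ABI spellings, so the block above rewrites the Clang names before passing -mabi. A sketch of that conversion (o32 and n64 are rewritten; n32 already matches):

#include <string>

// Convert a Clang MIPS ABI name to the spelling GNU as accepts.
std::string gnuABIName(std::string ABI) {
  if (ABI == "o32") return "32";
  if (ABI == "n64") return "64";
  return ABI; // "n32" and anything else pass through unchanged
}
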
@@ -5143,7 +5446,9 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const Driver &D = getToolChain().getDriver();
+ const toolchains::FreeBSD& ToolChain =
+ static_cast<const toolchains::FreeBSD&>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
@@ -5157,6 +5462,9 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-Bstatic");
} else {
@@ -5169,8 +5477,8 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back("/libexec/ld-elf.so.1");
}
- if (getToolChain().getTriple().getOSMajorVersion() >= 9) {
- llvm::Triple::ArchType Arch = getToolChain().getArch();
+ if (ToolChain.getTriple().getOSMajorVersion() >= 9) {
+ llvm::Triple::ArchType Arch = ToolChain.getArch();
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::sparc ||
Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
CmdArgs.push_back("--hash-style=both");
@@ -5181,12 +5489,12 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
// When building 32-bit code on FreeBSD/amd64, we have to explicitly
// instruct ld in the base system to link 32-bit code.
- if (getToolChain().getArchName() == "i386") {
+ if (ToolChain.getArch() == llvm::Triple::x86) {
CmdArgs.push_back("-m");
CmdArgs.push_back("elf_i386_fbsd");
}
- if (getToolChain().getArchName() == "powerpc") {
+ if (ToolChain.getArch() == llvm::Triple::ppc) {
CmdArgs.push_back("-m");
CmdArgs.push_back("elf32ppc_fbsd");
}
@@ -5200,29 +5508,33 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
+ const char *crt1 = NULL;
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("gcrt1.o")));
- else {
- const char *crt = Args.hasArg(options::OPT_pie) ? "Scrt1.o" : "crt1.o";
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath(crt)));
- }
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crti.o")));
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crtbegin.o")));
- } else {
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crti.o")));
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crtbeginS.o")));
+ crt1 = "gcrt1.o";
+ else if (Args.hasArg(options::OPT_pie))
+ crt1 = "Scrt1.o";
+ else
+ crt1 = "crt1.o";
}
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+
+ const char *crtbegin = NULL;
+ if (Args.hasArg(options::OPT_static))
+ crtbegin = "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
- const ToolChain::path_list Paths = getToolChain().getFilePaths();
+ const ToolChain::path_list Paths = ToolChain.getFilePaths();
for (ToolChain::path_list::const_iterator i = Paths.begin(), e = Paths.end();
i != e; ++i)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + *i));
@@ -5233,12 +5545,12 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
Args.AddAllArgs(CmdArgs, options::OPT_r);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX) {
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
if (Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-lm_p");
else
@@ -5291,20 +5603,17 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
- if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
- "crtend.o")));
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
else
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
- "crtendS.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
- "crtn.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
- addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+ addProfileRT(ToolChain, Args, CmdArgs, ToolChain.getTriple());
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ Args.MakeArgString(ToolChain.GetProgramPath("ld"));
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -5321,9 +5630,9 @@ void netbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--32");
// Set byte order explicitly
- if (getToolChain().getArchName() == "mips")
+ if (getToolChain().getArch() == llvm::Triple::mips)
CmdArgs.push_back("-EB");
- else if (getToolChain().getArchName() == "mipsel")
+ else if (getToolChain().getArch() == llvm::Triple::mipsel)
CmdArgs.push_back("-EL");
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
@@ -5548,7 +5857,7 @@ void linuxtools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
static void AddLibgcc(llvm::Triple Triple, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
- bool isAndroid = Triple.getEnvironment() == llvm::Triple::ANDROIDEABI;
+ bool isAndroid = Triple.getEnvironment() == llvm::Triple::Android;
bool StaticLibgcc = isAndroid || Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_static_libgcc);
if (!D.CCCIsCXX)
@@ -5571,6 +5880,11 @@ static void AddLibgcc(llvm::Triple Triple, const Driver &D,
CmdArgs.push_back("-lgcc");
}
+static bool hasMipsN32ABIArg(const ArgList &Args) {
+ Arg *A = Args.getLastArg(options::OPT_mabi_EQ);
+ return A && (A->getValue() == StringRef("n32"));
+}
+
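hasMipsN32ABIArg feeds the linker-emulation choice a few hunks below: on 64-bit MIPS the n32 ABI selects a 32-bit emulation, and endianness picks the b/l variant. A sketch of that selection:

#include <string>

// Pick the GNU ld emulation for a 64-bit MIPS target.
std::string mips64Emulation(bool LittleEndian, bool N32) {
  if (N32)
    return LittleEndian ? "elf32ltsmipn32" : "elf32btsmipn32";
  return LittleEndian ? "elf64ltsmip" : "elf64btsmip";
}
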
void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -5579,8 +5893,8 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
const toolchains::Linux& ToolChain =
static_cast<const toolchains::Linux&>(getToolChain());
const Driver &D = ToolChain.getDriver();
- const bool isAndroid = ToolChain.getTriple().getEnvironment() ==
- llvm::Triple::ANDROIDEABI;
+ const bool isAndroid =
+ ToolChain.getTriple().getEnvironment() == llvm::Triple::Android;
ArgStringList CmdArgs;
@@ -5627,10 +5941,18 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("elf32btsmip");
else if (ToolChain.getArch() == llvm::Triple::mipsel)
CmdArgs.push_back("elf32ltsmip");
- else if (ToolChain.getArch() == llvm::Triple::mips64)
- CmdArgs.push_back("elf64btsmip");
- else if (ToolChain.getArch() == llvm::Triple::mips64el)
- CmdArgs.push_back("elf64ltsmip");
+ else if (ToolChain.getArch() == llvm::Triple::mips64) {
+ if (hasMipsN32ABIArg(Args))
+ CmdArgs.push_back("elf32btsmipn32");
+ else
+ CmdArgs.push_back("elf64btsmip");
+ }
+ else if (ToolChain.getArch() == llvm::Triple::mips64el) {
+ if (hasMipsN32ABIArg(Args))
+ CmdArgs.push_back("elf32ltsmipn32");
+ else
+ CmdArgs.push_back("elf64ltsmip");
+ }
else
CmdArgs.push_back("elf_x86_64");
@@ -5642,8 +5964,7 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-static");
} else if (Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back("-shared");
- if ((ToolChain.getArch() == llvm::Triple::arm
- || ToolChain.getArch() == llvm::Triple::thumb) && isAndroid) {
+ if (isAndroid) {
CmdArgs.push_back("-Bsymbolic");
}
}
@@ -5668,8 +5989,12 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.getArch() == llvm::Triple::mipsel)
CmdArgs.push_back("/lib/ld.so.1");
else if (ToolChain.getArch() == llvm::Triple::mips64 ||
- ToolChain.getArch() == llvm::Triple::mips64el)
- CmdArgs.push_back("/lib64/ld.so.1");
+ ToolChain.getArch() == llvm::Triple::mips64el) {
+ if (hasMipsN32ABIArg(Args))
+ CmdArgs.push_back("/lib32/ld.so.1");
+ else
+ CmdArgs.push_back("/lib64/ld.so.1");
+ }
else if (ToolChain.getArch() == llvm::Triple::ppc)
CmdArgs.push_back("/lib/ld.so.1");
else if (ToolChain.getArch() == llvm::Triple::ppc64)
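The dynamic-linker path follows the same split: n32 binaries are 64-bit code with 32-bit pointers, so they get their own loader directory. As chosen above:

    // mips / mipsel           -> /lib/ld.so.1
    // mips64[el] with n64     -> /lib64/ld.so.1
    // mips64[el] with n32     -> /lib32/ld.so.1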
@@ -5700,11 +6025,16 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
const char *crtbegin;
if (Args.hasArg(options::OPT_static))
crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
- else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ else if (Args.hasArg(options::OPT_shared))
crtbegin = isAndroid ? "crtbegin_so.o" : "crtbeginS.o";
+ else if (Args.hasArg(options::OPT_pie))
+ crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbeginS.o";
else
crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbegin.o";
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+
+ // Add crtfastmath.o if available and fast math is enabled.
+ ToolChain.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
}
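For a default dynamically linked executable, the pieces selected here land on the link line in the usual glibc order; a rough sketch of the layout (the crt1.o/crti.o names come from surrounding driver code that this excerpt does not show):

    // ld ... crt1.o crti.o crtbegin.o [crtfastmath.o] \
    //        <user objects, -L paths, libraries> \
    //        crtend.o crtn.o

On x86 targets, crtfastmath.o is GCC's startup object that sets the FTZ/DAZ bits in MXCSR; judging by its name, AddFastMathRuntimeIfAvailable adds it only when a fast-math flag is in effect and the object actually exists in the toolchain, though its definition lies outside this hunk.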
Args.AddAllArgs(CmdArgs, options::OPT_L);
@@ -5729,6 +6059,12 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ SanitizerArgs Sanitize(D, Args);
+
+ // Call this before we add the C++ ABI library.
+ if (Sanitize.needsUbsanRt())
+ addUbsanRTLinux(getToolChain(), Args, CmdArgs);
+
if (D.CCCIsCXX &&
!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
@@ -5743,8 +6079,10 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
// Call this before we add the C run-time.
- addAsanRTLinux(getToolChain(), Args, CmdArgs);
- addTsanRTLinux(getToolChain(), Args, CmdArgs);
+ if (Sanitize.needsAsanRt())
+ addAsanRTLinux(getToolChain(), Args, CmdArgs);
+ if (Sanitize.needsTsanRt())
+ addTsanRTLinux(getToolChain(), Args, CmdArgs);
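The sanitizer runtimes are now gated on what -fsanitize= actually requested rather than probed unconditionally, with ubsan deliberately linked before the C++ ABI library and asan/tsan just before the C run-time. The pattern, restated:

    // SanitizerArgs Sanitize(D, Args);   // parses -fsanitize=... flags
    // if (Sanitize.needsUbsanRt())       // e.g. -fsanitize=undefined
    //   addUbsanRTLinux(...);            // before the C++ ABI library
    // if (Sanitize.needsAsanRt())        // e.g. -fsanitize=address
    //   addAsanRTLinux(...);             // before the C run-time
    // if (Sanitize.needsTsanRt())        // e.g. -fsanitize=thread
    //   addTsanRTLinux(...);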
if (!Args.hasArg(options::OPT_nostdlib)) {
if (!Args.hasArg(options::OPT_nodefaultlibs)) {
@@ -5767,8 +6105,10 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostartfiles)) {
const char *crtend;
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ if (Args.hasArg(options::OPT_shared))
crtend = isAndroid ? "crtend_so.o" : "crtendS.o";
+ else if (Args.hasArg(options::OPT_pie))
+ crtend = isAndroid ? "crtend_android.o" : "crtendS.o";
else
crtend = isAndroid ? "crtend_android.o" : "crtend.o";
@@ -5874,7 +6214,7 @@ void dragonfly::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
// When building 32-bit code on DragonFly/pc64, we have to explicitly
// instruct as in the base system to assemble 32-bit code.
- if (getToolChain().getArchName() == "i386")
+ if (getToolChain().getArch() == llvm::Triple::x86)
CmdArgs.push_back("--32");
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
@@ -5918,7 +6258,7 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
// When building 32-bit code on DragonFly/pc64, we have to explicitly
// instruct ld in the base system to link 32-bit code.
- if (getToolChain().getArchName() == "i386") {
+ if (getToolChain().getArch() == llvm::Triple::x86) {
CmdArgs.push_back("-m");
CmdArgs.push_back("elf_i386");
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h
index 999c57a..5898c66 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.h
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h
@@ -202,6 +202,8 @@ namespace hexagon {
namespace darwin {
+ llvm::Triple::ArchType getArchTypeForDarwinArchName(StringRef Str);
+
class LLVM_LIBRARY_VISIBILITY DarwinTool : public Tool {
virtual void anchor();
protected:
@@ -288,8 +290,9 @@ namespace darwin {
};
class LLVM_LIBRARY_VISIBILITY Link : public DarwinTool {
+ bool NeedsTempPath(const InputInfoList &Inputs) const;
void AddLinkArgs(Compilation &C, const ArgList &Args,
- ArgStringList &CmdArgs) const;
+ ArgStringList &CmdArgs, const InputInfoList &Inputs) const;
public:
Link(const ToolChain &TC) : DarwinTool("darwin::Link", "linker", TC) {}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
index 9d8fcfd..862025e 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
@@ -94,20 +94,6 @@ bool types::isAcceptedByClang(ID Id) {
}
}
-bool types::isOnlyAcceptedByClang(ID Id) {
- switch (Id) {
- default:
- return false;
-
- case TY_AST:
- case TY_LLVM_IR:
- case TY_LLVM_BC:
- case TY_RewrittenObjC:
- case TY_RewrittenLegacyObjC:
- return true;
- }
-}
-
bool types::isObjC(ID Id) {
switch (Id) {
default:
diff --git a/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp
index 6827034..de2d535 100644
--- a/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp
@@ -81,19 +81,15 @@ bool Windows::IsIntegratedAssemblerDefault() const {
}
bool Windows::IsUnwindTablesDefault() const {
- // FIXME: Gross; we should probably have some separate target
- // definition, possibly even reusing the one in clang.
- return getArchName() == "x86_64";
+ return getArch() == llvm::Triple::x86_64;
}
-const char *Windows::GetDefaultRelocationModel() const {
- return "static";
+bool Windows::isPICDefault() const {
+ return getArch() == llvm::Triple::x86_64;
}
-const char *Windows::GetForcedPicModel() const {
- if (getArchName() == "x86_64")
- return "pic";
- return 0;
+bool Windows::isPICDefaultForced() const {
+ return getArch() == llvm::Triple::x86_64;
}
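The two string-returning relocation-model hooks collapse into boolean predicates. The resulting contract on Windows:

    // getArch()   isPICDefault()   isPICDefaultForced()
    // x86         false            false
    // x86_64      true             true   (PIC cannot be disabled)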
// FIXME: This probably should go to some platform utils place.
diff --git a/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index d15b7a7..de96fee 100644
--- a/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -920,6 +920,7 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_ARCExtendBlockObject:
case CK_NonAtomicToAtomic:
case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
return false;
}
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
index 0f0d835..882d400 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
@@ -58,7 +58,7 @@ namespace {
bool shouldWalkTypesOfTypeLocs() const { return false; }
bool TraverseDecl(Decl *D) {
- if (filterMatches(D)) {
+ if (D != NULL && filterMatches(D)) {
Out.changeColor(llvm::raw_ostream::BLUE) <<
(Dump ? "Dumping " : "Printing ") << getName(D) << ":\n";
Out.resetColor();
@@ -66,6 +66,7 @@ namespace {
D->dump(Out);
else
D->print(Out, /*Indentation=*/0, /*PrintInstantiation=*/true);
+ Out << "\n";
// Don't traverse child nodes to avoid output duplication.
return true;
}
@@ -89,8 +90,6 @@ namespace {
class ASTDeclNodeLister : public ASTConsumer,
public RecursiveASTVisitor<ASTDeclNodeLister> {
- typedef RecursiveASTVisitor<ASTDeclNodeLister> base;
-
public:
ASTDeclNodeLister(raw_ostream *Out = NULL)
: Out(Out ? *Out : llvm::outs()) {}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
index 9feb3de..31b1df4 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
@@ -41,8 +41,9 @@ void ASTMergeAction::ExecuteAction() {
DiagIDs(CI.getDiagnostics().getDiagnosticIDs());
for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
IntrusiveRefCntPtr<DiagnosticsEngine>
- Diags(new DiagnosticsEngine(DiagIDs, CI.getDiagnostics().getClient(),
- /*ShouldOwnClient=*/false));
+ Diags(new DiagnosticsEngine(DiagIDs, &CI.getDiagnosticOpts(),
+ CI.getDiagnostics().getClient(),
+ /*ShouldOwnClient=*/false));
ASTUnit *Unit = ASTUnit::LoadFromASTFile(ASTFiles[I], Diags,
CI.getFileSystemOpts(), false);
if (!Unit)
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
index 42a6772..5576854 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
@@ -27,6 +27,7 @@
#include "clang/Serialization/ASTWriter.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Diagnostic.h"
@@ -180,6 +181,14 @@ void OnDiskData::Cleanup() {
CleanPreambleFile();
}
+struct ASTUnit::ASTWriterData {
+ SmallString<128> Buffer;
+ llvm::BitstreamWriter Stream;
+ ASTWriter Writer;
+
+ ASTWriterData() : Stream(Buffer), Writer(Stream) { }
+};
+
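Member order in ASTWriterData is load-bearing: C++ constructs members in declaration order, so Buffer must precede Stream (which writes into it) and Stream must precede Writer (which emits through it). Annotated:

    struct ASTUnit::ASTWriterData {
      SmallString<128> Buffer;       // backing storage, constructed first
      llvm::BitstreamWriter Stream;  // writes bits into Buffer
      ASTWriter Writer;              // serializes the AST through Stream

      ASTWriterData() : Stream(Buffer), Writer(Stream) { }
    };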
void ASTUnit::clearFileLevelDecls() {
for (FileDeclsTy::iterator
I = FileDecls.begin(), E = FileDecls.end(); I != E; ++I)
@@ -495,8 +504,8 @@ class ASTInfoCollector : public ASTReaderListener {
ASTContext &Context;
LangOptions &LangOpt;
HeaderSearch &HSI;
+ IntrusiveRefCntPtr<TargetOptions> &TargetOpts;
IntrusiveRefCntPtr<TargetInfo> &Target;
- std::string &Predefines;
unsigned &Counter;
unsigned NumHeaderInfos;
@@ -504,54 +513,38 @@ class ASTInfoCollector : public ASTReaderListener {
bool InitializedLanguage;
public:
ASTInfoCollector(Preprocessor &PP, ASTContext &Context, LangOptions &LangOpt,
- HeaderSearch &HSI,
+ HeaderSearch &HSI,
+ IntrusiveRefCntPtr<TargetOptions> &TargetOpts,
IntrusiveRefCntPtr<TargetInfo> &Target,
- std::string &Predefines,
unsigned &Counter)
- : PP(PP), Context(Context), LangOpt(LangOpt), HSI(HSI), Target(Target),
- Predefines(Predefines), Counter(Counter), NumHeaderInfos(0),
+ : PP(PP), Context(Context), LangOpt(LangOpt), HSI(HSI),
+ TargetOpts(TargetOpts), Target(Target),
+ Counter(Counter), NumHeaderInfos(0),
InitializedLanguage(false) {}
- virtual bool ReadLanguageOptions(const LangOptions &LangOpts) {
+ virtual bool ReadLanguageOptions(const LangOptions &LangOpts,
+ bool Complain) {
if (InitializedLanguage)
return false;
LangOpt = LangOpts;
-
- // Initialize the preprocessor.
- PP.Initialize(*Target);
-
- // Initialize the ASTContext
- Context.InitBuiltinTypes(*Target);
-
InitializedLanguage = true;
+
+ updated();
return false;
}
- virtual bool ReadTargetTriple(StringRef Triple) {
+ virtual bool ReadTargetOptions(const TargetOptions &TargetOpts,
+ bool Complain) {
// If we've already initialized the target, don't do it again.
if (Target)
return false;
- // FIXME: This is broken, we should store the TargetOptions in the AST file.
- TargetOptions TargetOpts;
- TargetOpts.ABI = "";
- TargetOpts.CXXABI = "";
- TargetOpts.CPU = "";
- TargetOpts.Features.clear();
- TargetOpts.Triple = Triple;
- Target = TargetInfo::CreateTargetInfo(PP.getDiagnostics(), TargetOpts);
- return false;
- }
+ this->TargetOpts = new TargetOptions(TargetOpts);
+ Target = TargetInfo::CreateTargetInfo(PP.getDiagnostics(),
+ *this->TargetOpts);
- virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
- StringRef OriginalFileName,
- std::string &SuggestedPredefines,
- FileManager &FileMgr) {
- Predefines = Buffers[0].Data;
- for (unsigned I = 1, N = Buffers.size(); I != N; ++I) {
- Predefines += Buffers[I].Data;
- }
+ updated();
return false;
}
@@ -559,9 +552,27 @@ public:
HSI.setHeaderFileInfoForUID(HFI, NumHeaderInfos++);
}
- virtual void ReadCounter(unsigned Value) {
+ virtual void ReadCounter(const serialization::ModuleFile &M, unsigned Value) {
Counter = Value;
}
+
+private:
+ void updated() {
+ if (!Target || !InitializedLanguage)
+ return;
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Target->setForcedLangOptions(LangOpt);
+
+ // Initialize the preprocessor.
+ PP.Initialize(*Target);
+
+ // Initialize the ASTContext
+ Context.InitBuiltinTypes(*Target);
+ }
};
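Because the AST reader may deliver language options and target options in either order, both callbacks funnel into updated(), which performs the dependent initialization exactly once, after both pieces have arrived. In outline:

    // ReadLanguageOptions(): LangOpt = ...; InitializedLanguage = true; updated();
    // ReadTargetOptions():   Target = CreateTargetInfo(...);            updated();
    // updated():
    //   if (!Target || !InitializedLanguage) return;   // still waiting
    //   Target->setForcedLangOptions(LangOpt);
    //   PP.Initialize(*Target);
    //   Context.InitBuiltinTypes(*Target);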
class StoredDiagnosticConsumer : public DiagnosticConsumer {
@@ -621,8 +632,10 @@ void StoredDiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level Level,
StoredDiags.push_back(StoredDiagnostic(Level, Info));
}
-const std::string &ASTUnit::getOriginalSourceFileName() {
- return OriginalSourceFile;
+ASTDeserializationListener *ASTUnit::getDeserializationListener() {
+ if (WriterData)
+ return &WriterData->Writer;
+ return 0;
}
llvm::MemoryBuffer *ASTUnit::getBufferForFile(StringRef Filename,
@@ -638,11 +651,11 @@ void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
- DiagnosticOptions DiagOpts;
DiagnosticConsumer *Client = 0;
if (CaptureDiagnostics)
Client = new StoredDiagnosticConsumer(AST.StoredDiagnostics);
- Diags = CompilerInstance::createDiagnostics(DiagOpts, ArgEnd-ArgBegin,
+ Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions(),
+ ArgEnd-ArgBegin,
ArgBegin, Client,
/*ShouldOwnClient=*/true,
/*ShouldCloneClient=*/false);
@@ -679,7 +692,10 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
AST->getFileManager(),
UserFilesAreVolatile);
- AST->HeaderInfo.reset(new HeaderSearch(AST->getFileManager(),
+ AST->HSOpts = new HeaderSearchOptions();
+
+ AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
+ AST->getFileManager(),
AST->getDiagnostics(),
AST->ASTFileLangOpts,
/*Target=*/0));
@@ -734,12 +750,12 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
// Gather Info for preprocessor construction later on.
HeaderSearch &HeaderInfo = *AST->HeaderInfo.get();
- std::string Predefines;
unsigned Counter;
OwningPtr<ASTReader> Reader;
- AST->PP = new Preprocessor(AST->getDiagnostics(), AST->ASTFileLangOpts,
+ AST->PP = new Preprocessor(new PreprocessorOptions(),
+ AST->getDiagnostics(), AST->ASTFileLangOpts,
/*Target=*/0, AST->getSourceManager(), HeaderInfo,
*AST,
/*IILookup=*/0,
@@ -757,10 +773,12 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
/*DelayInitialization=*/true);
ASTContext &Context = *AST->Ctx;
+ bool disableValid = false;
+ if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
+ disableValid = true;
Reader.reset(new ASTReader(PP, Context,
/*isysroot=*/"",
- /*DisableValidation=*/false,
- /*DisableStatCache=*/false,
+ /*DisableValidation=*/disableValid,
AllowPCHWithCompilerErrors));
// Recover resources if we crash before exiting this method.
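The escape hatch is read straight from the environment rather than threaded through the libclang API, so existing clients can disable the now-stricter PCH up-to-date checks without recompiling. Hypothetical usage, with a made-up tool name:

    // $ export LIBCLANG_DISABLE_PCH_VALIDATION=1
    // $ my-libclang-tool precompiled.ast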
@@ -769,21 +787,25 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
Reader->setListener(new ASTInfoCollector(*AST->PP, Context,
AST->ASTFileLangOpts, HeaderInfo,
- AST->Target, Predefines, Counter));
+ AST->TargetOpts, AST->Target,
+ Counter));
- switch (Reader->ReadAST(Filename, serialization::MK_MainFile)) {
+ switch (Reader->ReadAST(Filename, serialization::MK_MainFile,
+ ASTReader::ARR_None)) {
case ASTReader::Success:
break;
case ASTReader::Failure:
- case ASTReader::IgnorePCH:
+ case ASTReader::OutOfDate:
+ case ASTReader::VersionMismatch:
+ case ASTReader::ConfigurationMismatch:
+ case ASTReader::HadErrors:
AST->getDiagnostics().Report(diag::err_fe_unable_to_load_pch);
return NULL;
}
AST->OriginalSourceFile = Reader->getOriginalSourceFile();
- PP.setPredefines(Reader->getSuggestedPredefines());
PP.setCounterValue(Counter);
// Attach the AST reader to the AST context as an external AST
@@ -897,6 +919,10 @@ public:
for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it)
handleTopLevelDecl(*it);
}
+
+ virtual ASTDeserializationListener *GetASTDeserializationListener() {
+ return Unit.getDeserializationListener();
+ }
};
class TopLevelDeclTrackerAction : public ASTFrontendAction {
@@ -1047,14 +1073,13 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
CCInvocation(new CompilerInvocation(*Invocation));
Clang->setInvocation(CCInvocation.getPtr());
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
Clang->setDiagnostics(&getDiagnostics());
// Create the target instance.
- Clang->getTargetOpts().Features = TargetFeatures;
Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
Clang->getTargetOpts()));
if (!Clang->hasTarget()) {
@@ -1070,9 +1095,9 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
"FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
"IR inputs not support here!");
// Configure the various subsystems.
@@ -1217,7 +1242,7 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
// command line (to another file) or directly through the compiler invocation
// (to a memory buffer).
llvm::MemoryBuffer *Buffer = 0;
- llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].File);
+ llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].getFile());
if (const llvm::sys::FileStatus *MainFileStatus = MainFilePath.getFileStatus()) {
// Check whether there is a file-file remapping of the main file
for (PreprocessorOptions::remapped_file_iterator
@@ -1267,7 +1292,7 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
// If the main source file was not remapped, load it now.
if (!Buffer) {
- Buffer = getBufferForFile(FrontendOpts.Inputs[0].File);
+ Buffer = getBufferForFile(FrontendOpts.Inputs[0].getFile());
if (!Buffer)
return std::make_pair((llvm::MemoryBuffer*)0, std::make_pair(0, true));
@@ -1429,7 +1454,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// buffer size we reserved when creating the preamble.
return CreatePaddedMainFileBuffer(NewPreamble.first,
PreambleReservedSize,
- FrontendOpts.Inputs[0].File);
+ FrontendOpts.Inputs[0].getFile());
}
}
@@ -1482,7 +1507,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// Save the preamble text for later; we'll need to compare against it for
// subsequent reparses.
- StringRef MainFilename = PreambleInvocation->getFrontendOpts().Inputs[0].File;
+ StringRef MainFilename = PreambleInvocation->getFrontendOpts().Inputs[0].getFile();
Preamble.assign(FileMgr->getFile(MainFilename),
NewPreamble.first->getBufferStart(),
NewPreamble.first->getBufferStart()
@@ -1492,7 +1517,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
delete PreambleBuffer;
PreambleBuffer
= llvm::MemoryBuffer::getNewUninitMemBuffer(PreambleReservedSize,
- FrontendOpts.Inputs[0].File);
+ FrontendOpts.Inputs[0].getFile());
memcpy(const_cast<char*>(PreambleBuffer->getBufferStart()),
NewPreamble.first->getBufferStart(), Preamble.size());
memset(const_cast<char*>(PreambleBuffer->getBufferStart()) + Preamble.size(),
@@ -1500,7 +1525,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
const_cast<char*>(PreambleBuffer->getBufferEnd())[-1] = '\n';
// Remap the main source file to the preamble buffer.
- llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].File);
+ llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].getFile());
PreprocessorOpts.addRemappedFile(MainFilePath.str(), PreambleBuffer);
// Tell the compiler invocation to generate a temporary precompiled header.
@@ -1518,15 +1543,14 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
CICleanup(Clang.get());
Clang->setInvocation(&*PreambleInvocation);
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
// Set up diagnostics, capturing all of the diagnostics produced.
Clang->setDiagnostics(&getDiagnostics());
// Create the target instance.
- Clang->getTargetOpts().Features = TargetFeatures;
Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
- Clang->getTargetOpts()));
+ Clang->getTargetOpts()));
if (!Clang->hasTarget()) {
llvm::sys::Path(FrontendOpts.OutputFile).eraseFromDisk();
Preamble.clear();
@@ -1544,9 +1568,9 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
"FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
"IR inputs not support here!");
// Clear out old caches and data.
@@ -1633,7 +1657,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
return CreatePaddedMainFileBuffer(NewPreamble.first,
PreambleReservedSize,
- FrontendOpts.Inputs[0].File);
+ FrontendOpts.Inputs[0].getFile());
}
void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
@@ -1664,7 +1688,7 @@ void ASTUnit::transferASTDataFromCompilerInstance(CompilerInstance &CI) {
}
StringRef ASTUnit::getMainFileName() const {
- return Invocation->getFrontendOpts().Inputs[0].File;
+ return Invocation->getFrontendOpts().Inputs[0].getFile();
}
ASTUnit *ASTUnit::create(CompilerInvocation *CI,
@@ -1733,9 +1757,6 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
CI->getFrontendOpts().DisableFree = false;
ProcessWarningOptions(AST->getDiagnostics(), CI->getDiagnosticOpts());
- // Save the target features.
- AST->TargetFeatures = CI->getTargetOpts().Features;
-
// Create the compiler instance to use for building the AST.
OwningPtr<CompilerInstance> Clang(new CompilerInstance());
@@ -1744,14 +1765,13 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
CICleanup(Clang.get());
Clang->setInvocation(CI);
- AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
+ AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
Clang->setDiagnostics(&AST->getDiagnostics());
// Create the target instance.
- Clang->getTargetOpts().Features = AST->TargetFeatures;
Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
Clang->getTargetOpts()));
if (!Clang->hasTarget())
@@ -1765,9 +1785,9 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
"FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
"IR inputs not supported here!");
// Configure the various subsystems.
@@ -1840,9 +1860,6 @@ bool ASTUnit::LoadFromCompilerInvocation(bool PrecompilePreamble) {
Invocation->getFrontendOpts().DisableFree = false;
ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
- // Save the target features.
- TargetFeatures = Invocation->getTargetOpts().Features;
-
llvm::MemoryBuffer *OverrideMainBuffer = 0;
if (PrecompilePreamble) {
PreambleRebuildCounter = 2;
@@ -1909,12 +1926,13 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
bool AllowPCHWithCompilerErrors,
bool SkipFunctionBodies,
bool UserFilesAreVolatile,
+ bool ForSerialization,
OwningPtr<ASTUnit> *ErrAST) {
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
- DiagnosticOptions DiagOpts;
- Diags = CompilerInstance::createDiagnostics(DiagOpts, ArgEnd - ArgBegin,
+ Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions(),
+ ArgEnd - ArgBegin,
ArgBegin);
}
@@ -1972,6 +1990,8 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
AST->StoredDiagnostics.swap(StoredDiagnostics);
AST->Invocation = CI;
+ if (ForSerialization)
+ AST->WriterData.reset(new ASTWriterData());
CI = 0; // Zero out now to ease cleanup during crash recovery.
// Recover resources if we crash before exiting this method.
@@ -2002,7 +2022,6 @@ bool ASTUnit::Reparse(RemappedFile *RemappedFiles, unsigned NumRemappedFiles) {
// Remap files.
PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
- PPOpts.DisableStatCache = true;
for (PreprocessorOptions::remapped_file_buffer_iterator
R = PPOpts.remapped_file_buffer_begin(),
REnd = PPOpts.remapped_file_buffer_end();
@@ -2238,7 +2257,6 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
// Adjust priority based on similar type classes.
unsigned Priority = C->Priority;
- CXCursorKind CursorKind = C->Kind;
CodeCompletionString *Completion = C->Completion;
if (!Context.getPreferredType().isNull()) {
if (C->Kind == CXCursor_MacroDefinition) {
@@ -2272,12 +2290,11 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
CodeCompletionBuilder Builder(getAllocator(), getCodeCompletionTUInfo(),
CCP_CodePattern, C->Availability);
Builder.AddTypedTextChunk(C->Completion->getTypedText());
- CursorKind = CXCursor_NotImplemented;
Priority = CCP_CodePattern;
Completion = Builder.TakeString();
}
- AllResults.push_back(Result(Completion, Priority, CursorKind,
+ AllResults.push_back(Result(Completion, Priority, C->Kind,
C->Availability));
}
@@ -2341,7 +2358,7 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
CICleanup(Clang.get());
Clang->setInvocation(&*CCInvocation);
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
// Set up diagnostics, capturing any diagnostics produced.
Clang->setDiagnostics(&Diag);
@@ -2351,7 +2368,6 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
StoredDiagnostics);
// Create the target instance.
- Clang->getTargetOpts().Features = TargetFeatures;
Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
Clang->getTargetOpts()));
if (!Clang->hasTarget()) {
@@ -2367,9 +2383,9 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
"FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
+ assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
"IR inputs not support here!");
@@ -2398,8 +2414,6 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
= new AugmentedCodeCompleteConsumer(*this, Consumer, CodeCompleteOpts);
Clang->setCodeCompletionConsumer(AugmentedConsumer);
- Clang->getFrontendOpts().SkipFunctionBodies = true;
-
// If we have a precompiled preamble, try to use it. We only allow
// the use of the precompiled preamble if the completion
// point is within the main file, after the end of the precompiled
@@ -2420,7 +2434,6 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
// If the main file has been overridden due to the use of a preamble,
// make that override happen and introduce the preamble.
- PreprocessorOpts.DisableStatCache = true;
StoredDiagnostics.insert(StoredDiagnostics.end(),
stored_diag_begin(),
stored_diag_afterDriver_begin());
@@ -2438,8 +2451,9 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
}
- // Disable the preprocessing record
- PreprocessorOpts.DetailedRecord = false;
+ // Disable the preprocessing record if modules are not enabled.
+ if (!Clang->getLangOpts().Modules)
+ PreprocessorOpts.DetailedRecord = false;
OwningPtr<SyntaxOnlyAction> Act;
Act.reset(new SyntaxOnlyAction);
@@ -2457,7 +2471,7 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
checkAndSanitizeDiags(StoredDiagnostics, getSourceManager());
}
-CXSaveError ASTUnit::Save(StringRef File) {
+bool ASTUnit::Save(StringRef File) {
// Write to a temporary file and later rename it to the actual file, to avoid
// possible race conditions.
SmallString<128> TempPath;
@@ -2466,7 +2480,7 @@ CXSaveError ASTUnit::Save(StringRef File) {
int fd;
if (llvm::sys::fs::unique_file(TempPath.str(), fd, TempPath,
/*makeAbsolute=*/false))
- return CXSaveError_Unknown;
+ return true;
// FIXME: Can we somehow regenerate the stat cache here, or do we need to
// unconditionally create a stat cache when we parse the file?
@@ -2476,32 +2490,43 @@ CXSaveError ASTUnit::Save(StringRef File) {
Out.close();
if (Out.has_error()) {
Out.clear_error();
- return CXSaveError_Unknown;
+ return true;
}
if (llvm::sys::fs::rename(TempPath.str(), File)) {
bool exists;
llvm::sys::fs::remove(TempPath.str(), exists);
- return CXSaveError_Unknown;
+ return true;
}
- return CXSaveError_None;
+ return false;
+}
+
+static bool serializeUnit(ASTWriter &Writer,
+ SmallVectorImpl<char> &Buffer,
+ Sema &S,
+ bool hasErrors,
+ raw_ostream &OS) {
+ Writer.WriteAST(S, std::string(), 0, "", hasErrors);
+
+ // Write the generated bitstream to "Out".
+ if (!Buffer.empty())
+ OS.write(Buffer.data(), Buffer.size());
+
+ return false;
}
bool ASTUnit::serialize(raw_ostream &OS) {
bool hasErrors = getDiagnostics().hasErrorOccurred();
+ if (WriterData)
+ return serializeUnit(WriterData->Writer, WriterData->Buffer,
+ getSema(), hasErrors, OS);
+
SmallString<128> Buffer;
llvm::BitstreamWriter Stream(Buffer);
ASTWriter Writer(Stream);
- // FIXME: Handle modules
- Writer.WriteAST(getSema(), 0, std::string(), 0, "", hasErrors);
-
- // Write the generated bitstream to "Out".
- if (!Buffer.empty())
- OS.write((char *)&Buffer.front(), Buffer.size());
-
- return false;
+ return serializeUnit(Writer, Buffer, getSema(), hasErrors, OS);
}
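serialize() now has a fast path and a slow path sharing serializeUnit() as their common tail: a unit created with ForSerialization reuses WriterData->Writer, which was handed out as the deserialization listener (see getDeserializationListener() above) and so has been tracking entities as they were read, while any other unit builds a fresh BitstreamWriter/ASTWriter and emits the whole AST from scratch. Sketch:

    // ASTUnit::serialize(OS)
    //   if (WriterData)              // built with ForSerialization=true
    //     serializeUnit(WriterData->Writer, WriterData->Buffer, ...);
    //   else {                       // one-shot writer
    //     SmallString<128> Buffer; BitstreamWriter Stream(Buffer);
    //     ASTWriter Writer(Stream);
    //     serializeUnit(Writer, Buffer, ...);
    //   }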
typedef ContinuousRangeMap<unsigned, int, 2> SLocRemap;
@@ -2761,6 +2786,85 @@ SourceLocation ASTUnit::getStartOfMainFileID() {
return SourceMgr->getLocForStartOfFile(FID);
}
+std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator>
+ASTUnit::getLocalPreprocessingEntities() const {
+ if (isMainFileAST()) {
+ serialization::ModuleFile &
+ Mod = Reader->getModuleManager().getPrimaryModule();
+ return Reader->getModulePreprocessedEntities(Mod);
+ }
+
+ if (PreprocessingRecord *PPRec = PP->getPreprocessingRecord())
+ return std::make_pair(PPRec->local_begin(), PPRec->local_end());
+
+ return std::make_pair(PreprocessingRecord::iterator(),
+ PreprocessingRecord::iterator());
+}
+
+bool ASTUnit::visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn) {
+ if (isMainFileAST()) {
+ serialization::ModuleFile &
+ Mod = Reader->getModuleManager().getPrimaryModule();
+ ASTReader::ModuleDeclIterator MDI, MDE;
+ llvm::tie(MDI, MDE) = Reader->getModuleFileLevelDecls(Mod);
+ for (; MDI != MDE; ++MDI) {
+ if (!Fn(context, *MDI))
+ return false;
+ }
+
+ return true;
+ }
+
+ for (ASTUnit::top_level_iterator TL = top_level_begin(),
+ TLEnd = top_level_end();
+ TL != TLEnd; ++TL) {
+ if (!Fn(context, *TL))
+ return false;
+ }
+
+ return true;
+}
+
+namespace {
+struct PCHLocatorInfo {
+ serialization::ModuleFile *Mod;
+ PCHLocatorInfo() : Mod(0) {}
+};
+}
+
+static bool PCHLocator(serialization::ModuleFile &M, void *UserData) {
+ PCHLocatorInfo &Info = *static_cast<PCHLocatorInfo*>(UserData);
+ switch (M.Kind) {
+ case serialization::MK_Module:
+ return true; // skip dependencies.
+ case serialization::MK_PCH:
+ Info.Mod = &M;
+ return true; // found it.
+ case serialization::MK_Preamble:
+ return false; // look in dependencies.
+ case serialization::MK_MainFile:
+ return false; // look in dependencies.
+ }
+
+ return true;
+}
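Per the inline comments, the visitor's return value steers the walk over the module graph: true prunes the current file's dependencies, false descends into them. The locator therefore stops at the first PCH it records, never looks inside real modules, and keeps searching beneath preambles and the main file:

    // MK_Module    -> true   prune: a PCH will not hang off a module
    // MK_PCH       -> true   found: record in Info.Mod, then prune
    // MK_Preamble  -> false  descend: the PCH may be a dependency
    // MK_MainFile  -> false  descend likewise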
+
+const FileEntry *ASTUnit::getPCHFile() {
+ if (!Reader)
+ return 0;
+
+ PCHLocatorInfo Info;
+ Reader->getModuleManager().visit(PCHLocator, &Info);
+ if (Info.Mod)
+ return Info.Mod->File;
+
+ return 0;
+}
+
+bool ASTUnit::isModuleFile() {
+ return isMainFileAST() && !ASTFileLangOpts.CurrentModule.empty();
+}
+
void ASTUnit::PreambleData::countLines() const {
NumLines = 0;
if (empty())
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
index c1d3db8..d77fd18 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
@@ -1,4 +1,4 @@
-//===- ChainedDiagnosticConsumer.cpp - Chain Diagnostic Clients -*- C++ -*-===//
+//===- ChainedDiagnosticConsumer.cpp - Chain Diagnostic Clients -----------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
index dbb06bd..2d58640 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -39,14 +39,18 @@ static ASTReader *createASTReader(CompilerInstance &CI,
Reader->addInMemoryBuffer(sr, memBufs[ti]);
}
Reader->setDeserializationListener(deserialListener);
- switch (Reader->ReadAST(pchFile, serialization::MK_PCH)) {
+ switch (Reader->ReadAST(pchFile, serialization::MK_PCH,
+ ASTReader::ARR_None)) {
case ASTReader::Success:
// Set the predefines buffer as suggested by the PCH reader.
PP.setPredefines(Reader->getSuggestedPredefines());
return Reader.take();
case ASTReader::Failure:
- case ASTReader::IgnorePCH:
+ case ASTReader::OutOfDate:
+ case ASTReader::VersionMismatch:
+ case ASTReader::ConfigurationMismatch:
+ case ASTReader::HadErrors:
break;
}
return 0;
@@ -63,7 +67,7 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
assert(!includes.empty() && "No '-chain-include' in options!");
OwningPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
- InputKind IK = CI.getFrontendOpts().Inputs[0].Kind;
+ InputKind IK = CI.getFrontendOpts().Inputs[0].getKind();
SmallVector<llvm::MemoryBuffer *, 4> serialBufs;
SmallVector<std::string, 4> serialBufNames;
@@ -82,14 +86,14 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
CInvok->getPreprocessorOpts().Macros.clear();
CInvok->getFrontendOpts().Inputs.clear();
- CInvok->getFrontendOpts().Inputs.push_back(FrontendInputFile(includes[i],
- IK));
+ FrontendInputFile InputFile(includes[i], IK);
+ CInvok->getFrontendOpts().Inputs.push_back(InputFile);
TextDiagnosticPrinter *DiagClient =
- new TextDiagnosticPrinter(llvm::errs(), DiagnosticOptions());
+ new TextDiagnosticPrinter(llvm::errs(), new DiagnosticOptions());
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, DiagClient));
+ new DiagnosticsEngine(DiagID, &CI.getDiagnosticOpts(), DiagClient));
OwningPtr<CompilerInstance> Clang(new CompilerInstance());
Clang->setInvocation(CInvok.take());
@@ -108,6 +112,8 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
OwningPtr<ASTConsumer> consumer;
consumer.reset(new PCHGenerator(Clang->getPreprocessor(), "-", 0,
/*isysroot=*/"", &OS));
+ Clang->getPreprocessor().setPPMutationListener(
+ consumer->GetPPMutationListener());
Clang->getASTContext().setASTMutationListener(
consumer->GetASTMutationListener());
Clang->setASTConsumer(consumer.take());
@@ -141,7 +147,7 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
Clang->getASTContext().setExternalSource(Reader);
}
- if (!Clang->InitializeSourceManager(includes[i]))
+ if (!Clang->InitializeSourceManager(InputFile))
return 0;
ParseAST(Clang->getSema());
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
index 6de1531..22a74fc 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
@@ -52,6 +52,7 @@ CompilerInstance::CompilerInstance()
}
CompilerInstance::~CompilerInstance() {
+ assert(OutputFiles.empty() && "Still output files in flight?");
}
void CompilerInstance::setInvocation(CompilerInvocation *Value) {
@@ -88,19 +89,18 @@ void CompilerInstance::setASTConsumer(ASTConsumer *Value) {
void CompilerInstance::setCodeCompletionConsumer(CodeCompleteConsumer *Value) {
CompletionConsumer.reset(Value);
- getFrontendOpts().SkipFunctionBodies = Value != 0;
}
// Diagnostics
-static void SetUpBuildDumpLog(const DiagnosticOptions &DiagOpts,
+static void SetUpBuildDumpLog(DiagnosticOptions *DiagOpts,
unsigned argc, const char* const *argv,
DiagnosticsEngine &Diags) {
std::string ErrorInfo;
OwningPtr<raw_ostream> OS(
- new llvm::raw_fd_ostream(DiagOpts.DumpBuildInformation.c_str(), ErrorInfo));
+ new llvm::raw_fd_ostream(DiagOpts->DumpBuildInformation.c_str(),ErrorInfo));
if (!ErrorInfo.empty()) {
Diags.Report(diag::err_fe_unable_to_open_logfile)
- << DiagOpts.DumpBuildInformation << ErrorInfo;
+ << DiagOpts->DumpBuildInformation << ErrorInfo;
return;
}
@@ -115,20 +115,20 @@ static void SetUpBuildDumpLog(const DiagnosticOptions &DiagOpts,
Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(), Logger));
}
-static void SetUpDiagnosticLog(const DiagnosticOptions &DiagOpts,
+static void SetUpDiagnosticLog(DiagnosticOptions *DiagOpts,
const CodeGenOptions *CodeGenOpts,
DiagnosticsEngine &Diags) {
std::string ErrorInfo;
bool OwnsStream = false;
raw_ostream *OS = &llvm::errs();
- if (DiagOpts.DiagnosticLogFile != "-") {
+ if (DiagOpts->DiagnosticLogFile != "-") {
// Create the output stream.
llvm::raw_fd_ostream *FileOS(
- new llvm::raw_fd_ostream(DiagOpts.DiagnosticLogFile.c_str(),
+ new llvm::raw_fd_ostream(DiagOpts->DiagnosticLogFile.c_str(),
ErrorInfo, llvm::raw_fd_ostream::F_Append));
if (!ErrorInfo.empty()) {
Diags.Report(diag::warn_fe_cc_log_diagnostics_failure)
- << DiagOpts.DumpBuildInformation << ErrorInfo;
+ << DiagOpts->DumpBuildInformation << ErrorInfo;
} else {
FileOS->SetUnbuffered();
FileOS->SetUseAtomicWrites(true);
@@ -145,7 +145,7 @@ static void SetUpDiagnosticLog(const DiagnosticOptions &DiagOpts,
Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(), Logger));
}
-static void SetupSerializedDiagnostics(const DiagnosticOptions &DiagOpts,
+static void SetupSerializedDiagnostics(DiagnosticOptions *DiagOpts,
DiagnosticsEngine &Diags,
StringRef OutputFile) {
std::string ErrorInfo;
@@ -171,13 +171,13 @@ void CompilerInstance::createDiagnostics(int Argc, const char* const *Argv,
DiagnosticConsumer *Client,
bool ShouldOwnClient,
bool ShouldCloneClient) {
- Diagnostics = createDiagnostics(getDiagnosticOpts(), Argc, Argv, Client,
+ Diagnostics = createDiagnostics(&getDiagnosticOpts(), Argc, Argv, Client,
ShouldOwnClient, ShouldCloneClient,
&getCodeGenOpts());
}
IntrusiveRefCntPtr<DiagnosticsEngine>
-CompilerInstance::createDiagnostics(const DiagnosticOptions &Opts,
+CompilerInstance::createDiagnostics(DiagnosticOptions *Opts,
int Argc, const char* const *Argv,
DiagnosticConsumer *Client,
bool ShouldOwnClient,
@@ -185,7 +185,7 @@ CompilerInstance::createDiagnostics(const DiagnosticOptions &Opts,
const CodeGenOptions *CodeGenOpts) {
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine>
- Diags(new DiagnosticsEngine(DiagID));
+ Diags(new DiagnosticsEngine(DiagID, Opts));
// Create the diagnostic client for reporting errors or for
// implementing -verify.
@@ -198,22 +198,22 @@ CompilerInstance::createDiagnostics(const DiagnosticOptions &Opts,
Diags->setClient(new TextDiagnosticPrinter(llvm::errs(), Opts));
// Chain in -verify checker, if requested.
- if (Opts.VerifyDiagnostics)
+ if (Opts->VerifyDiagnostics)
Diags->setClient(new VerifyDiagnosticConsumer(*Diags));
// Chain in -diagnostic-log-file dumper, if requested.
- if (!Opts.DiagnosticLogFile.empty())
+ if (!Opts->DiagnosticLogFile.empty())
SetUpDiagnosticLog(Opts, CodeGenOpts, *Diags);
- if (!Opts.DumpBuildInformation.empty())
+ if (!Opts->DumpBuildInformation.empty())
SetUpBuildDumpLog(Opts, Argc, Argv, *Diags);
- if (!Opts.DiagnosticSerializationFile.empty())
+ if (!Opts->DiagnosticSerializationFile.empty())
SetupSerializedDiagnostics(Opts, *Diags,
- Opts.DiagnosticSerializationFile);
+ Opts->DiagnosticSerializationFile);
// Configure our handling of diagnostics.
- ProcessWarningOptions(*Diags, Opts);
+ ProcessWarningOptions(*Diags, *Opts);
return Diags;
}
@@ -241,11 +241,13 @@ void CompilerInstance::createPreprocessor() {
PTHMgr = PTHManager::Create(PPOpts.TokenCache, getDiagnostics());
// Create the Preprocessor.
- HeaderSearch *HeaderInfo = new HeaderSearch(getFileManager(),
+ HeaderSearch *HeaderInfo = new HeaderSearch(&getHeaderSearchOpts(),
+ getFileManager(),
getDiagnostics(),
getLangOpts(),
&getTarget());
- PP = new Preprocessor(getDiagnostics(), getLangOpts(), &getTarget(),
+ PP = new Preprocessor(&getPreprocessorOpts(),
+ getDiagnostics(), getLangOpts(), &getTarget(),
getSourceManager(), *HeaderInfo, *this, PTHMgr,
/*OwnsHeaderSearch=*/true);
@@ -306,14 +308,12 @@ void CompilerInstance::createASTContext() {
void CompilerInstance::createPCHExternalASTSource(StringRef Path,
bool DisablePCHValidation,
- bool DisableStatCache,
bool AllowPCHWithCompilerErrors,
void *DeserializationListener){
OwningPtr<ExternalASTSource> Source;
bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
Source.reset(createPCHExternalASTSource(Path, getHeaderSearchOpts().Sysroot,
DisablePCHValidation,
- DisableStatCache,
AllowPCHWithCompilerErrors,
getPreprocessor(), getASTContext(),
DeserializationListener,
@@ -326,7 +326,6 @@ ExternalASTSource *
CompilerInstance::createPCHExternalASTSource(StringRef Path,
const std::string &Sysroot,
bool DisablePCHValidation,
- bool DisableStatCache,
bool AllowPCHWithCompilerErrors,
Preprocessor &PP,
ASTContext &Context,
@@ -335,14 +334,15 @@ CompilerInstance::createPCHExternalASTSource(StringRef Path,
OwningPtr<ASTReader> Reader;
Reader.reset(new ASTReader(PP, Context,
Sysroot.empty() ? "" : Sysroot.c_str(),
- DisablePCHValidation, DisableStatCache,
+ DisablePCHValidation,
AllowPCHWithCompilerErrors));
Reader->setDeserializationListener(
static_cast<ASTDeserializationListener *>(DeserializationListener));
switch (Reader->ReadAST(Path,
Preamble ? serialization::MK_Preamble
- : serialization::MK_PCH)) {
+ : serialization::MK_PCH,
+ ASTReader::ARR_None)) {
case ASTReader::Success:
// Set the predefines buffer as suggested by the PCH reader. Typically, the
// predefines buffer will be empty.
@@ -353,7 +353,10 @@ CompilerInstance::createPCHExternalASTSource(StringRef Path,
// Unrecoverable failure: don't even try to process the input file.
break;
- case ASTReader::IgnorePCH:
+ case ASTReader::OutOfDate:
+ case ASTReader::VersionMismatch:
+ case ASTReader::ConfigurationMismatch:
+ case ASTReader::HadErrors:
// No suitable PCH file could be found. Return an error.
break;
}
@@ -586,19 +589,29 @@ CompilerInstance::createOutputFile(StringRef OutputPath,
// Initialization Utilities
-bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
- SrcMgr::CharacteristicKind Kind){
- return InitializeSourceManager(InputFile, Kind, getDiagnostics(),
+bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input){
+ return InitializeSourceManager(Input, getDiagnostics(),
getFileManager(), getSourceManager(),
getFrontendOpts());
}
-bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
- SrcMgr::CharacteristicKind Kind,
+bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
DiagnosticsEngine &Diags,
FileManager &FileMgr,
SourceManager &SourceMgr,
const FrontendOptions &Opts) {
+ SrcMgr::CharacteristicKind
+ Kind = Input.isSystem() ? SrcMgr::C_System : SrcMgr::C_User;
+
+ if (Input.isBuffer()) {
+ SourceMgr.createMainFileIDForMemBuffer(Input.getBuffer(), Kind);
+ assert(!SourceMgr.getMainFileID().isInvalid() &&
+ "Couldn't establish MainFileID!");
+ return true;
+ }
+
+ StringRef InputFile = Input.getFile();
+
// Figure out where to get and map in the main file.
if (InputFile != "-") {
const FileEntry *File = FileMgr.getFile(InputFile);
@@ -607,6 +620,19 @@ bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
return false;
}
SourceMgr.createMainFileID(File, Kind);
+
+ // The natural SourceManager infrastructure can't currently handle named
+ // pipes, but we would at least like to accept them for the main
+ // file. Detect them here, read them with the more generic MemoryBuffer
+ // function, and simply override their contents as we do for STDIN.
+ if (File->isNamedPipe()) {
+ OwningPtr<llvm::MemoryBuffer> MB;
+ if (llvm::error_code ec = llvm::MemoryBuffer::getFile(InputFile, MB)) {
+ Diags.Report(diag::err_cannot_open_file) << InputFile << ec.message();
+ return false;
+ }
+ SourceMgr.overrideFileContents(File, MB.take());
+ }
} else {
OwningPtr<llvm::MemoryBuffer> SB;
if (llvm::MemoryBuffer::getSTDIN(SB)) {
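The named-pipe case matters for inputs such as shell process substitution, where the "file" can be read only once and cannot be mmap'ed the way SourceManager expects; reading it eagerly and overriding the FileEntry's contents mirrors the stdin branch below. A hypothetical invocation that this enables:

    // $ clang -fsyntax-only -x c <(./generate_source.sh)
    //   (<(...) expands to a /dev/fd pipe; the script name is illustrative)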
@@ -746,7 +772,7 @@ static void compileModule(CompilerInstance &ImportingInstance,
// Someone else is responsible for building the module. Wait for them to
// finish.
Locked.waitForUnlock();
- break;
+ return;
}
ModuleMap &ModMap
@@ -836,6 +862,7 @@ static void compileModule(CompilerInstance &ImportingInstance,
// FIXME: Even though we're executing under crash protection, it would still
// be nice to do this with RemoveFileOnSignal when we can. However, that
// doesn't make sense for all clients, so clean this up manually.
+ Instance.clearOutputFiles(/*EraseFiles=*/true);
if (!TempModuleMapFileName.empty())
llvm::sys::Path(TempModuleMapFileName).eraseFromDisk();
}
@@ -939,13 +966,14 @@ Module *CompilerInstance::loadModule(SourceLocation ImportLoc,
const PreprocessorOptions &PPOpts = getPreprocessorOpts();
ModuleManager = new ASTReader(getPreprocessor(), *Context,
Sysroot.empty() ? "" : Sysroot.c_str(),
- PPOpts.DisablePCHValidation,
- PPOpts.DisableStatCache);
+ PPOpts.DisablePCHValidation);
if (hasASTConsumer()) {
ModuleManager->setDeserializationListener(
getASTConsumer().GetASTDeserializationListener());
getASTContext().setASTMutationListener(
getASTConsumer().GetASTMutationListener());
+ getPreprocessor().setPPMutationListener(
+ getASTConsumer().GetPPMutationListener());
}
OwningPtr<ExternalASTSource> Source;
Source.reset(ModuleManager);
@@ -957,12 +985,39 @@ Module *CompilerInstance::loadModule(SourceLocation ImportLoc,
}
// Try to load the module we found.
+ unsigned ARRFlags = ASTReader::ARR_None;
+ if (Module)
+ ARRFlags |= ASTReader::ARR_OutOfDate;
switch (ModuleManager->ReadAST(ModuleFile->getName(),
- serialization::MK_Module)) {
+ serialization::MK_Module,
+ ARRFlags)) {
case ASTReader::Success:
break;
- case ASTReader::IgnorePCH:
+ case ASTReader::OutOfDate: {
+ // The module file is out-of-date. Rebuild it.
+ getFileManager().invalidateCache(ModuleFile);
+ bool Existed;
+ llvm::sys::fs::remove(ModuleFileName, Existed);
+ compileModule(*this, Module, ModuleFileName);
+
+ // Try loading the module again.
+ ModuleFile = FileMgr->getFile(ModuleFileName);
+ if (!ModuleFile ||
+ ModuleManager->ReadAST(ModuleFileName,
+ serialization::MK_Module,
+ ASTReader::ARR_None) != ASTReader::Success) {
+ KnownModules[Path[0].first] = 0;
+ return 0;
+ }
+
+ // Okay, we've rebuilt and now loaded the module.
+ break;
+ }
+
+ case ASTReader::VersionMismatch:
+ case ASTReader::ConfigurationMismatch:
+ case ASTReader::HadErrors:
// FIXME: The ASTReader will already have complained, but can we shoehorn
// that diagnostic information into a more useful form?
KnownModules[Path[0].first] = 0;
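An out-of-date module file is now rebuilt transparently instead of being rejected: the first ReadAST passes ARR_OutOfDate (only when the module is known to the module map) so staleness is recoverable, the stale file is removed and recompiled, and the retry passes ARR_None so that a second failure of any kind is final. In outline:

    // ReadAST(name, MK_Module, ARR_OutOfDate) == OutOfDate:
    //   invalidateCache(ModuleFile); fs::remove(ModuleFileName, Existed);
    //   compileModule(*this, Module, ModuleFileName);
    //   ModuleFile = FileMgr->getFile(ModuleFileName);
    //   if (!ModuleFile || ReadAST(..., ARR_None) != Success)
    //     { KnownModules[Path[0].first] = 0; return 0; }   // give up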
@@ -980,6 +1035,9 @@ Module *CompilerInstance::loadModule(SourceLocation ImportLoc,
Module = PP->getHeaderSearchInfo().getModuleMap()
.findModule((Path[0].first->getName()));
}
+
+ if (Module)
+ Module->setASTFile(ModuleFile);
// Cache the result of this top-level module lookup for later.
Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
@@ -1079,9 +1137,12 @@ Module *CompilerInstance::loadModule(SourceLocation ImportLoc,
// implicit import declaration to capture it in the AST.
if (IsInclusionDirective && hasASTContext()) {
TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
- TU->addDecl(ImportDecl::CreateImplicit(getASTContext(), TU,
- ImportLoc, Module,
- Path.back().second));
+ ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
+ ImportLoc, Module,
+ Path.back().second);
+ TU->addDecl(ImportD);
+ if (Consumer)
+ Consumer->HandleImplicitImportDecl(ImportD);
}
LastModuleImportLoc = ImportLoc;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
index 0afef6b..a9a299a 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -11,6 +11,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/Version.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Options.h"
@@ -20,6 +21,7 @@
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/LangStandard.h"
#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -35,919 +37,21 @@ using namespace clang;
//===----------------------------------------------------------------------===//
CompilerInvocationBase::CompilerInvocationBase()
- : LangOpts(new LangOptions()) {}
+ : LangOpts(new LangOptions()), TargetOpts(new TargetOptions()),
+ DiagnosticOpts(new DiagnosticOptions()),
+ HeaderSearchOpts(new HeaderSearchOptions()),
+ PreprocessorOpts(new PreprocessorOptions()) {}
CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
: RefCountedBase<CompilerInvocation>(),
- LangOpts(new LangOptions(*X.getLangOpts())) {}
+ LangOpts(new LangOptions(*X.getLangOpts())),
+ TargetOpts(new TargetOptions(X.getTargetOpts())),
+ DiagnosticOpts(new DiagnosticOptions(X.getDiagnosticOpts())),
+ HeaderSearchOpts(new HeaderSearchOptions(X.getHeaderSearchOpts())),
+ PreprocessorOpts(new PreprocessorOptions(X.getPreprocessorOpts())) {}
//===----------------------------------------------------------------------===//
-// Utility functions.
-//===----------------------------------------------------------------------===//
-
-static const char *getAnalysisStoreName(AnalysisStores Kind) {
- switch (Kind) {
- default:
- llvm_unreachable("Unknown analysis store!");
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
- case NAME##Model: return CMDFLAG;
-#include "clang/Frontend/Analyses.def"
- }
-}
-
-static const char *getAnalysisConstraintName(AnalysisConstraints Kind) {
- switch (Kind) {
- default:
- llvm_unreachable("Unknown analysis constraints!");
-#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) \
- case NAME##Model: return CMDFLAG;
-#include "clang/Frontend/Analyses.def"
- }
-}
-
-static const char *getAnalysisDiagClientName(AnalysisDiagClients Kind) {
- switch (Kind) {
- default:
- llvm_unreachable("Unknown analysis client!");
-#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN, AUTOCREATE) \
- case PD_##NAME: return CMDFLAG;
-#include "clang/Frontend/Analyses.def"
- }
-}
-
-static const char *getAnalysisPurgeModeName(AnalysisPurgeMode Kind) {
- switch (Kind) {
- default:
- llvm_unreachable("Unknown analysis purge mode!");
-#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) \
- case NAME: return CMDFLAG;
-#include "clang/Frontend/Analyses.def"
- }
-}
-
-static const char *getAnalysisIPAModeName(AnalysisIPAMode Kind) {
- switch (Kind) {
- default:
- llvm_unreachable("Unknown analysis ipa mode!");
-#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) \
- case NAME: return CMDFLAG;
-#include "clang/Frontend/Analyses.def"
- }
-}
-
-static const char *
- getAnalysisInliningModeName(AnalysisInliningMode Kind) {
- switch (Kind) {
- default:
- llvm_unreachable("Unknown analysis inlining mode!");
-#define ANALYSIS_INLINE_SELECTION(NAME, CMDFLAG, DESC) \
- case NAME: return CMDFLAG;
-#include "clang/Frontend/Analyses.def"
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Serialization (to args)
-//===----------------------------------------------------------------------===//
-
-namespace {
- /// ToArgsList - Helper class to create a list of std::strings.
- class ToArgsList {
- std::vector<std::string> &Res;
- public:
- explicit ToArgsList(std::vector<std::string> &Res) : Res(Res) {}
-
- void push_back(StringRef Str) {
- // Avoid creating a temporary string.
- Res.push_back(std::string());
- Res.back().assign(Str.data(), Str.size());
- }
-
- void push_back(StringRef Str1, StringRef Str2) {
- push_back(Str1);
- push_back(Str2);
- }
- };
-}
-
-static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts, ToArgsList &Res) {
- if (Opts.ShowCheckerHelp)
- Res.push_back("-analyzer-checker-help");
- if (Opts.AnalysisStoreOpt != RegionStoreModel)
- Res.push_back("-analyzer-store",
- getAnalysisStoreName(Opts.AnalysisStoreOpt));
- if (Opts.AnalysisConstraintsOpt != RangeConstraintsModel)
- Res.push_back("-analyzer-constraints",
- getAnalysisConstraintName(Opts.AnalysisConstraintsOpt));
- if (Opts.AnalysisDiagOpt != PD_HTML)
- Res.push_back("-analyzer-output",
- getAnalysisDiagClientName(Opts.AnalysisDiagOpt));
- if (Opts.AnalysisPurgeOpt != PurgeStmt)
- Res.push_back("-analyzer-purge",
- getAnalysisPurgeModeName(Opts.AnalysisPurgeOpt));
- if (!Opts.AnalyzeSpecificFunction.empty())
- Res.push_back("-analyze-function", Opts.AnalyzeSpecificFunction);
- if (Opts.IPAMode != Inlining)
- Res.push_back("-analyzer-ipa", getAnalysisIPAModeName(Opts.IPAMode));
- if (Opts.InliningMode != NoRedundancy)
- Res.push_back("-analyzer-inlining-mode",
- getAnalysisInliningModeName(Opts.InliningMode));
-
- if (Opts.AnalyzeAll)
- Res.push_back("-analyzer-opt-analyze-headers");
- if (Opts.AnalyzerDisplayProgress)
- Res.push_back("-analyzer-display-progress");
- if (Opts.AnalyzeNestedBlocks)
- Res.push_back("-analyzer-opt-analyze-nested-blocks");
- if (Opts.EagerlyAssume)
- Res.push_back("-analyzer-eagerly-assume");
- if (Opts.TrimGraph)
- Res.push_back("-trim-egraph");
- if (Opts.VisualizeEGDot)
- Res.push_back("-analyzer-viz-egraph-graphviz");
- if (Opts.VisualizeEGUbi)
- Res.push_back("-analyzer-viz-egraph-ubigraph");
- if (Opts.NoRetryExhausted)
- Res.push_back("-analyzer-disable-retry-exhausted");
-
- for (unsigned i = 0, e = Opts.CheckersControlList.size(); i != e; ++i) {
- const std::pair<std::string, bool> &opt = Opts.CheckersControlList[i];
- if (opt.second)
- Res.push_back("-analyzer-disable-checker");
- else
- Res.push_back("-analyzer-checker");
- Res.push_back(opt.first);
- }
-}
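One pattern worth noting in the function above: flags are emitted only when an option differs from its default, so a default-constructed option set round-trips to an empty argument vector. A toy sketch of that idea, with invented field names:

    #include <iostream>
    #include <string>
    #include <vector>

    struct ToyOpts {
      std::string Store = "region"; // hypothetical default, as with RegionStoreModel
      bool ShowHelp = false;
    };

    // Emit a flag only when the value differs from the built-in default.
    static std::vector<std::string> toArgs(const ToyOpts &Opts) {
      std::vector<std::string> Res;
      if (Opts.ShowHelp)
        Res.push_back("-analyzer-checker-help");
      if (Opts.Store != "region") {
        Res.push_back("-analyzer-store");
        Res.push_back(Opts.Store);
      }
      return Res;
    }

    int main() {
      ToyOpts Defaults, Custom;
      Custom.Store = "basic";
      std::cout << toArgs(Defaults).size() << '\n'; // 0: defaults vanish
      for (const std::string &A : toArgs(Custom))
        std::cout << A << '\n';
    }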
-
-static void CodeGenOptsToArgs(const CodeGenOptions &Opts, ToArgsList &Res) {
- switch (Opts.DebugInfo) {
- case CodeGenOptions::NoDebugInfo:
- break;
- case CodeGenOptions::DebugLineTablesOnly:
- Res.push_back("-gline-tables-only");
- break;
- case CodeGenOptions::LimitedDebugInfo:
- Res.push_back("-g");
- Res.push_back("-flimit-debug-info");
- break;
- case CodeGenOptions::FullDebugInfo:
- Res.push_back("-g");
- Res.push_back("-fno-limit-debug-info");
- break;
- }
- if (Opts.DisableLLVMOpts)
- Res.push_back("-disable-llvm-optzns");
- if (Opts.DisableRedZone)
- Res.push_back("-disable-red-zone");
- if (Opts.DisableTailCalls)
- Res.push_back("-mdisable-tail-calls");
- if (!Opts.DebugCompilationDir.empty())
- Res.push_back("-fdebug-compilation-dir", Opts.DebugCompilationDir);
- if (!Opts.DwarfDebugFlags.empty())
- Res.push_back("-dwarf-debug-flags", Opts.DwarfDebugFlags);
- if (Opts.EmitGcovArcs)
- Res.push_back("-femit-coverage-data");
- if (Opts.EmitGcovNotes)
- Res.push_back("-femit-coverage-notes");
- if (Opts.EmitOpenCLArgMetadata)
- Res.push_back("-cl-kernel-arg-info");
- if (!Opts.MergeAllConstants)
- Res.push_back("-fno-merge-all-constants");
- if (Opts.NoCommon)
- Res.push_back("-fno-common");
- if (Opts.ForbidGuardVariables)
- Res.push_back("-fforbid-guard-variables");
- if (Opts.UseRegisterSizedBitfieldAccess)
- Res.push_back("-fuse-register-sized-bitfield-access");
- if (Opts.NoImplicitFloat)
- Res.push_back("-no-implicit-float");
- if (Opts.OmitLeafFramePointer)
- Res.push_back("-momit-leaf-frame-pointer");
- if (Opts.OptimizeSize) {
- assert(Opts.OptimizationLevel == 2 && "Invalid options!");
- Opts.OptimizeSize == 1 ? Res.push_back("-Os") : Res.push_back("-Oz");
- } else if (Opts.OptimizationLevel != 0)
- Res.push_back("-O" + llvm::utostr(Opts.OptimizationLevel));
- if (!Opts.MainFileName.empty())
- Res.push_back("-main-file-name", Opts.MainFileName);
- if (Opts.NoInfsFPMath)
- Res.push_back("-menable-no-infinities");
- if (Opts.NoNaNsFPMath)
- Res.push_back("-menable-no-nans");
- // SimplifyLibCalls is only derived.
- // TimePasses is only derived.
- // UnitAtATime is unused.
- // Inlining is only derived.
-
- // UnrollLoops is derived, but it also accepts an option, so there is
- // no harm in pushing it back here.
- if (Opts.UnrollLoops)
- Res.push_back("-funroll-loops");
- if (Opts.DataSections)
- Res.push_back("-fdata-sections");
- if (Opts.FunctionSections)
- Res.push_back("-ffunction-sections");
- if (Opts.AsmVerbose)
- Res.push_back("-masm-verbose");
- if (!Opts.CodeModel.empty())
- Res.push_back("-mcode-model", Opts.CodeModel);
- if (Opts.CUDAIsDevice)
- Res.push_back("-fcuda-is-device");
- if (!Opts.CXAAtExit)
- Res.push_back("-fno-use-cxa-atexit");
- if (Opts.CXXCtorDtorAliases)
- Res.push_back("-mconstructor-aliases");
- if (Opts.ObjCAutoRefCountExceptions)
- Res.push_back("-fobjc-arc-eh");
- if (!Opts.DebugPass.empty()) {
- Res.push_back("-mdebug-pass", Opts.DebugPass);
- }
- if (Opts.DisableFPElim)
- Res.push_back("-mdisable-fp-elim");
- if (!Opts.FloatABI.empty())
- Res.push_back("-mfloat-abi", Opts.FloatABI);
- if (!Opts.LimitFloatPrecision.empty())
- Res.push_back("-mlimit-float-precision", Opts.LimitFloatPrecision);
- if (Opts.NoZeroInitializedInBSS)
- Res.push_back("-mno-zero-initialized-bss");
- switch (Opts.getObjCDispatchMethod()) {
- case CodeGenOptions::Legacy:
- break;
- case CodeGenOptions::Mixed:
- Res.push_back("-fobjc-dispatch-method=mixed");
- break;
- case CodeGenOptions::NonLegacy:
- Res.push_back("-fobjc-dispatch-method=non-legacy");
- break;
- }
- if (Opts.BoundsChecking > 0)
- Res.push_back("-fbounds-checking=" + llvm::utostr(Opts.BoundsChecking));
- if (Opts.NumRegisterParameters)
- Res.push_back("-mregparm", llvm::utostr(Opts.NumRegisterParameters));
- if (Opts.NoGlobalMerge)
- Res.push_back("-mno-global-merge");
- if (Opts.NoExecStack)
- Res.push_back("-mnoexecstack");
- if (Opts.RelaxAll)
- Res.push_back("-mrelax-all");
- if (Opts.SaveTempLabels)
- Res.push_back("-msave-temp-labels");
- if (Opts.NoDwarf2CFIAsm)
- Res.push_back("-fno-dwarf2-cfi-asm");
- if (Opts.NoDwarfDirectoryAsm)
- Res.push_back("-fno-dwarf-directory-asm");
- if (Opts.SoftFloat)
- Res.push_back("-msoft-float");
- if (Opts.StrictEnums)
- Res.push_back("-fstrict-enums");
- if (Opts.UnwindTables)
- Res.push_back("-munwind-tables");
- if (Opts.RelocationModel != "pic")
- Res.push_back("-mrelocation-model", Opts.RelocationModel);
- if (!Opts.VerifyModule)
- Res.push_back("-disable-llvm-verifier");
- for (unsigned i = 0, e = Opts.BackendOptions.size(); i != e; ++i)
- Res.push_back("-backend-option", Opts.BackendOptions[i]);
-
- switch (Opts.DefaultTLSModel) {
- case CodeGenOptions::GeneralDynamicTLSModel:
- break;
- case CodeGenOptions::LocalDynamicTLSModel:
- Res.push_back("-ftls-model=local-dynamic");
- break;
- case CodeGenOptions::InitialExecTLSModel:
- Res.push_back("-ftls-model=initial-exec");
- break;
- case CodeGenOptions::LocalExecTLSModel:
- Res.push_back("-ftls-model=local-exec");
- break;
- }
-}
-
-static void DependencyOutputOptsToArgs(const DependencyOutputOptions &Opts,
- ToArgsList &Res) {
- if (Opts.IncludeSystemHeaders)
- Res.push_back("-sys-header-deps");
- if (Opts.ShowHeaderIncludes)
- Res.push_back("-H");
- if (!Opts.HeaderIncludeOutputFile.empty())
- Res.push_back("-header-include-file", Opts.HeaderIncludeOutputFile);
- if (Opts.UsePhonyTargets)
- Res.push_back("-MP");
- if (!Opts.OutputFile.empty())
- Res.push_back("-dependency-file", Opts.OutputFile);
- for (unsigned i = 0, e = Opts.Targets.size(); i != e; ++i)
- Res.push_back("-MT", Opts.Targets[i]);
-}
-
-static void DiagnosticOptsToArgs(const DiagnosticOptions &Opts,
- ToArgsList &Res) {
- if (Opts.IgnoreWarnings)
- Res.push_back("-w");
- if (Opts.NoRewriteMacros)
- Res.push_back("-Wno-rewrite-macros");
- if (Opts.Pedantic)
- Res.push_back("-pedantic");
- if (Opts.PedanticErrors)
- Res.push_back("-pedantic-errors");
- if (!Opts.ShowColumn)
- Res.push_back("-fno-show-column");
- if (!Opts.ShowLocation)
- Res.push_back("-fno-show-source-location");
- if (!Opts.ShowCarets)
- Res.push_back("-fno-caret-diagnostics");
- if (!Opts.ShowFixits)
- Res.push_back("-fno-diagnostics-fixit-info");
- if (Opts.ShowSourceRanges)
- Res.push_back("-fdiagnostics-print-source-range-info");
- if (Opts.ShowParseableFixits)
- Res.push_back("-fdiagnostics-parseable-fixits");
- if (Opts.ShowColors)
- Res.push_back("-fcolor-diagnostics");
- if (Opts.VerifyDiagnostics)
- Res.push_back("-verify");
- if (Opts.ShowOptionNames)
- Res.push_back("-fdiagnostics-show-option");
- if (Opts.ShowCategories == 1)
- Res.push_back("-fdiagnostics-show-category=id");
- else if (Opts.ShowCategories == 2)
- Res.push_back("-fdiagnostics-show-category=name");
- switch (Opts.Format) {
- case DiagnosticOptions::Clang:
- Res.push_back("-fdiagnostics-format=clang"); break;
- case DiagnosticOptions::Msvc:
- Res.push_back("-fdiagnostics-format=msvc"); break;
- case DiagnosticOptions::Vi:
- Res.push_back("-fdiagnostics-format=vi"); break;
- }
- if (Opts.ErrorLimit)
- Res.push_back("-ferror-limit", llvm::utostr(Opts.ErrorLimit));
- if (!Opts.DiagnosticLogFile.empty())
- Res.push_back("-diagnostic-log-file", Opts.DiagnosticLogFile);
- if (Opts.MacroBacktraceLimit
- != DiagnosticOptions::DefaultMacroBacktraceLimit)
- Res.push_back("-fmacro-backtrace-limit",
- llvm::utostr(Opts.MacroBacktraceLimit));
- if (Opts.TemplateBacktraceLimit
- != DiagnosticOptions::DefaultTemplateBacktraceLimit)
- Res.push_back("-ftemplate-backtrace-limit",
- llvm::utostr(Opts.TemplateBacktraceLimit));
- if (Opts.ConstexprBacktraceLimit
- != DiagnosticOptions::DefaultConstexprBacktraceLimit)
- Res.push_back("-fconstexpr-backtrace-limit",
- llvm::utostr(Opts.ConstexprBacktraceLimit));
-
- if (Opts.TabStop != DiagnosticOptions::DefaultTabStop)
- Res.push_back("-ftabstop", llvm::utostr(Opts.TabStop));
- if (Opts.MessageLength)
- Res.push_back("-fmessage-length", llvm::utostr(Opts.MessageLength));
- if (!Opts.DumpBuildInformation.empty())
- Res.push_back("-dump-build-information", Opts.DumpBuildInformation);
- for (unsigned i = 0, e = Opts.Warnings.size(); i != e; ++i)
- Res.push_back("-W" + Opts.Warnings[i]);
-}
-
-static const char *getInputKindName(InputKind Kind) {
- switch (Kind) {
- case IK_None: break;
- case IK_AST: return "ast";
- case IK_Asm: return "assembler-with-cpp";
- case IK_C: return "c";
- case IK_CXX: return "c++";
- case IK_LLVM_IR: return "ir";
- case IK_ObjC: return "objective-c";
- case IK_ObjCXX: return "objective-c++";
- case IK_OpenCL: return "cl";
- case IK_CUDA: return "cuda";
- case IK_PreprocessedC: return "cpp-output";
- case IK_PreprocessedCXX: return "c++-cpp-output";
- case IK_PreprocessedObjC: return "objective-c-cpp-output";
- case IK_PreprocessedObjCXX:return "objective-c++-cpp-output";
- }
-
- llvm_unreachable("Unexpected language kind!");
-}
-
-static const char *getActionName(frontend::ActionKind Kind) {
- switch (Kind) {
- case frontend::PluginAction:
- llvm_unreachable("Invalid kind!");
-
- case frontend::ASTDeclList: return "-ast-list";
- case frontend::ASTDump: return "-ast-dump";
- case frontend::ASTDumpXML: return "-ast-dump-xml";
- case frontend::ASTPrint: return "-ast-print";
- case frontend::ASTView: return "-ast-view";
- case frontend::DumpRawTokens: return "-dump-raw-tokens";
- case frontend::DumpTokens: return "-dump-tokens";
- case frontend::EmitAssembly: return "-S";
- case frontend::EmitBC: return "-emit-llvm-bc";
- case frontend::EmitHTML: return "-emit-html";
- case frontend::EmitLLVM: return "-emit-llvm";
- case frontend::EmitLLVMOnly: return "-emit-llvm-only";
- case frontend::EmitCodeGenOnly: return "-emit-codegen-only";
- case frontend::EmitObj: return "-emit-obj";
- case frontend::FixIt: return "-fixit";
- case frontend::GenerateModule: return "-emit-module";
- case frontend::GeneratePCH: return "-emit-pch";
- case frontend::GeneratePTH: return "-emit-pth";
- case frontend::InitOnly: return "-init-only";
- case frontend::ParseSyntaxOnly: return "-fsyntax-only";
- case frontend::PrintDeclContext: return "-print-decl-contexts";
- case frontend::PrintPreamble: return "-print-preamble";
- case frontend::PrintPreprocessedInput: return "-E";
- case frontend::RewriteMacros: return "-rewrite-macros";
- case frontend::RewriteObjC: return "-rewrite-objc";
- case frontend::RewriteTest: return "-rewrite-test";
- case frontend::RunAnalysis: return "-analyze";
- case frontend::MigrateSource: return "-migrate";
- case frontend::RunPreprocessorOnly: return "-Eonly";
- }
-
- llvm_unreachable("Unexpected language kind!");
-}
-
-static void FileSystemOptsToArgs(const FileSystemOptions &Opts, ToArgsList &Res) {
- if (!Opts.WorkingDir.empty())
- Res.push_back("-working-directory", Opts.WorkingDir);
-}
-
-static void CodeCompleteOptionsToArgs(const CodeCompleteOptions &Opts,
- ToArgsList &Res) {
- if (Opts.IncludeMacros)
- Res.push_back("-code-completion-macros");
- if (Opts.IncludeCodePatterns)
- Res.push_back("-code-completion-patterns");
- if (!Opts.IncludeGlobals)
- Res.push_back("-no-code-completion-globals");
- if (Opts.IncludeBriefComments)
- Res.push_back("-code-completion-brief-comments");
-}
-
-static void FrontendOptsToArgs(const FrontendOptions &Opts, ToArgsList &Res) {
- if (Opts.DisableFree)
- Res.push_back("-disable-free");
- if (Opts.RelocatablePCH)
- Res.push_back("-relocatable-pch");
- if (Opts.ShowHelp)
- Res.push_back("-help");
- if (Opts.ShowStats)
- Res.push_back("-print-stats");
- if (Opts.ShowTimers)
- Res.push_back("-ftime-report");
- if (Opts.ShowVersion)
- Res.push_back("-version");
- if (Opts.FixWhatYouCan)
- Res.push_back("-fix-what-you-can");
- if (Opts.FixOnlyWarnings)
- Res.push_back("-fix-only-warnings");
- if (Opts.FixAndRecompile)
- Res.push_back("-fixit-recompile");
- if (Opts.FixToTemporaries)
- Res.push_back("-fixit-to-temporary");
- switch (Opts.ARCMTAction) {
- case FrontendOptions::ARCMT_None:
- break;
- case FrontendOptions::ARCMT_Check:
- Res.push_back("-arcmt-check");
- break;
- case FrontendOptions::ARCMT_Modify:
- Res.push_back("-arcmt-modify");
- break;
- case FrontendOptions::ARCMT_Migrate:
- Res.push_back("-arcmt-migrate");
- break;
- }
- CodeCompleteOptionsToArgs(Opts.CodeCompleteOpts, Res);
- if (!Opts.MTMigrateDir.empty())
- Res.push_back("-mt-migrate-directory", Opts.MTMigrateDir);
- if (!Opts.ARCMTMigrateReportOut.empty())
- Res.push_back("-arcmt-migrate-report-output", Opts.ARCMTMigrateReportOut);
- if (Opts.ARCMTMigrateEmitARCErrors)
- Res.push_back("-arcmt-migrate-emit-errors");
-
- if (Opts.ObjCMTAction & ~FrontendOptions::ObjCMT_Literals)
- Res.push_back("-objcmt-migrate-literals");
- if (Opts.ObjCMTAction & ~FrontendOptions::ObjCMT_Subscripting)
- Res.push_back("-objcmt-migrate-subscripting");
-
- bool NeedLang = false;
- for (unsigned i = 0, e = Opts.Inputs.size(); i != e; ++i)
- if (FrontendOptions::getInputKindForExtension(Opts.Inputs[i].File) !=
- Opts.Inputs[i].Kind)
- NeedLang = true;
- if (NeedLang)
- Res.push_back("-x", getInputKindName(Opts.Inputs[0].Kind));
- for (unsigned i = 0, e = Opts.Inputs.size(); i != e; ++i) {
- assert((!NeedLang || Opts.Inputs[i].Kind == Opts.Inputs[0].Kind) &&
- "Unable to represent this input vector!");
- Res.push_back(Opts.Inputs[i].File);
- }
-
- if (!Opts.OutputFile.empty())
- Res.push_back("-o", Opts.OutputFile);
- if (!Opts.CodeCompletionAt.FileName.empty())
- Res.push_back("-code-completion-at",
- Opts.CodeCompletionAt.FileName + ":" +
- llvm::utostr(Opts.CodeCompletionAt.Line) + ":" +
- llvm::utostr(Opts.CodeCompletionAt.Column));
- if (Opts.ProgramAction != frontend::PluginAction)
- Res.push_back(getActionName(Opts.ProgramAction));
- if (!Opts.ActionName.empty()) {
- Res.push_back("-plugin", Opts.ActionName);
- for(unsigned i = 0, e = Opts.PluginArgs.size(); i != e; ++i)
- Res.push_back("-plugin-arg-" + Opts.ActionName, Opts.PluginArgs[i]);
- }
- if (!Opts.ASTDumpFilter.empty())
- Res.push_back("-ast-dump-filter", Opts.ASTDumpFilter);
- for (unsigned i = 0, e = Opts.Plugins.size(); i != e; ++i)
- Res.push_back("-load", Opts.Plugins[i]);
- for (unsigned i = 0, e = Opts.AddPluginActions.size(); i != e; ++i) {
- Res.push_back("-add-plugin", Opts.AddPluginActions[i]);
- for(unsigned ai = 0, ae = Opts.AddPluginArgs.size(); ai != ae; ++ai)
- Res.push_back("-plugin-arg-" + Opts.AddPluginActions[i],
- Opts.AddPluginArgs[i][ai]);
- }
- for (unsigned i = 0, e = Opts.ASTMergeFiles.size(); i != e; ++i)
- Res.push_back("-ast-merge", Opts.ASTMergeFiles[i]);
- for (unsigned i = 0, e = Opts.LLVMArgs.size(); i != e; ++i)
- Res.push_back("-mllvm", Opts.LLVMArgs[i]);
- if (!Opts.OverrideRecordLayoutsFile.empty())
- Res.push_back("-foverride-record-layout=" + Opts.OverrideRecordLayoutsFile);
-}
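The "-x" handling near the end of the function above emits a language override only when some input's kind differs from what its file extension implies, and asserts that all inputs then share one kind. A hedged stand-alone sketch of that decision, with a made-up extension table:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    enum class Kind { C, CXX, ObjC };

    // Hypothetical stand-in for getInputKindForExtension: map a file suffix
    // to a default language kind (only a tiny subset shown).
    static Kind kindForExtension(const std::string &File) {
      if (File.size() >= 4 && File.compare(File.size() - 4, 4, ".cpp") == 0)
        return Kind::CXX;
      if (File.size() >= 2 && File.compare(File.size() - 2, 2, ".m") == 0)
        return Kind::ObjC;
      return Kind::C; // ".c" and unknown suffixes default to C here
    }

    struct Input { std::string File; Kind K; };

    int main() {
      std::vector<Input> Inputs = {{"a.c", Kind::CXX}, {"b.c", Kind::CXX}};
      // Emit "-x" only if some input's kind differs from its extension default.
      bool NeedLang = std::any_of(Inputs.begin(), Inputs.end(),
          [](const Input &I) { return kindForExtension(I.File) != I.K; });
      if (NeedLang)
        std::cout << "-x c++\n"; // all inputs must share one kind for this to work
      for (const Input &I : Inputs)
        std::cout << I.File << '\n';
    }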
-
-static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
- ToArgsList &Res) {
- if (Opts.Sysroot != "/") {
- Res.push_back("-isysroot");
- Res.push_back(Opts.Sysroot);
- }
-
- /// User-specified include entries.
- for (unsigned i = 0, e = Opts.UserEntries.size(); i != e; ++i) {
- const HeaderSearchOptions::Entry &E = Opts.UserEntries[i];
- if (E.IsFramework && (E.Group != frontend::Angled || !E.IsUserSupplied))
- llvm::report_fatal_error("Invalid option set!");
- if (E.IsUserSupplied) {
- switch (E.Group) {
- case frontend::After:
- Res.push_back("-idirafter");
- break;
-
- case frontend::Quoted:
- Res.push_back("-iquote");
- break;
-
- case frontend::System:
- Res.push_back("-isystem");
- break;
-
- case frontend::IndexHeaderMap:
- Res.push_back("-index-header-map");
- Res.push_back(E.IsFramework? "-F" : "-I");
- break;
-
- case frontend::CSystem:
- Res.push_back("-c-isystem");
- break;
-
- case frontend::CXXSystem:
- Res.push_back("-cxx-isystem");
- break;
-
- case frontend::ObjCSystem:
- Res.push_back("-objc-isystem");
- break;
-
- case frontend::ObjCXXSystem:
- Res.push_back("-objcxx-isystem");
- break;
-
- case frontend::Angled:
- Res.push_back(E.IsFramework ? "-F" : "-I");
- break;
- }
- } else {
- if (E.IsInternal) {
- assert(E.Group == frontend::System && "Unexpected header search group");
- if (E.ImplicitExternC)
- Res.push_back("-internal-externc-isystem");
- else
- Res.push_back("-internal-isystem");
- } else {
- if (E.Group != frontend::Angled && E.Group != frontend::System)
- llvm::report_fatal_error("Invalid option set!");
- Res.push_back(E.Group == frontend::Angled ? "-iwithprefixbefore" :
- "-iwithprefix");
- }
- }
- Res.push_back(E.Path);
- }
-
- /// User-specified system header prefixes.
- for (unsigned i = 0, e = Opts.SystemHeaderPrefixes.size(); i != e; ++i) {
- if (Opts.SystemHeaderPrefixes[i].IsSystemHeader)
- Res.push_back("-isystem-prefix");
- else
- Res.push_back("-ino-system-prefix");
-
- Res.push_back(Opts.SystemHeaderPrefixes[i].Prefix);
- }
-
- if (!Opts.ResourceDir.empty())
- Res.push_back("-resource-dir", Opts.ResourceDir);
- if (!Opts.ModuleCachePath.empty())
- Res.push_back("-fmodule-cache-path", Opts.ModuleCachePath);
- if (!Opts.UseStandardSystemIncludes)
- Res.push_back("-nostdsysteminc");
- if (!Opts.UseStandardCXXIncludes)
- Res.push_back("-nostdinc++");
- if (Opts.UseLibcxx)
- Res.push_back("-stdlib=libc++");
- if (Opts.Verbose)
- Res.push_back("-v");
-}
-
-static void LangOptsToArgs(const LangOptions &Opts, ToArgsList &Res) {
- LangOptions DefaultLangOpts;
-
- // FIXME: Need to set -std to get all the implicit options.
-
- // FIXME: We want to only pass options relative to the defaults, which
- // requires constructing a target. :(
- //
- // It would be better to push all the target-specific choices into the driver,
- // so that everything below it is more uniform.
-
- if (Opts.Trigraphs)
- Res.push_back("-trigraphs");
- // Implicit based on the input kind:
- // AsmPreprocessor, CPlusPlus, ObjC1, ObjC2, OpenCL
- // Implicit based on the input language standard:
- // BCPLComment, C99, CPlusPlus0x, Digraphs, GNUInline, ImplicitInt, GNUMode
- if (Opts.DollarIdents)
- Res.push_back("-fdollars-in-identifiers");
- if (Opts.GNUMode && !Opts.GNUKeywords)
- Res.push_back("-fno-gnu-keywords");
- if (!Opts.GNUMode && Opts.GNUKeywords)
- Res.push_back("-fgnu-keywords");
- if (Opts.MicrosoftExt)
- Res.push_back("-fms-extensions");
- if (Opts.MicrosoftMode)
- Res.push_back("-fms-compatibility");
- if (Opts.MSCVersion != 0)
- Res.push_back("-fmsc-version=" + llvm::utostr(Opts.MSCVersion));
- if (Opts.Borland)
- Res.push_back("-fborland-extensions");
- if (Opts.ObjCDefaultSynthProperties)
- Res.push_back("-fobjc-default-synthesize-properties");
- // NoInline is implicit.
- if (!Opts.CXXOperatorNames)
- Res.push_back("-fno-operator-names");
- if (Opts.PascalStrings)
- Res.push_back("-fpascal-strings");
- if (Opts.CatchUndefined)
- Res.push_back("-fcatch-undefined-behavior");
- if (Opts.AddressSanitizer)
- Res.push_back("-faddress-sanitizer");
- if (Opts.ThreadSanitizer)
- Res.push_back("-fthread-sanitizer");
- if (Opts.WritableStrings)
- Res.push_back("-fwritable-strings");
- if (Opts.ConstStrings)
- Res.push_back("-fconst-strings");
- if (!Opts.LaxVectorConversions)
- Res.push_back("-fno-lax-vector-conversions");
- if (Opts.AltiVec)
- Res.push_back("-faltivec");
- if (Opts.Exceptions)
- Res.push_back("-fexceptions");
- if (Opts.ObjCExceptions)
- Res.push_back("-fobjc-exceptions");
- if (Opts.CXXExceptions)
- Res.push_back("-fcxx-exceptions");
- if (Opts.SjLjExceptions)
- Res.push_back("-fsjlj-exceptions");
- if (Opts.TraditionalCPP)
- Res.push_back("-traditional-cpp");
- if (!Opts.RTTI)
- Res.push_back("-fno-rtti");
- if (Opts.MSBitfields)
- Res.push_back("-mms-bitfields");
- if (Opts.Freestanding)
- Res.push_back("-ffreestanding");
- if (Opts.FormatExtensions)
- Res.push_back("-fformat-extensions");
- if (Opts.NoBuiltin)
- Res.push_back("-fno-builtin");
- if (!Opts.AssumeSaneOperatorNew)
- Res.push_back("-fno-assume-sane-operator-new");
- if (!Opts.ThreadsafeStatics)
- Res.push_back("-fno-threadsafe-statics");
- if (Opts.POSIXThreads)
- Res.push_back("-pthread");
- if (Opts.Blocks)
- Res.push_back("-fblocks");
- if (Opts.BlocksRuntimeOptional)
- Res.push_back("-fblocks-runtime-optional");
- if (Opts.Modules)
- Res.push_back("-fmodules");
- if (Opts.EmitAllDecls)
- Res.push_back("-femit-all-decls");
- if (Opts.MathErrno)
- Res.push_back("-fmath-errno");
- switch (Opts.getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined: break;
- case LangOptions::SOB_Defined: Res.push_back("-fwrapv"); break;
- case LangOptions::SOB_Trapping:
- Res.push_back("-ftrapv");
- if (!Opts.OverflowHandler.empty())
- Res.push_back("-ftrapv-handler", Opts.OverflowHandler);
- break;
- }
- switch (Opts.getFPContractMode()) {
- case LangOptions::FPC_Off: Res.push_back("-ffp-contract=off"); break;
- case LangOptions::FPC_On: Res.push_back("-ffp-contract=on"); break;
- case LangOptions::FPC_Fast: Res.push_back("-ffp-contract=fast"); break;
- }
- if (Opts.HeinousExtensions)
- Res.push_back("-fheinous-gnu-extensions");
- // Optimize is implicit.
- // OptimizeSize is implicit.
- if (Opts.FastMath)
- Res.push_back("-ffast-math");
- if (Opts.Static)
- Res.push_back("-static-define");
- if (Opts.DumpRecordLayoutsSimple)
- Res.push_back("-fdump-record-layouts-simple");
- else if (Opts.DumpRecordLayouts)
- Res.push_back("-fdump-record-layouts");
- if (Opts.DumpVTableLayouts)
- Res.push_back("-fdump-vtable-layouts");
- if (Opts.NoBitFieldTypeAlign)
- Res.push_back("-fno-bitfield-type-alignment");
- if (Opts.PICLevel)
- Res.push_back("-pic-level", llvm::utostr(Opts.PICLevel));
- if (Opts.PIELevel)
- Res.push_back("-pie-level", llvm::utostr(Opts.PIELevel));
- if (Opts.ObjCGCBitmapPrint)
- Res.push_back("-print-ivar-layout");
- if (Opts.NoConstantCFStrings)
- Res.push_back("-fno-constant-cfstrings");
- if (!Opts.AccessControl)
- Res.push_back("-fno-access-control");
- if (!Opts.CharIsSigned)
- Res.push_back("-fno-signed-char");
- if (Opts.ShortWChar)
- Res.push_back("-fshort-wchar");
- if (!Opts.ElideConstructors)
- Res.push_back("-fno-elide-constructors");
- if (Opts.getGC() != LangOptions::NonGC) {
- if (Opts.getGC() == LangOptions::HybridGC) {
- Res.push_back("-fobjc-gc");
- } else {
- assert(Opts.getGC() == LangOptions::GCOnly && "Invalid GC mode!");
- Res.push_back("-fobjc-gc-only");
- }
- }
- Res.push_back("-fobjc-runtime=" + Opts.ObjCRuntime.getAsString());
- if (Opts.ObjCAutoRefCount)
- Res.push_back("-fobjc-arc");
- if (Opts.ObjCRuntimeHasWeak)
- Res.push_back("-fobjc-runtime-has-weak");
- if (!Opts.ObjCInferRelatedResultType)
- Res.push_back("-fno-objc-infer-related-result-type");
-
- if (Opts.AppleKext)
- Res.push_back("-fapple-kext");
-
- if (Opts.getVisibilityMode() != DefaultVisibility) {
- Res.push_back("-fvisibility");
- if (Opts.getVisibilityMode() == HiddenVisibility) {
- Res.push_back("hidden");
- } else {
- assert(Opts.getVisibilityMode() == ProtectedVisibility &&
- "Invalid visibility!");
- Res.push_back("protected");
- }
- }
- if (Opts.InlineVisibilityHidden)
- Res.push_back("-fvisibility-inlines-hidden");
-
- if (Opts.getStackProtector() != 0)
- Res.push_back("-stack-protector", llvm::utostr(Opts.getStackProtector()));
- if (Opts.InstantiationDepth != DefaultLangOpts.InstantiationDepth)
- Res.push_back("-ftemplate-depth", llvm::utostr(Opts.InstantiationDepth));
- if (Opts.ConstexprCallDepth != DefaultLangOpts.ConstexprCallDepth)
- Res.push_back("-fconstexpr-depth", llvm::utostr(Opts.ConstexprCallDepth));
- if (!Opts.ObjCConstantStringClass.empty())
- Res.push_back("-fconstant-string-class", Opts.ObjCConstantStringClass);
- if (Opts.FakeAddressSpaceMap)
- Res.push_back("-ffake-address-space-map");
- if (Opts.ParseUnknownAnytype)
- Res.push_back("-funknown-anytype");
- if (Opts.DebuggerSupport)
- Res.push_back("-fdebugger-support");
- if (Opts.DebuggerCastResultToId)
- Res.push_back("-fdebugger-cast-result-to-id");
- if (Opts.DebuggerObjCLiteral)
- Res.push_back("-fdebugger-objc-literal");
- if (Opts.DelayedTemplateParsing)
- Res.push_back("-fdelayed-template-parsing");
- if (Opts.Deprecated)
- Res.push_back("-fdeprecated-macro");
- if (Opts.ApplePragmaPack)
- Res.push_back("-fapple-pragma-pack");
- if (!Opts.CurrentModule.empty())
- Res.push_back("-fmodule-name=" + Opts.CurrentModule);
-}
-
-static void PreprocessorOptsToArgs(const PreprocessorOptions &Opts,
- ToArgsList &Res) {
- for (unsigned i = 0, e = Opts.Macros.size(); i != e; ++i)
- Res.push_back(std::string(Opts.Macros[i].second ? "-U" : "-D") +
- Opts.Macros[i].first);
- for (unsigned i = 0, e = Opts.Includes.size(); i != e; ++i) {
- // FIXME: We need to avoid reincluding the implicit PCH and PTH includes.
- Res.push_back("-include", Opts.Includes[i]);
- }
- for (unsigned i = 0, e = Opts.MacroIncludes.size(); i != e; ++i)
- Res.push_back("-imacros", Opts.MacroIncludes[i]);
- if (!Opts.UsePredefines)
- Res.push_back("-undef");
- if (Opts.DetailedRecord)
- Res.push_back("-detailed-preprocessing-record");
- if (!Opts.ImplicitPCHInclude.empty())
- Res.push_back("-include-pch", Opts.ImplicitPCHInclude);
- if (!Opts.ImplicitPTHInclude.empty())
- Res.push_back("-include-pth", Opts.ImplicitPTHInclude);
- if (!Opts.TokenCache.empty()) {
- if (Opts.ImplicitPTHInclude.empty())
- Res.push_back("-token-cache", Opts.TokenCache);
- else
- assert(Opts.ImplicitPTHInclude == Opts.TokenCache &&
- "Unsupported option combination!");
- }
- for (unsigned i = 0, e = Opts.ChainedIncludes.size(); i != e; ++i)
- Res.push_back("-chain-include", Opts.ChainedIncludes[i]);
- for (unsigned i = 0, e = Opts.RemappedFiles.size(); i != e; ++i) {
- Res.push_back("-remap-file", Opts.RemappedFiles[i].first + ";" +
- Opts.RemappedFiles[i].second);
- }
-}
-
-static void PreprocessorOutputOptsToArgs(const PreprocessorOutputOptions &Opts,
- ToArgsList &Res) {
- if (!Opts.ShowCPP && !Opts.ShowMacros)
- llvm::report_fatal_error("Invalid option combination!");
-
- if (Opts.ShowCPP && Opts.ShowMacros)
- Res.push_back("-dD");
- else if (!Opts.ShowCPP && Opts.ShowMacros)
- Res.push_back("-dM");
-
- if (!Opts.ShowLineMarkers)
- Res.push_back("-P");
- if (Opts.ShowComments)
- Res.push_back("-C");
- if (Opts.ShowMacroComments)
- Res.push_back("-CC");
-}
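The mapping in the function above is: -dD means preprocessed output plus macro definitions, -dM means macro definitions only, and requesting neither is a fatal error. A tiny sketch of that three-way decision (a simplification, not the actual interface):

    #include <iostream>
    #include <stdexcept>

    // Return the flag for a ShowCPP/ShowMacros combination; at least one of
    // the two must be requested, mirroring the fatal error in the function.
    static const char *dumpFlag(bool ShowCPP, bool ShowMacros) {
      if (!ShowCPP && !ShowMacros)
        throw std::invalid_argument("invalid option combination");
      if (ShowCPP && ShowMacros)
        return "-dD";    // preprocessed source with macro definitions
      if (!ShowCPP && ShowMacros)
        return "-dM";    // macro definitions only
      return "";         // plain preprocessed output needs no extra flag
    }

    int main() {
      std::cout << dumpFlag(true, true) << ' ' << dumpFlag(false, true) << '\n';
    }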
-
-static void TargetOptsToArgs(const TargetOptions &Opts,
- ToArgsList &Res) {
- Res.push_back("-triple");
- Res.push_back(Opts.Triple);
- if (!Opts.CPU.empty())
- Res.push_back("-target-cpu", Opts.CPU);
- if (!Opts.ABI.empty())
- Res.push_back("-target-abi", Opts.ABI);
- if (!Opts.LinkerVersion.empty())
- Res.push_back("-target-linker-version", Opts.LinkerVersion);
- if (!Opts.CXXABI.empty())
- Res.push_back("-cxx-abi", Opts.CXXABI);
- for (unsigned i = 0, e = Opts.Features.size(); i != e; ++i)
- Res.push_back("-target-feature", Opts.Features[i]);
-}
-
-void CompilerInvocation::toArgs(std::vector<std::string> &Res) const {
- ToArgsList List(Res);
- AnalyzerOptsToArgs(getAnalyzerOpts(), List);
- CodeGenOptsToArgs(getCodeGenOpts(), List);
- DependencyOutputOptsToArgs(getDependencyOutputOpts(), List);
- DiagnosticOptsToArgs(getDiagnosticOpts(), List);
- FileSystemOptsToArgs(getFileSystemOpts(), List);
- FrontendOptsToArgs(getFrontendOpts(), List);
- HeaderSearchOptsToArgs(getHeaderSearchOpts(), List);
- LangOptsToArgs(*getLangOpts(), List);
- PreprocessorOptsToArgs(getPreprocessorOpts(), List);
- PreprocessorOutputOptsToArgs(getPreprocessorOutputOpts(), List);
- TargetOptsToArgs(getTargetOpts(), List);
-}
-
-//===----------------------------------------------------------------------===//
-// Deserialization (to args)
+// Deserialization (from args)
//===----------------------------------------------------------------------===//
using namespace clang::driver;
@@ -967,7 +71,7 @@ static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
assert (A->getOption().matches(options::OPT_O));
- llvm::StringRef S(A->getValue(Args));
+ llvm::StringRef S(A->getValue());
if (S == "s" || S == "z" || S.empty())
return 2;
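The intent of this hunk's surrounding function: -Os, -Oz, and a bare -O all normalize to optimization level 2, while numeric spellings parse directly. A simplified stand-alone sketch (error handling omitted, not the real API):

    #include <iostream>
    #include <string>

    // Hypothetical simplification: given the text after "-O", return the
    // numeric optimization level the frontend would use.
    static unsigned optimizationLevel(const std::string &S) {
      if (S == "s" || S == "z" || S.empty())
        return 2;              // -Os / -Oz / -O imply level 2
      return std::stoul(S);    // -O0, -O1, -O2, -O3, ... (throws on junk)
    }

    int main() {
      for (const char *V : {"", "s", "z", "3"})
        std::cout << "-O" << V << " -> " << optimizationLevel(V) << '\n';
    }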
@@ -981,7 +85,7 @@ static unsigned getOptimizationLevelSize(ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags) {
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
if (A->getOption().matches(options::OPT_O)) {
- switch (A->getValue(Args)[0]) {
+ switch (A->getValue()[0]) {
default:
return 0;
case 's':
@@ -998,14 +102,14 @@ static void addWarningArgs(ArgList &Args, std::vector<std::string> &Warnings) {
for (arg_iterator I = Args.filtered_begin(OPT_W_Group),
E = Args.filtered_end(); I != E; ++I) {
Arg *A = *I;
- // If the argument is a pure flag, add its name (minus the "-W" at the beginning)
+ // If the argument is a pure flag, add its name (minus the "W" at the beginning)
// to the warning list. Else, add its value (for the OPT_W case).
if (A->getOption().getKind() == Option::FlagClass) {
- Warnings.push_back(A->getOption().getName().substr(2));
+ Warnings.push_back(A->getOption().getName().substr(1));
} else {
for (unsigned Idx = 0, End = A->getNumValues();
Idx < End; ++Idx) {
- StringRef V = A->getValue(Args, Idx);
+ StringRef V = A->getValue(Idx);
// "-Wl," and such are not warning options.
// FIXME: Should be handled by putting these in separate flags.
if (V.startswith("l,") || V.startswith("a,") || V.startswith("p,"))
@@ -1022,11 +126,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
using namespace options;
bool Success = true;
if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
AnalysisStores Value = llvm::StringSwitch<AnalysisStores>(Name)
#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
.Case(CMDFLAG, NAME##Model)
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
.Default(NumStores);
if (Value == NumStores) {
Diags.Report(diag::err_drv_invalid_value)
@@ -1038,11 +142,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
if (Arg *A = Args.getLastArg(OPT_analyzer_constraints)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
AnalysisConstraints Value = llvm::StringSwitch<AnalysisConstraints>(Name)
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) \
.Case(CMDFLAG, NAME##Model)
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
.Default(NumConstraints);
if (Value == NumConstraints) {
Diags.Report(diag::err_drv_invalid_value)
@@ -1054,11 +158,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
if (Arg *A = Args.getLastArg(OPT_analyzer_output)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
AnalysisDiagClients Value = llvm::StringSwitch<AnalysisDiagClients>(Name)
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN, AUTOCREAT) \
.Case(CMDFLAG, PD_##NAME)
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
.Default(NUM_ANALYSIS_DIAG_CLIENTS);
if (Value == NUM_ANALYSIS_DIAG_CLIENTS) {
Diags.Report(diag::err_drv_invalid_value)
@@ -1070,11 +174,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
if (Arg *A = Args.getLastArg(OPT_analyzer_purge)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
AnalysisPurgeMode Value = llvm::StringSwitch<AnalysisPurgeMode>(Name)
#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) \
.Case(CMDFLAG, NAME)
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
.Default(NumPurgeModes);
if (Value == NumPurgeModes) {
Diags.Report(diag::err_drv_invalid_value)
@@ -1086,11 +190,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
if (Arg *A = Args.getLastArg(OPT_analyzer_ipa)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
AnalysisIPAMode Value = llvm::StringSwitch<AnalysisIPAMode>(Name)
#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) \
.Case(CMDFLAG, NAME)
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
.Default(NumIPAModes);
if (Value == NumIPAModes) {
Diags.Report(diag::err_drv_invalid_value)
@@ -1102,11 +206,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
if (Arg *A = Args.getLastArg(OPT_analyzer_inlining_mode)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
AnalysisInliningMode Value = llvm::StringSwitch<AnalysisInliningMode>(Name)
#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) \
.Case(CMDFLAG, NAME)
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
.Default(NumInliningModes);
if (Value == NumInliningModes) {
Diags.Report(diag::err_drv_invalid_value)
@@ -1118,21 +222,21 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
- Opts.VisualizeEGDot = Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
- Opts.VisualizeEGUbi = Args.hasArg(OPT_analyzer_viz_egraph_ubigraph);
+ Opts.visualizeExplodedGraphWithGraphViz =
+ Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
+ Opts.visualizeExplodedGraphWithUbiGraph =
+ Args.hasArg(OPT_analyzer_viz_egraph_ubigraph);
Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
Opts.AnalyzeNestedBlocks =
Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks);
- Opts.EagerlyAssume = Args.hasArg(OPT_analyzer_eagerly_assume);
+ Opts.eagerlyAssumeBinOpBifurcation = Args.hasArg(OPT_analyzer_eagerly_assume);
Opts.AnalyzeSpecificFunction = Args.getLastArgValue(OPT_analyze_function);
Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
- Opts.CFGAddImplicitDtors = Args.hasArg(OPT_analysis_CFGAddImplicitDtors);
Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
Opts.MaxNodes = Args.getLastArgIntValue(OPT_analyzer_max_nodes, 150000,Diags);
- Opts.MaxLoop = Args.getLastArgIntValue(OPT_analyzer_max_loop, 4, Diags);
- Opts.EagerlyTrimEGraph = !Args.hasArg(OPT_analyzer_no_eagerly_trim_egraph);
+ Opts.maxBlockVisitOnPath = Args.getLastArgIntValue(OPT_analyzer_max_loop, 4, Diags);
Opts.PrintStats = Args.hasArg(OPT_analyzer_stats);
Opts.InlineMaxStackDepth =
Args.getLastArgIntValue(OPT_analyzer_inline_max_stack_depth,
@@ -1150,12 +254,42 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
bool enable = (A->getOption().getID() == OPT_analyzer_checker);
  // We can have a list of comma-separated checker names, e.g.:
// '-analyzer-checker=cocoa,unix'
- StringRef checkerList = A->getValue(Args);
+ StringRef checkerList = A->getValue();
SmallVector<StringRef, 4> checkers;
checkerList.split(checkers, ",");
for (unsigned i = 0, e = checkers.size(); i != e; ++i)
Opts.CheckersControlList.push_back(std::make_pair(checkers[i], enable));
}
+
+ // Go through the analyzer configuration options.
+ for (arg_iterator it = Args.filtered_begin(OPT_analyzer_config),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
+ // We can have a list of comma-separated config names, e.g.:
+ // '-analyzer-config key1=val1,key2=val2'
+ StringRef configList = A->getValue();
+ SmallVector<StringRef, 4> configVals;
+ configList.split(configVals, ",");
+ for (unsigned i = 0, e = configVals.size(); i != e; ++i) {
+ StringRef key, val;
+ llvm::tie(key, val) = configVals[i].split("=");
+ if (val.empty()) {
+ Diags.Report(SourceLocation(),
+ diag::err_analyzer_config_no_value) << configVals[i];
+ Success = false;
+ break;
+ }
+ if (val.find('=') != StringRef::npos) {
+ Diags.Report(SourceLocation(),
+ diag::err_analyzer_config_multiple_values)
+ << configVals[i];
+ Success = false;
+ break;
+ }
+ Opts.Config[key] = val;
+ }
+ }
return Success;
}
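The new -analyzer-config loop above splits a comma-separated list and then validates each key=value pair, rejecting entries with a missing value or a second '='. A self-contained sketch of the same checks, using standard strings in place of StringRef and stderr in place of the diagnostics engine:

    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    // Parse "key1=val1,key2=val2" into Config; reject entries with a missing
    // value or with more than one '=' sign, mirroring the checks in the hunk.
    static bool parseConfig(const std::string &List,
                            std::map<std::string, std::string> &Config) {
      std::istringstream In(List);
      std::string Item;
      while (std::getline(In, Item, ',')) {
        std::string::size_type Eq = Item.find('=');
        if (Eq == std::string::npos || Eq + 1 == Item.size()) {
          std::cerr << "no value in '" << Item << "'\n";
          return false;
        }
        if (Item.find('=', Eq + 1) != std::string::npos) {
          std::cerr << "multiple values in '" << Item << "'\n";
          return false;
        }
        Config[Item.substr(0, Eq)] = Item.substr(Eq + 1);
      }
      return true;
    }

    int main() {
      std::map<std::string, std::string> Config;
      if (parseConfig("ipa=inlining,max-nodes=200000", Config))
        for (const auto &KV : Config)
          std::cout << KV.first << " = " << KV.second << '\n';
    }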
@@ -1181,21 +315,23 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.OptimizationLevel = OptLevel;
// We must always run at least the always inlining pass.
- Opts.Inlining = (Opts.OptimizationLevel > 1) ? CodeGenOptions::NormalInlining
- : CodeGenOptions::OnlyAlwaysInlining;
+ Opts.setInlining(
+ (Opts.OptimizationLevel > 1) ? CodeGenOptions::NormalInlining
+ : CodeGenOptions::OnlyAlwaysInlining);
// -fno-inline-functions overrides OptimizationLevel > 1.
Opts.NoInline = Args.hasArg(OPT_fno_inline);
- Opts.Inlining = Args.hasArg(OPT_fno_inline_functions) ?
- CodeGenOptions::OnlyAlwaysInlining : Opts.Inlining;
+ Opts.setInlining(Args.hasArg(OPT_fno_inline_functions) ?
+ CodeGenOptions::OnlyAlwaysInlining : Opts.getInlining());
if (Args.hasArg(OPT_gline_tables_only)) {
- Opts.DebugInfo = CodeGenOptions::DebugLineTablesOnly;
+ Opts.setDebugInfo(CodeGenOptions::DebugLineTablesOnly);
} else if (Args.hasArg(OPT_g_Flag)) {
if (Args.hasFlag(OPT_flimit_debug_info, OPT_fno_limit_debug_info, true))
- Opts.DebugInfo = CodeGenOptions::LimitedDebugInfo;
+ Opts.setDebugInfo(CodeGenOptions::LimitedDebugInfo);
else
- Opts.DebugInfo = CodeGenOptions::FullDebugInfo;
+ Opts.setDebugInfo(CodeGenOptions::FullDebugInfo);
}
+ Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
@@ -1265,18 +401,21 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
Opts.EmitOpenCLArgMetadata = Args.hasArg(OPT_cl_kernel_arg_info);
- Opts.EmitMicrosoftInlineAsm = Args.hasArg(OPT_fenable_experimental_ms_inline_asm);
Opts.CoverageFile = Args.getLastArgValue(OPT_coverage_file);
Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
Opts.LinkBitcodeFile = Args.getLastArgValue(OPT_mlink_bitcode_file);
+ Opts.SSPBufferSize =
+ Args.getLastArgIntValue(OPT_stack_protector_buffer_size, 8, Diags);
Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
if (Arg *A = Args.getLastArg(OPT_mstack_alignment)) {
- StringRef Val = A->getValue(Args);
- Val.getAsInteger(10, Opts.StackAlignment);
+ StringRef Val = A->getValue();
+ unsigned StackAlignment = Opts.StackAlignment;
+ Val.getAsInteger(10, StackAlignment);
+ Opts.StackAlignment = StackAlignment;
}
if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
unsigned Method = llvm::StringSwitch<unsigned>(Name)
.Case("legacy", CodeGenOptions::Legacy)
.Case("non-legacy", CodeGenOptions::NonLegacy)
@@ -1286,12 +425,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
Success = false;
} else {
- Opts.ObjCDispatchMethod = Method;
+ Opts.setObjCDispatchMethod(
+ static_cast<CodeGenOptions::ObjCDispatchMethodKind>(Method));
}
}
if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
unsigned Model = llvm::StringSwitch<unsigned>(Name)
.Case("global-dynamic", CodeGenOptions::GeneralDynamicTLSModel)
.Case("local-dynamic", CodeGenOptions::LocalDynamicTLSModel)
@@ -1302,7 +442,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
Success = false;
} else {
- Opts.DefaultTLSModel = static_cast<CodeGenOptions::TLSModel>(Model);
+ Opts.setDefaultTLSModel(static_cast<CodeGenOptions::TLSModel>(Model));
}
}
@@ -1353,9 +493,9 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
StringRef ShowOverloads =
Args.getLastArgValue(OPT_fshow_overloads_EQ, "all");
if (ShowOverloads == "best")
- Opts.ShowOverloads = DiagnosticsEngine::Ovl_Best;
+ Opts.setShowOverloads(Ovl_Best);
else if (ShowOverloads == "all")
- Opts.ShowOverloads = DiagnosticsEngine::Ovl_All;
+ Opts.setShowOverloads(Ovl_All);
else {
Success = false;
if (Diags)
@@ -1383,11 +523,11 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
StringRef Format =
Args.getLastArgValue(OPT_fdiagnostics_format, "clang");
if (Format == "clang")
- Opts.Format = DiagnosticOptions::Clang;
+ Opts.setFormat(DiagnosticOptions::Clang);
else if (Format == "msvc")
- Opts.Format = DiagnosticOptions::Msvc;
+ Opts.setFormat(DiagnosticOptions::Msvc);
else if (Format == "vi")
- Opts.Format = DiagnosticOptions::Vi;
+ Opts.setFormat(DiagnosticOptions::Vi);
else {
Success = false;
if (Diags)
@@ -1469,7 +609,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
case OPT_emit_obj:
Opts.ProgramAction = frontend::EmitObj; break;
case OPT_fixit_EQ:
- Opts.FixItSuffix = A->getValue(Args);
+ Opts.FixItSuffix = A->getValue();
// fall-through!
case OPT_fixit:
Opts.ProgramAction = frontend::FixIt; break;
@@ -1505,14 +645,14 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
}
if (const Arg* A = Args.getLastArg(OPT_plugin)) {
- Opts.Plugins.push_back(A->getValue(Args,0));
+ Opts.Plugins.push_back(A->getValue(0));
Opts.ProgramAction = frontend::PluginAction;
- Opts.ActionName = A->getValue(Args);
+ Opts.ActionName = A->getValue();
for (arg_iterator it = Args.filtered_begin(OPT_plugin_arg),
end = Args.filtered_end(); it != end; ++it) {
- if ((*it)->getValue(Args, 0) == Opts.ActionName)
- Opts.PluginArgs.push_back((*it)->getValue(Args, 1));
+ if ((*it)->getValue(0) == Opts.ActionName)
+ Opts.PluginArgs.push_back((*it)->getValue(1));
}
}
@@ -1521,17 +661,17 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
for (int i = 0, e = Opts.AddPluginActions.size(); i != e; ++i) {
for (arg_iterator it = Args.filtered_begin(OPT_plugin_arg),
end = Args.filtered_end(); it != end; ++it) {
- if ((*it)->getValue(Args, 0) == Opts.AddPluginActions[i])
- Opts.AddPluginArgs[i].push_back((*it)->getValue(Args, 1));
+ if ((*it)->getValue(0) == Opts.AddPluginActions[i])
+ Opts.AddPluginArgs[i].push_back((*it)->getValue(1));
}
}
if (const Arg *A = Args.getLastArg(OPT_code_completion_at)) {
Opts.CodeCompletionAt =
- ParsedSourceLocation::FromString(A->getValue(Args));
+ ParsedSourceLocation::FromString(A->getValue());
if (Opts.CodeCompletionAt.FileName.empty())
Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue(Args);
+ << A->getAsString(Args) << A->getValue();
}
Opts.DisableFree = Args.hasArg(OPT_disable_free);
@@ -1597,7 +737,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
InputKind DashX = IK_None;
if (const Arg *A = Args.getLastArg(OPT_x)) {
- DashX = llvm::StringSwitch<InputKind>(A->getValue(Args))
+ DashX = llvm::StringSwitch<InputKind>(A->getValue())
.Case("c", IK_C)
.Case("cl", IK_OpenCL)
.Case("cuda", IK_CUDA)
@@ -1621,7 +761,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Default(IK_None);
if (DashX == IK_None)
Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue(Args);
+ << A->getAsString(Args) << A->getValue();
}
// '-' is the default input if none is given.
@@ -1669,7 +809,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Opts.UseStandardSystemIncludes = !Args.hasArg(OPT_nostdsysteminc);
Opts.UseStandardCXXIncludes = !Args.hasArg(OPT_nostdincxx);
if (const Arg *A = Args.getLastArg(OPT_stdlib_EQ))
- Opts.UseLibcxx = (strcmp(A->getValue(Args), "libc++") == 0);
+ Opts.UseLibcxx = (strcmp(A->getValue(), "libc++") == 0);
Opts.ResourceDir = Args.getLastArgValue(OPT_resource_dir);
Opts.ModuleCachePath = Args.getLastArgValue(OPT_fmodule_cache_path);
Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
@@ -1688,7 +828,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
frontend::IncludeDirGroup Group
= IsIndexHeaderMap? frontend::IndexHeaderMap : frontend::Angled;
- Opts.AddPath((*it)->getValue(Args), Group, true,
+ Opts.AddPath((*it)->getValue(), Group, true,
/*IsFramework=*/ (*it)->getOption().matches(OPT_F), false);
IsIndexHeaderMap = false;
}
@@ -1700,43 +840,43 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
ie = Args.filtered_end(); it != ie; ++it) {
const Arg *A = *it;
if (A->getOption().matches(OPT_iprefix))
- Prefix = A->getValue(Args);
+ Prefix = A->getValue();
else if (A->getOption().matches(OPT_iwithprefix))
- Opts.AddPath(Prefix.str() + A->getValue(Args),
+ Opts.AddPath(Prefix.str() + A->getValue(),
frontend::System, false, false, false);
else
- Opts.AddPath(Prefix.str() + A->getValue(Args),
+ Opts.AddPath(Prefix.str() + A->getValue(),
frontend::Angled, false, false, false);
}
for (arg_iterator it = Args.filtered_begin(OPT_idirafter),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::After, true, false, false);
+ Opts.AddPath((*it)->getValue(), frontend::After, true, false, false);
for (arg_iterator it = Args.filtered_begin(OPT_iquote),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::Quoted, true, false, false);
+ Opts.AddPath((*it)->getValue(), frontend::Quoted, true, false, false);
for (arg_iterator it = Args.filtered_begin(OPT_isystem,
OPT_iwithsysroot), ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::System, true, false,
+ Opts.AddPath((*it)->getValue(), frontend::System, true, false,
!(*it)->getOption().matches(OPT_iwithsysroot));
for (arg_iterator it = Args.filtered_begin(OPT_iframework),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::System, true, true,
+ Opts.AddPath((*it)->getValue(), frontend::System, true, true,
true);
// Add the paths for the various language specific isystem flags.
for (arg_iterator it = Args.filtered_begin(OPT_c_isystem),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::CSystem, true, false, true);
+ Opts.AddPath((*it)->getValue(), frontend::CSystem, true, false, true);
for (arg_iterator it = Args.filtered_begin(OPT_cxx_isystem),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::CXXSystem, true, false, true);
+ Opts.AddPath((*it)->getValue(), frontend::CXXSystem, true, false, true);
for (arg_iterator it = Args.filtered_begin(OPT_objc_isystem),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::ObjCSystem, true, false,true);
+ Opts.AddPath((*it)->getValue(), frontend::ObjCSystem, true, false,true);
for (arg_iterator it = Args.filtered_begin(OPT_objcxx_isystem),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::ObjCXXSystem, true, false,
+ Opts.AddPath((*it)->getValue(), frontend::ObjCXXSystem, true, false,
true);
// Add the internal paths from a driver that detects standard include paths.
@@ -1744,7 +884,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
OPT_internal_externc_isystem),
E = Args.filtered_end();
I != E; ++I)
- Opts.AddPath((*I)->getValue(Args), frontend::System,
+ Opts.AddPath((*I)->getValue(), frontend::System,
false, false, /*IgnoreSysRoot=*/true, /*IsInternal=*/true,
(*I)->getOption().matches(OPT_internal_externc_isystem));
@@ -1753,7 +893,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
OPT_ino_system_prefix),
E = Args.filtered_end();
I != E; ++I)
- Opts.AddSystemHeaderPrefix((*I)->getValue(Args),
+ Opts.AddSystemHeaderPrefix((*I)->getValue(),
(*I)->getOption().matches(OPT_isystem_prefix));
}
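The -iprefix scan above is order-sensitive: the most recently seen prefix is prepended to every later -iwithprefix or -iwithprefixbefore path. A toy rendering of that stateful pass over pre-tokenized arguments (option handling simplified to a pair list):

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      // Simulated already-tokenized arguments: (option, value) pairs.
      std::vector<std::pair<std::string, std::string>> Args = {
        {"-iprefix",     "/opt/sdk/"},
        {"-iwithprefix", "include"},      // -> /opt/sdk/include
        {"-iprefix",     "/usr/local/"},
        {"-iwithprefix", "include/c++"},  // -> /usr/local/include/c++
      };
      std::string Prefix;  // last -iprefix seen wins, exactly as in the loop
      for (const auto &A : Args) {
        if (A.first == "-iprefix")
          Prefix = A.second;
        else
          std::cout << "add search path: " << Prefix + A.second << '\n';
      }
    }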
@@ -1801,11 +941,12 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
}
const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
- Opts.BCPLComment = Std.hasBCPLComments();
+ Opts.LineComment = Std.hasLineComments();
Opts.C99 = Std.isC99();
Opts.C11 = Std.isC11();
Opts.CPlusPlus = Std.isCPlusPlus();
Opts.CPlusPlus0x = Std.isCPlusPlus0x();
+ Opts.CPlusPlus1y = Std.isCPlusPlus1y();
Opts.Digraphs = Std.hasDigraphs();
Opts.GNUMode = Std.isGNUMode();
Opts.GNUInline = !Std.isC99();
@@ -1840,6 +981,9 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
// OpenCL and C++ both have bool, true, false keywords.
Opts.Bool = Opts.OpenCL || Opts.CPlusPlus;
+ // C++ has wchar_t keyword.
+ Opts.WChar = Opts.CPlusPlus;
+
Opts.GNUKeywords = Opts.GNUMode;
Opts.CXXOperatorNames = Opts.CPlusPlus;
@@ -1855,14 +999,14 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// FIXME: Cleanup per-file based stuff.
LangStandard::Kind LangStd = LangStandard::lang_unspecified;
if (const Arg *A = Args.getLastArg(OPT_std_EQ)) {
- LangStd = llvm::StringSwitch<LangStandard::Kind>(A->getValue(Args))
+ LangStd = llvm::StringSwitch<LangStandard::Kind>(A->getValue())
#define LANGSTANDARD(id, name, desc, features) \
.Case(name, LangStandard::lang_##id)
#include "clang/Frontend/LangStandards.def"
.Default(LangStandard::lang_unspecified);
if (LangStd == LangStandard::lang_unspecified)
Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue(Args);
+ << A->getAsString(Args) << A->getValue();
else {
  // Valid standard; check to make sure the language and standard are compatible.
const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
@@ -1903,7 +1047,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Override the -std option in this case.
if (const Arg *A = Args.getLastArg(OPT_cl_std_EQ)) {
LangStandard::Kind OpenCLLangStd
- = llvm::StringSwitch<LangStandard::Kind>(A->getValue(Args))
+ = llvm::StringSwitch<LangStandard::Kind>(A->getValue())
.Case("CL", LangStandard::lang_opencl)
.Case("CL1.1", LangStandard::lang_opencl11)
.Case("CL1.2", LangStandard::lang_opencl12)
@@ -1911,7 +1055,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (OpenCLLangStd == LangStandard::lang_unspecified) {
Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue(Args);
+ << A->getAsString(Args) << A->getValue();
}
else
LangStd = OpenCLLangStd;
@@ -1932,7 +1076,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Opts.ObjC1) {
if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
- StringRef value = arg->getValue(Args);
+ StringRef value = arg->getValue();
if (Opts.ObjCRuntime.tryParse(value))
Diags.Report(diag::err_drv_unknown_objc_runtime) << value;
}
@@ -1943,14 +1087,16 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setGC(LangOptions::HybridGC);
else if (Args.hasArg(OPT_fobjc_arc)) {
Opts.ObjCAutoRefCount = 1;
- if (!Opts.ObjCRuntime.isNonFragile())
- Diags.Report(diag::err_arc_nonfragile_abi);
+ if (!Opts.ObjCRuntime.allowsARC())
+ Diags.Report(diag::err_arc_unsupported_on_runtime);
+
+ // Only set ObjCARCWeak if ARC is enabled.
+ if (Args.hasArg(OPT_fobjc_runtime_has_weak))
+ Opts.ObjCARCWeak = 1;
+ else
+ Opts.ObjCARCWeak = Opts.ObjCRuntime.allowsWeak();
}
- Opts.ObjCRuntimeHasWeak = Opts.ObjCRuntime.hasWeak();
- if (Args.hasArg(OPT_fobjc_runtime_has_weak))
- Opts.ObjCRuntimeHasWeak = 1;
-
if (Args.hasArg(OPT_fno_objc_infer_related_result_type))
Opts.ObjCInferRelatedResultType = 0;
}
@@ -1992,7 +1138,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
<< Args.getLastArg(OPT_fvisibility)->getAsString(Args) << Vis;
if (Arg *A = Args.getLastArg(OPT_ffp_contract)) {
- StringRef Val = A->getValue(Args);
+ StringRef Val = A->getValue();
if (Val == "fast")
Opts.setFPContractMode(LangOptions::FPC_Fast);
else if (Val == "on")
@@ -2045,6 +1191,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
Opts.Modules = Args.hasArg(OPT_fmodules);
Opts.CharIsSigned = !Args.hasArg(OPT_fno_signed_char);
+ Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
Opts.ShortWChar = Args.hasArg(OPT_fshort_wchar);
Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
Opts.Freestanding = Args.hasArg(OPT_ffreestanding);
@@ -2067,7 +1214,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Args.getLastArgValue(OPT_fconstant_string_class);
Opts.ObjCDefaultSynthProperties =
Args.hasArg(OPT_fobjc_default_synthesize_properties);
- Opts.CatchUndefined = Args.hasArg(OPT_fcatch_undefined_behavior);
+ Opts.EncodeExtendedBlockSig =
+ Args.hasArg(OPT_fencode_extended_block_signature);
Opts.EmitAllDecls = Args.hasArg(OPT_femit_all_decls);
Opts.PackStruct = Args.getLastArgIntValue(OPT_fpack_struct_EQ, 0, Diags);
Opts.PICLevel = Args.getLastArgIntValue(OPT_pic_level, 0, Diags);
@@ -2088,8 +1236,6 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.DebuggerSupport = Args.hasArg(OPT_fdebugger_support);
Opts.DebuggerCastResultToId = Args.hasArg(OPT_fdebugger_cast_result_to_id);
Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
- Opts.AddressSanitizer = Args.hasArg(OPT_faddress_sanitizer);
- Opts.ThreadSanitizer = Args.hasArg(OPT_fthread_sanitizer);
Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
Opts.CurrentModule = Args.getLastArgValue(OPT_fmodule_name);
@@ -2112,6 +1258,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.FastMath = Args.hasArg(OPT_ffast_math);
Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only);
+ Opts.EmitMicrosoftInlineAsm = Args.hasArg(OPT_fenable_experimental_ms_inline_asm);
+
+ Opts.RetainCommentsFromSystemHeaders =
+ Args.hasArg(OPT_fretain_comments_from_system_headers);
+
unsigned SSP = Args.getLastArgIntValue(OPT_stack_protector, 0, Diags);
switch (SSP) {
default:
@@ -2122,6 +1273,37 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
case 1: Opts.setStackProtector(LangOptions::SSPOn); break;
case 2: Opts.setStackProtector(LangOptions::SSPReq); break;
}
+
+ // Parse -fsanitize= arguments.
+ std::vector<std::string> Sanitizers = Args.getAllArgValues(OPT_fsanitize_EQ);
+ for (unsigned I = 0, N = Sanitizers.size(); I != N; ++I) {
+ // Since the Opts.Sanitize* values are bitfields, it's a little tricky to
+ // efficiently map string values to them. Perform the mapping indirectly:
+ // convert strings to enumerated values, then switch over the enum to set
+ // the right bitfield value.
+ enum Sanitizer {
+#define SANITIZER(NAME, ID) \
+ ID,
+#include "clang/Basic/Sanitizers.def"
+ Unknown
+ };
+ switch (llvm::StringSwitch<unsigned>(Sanitizers[I])
+#define SANITIZER(NAME, ID) \
+ .Case(NAME, ID)
+#include "clang/Basic/Sanitizers.def"
+ .Default(Unknown)) {
+#define SANITIZER(NAME, ID) \
+ case ID: \
+ Opts.Sanitize##ID = true; \
+ break;
+#include "clang/Basic/Sanitizers.def"
+
+ case Unknown:
+ Diags.Report(diag::err_drv_invalid_value)
+ << "-fsanitize=" << Sanitizers[I];
+ break;
+ }
+ }
}
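The -fsanitize= hunk above expands Sanitizers.def three times: once to define the enum, once to match the string, and once to set the matching bitfield. A compact illustration of that X-macro pattern, with an inline macro list standing in for the .def file (names here are invented):

    #include <iostream>
    #include <string>

    // Inline stand-in for a .def file: each entry is (spelling, identifier).
    #define DEMO_SANITIZERS(X) X("address", Address) X("thread", Thread)

    struct Options {
      bool SanitizeAddress = false;
      bool SanitizeThread = false;
    };

    // One expansion defines the enum...
    enum Sanitizer {
    #define X(NAME, ID) ID,
      DEMO_SANITIZERS(X)
    #undef X
      Unknown
    };

    // ...a second maps a string to it, and a third sets the matching flag.
    static bool enable(Options &Opts, const std::string &Name) {
      Sanitizer S = Unknown;
    #define X(NAME, ID) if (Name == NAME) S = ID;
      DEMO_SANITIZERS(X)
    #undef X
      switch (S) {
    #define X(NAME, ID) case ID: Opts.Sanitize##ID = true; return true;
      DEMO_SANITIZERS(X)
    #undef X
      case Unknown: return false;
      }
      return false;
    }

    int main() {
      Options Opts;
      std::cout << enable(Opts, "address") << enable(Opts, "bogus") << '\n';
      std::cout << Opts.SanitizeAddress << Opts.SanitizeThread << '\n';
    }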
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
@@ -2131,7 +1313,7 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
Opts.ImplicitPTHInclude = Args.getLastArgValue(OPT_include_pth);
if (const Arg *A = Args.getLastArg(OPT_token_cache))
- Opts.TokenCache = A->getValue(Args);
+ Opts.TokenCache = A->getValue();
else
Opts.TokenCache = Opts.ImplicitPTHInclude;
Opts.UsePredefines = !Args.hasArg(OPT_undef);
@@ -2142,11 +1324,11 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
for (arg_iterator it = Args.filtered_begin(OPT_error_on_deserialized_pch_decl),
ie = Args.filtered_end(); it != ie; ++it) {
const Arg *A = *it;
- Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue(Args));
+ Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue());
}
if (const Arg *A = Args.getLastArg(OPT_preamble_bytes_EQ)) {
- StringRef Value(A->getValue(Args));
+ StringRef Value(A->getValue());
size_t Comma = Value.find(',');
unsigned Bytes = 0;
unsigned EndOfLine = 0;
@@ -2165,9 +1347,9 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
for (arg_iterator it = Args.filtered_begin(OPT_D, OPT_U),
ie = Args.filtered_end(); it != ie; ++it) {
if ((*it)->getOption().matches(OPT_D))
- Opts.addMacroDef((*it)->getValue(Args));
+ Opts.addMacroDef((*it)->getValue());
else
- Opts.addMacroUndef((*it)->getValue(Args));
+ Opts.addMacroUndef((*it)->getValue());
}
Opts.MacroIncludes = Args.getAllArgValues(OPT_imacros);
@@ -2177,22 +1359,13 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
OPT_include_pth),
ie = Args.filtered_end(); it != ie; ++it) {
const Arg *A = *it;
- // PCH is handled specially; we need to extract the original include path.
- if (A->getOption().matches(OPT_include_pch)) {
- std::string OriginalFile =
- ASTReader::getOriginalSourceFile(A->getValue(Args), FileMgr, Diags);
- if (OriginalFile.empty())
- continue;
-
- Opts.Includes.push_back(OriginalFile);
- } else
- Opts.Includes.push_back(A->getValue(Args));
+ Opts.Includes.push_back(A->getValue());
}
for (arg_iterator it = Args.filtered_begin(OPT_chain_include),
ie = Args.filtered_end(); it != ie; ++it) {
const Arg *A = *it;
- Opts.ChainedIncludes.push_back(A->getValue(Args));
+ Opts.ChainedIncludes.push_back(A->getValue());
}
// Include 'altivec.h' if -faltivec option present
@@ -2203,7 +1376,7 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
ie = Args.filtered_end(); it != ie; ++it) {
const Arg *A = *it;
std::pair<StringRef,StringRef> Split =
- StringRef(A->getValue(Args)).split(';');
+ StringRef(A->getValue()).split(';');
if (Split.second.empty()) {
Diags.Report(diag::err_drv_invalid_remap_file) << A->getAsString(Args);
@@ -2214,7 +1387,7 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
}
if (Arg *A = Args.getLastArg(OPT_fobjc_arc_cxxlib_EQ)) {
- StringRef Name = A->getValue(Args);
+ StringRef Name = A->getValue();
unsigned Library = llvm::StringSwitch<unsigned>(Name)
.Case("libc++", ARCXX_libcxx)
.Case("libstdc++", ARCXX_libstdcxx)
@@ -2243,7 +1416,7 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
Opts.ABI = Args.getLastArgValue(OPT_target_abi);
Opts.CXXABI = Args.getLastArgValue(OPT_cxx_abi);
Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
- Opts.Features = Args.getAllArgValues(OPT_target_feature);
+ Opts.FeaturesAsWritten = Args.getAllArgValues(OPT_target_feature);
Opts.LinkerVersion = Args.getLastArgValue(OPT_target_linker_version);
Opts.Triple = llvm::Triple::normalize(Args.getLastArgValue(OPT_triple));
@@ -2283,13 +1456,13 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
// Issue errors on arguments that are not valid for CC1.
for (ArgList::iterator I = Args->begin(), E = Args->end();
I != E; ++I) {
- if (!(*I)->getOption().isCC1Option()) {
+ if (!(*I)->getOption().hasFlag(options::CC1Option)) {
Diags.Report(diag::err_drv_unknown_argument) << (*I)->getAsString(*Args);
Success = false;
}
}
- Success = ParseAnalyzerArgs(Res.getAnalyzerOpts(), *Args, Diags) && Success;
+ Success = ParseAnalyzerArgs(*Res.getAnalyzerOpts(), *Args, Diags) && Success;
Success = ParseMigratorArgs(Res.getMigratorOpts(), *Args) && Success;
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), *Args);
Success = ParseDiagnosticArgs(Res.getDiagnosticOpts(), *Args, &Diags)
@@ -2371,53 +1544,49 @@ llvm::APInt ModuleSignature::getAsInteger() const {
}
std::string CompilerInvocation::getModuleHash() const {
- ModuleSignature Signature;
-
+ using llvm::hash_code;
+ using llvm::hash_value;
+ using llvm::hash_combine;
+
// Start the signature with the compiler version.
- // FIXME: The full version string can be quite long. Omit it from the
- // module hash for now to avoid failures where the path name becomes too
- // long. An MD5 or similar checksum would work well here.
- // Signature.add(getClangFullRepositoryVersion());
-
+ // FIXME: We'd rather use something more cryptographically sound than
+ // CityHash, but this will do for now.
+ hash_code code = hash_value(getClangFullRepositoryVersion());
+
// Extend the signature with the language options
#define LANGOPT(Name, Bits, Default, Description) \
- Signature.add(LangOpts->Name, Bits);
+ code = hash_combine(code, LangOpts->Name);
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
- Signature.add(static_cast<unsigned>(LangOpts->get##Name()), Bits);
+ code = hash_combine(code, static_cast<unsigned>(LangOpts->get##Name()));
#define BENIGN_LANGOPT(Name, Bits, Default, Description)
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
#include "clang/Basic/LangOptions.def"
- // Extend the signature with the target triple
- llvm::Triple T(TargetOpts.Triple);
- Signature.add((unsigned)T.getArch(), 5);
- Signature.add((unsigned)T.getVendor(), 4);
- Signature.add((unsigned)T.getOS(), 5);
- Signature.add((unsigned)T.getEnvironment(), 4);
+ // Extend the signature with the target options.
+ code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
+ TargetOpts->ABI, TargetOpts->CXXABI,
+ TargetOpts->LinkerVersion);
+ for (unsigned i = 0, n = TargetOpts->FeaturesAsWritten.size(); i != n; ++i)
+ code = hash_combine(code, TargetOpts->FeaturesAsWritten[i]);
// Extend the signature with preprocessor options.
- Signature.add(getPreprocessorOpts().UsePredefines, 1);
- Signature.add(getPreprocessorOpts().DetailedRecord, 1);
-
- // Hash the preprocessor defines.
- // FIXME: This is terrible. Use an MD5 sum of the preprocessor defines.
+ const PreprocessorOptions &ppOpts = getPreprocessorOpts();
+ code = hash_combine(code, ppOpts.UsePredefines, ppOpts.DetailedRecord);
+
std::vector<StringRef> MacroDefs;
for (std::vector<std::pair<std::string, bool/*isUndef*/> >::const_iterator
I = getPreprocessorOpts().Macros.begin(),
IEnd = getPreprocessorOpts().Macros.end();
I != IEnd; ++I) {
- if (!I->second)
- MacroDefs.push_back(I->first);
+ code = hash_combine(code, I->first, I->second);
}
- llvm::array_pod_sort(MacroDefs.begin(), MacroDefs.end());
-
- unsigned PPHashResult = 0;
- for (unsigned I = 0, N = MacroDefs.size(); I != N; ++I)
- PPHashResult = llvm::HashString(MacroDefs[I], PPHashResult);
- Signature.add(PPHashResult, 32);
-
- // We've generated the signature. Treat it as one large APInt that we'll
- // encode in base-36 and return.
- Signature.flush();
- return Signature.getAsInteger().toString(36, /*Signed=*/false);
+
+ // Extend the signature with the sysroot.
+ const HeaderSearchOptions &hsOpts = getHeaderSearchOpts();
+ code = hash_combine(code, hsOpts.Sysroot, hsOpts.UseBuiltinIncludes,
+ hsOpts.UseStandardSystemIncludes,
+ hsOpts.UseStandardCXXIncludes,
+ hsOpts.UseLibcxx);
+
+ return llvm::APInt(64, code).toString(36, /*Signed=*/false);
}
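
getModuleHash now folds every option that affects module compatibility (compiler version, language and target options, macros as written, header-search settings) into one 64-bit hash_code and renders it in base 36. Unlike the old path, undefs participate too: each macro's name and its isUndef flag are hashed in order rather than filtered and sorted. A self-contained sketch of the same shape, with a hand-rolled combiner standing in for llvm::hash_combine and invented option values:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for llvm::hash_combine: fold one more value into the running code.
    template <typename T> uint64_t combine(uint64_t Code, const T &V) {
      return Code ^ (std::hash<T>()(V) + 0x9e3779b97f4a7c15ULL +
                     (Code << 6) + (Code >> 2));
    }

    static std::string toBase36(uint64_t V) {
      const char *Digits = "0123456789abcdefghijklmnopqrstuvwxyz";
      std::string S;
      do { S.insert(S.begin(), Digits[V % 36]); V /= 36; } while (V);
      return S;
    }

    int main() {
      uint64_t Code = combine<std::string>(0, "clang version ...");  // version string
      Code = combine(Code, true);                     // e.g. one boolean LangOpt
      std::vector<std::string> Features = {"+sse2"};  // target features, in order
      for (const std::string &F : Features)
        Code = combine(Code, F);
      Code = combine<std::string>(Code, "-DNDEBUG");  // each macro def/undef
      std::cout << toBase36(Code) << "\n";            // stable per-configuration ID
    }
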
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 0aca86e..d82cb6d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Frontend/Utils.h"
#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@@ -34,8 +34,8 @@ clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
- DiagnosticOptions DiagOpts;
- Diags = CompilerInstance::createDiagnostics(DiagOpts, ArgList.size(),
+ Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions,
+ ArgList.size(),
ArgList.begin());
}
@@ -49,11 +49,6 @@ clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
// FIXME: We shouldn't have to pass in the path info.
driver::Driver TheDriver("clang", llvm::sys::getDefaultTargetTriple(),
"a.out", false, *Diags);
- // Force driver to use clang.
- // FIXME: This seems like a hack. Maybe the "Clang" tool subclass should be
- // available for using it to get the arguments, thus avoiding the overkill
- // of using the driver.
- TheDriver.setForcedClangUse();
// Don't check that inputs exist, they may have been remapped.
TheDriver.setCheckInputsExist(false);
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
index 21f5daa..53ea8be 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
@@ -59,10 +59,11 @@ public:
const Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath);
+ StringRef RelativePath,
+ const Module *Imported);
virtual void EndOfMainFile() {
OutputDependencyFile();
@@ -132,10 +133,11 @@ void DependencyFileCallback::InclusionDirective(SourceLocation HashLoc,
const Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath) {
+ StringRef RelativePath,
+ const Module *Imported) {
if (!File) {
if (AddMissingHeaderDeps)
AddFilename(FileName);
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
index eebaf0c..28d9c5d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
@@ -51,10 +51,11 @@ public:
const Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath);
+ StringRef RelativePath,
+ const Module *Imported);
virtual void EndOfMainFile() {
OutputGraphFile();
@@ -72,10 +73,11 @@ void DependencyGraphCallback::InclusionDirective(SourceLocation HashLoc,
const Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath) {
+ StringRef RelativePath,
+ const Module *Imported) {
if (!File)
return;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
index f052f90..359b82b 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
@@ -8,9 +8,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/DiagnosticRenderer.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Lex/Lexer.h"
#include "clang/Edit/EditedSource.h"
#include "clang/Edit/Commit.h"
@@ -18,6 +18,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include <algorithm>
using namespace clang;
@@ -60,8 +61,8 @@ static StringRef getImmediateMacroName(SourceLocation Loc,
}
DiagnosticRenderer::DiagnosticRenderer(const LangOptions &LangOpts,
- const DiagnosticOptions &DiagOpts)
-: LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
+ DiagnosticOptions *DiagOpts)
+ : LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
DiagnosticRenderer::~DiagnosticRenderer() {}
@@ -194,7 +195,7 @@ void DiagnosticRenderer::emitIncludeStack(SourceLocation Loc,
return;
LastIncludeLoc = Loc;
- if (!DiagOpts.ShowNoteIncludeStack && Level == DiagnosticsEngine::Note)
+ if (!DiagOpts->ShowNoteIncludeStack && Level == DiagnosticsEngine::Note)
return;
emitIncludeStackRecursively(Loc, SM);
@@ -218,6 +219,53 @@ void DiagnosticRenderer::emitIncludeStackRecursively(SourceLocation Loc,
emitIncludeLocation(Loc, PLoc, SM);
}
+// Helper function to fix up source ranges. It takes in an array of ranges,
+// and outputs an array of ranges where we want to draw the range highlighting
+// around the location specified by CaretLoc.
+//
+// To find locations which correspond to the caret, we crawl the macro caller
+// chain for the beginning and end of each range. If the caret location
+// is in a macro expansion, we search each chain for a location
+// in the same expansion as the caret; otherwise, we crawl to the top of
+// each chain. Two locations are part of the same macro expansion
+// iff the FileID is the same.
+static void mapDiagnosticRanges(
+ SourceLocation CaretLoc,
+ const SmallVectorImpl<CharSourceRange>& Ranges,
+ SmallVectorImpl<CharSourceRange>& SpellingRanges,
+ const SourceManager *SM) {
+ FileID CaretLocFileID = SM->getFileID(CaretLoc);
+
+ for (SmallVectorImpl<CharSourceRange>::const_iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I) {
+ SourceLocation Begin = I->getBegin(), End = I->getEnd();
+ bool IsTokenRange = I->isTokenRange();
+
+ // Search the macro caller chain for the beginning of the range.
+ while (Begin.isMacroID() && SM->getFileID(Begin) != CaretLocFileID)
+ Begin = SM->getImmediateMacroCallerLoc(Begin);
+
+ // Search the macro caller chain for the end of the range.
+ while (End.isMacroID() && SM->getFileID(End) != CaretLocFileID) {
+ // The computation of the next End is an inlined version of
+ // getImmediateMacroCallerLoc, except it chooses the end of an
+ // expansion range.
+ if (SM->isMacroArgExpansion(End)) {
+ End = SM->getImmediateSpellingLoc(End);
+ } else {
+ End = SM->getImmediateExpansionRange(End).second;
+ }
+ }
+
+ // Return the spelling location of the beginning and end of the range.
+ Begin = SM->getSpellingLoc(Begin);
+ End = SM->getSpellingLoc(End);
+ SpellingRanges.push_back(CharSourceRange(SourceRange(Begin, End),
+ IsTokenRange));
+ }
+}
+
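
A toy model of the caller-chain crawl above, with a parent-pointer node standing in for the SourceManager's expansion chain (all names invented):

    #include <iostream>

    // Toy expansion node: FileID identifies "same expansion"; Caller is one
    // level up the macro caller chain, null for a real file location.
    struct Loc {
      int FileID;
      const Loc *Caller;
    };

    // Crawl toward the caret's FileID, as mapDiagnosticRanges does for each
    // range endpoint: stop in the caret's expansion or at the top of the chain.
    static const Loc *crawlToFileID(const Loc *L, int CaretFileID) {
      while (L->Caller && L->FileID != CaretFileID)
        L = L->Caller;
      return L;
    }

    int main() {
      Loc File  = {0, nullptr}; // spelled in the main file
      Loc Outer = {1, &File};   // expanded from macro A
      Loc Inner = {2, &Outer};  // expanded from macro B, inside A
      // The caret sits in expansion 1; an endpoint in expansion 2 is walked
      // up until it lands in the same expansion as the caret.
      std::cout << crawlToFileID(&Inner, /*CaretFileID=*/1)->FileID << "\n"; // 1
    }
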
/// \brief Recursively emit notes for each macro expansion and caret
/// diagnostics where appropriate.
///
@@ -245,9 +293,13 @@ void DiagnosticRenderer::emitMacroExpansionsAndCarets(
// If this is a file source location, directly emit the source snippet and
// caret line. Also record the macro depth reached.
if (Loc.isFileID()) {
+ // Map the ranges.
+ SmallVector<CharSourceRange, 4> SpellingRanges;
+ mapDiagnosticRanges(Loc, Ranges, SpellingRanges, &SM);
+
assert(MacroDepth == 0 && "We shouldn't hit a leaf node twice!");
MacroDepth = OnMacroInst;
- emitCodeContext(Loc, Level, Ranges, Hints, SM);
+ emitCodeContext(Loc, Level, SpellingRanges, Hints, SM);
return;
}
// Otherwise recurse through each macro expansion layer.
@@ -257,8 +309,7 @@ void DiagnosticRenderer::emitMacroExpansionsAndCarets(
Loc = SM.skipToMacroArgExpansion(Loc);
SourceLocation OneLevelUp = SM.getImmediateMacroCallerLoc(Loc);
-
- // FIXME: Map ranges?
+
emitMacroExpansionsAndCarets(OneLevelUp, Level, Ranges, Hints, SM, MacroDepth,
OnMacroInst + 1);
@@ -269,28 +320,17 @@ void DiagnosticRenderer::emitMacroExpansionsAndCarets(
Loc = SM.getImmediateMacroCalleeLoc(Loc);
unsigned MacroSkipStart = 0, MacroSkipEnd = 0;
- if (MacroDepth > DiagOpts.MacroBacktraceLimit &&
- DiagOpts.MacroBacktraceLimit != 0) {
- MacroSkipStart = DiagOpts.MacroBacktraceLimit / 2 +
- DiagOpts.MacroBacktraceLimit % 2;
- MacroSkipEnd = MacroDepth - DiagOpts.MacroBacktraceLimit / 2;
+ if (MacroDepth > DiagOpts->MacroBacktraceLimit &&
+ DiagOpts->MacroBacktraceLimit != 0) {
+ MacroSkipStart = DiagOpts->MacroBacktraceLimit / 2 +
+ DiagOpts->MacroBacktraceLimit % 2;
+ MacroSkipEnd = MacroDepth - DiagOpts->MacroBacktraceLimit / 2;
}
// Whether to suppress printing this macro expansion.
bool Suppressed = (OnMacroInst >= MacroSkipStart &&
OnMacroInst < MacroSkipEnd);
- // Map the ranges.
- for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I) {
- SourceLocation Start = I->getBegin(), End = I->getEnd();
- if (Start.isMacroID())
- I->setBegin(SM.getImmediateMacroCalleeLoc(Start));
- if (End.isMacroID())
- I->setEnd(SM.getImmediateMacroCalleeLoc(End));
- }
-
if (Suppressed) {
// Tell the user that we've skipped contexts.
if (OnMacroInst == MacroSkipStart) {
@@ -303,14 +343,18 @@ void DiagnosticRenderer::emitMacroExpansionsAndCarets(
}
return;
}
-
+
+ // Map the ranges.
+ SmallVector<CharSourceRange, 4> SpellingRanges;
+ mapDiagnosticRanges(MacroLoc, Ranges, SpellingRanges, &SM);
+
SmallString<100> MessageStorage;
llvm::raw_svector_ostream Message(MessageStorage);
Message << "expanded from macro '"
<< getImmediateMacroName(MacroLoc, SM, LangOpts) << "'";
emitDiagnostic(SM.getSpellingLoc(Loc), DiagnosticsEngine::Note,
Message.str(),
- Ranges, ArrayRef<FixItHint>(), &SM);
+ SpellingRanges, ArrayRef<FixItHint>(), &SM);
}
DiagnosticNoteRenderer::~DiagnosticNoteRenderer() {}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
index a4321e7..2e9a791 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
@@ -23,10 +23,12 @@
#include "clang/Parse/ParseAST.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/ASTReader.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Timer.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include "llvm/Support/Timer.h"
using namespace clang;
namespace {
@@ -155,20 +157,22 @@ ASTConsumer* FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
return new MultiplexConsumer(Consumers);
}
+
bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
const FrontendInputFile &Input) {
assert(!Instance && "Already processing a source file!");
- assert(!Input.File.empty() && "Unexpected empty filename!");
+ assert(!Input.isEmpty() && "Unexpected empty filename!");
setCurrentInput(Input);
setCompilerInstance(&CI);
+ StringRef InputFile = Input.getFile();
bool HasBegunSourceFile = false;
if (!BeginInvocation(CI))
goto failure;
// AST files follow a very different path, since they share objects via the
// AST unit.
- if (Input.Kind == IK_AST) {
+ if (Input.getKind() == IK_AST) {
assert(!usesPreprocessorOnly() &&
"Attempt to pass AST file to preprocessor only action!");
assert(hasASTFileSupport() &&
@@ -176,7 +180,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
std::string Error;
- ASTUnit *AST = ASTUnit::LoadFromASTFile(Input.File, Diags,
+ ASTUnit *AST = ASTUnit::LoadFromASTFile(InputFile, Diags,
CI.getFileSystemOpts());
if (!AST)
goto failure;
@@ -191,11 +195,11 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.setASTContext(&AST->getASTContext());
// Initialize the action.
- if (!BeginSourceFileAction(CI, Input.File))
+ if (!BeginSourceFileAction(CI, InputFile))
goto failure;
/// Create the AST consumer.
- CI.setASTConsumer(CreateWrappedASTConsumer(CI, Input.File));
+ CI.setASTConsumer(CreateWrappedASTConsumer(CI, InputFile));
if (!CI.hasASTConsumer())
goto failure;
@@ -209,7 +213,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.createSourceManager(CI.getFileManager());
// IR files bypass the rest of initialization.
- if (Input.Kind == IK_LLVM_IR) {
+ if (Input.getKind() == IK_LLVM_IR) {
assert(hasIRSupport() &&
"This action does not have IR file support!");
@@ -218,12 +222,51 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
HasBegunSourceFile = true;
// Initialize the action.
- if (!BeginSourceFileAction(CI, Input.File))
+ if (!BeginSourceFileAction(CI, InputFile))
goto failure;
return true;
}
+ // If the implicit PCH include is actually a directory, rather than
+ // a single file, search for a suitable PCH file in that directory.
+ if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
+ FileManager &FileMgr = CI.getFileManager();
+ PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+ StringRef PCHInclude = PPOpts.ImplicitPCHInclude;
+ if (const DirectoryEntry *PCHDir = FileMgr.getDirectory(PCHInclude)) {
+ llvm::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(PCHDir->getName(), DirNative);
+ bool Found = false;
+ for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ // Check whether this is an acceptable AST file.
+ if (ASTReader::isAcceptableASTFile(Dir->path(), FileMgr,
+ CI.getLangOpts(),
+ CI.getTargetOpts(),
+ CI.getPreprocessorOpts())) {
+ for (unsigned I = 0, N = PPOpts.Includes.size(); I != N; ++I) {
+ if (PPOpts.Includes[I] == PPOpts.ImplicitPCHInclude) {
+ PPOpts.Includes[I] = Dir->path();
+ PPOpts.ImplicitPCHInclude = Dir->path();
+ Found = true;
+ break;
+ }
+ }
+
+ assert(Found && "Implicit PCH include not in includes list?");
+ break;
+ }
+ }
+
+ if (!Found) {
+ CI.getDiagnostics().Report(diag::err_fe_no_pch_in_dir) << PCHInclude;
+ return true;
+ }
+ }
+ }
+
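
The block above makes an implicit PCH include that names a directory mean "scan it and use the first acceptable AST file". A sketch of that scan using std::filesystem in place of llvm::sys::fs, with a placeholder test instead of ASTReader::isAcceptableASTFile:

    #include <filesystem>
    #include <iostream>
    #include <optional>
    #include <string>
    #include <system_error>

    namespace fs = std::filesystem;

    // Placeholder acceptability check: the real code validates the language,
    // target, and preprocessor options recorded in the AST file.
    static bool isAcceptablePCH(const fs::path &P) {
      return P.extension() == ".pch";
    }

    // Return the first acceptable PCH in Dir, mirroring the directory_iterator
    // loop in BeginSourceFile; nullopt maps to err_fe_no_pch_in_dir.
    static std::optional<std::string> findPCHInDir(const std::string &Dir) {
      std::error_code EC;
      for (fs::directory_iterator I(Dir, EC), E; I != E && !EC; I.increment(EC))
        if (isAcceptablePCH(I->path()))
          return I->path().string();
      return std::nullopt;
    }

    int main() {
      if (auto PCH = findPCHInDir("."))
        std::cout << "using " << *PCH << "\n";
      else
        std::cout << "no PCH found\n";
    }
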
// Set up the preprocessor.
CI.createPreprocessor();
@@ -233,7 +276,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
HasBegunSourceFile = true;
// Initialize the action.
- if (!BeginSourceFileAction(CI, Input.File))
+ if (!BeginSourceFileAction(CI, InputFile))
goto failure;
/// Create the AST context and consumer unless this is a preprocessor only
@@ -242,12 +285,14 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.createASTContext();
OwningPtr<ASTConsumer> Consumer(
- CreateWrappedASTConsumer(CI, Input.File));
+ CreateWrappedASTConsumer(CI, InputFile));
if (!Consumer)
goto failure;
CI.getASTContext().setASTMutationListener(Consumer->GetASTMutationListener());
-
+ CI.getPreprocessor().setPPMutationListener(
+ Consumer->GetPPMutationListener());
+
if (!CI.getPreprocessorOpts().ChainedIncludes.empty()) {
// Convert headers to PCH and chain them.
OwningPtr<ExternalASTSource> source;
@@ -270,7 +315,6 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.createPCHExternalASTSource(
CI.getPreprocessorOpts().ImplicitPCHInclude,
CI.getPreprocessorOpts().DisablePCHValidation,
- CI.getPreprocessorOpts().DisableStatCache,
CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
DeserialListener);
if (!CI.getASTContext().getExternalSource())
@@ -314,6 +358,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (HasBegunSourceFile)
CI.getDiagnosticClient().EndSourceFile();
+ CI.clearOutputFiles(/*EraseFiles=*/true);
setCurrentInput(FrontendInputFile());
setCompilerInstance(0);
return false;
@@ -325,10 +370,7 @@ bool FrontendAction::Execute() {
// Initialize the main file entry. This needs to be delayed until after PCH
// has loaded.
if (!isCurrentFileAST()) {
- if (!CI.InitializeSourceManager(getCurrentFile(),
- getCurrentInput().IsSystem
- ? SrcMgr::C_System
- : SrcMgr::C_User))
+ if (!CI.InitializeSourceManager(getCurrentInput()))
return false;
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
index 24960cf..47063f7 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
@@ -131,8 +131,31 @@ ASTConsumer *GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
Sysroot, OS);
}
+static SmallVectorImpl<char> &
+operator+=(SmallVectorImpl<char> &Includes, StringRef RHS) {
+ Includes.append(RHS.begin(), RHS.end());
+ return Includes;
+}
+
+static void addHeaderInclude(StringRef HeaderName,
+ SmallVectorImpl<char> &Includes,
+ const LangOptions &LangOpts) {
+ if (LangOpts.ObjC1)
+ Includes += "#import \"";
+ else
+ Includes += "#include \"";
+ Includes += HeaderName;
+ Includes += "\"\n";
+}
+
+static void addHeaderInclude(const FileEntry *Header,
+ SmallVectorImpl<char> &Includes,
+ const LangOptions &LangOpts) {
+ addHeaderInclude(Header->getName(), Includes, LangOpts);
+}
+
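
These helpers centralize the #import-versus-#include choice that the three hunks below previously spelled out inline. The same idea sketched against plain std::string, with FileEntry and SmallVectorImpl elided:

    #include <iostream>
    #include <string>

    // Append one include directive; Objective-C uses #import so that pulling
    // in the same header twice is harmless.
    static void addHeaderInclude(const std::string &HeaderName,
                                 std::string &Includes, bool ObjC) {
      Includes += ObjC ? "#import \"" : "#include \"";
      Includes += HeaderName;
      Includes += "\"\n";
    }

    int main() {
      std::string Includes;
      addHeaderInclude("Foo.h", Includes, /*ObjC=*/true);
      addHeaderInclude("Bar.h", Includes, /*ObjC=*/true);
      std::cout << Includes;
    }
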
/// \brief Collect the set of header includes needed to construct the given
-/// module.
+/// module and update the TopHeaders file set of the module.
///
/// \param Module The module we're collecting includes from.
///
@@ -142,30 +165,23 @@ static void collectModuleHeaderIncludes(const LangOptions &LangOpts,
FileManager &FileMgr,
ModuleMap &ModMap,
clang::Module *Module,
- SmallString<256> &Includes) {
+ SmallVectorImpl<char> &Includes) {
// Don't collect any headers for unavailable modules.
if (!Module->isAvailable())
return;
// Add includes for each of these headers.
for (unsigned I = 0, N = Module->Headers.size(); I != N; ++I) {
- if (LangOpts.ObjC1)
- Includes += "#import \"";
- else
- Includes += "#include \"";
- Includes += Module->Headers[I]->getName();
- Includes += "\"\n";
+ const FileEntry *Header = Module->Headers[I];
+ Module->TopHeaders.insert(Header);
+ addHeaderInclude(Header, Includes, LangOpts);
}
if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader()) {
+ Module->TopHeaders.insert(UmbrellaHeader);
if (Module->Parent) {
// Include the umbrella header for submodules.
- if (LangOpts.ObjC1)
- Includes += "#import \"";
- else
- Includes += "#include \"";
- Includes += UmbrellaHeader->getName();
- Includes += "\"\n";
+ addHeaderInclude(UmbrellaHeader, Includes, LangOpts);
}
} else if (const DirectoryEntry *UmbrellaDir = Module->getUmbrellaDir()) {
// Add all of the headers we find in this subdirectory.
@@ -184,17 +200,14 @@ static void collectModuleHeaderIncludes(const LangOptions &LangOpts,
// If this header is marked 'unavailable' in this module, don't include
// it.
- if (const FileEntry *Header = FileMgr.getFile(Dir->path()))
+ if (const FileEntry *Header = FileMgr.getFile(Dir->path())) {
if (ModMap.isHeaderInUnavailableModule(Header))
continue;
+ Module->TopHeaders.insert(Header);
+ }
 // Include this header as part of the umbrella header for submodules.
- if (LangOpts.ObjC1)
- Includes += "#import \"";
- else
- Includes += "#include \"";
- Includes += Dir->path();
- Includes += "\"\n";
+ addHeaderInclude(Dir->path(), Includes, LangOpts);
}
}
@@ -250,77 +263,29 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
return false;
}
- // Do we have an umbrella header for this module?
- const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader();
-
+ FileManager &FileMgr = CI.getFileManager();
+
// Collect the set of #includes we need to build the module.
SmallString<256> HeaderContents;
- collectModuleHeaderIncludes(CI.getLangOpts(), CI.getFileManager(),
+ if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader())
+ addHeaderInclude(UmbrellaHeader, HeaderContents, CI.getLangOpts());
+ collectModuleHeaderIncludes(CI.getLangOpts(), FileMgr,
CI.getPreprocessor().getHeaderSearchInfo().getModuleMap(),
Module, HeaderContents);
- if (UmbrellaHeader && HeaderContents.empty()) {
- // Simple case: we have an umbrella header and there are no additional
- // includes, we can just parse the umbrella header directly.
- setCurrentInput(FrontendInputFile(UmbrellaHeader->getName(),
- getCurrentFileKind(),
- Module->IsSystem));
- return true;
- }
-
- FileManager &FileMgr = CI.getFileManager();
- SmallString<128> HeaderName;
- time_t ModTime;
- if (UmbrellaHeader) {
- // Read in the umbrella header.
- // FIXME: Go through the source manager; the umbrella header may have
- // been overridden.
- std::string ErrorStr;
- llvm::MemoryBuffer *UmbrellaContents
- = FileMgr.getBufferForFile(UmbrellaHeader, &ErrorStr);
- if (!UmbrellaContents) {
- CI.getDiagnostics().Report(diag::err_missing_umbrella_header)
- << UmbrellaHeader->getName() << ErrorStr;
- return false;
- }
-
- // Combine the contents of the umbrella header with the automatically-
- // generated includes.
- SmallString<256> OldContents = HeaderContents;
- HeaderContents = UmbrellaContents->getBuffer();
- HeaderContents += "\n\n";
- HeaderContents += "/* Module includes */\n";
- HeaderContents += OldContents;
-
- // Pretend that we're parsing the umbrella header.
- HeaderName = UmbrellaHeader->getName();
- ModTime = UmbrellaHeader->getModificationTime();
-
- delete UmbrellaContents;
- } else {
- // Pick an innocuous-sounding name for the umbrella header.
- HeaderName = Module->Name + ".h";
- if (FileMgr.getFile(HeaderName, /*OpenFile=*/false,
- /*CacheFailure=*/false)) {
- // Try again!
- HeaderName = Module->Name + "-module.h";
- if (FileMgr.getFile(HeaderName, /*OpenFile=*/false,
- /*CacheFailure=*/false)) {
- // Pick something ridiculous and go with it.
- HeaderName = Module->Name + "-module.hmod";
- }
- }
- ModTime = time(0);
- }
-
- // Remap the contents of the header name we're using to our synthesized
- // buffer.
- const FileEntry *HeaderFile = FileMgr.getVirtualFile(HeaderName,
+
+ StringRef InputName = Module::getModuleInputBufferName();
+
+ // We consistently construct a buffer as input to build the module.
+ // This means the main file for modules will always be a virtual one.
+ // FIXME: Maybe allow using a memory buffer as input directly instead of
+ // messing with virtual files.
+ const FileEntry *HeaderFile = FileMgr.getVirtualFile(InputName,
HeaderContents.size(),
- ModTime);
+ time(0));
llvm::MemoryBuffer *HeaderContentsBuf
= llvm::MemoryBuffer::getMemBufferCopy(HeaderContents);
CI.getSourceManager().overrideFileContents(HeaderFile, HeaderContentsBuf);
- setCurrentInput(FrontendInputFile(HeaderName, getCurrentFileKind(),
+ setCurrentInput(FrontendInputFile(InputName, getCurrentFileKind(),
Module->IsSystem));
return true;
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
index 20e771a..4e73163 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -15,7 +15,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Version.h"
-#include "clang/Frontend/HeaderSearchOptions.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/HeaderSearch.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -321,7 +321,9 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
P.appendComponent("../../../include"); // <sysroot>/include
AddPath(P.str(), System, true, false, false);
AddPath("/mingw/include", System, true, false, false);
+#if defined(_WIN32)
AddPath("c:/mingw/include", System, true, false, false);
+#endif
}
break;
case llvm::Triple::FreeBSD:
@@ -401,12 +403,14 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.0");
// mingw.org C++ include paths
AddMinGWCPlusPlusIncludePaths("/mingw/lib/gcc", "mingw32", "4.5.2"); //MSYS
+#if defined(_WIN32)
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.6.2");
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.6.1");
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.2");
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.0");
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.4.0");
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.3.0");
+#endif
break;
case llvm::Triple::DragonFly:
AddPath("/usr/include/c++/4.1", CXXSystem, true, false, false);
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
index 1440da6..4bbd033 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
@@ -17,11 +17,12 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
-#include "clang/Frontend/PreprocessorOptions.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -83,6 +84,19 @@ static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
AddImplicitInclude(Builder, OriginalFile, PP.getFileManager());
}
+/// \brief Add an implicit \#include using the original file used to generate
+/// a PCH file.
+static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
+ StringRef ImplicitIncludePCH) {
+ std::string OriginalFile =
+ ASTReader::getOriginalSourceFile(ImplicitIncludePCH, PP.getFileManager(),
+ PP.getDiagnostics());
+ if (OriginalFile.empty())
+ return;
+
+ AddImplicitInclude(Builder, OriginalFile, PP.getFileManager());
+}
+
/// PickFP - This is used to pick a value based on the FP semantics of the
/// specified FP model.
template <typename T>
@@ -102,51 +116,51 @@ static T PickFP(const llvm::fltSemantics *Sem, T IEEESingleVal,
}
static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix,
- const llvm::fltSemantics *Sem) {
+ const llvm::fltSemantics *Sem, StringRef Ext) {
const char *DenormMin, *Epsilon, *Max, *Min;
- DenormMin = PickFP(Sem, "1.40129846e-45F", "4.9406564584124654e-324",
- "3.64519953188247460253e-4951L",
- "4.94065645841246544176568792868221e-324L",
- "6.47517511943802511092443895822764655e-4966L");
+ DenormMin = PickFP(Sem, "1.40129846e-45", "4.9406564584124654e-324",
+ "3.64519953188247460253e-4951",
+ "4.94065645841246544176568792868221e-324",
+ "6.47517511943802511092443895822764655e-4966");
int Digits = PickFP(Sem, 6, 15, 18, 31, 33);
- Epsilon = PickFP(Sem, "1.19209290e-7F", "2.2204460492503131e-16",
- "1.08420217248550443401e-19L",
- "4.94065645841246544176568792868221e-324L",
- "1.92592994438723585305597794258492732e-34L");
+ Epsilon = PickFP(Sem, "1.19209290e-7", "2.2204460492503131e-16",
+ "1.08420217248550443401e-19",
+ "4.94065645841246544176568792868221e-324",
+ "1.92592994438723585305597794258492732e-34");
int MantissaDigits = PickFP(Sem, 24, 53, 64, 106, 113);
int Min10Exp = PickFP(Sem, -37, -307, -4931, -291, -4931);
int Max10Exp = PickFP(Sem, 38, 308, 4932, 308, 4932);
int MinExp = PickFP(Sem, -125, -1021, -16381, -968, -16381);
int MaxExp = PickFP(Sem, 128, 1024, 16384, 1024, 16384);
- Min = PickFP(Sem, "1.17549435e-38F", "2.2250738585072014e-308",
- "3.36210314311209350626e-4932L",
- "2.00416836000897277799610805135016e-292L",
- "3.36210314311209350626267781732175260e-4932L");
- Max = PickFP(Sem, "3.40282347e+38F", "1.7976931348623157e+308",
- "1.18973149535723176502e+4932L",
- "1.79769313486231580793728971405301e+308L",
- "1.18973149535723176508575932662800702e+4932L");
+ Min = PickFP(Sem, "1.17549435e-38", "2.2250738585072014e-308",
+ "3.36210314311209350626e-4932",
+ "2.00416836000897277799610805135016e-292",
+ "3.36210314311209350626267781732175260e-4932");
+ Max = PickFP(Sem, "3.40282347e+38", "1.7976931348623157e+308",
+ "1.18973149535723176502e+4932",
+ "1.79769313486231580793728971405301e+308",
+ "1.18973149535723176508575932662800702e+4932");
SmallString<32> DefPrefix;
DefPrefix = "__";
DefPrefix += Prefix;
DefPrefix += "_";
- Builder.defineMacro(DefPrefix + "DENORM_MIN__", DenormMin);
+ Builder.defineMacro(DefPrefix + "DENORM_MIN__", Twine(DenormMin)+Ext);
Builder.defineMacro(DefPrefix + "HAS_DENORM__");
Builder.defineMacro(DefPrefix + "DIG__", Twine(Digits));
- Builder.defineMacro(DefPrefix + "EPSILON__", Twine(Epsilon));
+ Builder.defineMacro(DefPrefix + "EPSILON__", Twine(Epsilon)+Ext);
Builder.defineMacro(DefPrefix + "HAS_INFINITY__");
Builder.defineMacro(DefPrefix + "HAS_QUIET_NAN__");
Builder.defineMacro(DefPrefix + "MANT_DIG__", Twine(MantissaDigits));
Builder.defineMacro(DefPrefix + "MAX_10_EXP__", Twine(Max10Exp));
Builder.defineMacro(DefPrefix + "MAX_EXP__", Twine(MaxExp));
- Builder.defineMacro(DefPrefix + "MAX__", Twine(Max));
+ Builder.defineMacro(DefPrefix + "MAX__", Twine(Max)+Ext);
Builder.defineMacro(DefPrefix + "MIN_10_EXP__","("+Twine(Min10Exp)+")");
Builder.defineMacro(DefPrefix + "MIN_EXP__", "("+Twine(MinExp)+")");
- Builder.defineMacro(DefPrefix + "MIN__", Twine(Min));
+ Builder.defineMacro(DefPrefix + "MIN__", Twine(Min)+Ext);
}
@@ -247,7 +261,7 @@ static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
<< "};\n"
<< "\n";
- if (LangOpts.ObjCRuntimeHasWeak) {
+ if (LangOpts.ObjCARCWeak) {
Out << "template<typename _Tp>\n"
<< "struct __is_scalar<__attribute__((objc_ownership(weak))) _Tp> {\n"
<< " enum { __value = 0 };\n"
@@ -288,6 +302,8 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
else if (!LangOpts.GNUMode && LangOpts.Digraphs)
Builder.defineMacro("__STDC_VERSION__", "199409L");
} else {
+ // FIXME: LangOpts.CPlusPlus1y
+
// C++11 [cpp.predefined]p1:
// The name __cplusplus is defined to the value 201103L when compiling a
// C++ translation unit.
@@ -325,8 +341,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__clang_patchlevel__", "0");
#endif
Builder.defineMacro("__clang_version__",
- "\"" CLANG_VERSION_STRING " ("
- + getClangFullRepositoryVersion() + ")\"");
+ "\"" CLANG_VERSION_STRING " "
+ + getClangFullRepositoryVersion() + "\"");
#undef TOSTR
#undef TOSTR2
if (!LangOpts.MicrosoftMode) {
@@ -420,19 +436,17 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// Both __PRETTY_FUNCTION__ and __FUNCTION__ are GCC extensions, however
// VC++ appears to only like __FUNCTION__.
Builder.defineMacro("__PRETTY_FUNCTION__", "__FUNCTION__");
- // Work around some issues with Visual C++ headerws.
- if (LangOpts.CPlusPlus) {
- // Since we define wchar_t in C++ mode.
+ // Work around some issues with Visual C++ headers.
+ if (LangOpts.WChar) {
+ // wchar_t supported as a keyword.
Builder.defineMacro("_WCHAR_T_DEFINED");
Builder.defineMacro("_NATIVE_WCHAR_T_DEFINED");
+ }
+ if (LangOpts.CPlusPlus) {
// FIXME: Support Microsoft's __identifier extension in the lexer.
Builder.append("#define __identifier(x) x");
Builder.append("class type_info;");
}
-
- if (LangOpts.CPlusPlus0x) {
- Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", "1");
- }
}
if (LangOpts.Optimize)
@@ -511,9 +525,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
- DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat());
- DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat());
- DefineFloatMacros(Builder, "LDBL", &TI.getLongDoubleFormat());
+ DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
+ DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat(), "");
+ DefineFloatMacros(Builder, "LDBL", &TI.getLongDoubleFormat(), "L");
// Define a __POINTER_WIDTH__ macro for stdint.h.
Builder.defineMacro("__POINTER_WIDTH__",
@@ -763,6 +777,8 @@ void clang::InitializePreprocessor(Preprocessor &PP,
const std::string &Path = InitOpts.Includes[i];
if (Path == InitOpts.ImplicitPTHInclude)
AddImplicitIncludePTH(Builder, PP, Path);
+ else if (Path == InitOpts.ImplicitPCHInclude)
+ AddImplicitIncludePCH(Builder, PP, Path);
else
AddImplicitInclude(Builder, Path, PP.getFileManager());
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
index 3fee957..3a04f18 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/LogDiagnosticPrinter.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
@@ -16,9 +17,9 @@
using namespace clang;
LogDiagnosticPrinter::LogDiagnosticPrinter(raw_ostream &os,
- const DiagnosticOptions &diags,
+ DiagnosticOptions *diags,
bool _OwnsOutputStream)
- : OS(os), LangOpts(0), DiagOpts(&diags),
+ : OS(os), LangOpts(0), DiagOpts(diags),
OwnsOutputStream(_OwnsOutputStream) {
}
@@ -172,6 +173,6 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
DiagnosticConsumer *
LogDiagnosticPrinter::clone(DiagnosticsEngine &Diags) const {
- return new LogDiagnosticPrinter(OS, *DiagOpts, /*OwnsOutputStream=*/false);
+ return new LogDiagnosticPrinter(OS, &*DiagOpts, /*OwnsOutputStream=*/false);
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 5311ed5..30707dc 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -570,8 +570,12 @@ static void DoPrintMacros(Preprocessor &PP, raw_ostream *OS) {
do PP.Lex(Tok);
while (Tok.isNot(tok::eof));
- SmallVector<id_macro_pair, 128>
- MacrosByID(PP.macro_begin(), PP.macro_end());
+ SmallVector<id_macro_pair, 128> MacrosByID;
+ for (Preprocessor::macro_iterator I = PP.macro_begin(), E = PP.macro_end();
+ I != E; ++I) {
+ if (I->first->hasMacroDefinition())
+ MacrosByID.push_back(id_macro_pair(I->first, I->second));
+ }
llvm::array_pod_sort(MacrosByID.begin(), MacrosByID.end(), MacroIDCompare);
for (unsigned i = 0, e = MacrosByID.size(); i != e; ++i) {
diff --git a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index a20f30d..5f8fc1e 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -12,6 +12,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/DenseSet.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Diagnostic.h"
@@ -50,13 +51,10 @@ class SDiagsWriter;
class SDiagsRenderer : public DiagnosticNoteRenderer {
SDiagsWriter &Writer;
- RecordData &Record;
public:
- SDiagsRenderer(SDiagsWriter &Writer, RecordData &Record,
- const LangOptions &LangOpts,
- const DiagnosticOptions &DiagOpts)
- : DiagnosticNoteRenderer(LangOpts, DiagOpts),
- Writer(Writer), Record(Record){}
+ SDiagsRenderer(SDiagsWriter &Writer, const LangOptions &LangOpts,
+ DiagnosticOptions *DiagOpts)
+ : DiagnosticNoteRenderer(LangOpts, DiagOpts), Writer(Writer) {}
virtual ~SDiagsRenderer() {}
@@ -73,15 +71,16 @@ protected:
DiagnosticsEngine::Level Level,
ArrayRef<CharSourceRange> Ranges,
const SourceManager &SM) {}
-
- void emitNote(SourceLocation Loc, StringRef Message, const SourceManager *SM);
-
+
+ virtual void emitNote(SourceLocation Loc, StringRef Message,
+ const SourceManager *SM);
+
virtual void emitCodeContext(SourceLocation Loc,
DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange>& Ranges,
ArrayRef<FixItHint> Hints,
const SourceManager &SM);
-
+
virtual void beginDiagnostic(DiagOrStoredDiag D,
DiagnosticsEngine::Level Level);
virtual void endDiagnostic(DiagOrStoredDiag D,
@@ -91,13 +90,12 @@ protected:
class SDiagsWriter : public DiagnosticConsumer {
friend class SDiagsRenderer;
public:
- explicit SDiagsWriter(llvm::raw_ostream *os, const DiagnosticOptions &diags)
- : LangOpts(0), DiagOpts(diags),
- Stream(Buffer), OS(os), inNonNoteDiagnostic(false)
- {
+ explicit SDiagsWriter(llvm::raw_ostream *os, DiagnosticOptions *diags)
+ : LangOpts(0), DiagOpts(diags), Stream(Buffer), OS(os),
+ EmittedAnyDiagBlocks(false) {
EmitPreamble();
}
-
+
~SDiagsWriter() {}
void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
@@ -124,7 +122,26 @@ private:
/// \brief Emit the META data block.
void EmitMetaBlock();
-
+
+ /// \brief Start a DIAG block.
+ void EnterDiagBlock();
+
+ /// \brief End a DIAG block.
+ void ExitDiagBlock();
+
+ /// \brief Emit a DIAG record.
+ void EmitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ const SourceManager *SM,
+ DiagOrStoredDiag D);
+
+ /// \brief Emit FIXIT and SOURCE_RANGE records for a diagnostic.
+ void EmitCodeContext(SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM);
+
/// \brief Emit a record for a CharSourceRange.
void EmitCharSourceRange(CharSourceRange R, const SourceManager &SM);
@@ -159,7 +176,7 @@ private:
enum { Version = 1 };
const LangOptions *LangOpts;
- const DiagnosticOptions &DiagOpts;
+ llvm::IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
/// \brief The byte buffer for the serialized content.
SmallString<1024> Buffer;
@@ -190,17 +207,18 @@ private:
/// \brief Map for uniquing strings.
DiagFlagsTy DiagFlags;
-
- /// \brief Flag indicating whether or not we are in the process of
- /// emitting a non-note diagnostic.
- bool inNonNoteDiagnostic;
+
+ /// \brief Whether we have already started emission of any DIAG blocks. Once
+ /// this becomes \c true, we never close a DIAG block until we know that we're
+ /// starting another one or we're done.
+ bool EmittedAnyDiagBlocks;
};
} // end anonymous namespace
namespace clang {
namespace serialized_diags {
DiagnosticConsumer *create(llvm::raw_ostream *OS,
- const DiagnosticOptions &diags) {
+ DiagnosticOptions *diags) {
return new SDiagsWriter(OS, diags);
}
} // end namespace serialized_diags
@@ -474,31 +492,69 @@ unsigned SDiagsWriter::getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info) {
+ // Enter the block for a non-note diagnostic immediately, rather than waiting
+ // for beginDiagnostic, in case associated notes are emitted before we get
+ // there.
if (DiagLevel != DiagnosticsEngine::Note) {
- if (inNonNoteDiagnostic) {
- // We have encountered a non-note diagnostic. Finish up the previous
- // diagnostic block before starting a new one.
- Stream.ExitBlock();
- }
- inNonNoteDiagnostic = true;
+ if (EmittedAnyDiagBlocks)
+ ExitDiagBlock();
+
+ EnterDiagBlock();
+ EmittedAnyDiagBlocks = true;
}
// Compute the diagnostic text.
- diagBuf.clear();
+ diagBuf.clear();
Info.FormatDiagnostic(diagBuf);
- const SourceManager *
- SM = Info.hasSourceManager() ? &Info.getSourceManager() : 0;
- SDiagsRenderer Renderer(*this, Record, *LangOpts, DiagOpts);
+ if (Info.getLocation().isInvalid()) {
+ // Special-case diagnostics with no location. We may not have entered a
+ // source file in this case, so we can't use the normal DiagnosticRenderer
+ // machinery.
+ EmitDiagnosticMessage(SourceLocation(), PresumedLoc(), DiagLevel,
+ diagBuf, 0, &Info);
+ return;
+ }
+
+ assert(Info.hasSourceManager() && LangOpts &&
+ "Unexpected diagnostic with valid location outside of a source file");
+ SDiagsRenderer Renderer(*this, *LangOpts, &*DiagOpts);
Renderer.emitDiagnostic(Info.getLocation(), DiagLevel,
diagBuf.str(),
Info.getRanges(),
llvm::makeArrayRef(Info.getFixItHints(),
Info.getNumFixItHints()),
- SM,
+ &Info.getSourceManager(),
&Info);
}
+void SDiagsWriter::EmitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ const SourceManager *SM,
+ DiagOrStoredDiag D) {
+ // Emit the RECORD_DIAG record.
+ Record.clear();
+ Record.push_back(RECORD_DIAG);
+ Record.push_back(Level);
+ AddLocToRecord(Loc, SM, PLoc, Record);
+
+ if (const Diagnostic *Info = D.dyn_cast<const Diagnostic*>()) {
+ // Emit the category string lazily and get the category ID.
+ unsigned DiagID = DiagnosticIDs::getCategoryNumberForDiag(Info->getID());
+ Record.push_back(getEmitCategory(DiagID));
+ // Emit the diagnostic flag string lazily and get the mapped ID.
+ Record.push_back(getEmitDiagnosticFlag(Level, Info->getID()));
+ } else {
+ Record.push_back(getEmitCategory());
+ Record.push_back(getEmitDiagnosticFlag(Level));
+ }
+
+ Record.push_back(Message.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_DIAG), Record, Message);
+}
+
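
The EmittedAnyDiagBlocks flag encodes a simple invariant: once a DIAG block is open it stays open until the next non-note diagnostic or finish(), so notes that arrive late still nest inside their parent block. A sketch of that state machine (names invented, records reduced to prints):

    #include <iostream>

    struct Writer {
      bool EmittedAnyDiagBlocks = false;
      void enter() { std::cout << "EnterSubblock(BLOCK_DIAG)\n"; }
      void exit()  { std::cout << "ExitBlock()\n"; }

      void handle(bool IsNote) {
        if (!IsNote) {                 // a non-note closes the previous block
          if (EmittedAnyDiagBlocks)
            exit();
          enter();                     // and opens a fresh one immediately,
          EmittedAnyDiagBlocks = true; // so any following notes nest inside it
        }
        std::cout << (IsNote ? "  note record\n" : "  diag record\n");
      }

      void finish() {
        if (EmittedAnyDiagBlocks)      // close whatever block is still open
          exit();
      }
    };

    int main() {
      Writer W;
      W.handle(false); // warning: opens block 1
      W.handle(true);  // its note lands inside block 1
      W.handle(false); // next warning closes block 1, opens block 2
      W.finish();      // closes block 2
    }
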
void
SDiagsRenderer::emitDiagnosticMessage(SourceLocation Loc,
PresumedLoc PLoc,
@@ -507,94 +563,80 @@ SDiagsRenderer::emitDiagnosticMessage(SourceLocation Loc,
ArrayRef<clang::CharSourceRange> Ranges,
const SourceManager *SM,
DiagOrStoredDiag D) {
- // Emit the RECORD_DIAG record.
- Writer.Record.clear();
- Writer.Record.push_back(RECORD_DIAG);
- Writer.Record.push_back(Level);
- Writer.AddLocToRecord(Loc, SM, PLoc, Record);
+ Writer.EmitDiagnosticMessage(Loc, PLoc, Level, Message, SM, D);
+}
- if (const Diagnostic *Info = D.dyn_cast<const Diagnostic*>()) {
- // Emit the category string lazily and get the category ID.
- unsigned DiagID = DiagnosticIDs::getCategoryNumberForDiag(Info->getID());
- Writer.Record.push_back(Writer.getEmitCategory(DiagID));
- // Emit the diagnostic flag string lazily and get the mapped ID.
- Writer.Record.push_back(Writer.getEmitDiagnosticFlag(Level, Info->getID()));
- }
- else {
- Writer.Record.push_back(Writer.getEmitCategory());
- Writer.Record.push_back(Writer.getEmitDiagnosticFlag(Level));
- }
+void SDiagsWriter::EnterDiagBlock() {
+ Stream.EnterSubblock(BLOCK_DIAG, 4);
+}
- Writer.Record.push_back(Message.size());
- Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_DIAG),
- Writer.Record, Message);
+void SDiagsWriter::ExitDiagBlock() {
+ Stream.ExitBlock();
}
void SDiagsRenderer::beginDiagnostic(DiagOrStoredDiag D,
DiagnosticsEngine::Level Level) {
- Writer.Stream.EnterSubblock(BLOCK_DIAG, 4);
+ if (Level == DiagnosticsEngine::Note)
+ Writer.EnterDiagBlock();
}
void SDiagsRenderer::endDiagnostic(DiagOrStoredDiag D,
DiagnosticsEngine::Level Level) {
- if (D && Level != DiagnosticsEngine::Note)
- return;
- Writer.Stream.ExitBlock();
+ // Only end note diagnostics here, because we can't be sure when we've seen
+ // the last note associated with a non-note diagnostic.
+ if (Level == DiagnosticsEngine::Note)
+ Writer.ExitDiagBlock();
}
-void SDiagsRenderer::emitCodeContext(SourceLocation Loc,
- DiagnosticsEngine::Level Level,
- SmallVectorImpl<CharSourceRange> &Ranges,
- ArrayRef<FixItHint> Hints,
- const SourceManager &SM) {
+void SDiagsWriter::EmitCodeContext(SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
// Emit Source Ranges.
- for (ArrayRef<CharSourceRange>::iterator it=Ranges.begin(), ei=Ranges.end();
- it != ei; ++it) {
- if (it->isValid())
- Writer.EmitCharSourceRange(*it, SM);
- }
-
+ for (ArrayRef<CharSourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
+ I != E; ++I)
+ if (I->isValid())
+ EmitCharSourceRange(*I, SM);
+
// Emit FixIts.
- for (ArrayRef<FixItHint>::iterator it = Hints.begin(), et = Hints.end();
- it != et; ++it) {
- const FixItHint &fix = *it;
- if (fix.isNull())
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ const FixItHint &Fix = *I;
+ if (Fix.isNull())
continue;
- Writer.Record.clear();
- Writer.Record.push_back(RECORD_FIXIT);
- Writer.AddCharSourceRangeToRecord(fix.RemoveRange, Record, SM);
- Writer.Record.push_back(fix.CodeToInsert.size());
- Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_FIXIT), Record,
- fix.CodeToInsert);
+ Record.clear();
+ Record.push_back(RECORD_FIXIT);
+ AddCharSourceRangeToRecord(Fix.RemoveRange, Record, SM);
+ Record.push_back(Fix.CodeToInsert.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_FIXIT), Record,
+ Fix.CodeToInsert);
}
}
+void SDiagsRenderer::emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
+ Writer.EmitCodeContext(Ranges, Hints, SM);
+}
+
void SDiagsRenderer::emitNote(SourceLocation Loc, StringRef Message,
const SourceManager *SM) {
- Writer.Stream.EnterSubblock(BLOCK_DIAG, 4);
- RecordData Record;
- Record.push_back(RECORD_DIAG);
- Record.push_back(DiagnosticsEngine::Note);
- Writer.AddLocToRecord(Loc, Record, SM);
- Record.push_back(Writer.getEmitCategory());
- Record.push_back(Writer.getEmitDiagnosticFlag(DiagnosticsEngine::Note));
- Record.push_back(Message.size());
- Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_DIAG),
- Record, Message);
- Writer.Stream.ExitBlock();
+ Writer.EnterDiagBlock();
+ PresumedLoc PLoc = SM ? SM->getPresumedLoc(Loc) : PresumedLoc();
+ Writer.EmitDiagnosticMessage(Loc, PLoc, DiagnosticsEngine::Note,
+ Message, SM, DiagOrStoredDiag());
+ Writer.ExitDiagBlock();
}
void SDiagsWriter::finish() {
- if (inNonNoteDiagnostic) {
- // Finish off any diagnostics we were in the process of emitting.
- Stream.ExitBlock();
- inNonNoteDiagnostic = false;
- }
+ // Finish off any diagnostic we were in the process of emitting.
+ if (EmittedAnyDiagBlocks)
+ ExitDiagBlock();
// Write the generated bitstream to "Out".
OS->write((char *)&Buffer.front(), Buffer.size());
OS->flush();
-
+
OS.reset(0);
}
-
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
index 9bb3e1d..35dabad 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
@@ -11,7 +11,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/ConvertUTF.h"
-#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
@@ -43,19 +43,22 @@ static const enum raw_ostream::Colors savedColor =
/// \brief Add highlights to differences in template strings.
static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
bool &Normal, bool Bold) {
- for (unsigned i = 0, e = Str.size(); i < e; ++i)
- if (Str[i] != ToggleHighlight) {
- OS << Str[i];
- } else {
- if (Normal)
- OS.changeColor(templateColor, true);
- else {
- OS.resetColor();
- if (Bold)
- OS.changeColor(savedColor, true);
- }
- Normal = !Normal;
+ while (1) {
+ size_t Pos = Str.find(ToggleHighlight);
+ OS << Str.slice(0, Pos);
+ if (Pos == StringRef::npos)
+ break;
+
+ Str = Str.substr(Pos + 1);
+ if (Normal)
+ OS.changeColor(templateColor, true);
+ else {
+ OS.resetColor();
+ if (Bold)
+ OS.changeColor(savedColor, true);
}
+ Normal = !Normal;
+ }
}
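
The rewritten loop prints whole segments between toggle bytes in one call instead of byte-at-a-time. The same slicing sketched with std::string, with the color changes reduced to visible markers:

    #include <iostream>
    #include <string>

    const char ToggleHighlight = '\x01'; // sentinel delimiting highlighted text

    // Emit <hl>/</hl> where the real code calls changeColor/resetColor; each
    // run between toggles is written in a single call.
    static void applyTemplateHighlighting(std::string Str) {
      bool Normal = true;
      while (true) {
        size_t Pos = Str.find(ToggleHighlight);
        std::cout << Str.substr(0, Pos); // substr(0, npos) is the whole tail
        if (Pos == std::string::npos)
          break;
        Str = Str.substr(Pos + 1);
        std::cout << (Normal ? "<hl>" : "</hl>");
        Normal = !Normal;
      }
      std::cout << "\n";
    }

    int main() {
      applyTemplateHighlighting(std::string("vector<") + ToggleHighlight +
                                "int" + ToggleHighlight + ">");
    }
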
/// \brief Number of spaces to indent when word-wrapping.
@@ -110,28 +113,15 @@ printableTextForNextCharacter(StringRef SourceLine, size_t *i,
return std::make_pair(expandedTab, true);
}
- // FIXME: this data is copied from the private implementation of ConvertUTF.h
- static const char trailingBytesForUTF8[256] = {
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
- 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
- };
-
unsigned char const *begin, *end;
begin = reinterpret_cast<unsigned char const *>(&*(SourceLine.begin() + *i));
- end = begin + SourceLine.size();
+ end = begin + (SourceLine.size() - *i);
if (isLegalUTF8Sequence(begin, end)) {
UTF32 c;
UTF32 *cptr = &c;
unsigned char const *original_begin = begin;
- char trailingBytes = trailingBytesForUTF8[(unsigned char)SourceLine[*i]];
- unsigned char const *cp_end = begin+trailingBytes+1;
+ unsigned char const *cp_end = begin+getNumBytesForUTF8(SourceLine[*i]);
ConversionResult res = ConvertUTF8toUTF32(&begin, cp_end, &cptr, cptr+1,
strictConversion);
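
Note the subtle fix to the end pointer: the old code added the full line length to a pointer already *i bytes into the line, so the UTF-8 legality check could read past the buffer. A minimal illustration of the corrected arithmetic (names invented):

    #include <cassert>
    #include <cstddef>
    #include <string>

    // Compute the [Begin, End) byte range still unprocessed at offset I.
    static void remainingRange(const std::string &Line, std::size_t I,
                               const unsigned char *&Begin,
                               const unsigned char *&End) {
      Begin = reinterpret_cast<const unsigned char *>(Line.data() + I);
      // Buggy version: End = Begin + Line.size();  // overshoots by I bytes
      End = Begin + (Line.size() - I);              // the patched computation
    }

    int main() {
      std::string Line = "ab\xC3\xA9"; // "abé"; é is two UTF-8 bytes
      const unsigned char *B, *E;
      remainingRange(Line, 2, B, E);
      assert(E - B == 2);              // exactly the two bytes of é
      return 0;
    }
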
@@ -274,14 +264,44 @@ struct SourceColumnMap {
}
int columns() const { return m_byteToColumn.back(); }
int bytes() const { return m_columnToByte.back(); }
+
+ /// \brief Map a byte to the column which it is at the start of, or return -1
+ /// if it is not at the start of a column (for a UTF-8 trailing byte).
int byteToColumn(int n) const {
assert(0<=n && n<static_cast<int>(m_byteToColumn.size()));
return m_byteToColumn[n];
}
+
+ /// \brief Map a byte to the first column which contains it.
+ int byteToContainingColumn(int N) const {
+ assert(0 <= N && N < static_cast<int>(m_byteToColumn.size()));
+ while (m_byteToColumn[N] == -1)
+ --N;
+ return m_byteToColumn[N];
+ }
+
+ /// \brief Map a column to the byte which starts the column, or return -1 if
+ /// the column the second or subsequent column of an expanded tab or similar
+ /// multi-column entity.
int columnToByte(int n) const {
assert(0<=n && n<static_cast<int>(m_columnToByte.size()));
return m_columnToByte[n];
}
+
+ /// \brief Map from a byte index to the next byte which starts a column.
+ int startOfNextColumn(int N) const {
+ assert(0 <= N && N < static_cast<int>(m_columnToByte.size() - 1));
+ while (byteToColumn(++N) == -1) {}
+ return N;
+ }
+
+ /// \brief Map from a byte index to the previous byte which starts a column.
+ int startOfPreviousColumn(int N) const {
+ assert(0 < N && N < static_cast<int>(m_columnToByte.size()));
+ while (byteToColumn(--N) == -1) {}
+ return N;
+ }
+
StringRef getSourceLine() const {
return m_SourceLine;
}
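
A toy byteToColumn table for the navigation helpers above: trailing UTF-8 bytes map to -1, and startOfNextColumn/startOfPreviousColumn step over them in either direction (data invented):

    #include <cassert>
    #include <vector>

    // byteToColumn for "a\xC3\xA9b": 'a' starts column 0, the two bytes of 'é'
    // give {1, -1}, 'b' starts column 2, plus the one-past-the-end entry.
    static const std::vector<int> ByteToColumn = {0, 1, -1, 2, 3};

    static int startOfNextColumn(int N) {
      while (ByteToColumn[++N] == -1) {} // skip continuation bytes
      return N;
    }

    static int startOfPreviousColumn(int N) {
      while (ByteToColumn[--N] == -1) {}
      return N;
    }

    int main() {
      assert(startOfNextColumn(1) == 3);     // from 'é' straight to 'b'
      assert(startOfPreviousColumn(3) == 1); // and back again
    }
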
@@ -398,25 +418,24 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
bool ExpandedRegion = false;
if (SourceStart>0) {
- unsigned NewStart = SourceStart-1;
+ unsigned NewStart = map.startOfPreviousColumn(SourceStart);
// Skip over any whitespace we see here; we're looking for
// another bit of interesting text.
+ // FIXME: Detect non-ASCII whitespace characters too.
while (NewStart &&
- (map.byteToColumn(NewStart)==-1 ||
- isspace(static_cast<unsigned char>(SourceLine[NewStart]))))
- --NewStart;
+ isspace(static_cast<unsigned char>(SourceLine[NewStart])))
+ NewStart = map.startOfPreviousColumn(NewStart);
// Skip over this bit of "interesting" text.
- while (NewStart &&
- (map.byteToColumn(NewStart)!=-1 &&
- !isspace(static_cast<unsigned char>(SourceLine[NewStart]))))
- --NewStart;
-
- // Move up to the non-whitespace character we just saw.
- if (NewStart)
- ++NewStart;
+ while (NewStart) {
+ unsigned Prev = map.startOfPreviousColumn(NewStart);
+ if (isspace(static_cast<unsigned char>(SourceLine[Prev])))
+ break;
+ NewStart = Prev;
+ }
+ assert(map.byteToColumn(NewStart) != -1);
unsigned NewColumns = map.byteToColumn(SourceEnd) -
map.byteToColumn(NewStart);
if (NewColumns <= TargetColumns) {
@@ -426,21 +445,21 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
}
if (SourceEnd<SourceLine.size()) {
- unsigned NewEnd = SourceEnd+1;
+ unsigned NewEnd = map.startOfNextColumn(SourceEnd);
// Skip over any whitespace we see here; we're looking for
// another bit of interesting text.
- while (NewEnd<SourceLine.size() &&
- (map.byteToColumn(NewEnd)==-1 ||
- isspace(static_cast<unsigned char>(SourceLine[NewEnd]))))
- ++NewEnd;
+ // FIXME: Detect non-ASCII whitespace characters too.
+ while (NewEnd < SourceLine.size() &&
+ isspace(static_cast<unsigned char>(SourceLine[NewEnd])))
+ NewEnd = map.startOfNextColumn(NewEnd);
// Skip over this bit of "interesting" text.
- while (NewEnd<SourceLine.size() &&
- (map.byteToColumn(NewEnd)!=-1 &&
- !isspace(static_cast<unsigned char>(SourceLine[NewEnd]))))
- ++NewEnd;
+ while (NewEnd < SourceLine.size() &&
+ !isspace(static_cast<unsigned char>(SourceLine[NewEnd])))
+ NewEnd = map.startOfNextColumn(NewEnd);
+ assert(map.byteToColumn(NewEnd) != -1);
unsigned NewColumns = map.byteToColumn(NewEnd) -
map.byteToColumn(SourceStart);
if (NewColumns <= TargetColumns) {
@@ -475,7 +494,7 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// The line needs some truncation, and we'd prefer to keep the front
// if possible, so remove the back.
- if (BackColumnsRemoved)
+ if (BackColumnsRemoved > strlen(back_ellipse))
SourceLine.replace(SourceEnd, std::string::npos, back_ellipse);
// If that's enough then we're done
@@ -483,7 +502,7 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
return;
// Otherwise remove the front as well
- if (FrontColumnsRemoved) {
+ if (FrontColumnsRemoved > strlen(front_ellipse)) {
SourceLine.replace(0, SourceStart, front_ellipse);
CaretLine.replace(0, CaretStart, front_space);
if (!FixItInsertionLine.empty())
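The strengthened guards encode a simple cost check: splicing in a 3-column "..." only shortens the line when more than strlen("...") columns were actually removed; without the check a 1- or 2-column removal would make the line longer. A standalone sketch of the back-truncation arm, under those assumptions:

  #include <cstring>
  #include <string>

  static void truncateBack(std::string &Line, size_t SourceEnd,
                           size_t BackColumnsRemoved) {
    static const char back_ellipse[] = "...";
    if (BackColumnsRemoved > std::strlen(back_ellipse)) // guard from the patch
      Line.replace(SourceEnd, std::string::npos, back_ellipse);
  }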
@@ -651,7 +670,7 @@ static bool printWordWrapped(raw_ostream &OS, StringRef Str,
TextDiagnostic::TextDiagnostic(raw_ostream &OS,
const LangOptions &LangOpts,
- const DiagnosticOptions &DiagOpts)
+ DiagnosticOptions *DiagOpts)
: DiagnosticRenderer(LangOpts, DiagOpts), OS(OS) {}
TextDiagnostic::~TextDiagnostic() {}
@@ -670,13 +689,13 @@ TextDiagnostic::emitDiagnosticMessage(SourceLocation Loc,
if (Loc.isValid())
emitDiagnosticLoc(Loc, PLoc, Level, Ranges, *SM);
- if (DiagOpts.ShowColors)
+ if (DiagOpts->ShowColors)
OS.resetColor();
- printDiagnosticLevel(OS, Level, DiagOpts.ShowColors);
+ printDiagnosticLevel(OS, Level, DiagOpts->ShowColors);
printDiagnosticMessage(OS, Level, Message,
OS.tell() - StartOfLocationInfo,
- DiagOpts.MessageLength, DiagOpts.ShowColors);
+ DiagOpts->MessageLength, DiagOpts->ShowColors);
}
/*static*/ void
@@ -770,36 +789,36 @@ void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
}
unsigned LineNo = PLoc.getLine();
- if (!DiagOpts.ShowLocation)
+ if (!DiagOpts->ShowLocation)
return;
- if (DiagOpts.ShowColors)
+ if (DiagOpts->ShowColors)
OS.changeColor(savedColor, true);
OS << PLoc.getFilename();
- switch (DiagOpts.Format) {
+ switch (DiagOpts->getFormat()) {
case DiagnosticOptions::Clang: OS << ':' << LineNo; break;
case DiagnosticOptions::Msvc: OS << '(' << LineNo; break;
case DiagnosticOptions::Vi: OS << " +" << LineNo; break;
}
- if (DiagOpts.ShowColumn)
+ if (DiagOpts->ShowColumn)
// Compute the column number.
if (unsigned ColNo = PLoc.getColumn()) {
- if (DiagOpts.Format == DiagnosticOptions::Msvc) {
+ if (DiagOpts->getFormat() == DiagnosticOptions::Msvc) {
OS << ',';
ColNo--;
} else
OS << ':';
OS << ColNo;
}
- switch (DiagOpts.Format) {
+ switch (DiagOpts->getFormat()) {
case DiagnosticOptions::Clang:
case DiagnosticOptions::Vi: OS << ':'; break;
case DiagnosticOptions::Msvc: OS << ") : "; break;
}
- if (DiagOpts.ShowSourceRanges && !Ranges.empty()) {
+ if (DiagOpts->ShowSourceRanges && !Ranges.empty()) {
FileID CaretFileID =
SM.getFileID(SM.getExpansionLoc(Loc));
bool PrintedRange = false;
@@ -858,7 +877,7 @@ void TextDiagnostic::emitBasicNote(StringRef Message) {
void TextDiagnostic::emitIncludeLocation(SourceLocation Loc,
PresumedLoc PLoc,
const SourceManager &SM) {
- if (DiagOpts.ShowLocation)
+ if (DiagOpts->ShowLocation)
OS << "In file included from " << PLoc.getFilename() << ':'
<< PLoc.getLine() << ":\n";
else
@@ -886,7 +905,7 @@ void TextDiagnostic::emitSnippetAndCaret(
// was part of a different warning or error diagnostic, or if the
// diagnostic has ranges. We don't want to emit the same caret
// multiple times if one loc has multiple diagnostics.
- if (!DiagOpts.ShowCarets)
+ if (!DiagOpts->ShowCarets)
return;
if (Loc == LastLoc && Ranges.empty() && Hints.empty() &&
(LastLevel != DiagnosticsEngine::Note || Level == LastLevel))
@@ -924,7 +943,7 @@ void TextDiagnostic::emitSnippetAndCaret(
// length as the line of source code.
std::string CaretLine(LineEnd-LineStart, ' ');
- const SourceColumnMap sourceColMap(SourceLine, DiagOpts.TabStop);
+ const SourceColumnMap sourceColMap(SourceLine, DiagOpts->TabStop);
// Highlight all of the characters covered by Ranges with ~ characters.
for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
@@ -933,7 +952,7 @@ void TextDiagnostic::emitSnippetAndCaret(
highlightRange(*I, LineNo, FID, sourceColMap, CaretLine, SM);
// Next, insert the caret itself.
- ColNo = sourceColMap.byteToColumn(ColNo-1);
+ ColNo = sourceColMap.byteToContainingColumn(ColNo-1);
if (CaretLine.size()<ColNo+1)
CaretLine.resize(ColNo+1, ' ');
CaretLine[ColNo] = '^';
@@ -944,7 +963,7 @@ void TextDiagnostic::emitSnippetAndCaret(
// If the source line is too long for our terminal, select only the
// "interesting" source region within that line.
- unsigned Columns = DiagOpts.MessageLength;
+ unsigned Columns = DiagOpts->MessageLength;
if (Columns)
selectInterestingSourceRegion(SourceLine, CaretLine, FixItInsertionLine,
Columns, sourceColMap);
@@ -953,7 +972,7 @@ void TextDiagnostic::emitSnippetAndCaret(
// to produce easily machine parsable output. Add a space before the
// source line and the caret to make it trivial to tell the main diagnostic
// line from what the user is intended to see.
- if (DiagOpts.ShowSourceRanges) {
+ if (DiagOpts->ShowSourceRanges) {
SourceLine = ' ' + SourceLine;
CaretLine = ' ' + CaretLine;
}
@@ -965,20 +984,20 @@ void TextDiagnostic::emitSnippetAndCaret(
// Emit what we have computed.
emitSnippet(SourceLine);
- if (DiagOpts.ShowColors)
+ if (DiagOpts->ShowColors)
OS.changeColor(caretColor, true);
OS << CaretLine << '\n';
- if (DiagOpts.ShowColors)
+ if (DiagOpts->ShowColors)
OS.resetColor();
if (!FixItInsertionLine.empty()) {
- if (DiagOpts.ShowColors)
+ if (DiagOpts->ShowColors)
// Print fixit line in color
OS.changeColor(fixitColor, false);
- if (DiagOpts.ShowSourceRanges)
+ if (DiagOpts->ShowSourceRanges)
OS << ' ';
OS << FixItInsertionLine << '\n';
- if (DiagOpts.ShowColors)
+ if (DiagOpts->ShowColors)
OS.resetColor();
}
@@ -997,15 +1016,15 @@ void TextDiagnostic::emitSnippet(StringRef line) {
while (i<line.size()) {
std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(line, &i, DiagOpts.TabStop);
+ = printableTextForNextCharacter(line, &i, DiagOpts->TabStop);
bool was_printable = res.second;
- if (DiagOpts.ShowColors && was_printable == print_reversed) {
+ if (DiagOpts->ShowColors && was_printable == print_reversed) {
if (print_reversed)
OS.reverseColor();
OS << to_print;
to_print.clear();
- if (DiagOpts.ShowColors)
+ if (DiagOpts->ShowColors)
OS.resetColor();
}
@@ -1013,10 +1032,10 @@ void TextDiagnostic::emitSnippet(StringRef line) {
to_print += res.first.str();
}
- if (print_reversed && DiagOpts.ShowColors)
+ if (print_reversed && DiagOpts->ShowColors)
OS.reverseColor();
OS << to_print;
- if (print_reversed && DiagOpts.ShowColors)
+ if (print_reversed && DiagOpts->ShowColors)
OS.resetColor();
OS << '\n';
@@ -1030,16 +1049,8 @@ void TextDiagnostic::highlightRange(const CharSourceRange &R,
const SourceManager &SM) {
if (!R.isValid()) return;
- SourceLocation Begin = SM.getExpansionLoc(R.getBegin());
- SourceLocation End = SM.getExpansionLoc(R.getEnd());
-
- // If the End location and the start location are the same and are a macro
- // location, then the range was something that came from a macro expansion
- // or _Pragma. If this is an object-like macro, the best we can do is to
- // highlight the range. If this is a function-like macro, we'd also like to
- // highlight the arguments.
- if (Begin == End && R.getEnd().isMacroID())
- End = SM.getExpansionRange(R.getEnd()).second;
+ SourceLocation Begin = R.getBegin();
+ SourceLocation End = R.getEnd();
unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
if (StartLineNo > LineNo || SM.getFileID(Begin) != FID)
@@ -1080,7 +1091,7 @@ void TextDiagnostic::highlightRange(const CharSourceRange &R,
while (StartColNo < map.getSourceLine().size() &&
(map.getSourceLine()[StartColNo] == ' ' ||
map.getSourceLine()[StartColNo] == '\t'))
- ++StartColNo;
+ StartColNo = map.startOfNextColumn(StartColNo);
// Pick the last non-whitespace column.
if (EndColNo > map.getSourceLine().size())
@@ -1088,7 +1099,7 @@ void TextDiagnostic::highlightRange(const CharSourceRange &R,
while (EndColNo-1 &&
(map.getSourceLine()[EndColNo-1] == ' ' ||
map.getSourceLine()[EndColNo-1] == '\t'))
- --EndColNo;
+ EndColNo = map.startOfPreviousColumn(EndColNo);
// If the start/end passed each other, then we are trying to highlight a
// range that just exists in whitespace, which must be some sort of other
@@ -1100,8 +1111,8 @@ void TextDiagnostic::highlightRange(const CharSourceRange &R,
assert(EndColNo <= map.getSourceLine().size() && "Invalid range!");
// Fill the range with ~'s.
- StartColNo = map.byteToColumn(StartColNo);
- EndColNo = map.byteToColumn(EndColNo);
+ StartColNo = map.byteToContainingColumn(StartColNo);
+ EndColNo = map.byteToContainingColumn(EndColNo);
assert(StartColNo <= EndColNo && "Invalid range!");
if (CaretLine.size() < EndColNo)
@@ -1116,7 +1127,7 @@ std::string TextDiagnostic::buildFixItInsertionLine(
const SourceManager &SM) {
std::string FixItInsertionLine;
- if (Hints.empty() || !DiagOpts.ShowFixits)
+ if (Hints.empty() || !DiagOpts->ShowFixits)
return FixItInsertionLine;
unsigned PrevHintEndCol = 0;
@@ -1139,7 +1150,7 @@ std::string TextDiagnostic::buildFixItInsertionLine(
// The hint must start inside the source or right at the end
assert(HintByteOffset < static_cast<unsigned>(map.bytes())+1);
- unsigned HintCol = map.byteToColumn(HintByteOffset);
+ unsigned HintCol = map.byteToContainingColumn(HintByteOffset);
// If we inserted a long previous hint, push this one forwards, and add
// an extra space to show that this is not part of the previous
@@ -1176,14 +1187,14 @@ std::string TextDiagnostic::buildFixItInsertionLine(
}
}
- expandTabs(FixItInsertionLine, DiagOpts.TabStop);
+ expandTabs(FixItInsertionLine, DiagOpts->TabStop);
return FixItInsertionLine;
}
void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints,
const SourceManager &SM) {
- if (!DiagOpts.ShowParseableFixits)
+ if (!DiagOpts->ShowParseableFixits)
return;
// We follow FixItRewriter's example in not (yet) handling
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
index 382e156..aa7a61a 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -12,9 +12,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/TextDiagnostic.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -25,9 +25,9 @@
using namespace clang;
TextDiagnosticPrinter::TextDiagnosticPrinter(raw_ostream &os,
- const DiagnosticOptions &diags,
+ DiagnosticOptions *diags,
bool _OwnsOutputStream)
- : OS(os), DiagOpts(&diags),
+ : OS(os), DiagOpts(diags),
OwnsOutputStream(_OwnsOutputStream) {
}
@@ -39,7 +39,7 @@ TextDiagnosticPrinter::~TextDiagnosticPrinter() {
void TextDiagnosticPrinter::BeginSourceFile(const LangOptions &LO,
const Preprocessor *PP) {
// Build the TextDiagnostic utility.
- TextDiag.reset(new TextDiagnostic(OS, LO, *DiagOpts));
+ TextDiag.reset(new TextDiagnostic(OS, LO, &*DiagOpts));
}
void TextDiagnosticPrinter::EndSourceFile() {
@@ -158,5 +158,5 @@ void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
DiagnosticConsumer *
TextDiagnosticPrinter::clone(DiagnosticsEngine &Diags) const {
- return new TextDiagnosticPrinter(OS, *DiagOpts, /*OwnsOutputStream=*/false);
+ return new TextDiagnosticPrinter(OS, &*DiagOpts, /*OwnsOutputStream=*/false);
}
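The &*DiagOpts spelling at both call sites suggests DiagOpts is now a smart pointer rather than a raw const reference; this import makes DiagnosticOptions reference-counted, with consumers holding llvm::IntrusiveRefCntPtr handles. A hedged sketch of that ownership shape (field and accessor are assumptions based on this hunk, not the full class):

  #include "clang/Basic/DiagnosticOptions.h"
  #include "llvm/ADT/IntrusiveRefCntPtr.h"

  class PrinterOwnershipSketch {
    llvm::IntrusiveRefCntPtr<clang::DiagnosticOptions> DiagOpts; // shares ownership
  public:
    explicit PrinterOwnershipSketch(clang::DiagnosticOptions *Opts)
        : DiagOpts(Opts) {}
    clang::DiagnosticOptions *rawOpts() { return &*DiagOpts; }   // same idiom as above
  };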
diff --git a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index a9378a1..1750946 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -31,14 +31,17 @@ VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &_Diags)
: Diags(_Diags),
PrimaryClient(Diags.getClient()), OwnsPrimaryClient(Diags.ownsClient()),
Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(0),
- ActiveSourceFiles(0)
+ LangOpts(0), SrcManager(0), ActiveSourceFiles(0), Status(HasNoDirectives)
{
Diags.takeClient();
+ if (Diags.hasSourceManager())
+ setSourceManager(Diags.getSourceManager());
}
VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
assert(!ActiveSourceFiles && "Incomplete parsing of source files!");
assert(!CurrentPreprocessor && "CurrentPreprocessor should be invalid!");
+ SrcManager = 0;
CheckDiagnostics();
Diags.takeClient();
if (OwnsPrimaryClient)
@@ -48,21 +51,20 @@ VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
#ifndef NDEBUG
namespace {
class VerifyFileTracker : public PPCallbacks {
- typedef VerifyDiagnosticConsumer::FilesParsedForDirectivesSet ListType;
- ListType &FilesList;
+ VerifyDiagnosticConsumer &Verify;
SourceManager &SM;
public:
- VerifyFileTracker(ListType &FilesList, SourceManager &SM)
- : FilesList(FilesList), SM(SM) { }
+ VerifyFileTracker(VerifyDiagnosticConsumer &Verify, SourceManager &SM)
+ : Verify(Verify), SM(SM) { }
/// \brief Hook into the preprocessor and update the list of parsed
/// files when the preprocessor indicates a new file is entered.
virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) {
- if (const FileEntry *E = SM.getFileEntryForID(SM.getFileID(Loc)))
- FilesList.insert(E);
+ Verify.UpdateParsedFileStatus(SM, SM.getFileID(Loc),
+ VerifyDiagnosticConsumer::IsParsed);
}
};
} // End anonymous namespace.
@@ -76,10 +78,12 @@ void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
if (++ActiveSourceFiles == 1) {
if (PP) {
CurrentPreprocessor = PP;
+ this->LangOpts = &LangOpts;
+ setSourceManager(PP->getSourceManager());
const_cast<Preprocessor*>(PP)->addCommentHandler(this);
#ifndef NDEBUG
- VerifyFileTracker *V = new VerifyFileTracker(FilesParsedForDirectives,
- PP->getSourceManager());
+ // Debug build tracks parsed files.
+ VerifyFileTracker *V = new VerifyFileTracker(*this, *SrcManager);
const_cast<Preprocessor*>(PP)->addPPCallbacks(V);
#endif
}
@@ -101,18 +105,40 @@ void VerifyDiagnosticConsumer::EndSourceFile() {
// Check diagnostics once last file completed.
CheckDiagnostics();
CurrentPreprocessor = 0;
+ LangOpts = 0;
}
}
void VerifyDiagnosticConsumer::HandleDiagnostic(
DiagnosticsEngine::Level DiagLevel, const Diagnostic &Info) {
+ if (Info.hasSourceManager())
+ setSourceManager(Info.getSourceManager());
+
#ifndef NDEBUG
- if (Info.hasSourceManager()) {
- FileID FID = Info.getSourceManager().getFileID(Info.getLocation());
- if (!FID.isInvalid())
- FilesWithDiagnostics.insert(FID);
+ // Debug build tracks unparsed files for possible
+ // unparsed expected-* directives.
+ if (SrcManager) {
+ SourceLocation Loc = Info.getLocation();
+ if (Loc.isValid()) {
+ ParsedStatus PS = IsUnparsed;
+
+ Loc = SrcManager->getExpansionLoc(Loc);
+ FileID FID = SrcManager->getFileID(Loc);
+
+ const FileEntry *FE = SrcManager->getFileEntryForID(FID);
+ if (FE && CurrentPreprocessor && SrcManager->isLoadedFileID(FID)) {
+ // If the file is a module's header file, it shall not be parsed
+ // for expected-* directives.
+ HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
+ if (HS.findModuleForHeader(FE))
+ PS = IsUnparsedNoDirectives;
+ }
+
+ UpdateParsedFileStatus(*SrcManager, FID, PS);
+ }
}
#endif
+
// Send the diagnostic to the buffer, we will check it once we reach the end
// of the source file (or are destructed).
Buffer->HandleDiagnostic(DiagLevel, Info);
@@ -200,10 +226,22 @@ public:
// Return true if string literal is found.
// When true, P marks begin-position of S in content.
- bool Search(StringRef S) {
- P = std::search(C, End, S.begin(), S.end());
- PEnd = P + S.size();
- return P != End;
+ bool Search(StringRef S, bool EnsureStartOfWord = false) {
+ do {
+ P = std::search(C, End, S.begin(), S.end());
+ PEnd = P + S.size();
+ if (P == End)
+ break;
+ if (!EnsureStartOfWord
+ // Check if string literal starts a new word.
+ || P == Begin || isspace(P[-1])
+ // Or it could be preceded by the start of a comment.
+ || (P > (Begin + 1) && (P[-1] == '/' || P[-1] == '*')
+ && P[-2] == '/'))
+ return true;
+ // Otherwise, skip and search again.
+ } while (Advance());
+ return false;
}
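Search gains an EnsureStartOfWord mode so that, for example, "expected" is not matched inside "unexpected"; a rejected match advances and retries. A minimal model of the acceptance test (standalone, with std::string in place of the parse buffer):

  #include <cctype>
  #include <string>

  // A match at offset P starts a word if it begins the buffer, follows
  // whitespace, or follows the "//" or "/*" that opened the comment.
  static bool startsWord(const std::string &Buf, size_t P) {
    if (P == 0 || std::isspace((unsigned char)Buf[P - 1]))
      return true;
    return P > 1 && (Buf[P - 1] == '/' || Buf[P - 1] == '*') &&
           Buf[P - 2] == '/';
  }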
// Advance 1-past previous next/search.
@@ -240,12 +278,13 @@ private:
///
/// Returns true if any valid directives were found.
static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
- SourceLocation Pos, DiagnosticsEngine &Diags) {
+ SourceLocation Pos, DiagnosticsEngine &Diags,
+ VerifyDiagnosticConsumer::DirectiveStatus &Status) {
// A single comment may contain multiple directives.
bool FoundDirective = false;
for (ParseHelper PH(S); !PH.Done();) {
// Search for token: expected
- if (!PH.Search("expected"))
+ if (!PH.Search("expected", true))
break;
PH.Advance();
@@ -262,10 +301,24 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
DL = ED ? &ED->Warnings : NULL;
else if (PH.Next("note"))
DL = ED ? &ED->Notes : NULL;
- else
+ else if (PH.Next("no-diagnostics")) {
+ if (Status == VerifyDiagnosticConsumer::HasOtherExpectedDirectives)
+ Diags.Report(Pos, diag::err_verify_invalid_no_diags)
+ << /*IsExpectedNoDiagnostics=*/true;
+ else
+ Status = VerifyDiagnosticConsumer::HasExpectedNoDiagnostics;
+ continue;
+ } else
continue;
PH.Advance();
+ if (Status == VerifyDiagnosticConsumer::HasExpectedNoDiagnostics) {
+ Diags.Report(Pos, diag::err_verify_invalid_no_diags)
+ << /*IsExpectedNoDiagnostics=*/false;
+ continue;
+ }
+ Status = VerifyDiagnosticConsumer::HasOtherExpectedDirectives;
+
// If a directive has been found but we're not interested
// in storing the directive information, return now.
if (!DL)
@@ -412,7 +465,7 @@ bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
// Fold any "\<EOL>" sequences
size_t loc = C.find('\\');
if (loc == StringRef::npos) {
- ParseDirective(C, &ED, SM, CommentBegin, PP.getDiagnostics());
+ ParseDirective(C, &ED, SM, CommentBegin, PP.getDiagnostics(), Status);
return false;
}
@@ -442,7 +495,7 @@ bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
}
if (!C2.empty())
- ParseDirective(C2, &ED, SM, CommentBegin, PP.getDiagnostics());
+ ParseDirective(C2, &ED, SM, CommentBegin, PP.getDiagnostics(), Status);
return false;
}
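Threaded through both ParseDirective call sites is the new DirectiveStatus value, a small state machine enforcing that expected-no-diagnostics conflicts with any other expected-* directive, in either order. A standalone sketch of the transitions (enumerators are the patch's own; the helper is ours):

  enum DirectiveStatus {
    HasNoDirectives,
    HasNoDirectivesReported,
    HasExpectedNoDiagnostics,
    HasOtherExpectedDirectives
  };

  // Returns false when diag::err_verify_invalid_no_diags should be reported.
  static bool noteDirective(DirectiveStatus &S, bool IsNoDiagnostics) {
    if (IsNoDiagnostics) {
      if (S == HasOtherExpectedDirectives)
        return false;                 // no-diagnostics after other directives
      S = HasExpectedNoDiagnostics;
      return true;
    }
    if (S == HasExpectedNoDiagnostics)
      return false;                   // other directive after no-diagnostics
    S = HasOtherExpectedDirectives;
    return true;
  }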
@@ -452,34 +505,36 @@ bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
/// Preprocessor, directives inside skipped #if blocks will still be found.
///
/// \return true if any directives were found.
-static bool findDirectives(const Preprocessor &PP, FileID FID) {
+static bool findDirectives(SourceManager &SM, FileID FID,
+ const LangOptions &LangOpts) {
// Create a raw lexer to pull all the comments out of FID.
if (FID.isInvalid())
return false;
- SourceManager& SM = PP.getSourceManager();
// Create a lexer to lex all the tokens of the main file in raw mode.
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
- Lexer RawLex(FID, FromFile, SM, PP.getLangOpts());
+ Lexer RawLex(FID, FromFile, SM, LangOpts);
// Return comments as tokens, this is how we find expected diagnostics.
RawLex.SetCommentRetentionState(true);
Token Tok;
Tok.setKind(tok::comment);
- bool Found = false;
+ VerifyDiagnosticConsumer::DirectiveStatus Status =
+ VerifyDiagnosticConsumer::HasNoDirectives;
while (Tok.isNot(tok::eof)) {
RawLex.Lex(Tok);
if (!Tok.is(tok::comment)) continue;
- std::string Comment = PP.getSpelling(Tok);
+ std::string Comment = RawLex.getSpelling(Tok, SM, LangOpts);
if (Comment.empty()) continue;
- // Find all expected errors/warnings/notes.
- Found |= ParseDirective(Comment, 0, SM, Tok.getLocation(),
- PP.getDiagnostics());
+ // Find first directive.
+ if (ParseDirective(Comment, 0, SM, Tok.getLocation(),
+ SM.getDiagnostics(), Status))
+ return true;
}
- return Found;
+ return false;
}
#endif // !NDEBUG
@@ -601,41 +656,95 @@ static unsigned CheckResults(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
return NumProblems;
}
+void VerifyDiagnosticConsumer::UpdateParsedFileStatus(SourceManager &SM,
+ FileID FID,
+ ParsedStatus PS) {
+ // Check SourceManager hasn't changed.
+ setSourceManager(SM);
+
+#ifndef NDEBUG
+ if (FID.isInvalid())
+ return;
+
+ const FileEntry *FE = SM.getFileEntryForID(FID);
+
+ if (PS == IsParsed) {
+ // Move the FileID from the unparsed set to the parsed set.
+ UnparsedFiles.erase(FID);
+ ParsedFiles.insert(std::make_pair(FID, FE));
+ } else if (!ParsedFiles.count(FID) && !UnparsedFiles.count(FID)) {
+ // Add the FileID to the unparsed set if we haven't seen it before.
+
+ // Check for directives.
+ bool FoundDirectives;
+ if (PS == IsUnparsedNoDirectives)
+ FoundDirectives = false;
+ else
+ FoundDirectives = !LangOpts || findDirectives(SM, FID, *LangOpts);
+
+ // Add the FileID to the unparsed set.
+ UnparsedFiles.insert(std::make_pair(FID,
+ UnparsedFileStatus(FE, FoundDirectives)));
+ }
+#endif
+}
+
void VerifyDiagnosticConsumer::CheckDiagnostics() {
// Ensure any diagnostics go to the primary client.
bool OwnsCurClient = Diags.ownsClient();
DiagnosticConsumer *CurClient = Diags.takeClient();
Diags.setClient(PrimaryClient, false);
- // If we have a preprocessor, scan the source for expected diagnostic
- // markers. If not then any diagnostics are unexpected.
- if (CurrentPreprocessor) {
- SourceManager &SM = CurrentPreprocessor->getSourceManager();
-
#ifndef NDEBUG
- // In a debug build, scan through any files that may have been missed
- // during parsing and issue a fatal error if directives are contained
- // within these files. If a fatal error occurs, this suggests that
- // this file is being parsed separately from the main file.
- HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
- for (FilesWithDiagnosticsSet::iterator I = FilesWithDiagnostics.begin(),
- End = FilesWithDiagnostics.end();
- I != End; ++I) {
- const FileEntry *E = SM.getFileEntryForID(*I);
- // Don't check files already parsed or those handled as modules.
- if (E && (FilesParsedForDirectives.count(E)
- || HS.findModuleForHeader(E)))
+ // In a debug build, scan through any files that may have been missed
+ // during parsing and issue a fatal error if directives are contained
+ // within these files. If a fatal error occurs, this suggests that
+ // this file is being parsed separately from the main file, in which
+ // case consider moving the directives to the correct place, if this
+ // is applicable.
+ if (UnparsedFiles.size() > 0) {
+ // Generate a cache of parsed FileEntry pointers for alias lookups.
+ llvm::SmallPtrSet<const FileEntry *, 8> ParsedFileCache;
+ for (ParsedFilesMap::iterator I = ParsedFiles.begin(),
+ End = ParsedFiles.end(); I != End; ++I) {
+ if (const FileEntry *FE = I->second)
+ ParsedFileCache.insert(FE);
+ }
+
+ // Iterate through list of unparsed files.
+ for (UnparsedFilesMap::iterator I = UnparsedFiles.begin(),
+ End = UnparsedFiles.end(); I != End; ++I) {
+ const UnparsedFileStatus &Status = I->second;
+ const FileEntry *FE = Status.getFile();
+
+ // Skip files that have been parsed via an alias.
+ if (FE && ParsedFileCache.count(FE))
continue;
- if (findDirectives(*CurrentPreprocessor, *I))
+ // Report a fatal error if this file contained directives.
+ if (Status.foundDirectives()) {
llvm::report_fatal_error(Twine("-verify directives found after rather"
" than during normal parsing of ",
- StringRef(E ? E->getName() : "(unknown)")));
+ StringRef(FE ? FE->getName() : "(unknown)")));
+ }
+ }
+
+ // UnparsedFiles has been processed now, so clear it.
+ UnparsedFiles.clear();
+ }
+#endif // !NDEBUG
+
+ if (SrcManager) {
+ // Produce an error if no expected-* directives could be found in the
+ // source file(s) processed.
+ if (Status == HasNoDirectives) {
+ Diags.Report(diag::err_verify_no_directives).setForceEmit();
+ ++NumErrors;
+ Status = HasNoDirectivesReported;
}
-#endif
// Check that the expected diagnostics occurred.
- NumErrors += CheckResults(Diags, SM, *Buffer, ED);
+ NumErrors += CheckResults(Diags, *SrcManager, *Buffer, ED);
} else {
NumErrors += (PrintUnexpected(Diags, 0, Buffer->err_begin(),
Buffer->err_end(), "error") +
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
index b7d4a3b..f789b7f 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
@@ -24,7 +24,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Lex/LexDiagnostic.h"
-#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include <cstring>
#include <utility>
@@ -51,8 +51,7 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
const DiagnosticOptions &Opts) {
Diags.setSuppressSystemWarnings(true); // Default to -Wno-system-headers
Diags.setIgnoreAllWarnings(Opts.IgnoreWarnings);
- Diags.setShowOverloads(
- static_cast<DiagnosticsEngine::OverloadsShown>(Opts.ShowOverloads));
+ Diags.setShowOverloads(Opts.getShowOverloads());
Diags.setElideType(Opts.ElideType);
Diags.setPrintTemplateTree(Opts.ShowTemplateTree);
diff --git a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index bd50083..c7c55b0 100644
--- a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "clang/ARCMigrate/ARCMTActions.h"
#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Driver/Option.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/OptTable.h"
#include "clang/Frontend/CompilerInvocation.h"
@@ -23,7 +24,7 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendPluginRegistry.h"
-#include "clang/Rewrite/FrontendActions.h"
+#include "clang/Rewrite/Frontend/FrontendActions.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/DynamicLibrary.h"
using namespace clang;
@@ -137,7 +138,9 @@ bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
if (Clang->getFrontendOpts().ShowHelp) {
OwningPtr<driver::OptTable> Opts(driver::createDriverOptTable());
Opts->PrintHelp(llvm::outs(), "clang -cc1",
- "LLVM 'Clang' Compiler: http://clang.llvm.org");
+ "LLVM 'Clang' Compiler: http://clang.llvm.org",
+ /*Include=*/driver::options::CC1Option,
+ /*Exclude=*/0);
return 0;
}
@@ -175,7 +178,7 @@ bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -analyzer-checker-help.
// This should happen AFTER plugins have been loaded!
- if (Clang->getAnalyzerOpts().ShowCheckerHelp) {
+ if (Clang->getAnalyzerOpts()->ShowCheckerHelp) {
ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins);
return 0;
}
diff --git a/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_aes.h b/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_aes.h
new file mode 100644
index 0000000..2bfa027
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_aes.h
@@ -0,0 +1,67 @@
+/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef _WMMINTRIN_AES_H
+#define _WMMINTRIN_AES_H
+
+#include <emmintrin.h>
+
+#if !defined (__AES__)
+# error "AES instructions not enabled"
+#else
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesenc_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenc128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesenclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenclast128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesdec_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdec128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesdeclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdeclast128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesimc_si128(__m128i __V)
+{
+ return (__m128i)__builtin_ia32_aesimc128(__V);
+}
+
+#define _mm_aeskeygenassist_si128(C, R) \
+ __builtin_ia32_aeskeygenassist128((C), (R))
+
+#endif
+
+#endif /* _WMMINTRIN_AES_H */
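A brief usage sketch for the relocated AES intrinsics (build with -maes; key expansion is elided, and Keys is assumed to hold the 11 round keys of AES-128):

  #include <wmmintrin.h>

  static __m128i aes128_encrypt_block(__m128i Block, const __m128i Keys[11]) {
    Block = _mm_xor_si128(Block, Keys[0]);         // initial AddRoundKey
    for (int i = 1; i < 10; ++i)
      Block = _mm_aesenc_si128(Block, Keys[i]);    // nine full rounds
    return _mm_aesenclast_si128(Block, Keys[10]);  // final round, no MixColumns
  }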
diff --git a/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_pclmul.h b/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_pclmul.h
new file mode 100644
index 0000000..8d1f1b7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/__wmmintrin_pclmul.h
@@ -0,0 +1,34 @@
+/*===---- __wmmintrin_pclmul.h - AES intrinsics ----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef _WMMINTRIN_PCLMUL_H
+#define _WMMINTRIN_PCLMUL_H
+
+#if !defined (__PCLMUL__)
+# error "PCLMUL instruction is not enabled"
+#else
+#define _mm_clmulepi64_si128(__X, __Y, __I) \
+ ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
+ (__v2di)(__m128i)(__Y), (char)(__I)))
+#endif
+
+#endif /* _WMMINTRIN_PCLMUL_H */
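Likewise a usage sketch for the carry-less multiply (build with -mpclmul); the immediate selects which 64-bit lane of each operand enters the 64x64 -> 128-bit GF(2) polynomial product:

  #include <wmmintrin.h>

  static __m128i clmul_lo(__m128i X, __m128i Y) {
    return _mm_clmulepi64_si128(X, Y, 0x00); // low lane of X times low lane of Y
  }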
diff --git a/contrib/llvm/tools/clang/lib/Headers/altivec.h b/contrib/llvm/tools/clang/lib/Headers/altivec.h
index a225378..2bf53fb 100644
--- a/contrib/llvm/tools/clang/lib/Headers/altivec.h
+++ b/contrib/llvm/tools/clang/lib/Headers/altivec.h
@@ -4363,14 +4363,14 @@ vec_perm(vector float a, vector float b, vector unsigned char c)
/* vec_vperm */
-vector signed char __ATTRS_o_ai
+static vector signed char __ATTRS_o_ai
vec_vperm(vector signed char a, vector signed char b, vector unsigned char c)
{
return (vector signed char)
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector unsigned char __ATTRS_o_ai
+static vector unsigned char __ATTRS_o_ai
vec_vperm(vector unsigned char a,
vector unsigned char b,
vector unsigned char c)
@@ -4379,21 +4379,21 @@ vec_vperm(vector unsigned char a,
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector bool char __ATTRS_o_ai
+static vector bool char __ATTRS_o_ai
vec_vperm(vector bool char a, vector bool char b, vector unsigned char c)
{
return (vector bool char)
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector short __ATTRS_o_ai
+static vector short __ATTRS_o_ai
vec_vperm(vector short a, vector short b, vector unsigned char c)
{
return (vector short)
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector unsigned short __ATTRS_o_ai
+static vector unsigned short __ATTRS_o_ai
vec_vperm(vector unsigned short a,
vector unsigned short b,
vector unsigned char c)
@@ -4402,41 +4402,41 @@ vec_vperm(vector unsigned short a,
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector bool short __ATTRS_o_ai
+static vector bool short __ATTRS_o_ai
vec_vperm(vector bool short a, vector bool short b, vector unsigned char c)
{
return (vector bool short)
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector pixel __ATTRS_o_ai
+static vector pixel __ATTRS_o_ai
vec_vperm(vector pixel a, vector pixel b, vector unsigned char c)
{
return (vector pixel)
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector int __ATTRS_o_ai
+static vector int __ATTRS_o_ai
vec_vperm(vector int a, vector int b, vector unsigned char c)
{
return (vector int)__builtin_altivec_vperm_4si(a, b, c);
}
-vector unsigned int __ATTRS_o_ai
+static vector unsigned int __ATTRS_o_ai
vec_vperm(vector unsigned int a, vector unsigned int b, vector unsigned char c)
{
return (vector unsigned int)
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector bool int __ATTRS_o_ai
+static vector bool int __ATTRS_o_ai
vec_vperm(vector bool int a, vector bool int b, vector unsigned char c)
{
return (vector bool int)
__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
-vector float __ATTRS_o_ai
+static vector float __ATTRS_o_ai
vec_vperm(vector float a, vector float b, vector unsigned char c)
{
return (vector float)
@@ -4445,7 +4445,7 @@ vec_vperm(vector float a, vector float b, vector unsigned char c)
/* vec_re */
-vector float __attribute__((__always_inline__))
+static vector float __attribute__((__always_inline__))
vec_re(vector float a)
{
return __builtin_altivec_vrefp(a);
@@ -4453,7 +4453,7 @@ vec_re(vector float a)
/* vec_vrefp */
-vector float __attribute__((__always_inline__))
+static vector float __attribute__((__always_inline__))
vec_vrefp(vector float a)
{
return __builtin_altivec_vrefp(a);
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
index c60b0c4..a05cfad 100644
--- a/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
@@ -70,6 +70,25 @@ _pext_u64(unsigned long long __X, unsigned long long __Y)
return __builtin_ia32_pext_di(__X, __Y);
}
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_mulx_u64 (unsigned long long __X, unsigned long long __Y,
+ unsigned long long *__P)
+{
+ unsigned __int128 __res = (unsigned __int128) __X * __Y;
+ *__P = (unsigned long long) (__res >> 64);
+ return (unsigned long long) __res;
+}
+
+#else /* !__x86_64__ */
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
+{
+ unsigned long long __res = (unsigned long long) __X * __Y;
+ *__P = (unsigned int) (__res >> 32);
+ return (unsigned int) __res;
+}
+
#endif /* !__x86_64__ */
#endif /* __BMI2INTRIN_H */
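The new _mulx_u64/_mulx_u32 intrinsics expose the full double-width product: the low half is returned and the high half written through the out-parameter. A usage sketch (x86-64, build with -mbmi2):

  #include <x86intrin.h>

  static void mul64x64(unsigned long long A, unsigned long long B,
                       unsigned long long *Hi, unsigned long long *Lo) {
    *Lo = _mulx_u64(A, B, Hi); // 128-bit product split across Hi:Lo
  }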
diff --git a/contrib/llvm/tools/clang/lib/Headers/cpuid.h b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
index 05c293f..33df7c2 100644
--- a/contrib/llvm/tools/clang/lib/Headers/cpuid.h
+++ b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
@@ -28,6 +28,6 @@
static inline int __get_cpuid (unsigned int level, unsigned int *eax,
unsigned int *ebx, unsigned int *ecx,
unsigned int *edx) {
- asm("cpuid" : "=a"(*eax), "=b" (*ebx), "=c"(*ecx), "=d"(*edx) : "0"(level));
+ __asm("cpuid" : "=a"(*eax), "=b" (*ebx), "=c"(*ecx), "=d"(*edx) : "0"(level));
return 1;
}
diff --git a/contrib/llvm/tools/clang/lib/Headers/f16cintrin.h b/contrib/llvm/tools/clang/lib/Headers/f16cintrin.h
new file mode 100644
index 0000000..2c96952
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/f16cintrin.h
@@ -0,0 +1,58 @@
+/*===---- f16cintrin.h - F16C intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <f16cintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __F16C__
+# error "F16C instruction is not enabled"
+#endif /* __F16C__ */
+
+#ifndef __F16CINTRIN_H
+#define __F16CINTRIN_H
+
+typedef float __v8sf __attribute__ ((__vector_size__ (32)));
+typedef float __m256 __attribute__ ((__vector_size__ (32)));
+
+#define _mm_cvtps_ph(a, imm) __extension__ ({ \
+ __m128 __a = (a); \
+ (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)__a, (imm)); })
+
+#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
+ __m256 __a = (a); \
+ (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)__a, (imm)); })
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtph_ps(__m128i a)
+{
+ return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtph_ps(__m128i a)
+{
+ return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)a);
+}
+
+#endif /* __F16CINTRIN_H */
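A usage sketch for the F16C conversions (build with -mf16c): round-trip four floats through half precision, with immediate 0 selecting round-to-nearest-even for the narrowing direction:

  #include <x86intrin.h>

  static __m128 roundtrip_half(__m128 V) {
    __m128i Half = _mm_cvtps_ph(V, 0); // 4 x f32 -> 4 x f16 in the low 64 bits
    return _mm_cvtph_ps(Half);         // widen back to 4 x f32
  }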
diff --git a/contrib/llvm/tools/clang/lib/Headers/immintrin.h b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
index 15b65f3..cd733bf 100644
--- a/contrib/llvm/tools/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
@@ -98,4 +98,8 @@ _rdrand64_step(unsigned long long *__p)
#endif
#endif /* __RDRND__ */
+#ifdef __RTM__
+#include <rtmintrin.h>
+#endif
+
#endif /* __IMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/module.map b/contrib/llvm/tools/clang/lib/Headers/module.map
index 418ba50..b24bccc 100644
--- a/contrib/llvm/tools/clang/lib/Headers/module.map
+++ b/contrib/llvm/tools/clang/lib/Headers/module.map
@@ -25,6 +25,11 @@ module _Builtin_intrinsics [system] {
header "mmintrin.h"
}
+ explicit module f16c {
+ requires f16c
+ header "f16cintrin.h"
+ }
+
explicit module sse {
requires sse
export mmx
@@ -62,6 +67,12 @@ module _Builtin_intrinsics [system] {
header "nmmintrin.h"
}
+ explicit module sse4a {
+ requires sse4a
+ export sse3
+ header "ammintrin.h"
+ }
+
explicit module avx {
requires avx
export sse4_2
@@ -84,6 +95,11 @@ module _Builtin_intrinsics [system] {
header "bmi2intrin.h"
}
+ explicit module fma {
+ requires fma
+ header "fmaintrin.h"
+ }
+
explicit module fma4 {
requires fma4
export sse3
@@ -104,5 +120,26 @@ module _Builtin_intrinsics [system] {
requires mm3dnow
header "mm3dnow.h"
}
+
+ explicit module xop {
+ requires xop
+ export fma4
+ header "xopintrin.h"
+ }
+
+ explicit module aes_pclmul {
+ requires aes, pclmul
+ header "wmmintrin.h"
+ }
+
+ explicit module aes {
+ requires aes
+ header "__wmmintrin_aes.h"
+ }
+
+ explicit module pclmul {
+ requires pclmul
+ header "__wmmintrin_pclmul.h"
+ }
}
}
diff --git a/contrib/llvm/tools/clang/lib/Headers/rtmintrin.h b/contrib/llvm/tools/clang/lib/Headers/rtmintrin.h
new file mode 100644
index 0000000..bdc2b99
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/rtmintrin.h
@@ -0,0 +1,49 @@
+/*===---- rtmintrin.h - RTM intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <rtmintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#define _XBEGIN_STARTED (~0u)
+#define _XABORT_EXPLICIT (1 << 0)
+#define _XABORT_RETRY (1 << 1)
+#define _XABORT_CONFLICT (1 << 2)
+#define _XABORT_CAPACITY (1 << 3)
+#define _XABORT_DEBUG (1 << 4)
+#define _XABORT_NESTED (1 << 5)
+#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_xbegin(void)
+{
+ return __builtin_ia32_xbegin();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_xend(void)
+{
+ __builtin_ia32_xend();
+}
+
+#define _xabort(imm) __builtin_ia32_xabort((imm))
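A usage sketch of the canonical RTM pattern (build with -mrtm): attempt the work transactionally and report failure so the caller can take a fallback path such as a lock:

  #include <immintrin.h>

  static bool incrementTransactionally(int *Counter) {
    unsigned Status = _xbegin();
    if (Status == _XBEGIN_STARTED) {
      ++*Counter;      // transactional region
      _xend();         // commit
      return true;
    }
    // Aborted: Status carries the _XABORT_* bits; caller takes the slow path.
    return false;
  }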
diff --git a/contrib/llvm/tools/clang/lib/Headers/unwind.h b/contrib/llvm/tools/clang/lib/Headers/unwind.h
index a065920..6520b83 100644
--- a/contrib/llvm/tools/clang/lib/Headers/unwind.h
+++ b/contrib/llvm/tools/clang/lib/Headers/unwind.h
@@ -100,7 +100,7 @@ typedef enum {
_UVRSR_FAILED = 2
} _Unwind_VRS_Result;
-_Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context,
+_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *context,
_Unwind_VRS_RegClass regclass,
uint32_t regno,
_Unwind_VRS_DataRepresentation representation,
diff --git a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
index dca896f..369e3c2 100644
--- a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
@@ -31,48 +31,11 @@
#else
#ifdef __AES__
-
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_aesenc_si128(__m128i __V, __m128i __R)
-{
- return (__m128i)__builtin_ia32_aesenc128(__V, __R);
-}
-
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_aesenclast_si128(__m128i __V, __m128i __R)
-{
- return (__m128i)__builtin_ia32_aesenclast128(__V, __R);
-}
-
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_aesdec_si128(__m128i __V, __m128i __R)
-{
- return (__m128i)__builtin_ia32_aesdec128(__V, __R);
-}
-
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_aesdeclast_si128(__m128i __V, __m128i __R)
-{
- return (__m128i)__builtin_ia32_aesdeclast128(__V, __R);
-}
-
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_aesimc_si128(__m128i __V)
-{
- return (__m128i)__builtin_ia32_aesimc128(__V);
-}
-
-#define _mm_aeskeygenassist_si128(C, R) \
- __builtin_ia32_aeskeygenassist128((C), (R))
-
+#include <__wmmintrin_aes.h>
#endif /* __AES__ */
#ifdef __PCLMUL__
-
-#define _mm_clmulepi64_si128(__X, __Y, __I) \
- ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
- (__v2di)(__m128i)(__Y), (char)(__I)))
-
+#include <__wmmintrin_pclmul.h>
#endif /* __PCLMUL__ */
#endif /* __AES__ || __PCLMUL__ */
diff --git a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
index 556cd01..68ce106 100644
--- a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
@@ -58,6 +58,10 @@
#include <xopintrin.h>
#endif
+#ifdef __F16C__
+#include <f16cintrin.h>
+#endif
+
// FIXME: LWP
#endif /* __X86INTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
index e616157..e2480ec 100644
--- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
@@ -95,7 +95,8 @@ _mm_div_ps(__m128 a, __m128 b)
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_ss(__m128 a)
{
- return __builtin_ia32_sqrtss(a);
+ __m128 c = __builtin_ia32_sqrtss(a);
+ return (__m128) { c[0], a[1], a[2], a[3] };
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
@@ -107,7 +108,8 @@ _mm_sqrt_ps(__m128 a)
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rcp_ss(__m128 a)
{
- return __builtin_ia32_rcpss(a);
+ __m128 c = __builtin_ia32_rcpss(a);
+ return (__m128) { c[0], a[1], a[2], a[3] };
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
@@ -119,7 +121,8 @@ _mm_rcp_ps(__m128 a)
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rsqrt_ss(__m128 a)
{
- return __builtin_ia32_rsqrtss(a);
+ __m128 c = __builtin_ia32_rsqrtss(a);
+ return (__m128) { c[0], a[1], a[2], a[3] };
}
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
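These scalar-op fixes enforce the documented ss semantics: only lane 0 is computed, and lanes 1-3 must pass through from the input unchanged, which the bare builtins did not guarantee. The explicit element-wise merge used in the patch is equivalent to a move-scalar blend, sketched here:

  #include <xmmintrin.h>

  // _mm_move_ss takes lane 0 from its second operand and lanes 1..3 from
  // its first, so this matches { c[0], a[1], a[2], a[3] } above.
  static __m128 sqrt_ss_merged(__m128 a) {
    return _mm_move_ss(a, __builtin_ia32_sqrtss(a));
  }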
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
index bbfc1df..7dc0491 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
@@ -144,7 +144,7 @@ HMapBucket HeaderMap::getBucket(unsigned BucketNo) const {
sizeof(HMapHeader));
const HMapBucket *BucketPtr = BucketArray+BucketNo;
- if ((char*)(BucketPtr+1) > FileBuffer->getBufferEnd()) {
+ if ((const char*)(BucketPtr+1) > FileBuffer->getBufferEnd()) {
Result.Prefix = 0;
Result.Suffix = 0;
return Result; // Invalid buffer, corrupt hmap.
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
index bb3a673..67000b68 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/HeaderMap.h"
#include "clang/Lex/Lexer.h"
#include "clang/Basic/Diagnostic.h"
@@ -38,10 +39,11 @@ HeaderFileInfo::getControllingMacro(ExternalIdentifierLookup *External) {
ExternalHeaderFileInfoSource::~ExternalHeaderFileInfoSource() {}
-HeaderSearch::HeaderSearch(FileManager &FM, DiagnosticsEngine &Diags,
+HeaderSearch::HeaderSearch(llvm::IntrusiveRefCntPtr<HeaderSearchOptions> HSOpts,
+ FileManager &FM, DiagnosticsEngine &Diags,
const LangOptions &LangOpts,
const TargetInfo *Target)
- : FileMgr(FM), Diags(Diags), FrameworkMap(64),
+ : HSOpts(HSOpts), FileMgr(FM), FrameworkMap(64),
ModMap(FileMgr, *Diags.getClient(), LangOpts, Target)
{
AngledDirIdx = 0;
@@ -905,7 +907,20 @@ Module *HeaderSearch::loadFrameworkModule(StringRef Name,
SubmodulePath.push_back(Name);
// Walk the directory structure to find any enclosing frameworks.
+#ifdef LLVM_ON_UNIX
+ // Note: as an egregious but useful hack we use the real path here, because
+ // frameworks moving from top-level frameworks to embedded frameworks tend
+ // to be symlinked from the top-level location to the embedded location,
+ // and we need to resolve lookups as if we had found the embedded location.
+ char RealDirName[PATH_MAX];
+ StringRef DirName;
+ if (realpath(Dir->getName(), RealDirName))
+ DirName = RealDirName;
+ else
+ DirName = Dir->getName();
+#else
StringRef DirName = Dir->getName();
+#endif
do {
// Get the parent directory name.
DirName = llvm::sys::path::parent_path(DirName);
@@ -924,7 +939,33 @@ Module *HeaderSearch::loadFrameworkModule(StringRef Name,
TopFrameworkDir = Dir;
}
} while (true);
-
+
+ // Determine whether we're allowed to infer a module map.
+ bool canInfer = false;
+ if (llvm::sys::path::has_parent_path(TopFrameworkDir->getName())) {
+ // Figure out the parent path.
+ StringRef Parent = llvm::sys::path::parent_path(TopFrameworkDir->getName());
+ if (const DirectoryEntry *ParentDir = FileMgr.getDirectory(Parent)) {
+ // If there's a module map file in the parent directory, it can
+ // explicitly allow us to infer framework modules.
+ switch (loadModuleMapFile(ParentDir)) {
+ case LMM_AlreadyLoaded:
+ case LMM_NewlyLoaded: {
+ StringRef Name = llvm::sys::path::stem(TopFrameworkDir->getName());
+ canInfer = ModMap.canInferFrameworkModule(ParentDir, Name, IsSystem);
+ break;
+ }
+ case LMM_InvalidModuleMap:
+ case LMM_NoDirectory:
+ break;
+ }
+ }
+ }
+
+ // If we're not allowed to infer a module map, we're done.
+ if (!canInfer)
+ return 0;
+
// Try to infer a module map from the top-level framework directory.
Module *Result = ModMap.inferFrameworkModule(SubmodulePath.back(),
TopFrameworkDir,
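The realpath detour above normalizes symlinked framework locations before walking parent directories. A hedged sketch of the resolve-or-fall-back pattern it uses (POSIX only, matching the LLVM_ON_UNIX guard):

  #include <climits>
  #include <cstdlib>
  #include <string>

  static std::string resolveDir(const char *Name) {
    char Real[PATH_MAX];
    return ::realpath(Name, Real) ? std::string(Real) : std::string(Name);
  }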
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
index 5212dd8..a5ba7db 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -513,10 +513,13 @@ Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
// "fake" file source location at offset 1 so that the lexer will track our
// position within the file.
const unsigned StartOffset = 1;
- SourceLocation StartLoc = SourceLocation::getFromRawEncoding(StartOffset);
- Lexer TheLexer(StartLoc, LangOpts, Buffer->getBufferStart(),
+ SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
+ Lexer TheLexer(FileLoc, LangOpts, Buffer->getBufferStart(),
Buffer->getBufferStart(), Buffer->getBufferEnd());
-
+
+ // StartLoc will differ from FileLoc if there is a BOM that was skipped.
+ SourceLocation StartLoc = TheLexer.getSourceLocation();
+
bool InPreprocessorDirective = false;
Token TheTok;
Token IfStartTok;
@@ -1534,7 +1537,7 @@ FinishIdentifier:
/// isHexaLiteral - Return true if Start points to a hex constant,
/// in Microsoft mode (where this is supposed to be several different tokens).
-static bool isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
+bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
unsigned Size;
char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
if (C1 != '0')
@@ -1813,17 +1816,18 @@ void Lexer::LexCharConstant(Token &Result, const char *CurPtr,
while (C != '\'') {
// Skip escaped characters.
- if (C == '\\') {
- // Skip the escaped character.
- // FIXME: UCN's
- getAndAdvanceChar(CurPtr, Result);
- } else if (C == '\n' || C == '\r' || // Newline.
- (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ if (C == '\\')
+ C = getAndAdvanceChar(CurPtr, Result);
+
+ if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
Diag(BufferPtr, diag::ext_unterminated_char);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
- } else if (C == 0) {
+ }
+
+ if (C == 0) {
if (isCodeCompletionPoint(CurPtr-1)) {
PP->CodeCompleteNaturalLanguage();
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
@@ -1895,21 +1899,21 @@ bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
return false;
}
-// SkipBCPLComment - We have just read the // characters from input. Skip until
-// we find the newline character thats terminate the comment. Then update
-/// BufferPtr and return.
+/// We have just read the // characters from input. Skip until we find the
+/// newline character that terminates the comment. Then update BufferPtr and
+/// return.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
-bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
- // If BCPL comments aren't explicitly enabled for this language, emit an
+bool Lexer::SkipLineComment(Token &Result, const char *CurPtr) {
+ // If Line comments aren't explicitly enabled for this language, emit an
// extension warning.
- if (!LangOpts.BCPLComment && !isLexingRawMode()) {
- Diag(BufferPtr, diag::ext_bcpl_comment);
+ if (!LangOpts.LineComment && !isLexingRawMode()) {
+ Diag(BufferPtr, diag::ext_line_comment);
// Mark them enabled so we only emit one warning for this translation
// unit.
- LangOpts.BCPLComment = true;
+ LangOpts.LineComment = true;
}
// Scan over the body of the comment. The common case, when scanning, is that
@@ -1973,7 +1977,7 @@ bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
}
if (!isLexingRawMode())
- Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
+ Diag(OldPtr-1, diag::ext_multi_line_line_comment);
break;
}
}
@@ -2002,7 +2006,7 @@ bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
// If we are returning comments as tokens, return this comment as a token.
if (inKeepCommentMode())
- return SaveBCPLComment(Result, CurPtr);
+ return SaveLineComment(Result, CurPtr);
// If we are inside a preprocessor directive and we see the end of line,
// return immediately, so that the lexer can return this as an EOD token.
@@ -2026,9 +2030,9 @@ bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
return false;
}
-/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
-/// an appropriate way and return it.
-bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
+/// If in save-comment mode, package up this Line comment in an appropriate
+/// way and return it.
+bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
// If we're not in a preprocessor directive, just return the // comment
// directly.
FormTokenWithChars(Result, CurPtr, tok::comment);
@@ -2036,19 +2040,19 @@ bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
if (!ParsingPreprocessorDirective || LexingRawMode)
return true;
- // If this BCPL-style comment is in a macro definition, transmogrify it into
+ // If this Line-style comment is in a macro definition, transmogrify it into
// a C-style block comment.
bool Invalid = false;
std::string Spelling = PP->getSpelling(Result, &Invalid);
if (Invalid)
return true;
- assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
+ assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
Spelling[1] = '*'; // Change prefix to "/*".
Spelling += "*/"; // add suffix.
Result.setKind(tok::comment);
- PP->CreateString(&Spelling[0], Spelling.size(), Result,
+ PP->CreateString(Spelling, Result,
Result.getLocation(), Result.getLocation());
return true;
}
@@ -2179,7 +2183,8 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
#ifdef __SSE2__
__m128i Slashes = _mm_set1_epi8('/');
while (CurPtr+16 <= BufferEnd) {
- int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes));
+ int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
+ Slashes));
if (cmp != 0) {
// Adjust the pointer to point directly after the first slash. It's
// not necessary to set C here, it will be overwritten at the end of
@@ -2669,8 +2674,8 @@ LexNextToken:
// If the next token is obviously a // or /* */ comment, skip it efficiently
// too (without going through the big switch stmt).
if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
- LangOpts.BCPLComment && !LangOpts.TraditionalCPP) {
- if (SkipBCPLComment(Result, CurPtr+2))
+ LangOpts.LineComment && !LangOpts.TraditionalCPP) {
+ if (SkipLineComment(Result, CurPtr+2))
return; // There is a token to return.
goto SkipIgnoredUnits;
} else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
@@ -2955,19 +2960,19 @@ LexNextToken:
case '/':
// 6.4.9: Comments
Char = getCharAndSize(CurPtr, SizeTmp);
- if (Char == '/') { // BCPL comment.
- // Even if BCPL comments are disabled (e.g. in C89 mode), we generally
+ if (Char == '/') { // Line comment.
+ // Even if Line comments are disabled (e.g. in C89 mode), we generally
// want to lex this as a comment. There is one problem with this though,
// that in one particular corner case, this can change the behavior of the
      // resultant program. For example, in "foo //**/ bar", C89 would lex
- // this as "foo / bar" and langauges with BCPL comments would lex it as
+      // this as "foo / bar" and languages with Line comments would lex it as
// "foo". Check to see if the character after the second slash is a '*'.
// If so, we will lex that as a "/" instead of the start of a comment.
// However, we never do this in -traditional-cpp mode.
- if ((LangOpts.BCPLComment ||
+ if ((LangOpts.LineComment ||
getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') &&
!LangOpts.TraditionalCPP) {
- if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
+ if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
return; // There is a token to return.
// It is common for the tokens immediately after a // comment to be
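A minimal illustration of the corner case described in the comment above (a
hypothetical snippet, not part of this change):

    int foo = 6, bar = 3;
    int x = foo //**/ bar
        ;
    // C89 (no line comments): '/' plus the block comment '/**/' survive,
    // so the line lexes as "int x = foo / bar;" and x == 2.
    // C99/C++ (line comments): "//" swallows the rest of the line,
    // so the line lexes as "int x = foo;" and x == 6.
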
diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
index 9e3c778..e30612e 100644
--- a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
@@ -49,12 +49,46 @@ static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target) {
}
}
+static CharSourceRange MakeCharSourceRange(const LangOptions &Features,
+ FullSourceLoc TokLoc,
+ const char *TokBegin,
+ const char *TokRangeBegin,
+ const char *TokRangeEnd) {
+ SourceLocation Begin =
+ Lexer::AdvanceToTokenCharacter(TokLoc, TokRangeBegin - TokBegin,
+ TokLoc.getManager(), Features);
+ SourceLocation End =
+ Lexer::AdvanceToTokenCharacter(Begin, TokRangeEnd - TokRangeBegin,
+ TokLoc.getManager(), Features);
+ return CharSourceRange::getCharRange(Begin, End);
+}
+
+/// \brief Produce a diagnostic highlighting some portion of a literal.
+///
+/// Emits the diagnostic \p DiagID, highlighting the range of characters from
+/// \p TokRangeBegin (inclusive) to \p TokRangeEnd (exclusive), which must be
+/// a substring of a spelling buffer for the token beginning at \p TokBegin.
+static DiagnosticBuilder Diag(DiagnosticsEngine *Diags,
+ const LangOptions &Features, FullSourceLoc TokLoc,
+ const char *TokBegin, const char *TokRangeBegin,
+ const char *TokRangeEnd, unsigned DiagID) {
+ SourceLocation Begin =
+ Lexer::AdvanceToTokenCharacter(TokLoc, TokRangeBegin - TokBegin,
+ TokLoc.getManager(), Features);
+ return Diags->Report(Begin, DiagID) <<
+ MakeCharSourceRange(Features, TokLoc, TokBegin, TokRangeBegin, TokRangeEnd);
+}
+
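A hedged usage sketch of the new Diag helper (SpellingBuf and the offsets are
hypothetical; the diag:: ID is one of those used below): highlight just the
two bytes of an unknown escape inside the token spelling "ab\qcd".

    const char *TokBegin = SpellingBuf;   // the opening '"' of "ab\qcd"
    const char *EscBegin = TokBegin + 3;  // the '\'
    const char *EscEnd   = EscBegin + 2;  // one past the 'q'
    Diag(Diags, Features, TokLoc, TokBegin, EscBegin, EscEnd,
         diag::ext_unknown_escape) << "q";
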
/// ProcessCharEscape - Parse a standard C escape sequence, which can occur in
/// either a character or a string literal.
-static unsigned ProcessCharEscape(const char *&ThisTokBuf,
+static unsigned ProcessCharEscape(const char *ThisTokBegin,
+ const char *&ThisTokBuf,
const char *ThisTokEnd, bool &HadError,
FullSourceLoc Loc, unsigned CharWidth,
- DiagnosticsEngine *Diags) {
+ DiagnosticsEngine *Diags,
+ const LangOptions &Features) {
+ const char *EscapeBegin = ThisTokBuf;
+
// Skip the '\' char.
++ThisTokBuf;
@@ -75,12 +109,14 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
break;
case 'e':
if (Diags)
- Diags->Report(Loc, diag::ext_nonstandard_escape) << "e";
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_nonstandard_escape) << "e";
ResultChar = 27;
break;
case 'E':
if (Diags)
- Diags->Report(Loc, diag::ext_nonstandard_escape) << "E";
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_nonstandard_escape) << "E";
ResultChar = 27;
break;
case 'f':
@@ -102,7 +138,8 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
ResultChar = 0;
if (ThisTokBuf == ThisTokEnd || !isxdigit(*ThisTokBuf)) {
if (Diags)
- Diags->Report(Loc, diag::err_hex_escape_no_digits);
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_hex_escape_no_digits);
HadError = 1;
break;
}
@@ -126,7 +163,8 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
// Check for overflow.
if (Overflow && Diags) // Too many digits to fit in
- Diags->Report(Loc, diag::warn_hex_escape_too_large);
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::warn_hex_escape_too_large);
break;
}
case '0': case '1': case '2': case '3':
@@ -148,7 +186,8 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
// Check for overflow. Reject '\777', but not L'\777'.
if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
if (Diags)
- Diags->Report(Loc, diag::warn_octal_escape_too_large);
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::warn_octal_escape_too_large);
ResultChar &= ~0U >> (32-CharWidth);
}
break;
@@ -158,19 +197,22 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
case '(': case '{': case '[': case '%':
// GCC accepts these as extensions. We warn about them as such though.
if (Diags)
- Diags->Report(Loc, diag::ext_nonstandard_escape)
- << std::string()+(char)ResultChar;
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_nonstandard_escape)
+ << std::string(1, ResultChar);
break;
default:
if (Diags == 0)
break;
-
+
if (isgraph(ResultChar))
- Diags->Report(Loc, diag::ext_unknown_escape)
- << std::string()+(char)ResultChar;
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_unknown_escape)
+ << std::string(1, ResultChar);
else
- Diags->Report(Loc, diag::ext_unknown_escape)
- << "x"+llvm::utohexstr(ResultChar);
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::ext_unknown_escape)
+ << "x" + llvm::utohexstr(ResultChar);
break;
}
@@ -185,9 +227,6 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
FullSourceLoc Loc, DiagnosticsEngine *Diags,
const LangOptions &Features,
bool in_char_string_literal = false) {
- if (!Features.CPlusPlus && !Features.C99 && Diags)
- Diags->Report(Loc, diag::warn_ucn_not_valid_in_c89);
-
const char *UcnBegin = ThisTokBuf;
// Skip the '\u' char's.
@@ -195,7 +234,8 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
if (ThisTokBuf == ThisTokEnd || !isxdigit(*ThisTokBuf)) {
if (Diags)
- Diags->Report(Loc, diag::err_ucn_escape_no_digits);
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_ucn_escape_no_digits);
return false;
}
UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
@@ -208,12 +248,9 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
}
// If we didn't consume the proper number of digits, there is a problem.
if (UcnLenSave) {
- if (Diags) {
- SourceLocation L =
- Lexer::AdvanceToTokenCharacter(Loc, UcnBegin - ThisTokBegin,
- Loc.getManager(), Features);
- Diags->Report(L, diag::err_ucn_escape_incomplete);
- }
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_ucn_escape_incomplete);
return false;
}
@@ -221,7 +258,8 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
if ((0xD800 <= UcnVal && UcnVal <= 0xDFFF) || // surrogate codepoints
UcnVal > 0x10FFFF) { // maximum legal UTF32 value
if (Diags)
- Diags->Report(Loc, diag::err_ucn_escape_invalid);
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_ucn_escape_invalid);
return false;
}
@@ -231,22 +269,25 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
(UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60)) { // $, @, `
bool IsError = (!Features.CPlusPlus0x || !in_char_string_literal);
if (Diags) {
- SourceLocation UcnBeginLoc =
- Lexer::AdvanceToTokenCharacter(Loc, UcnBegin - ThisTokBegin,
- Loc.getManager(), Features);
char BasicSCSChar = UcnVal;
if (UcnVal >= 0x20 && UcnVal < 0x7f)
- Diags->Report(UcnBeginLoc, IsError ? diag::err_ucn_escape_basic_scs :
- diag::warn_cxx98_compat_literal_ucn_escape_basic_scs)
- << StringRef(&BasicSCSChar, 1);
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ IsError ? diag::err_ucn_escape_basic_scs :
+ diag::warn_cxx98_compat_literal_ucn_escape_basic_scs)
+ << StringRef(&BasicSCSChar, 1);
else
- Diags->Report(UcnBeginLoc, IsError ? diag::err_ucn_control_character :
- diag::warn_cxx98_compat_literal_ucn_control_character);
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ IsError ? diag::err_ucn_control_character :
+ diag::warn_cxx98_compat_literal_ucn_control_character);
}
if (IsError)
return false;
}
+ if (!Features.CPlusPlus && !Features.C99 && Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::warn_ucn_not_valid_in_c89);
+
return true;
}
@@ -365,10 +406,10 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
// Finally, we write the bytes into ResultBuf.
ResultBuf += bytesToWrite;
switch (bytesToWrite) { // note: everything falls through.
- case 4: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
- case 3: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
- case 2: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
- case 1: *--ResultBuf = (UTF8) (UcnVal | firstByteMark[bytesToWrite]);
+ case 4: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 3: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 2: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 1: *--ResultBuf = (UTF8) (UcnVal | firstByteMark[bytesToWrite]);
}
// Update the buffer.
ResultBuf += bytesToWrite;
@@ -417,19 +458,19 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
/// floating-constant: [C99 6.4.4.2]
/// TODO: add rules...
///
-NumericLiteralParser::
-NumericLiteralParser(const char *begin, const char *end,
- SourceLocation TokLoc, Preprocessor &pp)
- : PP(pp), ThisTokBegin(begin), ThisTokEnd(end) {
+NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
+ SourceLocation TokLoc,
+ Preprocessor &PP)
+ : PP(PP), ThisTokBegin(TokSpelling.begin()), ThisTokEnd(TokSpelling.end()) {
// This routine assumes that the range begin/end matches the regex for integer
// and FP constants (specifically, the 'pp-number' regex), and assumes that
// the byte at "*end" is both valid and not part of the regex. Because of
// this, it doesn't have to check for 'overscan' in various places.
- assert(!isalnum(*end) && *end != '.' && *end != '_' &&
+ assert(!isalnum(*ThisTokEnd) && *ThisTokEnd != '.' && *ThisTokEnd != '_' &&
"Lexer didn't maximally munch?");
- s = DigitsBegin = begin;
+ s = DigitsBegin = ThisTokBegin;
saw_exponent = false;
saw_period = false;
saw_ud_suffix = false;
@@ -451,7 +492,7 @@ NumericLiteralParser(const char *begin, const char *end,
if (s == ThisTokEnd) {
// Done.
} else if (isxdigit(*s) && !(*s == 'e' || *s == 'E')) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
diag::err_invalid_decimal_digit) << StringRef(s, 1);
hadError = true;
return;
@@ -469,7 +510,7 @@ NumericLiteralParser(const char *begin, const char *end,
if (first_non_digit != s) {
s = first_non_digit;
} else {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-begin),
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent - ThisTokBegin),
diag::err_exponent_has_no_digits);
hadError = true;
return;
@@ -565,7 +606,7 @@ NumericLiteralParser(const char *begin, const char *end,
case 'j':
case 'J':
if (isImaginary) break; // Cannot be repeated.
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
diag::ext_imaginary_constant);
isImaginary = true;
continue; // Success.
@@ -583,7 +624,7 @@ NumericLiteralParser(const char *begin, const char *end,
}
// Report an error if there are any.
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, SuffixBegin-begin),
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, SuffixBegin - ThisTokBegin),
isFPConstant ? diag::err_invalid_suffix_float_constant :
diag::err_invalid_suffix_integer_constant)
<< StringRef(SuffixBegin, ThisTokEnd-SuffixBegin);
@@ -619,7 +660,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
}
if (noSignificand) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin), \
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
diag::err_hexconstant_requires_digits);
hadError = true;
return;
@@ -722,6 +763,20 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
}
}
+static bool alwaysFitsInto64Bits(unsigned Radix, unsigned NumDigits) {
+ switch (Radix) {
+ case 2:
+ return NumDigits <= 64;
+ case 8:
+ return NumDigits <= 64 / 3; // Digits are groups of 3 bits.
+ case 10:
+ return NumDigits <= 19; // floor(log10(2^64))
+ case 16:
+ return NumDigits <= 64 / 4; // Digits are groups of 4 bits.
+ default:
+ llvm_unreachable("impossible Radix");
+ }
+}
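The digit bounds follow from simple arithmetic: 2^64 - 1 is
18446744073709551615, which has 20 decimal digits, so any 19-digit decimal
literal fits; 21 octal digits cover 63 bits, and 16 hex digits exactly 64.
A self-contained check of those bounds (plain host C++, not part of this
change):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Max19 = 9999999999999999999ULL; // largest 19-digit decimal
      assert(Max19 < UINT64_MAX);              // still below 2^64 - 1
      assert(21 * 3 <= 64 && 16 * 4 == 64);    // octal and hex bounds
      return 0;
    }
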
/// GetIntegerValue - Convert this numeric literal value to an APInt that
/// matches Val's input width. If there is an overflow, set Val to the low bits
@@ -733,13 +788,11 @@ bool NumericLiteralParser::GetIntegerValue(llvm::APInt &Val) {
// integer. This avoids the expensive overflow checking below, and
// handles the common cases that matter (small decimal integers and
// hex/octal values which don't overflow).
- unsigned MaxBitsPerDigit = 1;
- while ((1U << MaxBitsPerDigit) < radix)
- MaxBitsPerDigit += 1;
- if ((SuffixBegin - DigitsBegin) * MaxBitsPerDigit <= 64) {
+ const unsigned NumDigits = SuffixBegin - DigitsBegin;
+ if (alwaysFitsInto64Bits(radix, NumDigits)) {
uint64_t N = 0;
- for (s = DigitsBegin; s != SuffixBegin; ++s)
- N = N*radix + HexDigitValue(*s);
+ for (const char *Ptr = DigitsBegin; Ptr != SuffixBegin; ++Ptr)
+ N = N * radix + HexDigitValue(*Ptr);
// This will truncate the value to Val's input width. Simply check
// for overflow by comparing.
@@ -748,15 +801,15 @@ bool NumericLiteralParser::GetIntegerValue(llvm::APInt &Val) {
}
Val = 0;
- s = DigitsBegin;
+ const char *Ptr = DigitsBegin;
llvm::APInt RadixVal(Val.getBitWidth(), radix);
llvm::APInt CharVal(Val.getBitWidth(), 0);
llvm::APInt OldVal = Val;
bool OverflowOccurred = false;
- while (s < SuffixBegin) {
- unsigned C = HexDigitValue(*s++);
+ while (Ptr < SuffixBegin) {
+ unsigned C = HexDigitValue(*Ptr++);
// If this letter is out of bound for this radix, reject it.
assert(C < radix && "NumericLiteralParser ctor should have rejected this");
@@ -943,7 +996,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
HadError = true;
} else if (*buffer_begin > largest_character_for_kind) {
HadError = true;
- PP.Diag(Loc,diag::err_character_too_large);
+ PP.Diag(Loc, diag::err_character_too_large);
}
++buffer_begin;
@@ -951,9 +1004,9 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
}
unsigned CharWidth = getCharWidth(Kind, PP.getTargetInfo());
uint64_t result =
- ProcessCharEscape(begin, end, HadError,
- FullSourceLoc(Loc,PP.getSourceManager()),
- CharWidth, &PP.getDiagnostics());
+ ProcessCharEscape(TokBegin, begin, end, HadError,
+ FullSourceLoc(Loc,PP.getSourceManager()),
+ CharWidth, &PP.getDiagnostics(), PP.getLangOpts());
*buffer_begin++ = result;
}
@@ -1110,7 +1163,7 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
Kind = StringToks[i].getKind();
} else {
if (Diags)
- Diags->Report(FullSourceLoc(StringToks[i].getLocation(), SM),
+ Diags->Report(StringToks[i].getLocation(),
diag::err_unsupported_string_concat);
hadError = true;
}
@@ -1218,9 +1271,9 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
assert(ThisTokEnd >= ThisTokBuf && "malformed raw string literal");
// Copy the string over
- if (CopyStringFragment(StringRef(ThisTokBuf, ThisTokEnd - ThisTokBuf)))
- if (DiagnoseBadString(StringToks[i]))
- hadError = true;
+ if (CopyStringFragment(StringToks[i], ThisTokBegin,
+ StringRef(ThisTokBuf, ThisTokEnd - ThisTokBuf)))
+ hadError = true;
} else {
if (ThisTokBuf[0] != '"') {
// The file may have come from PCH and then changed after loading the
@@ -1251,9 +1304,9 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
} while (ThisTokBuf != ThisTokEnd && ThisTokBuf[0] != '\\');
// Copy the character span over.
- if (CopyStringFragment(StringRef(InStart, ThisTokBuf - InStart)))
- if (DiagnoseBadString(StringToks[i]))
- hadError = true;
+ if (CopyStringFragment(StringToks[i], ThisTokBegin,
+ StringRef(InStart, ThisTokBuf - InStart)))
+ hadError = true;
continue;
}
// Is this a Universal Character Name escape?
@@ -1266,9 +1319,9 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
}
// Otherwise, this is a non-UCN escape character. Process it.
unsigned ResultChar =
- ProcessCharEscape(ThisTokBuf, ThisTokEnd, hadError,
+ ProcessCharEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, hadError,
FullSourceLoc(StringToks[i].getLocation(), SM),
- CharByteWidth*8, Diags);
+ CharByteWidth*8, Diags, Features);
if (CharByteWidth == 4) {
// FIXME: Make the type of the result buffer correct instead of
@@ -1308,8 +1361,8 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
// Verify that pascal strings aren't too large.
if (GetStringLength() > 256) {
- if (Diags)
- Diags->Report(FullSourceLoc(StringToks[0].getLocation(), SM),
+ if (Diags)
+ Diags->Report(StringToks[0].getLocation(),
diag::err_pascal_string_too_long)
<< SourceRange(StringToks[0].getLocation(),
StringToks[NumStringToks-1].getLocation());
@@ -1319,9 +1372,9 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
} else if (Diags) {
// Complain if this string literal has too many characters.
unsigned MaxChars = Features.CPlusPlus? 65536 : Features.C99 ? 4095 : 509;
-
+
if (GetNumStringChars() > MaxChars)
- Diags->Report(FullSourceLoc(StringToks[0].getLocation(), SM),
+ Diags->Report(StringToks[0].getLocation(),
diag::ext_string_too_long)
<< GetNumStringChars() << MaxChars
<< (Features.CPlusPlus ? 2 : Features.C99 ? 1 : 0)
@@ -1330,21 +1383,61 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
}
}
-/// copyStringFragment - This function copies from Start to End into ResultPtr.
-/// Performs widening for multi-byte characters.
-bool StringLiteralParser::CopyStringFragment(StringRef Fragment) {
- return !ConvertUTF8toWide(CharByteWidth, Fragment, ResultPtr);
+static const char *resyncUTF8(const char *Err, const char *End) {
+ if (Err == End)
+ return End;
+ End = Err + std::min<unsigned>(getNumBytesForUTF8(*Err), End-Err);
+ while (++Err != End && (*Err & 0xC0) == 0x80)
+ ;
+ return Err;
}
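A standalone sketch of the resynchronization behaviour; numBytesForLead is a
stand-in assumption for llvm's getNumBytesForUTF8 (which lives in
llvm/Support/ConvertUTF), not the real API:

    #include <cassert>

    // Sequence length implied by the lead byte (sketch only).
    static unsigned numBytesForLead(unsigned char C) {
      if (C < 0xC0) return 1;  // ASCII or a stray continuation byte
      if (C < 0xE0) return 2;
      if (C < 0xF0) return 3;
      return 4;
    }

    static const char *resync(const char *Err, const char *End) {
      if (Err == End) return End;
      unsigned N = numBytesForLead((unsigned char)*Err);
      if (N > (unsigned)(End - Err)) N = (unsigned)(End - Err);
      End = Err + N;
      while (++Err != End && (*Err & 0xC0) == 0x80) {}
      return Err;
    }

    int main() {
      const char Bad[] = "\xE2\x28\xA1"; // invalid 3-byte sequence
      // The 0xE2 lead promises two continuation bytes, but 0x28 ('(')
      // is not one, so lexing resumes right after the bad lead byte.
      assert(resync(Bad, Bad + 3) == Bad + 1);
      return 0;
    }
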
-bool StringLiteralParser::DiagnoseBadString(const Token &Tok) {
+/// \brief This function copies from Fragment, which is a sequence of bytes
+/// within Tok's contents (which begin at TokBegin) into ResultPtr.
+/// Performs widening for multi-byte characters.
+bool StringLiteralParser::CopyStringFragment(const Token &Tok,
+ const char *TokBegin,
+ StringRef Fragment) {
+ const UTF8 *ErrorPtrTmp;
+ if (ConvertUTF8toWide(CharByteWidth, Fragment, ResultPtr, ErrorPtrTmp))
+ return false;
+
// If we see bad encoding for unprefixed string literals, warn and
// simply copy the byte values, for compatibility with gcc and older
// versions of clang.
bool NoErrorOnBadEncoding = isAscii();
- unsigned Msg = NoErrorOnBadEncoding ? diag::warn_bad_string_encoding :
- diag::err_bad_string_encoding;
- if (Diags)
- Diags->Report(FullSourceLoc(Tok.getLocation(), SM), Msg);
+ if (NoErrorOnBadEncoding) {
+ memcpy(ResultPtr, Fragment.data(), Fragment.size());
+ ResultPtr += Fragment.size();
+ }
+
+ if (Diags) {
+ const char *ErrorPtr = reinterpret_cast<const char *>(ErrorPtrTmp);
+
+ FullSourceLoc SourceLoc(Tok.getLocation(), SM);
+ const DiagnosticBuilder &Builder =
+ Diag(Diags, Features, SourceLoc, TokBegin,
+ ErrorPtr, resyncUTF8(ErrorPtr, Fragment.end()),
+ NoErrorOnBadEncoding ? diag::warn_bad_string_encoding
+ : diag::err_bad_string_encoding);
+
+ const char *NextStart = resyncUTF8(ErrorPtr, Fragment.end());
+ StringRef NextFragment(NextStart, Fragment.end()-NextStart);
+
+ // Decode into a dummy buffer.
+ SmallString<512> Dummy;
+ Dummy.reserve(Fragment.size() * CharByteWidth);
+ char *Ptr = Dummy.data();
+
+ while (!Builder.hasMaxRanges() &&
+ !ConvertUTF8toWide(CharByteWidth, NextFragment, Ptr, ErrorPtrTmp)) {
+ const char *ErrorPtr = reinterpret_cast<const char *>(ErrorPtrTmp);
+ NextStart = resyncUTF8(ErrorPtr, Fragment.end());
+ Builder << MakeCharSourceRange(Features, SourceLoc, TokBegin,
+ ErrorPtr, NextStart);
+ NextFragment = StringRef(NextStart, Fragment.end()-NextStart);
+ }
+ }
return !NoErrorOnBadEncoding;
}
@@ -1422,9 +1515,9 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
}
ByteNo -= Len;
} else {
- ProcessCharEscape(SpellingPtr, SpellingEnd, HadError,
+ ProcessCharEscape(SpellingStart, SpellingPtr, SpellingEnd, HadError,
FullSourceLoc(Tok.getLocation(), SM),
- CharByteWidth*8, Diags);
+ CharByteWidth*8, Diags, Features);
--ByteNo;
}
assert(!HadError && "This method isn't valid on erroneous strings");
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
index e2b251a..ed8873d 100644
--- a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
@@ -291,7 +291,7 @@ Token MacroArgs::StringifyArgument(const Token *ArgToks,
}
}
- PP.CreateString(&Result[0], Result.size(), Tok,
+ PP.CreateString(Result, Tok,
ExpansionLocStart, ExpansionLocEnd);
return Tok;
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
index 3d0c9a1..904f04e 100644
--- a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
@@ -15,47 +15,65 @@
#include "clang/Lex/Preprocessor.h"
using namespace clang;
-MacroInfo::MacroInfo(SourceLocation DefLoc) : Location(DefLoc) {
- IsFunctionLike = false;
- IsC99Varargs = false;
- IsGNUVarargs = false;
- IsBuiltinMacro = false;
- IsFromAST = false;
- ChangedAfterLoad = false;
- IsDisabled = false;
- IsUsed = false;
- IsAllowRedefinitionsWithoutWarning = false;
- IsWarnIfUnused = false;
- IsDefinitionLengthCached = false;
- IsPublic = true;
-
- ArgumentList = 0;
- NumArguments = 0;
+MacroInfo::MacroInfo(SourceLocation DefLoc)
+ : Location(DefLoc),
+ PreviousDefinition(0),
+ ArgumentList(0),
+ NumArguments(0),
+ IsDefinitionLengthCached(false),
+ IsFunctionLike(false),
+ IsC99Varargs(false),
+ IsGNUVarargs(false),
+ IsBuiltinMacro(false),
+ IsFromAST(false),
+ ChangedAfterLoad(false),
+ IsDisabled(false),
+ IsUsed(false),
+ IsAllowRedefinitionsWithoutWarning(false),
+ IsWarnIfUnused(false),
+ IsPublic(true),
+ IsHidden(false),
+ IsAmbiguous(false) {
}
-MacroInfo::MacroInfo(const MacroInfo &MI, llvm::BumpPtrAllocator &PPAllocator) {
- Location = MI.Location;
- EndLocation = MI.EndLocation;
- ReplacementTokens = MI.ReplacementTokens;
- IsFunctionLike = MI.IsFunctionLike;
- IsC99Varargs = MI.IsC99Varargs;
- IsGNUVarargs = MI.IsGNUVarargs;
- IsBuiltinMacro = MI.IsBuiltinMacro;
- IsFromAST = MI.IsFromAST;
- ChangedAfterLoad = MI.ChangedAfterLoad;
- IsDisabled = MI.IsDisabled;
- IsUsed = MI.IsUsed;
- IsAllowRedefinitionsWithoutWarning = MI.IsAllowRedefinitionsWithoutWarning;
- IsWarnIfUnused = MI.IsWarnIfUnused;
- IsDefinitionLengthCached = MI.IsDefinitionLengthCached;
- DefinitionLength = MI.DefinitionLength;
- IsPublic = MI.IsPublic;
-
- ArgumentList = 0;
- NumArguments = 0;
+MacroInfo::MacroInfo(const MacroInfo &MI, llvm::BumpPtrAllocator &PPAllocator)
+ : Location(MI.Location),
+ EndLocation(MI.EndLocation),
+ UndefLocation(MI.UndefLocation),
+ PreviousDefinition(0),
+ ArgumentList(0),
+ NumArguments(0),
+ ReplacementTokens(MI.ReplacementTokens),
+ DefinitionLength(MI.DefinitionLength),
+ IsDefinitionLengthCached(MI.IsDefinitionLengthCached),
+ IsFunctionLike(MI.IsFunctionLike),
+ IsC99Varargs(MI.IsC99Varargs),
+ IsGNUVarargs(MI.IsGNUVarargs),
+ IsBuiltinMacro(MI.IsBuiltinMacro),
+ IsFromAST(MI.IsFromAST),
+ ChangedAfterLoad(MI.ChangedAfterLoad),
+ IsDisabled(MI.IsDisabled),
+ IsUsed(MI.IsUsed),
+ IsAllowRedefinitionsWithoutWarning(MI.IsAllowRedefinitionsWithoutWarning),
+ IsWarnIfUnused(MI.IsWarnIfUnused),
+ IsPublic(MI.IsPublic),
+ IsHidden(MI.IsHidden),
+ IsAmbiguous(MI.IsAmbiguous) {
setArgumentList(MI.ArgumentList, MI.NumArguments, PPAllocator);
}
+const MacroInfo *MacroInfo::findDefinitionAtLoc(SourceLocation L,
+ SourceManager &SM) const {
+ assert(L.isValid() && "SourceLocation is invalid.");
+ for (const MacroInfo *MI = this; MI; MI = MI->PreviousDefinition) {
+ if (MI->Location.isInvalid() || // For macros defined on the command line.
+ SM.isBeforeInTranslationUnit(MI->Location, L))
+ return (MI->UndefLocation.isInvalid() ||
+ SM.isBeforeInTranslationUnit(L, MI->UndefLocation)) ? MI : NULL;
+ }
+ return NULL;
+}
+
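A hedged usage sketch (LatestMI, Loc and SM are hypothetical): walking the
PreviousDefinition chain yields the definition that was live at a given
location, or null if the macro was not defined (or already undefined) there.

    // Was the macro defined, and not yet #undef'd, at Loc?
    if (const MacroInfo *Def = LatestMI->findDefinitionAtLoc(Loc, SM)) {
      // Def is the definition visible at Loc.
    }
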
unsigned MacroInfo::getDefinitionLengthSlow(SourceManager &SM) const {
assert(!IsDefinitionLengthCached);
IsDefinitionLengthCached = true;
diff --git a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
index 5304311..8a936fa 100644
--- a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
@@ -16,6 +16,7 @@
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
@@ -26,6 +27,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include <stdlib.h>
using namespace clang;
Module::ExportDecl
@@ -75,7 +77,7 @@ ModuleMap::ModuleMap(FileManager &FileMgr, const DiagnosticConsumer &DC,
{
IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs(new DiagnosticIDs);
Diags = IntrusiveRefCntPtr<DiagnosticsEngine>(
- new DiagnosticsEngine(DiagIDs));
+ new DiagnosticsEngine(DiagIDs, new DiagnosticOptions));
Diags->setClient(DC.clone(*Diags), /*ShouldOwnClient=*/true);
SourceMgr = new SourceManager(*Diags, FileMgr);
}
@@ -96,16 +98,62 @@ void ModuleMap::setTarget(const TargetInfo &Target) {
this->Target = &Target;
}
+/// \brief "Sanitize" a filename so that it can be used as an identifier.
+static StringRef sanitizeFilenameAsIdentifier(StringRef Name,
+ SmallVectorImpl<char> &Buffer) {
+ if (Name.empty())
+ return Name;
+
+ // Check whether the filename is already an identifier; this is the common
+ // case.
+ bool isIdentifier = true;
+ for (unsigned I = 0, N = Name.size(); I != N; ++I) {
+ if (isalpha(Name[I]) || Name[I] == '_' || (isdigit(Name[I]) && I > 0))
+ continue;
+
+ isIdentifier = false;
+ break;
+ }
+
+ if (!isIdentifier) {
+ // If we don't already have something with the form of an identifier,
+ // create a buffer with the sanitized name.
+ Buffer.clear();
+ if (isdigit(Name[0]))
+ Buffer.push_back('_');
+ Buffer.reserve(Buffer.size() + Name.size());
+ for (unsigned I = 0, N = Name.size(); I != N; ++I) {
+ if (isalnum(Name[I]) || isspace(Name[I]))
+ Buffer.push_back(Name[I]);
+ else
+ Buffer.push_back('_');
+ }
+
+ Name = StringRef(Buffer.data(), Buffer.size());
+ }
+
+ while (llvm::StringSwitch<bool>(Name)
+#define KEYWORD(Keyword,Conditions) .Case(#Keyword, true)
+#define ALIAS(Keyword, AliasOf, Conditions) .Case(Keyword, true)
+#include "clang/Basic/TokenKinds.def"
+ .Default(false)) {
+ if (Name.data() != Buffer.data())
+ Buffer.append(Name.begin(), Name.end());
+ Buffer.push_back('_');
+ Name = StringRef(Buffer.data(), Buffer.size());
+ }
+
+ return Name;
+}
+
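Some worked examples for the sanitizer above (inputs hypothetical; use a
fresh buffer per call):

    SmallString<32> Buf;
    sanitizeFilenameAsIdentifier("AppKit", Buf);   // "AppKit"  (unchanged)
    sanitizeFilenameAsIdentifier("3DKit", Buf);    // "_3DKit"  (digit prefixed)
    sanitizeFilenameAsIdentifier("c++-lib", Buf);  // "c___lib" (punctuation)
    sanitizeFilenameAsIdentifier("class", Buf);    // "class_"  (keyword)
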
Module *ModuleMap::findModuleForHeader(const FileEntry *File) {
- llvm::DenseMap<const FileEntry *, Module *>::iterator Known
- = Headers.find(File);
+ HeadersMap::iterator Known = Headers.find(File);
if (Known != Headers.end()) {
- // If a header corresponds to an unavailable module, don't report
- // that it maps to anything.
- if (!Known->second->isAvailable())
+ // If a header is not available, don't report that it maps to anything.
+ if (!Known->second.isAvailable())
return 0;
- return Known->second;
+ return Known->second.getModule();
}
const DirectoryEntry *Dir = File->getDir();
@@ -134,7 +182,10 @@ Module *ModuleMap::findModuleForHeader(const FileEntry *File) {
for (unsigned I = SkippedDirs.size(); I != 0; --I) {
// Find or create the module that corresponds to this directory name.
- StringRef Name = llvm::sys::path::stem(SkippedDirs[I-1]->getName());
+ SmallString<32> NameBuf;
+ StringRef Name = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(SkippedDirs[I-1]->getName()),
+ NameBuf);
Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
Explicit).first;
@@ -148,9 +199,12 @@ Module *ModuleMap::findModuleForHeader(const FileEntry *File) {
}
// Infer a submodule with the same name as this header file.
- StringRef Name = llvm::sys::path::stem(File->getName());
+ SmallString<32> NameBuf;
+ StringRef Name = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(File->getName()), NameBuf);
Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
Explicit).first;
+ Result->TopHeaders.insert(File);
// If inferred submodules export everything they import, add a
// wildcard to the set of exports.
@@ -163,7 +217,7 @@ Module *ModuleMap::findModuleForHeader(const FileEntry *File) {
UmbrellaDirs[SkippedDirs[I]] = Result;
}
- Headers[File] = Result;
+ Headers[File] = KnownHeader(Result, /*Excluded=*/false);
// If a header corresponds to an unavailable module, don't report
// that it maps to anything.
@@ -188,10 +242,9 @@ Module *ModuleMap::findModuleForHeader(const FileEntry *File) {
}
bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) {
- llvm::DenseMap<const FileEntry *, Module *>::iterator Known
- = Headers.find(Header);
+ HeadersMap::iterator Known = Headers.find(Header);
if (Known != Headers.end())
- return !Known->second->isAvailable();
+ return !Known->second.isAvailable();
const DirectoryEntry *Dir = Header->getDir();
llvm::SmallVector<const DirectoryEntry *, 2> SkippedDirs;
@@ -216,7 +269,10 @@ bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) {
if (UmbrellaModule->InferSubmodules) {
for (unsigned I = SkippedDirs.size(); I != 0; --I) {
// Find or create the module that corresponds to this directory name.
- StringRef Name = llvm::sys::path::stem(SkippedDirs[I-1]->getName());
+ SmallString<32> NameBuf;
+ StringRef Name = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(SkippedDirs[I-1]->getName()),
+ NameBuf);
Found = lookupModuleQualified(Name, Found);
if (!Found)
return false;
@@ -225,7 +281,10 @@ bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) {
}
// Infer a submodule with the same name as this header file.
- StringRef Name = llvm::sys::path::stem(Header->getName());
+ SmallString<32> NameBuf;
+ StringRef Name = sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(Header->getName()),
+ NameBuf);
Found = lookupModuleQualified(Name, Found);
if (!Found)
return false;
@@ -287,8 +346,32 @@ ModuleMap::findOrCreateModule(StringRef Name, Module *Parent, bool IsFramework,
return std::make_pair(Result, true);
}
+bool ModuleMap::canInferFrameworkModule(const DirectoryEntry *ParentDir,
+ StringRef Name, bool &IsSystem) {
+ // Check whether we have already looked into the parent directory
+ // for a module map.
+ llvm::DenseMap<const DirectoryEntry *, InferredDirectory>::iterator
+ inferred = InferredDirectories.find(ParentDir);
+ if (inferred == InferredDirectories.end())
+ return false;
+
+ if (!inferred->second.InferModules)
+ return false;
+
+ // We're allowed to infer for this directory, but make sure it's okay
+ // to infer this particular module.
+ bool canInfer = std::find(inferred->second.ExcludedModules.begin(),
+ inferred->second.ExcludedModules.end(),
+ Name) == inferred->second.ExcludedModules.end();
+
+ if (canInfer && inferred->second.InferSystemModules)
+ IsSystem = true;
+
+ return canInfer;
+}
+
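A hedged usage sketch (MMap, ParentDir and the module name are hypothetical):

    bool IsSystem = false;
    if (MMap.canInferFrameworkModule(ParentDir, "CoreFoo", IsSystem)) {
      // A module named CoreFoo may be inferred here; IsSystem was set
      // if the wildcard declaration carried the [system] attribute.
    }
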
Module *
-ModuleMap::inferFrameworkModule(StringRef ModuleName,
+ModuleMap::inferFrameworkModule(StringRef ModuleName,
const DirectoryEntry *FrameworkDir,
bool IsSystem,
Module *Parent) {
@@ -297,7 +380,54 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
return Mod;
FileManager &FileMgr = SourceMgr->getFileManager();
-
+
+ // If the framework has a parent path from which we're allowed to infer
+ // a framework module, do so.
+ if (!Parent) {
+ bool canInfer = false;
+ if (llvm::sys::path::has_parent_path(FrameworkDir->getName())) {
+ // Figure out the parent path.
+ StringRef Parent = llvm::sys::path::parent_path(FrameworkDir->getName());
+ if (const DirectoryEntry *ParentDir = FileMgr.getDirectory(Parent)) {
+ // Check whether we have already looked into the parent directory
+ // for a module map.
+ llvm::DenseMap<const DirectoryEntry *, InferredDirectory>::iterator
+ inferred = InferredDirectories.find(ParentDir);
+ if (inferred == InferredDirectories.end()) {
+ // We haven't looked here before. Load a module map, if there is
+ // one.
+ SmallString<128> ModMapPath = Parent;
+ llvm::sys::path::append(ModMapPath, "module.map");
+ if (const FileEntry *ModMapFile = FileMgr.getFile(ModMapPath)) {
+ parseModuleMapFile(ModMapFile);
+ inferred = InferredDirectories.find(ParentDir);
+ }
+
+ if (inferred == InferredDirectories.end())
+ inferred = InferredDirectories.insert(
+ std::make_pair(ParentDir, InferredDirectory())).first;
+ }
+
+ if (inferred->second.InferModules) {
+ // We're allowed to infer for this directory, but make sure it's okay
+ // to infer this particular module.
+ StringRef Name = llvm::sys::path::filename(FrameworkDir->getName());
+ canInfer = std::find(inferred->second.ExcludedModules.begin(),
+ inferred->second.ExcludedModules.end(),
+ Name) == inferred->second.ExcludedModules.end();
+
+ if (inferred->second.InferSystemModules)
+ IsSystem = true;
+ }
+ }
+ }
+
+ // If we're not allowed to infer a framework module, don't.
+ if (!canInfer)
+ return 0;
+ }
+
+
// Look for an umbrella header.
SmallString<128> UmbrellaName = StringRef(FrameworkDir->getName());
llvm::sys::path::append(UmbrellaName, "Headers");
@@ -320,7 +450,7 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
// umbrella header "umbrella-header-name"
Result->Umbrella = UmbrellaHeader;
- Headers[UmbrellaHeader] = Result;
+ Headers[UmbrellaHeader] = KnownHeader(Result, /*Excluded=*/false);
UmbrellaDirs[UmbrellaHeader->getDir()] = Result;
// export *
@@ -343,12 +473,42 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
Dir != DirEnd && !EC; Dir.increment(EC)) {
if (!StringRef(Dir->path()).endswith(".framework"))
continue;
-
+
if (const DirectoryEntry *SubframeworkDir
= FileMgr.getDirectory(Dir->path())) {
+ // Note: as an egregious but useful hack, we use the real path here and
+ // check whether it is actually a subdirectory of the parent directory.
+ // This will not be the case if the 'subframework' is actually a symlink
+ // out to a top-level framework.
+#ifdef LLVM_ON_UNIX
+ char RealSubframeworkDirName[PATH_MAX];
+ if (realpath(Dir->path().c_str(), RealSubframeworkDirName)) {
+ StringRef SubframeworkDirName = RealSubframeworkDirName;
+
+ bool FoundParent = false;
+ do {
+ // Get the parent directory name.
+ SubframeworkDirName
+ = llvm::sys::path::parent_path(SubframeworkDirName);
+ if (SubframeworkDirName.empty())
+ break;
+
+ if (FileMgr.getDirectory(SubframeworkDirName) == FrameworkDir) {
+ FoundParent = true;
+ break;
+ }
+ } while (true);
+
+ if (!FoundParent)
+ continue;
+ }
+#endif
+
// FIXME: Do we want to warn about subframeworks without umbrella headers?
- inferFrameworkModule(llvm::sys::path::stem(Dir->path()), SubframeworkDir,
- IsSystem, Result);
+ SmallString<32> NameBuf;
+ inferFrameworkModule(sanitizeFilenameAsIdentifier(
+ llvm::sys::path::stem(Dir->path()), NameBuf),
+ SubframeworkDir, IsSystem, Result);
}
}
@@ -356,7 +516,7 @@ ModuleMap::inferFrameworkModule(StringRef ModuleName,
}
void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader){
- Headers[UmbrellaHeader] = Mod;
+ Headers[UmbrellaHeader] = KnownHeader(Mod, /*Excluded=*/false);
Mod->Umbrella = UmbrellaHeader;
UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
}
@@ -366,9 +526,13 @@ void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir) {
UmbrellaDirs[UmbrellaDir] = Mod;
}
-void ModuleMap::addHeader(Module *Mod, const FileEntry *Header) {
- Mod->Headers.push_back(Header);
- Headers[Header] = Mod;
+void ModuleMap::addHeader(Module *Mod, const FileEntry *Header,
+ bool Excluded) {
+ if (Excluded)
+ Mod->ExcludedHeaders.push_back(Header);
+ else
+ Mod->Headers.push_back(Header);
+ Headers[Header] = KnownHeader(Mod, Excluded);
}
const FileEntry *
@@ -388,12 +552,10 @@ void ModuleMap::dump() {
M->getValue()->print(llvm::errs(), 2);
llvm::errs() << "Headers:";
- for (llvm::DenseMap<const FileEntry *, Module *>::iterator
- H = Headers.begin(),
- HEnd = Headers.end();
+ for (HeadersMap::iterator H = Headers.begin(), HEnd = Headers.end();
H != HEnd; ++H) {
llvm::errs() << " \"" << H->first->getName() << "\" -> "
- << H->second->getFullModuleName() << "\n";
+ << H->second.getModule()->getFullModuleName() << "\n";
}
}
@@ -454,6 +616,7 @@ namespace clang {
EndOfFile,
HeaderKeyword,
Identifier,
+ ExcludeKeyword,
ExplicitKeyword,
ExportKeyword,
FrameworkKeyword,
@@ -490,10 +653,24 @@ namespace clang {
return StringRef(StringData, StringLength);
}
};
+
+ /// \brief The set of attributes that can be attached to a module.
+ struct Attributes {
+ Attributes() : IsSystem() { }
+
+ /// \brief Whether this is a system module.
+ unsigned IsSystem : 1;
+ };
+
class ModuleMapParser {
Lexer &L;
SourceManager &SourceMgr;
+
+ /// \brief Default target information, used only for string literal
+ /// parsing.
+ const TargetInfo *Target;
+
DiagnosticsEngine &Diags;
ModuleMap &Map;
@@ -505,11 +682,7 @@ namespace clang {
/// \brief Whether an error occurred.
bool HadError;
-
- /// \brief Default target information, used only for string literal
- /// parsing.
- OwningPtr<TargetInfo> Target;
-
+
/// \brief Stores string data for the various string literals referenced
/// during parsing.
llvm::BumpPtrAllocator StringData;
@@ -532,27 +705,25 @@ namespace clang {
bool parseModuleId(ModuleId &Id);
void parseModuleDecl();
void parseRequiresDecl();
- void parseHeaderDecl(SourceLocation UmbrellaLoc);
+ void parseHeaderDecl(SourceLocation UmbrellaLoc, SourceLocation ExcludeLoc);
void parseUmbrellaDirDecl(SourceLocation UmbrellaLoc);
void parseExportDecl();
- void parseInferredSubmoduleDecl(bool Explicit);
-
+ void parseInferredModuleDecl(bool Framework, bool Explicit);
+ bool parseOptionalAttributes(Attributes &Attrs);
+
const DirectoryEntry *getOverriddenHeaderSearchDir();
public:
explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
+ const TargetInfo *Target,
DiagnosticsEngine &Diags,
ModuleMap &Map,
const DirectoryEntry *Directory,
const DirectoryEntry *BuiltinIncludeDir)
- : L(L), SourceMgr(SourceMgr), Diags(Diags), Map(Map),
+ : L(L), SourceMgr(SourceMgr), Target(Target), Diags(Diags), Map(Map),
Directory(Directory), BuiltinIncludeDir(BuiltinIncludeDir),
HadError(false), ActiveModule(0)
{
- TargetOptions TargetOpts;
- TargetOpts.Triple = llvm::sys::getDefaultTargetTriple();
- Target.reset(TargetInfo::CreateTargetInfo(Diags, TargetOpts));
-
Tok.clear();
consumeToken();
}
@@ -575,6 +746,7 @@ retry:
Tok.StringLength = LToken.getLength();
Tok.Kind = llvm::StringSwitch<MMToken::TokenKind>(Tok.getString())
.Case("header", MMToken::HeaderKeyword)
+ .Case("exclude", MMToken::ExcludeKeyword)
.Case("explicit", MMToken::ExplicitKeyword)
.Case("export", MMToken::ExportKeyword)
.Case("framework", MMToken::FrameworkKeyword)
@@ -743,13 +915,6 @@ namespace {
/// 'explicit'[opt] 'framework'[opt] 'module' module-id attributes[opt]
/// { module-member* }
///
-/// attributes:
-/// attribute attributes
-/// attribute
-///
-/// attribute:
-/// [ identifier ]
-///
/// module-member:
/// requires-declaration
/// header-declaration
@@ -791,7 +956,7 @@ void ModuleMapParser::parseModuleDecl() {
// If we have a wildcard for the module name, this is an inferred submodule.
// Parse it.
if (Tok.is(MMToken::Star))
- return parseInferredSubmoduleDecl(Explicit);
+ return parseInferredModuleDecl(Framework, Explicit);
// Parse the module name.
ModuleId Id;
@@ -799,7 +964,7 @@ void ModuleMapParser::parseModuleDecl() {
HadError = true;
return;
}
-
+
if (ActiveModule) {
if (Id.size() > 1) {
Diags.Report(Id.front().second, diag::err_mmap_nested_submodule_id)
@@ -842,47 +1007,8 @@ void ModuleMapParser::parseModuleDecl() {
SourceLocation ModuleNameLoc = Id.back().second;
// Parse the optional attribute list.
- bool IsSystem = false;
- while (Tok.is(MMToken::LSquare)) {
- // Consume the '['.
- SourceLocation LSquareLoc = consumeToken();
-
- // Check whether we have an attribute name here.
- if (!Tok.is(MMToken::Identifier)) {
- Diags.Report(Tok.getLocation(), diag::err_mmap_expected_attribute);
- skipUntil(MMToken::RSquare);
- if (Tok.is(MMToken::RSquare))
- consumeToken();
- continue;
- }
-
- // Decode the attribute name.
- AttributeKind Attribute
- = llvm::StringSwitch<AttributeKind>(Tok.getString())
- .Case("system", AT_system)
- .Default(AT_unknown);
- switch (Attribute) {
- case AT_unknown:
- Diags.Report(Tok.getLocation(), diag::warn_mmap_unknown_attribute)
- << Tok.getString();
- break;
-
- case AT_system:
- IsSystem = true;
- break;
- }
- consumeToken();
-
- // Consume the ']'.
- if (!Tok.is(MMToken::RSquare)) {
- Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rsquare);
- Diags.Report(LSquareLoc, diag::note_mmap_lsquare_match);
- skipUntil(MMToken::RSquare);
- }
-
- if (Tok.is(MMToken::RSquare))
- consumeToken();
- }
+ Attributes Attrs;
+ parseOptionalAttributes(Attrs);
// Parse the opening brace.
if (!Tok.is(MMToken::LBrace)) {
@@ -925,7 +1051,7 @@ void ModuleMapParser::parseModuleDecl() {
ActiveModule = Map.findOrCreateModule(ModuleName, ActiveModule, Framework,
Explicit).first;
ActiveModule->DefinitionLoc = ModuleNameLoc;
- if (IsSystem)
+ if (Attrs.IsSystem)
ActiveModule->IsSystem = true;
bool Done = false;
@@ -953,14 +1079,25 @@ void ModuleMapParser::parseModuleDecl() {
case MMToken::UmbrellaKeyword: {
SourceLocation UmbrellaLoc = consumeToken();
if (Tok.is(MMToken::HeaderKeyword))
- parseHeaderDecl(UmbrellaLoc);
+ parseHeaderDecl(UmbrellaLoc, SourceLocation());
else
parseUmbrellaDirDecl(UmbrellaLoc);
break;
}
+ case MMToken::ExcludeKeyword: {
+ SourceLocation ExcludeLoc = consumeToken();
+ if (Tok.is(MMToken::HeaderKeyword)) {
+ parseHeaderDecl(SourceLocation(), ExcludeLoc);
+ } else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << "exclude";
+ }
+ break;
+ }
+
case MMToken::HeaderKeyword:
- parseHeaderDecl(SourceLocation());
+ parseHeaderDecl(SourceLocation(), SourceLocation());
break;
default:
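The new 'exclude' keyword lets a module map name headers that should not be
claimed by the module. A hypothetical module.map using it (the grammar for
the declaration is extended in the next hunk):

    module MyLib {
      umbrella header "MyLib.h"
      exclude header "MyLib_Internal.h" // lexed, but not part of MyLib
    }
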
@@ -1062,12 +1199,15 @@ static bool isBuiltinHeader(StringRef FileName) {
///
/// header-declaration:
/// 'umbrella'[opt] 'header' string-literal
-void ModuleMapParser::parseHeaderDecl(SourceLocation UmbrellaLoc) {
+/// 'exclude'[opt] 'header' string-literal
+void ModuleMapParser::parseHeaderDecl(SourceLocation UmbrellaLoc,
+ SourceLocation ExcludeLoc) {
assert(Tok.is(MMToken::HeaderKeyword));
consumeToken();
bool Umbrella = UmbrellaLoc.isValid();
-
+ bool Exclude = ExcludeLoc.isValid();
+ assert(!(Umbrella && Exclude) && "Cannot have both 'umbrella' and 'exclude'");
// Parse the header name.
if (!Tok.is(MMToken::StringLiteral)) {
Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
@@ -1145,15 +1285,15 @@ void ModuleMapParser::parseHeaderDecl(SourceLocation UmbrellaLoc) {
// FIXME: We shouldn't be eagerly stat'ing every file named in a module map.
// Come up with a lazy way to do this.
if (File) {
- if (const Module *OwningModule = Map.Headers[File]) {
+ if (ModuleMap::KnownHeader OwningModule = Map.Headers[File]) {
Diags.Report(FileNameLoc, diag::err_mmap_header_conflict)
- << FileName << OwningModule->getFullModuleName();
+ << FileName << OwningModule.getModule()->getFullModuleName();
HadError = true;
} else if (Umbrella) {
const DirectoryEntry *UmbrellaDir = File->getDir();
- if ((OwningModule = Map.UmbrellaDirs[UmbrellaDir])) {
+ if (Module *UmbrellaModule = Map.UmbrellaDirs[UmbrellaDir]) {
Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
- << OwningModule->getFullModuleName();
+ << UmbrellaModule->getFullModuleName();
HadError = true;
} else {
// Record this umbrella header.
@@ -1161,11 +1301,11 @@ void ModuleMapParser::parseHeaderDecl(SourceLocation UmbrellaLoc) {
}
} else {
// Record this header.
- Map.addHeader(ActiveModule, File);
+ Map.addHeader(ActiveModule, File, Exclude);
// If there is a builtin counterpart to this file, add it now.
if (BuiltinFile)
- Map.addHeader(ActiveModule, BuiltinFile);
+ Map.addHeader(ActiveModule, BuiltinFile, Exclude);
}
} else {
Diags.Report(FileNameLoc, diag::err_mmap_header_not_found)
@@ -1274,32 +1414,52 @@ void ModuleMapParser::parseExportDecl() {
ActiveModule->UnresolvedExports.push_back(Unresolved);
}
-void ModuleMapParser::parseInferredSubmoduleDecl(bool Explicit) {
+/// \brief Parse an inferred module declaration (wildcard modules).
+///
+/// module-declaration:
+/// 'explicit'[opt] 'framework'[opt] 'module' * attributes[opt]
+/// { inferred-module-member* }
+///
+/// inferred-module-member:
+/// 'export' '*'
+/// 'exclude' identifier
+void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
assert(Tok.is(MMToken::Star));
SourceLocation StarLoc = consumeToken();
bool Failed = false;
-
+
// Inferred modules must be submodules.
- if (!ActiveModule) {
+ if (!ActiveModule && !Framework) {
Diags.Report(StarLoc, diag::err_mmap_top_level_inferred_submodule);
Failed = true;
}
-
- // Inferred modules must have umbrella directories.
- if (!Failed && !ActiveModule->getUmbrellaDir()) {
- Diags.Report(StarLoc, diag::err_mmap_inferred_no_umbrella);
- Failed = true;
- }
-
- // Check for redefinition of an inferred module.
- if (!Failed && ActiveModule->InferSubmodules) {
- Diags.Report(StarLoc, diag::err_mmap_inferred_redef);
- if (ActiveModule->InferredSubmoduleLoc.isValid())
- Diags.Report(ActiveModule->InferredSubmoduleLoc,
- diag::note_mmap_prev_definition);
- Failed = true;
+
+ if (ActiveModule) {
+ // Inferred modules must have umbrella directories.
+ if (!Failed && !ActiveModule->getUmbrellaDir()) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_no_umbrella);
+ Failed = true;
+ }
+
+ // Check for redefinition of an inferred module.
+ if (!Failed && ActiveModule->InferSubmodules) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_redef);
+ if (ActiveModule->InferredSubmoduleLoc.isValid())
+ Diags.Report(ActiveModule->InferredSubmoduleLoc,
+ diag::note_mmap_prev_definition);
+ Failed = true;
+ }
+
+ // Check for the 'framework' keyword, which is not permitted here.
+ if (Framework) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_framework_submodule);
+ Framework = false;
+ }
+ } else if (Explicit) {
+ Diags.Report(StarLoc, diag::err_mmap_explicit_inferred_framework);
+ Explicit = false;
}
-
+
// If there were any problems with this inferred submodule, skip its body.
if (Failed) {
if (Tok.is(MMToken::LBrace)) {
@@ -1311,12 +1471,22 @@ void ModuleMapParser::parseInferredSubmoduleDecl(bool Explicit) {
HadError = true;
return;
}
-
- // Note that we have an inferred submodule.
- ActiveModule->InferSubmodules = true;
- ActiveModule->InferredSubmoduleLoc = StarLoc;
- ActiveModule->InferExplicitSubmodules = Explicit;
-
+
+ // Parse optional attributes.
+ Attributes Attrs;
+ parseOptionalAttributes(Attrs);
+
+ if (ActiveModule) {
+ // Note that we have an inferred submodule.
+ ActiveModule->InferSubmodules = true;
+ ActiveModule->InferredSubmoduleLoc = StarLoc;
+ ActiveModule->InferExplicitSubmodules = Explicit;
+ } else {
+ // We'll be inferring framework modules for this directory.
+ Map.InferredDirectories[Directory].InferModules = true;
+ Map.InferredDirectories[Directory].InferSystemModules = Attrs.IsSystem;
+ }
+
// Parse the opening brace.
if (!Tok.is(MMToken::LBrace)) {
Diags.Report(Tok.getLocation(), diag::err_mmap_expected_lbrace_wildcard);
@@ -1333,8 +1503,35 @@ void ModuleMapParser::parseInferredSubmoduleDecl(bool Explicit) {
case MMToken::RBrace:
Done = true;
break;
-
- case MMToken::ExportKeyword: {
+
+ case MMToken::ExcludeKeyword: {
+ if (ActiveModule) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_inferred_member)
+ << (ActiveModule != 0);
+ consumeToken();
+ break;
+ }
+
+ consumeToken();
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_missing_exclude_name);
+ break;
+ }
+
+ Map.InferredDirectories[Directory].ExcludedModules
+ .push_back(Tok.getString());
+ consumeToken();
+ break;
+ }
+
+ case MMToken::ExportKeyword:
+ if (!ActiveModule) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_inferred_member)
+ << (ActiveModule != 0);
+ consumeToken();
+ break;
+ }
+
consumeToken();
if (Tok.is(MMToken::Star))
ActiveModule->InferExportWildcard = true;
@@ -1343,14 +1540,14 @@ void ModuleMapParser::parseInferredSubmoduleDecl(bool Explicit) {
diag::err_mmap_expected_export_wildcard);
consumeToken();
break;
- }
-
+
case MMToken::ExplicitKeyword:
case MMToken::ModuleKeyword:
case MMToken::HeaderKeyword:
case MMToken::UmbrellaKeyword:
default:
- Diags.Report(Tok.getLocation(), diag::err_mmap_expected_wildcard_member);
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_inferred_member)
+ << (ActiveModule != 0);
consumeToken();
break;
}
@@ -1365,6 +1562,66 @@ void ModuleMapParser::parseInferredSubmoduleDecl(bool Explicit) {
}
}
+/// \brief Parse optional attributes.
+///
+/// attributes:
+/// attribute attributes
+/// attribute
+///
+/// attribute:
+/// [ identifier ]
+///
+/// \param Attrs Will be filled in with the parsed attributes.
+///
+/// \returns true if an error occurred, false otherwise.
+bool ModuleMapParser::parseOptionalAttributes(Attributes &Attrs) {
+ bool HadError = false;
+
+ while (Tok.is(MMToken::LSquare)) {
+ // Consume the '['.
+ SourceLocation LSquareLoc = consumeToken();
+
+ // Check whether we have an attribute name here.
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_attribute);
+ skipUntil(MMToken::RSquare);
+ if (Tok.is(MMToken::RSquare))
+ consumeToken();
+ HadError = true;
+ }
+
+ // Decode the attribute name.
+ AttributeKind Attribute
+ = llvm::StringSwitch<AttributeKind>(Tok.getString())
+ .Case("system", AT_system)
+ .Default(AT_unknown);
+ switch (Attribute) {
+ case AT_unknown:
+ Diags.Report(Tok.getLocation(), diag::warn_mmap_unknown_attribute)
+ << Tok.getString();
+ break;
+
+ case AT_system:
+ Attrs.IsSystem = true;
+ break;
+ }
+ consumeToken();
+
+ // Consume the ']'.
+ if (!Tok.is(MMToken::RSquare)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rsquare);
+ Diags.Report(LSquareLoc, diag::note_mmap_lsquare_match);
+ skipUntil(MMToken::RSquare);
+ HadError = true;
+ }
+
+ if (Tok.is(MMToken::RSquare))
+ consumeToken();
+ }
+
+ return HadError;
+}
+
/// \brief If there is a specific header search directory due to the presence
/// of an umbrella directory, retrieve that directory. Otherwise, returns null.
const DirectoryEntry *ModuleMapParser::getOverriddenHeaderSearchDir() {
@@ -1398,6 +1655,7 @@ bool ModuleMapParser::parseModuleMapFile() {
break;
case MMToken::Comma:
+ case MMToken::ExcludeKeyword:
case MMToken::ExportKeyword:
case MMToken::HeaderKeyword:
case MMToken::Identifier:
@@ -1428,7 +1686,7 @@ bool ModuleMap::parseModuleMapFile(const FileEntry *File) {
// Parse this module map file.
Lexer L(ID, SourceMgr->getBuffer(ID), *SourceMgr, MMapLangOpts);
Diags->getClient()->BeginSourceFile(MMapLangOpts);
- ModuleMapParser Parser(L, *SourceMgr, *Diags, *this, File->getDir(),
+ ModuleMapParser Parser(L, *SourceMgr, Target, *Diags, *this, File->getDir(),
BuiltinIncludeDir);
bool Result = Parser.parseModuleMapFile();
Diags->getClient()->EndSourceFile();
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
index 74b9cbc..b7c1846 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
@@ -1296,7 +1296,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
case tok::string_literal:
Filename = getSpelling(FilenameTok, FilenameBuffer);
End = FilenameTok.getLocation();
- CharEnd = End.getLocWithOffset(Filename.size());
+ CharEnd = End.getLocWithOffset(FilenameTok.getLength());
break;
case tok::less:
@@ -1306,7 +1306,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (ConcatenateIncludeName(FilenameBuffer, End))
return; // Found <eod> but no ">"? Diagnostic already emitted.
Filename = FilenameBuffer.str();
- CharEnd = getLocForEndOfToken(End);
+ CharEnd = End.getLocWithOffset(1);
break;
default:
Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
@@ -1314,6 +1314,8 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
return;
}
+ CharSourceRange FilenameRange
+ = CharSourceRange::getCharRange(FilenameTok.getLocation(), CharEnd);
StringRef OriginalFilename = Filename;
bool isAngled =
GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
@@ -1384,9 +1386,13 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
}
}
- // Notify the callback object that we've seen an inclusion directive.
- Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled, File,
- End, SearchPath, RelativePath);
+ if (!SuggestedModule) {
+ // Notify the callback object that we've seen an inclusion directive.
+ Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled,
+ FilenameRange, File,
+ SearchPath, RelativePath,
+ /*ImportedModule=*/0);
+ }
}
if (File == 0) {
@@ -1480,10 +1486,28 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Module *Imported
= TheModuleLoader.loadModule(IncludeTok.getLocation(), Path, Visibility,
/*IsIncludeDirective=*/true);
+ assert((Imported == 0 || Imported == SuggestedModule) &&
+ "the imported module is different than the suggested one");
// If this header isn't part of the module we're building, we're done.
- if (!BuildingImportedModule && Imported)
+ if (!BuildingImportedModule && Imported) {
+ if (Callbacks) {
+ Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled,
+ FilenameRange, File,
+ SearchPath, RelativePath, Imported);
+ }
return;
+ }
+ }
+
+ if (Callbacks && SuggestedModule) {
+ // We didn't notify the callback object that we've seen an inclusion
+ // directive before. Now that we are parsing the include normally and not
+ // turning it to a module import, notify the callback object.
+ Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled,
+ FilenameRange, File,
+ SearchPath, RelativePath,
+ /*ImportedModule=*/0);
}
// The #included file will be considered to be a system header if either it is
@@ -1849,7 +1873,7 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
MI->setDefinitionEndLoc(LastTok.getLocation());
// Finally, if this identifier already had a macro defined for it, verify that
- // the macro bodies are identical and free the old definition.
+ // the macro bodies are identical, and issue diagnostics if they are not.
if (MacroInfo *OtherMI = getMacroInfo(MacroNameTok.getIdentifierInfo())) {
// It is very common for system headers to have tons of macro redefinitions
// and for warnings to be disabled in system headers. If this is the case,
@@ -1870,7 +1894,6 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
}
if (OtherMI->isWarnIfUnused())
WarnUnusedMacroLocs.erase(OtherMI->getDefinitionLoc());
- ReleaseMacroInfo(OtherMI);
}
setMacroInfo(MacroNameTok.getIdentifierInfo(), MI);
@@ -1921,9 +1944,20 @@ void Preprocessor::HandleUndefDirective(Token &UndefTok) {
if (MI->isWarnIfUnused())
WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
- // Free macro definition.
- ReleaseMacroInfo(MI);
- setMacroInfo(MacroNameTok.getIdentifierInfo(), 0);
+ UndefineMacro(MacroNameTok.getIdentifierInfo(), MI,
+ MacroNameTok.getLocation());
+}
+
+void Preprocessor::UndefineMacro(IdentifierInfo *II, MacroInfo *MI,
+ SourceLocation UndefLoc) {
+ MI->setUndefLoc(UndefLoc);
+ if (MI->isFromAST()) {
+ MI->setChangedAfterLoad();
+ if (Listener)
+ Listener->UndefinedMacro(MI);
+ }
+
+ clearMacroInfo(II);
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
index 7cac63e..d5a88db 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
@@ -178,7 +178,9 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// preprocessor keywords and it wasn't macro expanded, it turns
// into a simple 0, unless it is the C++ keyword "true", in which case it
// turns into "1".
- if (ValueLive)
+ if (ValueLive &&
+ II->getTokenID() != tok::kw_true &&
+ II->getTokenID() != tok::kw_false)
PP.Diag(PeekTok, diag::warn_pp_undef_identifier) << II;
Result.Val = II->getTokenID() == tok::kw_true;
Result.Val.setIsUnsigned(false); // "0" is signed intmax_t 0.
@@ -204,8 +206,7 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
if (NumberInvalid)
return true; // a diagnostic was already reported
- NumericLiteralParser Literal(Spelling.begin(), Spelling.end(),
- PeekTok.getLocation(), PP);
+ NumericLiteralParser Literal(Spelling, PeekTok.getLocation(), PP);
if (Literal.hadError)
return true; // a diagnostic was already reported.
@@ -219,10 +220,15 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
if (Literal.hasUDSuffix())
PP.Diag(PeekTok, diag::err_pp_invalid_udl) << /*integer*/1;
- // long long is a C99 feature.
- if (!PP.getLangOpts().C99 && Literal.isLongLong)
- PP.Diag(PeekTok, PP.getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_longlong : diag::ext_longlong);
+ // 'long long' is a C99 or C++11 feature.
+ if (!PP.getLangOpts().C99 && Literal.isLongLong) {
+ if (PP.getLangOpts().CPlusPlus)
+ PP.Diag(PeekTok,
+ PP.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
+ else
+ PP.Diag(PeekTok, diag::ext_c99_longlong);
+ }
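
Taken together, the two changes above affect #if evaluation as in this
illustrative snippet:

    #if true         /* C++: no -Wundef warning; 'true' evaluates to 1 */
    #endif
    #if 0LL == 0     /* 'long long' literal in a #if expression:
                        ext_c99_longlong in C90, ext_cxx11_longlong in
                        C++98, warn_cxx98_compat_longlong in C++11 */
    #endif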
// Parse the integer literal into Result.
if (Literal.GetIntegerValue(Result.Val)) {
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
index e824320..d827f58 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -157,15 +157,15 @@ void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
/// EnterMacro - Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer.
void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
- MacroArgs *Args) {
+ MacroInfo *Macro, MacroArgs *Args) {
PushIncludeMacroStack();
CurDirLookup = 0;
if (NumCachedTokenLexers == 0) {
- CurTokenLexer.reset(new TokenLexer(Tok, ILEnd, Args, *this));
+ CurTokenLexer.reset(new TokenLexer(Tok, ILEnd, Macro, Args, *this));
} else {
CurTokenLexer.reset(TokenLexerCache[--NumCachedTokenLexers]);
- CurTokenLexer->Init(Tok, ILEnd, Args);
+ CurTokenLexer->Init(Tok, ILEnd, Macro, Args);
}
if (CurLexerKind != CLK_LexAfterModuleImport)
CurLexerKind = CLK_TokenLexer;
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
index ebdb644..eee4342 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -27,39 +27,138 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include <cstdio>
#include <ctime>
using namespace clang;
-MacroInfo *Preprocessor::getInfoForMacro(IdentifierInfo *II) const {
- assert(II->hasMacroDefinition() && "Identifier is not a macro!");
-
- llvm::DenseMap<IdentifierInfo*, MacroInfo*>::const_iterator Pos
- = Macros.find(II);
- if (Pos == Macros.end()) {
- // Load this macro from the external source.
- getExternalSource()->LoadMacroDefinition(II);
- Pos = Macros.find(II);
- }
+MacroInfo *Preprocessor::getMacroInfoHistory(IdentifierInfo *II) const {
+  assert(II->hadMacroDefinition() && "Identifier has never been a macro!");
+
+ macro_iterator Pos = Macros.find(II);
assert(Pos != Macros.end() && "Identifier macro info is missing!");
return Pos->second;
}
/// setMacroInfo - Specify a macro for this identifier.
///
-void Preprocessor::setMacroInfo(IdentifierInfo *II, MacroInfo *MI,
- bool LoadedFromAST) {
- if (MI) {
- Macros[II] = MI;
- II->setHasMacroDefinition(true);
- if (II->isFromAST() && !LoadedFromAST)
- II->setChangedSinceDeserialization();
- } else if (II->hasMacroDefinition()) {
- Macros.erase(II);
- II->setHasMacroDefinition(false);
- if (II->isFromAST() && !LoadedFromAST)
- II->setChangedSinceDeserialization();
+void Preprocessor::setMacroInfo(IdentifierInfo *II, MacroInfo *MI) {
+ assert(MI && "MacroInfo should be non-zero!");
+ assert(MI->getUndefLoc().isInvalid() &&
+ "Undefined macros cannot be registered");
+
+ MacroInfo *&StoredMI = Macros[II];
+ MI->setPreviousDefinition(StoredMI);
+ StoredMI = MI;
+ II->setHasMacroDefinition(MI->getUndefLoc().isInvalid());
+ if (II->isFromAST())
+ II->setChangedSinceDeserialization();
+}
+
+void Preprocessor::addLoadedMacroInfo(IdentifierInfo *II, MacroInfo *MI,
+ MacroInfo *Hint) {
+ assert(MI && "Missing macro?");
+ assert(MI->isFromAST() && "Macro is not from an AST?");
+ assert(!MI->getPreviousDefinition() && "Macro already in chain?");
+
+ MacroInfo *&StoredMI = Macros[II];
+
+ // Easy case: this is the first macro definition for this macro.
+ if (!StoredMI) {
+ StoredMI = MI;
+
+ if (MI->isDefined())
+ II->setHasMacroDefinition(true);
+ return;
+ }
+
+ // If this macro is a definition and this identifier has been neither
+ // defined nor undef'd in the current translation unit, add this macro
+ // to the end of the chain of definitions.
+ if (MI->isDefined() && StoredMI->isFromAST()) {
+ // Simple case: if this is the first actual definition, just put it at
+    // the beginning.
+ if (!StoredMI->isDefined()) {
+ MI->setPreviousDefinition(StoredMI);
+ StoredMI = MI;
+
+ II->setHasMacroDefinition(true);
+ return;
+ }
+
+ // Find the end of the definition chain.
+ MacroInfo *Prev;
+ MacroInfo *PrevPrev = StoredMI;
+ bool Ambiguous = StoredMI->isAmbiguous();
+ bool MatchedOther = false;
+ do {
+ Prev = PrevPrev;
+
+ // If the macros are not identical, we have an ambiguity.
+ if (!Prev->isIdenticalTo(*MI, *this)) {
+ if (!Ambiguous) {
+ Ambiguous = true;
+ StoredMI->setAmbiguous(true);
+ }
+ } else {
+ MatchedOther = true;
+ }
+ } while ((PrevPrev = Prev->getPreviousDefinition()) &&
+ PrevPrev->isDefined());
+
+ // If there are ambiguous definitions, and we didn't match any other
+ // definition, then mark us as ambiguous.
+ if (Ambiguous && !MatchedOther)
+ MI->setAmbiguous(true);
+
+ // Wire this macro information into the chain.
+ MI->setPreviousDefinition(Prev->getPreviousDefinition());
+ Prev->setPreviousDefinition(MI);
+ return;
+ }
+
+ // The macro is not a definition; put it at the end of the list.
+ MacroInfo *Prev = Hint? Hint : StoredMI;
+ while (Prev->getPreviousDefinition())
+ Prev = Prev->getPreviousDefinition();
+ Prev->setPreviousDefinition(MI);
+}
+
+void Preprocessor::makeLoadedMacroInfoVisible(IdentifierInfo *II,
+ MacroInfo *MI) {
+ assert(MI->isFromAST() && "Macro must be from the AST");
+
+ MacroInfo *&StoredMI = Macros[II];
+ if (StoredMI == MI) {
+ // Easy case: this is the first macro anyway.
+ II->setHasMacroDefinition(MI->isDefined());
+ return;
}
+
+ // Go find the macro and pull it out of the list.
+ // FIXME: Yes, this is O(N), and making a pile of macros visible or hidden
+ // would be quadratic, but it's extremely rare.
+ MacroInfo *Prev = StoredMI;
+ while (Prev->getPreviousDefinition() != MI)
+ Prev = Prev->getPreviousDefinition();
+ Prev->setPreviousDefinition(MI->getPreviousDefinition());
+ MI->setPreviousDefinition(0);
+
+ // Add the macro back to the list.
+ addLoadedMacroInfo(II, MI);
+
+ II->setHasMacroDefinition(StoredMI->isDefined());
+ if (II->isFromAST())
+ II->setChangedSinceDeserialization();
+}
+
+/// \brief Undefine a macro for this identifier.
+void Preprocessor::clearMacroInfo(IdentifierInfo *II) {
+ assert(II->hasMacroDefinition() && "Macro is not defined!");
+ assert(Macros[II]->getUndefLoc().isValid() && "Macro is still defined!");
+ II->setHasMacroDefinition(false);
+ if (II->isFromAST())
+ II->setChangedSinceDeserialization();
}
/// RegisterBuiltinMacro - Register the specified identifier in the identifier
@@ -100,6 +199,20 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__has_include_next = RegisterBuiltinMacro(*this, "__has_include_next");
Ident__has_warning = RegisterBuiltinMacro(*this, "__has_warning");
+ // Modules.
+ if (LangOpts.Modules) {
+ Ident__building_module = RegisterBuiltinMacro(*this, "__building_module");
+
+ // __MODULE__
+ if (!LangOpts.CurrentModule.empty())
+ Ident__MODULE__ = RegisterBuiltinMacro(*this, "__MODULE__");
+ else
+ Ident__MODULE__ = 0;
+ } else {
+ Ident__building_module = 0;
+ Ident__MODULE__ = 0;
+ }
+
// Microsoft Extensions.
if (LangOpts.MicrosoftExt)
Ident__pragma = RegisterBuiltinMacro(*this, "__pragma");
@@ -263,7 +376,23 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
}
}
}
-
+
+ // If the macro definition is ambiguous, complain.
+ if (MI->isAmbiguous()) {
+ Diag(Identifier, diag::warn_pp_ambiguous_macro)
+ << Identifier.getIdentifierInfo();
+ Diag(MI->getDefinitionLoc(), diag::note_pp_ambiguous_macro_chosen)
+ << Identifier.getIdentifierInfo();
+ for (MacroInfo *PrevMI = MI->getPreviousDefinition();
+ PrevMI && PrevMI->isDefined();
+ PrevMI = PrevMI->getPreviousDefinition()) {
+ if (PrevMI->isAmbiguous()) {
+ Diag(PrevMI->getDefinitionLoc(), diag::note_pp_ambiguous_macro_other)
+ << Identifier.getIdentifierInfo();
+ }
+ }
+ }
+
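
Ambiguity arises when distinct modules export different definitions of the same
macro. A sketch, assuming -fmodules with the @import syntax of this era (module
names and values invented):

    @import A;       /* provides: #define PI 3.14f */
    @import B;       /* provides: #define PI 3.141592653589793 */
    double p = PI;   /* warn_pp_ambiguous_macro fires here, with notes at
                        the chosen definition and each competing one */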
// If we started lexing a macro, enter the macro expansion body.
// If this macro expands to no tokens, don't bother to push it onto the
@@ -337,7 +466,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
}
// Start expanding the macro.
- EnterMacro(Identifier, ExpansionEnd, Args);
+ EnterMacro(Identifier, ExpansionEnd, MI, Args);
// Now that the macro is at the top of the include stack, ask the
// preprocessor to read the next token from it.
@@ -581,27 +710,27 @@ static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"
};
- char TmpBuffer[32];
-#ifdef LLVM_ON_WIN32
- sprintf(TmpBuffer, "\"%s %2d %4d\"", Months[TM->tm_mon], TM->tm_mday,
- TM->tm_year+1900);
-#else
- snprintf(TmpBuffer, sizeof(TmpBuffer), "\"%s %2d %4d\"", Months[TM->tm_mon], TM->tm_mday,
- TM->tm_year+1900);
-#endif
-
- Token TmpTok;
- TmpTok.startToken();
- PP.CreateString(TmpBuffer, strlen(TmpBuffer), TmpTok);
- DATELoc = TmpTok.getLocation();
-
-#ifdef LLVM_ON_WIN32
- sprintf(TmpBuffer, "\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min, TM->tm_sec);
-#else
- snprintf(TmpBuffer, sizeof(TmpBuffer), "\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min, TM->tm_sec);
-#endif
- PP.CreateString(TmpBuffer, strlen(TmpBuffer), TmpTok);
- TIMELoc = TmpTok.getLocation();
+ {
+ SmallString<32> TmpBuffer;
+ llvm::raw_svector_ostream TmpStream(TmpBuffer);
+ TmpStream << llvm::format("\"%s %2d %4d\"", Months[TM->tm_mon],
+ TM->tm_mday, TM->tm_year + 1900);
+ Token TmpTok;
+ TmpTok.startToken();
+ PP.CreateString(TmpStream.str(), TmpTok);
+ DATELoc = TmpTok.getLocation();
+ }
+
+ {
+ SmallString<32> TmpBuffer;
+ llvm::raw_svector_ostream TmpStream(TmpBuffer);
+ TmpStream << llvm::format("\"%02d:%02d:%02d\"",
+ TM->tm_hour, TM->tm_min, TM->tm_sec);
+ Token TmpTok;
+ TmpTok.startToken();
+ PP.CreateString(TmpStream.str(), TmpTok);
+ TIMELoc = TmpTok.getLocation();
+ }
}
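
The rewrite replaces the fixed-size sprintf buffers with the usual LLVM
formatting idiom. A standalone sketch of that idiom (function name invented):

    #include <string>
    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Format.h"
    #include "llvm/Support/raw_ostream.h"

    // Format into a growable SmallString: no fixed-size char array to
    // overflow, and no Win32 sprintf/snprintf split as in the old code.
    static std::string formatTime(int H, int M, int S) {
      llvm::SmallString<32> Buf;
      llvm::raw_svector_ostream OS(Buf);
      OS << llvm::format("\"%02d:%02d:%02d\"", H, M, S);
      return OS.str().str(); // str() flushes and returns a StringRef
    }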
@@ -616,7 +745,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
Feature = Feature.substr(2, Feature.size() - 4);
return llvm::StringSwitch<bool>(Feature)
- .Case("address_sanitizer", LangOpts.AddressSanitizer)
+ .Case("address_sanitizer", LangOpts.SanitizeAddress)
.Case("attribute_analyzer_noreturn", true)
.Case("attribute_availability", true)
.Case("attribute_availability_with_message", true)
@@ -641,8 +770,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
// Objective-C features
.Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
.Case("objc_arc", LangOpts.ObjCAutoRefCount)
- .Case("objc_arc_weak", LangOpts.ObjCAutoRefCount &&
- LangOpts.ObjCRuntimeHasWeak)
+ .Case("objc_arc_weak", LangOpts.ObjCARCWeak)
.Case("objc_default_synthesize_properties", LangOpts.ObjC2)
.Case("objc_fixed_enum", LangOpts.ObjC2)
.Case("objc_instancetype", LangOpts.ObjC2)
@@ -716,22 +844,12 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
// "struct __is_empty" parsing hack hasn't been needed in this
// translation unit. If it has, __is_empty reverts to a normal
// identifier and __has_feature(is_empty) evaluates false.
- .Case("is_empty",
- LangOpts.CPlusPlus &&
- PP.getIdentifierInfo("__is_empty")->getTokenID()
- != tok::identifier)
+ .Case("is_empty", LangOpts.CPlusPlus)
.Case("is_enum", LangOpts.CPlusPlus)
.Case("is_final", LangOpts.CPlusPlus)
.Case("is_literal", LangOpts.CPlusPlus)
.Case("is_standard_layout", LangOpts.CPlusPlus)
- // __is_pod is available only if the horrible
- // "struct __is_pod" parsing hack hasn't been needed in this
- // translation unit. If it has, __is_pod reverts to a normal
- // identifier and __has_feature(is_pod) evaluates false.
- .Case("is_pod",
- LangOpts.CPlusPlus &&
- PP.getIdentifierInfo("__is_pod")->getTokenID()
- != tok::identifier)
+ .Case("is_pod", LangOpts.CPlusPlus)
.Case("is_polymorphic", LangOpts.CPlusPlus)
.Case("is_trivial", LangOpts.CPlusPlus)
.Case("is_trivially_assignable", LangOpts.CPlusPlus)
@@ -807,22 +925,30 @@ static bool HasAttribute(const IdentifierInfo *II) {
static bool EvaluateHasIncludeCommon(Token &Tok,
IdentifierInfo *II, Preprocessor &PP,
const DirectoryLookup *LookupFrom) {
- SourceLocation LParenLoc;
+ // Save the location of the current token. If a '(' is later found, use
+ // that location. If no, use the end of this location instead.
+  // that location. If not, use the end of this token's location instead.
// Get '('.
PP.LexNonComment(Tok);
// Ensure we have a '('.
if (Tok.isNot(tok::l_paren)) {
- PP.Diag(Tok.getLocation(), diag::err_pp_missing_lparen) << II->getName();
- return false;
- }
-
- // Save '(' location for possible missing ')' message.
- LParenLoc = Tok.getLocation();
+ // No '(', use end of last token.
+ LParenLoc = PP.getLocForEndOfToken(LParenLoc);
+ PP.Diag(LParenLoc, diag::err_pp_missing_lparen) << II->getName();
+ // If the next token looks like a filename or the start of one,
+ // assume it is and process it as such.
+ if (!Tok.is(tok::angle_string_literal) && !Tok.is(tok::string_literal) &&
+ !Tok.is(tok::less))
+ return false;
+ } else {
+ // Save '(' location for possible missing ')' message.
+ LParenLoc = Tok.getLocation();
- // Get the file name.
- PP.getCurrentLexer()->LexIncludeFilename(Tok);
+ // Get the file name.
+ PP.getCurrentLexer()->LexIncludeFilename(Tok);
+ }
// Reserve a buffer to get the spelling.
SmallString<128> FilenameBuffer;
@@ -847,8 +973,11 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
// This could be a <foo/bar.h> file coming from a macro expansion. In this
// case, glue the tokens together into FilenameBuffer and interpret those.
FilenameBuffer.push_back('<');
- if (PP.ConcatenateIncludeName(FilenameBuffer, EndLoc))
+ if (PP.ConcatenateIncludeName(FilenameBuffer, EndLoc)) {
+ // Let the caller know a <eod> was found by changing the Token kind.
+ Tok.setKind(tok::eod);
return false; // Found <eod> but no ">"? Diagnostic already emitted.
+ }
Filename = FilenameBuffer.str();
break;
default:
@@ -856,12 +985,15 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
return false;
}
+ SourceLocation FilenameLoc = Tok.getLocation();
+
// Get ')'.
PP.LexNonComment(Tok);
// Ensure we have a trailing ).
if (Tok.isNot(tok::r_paren)) {
- PP.Diag(Tok.getLocation(), diag::err_pp_missing_rparen) << II->getName();
+ PP.Diag(PP.getLocForEndOfToken(FilenameLoc), diag::err_pp_missing_rparen)
+ << II->getName();
PP.Diag(LParenLoc, diag::note_matching) << "(";
return false;
}
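
For reference, a well-formed use of the builtin whose error recovery is
improved above:

    #if __has_include(<stdint.h>)
    #  include <stdint.h>
    #endif

With this change, a missing '(' or ')' is diagnosed at the end of the
preceding token, and a filename that follows a missing '(' is still consumed
so that evaluation can continue.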
@@ -909,6 +1041,47 @@ static bool EvaluateHasIncludeNext(Token &Tok,
return EvaluateHasIncludeCommon(Tok, II, PP, Lookup);
}
+/// \brief Process __building_module(identifier) expression.
+/// \returns true if we are building the named module, false otherwise.
+static bool EvaluateBuildingModule(Token &Tok,
+ IdentifierInfo *II, Preprocessor &PP) {
+ // Get '('.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a '('.
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pp_missing_lparen) << II->getName();
+ return false;
+ }
+
+ // Save '(' location for possible missing ')' message.
+ SourceLocation LParenLoc = Tok.getLocation();
+
+ // Get the module name.
+ PP.LexNonComment(Tok);
+
+ // Ensure that we have an identifier.
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected_id_building_module);
+ return false;
+ }
+
+ bool Result
+ = Tok.getIdentifierInfo()->getName() == PP.getLangOpts().CurrentModule;
+
+ // Get ')'.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a trailing ).
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pp_missing_rparen) << II->getName();
+ PP.Diag(LParenLoc, diag::note_matching) << "(";
+ return false;
+ }
+
+ return Result;
+}
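
A usage sketch for the new builtin and its companion macro (module name
invented; __MODULE__ is only registered while a named module is being built):

    #if __building_module(MyModule)
      /* seen only while building the module named 'MyModule' */
    #endif
    /* __MODULE__ expands to the current module's name as an identifier */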
+
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
/// as a builtin macro, handle it and return the next token as 'Tok'.
void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
@@ -1093,7 +1266,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
else
Value = EvaluateHasIncludeNext(Tok, II, *this);
OS << (int)Value;
- Tok.setKind(tok::numeric_constant);
+ if (Tok.is(tok::r_paren))
+ Tok.setKind(tok::numeric_constant);
} else if (II == Ident__has_warning) {
// The argument should be a parenthesized string literal.
// The argument to these builtins should be a parenthesized identifier.
@@ -1164,11 +1338,22 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
OS << (int)Value;
Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__building_module) {
+ // The argument to this builtin should be an identifier. The
+ // builtin evaluates to 1 when that identifier names the module we are
+ // currently building.
+ OS << (int)EvaluateBuildingModule(Tok, II, *this);
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__MODULE__) {
+ // The current module as an identifier.
+ OS << getLangOpts().CurrentModule;
+ IdentifierInfo *ModuleII = getIdentifierInfo(getLangOpts().CurrentModule);
+ Tok.setIdentifierInfo(ModuleII);
+ Tok.setKind(ModuleII->getTokenID());
} else {
llvm_unreachable("Unknown identifier!");
}
- CreateString(OS.str().data(), OS.str().size(), Tok,
- Tok.getLocation(), Tok.getLocation());
+ CreateString(OS.str(), Tok, Tok.getLocation(), Tok.getLocation());
}
void Preprocessor::markMacroAsUsed(MacroInfo *MI) {
diff --git a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
index 67738e9..b167172 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
@@ -198,12 +198,11 @@ bool PTHLexer::SkipBlock() {
assert(LastHashTokPtr && "No known '#' token.");
const unsigned char* HashEntryI = 0;
- uint32_t Offset;
uint32_t TableIdx;
do {
// Read the token offset from the side-table.
- Offset = ReadLE32(CurPPCondPtr);
+ uint32_t Offset = ReadLE32(CurPPCondPtr);
// Read the target table index from the side-table.
TableIdx = ReadLE32(CurPPCondPtr);
@@ -223,13 +222,11 @@ bool PTHLexer::SkipBlock() {
PPCond + TableIdx*(sizeof(uint32_t)*2);
assert(NextPPCondPtr >= CurPPCondPtr);
// Read where we should jump to.
- uint32_t TmpOffset = ReadLE32(NextPPCondPtr);
- const unsigned char* HashEntryJ = TokBuf + TmpOffset;
+ const unsigned char* HashEntryJ = TokBuf + ReadLE32(NextPPCondPtr);
if (HashEntryJ <= LastHashTokPtr) {
// Jump directly to the next entry in the side table.
HashEntryI = HashEntryJ;
- Offset = TmpOffset;
TableIdx = ReadLE32(NextPPCondPtr);
CurPPCondPtr = NextPPCondPtr;
}
@@ -448,8 +445,8 @@ PTHManager *PTHManager::Create(const std::string &file,
// Get the buffer ranges and check if there are at least three 32-bit
// words at the end of the file.
- const unsigned char *BufBeg = (unsigned char*)File->getBufferStart();
- const unsigned char *BufEnd = (unsigned char*)File->getBufferEnd();
+ const unsigned char *BufBeg = (const unsigned char*)File->getBufferStart();
+ const unsigned char *BufEnd = (const unsigned char*)File->getBufferEnd();
// Check the prologue of the file.
if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 4 + 4) ||
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
index c9cc4ad..e7e6c37 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -43,7 +43,6 @@ void EmptyPragmaHandler::HandlePragma(Preprocessor &PP,
// PragmaNamespace Implementation.
//===----------------------------------------------------------------------===//
-
PragmaNamespace::~PragmaNamespace() {
for (llvm::StringMap<PragmaHandler*>::iterator
I = Handlers.begin(), E = Handlers.end(); I != E; ++I)
@@ -251,7 +250,7 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
// where we can lex it.
Token TmpTok;
TmpTok.startToken();
- CreateString(&StrVal[0], StrVal.size(), TmpTok);
+ CreateString(StrVal, TmpTok);
SourceLocation TokLoc = TmpTok.getLocation();
// Make and enter a lexer object so that we lex and expand the tokens just
@@ -683,7 +682,7 @@ IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
Token MacroTok;
MacroTok.startToken();
MacroTok.setKind(tok::raw_identifier);
- CreateString(&StrVal[1], StrVal.size() - 2, MacroTok);
+ CreateString(StringRef(&StrVal[1], StrVal.size() - 2), MacroTok);
// Get the IdentifierInfo of MacroToPushTok.
return LookUpIdentifierInfo(MacroTok);
@@ -733,19 +732,22 @@ void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
llvm::DenseMap<IdentifierInfo*, std::vector<MacroInfo*> >::iterator iter =
PragmaPushMacroInfo.find(IdentInfo);
if (iter != PragmaPushMacroInfo.end()) {
- // Release the MacroInfo currently associated with IdentInfo.
- MacroInfo *CurrentMI = getMacroInfo(IdentInfo);
- if (CurrentMI) {
+ // Forget the MacroInfo currently associated with IdentInfo.
+ if (MacroInfo *CurrentMI = getMacroInfo(IdentInfo)) {
if (CurrentMI->isWarnIfUnused())
WarnUnusedMacroLocs.erase(CurrentMI->getDefinitionLoc());
- ReleaseMacroInfo(CurrentMI);
+ UndefineMacro(IdentInfo, CurrentMI, MessageLoc);
}
// Get the MacroInfo we want to reinstall.
MacroInfo *MacroToReInstall = iter->second.back();
- // Reinstall the previously pushed macro.
- setMacroInfo(IdentInfo, MacroToReInstall);
+ if (MacroToReInstall) {
+ // Reinstall the previously pushed macro.
+ setMacroInfo(IdentInfo, MacroToReInstall);
+ } else if (IdentInfo->hasMacroDefinition()) {
+ clearMacroInfo(IdentInfo);
+ }
// Pop PragmaPushMacroInfo stack.
iter->second.pop_back();
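
The fix matters when the pushed state was "undefined". An illustrative
sequence:

    #pragma push_macro("FOO")  /* FOO is not defined; a null entry is pushed */
    #define FOO 1
    #pragma pop_macro("FOO")   /* the current FOO is undef'd; the null entry
                                  means nothing is reinstalled, so FOO ends
                                  up undefined as expected */
    #ifdef FOO
    #error "FOO should not be defined"
    #endif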
@@ -1009,7 +1011,7 @@ struct PragmaDebugHandler : public PragmaHandler {
if (II->isStr("assert")) {
llvm_unreachable("This is an assertion!");
} else if (II->isStr("crash")) {
- *(volatile int*) 0x11 = 0;
+ LLVM_BUILTIN_TRAP;
} else if (II->isStr("parser_crash")) {
Token Crasher;
Crasher.setKind(tok::annot_pragma_parser_crash);
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
index dfdeba3..01f3665e 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
@@ -25,10 +25,11 @@ ExternalPreprocessingRecordSource::~ExternalPreprocessingRecordSource() { }
InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
InclusionKind Kind,
StringRef FileName,
- bool InQuotes, const FileEntry *File,
+ bool InQuotes, bool ImportedModule,
+ const FileEntry *File,
SourceRange Range)
: PreprocessingDirective(InclusionDirectiveKind, Range),
- InQuotes(InQuotes), Kind(Kind), File(File)
+ InQuotes(InQuotes), Kind(Kind), ImportedModule(ImportedModule), File(File)
{
char *Memory
= (char*)PPRec.Allocate(FileName.size() + 1, llvm::alignOf<char>());
@@ -59,8 +60,7 @@ PreprocessingRecord::getPreprocessedEntitiesInRange(SourceRange Range) {
iterator(this, CachedRangeQuery.Result.second));
}
- std::pair<PPEntityID, PPEntityID>
- Res = getPreprocessedEntitiesInRangeSlow(Range);
+ std::pair<int, int> Res = getPreprocessedEntitiesInRangeSlow(Range);
CachedRangeQuery.Range = Range;
CachedRangeQuery.Result = Res;
@@ -95,12 +95,12 @@ bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
if (FID.isInvalid())
return false;
- PPEntityID PPID = PPEI.Position;
- if (PPID < 0) {
- assert(unsigned(-PPID-1) < LoadedPreprocessedEntities.size() &&
+ int Pos = PPEI.Position;
+ if (Pos < 0) {
+ assert(unsigned(-Pos-1) < LoadedPreprocessedEntities.size() &&
"Out-of bounds loaded preprocessed entity");
assert(ExternalSource && "No external source to load from");
- unsigned LoadedIndex = LoadedPreprocessedEntities.size()+PPID;
+ unsigned LoadedIndex = LoadedPreprocessedEntities.size()+Pos;
if (PreprocessedEntity *PPE = LoadedPreprocessedEntities[LoadedIndex])
return isPreprocessedEntityIfInFileID(PPE, FID, SourceMgr);
@@ -118,15 +118,15 @@ bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
FID, SourceMgr);
}
- assert(unsigned(PPID) < PreprocessedEntities.size() &&
+ assert(unsigned(Pos) < PreprocessedEntities.size() &&
"Out-of bounds local preprocessed entity");
- return isPreprocessedEntityIfInFileID(PreprocessedEntities[PPID],
+ return isPreprocessedEntityIfInFileID(PreprocessedEntities[Pos],
FID, SourceMgr);
}
/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
/// that source range \arg R encompasses.
-std::pair<PreprocessingRecord::PPEntityID, PreprocessingRecord::PPEntityID>
+std::pair<int, int>
PreprocessingRecord::getPreprocessedEntitiesInRangeSlow(SourceRange Range) {
assert(Range.isValid());
assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
@@ -319,14 +319,19 @@ void PreprocessingRecord::RegisterMacroDefinition(MacroInfo *Macro,
/// \brief Retrieve the preprocessed entity at the given ID.
PreprocessedEntity *PreprocessingRecord::getPreprocessedEntity(PPEntityID PPID){
- if (PPID < 0) {
- assert(unsigned(-PPID-1) < LoadedPreprocessedEntities.size() &&
+ if (PPID.ID < 0) {
+ unsigned Index = -PPID.ID - 1;
+ assert(Index < LoadedPreprocessedEntities.size() &&
"Out-of bounds loaded preprocessed entity");
- return getLoadedPreprocessedEntity(LoadedPreprocessedEntities.size()+PPID);
+ return getLoadedPreprocessedEntity(Index);
}
- assert(unsigned(PPID) < PreprocessedEntities.size() &&
+
+ if (PPID.ID == 0)
+ return 0;
+ unsigned Index = PPID.ID - 1;
+ assert(Index < PreprocessedEntities.size() &&
"Out-of bounds local preprocessed entity");
- return PreprocessedEntities[PPID];
+ return PreprocessedEntities[Index];
}
/// \brief Retrieve the loaded preprocessed entity at the given index.
@@ -389,10 +394,11 @@ void PreprocessingRecord::InclusionDirective(
const clang::Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- clang::SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath) {
+ StringRef RelativePath,
+ const Module *Imported) {
InclusionDirective::InclusionKind Kind = InclusionDirective::Include;
switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
@@ -415,9 +421,19 @@ void PreprocessingRecord::InclusionDirective(
default:
llvm_unreachable("Unknown include directive kind");
}
-
+
+ SourceLocation EndLoc;
+ if (!IsAngled) {
+ EndLoc = FilenameRange.getBegin();
+ } else {
+ EndLoc = FilenameRange.getEnd();
+ if (FilenameRange.isCharRange())
+ EndLoc = EndLoc.getLocWithOffset(-1); // the InclusionDirective expects
+ // a token range.
+ }
clang::InclusionDirective *ID
- = new (*this) clang::InclusionDirective(*this, Kind, FileName, !IsAngled,
+ = new (*this) clang::InclusionDirective(*this, Kind, FileName, !IsAngled,
+ (bool)Imported,
File, SourceRange(HashLoc, EndLoc));
addPreprocessedEntity(ID);
}
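
A minimal sketch of a client of the widened callback (class name invented; the
parameter list mirrors the override above):

    #include "clang/Lex/PPCallbacks.h"
    using namespace clang;

    class IncludeLogger : public PPCallbacks {
    public:
      virtual void InclusionDirective(SourceLocation HashLoc,
                                      const Token &IncludeTok,
                                      StringRef FileName, bool IsAngled,
                                      CharSourceRange FilenameRange,
                                      const FileEntry *File,
                                      StringRef SearchPath,
                                      StringRef RelativePath,
                                      const Module *Imported) {
        // Imported is non-null when the #include was translated into a
        // module import; FilenameRange covers the "file" or <file> token.
        (void)Imported;
      }
    };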
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
index 614530c..3b070ce 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -26,6 +26,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "MacroArgs.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/HeaderSearch.h"
@@ -49,21 +50,25 @@ using namespace clang;
//===----------------------------------------------------------------------===//
ExternalPreprocessorSource::~ExternalPreprocessorSource() { }
-Preprocessor::Preprocessor(DiagnosticsEngine &diags, LangOptions &opts,
+PPMutationListener::~PPMutationListener() { }
+
+Preprocessor::Preprocessor(llvm::IntrusiveRefCntPtr<PreprocessorOptions> PPOpts,
+ DiagnosticsEngine &diags, LangOptions &opts,
const TargetInfo *target, SourceManager &SM,
HeaderSearch &Headers, ModuleLoader &TheModuleLoader,
IdentifierInfoLookup* IILookup,
bool OwnsHeaders,
bool DelayInitialization,
bool IncrProcessing)
- : Diags(&diags), LangOpts(opts), Target(target),FileMgr(Headers.getFileMgr()),
+ : PPOpts(PPOpts), Diags(&diags), LangOpts(opts), Target(target),
+ FileMgr(Headers.getFileMgr()),
SourceMgr(SM), HeaderInfo(Headers), TheModuleLoader(TheModuleLoader),
ExternalSource(0), Identifiers(opts, IILookup),
IncrementalProcessing(IncrProcessing), CodeComplete(0),
CodeCompletionFile(0), CodeCompletionOffset(0), CodeCompletionReached(0),
SkipMainFilePreamble(0, true), CurPPLexer(0),
- CurDirLookup(0), CurLexerKind(CLK_Lexer), Callbacks(0), MacroArgCache(0),
- Record(0), MIChainHead(0), MICache(0)
+ CurDirLookup(0), CurLexerKind(CLK_Lexer), Callbacks(0), Listener(0),
+ MacroArgCache(0), Record(0), MIChainHead(0), MICache(0)
{
OwnsHeaderSearch = OwnsHeaders;
@@ -285,6 +290,39 @@ Preprocessor::macro_end(bool IncludeExternalMacros) const {
return Macros.end();
}
+/// \brief Compares macro tokens with a specified token value sequence.
+static bool MacroDefinitionEquals(const MacroInfo *MI,
+ llvm::ArrayRef<TokenValue> Tokens) {
+ return Tokens.size() == MI->getNumTokens() &&
+ std::equal(Tokens.begin(), Tokens.end(), MI->tokens_begin());
+}
+
+StringRef Preprocessor::getLastMacroWithSpelling(
+ SourceLocation Loc,
+ ArrayRef<TokenValue> Tokens) const {
+ SourceLocation BestLocation;
+ StringRef BestSpelling;
+ for (Preprocessor::macro_iterator I = macro_begin(), E = macro_end();
+ I != E; ++I) {
+ if (!I->second->isObjectLike())
+ continue;
+ const MacroInfo *MI = I->second->findDefinitionAtLoc(Loc, SourceMgr);
+ if (!MI)
+ continue;
+ if (!MacroDefinitionEquals(MI, Tokens))
+ continue;
+ SourceLocation Location = I->second->getDefinitionLoc();
+    // Prefer the macro that was defined most recently.
+ if (BestLocation.isInvalid() ||
+ (Location.isValid() &&
+ SourceMgr.isBeforeInTranslationUnit(BestLocation, Location))) {
+ BestLocation = Location;
+ BestSpelling = I->first->getName();
+ }
+ }
+ return BestSpelling;
+}
+
void Preprocessor::recomputeCurLexerKind() {
if (CurLexer)
CurLexerKind = CLK_Lexer;
@@ -378,17 +416,17 @@ StringRef Preprocessor::getSpelling(const Token &Tok,
/// CreateString - Plop the specified string into a scratch buffer and return a
/// location for it. If specified, the source location provides a source
/// location for the token.
-void Preprocessor::CreateString(const char *Buf, unsigned Len, Token &Tok,
+void Preprocessor::CreateString(StringRef Str, Token &Tok,
SourceLocation ExpansionLocStart,
SourceLocation ExpansionLocEnd) {
- Tok.setLength(Len);
+ Tok.setLength(Str.size());
const char *DestPtr;
- SourceLocation Loc = ScratchBuf->getToken(Buf, Len, DestPtr);
+ SourceLocation Loc = ScratchBuf->getToken(Str.data(), Str.size(), DestPtr);
if (ExpansionLocStart.isValid())
Loc = SourceMgr.createExpansionLoc(Loc, ExpansionLocStart,
- ExpansionLocEnd, Len);
+ ExpansionLocEnd, Str.size());
Tok.setLocation(Loc);
// If this is a raw identifier or a literal token, set the pointer data.
@@ -641,10 +679,14 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
}
// If we have a non-empty module path, load the named module.
- if (!ModuleImportPath.empty())
- (void)TheModuleLoader.loadModule(ModuleImportLoc, ModuleImportPath,
- Module::MacrosVisible,
- /*IsIncludeDirective=*/false);
+ if (!ModuleImportPath.empty()) {
+ Module *Imported = TheModuleLoader.loadModule(ModuleImportLoc,
+ ModuleImportPath,
+ Module::MacrosVisible,
+ /*IsIncludeDirective=*/false);
+ if (Callbacks)
+ Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported);
+ }
}
void Preprocessor::addCommentHandler(CommentHandler *Handler) {
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
index ade40da..59b7478 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
@@ -23,12 +23,13 @@ using namespace clang;
/// Create a TokenLexer for the specified macro with the specified actual
/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
-void TokenLexer::Init(Token &Tok, SourceLocation ELEnd, MacroArgs *Actuals) {
+void TokenLexer::Init(Token &Tok, SourceLocation ELEnd, MacroInfo *MI,
+ MacroArgs *Actuals) {
// If the client is reusing a TokenLexer, make sure to free any memory
// associated with it.
destroy();
- Macro = PP.getMacroInfo(Tok.getIdentifierInfo());
+ Macro = MI;
ActualArgs = Actuals;
CurToken = 0;
@@ -118,6 +119,55 @@ void TokenLexer::destroy() {
if (ActualArgs) ActualArgs->destroy(PP);
}
+/// Remove comma ahead of __VA_ARGS__, if present, according to compiler dialect
+/// settings. Returns true if the comma is removed.
+static bool MaybeRemoveCommaBeforeVaArgs(SmallVector<Token, 128> &ResultToks,
+ bool &NextTokGetsSpace,
+ bool HasPasteOperator,
+ MacroInfo *Macro, unsigned MacroArgNo,
+ Preprocessor &PP) {
+ // Is the macro argument __VA_ARGS__?
+ if (!Macro->isVariadic() || MacroArgNo != Macro->getNumArgs()-1)
+ return false;
+
+ // In Microsoft-compatibility mode, a comma is removed in the expansion
+ // of " ... , __VA_ARGS__ " if __VA_ARGS__ is empty. This extension is
+ // not supported by gcc.
+ if (!HasPasteOperator && !PP.getLangOpts().MicrosoftMode)
+ return false;
+
+ // GCC removes the comma in the expansion of " ... , ## __VA_ARGS__ " if
+  // __VA_ARGS__ is empty, except in strict C99 mode when there are no
+  // named arguments, in which case the comma remains. In all other modes,
+  // including C99 with GNU extensions, it is removed regardless of named
+  // arguments.
+ // Microsoft also appears to support this extension, unofficially.
+ if (PP.getLangOpts().C99 && !PP.getLangOpts().GNUMode
+ && Macro->getNumArgs() < 2)
+ return false;
+
+ // Is a comma available to be removed?
+ if (ResultToks.empty() || !ResultToks.back().is(tok::comma))
+ return false;
+
+ // Issue an extension diagnostic for the paste operator.
+ if (HasPasteOperator)
+ PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
+
+ // Remove the comma.
+ ResultToks.pop_back();
+
+ // If the comma was right after another paste (e.g. "X##,##__VA_ARGS__"),
+ // then removal of the comma should produce a placemarker token (in C99
+ // terms) which we model by popping off the previous ##, giving us a plain
+ // "X" when __VA_ARGS__ is empty.
+ if (!ResultToks.empty() && ResultToks.back().is(tok::hashhash))
+ ResultToks.pop_back();
+
+ // Never add a space, even if the comma, ##, or arg had a space.
+ NextTokGetsSpace = false;
+ return true;
+}
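
A usage sketch of the two comma-elision behaviors the helper now centralizes:

    #include <stdio.h>

    #define GNULOG(fmt, ...) printf(fmt, ## __VA_ARGS__)
    #define MSLOG(fmt, ...)  printf(fmt, __VA_ARGS__)

    int main(void) {
      GNULOG("hi\n");  /* GNU extension: the comma before ##__VA_ARGS__ is
                          removed when no variadic arguments are given */
      MSLOG("hi\n");   /* no ## paste: the comma is removed only under
                          -fms-compatibility; otherwise this is ill-formed */
      return 0;
    }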
+
/// Expand the arguments of a function-like macro so that we can quickly
/// return preexpanded tokens from Tokens.
void TokenLexer::ExpandFunctionArguments() {
@@ -198,6 +248,14 @@ void TokenLexer::ExpandFunctionArguments() {
!ResultToks.empty() && ResultToks.back().is(tok::hashhash);
bool PasteAfter = i+1 != e && Tokens[i+1].is(tok::hashhash);
+ // In Microsoft mode, remove the comma before __VA_ARGS__ to ensure there
+ // are no trailing commas if __VA_ARGS__ is empty.
+ if (!PasteBefore && ActualArgs->isVarargsElidedUse() &&
+ MaybeRemoveCommaBeforeVaArgs(ResultToks, NextTokGetsSpace,
+ /*HasPasteOperator=*/false,
+ Macro, ArgNo, PP))
+ continue;
+
// If it is not the LHS/RHS of a ## operator, we must pre-expand the
// argument and substitute the expanded tokens into the result. This is
// C99 6.10.3.1p1.
@@ -320,23 +378,13 @@ void TokenLexer::ExpandFunctionArguments() {
// If this is the __VA_ARGS__ token, and if the argument wasn't provided,
// and if the macro had at least one real argument, and if the token before
- // the ## was a comma, remove the comma.
- if ((unsigned)ArgNo == Macro->getNumArgs()-1 && // is __VA_ARGS__
- ActualArgs->isVarargsElidedUse() && // Argument elided.
- !ResultToks.empty() && ResultToks.back().is(tok::comma)) {
- // Never add a space, even if the comma, ##, or arg had a space.
- NextTokGetsSpace = false;
- // Remove the paste operator, report use of the extension.
- PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
- ResultToks.pop_back();
-
- // If the comma was right after another paste (e.g. "X##,##__VA_ARGS__"),
- // then removal of the comma should produce a placemarker token (in C99
- // terms) which we model by popping off the previous ##, giving us a plain
- // "X" when __VA_ARGS__ is empty.
- if (!ResultToks.empty() && ResultToks.back().is(tok::hashhash))
- ResultToks.pop_back();
- }
+ // the ## was a comma, remove the comma. This is a GCC extension which is
+ // disabled when using -std=c99.
+ if (ActualArgs->isVarargsElidedUse())
+ MaybeRemoveCommaBeforeVaArgs(ResultToks, NextTokGetsSpace,
+ /*HasPasteOperator=*/true,
+ Macro, ArgNo, PP);
+
continue;
}
@@ -494,7 +542,7 @@ bool TokenLexer::PasteTokens(Token &Tok) {
// Claim that the tmp token is a string_literal so that we can get the
// character pointer back from CreateString in getLiteralData().
ResultTokTmp.setKind(tok::string_literal);
- PP.CreateString(&Buffer[0], Buffer.size(), ResultTokTmp);
+ PP.CreateString(Buffer, ResultTokTmp);
SourceLocation ResultTokLoc = ResultTokTmp.getLocation();
ResultTokStrPtr = ResultTokTmp.getLiteralData();
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
index bd4f859..7d68e1f 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
@@ -78,7 +78,6 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
S.getPreprocessor().EnterMainSourceFile();
P.Initialize();
- S.Initialize();
// C11 6.9p1 says translation units must have at least one top-level
// declaration. C++ doesn't have this restriction. We also don't want to
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
index abce27c..9c5c0597 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -34,7 +34,7 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
Tok.is(tok::equal)) &&
"Current token not a '{', ':', '=', or 'try'!");
- MultiTemplateParamsArg TemplateParams(Actions,
+ MultiTemplateParamsArg TemplateParams(
TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->data() : 0,
TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->size() : 0);
@@ -42,10 +42,10 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
D.setFunctionDefinitionKind(DefinitionKind);
if (D.getDeclSpec().isFriendSpecified())
FnD = Actions.ActOnFriendFunctionDecl(getCurScope(), D,
- move(TemplateParams));
+ TemplateParams);
else {
FnD = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, D,
- move(TemplateParams), 0,
+ TemplateParams, 0,
VS, ICIS_NoInit);
if (FnD) {
Actions.ProcessDeclAttributeList(getCurScope(), FnD, AccessAttrs,
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
index cb865cc..f73907a 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
@@ -143,7 +143,7 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
// Attributes in a class are parsed at the end of the class, along
// with other late-parsed declarations.
- if (!ClassStack.empty())
+ if (!ClassStack.empty() && !LateAttrs->parseSoon())
getCurrentClass().LateParsedDeclarations.push_back(LA);
// consume everything up to and including the matching right parens
@@ -154,7 +154,8 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
Eof.setLocation(Tok.getLocation());
LA->Toks.push_back(Eof);
} else {
- ParseGNUAttributeArgs(AttrName, AttrNameLoc, attrs, endLoc);
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, attrs, endLoc,
+ 0, SourceLocation(), AttributeList::AS_GNU);
}
} else {
attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc,
@@ -173,11 +174,15 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
}
-/// Parse the arguments to a parameterized GNU attribute
+/// Parse the arguments to a parameterized GNU attribute or
+/// a C++11 attribute in the "gnu" namespace.
void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
- SourceLocation *EndLoc) {
+ SourceLocation *EndLoc,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ AttributeList::Syntax Syntax) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
@@ -236,7 +241,7 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
break;
}
- ExprVector ArgExprs(Actions);
+ ExprVector ArgExprs;
if (!BuiltinType &&
(ParmLoc.isValid() ? Tok.is(tok::comma) : Tok.isNot(tok::r_paren))) {
@@ -277,10 +282,11 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation RParen = Tok.getLocation();
if (!ExpectAndConsume(tok::r_paren, diag::err_expected_rparen)) {
+ SourceLocation AttrLoc = ScopeLoc.isValid() ? ScopeLoc : AttrNameLoc;
AttributeList *attr =
- Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), 0, AttrNameLoc,
- ParmName, ParmLoc, ArgExprs.take(), ArgExprs.size(),
- AttributeList::AS_GNU);
+ Attrs.addNew(AttrName, SourceRange(AttrLoc, RParen),
+ ScopeName, ScopeLoc, ParmName, ParmLoc,
+ ArgExprs.data(), ArgExprs.size(), Syntax);
if (BuiltinType && attr->getKind() == AttributeList::AT_IBOutletCollection)
Diag(Tok, diag::err_iboutletcollection_builtintype);
}
@@ -851,10 +857,6 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) {
Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
Class.TagOrTemplate);
if (!Class.LateParsedDeclarations.empty()) {
- // Allow 'this' within late-parsed attributes.
- Sema::CXXThisScopeRAII ThisScope(Actions, Class.TagOrTemplate,
- /*TypeQuals=*/0);
-
for (unsigned i = 0, ni = Class.LateParsedDeclarations.size(); i < ni; ++i){
Class.LateParsedDeclarations[i]->ParseLexedAttributes();
}
@@ -869,6 +871,8 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) {
/// \brief Parse all attributes in LAs, and attach them to Decl D.
void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition) {
+ assert(LAs.parseSoon() &&
+ "Attribute list should be marked for immediate parsing.");
for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
if (D)
LAs[i]->addDecl(D);
@@ -904,34 +908,45 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
ParsedAttributes Attrs(AttrFactory);
SourceLocation endLoc;
- if (LA.Decls.size() == 1) {
+ if (LA.Decls.size() > 0) {
Decl *D = LA.Decls[0];
+ NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
- // If the Decl is templatized, add template parameters to scope.
- bool HasTemplateScope = EnterScope && D->isTemplateDecl();
- ParseScope TempScope(this, Scope::TemplateParamScope, HasTemplateScope);
- if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(Actions.CurScope, D);
-
- // If the Decl is on a function, add function parameters to the scope.
- bool HasFunctionScope = EnterScope && D->isFunctionOrFunctionTemplate();
- ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope, HasFunctionScope);
- if (HasFunctionScope)
- Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
-
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc);
-
- if (HasFunctionScope) {
- Actions.ActOnExitFunctionContext();
- FnScope.Exit(); // Pop scope, and remove Decls from IdResolver
- }
- if (HasTemplateScope) {
- TempScope.Exit();
+ // Allow 'this' within late-parsed attributes.
+ Sema::CXXThisScopeRAII ThisScope(Actions, RD,
+ /*TypeQuals=*/0,
+ ND && RD && ND->isCXXInstanceMember());
+
+ if (LA.Decls.size() == 1) {
+ // If the Decl is templatized, add template parameters to scope.
+ bool HasTemplateScope = EnterScope && D->isTemplateDecl();
+ ParseScope TempScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(Actions.CurScope, D);
+
+ // If the Decl is on a function, add function parameters to the scope.
+ bool HasFunScope = EnterScope && D->isFunctionOrFunctionTemplate();
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope, HasFunScope);
+ if (HasFunScope)
+ Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
+
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ 0, SourceLocation(), AttributeList::AS_GNU);
+
+ if (HasFunScope) {
+ Actions.ActOnExitFunctionContext();
+ FnScope.Exit(); // Pop scope, and remove Decls from IdResolver
+ }
+ if (HasTemplateScope) {
+ TempScope.Exit();
+ }
+ } else {
+ // If there are multiple decls, then the decl cannot be within the
+ // function scope.
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ 0, SourceLocation(), AttributeList::AS_GNU);
}
- } else if (LA.Decls.size() > 0) {
- // If there are multiple decls, then the decl cannot be within the
- // function scope.
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc);
} else {
Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
}
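
The CXXThisScopeRAII above makes 'this' usable inside late-parsed attribute
arguments. A sketch using the thread-safety attributes, which are parsed late
(type names invented):

    struct __attribute__((lockable)) Mutex {};

    struct Widget {
      Mutex M;
      // The attribute argument can now refer to members through 'this'.
      void Lock() __attribute__((exclusive_lock_function(this->M)));
    };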
@@ -998,7 +1013,7 @@ void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- ExprVector ArgExprs(Actions);
+ ExprVector ArgExprs;
bool ArgExprsOk = true;
// now parse the list of expressions
@@ -1018,7 +1033,7 @@ void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
// Match the ')'.
if (ArgExprsOk && !T.consumeClose()) {
Attrs.addNew(&AttrName, AttrNameLoc, 0, AttrNameLoc, 0, SourceLocation(),
- ArgExprs.take(), ArgExprs.size(), AttributeList::AS_GNU);
+ ArgExprs.data(), ArgExprs.size(), AttributeList::AS_GNU);
}
if (EndLoc)
*EndLoc = T.getCloseLocation();
@@ -1127,6 +1142,18 @@ void Parser::DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs) {
<< attrs.Range;
}
+void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &attrs) {
+ AttributeList *AttrList = attrs.getList();
+ while (AttrList) {
+ if (AttrList->isCXX0XAttribute()) {
+ Diag(AttrList->getLoc(), diag::warn_attribute_no_decl)
+ << AttrList->getName();
+ AttrList->setInvalid();
+ }
+ AttrList = AttrList->getNext();
+ }
+}
+
/// ParseDeclaration - Parse a full 'declaration', which consists of
/// declaration-specifiers, some number of declarators, and a semicolon.
/// 'Context' should be a Declarator::TheContext value. This returns the
@@ -1400,7 +1427,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// Save late-parsed attributes for now; they need to be parsed in the
// appropriate function scope after the function Decl has been constructed.
- LateParsedAttrList LateParsedAttrs;
+ // These will be parsed in ParseFunctionDefinition or ParseLexedAttrList.
+ LateParsedAttrList LateParsedAttrs(true);
if (D.isFunctionDeclarator())
MaybeParseGNUAttributes(D, &LateParsedAttrs);
@@ -1587,9 +1615,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
case ParsedTemplateInfo::Template:
case ParsedTemplateInfo::ExplicitSpecialization:
ThisDecl = Actions.ActOnTemplateDeclarator(getCurScope(),
- MultiTemplateParamsArg(Actions,
- TemplateInfo.TemplateParams->data(),
- TemplateInfo.TemplateParams->size()),
+ *TemplateInfo.TemplateParams,
D);
break;
@@ -1660,7 +1686,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- ExprVector Exprs(Actions);
+ ExprVector Exprs;
CommaLocsTy CommaLocs;
if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
@@ -1669,6 +1695,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
}
if (ParseExpressionList(Exprs, CommaLocs)) {
+ Actions.ActOnInitializerError(ThisDecl);
SkipUntil(tok::r_paren);
if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
@@ -1689,7 +1716,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
ExprResult Initializer = Actions.ActOnParenListExpr(T.getOpenLocation(),
T.getCloseLocation(),
- move_arg(Exprs));
+ Exprs);
Actions.AddInitializerToDecl(ThisDecl, Initializer.take(),
/*DirectInit=*/true, TypeContainsAuto);
}
@@ -1872,6 +1899,9 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
TagName="union" ; FixitTagName = "union " ;TagKind=tok::kw_union ;break;
case DeclSpec::TST_struct:
TagName="struct"; FixitTagName = "struct ";TagKind=tok::kw_struct;break;
+ case DeclSpec::TST_interface:
+ TagName="__interface"; FixitTagName = "__interface ";
+ TagKind=tok::kw___interface;break;
case DeclSpec::TST_class:
TagName="class" ; FixitTagName = "class " ;TagKind=tok::kw_class ;break;
}
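
__interface is the Microsoft extension keyword now threaded through these
token switches; a sketch (assumes -fms-extensions):

    __interface IShape {
      void Draw();  // members are implicitly public virtual methods
    };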
@@ -2069,13 +2099,13 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
return;
}
- ExprVector ArgExprs(Actions);
+ ExprVector ArgExprs;
ArgExprs.push_back(ArgExpr.release());
     // FIXME: This should not be GNU, but since the attribute used is
// based on the spelling, and there is no true spelling for
// C++11 attributes, this isn't accepted.
Attrs.addNew(PP.getIdentifierInfo("aligned"), KWLoc, 0, KWLoc,
- 0, T.getOpenLocation(), ArgExprs.take(), 1,
+ 0, T.getOpenLocation(), ArgExprs.data(), 1,
AttributeList::AS_GNU);
}
@@ -2130,8 +2160,14 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DoneWithDeclSpec:
if (!AttrsLastTime)
ProhibitAttributes(attrs);
- else
+ else {
+    // Reject C++11 attributes that appertain to decl specifiers, since
+    // we do not support any such attributes there. This also matches
+    // the behavior of g++ 4.8.
+ ProhibitCXX11Attributes(attrs);
+
DS.takeAttributesFrom(attrs);
+ }
// If this is not a declaration specifier token, we're done reading decl
// specifiers. First verify that DeclSpec's are consistent.
@@ -2271,6 +2307,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
Tok.getAnnotationEndLoc(),
PrevSpec, DiagID, T);
+ if (isInvalid)
+ break;
}
else
DS.SetTypeSpecError();
@@ -2476,12 +2514,12 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___forceinline: {
isInvalid = DS.SetFunctionSpecInline(Loc, PrevSpec, DiagID);
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
- SourceLocation AttrNameLoc = ConsumeToken();
+ SourceLocation AttrNameLoc = Tok.getLocation();
// FIXME: This does not work correctly if it is set to be a declspec
// attribute, and a GNU attribute is simply incorrect.
DS.getAttributes().addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
SourceLocation(), 0, 0, AttributeList::AS_GNU);
- continue;
+ break;
}
case tok::kw___ptr64:
@@ -2706,6 +2744,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// class-specifier:
case tok::kw_class:
case tok::kw_struct:
+ case tok::kw___interface:
case tok::kw_union: {
tok::TokenKind Kind = Tok.getKind();
ConsumeToken();
@@ -2723,15 +2762,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// cv-qualifier:
case tok::kw_const:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_const, Loc, PrevSpec, DiagID,
- getLangOpts(), /*IsTypeSpec*/true);
+ getLangOpts());
break;
case tok::kw_volatile:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
- getLangOpts(), /*IsTypeSpec*/true);
+ getLangOpts());
break;
case tok::kw_restrict:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
- getLangOpts(), /*IsTypeSpec*/true);
+ getLangOpts());
break;
// C++ typename-specifier:
@@ -3165,6 +3204,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// anything that's a simple-type-specifier followed by '(' as an
// expression. This suffices because function types are not valid
// underlying types anyway.
+ EnterExpressionEvaluationContext Unevaluated(Actions,
+ Sema::ConstantEvaluated);
TPResult TPR = isExpressionOrTypeSpecifierSimple(NextToken().getKind());
// If the next token starts an expression, we know we're parsing a
// bit-field. This is the common case.
@@ -3213,11 +3254,14 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SourceRange Range;
BaseType = ParseTypeName(&Range);
- if (!getLangOpts().CPlusPlus0x && !getLangOpts().ObjC2)
- Diag(StartLoc, diag::ext_ms_enum_fixed_underlying_type)
- << Range;
- if (getLangOpts().CPlusPlus0x)
+ if (getLangOpts().CPlusPlus0x) {
Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
+ } else if (!getLangOpts().ObjC2) {
+ if (getLangOpts().CPlusPlus)
+ Diag(StartLoc, diag::ext_cxx11_enum_fixed_underlying_type) << Range;
+ else
+ Diag(StartLoc, diag::ext_c_enum_fixed_underlying_type) << Range;
+ }
}
}
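
The split diagnostics distinguish the language modes; for example:

    enum E : short { A, B };
    /* C++11: warn_cxx98_compat_enum_fixed_underlying_type (compat warning)
       C++98: ext_cxx11_enum_fixed_underlying_type
       C:     ext_c_enum_fixed_underlying_type
       Objective-C: accepted without an extension diagnostic */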
@@ -3526,6 +3570,7 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
// struct-or-union-specifier (C99) or class-specifier (C++)
case tok::kw_class:
case tok::kw_struct:
+ case tok::kw___interface:
case tok::kw_union:
// enum-specifier
case tok::kw_enum:
@@ -3597,6 +3642,7 @@ bool Parser::isTypeSpecifierQualifier() {
// struct-or-union-specifier (C99) or class-specifier (C++)
case tok::kw_class:
case tok::kw_struct:
+ case tok::kw___interface:
case tok::kw_union:
// enum-specifier
case tok::kw_enum:
@@ -3735,6 +3781,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_class:
case tok::kw_struct:
case tok::kw_union:
+ case tok::kw___interface:
// enum-specifier
case tok::kw_enum:
@@ -3748,6 +3795,9 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_virtual:
case tok::kw_explicit:
+ // friend keyword.
+ case tok::kw_friend:
+
// static_assert-declaration
case tok::kw__Static_assert:
@@ -3756,11 +3806,10 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
// GNU attributes.
case tok::kw___attribute:
- return true;
- // C++0x decltype.
+ // C++11 decltype and constexpr.
case tok::annot_decltype:
- return true;
+ case tok::kw_constexpr:
// C11 _Atomic()
case tok::kw__Atomic:
@@ -3925,15 +3974,15 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
case tok::kw_const:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec, DiagID,
- getLangOpts(), /*IsTypeSpec*/false);
+ getLangOpts());
break;
case tok::kw_volatile:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
- getLangOpts(), /*IsTypeSpec*/false);
+ getLangOpts());
break;
case tok::kw_restrict:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
- getLangOpts(), /*IsTypeSpec*/false);
+ getLangOpts());
break;
// OpenCL qualifiers:
@@ -4344,7 +4393,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
D.SetIdentifier(0, Tok.getLocation());
} else {
if (Tok.getKind() == tok::annot_pragma_parser_crash)
- *(volatile int*) 0x11 = 0;
+ LLVM_BUILTIN_TRAP;
if (D.getContext() == Declarator::MemberContext)
Diag(Tok, diag::err_expected_member_name_or_semi)
<< D.getDeclSpec().getSourceRange();
@@ -4374,9 +4423,15 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// In such a case, check if we actually have a function declarator; if it
// is not, the declarator has been fully parsed.
bool IsAmbiguous = false;
- if (getLangOpts().CPlusPlus && D.mayBeFollowedByCXXDirectInit() &&
- !isCXXFunctionDeclarator(&IsAmbiguous))
- break;
+ if (getLangOpts().CPlusPlus && D.mayBeFollowedByCXXDirectInit()) {
+ // The name of the declarator, if any, is tentatively declared within
+ // a possible direct initializer.
+ TentativelyDeclaredIdentifiers.push_back(D.getIdentifier());
+ bool IsFunctionDecl = isCXXFunctionDeclarator(&IsAmbiguous);
+ TentativelyDeclaredIdentifiers.pop_back();
+ if (!IsFunctionDecl)
+ break;
+ }
ParsedAttributes attrs(AttrFactory);
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
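A hedged example of the case the tentative declaration above handles: the declared name may legitimately appear in its own direct-initializer, so the function-declarator check must treat it as already declared.

  struct S { S(S *) {} };
  S s(&s);   // a variable initialized with its own address, not a function declarator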
@@ -4553,7 +4608,14 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
Actions.ActOnStartFunctionDeclarator();
- SourceLocation EndLoc;
+ /* LocalEndLoc is the end location for the local FunctionTypeLoc.
+ EndLoc is the end location for the function declarator.
+ They differ for trailing return types. */
+ SourceLocation StartLoc, LocalEndLoc, EndLoc;
+ SourceLocation LParenLoc, RParenLoc;
+ LParenLoc = Tracker.getOpenLocation();
+ StartLoc = LParenLoc;
+
if (isFunctionDeclaratorIdentifierList()) {
if (RequiresArg)
Diag(Tok, diag::err_argument_required_after_attribute);
@@ -4561,7 +4623,9 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
ParseFunctionDeclaratorIdentifierList(D, ParamInfo);
Tracker.consumeClose();
- EndLoc = Tracker.getCloseLocation();
+ RParenLoc = Tracker.getCloseLocation();
+ LocalEndLoc = RParenLoc;
+ EndLoc = RParenLoc;
} else {
if (Tok.isNot(tok::r_paren))
ParseParameterDeclarationClause(D, FirstArgAttrs, ParamInfo, EllipsisLoc);
@@ -4572,7 +4636,9 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
// If we have the closing ')', eat it.
Tracker.consumeClose();
- EndLoc = Tracker.getCloseLocation();
+ RParenLoc = Tracker.getCloseLocation();
+ LocalEndLoc = RParenLoc;
+ EndLoc = RParenLoc;
if (getLangOpts().CPlusPlus) {
// FIXME: Accept these components in any order, and produce fixits to
@@ -4628,21 +4694,25 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
MaybeParseCXX0XAttributes(FnAttrs);
// Parse trailing-return-type[opt].
+ LocalEndLoc = EndLoc;
if (getLangOpts().CPlusPlus0x && Tok.is(tok::arrow)) {
Diag(Tok, diag::warn_cxx98_compat_trailing_return_type);
+ if (D.getDeclSpec().getTypeSpecType() == TST_auto)
+ StartLoc = D.getDeclSpec().getTypeSpecTypeLoc();
+ LocalEndLoc = Tok.getLocation();
SourceRange Range;
TrailingReturnType = ParseTrailingReturnType(Range);
- if (Range.getEnd().isValid())
- EndLoc = Range.getEnd();
+ EndLoc = Range.getEnd();
}
}
}
// Remember that we parsed a function type, and remember the attributes.
D.AddTypeInfo(DeclaratorChunk::getFunction(HasProto,
- /*isVariadic=*/EllipsisLoc.isValid(),
- IsAmbiguous, EllipsisLoc,
+ IsAmbiguous,
+ LParenLoc,
ParamInfo.data(), ParamInfo.size(),
+ EllipsisLoc, RParenLoc,
DS.getTypeQualifiers(),
RefQualifierIsLValueRef,
RefQualifierLoc, ConstQualifierLoc,
@@ -4654,8 +4724,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
DynamicExceptions.size(),
NoexceptExpr.isUsable() ?
NoexceptExpr.get() : 0,
- Tracker.getOpenLocation(),
- EndLoc, D,
+ StartLoc, LocalEndLoc, D,
TrailingReturnType),
FnAttrs, EndLoc);
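For orientation, a minimal declaration exercising the new location bookkeeping: with a trailing return type, the local FunctionTypeLoc runs from 'auto' to the '->' while the declarator's EndLoc extends through the returned type.

  auto scale(int x) -> long { return x * 10L; }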
@@ -5058,7 +5127,8 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
const bool hasParens = Tok.is(tok::l_paren);
- EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
bool isCastExpr;
ParsedType CastTy;
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
index 3dc96cf..4cb14e2 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
@@ -18,6 +18,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallString.h"
#include "RAIIObjectsForParser.h"
using namespace clang;
@@ -87,6 +88,12 @@ Decl *Parser::ParseNamespace(unsigned Context,
}
if (Tok.is(tok::equal)) {
+ if (Ident == 0) {
+ Diag(Tok, diag::err_expected_ident);
+ // Skip to end of the definition and eat the ';'.
+ SkipUntil(tok::semi);
+ return 0;
+ }
if (!attrs.empty())
Diag(attrTok, diag::err_unexpected_namespace_attributes_alias);
if (InlineLoc.isValid())
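The added check rejects an alias-definition with no alias name instead of misparsing it; a sketch of the accepted and rejected forms:

  namespace old_ns { }
  namespace new_ns = old_ns;   // well-formed alias-definition
  // namespace = old_ns;       // now: error 'expected identifier', skip to ';'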
@@ -581,7 +588,7 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
if (IsAliasDecl) {
TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
- MultiTemplateParamsArg TemplateParamsArg(Actions,
+ MultiTemplateParamsArg TemplateParamsArg(
TemplateParams ? TemplateParams->data() : 0,
TemplateParams ? TemplateParams->size() : 0);
// FIXME: Propagate attributes.
@@ -616,12 +623,13 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.consumeOpen()) {
Diag(Tok, diag::err_expected_lparen);
+ SkipMalformedDecl();
return 0;
}
ExprResult AssertExpr(ParseConstantExpression());
if (AssertExpr.isInvalid()) {
- SkipUntil(tok::semi);
+ SkipMalformedDecl();
return 0;
}
@@ -630,13 +638,13 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
if (!isTokenStringLiteral()) {
Diag(Tok, diag::err_expected_string_literal);
- SkipUntil(tok::semi);
+ SkipMalformedDecl();
return 0;
}
ExprResult AssertMessage(ParseStringLiteralExpression());
if (AssertMessage.isInvalid()) {
- SkipUntil(tok::semi);
+ SkipMalformedDecl();
return 0;
}
@@ -694,9 +702,22 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
0, /*IsDecltype=*/true);
Result = ParseExpression();
if (Result.isInvalid()) {
- SkipUntil(tok::r_paren);
DS.SetTypeSpecError();
- return StartLoc;
+ if (SkipUntil(tok::r_paren, /*StopAtSemi=*/true, /*DontConsume=*/true)) {
+ EndLoc = ConsumeParen();
+ } else {
+ assert(Tok.is(tok::semi));
+ if (PP.isBacktrackEnabled()) {
+ // Backtrack to get the location of the last token before the semi.
+ PP.RevertCachedTokens(2);
+ ConsumeToken(); // the semi.
+ EndLoc = ConsumeAnyToken();
+ assert(Tok.is(tok::semi));
+ } else {
+ EndLoc = Tok.getLocation();
+ }
+ }
+ return EndLoc;
}
// Match the ')'
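A sketch of the recovery path, using deliberately ill-formed input: an invalid decltype operand now yields an end location at the ')' or just before the ';' instead of being discarded.

  decltype(undeclared_name) x;   // ill-formed; the declaration is now skipped cleanly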
@@ -1034,6 +1055,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
DeclSpec::TST TagType;
if (TagTokKind == tok::kw_struct)
TagType = DeclSpec::TST_struct;
+ else if (TagTokKind == tok::kw___interface)
+ TagType = DeclSpec::TST_interface;
else if (TagTokKind == tok::kw_class)
TagType = DeclSpec::TST_class;
else {
@@ -1151,7 +1174,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
<< (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
<< (TagType == DeclSpec::TST_class? 0
: TagType == DeclSpec::TST_struct? 1
- : 2)
+ : TagType == DeclSpec::TST_interface? 2
+ : 3)
<< Name
<< SourceRange(LAngleLoc, RAngleLoc);
@@ -1243,8 +1267,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (Tok.isNot(tok::semi)) {
// A semicolon was missing after this declaration. Diagnose and recover.
ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
- TagType == DeclSpec::TST_class ? "class" :
- TagType == DeclSpec::TST_struct ? "struct" : "union");
+ DeclSpec::getSpecifierName(TagType));
PP.EnterToken(Tok);
Tok.setKind(tok::semi);
}
@@ -1279,8 +1302,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TemplateId) {
// Explicit specialization, class template partial specialization,
// or explicit instantiation.
- ASTTemplateArgsPtr TemplateArgsPtr(Actions,
- TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
TUK == Sema::TUK_Declaration) {
@@ -1362,7 +1384,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateArgsPtr,
TemplateId->RAngleLoc,
attrs.getList(),
- MultiTemplateParamsArg(Actions,
+ MultiTemplateParamsArg(
TemplateParams? &(*TemplateParams)[0] : 0,
TemplateParams? TemplateParams->size() : 0));
}
@@ -1389,7 +1411,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Actions.ActOnTemplatedFriendTag(getCurScope(), DS.getFriendSpecLoc(),
TagType, StartLoc, SS,
Name, NameLoc, attrs.getList(),
- MultiTemplateParamsArg(Actions,
+ MultiTemplateParamsArg(
TemplateParams? &(*TemplateParams)[0] : 0,
TemplateParams? TemplateParams->size() : 0));
} else {
@@ -1470,8 +1492,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TUK == Sema::TUK_Definition &&
(TemplateInfo.Kind || !isValidAfterTypeSpecifier(false))) {
ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
- TagType == DeclSpec::TST_class ? "class" :
- TagType == DeclSpec::TST_struct ? "struct" : "union");
+ DeclSpec::getSpecifierName(TagType));
// Push this token back into the preprocessor and change our current token
// to ';' so that the rest of the code recovers as though there were an
// ';' after the definition.
@@ -1667,7 +1688,8 @@ VirtSpecifiers::Specifier Parser::isCXX0XVirtSpecifier(const Token &Tok) const {
/// virt-specifier-seq:
/// virt-specifier
/// virt-specifier-seq virt-specifier
-void Parser::ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS) {
+void Parser::ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS,
+ bool IsInterface) {
while (true) {
VirtSpecifiers::Specifier Specifier = isCXX0XVirtSpecifier();
if (Specifier == VirtSpecifiers::VS_None)
@@ -1681,10 +1703,15 @@ void Parser::ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS) {
<< PrevSpec
<< FixItHint::CreateRemoval(Tok.getLocation());
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_override_control_keyword :
- diag::ext_override_control_keyword)
- << VirtSpecifiers::getSpecifierName(Specifier);
+ if (IsInterface && Specifier == VirtSpecifiers::VS_Final) {
+ Diag(Tok.getLocation(), diag::err_override_control_interface)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ } else {
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_override_control_keyword :
+ diag::ext_override_control_keyword)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ }
ConsumeToken();
}
}
@@ -1869,7 +1896,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class,
&CommonLateParsedAttrs);
- MultiTemplateParamsArg TemplateParams(Actions,
+ MultiTemplateParamsArg TemplateParams(
TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->data() : 0,
TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->size() : 0);
@@ -1905,7 +1932,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return;
}
- ParseOptionalCXX0XVirtSpecifierSeq(VS);
+ ParseOptionalCXX0XVirtSpecifierSeq(VS, getCurrentClass().IsInterface);
// If attributes exist after the declarator, but before an '{', parse them.
MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
@@ -2026,7 +2053,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// FIXME: When g++ adds support for this, we'll need to check whether it
// goes before or after the GNU attributes and __asm__.
- ParseOptionalCXX0XVirtSpecifierSeq(VS);
+ ParseOptionalCXX0XVirtSpecifierSeq(VS, getCurrentClass().IsInterface);
InClassInitStyle HasInClassInit = ICIS_NoInit;
if ((Tok.is(tok::equal) || Tok.is(tok::l_brace)) && !HasInitializer) {
@@ -2052,11 +2079,11 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (DS.isFriendSpecified()) {
// TODO: handle initializers, bitfields, 'delete'
ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
- move(TemplateParams));
+ TemplateParams);
} else {
ThisDecl = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS,
DeclaratorInfo,
- move(TemplateParams),
+ TemplateParams,
BitfieldSize.release(),
VS, HasInClassInit);
if (AccessAttrs)
@@ -2240,6 +2267,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
unsigned TagType, Decl *TagDecl) {
assert((TagType == DeclSpec::TST_struct ||
+ TagType == DeclSpec::TST_interface ||
TagType == DeclSpec::TST_union ||
TagType == DeclSpec::TST_class) && "Invalid TagType!");
@@ -2254,6 +2282,15 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
if (S->isClassScope()) {
// We're inside a class scope, so this is a nested class.
NonNestedClass = false;
+
+ // The Microsoft extension __interface does not permit nested classes.
+ if (getCurrentClass().IsInterface) {
+ Diag(RecordLoc, diag::err_invalid_member_in_interface)
+ << /*ErrorType=*/6
+ << (isa<NamedDecl>(TagDecl)
+ ? cast<NamedDecl>(TagDecl)->getQualifiedNameAsString()
+ : "<anonymous>");
+ }
break;
}
@@ -2274,7 +2311,8 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
// Note that we are parsing a new (potentially-nested) class definition.
- ParsingClassDefinition ParsingDef(*this, TagDecl, NonNestedClass);
+ ParsingClassDefinition ParsingDef(*this, TagDecl, NonNestedClass,
+ TagType == DeclSpec::TST_interface);
if (TagDecl)
Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
@@ -2286,9 +2324,14 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
assert(isCXX0XFinalKeyword() && "not a class definition");
FinalLoc = ConsumeToken();
- Diag(FinalLoc, getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_override_control_keyword :
- diag::ext_override_control_keyword) << "final";
+ if (TagType == DeclSpec::TST_interface) {
+ Diag(FinalLoc, diag::err_override_control_interface)
+ << "final";
+ } else {
+ Diag(FinalLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_override_control_keyword :
+ diag::ext_override_control_keyword) << "final";
+ }
}
if (Tok.is(tok::colon)) {
@@ -2348,6 +2391,11 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
continue;
}
+ if (Tok.is(tok::annot_pragma_align)) {
+ HandlePragmaAlign();
+ continue;
+ }
+
AccessSpecifier AS = getAccessSpecifierIfPresent();
if (AS != AS_none) {
// Current token is a C++ access specifier.
@@ -2373,6 +2421,13 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
<< FixItHint::CreateInsertion(EndLoc, ":");
}
+ // The Microsoft extension __interface does not permit non-public
+ // access specifiers.
+ if (TagType == DeclSpec::TST_interface && CurAS != AS_public) {
+ Diag(ASLoc, diag::err_access_specifier_interface)
+ << (CurAS == AS_protected);
+ }
+
if (Actions.ActOnAccessSpecifier(AS, ASLoc, EndLoc,
AccessAttrs.getList())) {
// found an attribute other than the annotations
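Taken together, these hunks wire up the Microsoft __interface extension; a minimal sketch, assuming compilation with -fms-extensions:

  __interface ISerialize {
    void write(int value);   // implicitly public and pure virtual
  };
  // 'final', non-public access specifiers, and nested classes are rejected here.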
@@ -2571,7 +2626,7 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
T.consumeOpen();
// Parse the optional expression-list.
- ExprVector ArgExprs(Actions);
+ ExprVector ArgExprs;
CommaLocsTy CommaLocs;
if (Tok.isNot(tok::r_paren) && ParseExpressionList(ArgExprs, CommaLocs)) {
SkipUntil(tok::r_paren);
@@ -2586,7 +2641,7 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
TemplateTypeTy, DS, IdLoc,
- T.getOpenLocation(), ArgExprs.take(),
+ T.getOpenLocation(), ArgExprs.data(),
ArgExprs.size(), T.getCloseLocation(),
EllipsisLoc);
}
@@ -2752,10 +2807,11 @@ TypeResult Parser::ParseTrailingReturnType(SourceRange &Range) {
/// so push that class onto our stack of classes that is currently
/// being parsed.
Sema::ParsingClassState
-Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass) {
+Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass,
+ bool IsInterface) {
assert((NonNestedClass || !ClassStack.empty()) &&
"Nested class without outer class");
- ClassStack.push(new ParsingClass(ClassDecl, NonNestedClass));
+ ClassStack.push(new ParsingClass(ClassDecl, NonNestedClass, IsInterface));
return Actions.PushParsingClass();
}
@@ -2773,9 +2829,6 @@ void Parser::DeallocateParsedClasses(Parser::ParsingClass *Class) {
/// This routine should be called when we have finished parsing the
/// definition of a class, but have not yet popped the Scope
/// associated with the class's definition.
-///
-/// \returns true if the class we've popped is a top-level class,
-/// false otherwise.
void Parser::PopParsingClass(Sema::ParsingClassState state) {
assert(!ClassStack.empty() && "Mismatched push/pop for class parsing");
@@ -2850,6 +2903,21 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
}
}
+static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
+ IdentifierInfo *ScopeName) {
+ switch (AttributeList::getKind(AttrName, ScopeName,
+ AttributeList::AS_CXX11)) {
+ case AttributeList::AT_CarriesDependency:
+ case AttributeList::AT_FallThrough:
+ case AttributeList::AT_NoReturn: {
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
/// ParseCXX11AttributeSpecifier - Parse a C++11 attribute-specifier. Currently
/// only parses standard attributes.
///
@@ -2934,46 +3002,38 @@ void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
}
}
+ bool StandardAttr = IsBuiltInOrStandardCXX11Attribute(AttrName,ScopeName);
bool AttrParsed = false;
- switch (AttributeList::getKind(AttrName, ScopeName,
- AttributeList::AS_CXX11)) {
- // No arguments
- case AttributeList::AT_CarriesDependency:
- // FIXME: implement generic support of attributes with C++11 syntax
- // see Parse/ParseDecl.cpp: ParseGNUAttributes
- case AttributeList::AT_FallThrough:
- case AttributeList::AT_NoReturn: {
- if (Tok.is(tok::l_paren)) {
- Diag(Tok.getLocation(), diag::err_cxx11_attribute_forbids_arguments)
- << AttrName->getName();
- break;
+
+ // Parse attribute arguments
+ if (Tok.is(tok::l_paren)) {
+ if (ScopeName && ScopeName->getName() == "gnu") {
+ ParseGNUAttributeArgs(AttrName, AttrLoc, attrs, endLoc,
+ ScopeName, ScopeLoc, AttributeList::AS_CXX11);
+ AttrParsed = true;
+ } else {
+ if (StandardAttr)
+ Diag(Tok.getLocation(), diag::err_cxx11_attribute_forbids_arguments)
+ << AttrName->getName();
+
+ // FIXME: handle other formats of c++11 attribute arguments
+ ConsumeParen();
+ SkipUntil(tok::r_paren, false);
}
+ }
+ if (!AttrParsed)
attrs.addNew(AttrName,
SourceRange(ScopeLoc.isValid() ? ScopeLoc : AttrLoc,
AttrLoc),
ScopeName, ScopeLoc, 0,
SourceLocation(), 0, 0, AttributeList::AS_CXX11);
- AttrParsed = true;
- break;
- }
-
- // Silence warnings
- default: break;
- }
-
- // Skip the entire parameter clause, if any
- if (!AttrParsed && Tok.is(tok::l_paren)) {
- ConsumeParen();
- // SkipUntil maintains the balancedness of tokens.
- SkipUntil(tok::r_paren, false);
- }
if (Tok.is(tok::ellipsis)) {
- if (AttrParsed)
- Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
- << AttrName->getName();
ConsumeToken();
+
+ Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
+ << AttrName->getName();
}
}
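A hedged illustration of the reworked C++11 attribute parsing: gnu-scoped attributes now accept argument lists, while arguments on the built-in standard attributes are diagnosed.

  [[gnu::aligned(16)]] int buf[4];   // arguments parsed via ParseGNUAttributeArgs
  [[noreturn]] void fatal();         // standard attribute; '(...)' here would error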
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
index 8d4668b..c7be0d3 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
@@ -179,7 +179,7 @@ static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
/// \endverbatim
ExprResult Parser::ParseExpression(TypeCastState isTypeCast) {
ExprResult LHS(ParseAssignmentExpression(isTypeCast));
- return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+ return ParseRHSOfBinaryExpression(LHS, prec::Comma);
}
/// This routine is called when the '@' is seen and consumed.
@@ -190,7 +190,7 @@ ExprResult Parser::ParseExpression(TypeCastState isTypeCast) {
ExprResult
Parser::ParseExpressionWithLeadingAt(SourceLocation AtLoc) {
ExprResult LHS(ParseObjCAtExpression(AtLoc));
- return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+ return ParseRHSOfBinaryExpression(LHS, prec::Comma);
}
/// This routine is called when a leading '__extension__' is seen and
@@ -210,7 +210,7 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
LHS = Actions.ActOnUnaryOp(getCurScope(), ExtLoc, tok::kw___extension__,
LHS.take());
- return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+ return ParseRHSOfBinaryExpression(LHS, prec::Comma);
}
/// \brief Parse an expr that doesn't include (top-level) commas.
@@ -227,7 +227,7 @@ ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
ExprResult LHS = ParseCastExpression(/*isUnaryExpression=*/false,
/*isAddressOfOperand=*/false,
isTypeCast);
- return ParseRHSOfBinaryExpression(move(LHS), prec::Assignment);
+ return ParseRHSOfBinaryExpression(LHS, prec::Assignment);
}
/// \brief Parse an assignment expression where part of an Objective-C message
@@ -265,6 +265,17 @@ ExprResult Parser::ParseConstantExpression(TypeCastState isTypeCast) {
return Actions.ActOnConstantExpression(Res);
}
+bool Parser::isNotExpressionStart() {
+ tok::TokenKind K = Tok.getKind();
+ if (K == tok::l_brace || K == tok::r_brace ||
+ K == tok::kw_for || K == tok::kw_while ||
+ K == tok::kw_if || K == tok::kw_else ||
+ K == tok::kw_goto || K == tok::kw_try)
+ return true;
+ // If this is a decl-specifier, we can't be at the start of an expression.
+ return isKnownToBeDeclarationSpecifier();
+}
+
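The recovery this enables, sketched with deliberately ill-formed input: the trailing comma is diagnosed and the '}' is pushed back rather than swallowed.

  int f() { return 1, }   // ill-formed; parsing stops at ',' and resumes at '}'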
/// \brief Parse a binary expression that starts with \p LHS and has a
/// precedence of at least \p MinPrec.
ExprResult
@@ -279,12 +290,23 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// because we are called recursively, or because the token is not a binop),
// then we are done!
if (NextTokPrec < MinPrec)
- return move(LHS);
+ return LHS;
// Consume the operator, saving the operator token for error reporting.
Token OpToken = Tok;
ConsumeToken();
+ // Bail out when encountering a comma followed by a token which can't
+ // possibly be the start of an expression. For instance:
+ // int f() { return 1, }
+ // We can't do this before consuming the comma, because
+ // isNotExpressionStart() looks at the token stream.
+ if (OpToken.is(tok::comma) && isNotExpressionStart()) {
+ PP.EnterToken(Tok);
+ Tok = OpToken;
+ return LHS;
+ }
+
// Special case handling for the ternary operator.
ExprResult TernaryMiddle(true);
if (NextTokPrec == prec::Conditional) {
@@ -458,7 +480,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
isTypeCast);
if (NotCastExpr)
Diag(Tok, diag::err_expected_expression);
- return move(Res);
+ return Res;
}
namespace {
@@ -698,7 +720,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case CastExpr:
// We have parsed the cast-expression and no postfix-expr pieces are
// following.
- return move(Res);
+ return Res;
}
break;
@@ -741,6 +763,53 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// Avoid the unnecessary parse-time lookup in the common case
// where the syntax forbids a type.
const Token &Next = NextToken();
+
+ // If this identifier was reverted from a token ID, and the next token
+ // is a parenthesis, this is likely to be a use of a type trait. Check
+ // those tokens.
+ if (Next.is(tok::l_paren) &&
+ Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier()) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ // Build up the mapping of revertable type traits, for future use.
+ if (RevertableTypeTraits.empty()) {
+#define RTT_JOIN(X,Y) X##Y
+#define REVERTABLE_TYPE_TRAIT(Name) \
+ RevertableTypeTraits[PP.getIdentifierInfo(#Name)] \
+ = RTT_JOIN(tok::kw_,Name)
+
+ REVERTABLE_TYPE_TRAIT(__is_arithmetic);
+ REVERTABLE_TYPE_TRAIT(__is_convertible);
+ REVERTABLE_TYPE_TRAIT(__is_empty);
+ REVERTABLE_TYPE_TRAIT(__is_floating_point);
+ REVERTABLE_TYPE_TRAIT(__is_function);
+ REVERTABLE_TYPE_TRAIT(__is_fundamental);
+ REVERTABLE_TYPE_TRAIT(__is_integral);
+ REVERTABLE_TYPE_TRAIT(__is_member_function_pointer);
+ REVERTABLE_TYPE_TRAIT(__is_member_pointer);
+ REVERTABLE_TYPE_TRAIT(__is_pod);
+ REVERTABLE_TYPE_TRAIT(__is_pointer);
+ REVERTABLE_TYPE_TRAIT(__is_same);
+ REVERTABLE_TYPE_TRAIT(__is_scalar);
+ REVERTABLE_TYPE_TRAIT(__is_signed);
+ REVERTABLE_TYPE_TRAIT(__is_unsigned);
+ REVERTABLE_TYPE_TRAIT(__is_void);
+#undef REVERTABLE_TYPE_TRAIT
+#undef RTT_JOIN
+ }
+
+ // If we find that this is in fact the name of a type trait,
+ // update the token kind in place and parse again to treat it as
+ // the appropriate kind of type trait.
+ llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind>::iterator Known
+ = RevertableTypeTraits.find(II);
+ if (Known != RevertableTypeTraits.end()) {
+ Tok.setKind(Known->second);
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
+ }
+
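A sketch of the pattern this accepts, assuming Clang's libstdc++-compatibility rule that lets these trait keywords be declared as ordinary names:

  struct __is_pod { };            // the keyword is reverted to an identifier
  const bool b = __is_pod(int);   // and promoted back to the type trait here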
if (Next.is(tok::coloncolon) ||
(!ColonIsSacred && Next.is(tok::colon)) ||
Next.is(tok::less) ||
@@ -758,7 +827,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// '.'.
IdentifierInfo &II = *Tok.getIdentifierInfo();
SourceLocation ILoc = ConsumeToken();
-
+
// Support 'Class.property' and 'super.property' notation.
if (getLangOpts().ObjC1 && Tok.is(tok::period) &&
(Actions.getTypeName(II, ILoc, getCurScope()) ||
@@ -888,7 +957,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = ParseCastExpression(!getLangOpts().CPlusPlus);
if (!Res.isInvalid())
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
- return move(Res);
+ return Res;
}
case tok::amp: { // unary-expression: '&' cast-expression
// Special treatment because of member pointers
@@ -896,7 +965,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = ParseCastExpression(false, true);
if (!Res.isInvalid())
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
- return move(Res);
+ return Res;
}
case tok::star: // unary-expression: '*' cast-expression
@@ -910,7 +979,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = ParseCastExpression(false);
if (!Res.isInvalid())
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
- return move(Res);
+ return Res;
}
case tok::kw___extension__:{//unary-expression:'__extension__' cast-expr [GNU]
@@ -920,7 +989,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = ParseCastExpression(false);
if (!Res.isInvalid())
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
- return move(Res);
+ return Res;
}
case tok::kw__Alignof: // unary-expression: '_Alignof' '(' type-name ')'
if (!getLangOpts().C11)
@@ -946,7 +1015,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Tok.getLocation());
Res = Actions.ActOnAddrLabel(AmpAmpLoc, Tok.getLocation(), LD);
ConsumeToken();
- return move(Res);
+ return Res;
}
case tok::kw_const_cast:
case tok::kw_dynamic_cast:
@@ -1132,13 +1201,14 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
if (!Result.isInvalid())
Result = Actions.ActOnNoexceptExpr(KeyLoc, T.getOpenLocation(),
Result.take(), T.getCloseLocation());
- return move(Result);
+ return Result;
}
case tok::kw___is_abstract: // [GNU] unary-type-trait
case tok::kw___is_class:
case tok::kw___is_empty:
case tok::kw___is_enum:
+ case tok::kw___is_interface_class:
case tok::kw___is_literal:
case tok::kw___is_arithmetic:
case tok::kw___is_integral:
@@ -1270,7 +1340,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
switch (Tok.getKind()) {
case tok::code_completion:
if (InMessageExpression)
- return move(LHS);
+ return LHS;
Actions.CodeCompletePostfixExpression(getCurScope(), LHS);
cutOffParsing();
@@ -1290,7 +1360,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// Fall through; this isn't a message send.
default: // Not a postfix-expression suffix.
- return move(LHS);
+ return LHS;
case tok::l_square: { // postfix-expression: p-e '[' expression ']'
// If we have an array postfix expression that starts on a new line and
// Objective-C is enabled, it is highly likely that the user forgot a
@@ -1300,7 +1370,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// expression and recover by pretending there is no suffix.
if (getLangOpts().ObjC1 && Tok.isAtStartOfLine() &&
isSimpleObjCMessageExpression())
- return move(LHS);
+ return LHS;
// Reject array indices starting with a lambda-expression. '[[' is
// reserved for attributes.
@@ -1341,7 +1411,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
BalancedDelimiterTracker PT(*this, tok::l_paren);
if (OpKind == tok::lesslessless) {
- ExprVector ExecConfigExprs(Actions);
+ ExprVector ExecConfigExprs;
CommaLocsTy ExecConfigCommaLocs;
SourceLocation OpenLoc = ConsumeToken();
@@ -1372,7 +1442,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (!LHS.isInvalid()) {
ExprResult ECResult = Actions.ActOnCUDAExecConfigExpr(getCurScope(),
OpenLoc,
- move_arg(ExecConfigExprs),
+ ExecConfigExprs,
CloseLoc);
if (ECResult.isInvalid())
LHS = ExprError();
@@ -1384,7 +1454,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
Loc = PT.getOpenLocation();
}
- ExprVector ArgExprs(Actions);
+ ExprVector ArgExprs;
CommaLocsTy CommaLocs;
if (Tok.is(tok::code_completion)) {
@@ -1414,7 +1484,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
ArgExprs.size()-1 == CommaLocs.size())&&
"Unexpected number of commas!");
LHS = Actions.ActOnCallExpr(getCurScope(), LHS.take(), Loc,
- move_arg(ArgExprs), Tok.getLocation(),
+ ArgExprs, Tok.getLocation(),
ExecConfig);
PT.consumeClose();
}
@@ -1583,7 +1653,7 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
// If we get here, the operand to the typeof/sizeof/alignof was an expression.
isCastExpr = false;
- return move(Operand);
+ return Operand;
}
@@ -1653,7 +1723,8 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
if (OpTok.is(tok::kw_alignof) || OpTok.is(tok::kw__Alignof))
Diag(OpTok, diag::warn_cxx98_compat_alignof);
- EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
bool isCastExpr;
ParsedType CastTy;
@@ -1684,7 +1755,7 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
/*isType=*/false,
Operand.release(),
CastRange);
- return move(Operand);
+ return Operand;
}
/// ParseBuiltinPrimaryExpression
@@ -1796,7 +1867,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
Res = ParseExpression();
if (Res.isInvalid()) {
SkipUntil(tok::r_paren);
- return move(Res);
+ return Res;
}
Comps.back().U.E = Res.release();
@@ -1823,7 +1894,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
ExprResult Cond(ParseAssignmentExpression());
if (Cond.isInvalid()) {
SkipUntil(tok::r_paren);
- return move(Cond);
+ return Cond;
}
if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
return ExprError();
@@ -1831,7 +1902,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
ExprResult Expr1(ParseAssignmentExpression());
if (Expr1.isInvalid()) {
SkipUntil(tok::r_paren);
- return move(Expr1);
+ return Expr1;
}
if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
return ExprError();
@@ -1839,7 +1910,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
ExprResult Expr2(ParseAssignmentExpression());
if (Expr2.isInvalid()) {
SkipUntil(tok::r_paren);
- return move(Expr2);
+ return Expr2;
}
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_expected_rparen);
@@ -2083,7 +2154,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
DeclaratorInfo, CastTy,
RParenLoc, Result.take());
}
- return move(Result);
+ return Result;
}
Diag(Tok, diag::err_expected_lbrace_in_compound_literal);
@@ -2093,13 +2164,13 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// Parse the expression-list.
InMessageExpressionRAIIObject InMessage(*this, false);
- ExprVector ArgExprs(Actions);
+ ExprVector ArgExprs;
CommaLocsTy CommaLocs;
if (!ParseExpressionList(ArgExprs, CommaLocs)) {
ExprType = SimpleExpr;
Result = Actions.ActOnParenListExpr(OpenLoc, Tok.getLocation(),
- move_arg(ArgExprs));
+ ArgExprs);
}
} else {
InMessageExpressionRAIIObject InMessage(*this, false);
@@ -2120,7 +2191,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
T.consumeClose();
RParenLoc = T.getCloseLocation();
- return move(Result);
+ return Result;
}
/// ParseCompoundLiteralExpression - We have parsed the parenthesized type-name
@@ -2141,7 +2212,7 @@ Parser::ParseCompoundLiteralExpression(ParsedType Ty,
ExprResult Result = ParseInitializer();
if (!Result.isInvalid() && Ty)
return Actions.ActOnCompoundLiteral(LParenLoc, Ty, RParenLoc, Result.take());
- return move(Result);
+ return Result;
}
/// ParseStringLiteralExpression - This handles the various token types that
@@ -2211,8 +2282,8 @@ ExprResult Parser::ParseGenericSelectionExpression() {
}
SourceLocation DefaultLoc;
- TypeVector Types(Actions);
- ExprVector Exprs(Actions);
+ TypeVector Types;
+ ExprVector Exprs;
while (1) {
ParsedType Ty;
if (Tok.is(tok::kw_default)) {
@@ -2263,7 +2334,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
return Actions.ActOnGenericSelectionExpr(KeyLoc, DefaultLoc,
T.getCloseLocation(),
ControllingExpr.release(),
- move_arg(Types), move_arg(Exprs));
+ Types, Exprs);
}
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
@@ -2415,18 +2486,28 @@ ExprResult Parser::ParseBlockLiteralExpression() {
} else {
// Otherwise, pretend we saw (void).
ParsedAttributes attrs(AttrFactory);
- ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(true, false, false,
- SourceLocation(),
- 0, 0, 0,
- true, SourceLocation(),
- SourceLocation(),
- SourceLocation(),
- SourceLocation(),
- EST_None,
- SourceLocation(),
- 0, 0, 0, 0,
- CaretLoc, CaretLoc,
- ParamInfo),
+ SourceLocation NoLoc;
+ ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/true,
+ /*IsAmbiguous=*/false,
+ /*RParenLoc=*/NoLoc,
+ /*ArgInfo=*/0,
+ /*NumArgs=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*MutableLoc=*/NoLoc,
+ EST_None,
+ /*ESpecLoc=*/NoLoc,
+ /*Exceptions=*/0,
+ /*ExceptionRanges=*/0,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/0,
+ CaretLoc, CaretLoc,
+ ParamInfo),
attrs, CaretLoc);
MaybeParseGNUAttributes(ParamInfo);
@@ -2450,7 +2531,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
Result = Actions.ActOnBlockStmtExpr(CaretLoc, Stmt.take(), getCurScope());
else
Actions.ActOnBlockError(CaretLoc, getCurScope());
- return move(Result);
+ return Result;
}
/// ParseObjCBoolLiteral - This handles the objective-c Boolean literals.
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
index afac257..2f615e1 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
@@ -96,6 +96,45 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
/*AtDigraph*/false);
}
+/// \brief Emits an error for a left parenthesis after a double colon.
+///
+/// When a '(' is found after a '::', emit an error. Attempt to fix the token
+/// stream by removing the '(', and the matching ')' if one is found.
+void Parser::CheckForLParenAfterColonColon() {
+ if (!Tok.is(tok::l_paren))
+ return;
+
+ SourceLocation l_parenLoc = ConsumeParen(), r_parenLoc;
+ Token Tok1 = getCurToken();
+ if (!Tok1.is(tok::identifier) && !Tok1.is(tok::star))
+ return;
+
+ if (Tok1.is(tok::identifier)) {
+ Token Tok2 = GetLookAheadToken(1);
+ if (Tok2.is(tok::r_paren)) {
+ ConsumeToken();
+ PP.EnterToken(Tok1);
+ r_parenLoc = ConsumeParen();
+ }
+ } else if (Tok1.is(tok::star)) {
+ Token Tok2 = GetLookAheadToken(1);
+ if (Tok2.is(tok::identifier)) {
+ Token Tok3 = GetLookAheadToken(2);
+ if (Tok3.is(tok::r_paren)) {
+ ConsumeToken();
+ ConsumeToken();
+ PP.EnterToken(Tok2);
+ PP.EnterToken(Tok1);
+ r_parenLoc = ConsumeParen();
+ }
+ }
+ }
+
+ Diag(l_parenLoc, diag::err_paren_after_colon_colon)
+ << FixItHint::CreateRemoval(l_parenLoc)
+ << FixItHint::CreateRemoval(r_parenLoc);
+}
+
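An illustration of the token-stream repair above, using deliberately ill-formed input:

  int y = 0;
  int x = ::(y);   // error; fix-it removes '(' and ')' and recovers as '::y'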
/// \brief Parse global scope or nested-name-specifier if present.
///
/// Parses a C++ global scope specifier ('::') or nested-name-specifier (which
@@ -160,7 +199,9 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// '::' - Global scope qualifier.
if (Actions.ActOnCXXGlobalScopeSpecifier(getCurScope(), ConsumeToken(), SS))
return true;
-
+
+ CheckForLParenAfterColonColon();
+
HasScopeSpecifier = true;
}
@@ -301,8 +342,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
HasScopeSpecifier = true;
- ASTTemplateArgsPtr TemplateArgsPtr(Actions,
- TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(),
@@ -372,6 +412,8 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
"NextToken() not working properly!");
SourceLocation CCLoc = ConsumeToken();
+ CheckForLParenAfterColonColon();
+
HasScopeSpecifier = true;
if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(), II, IdLoc, CCLoc,
ObjectType, EnteringContext, SS))
@@ -757,10 +799,10 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Scope::FunctionPrototypeScope |
Scope::DeclScope);
- SourceLocation DeclLoc, DeclEndLoc;
+ SourceLocation DeclEndLoc;
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- DeclLoc = T.getOpenLocation();
+ SourceLocation LParenLoc = T.getOpenLocation();
// Parse parameter-declaration-clause.
ParsedAttributes Attr(AttrFactory);
@@ -771,7 +813,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
ParseParameterDeclarationClause(D, Attr, ParamInfo, EllipsisLoc);
T.consumeClose();
- DeclEndLoc = T.getCloseLocation();
+ SourceLocation RParenLoc = T.getCloseLocation();
+ DeclEndLoc = RParenLoc;
// Parse 'mutable'[opt].
SourceLocation MutableLoc;
@@ -797,9 +840,12 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// Parse attribute-specifier[opt].
MaybeParseCXX0XAttributes(Attr, &DeclEndLoc);
+ SourceLocation FunLocalRangeEnd = DeclEndLoc;
+
// Parse trailing-return-type[opt].
TypeResult TrailingReturnType;
if (Tok.is(tok::arrow)) {
+ FunLocalRangeEnd = Tok.getLocation();
SourceRange Range;
TrailingReturnType = ParseTrailingReturnType(Range);
if (Range.getEnd().isValid())
@@ -808,15 +854,17 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
PrototypeScope.Exit();
+ SourceLocation NoLoc;
D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
- /*isVariadic=*/EllipsisLoc.isValid(),
- /*isAmbiguous=*/false, EllipsisLoc,
+ /*isAmbiguous=*/false,
+ LParenLoc,
ParamInfo.data(), ParamInfo.size(),
+ EllipsisLoc, RParenLoc,
DS.getTypeQualifiers(),
/*RefQualifierIsLValueRef=*/true,
- /*RefQualifierLoc=*/SourceLocation(),
- /*ConstQualifierLoc=*/SourceLocation(),
- /*VolatileQualifierLoc=*/SourceLocation(),
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
MutableLoc,
ESpecType, ESpecRange.getBegin(),
DynamicExceptions.data(),
@@ -824,7 +872,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
DynamicExceptions.size(),
NoexceptExpr.isUsable() ?
NoexceptExpr.get() : 0,
- DeclLoc, DeclEndLoc, D,
+ LParenLoc, FunLocalRangeEnd, D,
TrailingReturnType),
Attr, DeclEndLoc);
} else if (Tok.is(tok::kw_mutable) || Tok.is(tok::arrow)) {
@@ -853,25 +901,28 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
ParsedAttributes Attr(AttrFactory);
+ SourceLocation NoLoc;
D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
- /*isVariadic=*/false,
- /*isAmbiguous=*/false,
- /*EllipsisLoc=*/SourceLocation(),
- /*Params=*/0, /*NumParams=*/0,
- /*TypeQuals=*/0,
- /*RefQualifierIsLValueRef=*/true,
- /*RefQualifierLoc=*/SourceLocation(),
- /*ConstQualifierLoc=*/SourceLocation(),
- /*VolatileQualifierLoc=*/SourceLocation(),
- MutableLoc,
- EST_None,
- /*ESpecLoc=*/SourceLocation(),
- /*Exceptions=*/0,
- /*ExceptionRanges=*/0,
- /*NumExceptions=*/0,
- /*NoexceptExpr=*/0,
- DeclLoc, DeclEndLoc, D,
- TrailingReturnType),
+ /*isAmbiguous=*/false,
+ /*LParenLoc=*/NoLoc,
+ /*Params=*/0,
+ /*NumParams=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ MutableLoc,
+ EST_None,
+ /*ESpecLoc=*/NoLoc,
+ /*Exceptions=*/0,
+ /*ExceptionRanges=*/0,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/0,
+ DeclLoc, DeclEndLoc, D,
+ TrailingReturnType),
Attr, DeclEndLoc);
}
@@ -926,10 +977,11 @@ ExprResult Parser::ParseCXXCasts() {
// Check for "<::" which is parsed as "[:". If found, fix token stream,
// diagnose error, suggest fix, and recover parsing.
- Token Next = NextToken();
- if (Tok.is(tok::l_square) && Tok.getLength() == 2 && Next.is(tok::colon) &&
- areTokensAdjacent(Tok, Next))
- FixDigraph(*this, PP, Tok, Next, Kind, /*AtDigraph*/true);
+ if (Tok.is(tok::l_square) && Tok.getLength() == 2) {
+ Token Next = NextToken();
+ if (Next.is(tok::colon) && areTokensAdjacent(Tok, Next))
+ FixDigraph(*this, PP, Tok, Next, Kind, /*AtDigraph*/true);
+ }
if (ExpectAndConsume(tok::less, diag::err_expected_less_after, CastName))
return ExprError();
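The digraph case being repaired, for reference: '<::' lexes as '[:', so 'static_cast<::T*>(p)' needs a space, which is now suggested only after confirming a two-character '[' token.

  struct T { };
  void *p = 0;
  T *q = static_cast< ::T * >(p);   // the well-formed spelling after the fix-it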
@@ -965,7 +1017,7 @@ ExprResult Parser::ParseCXXCasts() {
T.getOpenLocation(), Result.take(),
T.getCloseLocation());
- return move(Result);
+ return Result;
}
/// ParseCXXTypeid - This handles the C++ typeid expression.
@@ -988,6 +1040,22 @@ ExprResult Parser::ParseCXXTypeid() {
ExprResult Result;
+ // C++0x [expr.typeid]p3:
+ // When typeid is applied to an expression other than an lvalue of a
+ // polymorphic class type [...] The expression is an unevaluated
+ // operand (Clause 5).
+ //
+ // Note that we can't tell whether the expression is an lvalue of a
+ // polymorphic class type until after we've parsed the expression; we
+ // speculatively assume the subexpression is unevaluated, and fix it up
+ // later.
+ //
+ // We enter the unevaluated context before trying to determine whether we
+ // have a type-id, because the tentative parse logic will try to resolve
+ // names, and must treat them as unevaluated.
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+
if (isTypeIdInParens()) {
TypeResult Ty = ParseTypeName();
@@ -1000,16 +1068,6 @@ ExprResult Parser::ParseCXXTypeid() {
Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/true,
Ty.get().getAsOpaquePtr(), RParenLoc);
} else {
- // C++0x [expr.typeid]p3:
- // When typeid is applied to an expression other than an lvalue of a
- // polymorphic class type [...] The expression is an unevaluated
- // operand (Clause 5).
- //
- // Note that we can't tell whether the expression is an lvalue of a
- // polymorphic class type until after we've parsed the expression; we
- // speculatively assume the subexpression is unevaluated, and fix it up
- // later.
- EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
Result = ParseExpression();
// Match the ')'.
@@ -1026,7 +1084,7 @@ ExprResult Parser::ParseCXXTypeid() {
}
}
- return move(Result);
+ return Result;
}
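A minimal use of the now-hoisted unevaluated context: the operand is not executed regardless of whether it turns out to be a type-id or an expression.

  #include <typeinfo>
  int n = 0;
  const std::type_info &ti = typeid(n + 1);   // unevaluated operand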
/// ParseCXXUuidof - This handles the Microsoft C++ __uuidof expression.
@@ -1074,7 +1132,7 @@ ExprResult Parser::ParseCXXUuidof() {
}
}
- return move(Result);
+ return Result;
}
/// \brief Parse a C++ pseudo-destructor expression after the base,
@@ -1196,7 +1254,7 @@ ExprResult Parser::ParseThrowExpression() {
default:
ExprResult Expr(ParseAssignmentExpression());
- if (Expr.isInvalid()) return move(Expr);
+ if (Expr.isInvalid()) return Expr;
return Actions.ActOnCXXThrow(getCurScope(), ThrowLoc, Expr.take());
}
}
@@ -1245,7 +1303,7 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- ExprVector Exprs(Actions);
+ ExprVector Exprs;
CommaLocsTy CommaLocs;
if (Tok.isNot(tok::r_paren)) {
@@ -1265,7 +1323,7 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
assert((Exprs.size() == 0 || Exprs.size()-1 == CommaLocs.size())&&
"Unexpected number of commas!");
return Actions.ActOnCXXTypeConstructExpr(TypeRep, T.getOpenLocation(),
- move_arg(Exprs),
+ Exprs,
T.getCloseLocation());
}
}
@@ -1280,11 +1338,11 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
/// '=' assignment-expression
///
-/// \param ExprResult if the condition was parsed as an expression, the
-/// parsed expression.
+/// \param ExprOut if the condition was parsed as an expression, the parsed
+/// expression.
///
-/// \param DeclResult if the condition was parsed as a declaration, the
-/// parsed declaration.
+/// \param DeclOut if the condition was parsed as a declaration, the parsed
+/// declaration.
///
/// \param Loc The location of the start of the statement that requires this
/// condition, e.g., the "for" in a for loop.
@@ -1714,8 +1772,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
}
// Bundle the template arguments together.
- ASTTemplateArgsPtr TemplateArgsPtr(Actions, TemplateArgs.data(),
- TemplateArgs.size());
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateArgs);
// Constructor and destructor names.
TypeResult Type
@@ -1762,7 +1819,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
/// ptr-operator conversion-declarator[opt]
/// \endcode
///
-/// \param The nested-name-specifier that preceded this unqualified-id. If
+/// \param SS The nested-name-specifier that preceded this unqualified-id. If
/// non-empty, then we are parsing the unqualified-id of a qualified-id.
///
/// \param EnteringContext whether we are entering the scope of the
@@ -1867,8 +1924,9 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// Parse a literal-operator-id.
//
- // literal-operator-id: [C++0x 13.5.8]
- // operator "" identifier
+ // literal-operator-id: C++11 [over.literal]
+ // operator string-literal identifier
+ // operator user-defined-string-literal
if (getLangOpts().CPlusPlus0x && isTokenStringLiteral()) {
Diag(Tok.getLocation(), diag::warn_cxx98_compat_literal_operator);
@@ -1882,6 +1940,9 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
llvm::SmallVector<SourceLocation, 4> TokLocs;
while (isTokenStringLiteral()) {
if (!Tok.is(tok::string_literal) && !DiagId) {
+ // C++11 [over.literal]p1:
+ // The string-literal or user-defined-string-literal in a
+ // literal-operator-id shall have no encoding-prefix [...].
DiagLoc = Tok.getLocation();
DiagId = diag::err_literal_operator_string_prefix;
}
@@ -1903,9 +1964,6 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
Lexer::AdvanceToTokenCharacter(TokLocs[Literal.getUDSuffixToken()],
Literal.getUDSuffixOffset(),
PP.getSourceManager(), getLangOpts());
- // This form is not permitted by the standard (yet).
- DiagLoc = SuffixLoc;
- DiagId = diag::err_literal_operator_missing_space;
} else if (Tok.is(tok::identifier)) {
II = Tok.getIdentifierInfo();
SuffixLoc = ConsumeToken();
@@ -1917,6 +1975,10 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// The string literal must be empty.
if (!Literal.GetString().empty() || Literal.Pascal) {
+ // C++11 [over.literal]p1:
+ // The string-literal or user-defined-string-literal in a
+ // literal-operator-id shall [...] contain no characters
+ // other than the implicit terminating '\0'.
DiagLoc = TokLocs.front();
DiagId = diag::err_literal_operator_string_not_empty;
}
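Both literal-operator-id spellings now parse; the removed diagnostic had rejected the second, user-defined-string-literal form.

  unsigned long long operator "" _kb(unsigned long long v) { return v << 10; }
  unsigned long long operator ""_mb(unsigned long long v) { return v << 20; }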
@@ -1981,7 +2043,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
///
/// \endcode
///
-/// \param The nested-name-specifier that preceded this unqualified-id. If
+/// \param SS The nested-name-specifier that preceded this unqualified-id. If
/// non-empty, then we are parsing the unqualified-id of a qualified-id.
///
/// \param EnteringContext whether we are entering the scope of the
@@ -2209,7 +2271,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
// A '(' now can be a new-placement or the '(' wrapping the type-id in the
// second form of new-expression. It can't be a new-type-id.
- ExprVector PlacementArgs(Actions);
+ ExprVector PlacementArgs;
SourceLocation PlacementLParen, PlacementRParen;
SourceRange TypeIdParens;
@@ -2279,7 +2341,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
if (Tok.is(tok::l_paren)) {
SourceLocation ConstructorLParen, ConstructorRParen;
- ExprVector ConstructorArgs(Actions);
+ ExprVector ConstructorArgs;
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
ConstructorLParen = T.getOpenLocation();
@@ -2298,7 +2360,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
}
Initializer = Actions.ActOnParenListExpr(ConstructorLParen,
ConstructorRParen,
- move_arg(ConstructorArgs));
+ ConstructorArgs);
} else if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus0x) {
Diag(Tok.getLocation(),
diag::warn_cxx98_compat_generalized_initializer_lists);
@@ -2308,7 +2370,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
return Initializer;
return Actions.ActOnCXXNew(Start, UseGlobal, PlacementLParen,
- move_arg(PlacementArgs), PlacementRParen,
+ PlacementArgs, PlacementRParen,
TypeIdParens, DeclaratorInfo, Initializer.take());
}
@@ -2422,7 +2484,7 @@ Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
ExprResult Operand(ParseCastExpression(false));
if (Operand.isInvalid())
- return move(Operand);
+ return Operand;
return Actions.ActOnCXXDelete(Start, UseGlobal, ArrayDelete, Operand.take());
}
@@ -2453,6 +2515,7 @@ static UnaryTypeTrait UnaryTypeTraitFromTokKind(tok::TokenKind kind) {
case tok::kw___is_function: return UTT_IsFunction;
case tok::kw___is_fundamental: return UTT_IsFundamental;
case tok::kw___is_integral: return UTT_IsIntegral;
+ case tok::kw___is_interface_class: return UTT_IsInterfaceClass;
case tok::kw___is_lvalue_reference: return UTT_IsLvalueReference;
case tok::kw___is_member_function_pointer: return UTT_IsMemberFunctionPointer;
case tok::kw___is_member_object_pointer: return UTT_IsMemberObjectPointer;
@@ -2804,7 +2867,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
Result = Actions.ActOnCastExpr(getCurScope(), Tracker.getOpenLocation(),
DeclaratorInfo, CastTy,
Tracker.getCloseLocation(), Result.take());
- return move(Result);
+ return Result;
}
// Not a compound literal, and not followed by a cast-expression.
@@ -2823,5 +2886,5 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
}
Tracker.consumeClose();
- return move(Result);
+ return Result;
}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
index 1c349fd..e47fd9b 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
@@ -313,7 +313,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
Idx = ParseAssignmentExpression();
if (Idx.isInvalid()) {
SkipUntil(tok::r_square);
- return move(Idx);
+ return Idx;
}
}
@@ -341,7 +341,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
ExprResult RHS(ParseConstantExpression());
if (RHS.isInvalid()) {
SkipUntil(tok::r_square);
- return move(RHS);
+ return RHS;
}
Desig.AddDesignator(Designator::getArrayRange(Idx.release(),
RHS.release(),
@@ -405,15 +405,14 @@ ExprResult Parser::ParseBraceInitializer() {
/// InitExprs - This is the actual list of expressions contained in the
/// initializer.
- ExprVector InitExprs(Actions);
+ ExprVector InitExprs;
if (Tok.is(tok::r_brace)) {
// Empty initializers are a C++ feature and a GNU extension to C.
if (!getLangOpts().CPlusPlus)
Diag(LBraceLoc, diag::ext_gnu_empty_initializer);
// Match the '}'.
- return Actions.ActOnInitList(LBraceLoc, MultiExprArg(Actions),
- ConsumeBrace());
+ return Actions.ActOnInitList(LBraceLoc, MultiExprArg(), ConsumeBrace());
}
bool InitExprsOk = true;
@@ -476,7 +475,7 @@ ExprResult Parser::ParseBraceInitializer() {
bool closed = !T.consumeClose();
if (InitExprsOk && closed)
- return Actions.ActOnInitList(LBraceLoc, move_arg(InitExprs),
+ return Actions.ActOnInitList(LBraceLoc, InitExprs,
T.getCloseLocation());
return ExprError(); // an error occurred.
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
index db35a38..d321baf 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
@@ -18,6 +18,7 @@
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
@@ -1031,7 +1032,6 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
Scope::FunctionPrototypeScope|Scope::DeclScope);
AttributePool allParamAttrs(AttrFactory);
-
while (1) {
ParsedAttributes paramAttrs(AttrFactory);
Sema::ObjCArgInfo ArgInfo;
@@ -1102,6 +1102,14 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
SelIdent = ParseObjCSelectorPiece(selLoc);
if (!SelIdent && Tok.isNot(tok::colon))
break;
+ if (!SelIdent) {
+ SourceLocation ColonLoc = Tok.getLocation();
+ if (PP.getLocForEndOfToken(ArgInfo.NameLoc) == ColonLoc) {
+ Diag(ArgInfo.NameLoc, diag::warn_missing_selector_name) << ArgInfo.Name;
+ Diag(ArgInfo.NameLoc, diag::note_missing_selector_name) << ArgInfo.Name;
+ Diag(ColonLoc, diag::note_force_empty_selector_name) << ArgInfo.Name;
+ }
+ }
// We have a selector or a colon, continue parsing.
}
@@ -1806,7 +1814,7 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
Diag(Tok, diag::err_expected_lbrace);
return StmtError();
}
- StmtVector CatchStmts(Actions);
+ StmtVector CatchStmts;
StmtResult FinallyStmt;
ParseScope TryScope(this, Scope::DeclScope);
StmtResult TryBody(ParseCompoundStatementBody());
@@ -1894,7 +1902,7 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
}
return Actions.ActOnObjCAtTryStmt(atLoc, TryBody.take(),
- move_arg(CatchStmts),
+ CatchStmts,
FinallyStmt.take());
}
@@ -2061,13 +2069,13 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
ExprResult Lit(Actions.ActOnNumericConstant(Tok));
if (Lit.isInvalid()) {
- return move(Lit);
+ return Lit;
}
ConsumeToken(); // Consume the literal token.
Lit = Actions.ActOnUnaryOp(getCurScope(), OpLoc, Kind, Lit.take());
if (Lit.isInvalid())
- return move(Lit);
+ return Lit;
return ParsePostfixExpressionSuffix(
Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
@@ -2134,7 +2142,7 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
}
}
-/// \brirg Parse the receiver of an Objective-C++ message send.
+/// \brief Parse the receiver of an Objective-C++ message send.
///
/// This routine parses the receiver of a message send in
/// Objective-C++ either as a type or as an expression. Note that this
@@ -2346,7 +2354,7 @@ ExprResult Parser::ParseObjCMessageExpression() {
ExprResult Res(ParseExpression());
if (Res.isInvalid()) {
SkipUntil(tok::r_square);
- return move(Res);
+ return Res;
}
return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
@@ -2418,7 +2426,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
SmallVector<IdentifierInfo *, 12> KeyIdents;
SmallVector<SourceLocation, 12> KeyLocs;
- ExprVector KeyExprs(Actions);
+ ExprVector KeyExprs;
if (Tok.is(tok::colon)) {
while (1) {
@@ -2465,7 +2473,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
// stop at the ']' when it skips to the ';'. We want it to skip beyond
// the enclosing expression.
SkipUntil(tok::r_square);
- return move(Res);
+ return Res;
}
// We have a valid expression.
@@ -2512,7 +2520,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
// stop at the ']' when it skips to the ';'. We want it to skip beyond
// the enclosing expression.
SkipUntil(tok::r_square);
- return move(Res);
+ return Res;
}
// We have a valid expression.
@@ -2551,32 +2559,23 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
if (SuperLoc.isValid())
return Actions.ActOnSuperMessage(getCurScope(), SuperLoc, Sel,
- LBracLoc, KeyLocs, RBracLoc,
- MultiExprArg(Actions,
- KeyExprs.take(),
- KeyExprs.size()));
+ LBracLoc, KeyLocs, RBracLoc, KeyExprs);
else if (ReceiverType)
return Actions.ActOnClassMessage(getCurScope(), ReceiverType, Sel,
- LBracLoc, KeyLocs, RBracLoc,
- MultiExprArg(Actions,
- KeyExprs.take(),
- KeyExprs.size()));
+ LBracLoc, KeyLocs, RBracLoc, KeyExprs);
return Actions.ActOnInstanceMessage(getCurScope(), ReceiverExpr, Sel,
- LBracLoc, KeyLocs, RBracLoc,
- MultiExprArg(Actions,
- KeyExprs.take(),
- KeyExprs.size()));
+ LBracLoc, KeyLocs, RBracLoc, KeyExprs);
}
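
The simplification above recurs throughout this patch: the Sema-owned argument
wrappers were replaced by ArrayRef-style views, so an ExprVector now converts
to MultiExprArg implicitly and all of the take()/size() plumbing disappears. A
rough sketch of the new shape (the inline element count is an assumption; the
real typedefs live in Sema/Ownership.h and Parse/Parser.h):

    typedef llvm::MutableArrayRef<Expr *> MultiExprArg; // non-owning view
    typedef SmallVector<Expr *, 12> ExprVector;         // assumed inline size
    // SmallVector converts to MutableArrayRef, hence the bare 'KeyExprs'.
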
ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
ExprResult Res(ParseStringLiteralExpression());
- if (Res.isInvalid()) return move(Res);
+ if (Res.isInvalid()) return Res;
// @"foo" @"bar" is a valid concatenated string. Eat any subsequent string
// expressions. At this point, we know that the only valid thing that starts
// with '@' is an @"".
SmallVector<SourceLocation, 4> AtLocs;
- ExprVector AtStrings(Actions);
+ ExprVector AtStrings;
AtLocs.push_back(AtLoc);
AtStrings.push_back(Res.release());
@@ -2589,12 +2588,12 @@ ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
ExprResult Lit(ParseStringLiteralExpression());
if (Lit.isInvalid())
- return move(Lit);
+ return Lit;
AtStrings.push_back(Lit.release());
}
- return Owned(Actions.ParseObjCStringLiteral(&AtLocs[0], AtStrings.take(),
+ return Owned(Actions.ParseObjCStringLiteral(&AtLocs[0], AtStrings.data(),
AtStrings.size()));
}
@@ -2615,7 +2614,7 @@ ExprResult Parser::ParseObjCBooleanLiteral(SourceLocation AtLoc,
ExprResult Parser::ParseObjCCharacterLiteral(SourceLocation AtLoc) {
ExprResult Lit(Actions.ActOnCharacterConstant(Tok));
if (Lit.isInvalid()) {
- return move(Lit);
+ return Lit;
}
ConsumeToken(); // Consume the literal token.
return Owned(Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
@@ -2629,7 +2628,7 @@ ExprResult Parser::ParseObjCCharacterLiteral(SourceLocation AtLoc) {
ExprResult Parser::ParseObjCNumericLiteral(SourceLocation AtLoc) {
ExprResult Lit(Actions.ActOnNumericConstant(Tok));
if (Lit.isInvalid()) {
- return move(Lit);
+ return Lit;
}
ConsumeToken(); // Consume the literal token.
return Owned(Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
@@ -2661,7 +2660,7 @@ Parser::ParseObjCBoxedExpr(SourceLocation AtLoc) {
}
ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
- ExprVector ElementExprs(Actions); // array elements.
+ ExprVector ElementExprs; // array elements.
ConsumeBracket(); // consume the l_square.
while (Tok.isNot(tok::r_square)) {
@@ -2672,7 +2671,7 @@ ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
// stop at the ']' when it skips to the ';'. We want it to skip beyond
// the enclosing expression.
SkipUntil(tok::r_square);
- return move(Res);
+ return Res;
}
// Parse the ellipsis that indicates a pack expansion.
@@ -2689,7 +2688,7 @@ ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
return ExprError(Diag(Tok, diag::err_expected_rsquare_or_comma));
}
SourceLocation EndLoc = ConsumeBracket(); // location of ']'
- MultiExprArg Args(Actions, ElementExprs.take(), ElementExprs.size());
+ MultiExprArg Args(ElementExprs);
return Owned(Actions.BuildObjCArrayLiteral(SourceRange(AtLoc, EndLoc), Args));
}
@@ -2707,7 +2706,7 @@ ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
// stop at the '}' when it skips to the ';'. We want it to skip beyond
// the enclosing expression.
SkipUntil(tok::r_brace);
- return move(KeyExpr);
+ return KeyExpr;
}
}
@@ -2723,7 +2722,7 @@ ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
// stop at the '}' when it skips to the ';'. We want it to skip beyond
// the enclosing expression.
SkipUntil(tok::r_brace);
- return move(ValueExpr);
+ return ValueExpr;
}
// Parse the ellipsis that designates this as a pack expansion.
@@ -2752,7 +2751,7 @@ ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
}
/// objc-encode-expression:
-/// @encode ( type-name )
+/// \@encode ( type-name )
ExprResult
Parser::ParseObjCEncodeExpression(SourceLocation AtLoc) {
assert(Tok.isObjCAtKeyword(tok::objc_encode) && "Not an @encode expression!");
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
index eb13e0d..a7605f0 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
@@ -40,7 +40,7 @@ void Parser::HandlePragmaVisibility() {
struct PragmaPackInfo {
Sema::PragmaPackKind Kind;
IdentifierInfo *Name;
- Expr *Alignment;
+ Token Alignment;
SourceLocation LParenLoc;
SourceLocation RParenLoc;
};
@@ -50,10 +50,107 @@ void Parser::HandlePragmaPack() {
PragmaPackInfo *Info =
static_cast<PragmaPackInfo *>(Tok.getAnnotationValue());
SourceLocation PragmaLoc = ConsumeToken();
- Actions.ActOnPragmaPack(Info->Kind, Info->Name, Info->Alignment, PragmaLoc,
+ ExprResult Alignment;
+ if (Info->Alignment.is(tok::numeric_constant)) {
+ Alignment = Actions.ActOnNumericConstant(Info->Alignment);
+ if (Alignment.isInvalid())
+ return;
+ }
+ Actions.ActOnPragmaPack(Info->Kind, Info->Name, Alignment.get(), PragmaLoc,
Info->LParenLoc, Info->RParenLoc);
}
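
The Expr* -> Token change in PragmaPackInfo is the theme of this whole file:
pragma callbacks run inside the preprocessor, where invoking Sema (for
example, ActOnNumericConstant builds an AST node) is premature. Capturing the
raw token and acting on it from HandlePragmaPack defers the semantic action
to parse time. The two halves, condensed from the code above:

    // Preprocessor callback: capture, don't evaluate.
    Info->Alignment = Tok;                   // the raw numeric_constant token
    // Parser, on reaching the annotation token: evaluate with Sema available.
    ExprResult Alignment = Actions.ActOnNumericConstant(Info->Alignment);
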
+void Parser::HandlePragmaMSStruct() {
+ assert(Tok.is(tok::annot_pragma_msstruct));
+ Sema::PragmaMSStructKind Kind =
+ static_cast<Sema::PragmaMSStructKind>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+ Actions.ActOnPragmaMSStruct(Kind);
+ ConsumeToken(); // The annotation token.
+}
+
+void Parser::HandlePragmaAlign() {
+ assert(Tok.is(tok::annot_pragma_align));
+ Sema::PragmaOptionsAlignKind Kind =
+ static_cast<Sema::PragmaOptionsAlignKind>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+ SourceLocation PragmaLoc = ConsumeToken();
+ Actions.ActOnPragmaOptionsAlign(Kind, PragmaLoc);
+}
+
+void Parser::HandlePragmaWeak() {
+ assert(Tok.is(tok::annot_pragma_weak));
+ SourceLocation PragmaLoc = ConsumeToken();
+ Actions.ActOnPragmaWeakID(Tok.getIdentifierInfo(), PragmaLoc,
+ Tok.getLocation());
+ ConsumeToken(); // The weak name.
+}
+
+void Parser::HandlePragmaWeakAlias() {
+ assert(Tok.is(tok::annot_pragma_weakalias));
+ SourceLocation PragmaLoc = ConsumeToken();
+ IdentifierInfo *WeakName = Tok.getIdentifierInfo();
+ SourceLocation WeakNameLoc = Tok.getLocation();
+ ConsumeToken();
+ IdentifierInfo *AliasName = Tok.getIdentifierInfo();
+ SourceLocation AliasNameLoc = Tok.getLocation();
+ ConsumeToken();
+ Actions.ActOnPragmaWeakAlias(WeakName, AliasName, PragmaLoc,
+ WeakNameLoc, AliasNameLoc);

+}
+
+void Parser::HandlePragmaRedefineExtname() {
+ assert(Tok.is(tok::annot_pragma_redefine_extname));
+ SourceLocation RedefLoc = ConsumeToken();
+ IdentifierInfo *RedefName = Tok.getIdentifierInfo();
+ SourceLocation RedefNameLoc = Tok.getLocation();
+ ConsumeToken();
+ IdentifierInfo *AliasName = Tok.getIdentifierInfo();
+ SourceLocation AliasNameLoc = Tok.getLocation();
+ ConsumeToken();
+ Actions.ActOnPragmaRedefineExtname(RedefName, AliasName, RedefLoc,
+ RedefNameLoc, AliasNameLoc);
+}
+
+void Parser::HandlePragmaFPContract() {
+ assert(Tok.is(tok::annot_pragma_fp_contract));
+ tok::OnOffSwitch OOS =
+ static_cast<tok::OnOffSwitch>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+ Actions.ActOnPragmaFPContract(OOS);
+ ConsumeToken(); // The annotation token.
+}
+
+namespace {
+ typedef llvm::PointerIntPair<IdentifierInfo *, 1, bool> OpenCLExtData;
+}
+
+void Parser::HandlePragmaOpenCLExtension() {
+ assert(Tok.is(tok::annot_pragma_opencl_extension));
+ OpenCLExtData data =
+ OpenCLExtData::getFromOpaqueValue(Tok.getAnnotationValue());
+ unsigned state = data.getInt();
+ IdentifierInfo *ename = data.getPointer();
+ SourceLocation NameLoc = Tok.getLocation();
+ ConsumeToken(); // The annotation token.
+
+ OpenCLOptions &f = Actions.getOpenCLOptions();
+ // OpenCL 1.1 9.1: "The all variant sets the behavior for all extensions,
+ // overriding all previously issued extension directives, but only if the
+ // behavior is set to disable."
+ if (state == 0 && ename->isStr("all")) {
+#define OPENCLEXT(nm) f.nm = 0;
+#include "clang/Basic/OpenCLExtensions.def"
+ }
+#define OPENCLEXT(nm) else if (ename->isStr(#nm)) { f.nm = state; }
+#include "clang/Basic/OpenCLExtensions.def"
+ else {
+ PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << ename;
+ return;
+ }
+}
+
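
OpenCLExtData shows the usual LLVM trick for squeezing a flag into the single
void* slot of an annotation token: llvm::PointerIntPair stores the bit in the
pointer's spare alignment bits. A self-contained sketch (pack/unpack and the
variable names are illustrative, not from the patch):

    #include "clang/Basic/IdentifierTable.h"
    #include "llvm/ADT/PointerIntPair.h"

    typedef llvm::PointerIntPair<clang::IdentifierInfo *, 1, bool> ExtData;

    void *pack(clang::IdentifierInfo *Name, bool Enabled) {
      return ExtData(Name, Enabled).getOpaqueValue(); // one pointer-sized value
    }

    void unpack(void *Opaque) {
      ExtData Data = ExtData::getFromOpaqueValue(Opaque);
      clang::IdentifierInfo *Name = Data.getPointer(); // the extension name
      bool Enabled = Data.getInt();                    // the on/off bit
      (void)Name; (void)Enabled;
    }
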
// #pragma GCC visibility comes in two variants:
// 'push' '(' [visibility] ')'
// 'pop'
@@ -130,13 +227,12 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
Sema::PragmaPackKind Kind = Sema::PPK_Default;
IdentifierInfo *Name = 0;
- ExprResult Alignment;
+ Token Alignment;
+ Alignment.startToken();
SourceLocation LParenLoc = Tok.getLocation();
PP.Lex(Tok);
if (Tok.is(tok::numeric_constant)) {
- Alignment = Actions.ActOnNumericConstant(Tok);
- if (Alignment.isInvalid())
- return;
+ Alignment = Tok;
PP.Lex(Tok);
@@ -165,9 +261,7 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
PP.Lex(Tok);
if (Tok.is(tok::numeric_constant)) {
- Alignment = Actions.ActOnNumericConstant(Tok);
- if (Alignment.isInvalid())
- return;
+ Alignment = Tok;
PP.Lex(Tok);
} else if (Tok.is(tok::identifier)) {
@@ -182,9 +276,7 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
return;
}
- Alignment = Actions.ActOnNumericConstant(Tok);
- if (Alignment.isInvalid())
- return;
+ Alignment = Tok;
PP.Lex(Tok);
}
@@ -219,7 +311,7 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
new (Info) PragmaPackInfo();
Info->Kind = Kind;
Info->Name = Name;
- Info->Alignment = Alignment.release();
+ Info->Alignment = Alignment;
Info->LParenLoc = LParenLoc;
Info->RParenLoc = RParenLoc;
@@ -265,12 +357,23 @@ void PragmaMSStructHandler::HandlePragma(Preprocessor &PP,
<< "ms_struct";
return;
}
- Actions.ActOnPragmaMSStruct(Kind);
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_msstruct);
+ Toks[0].setLocation(MSStructTok.getLocation());
+ Toks[0].setAnnotationValue(reinterpret_cast<void*>(
+ static_cast<uintptr_t>(Kind)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
}
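
Every handler rewritten in this file follows the same recipe: instead of
calling into Sema from the preprocessor callback, it materializes an
annotation token carrying the decoded pragma and pushes it back into the
token stream, so the parser can act on it where scope and declaration state
are known. The pattern in isolation (annot_pragma_example and Payload are
placeholders; the surrounding calls are the real APIs used above):

    Token *Toks = (Token *) PP.getPreprocessorAllocator().Allocate(
        sizeof(Token) * 1, llvm::alignOf<Token>());
    new (Toks) Token();
    Toks[0].startToken();
    Toks[0].setKind(tok::annot_pragma_example);  // hypothetical token kind
    Toks[0].setLocation(PragmaLoc);
    Toks[0].setAnnotationValue(Payload);         // any pointer-sized payload
    // OwnsTokens=false: the memory stays with the preprocessor's allocator.
    PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
                        /*OwnsTokens=*/false);
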
// #pragma 'align' '=' {'native','natural','mac68k','power','reset'}
// #pragma 'options 'align' '=' {'native','natural','mac68k','power','reset'}
-static void ParseAlignPragma(Sema &Actions, Preprocessor &PP, Token &FirstTok,
+static void ParseAlignPragma(Preprocessor &PP, Token &FirstTok,
bool IsOptions) {
Token Tok;
@@ -317,7 +420,6 @@ static void ParseAlignPragma(Sema &Actions, Preprocessor &PP, Token &FirstTok,
return;
}
- SourceLocation KindLoc = Tok.getLocation();
PP.Lex(Tok);
if (Tok.isNot(tok::eod)) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
@@ -325,19 +427,29 @@ static void ParseAlignPragma(Sema &Actions, Preprocessor &PP, Token &FirstTok,
return;
}
- Actions.ActOnPragmaOptionsAlign(Kind, FirstTok.getLocation(), KindLoc);
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_align);
+ Toks[0].setLocation(FirstTok.getLocation());
+ Toks[0].setAnnotationValue(reinterpret_cast<void*>(
+ static_cast<uintptr_t>(Kind)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
}
void PragmaAlignHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
Token &AlignTok) {
- ParseAlignPragma(Actions, PP, AlignTok, /*IsOptions=*/false);
+ ParseAlignPragma(PP, AlignTok, /*IsOptions=*/false);
}
void PragmaOptionsHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
Token &OptionsTok) {
- ParseAlignPragma(Actions, PP, OptionsTok, /*IsOptions=*/true);
+ ParseAlignPragma(PP, OptionsTok, /*IsOptions=*/true);
}
// #pragma unused(identifier)
@@ -426,7 +538,6 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP,
void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
Token &WeakTok) {
- // FIXME: Should we be expanding macros here? My guess is no.
SourceLocation WeakLoc = WeakTok.getLocation();
Token Tok;
@@ -436,19 +547,20 @@ void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
return;
}
- IdentifierInfo *WeakName = Tok.getIdentifierInfo(), *AliasName = 0;
- SourceLocation WeakNameLoc = Tok.getLocation(), AliasNameLoc;
+ Token WeakName = Tok;
+ bool HasAlias = false;
+ Token AliasName;
PP.Lex(Tok);
if (Tok.is(tok::equal)) {
+ HasAlias = true;
PP.Lex(Tok);
if (Tok.isNot(tok::identifier)) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
<< "weak";
return;
}
- AliasName = Tok.getIdentifierInfo();
- AliasNameLoc = Tok.getLocation();
+ AliasName = Tok;
PP.Lex(Tok);
}
@@ -457,11 +569,29 @@ void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
return;
}
- if (AliasName) {
- Actions.ActOnPragmaWeakAlias(WeakName, AliasName, WeakLoc, WeakNameLoc,
- AliasNameLoc);
+ if (HasAlias) {
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 3, llvm::alignOf<Token>());
+ Token &pragmaUnusedTok = Toks[0];
+ pragmaUnusedTok.startToken();
+ pragmaUnusedTok.setKind(tok::annot_pragma_weakalias);
+ pragmaUnusedTok.setLocation(WeakLoc);
+ Toks[1] = WeakName;
+ Toks[2] = AliasName;
+ PP.EnterTokenStream(Toks, 3,
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
} else {
- Actions.ActOnPragmaWeakID(WeakName, WeakLoc, WeakNameLoc);
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 2, llvm::alignOf<Token>());
+ Token &pragmaUnusedTok = Toks[0];
+ pragmaUnusedTok.startToken();
+ pragmaUnusedTok.setKind(tok::annot_pragma_weak);
+ pragmaUnusedTok.setLocation(WeakLoc);
+ Toks[1] = WeakName;
+ PP.EnterTokenStream(Toks, 2,
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
}
}
@@ -479,17 +609,16 @@ void PragmaRedefineExtnameHandler::HandlePragma(Preprocessor &PP,
return;
}
- IdentifierInfo *RedefName = Tok.getIdentifierInfo(), *AliasName = 0;
- SourceLocation RedefNameLoc = Tok.getLocation(), AliasNameLoc;
-
+ Token RedefName = Tok;
PP.Lex(Tok);
+
if (Tok.isNot(tok::identifier)) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
<< "redefine_extname";
return;
}
- AliasName = Tok.getIdentifierInfo();
- AliasNameLoc = Tok.getLocation();
+
+ Token AliasName = Tok;
PP.Lex(Tok);
if (Tok.isNot(tok::eod)) {
@@ -498,8 +627,17 @@ void PragmaRedefineExtnameHandler::HandlePragma(Preprocessor &PP,
return;
}
- Actions.ActOnPragmaRedefineExtname(RedefName, AliasName, RedefLoc,
- RedefNameLoc, AliasNameLoc);
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 3, llvm::alignOf<Token>());
+ Token &pragmaRedefTok = Toks[0];
+ pragmaRedefTok.startToken();
+ pragmaRedefTok.setKind(tok::annot_pragma_redefine_extname);
+ pragmaRedefTok.setLocation(RedefLoc);
+ Toks[1] = RedefName;
+ Toks[2] = AliasName;
+ PP.EnterTokenStream(Toks, 3,
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
}
@@ -511,7 +649,17 @@ PragmaFPContractHandler::HandlePragma(Preprocessor &PP,
if (PP.LexOnOffSwitch(OOS))
return;
- Actions.ActOnPragmaFPContract(OOS);
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_fp_contract);
+ Toks[0].setLocation(Tok.getLocation());
+ Toks[0].setAnnotationValue(reinterpret_cast<void*>(
+ static_cast<uintptr_t>(OOS)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
}
void
@@ -550,19 +698,23 @@ PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
return;
}
- OpenCLOptions &f = Actions.getOpenCLOptions();
- // OpenCL 1.1 9.1: "The all variant sets the behavior for all extensions,
- // overriding all previously issued extension directives, but only if the
- // behavior is set to disable."
- if (state == 0 && ename->isStr("all")) {
-#define OPENCLEXT(nm) f.nm = 0;
-#include "clang/Basic/OpenCLExtensions.def"
- }
-#define OPENCLEXT(nm) else if (ename->isStr(#nm)) { f.nm = state; }
-#include "clang/Basic/OpenCLExtensions.def"
- else {
- PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << ename;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) <<
+ "OPENCL EXTENSION";
return;
}
+
+ OpenCLExtData data(ename, state);
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_opencl_extension);
+ Toks[0].setLocation(NameLoc);
+ Toks[0].setAnnotationValue(data.getOpaqueValue());
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
index fef6960..b9a2a25 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
@@ -21,9 +21,8 @@ namespace clang {
class Parser;
class PragmaAlignHandler : public PragmaHandler {
- Sema &Actions;
public:
- explicit PragmaAlignHandler(Sema &A) : PragmaHandler("align"), Actions(A) {}
+ explicit PragmaAlignHandler() : PragmaHandler("align") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
@@ -31,38 +30,31 @@ public:
class PragmaGCCVisibilityHandler : public PragmaHandler {
public:
- explicit PragmaGCCVisibilityHandler(Sema &/*A*/)
- : PragmaHandler("visibility") {}
+ explicit PragmaGCCVisibilityHandler() : PragmaHandler("visibility") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
class PragmaOptionsHandler : public PragmaHandler {
- Sema &Actions;
public:
- explicit PragmaOptionsHandler(Sema &A) : PragmaHandler("options"),
- Actions(A) {}
+ explicit PragmaOptionsHandler() : PragmaHandler("options") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
class PragmaPackHandler : public PragmaHandler {
- Sema &Actions;
public:
- explicit PragmaPackHandler(Sema &A) : PragmaHandler("pack"),
- Actions(A) {}
+ explicit PragmaPackHandler() : PragmaHandler("pack") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
class PragmaMSStructHandler : public PragmaHandler {
- Sema &Actions;
public:
- explicit PragmaMSStructHandler(Sema &A) : PragmaHandler("ms_struct"),
- Actions(A) {}
+ explicit PragmaMSStructHandler() : PragmaHandler("ms_struct") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
@@ -70,48 +62,39 @@ public:
class PragmaUnusedHandler : public PragmaHandler {
public:
- PragmaUnusedHandler(Sema &/*A*/)
- : PragmaHandler("unused") {}
+ PragmaUnusedHandler() : PragmaHandler("unused") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
class PragmaWeakHandler : public PragmaHandler {
- Sema &Actions;
public:
- explicit PragmaWeakHandler(Sema &A)
- : PragmaHandler("weak"), Actions(A) {}
+ explicit PragmaWeakHandler() : PragmaHandler("weak") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
class PragmaRedefineExtnameHandler : public PragmaHandler {
- Sema &Actions;
public:
- explicit PragmaRedefineExtnameHandler(Sema &A)
- : PragmaHandler("redefine_extname"), Actions(A) {}
+ explicit PragmaRedefineExtnameHandler() : PragmaHandler("redefine_extname") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
class PragmaOpenCLExtensionHandler : public PragmaHandler {
- Sema &Actions;
public:
- PragmaOpenCLExtensionHandler(Sema &A) :
- PragmaHandler("EXTENSION"), Actions(A) {}
+ PragmaOpenCLExtensionHandler() : PragmaHandler("EXTENSION") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
class PragmaFPContractHandler : public PragmaHandler {
- Sema &Actions;
public:
- PragmaFPContractHandler(Sema &A) :
- PragmaHandler("FP_CONTRACT"), Actions(A) {}
+ PragmaFPContractHandler() : PragmaHandler("FP_CONTRACT") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
index df9b996..f604e03 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
@@ -17,6 +17,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/TypoCorrection.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
@@ -130,96 +131,38 @@ Retry:
return ParseLabeledStatement(Attrs);
}
+ // Look up the identifier, and typo-correct it to a keyword if it's not
+ // found.
if (Next.isNot(tok::coloncolon)) {
- CXXScopeSpec SS;
- IdentifierInfo *Name = Tok.getIdentifierInfo();
- SourceLocation NameLoc = Tok.getLocation();
-
- if (getLangOpts().CPlusPlus)
- CheckForTemplateAndDigraph(Next, ParsedType(),
- /*EnteringContext=*/false, *Name, SS);
-
- Sema::NameClassification Classification
- = Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next);
- switch (Classification.getKind()) {
- case Sema::NC_Keyword:
- // The identifier was corrected to a keyword. Update the token
- // to this keyword, and try again.
- if (Name->getTokenID() != tok::identifier) {
- Tok.setIdentifierInfo(Name);
- Tok.setKind(Name->getTokenID());
- goto Retry;
- }
-
- // Fall through via the normal error path.
- // FIXME: This seems like it could only happen for context-sensitive
- // keywords.
-
- case Sema::NC_Error:
+ // Try to limit which sets of keywords should be included in typo
+ // correction based on what the next token is.
+ // FIXME: Pass the next token into the CorrectionCandidateCallback and
+ // do this filtering in a more fine-grained manner.
+ CorrectionCandidateCallback DefaultValidator;
+ DefaultValidator.WantTypeSpecifiers =
+ Next.is(tok::l_paren) || Next.is(tok::less) ||
+ Next.is(tok::identifier) || Next.is(tok::star) ||
+ Next.is(tok::amp) || Next.is(tok::l_square);
+ DefaultValidator.WantExpressionKeywords =
+ Next.is(tok::l_paren) || Next.is(tok::identifier) ||
+ Next.is(tok::arrow) || Next.is(tok::period);
+ DefaultValidator.WantRemainingKeywords =
+ Next.is(tok::l_paren) || Next.is(tok::semi) ||
+ Next.is(tok::identifier) || Next.is(tok::l_brace);
+ DefaultValidator.WantCXXNamedCasts = false;
+ if (TryAnnotateName(/*IsAddressOfOperand*/false, &DefaultValidator)
+ == ANK_Error) {
// Handle errors here by skipping up to the next semicolon or '}', and
// eat the semicolon if that's what stopped us.
SkipUntil(tok::r_brace, /*StopAtSemi=*/true, /*DontConsume=*/true);
if (Tok.is(tok::semi))
ConsumeToken();
return StmtError();
-
- case Sema::NC_Unknown:
- // Either we don't know anything about this identifier, or we know that
- // we're in a syntactic context we haven't handled yet.
- break;
-
- case Sema::NC_Type:
- Tok.setKind(tok::annot_typename);
- setTypeAnnotation(Tok, Classification.getType());
- Tok.setAnnotationEndLoc(NameLoc);
- PP.AnnotateCachedTokens(Tok);
- break;
-
- case Sema::NC_Expression:
- Tok.setKind(tok::annot_primary_expr);
- setExprAnnotation(Tok, Classification.getExpression());
- Tok.setAnnotationEndLoc(NameLoc);
- PP.AnnotateCachedTokens(Tok);
- break;
-
- case Sema::NC_TypeTemplate:
- case Sema::NC_FunctionTemplate: {
- ConsumeToken(); // the identifier
- UnqualifiedId Id;
- Id.setIdentifier(Name, NameLoc);
- if (AnnotateTemplateIdToken(
- TemplateTy::make(Classification.getTemplateName()),
- Classification.getTemplateNameKind(),
- SS, SourceLocation(), Id,
- /*AllowTypeAnnotation=*/false)) {
- // Handle errors here by skipping up to the next semicolon or '}', and
- // eat the semicolon if that's what stopped us.
- SkipUntil(tok::r_brace, /*StopAtSemi=*/true, /*DontConsume=*/true);
- if (Tok.is(tok::semi))
- ConsumeToken();
- return StmtError();
- }
-
- // If the next token is '::', jump right into parsing a
- // nested-name-specifier. We don't want to leave the template-id
- // hanging.
- if (NextToken().is(tok::coloncolon) && TryAnnotateCXXScopeToken(false)){
- // Handle errors here by skipping up to the next semicolon or '}', and
- // eat the semicolon if that's what stopped us.
- SkipUntil(tok::r_brace, /*StopAtSemi=*/true, /*DontConsume=*/true);
- if (Tok.is(tok::semi))
- ConsumeToken();
- return StmtError();
- }
-
- // We've annotated a template-id, so try again now.
- goto Retry;
}
- case Sema::NC_NestedNameSpecifier:
- // FIXME: Implement this!
- break;
- }
+ // If the identifier was typo-corrected, try again.
+ if (Tok.isNot(tok::identifier))
+ goto Retry;
}
// Fall through
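
The flags set above gate which candidate sets Sema's typo correction may draw
from, keyed on the shape of the following token. A hedged illustration of the
effect (the flags are the real fields; the example corrections are
assumptions about typical candidates):

    CorrectionCandidateCallback Validator;
    Validator.WantTypeSpecifiers = true;     // 'itn x;'    -> 'int x;'
    Validator.WantExpressionKeywords = true; // 'siezof(x)' -> 'sizeof(x)'
    Validator.WantRemainingKeywords = true;  // 'whlie (x)' -> 'while (x)'
    Validator.WantCXXNamedCasts = false;     // never suggest static_cast here
    // TryAnnotateName hands the validator to Sema, which consults it for
    // every candidate keyword or declaration it considers.
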
@@ -289,7 +232,7 @@ Retry:
bool msAsm = false;
Res = ParseAsmStatement(msAsm);
Res = Actions.ActOnFinishFullStmt(Res.get());
- if (msAsm) return move(Res);
+ if (msAsm) return Res;
SemiError = "asm";
break;
}
@@ -310,6 +253,41 @@ Retry:
ProhibitAttributes(Attrs);
HandlePragmaPack();
return StmtEmpty();
+
+ case tok::annot_pragma_msstruct:
+ ProhibitAttributes(Attrs);
+ HandlePragmaMSStruct();
+ return StmtEmpty();
+
+ case tok::annot_pragma_align:
+ ProhibitAttributes(Attrs);
+ HandlePragmaAlign();
+ return StmtEmpty();
+
+ case tok::annot_pragma_weak:
+ ProhibitAttributes(Attrs);
+ HandlePragmaWeak();
+ return StmtEmpty();
+
+ case tok::annot_pragma_weakalias:
+ ProhibitAttributes(Attrs);
+ HandlePragmaWeakAlias();
+ return StmtEmpty();
+
+ case tok::annot_pragma_redefine_extname:
+ ProhibitAttributes(Attrs);
+ HandlePragmaRedefineExtname();
+ return StmtEmpty();
+
+ case tok::annot_pragma_fp_contract:
+ Diag(Tok, diag::err_pragma_fp_contract_scope);
+ ConsumeToken();
+ return StmtError();
+
+ case tok::annot_pragma_opencl_extension:
+ ProhibitAttributes(Attrs);
+ HandlePragmaOpenCLExtension();
+ return StmtEmpty();
}
// If we reached this code, the statement must end in a semicolon.
@@ -324,7 +302,7 @@ Retry:
SkipUntil(tok::r_brace, true, true);
}
- return move(Res);
+ return Res;
}
/// \brief Parse an expression statement.
@@ -381,7 +359,7 @@ StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) {
StmtResult TryBlock(ParseCompoundStatement());
if(TryBlock.isInvalid())
- return move(TryBlock);
+ return TryBlock;
StmtResult Handler;
if (Tok.is(tok::identifier) &&
@@ -396,7 +374,7 @@ StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) {
}
if(Handler.isInvalid())
- return move(Handler);
+ return Handler;
return Actions.ActOnSEHTryBlock(false /* IsCXXTry */,
TryLoc,
@@ -441,7 +419,7 @@ StmtResult Parser::ParseSEHExceptBlock(SourceLocation ExceptLoc) {
StmtResult Block(ParseCompoundStatement());
if(Block.isInvalid())
- return move(Block);
+ return Block;
return Actions.ActOnSEHExceptBlock(ExceptLoc, FilterExpr.take(), Block.take());
}
@@ -458,7 +436,7 @@ StmtResult Parser::ParseSEHFinallyBlock(SourceLocation FinallyBlock) {
StmtResult Block(ParseCompoundStatement());
if(Block.isInvalid())
- return move(Block);
+ return Block;
return Actions.ActOnSEHFinallyBlock(FinallyBlock,Block.take());
}
@@ -603,7 +581,7 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
// Otherwise we link it into the current chain.
Stmt *NextDeepest = Case.get();
if (TopLevelCase.isInvalid())
- TopLevelCase = move(Case);
+ TopLevelCase = Case;
else
Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, Case.get());
DeepestParsedCaseStmt = NextDeepest;
@@ -636,7 +614,7 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, SubStmt.get());
// Return the top level parsed statement tree.
- return move(TopLevelCase);
+ return TopLevelCase;
}
/// ParseDefaultStatement
@@ -728,6 +706,48 @@ StmtResult Parser::ParseCompoundStatement(bool isStmtExpr,
return ParseCompoundStatementBody(isStmtExpr);
}
+/// Parse any pragmas at the start of the compound statement. We handle these
+/// separately since some pragmas (FP_CONTRACT) must appear before any C
+/// statement in the compound, but may be intermingled with other pragmas.
+void Parser::ParseCompoundStatementLeadingPragmas() {
+ bool checkForPragmas = true;
+ while (checkForPragmas) {
+ switch (Tok.getKind()) {
+ case tok::annot_pragma_vis:
+ HandlePragmaVisibility();
+ break;
+ case tok::annot_pragma_pack:
+ HandlePragmaPack();
+ break;
+ case tok::annot_pragma_msstruct:
+ HandlePragmaMSStruct();
+ break;
+ case tok::annot_pragma_align:
+ HandlePragmaAlign();
+ break;
+ case tok::annot_pragma_weak:
+ HandlePragmaWeak();
+ break;
+ case tok::annot_pragma_weakalias:
+ HandlePragmaWeakAlias();
+ break;
+ case tok::annot_pragma_redefine_extname:
+ HandlePragmaRedefineExtname();
+ break;
+ case tok::annot_pragma_opencl_extension:
+ HandlePragmaOpenCLExtension();
+ break;
+ case tok::annot_pragma_fp_contract:
+ HandlePragmaFPContract();
+ break;
+ default:
+ checkForPragmas = false;
+ break;
+ }
+ }
+}
+
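
The split matters because C99 (7.12.2) only allows FP_CONTRACT before any
explicit declaration or statement in a compound statement, and the annotation
tokens now let the parser enforce that placement. An illustration in user
code (compiles as C or C++; not from the patch):

    void ok(double a, double b, double *r) {
    #pragma STDC FP_CONTRACT ON    // accepted: precedes all statements
      *r = a * b + 1.0;            // may be contracted into an fma
    }

    void bad(double a, double b, double *r) {
      *r = a * b;
    #pragma STDC FP_CONTRACT ON    // diagnosed: err_pragma_fp_contract_scope
    }
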
/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
/// ActOnCompoundStmt action. This expects the '{' to be the current token, and
/// consume the '}' at the end of the block. It does not manipulate the scope
@@ -736,6 +756,11 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
PrettyStackTraceLoc CrashInfo(PP.getSourceManager(),
Tok.getLocation(),
"in compound statement ('{}')");
+
+ // Record the state of the FP_CONTRACT pragma, restore on leaving the
+ // compound statement.
+ Sema::FPContractStateRAII SaveFPContractState(Actions);
+
InMessageExpressionRAIIObject InMessage(*this, false);
BalancedDelimiterTracker T(*this, tok::l_brace);
if (T.consumeOpen())
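
FPContractStateRAII gives each compound statement its own FP_CONTRACT scope.
As a reminder of the idiom, a generic sketch (this is not the actual clang
class, and SomeFlag is a hypothetical Sema member):

    class FlagScopeRAII {
      Sema &S;
      bool Saved;                    // state captured at scope entry
    public:
      explicit FlagScopeRAII(Sema &S) : S(S), Saved(S.SomeFlag) {}
      ~FlagScopeRAII() { S.SomeFlag = Saved; } // restored on every exit path
    };
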
@@ -743,7 +768,10 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
Sema::CompoundScopeRAII CompoundScope(Actions);
- StmtVector Stmts(Actions);
+ // Parse any pragmas at the beginning of the compound statement.
+ ParseCompoundStatementLeadingPragmas();
+
+ StmtVector Stmts;
// "__label__ X, Y, Z;" is the GNU "Local Label" extension. These are
// only allowed at the start of a compound stmt regardless of the language.
@@ -850,7 +878,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
}
return Actions.ActOnCompoundStmt(T.getOpenLocation(), CloseLoc,
- move_arg(Stmts), isStmtExpr);
+ Stmts, isStmtExpr);
}
/// ParseParenExprOrCondition:
@@ -1096,7 +1124,7 @@ StmtResult Parser::ParseSwitchStatement(SourceLocation *TrailingElseLoc) {
SkipUntil(tok::r_brace, false, false);
} else
SkipUntil(tok::semi);
- return move(Switch);
+ return Switch;
}
// C99 6.8.4p3 - In C99, the body of the switch statement is a scope, even if
@@ -1375,7 +1403,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- StmtVector Stmts(Actions);
+ StmtVector Stmts;
DeclGroupPtrTy DG = ParseSimpleDeclaration(Stmts, Declarator::ForContext,
DeclEnd, attrs, false,
MightBeForRangeStmt ?
@@ -1498,7 +1526,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ForRangeStmt = Actions.ActOnCXXForRangeStmt(ForLoc, FirstPart.take(),
ForRangeInit.ColonLoc,
ForRangeInit.RangeExpr.get(),
- T.getCloseLocation());
+ T.getCloseLocation(),
+ Sema::BFRK_Build);
// Similarly, we need to do the semantic analysis for a for-range
@@ -1580,7 +1609,7 @@ StmtResult Parser::ParseGotoStatement() {
return StmtError();
}
- return move(Res);
+ return Res;
}
/// ParseContinueStatement
@@ -1653,6 +1682,9 @@ StmtResult Parser::ParseReturnStatement() {
/// ms-asm-line '\n' ms-asm-instruction-block
///
StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
+ // MS-style inline assembly is not fully supported, so emit a warning.
+ Diag(AsmLoc, diag::warn_unsupported_msasm);
+
SourceManager &SrcMgr = PP.getSourceManager();
SourceLocation EndLoc = AsmLoc;
SmallVector<Token, 4> AsmToks;
@@ -1745,6 +1777,21 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
return StmtError();
}
+ // If MS-style inline assembly is disabled, then build an empty asm.
+ if (!getLangOpts().EmitMicrosoftInlineAsm) {
+ Token t;
+ t.setKind(tok::string_literal);
+ t.setLiteralData("\"/*FIXME: not done*/\"");
+ t.clearFlag(Token::NeedsCleaning);
+ t.setLength(21);
+ ExprResult AsmString(Actions.ActOnStringLiteral(&t, 1));
+ ExprVector Constraints;
+ ExprVector Exprs;
+ ExprVector Clobbers;
+ return Actions.ActOnGCCAsmStmt(AsmLoc, true, true, 0, 0, 0, Constraints,
+ Exprs, AsmString.take(), Clobbers, EndLoc);
+ }
+
// FIXME: We should be passing source locations for better diagnostics.
return Actions.ActOnMSAsmStmt(AsmLoc, LBraceLoc,
llvm::makeArrayRef(AsmToks), EndLoc);
@@ -1806,18 +1853,17 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
}
SmallVector<IdentifierInfo *, 4> Names;
- ExprVector Constraints(Actions);
- ExprVector Exprs(Actions);
- ExprVector Clobbers(Actions);
+ ExprVector Constraints;
+ ExprVector Exprs;
+ ExprVector Clobbers;
if (Tok.is(tok::r_paren)) {
// We have a simple asm expression like 'asm("foo")'.
T.consumeClose();
- return Actions.ActOnAsmStmt(AsmLoc, /*isSimple*/ true, isVolatile,
- /*NumOutputs*/ 0, /*NumInputs*/ 0, 0,
- move_arg(Constraints), move_arg(Exprs),
- AsmString.take(), move_arg(Clobbers),
- T.getCloseLocation());
+ return Actions.ActOnGCCAsmStmt(AsmLoc, /*isSimple*/ true, isVolatile,
+ /*NumOutputs*/ 0, /*NumInputs*/ 0, 0,
+ Constraints, Exprs, AsmString.take(),
+ Clobbers, T.getCloseLocation());
}
// Parse Outputs, if present.
@@ -1878,11 +1924,10 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
}
T.consumeClose();
- return Actions.ActOnAsmStmt(AsmLoc, false, isVolatile,
- NumOutputs, NumInputs, Names.data(),
- move_arg(Constraints), move_arg(Exprs),
- AsmString.take(), move_arg(Clobbers),
- T.getCloseLocation());
+ return Actions.ActOnGCCAsmStmt(AsmLoc, false, isVolatile, NumOutputs,
+ NumInputs, Names.data(), Constraints, Exprs,
+ AsmString.take(), Clobbers,
+ T.getCloseLocation());
}
/// ParseAsmOperands - Parse the asm-operands production as used by
@@ -1975,7 +2020,7 @@ Decl *Parser::ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope) {
if (FnBody.isInvalid()) {
Sema::CompoundScopeRAII CompoundScope(Actions);
FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc,
- MultiStmtArg(Actions), false);
+ MultiStmtArg(), false);
}
BodyScope.Exit();
@@ -2006,13 +2051,13 @@ Decl *Parser::ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope) {
}
SourceLocation LBraceLoc = Tok.getLocation();
- StmtResult FnBody(ParseCXXTryBlockCommon(TryLoc));
+ StmtResult FnBody(ParseCXXTryBlockCommon(TryLoc, /*FnTry*/true));
// If we failed to parse the try-catch, we just give the function an empty
// compound statement as the body.
if (FnBody.isInvalid()) {
Sema::CompoundScopeRAII CompoundScope(Actions);
FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc,
- MultiStmtArg(Actions), false);
+ MultiStmtArg(), false);
}
BodyScope.Exit();
@@ -2024,12 +2069,18 @@ bool Parser::trySkippingFunctionBody() {
assert(SkipFunctionBodies &&
"Should only be called when SkipFunctionBodies is enabled");
+ if (!PP.isCodeCompletionEnabled()) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/false, /*DontConsume=*/false);
+ return true;
+ }
+
// We're in code-completion mode. Skip parsing for all function bodies unless
// the body contains the code-completion point.
TentativeParsingAction PA(*this);
ConsumeBrace();
if (SkipUntil(tok::r_brace, /*StopAtSemi=*/false, /*DontConsume=*/false,
- /*StopAtCodeCompletion=*/PP.isCodeCompletionEnabled())) {
+ /*StopAtCodeCompletion=*/true)) {
PA.Commit();
return true;
}
@@ -2066,15 +2117,16 @@ StmtResult Parser::ParseCXXTryBlock() {
/// 'try' compound-statement seh-except-block
/// 'try' compound-statement seh-finally-block
///
-StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
+StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry) {
if (Tok.isNot(tok::l_brace))
return StmtError(Diag(Tok, diag::err_expected_lbrace));
// FIXME: Possible draft standard bug: attribute-specifier should be allowed?
StmtResult TryBlock(ParseCompoundStatement(/*isStmtExpr=*/false,
- Scope::DeclScope|Scope::TryScope));
+ Scope::DeclScope |
+ (FnTry ? Scope::FnTryScope : Scope::TryScope)));
if (TryBlock.isInvalid())
- return move(TryBlock);
+ return TryBlock;
// Borland allows SEH-handlers with 'try'
@@ -2092,7 +2144,7 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
Handler = ParseSEHFinallyBlock(Loc);
}
if(Handler.isInvalid())
- return move(Handler);
+ return Handler;
return Actions.ActOnSEHTryBlock(true /* IsCXXTry */,
TryLoc,
@@ -2100,7 +2152,7 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
Handler.take());
}
else {
- StmtVector Handlers(Actions);
+ StmtVector Handlers;
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX0XAttributes(attrs);
ProhibitAttributes(attrs);
@@ -2108,7 +2160,7 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
if (Tok.isNot(tok::kw_catch))
return StmtError(Diag(Tok, diag::err_expected_catch));
while (Tok.is(tok::kw_catch)) {
- StmtResult Handler(ParseCXXCatchBlock());
+ StmtResult Handler(ParseCXXCatchBlock(FnTry));
if (!Handler.isInvalid())
Handlers.push_back(Handler.release());
}
@@ -2117,7 +2169,7 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
if (Handlers.empty())
return StmtError();
- return Actions.ActOnCXXTryBlock(TryLoc, TryBlock.take(),move_arg(Handlers));
+    return Actions.ActOnCXXTryBlock(TryLoc, TryBlock.take(), Handlers);
}
}
@@ -2132,7 +2184,7 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
/// type-specifier-seq
/// '...'
///
-StmtResult Parser::ParseCXXCatchBlock() {
+StmtResult Parser::ParseCXXCatchBlock(bool FnCatch) {
assert(Tok.is(tok::kw_catch) && "Expected 'catch'");
SourceLocation CatchLoc = ConsumeToken();
@@ -2144,7 +2196,8 @@ StmtResult Parser::ParseCXXCatchBlock() {
// C++ 3.3.2p3:
// The name in a catch exception-declaration is local to the handler and
// shall not be redeclared in the outermost block of the handler.
- ParseScope CatchScope(this, Scope::DeclScope | Scope::ControlScope);
+ ParseScope CatchScope(this, Scope::DeclScope | Scope::ControlScope |
+ (FnCatch ? Scope::FnCatchScope : 0));
// exception-declaration is equivalent to '...' or a parameter-declaration
// without default arguments.
@@ -2169,7 +2222,7 @@ StmtResult Parser::ParseCXXCatchBlock() {
// FIXME: Possible draft standard bug: attribute-specifier should be allowed?
StmtResult Block(ParseCompoundStatement());
if (Block.isInvalid())
- return move(Block);
+ return Block;
return Actions.ActOnCXXCatchBlock(CatchLoc, ExceptionDecl, Block.take());
}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
index ade918f..2e0411e 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
@@ -246,7 +246,7 @@ Parser::ParseSingleDeclarationAfterTemplate(
return 0;
}
- LateParsedAttrList LateParsedAttrs;
+ LateParsedAttrList LateParsedAttrs(true);
if (DeclaratorInfo.isFunctionDeclarator())
MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
@@ -889,8 +889,7 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
return true;
}
- ASTTemplateArgsPtr TemplateArgsPtr(Actions, TemplateArgs.data(),
- TemplateArgs.size());
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateArgs);
// Build the annotation token.
if (TNK == TNK_Type_template && AllowTypeAnnotation) {
@@ -942,8 +941,6 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
Tok.setLocation(TemplateKWLoc);
else
Tok.setLocation(TemplateNameLoc);
-
- TemplateArgsPtr.release();
}
// Common fields for the annotation token
@@ -969,8 +966,7 @@ void Parser::AnnotateTemplateIdTokenAsType() {
TemplateId->Kind == TNK_Dependent_template_name) &&
"Only works for type and dependent templates");
- ASTTemplateArgsPtr TemplateArgsPtr(Actions,
- TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
TypeResult Type
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
index 1a4df47..40c4eee 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
@@ -104,16 +104,27 @@ bool Parser::isCXXSimpleDeclaration(bool AllowForRangeDecl) {
// isCXXDeclarationSpecifier will return TPResult::Ambiguous() only in such
// a case.
- TPResult TPR = isCXXDeclarationSpecifier();
+ bool InvalidAsDeclaration = false;
+ TPResult TPR = isCXXDeclarationSpecifier(TPResult::False(),
+ &InvalidAsDeclaration);
if (TPR != TPResult::Ambiguous())
return TPR != TPResult::False(); // Returns true for TPResult::True() or
// TPResult::Error().
+  // FIXME: TryParseSimpleDeclaration doesn't look past the first initializer,
+  // and so gets some cases wrong. In that situation we can't carry on once
+  // we've already seen something which makes this statement invalid as a
+  // declaration, since doing so can cause us to misparse valid code. Revisit
+  // this once TryParseInitDeclaratorList is fixed.
+ if (InvalidAsDeclaration)
+ return false;
+
// FIXME: Add statistics about the number of ambiguous statements encountered
// and how they were resolved (number of declarations+number of expressions).
- // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
- // We need tentative parsing...
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '(',
+ // or an identifier which doesn't resolve as anything. We need tentative
+ // parsing...
TentativeParsingAction PA(*this);
TPR = TryParseSimpleDeclaration(AllowForRangeDecl);
@@ -140,20 +151,28 @@ bool Parser::isCXXSimpleDeclaration(bool AllowForRangeDecl) {
/// attribute-specifier-seqopt type-specifier-seq declarator
///
Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
- // We know that we have a simple-type-specifier/typename-specifier followed
- // by a '('.
- assert(isCXXDeclarationSpecifier() == TPResult::Ambiguous());
-
if (Tok.is(tok::kw_typeof))
TryParseTypeofSpecifier();
else {
+ if (Tok.is(tok::annot_cxxscope))
+ ConsumeToken();
ConsumeToken();
-
+
if (getLangOpts().ObjC1 && Tok.is(tok::less))
TryParseProtocolQualifiers();
}
-
- assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ // Two decl-specifiers in a row conclusively disambiguate this as being a
+ // simple-declaration. Don't bother calling isCXXDeclarationSpecifier in the
+ // overwhelmingly common case that the next token is a '('.
+ if (Tok.isNot(tok::l_paren)) {
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR == TPResult::Ambiguous())
+ return TPResult::True();
+ if (TPR == TPResult::True() || TPR == TPResult::Error())
+ return TPR;
+ assert(TPR == TPResult::False());
+ }
TPResult TPR = TryParseInitDeclaratorList();
if (TPR != TPResult::Ambiguous())
@@ -623,6 +642,8 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
// declarator-id
if (Tok.is(tok::annot_cxxscope))
ConsumeToken();
+ else
+ TentativelyDeclaredIdentifiers.push_back(Tok.getIdentifierInfo());
ConsumeToken();
} else if (Tok.is(tok::l_paren)) {
ConsumeParen();
@@ -761,6 +782,7 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw___is_convertible_to:
case tok::kw___is_empty:
case tok::kw___is_enum:
+ case tok::kw___is_interface_class:
case tok::kw___is_final:
case tok::kw___is_literal:
case tok::kw___is_literal_type:
@@ -824,6 +846,12 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
return TPResult::Ambiguous();
}
+bool Parser::isTentativelyDeclared(IdentifierInfo *II) {
+ return std::find(TentativelyDeclaredIdentifiers.begin(),
+ TentativelyDeclaredIdentifiers.end(), II)
+ != TentativelyDeclaredIdentifiers.end();
+}
+
/// isCXXDeclarationSpecifier - Returns TPResult::True() if it is a declaration
/// specifier, TPResult::False() if it is not, TPResult::Ambiguous() if it could
/// be either a decl-specifier or a function-style cast, and TPResult::Error()
@@ -831,7 +859,10 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
///
/// If HasMissingTypename is provided, a name with a dependent scope specifier
/// will be treated as ambiguous if the 'typename' keyword is missing. If this
-/// happens, *HasMissingTypename will be set to 'true'.
+/// happens, *HasMissingTypename will be set to 'true'. This will also be used
+/// as an indicator that undeclared identifiers (which will trigger a later
+/// parse error) should be treated as types. Returns TPResult::Ambiguous() in
+/// such cases.
///
/// decl-specifier:
/// storage-class-specifier
@@ -927,22 +958,64 @@ Parser::TPResult
Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
bool *HasMissingTypename) {
switch (Tok.getKind()) {
- case tok::identifier: // foo::bar
+ case tok::identifier: {
// Check for need to substitute AltiVec __vector keyword
// for "vector" identifier.
if (TryAltiVecVectorToken())
return TPResult::True();
- // Fall through.
+
+ const Token &Next = NextToken();
+ // In 'foo bar', 'foo' is always a type name outside of Objective-C.
+ if (!getLangOpts().ObjC1 && Next.is(tok::identifier))
+ return TPResult::True();
+
+ if (Next.isNot(tok::coloncolon) && Next.isNot(tok::less)) {
+ // Determine whether this is a valid expression. If not, we will hit
+ // a parse error one way or another. In that case, tell the caller that
+ // this is ambiguous. Typo-correct to type and expression keywords and
+ // to types and identifiers, in order to try to recover from errors.
+ CorrectionCandidateCallback TypoCorrection;
+ TypoCorrection.WantRemainingKeywords = false;
+ switch (TryAnnotateName(false /* no nested name specifier */,
+ &TypoCorrection)) {
+ case ANK_Error:
+ return TPResult::Error();
+ case ANK_TentativeDecl:
+ return TPResult::False();
+ case ANK_TemplateName:
+ // A bare type template-name which can't be a template template
+ // argument is an error, and was probably intended to be a type.
+ return GreaterThanIsOperator ? TPResult::True() : TPResult::False();
+ case ANK_Unresolved:
+ return HasMissingTypename ? TPResult::Ambiguous() : TPResult::False();
+ case ANK_Success:
+ break;
+ }
+ assert(Tok.isNot(tok::identifier) &&
+ "TryAnnotateName succeeded without producing an annotation");
+ } else {
+ // This might possibly be a type with a dependent scope specifier and
+ // a missing 'typename' keyword. Don't use TryAnnotateName in this case,
+ // since it will annotate as a primary expression, and we want to use the
+ // "missing 'typename'" logic.
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error();
+ // If annotation failed, assume it's a non-type.
+ // FIXME: If this happens due to an undeclared identifier, treat it as
+ // ambiguous.
+ if (Tok.is(tok::identifier))
+ return TPResult::False();
+ }
+
+ // We annotated this token as something. Recurse to handle whatever we got.
+ return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
+ }
+
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error();
- if (Tok.is(tok::identifier)) {
- const Token &Next = NextToken();
- return (!getLangOpts().ObjC1 && Next.is(tok::identifier)) ?
- TPResult::True() : TPResult::False();
- }
return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
case tok::coloncolon: { // ::foo::bar
@@ -1073,6 +1146,28 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
*HasMissingTypename = true;
return TPResult::Ambiguous();
}
+ } else {
+ // Try to resolve the name. If it doesn't exist, assume it was
+ // intended to name a type and keep disambiguating.
+ switch (TryAnnotateName(false /* SS is not dependent */)) {
+ case ANK_Error:
+ return TPResult::Error();
+ case ANK_TentativeDecl:
+ return TPResult::False();
+ case ANK_TemplateName:
+ // A bare type template-name which can't be a template template
+ // argument is an error, and was probably intended to be a type.
+ return GreaterThanIsOperator ? TPResult::True() : TPResult::False();
+ case ANK_Unresolved:
+ return HasMissingTypename ? TPResult::Ambiguous()
+ : TPResult::False();
+ case ANK_Success:
+ // Annotated it, check again.
+ assert(Tok.isNot(tok::annot_cxxscope) ||
+ NextToken().isNot(tok::identifier));
+ return isCXXDeclarationSpecifier(BracedCastResult,
+ HasMissingTypename);
+ }
}
}
return TPResult::False();
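
For context, these are the classic statement ambiguities the tentative parser
has to resolve, straight from C++ [stmt.ambig] (standard examples, not from
the patch):

    T(a);         // declares 'a' of type T, not a function-style cast
    T (*b)(int);  // declares a pointer to function, not a call through '*b'
    T(c) = 7;     // declares 'c' with a parenthesized declarator
    // The rule: anything that can be parsed as a declaration is a
    // declaration, so the parser must look ahead before committing.
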
diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
index 3725e2b..f4cdd61 100644
--- a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/ASTConsumer.h"
using namespace clang;
+
namespace {
/// \brief A comment handler that passes comments found by the preprocessor
/// to the parser action.
@@ -47,11 +48,13 @@ IdentifierInfo *Parser::getSEHExceptKeyword() {
return Ident__except;
}
-Parser::Parser(Preprocessor &pp, Sema &actions, bool SkipFunctionBodies)
+Parser::Parser(Preprocessor &pp, Sema &actions, bool skipFunctionBodies)
: PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
GreaterThanIsOperator(true), ColonIsSacred(false),
InMessageExpression(false), TemplateParameterDepth(0),
- ParsingInObjCContainer(false), SkipFunctionBodies(SkipFunctionBodies) {
+ ParsingInObjCContainer(false) {
+ SkipFunctionBodies = pp.isCodeCompletionEnabled() || skipFunctionBodies;
+ Tok.startToken();
Tok.setKind(tok::eof);
Actions.CurScope = 0;
NumCachedScopes = 0;
@@ -60,35 +63,35 @@ Parser::Parser(Preprocessor &pp, Sema &actions, bool SkipFunctionBodies)
// Add #pragma handlers. These are removed and destroyed in the
// destructor.
- AlignHandler.reset(new PragmaAlignHandler(actions));
+ AlignHandler.reset(new PragmaAlignHandler());
PP.AddPragmaHandler(AlignHandler.get());
- GCCVisibilityHandler.reset(new PragmaGCCVisibilityHandler(actions));
+ GCCVisibilityHandler.reset(new PragmaGCCVisibilityHandler());
PP.AddPragmaHandler("GCC", GCCVisibilityHandler.get());
- OptionsHandler.reset(new PragmaOptionsHandler(actions));
+ OptionsHandler.reset(new PragmaOptionsHandler());
PP.AddPragmaHandler(OptionsHandler.get());
- PackHandler.reset(new PragmaPackHandler(actions));
+ PackHandler.reset(new PragmaPackHandler());
PP.AddPragmaHandler(PackHandler.get());
- MSStructHandler.reset(new PragmaMSStructHandler(actions));
+ MSStructHandler.reset(new PragmaMSStructHandler());
PP.AddPragmaHandler(MSStructHandler.get());
- UnusedHandler.reset(new PragmaUnusedHandler(actions));
+ UnusedHandler.reset(new PragmaUnusedHandler());
PP.AddPragmaHandler(UnusedHandler.get());
- WeakHandler.reset(new PragmaWeakHandler(actions));
+ WeakHandler.reset(new PragmaWeakHandler());
PP.AddPragmaHandler(WeakHandler.get());
- RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler(actions));
+ RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler());
PP.AddPragmaHandler(RedefineExtnameHandler.get());
- FPContractHandler.reset(new PragmaFPContractHandler(actions));
+ FPContractHandler.reset(new PragmaFPContractHandler());
PP.AddPragmaHandler("STDC", FPContractHandler.get());
if (getLangOpts().OpenCL) {
- OpenCLExtensionHandler.reset(new PragmaOpenCLExtensionHandler(actions));
+ OpenCLExtensionHandler.reset(new PragmaOpenCLExtensionHandler());
PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
@@ -135,7 +138,7 @@ DiagnosticBuilder Parser::Diag(const Token &Tok, unsigned DiagID) {
/// given range.
///
/// \param Loc The location where we'll emit the diagnostic.
-/// \param Loc The kind of diagnostic to emit.
+/// \param DK The kind of diagnostic to emit.
/// \param ParenRange Source range enclosing code that should be parenthesized.
void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange) {
@@ -154,7 +157,8 @@ void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
static bool IsCommonTypo(tok::TokenKind ExpectedTok, const Token &Tok) {
switch (ExpectedTok) {
- case tok::semi: return Tok.is(tok::colon); // : for ;
+ case tok::semi:
+ return Tok.is(tok::colon) || Tok.is(tok::comma); // : or , for ;
default: return false;
}
}
@@ -466,9 +470,6 @@ void Parser::Initialize() {
EnterScope(Scope::DeclScope);
Actions.ActOnTranslationUnitScope(getCurScope());
- // Prime the lexer look-ahead.
- ConsumeToken();
-
// Initialization for Objective-C context sensitive keywords recognition.
// Referenced in Parser::ParseObjCTypeQualifierList.
if (getLangOpts().ObjC1) {
@@ -523,6 +524,11 @@ void Parser::Initialize() {
PP.SetPoisonReason(Ident___abnormal_termination,diag::err_seh___finally_block);
PP.SetPoisonReason(Ident_AbnormalTermination,diag::err_seh___finally_block);
}
+
+ Actions.Initialize();
+
+ // Prime the lexer look-ahead.
+ ConsumeToken();
}
namespace {
@@ -634,6 +640,27 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_pragma_pack:
HandlePragmaPack();
return DeclGroupPtrTy();
+ case tok::annot_pragma_msstruct:
+ HandlePragmaMSStruct();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_align:
+ HandlePragmaAlign();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_weak:
+ HandlePragmaWeak();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_weakalias:
+ HandlePragmaWeakAlias();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_redefine_extname:
+ HandlePragmaRedefineExtname();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_fp_contract:
+ HandlePragmaFPContract();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_opencl_extension:
+ HandlePragmaOpenCLExtension();
+ return DeclGroupPtrTy();
case tok::semi:
ConsumeExtraSemi(OutsideFunction);
// TODO: Invoke action for top-level semicolon.
@@ -693,7 +720,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// A function definition cannot start with any of these keywords.
{
SourceLocation DeclEnd;
- StmtVector Stmts(Actions);
+ StmtVector Stmts;
return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
}
@@ -704,7 +731,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 0;
SourceLocation DeclEnd;
- StmtVector Stmts(Actions);
+ StmtVector Stmts;
return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
}
goto dont_know;
@@ -716,7 +743,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// Inline namespaces. Allowed as an extension even in C++03.
if (NextKind == tok::kw_namespace) {
SourceLocation DeclEnd;
- StmtVector Stmts(Actions);
+ StmtVector Stmts;
return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
}
@@ -726,7 +753,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 1;
SourceLocation DeclEnd;
- StmtVector Stmts(Actions);
+ StmtVector Stmts;
return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
}
}
@@ -972,16 +999,14 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
if (getLangOpts().DelayedTemplateParsing &&
Tok.isNot(tok::equal) &&
TemplateInfo.Kind == ParsedTemplateInfo::Template) {
- MultiTemplateParamsArg TemplateParameterLists(Actions,
- TemplateInfo.TemplateParams->data(),
- TemplateInfo.TemplateParams->size());
+ MultiTemplateParamsArg TemplateParameterLists(*TemplateInfo.TemplateParams);
ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
Scope *ParentScope = getCurScope()->getParent();
D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = Actions.HandleDeclarator(ParentScope, D,
- move(TemplateParameterLists));
+ TemplateParameterLists);
D.complete(DP);
D.getMutableDeclSpec().abort();
@@ -1009,13 +1034,12 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
(Tok.is(tok::l_brace) || Tok.is(tok::kw_try) ||
Tok.is(tok::colon)) &&
Actions.CurContext->isTranslationUnit()) {
- MultiTemplateParamsArg TemplateParameterLists(Actions, 0, 0);
ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
Scope *ParentScope = getCurScope()->getParent();
D.setFunctionDefinitionKind(FDK_Definition);
Decl *FuncDecl = Actions.HandleDeclarator(ParentScope, D,
- move(TemplateParameterLists));
+ MultiTemplateParamsArg());
D.complete(FuncDecl);
D.getMutableDeclSpec().abort();
if (FuncDecl) {
@@ -1033,10 +1057,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// specified Declarator for the function.
Decl *Res = TemplateInfo.TemplateParams?
Actions.ActOnStartOfFunctionTemplateDef(getCurScope(),
- MultiTemplateParamsArg(Actions,
- TemplateInfo.TemplateParams->data(),
- TemplateInfo.TemplateParams->size()),
- D)
+ *TemplateInfo.TemplateParams, D)
: Actions.ActOnStartOfFunctionDef(getCurScope(), D);
// Break out of the ParsingDeclarator context before we parse the body.
@@ -1288,7 +1309,7 @@ Parser::ExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
*EndLoc = T.getCloseLocation();
}
- return move(Result);
+ return Result;
}
/// \brief Get the TemplateIdAnnotation from the token and put it in the
@@ -1301,6 +1322,143 @@ TemplateIdAnnotation *Parser::takeTemplateIdAnnotation(const Token &tok) {
return Id;
}
+void Parser::AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation) {
+ // Push the current token back into the token stream (or revert it if it is
+ // cached) and use an annotation scope token for current token.
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::annot_cxxscope);
+ Tok.setAnnotationValue(Actions.SaveNestedNameSpecifierAnnotation(SS));
+ Tok.setAnnotationRange(SS.getRange());
+
+ // In case the tokens were cached, have Preprocessor replace them
+ // with the annotation token. We don't need to do this if we've
+ // just reverted back to a prior state.
+ if (IsNewAnnotation)
+ PP.AnnotateCachedTokens(Tok);
+}
+
+/// \brief Attempt to classify the name at the current token position. This may
+/// form a type, scope or primary expression annotation, or replace the token
+/// with a typo-corrected keyword. This is only appropriate when the current
+/// name must refer to an entity which has already been declared.
+///
+/// \param IsAddressOfOperand Must be \c true if the name is preceded by an '&'
+/// and might possibly have a dependent nested name specifier.
+/// \param CCC Indicates how to perform typo-correction for this name. If NULL,
+/// no typo correction will be performed.
+Parser::AnnotatedNameKind
+Parser::TryAnnotateName(bool IsAddressOfOperand,
+ CorrectionCandidateCallback *CCC) {
+ assert(Tok.is(tok::identifier) || Tok.is(tok::annot_cxxscope));
+
+ const bool EnteringContext = false;
+ const bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
+
+ CXXScopeSpec SS;
+ if (getLangOpts().CPlusPlus &&
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
+ return ANK_Error;
+
+ if (Tok.isNot(tok::identifier) || SS.isInvalid()) {
+ if (TryAnnotateTypeOrScopeTokenAfterScopeSpec(EnteringContext, false, SS,
+ !WasScopeAnnotation))
+ return ANK_Error;
+ return ANK_Unresolved;
+ }
+
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+
+ // FIXME: Move the tentative declaration logic into ClassifyName so we can
+ // typo-correct to tentatively-declared identifiers.
+ if (isTentativelyDeclared(Name)) {
+ // Identifier has been tentatively declared, and thus cannot be resolved as
+ // an expression. Fall back to annotating it as a type.
+ if (TryAnnotateTypeOrScopeTokenAfterScopeSpec(EnteringContext, false, SS,
+ !WasScopeAnnotation))
+ return ANK_Error;
+ return Tok.is(tok::annot_typename) ? ANK_Success : ANK_TentativeDecl;
+ }
+
+ Token Next = NextToken();
+
+ // Look up and classify the identifier. We don't perform any typo-correction
+ // after a scope specifier, because in general we can't recover from typos
+  // there (e.g., after correcting 'A::tempalte B<X>::C', we would need to jump
+ // back into scope specifier parsing).
+ Sema::NameClassification Classification
+ = Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
+ IsAddressOfOperand, SS.isEmpty() ? CCC : 0);
+
+ switch (Classification.getKind()) {
+ case Sema::NC_Error:
+ return ANK_Error;
+
+ case Sema::NC_Keyword:
+ // The identifier was typo-corrected to a keyword.
+ Tok.setIdentifierInfo(Name);
+ Tok.setKind(Name->getTokenID());
+ PP.TypoCorrectToken(Tok);
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
+ // We've "annotated" this as a keyword.
+ return ANK_Success;
+
+ case Sema::NC_Unknown:
+ // It's not something we know about. Leave it unannotated.
+ break;
+
+ case Sema::NC_Type:
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Classification.getType());
+ Tok.setAnnotationEndLoc(NameLoc);
+ if (SS.isNotEmpty())
+ Tok.setLocation(SS.getBeginLoc());
+ PP.AnnotateCachedTokens(Tok);
+ return ANK_Success;
+
+ case Sema::NC_Expression:
+ Tok.setKind(tok::annot_primary_expr);
+ setExprAnnotation(Tok, Classification.getExpression());
+ Tok.setAnnotationEndLoc(NameLoc);
+ if (SS.isNotEmpty())
+ Tok.setLocation(SS.getBeginLoc());
+ PP.AnnotateCachedTokens(Tok);
+ return ANK_Success;
+
+ case Sema::NC_TypeTemplate:
+ if (Next.isNot(tok::less)) {
+ // This may be a type template being used as a template template argument.
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
+ return ANK_TemplateName;
+ }
+ // Fall through.
+ case Sema::NC_FunctionTemplate: {
+ // We have a type or function template followed by '<'.
+ ConsumeToken();
+ UnqualifiedId Id;
+ Id.setIdentifier(Name, NameLoc);
+ if (AnnotateTemplateIdToken(
+ TemplateTy::make(Classification.getTemplateName()),
+ Classification.getTemplateNameKind(), SS, SourceLocation(), Id))
+ return ANK_Error;
+ return ANK_Success;
+ }
+
+ case Sema::NC_NestedNameSpecifier:
+ llvm_unreachable("already parsed nested name specifier");
+ }
+
+ // Unable to classify the name, but maybe we can annotate a scope specifier.
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
+ return ANK_Unresolved;
+}
+
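A few invented declarations make the classification outcomes concrete (comments give the annotation each statement-initial name receives):

    struct T { };    // at statement start, "T"  -> tok::annot_typename     (Sema::NC_Type)
    int v;           //                     "v"  -> tok::annot_primary_expr (Sema::NC_Expression)
    template <class X>
    struct TT { };   //                     "TT" followed by '<' is handed to
                     //                     AnnotateTemplateIdToken         (Sema::NC_TypeTemplate)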
/// TryAnnotateTypeOrScopeToken - If the current token position is on a
/// typename (possibly qualified in C++) or a C++ scope specifier not followed
/// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
@@ -1377,8 +1535,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
return true;
}
- ASTTemplateArgsPtr TemplateArgsPtr(Actions,
- TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
@@ -1404,13 +1561,24 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
}
// Remembers whether the token was originally a scope annotation.
- bool wasScopeAnnotation = Tok.is(tok::annot_cxxscope);
+ bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus)
if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
return true;
+ return TryAnnotateTypeOrScopeTokenAfterScopeSpec(EnteringContext, NeedType,
+ SS, !WasScopeAnnotation);
+}
+
+/// \brief Try to annotate a type or scope token, having already parsed an
+/// optional scope specifier. \p IsNewScope should be \c true unless the scope
+/// specifier was extracted from an existing tok::annot_cxxscope annotation.
+bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(bool EnteringContext,
+ bool NeedType,
+ CXXScopeSpec &SS,
+ bool IsNewScope) {
if (Tok.is(tok::identifier)) {
IdentifierInfo *CorrectedII = 0;
// Determine whether the identifier is a type name.
@@ -1492,21 +1660,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
return false;
// A C++ scope specifier that isn't followed by a typename.
- // Push the current token back into the token stream (or revert it if it is
- // cached) and use an annotation scope token for current token.
- if (PP.isBacktrackEnabled())
- PP.RevertCachedTokens(1);
- else
- PP.EnterToken(Tok);
- Tok.setKind(tok::annot_cxxscope);
- Tok.setAnnotationValue(Actions.SaveNestedNameSpecifierAnnotation(SS));
- Tok.setAnnotationRange(SS.getRange());
-
- // In case the tokens were cached, have Preprocessor replace them
- // with the annotation token. We don't need to do this if we've
- // just reverted back to the state we were in before being called.
- if (!wasScopeAnnotation)
- PP.AnnotateCachedTokens(Tok);
+ AnnotateScopeToken(SS, IsNewScope);
return false;
}
@@ -1529,19 +1683,7 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
if (SS.isEmpty())
return false;
- // Push the current token back into the token stream (or revert it if it is
- // cached) and use an annotation scope token for current token.
- if (PP.isBacktrackEnabled())
- PP.RevertCachedTokens(1);
- else
- PP.EnterToken(Tok);
- Tok.setKind(tok::annot_cxxscope);
- Tok.setAnnotationValue(Actions.SaveNestedNameSpecifierAnnotation(SS));
- Tok.setAnnotationRange(SS.getRange());
-
- // In case the tokens were cached, have Preprocessor replace them with the
- // annotation token.
- PP.AnnotateCachedTokens(Tok);
+ AnnotateScopeToken(SS, true);
return false;
}
@@ -1798,8 +1940,8 @@ bool BalancedDelimiterTracker::diagnoseMissingClose() {
}
P.Diag(P.Tok, DID);
P.Diag(LOpen, diag::note_matching) << LHSName;
- if (P.SkipUntil(Close))
- LClose = P.Tok.getLocation();
+ if (P.SkipUntil(Close, /*StopAtSemi*/ true, /*DontConsume*/ true))
+ LClose = P.ConsumeAnyToken();
return true;
}
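
The subtlety in this hunk: the old SkipUntil(Close) consumed the matching close token, so Tok.getLocation() afterwards pointed at the token *after* the delimiter, not at the delimiter itself. A commented restatement of the fixed pattern (parameter names as in this era's Parser::SkipUntil):

    // Stop just before the matching close token instead of eating it...
    if (P.SkipUntil(Close, /*StopAtSemi*/ true, /*DontConsume*/ true))
      // ...so that ConsumeAnyToken() hands back the close delimiter's own
      // location, which is what LClose is supposed to record.
      LClose = P.ConsumeAnyToken();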
diff --git a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
index 455c4af..060fd20 100644
--- a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
+++ b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
@@ -87,9 +87,8 @@ namespace clang {
Sema::ParsingDeclState State;
bool Popped;
- // Do not implement.
- ParsingDeclRAIIObject(const ParsingDeclRAIIObject &other);
- ParsingDeclRAIIObject &operator=(const ParsingDeclRAIIObject &other);
+ ParsingDeclRAIIObject(const ParsingDeclRAIIObject &) LLVM_DELETED_FUNCTION;
+ void operator=(const ParsingDeclRAIIObject &) LLVM_DELETED_FUNCTION;
public:
enum NoParent_t { NoParent };
@@ -245,8 +244,9 @@ namespace clang {
/// the way they used to be. This is used to handle __extension__ in the
/// parser.
class ExtensionRAIIObject {
- void operator=(const ExtensionRAIIObject &); // DO NOT IMPLEMENT
- ExtensionRAIIObject(const ExtensionRAIIObject&); // DO NOT IMPLEMENT
+ ExtensionRAIIObject(const ExtensionRAIIObject &) LLVM_DELETED_FUNCTION;
+ void operator=(const ExtensionRAIIObject &) LLVM_DELETED_FUNCTION;
+
DiagnosticsEngine &Diags;
public:
ExtensionRAIIObject(DiagnosticsEngine &diags) : Diags(diags) {
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Core/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Rewrite/Core/CMakeLists.txt
new file mode 100644
index 0000000..0797818
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Core/CMakeLists.txt
@@ -0,0 +1,24 @@
+add_clang_library(clangRewriteCore
+ DeltaTree.cpp
+ HTMLRewrite.cpp
+ RewriteRope.cpp
+ Rewriter.cpp
+ TokenRewriter.cpp
+ )
+
+add_dependencies(clangRewriteCore
+ ClangAttrClasses
+ ClangAttrList
+ ClangAttrParsedAttrList
+ ClangCommentNodes
+ ClangDeclNodes
+ ClangDiagnosticCommon
+ ClangDiagnosticFrontend
+ ClangStmtNodes
+ )
+
+target_link_libraries(clangRewriteCore
+ clangBasic
+ clangAST
+ clangParse
+ )
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Core/DeltaTree.cpp
index 4297dc8..4692277 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Core/DeltaTree.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/DeltaTree.h"
+#include "clang/Rewrite/Core/DeltaTree.h"
#include "clang/Basic/LLVM.h"
#include <cstring>
#include <cstdio>
@@ -113,8 +113,6 @@ namespace {
void RecomputeFullDeltaLocally();
void Destroy();
-
- //static inline bool classof(const DeltaTreeNode *) { return true; }
};
} // end anonymous namespace
@@ -149,7 +147,6 @@ namespace {
return Children[i];
}
- //static inline bool classof(const DeltaTreeInteriorNode *) { return true; }
static inline bool classof(const DeltaTreeNode *N) { return !N->isLeaf(); }
};
}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Core/HTMLRewrite.cpp
index 236b98f..0e8e4fe 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Core/HTMLRewrite.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/Preprocessor.h"
-#include "clang/Rewrite/Rewriter.h"
-#include "clang/Rewrite/HTMLRewrite.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/Lex/TokenConcatenation.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/SourceManager.h"
@@ -484,6 +484,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Temporarily change the diagnostics object so that we ignore any generated
// diagnostics from this pass.
DiagnosticsEngine TmpDiags(PP.getDiagnostics().getDiagnosticIDs(),
+ &PP.getDiagnostics().getDiagnosticOptions(),
new IgnoringDiagConsumer);
// FIXME: This is a huge hack; we reuse the input preprocessor because we want
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Core/Makefile b/contrib/llvm/tools/clang/lib/Rewrite/Core/Makefile
new file mode 100644
index 0000000..8c8d2e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Core/Makefile
@@ -0,0 +1,18 @@
+##===- clang/lib/Rewrite/Core/Makefile ---------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements code transformation / rewriting facilities.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangRewriteCore
+
+include $(CLANG_LEVEL)/Makefile
+
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Core/RewriteRope.cpp
index cc8de1b..fe7aa2d 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Core/RewriteRope.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/RewriteRope.h"
+#include "clang/Rewrite/Core/RewriteRope.h"
#include "clang/Basic/LLVM.h"
#include <algorithm>
using namespace clang;
@@ -117,8 +117,6 @@ namespace {
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
- //static inline bool classof(const RopePieceBTreeNode *) { return true; }
-
};
} // end anonymous namespace
@@ -221,7 +219,6 @@ namespace {
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
- //static inline bool classof(const RopePieceBTreeLeaf *) { return true; }
static inline bool classof(const RopePieceBTreeNode *N) {
return N->isLeaf();
}
@@ -458,7 +455,6 @@ namespace {
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
- //static inline bool classof(const RopePieceBTreeInterior *) { return true; }
static inline bool classof(const RopePieceBTreeNode *N) {
return !N->isLeaf();
}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Core/Rewriter.cpp
index 7c27114..4df967f 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Core/Rewriter.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/DiagnosticIDs.h"
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Core/TokenRewriter.cpp
index 03ce63e..940ece2 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Core/TokenRewriter.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/TokenRewriter.h"
+#include "clang/Rewrite/Core/TokenRewriter.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/ScratchBuffer.h"
#include "clang/Basic/SourceManager.h"
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Frontend/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/CMakeLists.txt
new file mode 100644
index 0000000..9017e47
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/CMakeLists.txt
@@ -0,0 +1,28 @@
+add_clang_library(clangRewriteFrontend
+ FixItRewriter.cpp
+ FrontendActions.cpp
+ HTMLPrint.cpp
+ InclusionRewriter.cpp
+ RewriteMacros.cpp
+ RewriteModernObjC.cpp
+ RewriteObjC.cpp
+ RewriteTest.cpp
+ )
+
+add_dependencies(clangRewriteFrontend
+ ClangAttrClasses
+ ClangAttrList
+ ClangAttrParsedAttrList
+ ClangCommentNodes
+ ClangDeclNodes
+ ClangDiagnosticCommon
+ ClangDiagnosticFrontend
+ ClangStmtNodes
+ )
+
+target_link_libraries(clangRewriteFrontend
+ clangBasic
+ clangAST
+ clangParse
+ clangFrontend
+ )
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/FixItRewriter.cpp
index 3863adb..43a1ab1 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/FixItRewriter.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/FixItRewriter.h"
+#include "clang/Rewrite/Frontend/FixItRewriter.h"
#include "clang/Edit/Commit.h"
#include "clang/Edit/EditsReceiver.h"
#include "clang/Basic/FileManager.h"
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/FrontendActions.cpp
index 9bc218e..7d29b6d 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/FrontendActions.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/FrontendActions.h"
+#include "clang/Rewrite/Frontend/FrontendActions.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Parse/Parser.h"
@@ -16,9 +16,9 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
-#include "clang/Rewrite/ASTConsumers.h"
-#include "clang/Rewrite/FixItRewriter.h"
-#include "clang/Rewrite/Rewriters.h"
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
+#include "clang/Rewrite/Frontend/FixItRewriter.h"
+#include "clang/Rewrite/Frontend/Rewriters.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Path.h"
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/HTMLPrint.cpp
index 3d190ab..79e4447 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/HTMLPrint.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/ASTConsumers.h"
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -19,8 +19,8 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/Rewrite/HTMLRewrite.h"
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Core/HTMLRewrite.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/InclusionRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/InclusionRewriter.cpp
index 3dfc3b0..9d1bec9 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/InclusionRewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/InclusionRewriter.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/Rewriters.h"
+#include "clang/Rewrite/Frontend/Rewriters.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
@@ -57,10 +57,11 @@ private:
const Token &IncludeTok,
StringRef FileName,
bool IsAngled,
+ CharSourceRange FilenameRange,
const FileEntry *File,
- SourceLocation EndLoc,
StringRef SearchPath,
- StringRef RelativePath);
+ StringRef RelativePath,
+ const Module *Imported);
void WriteLineInfo(const char *Filename, int Line,
SrcMgr::CharacteristicKind FileType,
StringRef EOL, StringRef Extra = StringRef());
@@ -152,10 +153,11 @@ void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
const Token &/*IncludeTok*/,
StringRef /*FileName*/,
bool /*IsAngled*/,
+ CharSourceRange /*FilenameRange*/,
const FileEntry * /*File*/,
- SourceLocation /*EndLoc*/,
StringRef /*SearchPath*/,
- StringRef /*RelativePath*/) {
+ StringRef /*RelativePath*/,
+ const Module * /*Imported*/) {
assert(LastInsertedFileChange == FileChanges.end() && "Another inclusion "
"directive was found before the previous one was processed");
std::pair<FileChangeMap::iterator, bool> p = FileChanges.insert(
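
For downstream PPCallbacks implementors, the hook's shape after this change looks as follows; this is a minimal sketch against the revised clang/Lex/PPCallbacks.h (the class name and body are invented, and `override` is avoided since the tree still builds as C++03):

    #include "clang/Lex/PPCallbacks.h"

    class IncludeLogger : public clang::PPCallbacks {
      virtual void InclusionDirective(clang::SourceLocation HashLoc,
                                      const clang::Token &IncludeTok,
                                      llvm::StringRef FileName, bool IsAngled,
                                      clang::CharSourceRange FilenameRange, // new: spans "name" or <name>
                                      const clang::FileEntry *File,         // old EndLoc parameter is gone
                                      llvm::StringRef SearchPath,
                                      llvm::StringRef RelativePath,
                                      const clang::Module *Imported) {      // new: non-null for module imports
        // Record FileName, or check whether the #include was translated to a
        // module import (Imported != 0), as this rewriter's override does.
      }
    };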
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Frontend/Makefile b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/Makefile
new file mode 100644
index 0000000..ac97d40
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/Makefile
@@ -0,0 +1,18 @@
+##===- clang/lib/Rewrite/Frontend/Makefile -----------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements code transformation / rewriting facilities.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangRewriteFrontend
+
+include $(CLANG_LEVEL)/Makefile
+
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteMacros.cpp
index 3fa0bdb..f399dd5 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteMacros.cpp
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/Rewriters.h"
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Frontend/Rewriters.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteModernObjC.cpp
index dcd003f..4b56b37 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteModernObjC.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/ASTConsumers.h"
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/AST/AST.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ParentMap.h"
@@ -278,6 +278,9 @@ namespace {
// Syntactic Rewriting.
void RewriteRecordBody(RecordDecl *RD);
void RewriteInclude();
+ void RewriteLineDirective(const Decl *D);
+ void ConvertSourceLocationToLineDirective(SourceLocation Loc,
+ std::string &LineString);
void RewriteForwardClassDecl(DeclGroupRef D);
void RewriteForwardClassDecl(const llvm::SmallVector<Decl*, 8> &DG);
void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
@@ -1602,6 +1605,19 @@ Stmt *RewriteModernObjC::RewriteBreakStmt(BreakStmt *S) {
return 0;
}
+void RewriteModernObjC::ConvertSourceLocationToLineDirective(
+ SourceLocation Loc,
+ std::string &LineString) {
+ if (Loc.isFileID()) {
+ LineString += "\n#line ";
+ PresumedLoc PLoc = SM->getPresumedLoc(Loc);
+ LineString += utostr(PLoc.getLine());
+ LineString += " \"";
+ LineString += Lexer::Stringify(PLoc.getFilename());
+ LineString += "\"\n";
+ }
+}
+
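Concretely, for a construct whose original source position is line 120 of foo.m, the helper appends a marker of the following shape to the rewrite buffer so that diagnostics and debug info on the rewritten output map back to the Objective-C source (file name and line number here are invented for illustration):

    #line 120 "foo.m"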
/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach
/// statement to continue with its inner synthesized loop.
///
@@ -1664,7 +1680,10 @@ Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
StringRef elementName;
std::string elementTypeAsString;
std::string buf;
- buf = "\n{\n\t";
+    // Emit the line directive first.
+ SourceLocation ForEachLoc = S->getForLoc();
+ ConvertSourceLocationToLineDirective(ForEachLoc, buf);
+ buf += "{\n\t";
if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
// type elem;
NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
@@ -1836,7 +1855,9 @@ Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S)
assert((*startBuf == '@') && "bogus @synchronized location");
std::string buf;
- buf = "{ id _rethrow = 0; id _sync_obj = ";
+ SourceLocation SynchLoc = S->getAtSynchronizedLoc();
+ ConvertSourceLocationToLineDirective(SynchLoc, buf);
+ buf += "{ id _rethrow = 0; id _sync_obj = ";
const char *lparenBuf = startBuf;
while (*lparenBuf != '(') lparenBuf++;
@@ -1902,12 +1923,14 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt();
bool noCatch = S->getNumCatchStmts() == 0;
std::string buf;
+ SourceLocation TryLocation = S->getAtTryLoc();
+ ConvertSourceLocationToLineDirective(TryLocation, buf);
if (finalStmt) {
if (noCatch)
- buf = "{ id volatile _rethrow = 0;\n";
+ buf += "{ id volatile _rethrow = 0;\n";
else {
- buf = "{ id volatile _rethrow = 0;\ntry {\n";
+ buf += "{ id volatile _rethrow = 0;\ntry {\n";
}
}
// Get the start location and compute the semi location.
@@ -1934,13 +1957,15 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
if (IDecl) {
std::string Result;
+ ConvertSourceLocationToLineDirective(Catch->getLocStart(), Result);
+
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @catch location");
SourceLocation rParenLoc = Catch->getRParenLoc();
const char *rParenBuf = SM->getCharacterData(rParenLoc);
// _objc_exc_Foo *_e as argument to catch.
- Result = "catch (_objc_exc_"; Result += IDecl->getNameAsString();
+ Result += "catch (_objc_exc_"; Result += IDecl->getNameAsString();
Result += " *_"; Result += catchDecl->getNameAsString();
Result += ")";
ReplaceText(startLoc, rParenBuf-startBuf+1, Result);
@@ -1966,11 +1991,18 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
}
if (finalStmt) {
buf.clear();
- if (noCatch)
- buf = "catch (id e) {_rethrow = e;}\n";
- else
- buf = "}\ncatch (id e) {_rethrow = e;}\n";
-
+ SourceLocation FinallyLoc = finalStmt->getLocStart();
+
+ if (noCatch) {
+ ConvertSourceLocationToLineDirective(FinallyLoc, buf);
+ buf += "catch (id e) {_rethrow = e;}\n";
+ }
+ else {
+ buf += "}\n";
+ ConvertSourceLocationToLineDirective(FinallyLoc, buf);
+ buf += "catch (id e) {_rethrow = e;}\n";
+ }
+
SourceLocation startFinalLoc = finalStmt->getLocStart();
ReplaceText(startFinalLoc, 8, buf);
Stmt *body = finalStmt->getFinallyBody();
@@ -2070,7 +2102,7 @@ CallExpr *RewriteModernObjC::SynthesizeCallToFunctionDecl(
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *Exp =
- new (Context) CallExpr(*Context, ICE, args, nargs,
+ new (Context) CallExpr(*Context, ICE, llvm::makeArrayRef(args, nargs),
FT->getCallResultType(*Context),
VK_RValue, EndLoc);
return Exp;
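
This hunk is the first of many below migrating CallExpr's constructor from a (pointer, count) pair to llvm::ArrayRef<Expr*>. A self-contained illustration of the wrapping idiom, using int in place of Expr* (makeArrayRef is the real LLVM helper; the rest is invented):

    #include "llvm/ADT/ArrayRef.h"
    #include <cassert>

    // An ArrayRef parameter accepts any contiguous sequence without copying.
    static int sum(llvm::ArrayRef<int> Vals) {
      int S = 0;
      for (unsigned i = 0, e = Vals.size(); i != e; ++i)
        S += Vals[i];
      return S;
    }

    int main() {
      int Args[3] = {1, 2, 3};
      assert(sum(llvm::makeArrayRef(Args, 3)) == 6); // explicit pointer + length
      assert(sum(Args) == 6);                        // C arrays convert implicitly
      return 0;
    }

SmallVector arguments (as in the later MsgExprs hunks) convert implicitly as well, which is why the `&V[0], V.size()` spellings simply disappear.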
@@ -2675,8 +2707,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
- MsgExprs.size(),
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
FT->getResultType(), VK_RValue,
EndLoc);
ReplaceStmt(Exp, CE);
@@ -2718,7 +2749,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
for (unsigned i = 0; i < NumElements; i++)
InitExprs.push_back(Exp->getElement(i));
Expr *NSArrayCallExpr =
- new (Context) CallExpr(*Context, NSArrayDRE, &InitExprs[0], InitExprs.size(),
+ new (Context) CallExpr(*Context, NSArrayDRE, InitExprs,
NSArrayFType, VK_LValue, SourceLocation());
FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(),
@@ -2814,8 +2845,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
- MsgExprs.size(),
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
FT->getResultType(), VK_RValue,
EndLoc);
ReplaceStmt(Exp, CE);
@@ -2865,7 +2895,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
// (const id [])objects
Expr *NSValueCallExpr =
- new (Context) CallExpr(*Context, NSDictDRE, &ValueExprs[0], ValueExprs.size(),
+ new (Context) CallExpr(*Context, NSDictDRE, ValueExprs,
NSDictFType, VK_LValue, SourceLocation());
FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(),
@@ -2887,7 +2917,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
DictLiteralValueME);
// (const id <NSCopying> [])keys
Expr *NSKeyCallExpr =
- new (Context) CallExpr(*Context, NSDictDRE, &KeyExprs[0], KeyExprs.size(),
+ new (Context) CallExpr(*Context, NSDictDRE, KeyExprs,
NSDictFType, VK_LValue, SourceLocation());
MemberExpr *DictLiteralKeyME =
@@ -2989,8 +3019,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
- MsgExprs.size(),
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
FT->getResultType(), VK_RValue,
EndLoc);
ReplaceStmt(Exp, CE);
@@ -3078,6 +3107,34 @@ static SourceLocation getFunctionSourceLocation (RewriteModernObjC &R,
return FD->getTypeSpecStartLoc();
}
+void RewriteModernObjC::RewriteLineDirective(const Decl *D) {
+
+ SourceLocation Location = D->getLocation();
+
+ if (Location.isFileID()) {
+ std::string LineString("\n#line ");
+ PresumedLoc PLoc = SM->getPresumedLoc(Location);
+ LineString += utostr(PLoc.getLine());
+ LineString += " \"";
+ LineString += Lexer::Stringify(PLoc.getFilename());
+ if (isa<ObjCMethodDecl>(D))
+ LineString += "\"";
+ else LineString += "\"\n";
+
+ Location = D->getLocStart();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isExternC() && !FD->isMain()) {
+ const DeclContext *DC = FD->getDeclContext();
+ if (const LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(DC))
+          // For a braced extern "C" {...}, keep the decl's own start location;
+          // for a bare extern "C" declaration, point at the 'extern' keyword.
+ if (!LSD->getRBraceLoc().isValid())
+ Location = LSD->getExternLoc();
+ }
+ }
+ InsertText(Location, LineString);
+ }
+}
+
/// SynthMsgSendStretCallExpr - This routine translates message expression
/// into a call to objc_msgSend_stret() entry point. Tricky part is that
/// nil check on receiver must be performed before calling objc_msgSend_stret.
@@ -3140,7 +3197,14 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
str += "\t"; str += returnType.getAsString(Context->getPrintingPolicy());
str += " s;\n";
str += "};\n\n";
- SourceLocation FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
+ SourceLocation FunLocStart;
+ if (CurFunctionDef)
+ FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
+ else {
+ assert(CurMethodDef && "SynthMsgSendStretCallExpr - CurMethodDef is null");
+ FunLocStart = CurMethodDef->getLocStart();
+ }
+
InsertText(FunLocStart, str);
++stretCount;
@@ -3151,7 +3215,7 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
SC_None, false, false);
DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, castType, VK_RValue,
SourceLocation());
- CallExpr *STCE = new (Context) CallExpr(*Context, DRE, &MsgExprs[0], MsgExprs.size(),
+ CallExpr *STCE = new (Context) CallExpr(*Context, DRE, MsgExprs,
castType, VK_LValue, SourceLocation());
FieldDecl *FieldD = FieldDecl::Create(*Context, 0, SourceLocation(),
@@ -3260,8 +3324,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
false, superType, VK_LValue,
SourceLocation());
- SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
- InitExprs.size(),
+ SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
superType, VK_LValue,
SourceLocation());
// The code for super is a little tricky to prevent collision with
@@ -3280,8 +3343,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
} else {
// (struct __rw_objc_super) { <exprs from above> }
InitListExpr *ILE =
- new (Context) InitListExpr(*Context, SourceLocation(),
- &InitExprs[0], InitExprs.size(),
+ new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
@@ -3370,8 +3432,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
false, superType, VK_LValue,
SourceLocation());
- SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
- InitExprs.size(),
+ SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
superType, VK_LValue, SourceLocation());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
@@ -3389,8 +3450,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
} else {
// (struct __rw_objc_super) { <exprs from above> }
InitListExpr *ILE =
- new (Context) InitListExpr(*Context, SourceLocation(),
- &InitExprs[0], InitExprs.size(),
+ new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
@@ -3544,10 +3604,8 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
- MsgExprs.size(),
- FT->getResultType(), VK_RValue,
- EndLoc);
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
+ FT->getResultType(), VK_RValue, EndLoc);
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -3578,7 +3636,8 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation());
BinaryOperator *lessThanExpr =
new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(),
+ false);
// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
ConditionalOperator *CondExpr =
new (Context) ConditionalOperator(lessThanExpr,
@@ -3968,8 +4027,12 @@ std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
const FunctionType *AFT = CE->getFunctionType();
QualType RT = AFT->getResultType();
std::string StructRef = "struct " + Tag;
- std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
- funcName.str() + "_block_func_" + utostr(i);
+ SourceLocation BlockLoc = CE->getExprLoc();
+ std::string S;
+ ConvertSourceLocationToLineDirective(BlockLoc, S);
+
+ S += "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
+ funcName.str() + "_block_func_" + utostr(i);
BlockDecl *BD = CE->getBlockDecl();
@@ -4585,8 +4648,7 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
E = Exp->arg_end(); I != E; ++I) {
BlkExprs.push_back(*I);
}
- CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0],
- BlkExprs.size(),
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs,
Exp->getType(), VK_RValue,
SourceLocation());
return CE;
@@ -4869,7 +4931,7 @@ void RewriteModernObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
else if (*argListBegin == '<') {
buf += "/*";
buf += *argListBegin++;
- OrigLength++;;
+ OrigLength++;
while (*argListBegin != '>') {
buf += *argListBegin++;
OrigLength++;
@@ -5347,7 +5409,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
Context->IntTy, SourceLocation());
InitExprs.push_back(FlagExp);
}
- NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(),
+ NewRep = new (Context) CallExpr(*Context, DRE, InitExprs,
FType, VK_LValue, SourceLocation());
if (GlobalBlockExpr) {
@@ -5666,6 +5728,7 @@ void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
// This synthesizes and inserts the block "impl" struct, invoke function,
// and any copy/dispose helper functions.
InsertBlockLiteralsWithinFunction(FD);
+ RewriteLineDirective(D);
CurFunctionDef = 0;
}
break;
@@ -5684,6 +5747,7 @@ void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
PropParentMap = 0;
}
InsertBlockLiteralsWithinMethod(MD);
+ RewriteLineDirective(D);
CurMethodDef = 0;
}
break;
@@ -5894,8 +5958,8 @@ void RewriteModernObjC::Initialize(ASTContext &context) {
Preamble += "(const char *);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw( struct objc_object *);\n";
// @synchronized hooks.
- Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter( struct objc_object *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit( struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_enter( struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_exit( struct objc_object *);\n";
Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
Preamble += "struct __objcFastEnumerationState {\n\t";
@@ -7476,7 +7540,7 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
BinaryOperator *addExpr =
new (Context) BinaryOperator(castExpr, DRE, BO_Add,
Context->getPointerType(Context->CharTy),
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(), false);
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(),
SourceLocation(),
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteObjC.cpp
index 37c17e6..a6dcc6b 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteObjC.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/ASTConsumers.h"
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/AST/AST.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ParentMap.h"
@@ -2059,7 +2059,7 @@ CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *Exp =
- new (Context) CallExpr(*Context, ICE, args, nargs,
+ new (Context) CallExpr(*Context, ICE, llvm::makeArrayRef(args, nargs),
FT->getCallResultType(*Context),
VK_RValue, EndLoc);
return Exp;
@@ -2661,8 +2661,7 @@ CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavo
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
- MsgExprs.size(),
+ CallExpr *STCE = new (Context) CallExpr(*Context, PE, MsgExprs,
FT->getResultType(), VK_RValue,
SourceLocation());
return STCE;
@@ -2766,8 +2765,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
false, superType, VK_LValue,
SourceLocation());
- SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
- InitExprs.size(),
+ SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
superType, VK_LValue,
SourceLocation());
// The code for super is a little tricky to prevent collision with
@@ -2786,8 +2784,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
} else {
// (struct objc_super) { <exprs from above> }
InitListExpr *ILE =
- new (Context) InitListExpr(*Context, SourceLocation(),
- &InitExprs[0], InitExprs.size(),
+ new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
@@ -2876,8 +2873,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
false, superType, VK_LValue,
SourceLocation());
- SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
- InitExprs.size(),
+ SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
superType, VK_LValue, SourceLocation());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
@@ -2895,8 +2891,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
} else {
// (struct objc_super) { <exprs from above> }
InitListExpr *ILE =
- new (Context) InitListExpr(*Context, SourceLocation(),
- &InitExprs[0], InitExprs.size(),
+ new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
@@ -3050,8 +3045,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
- MsgExprs.size(),
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs,
FT->getResultType(), VK_RValue,
EndLoc);
Stmt *ReplacingStmt = CE;
@@ -3084,7 +3078,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation());
BinaryOperator *lessThanExpr =
new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(),
+ false);
// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
ConditionalOperator *CondExpr =
new (Context) ConditionalOperator(lessThanExpr,
@@ -3923,8 +3918,7 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
E = Exp->arg_end(); I != E; ++I) {
BlkExprs.push_back(*I);
}
- CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0],
- BlkExprs.size(),
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs,
Exp->getType(), VK_RValue,
SourceLocation());
return CE;
@@ -4190,7 +4184,7 @@ void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
else if (*argListBegin == '<') {
buf += "/*";
buf += *argListBegin++;
- OrigLength++;;
+ OrigLength++;
while (*argListBegin != '>') {
buf += *argListBegin++;
OrigLength++;
@@ -4651,7 +4645,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
Context->IntTy, SourceLocation());
InitExprs.push_back(FlagExp);
}
- NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(),
+ NewRep = new (Context) CallExpr(*Context, DRE, InitExprs,
FType, VK_LValue, SourceLocation());
NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
Context->getPointerType(NewRep->getType()),
@@ -5119,8 +5113,8 @@ void RewriteObjCFragileABI::Initialize(ASTContext &context) {
Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match";
Preamble += "(struct objc_class *, struct objc_object *);\n";
// @synchronized hooks.
- Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter(struct objc_object *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_enter(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_exit(struct objc_object *);\n";
Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
Preamble += "struct __objcFastEnumerationState {\n\t";
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteTest.cpp
index 019e5e7..722c5e8 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Frontend/RewriteTest.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/Rewriters.h"
+#include "clang/Rewrite/Frontend/Rewriters.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/Rewrite/TokenRewriter.h"
+#include "clang/Rewrite/Core/TokenRewriter.h"
#include "llvm/Support/raw_ostream.h"
void clang::DoRewriteTest(Preprocessor &PP, raw_ostream* OS) {
diff --git a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 19a7d6f..801a1b1 100644
--- a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -27,6 +27,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/AnalysisContext.h"
@@ -36,10 +37,12 @@
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
@@ -182,13 +185,6 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
HasFakeEdge = true;
continue;
}
- if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
- if (AS->isMSAsm()) {
- HasFakeEdge = true;
- HasLiveReturn = true;
- continue;
- }
- }
if (isa<MSAsmStmt>(S)) {
// TODO: Verify this is correct.
HasFakeEdge = true;
@@ -506,7 +502,7 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
// Information used when building the diagnostic.
unsigned DiagKind;
- const char *Str;
+ StringRef Str;
SourceRange Range;
// FixIts to suppress the diagnosic by removing the dead condition.
@@ -822,6 +818,18 @@ namespace {
static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
bool PerFunction) {
+ // Only perform this analysis when using C++11. There is no good workflow
+ // for this warning when not using C++11. There is no good way to silence
+ // the warning (no attribute is available) unless we are using C++11's support
+  // for generalized attributes. One could use pragmas to silence the warning,
+ // but as a general solution that is gross and not in the spirit of this
+ // warning.
+ //
+  // NOTE: This is an intermediate solution. There are ongoing discussions on
+ // how to properly support this warning outside of C++11 with an annotation.
+ if (!AC.getASTContext().getLangOpts().CPlusPlus0x)
+ return;
+
FallthroughMapper FM(S);
FM.TraverseStmt(AC.getBody());
@@ -859,8 +867,21 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
if (S.getLangOpts().CPlusPlus0x) {
const Stmt *Term = B.getTerminator();
if (!(B.empty() && Term && isa<BreakStmt>(Term))) {
+ Preprocessor &PP = S.getPreprocessor();
+ TokenValue Tokens[] = {
+ tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
+ tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
+ tok::r_square, tok::r_square
+ };
+ StringRef AnnotationSpelling = "[[clang::fallthrough]]";
+ StringRef MacroName = PP.getLastMacroWithSpelling(L, Tokens);
+ if (!MacroName.empty())
+ AnnotationSpelling = MacroName;
+ SmallString<64> TextToInsert(AnnotationSpelling);
+ TextToInsert += "; ";
S.Diag(L, diag::note_insert_fallthrough_fixit) <<
- FixItHint::CreateInsertion(L, "[[clang::fallthrough]]; ");
+ AnnotationSpelling <<
+ FixItHint::CreateInsertion(L, TextToInsert);
}
}
S.Diag(L, diag::note_insert_break_fixit) <<
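
The user-visible effect, sketched on an invented switch (C++11): the note now suggests the user's own macro when one spells exactly [[clang::fallthrough]], instead of always emitting the raw attribute:

    #define FALLTHROUGH [[clang::fallthrough]]

    int classify(int n) {
      int score = 0;
      switch (n) {
      case 0:
        score += 1;
        FALLTHROUGH;        // inserted by the fix-it; with the macro defined
                            // above, "FALLTHROUGH; " is suggested rather than
                            // "[[clang::fallthrough]]; "
      case 1:
        score += 2;
        break;
      }
      return score;
    }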
@@ -878,6 +899,199 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
}
namespace {
+typedef std::pair<const Stmt *,
+ sema::FunctionScopeInfo::WeakObjectUseMap::const_iterator>
+ StmtUsesPair;
+
+class StmtUseSorter {
+ const SourceManager &SM;
+
+public:
+ explicit StmtUseSorter(const SourceManager &SM) : SM(SM) { }
+
+ bool operator()(const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
+ return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
+ RHS.first->getLocStart());
+ }
+};
+}
+
+static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
+ const Stmt *S) {
+ assert(S);
+
+ do {
+ switch (S->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::CXXForRangeStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
+ return true;
+ case Stmt::DoStmtClass: {
+ const Expr *Cond = cast<DoStmt>(S)->getCond();
+ llvm::APSInt Val;
+ if (!Cond->EvaluateAsInt(Val, Ctx))
+ return true;
+ return Val.getBoolValue();
+ }
+ default:
+ break;
+ }
+ } while ((S = PM.getParent(S)));
+
+ return false;
+}
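
The DoStmt case deserves a worked example: a do { ... } while (0) executes exactly once, so a weak-object read inside it is not treated as "in a loop". A sketch of the two situations the walk distinguishes (plain C++, mirroring the logic rather than the AST API):

    void uses(int);

    void example(int w) {
      do {
        uses(w);            // condition folds to false -> body runs once,
      } while (0);          // so isInLoop(...) would say "not in a loop"

      while (true) {
        uses(w);            // genuinely repeated -> isInLoop(...) says true
        break;
      }
    }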
+
+
+static void diagnoseRepeatedUseOfWeak(Sema &S,
+ const sema::FunctionScopeInfo *CurFn,
+ const Decl *D,
+ const ParentMap &PM) {
+ typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
+ typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
+ typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;
+
+ ASTContext &Ctx = S.getASTContext();
+
+ const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();
+
+ // Extract all weak objects that are referenced more than once.
+ SmallVector<StmtUsesPair, 8> UsesByStmt;
+ for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
+ I != E; ++I) {
+ const WeakUseVector &Uses = I->second;
+
+ // Find the first read of the weak object.
+ WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
+ for ( ; UI != UE; ++UI) {
+ if (UI->isUnsafe())
+ break;
+ }
+
+ // If there were only writes to this object, don't warn.
+ if (UI == UE)
+ continue;
+
+ // If there was only one read, followed by any number of writes, and the
+ // read is not within a loop, don't warn. Additionally, don't warn in a
+ // loop if the base object is a local variable -- local variables are often
+ // changed in loops.
+ if (UI == Uses.begin()) {
+ WeakUseVector::const_iterator UI2 = UI;
+ for (++UI2; UI2 != UE; ++UI2)
+ if (UI2->isUnsafe())
+ break;
+
+ if (UI2 == UE) {
+ if (!isInLoop(Ctx, PM, UI->getUseExpr()))
+ continue;
+
+ const WeakObjectProfileTy &Profile = I->first;
+ if (!Profile.isExactProfile())
+ continue;
+
+ const NamedDecl *Base = Profile.getBase();
+ if (!Base)
+ Base = Profile.getProperty();
+ assert(Base && "A profile always has a base or property.");
+
+ if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
+ if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
+ continue;
+ }
+ }
+
+ UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
+ }
+
+ if (UsesByStmt.empty())
+ return;
+
+ // Sort by first use so that we emit the warnings in a deterministic order.
+ std::sort(UsesByStmt.begin(), UsesByStmt.end(),
+ StmtUseSorter(S.getSourceManager()));
+
+ // Classify the current code body for better warning text.
+ // This enum should stay in sync with the cases in
+ // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
+ // FIXME: Should we use a common classification enum and the same set of
+ // possibilities all throughout Sema?
+ enum {
+ Function,
+ Method,
+ Block,
+ Lambda
+ } FunctionKind;
+
+ if (isa<sema::BlockScopeInfo>(CurFn))
+ FunctionKind = Block;
+ else if (isa<sema::LambdaScopeInfo>(CurFn))
+ FunctionKind = Lambda;
+ else if (isa<ObjCMethodDecl>(D))
+ FunctionKind = Method;
+ else
+ FunctionKind = Function;
+
+ // Iterate through the sorted problems and emit warnings for each.
+ for (SmallVectorImpl<StmtUsesPair>::const_iterator I = UsesByStmt.begin(),
+ E = UsesByStmt.end();
+ I != E; ++I) {
+ const Stmt *FirstRead = I->first;
+ const WeakObjectProfileTy &Key = I->second->first;
+ const WeakUseVector &Uses = I->second->second;
+
+ // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
+ // may not contain enough information to determine that these are different
+ // properties. We can only be 100% sure of a repeated use in certain cases,
+ // and we adjust the diagnostic kind accordingly so that the less certain
+ // case can be turned off if it is too noisy.
+ unsigned DiagKind;
+ if (Key.isExactProfile())
+ DiagKind = diag::warn_arc_repeated_use_of_weak;
+ else
+ DiagKind = diag::warn_arc_possible_repeated_use_of_weak;
+
+ // Classify the weak object being accessed for better warning text.
+ // This enum should stay in sync with the cases in
+ // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
+ enum {
+ Variable,
+ Property,
+ ImplicitProperty,
+ Ivar
+ } ObjectKind;
+
+ const NamedDecl *D = Key.getProperty();
+ if (isa<VarDecl>(D))
+ ObjectKind = Variable;
+ else if (isa<ObjCPropertyDecl>(D))
+ ObjectKind = Property;
+ else if (isa<ObjCMethodDecl>(D))
+ ObjectKind = ImplicitProperty;
+ else if (isa<ObjCIvarDecl>(D))
+ ObjectKind = Ivar;
+ else
+ llvm_unreachable("Unexpected weak object kind!");
+
+ // Show the first time the object was read.
+ S.Diag(FirstRead->getLocStart(), DiagKind)
+ << ObjectKind << D << FunctionKind
+ << FirstRead->getSourceRange();
+
+ // Print all the other accesses as notes.
+ for (WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
+ UI != UE; ++UI) {
+ if (UI->getUseExpr() == FirstRead)
+ continue;
+ S.Diag(UI->getUseExpr()->getLocStart(),
+ diag::note_arc_weak_also_accessed_here)
+ << UI->getUseExpr()->getSourceRange();
+ }
+ }
+}
+
+
+namespace {
struct SLocSort {
bool operator()(const UninitUse &a, const UninitUse &b) {
// Prefer a more confident report over a less confident one.
@@ -1091,27 +1305,47 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
diag::warn_variable_requires_any_lock:
diag::warn_var_deref_requires_any_lock;
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
- << D->getName() << getLockKindFromAccessKind(AK));
+ << D->getNameAsString() << getLockKindFromAccessKind(AK));
Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
}
void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
- Name LockName, LockKind LK, SourceLocation Loc) {
+ Name LockName, LockKind LK, SourceLocation Loc,
+ Name *PossibleMatch) {
unsigned DiagID = 0;
- switch (POK) {
- case POK_VarAccess:
- DiagID = diag::warn_variable_requires_lock;
- break;
- case POK_VarDereference:
- DiagID = diag::warn_var_deref_requires_lock;
- break;
- case POK_FunctionCall:
- DiagID = diag::warn_fun_requires_lock;
- break;
+ if (PossibleMatch) {
+ switch (POK) {
+ case POK_VarAccess:
+ DiagID = diag::warn_variable_requires_lock_precise;
+ break;
+ case POK_VarDereference:
+ DiagID = diag::warn_var_deref_requires_lock_precise;
+ break;
+ case POK_FunctionCall:
+ DiagID = diag::warn_fun_requires_lock_precise;
+ break;
+ }
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
+ << D->getNameAsString() << LockName << LK);
+ PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
+ << *PossibleMatch);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
+ } else {
+ switch (POK) {
+ case POK_VarAccess:
+ DiagID = diag::warn_variable_requires_lock;
+ break;
+ case POK_VarDereference:
+ DiagID = diag::warn_var_deref_requires_lock;
+ break;
+ case POK_FunctionCall:
+ DiagID = diag::warn_fun_requires_lock;
+ break;
+ }
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
+ << D->getNameAsString() << LockName << LK);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
}
- PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
- << D->getName() << LockName << LK);
- Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
}
void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
@@ -1206,7 +1440,8 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
AC.getCFGBuildOptions().AddEHEdges = false;
AC.getCFGBuildOptions().AddInitializers = true;
AC.getCFGBuildOptions().AddImplicitDtors = true;
-
+ AC.getCFGBuildOptions().AddTemporaryDtors = true;
+
// Force that certain expressions appear as CFGElements in the CFG. This
// is used to speed up various analyses.
// FIXME: This isn't the right factoring. This is here for initial
@@ -1350,6 +1585,11 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
}
+ if (S.getLangOpts().ObjCARCWeak &&
+ Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
+ D->getLocStart()) != DiagnosticsEngine::Ignored)
+ diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());
+
// Collect statistics about the CFG if it was built.
if (S.CollectStats && AC.isCFGBuilt()) {
++NumFunctionsAnalyzed;
diff --git a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
index a835725..0a23601 100644
--- a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -193,11 +193,10 @@ CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
CXAvailabilityKind Availability,
const char **Annotations,
unsigned NumAnnotations,
- CXCursorKind ParentKind,
StringRef ParentName,
const char *BriefComment)
: NumChunks(NumChunks), NumAnnotations(NumAnnotations),
- Priority(Priority), Availability(Availability), ParentKind(ParentKind),
+ Priority(Priority), Availability(Availability),
ParentName(ParentName), BriefComment(BriefComment)
{
assert(NumChunks <= 0xffff);
@@ -339,7 +338,7 @@ CodeCompletionString *CodeCompletionBuilder::TakeString() {
= new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
Priority, Availability,
Annotations.data(), Annotations.size(),
- ParentKind, ParentName, BriefComment);
+ ParentName, BriefComment);
Chunks.clear();
return Result;
}
@@ -380,7 +379,6 @@ void CodeCompletionBuilder::AddChunk(CodeCompletionString::ChunkKind CK,
void CodeCompletionBuilder::addParentContext(DeclContext *DC) {
if (DC->isTranslationUnit()) {
- ParentKind = CXCursor_TranslationUnit;
return;
}
@@ -391,7 +389,6 @@ void CodeCompletionBuilder::addParentContext(DeclContext *DC) {
if (!ND)
return;
- ParentKind = getCursorKindForDecl(ND);
ParentName = getCodeCompletionTUInfo().getParentName(DC);
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
index d12ca78..b3066eb 100644
--- a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
@@ -144,11 +144,13 @@ CXXScopeSpec::getWithLocInContext(ASTContext &Context) const {
/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
/// "TheDeclarator" is the declarator that this will be added to.
-DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
+DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
bool isAmbiguous,
- SourceLocation EllipsisLoc,
+ SourceLocation LParenLoc,
ParamInfo *ArgInfo,
unsigned NumArgs,
+ SourceLocation EllipsisLoc,
+ SourceLocation RParenLoc,
unsigned TypeQuals,
bool RefQualifierIsLvalueRef,
SourceLocation RefQualifierLoc,
@@ -173,9 +175,11 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
I.EndLoc = LocalRangeEnd;
I.Fun.AttrList = 0;
I.Fun.hasPrototype = hasProto;
- I.Fun.isVariadic = isVariadic;
+ I.Fun.isVariadic = EllipsisLoc.isValid();
I.Fun.isAmbiguous = isAmbiguous;
+ I.Fun.LParenLoc = LParenLoc.getRawEncoding();
I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding();
+ I.Fun.RParenLoc = RParenLoc.getRawEncoding();
I.Fun.DeleteArgInfo = false;
I.Fun.TypeQuals = TypeQuals;
I.Fun.NumArgs = NumArgs;
@@ -270,6 +274,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_int:
case TST_int128:
case TST_struct:
+ case TST_interface:
case TST_union:
case TST_unknown_anytype:
case TST_unspecified:
@@ -325,10 +330,14 @@ unsigned DeclSpec::getParsedSpecifiers() const {
template <class T> static bool BadSpecifier(T TNew, T TPrev,
const char *&PrevSpec,
- unsigned &DiagID) {
+ unsigned &DiagID,
+ bool IsExtension = true) {
PrevSpec = DeclSpec::getSpecifierName(TPrev);
- DiagID = (TNew == TPrev ? diag::ext_duplicate_declspec
- : diag::err_invalid_decl_spec_combination);
+ if (TNew != TPrev)
+ DiagID = diag::err_invalid_decl_spec_combination;
+ else
+ DiagID = IsExtension ? diag::ext_duplicate_declspec :
+ diag::warn_duplicate_declspec;
return true;
}
@@ -396,6 +405,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T) {
case DeclSpec::TST_class: return "class";
case DeclSpec::TST_union: return "union";
case DeclSpec::TST_struct: return "struct";
+ case DeclSpec::TST_interface: return "__interface";
case DeclSpec::TST_typename: return "type-name";
case DeclSpec::TST_typeofType:
case DeclSpec::TST_typeofExpr: return "typeof";
@@ -670,12 +680,16 @@ bool DeclSpec::SetTypeSpecError() {
}
bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
- unsigned &DiagID, const LangOptions &Lang,
- bool IsTypeSpec) {
- // Duplicates are permitted in C99, and are permitted in C++11 unless the
- // cv-qualifier appears as a type-specifier.
- if ((TypeQualifiers & T) && !Lang.C99 && (!Lang.CPlusPlus0x || IsTypeSpec))
- return BadSpecifier(T, T, PrevSpec, DiagID);
+ unsigned &DiagID, const LangOptions &Lang) {
+ // Duplicates are permitted in C99, but are not permitted in C++. However,
+ // since this is likely not what the user intended, we will always warn. We
+ // do not need to set the qualifier's location since we already have it.
+ if (TypeQualifiers & T) {
+ bool IsExtension = true;
+ if (Lang.C99)
+ IsExtension = false;
+ return BadSpecifier(T, T, PrevSpec, DiagID, IsExtension);
+ }
TypeQualifiers |= T;
switch (T) {
diff --git a/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
index 876f9d7..3100432 100644
--- a/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
@@ -22,6 +22,7 @@ using namespace sema;
DelayedDiagnostic DelayedDiagnostic::makeDeprecation(SourceLocation Loc,
const NamedDecl *D,
const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
StringRef Msg) {
DelayedDiagnostic DD;
DD.Kind = Deprecation;
@@ -29,6 +30,7 @@ DelayedDiagnostic DelayedDiagnostic::makeDeprecation(SourceLocation Loc,
DD.Loc = Loc;
DD.DeprecationData.Decl = D;
DD.DeprecationData.UnknownObjCClass = UnknownObjCClass;
+ DD.DeprecationData.ObjCProperty = ObjCProperty;
char *MessageData = 0;
if (Msg.size()) {
MessageData = new char [Msg.size()];
diff --git a/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
index 4d62cab..0093915 100644
--- a/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
@@ -135,8 +135,16 @@ bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx,
// of the controlled statement.
//
assert(S->getParent() && "No TUScope?");
- if (S->getParent()->getFlags() & Scope::ControlScope)
+ if (S->getFlags() & Scope::FnTryScope)
return S->getParent()->isDeclScope(D);
+ if (S->getParent()->getFlags() & Scope::ControlScope) {
+ if (S->getParent()->getFlags() & Scope::FnCatchScope) {
+ S = S->getParent();
+ if (S->isDeclScope(D))
+ return true;
+ }
+ return S->getParent()->isDeclScope(D);
+ }
}
return false;
}
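The IdentifierResolver change above extends isDeclInScope() to function-try-block scopes: a declaration in the outermost block of a handler is checked against the function's parameter scope, matching the C++ rule that a parameter name shall not be redeclared there. A hedged illustration of the language rule being enforced (the ill-formed line is left commented so the example compiles):

    #include <stdexcept>

    int divide(int num, int den) try {
      if (den == 0)
        throw std::runtime_error("division by zero");
      return num / den;
    } catch (const std::exception &) {
      // int num = 0;  // ill-formed if enabled: redeclares the parameter
                       // 'num' in the outermost block of the handler
      return -1;
    }

    int main() { return divide(6, 3) == 2 ? 0 : 1; }
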
diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
index ab786c6..e2ec1cc 100644
--- a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
@@ -123,7 +123,7 @@ typedef std::pair<unsigned,unsigned> ScopePair;
/// diagnostic that should be emitted if control goes over it. If not, return 0.
static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- unsigned InDiag = 0, OutDiag = 0;
+ unsigned InDiag = 0;
if (VD->getType()->isVariablyModifiedType())
InDiag = diag::note_protected_by_vla;
@@ -164,43 +164,53 @@ static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) {
// where it is in scope is ill-formed unless the variable has
// POD type and is declared without an initializer.
- if (const Expr *init = VD->getInit()) {
- // We actually give variables of record type (or array thereof)
- // an initializer even if that initializer only calls a trivial
- // ctor. Detect that case.
- // FIXME: With generalized initializer lists, this may
- // classify "X x{};" as having no initializer.
- unsigned inDiagToUse = diag::note_protected_by_variable_init;
-
- const CXXRecordDecl *record = 0;
-
- if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
- const CXXConstructorDecl *ctor = cce->getConstructor();
- record = ctor->getParent();
-
- if (ctor->isTrivial() && ctor->isDefaultConstructor()) {
- if (!record->hasTrivialDestructor())
- inDiagToUse = diag::note_protected_by_variable_nontriv_destructor;
- else if (!record->isPOD())
- inDiagToUse = diag::note_protected_by_variable_non_pod;
- else
- inDiagToUse = 0;
- }
- } else if (VD->getType()->isArrayType()) {
- record = VD->getType()->getBaseElementTypeUnsafe()
- ->getAsCXXRecordDecl();
+ const Expr *Init = VD->getInit();
+ if (!Init)
+ return ScopePair(InDiag, 0);
+
+ const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init);
+ if (EWC)
+ Init = EWC->getSubExpr();
+
+ const MaterializeTemporaryExpr *M = NULL;
+ Init = Init->findMaterializedTemporary(M);
+
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ Init = Init->skipRValueSubobjectAdjustments(Adjustments);
+
+ QualType QT = Init->getType();
+ if (QT.isNull())
+ return ScopePair(diag::note_protected_by_variable_init, 0);
+
+ const Type *T = QT.getTypePtr();
+ if (T->isArrayType())
+ T = T->getBaseElementTypeUnsafe();
+
+ const CXXRecordDecl *Record = T->getAsCXXRecordDecl();
+ if (!Record)
+ return ScopePair(diag::note_protected_by_variable_init, 0);
+
+ // If we need to call a non-trivial destructor for this variable,
+ // record an out diagnostic.
+ unsigned OutDiag = 0;
+ if (!Init->isGLValue() && !Record->hasTrivialDestructor())
+ OutDiag = diag::note_exits_dtor;
+
+ if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(Init)) {
+ const CXXConstructorDecl *ctor = cce->getConstructor();
+ if (ctor->isTrivial() && ctor->isDefaultConstructor()) {
+ if (OutDiag)
+ InDiag = diag::note_protected_by_variable_nontriv_destructor;
+ else if (!Record->isPOD())
+ InDiag = diag::note_protected_by_variable_non_pod;
+ return ScopePair(InDiag, OutDiag);
}
-
- if (inDiagToUse)
- InDiag = inDiagToUse;
-
- // Also object to indirect jumps which leave scopes with dtors.
- if (record && !record->hasTrivialDestructor())
- OutDiag = diag::note_exits_dtor;
}
+
+ return ScopePair(diag::note_protected_by_variable_init, OutDiag);
}
-
- return ScopePair(InDiag, OutDiag);
+
+ return ScopePair(InDiag, 0);
}
if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
@@ -322,6 +332,29 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope)
Jumps.push_back(S);
break;
+ case Stmt::CXXTryStmtClass: {
+ CXXTryStmt *TS = cast<CXXTryStmt>(S);
+ unsigned newParentScope;
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_cxx_try,
+ diag::note_exits_cxx_try,
+ TS->getSourceRange().getBegin()));
+ if (Stmt *TryBlock = TS->getTryBlock())
+ BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1));
+
+ // Jump from the catch into the try is not allowed either.
+ for (unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) {
+ CXXCatchStmt *CS = TS->getHandler(I);
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_cxx_catch,
+ diag::note_exits_cxx_catch,
+ CS->getSourceRange().getBegin()));
+ BuildScopeInformation(CS->getHandlerBlock(),
+ (newParentScope = Scopes.size()-1));
+ }
+ return;
+ }
+
default:
break;
}
@@ -418,30 +451,6 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope)
continue;
}
- // Disallow jumps into any part of a C++ try statement. This is pretty
- // much the same as for Obj-C.
- if (CXXTryStmt *TS = dyn_cast<CXXTryStmt>(SubStmt)) {
- Scopes.push_back(GotoScope(ParentScope,
- diag::note_protected_by_cxx_try,
- diag::note_exits_cxx_try,
- TS->getSourceRange().getBegin()));
- if (Stmt *TryBlock = TS->getTryBlock())
- BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1));
-
- // Jump from the catch into the try is not allowed either.
- for (unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) {
- CXXCatchStmt *CS = TS->getHandler(I);
- Scopes.push_back(GotoScope(ParentScope,
- diag::note_protected_by_cxx_catch,
- diag::note_exits_cxx_catch,
- CS->getSourceRange().getBegin()));
- BuildScopeInformation(CS->getHandlerBlock(),
- (newParentScope = Scopes.size()-1));
- }
-
- continue;
- }
-
// Disallow jumps into the protected statement of an @autoreleasepool.
if (ObjCAutoreleasePoolStmt *AS = dyn_cast<ObjCAutoreleasePoolStmt>(SubStmt)){
// Recursively walk the AST for the @autoreleasepool part, protected by a new
@@ -453,14 +462,19 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope)
BuildScopeInformation(AS->getSubStmt(), (newParentScope = Scopes.size()-1));
continue;
}
-
- if (const BlockExpr *BE = dyn_cast<BlockExpr>(SubStmt)) {
- const BlockDecl *BDecl = BE->getBlockDecl();
+
+ // Disallow jumps past full-expressions that use blocks with
+ // non-trivial cleanups of their captures. This is theoretically
+ // implementable but a lot of work which we haven't felt up to doing.
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(SubStmt)) {
+ for (unsigned i = 0, e = EWC->getNumObjects(); i != e; ++i) {
+ const BlockDecl *BDecl = EWC->getObject(i);
for (BlockDecl::capture_const_iterator ci = BDecl->capture_begin(),
ce = BDecl->capture_end(); ci != ce; ++ci) {
VarDecl *variable = ci->getVariable();
BuildScopeInformation(variable, BDecl, ParentScope);
}
+ }
}
// Recursively walk the AST.
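The JumpDiagnostics changes above fold C++ try-statement handling into the main statement switch and derive both the jump-into and jump-out-of notes from the variable's initializer. A hedged example of the core rule those notes describe: a goto may not jump into the scope of a non-POD variable past its initialization (again left commented so the snippet compiles):

    #include <string>

    bool cond();  // assumed: some runtime condition

    int demo() {
      // if (cond()) goto skip;  // ill-formed if enabled: the jump would
      //                         // bypass the initialization of the non-POD
      //                         // variable 's' below
      std::string s = "guarded";
      // skip:
      return static_cast<int>(s.size());
    }

    bool cond() { return false; }
    int main() { return demo() == 7 ? 0 : 1; }
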
diff --git a/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp
new file mode 100644
index 0000000..f930fb3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -0,0 +1,271 @@
+//===--- MultiplexExternalSemaSource.cpp ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the event dispatching to the subscribed clients.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/MultiplexExternalSemaSource.h"
+
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/Sema/Lookup.h"
+
+using namespace clang;
+
+///\brief Constructs a new multiplexing external sema source containing the
+/// two given sources.
+///
+///\param[in] s1 - The first ExternalSemaSource.
+///\param[in] s2 - The second ExternalSemaSource.
+///
+MultiplexExternalSemaSource::MultiplexExternalSemaSource(ExternalSemaSource &s1,
+ ExternalSemaSource &s2){
+ Sources.push_back(&s1);
+ Sources.push_back(&s2);
+}
+
+// pin the vtable here.
+MultiplexExternalSemaSource::~MultiplexExternalSemaSource() {}
+
+///\brief Appends a new source to the source list.
+///
+///\param[in] source - An ExternalSemaSource.
+///
+void MultiplexExternalSemaSource::addSource(ExternalSemaSource &source) {
+ Sources.push_back(&source);
+}
+
+//===----------------------------------------------------------------------===//
+// ExternalASTSource.
+//===----------------------------------------------------------------------===//
+
+Decl *MultiplexExternalSemaSource::GetExternalDecl(uint32_t ID) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ if (Decl *Result = Sources[i]->GetExternalDecl(ID))
+ return Result;
+ return 0;
+}
+
+Selector MultiplexExternalSemaSource::GetExternalSelector(uint32_t ID) {
+ Selector Sel;
+ for(size_t i = 0; i < Sources.size(); ++i) {
+ Sel = Sources[i]->GetExternalSelector(ID);
+ if (!Sel.isNull())
+ return Sel;
+ }
+ return Sel;
+}
+
+uint32_t MultiplexExternalSemaSource::GetNumExternalSelectors() {
+ uint32_t total = 0;
+ for(size_t i = 0; i < Sources.size(); ++i)
+ total += Sources[i]->GetNumExternalSelectors();
+ return total;
+}
+
+Stmt *MultiplexExternalSemaSource::GetExternalDeclStmt(uint64_t Offset) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ if (Stmt *Result = Sources[i]->GetExternalDeclStmt(Offset))
+ return Result;
+ return 0;
+}
+
+CXXBaseSpecifier *MultiplexExternalSemaSource::GetExternalCXXBaseSpecifiers(
+ uint64_t Offset){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ if (CXXBaseSpecifier *R = Sources[i]->GetExternalCXXBaseSpecifiers(Offset))
+ return R;
+ return 0;
+}
+
+DeclContextLookupResult MultiplexExternalSemaSource::
+FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name) {
+ StoredDeclsList DeclsFound;
+ DeclContextLookupResult lookup;
+ for(size_t i = 0; i < Sources.size(); ++i) {
+ lookup = Sources[i]->FindExternalVisibleDeclsByName(DC, Name);
+ while(lookup.first != lookup.second) {
+ if (!DeclsFound.HandleRedeclaration(*lookup.first))
+ DeclsFound.AddSubsequentDecl(*lookup.first);
+ lookup.first++;
+ }
+ }
+ return DeclsFound.getLookupResult();
+}
+
+void MultiplexExternalSemaSource::completeVisibleDeclsMap(const DeclContext *DC){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->completeVisibleDeclsMap(DC);
+}
+
+ExternalLoadResult MultiplexExternalSemaSource::
+FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Result) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ // FIXME: The semantics of the return result are unclear to me...
+ Sources[i]->FindExternalLexicalDecls(DC, isKindWeWant, Result);
+
+ return ELR_Success;
+}
+
+void MultiplexExternalSemaSource::FindFileRegionDecls(FileID File,
+ unsigned Offset,
+ unsigned Length,
+ SmallVectorImpl<Decl *> &Decls){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->FindFileRegionDecls(File, Offset, Length, Decls);
+}
+
+void MultiplexExternalSemaSource::CompleteType(TagDecl *Tag) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->CompleteType(Tag);
+}
+
+void MultiplexExternalSemaSource::CompleteType(ObjCInterfaceDecl *Class) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->CompleteType(Class);
+}
+
+void MultiplexExternalSemaSource::ReadComments() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadComments();
+}
+
+void MultiplexExternalSemaSource::StartedDeserializing() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->StartedDeserializing();
+}
+
+void MultiplexExternalSemaSource::FinishedDeserializing() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->FinishedDeserializing();
+}
+
+void MultiplexExternalSemaSource::StartTranslationUnit(ASTConsumer *Consumer) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->StartTranslationUnit(Consumer);
+}
+
+void MultiplexExternalSemaSource::PrintStats() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->PrintStats();
+}
+
+bool MultiplexExternalSemaSource::layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size,
+ uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ if (Sources[i]->layoutRecordType(Record, Size, Alignment, FieldOffsets,
+ BaseOffsets, VirtualBaseOffsets))
+ return true;
+ return false;
+}
+
+void MultiplexExternalSemaSource::
+getMemoryBufferSizes(MemoryBufferSizes &sizes) const {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->getMemoryBufferSizes(sizes);
+}
+
+//===----------------------------------------------------------------------===//
+// ExternalSemaSource.
+//===----------------------------------------------------------------------===//
+
+
+void MultiplexExternalSemaSource::InitializeSema(Sema &S) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->InitializeSema(S);
+}
+
+void MultiplexExternalSemaSource::ForgetSema() {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ForgetSema();
+}
+
+void MultiplexExternalSemaSource::ReadMethodPool(Selector Sel) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadMethodPool(Sel);
+}
+
+void MultiplexExternalSemaSource::ReadKnownNamespaces(
+ SmallVectorImpl<NamespaceDecl*> &Namespaces){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadKnownNamespaces(Namespaces);
+}
+
+bool MultiplexExternalSemaSource::LookupUnqualified(LookupResult &R, Scope *S){
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->LookupUnqualified(R, S);
+
+ return !R.empty();
+}
+
+void MultiplexExternalSemaSource::ReadTentativeDefinitions(
+ SmallVectorImpl<VarDecl*> &TentativeDefs) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadTentativeDefinitions(TentativeDefs);
+}
+
+void MultiplexExternalSemaSource::ReadUnusedFileScopedDecls(
+ SmallVectorImpl<const DeclaratorDecl*> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadUnusedFileScopedDecls(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadDelegatingConstructors(
+ SmallVectorImpl<CXXConstructorDecl*> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadDelegatingConstructors(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadExtVectorDecls(
+ SmallVectorImpl<TypedefNameDecl*> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadExtVectorDecls(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadDynamicClasses(
+ SmallVectorImpl<CXXRecordDecl*> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadDynamicClasses(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadLocallyScopedExternalDecls(
+ SmallVectorImpl<NamedDecl*> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadLocallyScopedExternalDecls(Decls);
+}
+
+void MultiplexExternalSemaSource::ReadReferencedSelectors(
+ SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadReferencedSelectors(Sels);
+}
+
+void MultiplexExternalSemaSource::ReadWeakUndeclaredIdentifiers(
+ SmallVectorImpl<std::pair<IdentifierInfo*, WeakInfo> > &WI) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadWeakUndeclaredIdentifiers(WI);
+}
+
+void MultiplexExternalSemaSource::ReadUsedVTables(
+ SmallVectorImpl<ExternalVTableUse> &VTables) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadUsedVTables(VTables);
+}
+
+void MultiplexExternalSemaSource::ReadPendingInstantiations(
+ SmallVectorImpl<std::pair<ValueDecl*,
+ SourceLocation> > &Pending) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadPendingInstantiations(Pending);
+}
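The new file above is essentially a fan-out adapter, and two dispatch styles recur throughout it: value-returning queries stop at the first source that produces an answer, while notifications are broadcast to every registered source. A simplified standalone sketch of that pattern (plain C++, not the actual Clang interfaces):

    #include <cstdio>
    #include <vector>

    struct Source {
      virtual ~Source() {}
      virtual const char *lookup(int id) = 0; // query: first non-null wins
      virtual void notify() = 0;              // event: broadcast to everyone
    };

    struct Multiplexer : Source {
      std::vector<Source *> Sources;
      void add(Source &S) { Sources.push_back(&S); }
      const char *lookup(int id) override {
        for (Source *S : Sources)
          if (const char *R = S->lookup(id))
            return R;
        return nullptr;
      }
      void notify() override {
        for (Source *S : Sources)
          S->notify();
      }
    };

    struct Table : Source {
      int Key;
      const char *Val;
      Table(int K, const char *V) : Key(K), Val(V) {}
      const char *lookup(int id) override { return id == Key ? Val : nullptr; }
      void notify() override { std::printf("%s notified\n", Val); }
    };

    int main() {
      Table A(1, "first"), B(2, "second");
      Multiplexer M;
      M.add(A);
      M.add(B);
      std::printf("lookup(2) -> %s\n", M.lookup(2)); // answered by B
      M.notify();                                    // reaches both A and B
    }
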
diff --git a/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp b/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp
new file mode 100644
index 0000000..4d29a34
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp
@@ -0,0 +1,189 @@
+//===--- ScopeInfo.cpp - Information about a semantic context -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements FunctionScopeInfo and its subclasses, which contain
+// information about a single function, block, lambda, or method body.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+using namespace clang;
+using namespace sema;
+
+void FunctionScopeInfo::Clear() {
+ HasBranchProtectedScope = false;
+ HasBranchIntoScope = false;
+ HasIndirectGoto = false;
+
+ SwitchStack.clear();
+ Returns.clear();
+ ErrorTrap.reset();
+ PossiblyUnreachableDiags.clear();
+ WeakObjectUses.clear();
+}
+
+static const NamedDecl *getBestPropertyDecl(const ObjCPropertyRefExpr *PropE) {
+ if (PropE->isExplicitProperty())
+ return PropE->getExplicitProperty();
+
+ return PropE->getImplicitPropertyGetter();
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::BaseInfoTy
+FunctionScopeInfo::WeakObjectProfileTy::getBaseInfo(const Expr *E) {
+ E = E->IgnoreParenCasts();
+
+ const NamedDecl *D = 0;
+ bool IsExact = false;
+
+ switch (E->getStmtClass()) {
+ case Stmt::DeclRefExprClass:
+ D = cast<DeclRefExpr>(E)->getDecl();
+ IsExact = isa<VarDecl>(D);
+ break;
+ case Stmt::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(E);
+ D = ME->getMemberDecl();
+ IsExact = isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts());
+ break;
+ }
+ case Stmt::ObjCIvarRefExprClass: {
+ const ObjCIvarRefExpr *IE = cast<ObjCIvarRefExpr>(E);
+ D = IE->getDecl();
+ IsExact = IE->getBase()->isObjCSelfExpr();
+ break;
+ }
+ case Stmt::PseudoObjectExprClass: {
+ const PseudoObjectExpr *POE = cast<PseudoObjectExpr>(E);
+ const ObjCPropertyRefExpr *BaseProp =
+ dyn_cast<ObjCPropertyRefExpr>(POE->getSyntacticForm());
+ if (BaseProp) {
+ D = getBestPropertyDecl(BaseProp);
+
+ const Expr *DoubleBase = BaseProp->getBase();
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(DoubleBase))
+ DoubleBase = OVE->getSourceExpr();
+
+ IsExact = DoubleBase->isObjCSelfExpr();
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return BaseInfoTy(D, IsExact);
+}
+
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(
+ const ObjCPropertyRefExpr *PropE)
+ : Base(0, true), Property(getBestPropertyDecl(PropE)) {
+
+ if (PropE->isObjectReceiver()) {
+ const OpaqueValueExpr *OVE = cast<OpaqueValueExpr>(PropE->getBase());
+ const Expr *E = OVE->getSourceExpr();
+ Base = getBaseInfo(E);
+ } else if (PropE->isClassReceiver()) {
+ Base.setPointer(PropE->getClassReceiver());
+ } else {
+ assert(PropE->isSuperReceiver());
+ }
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(const Expr *BaseE,
+ const ObjCPropertyDecl *Prop)
+ : Base(0, true), Property(Prop) {
+ if (BaseE)
+ Base = getBaseInfo(BaseE);
+ // else, this is a message accessing a property on super.
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(
+ const DeclRefExpr *DRE)
+ : Base(0, true), Property(DRE->getDecl()) {
+ assert(isa<VarDecl>(Property));
+}
+
+FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(
+ const ObjCIvarRefExpr *IvarE)
+ : Base(getBaseInfo(IvarE->getBase())), Property(IvarE->getDecl()) {
+}
+
+void FunctionScopeInfo::recordUseOfWeak(const ObjCMessageExpr *Msg,
+ const ObjCPropertyDecl *Prop) {
+ assert(Msg && Prop);
+ WeakUseVector &Uses =
+ WeakObjectUses[WeakObjectProfileTy(Msg->getInstanceReceiver(), Prop)];
+ Uses.push_back(WeakUseTy(Msg, Msg->getNumArgs() == 0));
+}
+
+void FunctionScopeInfo::markSafeWeakUse(const Expr *E) {
+ E = E->IgnoreParenCasts();
+
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ markSafeWeakUse(POE->getSyntacticForm());
+ return;
+ }
+
+ if (const ConditionalOperator *Cond = dyn_cast<ConditionalOperator>(E)) {
+ markSafeWeakUse(Cond->getTrueExpr());
+ markSafeWeakUse(Cond->getFalseExpr());
+ return;
+ }
+
+ if (const BinaryConditionalOperator *Cond =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ markSafeWeakUse(Cond->getCommon());
+ markSafeWeakUse(Cond->getFalseExpr());
+ return;
+ }
+
+ // Has this weak object been seen before?
+ FunctionScopeInfo::WeakObjectUseMap::iterator Uses;
+ if (const ObjCPropertyRefExpr *RefExpr = dyn_cast<ObjCPropertyRefExpr>(E))
+ Uses = WeakObjectUses.find(WeakObjectProfileTy(RefExpr));
+ else if (const ObjCIvarRefExpr *IvarE = dyn_cast<ObjCIvarRefExpr>(E))
+ Uses = WeakObjectUses.find(WeakObjectProfileTy(IvarE));
+ else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ Uses = WeakObjectUses.find(WeakObjectProfileTy(DRE));
+ else if (const ObjCMessageExpr *MsgE = dyn_cast<ObjCMessageExpr>(E)) {
+ Uses = WeakObjectUses.end();
+ if (const ObjCMethodDecl *MD = MsgE->getMethodDecl()) {
+ if (const ObjCPropertyDecl *Prop = MD->findPropertyDecl()) {
+ Uses =
+ WeakObjectUses.find(WeakObjectProfileTy(MsgE->getInstanceReceiver(),
+ Prop));
+ }
+ }
+ }
+ else
+ return;
+
+ if (Uses == WeakObjectUses.end())
+ return;
+
+ // Has there been a read from the object using this Expr?
+ FunctionScopeInfo::WeakUseVector::reverse_iterator ThisUse =
+ std::find(Uses->second.rbegin(), Uses->second.rend(), WeakUseTy(E, true));
+ if (ThisUse == Uses->second.rend())
+ return;
+
+ ThisUse->markSafe();
+}
+
+FunctionScopeInfo::~FunctionScopeInfo() { }
+BlockScopeInfo::~BlockScopeInfo() { }
+LambdaScopeInfo::~LambdaScopeInfo() { }
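markSafeWeakUse() above relies on uses being appended in source order: scanning the use vector in reverse with std::find locates the most recent matching read, so only that one is marked safe. A minimal sketch of the reverse-scan idiom with stand-in types:

    #include <algorithm>
    #include <vector>

    struct Use {
      int Expr; // stand-in for the use expression
      bool Safe;
    };

    bool operator==(const Use &A, const Use &B) { return A.Expr == B.Expr; }

    int main() {
      std::vector<Use> Uses;
      Uses.push_back(Use{1, false});
      Uses.push_back(Use{2, false});
      Uses.push_back(Use{1, false}); // the most recent use of expr 1

      const Use Key = {1, false};
      std::vector<Use>::reverse_iterator It =
          std::find(Uses.rbegin(), Uses.rend(), Key);
      if (It != Uses.rend())
        It->Safe = true; // marks index 2, not index 0

      return (Uses[2].Safe && !Uses[0].Safe) ? 0 : 1;
    }
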
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
index 6e2de4d..13a33b7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -22,6 +22,7 @@
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
@@ -43,22 +44,6 @@
using namespace clang;
using namespace sema;
-FunctionScopeInfo::~FunctionScopeInfo() { }
-
-void FunctionScopeInfo::Clear() {
- HasBranchProtectedScope = false;
- HasBranchIntoScope = false;
- HasIndirectGoto = false;
-
- SwitchStack.clear();
- Returns.clear();
- ErrorTrap.reset();
- PossiblyUnreachableDiags.clear();
-}
-
-BlockScopeInfo::~BlockScopeInfo() { }
-LambdaScopeInfo::~LambdaScopeInfo() { }
-
PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
const Preprocessor &PP) {
PrintingPolicy Policy = Context.getPrintingPolicy();
@@ -84,12 +69,14 @@ void Sema::ActOnTranslationUnitScope(Scope *S) {
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind,
CodeCompleteConsumer *CodeCompleter)
- : TheTargetAttributesSema(0), FPFeatures(pp.getLangOpts()),
+ : TheTargetAttributesSema(0), ExternalSource(0),
+ isMultiplexExternalSource(false), FPFeatures(pp.getLangOpts()),
LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
- CollectStats(false), ExternalSource(0), CodeCompleter(CodeCompleter),
+ CollectStats(false), CodeCompleter(CodeCompleter),
CurContext(0), OriginalLexicalContext(0),
PackContext(0), MSStructPragmaOn(false), VisContext(0),
+ IsBuildingRecoveryCallExpr(false),
ExprNeedsCleanups(false), LateTemplateParser(0), OpaqueParser(0),
IdResolver(pp), StdInitializerList(0), CXXTypeInfoDecl(0), MSVCGuidDecl(0),
NSNumberDecl(0),
@@ -203,6 +190,10 @@ Sema::~Sema() {
if (ExternalSemaSource *ExternalSema
= dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
ExternalSema->ForgetSema();
+
+ // If Sema's ExternalSource is the multiplexer - we own it.
+ if (isMultiplexExternalSource)
+ delete ExternalSource;
}
/// makeUnavailableInSystemHeader - There is an error in the current
@@ -234,6 +225,27 @@ ASTMutationListener *Sema::getASTMutationListener() const {
return getASTConsumer().GetASTMutationListener();
}
+///\brief Registers an external source. If an external source already exists,
+/// creates a multiplexing external source and appends the new source to it.
+///
+///\param[in] E - A non-null external sema source.
+///
+void Sema::addExternalSource(ExternalSemaSource *E) {
+ assert(E && "Cannot use with NULL ptr");
+
+ if (!ExternalSource) {
+ ExternalSource = E;
+ return;
+ }
+
+ if (isMultiplexExternalSource)
+ static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E);
+ else {
+ ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
+ isMultiplexExternalSource = true;
+ }
+}
+
/// \brief Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
llvm::errs() << "\n*** Semantic Analysis Stats:\n";
@@ -507,6 +519,11 @@ void Sema::ActOnEndOfTranslationUnit() {
assert(DelayedDiagnostics.getCurrentPool() == NULL
&& "reached end of translation unit with a pool attached?");
+ // If code completion is enabled, don't perform any end-of-translation-unit
+ // work.
+ if (PP.isCodeCompletionEnabled())
+ return;
+
// Only complete translation units define vtables and perform implicit
// instantiations.
if (TUKind == TU_Complete) {
@@ -1023,6 +1040,9 @@ LambdaScopeInfo *Sema::getCurLambda() {
}
void Sema::ActOnComment(SourceRange Comment) {
+ if (!LangOpts.RetainCommentsFromSystemHeaders &&
+ SourceMgr.isInSystemHeader(Comment.getBegin()))
+ return;
RawComment RC(SourceMgr, Comment);
if (RC.isAlmostTrailingComment()) {
SourceRange MagicMarkerRange(Comment.getBegin(),
@@ -1167,8 +1187,7 @@ static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
// FIXME: Magic number for max shown overloads stolen from
// OverloadCandidateSet::NoteCandidates.
- if (ShownOverloads >= 4 &&
- S.Diags.getShowOverloads() == DiagnosticsEngine::Ovl_Best) {
+ if (ShownOverloads >= 4 && S.Diags.getShowOverloads() == Ovl_Best) {
++SuppressedOverloads;
continue;
}
@@ -1239,8 +1258,7 @@ bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
// FIXME: Try this before emitting the fixit, and suppress diagnostics
// while doing so.
E = ActOnCallExpr(0, E.take(), ParenInsertionLoc,
- MultiExprArg(*this, 0, 0),
- ParenInsertionLoc.getLocWithOffset(1));
+ MultiExprArg(), ParenInsertionLoc.getLocWithOffset(1));
return true;
}
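addExternalSource() above creates the multiplexer lazily: the first registration stores the source directly, the second wraps both in a MultiplexExternalSemaSource that Sema then owns and frees in its destructor, and later registrations append to the existing multiplexer. A hedged standalone sketch of that ownership scheme:

    #include <vector>

    struct Src {
      virtual ~Src() {}
    };

    struct Mux : Src {
      std::vector<Src *> Sources;
      void add(Src &S) { Sources.push_back(&S); }
    };

    struct Owner {
      Src *External = nullptr;
      bool OwnsMux = false;

      void addExternalSource(Src &S) {
        if (!External) {
          External = &S; // first source: store it directly
          return;
        }
        if (OwnsMux) {
          static_cast<Mux *>(External)->add(S); // already multiplexing
          return;
        }
        Mux *M = new Mux; // second source: start multiplexing, take ownership
        M->add(*External);
        M->add(S);
        External = M;
        OwnsMux = true;
      }

      ~Owner() {
        if (OwnsMux) // we only own the multiplexer, never the raw sources
          delete External;
      }
    };

    int main() {
      Src A, B, C;
      Owner O;
      O.addExternalSource(A);
      O.addExternalSource(B); // creates the owned multiplexer
      O.addExternalSource(C); // appended to the existing multiplexer
    }
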
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
index 3481171..58b1a51 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
@@ -97,14 +97,19 @@ struct EffectiveContext {
// functions (which can gain privileges through friendship), but we
// take that as an oversight.
while (true) {
+ // We want to add canonical declarations to the EC lists for
+ // simplicity of checking, but we need to walk up through the
+ // actual current DC chain. Otherwise, something like a local
+ // extern or friend which happens to be the canonical
+ // declaration will really mess us up.
+
if (isa<CXXRecordDecl>(DC)) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DC)->getCanonicalDecl();
- Records.push_back(Record);
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ Records.push_back(Record->getCanonicalDecl());
DC = Record->getDeclContext();
} else if (isa<FunctionDecl>(DC)) {
- FunctionDecl *Function = cast<FunctionDecl>(DC)->getCanonicalDecl();
- Functions.push_back(Function);
-
+ FunctionDecl *Function = cast<FunctionDecl>(DC);
+ Functions.push_back(Function->getCanonicalDecl());
if (Function->getFriendObjectKind())
DC = Function->getLexicalDeclContext();
else
@@ -1791,7 +1796,7 @@ void Sema::CheckLookupAccess(const LookupResult &R) {
/// specifiers into account, but no member access expressions and such.
///
/// \param Decl the declaration to check if it can be accessed
-/// \param Class the class/context from which to start the search
+/// \param Ctx the class/context from which to start the search
/// \return true if the Decl is accessible from the Class, false otherwise.
bool Sema::IsSimplyAccessible(NamedDecl *Decl, DeclContext *Ctx) {
if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) {
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
index e935fc7..f1154c1 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
@@ -136,23 +136,12 @@ void Sema::AddMsStructLayoutForRecord(RecordDecl *RD) {
}
void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
- SourceLocation PragmaLoc,
- SourceLocation KindLoc) {
+ SourceLocation PragmaLoc) {
if (PackContext == 0)
PackContext = new PragmaPackStack();
PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext);
- // Reset just pops the top of the stack, or resets the current alignment to
- // default.
- if (Kind == Sema::POAK_Reset) {
- if (!Context->pop(0, /*IsReset=*/true)) {
- Diag(PragmaLoc, diag::warn_pragma_options_align_reset_failed)
- << "stack empty";
- }
- return;
- }
-
switch (Kind) {
// For all targets we support native and natural are the same.
//
@@ -181,9 +170,13 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
Context->setAlignment(PackStackEntry::kMac68kAlignmentSentinel);
break;
- default:
- Diag(PragmaLoc, diag::warn_pragma_options_align_unsupported_option)
- << KindLoc;
+ case POAK_Reset:
+ // Reset just pops the top of the stack, or resets the current alignment to
+ // default.
+ if (!Context->pop(0, /*IsReset=*/true)) {
+ Diag(PragmaLoc, diag::warn_pragma_options_align_reset_failed)
+ << "stack empty";
+ }
break;
}
}
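The reordering above turns POAK_Reset into an ordinary switch case: reset pops the saved alignment if one was pushed, and otherwise restores the default while warning that the stack is empty. A hedged sketch of those stack semantics (the default alignment of 8 is an assumption for illustration):

    #include <cstdio>
    #include <vector>

    struct PragmaStack {
      unsigned Current = 8;          // assumed default alignment
      std::vector<unsigned> Saved;

      void set(unsigned Align) { Current = Align; }
      void push() { Saved.push_back(Current); }
      bool reset() {                 // false if there was nothing to pop
        if (Saved.empty()) {
          Current = 8;
          return false;
        }
        Current = Saved.back();
        Saved.pop_back();
        return true;
      }
    };

    int main() {
      PragmaStack S;
      S.push();
      S.set(1);       // e.g. #pragma options align=packed (assumed mapping)
      S.reset();      // restores the pushed value
      if (!S.reset()) // stack now empty: warn, restore default
        std::printf("warning: #pragma options align=reset failed: "
                    "stack empty\n");
    }
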
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 0de9dd5..15bfd1c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -520,7 +520,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
if (LookupCtx)
Diag(Found.getNameLoc(), diag::err_no_member_suggest)
<< Name << LookupCtx << CorrectedQuotedStr << SS.getRange()
- << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
+ << FixItHint::CreateReplacement(Corrected.getCorrectionRange(),
+ CorrectedStr);
else
Diag(Found.getNameLoc(), diag::err_undeclared_var_use_suggest)
<< Name << CorrectedQuotedStr
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
index d8d51e7..bf25c61 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
@@ -227,7 +227,7 @@ Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
CheckExtraCXXDefaultArguments(D);
}
- return BuildCXXNamedCast(OpLoc, Kind, TInfo, move(E),
+ return BuildCXXNamedCast(OpLoc, Kind, TInfo, E,
SourceRange(LAngleBracketLoc, RAngleBracketLoc),
SourceRange(LParenLoc, RParenLoc));
}
@@ -1331,8 +1331,7 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
if (InitSeq.Failed() && (CStyle || !DestType->isReferenceType()))
return TC_NotApplicable;
- ExprResult Result
- = InitSeq.Perform(Self, Entity, InitKind, MultiExprArg(Self, &SrcExprRaw, 1));
+ ExprResult Result = InitSeq.Perform(Self, Entity, InitKind, SrcExprRaw);
if (Result.isInvalid()) {
msg = 0;
return TC_Failed;
@@ -1343,7 +1342,7 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
else
Kind = CK_NoOp;
- SrcExpr = move(Result);
+ SrcExpr = Result;
return TC_Success;
}
@@ -1492,6 +1491,22 @@ static void DiagnoseCastOfObjCSEL(Sema &Self, const ExprResult &SrcExpr,
}
}
+static void checkIntToPointerCast(bool CStyle, SourceLocation Loc,
+ const Expr *SrcExpr, QualType DestType,
+ Sema &Self) {
+ QualType SrcType = SrcExpr->getType();
+
+ // Not warning on reinterpret_cast, booleans, constant expressions, etc. is
+ // not an explicit design choice, but is consistent with GCC's behavior.
+ // Feel free to modify this if you have reason/evidence for an alternative.
+ if (CStyle && SrcType->isIntegralType(Self.Context)
+ && !SrcType->isBooleanType()
+ && !SrcType->isEnumeralType()
+ && !SrcExpr->isIntegerConstantExpr(Self.Context)
+ && Self.Context.getTypeSize(DestType) > Self.Context.getTypeSize(SrcType))
+ Self.Diag(Loc, diag::warn_int_to_pointer_cast) << SrcType << DestType;
+}
+
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
const SourceRange &OpRange,
@@ -1513,7 +1528,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
SingleFunctionExpr,
Expr::getValueKindForType(DestType) == VK_RValue // Convert Fun to Ptr
) && SingleFunctionExpr.isUsable()) {
- SrcExpr = move(SingleFunctionExpr);
+ SrcExpr = SingleFunctionExpr;
SrcType = SrcExpr.get()->getType();
} else {
return TC_NotApplicable;
@@ -1690,6 +1705,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
if (SrcType->isIntegralOrEnumerationType()) {
assert(destIsPtr && "One type must be a pointer");
+ checkIntToPointerCast(CStyle, OpRange.getBegin(), SrcExpr.get(), DestType,
+ Self);
// C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
// converted to a pointer.
// C++ 5.2.10p9: [Note: ...a null pointer constant of integral type is not
@@ -1903,6 +1920,43 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
SrcExpr = ExprError();
}
+/// DiagnoseBadFunctionCast - Warn whenever a function call is cast to a
+/// non-matching type, such as an enum call cast to int or an int call cast
+/// to a pointer; casts to 'void' are exempt.
+static void DiagnoseBadFunctionCast(Sema &Self, const ExprResult &SrcExpr,
+ QualType DestType) {
+ if (Self.Diags.getDiagnosticLevel(diag::warn_bad_function_cast,
+ SrcExpr.get()->getExprLoc())
+ == DiagnosticsEngine::Ignored)
+ return;
+
+ if (!isa<CallExpr>(SrcExpr.get()))
+ return;
+
+ QualType SrcType = SrcExpr.get()->getType();
+ if (DestType.getUnqualifiedType()->isVoidType())
+ return;
+ if ((SrcType->isAnyPointerType() || SrcType->isBlockPointerType())
+ && (DestType->isAnyPointerType() || DestType->isBlockPointerType()))
+ return;
+ if (SrcType->isIntegerType() && DestType->isIntegerType() &&
+ (SrcType->isBooleanType() == DestType->isBooleanType()) &&
+ (SrcType->isEnumeralType() == DestType->isEnumeralType()))
+ return;
+ if (SrcType->isRealFloatingType() && DestType->isRealFloatingType())
+ return;
+ if (SrcType->isEnumeralType() && DestType->isEnumeralType())
+ return;
+ if (SrcType->isComplexType() && DestType->isComplexType())
+ return;
+ if (SrcType->isComplexIntegerType() && DestType->isComplexIntegerType())
+ return;
+
+ Self.Diag(SrcExpr.get()->getExprLoc(),
+ diag::warn_bad_function_cast)
+ << SrcType << DestType << SrcExpr.get()->getSourceRange();
+}
+
/// Check the semantics of a C-style cast operation, in C.
void CastOperation::CheckCStyleCast() {
assert(!Self.getLangOpts().CPlusPlus);
@@ -2035,6 +2089,8 @@ void CastOperation::CheckCStyleCast() {
SrcExpr = ExprError();
return;
}
+ checkIntToPointerCast(/* CStyle */ true, OpRange.getBegin(), SrcExpr.get(),
+ DestType, Self);
} else if (!SrcType->isArithmeticType()) {
if (!DestType->isIntegralType(Self.Context) &&
DestType->isArithmeticType()) {
@@ -2076,7 +2132,7 @@ void CastOperation::CheckCStyleCast() {
}
}
DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
-
+ DiagnoseBadFunctionCast(Self, SrcExpr, DestType);
Kind = Self.PrepareScalarCast(SrcExpr, DestType);
if (SrcExpr.isInvalid())
return;
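checkIntToPointerCast() above fires only for C-style casts where the destination pointer is wider than the source integer; booleans, enumerations, and integer constant expressions are deliberately exempt, matching GCC. A small example of what does and does not trigger the warning, assuming an LP64 target where int is 32 bits and pointers are 64:

    #include <cstdint>

    int main() {
      int handle = 42;
      // Warns (-Wint-to-pointer-cast): 'int' is narrower than a pointer on
      // LP64, so the cast cannot round-trip a real pointer value.
      char *p = (char *)handle;
      // No warning: cast through a pointer-sized integer first.
      char *q = (char *)(intptr_t)handle;
      // No warning: integer constant expressions are exempt.
      char *r = (char *)0;
      return (p == q && r == nullptr) ? 0 : 1;
    }
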
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
index 1e75f59..94dfd59 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -266,11 +266,11 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
- return SemaBuiltinAtomicOverloaded(move(TheCallResult));
+ return SemaBuiltinAtomicOverloaded(TheCallResult);
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
case Builtin::BI##ID: \
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::AO##ID);
+ return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
case Builtin::BI__builtin_annotation:
if (SemaBuiltinAnnotation(*this, TheCall))
@@ -299,7 +299,7 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
}
}
- return move(TheCallResult);
+ return TheCallResult;
}
// Get the valid immediate range for the specified NEON type code.
@@ -437,6 +437,11 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
default: return false;
case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
+ case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
+ case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
};
// We can't check the value of a dependent argument.
@@ -490,9 +495,8 @@ void Sema::checkCall(NamedDecl *FDecl, Expr **Args,
SourceLocation Loc,
SourceRange Range,
VariadicCallType CallType) {
- // FIXME: This mechanism should be abstracted to be less fragile and
- // more efficient. For example, just map function ids to custom
- // handlers.
+ if (CurContext->isDependentContext())
+ return;
// Printf and scanf checking.
bool HandledFormatString = false;
@@ -506,8 +510,11 @@ void Sema::checkCall(NamedDecl *FDecl, Expr **Args,
// Refuse POD arguments that weren't caught by the format string
// checks above.
if (!HandledFormatString && CallType != VariadicDoesNotApply)
- for (unsigned ArgIdx = NumProtoArgs; ArgIdx < NumArgs; ++ArgIdx)
- variadicArgumentPODCheck(Args[ArgIdx], CallType);
+ for (unsigned ArgIdx = NumProtoArgs; ArgIdx < NumArgs; ++ArgIdx) {
+ // Args[ArgIdx] can be null in malformed code.
+ if (Expr *Arg = Args[ArgIdx])
+ variadicArgumentPODCheck(Arg, CallType);
+ }
for (specific_attr_iterator<NonNullAttr>
I = FDecl->specific_attr_begin<NonNullAttr>(),
@@ -538,11 +545,23 @@ void Sema::CheckConstructorCall(FunctionDecl *FDecl, Expr **Args,
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto) {
- bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall);
+ bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
+ isa<CXXMethodDecl>(FDecl);
+ bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
+ IsMemberOperatorCall;
VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
TheCall->getCallee());
unsigned NumProtoArgs = Proto ? Proto->getNumArgs() : 0;
- checkCall(FDecl, TheCall->getArgs(), TheCall->getNumArgs(), NumProtoArgs,
+ Expr** Args = TheCall->getArgs();
+ unsigned NumArgs = TheCall->getNumArgs();
+ if (IsMemberOperatorCall) {
+ // If this is a call to a member operator, hide the first argument
+ // from checkCall.
+ // FIXME: Our choice of AST representation here is less than ideal.
+ ++Args;
+ --NumArgs;
+ }
+ checkCall(FDecl, Args, NumArgs, NumProtoArgs,
IsMemberFunction, TheCall->getRParenLoc(),
TheCall->getCallee()->getSourceRange(), CallType);
@@ -737,6 +756,11 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
<< Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
+ if (AtomTy.isConstQualified()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
ValType = AtomTy->getAs<AtomicType>()->getValueType();
}
@@ -885,8 +909,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
}
return Owned(new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
- SubExprs.data(), SubExprs.size(),
- ResultType, Op,
+ SubExprs, ResultType, Op,
TheCall->getRParenLoc()));
}
@@ -1189,10 +1212,19 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// concrete integer type we should convert to is.
unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
- IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName);
- FunctionDecl *NewBuiltinDecl =
- cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID,
- TUScope, false, DRE->getLocStart()));
+ FunctionDecl *NewBuiltinDecl;
+ if (NewBuiltinID == BuiltinID)
+ NewBuiltinDecl = FDecl;
+ else {
+ // Perform builtin lookup to avoid redeclaring it.
+ DeclarationName DN(&Context.Idents.get(NewBuiltinName));
+ LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName);
+ LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
+ assert(Res.getFoundDecl());
+ NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
+ if (NewBuiltinDecl == 0)
+ return ExprError();
+ }
// The first argument --- the pointer --- has a fixed type; we
// deduce the types of the rest of the arguments accordingly. Walk
@@ -1228,14 +1260,14 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
NewBuiltinDecl,
/*enclosing*/ false,
DRE->getLocation(),
- NewBuiltinDecl->getType(),
+ Context.BuiltinFnTy,
DRE->getValueKind());
// Set the callee in the CallExpr.
- // FIXME: This leaks the original parens and implicit casts.
- ExprResult PromotedCall = UsualUnaryConversions(NewDRE);
- if (PromotedCall.isInvalid())
- return ExprError();
+ // FIXME: This loses syntactic information.
+ QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
+ ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
+ CK_BuiltinFnToFnPtr);
TheCall->setCallee(PromotedCall.take());
// Change the result type of the call to match the original value type. This
@@ -1243,7 +1275,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// gracefully.
TheCall->setType(ResultType);
- return move(TheCallResult);
+ return TheCallResult;
}
/// CheckObjCString - Checks that the argument to the builtin
@@ -1264,7 +1296,7 @@ bool Sema::CheckObjCString(Expr *Arg) {
StringRef String = Literal->getString();
unsigned NumBytes = String.size();
SmallVector<UTF16, 128> ToBuf(NumBytes);
- const UTF8 *FromPtr = (UTF8 *)String.data();
+ const UTF8 *FromPtr = (const UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
@@ -1503,8 +1535,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
TheCall->setArg(i, 0);
}
- return Owned(new (Context) ShuffleVectorExpr(Context, exprs.begin(),
- exprs.size(), resType,
+ return Owned(new (Context) ShuffleVectorExpr(Context, exprs, resType,
TheCall->getCallee()->getLocStart(),
TheCall->getRParenLoc()));
}
@@ -1935,19 +1966,19 @@ public:
void HandleIncompleteSpecifier(const char *startSpecifier,
unsigned specifierLen);
+ void HandleInvalidLengthModifier(
+ const analyze_format_string::FormatSpecifier &FS,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen, unsigned DiagID);
+
void HandleNonStandardLengthModifier(
- const analyze_format_string::LengthModifier &LM,
+ const analyze_format_string::FormatSpecifier &FS,
const char *startSpecifier, unsigned specifierLen);
void HandleNonStandardConversionSpecifier(
const analyze_format_string::ConversionSpecifier &CS,
const char *startSpecifier, unsigned specifierLen);
- void HandleNonStandardConversionSpecification(
- const analyze_format_string::LengthModifier &LM,
- const analyze_format_string::ConversionSpecifier &CS,
- const char *startSpecifier, unsigned specifierLen);
-
virtual void HandlePosition(const char *startPos, unsigned posLen);
virtual void HandleInvalidPosition(const char *startSpecifier,
@@ -1964,7 +1995,7 @@ public:
PartialDiagnostic PDiag,
SourceLocation StringLoc,
bool IsStringLocation, Range StringRange,
- FixItHint Fixit = FixItHint());
+ ArrayRef<FixItHint> Fixit = ArrayRef<FixItHint>());
protected:
bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
@@ -1991,7 +2022,7 @@ protected:
template <typename Range>
void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
bool IsStringLocation, Range StringRange,
- FixItHint Fixit = FixItHint());
+ ArrayRef<FixItHint> Fixit = ArrayRef<FixItHint>());
void CheckPositionalAndNonpositionalArgs(
const analyze_format_string::FormatSpecifier *FS);
@@ -2025,35 +2056,95 @@ void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
getSpecifierRange(startSpecifier, specifierLen));
}
+void CheckFormatHandler::HandleInvalidLengthModifier(
+ const analyze_format_string::FormatSpecifier &FS,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
+ using namespace analyze_format_string;
+
+ const LengthModifier &LM = FS.getLengthModifier();
+ CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
+
+ // See if we know how to fix this length modifier.
+ llvm::Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
+ if (FixedLM) {
+ EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
+ << FixedLM->toString()
+ << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
+
+ } else {
+ FixItHint Hint;
+ if (DiagID == diag::warn_format_nonsensical_length)
+ Hint = FixItHint::CreateRemoval(LMRange);
+
+ EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ Hint);
+ }
+}
+
void CheckFormatHandler::HandleNonStandardLengthModifier(
- const analyze_format_string::LengthModifier &LM,
+ const analyze_format_string::FormatSpecifier &FS,
const char *startSpecifier, unsigned specifierLen) {
- EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << LM.toString()
- << 0,
- getLocationOfByte(LM.getStart()),
- /*IsStringLocation*/true,
- getSpecifierRange(startSpecifier, specifierLen));
+ using namespace analyze_format_string;
+
+ const LengthModifier &LM = FS.getLengthModifier();
+ CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
+
+ // See if we know how to fix this length modifier.
+ llvm::Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
+ if (FixedLM) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
+ << LM.toString() << 0,
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
+ << FixedLM->toString()
+ << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
+
+ } else {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
+ << LM.toString() << 0,
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
}
void CheckFormatHandler::HandleNonStandardConversionSpecifier(
const analyze_format_string::ConversionSpecifier &CS,
const char *startSpecifier, unsigned specifierLen) {
- EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << CS.toString()
- << 1,
- getLocationOfByte(CS.getStart()),
- /*IsStringLocation*/true,
- getSpecifierRange(startSpecifier, specifierLen));
-}
+ using namespace analyze_format_string;
-void CheckFormatHandler::HandleNonStandardConversionSpecification(
- const analyze_format_string::LengthModifier &LM,
- const analyze_format_string::ConversionSpecifier &CS,
- const char *startSpecifier, unsigned specifierLen) {
- EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_conversion_spec)
- << LM.toString() << CS.toString(),
- getLocationOfByte(LM.getStart()),
- /*IsStringLocation*/true,
- getSpecifierRange(startSpecifier, specifierLen));
+ // See if we know how to fix this conversion specifier.
+ llvm::Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
+ if (FixedCS) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
+ << CS.toString() << /*conversion specifier*/1,
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
+ S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
+ << FixedCS->toString()
+ << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
+ } else {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
+ << CS.toString() << /*conversion specifier*/1,
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
}
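Similarly, a sketch of the non-standard specifier path; it assumes a target where '%D' still parses as the old BSD/Apple synonym for '%d' (on FreeBSD this patch repurposes it as FreeBSDDArg):

    // nonstandard-specifier.cpp -- illustrative input only.
    #include <cstdio>

    int main() {
      // '%D' is not ISO C, so warn_format_non_standard fires; when
      // getStandardSpecifier() has a mapping, the follow-up note carries a
      // fix-it rewriting the specifier to its standard form (here '%d').
      std::printf("%D\n", 1);
      return 0;
    }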
void CheckFormatHandler::HandlePosition(const char *startPos,
@@ -2182,7 +2273,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
SourceLocation Loc,
bool IsStringLocation,
Range StringRange,
- FixItHint FixIt) {
+ ArrayRef<FixItHint> FixIt) {
EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
Loc, IsStringLocation, StringRange, FixIt);
}
@@ -2190,7 +2281,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
/// \brief If the format string is not within the function call, emit a note
/// so that both the function call and the string appear in diagnostic messages.
///
-/// \param inFunctionCall if true, the format string is within the function
+/// \param InFunctionCall if true, the format string is within the function
/// call and only one diagnostic message will be produced. Otherwise, an
/// extra note will be emitted pointing to the location of the format string.
///
@@ -2213,7 +2304,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
-/// \param Fixit optional fix it hint for the format string.
+/// \param FixIt optional fix it hint for the format string.
template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall,
const Expr *ArgumentExpr,
@@ -2221,15 +2312,27 @@ void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall,
SourceLocation Loc,
bool IsStringLocation,
Range StringRange,
- FixItHint FixIt) {
- if (InFunctionCall)
- S.Diag(Loc, PDiag) << StringRange << FixIt;
- else {
+ ArrayRef<FixItHint> FixIt) {
+ if (InFunctionCall) {
+ const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
+ D << StringRange;
+ for (ArrayRef<FixItHint>::iterator I = FixIt.begin(), E = FixIt.end();
+ I != E; ++I) {
+ D << *I;
+ }
+ } else {
S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
<< ArgumentExpr->getSourceRange();
- S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
- diag::note_format_string_defined)
- << StringRange << FixIt;
+
+ const Sema::SemaDiagnosticBuilder &Note =
+ S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
+ diag::note_format_string_defined);
+
+ Note << StringRange;
+ for (ArrayRef<FixItHint>::iterator I = FixIt.begin(), E = FixIt.end();
+ I != E; ++I) {
+ Note << *I;
+ }
}
}
@@ -2509,15 +2612,16 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
}
// FreeBSD extensions
- if (CS.getKind() == ConversionSpecifier::bArg || CS.getKind() == ConversionSpecifier::DArg) {
- // claim the second argument
- CoveredArgs.set(argIndex + 1);
+ if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
+ CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
+ // claim the second argument
+ CoveredArgs.set(argIndex + 1);
// Now type check the data expression that matches the
// format specifier.
const Expr *Ex = getDataArg(argIndex);
const analyze_printf::ArgType &AT =
- (CS.getKind() == ConversionSpecifier::bArg) ?
+ (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
ArgType(S.Context.IntTy) : ArgType::CStrTy;
if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
S.Diag(getLocationOfByte(CS.getStart()),
@@ -2583,23 +2687,17 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
startSpecifier, specifierLen);
// Check the length modifier is valid with the given conversion specifier.
- const LengthModifier &LM = FS.getLengthModifier();
- if (!FS.hasValidLengthModifier())
- EmitFormatDiagnostic(S.PDiag(diag::warn_format_nonsensical_length)
- << LM.toString() << CS.toString(),
- getLocationOfByte(LM.getStart()),
- /*IsStringLocation*/true,
- getSpecifierRange(startSpecifier, specifierLen),
- FixItHint::CreateRemoval(
- getSpecifierRange(LM.getStart(),
- LM.getLength())));
- if (!FS.hasStandardLengthModifier())
- HandleNonStandardLengthModifier(LM, startSpecifier, specifierLen);
+ if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_nonsensical_length);
+ else if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
+ else if (!FS.hasStandardLengthConversionCombination())
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_non_standard_conversion_spec);
+
if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
- if (!FS.hasStandardLengthConversionCombination())
- HandleNonStandardConversionSpecification(LM, CS, startSpecifier,
- specifierLen);
// The remaining checks depend on the data arguments.
if (HasVAListArg)
@@ -2615,6 +2713,30 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
}
+static bool requiresParensToAddCast(const Expr *E) {
+ // FIXME: We should have a general way to reason about operator
+ // precedence and whether parens are actually needed here.
+ // Take care of a few common cases where they aren't.
+ const Expr *Inside = E->IgnoreImpCasts();
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
+ Inside = POE->getSyntacticForm()->IgnoreImpCasts();
+
+ switch (Inside->getStmtClass()) {
+ case Stmt::ArraySubscriptExprClass:
+ case Stmt::CallExprClass:
+ case Stmt::DeclRefExprClass:
+ case Stmt::MemberExprClass:
+ case Stmt::ObjCIvarRefExprClass:
+ case Stmt::ObjCMessageExprClass:
+ case Stmt::ObjCPropertyRefExprClass:
+ case Stmt::ParenExprClass:
+ case Stmt::UnaryOperatorClass:
+ return false;
+ default:
+ return true;
+ }
+}
+
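A minimal sketch of what the precedence heuristic above buys us; the rewritten forms are assumptions about the fix-its the caller will build from this answer:

    // parens-for-cast.cpp -- illustrative input only.
    long values[1];
    long get();

    void demo(int i) {
      // DeclRefExpr, ArraySubscriptExpr and CallExpr bind tightly, so a bare
      // cast is enough: (long)i, (long)values[0], (long)get().
      // A BinaryOperator does not, so the cast must wrap the whole operand:
      // (long)(i + 1) -- writing (long)i + 1 would cast only 'i'.
    }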
bool
CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
const char *StartSpecifier,
@@ -2626,81 +2748,151 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// format specifier.
const analyze_printf::ArgType &AT = FS.getArgType(S.Context,
ObjCContext);
- if (AT.isValid() && !AT.matchesType(S.Context, E->getType())) {
- // Look through argument promotions for our error message's reported type.
- // This includes the integral and floating promotions, but excludes array
- // and function pointer decay; seeing that an argument intended to be a
- // string has type 'char [6]' is probably more confusing than 'char *'.
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
- if (ICE->getCastKind() == CK_IntegralCast ||
- ICE->getCastKind() == CK_FloatingCast) {
- E = ICE->getSubExpr();
-
- // Check if we didn't match because of an implicit cast from a 'char'
- // or 'short' to an 'int'. This is done because printf is a varargs
- // function.
- if (ICE->getType() == S.Context.IntTy ||
- ICE->getType() == S.Context.UnsignedIntTy) {
- // All further checking is done on the subexpression.
- if (AT.matchesType(S.Context, E->getType()))
- return true;
- }
+ if (!AT.isValid())
+ return true;
+
+ QualType IntendedTy = E->getType();
+ if (AT.matchesType(S.Context, IntendedTy))
+ return true;
+
+ // Look through argument promotions for our error message's reported type.
+ // This includes the integral and floating promotions, but excludes array
+ // and function pointer decay; seeing that an argument intended to be a
+ // string has type 'char [6]' is probably more confusing than 'char *'.
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_IntegralCast ||
+ ICE->getCastKind() == CK_FloatingCast) {
+ E = ICE->getSubExpr();
+ IntendedTy = E->getType();
+
+ // Check if we didn't match because of an implicit cast from a 'char'
+ // or 'short' to an 'int'. This is done because printf is a varargs
+ // function.
+ if (ICE->getType() == S.Context.IntTy ||
+ ICE->getType() == S.Context.UnsignedIntTy) {
+ // All further checking is done on the subexpression.
+ if (AT.matchesType(S.Context, IntendedTy))
+ return true;
}
}
+ }
- // We may be able to offer a FixItHint if it is a supported type.
- PrintfSpecifier fixedFS = FS;
- bool success = fixedFS.fixType(E->getType(), S.getLangOpts(),
- S.Context, ObjCContext);
+ if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
+ // Special-case some of Darwin's platform-independence types.
+ if (const TypedefType *UserTy = IntendedTy->getAs<TypedefType>()) {
+ StringRef Name = UserTy->getDecl()->getName();
+ IntendedTy = llvm::StringSwitch<QualType>(Name)
+ .Case("NSInteger", S.Context.LongTy)
+ .Case("NSUInteger", S.Context.UnsignedLongTy)
+ .Case("SInt32", S.Context.IntTy)
+ .Case("UInt32", S.Context.UnsignedIntTy)
+ .Default(IntendedTy);
+ }
+ }
- if (success) {
- // Get the fix string from the fixed format specifier
- SmallString<16> buf;
- llvm::raw_svector_ostream os(buf);
- fixedFS.toString(os);
+ // We may be able to offer a FixItHint if it is a supported type.
+ PrintfSpecifier fixedFS = FS;
+ bool success = fixedFS.fixType(IntendedTy, S.getLangOpts(),
+ S.Context, ObjCContext);
+
+ if (success) {
+ // Get the fix string from the fixed format specifier
+ SmallString<16> buf;
+ llvm::raw_svector_ostream os(buf);
+ fixedFS.toString(os);
+
+ CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
+
+ if (IntendedTy != E->getType()) {
+ // The canonical type for formatting this value is different from the
+ // actual type of the expression. (This occurs, for example, with Darwin's
+ // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
+ // should be printed as 'long' for 64-bit compatibility.)
+ // Rather than emitting a normal format/argument mismatch, we want to
+ // add a cast to the recommended type (and correct the format string
+ // if necessary).
+ SmallString<16> CastBuf;
+ llvm::raw_svector_ostream CastFix(CastBuf);
+ CastFix << "(";
+ IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
+ CastFix << ")";
+
+ SmallVector<FixItHint,4> Hints;
+ if (!AT.matchesType(S.Context, IntendedTy))
+ Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
+
+ if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
+ // If there's already a cast present, just replace it.
+ SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
+ Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
+
+ } else if (!requiresParensToAddCast(E)) {
+ // If the expression has high enough precedence,
+ // just write the C-style cast.
+ Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(),
+ CastFix.str()));
+ } else {
+ // Otherwise, add parens around the expression as well as the cast.
+ CastFix << "(";
+ Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(),
+ CastFix.str()));
+
+ SourceLocation After = S.PP.getLocForEndOfToken(E->getLocEnd());
+ Hints.push_back(FixItHint::CreateInsertion(After, ")"));
+ }
+ // We extract the name from the typedef because we don't want to show
+ // the underlying type in the diagnostic.
+ const TypedefType *UserTy = cast<TypedefType>(E->getType());
+ StringRef Name = UserTy->getDecl()->getName();
+
+ // Finally, emit the diagnostic.
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_argument_needs_cast)
+ << Name << IntendedTy
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation=*/false,
+ SpecRange, Hints);
+ } else {
EmitFormatDiagnostic(
S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
- << AT.getRepresentativeTypeName(S.Context) << E->getType()
+ << AT.getRepresentativeTypeName(S.Context) << IntendedTy
<< E->getSourceRange(),
E->getLocStart(),
/*IsStringLocation*/false,
- getSpecifierRange(StartSpecifier, SpecifierLen),
- FixItHint::CreateReplacement(
- getSpecifierRange(StartSpecifier, SpecifierLen),
- os.str()));
- } else {
- const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
- SpecifierLen);
- // Since the warning for passing non-POD types to variadic functions
- // was deferred until now, we emit a warning for non-POD
- // arguments here.
- if (S.isValidVarArgType(E->getType()) == Sema::VAK_Invalid) {
- unsigned DiagKind;
- if (E->getType()->isObjCObjectType())
- DiagKind = diag::err_cannot_pass_objc_interface_to_vararg_format;
- else
- DiagKind = diag::warn_non_pod_vararg_with_format_string;
-
- EmitFormatDiagnostic(
- S.PDiag(DiagKind)
- << S.getLangOpts().CPlusPlus0x
- << E->getType()
- << CallType
- << AT.getRepresentativeTypeName(S.Context)
- << CSR
- << E->getSourceRange(),
- E->getLocStart(), /*IsStringLocation*/false, CSR);
-
- checkForCStrMembers(AT, E, CSR);
- } else
- EmitFormatDiagnostic(
- S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
- << AT.getRepresentativeTypeName(S.Context) << E->getType()
- << CSR
- << E->getSourceRange(),
- E->getLocStart(), /*IsStringLocation*/false, CSR);
+ SpecRange,
+ FixItHint::CreateReplacement(SpecRange, os.str()));
}
+ } else {
+ const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
+ SpecifierLen);
+ // Since the warning for passing non-POD types to variadic functions
+ // was deferred until now, we emit a warning for non-POD
+ // arguments here.
+ if (S.isValidVarArgType(E->getType()) == Sema::VAK_Invalid) {
+ unsigned DiagKind;
+ if (E->getType()->isObjCObjectType())
+ DiagKind = diag::err_cannot_pass_objc_interface_to_vararg_format;
+ else
+ DiagKind = diag::warn_non_pod_vararg_with_format_string;
+
+ EmitFormatDiagnostic(
+ S.PDiag(DiagKind)
+ << S.getLangOpts().CPlusPlus0x
+ << E->getType()
+ << CallType
+ << AT.getRepresentativeTypeName(S.Context)
+ << CSR
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation*/false, CSR);
+
+ checkForCStrMembers(AT, E, CSR);
+ } else
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << AT.getRepresentativeTypeName(S.Context) << E->getType()
+ << CSR
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation*/false, CSR);
}
return true;
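Putting the pieces of checkFormatExpr() together, a hedged example of the new cast diagnostic; it assumes a 32-bit Darwin target, where the SDK's NSInteger is typedef'd to 'int' (a local stand-in typedef is used here):

    // nsinteger-cast.cpp -- illustrative input only.
    #include <cstdio>
    typedef int NSInteger; // stand-in for the Darwin SDK typedef

    void demo(NSInteger n) {
      // '%ld' does not match 'int', but the typedef name maps IntendedTy to
      // 'long', so instead of a plain mismatch the compiler is expected to
      // emit warn_format_argument_needs_cast with the fix-it:
      //   std::printf("%ld", (long)n);
      std::printf("%ld", n);
    }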
@@ -2809,24 +3001,17 @@ bool CheckScanfHandler::HandleScanfSpecifier(
}
// Check the length modifier is valid with the given conversion specifier.
- const LengthModifier &LM = FS.getLengthModifier();
- if (!FS.hasValidLengthModifier()) {
- const CharSourceRange &R = getSpecifierRange(LM.getStart(), LM.getLength());
- EmitFormatDiagnostic(S.PDiag(diag::warn_format_nonsensical_length)
- << LM.toString() << CS.toString()
- << getSpecifierRange(startSpecifier, specifierLen),
- getLocationOfByte(LM.getStart()),
- /*IsStringLocation*/true, R,
- FixItHint::CreateRemoval(R));
- }
+ if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_nonsensical_length);
+ else if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
+ else if (!FS.hasStandardLengthConversionCombination())
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_non_standard_conversion_spec);
- if (!FS.hasStandardLengthModifier())
- HandleNonStandardLengthModifier(LM, startSpecifier, specifierLen);
if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
- if (!FS.hasStandardLengthConversionCombination())
- HandleNonStandardConversionSpecification(LM, CS, startSpecifier,
- specifierLen);
// The remaining checks depend on the data arguments.
if (HasVAListArg)
@@ -2914,7 +3099,8 @@ void Sema::CheckFormatString(const StringLiteral *FExpr,
inFunctionCall, CallType);
if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
- getLangOpts()))
+ getLangOpts(),
+ Context.getTargetInfo()))
H.DoneProcessing();
} else if (Type == FST_Scanf) {
CheckScanfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg, numDataArgs,
@@ -2922,7 +3108,8 @@ void Sema::CheckFormatString(const StringLiteral *FExpr,
inFunctionCall, CallType);
if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
- getLangOpts()))
+ getLangOpts(),
+ Context.getTargetInfo()))
H.DoneProcessing();
} // TODO: handle other formats
}
@@ -4171,6 +4358,44 @@ static void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
}
}
+static void DiagnoseOutOfRangeComparison(Sema &S, BinaryOperator *E,
+ Expr *Constant, Expr *Other,
+ llvm::APSInt Value,
+ bool RhsConstant) {
+ BinaryOperatorKind op = E->getOpcode();
+ QualType OtherT = Other->getType();
+ QualType ConstantT = Constant->getType();
+ if (S.Context.hasSameUnqualifiedType(OtherT, ConstantT))
+ return;
+ assert((OtherT->isIntegerType() && ConstantT->isIntegerType())
+ && "comparison with non-integer type");
+ // FIXME: Handle cases for signedness to catch (signed char)N == 200.
+ IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);
+ IntRange LitRange = GetValueRange(S.Context, Value, Value.getBitWidth());
+ if (OtherRange.Width >= LitRange.Width)
+ return;
+ bool IsTrue = true;
+ if (op == BO_EQ)
+ IsTrue = false;
+ else if (op == BO_NE)
+ IsTrue = true;
+ else if (RhsConstant) {
+ if (op == BO_GT || op == BO_GE)
+ IsTrue = !LitRange.NonNegative;
+ else // op == BO_LT || op == BO_LE
+ IsTrue = LitRange.NonNegative;
+ } else {
+ if (op == BO_LT || op == BO_LE)
+ IsTrue = !LitRange.NonNegative;
+ else // op == BO_GT || op == BO_GE
+ IsTrue = LitRange.NonNegative;
+ }
+ SmallString<16> PrettySourceValue(Value.toString(10));
+ S.Diag(E->getOperatorLoc(), diag::warn_out_of_range_compare)
+ << PrettySourceValue << OtherT << IsTrue
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+}
+
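A short example of what DiagnoseOutOfRangeComparison() is meant to catch; the wording is an assumption, but the always-false/always-true classification follows the IsTrue logic above:

    // out-of-range-compare.cpp -- illustrative input only.
    void demo(unsigned char c) {
      // 'c' fits in 8 bits, so 300 is out of range for it:
      if (c == 300) {} // warn_out_of_range_compare: always false
      if (c < 300)  {} // warn_out_of_range_compare: always true
    }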
/// Analyze the operands of the given comparison. Implements the
/// fallback case from AnalyzeComparison.
static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
@@ -4186,20 +4411,42 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
QualType T = E->getLHS()->getType();
assert(S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())
&& "comparison with mismatched types");
+ if (E->isValueDependent())
+ return AnalyzeImpConvsInComparison(S, E);
+ Expr *LHS = E->getLHS()->IgnoreParenImpCasts();
+ Expr *RHS = E->getRHS()->IgnoreParenImpCasts();
+
+ bool IsComparisonConstant = false;
+
+ // Check whether an integer constant comparison results in a value
+ // of 'true' or 'false'.
+ if (T->isIntegralType(S.Context)) {
+ llvm::APSInt RHSValue;
+ bool IsRHSIntegralLiteral =
+ RHS->isIntegerConstantExpr(RHSValue, S.Context);
+ llvm::APSInt LHSValue;
+ bool IsLHSIntegralLiteral =
+ LHS->isIntegerConstantExpr(LHSValue, S.Context);
+ if (IsRHSIntegralLiteral && !IsLHSIntegralLiteral)
+ DiagnoseOutOfRangeComparison(S, E, RHS, LHS, RHSValue, true);
+ else if (!IsRHSIntegralLiteral && IsLHSIntegralLiteral)
+ DiagnoseOutOfRangeComparison(S, E, LHS, RHS, LHSValue, false);
+ else
+ IsComparisonConstant =
+ (IsRHSIntegralLiteral && IsLHSIntegralLiteral);
+ } else if (!T->hasUnsignedIntegerRepresentation())
+ IsComparisonConstant = E->isIntegerConstantExpr(S.Context);
+
// We don't do anything special if this isn't an unsigned integral
// comparison: we're only interested in integral comparisons, and
// signed comparisons only happen in cases we don't care to warn about.
//
// We also don't care about value-dependent expressions or expressions
// whose result is a constant.
- if (!T->hasUnsignedIntegerRepresentation()
- || E->isValueDependent() || E->isIntegerConstantExpr(S.Context))
+ if (!T->hasUnsignedIntegerRepresentation() || IsComparisonConstant)
return AnalyzeImpConvsInComparison(S, E);
-
- Expr *LHS = E->getLHS()->IgnoreParenImpCasts();
- Expr *RHS = E->getRHS()->IgnoreParenImpCasts();
-
+
// Check to see if one of the (unmodified) operands is of different
// signedness.
Expr *signedOperand, *unsignedOperand;
@@ -4386,6 +4633,46 @@ std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) {
return ValueInRange.toString(10);
}
+static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
+ if (!isa<ImplicitCastExpr>(Ex))
+ return false;
+
+ Expr *InnerE = Ex->IgnoreParenImpCasts();
+ const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
+ const Type *Source =
+ S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
+ if (Target->isDependentType())
+ return false;
+
+ const BuiltinType *FloatCandidateBT =
+ dyn_cast<BuiltinType>(ToBool ? Source : Target);
+ const Type *BoolCandidateType = ToBool ? Target : Source;
+
+ return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
+ FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
+}
+
+void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
+ SourceLocation CC) {
+ unsigned NumArgs = TheCall->getNumArgs();
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ Expr *CurrA = TheCall->getArg(i);
+ if (!IsImplicitBoolFloatConversion(S, CurrA, true))
+ continue;
+
+ bool IsSwapped = ((i > 0) &&
+ IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
+ IsSwapped |= ((i < (NumArgs - 1)) &&
+ IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
+ if (IsSwapped) {
+ // Warn on this floating-point to bool conversion.
+ DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
+ CurrA->getType(), CC,
+ diag::warn_impcast_floating_point_to_bool);
+ }
+ }
+}
+
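The swapped-argument heuristic above in user-code terms; a sketch, assuming the adjacent bool-to-float conversion is what marks the pair as suspicious:

    // swapped-bool-float.cpp -- illustrative input only.
    void set_flag(bool enabled, double threshold);

    void demo() {
      // 0.99 undergoes float->bool while its neighbour 'true' undergoes
      // bool->float: the arguments look transposed, so
      // warn_impcast_floating_point_to_bool fires on the first argument.
      set_flag(0.99, true);
    }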
void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
SourceLocation CC, bool *ICContext = 0) {
if (E->isTypeDependent() || E->isValueDependent()) return;
@@ -4521,12 +4808,33 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
}
}
+ // If the target is bool, warn if expr is a function or method call.
+ if (Target->isSpecificBuiltinType(BuiltinType::Bool) &&
+ isa<CallExpr>(E)) {
+ // Check the last argument of the call to see if it is an
+ // implicit cast from a type matching the type the result
+ // is being cast to.
+ CallExpr *CEx = cast<CallExpr>(E);
+ unsigned NumArgs = CEx->getNumArgs();
+ if (NumArgs > 0) {
+ Expr *LastA = CEx->getArg(NumArgs - 1);
+ Expr *InnerE = LastA->IgnoreParenImpCasts();
+ const Type *InnerType =
+ S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
+ if (isa<ImplicitCastExpr>(LastA) && (InnerType == Target)) {
+ // Warn on this floating-point to bool conversion.
+ DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_floating_point_to_bool);
+ }
+ }
+ }
return;
}
if ((E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)
== Expr::NPCK_GNUNull) && !Target->isAnyPointerType()
- && !Target->isBlockPointerType() && !Target->isMemberPointerType()) {
+ && !Target->isBlockPointerType() && !Target->isMemberPointerType()
+ && Target->isScalarType()) {
SourceLocation Loc = E->getSourceRange().getBegin();
if (Loc.isMacroID())
Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first;
@@ -4691,6 +4999,10 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
return;
}
+ // Check implicit argument conversions for function calls.
+ if (CallExpr *Call = dyn_cast<CallExpr>(E))
+ CheckImplicitArgumentConversions(S, Call, CC);
+
// Go ahead and check any implicit conversions we might have skipped.
// The non-canonical typecheck is just an optimization;
// CheckImplicitConversion will filter out dead implicit conversions.
@@ -5110,7 +5422,8 @@ static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
return false;
owner.Variable = var;
- owner.setLocsFrom(ref);
+ if (ref)
+ owner.setLocsFrom(ref);
return true;
}
@@ -5219,6 +5532,12 @@ namespace {
if (block->getBlockDecl()->capturesVariable(Variable))
Visit(block->getBlockDecl()->getBody());
}
+
+ void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
+ if (Capturer) return;
+ if (OVE->getSourceExpr())
+ Visit(OVE->getSourceExpr());
+ }
};
}
@@ -5228,6 +5547,28 @@ static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
assert(owner.Variable && owner.Loc.isValid());
e = e->IgnoreParenCasts();
+
+ // Look through [^{...} copy] and Block_copy(^{...}).
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
+ Selector Cmd = ME->getSelector();
+ if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
+ e = ME->getInstanceReceiver();
+ if (!e)
+ return 0;
+ e = e->IgnoreParenCasts();
+ }
+ } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
+ if (CE->getNumArgs() == 1) {
+ FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
+ if (Fn) {
+ const IdentifierInfo *FnI = Fn->getIdentifier();
+ if (FnI && FnI->isStr("_Block_copy")) {
+ e = CE->getArg(0)->IgnoreParenCasts();
+ }
+ }
+ }
+ }
+
BlockExpr *block = dyn_cast<BlockExpr>(e);
if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
return 0;
@@ -5304,6 +5645,20 @@ void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
diagnoseRetainCycle(*this, capturer, owner);
}
+void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
+ RetainCycleOwner Owner;
+ if (!considerVariable(Var, /*DeclRefExpr=*/0, Owner))
+ return;
+
+ // Because we don't have an expression for the variable, we have to set the
+ // location explicitly here.
+ Owner.Loc = Var->getLocation();
+ Owner.Range = Var->getSourceRange();
+
+ if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
+ diagnoseRetainCycle(*this, Capturer, Owner);
+}
+
bool Sema::checkUnsafeAssigns(SourceLocation Loc,
QualType LHS, Expr *RHS) {
Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
@@ -5337,9 +5692,19 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
if (LHSType.isNull())
LHSType = LHS->getType();
+
+ Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
+
+ if (LT == Qualifiers::OCL_Weak) {
+ DiagnosticsEngine::Level Level =
+ Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, Loc);
+ if (Level != DiagnosticsEngine::Ignored)
+ getCurFunction()->markSafeWeakUse(LHS);
+ }
+
if (checkUnsafeAssigns(Loc, LHSType, RHS))
return;
- Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
+
+ // FIXME: Check for other lifetimes.
if (LT != Qualifiers::OCL_None)
return;
@@ -5859,7 +6224,8 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
if (IsPointerAttr) {
// Skip implicit cast of pointer to `void *' (as a function argument).
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
- if (ICE->getType()->isVoidPointerType())
+ if (ICE->getType()->isVoidPointerType() &&
+ ICE->getCastKind() == CK_BitCast)
ArgumentExpr = ICE->getSubExpr();
}
QualType ArgumentType = ArgumentExpr->getType();
@@ -5914,4 +6280,3 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
<< ArgumentExpr->getSourceRange()
<< TypeTagExpr->getSourceRange();
}
-
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
index adf1327..b1aead8 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
@@ -1059,10 +1059,12 @@ bool ResultBuilder::IsClassOrStruct(NamedDecl *ND) const {
// Allow us to find class templates, too.
if (ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
-
+
+ // For purposes of this check, interfaces match too.
if (RecordDecl *RD = dyn_cast<RecordDecl>(ND))
return RD->getTagKind() == TTK_Class ||
- RD->getTagKind() == TTK_Struct;
+ RD->getTagKind() == TTK_Struct ||
+ RD->getTagKind() == TTK_Interface;
return false;
}
@@ -1422,7 +1424,8 @@ static const char *GetCompletionTypeString(QualType T,
if (!Tag->getIdentifier() && !Tag->getTypedefNameForAnonDecl()) {
switch (Tag->getTagKind()) {
case TTK_Struct: return "struct <anonymous>";
- case TTK_Class: return "class <anonymous>";
+ case TTK_Interface: return "__interface <anonymous>";
+ case TTK_Class: return "class <anonymous>";
case TTK_Union: return "union <anonymous>";
case TTK_Enum: return "enum <anonymous>";
}
@@ -1449,7 +1452,7 @@ static void addThisCompletion(Sema &S, ResultBuilder &Results) {
Policy,
Allocator));
Builder.AddTypedTextChunk("this");
- Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
/// \brief Add language constructs that show up for "ordinary" names.
@@ -2480,7 +2483,6 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
if (Declaration) {
Result.addParentContext(Declaration->getDeclContext());
- Pattern->ParentKind = Result.getParentKind();
Pattern->ParentName = Result.getParentName();
}
@@ -2493,7 +2495,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
}
if (Kind == RK_Macro) {
- MacroInfo *MI = PP.getMacroInfo(Macro);
+ MacroInfo *MI = PP.getMacroInfoHistory(Macro);
assert(MI && "Not a macro?");
Result.AddTypedTextChunk(
@@ -2880,10 +2882,14 @@ CXCursorKind clang::getCursorKindForDecl(Decl *D) {
case ObjCPropertyImplDecl::Synthesize:
return CXCursor_ObjCSynthesizeDecl;
}
+
+ case Decl::Import:
+ return CXCursor_ModuleImportDecl;
default:
if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
switch (TD->getTagKind()) {
+ case TTK_Interface: // fall through
case TTK_Struct: return CXCursor_StructDecl;
case TTK_Class: return CXCursor_ClassDecl;
case TTK_Union: return CXCursor_UnionDecl;
@@ -2896,6 +2902,7 @@ CXCursorKind clang::getCursorKindForDecl(Decl *D) {
}
static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
+ bool IncludeUndefined,
bool TargetTypeIsPointer = false) {
typedef CodeCompletionResult Result;
@@ -2904,7 +2911,8 @@ static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
for (Preprocessor::macro_iterator M = PP.macro_begin(),
MEnd = PP.macro_end();
M != MEnd; ++M) {
- Results.AddResult(Result(M->first,
+ if (IncludeUndefined || M->first->hasMacroDefinition())
+ Results.AddResult(Result(M->first,
getMacroUsagePriority(M->first->getName(),
PP.getLangOpts(),
TargetTypeIsPointer)));
@@ -3125,7 +3133,6 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
void Sema::CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext) {
- typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
mapCodeCompletionContext(*this, CompletionContext));
@@ -3204,7 +3211,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
}
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results);
+ AddMacroResults(PP, Results, false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(),Results.size());
@@ -3296,7 +3303,6 @@ struct Sema::CodeCompleteExpressionData {
/// type we're looking for.
void Sema::CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data) {
- typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Expression);
@@ -3336,7 +3342,7 @@ void Sema::CodeCompleteExpression(Scope *S,
AddPrettyFunctionResults(PP.getLangOpts(), Results);
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, PreferredTypeIsPointer);
+ AddMacroResults(PP, Results, false, PreferredTypeIsPointer);
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext(CodeCompletionContext::CCC_Expression,
Data.PreferredType),
@@ -3580,7 +3586,6 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
if (!CodeCompleter)
return;
- typedef CodeCompletionResult Result;
ResultBuilder::LookupFilter Filter = 0;
enum CodeCompletionContext::Kind ContextKind
= CodeCompletionContext::CCC_Other;
@@ -3597,6 +3602,7 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
case DeclSpec::TST_struct:
case DeclSpec::TST_class:
+ case DeclSpec::TST_interface:
Filter = &ResultBuilder::IsClassOrStruct;
ContextKind = CodeCompletionContext::CCC_ClassOrStructTag;
break;
@@ -3728,7 +3734,7 @@ void Sema::CodeCompleteCase(Scope *S) {
// so only say we include macros if the code completer says we do
enum CodeCompletionContext::Kind kind = CodeCompletionContext::CCC_Other;
if (CodeCompleter->includeMacros()) {
- AddMacroResults(PP, Results);
+ AddMacroResults(PP, Results, false);
kind = CodeCompletionContext::CCC_OtherWithMacros;
}
@@ -3898,7 +3904,6 @@ void Sema::CodeCompleteReturn(Scope *S) {
}
void Sema::CodeCompleteAfterIf(Scope *S) {
- typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
mapCodeCompletionContext(*this, PCC_Statement));
@@ -3952,7 +3957,7 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
AddPrettyFunctionResults(PP.getLangOpts(), Results);
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results);
+ AddMacroResults(PP, Results, false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(),Results.size());
@@ -4408,7 +4413,6 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
}
void Sema::CodeCompleteObjCAtDirective(Scope *S) {
- typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
@@ -4595,26 +4599,23 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
// Check for collisions with "readonly".
if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- (Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
- ObjCDeclSpec::DQ_PR_assign |
- ObjCDeclSpec::DQ_PR_unsafe_unretained |
- ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain |
- ObjCDeclSpec::DQ_PR_strong)))
+ (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
return true;
- // Check for more than one of { assign, copy, retain, strong }.
+ // Check for more than one of { assign, copy, retain, strong, weak }.
unsigned AssignCopyRetMask = Attributes & (ObjCDeclSpec::DQ_PR_assign |
ObjCDeclSpec::DQ_PR_unsafe_unretained |
ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain|
- ObjCDeclSpec::DQ_PR_strong);
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong |
+ ObjCDeclSpec::DQ_PR_weak);
if (AssignCopyRetMask &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong)
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_weak)
return true;
return false;
@@ -4626,7 +4627,6 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
unsigned Attributes = ODS.getPropertyAttributes();
- typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
@@ -4650,6 +4650,12 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Results.AddResult(CodeCompletionResult("nonatomic"));
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_atomic))
Results.AddResult(CodeCompletionResult("atomic"));
+
+ // Only suggest "weak" if we're compiling for ARC-with-weak-references or GC.
+ if (getLangOpts().ObjCARCWeak || getLangOpts().getGC() != LangOptions::NonGC)
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_weak))
+ Results.AddResult(CodeCompletionResult("weak"));
+
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) {
CodeCompletionBuilder Setter(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -4837,8 +4843,6 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
- typedef CodeCompletionResult Result;
-
// Try to find the interface where getters might live.
ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
if (!Class) {
@@ -4866,8 +4870,6 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
}
void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
- typedef CodeCompletionResult Result;
-
// Try to find the interface where setters might live.
ObjCInterfaceDecl *Class
= dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
@@ -4898,7 +4900,6 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter) {
- typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Type);
@@ -4957,7 +4958,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
CodeCompleter->includeGlobals());
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results);
+ AddMacroResults(PP, Results, false);
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Type,
@@ -5041,7 +5042,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
///
/// \param S The semantic analysis object.
///
-/// \param S NeedSuperKeyword Whether we need to prefix this completion with
+/// \param NeedSuperKeyword Whether we need to prefix this completion with
/// the "super" keyword. Otherwise, we just need to provide the arguments.
///
/// \param SelIdents The identifiers in the selector that have already been
@@ -5187,7 +5188,7 @@ void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
Results.ExitScope();
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results);
+ AddMacroResults(PP, Results, false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
@@ -5339,11 +5340,11 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
// If we have an external source, load the entire class method
// pool from the AST file.
- if (SemaRef.ExternalSource) {
+ if (SemaRef.getExternalSource()) {
for (uint32_t I = 0,
- N = SemaRef.ExternalSource->GetNumExternalSelectors();
+ N = SemaRef.getExternalSource()->GetNumExternalSelectors();
I != N; ++I) {
- Selector Sel = SemaRef.ExternalSource->GetExternalSelector(I);
+ Selector Sel = SemaRef.getExternalSource()->GetExternalSelector(I);
if (Sel.isNull() || SemaRef.MethodPool.count(Sel))
continue;
@@ -5868,7 +5869,6 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
}
void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
- typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
@@ -7169,7 +7169,9 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
M != MEnd; ++M) {
Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
M->first->getName()));
- Results.AddResult(Builder.TakeString());
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ CCP_CodePattern,
+ CXCursor_MacroDefinition));
}
Results.ExitScope();
} else if (IsDefinition) {
@@ -7186,7 +7188,7 @@ void Sema::CodeCompletePreprocessorExpression() {
CodeCompletionContext::CCC_PreprocessorExpression);
if (!CodeCompleter || CodeCompleter->includeMacros())
- AddMacroResults(PP, Results);
+ AddMacroResults(PP, Results, true);
// defined (<macro>)
Results.EnterNewScope();
@@ -7235,7 +7237,7 @@ void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
}
if (!CodeCompleter || CodeCompleter->includeMacros())
- AddMacroResults(PP, Builder);
+ AddMacroResults(PP, Builder, true);
Results.clear();
Results.insert(Results.end(),
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
index ff1eb84..0092d5d 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -350,8 +350,8 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
/// isTagName() - This method is called *for error recovery purposes only*
/// to determine if the specified name is a valid tag name ("struct foo"). If
/// so, this returns the TST for the tag corresponding to it (TST_enum,
-/// TST_union, TST_struct, TST_class). This is used to diagnose cases in C
-/// where the user forgot to specify the tag.
+/// TST_union, TST_struct, TST_interface, TST_class). This is used to diagnose
+/// cases in C where the user forgot to specify the tag.
DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
// Do a tag name lookup in this scope.
LookupResult R(*this, &II, SourceLocation(), LookupTagName);
@@ -361,6 +361,7 @@ DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
if (const TagDecl *TD = R.getAsSingle<TagDecl>()) {
switch (TD->getTagKind()) {
case TTK_Struct: return DeclSpec::TST_struct;
+ case TTK_Interface: return DeclSpec::TST_interface;
case TTK_Union: return DeclSpec::TST_union;
case TTK_Class: return DeclSpec::TST_class;
case TTK_Enum: return DeclSpec::TST_enum;
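For reference, the C error-recovery case this switch serves; the diagnostic wording is an assumption, and the snippet is a C translation unit because this recovery is C-only:

    /* missing-tag.c -- illustrative C input only. */
    struct point { int x, y; };

    /* In C the tag keyword is required, so this line is an error; isTagName()
       lets the parser recover and suggest 'struct point p;' via a fix-it. */
    point p;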
@@ -434,7 +435,8 @@ bool Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
else if (DeclContext *DC = computeDeclContext(*SS, false))
Diag(IILoc, diag::err_unknown_nested_typename_suggest)
<< II << DC << CorrectedQuotedStr << SS->getRange()
- << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
+ << FixItHint::CreateReplacement(Corrected.getCorrectionRange(),
+ CorrectedStr);
else
llvm_unreachable("could not have corrected a typo here");
@@ -517,9 +519,9 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc) {
- Result.clear(Sema::LookupTagName);
- SemaRef.LookupParsedName(Result, S, &SS);
- if (TagDecl *Tag = Result.getAsSingle<TagDecl>()) {
+ LookupResult R(SemaRef, Name, NameLoc, Sema::LookupTagName);
+ SemaRef.LookupParsedName(R, S, &SS);
+ if (TagDecl *Tag = R.getAsSingle<TagDecl>()) {
const char *TagName = 0;
const char *FixItTagName = 0;
switch (Tag->getTagKind()) {
@@ -538,6 +540,11 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
FixItTagName = "struct ";
break;
+ case TTK_Interface:
+ TagName = "__interface";
+ FixItTagName = "__interface ";
+ break;
+
case TTK_Union:
TagName = "union";
FixItTagName = "union ";
@@ -548,25 +555,42 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
<< Name << TagName << SemaRef.getLangOpts().CPlusPlus
<< FixItHint::CreateInsertion(NameLoc, FixItTagName);
- LookupResult R(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName);
- if (SemaRef.LookupParsedName(R, S, &SS)) {
- for (LookupResult::iterator I = R.begin(), IEnd = R.end();
- I != IEnd; ++I)
- SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
- << Name << TagName;
- }
+ for (LookupResult::iterator I = Result.begin(), IEnd = Result.end();
+ I != IEnd; ++I)
+ SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
+ << Name << TagName;
+
+ // Replace lookup results with just the tag decl.
+ Result.clear(Sema::LookupTagName);
+ SemaRef.LookupParsedName(Result, S, &SS);
return true;
}
- Result.clear(Sema::LookupOrdinaryName);
return false;
}
+/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
+static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS,
+ QualType T, SourceLocation NameLoc) {
+ ASTContext &Context = S.Context;
+
+ TypeLocBuilder Builder;
+ Builder.pushTypeSpec(T).setNameLoc(NameLoc);
+
+ T = S.getElaboratedType(ETK_None, SS, T);
+ ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+}
+
Sema::NameClassification Sema::ClassifyName(Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc,
- const Token &NextToken) {
+ const Token &NextToken,
+ bool IsAddressOfOperand,
+ CorrectionCandidateCallback *CCC) {
DeclarationNameInfo NameInfo(Name, NameLoc);
ObjCMethodDecl *CurMethod = getCurMethodDecl();
@@ -632,25 +656,11 @@ Corrected:
// Perform typo correction to determine if there is another name that is
// close to this name.
- if (!SecondTry) {
+ if (!SecondTry && CCC) {
SecondTry = true;
- CorrectionCandidateCallback DefaultValidator;
- // Try to limit which sets of keywords should be included in typo
- // correction based on what the next token is.
- DefaultValidator.WantTypeSpecifiers =
- NextToken.is(tok::l_paren) || NextToken.is(tok::less) ||
- NextToken.is(tok::identifier) || NextToken.is(tok::star) ||
- NextToken.is(tok::amp) || NextToken.is(tok::l_square);
- DefaultValidator.WantExpressionKeywords =
- NextToken.is(tok::l_paren) || NextToken.is(tok::identifier) ||
- NextToken.is(tok::arrow) || NextToken.is(tok::period);
- DefaultValidator.WantRemainingKeywords =
- NextToken.is(tok::l_paren) || NextToken.is(tok::semi) ||
- NextToken.is(tok::identifier) || NextToken.is(tok::l_brace);
- DefaultValidator.WantCXXNamedCasts = false;
if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
Result.getLookupKind(), S,
- &SS, DefaultValidator)) {
+ &SS, *CCC)) {
unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
unsigned QualifiedDiag = diag::err_no_member_suggest;
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
@@ -675,11 +685,12 @@ Corrected:
Diag(NameLoc, UnqualifiedDiag)
<< Name << CorrectedQuotedStr
<< FixItHint::CreateReplacement(NameLoc, CorrectedStr);
- else
+ else // FIXME: is this even reachable? Test it.
Diag(NameLoc, QualifiedDiag)
<< Name << computeDeclContext(SS, false) << CorrectedQuotedStr
<< SS.getRange()
- << FixItHint::CreateReplacement(NameLoc, CorrectedStr);
+ << FixItHint::CreateReplacement(Corrected.getCorrectionRange(),
+ CorrectedStr);
// Update the name, so that the caller has the new name.
Name = Corrected.getCorrectionAsIdentifierInfo();
@@ -705,7 +716,7 @@ Corrected:
if (ObjCIvarDecl *Ivar = Result.getAsSingle<ObjCIvarDecl>()) {
Result.clear();
ExprResult E(LookupInObjCMethod(Result, S, Ivar->getIdentifier()));
- return move(E);
+ return E;
}
goto Corrected;
@@ -731,8 +742,9 @@ Corrected:
// perform some heroics to see if we actually have a
// template-argument-list, which would indicate a missing 'template'
// keyword here.
- return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
- NameInfo, /*TemplateArgs=*/0);
+ return ActOnDependentIdExpression(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, IsAddressOfOperand,
+ /*TemplateArgs=*/0);
}
case LookupResult::Found:
@@ -808,14 +820,16 @@ Corrected:
return NameClassification::TypeTemplate(Template);
}
}
-
+
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
DiagnoseUseOfDecl(Type, NameLoc);
QualType T = Context.getTypeDeclType(Type);
+ if (SS.isNotEmpty())
+ return buildNestedType(*this, SS, T, NameLoc);
return ParsedType::make(T);
}
-
+
ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl);
if (!Class) {
// FIXME: It's unfortunate that we don't have a Type node for handling this.
@@ -838,24 +852,28 @@ Corrected:
return ParsedType::make(T);
}
+ // We can have a type template here if we're classifying a template argument.
+ if (isa<TemplateDecl>(FirstDecl) && !isa<FunctionTemplateDecl>(FirstDecl))
+ return NameClassification::TypeTemplate(
+ TemplateName(cast<TemplateDecl>(FirstDecl)));
+
// Check for a tag type hidden by a non-type decl in a few cases where it
// seems likely a type is wanted instead of the non-type that was found.
- if (!getLangOpts().ObjC1 && FirstDecl && !isa<ClassTemplateDecl>(FirstDecl) &&
- !isa<TypeAliasTemplateDecl>(FirstDecl)) {
+ if (!getLangOpts().ObjC1) {
bool NextIsOp = NextToken.is(tok::amp) || NextToken.is(tok::star);
if ((NextToken.is(tok::identifier) ||
(NextIsOp && FirstDecl->isFunctionOrFunctionTemplate())) &&
isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
- FirstDecl = (*Result.begin())->getUnderlyingDecl();
- if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
- DiagnoseUseOfDecl(Type, NameLoc);
- QualType T = Context.getTypeDeclType(Type);
- return ParsedType::make(T);
- }
+ TypeDecl *Type = Result.getAsSingle<TypeDecl>();
+ DiagnoseUseOfDecl(Type, NameLoc);
+ QualType T = Context.getTypeDeclType(Type);
+ if (SS.isNotEmpty())
+ return buildNestedType(*this, SS, T, NameLoc);
+ return ParsedType::make(T);
}
}
- if (!Result.empty() && (*Result.begin())->isCXXClassMember())
+ if (FirstDecl->isCXXClassMember())
return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result, 0);
bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
@@ -1186,8 +1204,14 @@ bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const {
Context.DeclMustBeEmitted(FD))
return false;
} else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // Don't warn on variables of const-qualified or reference type, since their
+ // values can be used even if they're not odr-used, and because const
+ // qualified variables can appear in headers in contexts where they're not
+ // intended to be used.
+ // FIXME: Use more principled rules for these exemptions.
if (!VD->isFileVarDecl() ||
- VD->getType().isConstant(Context) ||
+ VD->getType().isConstQualified() ||
+ VD->getType()->isReferenceType() ||
Context.DeclMustBeEmitted(VD))
return false;
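A minimal sketch of the effect of the broader exemption above (the exact warning behavior is an assumption):

    // unused-file-scope.cpp -- illustrative input only.
    static const int kMaxRetries = 3;        // const-qualified: exempt from
                                             // the unused-decl warning
    static const int &kAlias = kMaxRetries;  // reference type: also exempt
    static int gCounter = 0;                 // still warned if never used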
@@ -1248,7 +1272,7 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
QualType Ty = VD->getType();
// Only look at the outermost level of typedef.
- if (const TypedefType *TT = dyn_cast<TypedefType>(Ty)) {
+ if (const TypedefType *TT = Ty->getAs<TypedefType>()) {
if (TT->getDecl()->hasAttr<UnusedAttr>())
return false;
}
@@ -1268,6 +1292,8 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
return false;
if (const Expr *Init = VD->getInit()) {
+ if (const ExprWithCleanups *Cleanups = dyn_cast<ExprWithCleanups>(Init))
+ Init = Cleanups->getSubExpr();
const CXXConstructExpr *Construct =
dyn_cast<CXXConstructExpr>(Init);
if (Construct && !Construct->isElidable()) {
@@ -1706,6 +1732,25 @@ DeclHasAttr(const Decl *D, const Attr *A) {
if (AA)
return false;
+ // The following thread safety attributes can also be duplicated.
+ switch (A->getKind()) {
+ case attr::ExclusiveLocksRequired:
+ case attr::SharedLocksRequired:
+ case attr::LocksExcluded:
+ case attr::ExclusiveLockFunction:
+ case attr::SharedLockFunction:
+ case attr::UnlockFunction:
+ case attr::ExclusiveTrylockFunction:
+ case attr::SharedTrylockFunction:
+ case attr::GuardedBy:
+ case attr::PtGuardedBy:
+ case attr::AcquiredBefore:
+ case attr::AcquiredAfter:
+ return false;
+ default:
+ ;
+ }
+
const OwnershipAttr *OA = dyn_cast<OwnershipAttr>(A);
const AnnotateAttr *Ann = dyn_cast<AnnotateAttr>(A);
for (Decl::attr_iterator i = D->attr_begin(), e = D->attr_end(); i != e; ++i)
@@ -1908,6 +1953,19 @@ static bool canRedefineFunction(const FunctionDecl *FD,
FD->getStorageClass() == SC_Extern);
}
+/// Is the given calling convention the ABI default for the given
+/// declaration?
+static bool isABIDefaultCC(Sema &S, CallingConv CC, FunctionDecl *D) {
+ CallingConv ABIDefaultCC;
+ if (isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
+ ABIDefaultCC = S.Context.getDefaultCXXMethodCallConv(D->isVariadic());
+ } else {
+ // Free C function or a static method.
+ ABIDefaultCC = (S.Context.getLangOpts().MRTD ? CC_X86StdCall : CC_C);
+ }
+ return ABIDefaultCC == CC;
+}
+
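And a sketch of the redeclaration pattern isABIDefaultCC() newly tolerates; it assumes an x86 target where the unadorned default is cdecl:

    // default-cc.cpp -- illustrative input only.
    void f(int);                            // no explicit calling convention
    __attribute__((cdecl)) void f(int) { }  // explicitly the ABI default:
                                            // accepted instead of being
                                            // diagnosed as a CC mismatch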
/// MergeFunctionDecl - We just parsed a function 'New' from
/// declarator D which has the same name and scope as a previous
/// declaration 'Old'. Figure out how to resolve this situation,
@@ -1976,6 +2034,9 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S) {
// later declared or defined without one, the second decl assumes the
// calling convention of the first.
//
+ // It's OK if a function is first declared without a calling convention,
+ // but is later declared or defined with the default calling convention.
+ //
// For the new decl, we have to look at the NON-canonical type to tell the
// difference between a function that really doesn't have a calling
// convention and one that is declared cdecl. That's because in
@@ -1989,10 +2050,22 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S) {
FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
bool RequiresAdjustment = false;
- if (OldTypeInfo.getCC() != CC_Default &&
- NewTypeInfo.getCC() == CC_Default) {
+ if (OldTypeInfo.getCC() == NewTypeInfo.getCC()) {
+ // Fast path: nothing to do.
+
+ // Inherit the CC from the previous declaration if it was specified
+ // there but not here.
+ } else if (NewTypeInfo.getCC() == CC_Default) {
NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
RequiresAdjustment = true;
+
+ // Don't complain about mismatches when the default CC is
+ // effectively the same as the explicit one.
+ } else if (OldTypeInfo.getCC() == CC_Default &&
+ isABIDefaultCC(*this, NewTypeInfo.getCC(), New)) {
+ NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
+ RequiresAdjustment = true;
+
} else if (!Context.isSameCallConv(OldTypeInfo.getCC(),
NewTypeInfo.getCC())) {
// Calling conventions really aren't compatible, so complain.
@@ -2398,7 +2471,7 @@ void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old) {
}
if (MergedT.isNull()) {
Diag(New->getLocation(), diag::err_redefinition_different_type)
- << New->getDeclName();
+ << New->getDeclName() << New->getType() << Old->getType();
Diag(Old->getLocation(), diag::note_previous_definition);
return New->setInvalidDecl();
}
@@ -2551,8 +2624,7 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
/// no declarator (e.g. "struct foo;") is parsed.
Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS) {
- return ParsedFreeStandingDeclSpec(S, AS, DS,
- MultiTemplateParamsArg(*this, 0, 0));
+ return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg());
}
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
@@ -2565,6 +2637,7 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
TagDecl *Tag = 0;
if (DS.getTypeSpecType() == DeclSpec::TST_class ||
DS.getTypeSpecType() == DeclSpec::TST_struct ||
+ DS.getTypeSpecType() == DeclSpec::TST_interface ||
DS.getTypeSpecType() == DeclSpec::TST_union ||
DS.getTypeSpecType() == DeclSpec::TST_enum) {
TagD = DS.getRepAsDecl();
@@ -2603,7 +2676,8 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
<< (DS.getTypeSpecType() == DeclSpec::TST_class ? 0 :
DS.getTypeSpecType() == DeclSpec::TST_struct ? 1 :
- DS.getTypeSpecType() == DeclSpec::TST_union ? 2 : 3);
+ DS.getTypeSpecType() == DeclSpec::TST_interface ? 2 :
+ DS.getTypeSpecType() == DeclSpec::TST_union ? 3 : 4);
else
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_no_declarators);
// Don't emit warnings after this error.
@@ -2724,16 +2798,17 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
if (TypeSpecType == DeclSpec::TST_class ||
TypeSpecType == DeclSpec::TST_struct ||
+ TypeSpecType == DeclSpec::TST_interface ||
TypeSpecType == DeclSpec::TST_union ||
TypeSpecType == DeclSpec::TST_enum) {
AttributeList* attrs = DS.getAttributes().getList();
while (attrs) {
- Diag(attrs->getScopeLoc(),
- diag::warn_declspec_attribute_ignored)
+ Diag(attrs->getLoc(), diag::warn_declspec_attribute_ignored)
<< attrs->getName()
<< (TypeSpecType == DeclSpec::TST_class ? 0 :
TypeSpecType == DeclSpec::TST_struct ? 1 :
- TypeSpecType == DeclSpec::TST_union ? 2 : 3);
+ TypeSpecType == DeclSpec::TST_union ? 2 :
+ TypeSpecType == DeclSpec::TST_interface ? 3 : 4);
attrs = attrs->getNext();
}
}
@@ -3353,7 +3428,6 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
switch (DS.getTypeSpecType()) {
case DeclSpec::TST_typename:
case DeclSpec::TST_typeofType:
- case DeclSpec::TST_decltype:
case DeclSpec::TST_underlyingType:
case DeclSpec::TST_atomic: {
// Grab the type from the parser.
@@ -3377,6 +3451,7 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
break;
}
+ case DeclSpec::TST_decltype:
case DeclSpec::TST_typeofExpr: {
Expr *E = DS.getRepAsExpr();
ExprResult Result = S.RebuildExprInCurrentInstantiation(E);
@@ -3411,7 +3486,7 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
D.setFunctionDefinitionKind(FDK_Declaration);
- Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg(*this));
+ Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
Dcl && Dcl->getDeclContext()->isFileContext())
@@ -3476,7 +3551,8 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
// void X::f();
// };
if (Cur->Equals(DC)) {
- Diag(Loc, diag::warn_member_extra_qualification)
+ Diag(Loc, LangOpts.MicrosoftExt? diag::warn_member_extra_qualification
+ : diag::err_member_extra_qualification)
<< Name << FixItHint::CreateRemoval(SS.getRange());
SS.clear();
return false;
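A sketch of the construct affected, which is now an error by default and a
warning only under Microsoft extensions:

    struct X {
      void X::f();   // extra qualification 'X::' on member 'f';
                     // the fix-it removes the nested-name-specifier
    };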
@@ -3710,11 +3786,11 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
New = ActOnTypedefDeclarator(S, D, DC, TInfo, Previous);
} else if (R->isFunctionType()) {
New = ActOnFunctionDeclarator(S, D, DC, TInfo, Previous,
- move(TemplateParamLists),
+ TemplateParamLists,
AddToScope);
} else {
New = ActOnVariableDeclarator(S, D, DC, TInfo, Previous,
- move(TemplateParamLists));
+ TemplateParamLists);
}
if (New == 0)
@@ -3729,9 +3805,9 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
return New;
}
-/// TryToFixInvalidVariablyModifiedType - Helper method to turn variable array
-/// types into constant array types in certain situations which would otherwise
-/// be errors (for GCC compatibility).
+/// Helper method to turn variable array types into constant array
+/// types in certain situations which would otherwise be errors (for
+/// GCC compatibility).
static QualType TryToFixInvalidVariablyModifiedType(QualType T,
ASTContext &Context,
bool &SizeIsNegative,
@@ -3799,6 +3875,52 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T,
Res, ArrayType::Normal, 0);
}
+static void
+FixInvalidVariablyModifiedTypeLoc(TypeLoc SrcTL, TypeLoc DstTL) {
+ if (PointerTypeLoc* SrcPTL = dyn_cast<PointerTypeLoc>(&SrcTL)) {
+ PointerTypeLoc* DstPTL = cast<PointerTypeLoc>(&DstTL);
+ FixInvalidVariablyModifiedTypeLoc(SrcPTL->getPointeeLoc(),
+ DstPTL->getPointeeLoc());
+ DstPTL->setStarLoc(SrcPTL->getStarLoc());
+ return;
+ }
+ if (ParenTypeLoc* SrcPTL = dyn_cast<ParenTypeLoc>(&SrcTL)) {
+ ParenTypeLoc* DstPTL = cast<ParenTypeLoc>(&DstTL);
+ FixInvalidVariablyModifiedTypeLoc(SrcPTL->getInnerLoc(),
+ DstPTL->getInnerLoc());
+ DstPTL->setLParenLoc(SrcPTL->getLParenLoc());
+ DstPTL->setRParenLoc(SrcPTL->getRParenLoc());
+ return;
+ }
+ ArrayTypeLoc* SrcATL = cast<ArrayTypeLoc>(&SrcTL);
+ ArrayTypeLoc* DstATL = cast<ArrayTypeLoc>(&DstTL);
+ TypeLoc SrcElemTL = SrcATL->getElementLoc();
+ TypeLoc DstElemTL = DstATL->getElementLoc();
+ DstElemTL.initializeFullCopy(SrcElemTL);
+ DstATL->setLBracketLoc(SrcATL->getLBracketLoc());
+ DstATL->setSizeExpr(SrcATL->getSizeExpr());
+ DstATL->setRBracketLoc(SrcATL->getRBracketLoc());
+}
+
+/// Helper method to turn variable array types into constant array
+/// types in certain situations which would otherwise be errors (for
+/// GCC compatibility).
+static TypeSourceInfo*
+TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo,
+ ASTContext &Context,
+ bool &SizeIsNegative,
+ llvm::APSInt &Oversized) {
+ QualType FixedTy
+ = TryToFixInvalidVariablyModifiedType(TInfo->getType(), Context,
+ SizeIsNegative, Oversized);
+ if (FixedTy.isNull())
+ return 0;
+ TypeSourceInfo *FixedTInfo = Context.getTrivialTypeSourceInfo(FixedTy);
+ FixInvalidVariablyModifiedTypeLoc(TInfo->getTypeLoc(),
+ FixedTInfo->getTypeLoc());
+ return FixedTInfo;
+}
+
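A minimal sketch of the GCC-compatibility fix-up this helper feeds, in C at
file scope:

    int arr[(int)5.0];   /* (int)5.0 is not an integer constant expression,
                            so the type is formally variably modified; it is
                            folded to int[5] with a warning, and the rebuilt
                            TypeSourceInfo now preserves the original star,
                            paren, and bracket locations */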
/// \brief Register the given locally-scoped external C declaration so
/// that it can be found later for redeclarations
void
@@ -3926,19 +4048,21 @@ Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
// then it shall have block scope.
// Note that variably modified types must be fixed before merging the decl so
// that redeclarations will match.
- QualType T = NewTD->getUnderlyingType();
+ TypeSourceInfo *TInfo = NewTD->getTypeSourceInfo();
+ QualType T = TInfo->getType();
if (T->isVariablyModifiedType()) {
getCurFunction()->setHasBranchProtectedScope();
if (S->getFnParent() == 0) {
bool SizeIsNegative;
llvm::APSInt Oversized;
- QualType FixedTy =
- TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative,
- Oversized);
- if (!FixedTy.isNull()) {
+ TypeSourceInfo *FixedTInfo =
+ TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
+ SizeIsNegative,
+ Oversized);
+ if (FixedTInfo) {
Diag(NewTD->getLocation(), diag::warn_illegal_constant_array_size);
- NewTD->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(FixedTy));
+ NewTD->setTypeSourceInfo(FixedTInfo);
} else {
if (SizeIsNegative)
Diag(NewTD->getLocation(), diag::err_typecheck_negative_array_size);
@@ -4203,7 +4327,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
D.getDeclSpec().getLocStart(),
D.getIdentifierLoc(),
D.getCXXScopeSpec(),
- TemplateParamLists.get(),
+ TemplateParamLists.data(),
TemplateParamLists.size(),
/*never a friend*/ false,
isExplicitSpecialization,
@@ -4244,7 +4368,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (TemplateParamLists.size() > 0 && D.getCXXScopeSpec().isSet()) {
NewVD->setTemplateParameterListsInfo(Context,
TemplateParamLists.size(),
- TemplateParamLists.release());
+ TemplateParamLists.data());
}
if (D.getDeclSpec().isConstexprSpecified())
@@ -4281,6 +4405,14 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
+ if (getLangOpts().CUDA) {
+ // CUDA B.2.5: "__shared__ and __constant__ variables have implied static
+ // storage [duration]."
+ if (SC == SC_None && S->getFnParent() != 0 &&
+ (NewVD->hasAttr<CUDASharedAttr>() || NewVD->hasAttr<CUDAConstantAttr>()))
+ NewVD->setStorageClass(SC_Static);
+ }
+
// In auto-retain/release, infer strong retention for variables of
// retainable type.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
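A sketch of the CUDA rule being implemented (hypothetical kernel):

    __global__ void kern(int *out) {
      __shared__ int tile[256];   // no storage class written; Sema now gives
                                  // it implied static storage per CUDA B.2.5
      tile[threadIdx.x] = threadIdx.x;
      out[threadIdx.x] = tile[threadIdx.x];
    }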
@@ -4490,7 +4622,8 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
if (NewVD->isInvalidDecl())
return false;
- QualType T = NewVD->getType();
+ TypeSourceInfo *TInfo = NewVD->getTypeSourceInfo();
+ QualType T = TInfo->getType();
if (T->isObjCObjectType()) {
Diag(NewVD->getLocation(), diag::err_statically_allocated_object)
@@ -4522,8 +4655,10 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
&& !NewVD->hasAttr<BlocksAttr>()) {
if (getLangOpts().getGC() != LangOptions::NonGC)
Diag(NewVD->getLocation(), diag::warn_gc_attribute_weak_on_local);
- else
+ else {
+ assert(!getLangOpts().ObjCAutoRefCount);
Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local);
+ }
}
bool isVM = T->isVariablyModifiedType();
@@ -4535,11 +4670,10 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
(T->isVariableArrayType() && NewVD->hasGlobalStorage())) {
bool SizeIsNegative;
llvm::APSInt Oversized;
- QualType FixedTy =
- TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative,
- Oversized);
-
- if (FixedTy.isNull() && T->isVariableArrayType()) {
+ TypeSourceInfo *FixedTInfo =
+ TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
+ SizeIsNegative, Oversized);
+ if (FixedTInfo == 0 && T->isVariableArrayType()) {
const VariableArrayType *VAT = Context.getAsVariableArrayType(T);
// FIXME: This won't give the correct result for
// int a[10][n];
@@ -4558,7 +4692,7 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
return false;
}
- if (FixedTy.isNull()) {
+ if (FixedTInfo == 0) {
if (NewVD->isFileVarDecl())
Diag(NewVD->getLocation(), diag::err_vm_decl_in_file_scope);
else
@@ -4568,7 +4702,8 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
}
Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size);
- NewVD->setType(FixedTy);
+ NewVD->setType(FixedTInfo->getType());
+ NewVD->setTypeSourceInfo(FixedTInfo);
}
if (Previous.empty() && NewVD->isExternC()) {
@@ -4655,6 +4790,31 @@ static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier,
return false;
}
+namespace {
+ enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
+}
+/// \brief Report an error regarding overriding, along with any relevant
+/// overridden methods.
+///
+/// \param DiagID the primary error to report.
+/// \param MD the overriding method.
+/// \param OEK which overrides to include as notes.
+static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
+ OverrideErrorKind OEK = OEK_All) {
+ S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I) {
+ // This check (& the OEK parameter) could be replaced by a predicate, but
+ // without lambdas that would be overkill. This is still nicer than writing
+ // out the diag loop 3 times.
+ if ((OEK == OEK_All) ||
+ (OEK == OEK_NonDeleted && !(*I)->isDeleted()) ||
+ (OEK == OEK_Deleted && (*I)->isDeleted()))
+ S.Diag((*I)->getLocation(), diag::note_overridden_virtual_function);
+ }
+}
+
/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
@@ -4663,6 +4823,8 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
FindOverriddenMethodData Data;
Data.Method = MD;
Data.S = this;
+  bool hasDeletedOverriddenMethods = false;
+  bool hasNonDeletedOverriddenMethods = false;
bool AddedAny = false;
if (DC->lookupInBases(&FindOverriddenMethod, &Data, Paths)) {
for (CXXBasePaths::decl_iterator I = Paths.found_decls_begin(),
@@ -4672,12 +4834,21 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
!CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
!CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
+        hasDeletedOverriddenMethods |= OldMD->isDeleted();
+        hasNonDeletedOverriddenMethods |= !OldMD->isDeleted();
AddedAny = true;
}
}
}
}
-
+
+  if (hasDeletedOverriddenMethods && !MD->isDeleted()) {
+ ReportOverrides(*this, diag::err_non_deleted_override, MD, OEK_Deleted);
+ }
+  if (hasNonDeletedOverriddenMethods && MD->isDeleted()) {
+ ReportOverrides(*this, diag::err_deleted_override, MD, OEK_NonDeleted);
+ }
+
return AddedAny;
}
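For illustration, the two new diagnostics in a C++11 sketch:

    struct B {
      virtual void f() = delete;
      virtual void g();
    };
    struct D : B {
      void f();            // error: non-deleted 'f' overrides deleted B::f
      void g() = delete;   // error: deleted 'g' overrides non-deleted B::g
    };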
@@ -4837,6 +5008,10 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
}
if (Correction) {
+ // FIXME: use Correction.getCorrectionRange() instead of computing the range
+ // here. This requires passing in the CXXScopeSpec to CorrectTypo which in
+ // turn causes the correction to fully qualify the name. If we fix
+ // CorrectTypo to minimally qualify then this change should be good.
SourceRange FixItLoc(NewFD->getLocation());
CXXScopeSpec &SS = ExtraArgs.D.getCXXScopeSpec();
if (Correction.getCorrectionSpecifier() && SS.isValid())
@@ -5072,6 +5247,22 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
}
}
+void Sema::checkVoidParamDecl(ParmVarDecl *Param) {
+ // In C++, the empty parameter-type-list must be spelled "void"; a
+ // typedef of void is not permitted.
+ if (getLangOpts().CPlusPlus &&
+ Param->getType().getUnqualifiedType() != Context.VoidTy) {
+ bool IsTypeAlias = false;
+ if (const TypedefType *TT = Param->getType()->getAs<TypedefType>())
+ IsTypeAlias = isa<TypeAliasDecl>(TT->getDecl());
+ else if (const TemplateSpecializationType *TST =
+ Param->getType()->getAs<TemplateSpecializationType>())
+ IsTypeAlias = TST->isTypeAlias();
+ Diag(Param->getLocation(), diag::err_param_typedef_of_void)
+ << IsTypeAlias;
+ }
+}
+
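For illustration, the check this factored-out helper performs (C++ only):

    typedef void VOID;
    void f(VOID);    // error: the empty parameter list must be spelled
                     // '(void)'; a typedef of void is not permitted
    void g(void);    // OK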
NamedDecl*
Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo, LookupResult &Previous,
@@ -5138,6 +5329,15 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setImplicitlyInline();
}
+ // If this is a method defined in an __interface, and is not a constructor
+ // or an overloaded operator, then set the pure flag (isVirtual will already
+ // return true).
+ if (const CXXRecordDecl *Parent =
+ dyn_cast<CXXRecordDecl>(NewFD->getDeclContext())) {
+ if (Parent->isInterface() && cast<CXXMethodDecl>(NewFD)->isUserProvided())
+ NewFD->setPure(true);
+ }
+
SetNestedNameSpecifier(NewFD, D);
isExplicitSpecialization = false;
isFunctionTemplateSpecialization = false;
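A sketch of the Microsoft extension involved, assuming -fms-extensions
(ICounter is a hypothetical name):

    __interface ICounter {
      int next();    // user-provided method of an __interface: already
                     // implicitly virtual, now also implicitly pure (= 0)
    };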
@@ -5157,7 +5357,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
D.getDeclSpec().getLocStart(),
D.getIdentifierLoc(),
D.getCXXScopeSpec(),
- TemplateParamLists.get(),
+ TemplateParamLists.data(),
TemplateParamLists.size(),
isFriend,
isExplicitSpecialization,
@@ -5196,7 +5396,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (TemplateParamLists.size() > 1) {
NewFD->setTemplateParameterListsInfo(Context,
TemplateParamLists.size() - 1,
- TemplateParamLists.release());
+ TemplateParamLists.data());
}
} else {
// This is a function template specialization.
@@ -5204,7 +5404,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// For source fidelity, store all the template param lists.
NewFD->setTemplateParameterListsInfo(Context,
TemplateParamLists.size(),
- TemplateParamLists.release());
+ TemplateParamLists.data());
// C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
if (isFriend) {
@@ -5236,7 +5436,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// For source fidelity, store all the template param lists.
NewFD->setTemplateParameterListsInfo(Context,
TemplateParamLists.size(),
- TemplateParamLists.release());
+ TemplateParamLists.data());
}
if (Invalid) {
@@ -5376,6 +5576,20 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
diag::err_static_out_of_line)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
}
+
+ // C++11 [except.spec]p15:
+ // A deallocation function with no exception-specification is treated
+ // as if it were specified with noexcept(true).
+ const FunctionProtoType *FPT = R->getAs<FunctionProtoType>();
+ if ((Name.getCXXOverloadedOperator() == OO_Delete ||
+ Name.getCXXOverloadedOperator() == OO_Array_Delete) &&
+ getLangOpts().CPlusPlus0x && FPT && !FPT->hasExceptionSpec()) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExceptionSpecType = EST_BasicNoexcept;
+ NewFD->setType(Context.getFunctionType(FPT->getResultType(),
+ FPT->arg_type_begin(),
+ FPT->getNumArgs(), EPI));
+ }
}
// Filter out previous declarations that don't match the scope.
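For illustration of C++11 [except.spec]p15, compiled with -std=c++11:

    void operator delete(void *p);   // no exception-specification written;
                                     // treated as if noexcept(true)
    static_assert(noexcept(operator delete(nullptr)), "implicitly noexcept");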
@@ -5413,21 +5627,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
FTI.ArgInfo[0].Param &&
cast<ParmVarDecl>(FTI.ArgInfo[0].Param)->getType()->isVoidType()) {
// Empty arg list, don't push any params.
- ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[0].Param);
-
- // In C++, the empty parameter-type-list must be spelled "void"; a
- // typedef of void is not permitted.
- if (getLangOpts().CPlusPlus &&
- Param->getType().getUnqualifiedType() != Context.VoidTy) {
- bool IsTypeAlias = false;
- if (const TypedefType *TT = Param->getType()->getAs<TypedefType>())
- IsTypeAlias = isa<TypeAliasDecl>(TT->getDecl());
- else if (const TemplateSpecializationType *TST =
- Param->getType()->getAs<TemplateSpecializationType>())
- IsTypeAlias = TST->isTypeAlias();
- Diag(Param->getLocation(), diag::err_param_typedef_of_void)
- << IsTypeAlias;
- }
+ checkVoidParamDecl(cast<ParmVarDecl>(FTI.ArgInfo[0].Param));
} else if (FTI.NumArgs > 0 && FTI.ArgInfo[0].Param != 0) {
for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
@@ -5500,6 +5700,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
isExplicitSpecialization));
}
+  // Recover gracefully from an invalid redeclaration.
+ else if (!Previous.empty())
+ D.setRedeclaration(true);
assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
"previous declaration set still overloaded");
@@ -5510,12 +5713,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
- ASTTemplateArgsPtr TemplateArgsPtr(*this,
- TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
translateTemplateArguments(TemplateArgsPtr,
TemplateArgs);
- TemplateArgsPtr.release();
HasExplicitTemplateArgs = true;
@@ -5969,20 +6170,12 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// Find any virtual functions that this function overrides.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD)) {
if (!Method->isFunctionTemplateSpecialization() &&
- !Method->getDescribedFunctionTemplate()) {
+ !Method->getDescribedFunctionTemplate() &&
+ Method->isCanonicalDecl()) {
if (AddOverriddenMethods(Method->getParent(), Method)) {
// If the function was marked as "static", we have a problem.
if (NewFD->getStorageClass() == SC_Static) {
- Diag(NewFD->getLocation(), diag::err_static_overrides_virtual)
- << NewFD->getDeclName();
- for (CXXMethodDecl::method_iterator
- Overridden = Method->begin_overridden_methods(),
- OverriddenEnd = Method->end_overridden_methods();
- Overridden != OverriddenEnd;
- ++Overridden) {
- Diag((*Overridden)->getLocation(),
- diag::note_overridden_virtual_function);
- }
+ ReportOverrides(*this, diag::err_static_overrides_virtual, Method);
}
}
}
@@ -6191,28 +6384,12 @@ namespace {
}
}
- // Sometimes, the expression passed in lacks the casts that are used
- // to determine which DeclRefExpr's to check. Assume that the casts
- // are present and continue visiting the expression.
- void HandleExpr(Expr *E) {
- // Skip checking T a = a where T is not a record or reference type.
- // Doing so is a way to silence uninitialized warnings.
- if (isRecordType || isReferenceType)
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
- HandleDeclRefExpr(DRE);
-
- if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
- HandleValue(CO->getTrueExpr());
- HandleValue(CO->getFalseExpr());
- }
-
- Visit(E);
- }
-
// For most expressions, the cast is directly above the DeclRefExpr.
// For conditional operators, the cast can be outside the conditional
// operator if both expressions are DeclRefExpr's.
void HandleValue(Expr *E) {
+ if (isReferenceType)
+ return;
E = E->IgnoreParenImpCasts();
if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(E)) {
HandleDeclRefExpr(DRE);
@@ -6222,11 +6399,32 @@ namespace {
if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
HandleValue(CO->getTrueExpr());
HandleValue(CO->getFalseExpr());
+ return;
}
+
+ if (isa<MemberExpr>(E)) {
+ Expr *Base = E->IgnoreParenImpCasts();
+ while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
+ // Check for static member variables and don't warn on them.
+ if (!isa<FieldDecl>(ME->getMemberDecl()))
+ return;
+ Base = ME->getBase()->IgnoreParenImpCasts();
+ }
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base))
+ HandleDeclRefExpr(DRE);
+ return;
+ }
+ }
+
+    // Reference types are handled here since any use of an uninitialized
+    // reference is invalid, not just r-value uses.
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ if (isReferenceType)
+ HandleDeclRefExpr(E);
}
void VisitImplicitCastExpr(ImplicitCastExpr *E) {
- if ((!isRecordType && E->getCastKind() == CK_LValueToRValue) ||
+ if (E->getCastKind() == CK_LValueToRValue ||
(isRecordType && E->getCastKind() == CK_NoOp))
HandleValue(E->getSubExpr());
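A sketch of the initializer pattern the rewritten HandleValue now sees
through (hypothetical types):

    struct B { int n; };
    struct A { B b; A(int); };
    A a(a.b.n);   // warned: 'a' is uninitialized when used within its own
                  // initialization; the walk follows the FieldDecl chain
                  // down to the base DeclRefExpr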
@@ -6237,22 +6435,36 @@ namespace {
// Don't warn on arrays since they can be treated as pointers.
if (E->getType()->canDecayToPointerType()) return;
- ValueDecl *VD = E->getMemberDecl();
- CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(VD);
- if (isa<FieldDecl>(VD) || (MD && !MD->isStatic()))
- if (DeclRefExpr *DRE
- = dyn_cast<DeclRefExpr>(E->getBase()->IgnoreParenImpCasts())) {
+      // Warn when a non-static method call is followed by non-static member
+      // field accesses, which in turn end in a DeclRefExpr.
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl());
+ bool Warn = (MD && !MD->isStatic());
+ Expr *Base = E->getBase()->IgnoreParenImpCasts();
+ while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
+ if (!isa<FieldDecl>(ME->getMemberDecl()))
+ Warn = false;
+ Base = ME->getBase()->IgnoreParenImpCasts();
+ }
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (Warn)
HandleDeclRefExpr(DRE);
- return;
- }
+ return;
+ }
- Inherited::VisitMemberExpr(E);
+      // The base of the MemberExpr chain is neither a MemberExpr nor a
+      // DeclRefExpr; visit it with the generic visitor.
+ Visit(Base);
}
void VisitUnaryOperator(UnaryOperator *E) {
// For POD record types, addresses of their own members are well-defined.
- if (E->getOpcode() == UO_AddrOf && isRecordType && isPODType &&
- isa<MemberExpr>(E->getSubExpr()->IgnoreParens())) return;
+ if (E->getOpcode() == UO_AddrOf && isRecordType &&
+ isa<MemberExpr>(E->getSubExpr()->IgnoreParens())) {
+ if (!isPODType)
+ HandleValue(E->getSubExpr());
+ return;
+ }
Inherited::VisitUnaryOperator(E);
}
@@ -6261,20 +6473,38 @@ namespace {
void HandleDeclRefExpr(DeclRefExpr *DRE) {
Decl* ReferenceDecl = DRE->getDecl();
if (OrigDecl != ReferenceDecl) return;
- LookupResult Result(S, DRE->getNameInfo(), Sema::LookupOrdinaryName,
- Sema::NotForRedeclaration);
+ unsigned diag = isReferenceType
+ ? diag::warn_uninit_self_reference_in_reference_init
+ : diag::warn_uninit_self_reference_in_init;
S.DiagRuntimeBehavior(DRE->getLocStart(), DRE,
- S.PDiag(diag::warn_uninit_self_reference_in_init)
- << Result.getLookupName()
+ S.PDiag(diag)
+ << DRE->getNameInfo().getName()
<< OrigDecl->getLocation()
<< DRE->getSourceRange());
}
};
-}
-/// CheckSelfReference - Warns if OrigDecl is used in expression E.
-void Sema::CheckSelfReference(Decl* OrigDecl, Expr *E) {
- SelfReferenceChecker(*this, OrigDecl).HandleExpr(E);
+ /// CheckSelfReference - Warns if OrigDecl is used in expression E.
+ static void CheckSelfReference(Sema &S, Decl* OrigDecl, Expr *E,
+ bool DirectInit) {
+ // Parameters arguments are occassionially constructed with itself,
+ // for instance, in recursive functions. Skip them.
+ if (isa<ParmVarDecl>(OrigDecl))
+ return;
+
+ E = E->IgnoreParens();
+
+ // Skip checking T a = a where T is not a record or reference type.
+ // Doing so is a way to silence uninitialized warnings.
+ if (!DirectInit && !cast<VarDecl>(OrigDecl)->getType()->isRecordType())
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ if (ICE->getCastKind() == CK_LValueToRValue)
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr()))
+ if (DRE->getDecl() == OrigDecl)
+ return;
+
+ SelfReferenceChecker(S, OrigDecl).Visit(E);
+ }
}
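For illustration, the cases the relocated checker now distinguishes at file
scope:

    int g = g;       // skipped on purpose: a direct self-copy of a scalar
                     // is an accepted way to silence uninitialized warnings
    int h = h + 1;   // warned: 'h' is used uninitialized in its own
                     // initializer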
/// AddInitializerToDecl - Adds the initializer Init to the
@@ -6311,15 +6541,6 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
return;
}
- // Check for self-references within variable initializers.
- // Variables declared within a function/method body (except for references)
- // are handled by a dataflow analysis.
- // Record types initialized by initializer list are handled here.
- // Initialization by constructors are handled in TryConstructorInitialization.
- if ((!VDecl->hasLocalStorage() || VDecl->getType()->isReferenceType()) &&
- (isa<InitListExpr>(Init) || !VDecl->getType()->isRecordType()))
- CheckSelfReference(RealDecl, Init);
-
ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
// C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
@@ -6495,8 +6716,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
}
InitializationSequence InitSeq(*this, Entity, Kind, Args, NumArgs);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, Args,NumArgs),
- &DclT);
+ MultiExprArg(Args, NumArgs), &DclT);
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
@@ -6505,6 +6725,14 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
Init = Result.takeAs<Expr>();
}
+ // Check for self-references within variable initializers.
+ // Variables declared within a function/method body (except for references)
+ // are handled by a dataflow analysis.
+ if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() ||
+ VDecl->getType()->isReferenceType()) {
+ CheckSelfReference(*this, RealDecl, Init, DirectInit);
+ }
+
// If the type changed, it means we had an incomplete type that was
// completed by the initializer. For example:
// int ary[] = { 1, 3, 5 };
@@ -6515,9 +6743,28 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
// Check any implicit conversions within the expression.
CheckImplicitConversions(Init, VDecl->getLocation());
- if (!VDecl->isInvalidDecl())
+ if (!VDecl->isInvalidDecl()) {
checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
+ if (VDecl->hasAttr<BlocksAttr>())
+ checkRetainCycles(VDecl, Init);
+
+ // It is safe to assign a weak reference into a strong variable.
+ // Although this code can still have problems:
+ // id x = self.weakProp;
+ // id y = self.weakProp;
+    // we do not warn, to avoid warning spuriously when 'x' and 'y' are on
+    // separate paths through the function. This should be revisited if
+ // -Wrepeated-use-of-weak is made flow-sensitive.
+ if (VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong) {
+ DiagnosticsEngine::Level Level =
+ Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
+ Init->getLocStart());
+ if (Level != DiagnosticsEngine::Ignored)
+ getCurFunction()->markSafeWeakUse(Init);
+ }
+ }
+
Init = MaybeCreateExprWithCleanups(Init);
// Attach the initializer to the decl.
VDecl->setInit(Init);
@@ -6758,8 +7005,10 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
AbstractVariableType))
Var->setInvalidDecl();
if (!Type->isDependentType() && !Var->isInvalidDecl() &&
- Var->getStorageClass() == SC_PrivateExtern)
+ Var->getStorageClass() == SC_PrivateExtern) {
Diag(Var->getLocation(), diag::warn_private_extern);
+ Diag(Var->getLocation(), diag::note_private_extern);
+ }
return;
@@ -6881,8 +7130,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
= InitializationKind::CreateDefault(Var->getLocation());
InitializationSequence InitSeq(*this, Entity, Kind, 0, 0);
- ExprResult Init = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, 0, 0));
+ ExprResult Init = InitSeq.Perform(*this, Entity, Kind, MultiExprArg());
if (Init.isInvalid())
Var->setInvalidDecl();
else if (Init.get()) {
@@ -6971,8 +7219,8 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// All the following checks are C++ only.
if (!getLangOpts().CPlusPlus) return;
- QualType baseType = Context.getBaseElementType(var->getType());
- if (baseType->isDependentType()) return;
+ QualType type = var->getType();
+ if (type->isDependentType()) return;
// __block variables might require us to capture a copy-initializer.
if (var->hasAttr<BlocksAttr>()) {
@@ -6981,8 +7229,6 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// Regardless, we don't want to ignore array nesting when
// constructing this copy.
- QualType type = var->getType();
-
if (type->isStructureOrClassType()) {
SourceLocation poi = var->getLocation();
Expr *varRef =new (Context) DeclRefExpr(var, false, type, VK_LValue, poi);
@@ -7000,8 +7246,10 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
Expr *Init = var->getInit();
bool IsGlobal = var->hasGlobalStorage() && !var->isStaticLocal();
+ QualType baseType = Context.getBaseElementType(type);
- if (!var->getDeclContext()->isDependentContext() && Init) {
+ if (!var->getDeclContext()->isDependentContext() &&
+ Init && !Init->isValueDependent()) {
if (IsGlobal && !var->isConstexpr() &&
getDiagnostics().getDiagnosticLevel(diag::warn_global_constructor,
var->getLocation())
@@ -7189,7 +7437,7 @@ void Sema::ActOnDocumentableDecls(Decl **Group, unsigned NumDecls) {
// the lookahead in the lexer: we've consumed the semicolon and looked
// ahead through comments.
for (unsigned i = 0; i != NumDecls; ++i)
- Context.getCommentForDecl(Group[i]);
+ Context.getCommentForDecl(Group[i], &PP);
}
}
@@ -7461,6 +7709,9 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
unsigned DiagID; // unused
DS.SetTypeSpecType(DeclSpec::TST_int, FTI.ArgInfo[i].IdentLoc,
PrevSpec, DiagID);
+ // Use the identifier location for the type source range.
+ DS.SetRangeStart(FTI.ArgInfo[i].IdentLoc);
+ DS.SetRangeEnd(FTI.ArgInfo[i].IdentLoc);
Declarator ParamD(DS, Declarator::KNRTypeListContext);
ParamD.SetIdentifier(FTI.ArgInfo[i].Ident, FTI.ArgInfo[i].IdentLoc);
FTI.ArgInfo[i].Param = ActOnParamDeclarator(S, ParamD);
@@ -7475,8 +7726,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D) {
Scope *ParentScope = FnBodyScope->getParent();
D.setFunctionDefinitionKind(FDK_Definition);
- Decl *DP = HandleDeclarator(ParentScope, D,
- MultiTemplateParamsArg(*this));
+ Decl *DP = HandleDeclarator(ParentScope, D, MultiTemplateParamsArg());
return ActOnStartOfFunctionDef(FnBodyScope, DP);
}
@@ -7718,7 +7968,7 @@ void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) {
}
Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
- return ActOnFinishFunctionBody(D, move(BodyArg), false);
+ return ActOnFinishFunctionBody(D, BodyArg, false);
}
Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
@@ -7776,22 +8026,16 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (Body)
computeNRVO(Body, getCurFunction());
}
- if (getCurFunction()->ObjCShouldCallSuperDealloc) {
- Diag(MD->getLocEnd(), diag::warn_objc_missing_super_dealloc);
- getCurFunction()->ObjCShouldCallSuperDealloc = false;
- }
- if (getCurFunction()->ObjCShouldCallSuperFinalize) {
- Diag(MD->getLocEnd(), diag::warn_objc_missing_super_finalize);
- getCurFunction()->ObjCShouldCallSuperFinalize = false;
+ if (getCurFunction()->ObjCShouldCallSuper) {
+ Diag(MD->getLocEnd(), diag::warn_objc_missing_super_call)
+ << MD->getSelector().getAsString();
+ getCurFunction()->ObjCShouldCallSuper = false;
}
} else {
return 0;
}
- assert(!getCurFunction()->ObjCShouldCallSuperDealloc &&
- "This should only be set for ObjC methods, which should have been "
- "handled in the block above.");
- assert(!getCurFunction()->ObjCShouldCallSuperFinalize &&
+ assert(!getCurFunction()->ObjCShouldCallSuper &&
"This should only be set for ObjC methods, which should have been "
"handled in the block above.");
@@ -7927,13 +8171,28 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID);
(void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
+ SourceLocation NoLoc;
Declarator D(DS, Declarator::BlockContext);
- D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, false,
- SourceLocation(), 0, 0, 0, true,
- SourceLocation(), SourceLocation(),
- SourceLocation(), SourceLocation(),
- EST_None, SourceLocation(),
- 0, 0, 0, 0, Loc, Loc, D),
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
+ /*IsAmbiguous=*/false,
+                                             /*LParenLoc=*/NoLoc,
+ /*ArgInfo=*/0,
+ /*NumArgs=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*MutableLoc=*/NoLoc,
+ EST_None,
+ /*ESpecLoc=*/NoLoc,
+ /*Exceptions=*/0,
+ /*ExceptionRanges=*/0,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/0,
+ Loc, Loc, D),
DS.getAttributes(),
SourceLocation());
D.SetIdentifier(&II, Loc);
@@ -8082,6 +8341,7 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
switch (D.getDeclSpec().getTypeSpecType()) {
case TST_enum:
case TST_struct:
+ case TST_interface:
case TST_union:
case TST_class: {
TagDecl *tagFromDeclSpec = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
@@ -8157,6 +8417,29 @@ bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
return false;
}
+/// \brief Get the diagnostic %select index for a tag kind, for use in
+/// redeclaration diagnostics.
+/// WARNING: Indexes apply to particular diagnostics only!
+///
+/// \returns diagnostic %select index.
+static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) {
+ switch (Tag) {
+ case TTK_Struct: return 0;
+ case TTK_Interface: return 1;
+ case TTK_Class: return 2;
+ default: llvm_unreachable("Invalid tag kind for redecl diagnostic!");
+ }
+}
+
+/// \brief Determine if tag kind is a class-key compatible with
+/// class for redeclaration (class, struct, or __interface).
+///
+/// \returns true iff the tag kind is compatible.
+static bool isClassCompatTagKind(TagTypeKind Tag) {
+ return Tag == TTK_Struct || Tag == TTK_Class || Tag == TTK_Interface;
+}
+
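For illustration, the widened class-key compatibility set in action:

    class C;
    struct C { };    // warning: tag mismatch; the fix-it now substitutes
                     // the right keyword via getTagTypeKindName()
    union U;
    class U { };     // still rejected: union is not class-compatible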
/// \brief Determine whether a tag with a given kind is acceptable
/// as a redeclaration of the given tag declaration.
///
@@ -8179,12 +8462,11 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
// struct class-key shall be used to refer to a class (clause 9)
// declared using the class or struct class-key.
TagTypeKind OldTag = Previous->getTagKind();
- if (!isDefinition || (NewTag != TTK_Class && NewTag != TTK_Struct))
+ if (!isDefinition || !isClassCompatTagKind(NewTag))
if (OldTag == NewTag)
return true;
- if ((OldTag == TTK_Struct || OldTag == TTK_Class) &&
- (NewTag == TTK_Struct || NewTag == TTK_Class)) {
+ if (isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)) {
// Warn about the struct/class tag mismatch.
bool isTemplate = false;
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
@@ -8194,7 +8476,8 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
// In a template instantiation, do not offer fix-its for tag mismatches
// since they usually mess up the template instead of fixing the problem.
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
- << (NewTag == TTK_Class) << isTemplate << &Name;
+ << getRedeclDiagFromTagKind(NewTag) << isTemplate << &Name
+ << getRedeclDiagFromTagKind(OldTag);
return true;
}
@@ -8213,13 +8496,13 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
if (!previousMismatch) {
previousMismatch = true;
Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
- << (NewTag == TTK_Class) << isTemplate << &Name;
+ << getRedeclDiagFromTagKind(NewTag) << isTemplate << &Name
+ << getRedeclDiagFromTagKind(I->getTagKind());
}
Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
- << (NewTag == TTK_Class)
+ << getRedeclDiagFromTagKind(NewTag)
<< FixItHint::CreateReplacement(I->getInnerLocStart(),
- NewTag == TTK_Class?
- "class" : "struct");
+ TypeWithKeyword::getTagTypeKindName(NewTag));
}
}
return true;
@@ -8235,16 +8518,16 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
}
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
- << (NewTag == TTK_Class)
- << isTemplate << &Name;
+ << getRedeclDiagFromTagKind(NewTag) << isTemplate << &Name
+ << getRedeclDiagFromTagKind(OldTag);
Diag(Redecl->getLocation(), diag::note_previous_use);
// If there is a previous definition, suggest a fix-it.
if (Previous->getDefinition()) {
Diag(NewTagLoc, diag::note_struct_class_suggestion)
- << (Redecl->getTagKind() == TTK_Class)
+ << getRedeclDiagFromTagKind(Redecl->getTagKind())
<< FixItHint::CreateReplacement(SourceRange(NewTagLoc),
- Redecl->getTagKind() == TTK_Class? "class" : "struct");
+ TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
}
return true;
@@ -8287,7 +8570,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
(SS.isNotEmpty() && TUK != TUK_Reference)) {
if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(KWLoc, NameLoc, SS,
- TemplateParameterLists.get(),
+ TemplateParameterLists.data(),
TemplateParameterLists.size(),
TUK == TUK_Friend,
isExplicitSpecialization,
@@ -8304,8 +8587,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SS, Name, NameLoc, Attr,
TemplateParams, AS,
ModulePrivateLoc,
- TemplateParameterLists.size() - 1,
- (TemplateParameterList**) TemplateParameterLists.release());
+ TemplateParameterLists.size()-1,
+ TemplateParameterLists.data());
return Result.get();
} else {
// The "template<>" header is extraneous.
@@ -8854,7 +9137,7 @@ CreateNewDecl:
if (TemplateParameterLists.size() > 0) {
New->setTemplateParameterListsInfo(Context,
TemplateParameterLists.size(),
- (TemplateParameterList**) TemplateParameterLists.release());
+ TemplateParameterLists.data());
}
}
else
@@ -9309,12 +9592,15 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
if (!InvalidDecl && T->isVariablyModifiedType()) {
bool SizeIsNegative;
llvm::APSInt Oversized;
- QualType FixedTy = TryToFixInvalidVariablyModifiedType(T, Context,
- SizeIsNegative,
- Oversized);
- if (!FixedTy.isNull()) {
+
+ TypeSourceInfo *FixedTInfo =
+ TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
+ SizeIsNegative,
+ Oversized);
+ if (FixedTInfo) {
Diag(Loc, diag::warn_illegal_constant_array_size);
- T = FixedTy;
+ TInfo = FixedTInfo;
+ T = FixedTInfo->getType();
} else {
if (SizeIsNegative)
Diag(Loc, diag::err_typecheck_negative_array_size);
@@ -9471,12 +9757,12 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
return false;
}
-/// If the given constructor is user-provided, produce a diagnostic explaining
+/// If the given constructor is user-declared, produce a diagnostic explaining
/// that it makes the class non-trivial.
-static bool DiagnoseNontrivialUserProvidedCtor(Sema &S, QualType QT,
+static bool diagnoseNonTrivialUserDeclaredCtor(Sema &S, QualType QT,
CXXConstructorDecl *CD,
Sema::CXXSpecialMember CSM) {
- if (!CD->isUserProvided())
+ if (CD->isImplicit())
return false;
SourceLocation CtorLoc = CD->getLocation();
@@ -9499,17 +9785,17 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
if (RD->hasUserDeclaredConstructor()) {
typedef CXXRecordDecl::ctor_iterator ctor_iter;
for (ctor_iter CI = RD->ctor_begin(), CE = RD->ctor_end(); CI != CE; ++CI)
- if (DiagnoseNontrivialUserProvidedCtor(*this, QT, *CI, member))
+ if (diagnoseNonTrivialUserDeclaredCtor(*this, QT, *CI, member))
return;
- // No user-provided constructors; look for constructor templates.
+  // No user-declared constructors; look for constructor templates.
typedef CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl>
tmpl_iter;
for (tmpl_iter TI(RD->decls_begin()), TE(RD->decls_end());
TI != TE; ++TI) {
CXXConstructorDecl *CD =
dyn_cast<CXXConstructorDecl>(TI->getTemplatedDecl());
- if (CD && DiagnoseNontrivialUserProvidedCtor(*this, QT, CD, member))
+ if (CD && diagnoseNonTrivialUserDeclaredCtor(*this, QT, CD, member))
return;
}
}
@@ -10036,42 +10322,6 @@ void Sema::ActOnFields(Scope* S,
Convs->setAccess(I, (*I)->getAccess());
if (!CXXRecord->isDependentType()) {
- // Objective-C Automatic Reference Counting:
- // If a class has a non-static data member of Objective-C pointer
- // type (or array thereof), it is a non-POD type and its
- // default constructor (if any), copy constructor, copy assignment
- // operator, and destructor are non-trivial.
- //
- // This rule is also handled by CXXRecordDecl::completeDefinition().
- // However, here we check whether this particular class is only
- // non-POD because of the presence of an Objective-C pointer member.
- // If so, objects of this type cannot be shared between code compiled
- // with ARC and code compiled with manual retain/release.
- if (getLangOpts().ObjCAutoRefCount &&
- CXXRecord->hasObjectMember() &&
- CXXRecord->getLinkage() == ExternalLinkage) {
- if (CXXRecord->isPOD()) {
- Diag(CXXRecord->getLocation(),
- diag::warn_arc_non_pod_class_with_object_member)
- << CXXRecord;
- } else {
- // FIXME: Fix-Its would be nice here, but finding a good location
- // for them is going to be tricky.
- if (CXXRecord->hasTrivialCopyConstructor())
- Diag(CXXRecord->getLocation(),
- diag::warn_arc_trivial_member_function_with_object_member)
- << CXXRecord << 0;
- if (CXXRecord->hasTrivialCopyAssignment())
- Diag(CXXRecord->getLocation(),
- diag::warn_arc_trivial_member_function_with_object_member)
- << CXXRecord << 1;
- if (CXXRecord->hasTrivialDestructor())
- Diag(CXXRecord->getLocation(),
- diag::warn_arc_trivial_member_function_with_object_member)
- << CXXRecord << 2;
- }
- }
-
// Adjust user-defined destructor exception spec.
if (getLangOpts().CPlusPlus0x &&
CXXRecord->hasUserDeclaredDestructor())
@@ -10103,7 +10353,7 @@ void Sema::ActOnFields(Scope* S,
// class subobject has more than one final overrider the
// program is ill-formed.
Diag(Record->getLocation(), diag::err_multiple_final_overriders)
- << (NamedDecl *)M->first << Record;
+ << (const NamedDecl *)M->first << Record;
Diag(M->first->getLocation(),
diag::note_overridden_virtual_function);
for (OverridingMethods::overriding_iterator
@@ -10111,7 +10361,7 @@ void Sema::ActOnFields(Scope* S,
OMEnd = SO->second.end();
OM != OMEnd; ++OM)
Diag(OM->Method->getLocation(), diag::note_final_overrider)
- << (NamedDecl *)M->first << OM->Method->getParent();
+ << (const NamedDecl *)M->first << OM->Method->getParent();
Record->setInvalidDecl();
}
@@ -10472,57 +10722,6 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
return New;
}
-// Emits a warning if every element in the enum is the same value and if
-// every element is initialized with a integer or boolean literal.
-static void CheckForUniqueEnumValues(Sema &S, Decl **Elements,
- unsigned NumElements, EnumDecl *Enum,
- QualType EnumType) {
- if (S.Diags.getDiagnosticLevel(diag::warn_identical_enum_values,
- Enum->getLocation()) ==
- DiagnosticsEngine::Ignored)
- return;
-
- if (NumElements < 2)
- return;
-
- if (!Enum->getIdentifier())
- return;
-
- llvm::APSInt FirstVal;
-
- for (unsigned i = 0; i != NumElements; ++i) {
- EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]);
- if (!ECD)
- return;
-
- Expr *InitExpr = ECD->getInitExpr();
- if (!InitExpr)
- return;
- InitExpr = InitExpr->IgnoreImpCasts();
- if (!isa<IntegerLiteral>(InitExpr) && !isa<CXXBoolLiteralExpr>(InitExpr))
- return;
-
- if (i == 0) {
- FirstVal = ECD->getInitVal();
- continue;
- }
-
- if (!llvm::APSInt::isSameValue(FirstVal, ECD->getInitVal()))
- return;
- }
-
- S.Diag(Enum->getLocation(), diag::warn_identical_enum_values)
- << EnumType << FirstVal.toString(10)
- << Enum->getSourceRange();
-
- EnumConstantDecl *Last = cast<EnumConstantDecl>(Elements[NumElements - 1]),
- *Next = cast<EnumConstantDecl>(Elements[NumElements - 2]);
-
- S.Diag(Last->getLocation(), diag::note_identical_enum_values)
- << FixItHint::CreateReplacement(Last->getInitExpr()->getSourceRange(),
- Next->getName());
-}
-
void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDeclX,
Decl **Elements, unsigned NumElements,
@@ -10745,8 +10944,6 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
// it needs to go into the function scope.
if (InFunctionDeclarator)
DeclsInPrototypeScope.push_back(Enum);
-
- CheckForUniqueEnumValues(*this, Elements, NumElements, Enum, EnumType);
}
Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
@@ -10845,10 +11042,6 @@ Decl *Sema::getObjCDeclContext() const {
}
AvailabilityResult Sema::getCurContextAvailability() const {
- const Decl *D = cast<Decl>(getCurLexicalContext());
- // A category implicitly has the availability of the interface.
- if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D))
- D = CatD->getClassInterface();
-
+ const Decl *D = cast<Decl>(getCurObjCLexicalContext());
return D->getAvailability();
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
index caa7b2f..e326a20 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -415,14 +415,19 @@ static void checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
}
if (StringLiteral *StrLit = dyn_cast<StringLiteral>(ArgExp)) {
- // Ignore empty strings without warnings
- if (StrLit->getLength() == 0)
+ if (StrLit->getLength() == 0 ||
+ StrLit->getString() == StringRef("*")) {
+ // Pass empty strings to the analyzer without warnings.
+ // Treat "*" as the universal lock.
+ Args.push_back(ArgExp);
continue;
+ }
// We allow constant strings to be used as a placeholder for expressions
// that are not valid C++ syntax, but warn that they are ignored.
S.Diag(Attr.getLoc(), diag::warn_thread_attribute_ignored) <<
Attr.getName();
+ Args.push_back(ArgExp);
continue;
}
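A sketch of the thread-safety annotation arguments affected (hypothetical
function):

    void reset_all() __attribute__((locks_excluded("*")));
    // "*" is now forwarded to the analysis as the universal lock rather than
    // silently dropped; empty strings are likewise passed through unwarned.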
@@ -859,7 +864,6 @@ static void handleLockReturnedAttr(Sema &S, Decl *D,
if (!checkAttributeNumArgs(S, Attr, 1))
return;
- Expr *Arg = Attr.getArg(0);
if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
@@ -867,9 +871,6 @@ static void handleLockReturnedAttr(Sema &S, Decl *D,
return;
}
- if (Arg->isTypeDependent())
- return;
-
// check that the argument is lockable object
SmallVector<Expr*, 1> Args;
checkAttrArgsAreLockableObjs(S, D, Attr, Args);
@@ -962,7 +963,8 @@ static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
// If the alignment is less than or equal to 8 bits, the packed attribute
// has no effect.
- if (!FD->getType()->isIncompleteType() &&
+ if (!FD->getType()->isDependentType() &&
+ !FD->getType()->isIncompleteType() &&
S.Context.getTypeAlign(FD->getType()) <= 8)
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
<< Attr.getName() << FD->getType();
@@ -973,8 +975,8 @@ static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
static void handleMsStructAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (TagDecl *TD = dyn_cast<TagDecl>(D))
- TD->addAttr(::new (S.Context) MsStructAttr(Attr.getRange(), S.Context));
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ RD->addAttr(::new (S.Context) MsStructAttr(Attr.getRange(), S.Context));
else
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
}
@@ -1522,6 +1524,20 @@ static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
Str->getString()));
}
+static void handleMinSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isa<FunctionDecl>(D) && !isa<ObjCMethodDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) MinSizeAttr(Attr.getRange(), S.Context));
+}
+
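For illustration, the new attribute applies only to functions and
Objective-C methods:

    __attribute__((minsize)) void rarely_hot(void);   // optimized for size
    __attribute__((minsize)) int bad_target;          // error: wrong decl type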
static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Check the attribute arguments.
if (!checkAttributeNumArgs(S, Attr, 0))
@@ -2268,16 +2284,14 @@ static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
}
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
QualType T = TD->getUnderlyingType();
- if (!T->isPointerType() ||
- !T->getAs<PointerType>()->getPointeeType()->isRecordType()) {
+ if (!T->isCARCBridgableType()) {
S.Diag(TD->getLocation(), diag::err_nsobject_attribute);
return;
}
}
else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) {
QualType T = PD->getType();
- if (!T->isPointerType() ||
- !T->getAs<PointerType>()->getPointeeType()->isRecordType()) {
+ if (!T->isCARCBridgableType()) {
S.Diag(PD->getLocation(), diag::err_nsobject_attribute);
return;
}
@@ -3583,7 +3597,12 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
D->addAttr(::new (S.Context) PcsAttr(Attr.getRange(), S.Context, PCS));
+ return;
}
+ case AttributeList::AT_PnaclCall:
+ D->addAttr(::new (S.Context) PnaclCallAttr(Attr.getRange(), S.Context));
+ return;
+
default:
llvm_unreachable("unexpected attribute kind");
}
@@ -3636,9 +3655,17 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
Diag(attr.getLoc(), diag::err_invalid_pcs);
return true;
}
+ case AttributeList::AT_PnaclCall: CC = CC_PnaclCall; break;
default: llvm_unreachable("unexpected attribute kind");
}
+ const TargetInfo &TI = Context.getTargetInfo();
+ TargetInfo::CallingConvCheckResult A = TI.checkCallingConvention(CC);
+ if (A == TargetInfo::CCCR_Warning) {
+ Diag(attr.getLoc(), diag::warn_cconv_ignored) << attr.getName();
+ CC = TI.getDefaultCallingConv();
+ }
+
return false;
}
@@ -3878,11 +3905,11 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
returnType = MD->getResultType();
- else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
- returnType = PD->getType();
else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
(Attr.getKind() == AttributeList::AT_NSReturnsRetained))
return; // ignore: was handled as a type attribute
+ else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
+ returnType = PD->getType();
else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
returnType = FD->getResultType();
else {
@@ -3971,6 +3998,33 @@ static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
::new (S.Context) ObjCReturnsInnerPointerAttr(attr.getRange(), S.Context));
}
+static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
+ const AttributeList &attr) {
+ SourceLocation loc = attr.getLoc();
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(D);
+
+ if (!method) {
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+ << SourceRange(loc, loc) << attr.getName() << ExpectedMethod;
+ return;
+ }
+ DeclContext *DC = method->getDeclContext();
+ if (const ObjCProtocolDecl *PDecl = dyn_cast_or_null<ObjCProtocolDecl>(DC)) {
+ S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
+ << attr.getName() << 0;
+ S.Diag(PDecl->getLocation(), diag::note_protocol_decl);
+ return;
+ }
+ if (method->getMethodFamily() == OMF_dealloc) {
+ S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
+ << attr.getName() << 1;
+ return;
+ }
+
+ method->addAttr(
+ ::new (S.Context) ObjCRequiresSuperAttr(attr.getRange(), S.Context));
+}
+
/// Handle cf_audited_transfer and cf_unknown_transfer.
static void handleCFTransferAttr(Sema &S, Decl *D, const AttributeList &A) {
if (!isa<FunctionDecl>(D)) {
@@ -4149,19 +4203,21 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
static void handleInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (S.LangOpts.MicrosoftExt) {
- AttributeList::Kind Kind = Attr.getKind();
- if (Kind == AttributeList::AT_SingleInheritance)
- D->addAttr(
- ::new (S.Context) SingleInheritanceAttr(Attr.getRange(), S.Context));
- else if (Kind == AttributeList::AT_MultipleInheritance)
- D->addAttr(
- ::new (S.Context) MultipleInheritanceAttr(Attr.getRange(), S.Context));
- else if (Kind == AttributeList::AT_VirtualInheritance)
- D->addAttr(
- ::new (S.Context) VirtualInheritanceAttr(Attr.getRange(), S.Context));
- } else
+ if (!S.LangOpts.MicrosoftExt) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ return;
+ }
+
+ AttributeList::Kind Kind = Attr.getKind();
+ if (Kind == AttributeList::AT_SingleInheritance)
+ D->addAttr(
+ ::new (S.Context) SingleInheritanceAttr(Attr.getRange(), S.Context));
+ else if (Kind == AttributeList::AT_MultipleInheritance)
+ D->addAttr(
+ ::new (S.Context) MultipleInheritanceAttr(Attr.getRange(), S.Context));
+ else if (Kind == AttributeList::AT_VirtualInheritance)
+ D->addAttr(
+ ::new (S.Context) VirtualInheritanceAttr(Attr.getRange(), S.Context));
}
static void handlePortabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -4246,6 +4302,9 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ExtVectorType:
handleExtVectorTypeAttr(S, scope, D, Attr);
break;
+ case AttributeList::AT_MinSize:
+ handleMinSizeAttr(S, D, Attr);
+ break;
case AttributeList::AT_Format: handleFormatAttr (S, D, Attr); break;
case AttributeList::AT_FormatArg: handleFormatArgAttr (S, D, Attr); break;
case AttributeList::AT_CUDAGlobal: handleGlobalAttr (S, D, Attr); break;
@@ -4278,6 +4337,9 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ObjCReturnsInnerPointer:
handleObjCReturnsInnerPointerAttr(S, D, Attr); break;
+ case AttributeList::AT_ObjCRequiresSuper:
+ handleObjCRequiresSuperAttr(S, D, Attr); break;
+
case AttributeList::AT_NSBridged:
handleNSBridgedAttr(S, scope, D, Attr); break;
@@ -4360,6 +4422,7 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ThisCall:
case AttributeList::AT_Pascal:
case AttributeList::AT_Pcs:
+ case AttributeList::AT_PnaclCall:
handleCallConvAttr(S, D, Attr);
break;
case AttributeList::AT_OpenCLKernel:
@@ -4774,18 +4837,25 @@ static bool isDeclDeprecated(Decl *D) {
static void
DoEmitDeprecationWarning(Sema &S, const NamedDecl *D, StringRef Message,
SourceLocation Loc,
- const ObjCInterfaceDecl *UnknownObjCClass) {
+ const ObjCInterfaceDecl *UnknownObjCClass,
+                         const ObjCPropertyDecl *ObjCProperty) {
DeclarationName Name = D->getDeclName();
if (!Message.empty()) {
S.Diag(Loc, diag::warn_deprecated_message) << Name << Message;
S.Diag(D->getLocation(),
isa<ObjCMethodDecl>(D) ? diag::note_method_declared_at
: diag::note_previous_decl) << Name;
+    if (ObjCProperty)
+      S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+        << ObjCProperty->getDeclName() << 0;
} else if (!UnknownObjCClass) {
S.Diag(Loc, diag::warn_deprecated) << D->getDeclName();
S.Diag(D->getLocation(),
isa<ObjCMethodDecl>(D) ? diag::note_method_declared_at
: diag::note_previous_decl) << Name;
+    if (ObjCProperty)
+      S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+        << ObjCProperty->getDeclName() << 0;
} else {
S.Diag(Loc, diag::warn_deprecated_fwdclass_message) << Name;
S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
@@ -4800,16 +4870,19 @@ void Sema::HandleDelayedDeprecationCheck(DelayedDiagnostic &DD,
DD.Triggered = true;
DoEmitDeprecationWarning(*this, DD.getDeprecationDecl(),
DD.getDeprecationMessage(), DD.Loc,
- DD.getUnknownObjCClass());
+ DD.getUnknownObjCClass(),
+ DD.getObjCProperty());
}
void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message,
SourceLocation Loc,
- const ObjCInterfaceDecl *UnknownObjCClass) {
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty) {
// Delay if we're currently parsing a declaration.
if (DelayedDiagnostics.shouldDelayDiagnostics()) {
DelayedDiagnostics.add(DelayedDiagnostic::makeDeprecation(Loc, D,
UnknownObjCClass,
+ ObjCProperty,
Message));
return;
}
@@ -4817,5 +4890,5 @@ void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message,
// Otherwise, don't warn if our current context is deprecated.
if (isDeclDeprecated(cast<Decl>(getCurLexicalContext())))
return;
- DoEmitDeprecationWarning(*this, D, Message, Loc, UnknownObjCClass);
+ DoEmitDeprecationWarning(*this, D, Message, Loc, UnknownObjCClass, ObjCProperty);
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
index eeac9b8..16eddf8 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
@@ -246,8 +246,7 @@ Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
InitializationKind Kind = InitializationKind::CreateCopy(Param->getLocation(),
EqualLoc);
InitializationSequence InitSeq(*this, Entity, Kind, &Arg, 1);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &Arg, 1));
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Arg);
if (Result.isInvalid())
return true;
Arg = Result.takeAs<Expr>();
@@ -374,10 +373,10 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
}
}
-// MergeCXXFunctionDecl - Merge two declarations of the same C++
-// function, once we already know that they have the same
-// type. Subroutine of MergeFunctionDecl. Returns true if there was an
-// error, false otherwise.
+/// MergeCXXFunctionDecl - Merge two declarations of the same C++
+/// function, once we already know that they have the same
+/// type. Subroutine of MergeFunctionDecl. Returns true if there was an
+/// error, false otherwise.
bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
Scope *S) {
bool Invalid = false;
@@ -676,6 +675,20 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef,
return true;
}
+/// \brief Get the diagnostic %select index for a tag kind, for use in a
+/// record diagnostic message.
+/// WARNING: Indexes apply to particular diagnostics only!
+///
+/// \returns diagnostic %select index.
+static unsigned getRecordDiagFromTagKind(TagTypeKind Tag) {
+ switch (Tag) {
+ case TTK_Struct: return 0;
+ case TTK_Interface: return 1;
+ case TTK_Class: return 2;
+ default: llvm_unreachable("Invalid tag kind for record diagnostic!");
+ }
+}
+
// CheckConstexprFunctionDecl - Check whether a function declaration satisfies
// the requirements of a constexpr function definition or a constexpr
// constructor definition. If so, return true. If not, produce appropriate
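
getRecordDiagFromTagKind, added above, maps a TagTypeKind onto the %select
index shared by several record diagnostics, so one message can say struct,
__interface, or class as appropriate. The next hunk drives it like this
(assuming a diagnostic whose %select lists struct, interface, and class in
that order):

    Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base)
        << isa<CXXConstructorDecl>(NewFD)              // ctor vs. function
        << getRecordDiagFromTagKind(RD->getTagKind())  // 0/1/2 picks the tag word
        << RD->getNumVBases();
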
@@ -692,8 +705,8 @@ bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) {
const CXXRecordDecl *RD = MD->getParent();
if (RD->getNumVBases()) {
Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base)
- << isa<CXXConstructorDecl>(NewFD) << RD->isStruct()
- << RD->getNumVBases();
+ << isa<CXXConstructorDecl>(NewFD)
+ << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
E = RD->vbases_end(); I != E; ++I)
Diag(I->getLocStart(),
@@ -1005,6 +1018,41 @@ bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *,
return false;
}
+/// \brief Determine whether Class is a base class of Current, including
+/// looking at dependent bases.
+static bool findCircularInheritance(const CXXRecordDecl *Class,
+ const CXXRecordDecl *Current) {
+ SmallVector<const CXXRecordDecl*, 8> Queue;
+
+ Class = Class->getCanonicalDecl();
+ while (true) {
+ for (CXXRecordDecl::base_class_const_iterator I = Current->bases_begin(),
+ E = Current->bases_end();
+ I != E; ++I) {
+ CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
+ if (!Base)
+ continue;
+
+ Base = Base->getDefinition();
+ if (!Base)
+ continue;
+
+ if (Base->getCanonicalDecl() == Class)
+ return true;
+
+ Queue.push_back(Base);
+ }
+
+ if (Queue.empty())
+ return false;
+
+ Current = Queue.back();
+ Queue.pop_back();
+ }
+
+ return false;
+}
+
/// \brief Check the validity of a C++ base class specifier.
///
/// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics
@@ -1031,13 +1079,32 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
<< TInfo->getTypeLoc().getSourceRange();
EllipsisLoc = SourceLocation();
}
-
- if (BaseType->isDependentType())
+
+ SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
+
+ if (BaseType->isDependentType()) {
+ // Make sure that we don't have circular inheritance among our dependent
+ // bases. For non-dependent bases, the check for completeness below handles
+ // this.
+ if (CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl()) {
+ if (BaseDecl->getCanonicalDecl() == Class->getCanonicalDecl() ||
+ ((BaseDecl = BaseDecl->getDefinition()) &&
+ findCircularInheritance(Class, BaseDecl))) {
+ Diag(BaseLoc, diag::err_circular_inheritance)
+ << BaseType << Context.getTypeDeclType(Class);
+
+ if (BaseDecl->getCanonicalDecl() != Class->getCanonicalDecl())
+ Diag(BaseDecl->getLocation(), diag::note_previous_decl)
+ << BaseType;
+
+ return 0;
+ }
+ }
+
return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
Class->getTagKind() == TTK_Class,
Access, TInfo, EllipsisLoc);
-
- SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
+ }
// Base specifiers must be record types.
if (!BaseType->isRecordType()) {
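
findCircularInheritance gives the dependent-base path above a way to reject
inheritance cycles that the completeness check below cannot see. A sketch of
the code it is intended to catch (diagnostic wording approximate):

    template <typename T> struct X : X<T> { };  // error: circular inheritance
                                                // between 'X<T>' and 'X'
    template <typename T> struct B;
    template <typename T> struct A : B<T> { };
    template <typename T> struct B : A<T> { };  // error, with a note pointing
                                                // at the previous declaration
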
@@ -1165,10 +1232,21 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
// Okay, add this new base class.
KnownBase = Bases[idx];
Bases[NumGoodBases++] = Bases[idx];
- if (const RecordType *Record = NewBaseType->getAs<RecordType>())
- if (const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl()))
- if (RD->hasAttr<WeakAttr>())
- Class->addAttr(::new (Context) WeakAttr(SourceRange(), Context));
+ if (const RecordType *Record = NewBaseType->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (Class->isInterface() &&
+ (!RD->isInterface() ||
+ KnownBase->getAccessSpecifier() != AS_public)) {
+ // The Microsoft extension __interface does not permit bases that
+ // are not themselves public interfaces.
+ Diag(KnownBase->getLocStart(), diag::err_invalid_base_in_interface)
+ << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getName()
+ << RD->getSourceRange();
+ Invalid = true;
+ }
+ if (RD->hasAttr<WeakAttr>())
+ Class->addAttr(::new (Context) WeakAttr(SourceRange(), Context));
+ }
}
}
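
The loop above now also enforces the Microsoft __interface rule that every
base must itself be a public interface. A hedged sketch of what
err_invalid_base_in_interface rejects (exact wording may differ):

    __interface IGood { void f(); };
    struct SNot { };
    __interface IOk   : public IGood { };   // OK: public interface base
    __interface IBad1 : SNot { };           // error: base is not an interface
    __interface IBad2 : private IGood { };  // error: base must be public
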
@@ -1407,6 +1485,9 @@ bool Sema::ActOnAccessSpecifier(AccessSpecifier Access,
/// CheckOverrideControl - Check C++11 override control semantics.
void Sema::CheckOverrideControl(Decl *D) {
+ if (D->isInvalidDecl())
+ return;
+
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
// Do we know which functions this declaration might be overriding?
@@ -1496,6 +1577,50 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
bool isFunc = D.isDeclarationOfFunction();
+ if (cast<CXXRecordDecl>(CurContext)->isInterface()) {
+ // The Microsoft extension __interface only permits public member functions
+ // and prohibits constructors, destructors, operators, non-public member
+ // functions, static methods and data members.
+ unsigned InvalidDecl;
+ bool ShowDeclName = true;
+ if (!isFunc)
+ InvalidDecl = (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) ? 0 : 1;
+ else if (AS != AS_public)
+ InvalidDecl = 2;
+ else if (DS.getStorageClassSpec() == DeclSpec::SCS_static)
+ InvalidDecl = 3;
+ else switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ InvalidDecl = 4;
+ ShowDeclName = false;
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ InvalidDecl = 5;
+ ShowDeclName = false;
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXConversionFunctionName:
+ InvalidDecl = 6;
+ break;
+
+ default:
+ InvalidDecl = 0;
+ break;
+ }
+
+ if (InvalidDecl) {
+ if (ShowDeclName)
+ Diag(Loc, diag::err_invalid_member_in_interface)
+ << (InvalidDecl-1) << Name;
+ else
+ Diag(Loc, diag::err_invalid_member_in_interface)
+ << (InvalidDecl-1) << "";
+ return 0;
+ }
+ }
+
// C++ 9.2p6: A member shall not be declared to have automatic storage
// duration (auto, register) or with the extern storage-class-specifier.
// C++ 7.1.1p8: The mutable specifier can be applied only to names of class
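
That block is the member-level counterpart of the __interface base check:
InvalidDecl selects the %select arm of err_invalid_member_in_interface, and
ShowDeclName suppresses the name for constructors and destructors.
Illustrative declarations (diagnostic text approximate):

    __interface I {
      void f();          // OK: public, non-static member function
      typedef int Ty;    // OK: typedefs are explicitly permitted
      int data;          // error: data member
      static void g();   // error: static member function
      I();               // error: constructor
      ~I();              // error: destructor
      bool operator!();  // error: operator
    };
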
@@ -1548,7 +1673,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
   // A member field cannot be declared with the "template" keyword,
   // so TemplateParameterLists should be empty in this case.
if (TemplateParameterLists.size()) {
- TemplateParameterList* TemplateParams = TemplateParameterLists.get()[0];
+ TemplateParameterList* TemplateParams = TemplateParameterLists[0];
if (TemplateParams->size()) {
// There is no such thing as a member field template.
Diag(D.getIdentifierLoc(), diag::err_template_member)
@@ -1588,7 +1713,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
} else {
assert(InitStyle == ICIS_NoInit);
- Member = HandleDeclarator(S, D, move(TemplateParameterLists));
+ Member = HandleDeclarator(S, D, TemplateParameterLists);
if (!Member) {
return 0;
}
@@ -1662,6 +1787,99 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
return Member;
}
+namespace {
+ class UninitializedFieldVisitor
+ : public EvaluatedExprVisitor<UninitializedFieldVisitor> {
+ Sema &S;
+ ValueDecl *VD;
+ public:
+ typedef EvaluatedExprVisitor<UninitializedFieldVisitor> Inherited;
+ UninitializedFieldVisitor(Sema &S, ValueDecl *VD) : Inherited(S.Context),
+ S(S), VD(VD) {
+ }
+
+ void HandleExpr(Expr *E) {
+ if (!E) return;
+
+ // Expressions like x(x) sometimes lack the surrounding expressions
+      // but need to be checked anyway.
+ HandleValue(E);
+ Visit(E);
+ }
+
+ void HandleValue(Expr *E) {
+ E = E->IgnoreParens();
+
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (isa<EnumConstantDecl>(ME->getMemberDecl()))
+ return;
+ Expr *Base = E;
+ while (isa<MemberExpr>(Base)) {
+ ME = dyn_cast<MemberExpr>(Base);
+ if (VarDecl *VarD = dyn_cast<VarDecl>(ME->getMemberDecl()))
+ if (VarD->hasGlobalStorage())
+ return;
+ Base = ME->getBase();
+ }
+
+ if (VD == ME->getMemberDecl() && isa<CXXThisExpr>(Base)) {
+ unsigned diag = VD->getType()->isReferenceType()
+ ? diag::warn_reference_field_is_uninit
+ : diag::warn_field_is_uninit;
+ S.Diag(ME->getExprLoc(), diag) << ME->getMemberNameInfo().getName();
+ return;
+ }
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ HandleValue(CO->getTrueExpr());
+ HandleValue(CO->getFalseExpr());
+ return;
+ }
+
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ HandleValue(BCO->getCommon());
+ HandleValue(BCO->getFalseExpr());
+ return;
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+ default:
+ return;
+ case(BO_PtrMemD):
+ case(BO_PtrMemI):
+ HandleValue(BO->getLHS());
+ return;
+ case(BO_Comma):
+ HandleValue(BO->getRHS());
+ return;
+ }
+ }
+ }
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ if (E->getCastKind() == CK_LValueToRValue)
+ HandleValue(E->getSubExpr());
+
+ Inherited::VisitImplicitCastExpr(E);
+ }
+
+ void VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ Expr *Callee = E->getCallee();
+ if (isa<MemberExpr>(Callee))
+ HandleValue(Callee);
+
+ Inherited::VisitCXXMemberCallExpr(E);
+ }
+ };
+ static void CheckInitExprContainsUninitializedFields(Sema &S, Expr *E,
+ ValueDecl *VD) {
+ UninitializedFieldVisitor(S, VD).HandleExpr(E);
+ }
+} // namespace
+
/// ActOnCXXInClassMemberInitializer - This is invoked after parsing an
/// in-class initializer for a non-static C++ class member, and after
/// instantiating an in-class initializer in a class template. Such actions
@@ -1685,8 +1903,17 @@ Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation InitLoc,
return;
}
+ if (getDiagnostics().getDiagnosticLevel(diag::warn_field_is_uninit, InitLoc)
+ != DiagnosticsEngine::Ignored) {
+ CheckInitExprContainsUninitializedFields(*this, InitExpr, FD);
+ }
+
ExprResult Init = InitExpr;
- if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent()) {
+ if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent() &&
+ !FD->getDeclContext()->isDependentContext()) {
+ // Note: We don't type-check when we're in a dependent context, because
+ // the initialization-substitution code does not properly handle direct
+ // list initialization. We have the same hackaround for ctor-initializers.
if (isa<InitListExpr>(InitExpr) && isStdInitializerList(FD->getType(), 0)) {
Diag(FD->getLocation(), diag::warn_dangling_std_initializer_list)
<< /*at end of ctor*/1 << InitExpr->getSourceRange();
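
Running CheckInitExprContainsUninitializedFields from here extends the
existing ctor-initializer self-use check to in-class initializers, and the
relocated visitor above now distinguishes reference members via the new
warn_reference_field_is_uninit. A small sketch (warning text approximate):

    struct S {
      int a = a;    // warning: field 'a' is uninitialized when used here
      int &r = r;   // warning: reference 'r' is not yet bound to a value
                    // when used here
      int b = c;    // not diagnosed by this change; the FIXME in
      int c = 0;    // BuildMemberInitializer notes the general ordering case
    };
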
@@ -1795,7 +2022,8 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc) {
- Expr *List = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
+ Expr *List = new (Context) ParenListExpr(Context, LParenLoc,
+ llvm::makeArrayRef(Args, NumArgs),
RParenLoc);
return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
DS, IdLoc, List, EllipsisLoc);
@@ -2044,96 +2272,6 @@ static void CheckForDanglingReferenceOrPointer(Sema &S, ValueDecl *Member,
<< (unsigned)IsPointer;
}
-namespace {
- class UninitializedFieldVisitor
- : public EvaluatedExprVisitor<UninitializedFieldVisitor> {
- Sema &S;
- ValueDecl *VD;
- public:
- typedef EvaluatedExprVisitor<UninitializedFieldVisitor> Inherited;
- UninitializedFieldVisitor(Sema &S, ValueDecl *VD) : Inherited(S.Context),
- S(S), VD(VD) {
- }
-
- void HandleExpr(Expr *E) {
- if (!E) return;
-
- // Expressions like x(x) sometimes lack the surrounding expressions
- // but need to be checked anyways.
- HandleValue(E);
- Visit(E);
- }
-
- void HandleValue(Expr *E) {
- E = E->IgnoreParens();
-
- if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (isa<EnumConstantDecl>(ME->getMemberDecl()))
- return;
- Expr *Base = E;
- while (isa<MemberExpr>(Base)) {
- ME = dyn_cast<MemberExpr>(Base);
- if (VarDecl *VarD = dyn_cast<VarDecl>(ME->getMemberDecl()))
- if (VarD->hasGlobalStorage())
- return;
- Base = ME->getBase();
- }
-
- if (VD == ME->getMemberDecl() && isa<CXXThisExpr>(Base)) {
- S.Diag(ME->getExprLoc(), diag::warn_field_is_uninit);
- return;
- }
- }
-
- if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
- HandleValue(CO->getTrueExpr());
- HandleValue(CO->getFalseExpr());
- return;
- }
-
- if (BinaryConditionalOperator *BCO =
- dyn_cast<BinaryConditionalOperator>(E)) {
- HandleValue(BCO->getCommon());
- HandleValue(BCO->getFalseExpr());
- return;
- }
-
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
- switch (BO->getOpcode()) {
- default:
- return;
- case(BO_PtrMemD):
- case(BO_PtrMemI):
- HandleValue(BO->getLHS());
- return;
- case(BO_Comma):
- HandleValue(BO->getRHS());
- return;
- }
- }
- }
-
- void VisitImplicitCastExpr(ImplicitCastExpr *E) {
- if (E->getCastKind() == CK_LValueToRValue)
- HandleValue(E->getSubExpr());
-
- Inherited::VisitImplicitCastExpr(E);
- }
-
- void VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
- Expr *Callee = E->getCallee();
- if (isa<MemberExpr>(Callee))
- HandleValue(Callee);
-
- Inherited::VisitCXXMemberCallExpr(E);
- }
- };
- static void CheckInitExprContainsUninitializedFields(Sema &S, Expr *E,
- ValueDecl *VD) {
- UninitializedFieldVisitor(S, VD).HandleExpr(E);
- }
-} // namespace
-
MemInitResult
Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
SourceLocation IdLoc) {
@@ -2167,11 +2305,13 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
!= DiagnosticsEngine::Ignored)
for (unsigned i = 0; i < NumArgs; ++i)
// FIXME: Warn about the case when other fields are used before being
- // uninitialized. For example, let this field be the i'th field. When
+ // initialized. For example, let this field be the i'th field. When
// initializing the i'th field, throw a warning if any of the >= i'th
// fields are used, as they are not yet initialized.
// Right now we are only handling the case where the i'th field uses
// itself in its initializer.
+ // Also need to take into account that some fields may be initialized by
+ // in-class initializers, see C++11 [class.base.init]p9.
CheckInitExprContainsUninitializedFields(*this, Args[i], Member);
SourceRange InitRange = Init->getSourceRange();
@@ -2204,7 +2344,7 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
InitializationSequence InitSeq(*this, MemberEntity, Kind, Args, NumArgs);
ExprResult MemberInit = InitSeq.Perform(*this, MemberEntity, Kind,
- MultiExprArg(*this, Args, NumArgs),
+ MultiExprArg(Args, NumArgs),
0);
if (MemberInit.isInvalid())
return true;
@@ -2273,7 +2413,7 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
InitRange.getEnd());
InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args, NumArgs);
ExprResult DelegationInit = InitSeq.Perform(*this, DelegationEntity, Kind,
- MultiExprArg(*this, Args,NumArgs),
+ MultiExprArg(Args, NumArgs),
0);
if (DelegationInit.isInvalid())
return true;
@@ -2411,8 +2551,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
InitRange.getEnd());
InitializationSequence InitSeq(*this, BaseEntity, Kind, Args, NumArgs);
ExprResult BaseInit = InitSeq.Perform(*this, BaseEntity, Kind,
- MultiExprArg(*this, Args, NumArgs),
- 0);
+ MultiExprArg(Args, NumArgs), 0);
if (BaseInit.isInvalid())
return true;
@@ -2480,8 +2619,7 @@ BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
InitializationKind InitKind
= InitializationKind::CreateDefault(Constructor->getLocation());
InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, 0, 0);
- BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind,
- MultiExprArg(SemaRef, 0, 0));
+ BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, MultiExprArg());
break;
}
@@ -2936,7 +3074,11 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor,
NumInitializers * sizeof(CXXCtorInitializer*));
Constructor->setCtorInitializers(baseOrMemberInitializers);
}
-
+
+ // Let template instantiation know whether we had errors.
+ if (AnyErrors)
+ Constructor->setInvalidDecl();
+
return false;
}
@@ -3324,11 +3466,10 @@ void Sema::ActOnMemInitializers(Decl *ConstructorDecl,
} else {
assert(Init->isDelegatingInitializer());
// This must be the only initializer
- if (i != 0 || NumMemInits > 1) {
- Diag(MemInits[0]->getSourceLocation(),
+ if (NumMemInits != 1) {
+ Diag(Init->getSourceLocation(),
diag::err_delegating_initializer_alone)
- << MemInits[0]->getSourceRange();
- HadError = true;
+ << Init->getSourceRange() << MemInits[i ? 0 : 1]->getSourceRange();
// We will treat this as being the only initializer.
}
SetDelegatingInitializer(Constructor, MemInits[i]);
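
The reworked check above fires whenever a delegating initializer has any
sibling (NumMemInits != 1) and now anchors the diagnostic on the delegating
initializer itself, attaching the range of one of the other initializers.
For example (sketch):

    struct S {
      S(int);
      int n;
      S() : S(42), n(0) { }  // error: an initializer for a delegating
                             // constructor must appear alone
    };
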
@@ -3812,6 +3953,11 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
diag::warn_non_virtual_dtor) << Context.getRecordType(Record);
}
+ if (Record->isAbstract() && Record->hasAttr<FinalAttr>()) {
+ Diag(Record->getLocation(), diag::warn_abstract_final_class);
+ DiagnoseAbstractType(Record);
+ }
+
// See if a method overloads virtual methods in a base
/// class without overriding any.
if (!Record->isDependentType()) {
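
The new warning flags classes that are simultaneously abstract and final,
which can neither be instantiated nor derived from; DiagnoseAbstractType then
notes the unimplemented pure virtual methods. Roughly:

    struct Abstract final {  // warning: abstract class is marked 'final'
      virtual void f() = 0;  // note: unimplemented pure virtual method 'f'
    };
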
@@ -4065,7 +4211,7 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
// Compute argument constness, constexpr, and triviality.
bool CanHaveConstParam = false;
- bool Trivial;
+ bool Trivial = false;
switch (CSM) {
case CXXDefaultConstructor:
Trivial = RD->hasTrivialDefaultConstructor();
@@ -4304,7 +4450,7 @@ bool SpecialMemberDeletionInfo::isAccessible(Subobject Subobj,
/// If we're operating on a base class, the object type is the
/// type of this special member.
QualType objectTy;
- AccessSpecifier access = target->getAccess();;
+ AccessSpecifier access = target->getAccess();
if (CXXBaseSpecifier *base = Subobj.dyn_cast<CXXBaseSpecifier*>()) {
objectTy = S.Context.getTypeDeclType(MD->getParent());
access = CXXRecordDecl::MergeAccess(base->getAccessSpecifier(), access);
@@ -4647,6 +4793,19 @@ namespace {
};
}
+/// \brief Check whether any most-overridden method from MD is in Methods.
+static bool CheckMostOverridenMethods(const CXXMethodDecl *MD,
+ const llvm::SmallPtrSet<const CXXMethodDecl *, 8>& Methods) {
+ if (MD->size_overridden_methods() == 0)
+ return Methods.count(MD->getCanonicalDecl());
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I)
+ if (CheckMostOverridenMethods(*I, Methods))
+ return true;
+ return false;
+}
+
/// \brief Member lookup function that determines whether a given C++
/// method overloads virtual methods in a base class without overriding any,
/// to be used with CXXRecordDecl::lookupInBases().
@@ -4678,7 +4837,7 @@ static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier,
if (!Data.S->IsOverload(Data.Method, MD, false))
return true;
       // Collect the overload only if it's hidden.
- if (!Data.OverridenAndUsingBaseMethods.count(MD))
+ if (!CheckMostOverridenMethods(MD, Data.OverridenAndUsingBaseMethods))
overloadedMethods.push_back(MD);
}
}
@@ -4689,6 +4848,17 @@ static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier,
return foundSameNameMethod;
}
+/// \brief Add the most-overridden methods from MD to Methods.
+static void AddMostOverridenMethods(const CXXMethodDecl *MD,
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8>& Methods) {
+ if (MD->size_overridden_methods() == 0)
+ Methods.insert(MD->getCanonicalDecl());
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I)
+ AddMostOverridenMethods(*I, Methods);
+}
+
/// \brief See if a method overloads virtual methods in a base class without
/// overriding any.
void Sema::DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
@@ -4709,14 +4879,11 @@ void Sema::DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
// by 'using' in a set. A base method not in this set is hidden.
for (DeclContext::lookup_result res = DC->lookup(MD->getDeclName());
res.first != res.second; ++res.first) {
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*res.first))
- for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
- E = MD->end_overridden_methods();
- I != E; ++I)
- Data.OverridenAndUsingBaseMethods.insert((*I)->getCanonicalDecl());
+ NamedDecl *ND = *res.first;
if (UsingShadowDecl *shad = dyn_cast<UsingShadowDecl>(*res.first))
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(shad->getTargetDecl()))
- Data.OverridenAndUsingBaseMethods.insert(MD->getCanonicalDecl());
+ ND = shad->getTargetDecl();
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ AddMostOverridenMethods(MD, Data.OverridenAndUsingBaseMethods);
}
if (DC->lookupInBases(&FindHiddenVirtualMethod, &Data, Paths) &&
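
Previously only the directly-overridden base methods landed in
OverridenAndUsingBaseMethods, so a virtual function reached through an
intermediate override could still be reported as hidden. Walking to the
most-overridden methods on both the insert and the lookup side is meant to
fix cases like this sketch:

    struct A { virtual void f(int); };
    struct B : A { virtual void f(int); };  // overrides A::f
    struct C : B {
      virtual void f(int);  // overrides B::f; through the walk, A::f now
                            // counts as overridden as well
      void f(double);       // overload: no more spurious -Woverloaded-virtual
                            // report that it hides A::f(int)
    };
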
@@ -5307,7 +5474,47 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
// Namespace Handling
//===----------------------------------------------------------------------===//
+/// \brief Diagnose a mismatch in 'inline' qualifiers when a namespace is
+/// reopened.
+static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
+ SourceLocation Loc,
+ IdentifierInfo *II, bool *IsInline,
+ NamespaceDecl *PrevNS) {
+ assert(*IsInline != PrevNS->isInline());
+
+ // HACK: Work around a bug in libstdc++4.6's <atomic>, where
+ // std::__atomic[0,1,2] are defined as non-inline namespaces, then reopened as
+ // inline namespaces, with the intention of bringing names into namespace std.
+ //
+ // We support this just well enough to get that case working; this is not
+ // sufficient to support reopening namespaces as inline in general.
+ if (*IsInline && II && II->getName().startswith("__atomic") &&
+ S.getSourceManager().isInSystemHeader(Loc)) {
+ // Mark all prior declarations of the namespace as inline.
+ for (NamespaceDecl *NS = PrevNS->getMostRecentDecl(); NS;
+ NS = NS->getPreviousDecl())
+ NS->setInline(*IsInline);
+ // Patch up the lookup table for the containing namespace. This isn't really
+ // correct, but it's good enough for this particular case.
+ for (DeclContext::decl_iterator I = PrevNS->decls_begin(),
+ E = PrevNS->decls_end(); I != E; ++I)
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*I))
+ PrevNS->getParent()->makeDeclVisibleInContext(ND);
+ return;
+ }
+
+ if (PrevNS->isInline())
+ // The user probably just forgot the 'inline', so suggest that it
+ // be added back.
+ S.Diag(Loc, diag::warn_inline_namespace_reopened_noninline)
+ << FixItHint::CreateInsertion(KeywordLoc, "inline ");
+ else
+ S.Diag(Loc, diag::err_inline_namespace_mismatch)
+ << IsInline;
+ S.Diag(PrevNS->getLocation(), diag::note_previous_definition);
+ *IsInline = PrevNS->isInline();
+}
/// ActOnStartNamespaceDef - This is called at the start of a namespace
/// definition.
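
DiagnoseNamespaceInlineMismatch folds together the two copies of this
diagnostic that the hunks below delete, and adds the libstdc++ 4.6 <atomic>
workaround for the std::__atomic* namespaces reopened inside a system header.
The ordinary, non-workaround behavior, sketched:

    inline namespace N { int x; }
    namespace N { int y; }   // warning: inline namespace reopened as a
                             // non-inline namespace; fix-it inserts 'inline'
    namespace M { }
    inline namespace M { }   // error: non-inline namespace cannot be
                             // reopened as inline
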
@@ -5357,21 +5564,9 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
if (PrevNS) {
// This is an extended namespace definition.
- if (IsInline != PrevNS->isInline()) {
- // inline-ness must match
- if (PrevNS->isInline()) {
- // The user probably just forgot the 'inline', so suggest that it
- // be added back.
- Diag(Loc, diag::warn_inline_namespace_reopened_noninline)
- << FixItHint::CreateInsertion(NamespaceLoc, "inline ");
- } else {
- Diag(Loc, diag::err_inline_namespace_mismatch)
- << IsInline;
- }
- Diag(PrevNS->getLocation(), diag::note_previous_definition);
-
- IsInline = PrevNS->isInline();
- }
+ if (IsInline != PrevNS->isInline())
+ DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, Loc, II,
+ &IsInline, PrevNS);
} else if (PrevDecl) {
// This is an invalid name redefinition.
Diag(Loc, diag::err_redefinition_different_kind)
@@ -5402,15 +5597,9 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
PrevNS = ND->getAnonymousNamespace();
}
- if (PrevNS && IsInline != PrevNS->isInline()) {
- // inline-ness must match
- Diag(Loc, diag::err_inline_namespace_mismatch)
- << IsInline;
- Diag(PrevNS->getLocation(), diag::note_previous_definition);
-
- // Recover by ignoring the new namespace's inline status.
- IsInline = PrevNS->isInline();
- }
+ if (PrevNS && IsInline != PrevNS->isInline())
+ DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, NamespaceLoc, II,
+ &IsInline, PrevNS);
}
NamespaceDecl *Namespc = NamespaceDecl::Create(Context, CurContext, IsInline,
@@ -5460,15 +5649,15 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
if (!PrevNS) {
UsingDirectiveDecl* UD
- = UsingDirectiveDecl::Create(Context, CurContext,
+ = UsingDirectiveDecl::Create(Context, Parent,
/* 'using' */ LBrace,
/* 'namespace' */ SourceLocation(),
/* qualifier */ NestedNameSpecifierLoc(),
/* identifier */ SourceLocation(),
Namespc,
- /* Ancestor */ CurContext);
+ /* Ancestor */ Parent);
UD->setImplicit();
- CurContext->addDecl(UD);
+ Parent->addDecl(UD);
}
}
@@ -5697,7 +5886,8 @@ static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
if (DeclContext *DC = S.computeDeclContext(SS, false))
S.Diag(IdentLoc, diag::err_using_directive_member_suggest)
<< Ident << DC << CorrectedQuotedStr << SS.getRange()
- << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
+ << FixItHint::CreateReplacement(Corrected.getCorrectionRange(),
+ CorrectedStr);
else
S.Diag(IdentLoc, diag::err_using_directive_suggest)
<< Ident << CorrectedQuotedStr
@@ -6562,10 +6752,10 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S,
if (TemplateParamLists.size() != 1) {
Diag(UsingLoc, diag::err_alias_template_extra_headers)
- << SourceRange(TemplateParamLists.get()[1]->getTemplateLoc(),
- TemplateParamLists.get()[TemplateParamLists.size()-1]->getRAngleLoc());
+ << SourceRange(TemplateParamLists[1]->getTemplateLoc(),
+ TemplateParamLists[TemplateParamLists.size()-1]->getRAngleLoc());
}
- TemplateParameterList *TemplateParams = TemplateParamLists.get()[0];
+ TemplateParameterList *TemplateParams = TemplateParamLists[0];
// Only consider previous declarations in the same scope.
FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage*/false,
@@ -6696,28 +6886,6 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S,
return AliasDecl;
}
-namespace {
- /// \brief Scoped object used to handle the state changes required in Sema
- /// to implicitly define the body of a C++ member function;
- class ImplicitlyDefinedFunctionScope {
- Sema &S;
- Sema::ContextRAII SavedContext;
-
- public:
- ImplicitlyDefinedFunctionScope(Sema &S, CXXMethodDecl *Method)
- : S(S), SavedContext(S, Method)
- {
- S.PushFunctionScope();
- S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
- }
-
- ~ImplicitlyDefinedFunctionScope() {
- S.PopExpressionEvaluationContext();
- S.PopFunctionScopeInfo();
- }
- };
-}
-
Sema::ImplicitExceptionSpecification
Sema::ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD) {
@@ -6861,7 +7029,7 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXRecordDecl *ClassDecl = Constructor->getParent();
assert(ClassDecl && "DefineImplicitDefaultConstructor - invalid constructor");
- ImplicitlyDefinedFunctionScope Scope(*this, Constructor);
+ SynthesizedFunctionScope Scope(*this, Constructor);
DiagnosticErrorTrap Trap(Diags);
if (SetCtorInitializers(Constructor, 0, 0, /*AnyErrors=*/false) ||
Trap.hasErrorOccurred()) {
@@ -7173,7 +7341,7 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
if (Destructor->isInvalidDecl())
return;
- ImplicitlyDefinedFunctionScope Scope(*this, Destructor);
+ SynthesizedFunctionScope Scope(*this, Destructor);
DiagnosticErrorTrap Trap(Diags);
MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
@@ -7412,7 +7580,7 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
= new (S.Context) BinaryOperator(IterationVarRefRVal,
IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
BO_NE, S.Context.BoolTy,
- VK_RValue, OK_Ordinary, Loc);
+ VK_RValue, OK_Ordinary, Loc, false);
// Create the pre-increment of the iteration variable.
Expr *Increment
@@ -7654,7 +7822,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CopyAssignOperator->setUsed();
- ImplicitlyDefinedFunctionScope Scope(*this, CopyAssignOperator);
+ SynthesizedFunctionScope Scope(*this, CopyAssignOperator);
DiagnosticErrorTrap Trap(Diags);
// C++0x [class.copy]p30:
@@ -7666,7 +7834,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// which they were declared in the class definition.
// The statements that form the synthesized function body.
- ASTOwningVector<Stmt*> Statements(*this);
+ SmallVector<Stmt*, 8> Statements;
// The parameter for the "other" object, which we are copying from.
ParmVarDecl *Other = CopyAssignOperator->getParamDecl(0);
@@ -7846,8 +8014,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
}
CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy,
- CollectableMemCpy->getType(),
- VK_LValue, Loc, 0).take();
+ Context.BuiltinFnTy,
+ VK_RValue, Loc, 0).take();
assert(CollectableMemCpyRef && "Builtin reference cannot fail");
}
}
@@ -7866,12 +8034,12 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
}
BuiltinMemCpyRef = BuildDeclRefExpr(BuiltinMemCpy,
- BuiltinMemCpy->getType(),
- VK_LValue, Loc, 0).take();
+ Context.BuiltinFnTy,
+ VK_RValue, Loc, 0).take();
assert(BuiltinMemCpyRef && "Builtin reference cannot fail");
}
- ASTOwningVector<Expr*> CallArgs(*this);
+ SmallVector<Expr*, 8> CallArgs;
CallArgs.push_back(To.takeAs<Expr>());
CallArgs.push_back(From.takeAs<Expr>());
CallArgs.push_back(IntegerLiteral::Create(Context, Size, SizeType, Loc));
@@ -7879,12 +8047,12 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
if (NeedsCollectableMemCpy)
Call = ActOnCallExpr(/*Scope=*/0,
CollectableMemCpyRef,
- Loc, move_arg(CallArgs),
+ Loc, CallArgs,
Loc);
else
Call = ActOnCallExpr(/*Scope=*/0,
BuiltinMemCpyRef,
- Loc, move_arg(CallArgs),
+ Loc, CallArgs,
Loc);
assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!");
@@ -7934,7 +8102,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
StmtResult Body;
{
CompoundScopeRAII CompoundScope(*this);
- Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements),
+ Body = ActOnCompoundStmt(Loc, Loc, Statements,
/*isStmtExpr=*/false);
assert(!Body.isInvalid() && "Compound statement creation cannot fail");
}
@@ -8040,7 +8208,7 @@ hasMoveOrIsTriviallyCopyable(Sema &S, QualType Type, bool IsConstructor) {
// reference types, are supposed to return false here, but that appears
// to be a standard defect.
CXXRecordDecl *ClassDecl = Type->getAsCXXRecordDecl();
- if (!ClassDecl || !ClassDecl->getDefinition())
+ if (!ClassDecl || !ClassDecl->getDefinition() || ClassDecl->isInvalidDecl())
return true;
if (Type.isTriviallyCopyableType(S.Context))
@@ -8195,7 +8363,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
MoveAssignOperator->setUsed();
- ImplicitlyDefinedFunctionScope Scope(*this, MoveAssignOperator);
+ SynthesizedFunctionScope Scope(*this, MoveAssignOperator);
DiagnosticErrorTrap Trap(Diags);
// C++0x [class.copy]p28:
@@ -8207,7 +8375,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// definition.
// The statements that form the synthesized function body.
- ASTOwningVector<Stmt*> Statements(*this);
+ SmallVector<Stmt*, 8> Statements;
   // The parameter for the "other" object, which we are moving from.
ParmVarDecl *Other = MoveAssignOperator->getParamDecl(0);
@@ -8395,8 +8563,8 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
}
CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy,
- CollectableMemCpy->getType(),
- VK_LValue, Loc, 0).take();
+ Context.BuiltinFnTy,
+ VK_RValue, Loc, 0).take();
assert(CollectableMemCpyRef && "Builtin reference cannot fail");
}
}
@@ -8415,12 +8583,12 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
}
BuiltinMemCpyRef = BuildDeclRefExpr(BuiltinMemCpy,
- BuiltinMemCpy->getType(),
- VK_LValue, Loc, 0).take();
+ Context.BuiltinFnTy,
+ VK_RValue, Loc, 0).take();
assert(BuiltinMemCpyRef && "Builtin reference cannot fail");
}
- ASTOwningVector<Expr*> CallArgs(*this);
+ SmallVector<Expr*, 8> CallArgs;
CallArgs.push_back(To.takeAs<Expr>());
CallArgs.push_back(From.takeAs<Expr>());
CallArgs.push_back(IntegerLiteral::Create(Context, Size, SizeType, Loc));
@@ -8428,12 +8596,12 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
if (NeedsCollectableMemCpy)
Call = ActOnCallExpr(/*Scope=*/0,
CollectableMemCpyRef,
- Loc, move_arg(CallArgs),
+ Loc, CallArgs,
Loc);
else
Call = ActOnCallExpr(/*Scope=*/0,
BuiltinMemCpyRef,
- Loc, move_arg(CallArgs),
+ Loc, CallArgs,
Loc);
assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!");
@@ -8483,7 +8651,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
StmtResult Body;
{
CompoundScopeRAII CompoundScope(*this);
- Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements),
+ Body = ActOnCompoundStmt(Loc, Loc, Statements,
/*isStmtExpr=*/false);
assert(!Body.isInvalid() && "Compound statement creation cannot fail");
}
@@ -8691,7 +8859,7 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXRecordDecl *ClassDecl = CopyConstructor->getParent();
assert(ClassDecl && "DefineImplicitCopyConstructor - invalid constructor");
- ImplicitlyDefinedFunctionScope Scope(*this, CopyConstructor);
+ SynthesizedFunctionScope Scope(*this, CopyConstructor);
DiagnosticErrorTrap Trap(Diags);
if (SetCtorInitializers(CopyConstructor, 0, 0, /*AnyErrors=*/false) ||
@@ -8703,7 +8871,7 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
Sema::CompoundScopeRAII CompoundScope(*this);
CopyConstructor->setBody(ActOnCompoundStmt(CopyConstructor->getLocation(),
CopyConstructor->getLocation(),
- MultiStmtArg(*this, 0, 0),
+ MultiStmtArg(),
/*isStmtExpr=*/false)
.takeAs<Stmt>());
CopyConstructor->setImplicitlyDefined(true);
@@ -8874,7 +9042,7 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXRecordDecl *ClassDecl = MoveConstructor->getParent();
assert(ClassDecl && "DefineImplicitMoveConstructor - invalid constructor");
- ImplicitlyDefinedFunctionScope Scope(*this, MoveConstructor);
+ SynthesizedFunctionScope Scope(*this, MoveConstructor);
DiagnosticErrorTrap Trap(Diags);
if (SetCtorInitializers(MoveConstructor, 0, 0, /*AnyErrors=*/false) ||
@@ -8886,7 +9054,7 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
Sema::CompoundScopeRAII CompoundScope(*this);
MoveConstructor->setBody(ActOnCompoundStmt(MoveConstructor->getLocation(),
MoveConstructor->getLocation(),
- MultiStmtArg(*this, 0, 0),
+ MultiStmtArg(),
/*isStmtExpr=*/false)
.takeAs<Stmt>());
MoveConstructor->setImplicitlyDefined(true);
@@ -8926,7 +9094,7 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
Conv->setUsed();
- ImplicitlyDefinedFunctionScope Scope(*this, Conv);
+ SynthesizedFunctionScope Scope(*this, Conv);
DiagnosticErrorTrap Trap(Diags);
// Return the address of the __invoke function.
@@ -8959,7 +9127,7 @@ void Sema::DefineImplicitLambdaToBlockPointerConversion(
{
Conv->setUsed();
- ImplicitlyDefinedFunctionScope Scope(*this, Conv);
+ SynthesizedFunctionScope Scope(*this, Conv);
DiagnosticErrorTrap Trap(Diags);
// Copy-initialize the lambda object as needed to capture it.
@@ -9014,12 +9182,12 @@ static bool hasOneRealArgument(MultiExprArg Args) {
return false;
default:
- if (!Args.get()[1]->isDefaultArgument())
+ if (!Args[1]->isDefaultArgument())
return false;
// fall through
case 1:
- return !Args.get()[0]->isDefaultArgument();
+ return !Args[0]->isDefaultArgument();
}
return false;
@@ -9047,12 +9215,12 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
// directly into the target of the omitted copy/move
if (ConstructKind == CXXConstructExpr::CK_Complete &&
Constructor->isCopyOrMoveConstructor() && hasOneRealArgument(ExprArgs)) {
- Expr *SubExpr = ((Expr **)ExprArgs.get())[0];
+ Expr *SubExpr = ExprArgs[0];
Elidable = SubExpr->isTemporaryObject(Context, Constructor->getParent());
}
return BuildCXXConstructExpr(ConstructLoc, DeclInitType, Constructor,
- Elidable, move(ExprArgs), HadMultipleCandidates,
+ Elidable, ExprArgs, HadMultipleCandidates,
RequiresZeroInit, ConstructKind, ParenRange);
}
@@ -9066,12 +9234,9 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
bool RequiresZeroInit,
unsigned ConstructKind,
SourceRange ParenRange) {
- unsigned NumExprs = ExprArgs.size();
- Expr **Exprs = (Expr **)ExprArgs.release();
-
MarkFunctionReferenced(ConstructLoc, Constructor);
return Owned(CXXConstructExpr::Create(Context, DeclInitType, ConstructLoc,
- Constructor, Elidable, Exprs, NumExprs,
+ Constructor, Elidable, ExprArgs,
HadMultipleCandidates, /*FIXME*/false,
RequiresZeroInit,
static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
@@ -9085,7 +9250,7 @@ bool Sema::InitializeVarWithConstructor(VarDecl *VD,
// FIXME: Provide the correct paren SourceRange when available.
ExprResult TempResult =
BuildCXXConstructExpr(VD->getLocation(), VD->getType(), Constructor,
- move(Exprs), HadMultipleCandidates, false,
+ Exprs, HadMultipleCandidates, false,
CXXConstructExpr::CK_Complete, SourceRange());
if (TempResult.isInvalid())
return true;
@@ -9135,11 +9300,11 @@ bool
Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
- ASTOwningVector<Expr*> &ConvertedArgs,
+ SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit) {
// FIXME: This duplicates a lot of code from Sema::ConvertArgumentsForCall.
unsigned NumArgs = ArgsPtr.size();
- Expr **Args = (Expr **)ArgsPtr.get();
+ Expr **Args = ArgsPtr.data();
const FunctionProtoType *Proto
= Constructor->getType()->getAs<FunctionProtoType>();
@@ -9268,7 +9433,7 @@ CheckOperatorNewDeclaration(Sema &SemaRef, const FunctionDecl *FnDecl) {
}
static bool
-CheckOperatorDeleteDeclaration(Sema &SemaRef, const FunctionDecl *FnDecl) {
+CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) {
// C++ [basic.stc.dynamic.deallocation]p1:
// A program is ill-formed if deallocation functions are declared in a
// namespace scope other than global scope or declared static in global
@@ -9825,7 +9990,7 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
/// \brief Perform semantic analysis of the given friend type declaration.
///
 /// \returns A friend declaration.
-FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation Loc,
+FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo) {
assert(TSInfo && "NULL TypeSourceInfo for friend type declaration");
@@ -9864,7 +10029,7 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation Loc,
diag::warn_cxx98_compat_nonclass_type_friend :
diag::ext_nonclass_type_friend)
<< T
- << SourceRange(FriendLoc, TypeRange.getEnd());
+ << TypeRange;
}
} else if (T->getAs<EnumType>()) {
Diag(FriendLoc,
@@ -9872,18 +10037,22 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation Loc,
diag::warn_cxx98_compat_enum_friend :
diag::ext_enum_friend)
<< T
- << SourceRange(FriendLoc, TypeRange.getEnd());
+ << TypeRange;
}
- // C++0x [class.friend]p3:
+ // C++11 [class.friend]p3:
+ // A friend declaration that does not declare a function shall have one
+ // of the following forms:
+ // friend elaborated-type-specifier ;
+ // friend simple-type-specifier ;
+ // friend typename-specifier ;
+ if (getLangOpts().CPlusPlus0x && LocStart != FriendLoc)
+ Diag(FriendLoc, diag::err_friend_not_first_in_declaration) << T;
+
// If the type specifier in a friend declaration designates a (possibly
- // cv-qualified) class type, that class is declared as a friend; otherwise,
+ // cv-qualified) class type, that class is declared as a friend; otherwise,
// the friend declaration is ignored.
-
- // FIXME: C++0x has some syntactic restrictions on friend type declarations
- // in [class.friend]p3 that we do not implement.
-
- return FriendDecl::Create(Context, CurContext, Loc, TSInfo, FriendLoc);
+ return FriendDecl::Create(Context, CurContext, LocStart, TSInfo, FriendLoc);
}
/// Handle a friend tag declaration where the scope specifier was
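
The new C++11 check compares the start of the declaration with the location
of 'friend': in a non-function friend declaration, anything written before
the keyword now draws err_friend_not_first_in_declaration. A hedged example
(exact wording and accepted forms may differ):

    struct X { };
    struct S {
      friend X;   // OK in C++11: 'friend simple-type-specifier ;'
      X friend;   // error: 'friend' must appear first in the declaration
    };
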
@@ -9901,7 +10070,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(TagLoc, NameLoc, SS,
- TempParamLists.get(),
+ TempParamLists.data(),
TempParamLists.size(),
/*friend*/ true,
isExplicitSpecialization,
@@ -9916,7 +10085,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
TemplateParams, AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
TempParamLists.size() - 1,
- (TemplateParameterList**) TempParamLists.release()).take();
+ TempParamLists.data()).take();
} else {
// The "template<>" header is extraneous.
Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
@@ -9929,7 +10098,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
bool isAllExplicitSpecializations = true;
for (unsigned I = TempParamLists.size(); I-- > 0; ) {
- if (TempParamLists.get()[I]->size()) {
+ if (TempParamLists[I]->size()) {
isAllExplicitSpecializations = false;
break;
}
@@ -10076,7 +10245,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
if (unsigned NumTempParamLists = TempParams.size())
D = FriendTemplateDecl::Create(Context, CurContext, Loc,
NumTempParamLists,
- TempParams.release(),
+ TempParams.data(),
TSI,
DS.getFriendSpecLoc());
else
@@ -10318,7 +10487,7 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
bool AddToScope = true;
NamedDecl *ND = ActOnFunctionDeclarator(DCScope, D, DC, TInfo, Previous,
- move(TemplateParams), AddToScope);
+ TemplateParams, AddToScope);
if (!ND) return 0;
assert(ND->getDeclContext() == DC);
@@ -10435,10 +10604,10 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
// If this definition appears within the record, do the checking when
// the record is complete.
const FunctionDecl *Primary = MD;
- if (MD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
+ if (const FunctionDecl *Pattern = MD->getTemplateInstantiationPattern())
// Find the uninstantiated declaration that actually had the '= default'
// on it.
- MD->getTemplateInstantiationPattern()->isDefined(Primary);
+ Pattern->isDefined(Primary);
if (Primary == Primary->getCanonicalDecl())
return;
@@ -10963,14 +11132,16 @@ void DelegatingCycleHelper(CXXConstructorDecl* Ctor,
if (Ctor->isInvalidDecl())
return;
- const FunctionDecl *FNTarget = 0;
- CXXConstructorDecl *Target;
-
- // We ignore the result here since if we don't have a body, Target will be
- // null below.
- (void)Ctor->getTargetConstructor()->hasBody(FNTarget);
- Target
-= const_cast<CXXConstructorDecl*>(cast_or_null<CXXConstructorDecl>(FNTarget));
+ CXXConstructorDecl *Target = Ctor->getTargetConstructor();
+
+ // Target may not be determinable yet, for instance if this is a dependent
+ // call in an uninstantiated template.
+ if (Target) {
+ const FunctionDecl *FNTarget = 0;
+ (void)Target->hasBody(FNTarget);
+ Target = const_cast<CXXConstructorDecl*>(
+ cast_or_null<CXXConstructorDecl>(FNTarget));
+ }
CXXConstructorDecl *Canonical = Ctor->getCanonicalDecl(),
// Avoid dereferencing a null pointer here.
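
getTargetConstructor can legitimately return null here, for instance for a
dependent delegating call inside a template that has not been instantiated,
so the hasBody walk is now guarded. A sketch of the kind of code that
exercises the null path:

    template <typename T> struct S {
      S(T v);
      S() : S(T()) { }  // delegation target is unknowable until S<T> is
                        // instantiated; Target is null when the cycle
                        // checker runs over the template pattern
    };
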
@@ -10994,17 +11165,18 @@ void DelegatingCycleHelper(CXXConstructorDecl* Ctor,
diag::warn_delegating_ctor_cycle)
<< Ctor;
- // Don't add a note for a function delegating directo to itself.
+ // Don't add a note for a function delegating directly to itself.
if (TCanonical != Canonical)
S.Diag(Target->getLocation(), diag::note_it_delegates_to);
CXXConstructorDecl *C = Target;
while (C->getCanonicalDecl() != Canonical) {
+ const FunctionDecl *FNTarget = 0;
(void)C->getTargetConstructor()->hasBody(FNTarget);
assert(FNTarget && "Ctor cycle through bodiless function");
- C
- = const_cast<CXXConstructorDecl*>(cast<CXXConstructorDecl>(FNTarget));
+ C = const_cast<CXXConstructorDecl*>(
+ cast<CXXConstructorDecl>(FNTarget));
S.Diag(C->getLocation(), diag::note_which_delegates_to);
}
}
@@ -11027,9 +11199,8 @@ void Sema::CheckDelegatingCtorCycles() {
for (DelegatingCtorDeclsType::iterator
I = DelegatingCtorDecls.begin(ExternalSource),
E = DelegatingCtorDecls.end();
- I != E; ++I) {
- DelegatingCycleHelper(*I, Valid, Invalid, Current, *this);
- }
+ I != E; ++I)
+ DelegatingCycleHelper(*I, Valid, Invalid, Current, *this);
for (CI = Invalid.begin(), CE = Invalid.end(); CI != CE; ++CI)
(*CI)->setInvalidDecl();
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
index 9da4d69..c4e91e8 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -282,6 +282,25 @@ void Sema::AddAnyMethodToGlobalPool(Decl *D) {
AddFactoryMethodToGlobalPool(MDecl, true);
}
+/// HasExplicitOwnershipAttr - returns true when a pointer to an ObjC pointer
+/// has an explicit ownership attribute; false otherwise.
+static bool
+HasExplicitOwnershipAttr(Sema &S, ParmVarDecl *Param) {
+ QualType T = Param->getType();
+
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ T = PT->getPointeeType();
+ } else if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
+ T = RT->getPointeeType();
+ } else {
+ return true;
+ }
+
+ // If we have a lifetime qualifier, but it's local, we must have
+ // inferred it. So, it is implicit.
+ return !T.getLocalQualifiers().hasObjCLifetime();
+}
+
/// ActOnStartOfObjCMethodDef - This routine sets up parameters; invisible
/// and user declared, in the method definition's AST.
void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
@@ -313,6 +332,12 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
RequireCompleteType(Param->getLocation(), Param->getType(),
diag::err_typecheck_decl_incomplete_type))
Param->setInvalidDecl();
+ if (!Param->isInvalidDecl() &&
+ getLangOpts().ObjCAutoRefCount &&
+ !HasExplicitOwnershipAttr(*this, Param))
+ Diag(Param->getLocation(), diag::warn_arc_strong_pointer_objc_pointer) <<
+ Param->getType();
+
if ((*PI)->getIdentifier())
PushOnScopeChains(*PI, FnBodyScope);
}
@@ -345,8 +370,10 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
// Warn on deprecated methods under -Wdeprecated-implementations,
// and prepare for warning on missing super calls.
if (ObjCInterfaceDecl *IC = MDecl->getClassInterface()) {
- if (ObjCMethodDecl *IMD =
- IC->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod()))
+ ObjCMethodDecl *IMD =
+ IC->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod());
+
+ if (IMD)
DiagnoseObjCImplementedDeprecations(*this,
dyn_cast<NamedDecl>(IMD),
MDecl->getLocation(), 0);
@@ -356,13 +383,23 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
// Finally, in ActOnFinishFunctionBody() (SemaDecl), warn if flag is set.
// Only do this if the current class actually has a superclass.
if (IC->getSuperClass()) {
- getCurFunction()->ObjCShouldCallSuperDealloc =
- !(Context.getLangOpts().ObjCAutoRefCount ||
- Context.getLangOpts().getGC() == LangOptions::GCOnly) &&
- MDecl->getMethodFamily() == OMF_dealloc;
- getCurFunction()->ObjCShouldCallSuperFinalize =
- Context.getLangOpts().getGC() != LangOptions::NonGC &&
- MDecl->getMethodFamily() == OMF_finalize;
+ ObjCMethodFamily Family = MDecl->getMethodFamily();
+ if (Family == OMF_dealloc) {
+ if (!(getLangOpts().ObjCAutoRefCount ||
+ getLangOpts().getGC() == LangOptions::GCOnly))
+ getCurFunction()->ObjCShouldCallSuper = true;
+
+ } else if (Family == OMF_finalize) {
+ if (Context.getLangOpts().getGC() != LangOptions::NonGC)
+ getCurFunction()->ObjCShouldCallSuper = true;
+
+ } else {
+ const ObjCMethodDecl *SuperMethod =
+ IC->getSuperClass()->lookupMethod(MDecl->getSelector(),
+ MDecl->isInstanceMethod());
+ getCurFunction()->ObjCShouldCallSuper =
+ (SuperMethod && SuperMethod->hasAttr<ObjCRequiresSuperAttr>());
+ }
}
}
}
@@ -510,7 +547,7 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
// Check then save referenced protocols.
if (NumProtoRefs) {
- IDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
+ IDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
ProtoLocs, Context);
IDecl->setEndOfDefinitionLoc(EndProtoLoc);
}
@@ -652,7 +689,7 @@ Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
if (!err && NumProtoRefs ) {
/// Check then save referenced protocols.
- PDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
+ PDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
ProtoLocs, Context);
}
@@ -819,11 +856,11 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
CurContext->addDecl(CDecl);
if (NumProtoRefs) {
- CDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
+ CDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
ProtoLocs, Context);
// Protocols in the class extension belong to the class.
if (CDecl->IsClassExtension())
- IDecl->mergeClassExtensionProtocolList((ObjCProtocolDecl**)ProtoRefs,
+ IDecl->mergeClassExtensionProtocolList((ObjCProtocolDecl*const*)ProtoRefs,
NumProtoRefs, Context);
}
@@ -1545,9 +1582,9 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
E = PDecl->instmeth_end(); I != E; ++I) {
ObjCMethodDecl *method = *I;
if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
- !method->isSynthesized() && !InsMap.count(method->getSelector()) &&
- (!Super ||
- !Super->lookupInstanceMethod(method->getSelector()))) {
+ !method->isPropertyAccessor() &&
+ !InsMap.count(method->getSelector()) &&
+ (!Super || !Super->lookupInstanceMethod(method->getSelector()))) {
// If a method is not implemented in the category implementation but
// has been declared in its primary class, superclass,
// or in one of their protocols, no need to issue the warning.
@@ -1560,7 +1597,7 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
if (ObjCMethodDecl *MethodInClass =
IDecl->lookupInstanceMethod(method->getSelector(),
true /*shallowCategoryLookup*/))
- if (C || MethodInClass->isSynthesized())
+ if (C || MethodInClass->isPropertyAccessor())
continue;
unsigned DIAG = diag::warn_unimplemented_protocol_method;
if (Diags.getDiagnosticLevel(DIAG, ImpLoc)
@@ -1621,7 +1658,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (InsMapSeen.count((*I)->getSelector()))
continue;
InsMapSeen.insert((*I)->getSelector());
- if (!(*I)->isSynthesized() &&
+ if (!(*I)->isPropertyAccessor() &&
!InsMap.count((*I)->getSelector())) {
if (ImmediateClass)
WarnUndefinedMethod(IMPDecl->getLocation(), *I, IncompleteImpl,
@@ -1638,7 +1675,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (!WarnCategoryMethodImpl)
WarnConflictingTypedMethods(ImpMethodDecl, MethodDecl,
isa<ObjCProtocolDecl>(CDecl));
- else if (!MethodDecl->isSynthesized())
+ else if (!MethodDecl->isPropertyAccessor())
WarnExactTypedMethods(ImpMethodDecl, MethodDecl,
isa<ObjCProtocolDecl>(CDecl));
}
@@ -1672,14 +1709,26 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
}
if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
- // Also methods in class extensions need be looked at next.
- for (const ObjCCategoryDecl *ClsExtDecl = I->getFirstClassExtension();
- ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension())
- MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
- IMPDecl,
- const_cast<ObjCCategoryDecl *>(ClsExtDecl),
- IncompleteImpl, false,
- WarnCategoryMethodImpl);
+    // When checking that methods in an implementation match their
+    // declarations (i.e. when WarnCategoryMethodImpl is false), check
+    // declarations in class extensions as well as those in categories.
+ if (!WarnCategoryMethodImpl)
+ for (const ObjCCategoryDecl *CDeclChain = I->getCategoryList();
+ CDeclChain; CDeclChain = CDeclChain->getNextClassCategory())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ const_cast<ObjCCategoryDecl *>(CDeclChain),
+ IncompleteImpl, false,
+ WarnCategoryMethodImpl);
+ else
+ // Also methods in class extensions need be looked at next.
+ for (const ObjCCategoryDecl *ClsExtDecl = I->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ const_cast<ObjCCategoryDecl *>(ClsExtDecl),
+ IncompleteImpl, false,
+ WarnCategoryMethodImpl);
// Check for any implementation of a methods declared in protocol.
for (ObjCInterfaceDecl::all_protocol_iterator
@@ -2339,11 +2388,11 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
CExtDecl; CExtDecl = CExtDecl->getNextClassExtension()) {
if (ObjCMethodDecl *GetterMethod =
CExtDecl->getInstanceMethod(Property->getGetterName()))
- GetterMethod->setSynthesized(true);
+ GetterMethod->setPropertyAccessor(true);
if (!Property->isReadOnly())
if (ObjCMethodDecl *SetterMethod =
CExtDecl->getInstanceMethod(Property->getSetterName()))
- SetterMethod->setSynthesized(true);
+ SetterMethod->setPropertyAccessor(true);
}
}
}
@@ -2435,26 +2484,49 @@ CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) {
}
static inline
+unsigned countAlignAttr(const AttrVec &A) {
+  unsigned count = 0;
+ for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i)
+ if ((*i)->getKind() == attr::Aligned)
+ ++count;
+ return count;
+}
+
+static inline
bool containsInvalidMethodImplAttribute(ObjCMethodDecl *IMD,
const AttrVec &A) {
// If method is only declared in implementation (private method),
// No need to issue any diagnostics on method definition with attributes.
if (!IMD)
return false;
-
+
// method declared in interface has no attribute.
- // But implementation has attributes. This is invalid
+ // But implementation has attributes. This is invalid.
+  // Except when the implementation has an 'Aligned' attribute, which is
+  // immaterial to the method declared in the interface.
if (!IMD->hasAttrs())
- return true;
+ return (A.size() > countAlignAttr(A));
const AttrVec &D = IMD->getAttrs();
- if (D.size() != A.size())
- return true;
+ unsigned countAlignOnImpl = countAlignAttr(A);
+ if (!countAlignOnImpl && (A.size() != D.size()))
+ return true;
+ else if (countAlignOnImpl) {
+ unsigned countAlignOnDecl = countAlignAttr(D);
+ if (countAlignOnDecl && (A.size() != D.size()))
+ return true;
+ else if (!countAlignOnDecl &&
+ ((A.size()-countAlignOnImpl) != D.size()))
+ return true;
+ }
+
// attributes on method declaration and definition must match exactly.
// Note that we have at most a couple of attributes on methods, so this
// n*n search is good enough.
for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i) {
+ if ((*i)->getKind() == attr::Aligned)
+ continue;
bool match = false;
for (AttrVec::const_iterator i1 = D.begin(), e1 = D.end(); i1 != e1; ++i1) {
if ((*i)->getKind() == (*i1)->getKind()) {
@@ -2465,6 +2537,7 @@ bool containsInvalidMethodImplAttribute(ObjCMethodDecl *IMD,
if (!match)
return true;
}
+
return false;
}
@@ -2525,7 +2598,7 @@ public:
// with this selector before.
Sema::GlobalMethodPool::iterator it = S.MethodPool.find(selector);
if (it == S.MethodPool.end()) {
- if (!S.ExternalSource) return;
+ if (!S.getExternalSource()) return;
S.ReadMethodPool(selector);
it = S.MethodPool.find(selector);
@@ -2767,7 +2840,7 @@ Decl *Sema::ActOnMethodDeclaration(
ResultTInfo,
CurContext,
MethodType == tok::minus, isVariadic,
- /*isSynthesized=*/false,
+ /*isPropertyAccessor=*/false,
/*isImplicitlyDeclared=*/false, /*isDefined=*/false,
MethodDeclKind == tok::objc_optional
? ObjCMethodDecl::Optional
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
index e6266fb..e1f4888 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -120,6 +120,24 @@ Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) {
return SourceDecl->getType()->castAs<FunctionProtoType>();
}
+/// Determine whether a function has an implicitly-generated exception
+/// specification.
+static bool hasImplicitExceptionSpec(FunctionDecl *Decl) {
+ if (!isa<CXXDestructorDecl>(Decl) &&
+ Decl->getDeclName().getCXXOverloadedOperator() != OO_Delete &&
+ Decl->getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ // If the user didn't declare the function, its exception specification must
+ // be implicit.
+ if (!Decl->getTypeSourceInfo())
+ return true;
+
+ const FunctionProtoType *Ty =
+ Decl->getTypeSourceInfo()->getType()->getAs<FunctionProtoType>();
+ return !Ty->hasExceptionSpec();
+}
+
bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
OverloadedOperatorKind OO = New->getDeclName().getCXXOverloadedOperator();
bool IsOperatorNew = OO == OO_New || OO == OO_Array_New;
@@ -129,25 +147,35 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
if (getLangOpts().MicrosoftExt)
DiagID = diag::warn_mismatched_exception_spec;
- if (!CheckEquivalentExceptionSpec(PDiag(DiagID),
- PDiag(diag::note_previous_declaration),
- Old->getType()->getAs<FunctionProtoType>(),
- Old->getLocation(),
- New->getType()->getAs<FunctionProtoType>(),
- New->getLocation(),
- &MissingExceptionSpecification,
- &MissingEmptyExceptionSpecification,
- /*AllowNoexceptAllMatchWithNoSpec=*/true,
- IsOperatorNew))
+ // Check the types as written: they must match before any exception
+ // specification adjustment is applied.
+ if (!CheckEquivalentExceptionSpec(
+ PDiag(DiagID), PDiag(diag::note_previous_declaration),
+ Old->getType()->getAs<FunctionProtoType>(), Old->getLocation(),
+ New->getType()->getAs<FunctionProtoType>(), New->getLocation(),
+ &MissingExceptionSpecification, &MissingEmptyExceptionSpecification,
+ /*AllowNoexceptAllMatchWithNoSpec=*/true, IsOperatorNew)) {
+ // C++11 [except.spec]p4 [DR1492]:
+ // If a declaration of a function has an implicit
+ // exception-specification, other declarations of the function shall
+ // not specify an exception-specification.
+ if (getLangOpts().CPlusPlus0x &&
+ hasImplicitExceptionSpec(Old) != hasImplicitExceptionSpec(New)) {
+ Diag(New->getLocation(), diag::ext_implicit_exception_spec_mismatch)
+ << hasImplicitExceptionSpec(Old);
+ if (!Old->getLocation().isInvalid())
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ }
return false;
+ }
// The failure was something other than an empty exception
// specification; return an error.
if (!MissingExceptionSpecification && !MissingEmptyExceptionSpecification)
return true;
- const FunctionProtoType *NewProto
- = New->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *NewProto =
+ New->getType()->getAs<FunctionProtoType>();
// The new function declaration is only missing an empty exception
// specification "throw()". If the throw() specification came from a
@@ -172,8 +200,8 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
}
if (MissingExceptionSpecification && NewProto) {
- const FunctionProtoType *OldProto
- = Old->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *OldProto =
+ Old->getType()->getAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
EPI.ExceptionSpecType = OldProto->getExceptionSpecType();
@@ -290,14 +318,17 @@ bool Sema::CheckEquivalentExceptionSpec(
unsigned DiagID = diag::err_mismatched_exception_spec;
if (getLangOpts().MicrosoftExt)
DiagID = diag::warn_mismatched_exception_spec;
- return CheckEquivalentExceptionSpec(
- PDiag(DiagID),
+ return CheckEquivalentExceptionSpec(PDiag(DiagID),
PDiag(diag::note_previous_declaration),
Old, OldLoc, New, NewLoc);
}
/// CheckEquivalentExceptionSpec - Check if the two types have compatible
/// exception specifications. See C++ [except.spec]p3.
+///
+/// \return \c false if the exception specifications match, \c true if there is
+/// a problem. If \c true is returned, either a diagnostic has already been
+/// produced or \c *MissingExceptionSpecification is set to \c true.
bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
const PartialDiagnostic & NoteID,
const FunctionProtoType *Old,
@@ -1029,6 +1060,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::PseudoObjectExprClass:
case Expr::SubstNonTypeTemplateParmExprClass:
case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::FunctionParmPackExprClass:
case Expr::UnaryExprOrTypeTraitExprClass:
case Expr::UnresolvedLookupExprClass:
case Expr::UnresolvedMemberExprClass:
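
For reference, a minimal C++11 sketch of the [except.spec]p4 (DR1492) situation the new hasImplicitExceptionSpec-based check above is meant to diagnose; the type name is illustrative, and treat this as a plausible trigger rather than a test case from the patch:

struct A {
  ~A();              // no spec written, so the exception specification is implicit
};
A::~A() noexcept {}  // explicit spec on another declaration: now diagnosed
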
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
index 3875ba1..bf4abfc 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -66,6 +66,15 @@ bool Sema::CanUseDecl(NamedDecl *D) {
return true;
}
+static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) {
+ // Warn if this is used but marked unused.
+ if (D->hasAttr<UnusedAttr>()) {
+ const Decl *DC = cast<Decl>(S.getCurObjCLexicalContext());
+ if (!DC->hasAttr<UnusedAttr>())
+ S.Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName();
+ }
+}
+
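
A minimal sketch of what -Wused-but-marked-unused flags (the new helper above additionally suppresses it when the enclosing Objective-C context is itself marked unused); the function names here are illustrative:

__attribute__((unused)) static int helper() { return 0; }
int caller() { return helper(); } // warning: 'helper' was marked unused but was used
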
static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S,
NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass) {
@@ -78,6 +87,17 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S,
if (const EnumDecl *TheEnumDecl = dyn_cast<EnumDecl>(DC))
Result = TheEnumDecl->getAvailability(&Message);
}
+
+ const ObjCPropertyDecl *ObjCPDecl = 0;
+ if (Result == AR_Deprecated || Result == AR_Unavailable) {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
+ AvailabilityResult PDeclResult = PD->getAvailability(0);
+ if (PDeclResult == Result)
+ ObjCPDecl = PD;
+ }
+ }
+ }
switch (Result) {
case AR_Available:
@@ -85,23 +105,30 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S,
break;
case AR_Deprecated:
- S.EmitDeprecationWarning(D, Message, Loc, UnknownObjCClass);
+ S.EmitDeprecationWarning(D, Message, Loc, UnknownObjCClass, ObjCPDecl);
break;
case AR_Unavailable:
if (S.getCurContextAvailability() != AR_Unavailable) {
if (Message.empty()) {
- if (!UnknownObjCClass)
+ if (!UnknownObjCClass) {
S.Diag(Loc, diag::err_unavailable) << D->getDeclName();
+ if (ObjCPDecl)
+ S.Diag(ObjCPDecl->getLocation(), diag::note_property_attribute)
+ << ObjCPDecl->getDeclName() << 1;
+ }
else
S.Diag(Loc, diag::warn_unavailable_fwdclass_message)
<< D->getDeclName();
}
- else
+ else
S.Diag(Loc, diag::err_unavailable_message)
<< D->getDeclName() << Message;
- S.Diag(D->getLocation(), diag::note_unavailable_here)
- << isa<FunctionDecl>(D) << false;
+ S.Diag(D->getLocation(), diag::note_unavailable_here)
+ << isa<FunctionDecl>(D) << false;
+ if (ObjCPDecl)
+ S.Diag(ObjCPDecl->getLocation(), diag::note_property_attribute)
+ << ObjCPDecl->getDeclName() << 1;
}
break;
}
@@ -250,9 +277,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
}
DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass);
- // Warn if this is used but marked unused.
- if (D->hasAttr<UnusedAttr>())
- Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName();
+ DiagnoseUnusedOfDecl(*this, D, Loc);
diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
@@ -502,7 +527,7 @@ ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E) {
Res = DefaultLvalueConversion(Res.take());
if (Res.isInvalid())
return ExprError();
- return move(Res);
+ return Res;
}
@@ -1098,8 +1123,8 @@ Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc,
unsigned NumAssocs = ArgTypes.size();
assert(NumAssocs == ArgExprs.size());
- ParsedType *ParsedTypes = ArgTypes.release();
- Expr **Exprs = ArgExprs.release();
+ ParsedType *ParsedTypes = ArgTypes.data();
+ Expr **Exprs = ArgExprs.data();
TypeSourceInfo **Types = new TypeSourceInfo*[NumAssocs];
for (unsigned i = 0; i < NumAssocs; ++i) {
@@ -1185,8 +1210,9 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
if (IsResultDependent)
return Owned(new (Context) GenericSelectionExpr(
Context, KeyLoc, ControllingExpr,
- Types, Exprs, NumAssocs, DefaultLoc,
- RParenLoc, ContainsUnexpandedParameterPack));
+ llvm::makeArrayRef(Types, NumAssocs),
+ llvm::makeArrayRef(Exprs, NumAssocs),
+ DefaultLoc, RParenLoc, ContainsUnexpandedParameterPack));
SmallVector<unsigned, 1> CompatIndices;
unsigned DefaultIndex = -1U;
@@ -1240,8 +1266,9 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
return Owned(new (Context) GenericSelectionExpr(
Context, KeyLoc, ControllingExpr,
- Types, Exprs, NumAssocs, DefaultLoc,
- RParenLoc, ContainsUnexpandedParameterPack,
+ llvm::makeArrayRef(Types, NumAssocs),
+ llvm::makeArrayRef(Exprs, NumAssocs),
+ DefaultLoc, RParenLoc, ContainsUnexpandedParameterPack,
ResultIndex));
}
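
Much of this patch is the same mechanical migration: (pointer, count) argument pairs become llvm::ArrayRef views, often via llvm::makeArrayRef. A minimal self-contained sketch of the idiom (function names are illustrative, not from the patch):

#include "llvm/ADT/ArrayRef.h"

// The callee takes a non-owning view instead of a pointer plus a count.
static int sum(llvm::ArrayRef<int> Vals) {
  int Total = 0;
  for (size_t I = 0, E = Vals.size(); I != E; ++I)
    Total += Vals[I];
  return Total;
}

int caller(int *Data, unsigned N) {
  return sum(llvm::makeArrayRef(Data, N)); // wraps the buffer; no copy made
}
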
@@ -1402,6 +1429,15 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
MarkDeclRefReferenced(E);
+ if (getLangOpts().ObjCARCWeak && isa<VarDecl>(D) &&
+ Ty.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ DiagnosticsEngine::Level Level =
+ Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
+ E->getLocStart());
+ if (Level != DiagnosticsEngine::Ignored)
+ getCurFunction()->recordUseOfWeak(E);
+ }
+
// Just in case we're building an illegal pointer-to-member.
FieldDecl *FD = dyn_cast<FieldDecl>(D);
if (FD && FD->isBitField())
@@ -1428,11 +1464,9 @@ Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id,
Buffer.setLAngleLoc(Id.TemplateId->LAngleLoc);
Buffer.setRAngleLoc(Id.TemplateId->RAngleLoc);
- ASTTemplateArgsPtr TemplateArgsPtr(*this,
- Id.TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(Id.TemplateId->getTemplateArgs(),
Id.TemplateId->NumArgs);
translateTemplateArguments(TemplateArgsPtr, Buffer);
- TemplateArgsPtr.release();
TemplateName TName = Id.TemplateId->Template.get();
SourceLocation TNameLoc = Id.TemplateId->TemplateNameLoc;
@@ -1606,7 +1640,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
Diag(R.getNameLoc(), diag::err_no_member_suggest)
<< Name << computeDeclContext(SS, false) << CorrectedQuotedStr
<< SS.getRange()
- << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ << FixItHint::CreateReplacement(Corrected.getCorrectionRange(),
+ CorrectedStr);
if (ND)
Diag(ND->getLocation(), diag::note_previous_decl)
<< CorrectedQuotedStr;
@@ -1797,7 +1832,7 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
// lookup fails and no expression will be built to reference it.
if (!E.isInvalid() && !E.get())
return ExprError();
- return move(E);
+ return E;
}
}
}
@@ -1860,9 +1895,10 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
/// this path.
ExprResult
Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
- const DeclarationNameInfo &NameInfo) {
- DeclContext *DC;
- if (!(DC = computeDeclContext(SS, false)) || DC->isDependentContext())
+ const DeclarationNameInfo &NameInfo,
+ bool IsAddressOfOperand) {
+ DeclContext *DC = computeDeclContext(SS, false);
+ if (!DC)
return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
NameInfo, /*TemplateArgs=*/0);
@@ -1875,13 +1911,26 @@ Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
if (R.isAmbiguous())
return ExprError();
+ if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, /*TemplateArgs=*/0);
+
if (R.empty()) {
Diag(NameInfo.getLoc(), diag::err_no_member)
<< NameInfo.getName() << DC << SS.getRange();
return ExprError();
}
- return BuildDeclarationNameExpr(SS, R, /*ADL*/ false);
+ // Defend against this resolving to an implicit member access. We usually
+ // won't get here if this might be a legitimate class member (we end up in
+ // BuildMemberReferenceExpr instead), but this can be valid if we're forming
+ // a pointer-to-member or in an unevaluated context in C++11.
+ if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand)
+ return BuildPossibleImplicitMemberExpr(SS,
+ /*TemplateKWLoc=*/SourceLocation(),
+ R, /*TemplateArgs=*/0);
+
+ return BuildDeclarationNameExpr(SS, R, /* ADL */ false);
}
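
The comment above describes when a qualified member name is legitimate outside a member access; a small C++11 sketch of both permitted forms (the type is illustrative):

struct S { int m; };
int S::*pm = &S::m;     // forming a pointer-to-member
int n = sizeof(S::m);   // C++11: an unevaluated operand may name a member
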
/// LookupInObjCMethod - The parser has read a name in, and Sema has
@@ -1965,9 +2014,25 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
ObjCMethodFamily MF = CurMethod->getMethodFamily();
if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize)
Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName();
- return Owned(new (Context)
- ObjCIvarRefExpr(IV, IV->getType(), Loc,
- SelfExpr.take(), true, true));
+
+ ObjCIvarRefExpr *Result = new (Context) ObjCIvarRefExpr(IV, IV->getType(),
+ Loc,
+ SelfExpr.take(),
+ true, true);
+
+ if (getLangOpts().ObjCAutoRefCount) {
+ if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ DiagnosticsEngine::Level Level =
+ Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, Loc);
+ if (Level != DiagnosticsEngine::Ignored)
+ getCurFunction()->recordUseOfWeak(Result);
+ }
+ if (CurContext->isClosure())
+ Diag(Loc, diag::warn_implicitly_retains_self)
+ << FixItHint::CreateInsertion(Loc, "self->");
+ }
+
+ return Owned(Result);
}
} else if (CurMethod->isInstanceMethod()) {
// We should warn if a local variable hides an ivar.
@@ -2416,6 +2481,14 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
}
case Decl::Function: {
+ if (unsigned BID = cast<FunctionDecl>(VD)->getBuiltinID()) {
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BID)) {
+ type = Context.BuiltinFnTy;
+ valueKind = VK_RValue;
+ break;
+ }
+ }
+
const FunctionType *fty = type->castAs<FunctionType>();
// If we're referring to a function with an __unknown_anytype
@@ -2615,19 +2688,20 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
return ActOnIntegerConstant(Tok.getLocation(), Val-'0');
}
- SmallString<512> IntegerBuffer;
- // Add padding so that NumericLiteralParser can overread by one character.
- IntegerBuffer.resize(Tok.getLength()+1);
- const char *ThisTokBegin = &IntegerBuffer[0];
+ SmallString<128> SpellingBuffer;
+ // NumericLiteralParser wants to overread by one character. Add padding to
+ // the buffer in case the token is copied into it. If getSpelling() instead
+ // returns a StringRef into the memory buffer, that buffer already has a
+ // null character at EOF, so overreading is safe there as well.
+ SpellingBuffer.resize(Tok.getLength() + 1);
// Get the spelling of the token, which eliminates trigraphs, etc.
bool Invalid = false;
- unsigned ActualLength = PP.getSpelling(Tok, ThisTokBegin, &Invalid);
+ StringRef TokSpelling = PP.getSpelling(Tok, SpellingBuffer, &Invalid);
if (Invalid)
return ExprError();
- NumericLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength,
- Tok.getLocation(), PP);
+ NumericLiteralParser Literal(TokSpelling, Tok.getLocation(), PP);
if (Literal.hadError)
return ExprError();
@@ -2693,7 +2767,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
Context.CharTy, llvm::APInt(32, Length + 1),
ArrayType::Normal, 0);
Expr *Lit = StringLiteral::Create(
- Context, StringRef(ThisTokBegin, Length), StringLiteral::Ascii,
+ Context, StringRef(TokSpelling.data(), Length), StringLiteral::Ascii,
/*Pascal*/false, StrTy, &TokLoc, 1);
return BuildLiteralOperatorCall(R, OpNameInfo,
llvm::makeArrayRef(&Lit, 1), TokLoc);
@@ -2709,7 +2783,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
bool CharIsUnsigned = Context.CharTy->isUnsignedIntegerType();
llvm::APSInt Value(CharBits, CharIsUnsigned);
for (unsigned I = 0, N = Literal.getUDSuffixOffset(); I != N; ++I) {
- Value = ThisTokBegin[I];
+ Value = TokSpelling[I];
TemplateArgument Arg(Context, Value, Context.CharTy);
TemplateArgumentLocInfo ArgInfo;
ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
@@ -2747,11 +2821,15 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
} else {
QualType Ty;
- // long long is a C99 feature.
- if (!getLangOpts().C99 && Literal.isLongLong)
- Diag(Tok.getLocation(),
- getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_longlong : diag::ext_longlong);
+ // 'long long' is a C99 or C++11 feature.
+ if (!getLangOpts().C99 && Literal.isLongLong) {
+ if (getLangOpts().CPlusPlus)
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
+ else
+ Diag(Tok.getLocation(), diag::ext_c99_longlong);
+ }
// Get the value in the widest-possible width.
unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth();
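
The split diagnostics can be seen with a plain literal; the warning-group names below are how Clang exposes them, to the best of my knowledge:

// C++98: -Wlong-long extension warning (ext_cxx11_longlong);
// C++11: only -Wc++98-compat fires; C89: ext_c99_longlong.
long long big = 123456789012345LL;
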
@@ -3140,7 +3218,7 @@ Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
Expr *ArgEx = (Expr *)TyOrEx;
ExprResult Result = CreateUnaryExprOrTypeTraitExpr(ArgEx, OpLoc, ExprKind);
- return move(Result);
+ return Result;
}
static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc,
@@ -3167,7 +3245,7 @@ static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc,
ExprResult PR = S.CheckPlaceholderExpr(V.get());
if (PR.isInvalid()) return QualType();
if (PR.get() != V.get()) {
- V = move(PR);
+ V = PR;
return CheckRealImagOperand(S, V, Loc, IsReal);
}
@@ -3442,8 +3520,7 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
Expr *ResultE = Result.takeAs<Expr>();
InitializationSequence InitSeq(*this, Entity, Kind, &ResultE, 1);
- Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &ResultE, 1));
+ Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
if (Result.isInvalid())
return ExprError();
@@ -3776,28 +3853,25 @@ ExprResult
Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig, bool IsExecConfig) {
- unsigned NumArgs = ArgExprs.size();
-
// Since this might be a postfix expression, get rid of ParenListExprs.
ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Fn);
if (Result.isInvalid()) return ExprError();
Fn = Result.take();
- Expr **Args = ArgExprs.release();
-
if (getLangOpts().CPlusPlus) {
// If this is a pseudo-destructor expression, build the call immediately.
if (isa<CXXPseudoDestructorExpr>(Fn)) {
- if (NumArgs > 0) {
+ if (!ArgExprs.empty()) {
// Pseudo-destructor calls should not have any arguments.
Diag(Fn->getLocStart(), diag::err_pseudo_dtor_call_with_args)
<< FixItHint::CreateRemoval(
- SourceRange(Args[0]->getLocStart(),
- Args[NumArgs-1]->getLocEnd()));
+ SourceRange(ArgExprs[0]->getLocStart(),
+ ArgExprs.back()->getLocEnd()));
}
- return Owned(new (Context) CallExpr(Context, Fn, 0, 0, Context.VoidTy,
- VK_RValue, RParenLoc));
+ return Owned(new (Context) CallExpr(Context, Fn, MultiExprArg(),
+ Context.VoidTy, VK_RValue,
+ RParenLoc));
}
// Determine whether this is a dependent call inside a C++ template,
@@ -3807,17 +3881,16 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
bool Dependent = false;
if (Fn->isTypeDependent())
Dependent = true;
- else if (Expr::hasAnyTypeDependentArguments(
- llvm::makeArrayRef(Args, NumArgs)))
+ else if (Expr::hasAnyTypeDependentArguments(ArgExprs))
Dependent = true;
if (Dependent) {
if (ExecConfig) {
return Owned(new (Context) CUDAKernelCallExpr(
- Context, Fn, cast<CallExpr>(ExecConfig), Args, NumArgs,
+ Context, Fn, cast<CallExpr>(ExecConfig), ArgExprs,
Context.DependentTy, VK_RValue, RParenLoc));
} else {
- return Owned(new (Context) CallExpr(Context, Fn, Args, NumArgs,
+ return Owned(new (Context) CallExpr(Context, Fn, ArgExprs,
Context.DependentTy, VK_RValue,
RParenLoc));
}
@@ -3825,8 +3898,9 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
// Determine whether this is a call to an object (C++ [over.call.object]).
if (Fn->getType()->isRecordType())
- return Owned(BuildCallToObjectOfClassType(S, Fn, LParenLoc, Args, NumArgs,
- RParenLoc));
+ return Owned(BuildCallToObjectOfClassType(S, Fn, LParenLoc,
+ ArgExprs.data(),
+ ArgExprs.size(), RParenLoc));
if (Fn->getType() == Context.UnknownAnyTy) {
ExprResult result = rebuildUnknownAnyFunction(*this, Fn);
@@ -3835,8 +3909,8 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
}
if (Fn->getType() == Context.BoundMemberTy) {
- return BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs,
- RParenLoc);
+ return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs.data(),
+ ArgExprs.size(), RParenLoc);
}
}
@@ -3849,11 +3923,11 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
OverloadExpr *ovl = find.Expression;
if (isa<UnresolvedLookupExpr>(ovl)) {
UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(ovl);
- return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, Args, NumArgs,
- RParenLoc, ExecConfig);
+ return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, ArgExprs.data(),
+ ArgExprs.size(), RParenLoc, ExecConfig);
} else {
- return BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs,
- RParenLoc);
+ return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs.data(),
+ ArgExprs.size(), RParenLoc);
}
}
}
@@ -3877,8 +3951,9 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
else if (isa<MemberExpr>(NakedFn))
NDecl = cast<MemberExpr>(NakedFn)->getMemberDecl();
- return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, Args, NumArgs, RParenLoc,
- ExecConfig, IsExecConfig);
+ return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs.data(),
+ ArgExprs.size(), RParenLoc, ExecConfig,
+ IsExecConfig);
}
ExprResult
@@ -3932,9 +4007,19 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation RParenLoc,
Expr *Config, bool IsExecConfig) {
FunctionDecl *FDecl = dyn_cast_or_null<FunctionDecl>(NDecl);
+ unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0);
// Promote the function operand.
- ExprResult Result = UsualUnaryConversions(Fn);
+ // We special-case function promotion here because we only allow promoting
+ // builtin functions to function pointers in the callee of a call.
+ ExprResult Result;
+ if (BuiltinID &&
+ Fn->getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn)) {
+ Result = ImpCastExprToType(Fn, Context.getPointerType(FDecl->getType()),
+ CK_BuiltinFnToFnPtr).take();
+ } else {
+ Result = UsualUnaryConversions(Fn);
+ }
if (Result.isInvalid())
return ExprError();
Fn = Result.take();
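
A sketch of the user-visible rule: a builtin with no library counterpart now has BuiltinFn type and may only appear in callee position, where it is implicitly promoted to a function pointer. __builtin_classify_type is one plausible example of such a builtin:

int k = __builtin_classify_type(1.0);         // OK: callee position, promoted here
// int (*p)(...) = __builtin_classify_type;   // would now be rejected:
//                                            // builtin functions must be called
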
@@ -3945,19 +4030,17 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (Config)
TheCall = new (Context) CUDAKernelCallExpr(Context, Fn,
cast<CallExpr>(Config),
- Args, NumArgs,
+ llvm::makeArrayRef(Args,NumArgs),
Context.BoolTy,
VK_RValue,
RParenLoc);
else
TheCall = new (Context) CallExpr(Context, Fn,
- Args, NumArgs,
+ llvm::makeArrayRef(Args, NumArgs),
Context.BoolTy,
VK_RValue,
RParenLoc);
- unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0);
-
// Bail out early if calling a builtin with custom typechecking.
if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID))
return CheckBuiltinFunctionCall(BuiltinID, TheCall);
@@ -4143,9 +4226,8 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
SourceRange(LParenLoc, RParenLoc),
/*InitList=*/true);
InitializationSequence InitSeq(*this, Entity, Kind, &LiteralExpr, 1);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &LiteralExpr, 1),
- &literalType);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, LiteralExpr,
+ &literalType);
if (Result.isInvalid())
return ExprError();
LiteralExpr = Result.get();
@@ -4167,28 +4249,25 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
ExprResult
Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
SourceLocation RBraceLoc) {
- unsigned NumInit = InitArgList.size();
- Expr **InitList = InitArgList.release();
-
// Immediately handle non-overload placeholders. Overloads can be
// resolved contextually, but everything else here can't.
- for (unsigned I = 0; I != NumInit; ++I) {
- if (InitList[I]->getType()->isNonOverloadPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(InitList[I]);
+ for (unsigned I = 0, E = InitArgList.size(); I != E; ++I) {
+ if (InitArgList[I]->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(InitArgList[I]);
// Ignore failures; dropping the entire initializer list because
// of one failure would be terrible for indexing/etc.
if (result.isInvalid()) continue;
- InitList[I] = result.take();
+ InitArgList[I] = result.take();
}
}
// Semantic analysis for initializers is done by ActOnDeclarator() and
// CheckInitializer() - it requires knowledge of the object being initialized.
- InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitList,
- NumInit, RBraceLoc);
+ InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitArgList,
+ RBraceLoc);
E->setType(Context.VoidTy); // FIXME: just a place holder for now.
return Owned(E);
}
@@ -4575,8 +4654,7 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
// FIXME: This means that pretty-printing the final AST will produce curly
// braces instead of the original commas.
InitListExpr *initE = new (Context) InitListExpr(Context, LParenLoc,
- &initExprs[0],
- initExprs.size(), RParenLoc);
+ initExprs, RParenLoc);
initE->setType(Ty);
return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE);
}
@@ -4603,10 +4681,8 @@ Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
ExprResult Sema::ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val) {
- unsigned nexprs = Val.size();
- Expr **exprs = reinterpret_cast<Expr**>(Val.release());
- assert((exprs != 0) && "ActOnParenOrParenListExpr() missing expr list");
- Expr *expr = new (Context) ParenListExpr(Context, L, exprs, nexprs, R);
+ assert(Val.data() != 0 && "ActOnParenOrParenListExpr() missing expr list");
+ Expr *expr = new (Context) ParenListExpr(Context, L, Val, R);
return Owned(expr);
}
@@ -4884,11 +4960,11 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
ExprResult LHSResult = CheckPlaceholderExpr(LHS.get());
if (!LHSResult.isUsable()) return QualType();
- LHS = move(LHSResult);
+ LHS = LHSResult;
ExprResult RHSResult = CheckPlaceholderExpr(RHS.get());
if (!RHSResult.isUsable()) return QualType();
- RHS = move(RHSResult);
+ RHS = RHSResult;
// C++ is sufficiently different to merit its own checker.
if (getLangOpts().CPlusPlus)
@@ -5247,7 +5323,7 @@ static void DiagnoseConditionalPrecedence(Sema &Self,
<< BinaryOperator::getOpcodeStr(CondOpcode);
SuggestParentheses(Self, OpLoc,
- Self.PDiag(diag::note_precedence_conditional_silence)
+ Self.PDiag(diag::note_precedence_silence)
<< BinaryOperator::getOpcodeStr(CondOpcode),
SourceRange(Condition->getLocStart(), Condition->getLocEnd()));
@@ -5811,8 +5887,7 @@ static void ConstructTransparentUnion(Sema &S, ASTContext &C,
// of the transparent union.
Expr *E = EResult.take();
InitListExpr *Initializer = new (C) InitListExpr(C, SourceLocation(),
- &E, 1,
- SourceLocation());
+ E, SourceLocation());
Initializer->setType(UnionType);
Initializer->setInitializedFieldInUnion(Field);
@@ -5910,7 +5985,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
!CheckObjCARCUnavailableWeakConversion(LHSType,
RHS.get()->getType()))
result = IncompatibleObjCWeakRef;
- RHS = move(Res);
+ RHS = Res;
return result;
}
@@ -6708,7 +6783,7 @@ static void diagnoseFunctionPointerToVoidComparison(Sema &S, SourceLocation Loc,
}
static bool isObjCObjectLiteral(ExprResult &E) {
- switch (E.get()->getStmtClass()) {
+ switch (E.get()->IgnoreParenImpCasts()->getStmtClass()) {
case Stmt::ObjCArrayLiteralClass:
case Stmt::ObjCDictionaryLiteralClass:
case Stmt::ObjCStringLiteralClass:
@@ -6800,6 +6875,7 @@ static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
LK_String
} LiteralKind;
+ Literal = Literal->IgnoreParenImpCasts();
switch (Literal->getStmtClass()) {
case Stmt::ObjCStringLiteralClass:
// "string literal"
@@ -7202,7 +7278,10 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
(LHSType->isIntegerType() && RHSType->isAnyPointerType())) {
unsigned DiagID = 0;
bool isError = false;
- if ((LHSIsNull && LHSType->isIntegerType()) ||
+ if (LangOpts.DebuggerSupport) {
+ // Under a debugger, allow the comparison of pointers to integers,
+ // since users tend to want to compare addresses.
+ } else if ((LHSIsNull && LHSType->isIntegerType()) ||
(RHSIsNull && RHSType->isIntegerType())) {
if (IsRelational && !getLangOpts().CPlusPlus)
DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero;
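
A sketch of what the DebuggerSupport carve-out permits; outside debugger expression evaluation this comparison draws a pointer/integer diagnostic:

bool atKnownAddress(int *p) {
  return p == 0x1000;  // accepted only when debugger support is enabled
}
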
@@ -7419,12 +7498,12 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult LHSRes = PerformContextuallyConvertToBool(LHS.get());
if (LHSRes.isInvalid())
return InvalidOperands(Loc, LHS, RHS);
- LHS = move(LHSRes);
+ LHS = LHSRes;
ExprResult RHSRes = PerformContextuallyConvertToBool(RHS.get());
if (RHSRes.isInvalid())
return InvalidOperands(Loc, LHS, RHS);
- RHS = move(RHSRes);
+ RHS = RHSRes;
// C++ [expr.log.and]p2
// C++ [expr.log.or]p2
@@ -7683,10 +7762,31 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
}
if (ConvTy == Compatible) {
- if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong)
- checkRetainCycles(LHSExpr, RHS.get());
- else if (getLangOpts().ObjCAutoRefCount)
+ if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Warn about retain cycles where a block captures the LHS, but
+ // not if the LHS is a simple variable into which the block is
+ // being stored...unless that variable can be captured by reference!
+ const Expr *InnerLHS = LHSExpr->IgnoreParenCasts();
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(InnerLHS);
+ if (!DRE || DRE->getDecl()->hasAttr<BlocksAttr>())
+ checkRetainCycles(LHSExpr, RHS.get());
+
+ // It is safe to assign a weak reference into a strong variable.
+ // Although this code can still have problems:
+ // id x = self.weakProp;
+ // id y = self.weakProp;
+ // we do not warn, to avoid warning spuriously when 'x' and 'y' are on
+ // separate paths through the function. This should be revisited if
+ // -Wrepeated-use-of-weak is made flow-sensitive.
+ DiagnosticsEngine::Level Level =
+ Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
+ RHS.get()->getLocStart());
+ if (Level != DiagnosticsEngine::Ignored)
+ getCurFunction()->markSafeWeakUse(RHS.get());
+
+ } else if (getLangOpts().ObjCAutoRefCount) {
checkUnsafeExprAssigns(Loc, LHSExpr, RHS.get());
+ }
}
} else {
// Compound assignment "x += y"
@@ -7972,8 +8072,16 @@ static QualType CheckAddressOfOperand(Sema &S, ExprResult &OrigOp,
// The method was named without a qualifier.
} else if (!DRE->getQualifier()) {
- S.Diag(OpLoc, diag::err_unqualified_pointer_member_function)
- << op->getSourceRange();
+ if (MD->getParent()->getName().empty())
+ S.Diag(OpLoc, diag::err_unqualified_pointer_member_function)
+ << op->getSourceRange();
+ else {
+ SmallString<32> Str;
+ StringRef Qual = (MD->getParent()->getName() + "::").toStringRef(Str);
+ S.Diag(OpLoc, diag::err_unqualified_pointer_member_function)
+ << op->getSourceRange()
+ << FixItHint::CreateInsertion(op->getSourceRange().getBegin(), Qual);
+ }
}
return S.Context.getMemberPointerType(op->getType(),
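
The improved diagnostic now carries a fix-it inserting the missing class qualifier; a minimal reproduction:

struct S {
  void f();
  void g() {
    void (S::*p)() = &f;  // error: must use &S::f; the fix-it inserts "S::"
    (void)p;
  }
};
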
@@ -8216,8 +8324,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
InitializedEntity Entity =
InitializedEntity::InitializeTemporary(LHSExpr->getType());
InitializationSequence InitSeq(*this, Entity, Kind, &RHSExpr, 1);
- ExprResult Init = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(&RHSExpr, 1));
+ ExprResult Init = InitSeq.Perform(*this, Entity, Kind, RHSExpr);
if (Init.isInvalid())
return Init;
RHSExpr = Init.take();
@@ -8340,7 +8447,8 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
if (CompResultTy.isNull())
return Owned(new (Context) BinaryOperator(LHS.take(), RHS.take(), Opc,
- ResultTy, VK, OK, OpLoc));
+ ResultTy, VK, OK, OpLoc,
+ FPFeatures.fp_contract));
if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() !=
OK_ObjCProperty) {
VK = VK_LValue;
@@ -8348,7 +8456,8 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
}
return Owned(new (Context) CompoundAssignOperator(LHS.take(), RHS.take(), Opc,
ResultTy, VK, OK, CompLHSTy,
- CompResultTy, OpLoc));
+ CompResultTy, OpLoc,
+ FPFeatures.fp_contract));
}
/// DiagnoseBitwisePrecedence - Emit a warning when bitwise and comparison
@@ -8383,8 +8492,8 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
SourceRange DiagRange = isLeftComp ? SourceRange(LHSExpr->getLocStart(),
OpLoc)
: SourceRange(OpLoc, RHSExpr->getLocEnd());
- std::string OpStr = isLeftComp ? BinOp::getOpcodeStr(LHSopc)
- : BinOp::getOpcodeStr(RHSopc);
+ StringRef OpStr = isLeftComp ? BinOp::getOpcodeStr(LHSopc)
+ : BinOp::getOpcodeStr(RHSopc);
SourceRange ParensRange = isLeftComp ?
SourceRange(cast<BinOp>(LHSExpr)->getRHS()->getLocStart(),
RHSExpr->getLocEnd())
@@ -8394,7 +8503,7 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel)
<< DiagRange << BinOp::getOpcodeStr(Opc) << OpStr;
SuggestParentheses(Self, OpLoc,
- Self.PDiag(diag::note_precedence_bitwise_silence) << OpStr,
+ Self.PDiag(diag::note_precedence_silence) << OpStr,
(isLeftComp ? LHSExpr : RHSExpr)->getSourceRange());
SuggestParentheses(Self, OpLoc,
Self.PDiag(diag::note_precedence_bitwise_first) << BinOp::getOpcodeStr(Opc),
@@ -8411,7 +8520,8 @@ EmitDiagnosticForBitwiseAndInBitwiseOr(Sema &Self, SourceLocation OpLoc,
Self.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_and_in_bitwise_or)
<< Bop->getSourceRange() << OpLoc;
SuggestParentheses(Self, Bop->getOperatorLoc(),
- Self.PDiag(diag::note_bitwise_and_in_bitwise_or_silence),
+ Self.PDiag(diag::note_precedence_silence)
+ << Bop->getOpcodeStr(),
Bop->getSourceRange());
}
@@ -8425,7 +8535,8 @@ EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc,
Self.Diag(Bop->getOperatorLoc(), diag::warn_logical_and_in_logical_or)
<< Bop->getSourceRange() << OpLoc;
SuggestParentheses(Self, Bop->getOperatorLoc(),
- Self.PDiag(diag::note_logical_and_in_logical_or_silence),
+ Self.PDiag(diag::note_precedence_silence)
+ << Bop->getOpcodeStr(),
Bop->getSourceRange());
}
@@ -8489,6 +8600,20 @@ static void DiagnoseBitwiseAndInBitwiseOr(Sema &S, SourceLocation OpLoc,
}
}
+static void DiagnoseAdditionInShift(Sema &S, SourceLocation OpLoc,
+ Expr *SubExpr, StringRef Shift) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(SubExpr)) {
+ if (Bop->getOpcode() == BO_Add || Bop->getOpcode() == BO_Sub) {
+ StringRef Op = Bop->getOpcodeStr();
+ S.Diag(Bop->getOperatorLoc(), diag::warn_addition_in_bitshift)
+ << Bop->getSourceRange() << OpLoc << Shift << Op;
+ SuggestParentheses(S, Bop->getOperatorLoc(),
+ S.PDiag(diag::note_precedence_silence) << Op,
+ Bop->getSourceRange());
+ }
+ }
+}
+
/// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky
/// precedence.
static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc,
@@ -8510,6 +8635,13 @@ static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc,
DiagnoseLogicalAndInLogicalOrLHS(Self, OpLoc, LHSExpr, RHSExpr);
DiagnoseLogicalAndInLogicalOrRHS(Self, OpLoc, LHSExpr, RHSExpr);
}
+
+ if ((Opc == BO_Shl && LHSExpr->getType()->isIntegralType(Self.getASTContext()))
+ || Opc == BO_Shr) {
+ StringRef Shift = BinaryOperator::getOpcodeStr(Opc);
+ DiagnoseAdditionInShift(Self, OpLoc, LHSExpr, Shift);
+ DiagnoseAdditionInShift(Self, OpLoc, RHSExpr, Shift);
+ }
}
// Binary Operators. 'Tok' is the token for the operator.
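
A sketch of the new addition-in-shift case (the warning group is, as I recall, -Wshift-op-parentheses): '+' binds more tightly than '<<', which often surprises:

unsigned pack(unsigned hi, unsigned lo) {
  return hi << 16 + lo;  // parsed as hi << (16 + lo); the note suggests parens
}
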
@@ -8647,6 +8779,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
break;
case UO_Deref: {
Input = DefaultFunctionArrayLvalueConversion(Input.take());
+ if (Input.isInvalid()) return ExprError();
resultType = CheckIndirectionOperand(*this, Input.get(), VK, OpLoc);
break;
}
@@ -9147,8 +9280,7 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
}
return Owned(OffsetOfExpr::Create(Context, Context.getSizeType(), BuiltinLoc,
- TInfo, Comps.data(), Comps.size(),
- Exprs.data(), Exprs.size(), RParenLoc));
+ TInfo, Comps, Exprs, RParenLoc));
}
ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
@@ -9526,6 +9658,16 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
if (Result.isInvalid())
return ExprError();
E = Result.take();
+ } else if (VaListType->isRecordType() && getLangOpts().CPlusPlus) {
+ // If va_list is a record type and we are compiling in C++ mode,
+ // check the argument using reference binding.
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context,
+ Context.getLValueReferenceType(VaListType), false);
+ ExprResult Init = PerformCopyInitialization(Entity, SourceLocation(), E);
+ if (Init.isInvalid())
+ return ExprError();
+ E = Init.takeAs<Expr>();
} else {
// Otherwise, the va_list argument must be an l-value because
// it is modified by va_arg.
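
The new branch matters on ABIs where va_list is a struct: the argument is now checked as if bound to a va_list& parameter. Ordinary usage is unchanged; a self-contained sketch:

#include <cstdarg>
int sum(int n, ...) {
  va_list ap;
  va_start(ap, n);
  int total = 0;
  for (int i = 0; i < n; ++i)
    total += va_arg(ap, int); // 'ap' checked via reference binding in C++
  va_end(ap);
  return total;
}
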
@@ -10094,6 +10236,14 @@ Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs);
}
+void
+Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
+ ReuseLambdaContextDecl_t,
+ bool IsDecltype) {
+ Decl *LambdaContextDecl = ExprEvalContexts.back().LambdaContextDecl;
+ PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype);
+}
+
void Sema::PopExpressionEvaluationContext() {
ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
@@ -10191,15 +10341,44 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func) {
Func->setReferenced();
- // Don't mark this function as used multiple times, unless it's a constexpr
- // function which we need to instantiate.
- if (Func->isUsed(false) &&
- !(Func->isConstexpr() && !Func->getBody() &&
- Func->isImplicitlyInstantiable()))
- return;
-
- if (!IsPotentiallyEvaluatedContext(*this))
- return;
+ // C++11 [basic.def.odr]p3:
+ // A function whose name appears as a potentially-evaluated expression is
+ // odr-used if it is the unique lookup result or the selected member of a
+ // set of overloaded functions [...].
+ //
+ // We (incorrectly) mark overload resolution as an unevaluated context, so we
+ // can just check that here. Skip the rest of this function if we've already
+ // marked the function as used.
+ if (Func->isUsed(false) || !IsPotentiallyEvaluatedContext(*this)) {
+ // C++11 [temp.inst]p3:
+ // Unless a function template specialization has been explicitly
+ // instantiated or explicitly specialized, the function template
+ // specialization is implicitly instantiated when the specialization is
+ // referenced in a context that requires a function definition to exist.
+ //
+ // We consider constexpr function templates to be referenced in a context
+ // that requires a definition to exist whenever they are referenced.
+ //
+ // FIXME: This instantiates constexpr functions too frequently. If this is
+ // really an unevaluated context (and we're not just in the definition of a
+ // function template or overload resolution or other cases which we
+ // incorrectly consider to be unevaluated contexts), and we're not in a
+ // subexpression which we actually need to evaluate (for instance, a
+ // template argument, array bound or an expression in a braced-init-list),
+ // we are not permitted to instantiate this constexpr function definition.
+ //
+ // FIXME: This also implicitly defines special members too frequently. They
+ // are only supposed to be implicitly defined if they are odr-used, but they
+ // are not odr-used from constant expressions in unevaluated contexts.
+ // However, they cannot be referenced if they are deleted, and they are
+ // deleted whenever the implicit definition of the special member would
+ // fail.
+ if (!Func->isConstexpr() || Func->getBody())
+ return;
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Func);
+ if (!Func->isImplicitlyInstantiable() && (!MD || MD->isUserProvided()))
+ return;
+ }
// Note that this declaration has been used.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
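
The comment's point in miniature: referencing a constexpr function template in a constant expression requires its definition to be instantiated, even when the surrounding context is formally unevaluated. A C++11 sketch:

template <typename T> constexpr T twice(T v) { return v + v; }
int arr[twice(2)];                          // array bound forces instantiation
static_assert(twice(21) == 42, "needs the instantiated definition");
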
@@ -10469,8 +10648,7 @@ static ExprResult captureInLambda(Sema &S, LambdaScopeInfo *LSI,
InitializationSequence Init(S, Entities.back(), InitKind, &Ref, 1);
ExprResult Result(true);
if (!Init.Diagnose(S, Entities.back(), InitKind, &Ref, 1))
- Result = Init.Perform(S, Entities.back(), InitKind,
- MultiExprArg(S, &Ref, 1));
+ Result = Init.Perform(S, Entities.back(), InitKind, Ref);
// If this initialization requires any cleanups (e.g., due to a
// default argument to a copy constructor), note that for the
@@ -10886,20 +11064,21 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
}
}
- // Per C++11 [basic.def.odr], a variable is odr-used "unless it is
- // an object that satisfies the requirements for appearing in a
- // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1)
+ // Per C++11 [basic.def.odr], a variable is odr-used "unless it satisfies
+ // the requirements for appearing in a constant expression (5.19) and, if
+ // it is an object, the lvalue-to-rvalue conversion (4.1)
// is immediately applied." We check the first part here, and
// Sema::UpdateMarkingForLValueToRValue deals with the second part.
// Note that we use the C++11 definition everywhere because nothing in
- // C++03 depends on whether we get the C++03 version correct. This does not
- // apply to references, since they are not objects.
+ // C++03 depends on whether we get the C++03 version correct. The second
+ // part does not apply to references, since they are not objects.
const VarDecl *DefVD;
- if (E && !isa<ParmVarDecl>(Var) && !Var->getType()->isReferenceType() &&
+ if (E && !isa<ParmVarDecl>(Var) &&
Var->isUsableInConstantExpressions(SemaRef.Context) &&
- Var->getAnyInitializer(DefVD) && DefVD->checkInitIsICE())
- SemaRef.MaybeODRUseExprs.insert(E);
- else
+ Var->getAnyInitializer(DefVD) && DefVD->checkInitIsICE()) {
+ if (!Var->getType()->isReferenceType())
+ SemaRef.MaybeODRUseExprs.insert(E);
+ } else
MarkVarDeclODRUsed(SemaRef, Var, Loc);
}
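
A classic illustration of the rule being refined here; the second function is what forces a definition to exist:

struct X { static const int N = 4; };  // initialized in-class, never defined
int f() { return X::N; }    // lvalue-to-rvalue applied at once: not odr-used
int g() { return *&X::N; }  // takes the address: odr-used, definition needed
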
@@ -11205,7 +11384,9 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
IsOrAssign = Op->getOperator() == OO_PipeEqual;
Loc = Op->getOperatorLoc();
- } else {
+ } else if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E))
+ return DiagnoseAssignmentAsCondition(POE->getSyntacticForm());
+ else {
// Not an assignment.
return;
}
@@ -11759,6 +11940,10 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
case BuiltinType::PseudoObject:
return checkPseudoObjectRValue(E);
+ case BuiltinType::BuiltinFn:
+ Diag(E->getLocStart(), diag::err_builtin_fn_use);
+ return ExprError();
+
// Everything else should be impossible.
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id:
@@ -11783,6 +11968,18 @@ ExprResult
Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) &&
"Unknown Objective-C Boolean value!");
+ QualType BoolT = Context.ObjCBuiltinBoolTy;
+ if (!Context.getBOOLDecl()) {
+ LookupResult Result(*this, &Context.Idents.get("BOOL"), OpLoc,
+ Sema::LookupOrdinaryName);
+ if (LookupName(Result, getCurScope()) && Result.isSingleResult()) {
+ NamedDecl *ND = Result.getFoundDecl();
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(ND))
+ Context.setBOOLDecl(TD);
+ }
+ }
+ if (Context.getBOOLDecl())
+ BoolT = Context.getBOOLType();
return Owned(new (Context) ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes,
- Context.ObjCBuiltinBoolTy, OpLoc));
+ BoolT, OpLoc));
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
index 2740259..0919bc5 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
@@ -410,33 +410,13 @@ Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
return BuildCXXTypeId(TypeInfoType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
}
-/// Retrieve the UuidAttr associated with QT.
-static UuidAttr *GetUuidAttrOfType(QualType QT) {
- // Optionally remove one level of pointer, reference or array indirection.
- const Type *Ty = QT.getTypePtr();;
- if (QT->isPointerType() || QT->isReferenceType())
- Ty = QT->getPointeeType().getTypePtr();
- else if (QT->isArrayType())
- Ty = cast<ArrayType>(QT)->getElementType().getTypePtr();
-
- // Loop all record redeclaration looking for an uuid attribute.
- CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
- for (CXXRecordDecl::redecl_iterator I = RD->redecls_begin(),
- E = RD->redecls_end(); I != E; ++I) {
- if (UuidAttr *Uuid = I->getAttr<UuidAttr>())
- return Uuid;
- }
-
- return 0;
-}
-
/// \brief Build a Microsoft __uuidof expression with a type operand.
ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc) {
if (!Operand->getType()->isDependentType()) {
- if (!GetUuidAttrOfType(Operand->getType()))
+ if (!CXXUuidofExpr::GetUuidAttrOfType(Operand->getType()))
return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
}
@@ -452,7 +432,7 @@ ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
Expr *E,
SourceLocation RParenLoc) {
if (!E->getType()->isDependentType()) {
- if (!GetUuidAttrOfType(E->getType()) &&
+ if (!CXXUuidofExpr::GetUuidAttrOfType(E->getType()) &&
!E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
}
@@ -808,21 +788,18 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
MultiExprArg exprs,
SourceLocation RParenLoc) {
QualType Ty = TInfo->getType();
- unsigned NumExprs = exprs.size();
- Expr **Exprs = (Expr**)exprs.get();
SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
- if (Ty->isDependentType() ||
- CallExpr::hasAnyTypeDependentArguments(
- llvm::makeArrayRef(Exprs, NumExprs))) {
- exprs.release();
-
+ if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(exprs)) {
return Owned(CXXUnresolvedConstructExpr::Create(Context, TInfo,
LParenLoc,
- Exprs, NumExprs,
+ exprs,
RParenLoc));
}
+ unsigned NumExprs = exprs.size();
+ Expr **Exprs = exprs.data();
+
bool ListInitialization = LParenLoc.isInvalid();
assert((!ListInitialization || (NumExprs == 1 && isa<InitListExpr>(Exprs[0])))
&& "List initialization must have initializer list as expression.");
@@ -835,7 +812,6 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
// corresponding cast expression.
if (NumExprs == 1 && !ListInitialization) {
Expr *Arg = Exprs[0];
- exprs.release();
return BuildCXXFunctionalCastExpr(TInfo, LParenLoc, Arg, RParenLoc);
}
@@ -865,7 +841,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
: InitializationKind::CreateValue(TyBeginLoc,
LParenLoc, RParenLoc);
InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind, move(exprs));
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, exprs);
if (!Result.isInvalid() && ListInitialization &&
isa<InitListExpr>(Result.get())) {
@@ -881,7 +857,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
}
// FIXME: Improve AST representation?
- return move(Result);
+ return Result;
}
/// doesUsualArrayDeleteWantSize - Answers whether the usual
@@ -1011,9 +987,9 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
DirectInitRange = List->getSourceRange();
- return BuildCXXNew(StartLoc, UseGlobal,
+ return BuildCXXNew(SourceRange(StartLoc, D.getLocEnd()), UseGlobal,
PlacementLParen,
- move(PlacementArgs),
+ PlacementArgs,
PlacementRParen,
TypeIdParens,
AllocType,
@@ -1044,7 +1020,7 @@ static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style,
}
ExprResult
-Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
+Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
@@ -1056,6 +1032,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
Expr *Initializer,
bool TypeMayContainAuto) {
SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
+ SourceLocation StartLoc = Range.getBegin();
CXXNewExpr::InitializationStyle initStyle;
if (DirectInitRange.isValid()) {
@@ -1279,21 +1256,13 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
}
- // ARC: warn about ABI issues.
- if (getLangOpts().ObjCAutoRefCount) {
- QualType BaseAllocType = Context.getBaseElementType(AllocType);
- if (BaseAllocType.hasStrongOrWeakObjCLifetime())
- Diag(StartLoc, diag::warn_err_new_delete_object_array)
- << 0 << BaseAllocType;
- }
-
// Note that we do *not* convert the argument in any way. It can
// be signed, larger than size_t, whatever.
}
FunctionDecl *OperatorNew = 0;
FunctionDecl *OperatorDelete = 0;
- Expr **PlaceArgs = (Expr**)PlacementArgs.get();
+ Expr **PlaceArgs = PlacementArgs.data();
unsigned NumPlaceArgs = PlacementArgs.size();
if (!AllocType->isDependentType() &&
@@ -1432,15 +1401,14 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
}
- PlacementArgs.release();
-
return Owned(new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew,
OperatorDelete,
UsualArrayDeleteWantsSize,
- PlaceArgs, NumPlaceArgs, TypeIdParens,
+ llvm::makeArrayRef(PlaceArgs, NumPlaceArgs),
+ TypeIdParens,
ArraySize, initStyle, Initializer,
ResultType, AllocTypeInfo,
- StartLoc, DirectInitRange));
+ Range, DirectInitRange));
}
/// \brief Checks that a type is suitable as the allocated type
@@ -1638,7 +1606,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
= dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) {
// Perform template argument deduction to try to match the
// expected function type.
- TemplateDeductionInfo Info(Context, StartLoc);
+ TemplateDeductionInfo Info(StartLoc);
if (DeduceTemplateArguments(FnTmpl, 0, ExpectedFunctionType, Fn, Info))
continue;
} else
@@ -2100,7 +2068,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
ObjectPtrConversions.front()->getConversionType(),
AA_Converting);
if (Res.isUsable()) {
- Ex = move(Res);
+ Ex = Res;
Type = Ex.get()->getType();
}
}
@@ -2211,13 +2179,6 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
}
}
- } else if (getLangOpts().ObjCAutoRefCount &&
- PointeeElem->isObjCLifetimeType() &&
- (PointeeElem.getObjCLifetime() == Qualifiers::OCL_Strong ||
- PointeeElem.getObjCLifetime() == Qualifiers::OCL_Weak) &&
- ArrayForm) {
- Diag(StartLoc, diag::warn_err_new_delete_object_array)
- << 1 << PointeeElem;
}
if (!OperatorDelete) {
@@ -2287,7 +2248,7 @@ ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
return ExprError();
}
- return move(Condition);
+ return Condition;
}
/// CheckCXXBooleanCondition - Returns true if a conversion to bool is invalid.
@@ -2354,11 +2315,9 @@ static ExprResult BuildCXXCastArgument(Sema &S,
default: llvm_unreachable("Unhandled cast kind!");
case CK_ConstructorConversion: {
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Method);
- ASTOwningVector<Expr*> ConstructorArgs(S);
+ SmallVector<Expr*, 8> ConstructorArgs;
- if (S.CompleteConstructorCall(Constructor,
- MultiExprArg(&From, 1),
- CastLoc, ConstructorArgs))
+ if (S.CompleteConstructorCall(Constructor, From, CastLoc, ConstructorArgs))
return ExprError();
S.CheckConstructorAccess(CastLoc, Constructor,
@@ -2367,7 +2326,7 @@ static ExprResult BuildCXXCastArgument(Sema &S,
ExprResult Result
= S.BuildCXXConstructExpr(CastLoc, Ty, cast<CXXConstructorDecl>(Method),
- move_arg(ConstructorArgs),
+ ConstructorArgs,
HadMultipleCandidates, /*ZeroInit*/ false,
CXXConstructExpr::CK_Complete, SourceRange());
if (Result.isInvalid())
@@ -2511,15 +2470,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// FIXME: When can ToType be a reference type?
assert(!ToType->isReferenceType());
if (SCS.Second == ICK_Derived_To_Base) {
- ASTOwningVector<Expr*> ConstructorArgs(*this);
+ SmallVector<Expr*, 8> ConstructorArgs;
if (CompleteConstructorCall(cast<CXXConstructorDecl>(SCS.CopyConstructor),
- MultiExprArg(*this, &From, 1),
- /*FIXME:ConstructLoc*/SourceLocation(),
+ From, /*FIXME:ConstructLoc*/SourceLocation(),
ConstructorArgs))
return ExprError();
return BuildCXXConstructExpr(/*FIXME:ConstructLoc*/SourceLocation(),
ToType, SCS.CopyConstructor,
- move_arg(ConstructorArgs),
+ ConstructorArgs,
/*HadMultipleCandidates*/ false,
/*ZeroInit*/ false,
CXXConstructExpr::CK_Complete,
@@ -2527,8 +2485,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
return BuildCXXConstructExpr(/*FIXME:ConstructLoc*/SourceLocation(),
ToType, SCS.CopyConstructor,
- MultiExprArg(*this, &From, 1),
- /*HadMultipleCandidates*/ false,
+ From, /*HadMultipleCandidates*/ false,
/*ZeroInit*/ false,
CXXConstructExpr::CK_Complete,
SourceRange());
@@ -2602,8 +2559,16 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Integral_Promotion:
case ICK_Integral_Conversion:
- From = ImpCastExprToType(From, ToType, CK_IntegralCast,
- VK_RValue, /*BasePath=*/0, CCK).take();
+ if (ToType->isBooleanType()) {
+ assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
+ SCS.Second == ICK_Integral_Promotion &&
+ "only enums with fixed underlying type can promote to bool");
+ From = ImpCastExprToType(From, ToType, CK_IntegralToBoolean,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ } else {
+ From = ImpCastExprToType(From, ToType, CK_IntegralCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ }
break;
case ICK_Floating_Promotion:
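
The asserted case in a nutshell: only an enum with a fixed underlying type of bool can promote to bool, and that promotion must be lowered as a boolean conversion rather than a plain integral cast. A C++11 sketch:

enum E : bool { Off, On };  // fixed underlying type 'bool'
bool b = On;                // promotion E -> bool: CK_IntegralToBoolean
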
@@ -2943,6 +2908,7 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S,
case UTT_IsEmpty:
case UTT_IsPolymorphic:
case UTT_IsAbstract:
+ case UTT_IsInterfaceClass:
// Fall-through
// These traits require a complete type.
@@ -3007,7 +2973,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
case UTT_IsUnion:
return T->isUnionType();
case UTT_IsClass:
- return T->isClassType() || T->isStructureType();
+ return T->isClassType() || T->isStructureType() || T->isInterfaceType();
case UTT_IsFunction:
return T->isFunctionType();
@@ -3073,6 +3039,10 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
return RD->isAbstract();
return false;
+ case UTT_IsInterfaceClass:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return RD->isInterface();
+ return false;
case UTT_IsFinal:
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
return RD->hasAttr<FinalAttr>();
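
A sketch of the new trait under -fms-extensions; the exact spellings are my understanding of the Microsoft extension, so treat this as illustrative:

__interface I { void f(); };
static_assert(__is_interface_class(I), "interfaces satisfy the trait");
static_assert(!__is_interface_class(int), "non-classes do not");
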
@@ -3417,9 +3387,7 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (Init.Failed())
return false;
- ExprResult Result = Init.Perform(S, To, InitKind,
- MultiExprArg(ArgExprs.data(),
- ArgExprs.size()));
+ ExprResult Result = Init.Perform(S, To, InitKind, ArgExprs);
if (Result.isInvalid() || SFINAE.hasErrorOccurred())
return false;
@@ -3577,7 +3545,7 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
if (Init.Failed())
return false;
- ExprResult Result = Init.Perform(Self, To, Kind, MultiExprArg(&FromPtr, 1));
+ ExprResult Result = Init.Perform(Self, To, Kind, FromPtr);
return !Result.isInvalid() && !SFINAE.hasErrorOccurred();
}
@@ -3774,7 +3742,7 @@ ExprResult Sema::ActOnExpressionTrait(ExpressionTrait ET,
ExprResult Result = BuildExpressionTrait(ET, KWLoc, Queried, RParen);
- return move(Result);
+ return Result;
}
static bool EvaluateExpressionTrait(ExpressionTrait ET, Expr *E) {
@@ -4056,14 +4024,14 @@ static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS
Best->Conversions[0], Sema::AA_Converting);
if (LHSRes.isInvalid())
break;
- LHS = move(LHSRes);
+ LHS = LHSRes;
ExprResult RHSRes =
Self.PerformImplicitConversion(RHS.get(), Best->BuiltinTypes.ParamTypes[1],
Best->Conversions[1], Sema::AA_Converting);
if (RHSRes.isInvalid())
break;
- RHS = move(RHSRes);
+ RHS = RHSRes;
if (Best->Function)
Self.MarkFunctionReferenced(QuestionLoc, Best->Function);
return false;
@@ -4104,7 +4072,7 @@ static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
SourceLocation());
Expr *Arg = E.take();
InitializationSequence InitSeq(Self, Entity, Kind, &Arg, 1);
- ExprResult Result = InitSeq.Perform(Self, Entity, Kind, MultiExprArg(&Arg, 1));
+ ExprResult Result = InitSeq.Perform(Self, Entity, Kind, Arg);
if (Result.isInvalid())
return true;
@@ -4129,7 +4097,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
ExprResult CondRes = CheckCXXBooleanCondition(Cond.take());
if (CondRes.isInvalid())
return QualType();
- Cond = move(CondRes);
+ Cond = CondRes;
}
// Assume r-value.
@@ -4160,6 +4128,9 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
ExprResult &NonVoid = LVoid ? RHS : LHS;
if (NonVoid.get()->getType()->isRecordType() &&
NonVoid.get()->isGLValue()) {
+ if (RequireNonAbstractType(QuestionLoc, NonVoid.get()->getType(),
+ diag::err_allocation_of_abstract_type))
+ return QualType();
InitializedEntity Entity =
InitializedEntity::InitializeTemporary(NonVoid.get()->getType());
NonVoid = PerformCopyInitialization(Entity, SourceLocation(), NonVoid);
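
Note: the RequireNonAbstractType checks added in this function reject conditionals that would have to materialize a temporary copy of an abstract class. Under the C++11 rules Clang implemented at the time, a conditional with a throw-expression operand yields a prvalue copy of the other operand, which is exactly the ill-formed case. Illustrative only:

    struct Shape { virtual void draw() = 0; };   // abstract

    void f(bool cond, Shape &a, Shape &b) {
      // (void)(cond ? a : throw 0); // now diagnosed: would copy abstract Shape
      // (void)(cond ? a : b);       // fine: both glvalues, no temporary copy
    }
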
@@ -4302,7 +4273,11 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy)) {
if (LTy->isRecordType()) {
// The operands have class type. Make a temporary copy.
+ if (RequireNonAbstractType(QuestionLoc, LTy,
+ diag::err_allocation_of_abstract_type))
+ return QualType();
InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy);
+
ExprResult LHSCopy = PerformCopyInitialization(Entity,
SourceLocation(),
LHS);
@@ -4566,14 +4541,14 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
// Convert E1 to Composite1
ExprResult E1Result
- = E1ToC1.Perform(*this, Entity1, Kind, MultiExprArg(*this,&E1,1));
+ = E1ToC1.Perform(*this, Entity1, Kind, E1);
if (E1Result.isInvalid())
return QualType();
E1 = E1Result.takeAs<Expr>();
// Convert E2 to Composite1
ExprResult E2Result
- = E2ToC1.Perform(*this, Entity1, Kind, MultiExprArg(*this,&E2,1));
+ = E2ToC1.Perform(*this, Entity1, Kind, E2);
if (E2Result.isInvalid())
return QualType();
E2 = E2Result.takeAs<Expr>();
@@ -4591,14 +4566,14 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
// Convert E1 to Composite2
ExprResult E1Result
- = E1ToC2.Perform(*this, Entity2, Kind, MultiExprArg(*this, &E1, 1));
+ = E1ToC2.Perform(*this, Entity2, Kind, E1);
if (E1Result.isInvalid())
return QualType();
E1 = E1Result.takeAs<Expr>();
// Convert E2 to Composite2
ExprResult E2Result
- = E2ToC2.Perform(*this, Entity2, Kind, MultiExprArg(*this, &E2, 1));
+ = E2ToC2.Perform(*this, Entity2, Kind, E2);
if (E2Result.isInvalid())
return QualType();
E2 = E2Result.takeAs<Expr>();
@@ -4839,7 +4814,8 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
BO_Comma, BO->getType(),
BO->getValueKind(),
BO->getObjectKind(),
- BO->getOperatorLoc()));
+ BO->getOperatorLoc(),
+ BO->isFPContractable()));
}
}
@@ -4991,7 +4967,7 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
// type C (or of pointer to a class type C), the unqualified-id is looked
// up in the scope of class C. [...]
ObjectType = ParsedType::make(BaseType);
- return move(Base);
+ return Base;
}
ExprResult Sema::DiagnoseDtorReference(SourceLocation NameLoc,
@@ -5056,7 +5032,8 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
return ExprError();
- if (!ObjectType->isDependentType() && !ObjectType->isScalarType()) {
+ if (!ObjectType->isDependentType() && !ObjectType->isScalarType() &&
+ !ObjectType->isVectorType()) {
if (getLangOpts().MicrosoftMode && ObjectType->isVoidType())
Diag(OpLoc, diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
else
@@ -5203,8 +5180,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
} else {
// Resolve the template-id to a type.
TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
- ASTTemplateArgsPtr TemplateArgsPtr(*this,
- TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
TypeResult T = ActOnTemplateIdType(TemplateId->SS,
TemplateId->TemplateKWLoc,
@@ -5253,8 +5229,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
} else {
// Resolve the template-id to a type.
TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
- ASTTemplateArgsPtr TemplateArgsPtr(*this,
- TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
TypeResult T = ActOnTemplateIdType(TemplateId->SS,
TemplateId->TemplateKWLoc,
@@ -5353,7 +5328,7 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
MarkFunctionReferenced(Exp.get()->getLocStart(), Method);
CXXMemberCallExpr *CE =
- new (Context) CXXMemberCallExpr(Context, ME, 0, 0, ResultType, VK,
+ new (Context) CXXMemberCallExpr(Context, ME, MultiExprArg(), ResultType, VK,
Exp.get()->getLocEnd());
return CE;
}
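
Note: the recurring edit in this file, and in the Sema files below, replaces move()/move_arg() wrappers and explicit MultiExprArg(&E, 1) spellings with plain values: ExprResult became directly copyable and MultiExprArg became an ArrayRef-style view. A self-contained model of why the shorter call sites compile; these are stand-in types for illustration, not Clang's real headers:

    #include <cstddef>
    #include <vector>

    struct Expr {};

    // ArrayRef-like: a non-owning (pointer, length) view over Expr*.
    struct MultiExprArg {
      Expr **Data = nullptr;
      std::size_t Length = 0;
      MultiExprArg() = default;
      MultiExprArg(Expr *&Single) : Data(&Single), Length(1) {}
      MultiExprArg(std::vector<Expr *> &V) : Data(V.data()), Length(V.size()) {}
      Expr **data() const { return Data; }
      std::size_t size() const { return Length; }
    };

    std::size_t perform(MultiExprArg Args) { return Args.size(); }

    int main() {
      Expr E;
      Expr *Single = &E;
      std::vector<Expr *> Many{&E, &E};
      // Both spellings convert implicitly; no MultiExprArg(&Single, 1) needed.
      return perform(Single) + perform(Many) == 3 ? 0 : 1;
    }
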
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
index 8f445e2..a7fd471 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
@@ -13,6 +13,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -353,7 +354,7 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
// Now look up the TypeDefDecl from the vector type. Without this,
// diagnostics look bad. We want extended vector types to appear built-in.
for (Sema::ExtVectorDeclsType::iterator
- I = S.ExtVectorDecls.begin(S.ExternalSource),
+ I = S.ExtVectorDecls.begin(S.getExternalSource()),
E = S.ExtVectorDecls.end();
I != E; ++I) {
if ((*I)->getUnderlyingType() == VT)
@@ -605,7 +606,8 @@ LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
R.addDecl(ND);
SemaRef.Diag(R.getNameLoc(), diag::err_no_member_suggest)
<< Name << DC << CorrectedQuotedStr << SS.getRange()
- << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ << FixItHint::CreateReplacement(Corrected.getCorrectionRange(),
+ CorrectedStr);
SemaRef.Diag(ND->getLocation(), diag::note_previous_decl)
<< ND->getDeclName();
}
@@ -656,7 +658,7 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
}
if (Result.get())
- return move(Result);
+ return Result;
// LookupMemberExpr can modify Base, and thus change BaseType
BaseType = Base->getType();
@@ -1021,7 +1023,7 @@ static bool ShouldTryAgainWithRedefinitionType(Sema &S, ExprResult &base) {
// Do the substitution as long as the redefinition type isn't just a
// possibly-qualified pointer to builtin-id or builtin-Class again.
opty = redef->getAs<ObjCObjectPointerType>();
- if (opty && !opty->getObjectType()->getInterface() != 0)
+ if (opty && !opty->getObjectType()->getInterface())
return false;
base = S.ImpCastExprToType(base.take(), redef, CK_BitCast);
@@ -1272,9 +1274,23 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
if (warn)
Diag(MemberLoc, diag::warn_direct_ivar_access) << IV->getDeclName();
}
- return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(),
- MemberLoc, BaseExpr.take(),
- IsArrow));
+
+ ObjCIvarRefExpr *Result = new (Context) ObjCIvarRefExpr(IV, IV->getType(),
+ MemberLoc,
+ BaseExpr.take(),
+ IsArrow);
+
+ if (getLangOpts().ObjCAutoRefCount) {
+ if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ DiagnosticsEngine::Level Level =
+ Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
+ MemberLoc);
+ if (Level != DiagnosticsEngine::Ignored)
+ getCurFunction()->recordUseOfWeak(Result);
+ }
+ }
+
+ return Owned(Result);
}
// Objective-C property access.
@@ -1550,7 +1566,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
Id.getKind() == UnqualifiedId::IK_DestructorName)
return DiagnoseDtorReference(NameInfo.getLoc(), Result.get());
- return move(Result);
+ return Result;
}
ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl, HasTrailingLParen};
@@ -1560,7 +1576,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
false, &ExtraArgs);
}
- return move(Result);
+ return Result;
}
static ExprResult
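
Note: the ObjCIvarRefExpr hunk above records every use of a __weak ivar on the current function scope, but only after checking that -Warc-repeated-use-of-weak is live at that location, so builds with the warning disabled pay no bookkeeping cost. A simplified model of that gate-then-record shape (illustrative names, not Clang's types):

    #include <vector>

    enum class Level { Ignored, Warning, Error };

    struct DiagnosticsEngine {
      Level L = Level::Ignored;
      Level getDiagnosticLevel() const { return L; }
    };

    struct FunctionScopeInfo {
      std::vector<const void *> WeakUses;   // analyzed after the function body
      void recordUseOfWeak(const void *E) { WeakUses.push_back(E); }
    };

    void noteWeakUse(const DiagnosticsEngine &Diags, FunctionScopeInfo *FS,
                     const void *Use) {
      if (Diags.getDiagnosticLevel() != Level::Ignored)
        FS->recordUseOfWeak(Use);           // skip the work when nobody listens
    }
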
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
index 0aabf8b..e43b6bf 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
@@ -229,7 +229,7 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
S.NSNumberPointer, ResultTInfo,
S.NSNumberDecl,
/*isInstance=*/false, /*isVariadic=*/false,
- /*isSynthesized=*/false,
+ /*isPropertyAccessor=*/false,
/*isImplicitlyDeclared=*/true,
/*isDefined=*/false,
ObjCMethodDecl::Required,
@@ -345,7 +345,7 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
SourceLocation());
InitializationSequence Seq(S, Entity, Kind, &Element, 1);
if (!Seq.Failed())
- return Seq.Perform(S, Entity, Kind, MultiExprArg(S, &Element, 1));
+ return Seq.Perform(S, Entity, Kind, Element);
}
Expr *OrigElement = Element;
@@ -477,7 +477,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
stringWithUTF8String, NSStringPointer,
ResultTInfo, NSStringDecl,
/*isInstance=*/false, /*isVariadic=*/false,
- /*isSynthesized=*/false,
+ /*isPropertyAccessor=*/false,
/*isImplicitlyDeclared=*/true,
/*isDefined=*/false,
ObjCMethodDecl::Required,
@@ -646,7 +646,7 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
ResultTInfo,
Context.getTranslationUnitDecl(),
false /*Instance*/, false/*isVariadic*/,
- /*isSynthesized=*/false,
+ /*isPropertyAccessor=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
ObjCMethodDecl::Required,
false);
@@ -708,7 +708,7 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
// Check that each of the elements provided is valid in a collection literal,
// performing conversions as necessary.
- Expr **ElementsBuffer = Elements.get();
+ Expr **ElementsBuffer = Elements.data();
for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
ExprResult Converted = CheckObjCCollectionLiteralElement(*this,
ElementsBuffer[I],
@@ -724,10 +724,8 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
Context.getObjCInterfaceType(NSArrayDecl));
return MaybeBindToTemporary(
- ObjCArrayLiteral::Create(Context,
- llvm::makeArrayRef(Elements.get(),
- Elements.size()),
- Ty, ArrayWithObjectsMethod, SR));
+ ObjCArrayLiteral::Create(Context, Elements, Ty,
+ ArrayWithObjectsMethod, SR));
}
ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
@@ -766,7 +764,7 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
0 /*TypeSourceInfo */,
Context.getTranslationUnitDecl(),
false /*Instance*/, false/*isVariadic*/,
- /*isSynthesized=*/false,
+ /*isPropertyAccessor=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
ObjCMethodDecl::Required,
false);
@@ -1125,7 +1123,9 @@ void Sema::EmitRelatedResultTypeNote(const Expr *E) {
bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
Expr **Args, unsigned NumArgs,
- Selector Sel, ObjCMethodDecl *Method,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelectorLocs,
+ ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
QualType &ReturnType, ExprValueKind &VK) {
@@ -1149,7 +1149,8 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
: diag::warn_inst_method_not_found;
if (!getLangOpts().DebuggerSupport)
Diag(lbrac, DiagID)
- << Sel << isClassMessage << SourceRange(lbrac, rbrac);
+ << Sel << isClassMessage << SourceRange(SelectorLocs.front(),
+ SelectorLocs.back());
// In debuggers, we want to use __unknown_anytype for these
// results so that clients can cast them.
@@ -1304,8 +1305,8 @@ static void DiagnoseARCUseOfWeakReceiver(Sema &S, Expr *Receiver) {
Expr *RExpr = Receiver->IgnoreParenImpCasts();
SourceLocation Loc = RExpr->getLocStart();
QualType T = RExpr->getType();
- ObjCPropertyDecl *PDecl = 0;
- ObjCMethodDecl *GDecl = 0;
+ const ObjCPropertyDecl *PDecl = 0;
+ const ObjCMethodDecl *GDecl = 0;
if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(RExpr)) {
RExpr = POE->getSyntacticForm();
if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(RExpr)) {
@@ -1327,34 +1328,29 @@ static void DiagnoseARCUseOfWeakReceiver(Sema &S, Expr *Receiver) {
// See if receiver is a method which invokes a synthesized getter
// backing a 'weak' property.
ObjCMethodDecl *Method = ME->getMethodDecl();
- if (Method && Method->isSynthesized()) {
- Selector Sel = Method->getSelector();
- if (Sel.getNumArgs() == 0) {
- const DeclContext *Container = Method->getDeclContext();
- PDecl =
- S.LookupPropertyDecl(cast<ObjCContainerDecl>(Container),
- Sel.getIdentifierInfoForSlot(0));
- }
+ if (Method && Method->getSelector().getNumArgs() == 0) {
+ PDecl = Method->findPropertyDecl();
if (PDecl)
T = PDecl->getType();
}
}
- if (T.getObjCLifetime() == Qualifiers::OCL_Weak) {
- S.Diag(Loc, diag::warn_receiver_is_weak)
- << ((!PDecl && !GDecl) ? 0 : (PDecl ? 1 : 2));
- if (PDecl)
- S.Diag(PDecl->getLocation(), diag::note_property_declare);
- else if (GDecl)
- S.Diag(GDecl->getLocation(), diag::note_method_declared_at) << GDecl;
- return;
+ if (T.getObjCLifetime() != Qualifiers::OCL_Weak) {
+ if (!PDecl)
+ return;
+ if (!(PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak))
+ return;
}
-
- if (PDecl &&
- (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)) {
- S.Diag(Loc, diag::warn_receiver_is_weak) << 1;
+
+ S.Diag(Loc, diag::warn_receiver_is_weak)
+ << ((!PDecl && !GDecl) ? 0 : (PDecl ? 1 : 2));
+
+ if (PDecl)
S.Diag(PDecl->getLocation(), diag::note_property_declare);
- }
+ else if (GDecl)
+ S.Diag(GDecl->getLocation(), diag::note_method_declared_at) << GDecl;
+
+ S.Diag(Loc, diag::note_arc_assign_to_strong);
}
/// HandleExprPropertyRefExpr - Handle foo.bar where foo is a pointer to an
@@ -1776,19 +1772,17 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
// We are in a method whose class has a superclass, so 'super'
// is acting as a keyword.
- if (Method->isInstanceMethod()) {
- if (Sel.getMethodFamily() == OMF_dealloc)
- getCurFunction()->ObjCShouldCallSuperDealloc = false;
- if (Sel.getMethodFamily() == OMF_finalize)
- getCurFunction()->ObjCShouldCallSuperFinalize = false;
+ if (Method->getSelector() == Sel)
+ getCurFunction()->ObjCShouldCallSuper = false;
+ if (Method->isInstanceMethod()) {
// Since we are in an instance method, this is an instance
// message to the superclass instance.
QualType SuperTy = Context.getObjCInterfaceType(Super);
SuperTy = Context.getObjCObjectPointerType(SuperTy);
return BuildInstanceMessage(0, SuperTy, SuperLoc,
Sel, /*Method=*/0,
- LBracLoc, SelectorLocs, RBracLoc, move(Args));
+ LBracLoc, SelectorLocs, RBracLoc, Args);
}
// Since we are in a class method, this is a class message to
@@ -1796,7 +1790,7 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
return BuildClassMessage(/*ReceiverTypeInfo=*/0,
Context.getObjCInterfaceType(Super),
SuperLoc, Sel, /*Method=*/0,
- LBracLoc, SelectorLocs, RBracLoc, move(Args));
+ LBracLoc, SelectorLocs, RBracLoc, Args);
}
@@ -1911,7 +1905,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
// If the receiver type is dependent, we can't type-check anything
// at this point. Build a dependent expression.
unsigned NumArgs = ArgsIn.size();
- Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
+ Expr **Args = ArgsIn.data();
assert(SuperLoc.isInvalid() && "Message to super with dependent type");
return Owned(ObjCMessageExpr::Create(Context, ReceiverType,
VK_RValue, LBracLoc, ReceiverTypeInfo,
@@ -1965,8 +1959,9 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
ExprValueKind VK = VK_RValue;
unsigned NumArgs = ArgsIn.size();
- Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
- if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, Method, true,
+ Expr **Args = ArgsIn.data();
+ if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, SelectorLocs,
+ Method, true,
SuperLoc.isValid(), LBracLoc, RBracLoc,
ReturnType, VK))
return ExprError();
@@ -2016,7 +2011,7 @@ ExprResult Sema::ActOnClassMessage(Scope *S,
return BuildClassMessage(ReceiverTypeInfo, ReceiverType,
/*SuperLoc=*/SourceLocation(), Sel, /*Method=*/0,
- LBracLoc, SelectorLocs, RBracLoc, move(Args));
+ LBracLoc, SelectorLocs, RBracLoc, Args);
}
ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver,
@@ -2095,7 +2090,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// If the receiver is type-dependent, we can't type-check anything
// at this point. Build a dependent expression.
unsigned NumArgs = ArgsIn.size();
- Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
+ Expr **Args = ArgsIn.data();
assert(SuperLoc.isInvalid() && "Message to super with dependent type");
return Owned(ObjCMessageExpr::Create(Context, Context.DependentTy,
VK_RValue, LBracLoc, Receiver, Sel,
@@ -2282,7 +2277,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
LBracLoc,
SelectorLocs,
RBracLoc,
- move(ArgsIn));
+ ArgsIn);
} else {
// Reject other random receiver types (e.g. structs).
Diag(Loc, diag::err_bad_receiver_type)
@@ -2295,12 +2290,13 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// Check the message arguments.
unsigned NumArgs = ArgsIn.size();
- Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
+ Expr **Args = ArgsIn.data();
QualType ReturnType;
ExprValueKind VK = VK_RValue;
bool ClassMessage = (ReceiverType->isObjCClassType() ||
ReceiverType->isObjCQualifiedClassType());
- if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, Method,
+ if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel,
+ SelectorLocs, Method,
ClassMessage, SuperLoc.isValid(),
LBracLoc, RBracLoc, ReturnType, VK))
return ExprError();
@@ -2428,6 +2424,24 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// In ARC, check for message sends which are likely to introduce
// retain cycles.
checkRetainCycles(Result);
+
+ if (!isImplicit && Method) {
+ if (const ObjCPropertyDecl *Prop = Method->findPropertyDecl()) {
+ bool IsWeak =
+ Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak;
+ if (!IsWeak && Sel.isUnarySelector())
+ IsWeak = ReturnType.getObjCLifetime() & Qualifiers::OCL_Weak;
+
+ if (IsWeak) {
+ DiagnosticsEngine::Level Level =
+ Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
+ LBracLoc);
+ if (Level != DiagnosticsEngine::Ignored)
+ getCurFunction()->recordUseOfWeak(Result, Prop);
+
+ }
+ }
+ }
}
return MaybeBindToTemporary(Result);
@@ -2448,7 +2462,7 @@ ExprResult Sema::ActOnInstanceMessage(Scope *S,
return BuildInstanceMessage(Receiver, Receiver->getType(),
/*SuperLoc=*/SourceLocation(), Sel, /*Method=*/0,
- LBracLoc, SelectorLocs, RBracLoc, move(Args));
+ LBracLoc, SelectorLocs, RBracLoc, Args);
}
enum ARCConversionTypeClass {
@@ -3079,8 +3093,8 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
return new (Context) GenericSelectionExpr(Context, gse->getGenericLoc(),
gse->getControllingExpr(),
- subTypes.data(), subExprs.data(),
- n, gse->getDefaultLoc(),
+ subTypes, subExprs,
+ gse->getDefaultLoc(),
gse->getRParenLoc(),
gse->containsUnexpandedParameterPack(),
gse->getResultIndex());
@@ -3101,8 +3115,8 @@ bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType,
canExprType->isObjCObjectPointerType()) {
if (const ObjCObjectPointerType *ObjT =
canExprType->getAs<ObjCObjectPointerType>())
- if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable())
- return false;
+ if (const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl())
+ return !ObjI->isArcWeakrefUnavailable();
}
return true;
}
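
Note: the last hunk in this file hardens CheckObjCARCUnavailableWeakConversion: object-pointer types such as 'id' carry no interface declaration, so getInterfaceDecl() can return null and the old unconditional dereference could crash. A reduced model of the fixed shape, with stand-in types:

    struct ObjCInterfaceDecl {
      bool WeakrefUnavailable = false;
      bool isArcWeakrefUnavailable() const { return WeakrefUnavailable; }
    };

    struct ObjCObjectPointerType {
      const ObjCInterfaceDecl *Iface = nullptr;   // null for 'id'-like types
      const ObjCInterfaceDecl *getInterfaceDecl() const { return Iface; }
    };

    bool allowsWeak(const ObjCObjectPointerType &T) {
      if (const ObjCInterfaceDecl *I = T.getInterfaceDecl())
        return !I->isArcWeakrefUnavailable();
      return true;   // no interface decl: nothing forbids __weak
    }
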
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
index 62ab1e6..3596bbf 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
@@ -1316,6 +1316,8 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
// If the record is invalid, some of its members are invalid. To avoid
// confusion, we forgo checking the initializer for the entire record.
if (structDecl->isInvalidDecl()) {
+ // Assume it was supposed to consume a single initializer.
+ ++Index;
hadError = true;
return;
}
@@ -1503,11 +1505,14 @@ static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
/// corresponds to FieldName.
static IndirectFieldDecl *FindIndirectFieldDesignator(FieldDecl *AnonField,
IdentifierInfo *FieldName) {
+ if (!FieldName)
+ return 0;
+
assert(AnonField->isAnonymousStructOrUnion());
Decl *NextDecl = AnonField->getNextDeclInContext();
while (IndirectFieldDecl *IF =
dyn_cast_or_null<IndirectFieldDecl>(NextDecl)) {
- if (FieldName && FieldName == IF->getAnonField()->getIdentifier())
+ if (FieldName == IF->getAnonField()->getIdentifier())
return IF;
NextDecl = NextDecl->getNextDeclInContext();
}
@@ -1521,8 +1526,8 @@ static DesignatedInitExpr *CloneDesignatedInitExpr(Sema &SemaRef,
for (unsigned I = 0; I < NumIndexExprs; ++I)
IndexExprs[I] = DIE->getSubExpr(I + 1);
return DesignatedInitExpr::Create(SemaRef.Context, DIE->designators_begin(),
- DIE->size(), IndexExprs.data(),
- NumIndexExprs, DIE->getEqualOrColonLoc(),
+ DIE->size(), IndexExprs,
+ DIE->getEqualOrColonLoc(),
DIE->usesGNUSyntax(), DIE->getInit());
}
@@ -1562,7 +1567,7 @@ class FieldInitializerValidatorCCC : public CorrectionCandidateCallback {
///
/// @param DesigIdx The index of the current designator.
///
-/// @param DeclType The type of the "current object" (C99 6.7.8p17),
+/// @param CurrentObjectType The type of the "current object" (C99 6.7.8p17),
/// into which the designation in @p DIE should refer.
///
/// @param NextField If non-NULL and the first designator in @p DIE is
@@ -2068,7 +2073,7 @@ InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
InitListExpr *Result
= new (SemaRef.Context) InitListExpr(SemaRef.Context,
- InitRange.getBegin(), 0, 0,
+ InitRange.getBegin(), MultiExprArg(),
InitRange.getEnd());
QualType ResultType = CurrentObjectType;
@@ -2261,8 +2266,8 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
DesignatedInitExpr *DIE
= DesignatedInitExpr::Create(Context,
Designators.data(), Designators.size(),
- InitExpressions.data(), InitExpressions.size(),
- Loc, GNUSyntax, Init.takeAs<Expr>());
+ InitExpressions, Loc, GNUSyntax,
+ Init.takeAs<Expr>());
if (!getLangOpts().C99)
Diag(DIE->getLocStart(), diag::ext_designated_init)
@@ -2814,14 +2819,6 @@ static void TryConstructorInitialization(Sema &S,
assert((!InitListSyntax || (NumArgs == 1 && isa<InitListExpr>(Args[0]))) &&
"InitListSyntax must come with a single initializer list argument.");
- // Check constructor arguments for self reference.
- if (DeclaratorDecl *DD = Entity.getDecl())
- // Parameters arguments are occassionially constructed with itself,
- // for instance, in recursive functions. Skip them.
- if (!isa<ParmVarDecl>(DD))
- for (unsigned i = 0; i < NumArgs; ++i)
- S.CheckSelfReference(DD, Args[i]);
-
// The type we're constructing needs to be complete.
if (S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
Sequence.setIncompleteTypeFailure(DestType);
@@ -3614,8 +3611,8 @@ static void TryValueInitialization(Sema &S,
// user-provided or deleted default constructor, then the object is
// zero-initialized and, if T has a non-trivial default constructor,
// default-initialized;
- // FIXME: The 'non-union' here is a defect (not yet assigned an issue
- // number). Update the quotation when the defect is resolved.
+ // The 'non-union' here was removed by DR1502. The 'non-trivial default
+ // constructor' part was removed by DR1507.
if (NeedZeroInitialization)
Sequence.AddZeroInitializationStep(Entity.getType());
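
Note: the rewritten comment above tracks two core-language defect resolutions — DR1502 dropped the 'non-union' restriction and DR1507 dropped the 'non-trivial default constructor' condition, so zero-initialization now happens first in all of these value-initialization cases. A plain C++ example of the user-visible guarantee:

    struct S {
      S() {}         // user-provided: value-init does NOT zero-initialize
      int a;
    };
    struct T {
      T() = default; // not user-provided: value-init zero-initializes first
      int b;
    };

    S s{};   // s.a is indeterminate
    T t{};   // t.b is guaranteed to be 0
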
@@ -3703,8 +3700,14 @@ static void TryUserDefinedConversion(Sema &S,
// Try to complete the type we're converting to.
if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
- DeclContext::lookup_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl);
+ DeclContext::lookup_iterator ConOrig, ConEndOrig;
+ llvm::tie(ConOrig, ConEndOrig) = S.LookupConstructors(DestRecordDecl);
+ // The container holding the constructors can under certain conditions
+ // be changed while iterating. To be safe we copy the lookup results
+ // to a new container.
+ SmallVector<NamedDecl*, 8> CopyOfCon(ConOrig, ConEndOrig);
+ for (SmallVector<NamedDecl*, 8>::iterator
+ Con = CopyOfCon.begin(), ConEnd = CopyOfCon.end();
Con != ConEnd; ++Con) {
NamedDecl *D = *Con;
DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
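
Note: the snapshot above exists because the loop body can trigger further work (for example, deserializing declarations from an AST file) that grows the container the lookup_iterators point into, invalidating them mid-iteration. A generic illustration of the copy-then-iterate pattern:

    #include <vector>

    struct NamedDecl {};

    void visitAll(std::vector<NamedDecl *> &LookupStorage) {
      // Per-element work may append to LookupStorage and reallocate it, which
      // would invalidate any iterator into the live container.
      std::vector<NamedDecl *> Snapshot(LookupStorage.begin(),
                                        LookupStorage.end());
      for (NamedDecl *D : Snapshot) {
        (void)D;
        LookupStorage.push_back(nullptr);   // safe: we iterate the snapshot
      }
    }
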
@@ -4413,7 +4416,7 @@ static ExprResult CopyObject(Sema &S,
if (const RecordType *Record = T->getAs<RecordType>())
Class = cast<CXXRecordDecl>(Record->getDecl());
if (!Class)
- return move(CurInit);
+ return CurInit;
// C++0x [class.copy]p32:
// When certain criteria are met, an implementation is allowed to
@@ -4435,7 +4438,7 @@ static ExprResult CopyObject(Sema &S,
// Make sure that the type we are copying is complete.
if (S.RequireCompleteType(Loc, T, diag::err_temp_copy_incomplete))
- return move(CurInit);
+ return CurInit;
// Perform overload resolution using the class's copy/move constructors.
// Only consider constructors and constructor templates. Per
@@ -4460,7 +4463,7 @@ static ExprResult CopyObject(Sema &S,
CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
if (!IsExtraneousCopy || S.isSFINAEContext())
return ExprError();
- return move(CurInit);
+ return CurInit;
case OR_Ambiguous:
S.Diag(Loc, diag::err_temp_copy_ambiguous)
@@ -4478,7 +4481,7 @@ static ExprResult CopyObject(Sema &S,
}
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
- ASTOwningVector<Expr*> ConstructorArgs(S);
+ SmallVector<Expr*, 8> ConstructorArgs;
CurInit.release(); // Ownership transferred into MultiExprArg, below.
S.CheckConstructorAccess(Loc, Constructor, Entity,
@@ -4521,7 +4524,7 @@ static ExprResult CopyObject(Sema &S,
// Actually perform the constructor call.
CurInit = S.BuildCXXConstructExpr(Loc, T, Constructor, Elidable,
- move_arg(ConstructorArgs),
+ ConstructorArgs,
HadMultipleCandidates,
/*ZeroInit*/ false,
CXXConstructExpr::CK_Complete,
@@ -4530,7 +4533,7 @@ static ExprResult CopyObject(Sema &S,
// If we're supposed to bind temporaries, do so.
if (!CurInit.isInvalid() && shouldBindAsTemporary(Entity))
CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
- return move(CurInit);
+ return CurInit;
}
/// \brief Check whether elidable copy construction for binding a reference to
@@ -4619,7 +4622,7 @@ PerformConstructorInitialization(Sema &S,
bool HadMultipleCandidates = Step.Function.HadMultipleCandidates;
// Build a call to the selected constructor.
- ASTOwningVector<Expr*> ConstructorArgs(S);
+ SmallVector<Expr*, 8> ConstructorArgs;
SourceLocation Loc = (Kind.isCopyInit() && Kind.getEqualLoc().isValid())
? Kind.getEqualLoc()
: Kind.getLocation();
@@ -4648,7 +4651,7 @@ PerformConstructorInitialization(Sema &S,
// Determine the arguments required to actually perform the constructor
// call.
- if (S.CompleteConstructorCall(Constructor, move(Args),
+ if (S.CompleteConstructorCall(Constructor, Args,
Loc, ConstructorArgs,
AllowExplicitConv))
return ExprError();
@@ -4660,8 +4663,6 @@ PerformConstructorInitialization(Sema &S,
(Kind.getKind() == InitializationKind::IK_Direct ||
Kind.getKind() == InitializationKind::IK_Value)))) {
// An explicitly-constructed temporary, e.g., X(1, 2).
- unsigned NumExprs = ConstructorArgs.size();
- Expr **Exprs = (Expr **)ConstructorArgs.take();
S.MarkFunctionReferenced(Loc, Constructor);
S.DiagnoseUseOfDecl(Constructor, Loc);
@@ -4675,8 +4676,7 @@ PerformConstructorInitialization(Sema &S,
CurInit = S.Owned(new (S.Context) CXXTemporaryObjectExpr(S.Context,
Constructor,
TSInfo,
- Exprs,
- NumExprs,
+ ConstructorArgs,
ParenRange,
HadMultipleCandidates,
ConstructorInitRequiresZeroInit));
@@ -4702,7 +4702,7 @@ PerformConstructorInitialization(Sema &S,
if (Entity.allowsNRVO())
CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
Constructor, /*Elidable=*/true,
- move_arg(ConstructorArgs),
+ ConstructorArgs,
HadMultipleCandidates,
ConstructorInitRequiresZeroInit,
ConstructKind,
@@ -4710,7 +4710,7 @@ PerformConstructorInitialization(Sema &S,
else
CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
Constructor,
- move_arg(ConstructorArgs),
+ ConstructorArgs,
HadMultipleCandidates,
ConstructorInitRequiresZeroInit,
ConstructKind,
@@ -4727,7 +4727,7 @@ PerformConstructorInitialization(Sema &S,
if (shouldBindAsTemporary(Entity))
CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
- return move(CurInit);
+ return CurInit;
}
/// Determine whether the specified InitializedEntity definitely has a lifetime
@@ -4775,7 +4775,7 @@ InitializationSequence::Perform(Sema &S,
QualType *ResultType) {
if (Failed()) {
unsigned NumArgs = Args.size();
- Diagnose(S, Entity, Kind, (Expr **)Args.release(), NumArgs);
+ Diagnose(S, Entity, Kind, Args.data(), NumArgs);
return ExprError();
}
@@ -4795,7 +4795,7 @@ InitializationSequence::Perform(Sema &S,
// introduced and such). So, we fall back to making the array
// type a dependently-sized array type with no specified
// bound.
- if (isa<InitListExpr>((Expr *)Args.get()[0])) {
+ if (isa<InitListExpr>((Expr *)Args[0])) {
SourceRange Brackets;
// Scavenge the location of the brackets from the entity, if we can.
@@ -4823,12 +4823,12 @@ InitializationSequence::Perform(Sema &S,
// Rebuild the ParenListExpr.
SourceRange ParenRange = Kind.getParenRange();
return S.ActOnParenListExpr(ParenRange.getBegin(), ParenRange.getEnd(),
- move(Args));
+ Args);
}
assert(Kind.getKind() == InitializationKind::IK_Copy ||
Kind.isExplicitCast() ||
Kind.getKind() == InitializationKind::IK_DirectList);
- return ExprResult(Args.release()[0]);
+ return ExprResult(Args[0]);
}
// No steps means no initialization.
@@ -4836,22 +4836,22 @@ InitializationSequence::Perform(Sema &S,
return S.Owned((Expr *)0);
if (S.getLangOpts().CPlusPlus0x && Entity.getType()->isReferenceType() &&
- Args.size() == 1 && isa<InitListExpr>(Args.get()[0]) &&
+ Args.size() == 1 && isa<InitListExpr>(Args[0]) &&
Entity.getKind() != InitializedEntity::EK_Parameter) {
// Produce a C++98 compatibility warning if we are initializing a reference
// from an initializer list. For parameters, we produce a better warning
// elsewhere.
- Expr *Init = Args.get()[0];
+ Expr *Init = Args[0];
S.Diag(Init->getLocStart(), diag::warn_cxx98_compat_reference_list_init)
<< Init->getSourceRange();
}
// Diagnose cases where we initialize a pointer to an array temporary, and the
// pointer obviously outlives the temporary.
- if (Args.size() == 1 && Args.get()[0]->getType()->isArrayType() &&
+ if (Args.size() == 1 && Args[0]->getType()->isArrayType() &&
Entity.getType()->isPointerType() &&
InitializedEntityOutlivesFullExpression(Entity)) {
- Expr *Init = Args.get()[0];
+ Expr *Init = Args[0];
Expr::LValueClassification Kind = Init->ClassifyLValue(S.Context);
if (Kind == Expr::LV_ClassTemporary || Kind == Expr::LV_ArrayTemporary)
S.Diag(Init->getLocStart(), diag::warn_temporary_array_to_pointer_decay)
@@ -4897,7 +4897,7 @@ InitializationSequence::Perform(Sema &S,
case SK_ProduceObjCObject:
case SK_StdInitializerList: {
assert(Args.size() == 1);
- CurInit = Args.get()[0];
+ CurInit = Args[0];
if (!CurInit.get()) return ExprError();
break;
}
@@ -4924,7 +4924,7 @@ InitializationSequence::Perform(Sema &S,
// initializer to reflect that choice.
S.CheckAddressOfMemberAccess(CurInit.get(), Step->Function.FoundDecl);
S.DiagnoseUseOfDecl(Step->Function.FoundDecl, Kind.getLocation());
- CurInit = S.FixOverloadedFunctionReference(move(CurInit),
+ CurInit = S.FixOverloadedFunctionReference(CurInit,
Step->Function.FoundDecl,
Step->Function.Function);
break;
@@ -5016,7 +5016,7 @@ InitializationSequence::Perform(Sema &S,
break;
case SK_ExtraneousCopyToTemporary:
- CurInit = CopyObject(S, Step->Type, Entity, move(CurInit),
+ CurInit = CopyObject(S, Step->Type, Entity, CurInit,
/*IsExtraneousCopy=*/true);
break;
@@ -5031,7 +5031,7 @@ InitializationSequence::Perform(Sema &S,
bool CreatedObject = false;
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Fn)) {
// Build a call to the selected constructor.
- ASTOwningVector<Expr*> ConstructorArgs(S);
+ SmallVector<Expr*, 8> ConstructorArgs;
SourceLocation Loc = CurInit.get()->getLocStart();
CurInit.release(); // Ownership transferred into MultiExprArg, below.
@@ -5045,7 +5045,7 @@ InitializationSequence::Perform(Sema &S,
// Build an expression that constructs a temporary.
CurInit = S.BuildCXXConstructExpr(Loc, Step->Type, Constructor,
- move_arg(ConstructorArgs),
+ ConstructorArgs,
HadMultipleCandidates,
/*ZeroInit*/ false,
CXXConstructExpr::CK_Complete,
@@ -5079,7 +5079,7 @@ InitializationSequence::Perform(Sema &S,
FoundFn, Conversion);
if(CurInitExprRes.isInvalid())
return ExprError();
- CurInit = move(CurInitExprRes);
+ CurInit = CurInitExprRes;
// Build the actual call to the conversion function.
CurInit = S.BuildCXXMemberCallExpr(CurInit.get(), FoundFn, Conversion,
@@ -5115,7 +5115,7 @@ InitializationSequence::Perform(Sema &S,
CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
if (RequiresCopy)
CurInit = CopyObject(S, Entity.getType().getNonReferenceType(), Entity,
- move(CurInit), /*IsExtraneousCopy=*/false);
+ CurInit, /*IsExtraneousCopy=*/false);
break;
}
@@ -5144,7 +5144,7 @@ InitializationSequence::Perform(Sema &S,
getAssignmentAction(Entity), CCK);
if (CurInitExprRes.isInvalid())
return ExprError();
- CurInit = move(CurInitExprRes);
+ CurInit = CurInitExprRes;
break;
}
@@ -5195,13 +5195,13 @@ InitializationSequence::Perform(Sema &S,
Entity.getType().getNonReferenceType());
bool UseTemporary = Entity.getType()->isReferenceType();
assert(Args.size() == 1 && "expected a single argument for list init");
- InitListExpr *InitList = cast<InitListExpr>(Args.get()[0]);
+ InitListExpr *InitList = cast<InitListExpr>(Args[0]);
S.Diag(InitList->getExprLoc(), diag::warn_cxx98_compat_ctor_list_init)
<< InitList->getSourceRange();
MultiExprArg Arg(InitList->getInits(), InitList->getNumInits());
CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity :
Entity,
- Kind, move(Arg), *Step,
+ Kind, Arg, *Step,
ConstructorInitRequiresZeroInit);
break;
}
@@ -5214,7 +5214,7 @@ InitializationSequence::Perform(Sema &S,
Expr *E = CurInit.take();
InitListExpr *Syntactic = Step->WrappingSyntacticList;
InitListExpr *ILE = new (S.Context) InitListExpr(S.Context,
- Syntactic->getLBraceLoc(), &E, 1, Syntactic->getRBraceLoc());
+ Syntactic->getLBraceLoc(), E, Syntactic->getRBraceLoc());
ILE->setSyntacticForm(Syntactic);
ILE->setType(E->getType());
ILE->setValueKind(E->getValueKind());
@@ -5234,7 +5234,7 @@ InitializationSequence::Perform(Sema &S,
bool UseTemporary = Entity.getType()->isReferenceType();
CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity
: Entity,
- Kind, move(Args), *Step,
+ Kind, Args, *Step,
ConstructorInitRequiresZeroInit);
break;
}
@@ -5268,15 +5268,15 @@ InitializationSequence::Perform(Sema &S,
case SK_CAssignment: {
QualType SourceType = CurInit.get()->getType();
- ExprResult Result = move(CurInit);
+ ExprResult Result = CurInit;
Sema::AssignConvertType ConvTy =
S.CheckSingleAssignmentConstraints(Step->Type, Result);
if (Result.isInvalid())
return ExprError();
- CurInit = move(Result);
+ CurInit = Result;
// If this is a call, allow conversion to a transparent union.
- ExprResult CurInitExprRes = move(CurInit);
+ ExprResult CurInitExprRes = CurInit;
if (ConvTy != Sema::Compatible &&
Entity.getKind() == InitializedEntity::EK_Parameter &&
S.CheckTransparentUnionArgumentConstraints(Step->Type, CurInitExprRes)
@@ -5284,7 +5284,7 @@ InitializationSequence::Perform(Sema &S,
ConvTy = Sema::Compatible;
if (CurInitExprRes.isInvalid())
return ExprError();
- CurInit = move(CurInitExprRes);
+ CurInit = CurInitExprRes;
bool Complained;
if (S.DiagnoseAssignmentResult(ConvTy, Kind.getLocation(),
@@ -5398,7 +5398,7 @@ InitializationSequence::Perform(Sema &S,
}
InitListExpr *Semantic = new (S.Context)
InitListExpr(S.Context, ILE->getLBraceLoc(),
- Converted.data(), NumInits, ILE->getRBraceLoc());
+ Converted, ILE->getRBraceLoc());
Semantic->setSyntacticForm(ILE);
Semantic->setType(Dest);
Semantic->setInitializesStdInitializerList();
@@ -5415,7 +5415,7 @@ InitializationSequence::Perform(Sema &S,
cast<FieldDecl>(Entity.getDecl()),
CurInit.get());
- return move(CurInit);
+ return CurInit;
}
//===----------------------------------------------------------------------===//
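
Note: a theme across this file's hunks is ASTOwningVector<Expr*> (with its release()/take() calls) giving way to SmallVector<Expr*, 8>: Clang AST nodes are allocated in the ASTContext's arena, so argument buffers never really owned their elements and the ownership-flavored vector was vestigial. A compact sketch of that arena model in standard C++, offered as an analogy only:

    #include <memory>
    #include <vector>

    struct Node { int k; };

    // The arena owns every node; containers of Node* are plain views.
    class Arena {
      std::vector<std::unique_ptr<Node>> Storage;
    public:
      Node *create(int k) {
        Storage.push_back(std::unique_ptr<Node>(new Node{k}));
        return Storage.back().get();
      }
    };

    int main() {
      Arena Ctx;                       // stands in for ASTContext
      std::vector<Node *> Args;        // like SmallVector<Expr*, 8>: non-owning
      Args.push_back(Ctx.create(1));
      return Args.front()->k == 1 ? 0 : 1;   // nodes die with the arena
    }
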
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
index 6414c6f..15cd2a7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
@@ -22,13 +22,14 @@ using namespace clang;
using namespace sema;
CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange,
+ TypeSourceInfo *Info,
bool KnownDependent) {
DeclContext *DC = CurContext;
while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
DC = DC->getParent();
// Start constructing the lambda class.
- CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC,
+ CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC, Info,
IntroducerRange.getBegin(),
KnownDependent);
DC->addDecl(Class);
@@ -369,15 +370,13 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
if (!TmplScope->decl_empty())
KnownDependent = true;
- CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, KnownDependent);
-
// Determine the signature of the call operator.
TypeSourceInfo *MethodTyInfo;
bool ExplicitParams = true;
bool ExplicitResultType = true;
bool ContainsUnexpandedParameterPack = false;
SourceLocation EndLoc;
- llvm::ArrayRef<ParmVarDecl *> Params;
+ llvm::SmallVector<ParmVarDecl *, 8> Params;
if (ParamInfo.getNumTypeObjects() == 0) {
// C++11 [expr.prim.lambda]p4:
// If a lambda-expression does not include a lambda-declarator, it is as
@@ -410,17 +409,25 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
ExplicitResultType
= MethodTyInfo->getType()->getAs<FunctionType>()->getResultType()
!= Context.DependentTy;
-
- TypeLoc TL = MethodTyInfo->getTypeLoc();
- FunctionProtoTypeLoc Proto = cast<FunctionProtoTypeLoc>(TL);
- Params = llvm::ArrayRef<ParmVarDecl *>(Proto.getParmArray(),
- Proto.getNumArgs());
+
+ if (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
+ cast<ParmVarDecl>(FTI.ArgInfo[0].Param)->getType()->isVoidType()) {
+ // Empty arg list, don't push any params.
+ checkVoidParamDecl(cast<ParmVarDecl>(FTI.ArgInfo[0].Param));
+ } else {
+ Params.reserve(FTI.NumArgs);
+ for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i)
+ Params.push_back(cast<ParmVarDecl>(FTI.ArgInfo[i].Param));
+ }
// Check for unexpanded parameter packs in the method type.
if (MethodTyInfo->getType()->containsUnexpandedParameterPack())
ContainsUnexpandedParameterPack = true;
}
-
+
+ CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo,
+ KnownDependent);
+
CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range,
MethodTyInfo, EndLoc, Params);
@@ -528,6 +535,10 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
continue;
}
+ // Ignore invalid decls; they'll just confuse the code later.
+ if (Var->isInvalidDecl())
+ continue;
+
if (!Var->hasLocalStorage()) {
Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id;
Diag(Var->getLocation(), diag::note_previous_decl) << C->Id;
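
Note: the lambda hunks above stop aliasing the parameter array owned by the method's TypeLoc and instead copy the parameters into a local SmallVector, treating a single unnamed parameter of type void as an empty list; the closure type is also created only after the signature is known, so it can carry the TypeSourceInfo. A simplified model of the parameter collection (stand-in types, not Clang's declarator structures):

    #include <vector>

    struct ParmVarDecl {
      bool Void = false;
      bool isVoidType() const { return Void; }
    };

    std::vector<ParmVarDecl *> collectParams(ParmVarDecl **Args, unsigned N,
                                             bool Variadic) {
      std::vector<ParmVarDecl *> Params;   // owned copy, independent of TypeLoc
      if (N == 1 && !Variadic && Args[0]->isVoidType())
        return Params;                     // '(void)' means no parameters
      Params.reserve(N);
      for (unsigned I = 0; I != N; ++I)
        Params.push_back(Args[I]);
      return Params;
    }
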
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
index dad196b..f6987e7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
@@ -707,7 +707,7 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
// result), perform template argument deduction and place the
// specialization into the result set. We do this to avoid forcing all
// callers to perform special deduction for conversion functions.
- TemplateDeductionInfo Info(R.getSema().Context, R.getNameLoc());
+ TemplateDeductionInfo Info(R.getNameLoc());
FunctionDecl *Specialization = 0;
const FunctionProtoType *ConvProto
@@ -1725,15 +1725,17 @@ bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
namespace {
struct AssociatedLookup {
- AssociatedLookup(Sema &S,
+ AssociatedLookup(Sema &S, SourceLocation InstantiationLoc,
Sema::AssociatedNamespaceSet &Namespaces,
Sema::AssociatedClassSet &Classes)
- : S(S), Namespaces(Namespaces), Classes(Classes) {
+ : S(S), Namespaces(Namespaces), Classes(Classes),
+ InstantiationLoc(InstantiationLoc) {
}
Sema &S;
Sema::AssociatedNamespaceSet &Namespaces;
Sema::AssociatedClassSet &Classes;
+ SourceLocation InstantiationLoc;
};
}
@@ -1796,6 +1798,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
case TemplateArgument::Expression:
+ case TemplateArgument::NullPtr:
// [Note: non-type template arguments do not contribute to the set of
// associated namespaces. ]
break;
@@ -1864,8 +1867,10 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
// Only recurse into base classes for complete types.
if (!Class->hasDefinition()) {
- // FIXME: we might need to instantiate templates here
- return;
+ QualType type = Result.S.Context.getTypeDeclType(Class);
+ if (Result.S.RequireCompleteType(Result.InstantiationLoc, type,
+ /*no diagnostic*/ 0))
+ return;
}
// Add direct and indirect base classes along with their associated
@@ -2069,13 +2074,15 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
/// namespaces searched by argument-dependent lookup
/// (C++ [basic.lookup.argdep]) for a given set of arguments.
void
-Sema::FindAssociatedClassesAndNamespaces(llvm::ArrayRef<Expr *> Args,
+Sema::FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
+ llvm::ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses) {
AssociatedNamespaces.clear();
AssociatedClasses.clear();
- AssociatedLookup Result(*this, AssociatedNamespaces, AssociatedClasses);
+ AssociatedLookup Result(*this, InstantiationLoc,
+ AssociatedNamespaces, AssociatedClasses);
// C++ [basic.lookup.koenig]p2:
// For each argument type T in the function call, there is a set
@@ -2642,17 +2649,14 @@ void ADLResult::insert(NamedDecl *New) {
void Sema::ArgumentDependentLookup(DeclarationName Name, bool Operator,
SourceLocation Loc,
llvm::ArrayRef<Expr *> Args,
- ADLResult &Result,
- bool StdNamespaceIsAssociated) {
+ ADLResult &Result) {
// Find all of the associated namespaces and classes based on the
// arguments we have.
AssociatedNamespaceSet AssociatedNamespaces;
AssociatedClassSet AssociatedClasses;
- FindAssociatedClassesAndNamespaces(Args,
+ FindAssociatedClassesAndNamespaces(Loc, Args,
AssociatedNamespaces,
AssociatedClasses);
- if (StdNamespaceIsAssociated && StdNamespace)
- AssociatedNamespaces.insert(getStdNamespace());
QualType T1, T2;
if (Operator) {
@@ -2661,13 +2665,6 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, bool Operator,
T2 = Args[1]->getType();
}
- // Try to complete all associated classes, in case they contain a
- // declaration of a friend function.
- for (AssociatedClassSet::iterator C = AssociatedClasses.begin(),
- CEnd = AssociatedClasses.end();
- C != CEnd; ++C)
- RequireCompleteType(Loc, Context.getRecordType(*C), 0);
-
// C++ [basic.lookup.argdep]p3:
// Let X be the lookup set produced by unqualified lookup (3.4.1)
// and let Y be the lookup set produced by argument dependent
@@ -4056,7 +4053,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
if (IsUnqualifiedLookup)
UnqualifiedTyposCorrected[Typo] = Result;
- return Result;
+ TypoCorrection TC = Result;
+ TC.setCorrectionRange(SS, TypoName);
+ return TC;
}
else if (BestResults.size() > 1
// Ugly hack equivalent to CTC == CTC_ObjCMessageReceiver;
@@ -4076,7 +4075,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
if (IsUnqualifiedLookup)
UnqualifiedTyposCorrected[Typo] = BestResults["super"].front();
- return BestResults["super"].front();
+ TypoCorrection TC = BestResults["super"].front();
+ TC.setCorrectionRange(SS, TypoName);
+ return TC;
}
// If this was an unqualified lookup and we believe the callback object did
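
Note: the ADL hunks above fold completion into building the associated set: rather than eagerly calling RequireCompleteType on every associated class after the fact, an incomplete class template specialization is instantiated while it is collected, at the supplied instantiation location. Why completion matters for ADL at all, in plain C++:

    // A friend function defined inside a class template only comes into being
    // when the class is instantiated, and it is findable only through ADL.
    template <typename T>
    struct Wrapper {
      friend int touch(Wrapper) { return 42; }
    };

    int use() {
      Wrapper<int> w;    // instantiation creates touch(Wrapper<int>)
      return touch(w);   // found via ADL in Wrapper<int>'s associated set
    }
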
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
index 27deab2..8d70860 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
@@ -22,6 +22,7 @@
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
+#include "clang/Lex/Preprocessor.h"
using namespace clang;
@@ -43,7 +44,7 @@ static Qualifiers::ObjCLifetime getImpliedARCOwnership(
if (attrs & (ObjCPropertyDecl::OBJC_PR_retain |
ObjCPropertyDecl::OBJC_PR_strong |
ObjCPropertyDecl::OBJC_PR_copy)) {
- return type->getObjCARCImplicitLifetime();
+ return Qualifiers::OCL_Strong;
} else if (attrs & ObjCPropertyDecl::OBJC_PR_weak) {
return Qualifiers::OCL_Weak;
} else if (attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) {
@@ -102,6 +103,15 @@ static void checkARCPropertyDecl(Sema &S, ObjCPropertyDecl *property) {
<< propertyLifetime;
}
+static unsigned deduceWeakPropertyFromType(Sema &S, QualType T) {
+ if ((S.getLangOpts().getGC() != LangOptions::NonGC &&
+ T.isObjCGCWeak()) ||
+ (S.getLangOpts().ObjCAutoRefCount &&
+ T.getObjCLifetime() == Qualifiers::OCL_Weak))
+ return ObjCDeclSpec::DQ_PR_weak;
+ return 0;
+}
+
Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
@@ -114,12 +124,8 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
unsigned Attributes = ODS.getPropertyAttributes();
TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
QualType T = TSI->getType();
- if ((getLangOpts().getGC() != LangOptions::NonGC &&
- T.isObjCGCWeak()) ||
- (getLangOpts().ObjCAutoRefCount &&
- T.getObjCLifetime() == Qualifiers::OCL_Weak))
- Attributes |= ObjCDeclSpec::DQ_PR_weak;
-
+ Attributes |= deduceWeakPropertyFromType(*this, T);
+
bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
// default is readwrite!
!(Attributes & ObjCDeclSpec::DQ_PR_readonly));
@@ -236,6 +242,15 @@ static bool LocPropertyAttribute( ASTContext &Context, const char *attrName,
}
+static unsigned getOwnershipRule(unsigned attr) {
+ return attr & (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_weak |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+}
+
Decl *
Sema::HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
@@ -335,6 +350,7 @@ Sema::HandlePropertyInClassExtension(Scope *S,
Diag(AtLoc,
diag::err_type_mismatch_continuation_class) << PDecl->getType();
Diag(PIDecl->getLocation(), diag::note_property_declare);
+ return 0;
}
}
@@ -342,13 +358,11 @@ Sema::HandlePropertyInClassExtension(Scope *S,
// with continuation class's readwrite property attribute!
unsigned PIkind = PIDecl->getPropertyAttributesAsWritten();
if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) {
- unsigned retainCopyNonatomic =
- (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_nonatomic);
- if ((Attributes & retainCopyNonatomic) !=
- (PIkind & retainCopyNonatomic)) {
+ PIkind |= deduceWeakPropertyFromType(*this, PIDecl->getType());
+ unsigned ClassExtensionMemoryModel = getOwnershipRule(Attributes);
+ unsigned PrimaryClassMemoryModel = getOwnershipRule(PIkind);
+ if (PrimaryClassMemoryModel && ClassExtensionMemoryModel &&
+ (PrimaryClassMemoryModel != ClassExtensionMemoryModel)) {
Diag(AtLoc, diag::warn_property_attr_mismatch);
Diag(PIDecl->getLocation(), diag::note_property_declare);
}
@@ -397,6 +411,7 @@ Sema::HandlePropertyInClassExtension(Scope *S,
Diag(AtLoc, diag)
<< CCPrimary->getDeclName();
Diag(PIDecl->getLocation(), diag::note_property_declare);
+ return 0;
}
*isOverridingProperty = true;
// Make sure setter decl is synthesized, and added to primary class's list.
@@ -405,7 +420,7 @@ Sema::HandlePropertyInClassExtension(Scope *S,
PDecl->setSetterMethodDecl(PIDecl->getSetterMethodDecl());
if (ASTMutationListener *L = Context.getASTMutationListener())
L->AddedObjCPropertyInClassExtension(PDecl, PIDecl, CDecl);
- return 0;
+ return PDecl;
}
ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
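
Note: the class-extension hunk above compares ownership at a coarser grain: both declarations are reduced to their ownership bits, the primary declaration first inheriting an implied 'weak' deduced from its type, and a mismatch is diagnosed only when both sides actually state an ownership model. A bitmask model of that check, with illustrative constants rather than Clang's enum values:

    enum : unsigned {
      PR_assign            = 1u << 0,
      PR_retain            = 1u << 1,
      PR_copy              = 1u << 2,
      PR_weak              = 1u << 3,
      PR_strong            = 1u << 4,
      PR_unsafe_unretained = 1u << 5,
    };

    unsigned ownershipRule(unsigned Attrs) {
      return Attrs & (PR_assign | PR_retain | PR_copy |
                      PR_weak | PR_strong | PR_unsafe_unretained);
    }

    bool ownershipMismatch(unsigned Primary, unsigned Extension) {
      unsigned P = ownershipRule(Primary), E = ownershipRule(Extension);
      return P && E && P != E;   // silent when either side leaves it implicit
    }
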
@@ -543,6 +558,23 @@ static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
ivarLifetime == Qualifiers::OCL_Autoreleasing)
return;
+ // If the ivar is private, and it's implicitly __unsafe_unretained
+ // becaues of its type, then pretend it was actually implicitly
+ // __strong. This is only sound because we're processing the
+ // property implementation before parsing any method bodies.
+ if (ivarLifetime == Qualifiers::OCL_ExplicitNone &&
+ propertyLifetime == Qualifiers::OCL_Strong &&
+ ivar->getAccessControl() == ObjCIvarDecl::Private) {
+ SplitQualType split = ivarType.split();
+ if (split.Quals.hasObjCLifetime()) {
+ assert(ivarType->isObjCARCImplicitlyUnretainedType());
+ split.Quals.setObjCLifetime(Qualifiers::OCL_Strong);
+ ivarType = S.Context.getQualifiedType(split);
+ ivar->setType(ivarType);
+ return;
+ }
+ }
+
switch (propertyLifetime) {
case Qualifiers::OCL_Strong:
S.Diag(propertyImplLoc, diag::err_arc_strong_property_ownership)
@@ -632,7 +664,13 @@ DiagnoseClassAndClassExtPropertyMismatch(Sema &S, ObjCInterfaceDecl *ClassDecl,
// property.
if (Attributes & ObjCDeclSpec::DQ_PR_readonly) {
if (!classExtPropertyAttr ||
- (classExtPropertyAttr & ObjCDeclSpec::DQ_PR_readwrite))
+ (classExtPropertyAttr &
+ (ObjCDeclSpec::DQ_PR_readwrite|
+ ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong)))
continue;
warn = true;
break;
@@ -857,13 +895,15 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (lifetime == Qualifiers::OCL_Weak) {
bool err = false;
if (const ObjCObjectPointerType *ObjT =
- PropertyIvarType->getAs<ObjCObjectPointerType>())
- if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable()) {
+ PropertyIvarType->getAs<ObjCObjectPointerType>()) {
+ const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl();
+ if (ObjI && ObjI->isArcWeakrefUnavailable()) {
Diag(PropertyDiagLoc, diag::err_arc_weak_unavailable_property);
Diag(property->getLocation(), diag::note_property_declare);
err = true;
}
- if (!err && !getLangOpts().ObjCRuntimeHasWeak) {
+ }
+ if (!err && !getLangOpts().ObjCARCWeak) {
Diag(PropertyDiagLoc, diag::err_arc_weak_no_runtime);
Diag(property->getLocation(), diag::note_property_declare);
}
@@ -891,7 +931,6 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Ivar->setInvalidDecl();
ClassImpDecl->addDecl(Ivar);
IDecl->makeDeclVisibleInContext(Ivar);
- property->setPropertyIvarDecl(Ivar);
if (getLangOpts().ObjCRuntime.isFragile())
Diag(PropertyDiagLoc, diag::error_missing_property_ivar_decl)
@@ -907,14 +946,15 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
<< Ivar << Ivar->getName();
// Note! I deliberately want it to fall thru so more errors are caught.
}
+ property->setPropertyIvarDecl(Ivar);
+
QualType IvarType = Context.getCanonicalType(Ivar->getType());
// Check that type of property and its ivar are type compatible.
if (!Context.hasSameType(PropertyIvarType, IvarType)) {
- compat = false;
if (isa<ObjCObjectPointerType>(PropertyIvarType)
&& isa<ObjCObjectPointerType>(IvarType))
- compat =
+ compat =
Context.canAssignObjCInterfaces(
PropertyIvarType->getAs<ObjCObjectPointerType>(),
IvarType->getAs<ObjCObjectPointerType>());
@@ -988,19 +1028,21 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// For Objective-C++, need to synthesize the AST for the IVAR object to be
// returned by the getter as it must conform to C++'s copy-return rules.
// FIXME. Eventually we want to do this for Objective-C as well.
+ SynthesizedFunctionScope Scope(*this, getterMethod);
ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl();
DeclRefExpr *SelfExpr =
new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
- VK_RValue, SourceLocation());
+ VK_RValue, PropertyDiagLoc);
+ MarkDeclRefReferenced(SelfExpr);
Expr *IvarRefExpr =
- new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc,
+ new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), PropertyDiagLoc,
SelfExpr, true, true);
ExprResult Res =
PerformCopyInitialization(InitializedEntity::InitializeResult(
- SourceLocation(),
+ PropertyDiagLoc,
getterMethod->getResultType(),
/*NRVO=*/false),
- SourceLocation(),
+ PropertyDiagLoc,
Owned(IvarRefExpr));
if (!Res.isInvalid()) {
Expr *ResExpr = Res.takeAs<Expr>();
@@ -1021,19 +1063,22 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr &&
Ivar->getType()->isRecordType()) {
// FIXME. Eventually we want to do this for Objective-C as well.
+ SynthesizedFunctionScope Scope(*this, setterMethod);
ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
DeclRefExpr *SelfExpr =
new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
- VK_RValue, SourceLocation());
+ VK_RValue, PropertyDiagLoc);
+ MarkDeclRefReferenced(SelfExpr);
Expr *lhs =
- new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc,
+ new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), PropertyDiagLoc,
SelfExpr, true, true);
ObjCMethodDecl::param_iterator P = setterMethod->param_begin();
ParmVarDecl *Param = (*P);
QualType T = Param->getType().getNonReferenceType();
- Expr *rhs = new (Context) DeclRefExpr(Param, false, T,
- VK_LValue, SourceLocation());
- ExprResult Res = BuildBinOp(S, lhs->getLocEnd(),
+ DeclRefExpr *rhs = new (Context) DeclRefExpr(Param, false, T,
+ VK_LValue, PropertyDiagLoc);
+ MarkDeclRefReferenced(rhs);
+ ExprResult Res = BuildBinOp(S, PropertyDiagLoc,
BO_Assign, lhs, rhs);
if (property->getPropertyAttributes() &
ObjCPropertyDecl::OBJC_PR_atomic) {
@@ -1043,7 +1088,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (const FunctionDecl *FuncDecl = CXXCE->getDirectCallee())
if (!FuncDecl->isTrivial())
if (property->getType()->isReferenceType()) {
- Diag(PropertyLoc,
+ Diag(PropertyDiagLoc,
diag::err_atomic_property_nontrivial_assign_op)
<< property->getType();
Diag(FuncDecl->getLocStart(),
@@ -1395,8 +1440,8 @@ bool Sema::isPropertyReadonly(ObjCPropertyDecl *PDecl,
/// CollectImmediateProperties - This routine collects all properties in
/// the class and its conforming protocols, but not those in its superclass.
void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap) {
+ ObjCContainerDecl::PropertyMap &PropMap,
+ ObjCContainerDecl::PropertyMap &SuperPropMap) {
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
E = IDecl->prop_end(); P != E; ++P) {
@@ -1442,118 +1487,38 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
}
}
-/// CollectClassPropertyImplementations - This routine collects list of
-/// properties to be implemented in the class. This includes, class's
-/// and its conforming protocols' properties.
-static void CollectClassPropertyImplementations(ObjCContainerDecl *CDecl,
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap) {
- if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
- for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
- E = IDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = *P;
- PropMap[Prop->getIdentifier()] = Prop;
- }
- for (ObjCInterfaceDecl::all_protocol_iterator
- PI = IDecl->all_referenced_protocol_begin(),
- E = IDecl->all_referenced_protocol_end(); PI != E; ++PI)
- CollectClassPropertyImplementations((*PI), PropMap);
- }
- else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) {
- for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
- E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = *P;
- if (!PropMap.count(Prop->getIdentifier()))
- PropMap[Prop->getIdentifier()] = Prop;
- }
- // scan through protocol's protocols.
- for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
- E = PDecl->protocol_end(); PI != E; ++PI)
- CollectClassPropertyImplementations((*PI), PropMap);
- }
-}
-
/// CollectSuperClassPropertyImplementations - This routine collects the list
/// of properties to be implemented in the superclass(es), including those
/// coming from their conforming protocols.
static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl,
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap) {
+ ObjCInterfaceDecl::PropertyMap &PropMap) {
if (ObjCInterfaceDecl *SDecl = CDecl->getSuperClass()) {
while (SDecl) {
- CollectClassPropertyImplementations(SDecl, PropMap);
+ SDecl->collectPropertiesToImplement(PropMap);
SDecl = SDecl->getSuperClass();
}
}
}
-/// LookupPropertyDecl - Looks up a property in the current class and all
-/// its protocols.
-ObjCPropertyDecl *Sema::LookupPropertyDecl(const ObjCContainerDecl *CDecl,
- IdentifierInfo *II) {
- if (const ObjCInterfaceDecl *IDecl =
- dyn_cast<ObjCInterfaceDecl>(CDecl)) {
- for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
- E = IDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = *P;
- if (Prop->getIdentifier() == II)
- return Prop;
- }
- // scan through class's protocols.
- for (ObjCInterfaceDecl::all_protocol_iterator
- PI = IDecl->all_referenced_protocol_begin(),
- E = IDecl->all_referenced_protocol_end(); PI != E; ++PI) {
- ObjCPropertyDecl *Prop = LookupPropertyDecl((*PI), II);
- if (Prop)
- return Prop;
- }
- }
- else if (const ObjCProtocolDecl *PDecl =
- dyn_cast<ObjCProtocolDecl>(CDecl)) {
- for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
- E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = *P;
- if (Prop->getIdentifier() == II)
- return Prop;
- }
- // scan through protocol's protocols.
- for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
- E = PDecl->protocol_end(); PI != E; ++PI) {
- ObjCPropertyDecl *Prop = LookupPropertyDecl((*PI), II);
- if (Prop)
- return Prop;
- }
- }
- return 0;
-}
-
-static IdentifierInfo * getDefaultSynthIvarName(ObjCPropertyDecl *Prop,
- ASTContext &Ctx) {
- SmallString<128> ivarName;
- {
- llvm::raw_svector_ostream os(ivarName);
- os << '_' << Prop->getIdentifier()->getName();
- }
- return &Ctx.Idents.get(ivarName.str());
-}
-
/// \brief Default synthesizes all properties which must be synthesized
/// in class's \@implementation.
void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl) {
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> PropMap;
- CollectClassPropertyImplementations(IDecl, PropMap);
+ ObjCInterfaceDecl::PropertyMap PropMap;
+ IDecl->collectPropertiesToImplement(PropMap);
if (PropMap.empty())
return;
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> SuperPropMap;
+ ObjCInterfaceDecl::PropertyMap SuperPropMap;
CollectSuperClassPropertyImplementations(IDecl, SuperPropMap);
- for (llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>::iterator
+ for (ObjCInterfaceDecl::PropertyMap::iterator
P = PropMap.begin(), E = PropMap.end(); P != E; ++P) {
ObjCPropertyDecl *Prop = P->second;
// If property to be implemented in the super class, ignore.
if (SuperPropMap[Prop->getIdentifier()])
continue;
- // Is there a matching propery synthesize/dynamic?
+ // Is there a matching property synthesize/dynamic?
if (Prop->isInvalidDecl() ||
Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional ||
IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier()))
@@ -1583,7 +1548,7 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
ActOnPropertyImplDecl(S, SourceLocation(), SourceLocation(),
true,
/* property = */ Prop->getIdentifier(),
- /* ivar = */ getDefaultSynthIvarName(Prop, Context),
+ /* ivar = */ Prop->getDefaultSynthIvarName(Context),
Prop->getLocation()));
if (PIDecl) {
Diag(Prop->getLocation(), diag::warn_missing_explicit_synthesis);
@@ -1606,11 +1571,11 @@ void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) {
void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
const SelectorSet &InsMap) {
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> SuperPropMap;
+ ObjCContainerDecl::PropertyMap SuperPropMap;
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl))
CollectSuperClassPropertyImplementations(IDecl, SuperPropMap);
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> PropMap;
+ ObjCContainerDecl::PropertyMap PropMap;
CollectImmediateProperties(CDecl, PropMap, SuperPropMap);
if (PropMap.empty())
return;
@@ -1621,7 +1586,7 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
EI = IMPDecl->propimpl_end(); I != EI; ++I)
PropImplMap.insert(I->getPropertyDecl());
- for (llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>::iterator
+ for (ObjCContainerDecl::PropertyMap::iterator
P = PropMap.begin(), E = PropMap.end(); P != E; ++P) {
ObjCPropertyDecl *Prop = P->second;
// Is there a matching property synthesize/dynamic?
@@ -1847,7 +1812,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
GetterMethod = ObjCMethodDecl::Create(Context, Loc, Loc,
property->getGetterName(),
property->getType(), 0, CD, /*isInstance=*/true,
- /*isVariadic=*/false, /*isSynthesized=*/true,
+ /*isVariadic=*/false, /*isPropertyAccessor=*/true,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
(property->getPropertyImplementation() ==
ObjCPropertyDecl::Optional) ?
@@ -1867,7 +1832,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
} else
// A user declared getter will be synthesized when @synthesize of
// the property with the same name is seen in the @implementation
- GetterMethod->setSynthesized(true);
+ GetterMethod->setPropertyAccessor(true);
property->setGetterMethodDecl(GetterMethod);
// Skip setter if property is read-only.
@@ -1885,7 +1850,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCMethodDecl::Create(Context, Loc, Loc,
property->getSetterName(), Context.VoidTy, 0,
CD, /*isInstance=*/true, /*isVariadic=*/false,
- /*isSynthesized=*/true,
+ /*isPropertyAccessor=*/true,
/*isImplicitlyDeclared=*/true,
/*isDefined=*/false,
(property->getPropertyImplementation() ==
@@ -1916,7 +1881,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
} else
// A user declared setter will be synthesized when @synthesize of
// the property with the same name is seen in the @implementation
- SetterMethod->setSynthesized(true);
+ SetterMethod->setPropertyAccessor(true);
property->setSetterMethodDecl(SetterMethod);
}
// Add any synthesized methods to the global pool. This allows us to
@@ -2121,7 +2086,9 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
// issue any warning.
if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC)
;
- else {
+ else if (propertyInPrimaryClass) {
+ // Don't issue a warning on a property with no lifetime in a class
+ // extension, as it is inherited from the property in the primary class.
// Skip this warning in gc-only mode.
if (getLangOpts().getGC() != LangOptions::GCOnly)
Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
index 9382f7d..9111878 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
@@ -49,7 +49,7 @@ CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, bool HadMultipleCandidates,
E = S.DefaultFunctionArrayConversion(E.take());
if (E.isInvalid())
return ExprError();
- return move(E);
+ return E;
}
static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
@@ -555,6 +555,7 @@ static MakeDeductionFailureInfo(ASTContext &Context,
Result.Data = 0;
switch (TDK) {
case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
@@ -597,6 +598,7 @@ static MakeDeductionFailureInfo(ASTContext &Context,
void OverloadCandidate::DeductionFailureInfo::Destroy() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_Incomplete:
case Sema::TDK_TooManyArguments:
@@ -637,6 +639,7 @@ TemplateParameter
OverloadCandidate::DeductionFailureInfo::getTemplateParameter() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
@@ -664,6 +667,7 @@ TemplateArgumentList *
OverloadCandidate::DeductionFailureInfo::getTemplateArgumentList() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
@@ -688,6 +692,7 @@ OverloadCandidate::DeductionFailureInfo::getTemplateArgumentList() {
const TemplateArgument *OverloadCandidate::DeductionFailureInfo::getFirstArg() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_Incomplete:
case Sema::TDK_TooManyArguments:
@@ -713,6 +718,7 @@ const TemplateArgument *
OverloadCandidate::DeductionFailureInfo::getSecondArg() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
+ case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_Incomplete:
case Sema::TDK_TooManyArguments:
@@ -734,13 +740,17 @@ OverloadCandidate::DeductionFailureInfo::getSecondArg() {
return 0;
}
-void OverloadCandidateSet::clear() {
+void OverloadCandidateSet::destroyCandidates() {
for (iterator i = begin(), e = end(); i != e; ++i) {
for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
i->Conversions[ii].~ImplicitConversionSequence();
if (!i->Viable && i->FailureKind == ovl_fail_bad_deduction)
i->DeductionFailure.Destroy();
}
+}
+
+void OverloadCandidateSet::clear() {
+ destroyCandidates();
NumInlineSequences = 0;
Candidates.clear();
Functions.clear();
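The hunk above factors the element teardown out of OverloadCandidateSet::clear() into destroyCandidates(), so that clear() and the class's destructor (not shown in this excerpt) can share one path for destroying the placement-new'd ImplicitConversionSequence objects. A minimal, self-contained sketch of that idiom, using hypothetical names rather than the clang classes:

    #include <new>

    struct Elt { ~Elt() {} };  // stand-in for ImplicitConversionSequence

    class InlineSet {
      alignas(Elt) unsigned char Storage[8 * sizeof(Elt)];
      unsigned Count = 0;

      // Factored-out teardown, shared by clear() and the destructor:
      // elements live in raw inline storage, so they must be destroyed
      // explicitly before the storage is reused or released.
      void destroyElements() {
        for (unsigned i = 0; i != Count; ++i)
          reinterpret_cast<Elt *>(Storage)[i].~Elt();
      }

    public:
      void push() { new (Storage + Count * sizeof(Elt)) Elt(); ++Count; }
      void clear() { destroyElements(); Count = 0; }  // reset for reuse
      ~InlineSet() { destroyElements(); }
    };

    int main() { InlineSet S; S.push(); S.clear(); }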
@@ -1668,7 +1678,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
return To->getKind() == BuiltinType::UInt;
}
- // C++0x [conv.prom]p3:
+ // C++11 [conv.prom]p3:
// A prvalue of an unscoped enumeration type whose underlying type is not
// fixed (7.2) can be converted to an rvalue a prvalue of the first of the
// following types that can represent all the values of the enumeration
@@ -1680,12 +1690,26 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// with lowest integer conversion rank (4.13) greater than the rank of long
// long in which all the values of the enumeration can be represented. If
// there are two such extended types, the signed one is chosen.
+ // C++11 [conv.prom]p4:
+ // A prvalue of an unscoped enumeration type whose underlying type is fixed
+ // can be converted to a prvalue of its underlying type. Moreover, if
+ // integral promotion can be applied to its underlying type, a prvalue of an
+ // unscoped enumeration type whose underlying type is fixed can also be
+ // converted to a prvalue of the promoted underlying type.
if (const EnumType *FromEnumType = FromType->getAs<EnumType>()) {
// C++0x 7.2p9: Note that this implicit enum to int conversion is not
// provided for a scoped enumeration.
if (FromEnumType->getDecl()->isScoped())
return false;
+ // We can perform an integral promotion to the underlying type of the enum,
+ // even if that's not the promoted type.
+ if (FromEnumType->getDecl()->isFixed()) {
+ QualType Underlying = FromEnumType->getDecl()->getIntegerType();
+ return Context.hasSameUnqualifiedType(Underlying, ToType) ||
+ IsIntegralPromotion(From, Underlying, ToType);
+ }
+
// We have already pre-calculated the promotion type, so this is trivial.
if (ToType->isIntegerType() &&
!RequireCompleteType(From->getLocStart(), FromType, 0))
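The new isFixed() branch implements C++11 [conv.prom]p4 as quoted above: an unscoped enumeration whose underlying type is fixed promotes to that underlying type, and from there to whatever the underlying type itself promotes to. A small illustration of the source-level behavior the check accepts (the names are mine, not from the patch):

    #include <type_traits>

    enum Fixed : unsigned char { A = 1 };  // unscoped, fixed underlying type

    int take_uchar(unsigned char c) { return c; }
    int take_int(int i) { return i; }

    int main() {
      Fixed e = A;
      int a = take_uchar(e);  // promotes to the underlying type...
      int b = take_int(e);    // ...and to the promoted underlying type (int)
      static_assert(std::is_same<std::underlying_type<Fixed>::type,
                                 unsigned char>::value, "");
      return a + b - 2;       // 0
    }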
@@ -2899,8 +2923,6 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
case OR_Success: {
// Record the standard conversion we used and the conversion function.
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
- S.MarkFunctionReferenced(From->getLocStart(), Constructor);
-
QualType ThisType = Constructor->getThisType(S.Context);
// Initializer lists don't have conversions as such.
User.Before.setAsIdentityConversion();
@@ -3081,8 +3103,6 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// Record the standard conversion we used and the conversion function.
if (CXXConstructorDecl *Constructor
= dyn_cast<CXXConstructorDecl>(Best->Function)) {
- S.MarkFunctionReferenced(From->getLocStart(), Constructor);
-
// C++ [over.ics.user]p1:
// If the user-defined conversion is specified by a
// constructor (12.3.1), the initial standard conversion
@@ -3111,8 +3131,6 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
}
if (CXXConversionDecl *Conversion
= dyn_cast<CXXConversionDecl>(Best->Function)) {
- S.MarkFunctionReferenced(From->getLocStart(), Conversion);
-
// C++ [over.ics.user]p1:
//
// [...] If the user-defined conversion is specified by a
@@ -4025,8 +4043,6 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
if (!Best->FinalConversion.DirectBinding)
return false;
- if (Best->Function)
- S.MarkFunctionReferenced(DeclLoc, Best->Function);
ICS.setUserDefined();
ICS.UserDefined.Before = Best->Conversions[0].Standard;
ICS.UserDefined.After = Best->FinalConversion;
@@ -5531,7 +5547,7 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
// functions. In such a case, the candidate functions generated from each
// function template are combined with the set of non-template candidate
// functions.
- TemplateDeductionInfo Info(Context, CandidateSet.getLocation());
+ TemplateDeductionInfo Info(CandidateSet.getLocation());
FunctionDecl *Specialization = 0;
if (TemplateDeductionResult Result
= DeduceTemplateArguments(MethodTmpl, ExplicitTemplateArgs, Args,
@@ -5581,7 +5597,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
// functions. In such a case, the candidate functions generated from each
// function template are combined with the set of non-template candidate
// functions.
- TemplateDeductionInfo Info(Context, CandidateSet.getLocation());
+ TemplateDeductionInfo Info(CandidateSet.getLocation());
FunctionDecl *Specialization = 0;
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, Args,
@@ -5703,7 +5719,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// there are 0 arguments (i.e., nothing is allocated using ASTContext's
// allocator).
QualType CallResultType = ConversionType.getNonLValueExprType(Context);
- CallExpr Call(Context, &ConversionFn, 0, 0, CallResultType, VK,
+ CallExpr Call(Context, &ConversionFn, MultiExprArg(), CallResultType, VK,
From->getLocStart());
ImplicitConversionSequence ICS =
TryCopyInitialization(*this, &Call, ToType,
@@ -5765,7 +5781,7 @@ Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
if (!CandidateSet.isNewCandidate(FunctionTemplate))
return;
- TemplateDeductionInfo Info(Context, CandidateSet.getLocation());
+ TemplateDeductionInfo Info(CandidateSet.getLocation());
CXXConversionDecl *Specialization = 0;
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, ToType,
@@ -6770,17 +6786,16 @@ public:
// bool operator==(T, T);
// bool operator!=(T, T);
void addRelationalPointerOrEnumeralOverloads() {
- // C++ [over.built]p1:
- // If there is a user-written candidate with the same name and parameter
- // types as a built-in candidate operator function, the built-in operator
- // function is hidden and is not included in the set of candidate
- // functions.
+ // C++ [over.match.oper]p3:
+ // [...]the built-in candidates include all of the candidate operator
+ // functions defined in 13.6 that, compared to the given operator, [...]
+ // do not have the same parameter-type-list as any non-template non-member
+ // candidate.
//
- // The text is actually in a note, but if we don't implement it then we end
- // up with ambiguities when the user provides an overloaded operator for
- // an enumeration type. Note that only enumeration types have this problem,
- // so we track which enumeration types we've seen operators for. Also, the
- // only other overloaded operator with enumeration argumenst, operator=,
+ // Note that in practice, this only affects enumeration types because there
+ // aren't any built-in candidates of record type, and a user-defined operator
+ // must have an operand of record or enumeration type. Also, the only other
+ // overloaded operator with enumeration arguments, operator=,
// cannot be overloaded for enumeration types, so this is the only place
// where we must suppress candidates like this.
llvm::DenseSet<std::pair<CanQualType, CanQualType> >
@@ -6795,6 +6810,9 @@ public:
if (!C->Viable || !C->Function || C->Function->getNumParams() != 2)
continue;
+ if (C->Function->isFunctionTemplateSpecialization())
+ continue;
+
QualType FirstParamType =
C->Function->getParamDecl(0)->getType().getUnqualifiedType();
QualType SecondParamType =
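The rewritten comment cites the governing rule, C++ [over.match.oper]p3: a built-in candidate is dropped only when a non-template non-member candidate has the same parameter-type-list, which is why the new check skips function template specializations instead of letting them suppress the built-in operator. A hedged illustration of the enum case the comment singles out:

    enum Color { Red, Green };

    // Non-template non-member candidate with parameter-type-list
    // (Color, Color): per [over.match.oper]p3 it hides the built-in
    // bool operator<(Color, Color), so the call below is unambiguous
    // instead of colliding with the built-in candidate.
    bool operator<(Color a, Color b) { return int(a) > int(b); }

    int main() {
      return Red < Green ? 1 : 0;  // calls the user operator: returns 0
    }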
@@ -7652,8 +7670,7 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
llvm::ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
- bool PartialOverloading,
- bool StdNamespaceIsAssociated) {
+ bool PartialOverloading) {
ADLResult Fns;
// FIXME: This approach for uniquing ADL results (and removing
@@ -7664,8 +7681,7 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
// we supposed to consider on ADL candidates, anyway?
// FIXME: Pass in the explicit template arguments?
- ArgumentDependentLookup(Name, Operator, Loc, Args, Fns,
- StdNamespaceIsAssociated);
+ ArgumentDependentLookup(Name, Operator, Loc, Args, Fns);
// Erase all of the candidates we already knew about.
for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
@@ -7976,10 +7992,20 @@ void ImplicitConversionSequence::DiagnoseAmbiguousConversion(
const PartialDiagnostic &PDiag) const {
S.Diag(CaretLoc, PDiag)
<< Ambiguous.getFromType() << Ambiguous.getToType();
- for (AmbiguousConversionSequence::const_iterator
- I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) {
+ // FIXME: The note limiting machinery is borrowed from
+ // OverloadCandidateSet::NoteCandidates; there's an opportunity for
+ // refactoring here.
+ const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
+ unsigned CandsShown = 0;
+ AmbiguousConversionSequence::const_iterator I, E;
+ for (I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) {
+ if (CandsShown >= 4 && ShowOverloads == Ovl_Best)
+ break;
+ ++CandsShown;
S.NoteOverloadCandidate(*I);
}
+ if (I != E)
+ S.Diag(SourceLocation(), diag::note_ovl_too_many_candidates) << int(E - I);
}
namespace {
@@ -8515,7 +8541,7 @@ void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
}
void NoteBuiltinOperatorCandidate(Sema &S,
- const char *Opc,
+ StringRef Opc,
SourceLocation OpLoc,
OverloadCandidate *Cand) {
assert(Cand->NumConversions <= 2 && "builtin operator is not binary");
@@ -8561,6 +8587,7 @@ RankDeductionFailure(const OverloadCandidate::DeductionFailureInfo &DFI) {
case Sema::TDK_Success:
llvm_unreachable("TDK_success while diagnosing bad deduction");
+ case Sema::TDK_Invalid:
case Sema::TDK_Incomplete:
return 1;
@@ -8783,7 +8810,7 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
void OverloadCandidateSet::NoteCandidates(Sema &S,
OverloadCandidateDisplayKind OCD,
llvm::ArrayRef<Expr *> Args,
- const char *Opc,
+ StringRef Opc,
SourceLocation OpLoc) {
// Sort the candidates by viability and position. Sorting directly would
// be prohibitive, so we make a set of pointers and sort those.
@@ -8807,8 +8834,7 @@ void OverloadCandidateSet::NoteCandidates(Sema &S,
bool ReportedAmbiguousConversions = false;
SmallVectorImpl<OverloadCandidate*>::iterator I, E;
- const DiagnosticsEngine::OverloadsShown ShowOverloads =
- S.Diags.getShowOverloads();
+ const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
unsigned CandsShown = 0;
for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
OverloadCandidate *Cand = *I;
@@ -8816,7 +8842,7 @@ void OverloadCandidateSet::NoteCandidates(Sema &S,
// Set an arbitrary limit on the number of candidate functions we'll spam
// the user with. FIXME: This limit should depend on details of the
// candidate list.
- if (CandsShown >= 4 && ShowOverloads == DiagnosticsEngine::Ovl_Best) {
+ if (CandsShown >= 4 && ShowOverloads == Ovl_Best) {
break;
}
++CandsShown;
@@ -8979,7 +9005,7 @@ private:
// function template specialization, which is added to the set of
// overloaded functions considered.
FunctionDecl *Specialization = 0;
- TemplateDeductionInfo Info(Context, OvlExpr->getNameLoc());
+ TemplateDeductionInfo Info(OvlExpr->getNameLoc());
if (Sema::TemplateDeductionResult Result
= S.DeduceTemplateArguments(FunctionTemplate,
&OvlExplicitTemplateArgs,
@@ -9201,7 +9227,6 @@ Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
Fn = Resolver.getMatchingFunctionDecl();
assert(Fn);
FoundResult = *Resolver.getMatchingFunctionAccessPair();
- MarkFunctionReferenced(AddressOfExpr->getLocStart(), Fn);
if (Complain)
CheckAddressOfMemberAccess(AddressOfExpr, FoundResult);
}
@@ -9257,7 +9282,7 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
// function template specialization, which is added to the set of
// overloaded functions considered.
FunctionDecl *Specialization = 0;
- TemplateDeductionInfo Info(Context, ovl->getNameLoc());
+ TemplateDeductionInfo Info(ovl->getNameLoc());
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs,
Specialization, Info)) {
@@ -9457,8 +9482,7 @@ void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
AddArgumentDependentLookupCandidates(ULE->getName(), /*Operator*/ false,
ULE->getExprLoc(),
Args, ExplicitTemplateArgs,
- CandidateSet, PartialOverloading,
- ULE->isStdAssociatedNamespace());
+ CandidateSet, PartialOverloading);
}
/// Attempt to recover from an ill-formed use of a non-dependent name in a
@@ -9509,7 +9533,7 @@ DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
// declaring the function there instead.
Sema::AssociatedNamespaceSet AssociatedNamespaces;
Sema::AssociatedClassSet AssociatedClasses;
- SemaRef.FindAssociatedClassesAndNamespaces(Args,
+ SemaRef.FindAssociatedClassesAndNamespaces(FnLoc, Args,
AssociatedNamespaces,
AssociatedClasses);
// Never suggest declaring a function within namespace 'std'.
@@ -9632,6 +9656,20 @@ class NoTypoCorrectionCCC : public CorrectionCandidateCallback {
return false;
}
};
+
+class BuildRecoveryCallExprRAII {
+ Sema &SemaRef;
+public:
+ BuildRecoveryCallExprRAII(Sema &S) : SemaRef(S) {
+ assert(SemaRef.IsBuildingRecoveryCallExpr == false);
+ SemaRef.IsBuildingRecoveryCallExpr = true;
+ }
+
+ ~BuildRecoveryCallExprRAII() {
+ SemaRef.IsBuildingRecoveryCallExpr = false;
+ }
+};
+
}
/// Attempts to recover from a call where no functions were found.
@@ -9644,6 +9682,15 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
llvm::MutableArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool EmptyLookup, bool AllowTypoCorrection) {
+ // Do not try to recover if we are already building a recovery call.
+ // This stops infinite loops for template instantiations like
+ //
+ // template <typename T> auto foo(T t) -> decltype(foo(t)) {}
+ // template <typename T> auto foo(T t) -> decltype(foo(&t)) {}
+ //
+ if (SemaRef.IsBuildingRecoveryCallExpr)
+ return ExprError();
+ BuildRecoveryCallExprRAII RCE(SemaRef);
CXXScopeSpec SS;
SS.Adopt(ULE->getQualifierLoc());
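BuildRecoveryCallExprRAII is a plain re-entrancy guard: it sets Sema::IsBuildingRecoveryCallExpr for the duration of one recovery attempt, so a recovery that winds up back in BuildRecoveryCallExpr (as in the decltype example in the comment above) bails out instead of recursing forever. A generic sketch of the same idiom, with hypothetical names:

    #include <cassert>

    struct Analyzer {
      bool InRecovery = false;

      // RAII guard: flag set on entry, cleared on every exit path.
      struct RecoveryGuard {
        Analyzer &A;
        explicit RecoveryGuard(Analyzer &An) : A(An) {
          assert(!A.InRecovery && "recovery must not be re-entered");
          A.InRecovery = true;
        }
        ~RecoveryGuard() { A.InRecovery = false; }
      };

      bool recover() {
        if (InRecovery)        // re-entered: give up rather than loop
          return false;
        RecoveryGuard G(*this);
        // ... work that may indirectly call recover() again ...
        return true;
      }
    };

    int main() { Analyzer A; return A.recover() ? 0 : 1; }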
@@ -9695,20 +9742,15 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
RParenLoc);
}
-/// ResolveOverloadedCallFn - Given the call expression that calls Fn
-/// (which eventually refers to the declaration Func) and the call
-/// arguments Args/NumArgs, attempt to resolve the function call down
-/// to a specific function. If overload resolution succeeds, returns
-/// the function declaration produced by overload
-/// resolution. Otherwise, emits diagnostics, deletes all of the
-/// arguments and Fn, and returns NULL.
-ExprResult
-Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
- SourceLocation LParenLoc,
- Expr **Args, unsigned NumArgs,
- SourceLocation RParenLoc,
- Expr *ExecConfig,
- bool AllowTypoCorrection) {
+/// \brief Constructs and populates an OverloadCandidateSet from
+/// the given function.
+/// \returns true when the ExprResult output parameter has been set.
+bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ OverloadCandidateSet *CandidateSet,
+ ExprResult *Result) {
#ifndef NDEBUG
if (ULE->requiresADL()) {
// To do ADL, we must have found an unqualified name.
@@ -9724,62 +9766,79 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
// We don't perform ADL in C.
assert(getLangOpts().CPlusPlus && "ADL enabled in C");
- } else
- assert(!ULE->isStdAssociatedNamespace() &&
- "std is associated namespace but not doing ADL");
+ }
#endif
UnbridgedCastsSet UnbridgedCasts;
- if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts))
- return ExprError();
-
- OverloadCandidateSet CandidateSet(Fn->getExprLoc());
+ if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts)) {
+ *Result = ExprError();
+ return true;
+ }
// Add the functions denoted by the callee to the set of candidate
// functions, including those from argument-dependent lookup.
AddOverloadedCallCandidates(ULE, llvm::makeArrayRef(Args, NumArgs),
- CandidateSet);
+ *CandidateSet);
// If we found nothing, try to recover.
// BuildRecoveryCallExpr diagnoses the error itself, so we just bail
// out if it fails.
- if (CandidateSet.empty()) {
+ if (CandidateSet->empty()) {
// In Microsoft mode, if we are inside a template class member function then
// create a type dependent CallExpr. The goal is to postpone name lookup
// to instantiation time to be able to search into type dependent base
// classes.
if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() &&
(isa<FunctionDecl>(CurContext) || isa<CXXRecordDecl>(CurContext))) {
- CallExpr *CE = new (Context) CallExpr(Context, Fn, Args, NumArgs,
- Context.DependentTy, VK_RValue,
- RParenLoc);
+ CallExpr *CE = new (Context) CallExpr(Context, Fn,
+ llvm::makeArrayRef(Args, NumArgs),
+ Context.DependentTy, VK_RValue,
+ RParenLoc);
CE->setTypeDependent(true);
- return Owned(CE);
+ *Result = Owned(CE);
+ return true;
}
- return BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc,
- llvm::MutableArrayRef<Expr *>(Args, NumArgs),
- RParenLoc, /*EmptyLookup=*/true,
- AllowTypoCorrection);
+ return false;
}
UnbridgedCasts.restore();
+ return false;
+}
- OverloadCandidateSet::iterator Best;
- switch (CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best)) {
+/// FinishOverloadedCallExpr - Given an OverloadCandidateSet, builds and returns
+/// the completed call expression. If overload resolution fails, emits
+/// diagnostics and returns ExprError().
+static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig,
+ OverloadCandidateSet *CandidateSet,
+ OverloadCandidateSet::iterator *Best,
+ OverloadingResult OverloadResult,
+ bool AllowTypoCorrection) {
+ if (CandidateSet->empty())
+ return BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc,
+ llvm::MutableArrayRef<Expr *>(Args, NumArgs),
+ RParenLoc, /*EmptyLookup=*/true,
+ AllowTypoCorrection);
+
+ switch (OverloadResult) {
case OR_Success: {
- FunctionDecl *FDecl = Best->Function;
- MarkFunctionReferenced(Fn->getExprLoc(), FDecl);
- CheckUnresolvedLookupAccess(ULE, Best->FoundDecl);
- DiagnoseUseOfDecl(FDecl, ULE->getNameLoc());
- Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl);
- return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, RParenLoc,
- ExecConfig);
+ FunctionDecl *FDecl = (*Best)->Function;
+ SemaRef.MarkFunctionReferenced(Fn->getExprLoc(), FDecl);
+ SemaRef.CheckUnresolvedLookupAccess(ULE, (*Best)->FoundDecl);
+ SemaRef.DiagnoseUseOfDecl(FDecl, ULE->getNameLoc());
+ Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
+ return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs,
+ RParenLoc, ExecConfig);
}
case OR_No_Viable_Function: {
// Try to recover by looking for viable functions which the user might
// have meant to call.
- ExprResult Recovery = BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc,
+ ExprResult Recovery = BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc,
llvm::MutableArrayRef<Expr *>(Args, NumArgs),
RParenLoc,
/*EmptyLookup=*/false,
@@ -9787,44 +9846,73 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
if (!Recovery.isInvalid())
return Recovery;
- Diag(Fn->getLocStart(),
+ SemaRef.Diag(Fn->getLocStart(),
diag::err_ovl_no_viable_function_in_call)
<< ULE->getName() << Fn->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
- llvm::makeArrayRef(Args, NumArgs));
+ CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
}
case OR_Ambiguous:
- Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call)
+ SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call)
<< ULE->getName() << Fn->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates,
- llvm::makeArrayRef(Args, NumArgs));
+ CandidateSet->NoteCandidates(SemaRef, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
- case OR_Deleted:
- {
- Diag(Fn->getLocStart(), diag::err_ovl_deleted_call)
- << Best->Function->isDeleted()
- << ULE->getName()
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Fn->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
- llvm::makeArrayRef(Args, NumArgs));
+ case OR_Deleted: {
+ SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_deleted_call)
+ << (*Best)->Function->isDeleted()
+ << ULE->getName()
+ << SemaRef.getDeletedOrUnavailableSuffix((*Best)->Function)
+ << Fn->getSourceRange();
+ CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
- // We emitted an error for the unvailable/deleted function call but keep
- // the call in the AST.
- FunctionDecl *FDecl = Best->Function;
- Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl);
- return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs,
- RParenLoc, ExecConfig);
- }
+ // We emitted an error for the unavailable/deleted function call but keep
+ // the call in the AST.
+ FunctionDecl *FDecl = (*Best)->Function;
+ Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
+ return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs,
+ RParenLoc, ExecConfig);
+ }
}
// Overload resolution failed.
return ExprError();
}
+/// BuildOverloadedCallExpr - Given the call expression that calls Fn
+/// (which eventually refers to the declaration Func) and the call
+/// arguments Args/NumArgs, attempt to resolve the function call down
+/// to a specific function. If overload resolution succeeds, returns
+/// the call expression produced by overload resolution.
+/// Otherwise, emits diagnostics and returns ExprError.
+ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig,
+ bool AllowTypoCorrection) {
+ OverloadCandidateSet CandidateSet(Fn->getExprLoc());
+ ExprResult result;
+
+ if (buildOverloadedCallSet(S, Fn, ULE, Args, NumArgs, LParenLoc,
+ &CandidateSet, &result))
+ return result;
+
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult OverloadResult =
+ CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best);
+
+ return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args, NumArgs,
+ RParenLoc, ExecConfig, &CandidateSet,
+ &Best, OverloadResult,
+ AllowTypoCorrection);
+}
+
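The net effect of this refactoring: BuildOverloadedCallExpr is now a thin wrapper over two reusable halves, buildOverloadedCallSet (gather candidates, handle the placeholder and Microsoft-mode early exits) and FinishOverloadedCallExpr (pick the best viable function and build the call or emit diagnostics), with BestViableFunction run by the caller in between. That middle step is the point of the split; a toy model of the shape, with invented names:

    #include <cstdio>
    #include <string>
    #include <vector>

    enum Result { Success, NoViable };

    // Phase 1: populate the candidate set; return true if fully handled.
    bool buildCandidates(std::vector<std::string> &Cands) {
      Cands.push_back("f(int)");
      return false;
    }

    // Phase 2: the caller sees the resolution result *before* any
    // diagnostics are committed, so it can fall back (as the for-range
    // code later in this diff does on FRS_NoViableFunction).
    Result pickBest(const std::vector<std::string> &Cands, std::string &Best) {
      if (Cands.empty()) return NoViable;
      Best = Cands.front();
      return Success;
    }

    int main() {
      std::vector<std::string> Cands;
      if (buildCandidates(Cands)) return 0;
      std::string Best;
      if (pickBest(Cands, Best) == Success)
        std::printf("resolved to %s\n", Best.c_str());
    }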
static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
return Functions.size() > 1 ||
(Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
@@ -9889,10 +9977,10 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
/*ADL*/ true, IsOverloaded(Fns),
Fns.begin(), Fns.end());
return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn,
- &Args[0], NumArgs,
+ llvm::makeArrayRef(Args, NumArgs),
Context.DependentTy,
VK_RValue,
- OpLoc));
+ OpLoc, false));
}
// Build an empty overload set.
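Many hunks in this file make the same mechanical change: CallExpr-family constructors that took a (pointer, count) pair now take an llvm::ArrayRef<Expr *>, built with llvm::makeArrayRef(Args, NumArgs) at existing call sites or passed as an empty MultiExprArg() when there are no arguments. A minimal sketch of the migration, assuming the LLVM ADT headers of this era:

    #include "llvm/ADT/ArrayRef.h"

    // Old style: parallel pointer/count parameters.
    int sumOld(const int *Vals, unsigned N) {
      int S = 0;
      for (unsigned i = 0; i != N; ++i) S += Vals[i];
      return S;
    }

    // New style: a single ArrayRef carries both, and cannot disagree
    // about the length the way two separate parameters can.
    int sumNew(llvm::ArrayRef<int> Vals) {
      int S = 0;
      for (unsigned i = 0, e = Vals.size(); i != e; ++i) S += Vals[i];
      return S;
    }

    int main() {
      int A[] = {1, 2, 3};
      // makeArrayRef adapts the old (pointer, count) call sites verbatim.
      return sumNew(llvm::makeArrayRef(A, 3)) - sumOld(A, 3);  // 0
    }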
@@ -9968,7 +10056,8 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
Args[0] = Input;
CallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.take(),
- Args, NumArgs, ResultTy, VK, OpLoc);
+ llvm::makeArrayRef(Args, NumArgs),
+ ResultTy, VK, OpLoc, false);
if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
FnDecl))
@@ -10069,7 +10158,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
return Owned(new (Context) BinaryOperator(Args[0], Args[1], Opc,
Context.DependentTy,
VK_RValue, OK_Ordinary,
- OpLoc));
+ OpLoc,
+ FPFeatures.fp_contract));
return Owned(new (Context) CompoundAssignOperator(Args[0], Args[1], Opc,
Context.DependentTy,
@@ -10077,7 +10167,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
OK_Ordinary,
Context.DependentTy,
Context.DependentTy,
- OpLoc));
+ OpLoc,
+ FPFeatures.fp_contract));
}
// FIXME: save results of ADL from here?
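The extra argument threaded through BinaryOperator, CompoundAssignOperator, and CXXOperatorCallExpr in these hunks is FPFeatures.fp_contract: whether floating-point contraction was enabled where the expression was written, i.e. whether a multiply followed by an add may be fused into a single fused multiply-add. A short illustration of the source pattern that flag governs (hedged, not from the patch):

    // With contraction on (for example '#pragma STDC FP_CONTRACT ON' in C,
    // or the corresponding compiler default), a * b + c may be evaluated
    // as fma(a, b, c) with one rounding instead of two. Recording the
    // permission per expression in the AST lets later phases honor it.
    double muladd(double a, double b, double c) {
      return a * b + c;  // candidate for contraction into an FMA
    }

    int main() { return muladd(2.0, 3.0, -6.0) == 0.0 ? 0 : 1; }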
@@ -10089,11 +10180,9 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
NestedNameSpecifierLoc(), OpNameInfo,
/*ADL*/ true, IsOverloaded(Fns),
Fns.begin(), Fns.end());
- return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn,
- Args, 2,
- Context.DependentTy,
- VK_RValue,
- OpLoc));
+ return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn, Args,
+ Context.DependentTy, VK_RValue,
+ OpLoc, FPFeatures.fp_contract));
}
// Always do placeholder-like conversions on the RHS.
@@ -10208,7 +10297,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.take(),
- Args, 2, ResultTy, VK, OpLoc);
+ Args, ResultTy, VK, OpLoc,
+ FPFeatures.fp_contract);
if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
FnDecl))
@@ -10270,7 +10360,7 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (Result.isInvalid())
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
BinaryOperator::getOpcodeStr(Opc), OpLoc);
- return move(Result);
+ return Result;
}
case OR_Ambiguous:
@@ -10337,10 +10427,10 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// Can't add any actual overloads yet
return Owned(new (Context) CXXOperatorCallExpr(Context, OO_Subscript, Fn,
- Args, 2,
+ Args,
Context.DependentTy,
VK_RValue,
- RLoc));
+ RLoc, false));
}
// Handle placeholders on both operands.
@@ -10416,8 +10506,9 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, OO_Subscript,
- FnExpr.take(), Args, 2,
- ResultTy, VK, RLoc);
+ FnExpr.take(), Args,
+ ResultTy, VK, RLoc,
+ false);
if (CheckCallReturnType(FnDecl->getResultType(), LLoc, TheCall,
FnDecl))
@@ -10534,7 +10625,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
CXXMemberCallExpr *call
- = new (Context) CXXMemberCallExpr(Context, MemExprE, Args, NumArgs,
+ = new (Context) CXXMemberCallExpr(Context, MemExprE,
+ llvm::makeArrayRef(Args, NumArgs),
resultType, valueKind, RParenLoc);
if (CheckCallReturnType(proto->getResultType(),
@@ -10676,7 +10768,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
assert(Method && "Member call to something that isn't a method?");
CXXMemberCallExpr *TheCall =
- new (Context) CXXMemberCallExpr(Context, MemExprE, Args, NumArgs,
+ new (Context) CXXMemberCallExpr(Context, MemExprE,
+ llvm::makeArrayRef(Args, NumArgs),
ResultType, VK, RParenLoc);
// Check for a valid return type.
@@ -10904,6 +10997,11 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// that calls this method, using Object for the implicit object
// parameter and passing along the remaining arguments.
CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+
+ // An error diagnostic has already been printed when parsing the declaration.
+ if (Method->isInvalidDecl())
+ return ExprError();
+
const FunctionProtoType *Proto =
Method->getType()->getAs<FunctionProtoType>();
@@ -10942,8 +11040,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, OO_Call, NewFn.take(),
- MethodArgs, NumArgs + 1,
- ResultTy, VK, RParenLoc);
+ llvm::makeArrayRef(MethodArgs, NumArgs+1),
+ ResultTy, VK, RParenLoc, false);
delete [] MethodArgs;
if (CheckCallReturnType(Method->getResultType(), LParenLoc, TheCall,
@@ -10966,7 +11064,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (ObjRes.isInvalid())
IsError = true;
else
- Object = move(ObjRes);
+ Object = ObjRes;
TheCall->setArg(0, Object.take());
// Check the argument types.
@@ -11116,7 +11214,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr.take(),
- &Base, 1, ResultTy, VK, OpLoc);
+ Base, ResultTy, VK, OpLoc, false);
if (CheckCallReturnType(Method->getResultType(), OpLoc, TheCall,
Method))
@@ -11187,7 +11285,8 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
ResultTy = ResultTy.getNonLValueExprType(Context);
UserDefinedLiteral *UDL =
- new (Context) UserDefinedLiteral(Context, Fn.take(), ConvArgs, Args.size(),
+ new (Context) UserDefinedLiteral(Context, Fn.take(),
+ llvm::makeArrayRef(ConvArgs, Args.size()),
ResultTy, VK, LitEndLoc, UDSuffixLoc);
if (CheckCallReturnType(FD->getResultType(), UDSuffixLoc, UDL, FD))
@@ -11199,6 +11298,80 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
return MaybeBindToTemporary(UDL);
}
+/// Build a call to 'begin' or 'end' for a C++11 for-range statement. If the
+/// given LookupResult is non-empty, it is assumed to describe a member which
+/// will be invoked. Otherwise, the function will be found via argument
+/// dependent lookup.
+/// CallExpr is set to a valid expression and FRS_Success is returned on
+/// success; otherwise CallExpr is set to ExprError() and some non-success
+/// value is returned.
+Sema::ForRangeStatus
+Sema::BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
+ SourceLocation RangeLoc, VarDecl *Decl,
+ BeginEndFunction BEF,
+ const DeclarationNameInfo &NameInfo,
+ LookupResult &MemberLookup,
+ OverloadCandidateSet *CandidateSet,
+ Expr *Range, ExprResult *CallExpr) {
+ CandidateSet->clear();
+ if (!MemberLookup.empty()) {
+ ExprResult MemberRef =
+ BuildMemberReferenceExpr(Range, Range->getType(), Loc,
+ /*IsPtr=*/false, CXXScopeSpec(),
+ /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ MemberLookup,
+ /*TemplateArgs=*/0);
+ if (MemberRef.isInvalid()) {
+ *CallExpr = ExprError();
+ Diag(Range->getLocStart(), diag::note_in_for_range)
+ << RangeLoc << BEF << Range->getType();
+ return FRS_DiagnosticIssued;
+ }
+ *CallExpr = ActOnCallExpr(S, MemberRef.get(), Loc, MultiExprArg(), Loc, 0);
+ if (CallExpr->isInvalid()) {
+ *CallExpr = ExprError();
+ Diag(Range->getLocStart(), diag::note_in_for_range)
+ << RangeLoc << BEF << Range->getType();
+ return FRS_DiagnosticIssued;
+ }
+ } else {
+ UnresolvedSet<0> FoundNames;
+ UnresolvedLookupExpr *Fn =
+ UnresolvedLookupExpr::Create(Context, /*NamingClass=*/0,
+ NestedNameSpecifierLoc(), NameInfo,
+ /*NeedsADL=*/true, /*Overloaded=*/false,
+ FoundNames.begin(), FoundNames.end());
+
+ bool CandidateSetError = buildOverloadedCallSet(S, Fn, Fn, &Range, 1, Loc,
+ CandidateSet, CallExpr);
+ if (CandidateSet->empty() || CandidateSetError) {
+ *CallExpr = ExprError();
+ return FRS_NoViableFunction;
+ }
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult OverloadResult =
+ CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best);
+
+ if (OverloadResult == OR_No_Viable_Function) {
+ *CallExpr = ExprError();
+ return FRS_NoViableFunction;
+ }
+ *CallExpr = FinishOverloadedCallExpr(*this, S, Fn, Fn, Loc, &Range, 1,
+ Loc, 0, CandidateSet, &Best,
+ OverloadResult,
+ /*AllowTypoCorrection=*/false);
+ if (CallExpr->isInvalid() || OverloadResult != OR_Success) {
+ *CallExpr = ExprError();
+ Diag(Range->getLocStart(), diag::note_in_for_range)
+ << RangeLoc << BEF << Range->getType();
+ return FRS_DiagnosticIssued;
+ }
+ }
+ return FRS_Success;
+}
+
+
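The new Sema::BuildForRangeBeginEndCall centralizes the two lookups that C++11 [stmt.ranged] prescribes: when the passed-in LookupResult is non-empty, begin/end are members found by class member access lookup; otherwise they are free functions found by argument-dependent lookup. The two user-visible shapes it resolves, as a self-contained example:

    // Member form: begin/end found by class member access lookup.
    struct Buffer {
      int Data[3] = {1, 2, 3};
      int *begin() { return Data; }
      int *end() { return Data + 3; }
    };

    // ADL form: free begin/end in the type's namespace.
    namespace lib {
      struct Span { int *First, *Last; };
      int *begin(Span S) { return S.First; }
      int *end(Span S) { return S.Last; }
    }

    int main() {
      int Sum = 0;
      Buffer B;
      for (int V : B) Sum += V;   // __range.begin() / __range.end()
      int Raw[2] = {4, 5};
      lib::Span S = {Raw, Raw + 2};
      for (int V : S) Sum += V;   // begin(__range) / end(__range) via ADL
      return Sum == 15 ? 0 : 1;
    }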
/// FixOverloadedFunctionReference - E is an expression that refers to
/// a C++ overloaded function (possibly with some parentheses and
/// perhaps a '&' around it). We have resolved the overloaded function
@@ -11358,6 +11531,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
TemplateArgs,
type, valueKind, OK_Ordinary);
ME->setHadMultipleCandidates(true);
+ MarkMemberReferenced(ME);
return ME;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
index 722ac19..a8d75b2 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
@@ -31,6 +31,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Lex/Preprocessor.h"
@@ -91,9 +92,8 @@ namespace {
return new (S.Context) GenericSelectionExpr(S.Context,
gse->getGenericLoc(),
gse->getControllingExpr(),
- assocTypes.data(),
- assocs.data(),
- numAssocs,
+ assocTypes,
+ assocs,
gse->getDefaultLoc(),
gse->getRParenLoc(),
gse->containsUnexpandedParameterPack(),
@@ -187,7 +187,7 @@ namespace {
UnaryOperatorKind opcode,
Expr *op);
- ExprResult complete(Expr *syntacticForm);
+ virtual ExprResult complete(Expr *syntacticForm);
OpaqueValueExpr *capture(Expr *op);
OpaqueValueExpr *captureValueAsResult(Expr *op);
@@ -198,7 +198,14 @@ namespace {
}
/// Return true if a value of the given type can be captured as a result.
- virtual bool assignmentsHaveResult() { return true; }
+ bool CanCaptureValueOfType(QualType ty) {
+ assert(!ty->isIncompleteType());
+ assert(!ty->isDependentType());
+
+ if (const CXXRecordDecl *ClassDecl = ty->getAsCXXRecordDecl())
+ return ClassDecl->isTriviallyCopyable();
+ return true;
+ }
virtual Expr *rebuildAndCaptureObject(Expr *) = 0;
virtual ExprResult buildGet() = 0;
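CanCaptureValueOfType replaces the old unconditional assignmentsHaveResult(): the result of a pseudo-object assignment or increment is now captured only when its type is trivially copyable, presumably because capturing anything else would smuggle in a non-trivial copy construction. A standalone check that mirrors the gate, under that stated assumption:

    #include <string>
    #include <type_traits>

    // Scalars always pass; class types pass only when trivially copyable,
    // mirroring CXXRecordDecl::isTriviallyCopyable in the hunk above.
    template <typename T>
    bool canCaptureValueOfType() {
      return std::is_trivially_copyable<T>::value;
    }

    int main() {
      bool ok = canCaptureValueOfType<int>()              // true
                && !canCaptureValueOfType<std::string>(); // non-trivial copy
      return ok ? 0 : 1;
    }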
@@ -206,7 +213,7 @@ namespace {
bool captureSetValueAsResult) = 0;
};
- /// A PseudoOpBuilder for Objective-C @properties.
+ /// A PseudoOpBuilder for Objective-C \@properties.
class ObjCPropertyOpBuilder : public PseudoOpBuilder {
ObjCPropertyRefExpr *RefExpr;
ObjCPropertyRefExpr *SyntacticRefExpr;
@@ -239,6 +246,9 @@ namespace {
Expr *rebuildAndCaptureObject(Expr *syntacticBase);
ExprResult buildGet();
ExprResult buildSet(Expr *op, SourceLocation, bool);
+ ExprResult complete(Expr *SyntacticForm);
+
+ bool isWeakProperty() const;
};
/// A PseudoOpBuilder for Objective-C array/dictionary indexing.
@@ -292,7 +302,7 @@ OpaqueValueExpr *PseudoOpBuilder::capture(Expr *e) {
/// operation. This routine is safe against expressions which may
/// already be captured.
///
-/// \param Returns the captured expression, which will be the
+/// \returns the captured expression, which will be the
/// same as the input if the input was already captured
OpaqueValueExpr *PseudoOpBuilder::captureValueAsResult(Expr *e) {
assert(ResultIndex == PseudoObjectExpr::NoResult);
@@ -353,7 +363,7 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS,
opcode, capturedRHS->getType(),
capturedRHS->getValueKind(),
- OK_Ordinary, opcLoc);
+ OK_Ordinary, opcLoc, false);
} else {
ExprResult opLHS = buildGet();
if (opLHS.isInvalid()) return ExprError();
@@ -372,12 +382,12 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
OK_Ordinary,
opLHS.get()->getType(),
result.get()->getType(),
- opcLoc);
+ opcLoc, false);
}
// The result of the assignment, if not void, is the value set into
// the l-value.
- result = buildSet(result.take(), opcLoc, assignmentsHaveResult());
+ result = buildSet(result.take(), opcLoc, /*captureSetValueAsResult*/ true);
if (result.isInvalid()) return ExprError();
addSemanticExpr(result.take());
@@ -401,7 +411,7 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
QualType resultType = result.get()->getType();
// That's the postfix result.
- if (UnaryOperator::isPostfix(opcode) && assignmentsHaveResult()) {
+ if (UnaryOperator::isPostfix(opcode) && CanCaptureValueOfType(resultType)) {
result = capture(result.take());
setResultToLastSemantic();
}
@@ -420,8 +430,7 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
// Store that back into the result. The value stored is the result
// of a prefix operation.
- result = buildSet(result.take(), opcLoc,
- UnaryOperator::isPrefix(opcode) && assignmentsHaveResult());
+ result = buildSet(result.take(), opcLoc, UnaryOperator::isPrefix(opcode));
if (result.isInvalid()) return ExprError();
addSemanticExpr(result.take());
@@ -472,6 +481,23 @@ static ObjCMethodDecl *LookupMethodInReceiverType(Sema &S, Selector sel,
return S.LookupMethodInObjectType(sel, IT, false);
}
+bool ObjCPropertyOpBuilder::isWeakProperty() const {
+ QualType T;
+ if (RefExpr->isExplicitProperty()) {
+ const ObjCPropertyDecl *Prop = RefExpr->getExplicitProperty();
+ if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ return true;
+
+ T = Prop->getType();
+ } else if (Getter) {
+ T = Getter->getResultType();
+ } else {
+ return false;
+ }
+
+ return T.getObjCLifetime() == Qualifiers::OCL_Weak;
+}
+
bool ObjCPropertyOpBuilder::findGetter() {
if (Getter) return true;
@@ -532,7 +558,7 @@ bool ObjCPropertyOpBuilder::findSetter(bool warn) {
// Do a normal method lookup first.
if (ObjCMethodDecl *setter =
LookupMethodInReceiverType(S, SetterSelector, RefExpr)) {
- if (setter->isSynthesized() && warn)
+ if (setter->isPropertyAccessor() && warn)
if (const ObjCInterfaceDecl *IFace =
dyn_cast<ObjCInterfaceDecl>(setter->getDeclContext())) {
const StringRef thisPropertyName(prop->getName());
@@ -617,7 +643,7 @@ ExprResult ObjCPropertyOpBuilder::buildGet() {
/// Store to an Objective-C property reference.
///
-/// \param bindSetValueAsResult - If true, capture the actual
+/// \param captureSetValueAsResult If true, capture the actual
/// value being set as the value of the property operation.
ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
bool captureSetValueAsResult) {
@@ -676,7 +702,8 @@ ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
ObjCMessageExpr *msgExpr =
cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
Expr *arg = msgExpr->getArg(0);
- msgExpr->setArg(0, captureValueAsResult(arg));
+ if (CanCaptureValueOfType(arg->getType()))
+ msgExpr->setArg(0, captureValueAsResult(arg));
}
return msg;
@@ -819,6 +846,19 @@ ObjCPropertyOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
return PseudoOpBuilder::buildIncDecOperation(Sc, opcLoc, opcode, op);
}
+ExprResult ObjCPropertyOpBuilder::complete(Expr *SyntacticForm) {
+ if (S.getLangOpts().ObjCAutoRefCount && isWeakProperty()) {
+ DiagnosticsEngine::Level Level =
+ S.Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
+ SyntacticForm->getLocStart());
+ if (Level != DiagnosticsEngine::Ignored)
+ S.getCurFunction()->recordUseOfWeak(SyntacticRefExpr,
+ SyntacticRefExpr->isMessagingGetter());
+ }
+
+ return PseudoOpBuilder::complete(SyntacticForm);
+}
+
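The complete() override added above shows a common clang idiom: bookkeeping that exists only to feed a warning (here, recording each use of a weak property so -Warc-repeated-use-of-weak can fire later) is guarded by asking the DiagnosticsEngine whether the warning is even enabled at that location, skipping the cost when it maps to Ignored. A schematic of the idiom with hypothetical stand-ins:

    #include <cstdio>

    enum Level { Ignored, Warning, Error };

    // Hypothetical stand-ins for DiagnosticsEngine and FunctionScopeInfo.
    Level getDiagnosticLevel(int /*DiagID*/) { return Warning; }
    void recordUseOfWeak(const char *Name) {
      std::printf("weak use: %s\n", Name);
    }

    void completeWeakAccess(const char *PropName) {
      // Guard: only pay the recording cost if the diagnostic can fire.
      if (getDiagnosticLevel(/*warn_arc_repeated_use_of_weak*/ 42) != Ignored)
        recordUseOfWeak(PropName);
    }

    int main() { completeWeakAccess("self.delegate"); }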
// ObjCSubscript build stuff.
//
@@ -1035,7 +1075,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
0 /*TypeSourceInfo */,
S.Context.getTranslationUnitDecl(),
true /*Instance*/, false/*isVariadic*/,
- /*isSynthesized=*/false,
+ /*isPropertyAccessor=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
ObjCMethodDecl::Required,
false);
@@ -1151,7 +1191,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
ResultTInfo,
S.Context.getTranslationUnitDecl(),
true /*Instance*/, false/*isVariadic*/,
- /*isSynthesized=*/false,
+ /*isPropertyAccessor=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
ObjCMethodDecl::Required,
false);
@@ -1255,7 +1295,7 @@ ExprResult ObjCSubscriptOpBuilder::buildGet() {
/// Store into the container the "op" object at "Index"'ed location
/// by building this messaging expression:
/// - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index;
-/// \param bindSetValueAsResult - If true, capture the actual
+/// \param captureSetValueAsResult If true, capture the actual
/// value being set as the value of the property operation.
ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
bool captureSetValueAsResult) {
@@ -1279,7 +1319,8 @@ ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
ObjCMessageExpr *msgExpr =
cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
Expr *arg = msgExpr->getArg(0);
- msgExpr->setArg(0, captureValueAsResult(arg));
+ if (CanCaptureValueOfType(arg->getType()))
+ msgExpr->setArg(0, captureValueAsResult(arg));
}
return msg;
@@ -1333,7 +1374,7 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
// Do nothing if either argument is dependent.
if (LHS->isTypeDependent() || RHS->isTypeDependent())
return new (Context) BinaryOperator(LHS, RHS, opcode, Context.DependentTy,
- VK_RValue, OK_Ordinary, opcLoc);
+ VK_RValue, OK_Ordinary, opcLoc, false);
// Filter out non-overload placeholder types in the RHS.
if (RHS->getType()->isNonOverloadPlaceholderType()) {
@@ -1404,14 +1445,14 @@ Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
cop->getObjectKind(),
cop->getComputationLHSType(),
cop->getComputationResultType(),
- cop->getOperatorLoc());
+ cop->getOperatorLoc(), false);
} else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
return new (Context) BinaryOperator(lhs, rhs, bop->getOpcode(),
bop->getType(), bop->getValueKind(),
bop->getObjectKind(),
- bop->getOperatorLoc());
+ bop->getOperatorLoc(), false);
} else {
assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
index 86884b7..f55174e 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -28,27 +28,10 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstPrinter.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCObjectFileInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
-#include "llvm/MC/MCParser/MCAsmLexer.h"
-#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/TargetSelect.h"
using namespace clang;
using namespace sema;
@@ -177,6 +160,13 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
!E->isUnusedResultAWarning(WarnExpr, Loc, R1, R2, Context))
return;
+ // If this is a GNU statement expression expanded from a macro, it is probably
+ // unused because it is a function-like macro that can be used as either an
+ // expression or statement. Don't warn, because it is almost certainly a
+ // false positive.
+ if (isa<StmtExpr>(E) && Loc.isMacroID())
+ return;
+
// Okay, we have an unused result. Depending on what the base expression is,
// we might want to make a more specific diagnostic. Check for one of these
// cases now.
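The early return added above suppresses -Wunused-value for GNU statement expressions produced by macro expansion. Such macros are routinely written so one definition works as either an expression or a standalone statement, and warning on the statement use would be noise. An example of the pattern being exempted (a GNU extension, accepted by clang and gcc):

    // Statement-expression macro usable as an expression or a statement.
    #define CLAMP01(x) ({               \
        int _x = (x);                   \
        _x < 0 ? 0 : (_x > 1 ? 1 : _x); })

    int main() {
      int v = CLAMP01(7);  // expression use: value consumed
      CLAMP01(-3);         // statement use: value intentionally dropped;
                           // the new check keeps -Wunused-value quiet here
      return v - 1;        // 0
    }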
@@ -271,7 +261,7 @@ StmtResult
Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
MultiStmtArg elts, bool isStmtExpr) {
unsigned NumElts = elts.size();
- Stmt **Elts = reinterpret_cast<Stmt**>(elts.release());
+ Stmt **Elts = elts.data();
// If we're in C89 mode, check that we don't have any decls after stmts. If
// so, emit an extension diagnostic.
if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
@@ -381,8 +371,10 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
// Otherwise, things are good. Fill in the declaration and return it.
LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
TheDecl->setStmt(LS);
- if (!TheDecl->isGnuLocal())
+ if (!TheDecl->isGnuLocal()) {
+ TheDecl->setLocStart(IdentLoc);
TheDecl->setLocation(IdentLoc);
+ }
return Owned(LS);
}
@@ -1566,25 +1558,6 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
ForLoc, RParenLoc));
}
-namespace {
-
-enum BeginEndFunction {
- BEF_begin,
- BEF_end
-};
-
-/// Build a variable declaration for a for-range statement.
-static VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc,
- QualType Type, const char *Name) {
- DeclContext *DC = SemaRef.CurContext;
- IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
- TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
- VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type,
- TInfo, SC_Auto, SC_None);
- Decl->setImplicit();
- return Decl;
-}
-
/// Finish building a variable declaration for a for-range statement.
/// \return true if an error occurs.
static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
@@ -1617,12 +1590,14 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
return false;
}
+namespace {
+
/// Produce a note indicating which begin/end function was implicitly called
-/// by a C++0x for-range statement. This is often not obvious from the code,
+/// by a C++11 for-range statement. This is often not obvious from the code,
/// nor from the diagnostics produced when analysing the implicit expressions
/// required in a for-range statement.
void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E,
- BeginEndFunction BEF) {
+ Sema::BeginEndFunction BEF) {
CallExpr *CE = dyn_cast<CallExpr>(E);
if (!CE)
return;
@@ -1643,56 +1618,16 @@ void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E,
<< BEF << IsTemplate << Description << E->getType();
}
-/// Build a call to 'begin' or 'end' for a C++0x for-range statement. If the
-/// given LookupResult is non-empty, it is assumed to describe a member which
-/// will be invoked. Otherwise, the function will be found via argument
-/// dependent lookup.
-static ExprResult BuildForRangeBeginEndCall(Sema &SemaRef, Scope *S,
- SourceLocation Loc,
- VarDecl *Decl,
- BeginEndFunction BEF,
- const DeclarationNameInfo &NameInfo,
- LookupResult &MemberLookup,
- Expr *Range) {
- ExprResult CallExpr;
- if (!MemberLookup.empty()) {
- ExprResult MemberRef =
- SemaRef.BuildMemberReferenceExpr(Range, Range->getType(), Loc,
- /*IsPtr=*/false, CXXScopeSpec(),
- /*TemplateKWLoc=*/SourceLocation(),
- /*FirstQualifierInScope=*/0,
- MemberLookup,
- /*TemplateArgs=*/0);
- if (MemberRef.isInvalid())
- return ExprError();
- CallExpr = SemaRef.ActOnCallExpr(S, MemberRef.get(), Loc, MultiExprArg(),
- Loc, 0);
- if (CallExpr.isInvalid())
- return ExprError();
- } else {
- UnresolvedSet<0> FoundNames;
- // C++0x [stmt.ranged]p1: For the purposes of this name lookup, namespace
- // std is an associated namespace.
- UnresolvedLookupExpr *Fn =
- UnresolvedLookupExpr::Create(SemaRef.Context, /*NamingClass=*/0,
- NestedNameSpecifierLoc(), NameInfo,
- /*NeedsADL=*/true, /*Overloaded=*/false,
- FoundNames.begin(), FoundNames.end(),
- /*LookInStdNamespace=*/true);
- CallExpr = SemaRef.BuildOverloadedCallExpr(S, Fn, Fn, Loc, &Range, 1, Loc,
- 0, /*AllowTypoCorrection=*/false);
- if (CallExpr.isInvalid()) {
- SemaRef.Diag(Range->getLocStart(), diag::note_for_range_type)
- << Range->getType();
- return ExprError();
- }
- }
- if (FinishForRangeVarDecl(SemaRef, Decl, CallExpr.get(), Loc,
- diag::err_for_range_iter_deduction_failure)) {
- NoteForRangeBeginEndFunction(SemaRef, CallExpr.get(), BEF);
- return ExprError();
- }
- return CallExpr;
+/// Build a variable declaration for a for-range statement.
+VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc,
+ QualType Type, const char *Name) {
+ DeclContext *DC = SemaRef.CurContext;
+ IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
+ TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
+ VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type,
+ TInfo, SC_Auto, SC_None);
+ Decl->setImplicit();
+ return Decl;
}
}
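For orientation, the implicit variables built here correspond to the exposition-only __range, __begin and __end of C++11 [stmt.ranged]p1. A hand-desugared equivalent in plain C++11 (no Clang internals assumed):

    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int> v{1, 2, 3};
      // Equivalent of: for (int x : v) std::cout << x << '\n';
      {
        auto &&range = v;            // the implicit __range variable
        auto begin = range.begin();  // begin-expr (member lookup succeeded)
        auto end = range.end();      // end-expr
        for (; begin != end; ++begin) {
          int x = *begin;            // loop variable initialized from *__begin
          std::cout << x << '\n';
        }
      }
    }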
@@ -1723,7 +1658,7 @@ static bool ObjCEnumerationCollection(Expr *Collection) {
StmtResult
Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc,
Stmt *First, SourceLocation ColonLoc, Expr *Range,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc, BuildForRangeKind Kind) {
if (!First || !Range)
return StmtError();
@@ -1761,15 +1696,137 @@ Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc,
return BuildCXXForRangeStmt(ForLoc, ColonLoc, RangeDecl.get(),
/*BeginEndDecl=*/0, /*Cond=*/0, /*Inc=*/0, DS,
- RParenLoc);
+ RParenLoc, Kind);
}
-/// BuildCXXForRangeStmt - Build or instantiate a C++0x for-range statement.
+/// \brief Create the initialization, compare, and increment steps for
+/// the range-based for loop expression.
+/// This function does not handle array-based for loops,
+/// which are created in Sema::BuildCXXForRangeStmt.
+///
+/// \returns a ForRangeStatus indicating success or what kind of error occurred.
+/// BeginExpr and EndExpr are set and FRS_Success is returned on success;
+/// CandidateSet and BEF are set and some non-success value is returned on
+/// failure.
+static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Scope *S,
+ Expr *BeginRange, Expr *EndRange,
+ QualType RangeType,
+ VarDecl *BeginVar,
+ VarDecl *EndVar,
+ SourceLocation ColonLoc,
+ OverloadCandidateSet *CandidateSet,
+ ExprResult *BeginExpr,
+ ExprResult *EndExpr,
+ Sema::BeginEndFunction *BEF) {
+ DeclarationNameInfo BeginNameInfo(
+ &SemaRef.PP.getIdentifierTable().get("begin"), ColonLoc);
+ DeclarationNameInfo EndNameInfo(&SemaRef.PP.getIdentifierTable().get("end"),
+ ColonLoc);
+
+ LookupResult BeginMemberLookup(SemaRef, BeginNameInfo,
+ Sema::LookupMemberName);
+ LookupResult EndMemberLookup(SemaRef, EndNameInfo, Sema::LookupMemberName);
+
+ if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) {
+ // - if _RangeT is a class type, the unqualified-ids begin and end are
+ // looked up in the scope of class _RangeT as if by class member access
+ // lookup (3.4.5), and if either (or both) finds at least one
+ // declaration, begin-expr and end-expr are __range.begin() and
+ // __range.end(), respectively;
+ SemaRef.LookupQualifiedName(BeginMemberLookup, D);
+ SemaRef.LookupQualifiedName(EndMemberLookup, D);
+
+ if (BeginMemberLookup.empty() != EndMemberLookup.empty()) {
+ SourceLocation RangeLoc = BeginVar->getLocation();
+ *BEF = BeginMemberLookup.empty() ? Sema::BEF_end : Sema::BEF_begin;
+
+ SemaRef.Diag(RangeLoc, diag::err_for_range_member_begin_end_mismatch)
+ << RangeLoc << BeginRange->getType() << *BEF;
+ return Sema::FRS_DiagnosticIssued;
+ }
+ } else {
+ // - otherwise, begin-expr and end-expr are begin(__range) and
+ // end(__range), respectively, where begin and end are looked up with
+ // argument-dependent lookup (3.4.2). For the purposes of this name
+ // lookup, namespace std is an associated namespace.
+
+ }
+
+ *BEF = Sema::BEF_begin;
+ Sema::ForRangeStatus RangeStatus =
+ SemaRef.BuildForRangeBeginEndCall(S, ColonLoc, ColonLoc, BeginVar,
+ Sema::BEF_begin, BeginNameInfo,
+ BeginMemberLookup, CandidateSet,
+ BeginRange, BeginExpr);
+
+ if (RangeStatus != Sema::FRS_Success)
+ return RangeStatus;
+ if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF);
+ return Sema::FRS_DiagnosticIssued;
+ }
+
+ *BEF = Sema::BEF_end;
+ RangeStatus =
+ SemaRef.BuildForRangeBeginEndCall(S, ColonLoc, ColonLoc, EndVar,
+ Sema::BEF_end, EndNameInfo,
+ EndMemberLookup, CandidateSet,
+ EndRange, EndExpr);
+ if (RangeStatus != Sema::FRS_Success)
+ return RangeStatus;
+ if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF);
+ return Sema::FRS_DiagnosticIssued;
+ }
+ return Sema::FRS_Success;
+}
+
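To illustrate the two lookup paths BuildNonArrayForRange distinguishes: member begin/end win for class types, and everything else falls back to argument-dependent lookup. A sketch under invented names (mylib::Span is not a real type):

    #include <cstddef>

    namespace mylib {
      struct Span { int *data; std::size_t n; };
      // No member begin/end, so the ADL branch above applies; std is an
      // associated namespace per C++11 [stmt.ranged]p1.
      int *begin(Span &s) { return s.data; }
      int *end(Span &s)   { return s.data + s.n; }
    }

    int sum(mylib::Span s) {
      int total = 0;
      for (int x : s)   // begin(s) / end(s) found via ADL
        total += x;
      return total;
    }

Declaring only one of the two members would instead hit err_for_range_member_begin_end_mismatch, since the code refuses to mix member and ADL lookup.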
+/// Speculatively attempt to dereference an invalid range expression.
+/// If the attempt fails, this function will return a valid, null StmtResult
+/// and emit no diagnostics.
+static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
+ SourceLocation ForLoc,
+ Stmt *LoopVarDecl,
+ SourceLocation ColonLoc,
+ Expr *Range,
+ SourceLocation RangeLoc,
+ SourceLocation RParenLoc) {
+ // Determine whether we can rebuild the for-range statement with a
+ // dereferenced range expression.
+ ExprResult AdjustedRange;
+ {
+ Sema::SFINAETrap Trap(SemaRef);
+
+ AdjustedRange = SemaRef.BuildUnaryOp(S, RangeLoc, UO_Deref, Range);
+ if (AdjustedRange.isInvalid())
+ return StmtResult();
+
+ StmtResult SR =
+ SemaRef.ActOnCXXForRangeStmt(ForLoc, LoopVarDecl, ColonLoc,
+ AdjustedRange.get(), RParenLoc,
+ Sema::BFRK_Check);
+ if (SR.isInvalid())
+ return StmtResult();
+ }
+
+ // The attempt to dereference worked well enough that it could produce a valid
+ // loop. Produce a fixit, and rebuild the loop with diagnostics enabled, in
+ // case there are any other (non-fatal) problems with it.
+ SemaRef.Diag(RangeLoc, diag::err_for_range_dereference)
+ << Range->getType() << FixItHint::CreateInsertion(RangeLoc, "*");
+ return SemaRef.ActOnCXXForRangeStmt(ForLoc, LoopVarDecl, ColonLoc,
+ AdjustedRange.get(), RParenLoc,
+ Sema::BFRK_Rebuild);
+}
+
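The recovery above targets ranges of pointer type. A sketch of the case it handles (the commented loop is deliberately ill-formed; diagnostic wording approximate):

    #include <vector>

    int sum(std::vector<int> *v) {
      int s = 0;
      // for (int x : v) s += x;  // error: invalid range expression of type
      //                          // 'std::vector<int> *'; did you mean '*v'?
      for (int x : *v)            // the form the '*' fix-it produces
        s += x;
      return s;
    }

The speculative rebuild runs under an SFINAETrap with BFRK_Check, so nothing is diagnosed unless dereferencing actually yields a usable loop.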
+/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
StmtResult
Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEnd, Expr *Cond,
Expr *Inc, Stmt *LoopVarDecl,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc, BuildForRangeKind Kind) {
Scope *S = getCurScope();
DeclStmt *RangeDS = cast<DeclStmt>(RangeDecl);
@@ -1855,50 +1912,43 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
return StmtError();
}
} else {
- DeclarationNameInfo BeginNameInfo(&PP.getIdentifierTable().get("begin"),
- ColonLoc);
- DeclarationNameInfo EndNameInfo(&PP.getIdentifierTable().get("end"),
- ColonLoc);
-
- LookupResult BeginMemberLookup(*this, BeginNameInfo, LookupMemberName);
- LookupResult EndMemberLookup(*this, EndNameInfo, LookupMemberName);
-
- if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) {
- // - if _RangeT is a class type, the unqualified-ids begin and end are
- // looked up in the scope of class _RangeT as if by class member access
- // lookup (3.4.5), and if either (or both) finds at least one
- // declaration, begin-expr and end-expr are __range.begin() and
- // __range.end(), respectively;
- LookupQualifiedName(BeginMemberLookup, D);
- LookupQualifiedName(EndMemberLookup, D);
-
- if (BeginMemberLookup.empty() != EndMemberLookup.empty()) {
- Diag(ColonLoc, diag::err_for_range_member_begin_end_mismatch)
- << RangeType << BeginMemberLookup.empty();
- return StmtError();
- }
- } else {
- // - otherwise, begin-expr and end-expr are begin(__range) and
- // end(__range), respectively, where begin and end are looked up with
- // argument-dependent lookup (3.4.2). For the purposes of this name
- // lookup, namespace std is an associated namespace.
+ OverloadCandidateSet CandidateSet(RangeLoc);
+ Sema::BeginEndFunction BEFFailure;
+ ForRangeStatus RangeStatus =
+ BuildNonArrayForRange(*this, S, BeginRangeRef.get(),
+ EndRangeRef.get(), RangeType,
+ BeginVar, EndVar, ColonLoc, &CandidateSet,
+ &BeginExpr, &EndExpr, &BEFFailure);
+
+ // If building the range failed, try dereferencing the range expression
+ // unless a diagnostic was issued or the end function is problematic.
+ if (Kind == BFRK_Build && RangeStatus == FRS_NoViableFunction &&
+ BEFFailure == BEF_begin) {
+ StmtResult SR = RebuildForRangeWithDereference(*this, S, ForLoc,
+ LoopVarDecl, ColonLoc,
+ Range, RangeLoc,
+ RParenLoc);
+ if (SR.isInvalid() || SR.isUsable())
+ return SR;
}
- BeginExpr = BuildForRangeBeginEndCall(*this, S, ColonLoc, BeginVar,
- BEF_begin, BeginNameInfo,
- BeginMemberLookup,
- BeginRangeRef.get());
- if (BeginExpr.isInvalid())
- return StmtError();
-
- EndExpr = BuildForRangeBeginEndCall(*this, S, ColonLoc, EndVar,
- BEF_end, EndNameInfo,
- EndMemberLookup, EndRangeRef.get());
- if (EndExpr.isInvalid())
+ // Otherwise, emit diagnostics if we haven't already.
+ if (RangeStatus == FRS_NoViableFunction) {
+ Expr *Range = BEFFailure ? EndRangeRef.get() : BeginRangeRef.get();
+ Diag(Range->getLocStart(), diag::err_for_range_invalid)
+ << RangeLoc << Range->getType() << BEFFailure;
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(&Range, /*NumArgs=*/1));
+ }
+ // Return an error if no fix was discovered.
+ if (RangeStatus != FRS_Success)
return StmtError();
}
- // C++0x [decl.spec.auto]p6: BeginType and EndType must be the same.
+ assert(!BeginExpr.isInvalid() && !EndExpr.isInvalid() &&
+ "invalid range expression in for loop");
+
+ // C++11 [dcl.spec.auto]p7: BeginType and EndType must be the same.
QualType BeginType = BeginVar->getType(), EndType = EndVar->getType();
if (!Context.hasSameType(BeginType, EndType)) {
Diag(RangeLoc, diag::err_for_range_begin_end_types_differ)
@@ -1930,6 +1980,8 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
NotEqExpr = ActOnBooleanCondition(S, ColonLoc, NotEqExpr.get());
NotEqExpr = ActOnFinishFullExpr(NotEqExpr.get());
if (NotEqExpr.isInvalid()) {
+ Diag(RangeLoc, diag::note_for_range_invalid_iterator)
+ << RangeLoc << 0 << BeginRangeRef.get()->getType();
NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
if (!Context.hasSameType(BeginType, EndType))
NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
@@ -1945,6 +1997,8 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
IncrExpr = ActOnUnaryOp(S, ColonLoc, tok::plusplus, BeginRef.get());
IncrExpr = ActOnFinishFullExpr(IncrExpr.get());
if (IncrExpr.isInvalid()) {
+ Diag(RangeLoc, diag::note_for_range_invalid_iterator)
+ << RangeLoc << 2 << BeginRangeRef.get()->getType();
NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
return StmtError();
}
@@ -1957,12 +2011,15 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
ExprResult DerefExpr = ActOnUnaryOp(S, ColonLoc, tok::star, BeginRef.get());
if (DerefExpr.isInvalid()) {
+ Diag(RangeLoc, diag::note_for_range_invalid_iterator)
+ << RangeLoc << 1 << BeginRangeRef.get()->getType();
NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
return StmtError();
}
- // Attach *__begin as initializer for VD.
- if (!LoopVar->isInvalidDecl()) {
+ // Attach *__begin as initializer for VD. Don't touch it if we're just
+ // trying to determine whether this would be a valid range.
+ if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) {
AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false,
/*TypeMayContainAuto=*/true);
if (LoopVar->isInvalidDecl())
@@ -1973,6 +2030,11 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
RangeVar->setUsed();
}
+ // Don't bother to actually allocate the result if we're just trying to
+ // determine whether it would be valid.
+ if (Kind == BFRK_Check)
+ return StmtResult();
+
return Owned(new (Context) CXXForRangeStmt(RangeDS,
cast_or_null<DeclStmt>(BeginEndDecl.get()),
NotEqExpr.take(), IncrExpr.take(),
@@ -2485,600 +2547,6 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
return Owned(Result);
}
-/// CheckAsmLValue - GNU C has an extremely ugly extension whereby they silently
-/// ignore "noop" casts in places where an lvalue is required by an inline asm.
-/// We emulate this behavior when -fheinous-gnu-extensions is specified, but
-/// provide a strong guidance to not use it.
-///
-/// This method checks to see if the argument is an acceptable l-value and
-/// returns false if it is a case we can handle.
-static bool CheckAsmLValue(const Expr *E, Sema &S) {
- // Type dependent expressions will be checked during instantiation.
- if (E->isTypeDependent())
- return false;
-
- if (E->isLValue())
- return false; // Cool, this is an lvalue.
-
- // Okay, this is not an lvalue, but perhaps it is the result of a cast that we
- // are supposed to allow.
- const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
- if (E != E2 && E2->isLValue()) {
- if (!S.getLangOpts().HeinousExtensions)
- S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
- << E->getSourceRange();
- else
- S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
- << E->getSourceRange();
- // Accept, even if we emitted an error diagnostic.
- return false;
- }
-
- // None of the above, just randomly invalid non-lvalue.
- return true;
-}
-
-/// isOperandMentioned - Return true if the specified operand # is mentioned
-/// anywhere in the decomposed asm string.
-static bool isOperandMentioned(unsigned OpNo,
- ArrayRef<AsmStmt::AsmStringPiece> AsmStrPieces) {
- for (unsigned p = 0, e = AsmStrPieces.size(); p != e; ++p) {
- const AsmStmt::AsmStringPiece &Piece = AsmStrPieces[p];
- if (!Piece.isOperand()) continue;
-
- // If this is a reference to the input and if the input was the smaller
- // one, then we have to reject this asm.
- if (Piece.getOperandNo() == OpNo)
- return true;
- }
- return false;
-}
-
-StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple,
- bool IsVolatile, unsigned NumOutputs,
- unsigned NumInputs, IdentifierInfo **Names,
- MultiExprArg constraints, MultiExprArg exprs,
- Expr *asmString, MultiExprArg clobbers,
- SourceLocation RParenLoc, bool MSAsm) {
- unsigned NumClobbers = clobbers.size();
- StringLiteral **Constraints =
- reinterpret_cast<StringLiteral**>(constraints.get());
- Expr **Exprs = exprs.get();
- StringLiteral *AsmString = cast<StringLiteral>(asmString);
- StringLiteral **Clobbers = reinterpret_cast<StringLiteral**>(clobbers.get());
-
- SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
-
- // The parser verifies that there is a string literal here.
- if (!AsmString->isAscii())
- return StmtError(Diag(AsmString->getLocStart(),diag::err_asm_wide_character)
- << AsmString->getSourceRange());
-
- for (unsigned i = 0; i != NumOutputs; i++) {
- StringLiteral *Literal = Constraints[i];
- if (!Literal->isAscii())
- return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
- << Literal->getSourceRange());
-
- StringRef OutputName;
- if (Names[i])
- OutputName = Names[i]->getName();
-
- TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
- if (!Context.getTargetInfo().validateOutputConstraint(Info))
- return StmtError(Diag(Literal->getLocStart(),
- diag::err_asm_invalid_output_constraint)
- << Info.getConstraintStr());
-
- // Check that the output exprs are valid lvalues.
- Expr *OutputExpr = Exprs[i];
- if (CheckAsmLValue(OutputExpr, *this)) {
- return StmtError(Diag(OutputExpr->getLocStart(),
- diag::err_asm_invalid_lvalue_in_output)
- << OutputExpr->getSourceRange());
- }
-
- OutputConstraintInfos.push_back(Info);
- }
-
- SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
-
- for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
- StringLiteral *Literal = Constraints[i];
- if (!Literal->isAscii())
- return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
- << Literal->getSourceRange());
-
- StringRef InputName;
- if (Names[i])
- InputName = Names[i]->getName();
-
- TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
- if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos.data(),
- NumOutputs, Info)) {
- return StmtError(Diag(Literal->getLocStart(),
- diag::err_asm_invalid_input_constraint)
- << Info.getConstraintStr());
- }
-
- Expr *InputExpr = Exprs[i];
-
- // Only allow void types for memory constraints.
- if (Info.allowsMemory() && !Info.allowsRegister()) {
- if (CheckAsmLValue(InputExpr, *this))
- return StmtError(Diag(InputExpr->getLocStart(),
- diag::err_asm_invalid_lvalue_in_input)
- << Info.getConstraintStr()
- << InputExpr->getSourceRange());
- }
-
- if (Info.allowsRegister()) {
- if (InputExpr->getType()->isVoidType()) {
- return StmtError(Diag(InputExpr->getLocStart(),
- diag::err_asm_invalid_type_in_input)
- << InputExpr->getType() << Info.getConstraintStr()
- << InputExpr->getSourceRange());
- }
- }
-
- ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]);
- if (Result.isInvalid())
- return StmtError();
-
- Exprs[i] = Result.take();
- InputConstraintInfos.push_back(Info);
- }
-
- // Check that the clobbers are valid.
- for (unsigned i = 0; i != NumClobbers; i++) {
- StringLiteral *Literal = Clobbers[i];
- if (!Literal->isAscii())
- return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
- << Literal->getSourceRange());
-
- StringRef Clobber = Literal->getString();
-
- if (!Context.getTargetInfo().isValidClobber(Clobber))
- return StmtError(Diag(Literal->getLocStart(),
- diag::err_asm_unknown_register_name) << Clobber);
- }
-
- AsmStmt *NS =
- new (Context) AsmStmt(Context, AsmLoc, IsSimple, IsVolatile, MSAsm,
- NumOutputs, NumInputs, Names, Constraints, Exprs,
- AsmString, NumClobbers, Clobbers, RParenLoc);
- // Validate the asm string, ensuring it makes sense given the operands we
- // have.
- SmallVector<AsmStmt::AsmStringPiece, 8> Pieces;
- unsigned DiagOffs;
- if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) {
- Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID)
- << AsmString->getSourceRange();
- return StmtError();
- }
-
- // Validate tied input operands for type mismatches.
- for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) {
- TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
-
- // If this is a tied constraint, verify that the output and input have
- // either exactly the same type, or that they are int/ptr operands with the
- // same size (int/long, int*/long, are ok etc).
- if (!Info.hasTiedOperand()) continue;
-
- unsigned TiedTo = Info.getTiedOperand();
- unsigned InputOpNo = i+NumOutputs;
- Expr *OutputExpr = Exprs[TiedTo];
- Expr *InputExpr = Exprs[InputOpNo];
-
- if (OutputExpr->isTypeDependent() || InputExpr->isTypeDependent())
- continue;
-
- QualType InTy = InputExpr->getType();
- QualType OutTy = OutputExpr->getType();
- if (Context.hasSameType(InTy, OutTy))
- continue; // All types can be tied to themselves.
-
- // Decide if the input and output are in the same domain (integer/ptr or
- // floating point.
- enum AsmDomain {
- AD_Int, AD_FP, AD_Other
- } InputDomain, OutputDomain;
-
- if (InTy->isIntegerType() || InTy->isPointerType())
- InputDomain = AD_Int;
- else if (InTy->isRealFloatingType())
- InputDomain = AD_FP;
- else
- InputDomain = AD_Other;
-
- if (OutTy->isIntegerType() || OutTy->isPointerType())
- OutputDomain = AD_Int;
- else if (OutTy->isRealFloatingType())
- OutputDomain = AD_FP;
- else
- OutputDomain = AD_Other;
-
- // They are ok if they are the same size and in the same domain. This
- // allows tying things like:
- // void* to int*
- // void* to int if they are the same size.
- // double to long double if they are the same size.
- //
- uint64_t OutSize = Context.getTypeSize(OutTy);
- uint64_t InSize = Context.getTypeSize(InTy);
- if (OutSize == InSize && InputDomain == OutputDomain &&
- InputDomain != AD_Other)
- continue;
-
- // If the smaller input/output operand is not mentioned in the asm string,
- // then we can promote the smaller one to a larger input and the asm string
- // won't notice.
- bool SmallerValueMentioned = false;
-
- // If this is a reference to the input and if the input was the smaller
- // one, then we have to reject this asm.
- if (isOperandMentioned(InputOpNo, Pieces)) {
- // This is a use in the asm string of the smaller operand. Since we
- // codegen this by promoting to a wider value, the asm will get printed
- // "wrong".
- SmallerValueMentioned |= InSize < OutSize;
- }
- if (isOperandMentioned(TiedTo, Pieces)) {
- // If this is a reference to the output, and if the output is the larger
- // value, then it's ok because we'll promote the input to the larger type.
- SmallerValueMentioned |= OutSize < InSize;
- }
-
- // If the smaller value wasn't mentioned in the asm string, and if the
- // output was a register, just extend the shorter one to the size of the
- // larger one.
- if (!SmallerValueMentioned && InputDomain != AD_Other &&
- OutputConstraintInfos[TiedTo].allowsRegister())
- continue;
-
- // Either both of the operands were mentioned or the smaller one was
- // mentioned. One more special case that we'll allow: if the tied input is
- // integer, unmentioned, and is a constant, then we'll allow truncating it
- // down to the size of the destination.
- if (InputDomain == AD_Int && OutputDomain == AD_Int &&
- !isOperandMentioned(InputOpNo, Pieces) &&
- InputExpr->isEvaluatable(Context)) {
- CastKind castKind =
- (OutTy->isBooleanType() ? CK_IntegralToBoolean : CK_IntegralCast);
- InputExpr = ImpCastExprToType(InputExpr, OutTy, castKind).take();
- Exprs[InputOpNo] = InputExpr;
- NS->setInputExpr(i, InputExpr);
- continue;
- }
-
- Diag(InputExpr->getLocStart(),
- diag::err_asm_tying_incompatible_types)
- << InTy << OutTy << OutputExpr->getSourceRange()
- << InputExpr->getSourceRange();
- return StmtError();
- }
-
- return Owned(NS);
-}
-
-// isMSAsmKeyword - Return true if this is an MS-style inline asm keyword. These
-// require special handling.
-static bool isMSAsmKeyword(StringRef Name) {
- bool Ret = llvm::StringSwitch<bool>(Name)
- .Cases("EVEN", "ALIGN", true) // Alignment directives.
- .Cases("LENGTH", "SIZE", "TYPE", true) // Type and variable sizes.
- .Case("_emit", true) // _emit Pseudoinstruction.
- .Default(false);
- return Ret;
-}
-
-static StringRef getSpelling(Sema &SemaRef, Token AsmTok) {
- StringRef Asm;
- SmallString<512> TokenBuf;
- TokenBuf.resize(512);
- bool StringInvalid = false;
- Asm = SemaRef.PP.getSpelling(AsmTok, TokenBuf, &StringInvalid);
- assert (!StringInvalid && "Expected valid string!");
- return Asm;
-}
-
-static void patchMSAsmStrings(Sema &SemaRef, bool &IsSimple,
- SourceLocation AsmLoc,
- ArrayRef<Token> AsmToks,
- const TargetInfo &TI,
- std::vector<llvm::BitVector> &AsmRegs,
- std::vector<llvm::BitVector> &AsmNames,
- std::vector<std::string> &AsmStrings) {
- assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!");
-
- // Assume simple asm stmt until we parse a non-register identifer (or we just
- // need to bail gracefully).
- IsSimple = true;
-
- SmallString<512> Asm;
- unsigned NumAsmStrings = 0;
- for (unsigned i = 0, e = AsmToks.size(); i != e; ++i) {
-
- // Determine if this should be considered a new asm.
- bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() ||
- AsmToks[i].is(tok::kw_asm);
-
- // Emit the previous asm string.
- if (i && isNewAsm) {
- AsmStrings[NumAsmStrings++] = Asm.c_str();
- if (AsmToks[i].is(tok::kw_asm)) {
- ++i; // Skip __asm
- assert (i != e && "Expected another token.");
- }
- }
-
- // Start a new asm string with the opcode.
- if (isNewAsm) {
- AsmRegs[NumAsmStrings].resize(AsmToks.size());
- AsmNames[NumAsmStrings].resize(AsmToks.size());
-
- StringRef Piece = AsmToks[i].getIdentifierInfo()->getName();
- // MS-style inline asm keywords require special handling.
- if (isMSAsmKeyword(Piece))
- IsSimple = false;
-
- // TODO: Verify this is a valid opcode.
- Asm = Piece;
- continue;
- }
-
- if (i && AsmToks[i].hasLeadingSpace())
- Asm += ' ';
-
- // Check the operand(s).
- switch (AsmToks[i].getKind()) {
- default:
- IsSimple = false;
- Asm += getSpelling(SemaRef, AsmToks[i]);
- break;
- case tok::comma: Asm += ","; break;
- case tok::colon: Asm += ":"; break;
- case tok::l_square: Asm += "["; break;
- case tok::r_square: Asm += "]"; break;
- case tok::l_brace: Asm += "{"; break;
- case tok::r_brace: Asm += "}"; break;
- case tok::numeric_constant:
- Asm += getSpelling(SemaRef, AsmToks[i]);
- break;
- case tok::identifier: {
- IdentifierInfo *II = AsmToks[i].getIdentifierInfo();
- StringRef Name = II->getName();
-
- // Valid register?
- if (TI.isValidGCCRegisterName(Name)) {
- AsmRegs[NumAsmStrings].set(i);
- Asm += Name;
- break;
- }
-
- IsSimple = false;
-
- // MS-style inline asm keywords require special handling.
- if (isMSAsmKeyword(Name)) {
- IsSimple = false;
- Asm += Name;
- break;
- }
-
- // FIXME: Why are we missing this segment register?
- if (Name == "fs") {
- Asm += Name;
- break;
- }
-
- // Lookup the identifier.
- // TODO: Someone with more experience with clang should verify this the
- // proper way of doing a symbol lookup.
- DeclarationName DeclName(II);
- Scope *CurScope = SemaRef.getCurScope();
- LookupResult R(SemaRef, DeclName, AsmLoc, Sema::LookupOrdinaryName);
- if (!SemaRef.LookupName(R, CurScope, false/*AllowBuiltinCreation*/))
- break;
-
- assert (R.isSingleResult() && "Expected a single result?!");
- NamedDecl *Decl = R.getFoundDecl();
- switch (Decl->getKind()) {
- default:
- assert(0 && "Unknown decl kind.");
- break;
- case Decl::Var: {
- case Decl::ParmVar:
- AsmNames[NumAsmStrings].set(i);
-
- VarDecl *Var = cast<VarDecl>(Decl);
- QualType Ty = Var->getType();
- (void)Ty; // Avoid warning.
- // TODO: Patch identifier with valid operand. One potential idea is to
- // probe the backend with type information to guess the possible
- // operand.
- break;
- }
- }
- break;
- }
- }
- }
-
- // Emit the final (and possibly only) asm string.
- AsmStrings[NumAsmStrings] = Asm.c_str();
-}
-
-// Build the unmodified MSAsmString.
-static std::string buildMSAsmString(Sema &SemaRef,
- ArrayRef<Token> AsmToks,
- unsigned &NumAsmStrings) {
- assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!");
- NumAsmStrings = 0;
-
- SmallString<512> Asm;
- for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) {
- bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() ||
- AsmToks[i].is(tok::kw_asm);
-
- if (isNewAsm) {
- ++NumAsmStrings;
- if (i)
- Asm += '\n';
- if (AsmToks[i].is(tok::kw_asm)) {
- i++; // Skip __asm
- assert (i != e && "Expected another token");
- }
- }
-
- if (i && AsmToks[i].hasLeadingSpace() && !isNewAsm)
- Asm += ' ';
-
- Asm += getSpelling(SemaRef, AsmToks[i]);
- }
- return Asm.c_str();
-}
-
-StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc,
- SourceLocation LBraceLoc,
- ArrayRef<Token> AsmToks,
- SourceLocation EndLoc) {
- // MS-style inline assembly is not fully supported, so emit a warning.
- Diag(AsmLoc, diag::warn_unsupported_msasm);
- SmallVector<StringRef,4> Clobbers;
- std::set<std::string> ClobberRegs;
- SmallVector<IdentifierInfo*, 4> Inputs;
- SmallVector<IdentifierInfo*, 4> Outputs;
-
- // Empty asm statements don't need to instantiate the AsmParser, etc.
- if (AsmToks.empty()) {
- StringRef AsmString;
- MSAsmStmt *NS =
- new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true,
- /*IsVolatile*/ true, AsmToks, Inputs, Outputs,
- AsmString, Clobbers, EndLoc);
- return Owned(NS);
- }
-
- unsigned NumAsmStrings;
- std::string AsmString = buildMSAsmString(*this, AsmToks, NumAsmStrings);
-
- bool IsSimple;
- std::vector<llvm::BitVector> Regs;
- std::vector<llvm::BitVector> Names;
- std::vector<std::string> PatchedAsmStrings;
-
- Regs.resize(NumAsmStrings);
- Names.resize(NumAsmStrings);
- PatchedAsmStrings.resize(NumAsmStrings);
-
- // Rewrite operands to appease the AsmParser.
- patchMSAsmStrings(*this, IsSimple, AsmLoc, AsmToks,
- Context.getTargetInfo(), Regs, Names, PatchedAsmStrings);
-
- // patchMSAsmStrings doesn't correctly patch non-simple asm statements.
- if (!IsSimple) {
- MSAsmStmt *NS =
- new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true,
- /*IsVolatile*/ true, AsmToks, Inputs, Outputs,
- AsmString, Clobbers, EndLoc);
- return Owned(NS);
- }
-
- // Initialize targets and assembly printers/parsers.
- llvm::InitializeAllTargetInfos();
- llvm::InitializeAllTargetMCs();
- llvm::InitializeAllAsmParsers();
-
- // Get the target specific parser.
- std::string Error;
- const std::string &TT = Context.getTargetInfo().getTriple().getTriple();
- const llvm::Target *TheTarget(llvm::TargetRegistry::lookupTarget(TT, Error));
-
- OwningPtr<llvm::MCAsmInfo> MAI(TheTarget->createMCAsmInfo(TT));
- OwningPtr<llvm::MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TT));
- OwningPtr<llvm::MCObjectFileInfo> MOFI(new llvm::MCObjectFileInfo());
- OwningPtr<llvm::MCSubtargetInfo>
- STI(TheTarget->createMCSubtargetInfo(TT, "", ""));
-
- for (unsigned i = 0, e = PatchedAsmStrings.size(); i != e; ++i) {
- llvm::SourceMgr SrcMgr;
- llvm::MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr);
- llvm::MemoryBuffer *Buffer =
- llvm::MemoryBuffer::getMemBuffer(PatchedAsmStrings[i], "<inline asm>");
-
- // Tell SrcMgr about this buffer, which is what the parser will pick up.
- SrcMgr.AddNewSourceBuffer(Buffer, llvm::SMLoc());
-
- OwningPtr<llvm::MCStreamer> Str(createNullStreamer(Ctx));
- OwningPtr<llvm::MCAsmParser>
- Parser(createMCAsmParser(SrcMgr, Ctx, *Str.get(), *MAI));
- OwningPtr<llvm::MCTargetAsmParser>
- TargetParser(TheTarget->createMCAsmParser(*STI, *Parser));
- // Change to the Intel dialect.
- Parser->setAssemblerDialect(1);
- Parser->setTargetParser(*TargetParser.get());
-
- // Prime the lexer.
- Parser->Lex();
-
- // Parse the opcode.
- StringRef IDVal;
- Parser->ParseIdentifier(IDVal);
-
- // Canonicalize the opcode to lower case.
- SmallString<128> Opcode;
- for (unsigned i = 0, e = IDVal.size(); i != e; ++i)
- Opcode.push_back(tolower(IDVal[i]));
-
- // Parse the operands.
- llvm::SMLoc IDLoc;
- SmallVector<llvm::MCParsedAsmOperand*, 8> Operands;
- bool HadError = TargetParser->ParseInstruction(Opcode.str(), IDLoc,
- Operands);
- assert (!HadError && "Unexpected error parsing instruction");
-
- // Match the MCInstr.
- SmallVector<llvm::MCInst, 2> Instrs;
- HadError = TargetParser->MatchInstruction(IDLoc, Operands, Instrs);
- assert (!HadError && "Unexpected error matching instruction");
- assert ((Instrs.size() == 1) && "Expected only a single instruction.");
-
- // Get the instruction descriptor.
- llvm::MCInst Inst = Instrs[0];
- const llvm::MCInstrInfo *MII = TheTarget->createMCInstrInfo();
- const llvm::MCInstrDesc &Desc = MII->get(Inst.getOpcode());
- llvm::MCInstPrinter *IP =
- TheTarget->createMCInstPrinter(1, *MAI, *MII, *MRI, *STI);
-
- // Build the list of clobbers.
- for (unsigned i = 0, e = Desc.getNumDefs(); i != e; ++i) {
- const llvm::MCOperand &Op = Inst.getOperand(i);
- if (!Op.isReg())
- continue;
-
- std::string Reg;
- llvm::raw_string_ostream OS(Reg);
- IP->printRegName(OS, Op.getReg());
-
- StringRef Clobber(OS.str());
- if (!Context.getTargetInfo().isValidClobber(Clobber))
- return StmtError(Diag(AsmLoc, diag::err_asm_unknown_register_name) <<
- Clobber);
- ClobberRegs.insert(Reg);
- }
- }
- for (std::set<std::string>::iterator I = ClobberRegs.begin(),
- E = ClobberRegs.end(); I != E; ++I)
- Clobbers.push_back(*I);
-
- MSAsmStmt *NS =
- new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
- /*IsVolatile*/ true, AsmToks, Inputs, Outputs,
- AsmString, Clobbers, EndLoc);
- return Owned(NS);
-}
-
StmtResult
Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
SourceLocation RParen, Decl *Parm,
@@ -3104,7 +2572,7 @@ Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
getCurFunction()->setHasBranchProtectedScope();
unsigned NumCatchStmts = CatchStmts.size();
return Owned(ObjCAtTryStmt::Create(Context, AtLoc, Try,
- CatchStmts.release(),
+ CatchStmts.data(),
NumCatchStmts,
Finally));
}
@@ -3239,7 +2707,7 @@ Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
unsigned NumHandlers = RawHandlers.size();
assert(NumHandlers > 0 &&
"The parser shouldn't call this if there are no handlers.");
- Stmt **Handlers = RawHandlers.get();
+ Stmt **Handlers = RawHandlers.data();
SmallVector<TypeWithHandler, 8> TypesWithHandlers;
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp
new file mode 100644
index 0000000..7c2c766
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp
@@ -0,0 +1,661 @@
+//===--- SemaStmtAsm.cpp - Semantic Analysis for Asm Statements -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for inline asm statements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+using namespace clang;
+using namespace sema;
+
+/// CheckAsmLValue - GNU C has an extremely ugly extension whereby they silently
+/// ignore "noop" casts in places where an lvalue is required by an inline asm.
+/// We emulate this behavior when -fheinous-gnu-extensions is specified, but
+/// provide strong guidance not to use it.
+///
+/// This method checks to see if the argument is an acceptable l-value and
+/// returns false if it is a case we can handle.
+static bool CheckAsmLValue(const Expr *E, Sema &S) {
+ // Type dependent expressions will be checked during instantiation.
+ if (E->isTypeDependent())
+ return false;
+
+ if (E->isLValue())
+ return false; // Cool, this is an lvalue.
+
+ // Okay, this is not an lvalue, but perhaps it is the result of a cast that we
+ // are supposed to allow.
+ const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
+ if (E != E2 && E2->isLValue()) {
+ if (!S.getLangOpts().HeinousExtensions)
+ S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
+ << E->getSourceRange();
+ else
+ S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
+ << E->getSourceRange();
+ // Accept, even if we emitted an error diagnostic.
+ return false;
+ }
+
+ // None of the above, just randomly invalid non-lvalue.
+ return true;
+}
+
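A sketch of the GNU extension this check tolerates (assumes a target with an 'r' constraint; without -fheinous-gnu-extensions the diagnostic below is a hard error):

    int x;
    void f() {
      // '(int)x' is not an lvalue, but the cast is a no-op, so
      // IgnoreParenNoopCasts() strips it and CheckAsmLValue returns
      // false (accept) after emitting the warning or error.
      __asm__("" : "=r"((int)x));
    }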
+/// isOperandMentioned - Return true if the specified operand # is mentioned
+/// anywhere in the decomposed asm string.
+static bool isOperandMentioned(unsigned OpNo,
+ ArrayRef<GCCAsmStmt::AsmStringPiece> AsmStrPieces) {
+ for (unsigned p = 0, e = AsmStrPieces.size(); p != e; ++p) {
+ const GCCAsmStmt::AsmStringPiece &Piece = AsmStrPieces[p];
+ if (!Piece.isOperand()) continue;
+
+ // If this is a reference to the input and if the input was the smaller
+ // one, then we have to reject this asm.
+ if (Piece.getOperandNo() == OpNo)
+ return true;
+ }
+ return false;
+}
+
+StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
+ bool IsVolatile, unsigned NumOutputs,
+ unsigned NumInputs, IdentifierInfo **Names,
+ MultiExprArg constraints, MultiExprArg exprs,
+ Expr *asmString, MultiExprArg clobbers,
+ SourceLocation RParenLoc) {
+ unsigned NumClobbers = clobbers.size();
+ StringLiteral **Constraints =
+ reinterpret_cast<StringLiteral**>(constraints.data());
+ Expr **Exprs = exprs.data();
+ StringLiteral *AsmString = cast<StringLiteral>(asmString);
+ StringLiteral **Clobbers = reinterpret_cast<StringLiteral**>(clobbers.data());
+
+ SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+
+ // The parser verifies that there is a string literal here.
+ if (!AsmString->isAscii())
+ return StmtError(Diag(AsmString->getLocStart(),diag::err_asm_wide_character)
+ << AsmString->getSourceRange());
+
+ for (unsigned i = 0; i != NumOutputs; i++) {
+ StringLiteral *Literal = Constraints[i];
+ if (!Literal->isAscii())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ StringRef OutputName;
+ if (Names[i])
+ OutputName = Names[i]->getName();
+
+ TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
+ if (!Context.getTargetInfo().validateOutputConstraint(Info))
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_invalid_output_constraint)
+ << Info.getConstraintStr());
+
+ // Check that the output exprs are valid lvalues.
+ Expr *OutputExpr = Exprs[i];
+ if (CheckAsmLValue(OutputExpr, *this)) {
+ return StmtError(Diag(OutputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_output)
+ << OutputExpr->getSourceRange());
+ }
+
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
+ StringLiteral *Literal = Constraints[i];
+ if (!Literal->isAscii())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ StringRef InputName;
+ if (Names[i])
+ InputName = Names[i]->getName();
+
+ TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
+ if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos.data(),
+ NumOutputs, Info)) {
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_invalid_input_constraint)
+ << Info.getConstraintStr());
+ }
+
+ Expr *InputExpr = Exprs[i];
+
+ // Only allow void types for memory constraints.
+ if (Info.allowsMemory() && !Info.allowsRegister()) {
+ if (CheckAsmLValue(InputExpr, *this))
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_input)
+ << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+
+ if (Info.allowsRegister()) {
+ if (InputExpr->getType()->isVoidType()) {
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_type_in_input)
+ << InputExpr->getType() << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+ }
+
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]);
+ if (Result.isInvalid())
+ return StmtError();
+
+ Exprs[i] = Result.take();
+ InputConstraintInfos.push_back(Info);
+ }
+
+ // Check that the clobbers are valid.
+ for (unsigned i = 0; i != NumClobbers; i++) {
+ StringLiteral *Literal = Clobbers[i];
+ if (!Literal->isAscii())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ StringRef Clobber = Literal->getString();
+
+ if (!Context.getTargetInfo().isValidClobber(Clobber))
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_unknown_register_name) << Clobber);
+ }
+
+ GCCAsmStmt *NS =
+ new (Context) GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, Constraints, Exprs, AsmString,
+ NumClobbers, Clobbers, RParenLoc);
+ // Validate the asm string, ensuring it makes sense given the operands we
+ // have.
+ SmallVector<GCCAsmStmt::AsmStringPiece, 8> Pieces;
+ unsigned DiagOffs;
+ if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) {
+ Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID)
+ << AsmString->getSourceRange();
+ return StmtError();
+ }
+
+ // Validate constraints and modifiers.
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ GCCAsmStmt::AsmStringPiece &Piece = Pieces[i];
+ if (!Piece.isOperand()) continue;
+
+ // Look for the correct constraint index.
+ unsigned Idx = 0;
+ unsigned ConstraintIdx = 0;
+ for (unsigned i = 0, e = NS->getNumOutputs(); i != e; ++i, ++ConstraintIdx) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+ if (Idx == Piece.getOperandNo())
+ break;
+ ++Idx;
+
+ if (Info.isReadWrite()) {
+ if (Idx == Piece.getOperandNo())
+ break;
+ ++Idx;
+ }
+ }
+
+ for (unsigned i = 0, e = NS->getNumInputs(); i != e; ++i, ++ConstraintIdx) {
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+ if (Idx == Piece.getOperandNo())
+ break;
+ ++Idx;
+
+ if (Info.isReadWrite()) {
+ if (Idx == Piece.getOperandNo())
+ break;
+ ++Idx;
+ }
+ }
+
+ // Now that we have the right indexes go ahead and check.
+ StringLiteral *Literal = Constraints[ConstraintIdx];
+ const Type *Ty = Exprs[ConstraintIdx]->getType().getTypePtr();
+ if (Ty->isDependentType() || Ty->isIncompleteType())
+ continue;
+
+ unsigned Size = Context.getTypeSize(Ty);
+ if (!Context.getTargetInfo()
+ .validateConstraintModifier(Literal->getString(), Piece.getModifier(),
+ Size))
+ Diag(Exprs[ConstraintIdx]->getLocStart(),
+ diag::warn_asm_mismatched_size_modifier);
+ }
+
+ // Validate tied input operands for type mismatches.
+ for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) {
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ // If this is a tied constraint, verify that the output and input have
+ // either exactly the same type, or that they are int/ptr operands with the
+ // same size (int/long, int*/long, are ok etc).
+ if (!Info.hasTiedOperand()) continue;
+
+ unsigned TiedTo = Info.getTiedOperand();
+ unsigned InputOpNo = i+NumOutputs;
+ Expr *OutputExpr = Exprs[TiedTo];
+ Expr *InputExpr = Exprs[InputOpNo];
+
+ if (OutputExpr->isTypeDependent() || InputExpr->isTypeDependent())
+ continue;
+
+ QualType InTy = InputExpr->getType();
+ QualType OutTy = OutputExpr->getType();
+ if (Context.hasSameType(InTy, OutTy))
+ continue; // All types can be tied to themselves.
+
+ // Decide if the input and output are in the same domain (integer/ptr or
+ // floating point).
+ enum AsmDomain {
+ AD_Int, AD_FP, AD_Other
+ } InputDomain, OutputDomain;
+
+ if (InTy->isIntegerType() || InTy->isPointerType())
+ InputDomain = AD_Int;
+ else if (InTy->isRealFloatingType())
+ InputDomain = AD_FP;
+ else
+ InputDomain = AD_Other;
+
+ if (OutTy->isIntegerType() || OutTy->isPointerType())
+ OutputDomain = AD_Int;
+ else if (OutTy->isRealFloatingType())
+ OutputDomain = AD_FP;
+ else
+ OutputDomain = AD_Other;
+
+ // They are ok if they are the same size and in the same domain. This
+ // allows tying things like:
+ // void* to int*
+ // void* to int if they are the same size.
+ // double to long double if they are the same size.
+ //
+ uint64_t OutSize = Context.getTypeSize(OutTy);
+ uint64_t InSize = Context.getTypeSize(InTy);
+ if (OutSize == InSize && InputDomain == OutputDomain &&
+ InputDomain != AD_Other)
+ continue;
+
+ // If the smaller input/output operand is not mentioned in the asm string,
+ // then we can promote the smaller one to a larger input and the asm string
+ // won't notice.
+ bool SmallerValueMentioned = false;
+
+ // If this is a reference to the input and if the input was the smaller
+ // one, then we have to reject this asm.
+ if (isOperandMentioned(InputOpNo, Pieces)) {
+ // This is a use in the asm string of the smaller operand. Since we
+ // codegen this by promoting to a wider value, the asm will get printed
+ // "wrong".
+ SmallerValueMentioned |= InSize < OutSize;
+ }
+ if (isOperandMentioned(TiedTo, Pieces)) {
+ // If this is a reference to the output, and if the output is the larger
+ // value, then it's ok because we'll promote the input to the larger type.
+ SmallerValueMentioned |= OutSize < InSize;
+ }
+
+ // If the smaller value wasn't mentioned in the asm string, and if the
+ // output was a register, just extend the shorter one to the size of the
+ // larger one.
+ if (!SmallerValueMentioned && InputDomain != AD_Other &&
+ OutputConstraintInfos[TiedTo].allowsRegister())
+ continue;
+
+ // Either both of the operands were mentioned or the smaller one was
+ // mentioned. One more special case that we'll allow: if the tied input is
+ // integer, unmentioned, and is a constant, then we'll allow truncating it
+ // down to the size of the destination.
+ if (InputDomain == AD_Int && OutputDomain == AD_Int &&
+ !isOperandMentioned(InputOpNo, Pieces) &&
+ InputExpr->isEvaluatable(Context)) {
+ CastKind castKind =
+ (OutTy->isBooleanType() ? CK_IntegralToBoolean : CK_IntegralCast);
+ InputExpr = ImpCastExprToType(InputExpr, OutTy, castKind).take();
+ Exprs[InputOpNo] = InputExpr;
+ NS->setInputExpr(i, InputExpr);
+ continue;
+ }
+
+ Diag(InputExpr->getLocStart(),
+ diag::err_asm_tying_incompatible_types)
+ << InTy << OutTy << OutputExpr->getSourceRange()
+ << InputExpr->getSourceRange();
+ return StmtError();
+ }
+
+ return Owned(NS);
+}
+
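The tied-operand validation above is what checks matching constraints such as "0". A minimal sketch (assumes an x86 target for the mnemonic):

    int increment(int v) {
      int out;
      // "0" ties the input to output operand 0; both sides are 'int',
      // so Context.hasSameType() short-circuits the size/domain checks.
      __asm__("incl %0" : "=r"(out) : "0"(v));
      return out;
    }

Tying, say, a double input to an int output would instead reach err_asm_tying_incompatible_types, since the operands fall in different domains.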
+// getSpelling - Get the spelling of the AsmTok token.
+static StringRef getSpelling(Sema &SemaRef, Token AsmTok) {
+ StringRef Asm;
+ SmallString<512> TokenBuf;
+ TokenBuf.resize(512);
+ bool StringInvalid = false;
+ Asm = SemaRef.PP.getSpelling(AsmTok, TokenBuf, &StringInvalid);
+ assert (!StringInvalid && "Expected valid string!");
+ return Asm;
+}
+
+// Build the inline assembly string. Returns true on error.
+static bool buildMSAsmString(Sema &SemaRef,
+ SourceLocation AsmLoc,
+ ArrayRef<Token> AsmToks,
+ llvm::SmallVectorImpl<unsigned> &TokOffsets,
+ std::string &AsmString) {
+ assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!");
+
+ SmallString<512> Asm;
+ for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) {
+ bool isNewAsm = ((i == 0) ||
+ AsmToks[i].isAtStartOfLine() ||
+ AsmToks[i].is(tok::kw_asm));
+ if (isNewAsm) {
+ if (i != 0)
+ Asm += "\n\t";
+
+ if (AsmToks[i].is(tok::kw_asm)) {
+ i++; // Skip __asm
+ if (i == e) {
+ SemaRef.Diag(AsmLoc, diag::err_asm_empty);
+ return true;
+ }
+
+ }
+ }
+
+ if (i && AsmToks[i].hasLeadingSpace() && !isNewAsm)
+ Asm += ' ';
+
+ StringRef Spelling = getSpelling(SemaRef, AsmToks[i]);
+ Asm += Spelling;
+ TokOffsets.push_back(Asm.size());
+ }
+ AsmString = Asm.str();
+ return false;
+}
+
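For a multi-statement __asm block the loop above joins the token spellings with "\n\t" between statements. A sketch (assumes x86 and an MS-compatible mode such as -fasm-blocks):

    int four() {
      int r;
      __asm {
        mov eax, 2   // the three statements become the single string
        add eax, 2   // "mov eax, 2\n\tadd eax, 2\n\tmov r, eax"
        mov r, eax
      }
      return r;
    }

TokOffsets records the running end offset of each token so that MSAsmDiagHandler below can map AsmParser diagnostics back to a source location.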
+namespace {
+
+class MCAsmParserSemaCallbackImpl : public llvm::MCAsmParserSemaCallback {
+ Sema &SemaRef;
+ SourceLocation AsmLoc;
+ ArrayRef<Token> AsmToks;
+ ArrayRef<unsigned> TokOffsets;
+
+public:
+ MCAsmParserSemaCallbackImpl(Sema &Ref, SourceLocation Loc,
+ ArrayRef<Token> Toks,
+ ArrayRef<unsigned> Offsets)
+ : SemaRef(Ref), AsmLoc(Loc), AsmToks(Toks), TokOffsets(Offsets) { }
+ ~MCAsmParserSemaCallbackImpl() {}
+
+ void *LookupInlineAsmIdentifier(StringRef Name, void *SrcLoc, unsigned &Size){
+ SourceLocation Loc = SourceLocation::getFromPtrEncoding(SrcLoc);
+ NamedDecl *OpDecl = SemaRef.LookupInlineAsmIdentifier(Name, Loc, Size);
+ return static_cast<void *>(OpDecl);
+ }
+
+ bool LookupInlineAsmField(StringRef Base, StringRef Member,
+ unsigned &Offset) {
+ return SemaRef.LookupInlineAsmField(Base, Member, Offset, AsmLoc);
+ }
+
+ static void MSAsmDiagHandlerCallback(const llvm::SMDiagnostic &D,
+ void *Context) {
+ ((MCAsmParserSemaCallbackImpl*)Context)->MSAsmDiagHandler(D);
+ }
+ void MSAsmDiagHandler(const llvm::SMDiagnostic &D) {
+ // Compute an offset into the inline asm buffer.
+ // FIXME: This isn't right if .macro is involved (but hopefully, no
+ // real-world code does that).
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+ const llvm::MemoryBuffer *LBuf =
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
+ unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
+
+ // Figure out which token that offset points into.
+ const unsigned *OffsetPtr =
+ std::lower_bound(TokOffsets.begin(), TokOffsets.end(), Offset);
+ unsigned TokIndex = OffsetPtr - TokOffsets.begin();
+
+ // If we come up with an answer which seems sane, use it; otherwise,
+ // just point at the __asm keyword.
+ // FIXME: Assert the answer is sane once we handle .macro correctly.
+ SourceLocation Loc = AsmLoc;
+ if (TokIndex < AsmToks.size()) {
+ const Token *Tok = &AsmToks[TokIndex];
+ Loc = Tok->getLocation();
+ Loc = Loc.getLocWithOffset(Offset - (*OffsetPtr - Tok->getLength()));
+ }
+ SemaRef.Diag(Loc, diag::err_inline_ms_asm_parsing) << D.getMessage();
+ }
+};
+
+}
+
+NamedDecl *Sema::LookupInlineAsmIdentifier(StringRef Name, SourceLocation Loc,
+ unsigned &Size) {
+ Size = 0;
+ LookupResult Result(*this, &Context.Idents.get(Name), Loc,
+ Sema::LookupOrdinaryName);
+
+ if (!LookupName(Result, getCurScope())) {
+ // If we don't find anything, return null; the AsmParser will assume
+ // it is a label of some sort.
+ return 0;
+ }
+
+ if (!Result.isSingleResult()) {
+ // FIXME: Diagnose result.
+ return 0;
+ }
+
+ NamedDecl *ND = Result.getFoundDecl();
+ if (isa<VarDecl>(ND) || isa<FunctionDecl>(ND)) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(ND))
+ Size = Context.getTypeInfo(Var->getType()).first;
+
+ return ND;
+ }
+
+ // FIXME: Handle other kinds of results? (FieldDecl, etc.)
+ // FIXME: Diagnose if we find something we can't handle, like a typedef.
+ return 0;
+}
+
+bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
+ unsigned &Offset, SourceLocation AsmLoc) {
+ Offset = 0;
+ LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
+ LookupOrdinaryName);
+
+ if (!LookupName(BaseResult, getCurScope()))
+ return true;
+
+ if (!BaseResult.isSingleResult())
+ return true;
+
+ NamedDecl *FoundDecl = BaseResult.getFoundDecl();
+ const RecordType *RT = 0;
+ if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl)) {
+ RT = VD->getType()->getAs<RecordType>();
+ } else if (TypedefDecl *TD = dyn_cast<TypedefDecl>(FoundDecl)) {
+ RT = TD->getUnderlyingType()->getAs<RecordType>();
+ }
+ if (!RT)
+ return true;
+
+ if (RequireCompleteType(AsmLoc, QualType(RT, 0), 0))
+ return true;
+
+ LookupResult FieldResult(*this, &Context.Idents.get(Member), SourceLocation(),
+ LookupMemberName);
+
+ if (!LookupQualifiedName(FieldResult, RT->getDecl()))
+ return true;
+
+ // FIXME: Handle IndirectFieldDecl?
+ FieldDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
+ if (!FD)
+ return true;
+
+ const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
+ unsigned i = FD->getFieldIndex();
+ CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
+ Offset = (unsigned)Result.getQuantity();
+
+ return false;
+}
+
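LookupInlineAsmField backs member references inside MS asm blocks. A sketch of the kind of code it serves ('Point' is an invented example; assumes MS-compatible mode on x86):

    struct Point { int x; int y; };

    int get_y(Point p) {
      int r;
      __asm {
        mov eax, p.y   // 'p' resolves via LookupInlineAsmIdentifier; the
        mov r, eax     // byte offset of 'y' (4 on a typical 32-bit layout)
      }                // comes from the ASTRecordLayout computation above
      return r;
    }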
+StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
+ ArrayRef<Token> AsmToks, SourceLocation EndLoc) {
+ SmallVector<IdentifierInfo*, 4> Names;
+ SmallVector<StringRef, 4> ConstraintRefs;
+ SmallVector<Expr*, 4> Exprs;
+ SmallVector<StringRef, 4> ClobberRefs;
+
+ // Empty asm statements don't need to instantiate the AsmParser, etc.
+ if (AsmToks.empty()) {
+ StringRef EmptyAsmStr;
+ MSAsmStmt *NS =
+ new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true,
+ /*IsVolatile*/ true, AsmToks, /*NumOutputs*/ 0,
+ /*NumInputs*/ 0, Names, ConstraintRefs, Exprs,
+ EmptyAsmStr, ClobberRefs, EndLoc);
+ return Owned(NS);
+ }
+
+ std::string AsmString;
+ llvm::SmallVector<unsigned, 8> TokOffsets;
+ if (buildMSAsmString(*this, AsmLoc, AsmToks, TokOffsets, AsmString))
+ return StmtError();
+
+ // Get the target specific parser.
+ std::string Error;
+ const std::string &TT = Context.getTargetInfo().getTriple().getTriple();
+ const llvm::Target *TheTarget(llvm::TargetRegistry::lookupTarget(TT, Error));
+
+ OwningPtr<llvm::MCAsmInfo> MAI(TheTarget->createMCAsmInfo(TT));
+ OwningPtr<llvm::MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TT));
+ OwningPtr<llvm::MCObjectFileInfo> MOFI(new llvm::MCObjectFileInfo());
+ OwningPtr<llvm::MCSubtargetInfo>
+ STI(TheTarget->createMCSubtargetInfo(TT, "", ""));
+
+ llvm::SourceMgr SrcMgr;
+ llvm::MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr);
+ llvm::MemoryBuffer *Buffer =
+ llvm::MemoryBuffer::getMemBuffer(AsmString, "<inline asm>");
+
+ // Tell SrcMgr about this buffer, which is what the parser will pick up.
+ SrcMgr.AddNewSourceBuffer(Buffer, llvm::SMLoc());
+
+ OwningPtr<llvm::MCStreamer> Str(createNullStreamer(Ctx));
+ OwningPtr<llvm::MCAsmParser>
+ Parser(createMCAsmParser(SrcMgr, Ctx, *Str.get(), *MAI));
+ OwningPtr<llvm::MCTargetAsmParser>
+ TargetParser(TheTarget->createMCAsmParser(*STI, *Parser));
+
+ // Get the instruction descriptor.
+ const llvm::MCInstrInfo *MII = TheTarget->createMCInstrInfo();
+ llvm::MCInstPrinter *IP =
+ TheTarget->createMCInstPrinter(1, *MAI, *MII, *MRI, *STI);
+
+ // Change to the Intel dialect.
+ Parser->setAssemblerDialect(1);
+ Parser->setTargetParser(*TargetParser.get());
+ Parser->setParsingInlineAsm(true);
+ TargetParser->setParsingInlineAsm(true);
+
+ MCAsmParserSemaCallbackImpl MCAPSI(*this, AsmLoc, AsmToks, TokOffsets);
+ TargetParser->setSemaCallback(&MCAPSI);
+ SrcMgr.setDiagHandler(MCAsmParserSemaCallbackImpl::MSAsmDiagHandlerCallback,
+ &MCAPSI);
+
+ unsigned NumOutputs;
+ unsigned NumInputs;
+ std::string AsmStringIR;
+ SmallVector<std::pair<void *, bool>, 4> OpDecls;
+ SmallVector<std::string, 4> Constraints;
+ SmallVector<std::string, 4> Clobbers;
+ if (Parser->ParseMSInlineAsm(AsmLoc.getPtrEncoding(), AsmStringIR,
+ NumOutputs, NumInputs, OpDecls, Constraints,
+ Clobbers, MII, IP, MCAPSI))
+ return StmtError();
+
+ // Build the vector of clobber StringRefs.
+ unsigned NumClobbers = Clobbers.size();
+ ClobberRefs.resize(NumClobbers);
+ for (unsigned i = 0; i != NumClobbers; ++i)
+ ClobberRefs[i] = StringRef(Clobbers[i]);
+
+ // Recast the void pointers and build the vector of constraint StringRefs.
+ unsigned NumExprs = NumOutputs + NumInputs;
+ Names.resize(NumExprs);
+ ConstraintRefs.resize(NumExprs);
+ Exprs.resize(NumExprs);
+ for (unsigned i = 0, e = NumExprs; i != e; ++i) {
+ NamedDecl *OpDecl = static_cast<NamedDecl *>(OpDecls[i].first);
+ if (!OpDecl)
+ return StmtError();
+
+ DeclarationNameInfo NameInfo(OpDecl->getDeclName(), AsmLoc);
+ ExprResult OpExpr = BuildDeclarationNameExpr(CXXScopeSpec(), NameInfo,
+ OpDecl);
+ if (OpExpr.isInvalid())
+ return StmtError();
+
+ // Need offset of variable.
+ if (OpDecls[i].second)
+ OpExpr = BuildUnaryOp(getCurScope(), AsmLoc, clang::UO_AddrOf,
+ OpExpr.take());
+
+ Names[i] = OpDecl->getIdentifier();
+ ConstraintRefs[i] = StringRef(Constraints[i]);
+ Exprs[i] = OpExpr.take();
+ }
+
+ bool IsSimple = NumExprs > 0;
+ MSAsmStmt *NS =
+ new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
+ /*IsVolatile*/ true, AsmToks, NumOutputs, NumInputs,
+ Names, ConstraintRefs, Exprs, AsmStringIR,
+ ClobberRefs, EndLoc);
+ return Owned(NS);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp
index 3c15b7a..b268b45 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp
@@ -48,11 +48,16 @@ static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const AttributeList &A,
static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const AttributeList &A,
SourceRange Range) {
switch (A.getKind()) {
+ case AttributeList::UnknownAttribute:
+ S.Diag(A.getLoc(), A.isDeclspecAttribute() ?
+ diag::warn_unhandled_ms_attribute_ignored :
+ diag::warn_unknown_attribute_ignored) << A.getName();
+ return 0;
case AttributeList::AT_FallThrough:
return handleFallThroughAttr(S, St, A, Range);
default:
- // if we're here, then we parsed an attribute, but didn't recognize it as a
- // statement attribute => it is declaration attribute
+ // if we're here, then we parsed a known attribute, but didn't recognize
+ // it as a statement attribute => it is a declaration attribute
S.Diag(A.getRange().getBegin(), diag::warn_attribute_invalid_on_stmt)
<< A.getName()->getName() << St->getLocStart();
return 0;
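The distinction drawn here: a spelling Clang has never heard of now gets warn_unknown_attribute_ignored (or the Microsoft variant for __declspec), while a known declaration attribute in statement position keeps warn_attribute_invalid_on_stmt. The one recognized statement attribute at this point is fallthrough:

    void f(int n) {
      switch (n) {
      case 0:
        ++n;
        [[clang::fallthrough]];  // routed to handleFallThroughAttr above
      case 1:
        break;
      }
    }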
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
index 98497cb..f56b054 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
@@ -333,7 +333,8 @@ void Sema::LookupTemplateName(LookupResult &Found,
if (LookupCtx)
Diag(Found.getNameLoc(), diag::err_no_member_template_suggest)
<< Name << LookupCtx << CorrectedQuotedStr << SS.getRange()
- << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
+ << FixItHint::CreateReplacement(Corrected.getCorrectionRange(),
+ CorrectedStr);
else
Diag(Found.getNameLoc(), diag::err_no_template_suggest)
<< Name << CorrectedQuotedStr
@@ -1205,11 +1206,17 @@ static bool DiagnoseDefaultTemplateArgument(Sema &S,
/// of a template template parameter, recursively.
static bool DiagnoseUnexpandedParameterPacks(Sema &S,
TemplateTemplateParmDecl *TTP) {
+ // A template template parameter which is a parameter pack is also a pack
+ // expansion.
+ if (TTP->isParameterPack())
+ return false;
+
TemplateParameterList *Params = TTP->getTemplateParameters();
for (unsigned I = 0, N = Params->size(); I != N; ++I) {
NamedDecl *P = Params->getParam(I);
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) {
- if (S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(),
+ if (!NTTP->isParameterPack() &&
+ S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(),
NTTP->getTypeSourceInfo(),
Sema::UPPC_NonTypeTemplateParameterType))
return true;
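
The early return covers declarations like this sketch, where the template
template parameter is itself a pack, so uses of an outer pack inside its
parameter list are pack expansions rather than stray unexpanded packs:

    template<typename ...Ts>
    struct A {
      // TTs is a parameter pack; the Ts inside it expands per element and
      // must not be diagnosed as an unexpanded parameter pack.
      template<template<Ts> class ...TTs> struct B {};
    };
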
@@ -1322,7 +1329,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
} else if (NonTypeTemplateParmDecl *NewNonTypeParm
= dyn_cast<NonTypeTemplateParmDecl>(*NewParam)) {
// Check for unexpanded parameter packs.
- if (DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(),
+ if (!NewNonTypeParm->isParameterPack() &&
+ DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(),
NewNonTypeParm->getTypeSourceInfo(),
UPPC_NonTypeTemplateParameterType)) {
Invalid = true;
@@ -1343,7 +1351,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
if (NewNonTypeParm->isParameterPack()) {
assert(!NewNonTypeParm->hasDefaultArgument() &&
"Parameter packs can't have a default argument!");
- SawParameterPack = true;
+ if (!NewNonTypeParm->isPackExpansion())
+ SawParameterPack = true;
} else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument() &&
NewNonTypeParm->hasDefaultArgument()) {
OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
@@ -1390,7 +1399,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
if (NewTemplateParm->isParameterPack()) {
assert(!NewTemplateParm->hasDefaultArgument() &&
"Parameter packs can't have a default argument!");
- SawParameterPack = true;
+ if (!NewTemplateParm->isPackExpansion())
+ SawParameterPack = true;
} else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument() &&
NewTemplateParm->hasDefaultArgument()) {
OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
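
With this change, a parameter that is a pack expansion (its size fixed by
an outer pack) no longer counts as a trailing parameter pack, so further
parameters may follow it; a sketch:

    template<typename ...Ts>
    struct A {
      // 'Ts ...NTs' is a pack expansion over Ts, so the parameter U after
      // it is now accepted.
      template<Ts ...NTs, typename U> struct B {};
    };
    A<int>::B<42, float> b;  // NTs = {42}, U = float
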
@@ -1417,10 +1427,10 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
MissingDefaultArg = true;
}
- // C++0x [temp.param]p11:
+ // C++11 [temp.param]p11:
// If a template parameter of a primary class template or alias template
// is a template parameter pack, it shall be the last template parameter.
- if (SawParameterPack && (NewParam + 1) != NewParamEnd &&
+ if (SawParameterPack && (NewParam + 1) != NewParamEnd &&
(TPC == TPC_ClassTemplate || TPC == TPC_TypeAliasTemplate)) {
Diag((*NewParam)->getLocation(),
diag::err_template_param_pack_must_be_last_template_parameter);
@@ -1999,9 +2009,11 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
for (unsigned I = 0; I < Depth; ++I)
TemplateArgLists.addOuterTemplateArguments(0, 0);
+ LocalInstantiationScope Scope(*this);
InstantiatingTemplate Inst(*this, TemplateLoc, Template);
if (Inst)
return QualType();
+
CanonType = SubstType(Pattern->getUnderlyingType(),
TemplateArgLists, AliasTemplate->getLocation(),
AliasTemplate->getDeclName());
@@ -2086,7 +2098,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
Converted.data(),
Converted.size(), 0);
ClassTemplate->AddSpecialization(Decl, InsertPos);
- Decl->setLexicalDeclContext(CurContext);
+ if (ClassTemplate->isOutOfLine())
+ Decl->setLexicalDeclContext(ClassTemplate->getLexicalDeclContext());
}
CanonType = Context.getTypeDeclType(Decl);
@@ -2138,7 +2151,6 @@ Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
}
QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
- TemplateArgsIn.release();
if (Result.isNull())
return true;
@@ -2832,6 +2844,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
+ case TemplateArgument::NullPtr:
// We've already checked this template argument, so just copy
// it to the list of converted arguments.
Converted.push_back(Arg.getArgument());
@@ -2948,7 +2961,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
- if (CheckTemplateArgument(TempParm, Arg))
+ if (CheckTemplateArgument(TempParm, Arg, ArgumentPackIndex))
return true;
Converted.push_back(Arg.getArgument());
@@ -2966,6 +2979,8 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
llvm_unreachable("Declaration argument with template template parameter");
case TemplateArgument::Integral:
llvm_unreachable("Integral argument with template template parameter");
+ case TemplateArgument::NullPtr:
+ llvm_unreachable("Null pointer argument with template template parameter");
case TemplateArgument::Pack:
llvm_unreachable("Caller must expand template argument packs");
@@ -2997,6 +3012,33 @@ static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template,
return true;
}
+/// \brief Check whether the template parameter is a pack expansion, and if so,
+/// determine the number of parameters produced by that expansion. For instance:
+///
+/// \code
+/// template<typename ...Ts> struct A {
+/// template<Ts ...NTs, template<Ts> class ...TTs, typename ...Us> struct B;
+/// };
+/// \endcode
+///
+/// In \c A<int,int>::B, \c NTs and \c TTs have expanded pack size 2, and \c Us
+/// is not a pack expansion, so this function returns an empty Optional.
+static llvm::Optional<unsigned> getExpandedPackSize(NamedDecl *Param) {
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ if (NTTP->isExpandedParameterPack())
+ return NTTP->getNumExpansionTypes();
+ }
+
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ if (TTP->isExpandedParameterPack())
+ return TTP->getNumExpansionTemplateParameters();
+ }
+
+ return llvm::Optional<unsigned>();
+}
+
/// \brief Check that the given template argument list is well-formed
/// for specializing the given template.
bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
@@ -3009,15 +3051,9 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
*ExpansionIntoFixedList = false;
TemplateParameterList *Params = Template->getTemplateParameters();
- unsigned NumParams = Params->size();
- unsigned NumArgs = TemplateArgs.size();
- bool Invalid = false;
SourceLocation RAngleLoc = TemplateArgs.getRAngleLoc();
- bool HasParameterPack =
- NumParams > 0 && Params->getParam(NumParams - 1)->isTemplateParameterPack();
-
// C++ [temp.arg]p1:
// [...] The type and form of each template-argument specified in
// a template-id shall match the type and form specified for the
@@ -3025,38 +3061,50 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// template-parameter-list.
bool isTemplateTemplateParameter = isa<TemplateTemplateParmDecl>(Template);
SmallVector<TemplateArgument, 2> ArgumentPack;
- TemplateParameterList::iterator Param = Params->begin(),
- ParamEnd = Params->end();
- unsigned ArgIdx = 0;
+ unsigned ArgIdx = 0, NumArgs = TemplateArgs.size();
LocalInstantiationScope InstScope(*this, true);
- bool SawPackExpansion = false;
- while (Param != ParamEnd) {
- if (ArgIdx < NumArgs) {
- // If we have an expanded parameter pack, make sure we don't have too
- // many arguments.
- // FIXME: This really should fall out from the normal arity checking.
- if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
- if (NTTP->isExpandedParameterPack() &&
- ArgumentPack.size() >= NTTP->getNumExpansionTypes()) {
- Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
- << true
- << (isa<ClassTemplateDecl>(Template)? 0 :
- isa<FunctionTemplateDecl>(Template)? 1 :
- isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
- << Template;
- Diag(Template->getLocation(), diag::note_template_decl_here)
- << Params->getSourceRange();
- return true;
- }
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; /* increment in loop */) {
+ // If we have an expanded parameter pack, make sure we don't have too
+ // many arguments.
+ if (llvm::Optional<unsigned> Expansions = getExpandedPackSize(*Param)) {
+ if (*Expansions == ArgumentPack.size()) {
+ // We're done with this parameter pack. Pack up its arguments and add
+ // them to the list.
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context,
+ ArgumentPack.data(),
+ ArgumentPack.size()));
+ ArgumentPack.clear();
+
+ // This argument is assigned to the next parameter.
+ ++Param;
+ continue;
+ } else if (ArgIdx == NumArgs && !PartialTemplateArgs) {
+ // Not enough arguments for this parameter pack.
+ Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << false
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template;
+ Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
}
+ }
+ if (ArgIdx < NumArgs) {
// Check the template argument we were given.
if (CheckTemplateArgument(*Param, TemplateArgs[ArgIdx], Template,
TemplateLoc, RAngleLoc,
ArgumentPack.size(), Converted))
return true;
+ // We're now done with this argument.
+ ++ArgIdx;
+
if ((*Param)->isTemplateParameterPack()) {
// The template parameter was a template parameter pack, so take the
// deduced argument and place it on the argument pack. Note that we
@@ -3068,16 +3116,47 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// Move to the next template parameter.
++Param;
}
-
- // If this template argument is a pack expansion, record that fact
- // and break out; we can't actually check any more.
- if (TemplateArgs[ArgIdx].getArgument().isPackExpansion()) {
- SawPackExpansion = true;
- ++ArgIdx;
- break;
+
+ // If we just saw a pack expansion, then directly convert the remaining
+ // arguments, because we don't know what parameters they'll match up
+ // with.
+ if (TemplateArgs[ArgIdx-1].getArgument().isPackExpansion()) {
+ bool InFinalParameterPack = Param != ParamEnd &&
+ Param + 1 == ParamEnd &&
+ (*Param)->isTemplateParameterPack() &&
+ !getExpandedPackSize(*Param);
+
+ if (!InFinalParameterPack && !ArgumentPack.empty()) {
+ // If we were part way through filling in an expanded parameter pack,
+ // fall back to just producing individual arguments.
+ Converted.insert(Converted.end(),
+ ArgumentPack.begin(), ArgumentPack.end());
+ ArgumentPack.clear();
+ }
+
+ while (ArgIdx < NumArgs) {
+ if (InFinalParameterPack)
+ ArgumentPack.push_back(TemplateArgs[ArgIdx].getArgument());
+ else
+ Converted.push_back(TemplateArgs[ArgIdx].getArgument());
+ ++ArgIdx;
+ }
+
+ // Push the argument pack onto the list of converted arguments.
+ if (InFinalParameterPack) {
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context,
+ ArgumentPack.data(),
+ ArgumentPack.size()));
+ ArgumentPack.clear();
+ } else if (ExpansionIntoFixedList) {
+ // We have expanded a pack into a fixed list.
+ *ExpansionIntoFixedList = true;
+ }
+
+ return false;
}
-
- ++ArgIdx;
+
continue;
}
@@ -3088,14 +3167,30 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
ArgumentPack.data(),
ArgumentPack.size()));
- return Invalid;
+ return false;
}
// If we have a template parameter pack with no more corresponding
// arguments, just break out now and we'll fill in the argument pack below.
- if ((*Param)->isTemplateParameterPack())
- break;
-
+ if ((*Param)->isTemplateParameterPack()) {
+ assert(!getExpandedPackSize(*Param) &&
+ "Should have dealt with this already");
+
+ // A non-expanded parameter pack before the end of the parameter list
+ // only occurs for an ill-formed template parameter list, unless we've
+ // got a partial argument list for a function template, so just bail out.
+ if (Param + 1 != ParamEnd)
+ return true;
+
+ Converted.push_back(TemplateArgument::CreatePackCopy(Context,
+ ArgumentPack.data(),
+ ArgumentPack.size()));
+ ArgumentPack.clear();
+
+ ++Param;
+ continue;
+ }
+
// Check whether we have a default argument.
TemplateArgumentLoc Arg;
@@ -3182,86 +3277,12 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
++ArgIdx;
}
- // If we saw a pack expansion, then directly convert the remaining arguments,
- // because we don't know what parameters they'll match up with.
- if (SawPackExpansion) {
- bool AddToArgumentPack
- = Param != ParamEnd && (*Param)->isTemplateParameterPack();
- while (ArgIdx < NumArgs) {
- if (AddToArgumentPack)
- ArgumentPack.push_back(TemplateArgs[ArgIdx].getArgument());
- else
- Converted.push_back(TemplateArgs[ArgIdx].getArgument());
- ++ArgIdx;
- }
-
- // Push the argument pack onto the list of converted arguments.
- if (AddToArgumentPack) {
- if (ArgumentPack.empty())
- Converted.push_back(TemplateArgument(0, 0));
- else {
- Converted.push_back(
- TemplateArgument::CreatePackCopy(Context,
- ArgumentPack.data(),
- ArgumentPack.size()));
- ArgumentPack.clear();
- }
- } else if (ExpansionIntoFixedList) {
- // We have expanded a pack into a fixed list.
- *ExpansionIntoFixedList = true;
- }
-
- return Invalid;
- }
-
// If we have any leftover arguments, then there were too many arguments.
// Complain and fail.
if (ArgIdx < NumArgs)
return diagnoseArityMismatch(*this, Template, TemplateLoc, TemplateArgs);
-
- // If we have an expanded parameter pack, make sure we don't have too
- // many arguments.
- // FIXME: This really should fall out from the normal arity checking.
- if (Param != ParamEnd) {
- if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
- if (NTTP->isExpandedParameterPack() &&
- ArgumentPack.size() < NTTP->getNumExpansionTypes()) {
- Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
- << false
- << (isa<ClassTemplateDecl>(Template)? 0 :
- isa<FunctionTemplateDecl>(Template)? 1 :
- isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
- << Template;
- Diag(Template->getLocation(), diag::note_template_decl_here)
- << Params->getSourceRange();
- return true;
- }
- }
- }
-
- // Form argument packs for each of the parameter packs remaining.
- while (Param != ParamEnd) {
- // If we're checking a partial list of template arguments, don't fill
- // in arguments for non-template parameter packs.
- if ((*Param)->isTemplateParameterPack()) {
- if (!HasParameterPack)
- return true;
- if (ArgumentPack.empty())
- Converted.push_back(TemplateArgument(0, 0));
- else {
- Converted.push_back(TemplateArgument::CreatePackCopy(Context,
- ArgumentPack.data(),
- ArgumentPack.size()));
- ArgumentPack.clear();
- }
- } else if (!PartialTemplateArgs)
- return diagnoseArityMismatch(*this, Template, TemplateLoc, TemplateArgs);
- ++Param;
- }
-
- return Invalid;
+ return false;
}
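
The rewritten loop enforces expanded pack sizes up front; using the same
shape as the getExpandedPackSize example above, roughly:

    template<typename ...Ts> struct A {
      template<Ts ...NTs> struct B {};
    };
    A<int, int>::B<1, 2> ok;   // NTs has expanded size 2: exactly two args
    // A<int, int>::B<1> err;  // too few arguments for the expanded pack
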
namespace {
@@ -3651,7 +3672,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) {
case NPV_NullPointer:
S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
- Converted = TemplateArgument((Decl *)0);
+ Converted = TemplateArgument(ParamType, /*isNullPtr*/true);
return false;
case NPV_Error:
@@ -3739,7 +3760,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return true;
}
- NamedDecl *Entity = DRE->getDecl();
+ ValueDecl *Entity = DRE->getDecl();
// Cannot refer to non-static data members
if (FieldDecl *Field = dyn_cast<FieldDecl>(Entity)) {
@@ -3927,7 +3948,8 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
}
// Create the template argument.
- Converted = TemplateArgument(Entity->getCanonicalDecl());
+ Converted = TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()),
+ ParamType->isReferenceType());
S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity);
return false;
}
@@ -3948,7 +3970,7 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
return true;
case NPV_NullPointer:
S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
- Converted = TemplateArgument((Decl *)0);
+ Converted = TemplateArgument(ParamType, /*isNullPtr*/true);
return false;
case NPV_NotNullPointer:
break;
@@ -4017,10 +4039,12 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
if (isa<NonTypeTemplateParmDecl>(VD) ||
(isa<VarDecl>(VD) &&
S.Context.getCanonicalType(VD->getType()).isConstQualified())) {
- if (Arg->isTypeDependent() || Arg->isValueDependent())
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) {
Converted = TemplateArgument(Arg);
- else
- Converted = TemplateArgument(VD->getCanonicalDecl());
+ } else {
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ Converted = TemplateArgument(VD, /*isReferenceParam*/false);
+ }
return Invalid;
}
}
@@ -4041,10 +4065,12 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
// Okay: this is the address of a non-static member, and therefore
// a member pointer constant.
- if (Arg->isTypeDependent() || Arg->isValueDependent())
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) {
Converted = TemplateArgument(Arg);
- else
- Converted = TemplateArgument(DRE->getDecl()->getCanonicalDecl());
+ } else {
+ ValueDecl *D = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
+ Converted = TemplateArgument(D, /*isReferenceParam*/false);
+ }
return Invalid;
}
@@ -4397,8 +4423,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
case NPV_NullPointer:
Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
- Converted = TemplateArgument((Decl *)0);
- return Owned(Arg);;
+ Converted = TemplateArgument(ParamType, /*isNullPtr*/true);
+ return Owned(Arg);
}
}
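
These hunks switch null non-type arguments from a null Declaration to the
new NullPtr kind, which records the parameter's type; a sketch of the
source forms involved (C++11):

    struct S { int m; };
    template<int *P>    struct X {};
    template<int S::*M> struct Y {};
    X<nullptr> x;  // stored as a NullPtr argument of type int *
    Y<nullptr> y;  // likewise for the pointer-to-member case
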
@@ -4418,8 +4444,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
/// This routine implements the semantics of C++ [temp.arg.template].
/// It returns true if an error occurred, and false otherwise.
bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
- const TemplateArgumentLoc &Arg) {
- TemplateName Name = Arg.getArgument().getAsTemplate();
+ const TemplateArgumentLoc &Arg,
+ unsigned ArgumentPackIndex) {
+ TemplateName Name = Arg.getArgument().getAsTemplateOrTemplatePattern();
TemplateDecl *Template = Name.getAsTemplateDecl();
if (!Template) {
// Any dependent template name is fine.
@@ -4449,8 +4476,12 @@ bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
<< Template;
}
+ TemplateParameterList *Params = Param->getTemplateParameters();
+ if (Param->isExpandedParameterPack())
+ Params = Param->getExpansionTemplateParameters(ArgumentPackIndex);
+
return !TemplateParameterListsAreEqual(Template->getTemplateParameters(),
- Param->getTemplateParameters(),
+ Params,
true,
TPL_TemplateTemplateArgumentMatch,
Arg.getLocation());
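
ArgumentPackIndex lets each argument for an expanded template template
parameter pack be checked against the parameter list of that particular
expansion; a sketch (names illustrative):

    template<typename ...Ts>
    struct A {
      // With Ts = {int, long}, TTs expands to a template<int> class
      // parameter followed by a template<long> class parameter.
      template<template<Ts> class ...TTs> struct B {};
    };
    template<int>  struct P {};
    template<long> struct Q {};
    A<int, long>::B<P, Q> b;  // P checked at index 0, Q at index 1
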
@@ -4464,12 +4495,9 @@ ExprResult
Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc) {
- assert(Arg.getKind() == TemplateArgument::Declaration &&
- "Only declaration template arguments permitted here");
-
// For a NULL non-type template argument, return nullptr casted to the
// parameter's type.
- if (!Arg.getAsDecl()) {
+ if (Arg.getKind() == TemplateArgument::NullPtr) {
return ImpCastExprToType(
new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc),
ParamType,
@@ -4477,7 +4505,9 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
? CK_NullToMemberPointer
: CK_NullToPointer);
}
-
+ assert(Arg.getKind() == TemplateArgument::Declaration &&
+ "Only declaration template arguments permitted here");
+
ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
if (VD->getDeclContext()->isRecord() &&
@@ -4525,7 +4555,7 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
assert(!RefExpr.isInvalid() &&
Context.hasSameType(((Expr*) RefExpr.get())->getType(),
ParamType.getUnqualifiedType()));
- return move(RefExpr);
+ return RefExpr;
}
}
@@ -4543,7 +4573,7 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
if (RefExpr.isInvalid())
return ExprError();
- return move(RefExpr);
+ return RefExpr;
}
// Take the address of everything else
@@ -5072,10 +5102,10 @@ static bool CheckNonTypeClassTemplatePartialSpecializationArgs(Sema &S,
continue;
}
- Expr *ArgExpr = Args[I].getAsExpr();
- if (!ArgExpr) {
+ if (Args[I].getKind() != TemplateArgument::Expression)
continue;
- }
+
+ Expr *ArgExpr = Args[I].getAsExpr();
// We can have a pack expansion of any of the bullets below.
if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(ArgExpr))
@@ -5174,7 +5204,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
// NOTE: KWLoc is the location of the tag keyword. This will instead
// store the location of the outermost template keyword in the declaration.
SourceLocation TemplateKWLoc = TemplateParameterLists.size() > 0
- ? TemplateParameterLists.get()[0]->getTemplateLoc() : SourceLocation();
+ ? TemplateParameterLists[0]->getTemplateLoc() : SourceLocation();
// Find the class template we're specializing
TemplateName Name = TemplateD.getAsVal<TemplateName>();
@@ -5200,7 +5230,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
= MatchTemplateParametersToScopeSpecifier(TemplateNameLoc,
TemplateNameLoc,
SS,
- (TemplateParameterList**)TemplateParameterLists.get(),
+ TemplateParameterLists.data(),
TemplateParameterLists.size(),
TUK == TUK_Friend,
isExplicitSpecialization,
@@ -5355,7 +5385,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
if (TemplateParameterLists.size() > 0) {
Specialization->setTemplateParameterListsInfo(Context,
TemplateParameterLists.size(),
- (TemplateParameterList**) TemplateParameterLists.release());
+ TemplateParameterLists.data());
}
PrevDecl = 0;
CanonType = Context.getTypeDeclType(Specialization);
@@ -5383,7 +5413,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
TemplateParams,
AS_none, /*ModulePrivateLoc=*/SourceLocation(),
TemplateParameterLists.size() - 1,
- (TemplateParameterList**) TemplateParameterLists.release());
+ TemplateParameterLists.data());
}
// Create a new class template partial specialization declaration node.
@@ -5407,7 +5437,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
if (TemplateParameterLists.size() > 1 && SS.isSet()) {
Partial->setTemplateParameterListsInfo(Context,
TemplateParameterLists.size() - 1,
- (TemplateParameterList**) TemplateParameterLists.release());
+ TemplateParameterLists.data());
}
if (!PrevPartial)
@@ -5462,7 +5492,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
if (TemplateParameterLists.size() > 0) {
Specialization->setTemplateParameterListsInfo(Context,
TemplateParameterLists.size(),
- (TemplateParameterList**) TemplateParameterLists.release());
+ TemplateParameterLists.data());
}
if (!PrevDecl)
@@ -5545,7 +5575,6 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Specialization->setTypeAsWritten(WrittenTy);
Specialization->setTemplateKeywordLoc(TemplateKWLoc);
}
- TemplateArgsIn.release();
// C++ [temp.expl.spec]p9:
// A template explicit specialization is in the scope of the
@@ -5580,7 +5609,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Decl *Sema::ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D) {
- Decl *NewDecl = HandleDeclarator(S, D, move(TemplateParameterLists));
+ Decl *NewDecl = HandleDeclarator(S, D, TemplateParameterLists);
ActOnDocumentableDecl(NewDecl);
return NewDecl;
}
@@ -5599,7 +5628,7 @@ Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = HandleDeclarator(ParentScope, D,
- move(TemplateParameterLists));
+ TemplateParameterLists);
if (FunctionTemplateDecl *FunctionTemplate
= dyn_cast_or_null<FunctionTemplateDecl>(DP))
return ActOnStartOfFunctionDef(FnBodyScope,
@@ -5903,7 +5932,7 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
// Perform template argument deduction to determine whether we may be
// specializing this template.
// FIXME: It is somewhat wasteful to build
- TemplateDeductionInfo Info(Context, FD->getLocation());
+ TemplateDeductionInfo Info(FD->getLocation());
FunctionDecl *Specialization = 0;
if (TemplateDeductionResult TDK
= DeduceTemplateArguments(FunTmpl, ExplicitTemplateArgs,
@@ -6400,7 +6429,6 @@ Sema::ActOnExplicitInstantiation(Scope *S,
TemplateArgs,
Context.getTypeDeclType(Specialization));
Specialization->setTypeAsWritten(WrittenTy);
- TemplateArgsIn.release();
// Set source locations for keywords.
Specialization->setExternLoc(ExternLoc);
@@ -6476,9 +6504,8 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference,
KWLoc, SS, Name, NameLoc, Attr, AS_none,
/*ModulePrivateLoc=*/SourceLocation(),
- MultiTemplateParamsArg(*this, 0, 0),
- Owned, IsDependent, SourceLocation(), false,
- TypeResult());
+ MultiTemplateParamsArg(), Owned, IsDependent,
+ SourceLocation(), false, TypeResult());
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
if (!TagD)
@@ -6730,12 +6757,10 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
- ASTTemplateArgsPtr TemplateArgsPtr(*this,
- TemplateId->getTemplateArgs(),
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
translateTemplateArguments(TemplateArgsPtr, TemplateArgs);
HasExplicitTemplateArgs = true;
- TemplateArgsPtr.release();
}
// C++ [temp.explicit]p1:
@@ -6763,7 +6788,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
if (!FunTmpl)
continue;
- TemplateDeductionInfo Info(Context, D.getIdentifierLoc());
+ TemplateDeductionInfo Info(D.getIdentifierLoc());
FunctionDecl *Specialization = 0;
if (TemplateDeductionResult TDK
= DeduceTemplateArguments(FunTmpl,
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
index 9500ec3..bf4533d 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -158,9 +158,6 @@ static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) {
/// \brief Determine whether two declaration pointers refer to the same
/// declaration.
static bool isSameDeclaration(Decl *X, Decl *Y) {
- if (!X || !Y)
- return !X && !Y;
-
if (NamedDecl *NX = dyn_cast<NamedDecl>(X))
X = NX->getUnderlyingDecl();
if (NamedDecl *NY = dyn_cast<NamedDecl>(Y))
@@ -262,7 +259,27 @@ checkDeducedTemplateArguments(ASTContext &Context,
  // If we deduced two declarations, make sure they refer to the
// same declaration.
if (Y.getKind() == TemplateArgument::Declaration &&
- isSameDeclaration(X.getAsDecl(), Y.getAsDecl()))
+ isSameDeclaration(X.getAsDecl(), Y.getAsDecl()) &&
+ X.isDeclForReferenceParam() == Y.isDeclForReferenceParam())
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::NullPtr:
+ // If we deduced a null pointer and a dependent expression, keep the
+ // null pointer.
+ if (Y.getKind() == TemplateArgument::Expression)
+ return X;
+
+ // If we deduced a null pointer and an integral constant, keep the
+ // integral constant.
+ if (Y.getKind() == TemplateArgument::Integral)
+ return Y;
+
+ // If we deduced two null pointers, make sure they have the same type.
+ if (Y.getKind() == TemplateArgument::NullPtr &&
+ Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType()))
return X;
// All other combinations are incompatible.
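
A sketch of the distinction now carried on Declaration arguments: the same
entity bound to a pointer parameter and to a reference parameter yields
arguments that no longer compare equal.

    int n;
    template<int *P> struct ByPtr {};
    template<int &R> struct ByRef {};
    ByPtr<&n> a;  // isDeclForReferenceParam() is false
    ByRef<n>  b;  // isDeclForReferenceParam() is true
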
@@ -356,13 +373,15 @@ DeduceNonTypeTemplateArgument(Sema &S,
static Sema::TemplateDeductionResult
DeduceNonTypeTemplateArgument(Sema &S,
NonTypeTemplateParmDecl *NTTP,
- Decl *D,
+ ValueDecl *D,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
assert(NTTP->getDepth() == 0 &&
"Cannot deduce non-type template argument with depth > 0");
- DeducedTemplateArgument NewDeduced(D? D->getCanonicalDecl() : 0);
+ D = D ? cast<ValueDecl>(D->getCanonicalDecl()) : 0;
+ TemplateArgument New(D, NTTP->getType()->isReferenceType());
+ DeducedTemplateArgument NewDeduced(New);
DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
Deduced[NTTP->getIndex()],
NewDeduced);
@@ -570,7 +589,10 @@ static void PrepareArgumentPackDeduction(Sema &S,
SavedPacks[I] = Deduced[PackIndices[I]];
Deduced[PackIndices[I]] = TemplateArgument();
- // If the template arugment pack was explicitly specified, add that to
+ if (!S.CurrentInstantiationScope)
+ continue;
+
+ // If the template argument pack was explicitly specified, add that to
// the set of deduced arguments.
const TemplateArgument *ExplicitArgs;
unsigned NumExplicitArgs;
@@ -612,7 +634,7 @@ FinishArgumentPackDeduction(Sema &S,
if (NewlyDeducedPacks[I].empty()) {
// If we deduced an empty argument pack, create it now.
- NewPack = DeducedTemplateArgument(TemplateArgument(0, 0));
+ NewPack = DeducedTemplateArgument(TemplateArgument::getEmptyPack());
} else {
TemplateArgument *ArgumentPack
= new (S.Context) TemplateArgument [NewlyDeducedPacks[I].size()];
@@ -1371,9 +1393,11 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// If this is a base class, try to perform template argument
// deduction from it.
if (NextT != RecordT) {
+ TemplateDeductionInfo BaseInfo(Info.getLocation());
Sema::TemplateDeductionResult BaseResult
= DeduceTemplateArguments(S, TemplateParams, SpecParam,
- QualType(NextT, 0), Info, Deduced);
+ QualType(NextT, 0), BaseInfo,
+ Deduced);
// If template argument deduction for this base was successful,
// note that we had some success. Otherwise, ignore any deductions
@@ -1382,6 +1406,9 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
Successful = true;
DeducedOrig.clear();
DeducedOrig.append(Deduced.begin(), Deduced.end());
+ Info.Param = BaseInfo.Param;
+ Info.FirstArg = BaseInfo.FirstArg;
+ Info.SecondArg = BaseInfo.SecondArg;
}
else
Deduced = DeducedOrig;
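
Giving each base class its own TemplateDeductionInfo keeps a losing base's
failure details from clobbering Info while still propagating the winning
base's fields; the derived-to-base deduction being served, as a sketch:

    template<typename T> struct B {};
    struct D : B<int> {};
    template<typename T> void f(B<T>);
    void g() { f(D()); }  // walks D's bases, finds B<int>: T = int
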
@@ -1596,7 +1623,17 @@ DeduceTemplateArguments(Sema &S,
case TemplateArgument::Declaration:
if (Arg.getKind() == TemplateArgument::Declaration &&
- isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()))
+ isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()) &&
+ Param.isDeclForReferenceParam() == Arg.isDeclForReferenceParam())
+ return Sema::TDK_Success;
+
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::NullPtr:
+ if (Arg.getKind() == TemplateArgument::NullPtr &&
+ S.Context.hasSameType(Param.getNullPtrType(), Arg.getNullPtrType()))
return Sema::TDK_Success;
Info.FirstArg = Param;
@@ -1867,7 +1904,11 @@ static bool isSameTemplateArg(ASTContext &Context,
Context.getCanonicalType(Y.getAsType());
case TemplateArgument::Declaration:
- return isSameDeclaration(X.getAsDecl(), Y.getAsDecl());
+ return isSameDeclaration(X.getAsDecl(), Y.getAsDecl()) &&
+ X.isDeclForReferenceParam() == Y.isDeclForReferenceParam();
+
+ case TemplateArgument::NullPtr:
+ return Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType());
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
@@ -1937,6 +1978,14 @@ getTrivialTemplateArgumentLoc(Sema &S,
return TemplateArgumentLoc(TemplateArgument(E), E);
}
+ case TemplateArgument::NullPtr: {
+ Expr *E
+ = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
+ .takeAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(NTTPType, /*isNullPtr*/true),
+ E);
+ }
+
case TemplateArgument::Integral: {
Expr *E
= S.BuildExpressionFromIntegralTemplateArgument(Arg, Loc).takeAs<Expr>();
@@ -2162,6 +2211,9 @@ Sema::TemplateDeductionResult
Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
TemplateDeductionInfo &Info) {
+ if (Partial->isInvalidDecl())
+ return TDK_Invalid;
+
// C++ [temp.class.spec.match]p2:
// A partial specialization matches a given actual template
// argument list if the template arguments of the partial
@@ -2601,12 +2653,13 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
// explicitly-specified set (C++0x [temp.arg.explicit]p9).
const TemplateArgument *ExplicitArgs;
unsigned NumExplicitArgs;
- if (CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs,
+ if (CurrentInstantiationScope &&
+ CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs,
&NumExplicitArgs)
== Param)
Builder.push_back(TemplateArgument(ExplicitArgs, NumExplicitArgs));
else
- Builder.push_back(TemplateArgument(0, 0));
+ Builder.push_back(TemplateArgument::getEmptyPack());
continue;
}
@@ -2784,7 +2837,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
// Otherwise, see if we can resolve a function type
FunctionDecl *Specialization = 0;
- TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc());
+ TemplateDeductionInfo Info(Ovl->getNameLoc());
if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs,
Specialization, Info))
continue;
@@ -2815,7 +2868,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
// So we do not reject deductions which were made elsewhere.
SmallVector<DeducedTemplateArgument, 8>
Deduced(TemplateParams->size());
- TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc());
+ TemplateDeductionInfo Info(Ovl->getNameLoc());
Sema::TemplateDeductionResult Result
= DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
ArgType, Info, Deduced, TDF);
@@ -2992,8 +3045,6 @@ DeduceTemplateArgumentByListElement(Sema &S,
///
/// \param Args the function call arguments
///
-/// \param NumArgs the number of arguments in Args
-///
/// \param Name the name of the function being called. This is only significant
/// when the function template is a conversion function template, in which
/// case this routine will also perform template argument deduction based on
@@ -3013,6 +3064,9 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
llvm::ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
TemplateDeductionInfo &Info) {
+ if (FunctionTemplate->isInvalidDecl())
+ return TDK_Invalid;
+
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
// C++ [temp.deduct.call]p1:
@@ -3269,6 +3323,9 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
TemplateDeductionInfo &Info) {
+ if (FunctionTemplate->isInvalidDecl())
+ return TDK_Invalid;
+
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
@@ -3328,6 +3385,9 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
TemplateDeductionInfo &Info) {
+ if (FunctionTemplate->isInvalidDecl())
+ return TDK_Invalid;
+
CXXConversionDecl *Conv
= cast<CXXConversionDecl>(FunctionTemplate->getTemplatedDecl());
QualType FromType = Conv->getConversionType();
@@ -3572,7 +3632,7 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init,
QualType InitType = Init->getType();
unsigned TDF = 0;
- TemplateDeductionInfo Info(Context, Loc);
+ TemplateDeductionInfo Info(Loc);
InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
if (InitList) {
@@ -3594,10 +3654,11 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init,
return DAR_Failed;
}
- QualType DeducedType = Deduced[0].getAsType();
- if (DeducedType.isNull())
+ if (Deduced[0].getKind() != TemplateArgument::Type)
return DAR_Failed;
+ QualType DeducedType = Deduced[0].getAsType();
+
if (InitList) {
DeducedType = BuildStdInitializerList(DeducedType, Loc);
if (DeducedType.isNull())
@@ -3637,26 +3698,23 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
llvm::SmallBitVector &Deduced);
/// \brief For a non-static member function, add the type of its implicit
/// object parameter to the given list of argument types.
-static void MaybeAddImplicitObjectParameterType(ASTContext &Context,
+static void AddImplicitObjectParameterType(ASTContext &Context,
CXXMethodDecl *Method,
SmallVectorImpl<QualType> &ArgTypes) {
- if (Method->isStatic())
- return;
-
- // C++ [over.match.funcs]p4:
- //
- // For non-static member functions, the type of the implicit
- // object parameter is
- // - "lvalue reference to cv X" for functions declared without a
- // ref-qualifier or with the & ref-qualifier
- // - "rvalue reference to cv X" for functions declared with the
- // && ref-qualifier
+ // C++11 [temp.func.order]p3:
+ // [...] The new parameter is of type "reference to cv A," where cv are
+ // the cv-qualifiers of the function template (if any) and A is
+ // the class of which the function template is a member.
//
- // FIXME: We don't have ref-qualifiers yet, so we don't do that part.
+ // The standard doesn't say explicitly, but we pick the appropriate kind of
+ // reference type based on [over.match.funcs]p4.
QualType ArgTy = Context.getTypeDeclType(Method->getParent());
ArgTy = Context.getQualifiedType(ArgTy,
Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
- ArgTy = Context.getLValueReferenceType(ArgTy);
+ if (Method->getRefQualifier() == RQ_RValue)
+ ArgTy = Context.getRValueReferenceType(ArgTy);
+ else
+ ArgTy = Context.getLValueReferenceType(ArgTy);
ArgTypes.push_back(ArgTy);
}
@@ -3682,7 +3740,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
// C++0x [temp.deduct.partial]p3:
// The types used to determine the ordering depend on the context in which
// the partial ordering is done:
- TemplateDeductionInfo Info(S.Context, Loc);
+ TemplateDeductionInfo Info(Loc);
CXXMethodDecl *Method1 = 0;
CXXMethodDecl *Method2 = 0;
bool IsNonStatic2 = false;
@@ -3697,7 +3755,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
IsNonStatic1 = Method1 && !Method1->isStatic();
IsNonStatic2 = Method2 && !Method2->isStatic();
- // C++0x [temp.func.order]p3:
+ // C++11 [temp.func.order]p3:
// [...] If only one of the function templates is a non-static
// member, that function template is considered to have a new
// first parameter inserted in its function parameter list. The
@@ -3705,22 +3763,25 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
// the cv-qualifiers of the function template (if any) and A is
// the class of which the function template is a member.
//
+ // Note that we interpret this to mean "if one of the function
+ // templates is a non-static member and the other is a non-member";
+ // otherwise, the ordering rules for static functions against non-static
+ // functions don't make any sense.
+ //
// C++98/03 doesn't have this provision, so instead we drop the
- // first argument of the free function or static member, which
- // seems to match existing practice.
+ // first argument of the free function, which seems to match
+ // existing practice.
SmallVector<QualType, 4> Args1;
- unsigned Skip1 = !S.getLangOpts().CPlusPlus0x &&
- IsNonStatic2 && !IsNonStatic1;
- if (S.getLangOpts().CPlusPlus0x && IsNonStatic1 && !IsNonStatic2)
- MaybeAddImplicitObjectParameterType(S.Context, Method1, Args1);
+ unsigned Skip1 = !S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !Method1;
+ if (S.getLangOpts().CPlusPlus0x && IsNonStatic1 && !Method2)
+ AddImplicitObjectParameterType(S.Context, Method1, Args1);
Args1.insert(Args1.end(),
Proto1->arg_type_begin() + Skip1, Proto1->arg_type_end());
SmallVector<QualType, 4> Args2;
- Skip2 = !S.getLangOpts().CPlusPlus0x &&
- IsNonStatic1 && !IsNonStatic2;
- if (S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
- MaybeAddImplicitObjectParameterType(S.Context, Method2, Args2);
+ Skip2 = !S.getLangOpts().CPlusPlus0x && IsNonStatic1 && !Method2;
+ if (S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !Method1)
+ AddImplicitObjectParameterType(S.Context, Method2, Args2);
Args2.insert(Args2.end(),
Proto2->arg_type_begin() + Skip2, Proto2->arg_type_end());
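
A sketch of the ordering performed here: the member template gains an
implicit first parameter built from its class and cv/ref-qualifiers (a
'const S &' below), and is then ordered against the non-member:

    struct S {
      template<typename T> bool operator==(T) const;  // as (const S &, T)
    };
    template<typename T> bool operator==(S, T);       // as (S, T)
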
@@ -4118,7 +4179,7 @@ Sema::getMoreSpecializedPartialSpecialization(
// template partial specialization's template arguments, for
// example.
SmallVector<DeducedTemplateArgument, 4> Deduced;
- TemplateDeductionInfo Info(Context, Loc);
+ TemplateDeductionInfo Info(Loc);
QualType PT1 = PS1->getInjectedSpecializationType();
QualType PT2 = PS2->getInjectedSpecializationType();
@@ -4496,6 +4557,11 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
case TemplateArgument::Declaration:
break;
+ case TemplateArgument::NullPtr:
+ MarkUsedTemplateParameters(Ctx, TemplateArg.getNullPtrType(), OnlyDeduced,
+ Depth, Used);
+ break;
+
case TemplateArgument::Type:
MarkUsedTemplateParameters(Ctx, TemplateArg.getAsType(), OnlyDeduced,
Depth, Used);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 20e755f..665dd07 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -769,7 +769,7 @@ namespace {
/// instantiating it.
Decl *TransformDefinition(SourceLocation Loc, Decl *D);
- /// \bried Transform the first qualifier within a scope by instantiating the
+ /// \brief Transform the first qualifier within a scope by instantiating the
/// declaration.
NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc);
@@ -802,11 +802,24 @@ namespace {
ExprResult TransformPredefinedExpr(PredefinedExpr *E);
ExprResult TransformDeclRefExpr(DeclRefExpr *E);
ExprResult TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E);
+
ExprResult TransformTemplateParmRefExpr(DeclRefExpr *E,
NonTypeTemplateParmDecl *D);
ExprResult TransformSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E);
-
+
+ /// \brief Rebuild a DeclRefExpr for a ParmVarDecl reference.
+ ExprResult RebuildParmVarDeclRefExpr(ParmVarDecl *PD, SourceLocation Loc);
+
+ /// \brief Transform a reference to a function parameter pack.
+ ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E,
+ ParmVarDecl *PD);
+
+ /// \brief Transform a FunctionParmPackExpr which was built when we couldn't
+ /// expand a function parameter pack reference which refers to an expanded
+ /// pack.
+ ExprResult TransformFunctionParmPackExpr(FunctionParmPackExpr *E);
+
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL);
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
@@ -835,7 +848,7 @@ namespace {
ExprResult Result =
TreeTransform<TemplateInstantiator>::TransformCallExpr(CE);
getSema().CallsUndergoingInstantiation.pop_back();
- return move(Result);
+ return Result;
}
ExprResult TransformLambdaExpr(LambdaExpr *E) {
@@ -1161,10 +1174,11 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
result = SemaRef.Owned(argExpr);
type = argExpr->getType();
- } else if (arg.getKind() == TemplateArgument::Declaration) {
+ } else if (arg.getKind() == TemplateArgument::Declaration ||
+ arg.getKind() == TemplateArgument::NullPtr) {
ValueDecl *VD;
- if (Decl *D = arg.getAsDecl()) {
- VD = cast<ValueDecl>(D);
+ if (arg.getKind() == TemplateArgument::Declaration) {
+ VD = cast<ValueDecl>(arg.getAsDecl());
// Find the instantiation of the template argument. This is
// required for nested templates.
@@ -1230,8 +1244,81 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
}
ExprResult
+TemplateInstantiator::RebuildParmVarDeclRefExpr(ParmVarDecl *PD,
+ SourceLocation Loc) {
+ DeclarationNameInfo NameInfo(PD->getDeclName(), Loc);
+ return getSema().BuildDeclarationNameExpr(CXXScopeSpec(), NameInfo, PD);
+}
+
+ExprResult
+TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ if (getSema().ArgumentPackSubstitutionIndex != -1) {
+ // We can expand this parameter pack now.
+ ParmVarDecl *D = E->getExpansion(getSema().ArgumentPackSubstitutionIndex);
+ ValueDecl *VD = cast_or_null<ValueDecl>(TransformDecl(E->getExprLoc(), D));
+ if (!VD)
+ return ExprError();
+ return RebuildParmVarDeclRefExpr(cast<ParmVarDecl>(VD), E->getExprLoc());
+ }
+
+ QualType T = TransformType(E->getType());
+ if (T.isNull())
+ return ExprError();
+
+ // Transform each of the parameter expansions into the corresponding
+ // parameters in the instantiation of the function decl.
+ llvm::SmallVector<Decl*, 8> Parms;
+ Parms.reserve(E->getNumExpansions());
+ for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end();
+ I != End; ++I) {
+ ParmVarDecl *D =
+ cast_or_null<ParmVarDecl>(TransformDecl(E->getExprLoc(), *I));
+ if (!D)
+ return ExprError();
+ Parms.push_back(D);
+ }
+
+ return FunctionParmPackExpr::Create(getSema().Context, T,
+ E->getParameterPack(),
+ E->getParameterPackLocation(), Parms);
+}
+
+ExprResult
+TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E,
+ ParmVarDecl *PD) {
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Found
+ = getSema().CurrentInstantiationScope->findInstantiationOf(PD);
+ assert(Found && "no instantiation for parameter pack");
+
+ Decl *TransformedDecl;
+ if (DeclArgumentPack *Pack = Found->dyn_cast<DeclArgumentPack *>()) {
+ // If this is a reference to a function parameter pack which we can substitute
+ // but can't yet expand, build a FunctionParmPackExpr for it.
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ QualType T = TransformType(E->getType());
+ if (T.isNull())
+ return ExprError();
+ return FunctionParmPackExpr::Create(getSema().Context, T, PD,
+ E->getExprLoc(), *Pack);
+ }
+
+ TransformedDecl = (*Pack)[getSema().ArgumentPackSubstitutionIndex];
+ } else {
+ TransformedDecl = Found->get<Decl*>();
+ }
+
+ // We have either an unexpanded pack or a specific expansion.
+ return RebuildParmVarDeclRefExpr(cast<ParmVarDecl>(TransformedDecl),
+ E->getExprLoc());
+}
+
+ExprResult
TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
NamedDecl *D = E->getDecl();
+
+ // Handle references to non-type template parameters and non-type template
+ // parameter packs.
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
if (NTTP->getDepth() < TemplateArgs.getNumLevels())
return TransformTemplateParmRefExpr(E, NTTP);
@@ -1240,6 +1327,11 @@ TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
// FindInstantiatedDecl will find it in the local instantiation scope.
}
+ // Handle references to function parameter packs.
+ if (ParmVarDecl *PD = dyn_cast<ParmVarDecl>(D))
+ if (PD->isParameterPack())
+ return TransformFunctionParmPackRefExpr(E, PD);
+
return TreeTransform<TemplateInstantiator>::TransformDeclRefExpr(E);
}
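
One situation served by FunctionParmPackExpr, as a simplified sketch: a
function parameter pack captured by a lambda may be referenced during
substitution before its expansion index is known.

    template<typename ...Ts> void g(Ts...);
    template<typename ...Ts>
    void f(Ts ...ts) {
      // The capture names the whole pack; while instantiating f, this
      // reference can be rebuilt before 'ts' can actually be expanded.
      auto h = [&ts...] { g(ts...); };
      h();
    }
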
@@ -2159,7 +2251,7 @@ Sema::InstantiateClassTemplateSpecialization(
Template->getPartialSpecializations(PartialSpecs);
for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
ClassTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
- TemplateDeductionInfo Info(Context, PointOfInstantiation);
+ TemplateDeductionInfo Info(PointOfInstantiation);
if (TemplateDeductionResult Result
= DeduceTemplateArguments(Partial,
ClassTemplateSpec->getTemplateArgs(),
@@ -2543,8 +2635,25 @@ bool Sema::Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
return Instantiator.TransformTemplateArguments(Args, NumArgs, Result);
}
+
+static const Decl* getCanonicalParmVarDecl(const Decl *D) {
+ // When storing ParmVarDecls in the local instantiation scope, we always
+ // want to use the ParmVarDecl from the canonical function declaration,
+ // since the map is then valid for any redeclaration or definition of that
+ // function.
+ if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(D)) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
+ unsigned i = PV->getFunctionScopeIndex();
+ return FD->getCanonicalDecl()->getParamDecl(i);
+ }
+ }
+ return D;
+}
+
+
llvm::PointerUnion<Decl *, LocalInstantiationScope::DeclArgumentPack *> *
LocalInstantiationScope::findInstantiationOf(const Decl *D) {
+ D = getCanonicalParmVarDecl(D);
for (LocalInstantiationScope *Current = this; Current;
Current = Current->Outer) {
@@ -2576,6 +2685,7 @@ LocalInstantiationScope::findInstantiationOf(const Decl *D) {
}
void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
+ D = getCanonicalParmVarDecl(D);
llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
if (Stored.isNull())
Stored = Inst;
@@ -2588,11 +2698,13 @@ void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
Decl *Inst) {
+ D = getCanonicalParmVarDecl(D);
DeclArgumentPack *Pack = LocalDecls[D].get<DeclArgumentPack *>();
Pack->push_back(Inst);
}
void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
+ D = getCanonicalParmVarDecl(D);
llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
assert(Stored.isNull() && "Already instantiated this local");
DeclArgumentPack *Pack = new DeclArgumentPack;
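
Keying the map on the canonical parameter matters when a function template
is redeclared: each redeclaration has its own ParmVarDecls, but they share
one canonical declaration. A sketch:

    template<typename T> void f(T p);   // canonical ParmVarDecl for 'p'
    template<typename T> void f(T p) {  // a distinct ParmVarDecl for the
      (void)p;                          // same parameter; both now map to
    }                                   // the same entry
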
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index bdbe71d..19c46ab 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -158,6 +158,22 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType());
}
+ // HACK: g++ has a bug where it gets the value kind of ?: wrong.
+ // libstdc++ relies upon this bug in its implementation of common_type.
+ // If we happen to be processing that implementation, fake up the g++ ?:
+ // semantics. See LWG issue 2141 for more information on the bug.
+ const DecltypeType *DT = DI->getType()->getAs<DecltypeType>();
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
+ if (DT && RD && isa<ConditionalOperator>(DT->getUnderlyingExpr()) &&
+ DT->isReferenceType() &&
+ RD->getEnclosingNamespaceContext() == SemaRef.getStdNamespace() &&
+ RD->getIdentifier() && RD->getIdentifier()->isStr("common_type") &&
+ D->getIdentifier() && D->getIdentifier()->isStr("type") &&
+ SemaRef.getSourceManager().isInSystemHeader(D->getLocStart()))
+ // Fold it to the (non-reference) type which g++ would have produced.
+ DI = SemaRef.Context.getTrivialTypeSourceInfo(
+ DI->getType().getNonReferenceType());
+
// Create the new typedef
TypedefNameDecl *Typedef;
if (IsTypeAlias)
@@ -510,7 +526,7 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
if (!InstTy)
return 0;
- FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocation(),
+ FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocStart(),
D->getFriendLoc(), InstTy);
if (!FD)
return 0;
@@ -1008,6 +1024,30 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
return Record;
}
+/// \brief Adjust the given function type for an instantiation of the
+/// given declaration, to cope with modifications to the function's type that
+/// aren't reflected in the type-source information.
+///
+/// \param D The declaration we're instantiating.
+/// \param TInfo The already-instantiated type.
+static QualType adjustFunctionTypeForInstantiation(ASTContext &Context,
+ FunctionDecl *D,
+ TypeSourceInfo *TInfo) {
+ const FunctionProtoType *OrigFunc
+ = D->getType()->castAs<FunctionProtoType>();
+ const FunctionProtoType *NewFunc
+ = TInfo->getType()->castAs<FunctionProtoType>();
+ if (OrigFunc->getExtInfo() == NewFunc->getExtInfo())
+ return TInfo->getType();
+
+ FunctionProtoType::ExtProtoInfo NewEPI = NewFunc->getExtProtoInfo();
+ NewEPI.ExtInfo = OrigFunc->getExtInfo();
+ return Context.getFunctionType(NewFunc->getResultType(),
+ NewFunc->arg_type_begin(),
+ NewFunc->getNumArgs(),
+ NewEPI);
+}
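
A sketch of the mismatch being repaired, assuming an attribute that lands
in the function type's ExtInfo (noreturn here) without appearing in the
written type-source information:

    template<typename T>
    __attribute__((noreturn)) void fail(T) { for (;;) {} }
    // Instantiating fail<int> substitutes the written type, which lacks
    // noreturn; adjustFunctionTypeForInstantiation copies the original
    // ExtInfo back onto the instantiated function type.
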
+
/// Normal class members are of more specific types and therefore
/// don't make it here. This function serves two purposes:
/// 1) instantiating function templates
@@ -1048,7 +1088,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
if (!TInfo)
return 0;
- QualType T = TInfo->getType();
+ QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo);
NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
if (QualifierLoc) {
@@ -1075,7 +1115,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
FunctionDecl *Function =
FunctionDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(),
- D->getLocation(), D->getDeclName(), T, TInfo,
+ D->getNameInfo(), T, TInfo,
D->getStorageClass(), D->getStorageClassAsWritten(),
D->isInlineSpecified(), D->hasWrittenPrototype(),
D->isConstexpr());
@@ -1366,7 +1406,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
if (!TInfo)
return 0;
- QualType T = TInfo->getType();
+ QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo);
// \brief If the type of this function, after ignoring parentheses,
// is not *directly* a function type, then we're instantiating a function
@@ -1657,7 +1697,7 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
IsExpandedParameterPack = true;
DI = D->getTypeSourceInfo();
T = DI->getType();
- } else if (isa<PackExpansionTypeLoc>(TL)) {
+ } else if (D->isPackExpansion()) {
// The non-type template parameter pack's type is a pack expansion of types.
// Determine whether we need to expand this parameter pack into separate
// types.
@@ -1771,27 +1811,121 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
return Param;
}
+static void collectUnexpandedParameterPacks(
+ Sema &S,
+ TemplateParameterList *Params,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ for (TemplateParameterList::const_iterator I = Params->begin(),
+ E = Params->end(); I != E; ++I) {
+ if ((*I)->isTemplateParameterPack())
+ continue;
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*I))
+ S.collectUnexpandedParameterPacks(NTTP->getTypeSourceInfo()->getTypeLoc(),
+ Unexpanded);
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(*I))
+ collectUnexpandedParameterPacks(S, TTP->getTemplateParameters(),
+ Unexpanded);
+ }
+}
+
Decl *
TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
TemplateTemplateParmDecl *D) {
// Instantiate the template parameter list of the template template parameter.
TemplateParameterList *TempParams = D->getTemplateParameters();
TemplateParameterList *InstParams;
- {
+ SmallVector<TemplateParameterList*, 8> ExpandedParams;
+
+ bool IsExpandedParameterPack = false;
+
+ if (D->isExpandedParameterPack()) {
+ // The template template parameter pack is an already-expanded pack
+ // expansion of template parameters. Substitute into each of the expanded
+ // parameters.
+ ExpandedParams.reserve(D->getNumExpansionTemplateParameters());
+ for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
+ I != N; ++I) {
+ LocalInstantiationScope Scope(SemaRef);
+ TemplateParameterList *Expansion =
+ SubstTemplateParams(D->getExpansionTemplateParameters(I));
+ if (!Expansion)
+ return 0;
+ ExpandedParams.push_back(Expansion);
+ }
+
+ IsExpandedParameterPack = true;
+ InstParams = TempParams;
+ } else if (D->isPackExpansion()) {
+ // The template template parameter pack expands to a pack of template
+ // template parameters. Determine whether we need to expand this parameter
+ // pack into separate parameters.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(SemaRef, D->getTemplateParameters(),
+ Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (SemaRef.CheckParameterPacksForExpansion(D->getLocation(),
+ TempParams->getSourceRange(),
+ Unexpanded,
+ TemplateArgs,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return 0;
+
+ if (Expand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ LocalInstantiationScope Scope(SemaRef);
+ TemplateParameterList *Expansion = SubstTemplateParams(TempParams);
+ if (!Expansion)
+ return 0;
+ ExpandedParams.push_back(Expansion);
+ }
+
+ // Note that we have an expanded parameter pack. The "type" of this
+ // expanded parameter pack is the original expansion type, but callers
+ // will end up using the expanded parameter pack types for type-checking.
+ IsExpandedParameterPack = true;
+ InstParams = TempParams;
+ } else {
+ // We cannot fully expand the pack expansion now, so just substitute
+ // into the pattern.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
+
+ LocalInstantiationScope Scope(SemaRef);
+ InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return 0;
+ }
+ } else {
// Perform the actual substitution of template parameters within a new,
// local instantiation scope.
LocalInstantiationScope Scope(SemaRef);
InstParams = SubstTemplateParams(TempParams);
if (!InstParams)
- return NULL;
+ return 0;
}
// Build the template template parameter.
- TemplateTemplateParmDecl *Param
- = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ TemplateTemplateParmDecl *Param;
+ if (IsExpandedParameterPack)
+ Param = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(),
+ D->getIdentifier(), InstParams,
+ ExpandedParams);
+ else
+ Param = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(),
D->getDepth() - TemplateArgs.getNumLevels(),
- D->getPosition(), D->isParameterPack(),
- D->getIdentifier(), InstParams);
+ D->getPosition(),
+ D->isParameterPack(),
+ D->getIdentifier(), InstParams);
Param->setDefaultArgument(D->getDefaultArgument(), false);
Param->setAccess(AS_public);
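
[Editor's illustration, a sketch assuming standard C++11 semantics, not part of the patch: the "already-expanded pack expansion" branch above arises when a template template parameter pack's own parameter list mentions an outer pack, e.g.:]

    template<typename ...Ts>
    struct Outer {
      // 'TTs' is a template template parameter pack whose parameter lists
      // depend on the outer pack 'Ts'. Instantiating Outer<int, char>
      // expands it into two template template parameters, one taking <int>
      // and one taking <char>; SubstTemplateParams runs once per expansion.
      template<template<Ts> class ...TTs>
      struct Inner {};
    };
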
@@ -1813,7 +1947,12 @@ Decl *TemplateDeclInstantiator::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
D->getIdentLocation(),
D->getNominatedNamespace(),
D->getCommonAncestor());
- Owner->addDecl(Inst);
+
+ // Add the using directive to its declaration context
+ // only if this is not a function or method.
+ if (!Owner->isFunctionOrMethod())
+ Owner->addDecl(Inst);
+
return Inst;
}
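
[Editor's illustration, an assumed example rather than one taken from the patch: the new guard matters for function-local using-directives, which are owned by the instantiated statement rather than by the function's DeclContext:]

    namespace N { int x; }

    template<typename T> void f() {
      using namespace N;   // instantiated along with f<T>, but not
      (void)x;             // re-added to the function's DeclContext here
    }
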
@@ -2863,7 +3002,7 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
const MultiLevelTemplateArgumentList &TemplateArgs) {
SmallVector<CXXCtorInitializer*, 4> NewInits;
- bool AnyErrors = false;
+ bool AnyErrors = Tmpl->isInvalidDecl();
// Instantiate all the initializers.
for (CXXConstructorDecl::init_const_iterator Inits = Tmpl->init_begin(),
@@ -3031,7 +3170,7 @@ ExprResult Sema::SubstInitializer(Expr *Init,
isa<CXXTemporaryObjectExpr>(Construct))
return SubstExpr(Init, TemplateArgs);
- ASTOwningVector<Expr*> NewArgs(*this);
+ SmallVector<Expr*, 8> NewArgs;
if (SubstExprs(Construct->getArgs(), Construct->getNumArgs(), true,
TemplateArgs, NewArgs))
return ExprError();
@@ -3043,7 +3182,7 @@ ExprResult Sema::SubstInitializer(Expr *Init,
// Build a ParenListExpr to represent anything else.
// FIXME: Fake locations!
SourceLocation Loc = PP.getLocForEndOfToken(Init->getLocStart());
- return ActOnParenListExpr(Loc, Loc, move_arg(NewArgs));
+ return ActOnParenListExpr(Loc, Loc, NewArgs);
}
// TODO: this could be templated if the various decl types used the
@@ -3298,7 +3437,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
if (Decl *FD = Found->dyn_cast<Decl *>())
return cast<NamedDecl>(FD);
- unsigned PackIdx = ArgumentPackSubstitutionIndex;
+ int PackIdx = ArgumentPackSubstitutionIndex;
+ assert(PackIdx != -1 && "found declaration pack but not pack expanding");
return cast<NamedDecl>((*Found->get<DeclArgumentPack *>())[PackIdx]);
}
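
[A simplified sketch of the RAII idiom the new assertion relies on, drawn from the uses earlier in this file:]

    {
      Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
      // While SubstIndex is alive, FindInstantiatedDecl resolves a
      // declaration pack to its I-th expanded element; with index -1
      // (no expansion in progress) hitting a pack now asserts.
    } // previous substitution index restored here
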
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
index aece90b..6147d63 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -727,6 +727,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_enum:
case TST_union:
case TST_struct:
+ case TST_interface:
case TST_class:
case TST_auto:
case TST_unknown_anytype:
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
index 54f8dba..4b23167 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
@@ -105,7 +105,8 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
case AttributeList::AT_ThisCall: \
case AttributeList::AT_Pascal: \
case AttributeList::AT_Regparm: \
- case AttributeList::AT_Pcs \
+ case AttributeList::AT_Pcs: \
+ case AttributeList::AT_PnaclCall \
namespace {
/// An object which stores processing state for the entire
@@ -552,19 +553,28 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
SourceLocation loc = declarator.getLocStart();
// ...and *prepend* it to the declarator.
+ SourceLocation NoLoc;
declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction(
- /*proto*/ true,
- /*variadic*/ false,
- /*ambiguous*/ false, SourceLocation(),
- /*args*/ 0, 0,
- /*type quals*/ 0,
- /*ref-qualifier*/true, SourceLocation(),
- /*const qualifier*/SourceLocation(),
- /*volatile qualifier*/SourceLocation(),
- /*mutable qualifier*/SourceLocation(),
- /*EH*/ EST_None, SourceLocation(), 0, 0, 0, 0,
- /*parens*/ loc, loc,
- declarator));
+ /*HasProto=*/true,
+ /*IsAmbiguous=*/false,
+ /*LParenLoc=*/NoLoc,
+ /*ArgInfo=*/0,
+ /*NumArgs=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*MutableLoc=*/NoLoc,
+ EST_None,
+ /*ESpecLoc=*/NoLoc,
+ /*Exceptions=*/0,
+ /*ExceptionRanges=*/0,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/0,
+ loc, loc, declarator));
// For consistency, make sure the state still has us as processing
// the decl spec.
@@ -636,7 +646,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// "<proto1,proto2>" is an objc qualified ID with a missing id.
if (DeclSpec::ProtocolQualifierListTy PQ = DS.getProtocolQualifiers()) {
Result = Context.getObjCObjectType(Context.ObjCBuiltinIdTy,
- (ObjCProtocolDecl**)PQ,
+ (ObjCProtocolDecl*const*)PQ,
DS.getNumProtocolQualifiers());
Result = Context.getObjCObjectPointerType(Result);
break;
@@ -698,11 +708,15 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TSW_longlong:
Result = Context.LongLongTy;
- // long long is a C99 feature.
- if (!S.getLangOpts().C99)
- S.Diag(DS.getTypeSpecWidthLoc(),
- S.getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_longlong : diag::ext_longlong);
+ // 'long long' is a C99 or C++11 feature.
+ if (!S.getLangOpts().C99) {
+ if (S.getLangOpts().CPlusPlus)
+ S.Diag(DS.getTypeSpecWidthLoc(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
+ else
+ S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong);
+ }
break;
}
} else {
@@ -713,11 +727,15 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TSW_longlong:
Result = Context.UnsignedLongLongTy;
- // long long is a C99 feature.
- if (!S.getLangOpts().C99)
- S.Diag(DS.getTypeSpecWidthLoc(),
- S.getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_longlong : diag::ext_longlong);
+ // 'long long' is a C99 or C++11 feature.
+ if (!S.getLangOpts().C99) {
+ if (S.getLangOpts().CPlusPlus)
+ S.Diag(DS.getTypeSpecWidthLoc(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
+ else
+ S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong);
+ }
break;
}
}
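
[Hedged illustration of the three diagnostic paths above; the diagnostic IDs come from the patch, the exact warning text is assumed:]

    long long x;   // C89/C90:                 ext_c99_longlong (extension)
                   // C++98/03:                ext_cxx11_longlong (extension)
                   // C++11 with -Wc++98-compat: warn_cxx98_compat_longlong
                   // C99 and later:           no diagnostic
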
@@ -753,7 +771,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TST_class:
case DeclSpec::TST_enum:
case DeclSpec::TST_union:
- case DeclSpec::TST_struct: {
+ case DeclSpec::TST_struct:
+ case DeclSpec::TST_interface: {
TypeDecl *D = dyn_cast_or_null<TypeDecl>(DS.getRepAsDecl());
if (!D) {
// This can happen in C++ with ambiguous lookups.
@@ -794,18 +813,18 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
if (DS.getNumProtocolQualifiers())
Result = Context.getObjCObjectType(Result,
- (ObjCProtocolDecl**) PQ,
+ (ObjCProtocolDecl*const*) PQ,
DS.getNumProtocolQualifiers());
} else if (Result->isObjCIdType()) {
// id<protocol-list>
Result = Context.getObjCObjectType(Context.ObjCBuiltinIdTy,
- (ObjCProtocolDecl**) PQ,
+ (ObjCProtocolDecl*const*) PQ,
DS.getNumProtocolQualifiers());
Result = Context.getObjCObjectPointerType(Result);
} else if (Result->isObjCClassType()) {
// Class<protocol-list>
Result = Context.getObjCObjectType(Context.ObjCBuiltinClassTy,
- (ObjCProtocolDecl**) PQ,
+ (ObjCProtocolDecl*const*) PQ,
DS.getNumProtocolQualifiers());
Result = Context.getObjCObjectPointerType(Result);
} else {
@@ -1853,30 +1872,31 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case TTK_Struct: Error = 1; /* Struct member */ break;
case TTK_Union: Error = 2; /* Union member */ break;
case TTK_Class: Error = 3; /* Class member */ break;
+ case TTK_Interface: Error = 4; /* Interface member */ break;
}
break;
case Declarator::CXXCatchContext:
case Declarator::ObjCCatchContext:
- Error = 4; // Exception declaration
+ Error = 5; // Exception declaration
break;
case Declarator::TemplateParamContext:
- Error = 5; // Template parameter
+ Error = 6; // Template parameter
break;
case Declarator::BlockLiteralContext:
- Error = 6; // Block literal
+ Error = 7; // Block literal
break;
case Declarator::TemplateTypeArgContext:
- Error = 7; // Template type argument
+ Error = 8; // Template type argument
break;
case Declarator::AliasDeclContext:
case Declarator::AliasTemplateContext:
- Error = 9; // Type alias
+ Error = 10; // Type alias
break;
case Declarator::TrailingReturnContext:
- Error = 10; // Function return type
+ Error = 11; // Function return type
break;
case Declarator::TypeNameContext:
- Error = 11; // Generic
+ Error = 12; // Generic
break;
case Declarator::FileContext:
case Declarator::BlockContext:
@@ -1887,11 +1907,11 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
}
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
- Error = 8;
+ Error = 9;
// In Objective-C it is an error to use 'auto' on a function declarator.
if (D.isFunctionDeclarator())
- Error = 10;
+ Error = 11;
// C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator
// contains a trailing return type. That is only legal at the outermost
@@ -2149,16 +2169,6 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
ASTContext &Context = S.Context;
const LangOptions &LangOpts = S.getLangOpts();
- bool ImplicitlyNoexcept = false;
- if (D.getName().getKind() == UnqualifiedId::IK_OperatorFunctionId &&
- LangOpts.CPlusPlus0x) {
- OverloadedOperatorKind OO = D.getName().OperatorFunctionId.Operator;
- /// In C++0x, deallocation functions (normal and array operator delete)
- /// are implicitly noexcept.
- if (OO == OO_Delete || OO == OO_Array_Delete)
- ImplicitlyNoexcept = true;
- }
-
// The name we're declaring, if any.
DeclarationName Name;
if (D.getIdentifier())
@@ -2558,12 +2568,6 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
Exceptions,
EPI);
- if (FTI.getExceptionSpecType() == EST_None &&
- ImplicitlyNoexcept && chunkIndex == 0) {
- // Only the outermost chunk is marked noexcept, of course.
- EPI.ExceptionSpecType = EST_BasicNoexcept;
- }
-
T = Context.getFunctionType(T, ArgTys.data(), ArgTys.size(), EPI);
}
@@ -2657,6 +2661,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++0x [dcl.constexpr]p8: A constexpr specifier for a non-static member
// function that is not a constructor declares that function to be const.
+ // FIXME: This should be deferred until we know whether this is a static
+ // member function (for an out-of-class definition, we don't know
+ // this until we perform redeclaration lookup).
if (D.getDeclSpec().isConstexprSpecified() && !FreeFunction &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static &&
D.getName().getKind() != UnqualifiedId::IK_ConstructorName &&
@@ -2668,6 +2675,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
T = Context.getFunctionType(FnTy->getResultType(),
FnTy->arg_type_begin(),
FnTy->getNumArgs(), EPI);
+ // Rebuild any parens around the identifier in the function type.
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ if (D.getTypeObject(i).Kind != DeclaratorChunk::Paren)
+ break;
+ T = S.BuildParenType(T);
+ }
}
// C++11 [dcl.fct]p6 (w/DR1417):
@@ -2721,6 +2734,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
T = Context.getFunctionType(FnTy->getResultType(),
FnTy->arg_type_begin(),
FnTy->getNumArgs(), EPI);
+ // Rebuild any parens around the identifier in the function type.
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ if (D.getTypeObject(i).Kind != DeclaratorChunk::Paren)
+ break;
+ T = S.BuildParenType(T);
+ }
}
}
@@ -2985,6 +3004,8 @@ static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
return AttributeList::AT_Pascal;
case AttributedType::attr_pcs:
return AttributeList::AT_Pcs;
+ case AttributedType::attr_pnaclcall:
+ return AttributeList::AT_PnaclCall;
}
llvm_unreachable("unexpected attribute kind!");
}
@@ -3271,6 +3292,8 @@ namespace {
TL.setLocalRangeEnd(Chunk.EndLoc);
const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun;
+ TL.setLParenLoc(FTI.getLParenLoc());
+ TL.setRParenLoc(FTI.getRParenLoc());
for (unsigned i = 0, e = TL.getNumArgs(), tpi = 0; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
TL.setArg(tpi++, Param);
@@ -3588,7 +3611,7 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
// Forbid __weak if the runtime doesn't support it.
if (lifetime == Qualifiers::OCL_Weak &&
- !S.getLangOpts().ObjCRuntimeHasWeak && !NonObjCPointer) {
+ !S.getLangOpts().ObjCARCWeak && !NonObjCPointer) {
// Actually, delay this until we know what we're parsing.
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
@@ -3611,11 +3634,12 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
while (const PointerType *ptr = T->getAs<PointerType>())
T = ptr->getPointeeType();
if (const ObjCObjectPointerType *ObjT = T->getAs<ObjCObjectPointerType>()) {
- ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl();
- if (Class->isArcWeakrefUnavailable()) {
- S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
- S.Diag(ObjT->getInterfaceDecl()->getLocation(),
- diag::note_class_declared);
+ if (ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl()) {
+ if (Class->isArcWeakrefUnavailable()) {
+ S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
+ S.Diag(ObjT->getInterfaceDecl()->getLocation(),
+ diag::note_class_declared);
+ }
}
}
}
@@ -3875,14 +3899,14 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
return true;
}
+ // Delay if the type didn't work out to a function.
+ if (!unwrapped.isFunctionType()) return false;
+
// Otherwise, a calling convention.
CallingConv CC;
if (S.CheckCallingConvAttr(attr, CC))
return true;
- // Delay if the type didn't work out to a function.
- if (!unwrapped.isFunctionType()) return false;
-
const FunctionType *fn = unwrapped.get();
CallingConv CCOld = fn->getCallConv();
if (S.Context.getCanonicalCallConv(CC) ==
@@ -4429,6 +4453,20 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
return RequireCompleteType(Loc, T, Diagnoser);
}
+/// \brief Get the diagnostic %select index for a tag kind, for use in
+/// literal-type diagnostic messages.
+/// WARNING: Indexes apply to particular diagnostics only!
+///
+/// \returns diagnostic %select index.
+static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) {
+ switch (Tag) {
+ case TTK_Struct: return 0;
+ case TTK_Interface: return 1;
+ case TTK_Class: return 2;
+ default: llvm_unreachable("Invalid tag kind for literal type diagnostic!");
+ }
+}
+
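
[Editor's context, an illustrative sketch and not the actual TableGen entry: a diagnostic consumes such an index through %select, so the mapping above must stay in step with the alternative ordering in the corresponding DiagnosticSemaKinds.td definition:]

    // Hypothetical diagnostic definition; indices 0/1/2 match the
    // struct/interface/class ordering returned above.
    def note_example : Note<
      "%select{struct|interface|class}0 with virtual bases is not a literal type">;
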
/// @brief Ensure that the type T is a literal type.
///
/// This routine checks whether the type @p T is a literal type. If @p T is an
@@ -4485,7 +4523,7 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
// of constexpr constructors.
if (RD->getNumVBases()) {
Diag(RD->getLocation(), diag::note_non_literal_virtual_base)
- << RD->isStruct() << RD->getNumVBases();
+ << getLiteralDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
E = RD->vbases_end(); I != E; ++I)
Diag(I->getLocStart(),
@@ -4578,15 +4616,21 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// member access (5.2.5), decltype(e) is the type of the entity named
// by e. If there is no such entity, or if e names a set of overloaded
// functions, the program is ill-formed;
+ //
+ // We apply the same rules for Objective-C ivar and property references.
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
return VD->getType();
- }
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
return FD->getType();
+ } else if (const ObjCIvarRefExpr *IR = dyn_cast<ObjCIvarRefExpr>(E)) {
+ return IR->getDecl()->getType();
+ } else if (const ObjCPropertyRefExpr *PR = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (PR->isExplicitProperty())
+ return PR->getExplicitProperty()->getType();
}
-
+
// C++11 [expr.lambda.prim]p18:
// Every occurrence of decltype((x)) where x is a possibly
// parenthesized id-expression that names an entity of automatic
diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
index 619ad33..294d742 100644
--- a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
@@ -574,6 +574,10 @@ public:
/// \brief Transform the captures and body of a lambda expression.
ExprResult TransformLambdaScope(LambdaExpr *E, CXXMethodDecl *CallOperator);
+ ExprResult TransformAddressOfOperand(Expr *E);
+ ExprResult TransformDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E,
+ bool IsAddressOfOperand);
+
#define STMT(Node, Parent) \
StmtResult Transform##Node(Node *S);
#define EXPR(Node, Parent) \
@@ -1162,32 +1166,23 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildAsmStmt(SourceLocation AsmLoc,
- bool IsSimple,
- bool IsVolatile,
- unsigned NumOutputs,
- unsigned NumInputs,
- IdentifierInfo **Names,
- MultiExprArg Constraints,
- MultiExprArg Exprs,
- Expr *AsmString,
- MultiExprArg Clobbers,
- SourceLocation RParenLoc,
- bool MSAsm) {
- return getSema().ActOnAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs,
- NumInputs, Names, move(Constraints),
- Exprs, AsmString, Clobbers,
- RParenLoc, MSAsm);
+ StmtResult RebuildGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
+ bool IsVolatile, unsigned NumOutputs,
+ unsigned NumInputs, IdentifierInfo **Names,
+ MultiExprArg Constraints, MultiExprArg Exprs,
+ Expr *AsmString, MultiExprArg Clobbers,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnGCCAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, Constraints, Exprs,
+ AsmString, Clobbers, RParenLoc);
}
/// \brief Build a new MS style inline asm statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildMSAsmStmt(SourceLocation AsmLoc,
- SourceLocation LBraceLoc,
- ArrayRef<Token> AsmToks,
- SourceLocation EndLoc) {
+ StmtResult RebuildMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
+ ArrayRef<Token> AsmToks, SourceLocation EndLoc) {
return getSema().ActOnMSAsmStmt(AsmLoc, LBraceLoc, AsmToks, EndLoc);
}
@@ -1199,7 +1194,7 @@ public:
Stmt *TryBody,
MultiStmtArg CatchStmts,
Stmt *Finally) {
- return getSema().ActOnObjCAtTryStmt(AtLoc, TryBody, move(CatchStmts),
+ return getSema().ActOnObjCAtTryStmt(AtLoc, TryBody, CatchStmts,
Finally);
}
@@ -1325,7 +1320,7 @@ public:
StmtResult RebuildCXXTryStmt(SourceLocation TryLoc,
Stmt *TryBlock,
MultiStmtArg Handlers) {
- return getSema().ActOnCXXTryBlock(TryLoc, TryBlock, move(Handlers));
+ return getSema().ActOnCXXTryBlock(TryLoc, TryBlock, Handlers);
}
/// \brief Build a new C++0x range-based for statement.
@@ -1339,7 +1334,8 @@ public:
Stmt *LoopVar,
SourceLocation RParenLoc) {
return getSema().BuildCXXForRangeStmt(ForLoc, ColonLoc, Range, BeginEnd,
- Cond, Inc, LoopVar, RParenLoc);
+ Cond, Inc, LoopVar, RParenLoc,
+ Sema::BFRK_Rebuild);
}
/// \brief Build a new C++0x range-based for statement.
@@ -1478,7 +1474,7 @@ public:
if (Result.isInvalid())
return ExprError();
- return move(Result);
+ return Result;
}
/// \brief Build a new array subscript expression.
@@ -1503,7 +1499,7 @@ public:
SourceLocation RParenLoc,
Expr *ExecConfig = 0) {
return getSema().ActOnCallExpr(/*Scope=*/0, Callee, LParenLoc,
- move(Args), RParenLoc, ExecConfig);
+ Args, RParenLoc, ExecConfig);
}
/// \brief Build a new member access expression.
@@ -1638,15 +1634,15 @@ public:
SourceLocation RBraceLoc,
QualType ResultTy) {
ExprResult Result
- = SemaRef.ActOnInitList(LBraceLoc, move(Inits), RBraceLoc);
+ = SemaRef.ActOnInitList(LBraceLoc, Inits, RBraceLoc);
if (Result.isInvalid() || ResultTy->isDependentType())
- return move(Result);
+ return Result;
// Patch in the result type we were given, which may have been computed
// when the initial InitListExpr was built.
InitListExpr *ILE = cast<InitListExpr>((Expr *)Result.get());
ILE->setType(ResultTy);
- return move(Result);
+ return Result;
}
/// \brief Build a new designated initializer expression.
@@ -1664,8 +1660,7 @@ public:
if (Result.isInvalid())
return ExprError();
- ArrayExprs.release();
- return move(Result);
+ return Result;
}
/// \brief Build a new value-initialized expression.
@@ -1696,7 +1691,7 @@ public:
ExprResult RebuildParenListExpr(SourceLocation LParenLoc,
MultiExprArg SubExprs,
SourceLocation RParenLoc) {
- return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, move(SubExprs));
+ return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, SubExprs);
}
/// \brief Build a new address-of-label expression.
@@ -1974,8 +1969,7 @@ public:
SourceLocation LParenLoc,
SourceLocation RParenLoc) {
return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc,
- MultiExprArg(getSema(), 0, 0),
- RParenLoc);
+ MultiExprArg(), RParenLoc);
}
/// \brief Build a new C++ "new" expression.
@@ -1995,7 +1989,7 @@ public:
Expr *Initializer) {
return getSema().BuildCXXNew(StartLoc, UseGlobal,
PlacementLParen,
- move(PlacementArgs),
+ PlacementArgs,
PlacementRParen,
TypeIdParens,
AllocatedType,
@@ -2083,7 +2077,8 @@ public:
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs) {
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool IsAddressOfOperand) {
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
@@ -2091,7 +2086,8 @@ public:
return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc,
NameInfo, TemplateArgs);
- return getSema().BuildQualifiedDeclarationNameExpr(SS, NameInfo);
+ return getSema().BuildQualifiedDeclarationNameExpr(SS, NameInfo,
+ IsAddressOfOperand);
}
/// \brief Build a new template-id expression.
@@ -2120,13 +2116,13 @@ public:
bool RequiresZeroInit,
CXXConstructExpr::ConstructionKind ConstructKind,
SourceRange ParenRange) {
- ASTOwningVector<Expr*> ConvertedArgs(SemaRef);
- if (getSema().CompleteConstructorCall(Constructor, move(Args), Loc,
+ SmallVector<Expr*, 8> ConvertedArgs;
+ if (getSema().CompleteConstructorCall(Constructor, Args, Loc,
ConvertedArgs))
return ExprError();
return getSema().BuildCXXConstructExpr(Loc, T, Constructor, IsElidable,
- move_arg(ConvertedArgs),
+ ConvertedArgs,
HadMultipleCandidates,
RequiresZeroInit, ConstructKind,
ParenRange);
@@ -2142,7 +2138,7 @@ public:
SourceLocation RParenLoc) {
return getSema().BuildCXXTypeConstructExpr(TSInfo,
LParenLoc,
- move(Args),
+ Args,
RParenLoc);
}
@@ -2156,7 +2152,7 @@ public:
SourceLocation RParenLoc) {
return getSema().BuildCXXTypeConstructExpr(TSInfo,
LParenLoc,
- move(Args),
+ Args,
RParenLoc);
}
@@ -2288,7 +2284,7 @@ public:
ReceiverTypeInfo->getType(),
/*SuperLoc=*/SourceLocation(),
Sel, Method, LBracLoc, SelectorLocs,
- RBracLoc, move(Args));
+ RBracLoc, Args);
}
/// \brief Build a new Objective-C instance message.
@@ -2303,7 +2299,7 @@ public:
Receiver->getType(),
/*SuperLoc=*/SourceLocation(),
Sel, Method, LBracLoc, SelectorLocs,
- RBracLoc, move(Args));
+ RBracLoc, Args);
}
/// \brief Build a new Objective-C ivar reference expression.
@@ -2326,7 +2322,7 @@ public:
return ExprError();
if (Result.get())
- return move(Result);
+ return Result;
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
/*FIXME:*/IvarLoc, IsArrow,
@@ -2355,7 +2351,7 @@ public:
return ExprError();
if (Result.get())
- return move(Result);
+ return Result;
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
/*FIXME:*/PropertyLoc, IsArrow,
@@ -2398,7 +2394,7 @@ public:
return ExprError();
if (Result.get())
- return move(Result);
+ return Result;
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
/*FIXME:*/IsaLoc, IsArrow,
@@ -2424,21 +2420,17 @@ public:
// Build a reference to the __builtin_shufflevector builtin
FunctionDecl *Builtin = cast<FunctionDecl>(*Lookup.first);
- ExprResult Callee
- = SemaRef.Owned(new (SemaRef.Context) DeclRefExpr(Builtin, false,
- Builtin->getType(),
- VK_LValue, BuiltinLoc));
- Callee = SemaRef.UsualUnaryConversions(Callee.take());
- if (Callee.isInvalid())
- return ExprError();
+ Expr *Callee = new (SemaRef.Context) DeclRefExpr(Builtin, false,
+ SemaRef.Context.BuiltinFnTy,
+ VK_RValue, BuiltinLoc);
+ QualType CalleePtrTy = SemaRef.Context.getPointerType(Builtin->getType());
+ Callee = SemaRef.ImpCastExprToType(Callee, CalleePtrTy,
+ CK_BuiltinFnToFnPtr).take();
// Build the CallExpr
- unsigned NumSubExprs = SubExprs.size();
- Expr **Subs = (Expr **)SubExprs.release();
ExprResult TheCall = SemaRef.Owned(
- new (SemaRef.Context) CallExpr(SemaRef.Context, Callee.take(),
- Subs, NumSubExprs,
- Builtin->getCallResultType(),
+ new (SemaRef.Context) CallExpr(SemaRef.Context, Callee, SubExprs,
+ Builtin->getCallResultType(),
Expr::getValueKindForType(Builtin->getResultType()),
RParenLoc));
@@ -2478,6 +2470,7 @@ public:
case TemplateArgument::Declaration:
case TemplateArgument::Pack:
case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::NullPtr:
llvm_unreachable("Pack expansion pattern has no parameter packs");
case TemplateArgument::Type:
@@ -2515,10 +2508,7 @@ public:
// Just create the expression; there is not any interesting semantic
// analysis here because we can't actually build an AtomicExpr until
// we are sure it is semantically sound.
- unsigned NumSubExprs = SubExprs.size();
- Expr **Subs = (Expr **)SubExprs.release();
- return new (SemaRef.Context) AtomicExpr(BuiltinLoc, Subs,
- NumSubExprs, RetTy, Op,
+ return new (SemaRef.Context) AtomicExpr(BuiltinLoc, SubExprs, RetTy, Op,
RParenLoc);
}
@@ -2963,6 +2953,7 @@ void TreeTransform<Derived>::InventTemplateArgumentLoc(
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
case TemplateArgument::Pack:
+ case TemplateArgument::NullPtr:
Output = TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
break;
}
@@ -2976,8 +2967,10 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
switch (Arg.getKind()) {
case TemplateArgument::Null:
case TemplateArgument::Integral:
- Output = Input;
- return false;
+ case TemplateArgument::Pack:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
+ llvm_unreachable("Unexpected TemplateArgument");
case TemplateArgument::Type: {
TypeSourceInfo *DI = Input.getTypeSourceInfo();
@@ -2991,28 +2984,6 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
return false;
}
- case TemplateArgument::Declaration: {
- // FIXME: we should never have to transform one of these.
- DeclarationName Name;
- if (NamedDecl *ND = dyn_cast<NamedDecl>(Arg.getAsDecl()))
- Name = ND->getDeclName();
- TemporaryBase Rebase(*this, Input.getLocation(), Name);
- Decl *D = getDerived().TransformDecl(Input.getLocation(), Arg.getAsDecl());
- if (!D) return true;
-
- Expr *SourceExpr = Input.getSourceDeclExpression();
- if (SourceExpr) {
- EnterExpressionEvaluationContext Unevaluated(getSema(),
- Sema::ConstantEvaluated);
- ExprResult E = getDerived().TransformExpr(SourceExpr);
- E = SemaRef.ActOnConstantExpression(E);
- SourceExpr = (E.isInvalid() ? 0 : E.take());
- }
-
- Output = TemplateArgumentLoc(TemplateArgument(D), SourceExpr);
- return false;
- }
-
case TemplateArgument::Template: {
NestedNameSpecifierLoc QualifierLoc = Input.getTemplateQualifierLoc();
if (QualifierLoc) {
@@ -3051,35 +3022,6 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
Output = TemplateArgumentLoc(TemplateArgument(E.take()), E.take());
return false;
}
-
- case TemplateArgument::Pack: {
- SmallVector<TemplateArgument, 4> TransformedArgs;
- TransformedArgs.reserve(Arg.pack_size());
- for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
- AEnd = Arg.pack_end();
- A != AEnd; ++A) {
-
- // FIXME: preserve source information here when we start
- // caring about parameter packs.
-
- TemplateArgumentLoc InputArg;
- TemplateArgumentLoc OutputArg;
- getDerived().InventTemplateArgumentLoc(*A, InputArg);
- if (getDerived().TransformTemplateArgument(InputArg, OutputArg))
- return true;
-
- TransformedArgs.push_back(OutputArg.getArgument());
- }
-
- TemplateArgument *TransformedArgsPtr
- = new (getSema().Context) TemplateArgument[TransformedArgs.size()];
- std::copy(TransformedArgs.begin(), TransformedArgs.end(),
- TransformedArgsPtr);
- Output = TemplateArgumentLoc(TemplateArgument(TransformedArgsPtr,
- TransformedArgs.size()),
- Input.getLocInfo());
- return false;
- }
}
// Work around bogus GCC warning
@@ -4260,6 +4202,8 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result);
NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
for (unsigned i = 0, e = NewTL.getNumArgs(); i != e; ++i)
NewTL.setArg(i, ParamDecls[i]);
@@ -4283,6 +4227,8 @@ QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
FunctionNoProtoTypeLoc NewTL = TLB.push<FunctionNoProtoTypeLoc>(Result);
NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
return Result;
@@ -4339,7 +4285,8 @@ template<typename Derived>
QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
TypeOfExprTypeLoc TL) {
// typeof expressions are not potentially evaluated contexts
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
ExprResult E = getDerived().TransformExpr(TL.getUnderlyingExpr());
if (E.isInvalid())
@@ -5099,7 +5046,7 @@ TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
bool SubStmtInvalid = false;
bool SubStmtChanged = false;
- ASTOwningVector<Stmt*> Statements(getSema());
+ SmallVector<Stmt*, 8> Statements;
for (CompoundStmt::body_iterator B = S->body_begin(), BEnd = S->body_end();
B != BEnd; ++B) {
StmtResult Result = getDerived().TransformStmt(*B);
@@ -5126,7 +5073,7 @@ TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
return SemaRef.Owned(S);
return getDerived().RebuildCompoundStmt(S->getLBracLoc(),
- move_arg(Statements),
+ Statements,
S->getRBracLoc(),
IsStmtExpr);
}
@@ -5533,14 +5480,14 @@ TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
template<typename Derived>
StmtResult
-TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
+TreeTransform<Derived>::TransformGCCAsmStmt(GCCAsmStmt *S) {
- ASTOwningVector<Expr*> Constraints(getSema());
- ASTOwningVector<Expr*> Exprs(getSema());
+ SmallVector<Expr*, 8> Constraints;
+ SmallVector<Expr*, 8> Exprs;
SmallVector<IdentifierInfo *, 4> Names;
ExprResult AsmString;
- ASTOwningVector<Expr*> Clobbers(getSema());
+ SmallVector<Expr*, 8> Clobbers;
bool ExprsChanged = false;
@@ -5585,23 +5532,15 @@ TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
// Go through the clobbers.
for (unsigned I = 0, E = S->getNumClobbers(); I != E; ++I)
- Clobbers.push_back(S->getClobber(I));
+ Clobbers.push_back(S->getClobberStringLiteral(I));
// No need to transform the asm string literal.
AsmString = SemaRef.Owned(S->getAsmString());
-
- return getDerived().RebuildAsmStmt(S->getAsmLoc(),
- S->isSimple(),
- S->isVolatile(),
- S->getNumOutputs(),
- S->getNumInputs(),
- Names.data(),
- move_arg(Constraints),
- move_arg(Exprs),
- AsmString.get(),
- move_arg(Clobbers),
- S->getRParenLoc(),
- S->isMSAsm());
+ return getDerived().RebuildGCCAsmStmt(S->getAsmLoc(), S->isSimple(),
+ S->isVolatile(), S->getNumOutputs(),
+ S->getNumInputs(), Names.data(),
+ Constraints, Exprs, AsmString.get(),
+ Clobbers, S->getRParenLoc());
}
template<typename Derived>
@@ -5624,7 +5563,7 @@ TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
// Transform the @catch statements (if present).
bool AnyCatchChanged = false;
- ASTOwningVector<Stmt*> CatchStmts(SemaRef);
+ SmallVector<Stmt*, 8> CatchStmts;
for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
StmtResult Catch = getDerived().TransformStmt(S->getCatchStmt(I));
if (Catch.isInvalid())
@@ -5651,7 +5590,7 @@ TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
// Build a new statement.
return getDerived().RebuildObjCAtTryStmt(S->getAtTryLoc(), TryBody.get(),
- move_arg(CatchStmts), Finally.get());
+ CatchStmts, Finally.get());
}
template<typename Derived>
@@ -5855,7 +5794,7 @@ TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) {
// Transform the handlers.
bool HandlerChanged = false;
- ASTOwningVector<Stmt*> Handlers(SemaRef);
+ SmallVector<Stmt*, 8> Handlers;
for (unsigned I = 0, N = S->getNumHandlers(); I != N; ++I) {
StmtResult Handler
= getDerived().TransformCXXCatchStmt(S->getHandler(I));
@@ -5872,7 +5811,7 @@ TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) {
return SemaRef.Owned(S);
return getDerived().RebuildCXXTryStmt(S->getTryLoc(), TryBlock.get(),
- move_arg(Handlers));
+ Handlers);
}
template<typename Derived>
@@ -6205,10 +6144,22 @@ TreeTransform<Derived>::TransformParenExpr(ParenExpr *E) {
E->getRParen());
}
+/// \brief The operand of a unary address-of operator has special rules: it's
+/// allowed to refer to a non-static member of a class even if there's no 'this'
+/// object available.
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAddressOfOperand(Expr *E) {
+ if (DependentScopeDeclRefExpr *DRE = dyn_cast<DependentScopeDeclRefExpr>(E))
+ return getDerived().TransformDependentScopeDeclRefExpr(DRE, true);
+ else
+ return getDerived().TransformExpr(E);
+}
+
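
[Editor's illustration, an assumed example: the case this hook exists for is forming a pointer to member in a dependent context, where the address-of operand names a non-static member with no object available:]

    template<typename T>
    int T::* member_pointer() {
      return &T::value;   // 'T::value' is a DependentScopeDeclRefExpr;
                          // as an address-of operand it may name a
                          // non-static member without a 'this' object
    }
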
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformUnaryOperator(UnaryOperator *E) {
- ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ ExprResult SubExpr = TransformAddressOfOperand(E->getSubExpr());
if (SubExpr.isInvalid())
return ExprError();
@@ -6338,7 +6289,8 @@ TreeTransform<Derived>::TransformUnaryExprOrTypeTraitExpr(
// C++0x [expr.sizeof]p1:
// The operand is either an expression, which is an unevaluated operand
// [...]
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
ExprResult SubExpr = getDerived().TransformExpr(E->getArgumentExpr());
if (SubExpr.isInvalid())
@@ -6386,7 +6338,7 @@ TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
// Transform arguments.
bool ArgChanged = false;
- ASTOwningVector<Expr*> Args(SemaRef);
+ SmallVector<Expr*, 8> Args;
if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgChanged))
return ExprError();
@@ -6394,13 +6346,13 @@ TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
if (!getDerived().AlwaysRebuild() &&
Callee.get() == E->getCallee() &&
!ArgChanged)
- return SemaRef.MaybeBindToTemporary(E);;
+ return SemaRef.MaybeBindToTemporary(E);
// FIXME: Wrong source location information for the '('.
SourceLocation FakeLParenLoc
= ((Expr *)Callee.get())->getSourceRange().getBegin();
return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
- move_arg(Args),
+ Args,
E->getRParenLoc());
}
@@ -6499,6 +6451,9 @@ TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
RHS.get() == E->getRHS())
return SemaRef.Owned(E);
+ Sema::FPContractStateRAII FPContractState(getSema());
+ getSema().FPFeatures.fp_contract = E->isFPContractable();
+
return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(),
LHS.get(), RHS.get());
}
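
[Hedged illustration of why the rebuilt operator carries the original expression's contraction state, assuming the FP_CONTRACT pragma is honored in this configuration: whether 'a * b + c' may fuse into an FMA follows the setting in force where the expression was written, and that must survive template instantiation:]

    #pragma STDC FP_CONTRACT ON
    template<typename T>
    T mul_add(T a, T b, T c) {
      return a * b + c;   // isFPContractable() recorded here is replayed
                          // when TreeTransform rebuilds the operator
    }
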
@@ -6645,7 +6600,7 @@ ExprResult
TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) {
bool InitChanged = false;
- ASTOwningVector<Expr*, 4> Inits(SemaRef);
+ SmallVector<Expr*, 4> Inits;
if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false,
Inits, &InitChanged))
return ExprError();
@@ -6653,7 +6608,7 @@ TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) {
if (!getDerived().AlwaysRebuild() && !InitChanged)
return SemaRef.Owned(E);
- return getDerived().RebuildInitList(E->getLBraceLoc(), move_arg(Inits),
+ return getDerived().RebuildInitList(E->getLBraceLoc(), Inits,
E->getRBraceLoc(), E->getType());
}
@@ -6668,7 +6623,7 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
return ExprError();
// transform the designators.
- ASTOwningVector<Expr*, 4> ArrayExprs(SemaRef);
+ SmallVector<Expr*, 4> ArrayExprs;
bool ExprChanged = false;
for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
DEnd = E->designators_end();
@@ -6720,7 +6675,7 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
!ExprChanged)
return SemaRef.Owned(E);
- return getDerived().RebuildDesignatedInitExpr(Desig, move_arg(ArrayExprs),
+ return getDerived().RebuildDesignatedInitExpr(Desig, ArrayExprs,
E->getEqualOrColonLoc(),
E->usesGNUSyntax(), Init.get());
}
@@ -6768,13 +6723,13 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformParenListExpr(ParenListExpr *E) {
bool ArgumentChanged = false;
- ASTOwningVector<Expr*, 4> Inits(SemaRef);
+ SmallVector<Expr*, 4> Inits;
if (TransformExprs(E->getExprs(), E->getNumExprs(), true, Inits,
&ArgumentChanged))
return ExprError();
return getDerived().RebuildParenListExpr(E->getLParenLoc(),
- move_arg(Inits),
+ Inits,
E->getRParenLoc());
}
@@ -6875,13 +6830,13 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
static_cast<Expr *>(Object.get())->getLocEnd());
// Transform the call arguments.
- ASTOwningVector<Expr*> Args(SemaRef);
+ SmallVector<Expr*, 8> Args;
if (getDerived().TransformExprs(E->getArgs() + 1, E->getNumArgs() - 1, true,
Args))
return ExprError();
return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc,
- move_arg(Args),
+ Args,
E->getLocEnd());
}
@@ -6905,7 +6860,11 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
if (Callee.isInvalid())
return ExprError();
- ExprResult First = getDerived().TransformExpr(E->getArg(0));
+ ExprResult First;
+ if (E->getOperator() == OO_Amp)
+ First = getDerived().TransformAddressOfOperand(E->getArg(0));
+ else
+ First = getDerived().TransformExpr(E->getArg(0));
if (First.isInvalid())
return ExprError();
@@ -6922,6 +6881,9 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
(E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
return SemaRef.MaybeBindToTemporary(E);
+ Sema::FPContractStateRAII FPContractState(getSema());
+ getSema().FPFeatures.fp_contract = E->isFPContractable();
+
return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
E->getOperatorLoc(),
Callee.get(),
@@ -6950,7 +6912,7 @@ TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
// Transform arguments.
bool ArgChanged = false;
- ASTOwningVector<Expr*> Args(SemaRef);
+ SmallVector<Expr*, 8> Args;
if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgChanged))
return ExprError();
@@ -6964,7 +6926,7 @@ TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
SourceLocation FakeLParenLoc
= ((Expr *)Callee.get())->getSourceRange().getBegin();
return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
- move_arg(Args),
+ Args,
E->getRParenLoc(), EC.get());
}
@@ -6989,9 +6951,6 @@ TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
SourceLocation FakeLAngleLoc
= SemaRef.PP.getLocForEndOfToken(E->getOperatorLoc());
SourceLocation FakeRAngleLoc = E->getSubExpr()->getSourceRange().getBegin();
- SourceLocation FakeRParenLoc
- = SemaRef.PP.getLocForEndOfToken(
- E->getSubExpr()->getSourceRange().getEnd());
return getDerived().RebuildCXXNamedCastExpr(E->getOperatorLoc(),
E->getStmtClass(),
FakeLAngleLoc,
@@ -6999,7 +6958,7 @@ TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
FakeRAngleLoc,
FakeRAngleLoc,
SubExpr.get(),
- FakeRParenLoc);
+ E->getRParenLoc());
}
template<typename Derived>
@@ -7074,7 +7033,8 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
// after we perform semantic analysis. We speculatively assume it is
// unevaluated; it will get fixed later if the subexpression is in fact
// potentially evaluated.
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
if (SubExpr.isInvalid())
@@ -7222,7 +7182,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
// Transform the placement arguments (if any).
bool ArgumentChanged = false;
- ASTOwningVector<Expr*> PlacementArgs(SemaRef);
+ SmallVector<Expr*, 8> PlacementArgs;
if (getDerived().TransformExprs(E->getPlacementArgs(),
E->getNumPlacementArgs(), true,
PlacementArgs, &ArgumentChanged))
@@ -7313,7 +7273,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
return getDerived().RebuildCXXNewExpr(E->getLocStart(),
E->isGlobalNew(),
/*FIXME:*/E->getLocStart(),
- move_arg(PlacementArgs),
+ PlacementArgs,
/*FIXME:*/E->getLocStart(),
E->getTypeIdParens(),
AllocType,
@@ -7512,7 +7472,8 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
// If we have template arguments, rebuild them, then rebuild the
// templateid expression.
TemplateArgumentListInfo TransArgs(Old->getLAngleLoc(), Old->getRAngleLoc());
- if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
+ if (Old->hasExplicitTemplateArgs() &&
+ getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
Old->getNumTemplateArgs(),
TransArgs))
return ExprError();
@@ -7732,6 +7693,14 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
DependentScopeDeclRefExpr *E) {
+ return TransformDependentScopeDeclRefExpr(E, /*IsAddressOfOperand*/false);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
+ DependentScopeDeclRefExpr *E,
+ bool IsAddressOfOperand) {
NestedNameSpecifierLoc QualifierLoc
= getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
if (!QualifierLoc)
@@ -7758,7 +7727,8 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
return getDerived().RebuildDependentScopeDeclRefExpr(QualifierLoc,
TemplateKWLoc,
NameInfo,
- /*TemplateArgs*/ 0);
+ /*TemplateArgs*/ 0,
+ IsAddressOfOperand);
}
TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc());
@@ -7770,7 +7740,8 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
return getDerived().RebuildDependentScopeDeclRefExpr(QualifierLoc,
TemplateKWLoc,
NameInfo,
- &TransArgs);
+ &TransArgs,
+ IsAddressOfOperand);
}
template<typename Derived>
@@ -7796,7 +7767,7 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
return ExprError();
bool ArgumentChanged = false;
- ASTOwningVector<Expr*> Args(SemaRef);
+ SmallVector<Expr*, 8> Args;
if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgumentChanged))
return ExprError();
@@ -7813,7 +7784,7 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
return getDerived().RebuildCXXConstructExpr(T, /*FIXME:*/E->getLocStart(),
Constructor, E->isElidable(),
- move_arg(Args),
+ Args,
E->hadMultipleCandidates(),
E->requiresZeroInitialization(),
E->getConstructionKind(),
@@ -7857,7 +7828,7 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
return ExprError();
bool ArgumentChanged = false;
- ASTOwningVector<Expr*> Args(SemaRef);
+ SmallVector<Expr*, 8> Args;
Args.reserve(E->getNumArgs());
if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgumentChanged))
@@ -7874,19 +7845,13 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
return getDerived().RebuildCXXTemporaryObjectExpr(T,
/*FIXME:*/T->getTypeLoc().getEndLoc(),
- move_arg(Args),
+ Args,
E->getLocEnd());
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
- // Create the local class that will describe the lambda.
- CXXRecordDecl *Class
- = getSema().createLambdaClosureType(E->getIntroducerRange(),
- /*KnownDependent=*/false);
- getDerived().transformedLocalDecl(E->getLambdaClass(), Class);
-
// Transform the type of the lambda parameters and start the definition of
// the lambda itself.
TypeSourceInfo *MethodTy
@@ -7894,6 +7859,13 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
if (!MethodTy)
return ExprError();
+ // Create the local class that will describe the lambda.
+ CXXRecordDecl *Class
+ = getSema().createLambdaClosureType(E->getIntroducerRange(),
+ MethodTy,
+ /*KnownDependent=*/false);
+ getDerived().transformedLocalDecl(E->getLambdaClass(), Class);
+
// Transform lambda parameters.
llvm::SmallVector<QualType, 4> ParamTypes;
llvm::SmallVector<ParmVarDecl *, 4> Params;
@@ -8038,7 +8010,7 @@ TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
return ExprError();
bool ArgumentChanged = false;
- ASTOwningVector<Expr*> Args(SemaRef);
+ SmallVector<Expr*, 8> Args;
Args.reserve(E->arg_size());
if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
&ArgumentChanged))
@@ -8052,7 +8024,7 @@ TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
// FIXME: we're faking the locations of the commas
return getDerived().RebuildCXXUnresolvedConstructExpr(T,
E->getLParenLoc(),
- move_arg(Args),
+ Args,
E->getRParenLoc());
}
@@ -8346,6 +8318,13 @@ TreeTransform<Derived>::TransformSubstNonTypeTemplateParmExpr(
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ // Default behavior is to do nothing with this transformation.
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformMaterializeTemporaryExpr(
MaterializeTemporaryExpr *E) {
return getDerived().TransformExpr(E->GetTemporaryExpr());
@@ -8576,7 +8555,7 @@ ExprResult
TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
// Transform arguments.
bool ArgChanged = false;
- ASTOwningVector<Expr*> Args(SemaRef);
+ SmallVector<Expr*, 8> Args;
Args.reserve(E->getNumArgs());
if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), false, Args,
&ArgChanged))
@@ -8602,7 +8581,7 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
SelLocs,
E->getMethodDecl(),
E->getLeftLoc(),
- move_arg(Args),
+ Args,
E->getRightLoc());
}
@@ -8627,7 +8606,7 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
SelLocs,
E->getMethodDecl(),
E->getLeftLoc(),
- move_arg(Args),
+ Args,
E->getRightLoc());
}
@@ -8740,7 +8719,7 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) {
bool ArgumentChanged = false;
- ASTOwningVector<Expr*> SubExprs(SemaRef);
+ SmallVector<Expr*, 8> SubExprs;
SubExprs.reserve(E->getNumSubExprs());
if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
SubExprs, &ArgumentChanged))
@@ -8751,7 +8730,7 @@ TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) {
return SemaRef.Owned(E);
return getDerived().RebuildShuffleVectorExpr(E->getBuiltinLoc(),
- move_arg(SubExprs),
+ SubExprs,
E->getRParenLoc());
}
@@ -8854,7 +8833,7 @@ ExprResult
TreeTransform<Derived>::TransformAtomicExpr(AtomicExpr *E) {
QualType RetTy = getDerived().TransformType(E->getType());
bool ArgumentChanged = false;
- ASTOwningVector<Expr*> SubExprs(SemaRef);
+ SmallVector<Expr*, 8> SubExprs;
SubExprs.reserve(E->getNumSubExprs());
if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
SubExprs, &ArgumentChanged))
@@ -8864,7 +8843,7 @@ TreeTransform<Derived>::TransformAtomicExpr(AtomicExpr *E) {
!ArgumentChanged)
return SemaRef.Owned(E);
- return getDerived().RebuildAtomicExpr(E->getBuiltinLoc(), move_arg(SubExprs),
+ return getDerived().RebuildAtomicExpr(E->getBuiltinLoc(), SubExprs,
RetTy, E->getOp(), E->getRParenLoc());
}
@@ -9185,7 +9164,7 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
if (Result.isInvalid())
return ExprError();
- return move(Result);
+ return Result;
}
}
@@ -9200,7 +9179,12 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
// IsAcceptableNonMemberOperatorCandidate for each of these?
Functions.append(ULE->decls_begin(), ULE->decls_end());
} else {
- Functions.addDecl(cast<DeclRefExpr>(Callee)->getDecl());
+ // If we've resolved this to a particular non-member function, just call
+ // that function. If we resolved it to a member function,
+ // CreateOverloaded* will find that function for us.
+ NamedDecl *ND = cast<DeclRefExpr>(Callee)->getDecl();
+ if (!isa<CXXMethodDecl>(ND))
+ Functions.addDecl(ND);
}
// Add any functions found via argument-dependent lookup.
@@ -9240,7 +9224,7 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
if (Result.isInvalid())
return ExprError();
- return move(Result);
+ return Result;
}
template<typename Derived>
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
index 67f74f7..0ec03cf 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
@@ -60,6 +60,9 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::ObjCId: ID = PREDEF_TYPE_OBJC_ID; break;
case BuiltinType::ObjCClass: ID = PREDEF_TYPE_OBJC_CLASS; break;
case BuiltinType::ObjCSel: ID = PREDEF_TYPE_OBJC_SEL; break;
+ case BuiltinType::BuiltinFn:
+ ID = PREDEF_TYPE_BUILTIN_FN; break;
+
}
return TypeIdx(ID);
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
index 3adbc57..deba302 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
@@ -30,13 +30,16 @@
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Basic/OnDiskHashTable.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/Version.h"
#include "clang/Basic/VersionTuple.h"
#include "llvm/ADT/StringExtras.h"
@@ -62,353 +65,306 @@ using namespace clang::serialization::reader;
ASTReaderListener::~ASTReaderListener() {}
-bool
-PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
- const LangOptions &PPLangOpts = PP.getLangOpts();
-
-#define LANGOPT(Name, Bits, Default, Description) \
- if (PPLangOpts.Name != LangOpts.Name) { \
- Reader.Diag(diag::err_pch_langopt_mismatch) \
- << Description << LangOpts.Name << PPLangOpts.Name; \
+/// \brief Compare the given set of language options against an existing set of
+/// language options.
+///
+/// \param Diags If non-NULL, diagnostics will be emitted via this engine.
+///
+/// \returns true if the language options do not match, false otherwise.
+static bool checkLanguageOptions(const LangOptions &LangOpts,
+ const LangOptions &ExistingLangOpts,
+ DiagnosticsEngine *Diags) {
+#define LANGOPT(Name, Bits, Default, Description) \
+ if (ExistingLangOpts.Name != LangOpts.Name) { \
+ if (Diags) \
+ Diags->Report(diag::err_pch_langopt_mismatch) \
+ << Description << LangOpts.Name << ExistingLangOpts.Name; \
+ return true; \
+ }
+
+#define VALUE_LANGOPT(Name, Bits, Default, Description) \
+ if (ExistingLangOpts.Name != LangOpts.Name) { \
+ if (Diags) \
+ Diags->Report(diag::err_pch_langopt_value_mismatch) \
+ << Description; \
return true; \
}
-#define VALUE_LANGOPT(Name, Bits, Default, Description) \
- if (PPLangOpts.Name != LangOpts.Name) { \
- Reader.Diag(diag::err_pch_langopt_value_mismatch) \
- << Description; \
- return true; \
-}
-
-#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
- if (PPLangOpts.get##Name() != LangOpts.get##Name()) { \
- Reader.Diag(diag::err_pch_langopt_value_mismatch) \
- << Description; \
- return true; \
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ if (ExistingLangOpts.get##Name() != LangOpts.get##Name()) { \
+ if (Diags) \
+ Diags->Report(diag::err_pch_langopt_value_mismatch) \
+ << Description; \
+ return true; \
}
#define BENIGN_LANGOPT(Name, Bits, Default, Description)
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
#include "clang/Basic/LangOptions.def"
- if (PPLangOpts.ObjCRuntime != LangOpts.ObjCRuntime) {
- Reader.Diag(diag::err_pch_langopt_value_mismatch)
+ if (ExistingLangOpts.ObjCRuntime != LangOpts.ObjCRuntime) {
+ if (Diags)
+ Diags->Report(diag::err_pch_langopt_value_mismatch)
<< "target Objective-C runtime";
return true;
}
-
+
return false;
}
-bool PCHValidator::ReadTargetTriple(StringRef Triple) {
- if (Triple == PP.getTargetInfo().getTriple().str())
- return false;
-
- Reader.Diag(diag::warn_pch_target_triple)
- << Triple << PP.getTargetInfo().getTriple().str();
- return true;
-}
+/// \brief Compare the given set of target options against an existing set of
+/// target options.
+///
+/// \param Diags If non-NULL, diagnostics will be emitted via this engine.
+///
+/// \returns true if the target options do not match, false otherwise.
+static bool checkTargetOptions(const TargetOptions &TargetOpts,
+ const TargetOptions &ExistingTargetOpts,
+ DiagnosticsEngine *Diags) {
+#define CHECK_TARGET_OPT(Field, Name) \
+ if (TargetOpts.Field != ExistingTargetOpts.Field) { \
+ if (Diags) \
+ Diags->Report(diag::err_pch_targetopt_mismatch) \
+ << Name << TargetOpts.Field << ExistingTargetOpts.Field; \
+ return true; \
+ }
+
+ CHECK_TARGET_OPT(Triple, "target");
+ CHECK_TARGET_OPT(CPU, "target CPU");
+ CHECK_TARGET_OPT(ABI, "target ABI");
+ CHECK_TARGET_OPT(CXXABI, "target C++ ABI");
+ CHECK_TARGET_OPT(LinkerVersion, "target linker version");
+#undef CHECK_TARGET_OPT
+
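
[For readability, what one use of the helper macro above expands to, reproduced directly from the code shown:]

    if (TargetOpts.Triple != ExistingTargetOpts.Triple) {
      if (Diags)
        Diags->Report(diag::err_pch_targetopt_mismatch)
          << "target" << TargetOpts.Triple << ExistingTargetOpts.Triple;
      return true;
    }
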
+ // Compare feature sets.
+ SmallVector<StringRef, 4> ExistingFeatures(
+ ExistingTargetOpts.FeaturesAsWritten.begin(),
+ ExistingTargetOpts.FeaturesAsWritten.end());
+ SmallVector<StringRef, 4> ReadFeatures(TargetOpts.FeaturesAsWritten.begin(),
+ TargetOpts.FeaturesAsWritten.end());
+ std::sort(ExistingFeatures.begin(), ExistingFeatures.end());
+ std::sort(ReadFeatures.begin(), ReadFeatures.end());
+
+ unsigned ExistingIdx = 0, ExistingN = ExistingFeatures.size();
+ unsigned ReadIdx = 0, ReadN = ReadFeatures.size();
+ while (ExistingIdx < ExistingN && ReadIdx < ReadN) {
+ if (ExistingFeatures[ExistingIdx] == ReadFeatures[ReadIdx]) {
+ ++ExistingIdx;
+ ++ReadIdx;
+ continue;
+ }
-namespace {
- struct EmptyStringRef {
- bool operator ()(StringRef r) const { return r.empty(); }
- };
- struct EmptyBlock {
- bool operator ()(const PCHPredefinesBlock &r) const {return r.Data.empty();}
- };
-}
+ if (ReadFeatures[ReadIdx] < ExistingFeatures[ExistingIdx]) {
+ if (Diags)
+ Diags->Report(diag::err_pch_targetopt_feature_mismatch)
+ << false << ReadFeatures[ReadIdx];
+ return true;
+ }
-static bool EqualConcatenations(SmallVector<StringRef, 2> L,
- PCHPredefinesBlocks R) {
- // First, sum up the lengths.
- unsigned LL = 0, RL = 0;
- for (unsigned I = 0, N = L.size(); I != N; ++I) {
- LL += L[I].size();
- }
- for (unsigned I = 0, N = R.size(); I != N; ++I) {
- RL += R[I].Data.size();
- }
- if (LL != RL)
- return false;
- if (LL == 0 && RL == 0)
+ if (Diags)
+ Diags->Report(diag::err_pch_targetopt_feature_mismatch)
+ << true << ExistingFeatures[ExistingIdx];
return true;
-
- // Kick out empty parts, they confuse the algorithm below.
- L.erase(std::remove_if(L.begin(), L.end(), EmptyStringRef()), L.end());
- R.erase(std::remove_if(R.begin(), R.end(), EmptyBlock()), R.end());
-
- // Do it the hard way. At this point, both vectors must be non-empty.
- StringRef LR = L[0], RR = R[0].Data;
- unsigned LI = 0, RI = 0, LN = L.size(), RN = R.size();
- (void) RN;
- for (;;) {
- // Compare the current pieces.
- if (LR.size() == RR.size()) {
- // If they're the same length, it's pretty easy.
- if (LR != RR)
- return false;
- // Both pieces are done, advance.
- ++LI;
- ++RI;
- // If either string is done, they're both done, since they're the same
- // length.
- if (LI == LN) {
- assert(RI == RN && "Strings not the same length after all?");
- return true;
- }
- LR = L[LI];
- RR = R[RI].Data;
- } else if (LR.size() < RR.size()) {
- // Right piece is longer.
- if (!RR.startswith(LR))
- return false;
- ++LI;
- assert(LI != LN && "Strings not the same length after all?");
- RR = RR.substr(LR.size());
- LR = L[LI];
- } else {
- // Left piece is longer.
- if (!LR.startswith(RR))
- return false;
- ++RI;
- assert(RI != RN && "Strings not the same length after all?");
- LR = LR.substr(RR.size());
- RR = R[RI].Data;
- }
}
-}
-static std::pair<FileID, StringRef::size_type>
-FindMacro(const PCHPredefinesBlocks &Buffers, StringRef MacroDef) {
- std::pair<FileID, StringRef::size_type> Res;
- for (unsigned I = 0, N = Buffers.size(); I != N; ++I) {
- Res.second = Buffers[I].Data.find(MacroDef);
- if (Res.second != StringRef::npos) {
- Res.first = Buffers[I].BufferID;
- break;
- }
+ if (ExistingIdx < ExistingN) {
+ if (Diags)
+ Diags->Report(diag::err_pch_targetopt_feature_mismatch)
+ << true << ExistingFeatures[ExistingIdx];
+ return true;
}
- return Res;
-}
-
-bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
- StringRef OriginalFileName,
- std::string &SuggestedPredefines,
- FileManager &FileMgr) {
- // We are in the context of an implicit include, so the predefines buffer will
- // have a #include entry for the PCH file itself (as normalized by the
- // preprocessor initialization). Find it and skip over it in the checking
- // below.
- SmallString<256> PCHInclude;
- PCHInclude += "#include \"";
- PCHInclude += HeaderSearch::NormalizeDashIncludePath(OriginalFileName,
- FileMgr);
- PCHInclude += "\"\n";
- std::pair<StringRef,StringRef> Split =
- StringRef(PP.getPredefines()).split(PCHInclude.str());
- StringRef Left = Split.first, Right = Split.second;
- if (Left == PP.getPredefines()) {
- Error("Missing PCH include entry!");
+
+ if (ReadIdx < ReadN) {
+ if (Diags)
+ Diags->Report(diag::err_pch_targetopt_feature_mismatch)
+ << false << ReadFeatures[ReadIdx];
return true;
}
- // If the concatenation of all the PCH buffers is equal to the adjusted
- // command line, we're done.
- SmallVector<StringRef, 2> CommandLine;
- CommandLine.push_back(Left);
- CommandLine.push_back(Right);
- if (EqualConcatenations(CommandLine, Buffers))
- return false;
+ return false;
+}
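
// A standalone sketch (not part of the patch) of the feature-set comparison
// above: a two-pointer walk over two sorted vectors that reports the first
// feature present in one set but missing from the other. Plain std types
// stand in for SmallVector/StringRef; the function name is illustrative.
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Returns true on mismatch, matching checkTargetOptions' convention.
static bool featureSetsMismatch(std::vector<std::string> Read,
                                std::vector<std::string> Existing) {
  std::sort(Read.begin(), Read.end());
  std::sort(Existing.begin(), Existing.end());
  size_t RI = 0, EI = 0;
  while (RI < Read.size() && EI < Existing.size()) {
    if (Read[RI] == Existing[EI]) { ++RI; ++EI; continue; }
    if (Read[RI] < Existing[EI])
      std::printf("AST file has extra feature '%s'\n", Read[RI].c_str());
    else
      std::printf("AST file lacks feature '%s'\n", Existing[EI].c_str());
    return true;
  }
  // Leftover entries on either side are also mismatches.
  return RI < Read.size() || EI < Existing.size();
}
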
- SourceManager &SourceMgr = PP.getSourceManager();
-
- // The predefines buffers are different. Determine what the differences are,
- // and whether they require us to reject the PCH file.
- SmallVector<StringRef, 8> PCHLines;
- for (unsigned I = 0, N = Buffers.size(); I != N; ++I)
- Buffers[I].Data.split(PCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
-
- SmallVector<StringRef, 8> CmdLineLines;
- Left.split(CmdLineLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
-
- // Pick out implicit #includes after the PCH and don't consider them for
- // validation; we will insert them into SuggestedPredefines so that the
- // preprocessor includes them.
- std::string IncludesAfterPCH;
- SmallVector<StringRef, 8> AfterPCHLines;
- Right.split(AfterPCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
- for (unsigned i = 0, e = AfterPCHLines.size(); i != e; ++i) {
- if (AfterPCHLines[i].startswith("#include ")) {
- IncludesAfterPCH += AfterPCHLines[i];
- IncludesAfterPCH += '\n';
- } else {
- CmdLineLines.push_back(AfterPCHLines[i]);
- }
- }
-
- // Make sure we add the includes last into SuggestedPredefines before we
- // exit this function.
- struct AddIncludesRAII {
- std::string &SuggestedPredefines;
- std::string &IncludesAfterPCH;
-
- AddIncludesRAII(std::string &SuggestedPredefines,
- std::string &IncludesAfterPCH)
- : SuggestedPredefines(SuggestedPredefines),
- IncludesAfterPCH(IncludesAfterPCH) { }
- ~AddIncludesRAII() {
- SuggestedPredefines += IncludesAfterPCH;
- }
- } AddIncludes(SuggestedPredefines, IncludesAfterPCH);
-
- // Sort both sets of predefined buffer lines, since we allow some extra
- // definitions and they may appear at any point in the output.
- std::sort(CmdLineLines.begin(), CmdLineLines.end());
- std::sort(PCHLines.begin(), PCHLines.end());
-
- // Determine which predefines that were used to build the PCH file are missing
- // from the command line.
- std::vector<StringRef> MissingPredefines;
- std::set_difference(PCHLines.begin(), PCHLines.end(),
- CmdLineLines.begin(), CmdLineLines.end(),
- std::back_inserter(MissingPredefines));
-
- bool MissingDefines = false;
- bool ConflictingDefines = false;
- for (unsigned I = 0, N = MissingPredefines.size(); I != N; ++I) {
- StringRef Missing = MissingPredefines[I];
- if (Missing.startswith("#include ")) {
- // An -include was specified when generating the PCH; it is included in
- // the PCH, just ignore it.
- continue;
- }
- if (!Missing.startswith("#define ")) {
- Reader.Diag(diag::warn_pch_compiler_options_mismatch);
- return true;
- }
+bool
+PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts,
+ bool Complain) {
+ const LangOptions &ExistingLangOpts = PP.getLangOpts();
+ return checkLanguageOptions(LangOpts, ExistingLangOpts,
+ Complain? &Reader.Diags : 0);
+}
- // This is a macro definition. Determine the name of the macro we're
- // defining.
- std::string::size_type StartOfMacroName = strlen("#define ");
- std::string::size_type EndOfMacroName
- = Missing.find_first_of("( \n\r", StartOfMacroName);
- assert(EndOfMacroName != std::string::npos &&
- "Couldn't find the end of the macro name");
- StringRef MacroName = Missing.slice(StartOfMacroName, EndOfMacroName);
-
- // Determine whether this macro was given a different definition on the
- // command line.
- std::string MacroDefStart = "#define " + MacroName.str();
- std::string::size_type MacroDefLen = MacroDefStart.size();
- SmallVector<StringRef, 8>::iterator ConflictPos
- = std::lower_bound(CmdLineLines.begin(), CmdLineLines.end(),
- MacroDefStart);
- for (; ConflictPos != CmdLineLines.end(); ++ConflictPos) {
- if (!ConflictPos->startswith(MacroDefStart)) {
- // Different macro; we're done.
- ConflictPos = CmdLineLines.end();
- break;
- }
+bool PCHValidator::ReadTargetOptions(const TargetOptions &TargetOpts,
+ bool Complain) {
+ const TargetOptions &ExistingTargetOpts = PP.getTargetInfo().getTargetOpts();
+ return checkTargetOptions(TargetOpts, ExistingTargetOpts,
+ Complain? &Reader.Diags : 0);
+}
- assert(ConflictPos->size() > MacroDefLen &&
- "Invalid #define in predefines buffer?");
- if ((*ConflictPos)[MacroDefLen] != ' ' &&
- (*ConflictPos)[MacroDefLen] != '(')
- continue; // Longer macro name; keep trying.
+namespace {
+ typedef llvm::StringMap<std::pair<StringRef, bool /*IsUndef*/> >
+ MacroDefinitionsMap;
+}
- // We found a conflicting macro definition.
- break;
- }
+/// \brief Collect the macro definitions provided by the given preprocessor
+/// options.
+static void collectMacroDefinitions(const PreprocessorOptions &PPOpts,
+ MacroDefinitionsMap &Macros,
+ SmallVectorImpl<StringRef> *MacroNames = 0) {
+ for (unsigned I = 0, N = PPOpts.Macros.size(); I != N; ++I) {
+ StringRef Macro = PPOpts.Macros[I].first;
+ bool IsUndef = PPOpts.Macros[I].second;
- if (ConflictPos != CmdLineLines.end()) {
- Reader.Diag(diag::warn_cmdline_conflicting_macro_def)
- << MacroName;
+ std::pair<StringRef, StringRef> MacroPair = Macro.split('=');
+ StringRef MacroName = MacroPair.first;
+ StringRef MacroBody = MacroPair.second;
- // Show the definition of this macro within the PCH file.
- std::pair<FileID, StringRef::size_type> MacroLoc =
- FindMacro(Buffers, Missing);
- assert(MacroLoc.second!=StringRef::npos && "Unable to find macro!");
- SourceLocation PCHMissingLoc =
- SourceMgr.getLocForStartOfFile(MacroLoc.first)
- .getLocWithOffset(MacroLoc.second);
- Reader.Diag(PCHMissingLoc, diag::note_pch_macro_defined_as) << MacroName;
+ // For an #undef'd macro, we only care about the name.
+ if (IsUndef) {
+ if (MacroNames && !Macros.count(MacroName))
+ MacroNames->push_back(MacroName);
- ConflictingDefines = true;
+ Macros[MacroName] = std::make_pair("", true);
continue;
}
- // If the macro doesn't conflict, then we'll just pick up the macro
- // definition from the PCH file. Warn the user that they made a mistake.
- if (ConflictingDefines)
- continue; // Don't complain if there are already conflicting defs
-
- if (!MissingDefines) {
- Reader.Diag(diag::warn_cmdline_missing_macro_defs);
- MissingDefines = true;
+ // For a #define'd macro, figure out the actual definition.
+ if (MacroName.size() == Macro.size())
+ MacroBody = "1";
+ else {
+ // Note: GCC drops anything following an end-of-line character.
+ StringRef::size_type End = MacroBody.find_first_of("\n\r");
+ MacroBody = MacroBody.substr(0, End);
}
- // Show the definition of this macro within the PCH file.
- std::pair<FileID, StringRef::size_type> MacroLoc =
- FindMacro(Buffers, Missing);
- assert(MacroLoc.second!=StringRef::npos && "Unable to find macro!");
- SourceLocation PCHMissingLoc =
- SourceMgr.getLocForStartOfFile(MacroLoc.first)
- .getLocWithOffset(MacroLoc.second);
- Reader.Diag(PCHMissingLoc, diag::note_using_macro_def_from_pch);
+ if (MacroNames && !Macros.count(MacroName))
+ MacroNames->push_back(MacroName);
+ Macros[MacroName] = std::make_pair(MacroBody, false);
}
+}
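
// A standalone sketch (not part of the patch) of the "-D NAME=VALUE" parsing
// done by collectMacroDefinitions: split at the first '=', default the body
// to "1" when none is given, and drop anything after an end-of-line character
// (the GCC behavior noted above). splitMacro is a hypothetical helper.
#include <string>
#include <utility>

static std::pair<std::string, std::string>
splitMacro(const std::string &Macro) {
  std::string::size_type Eq = Macro.find('=');
  if (Eq == std::string::npos)
    return std::make_pair(Macro, std::string("1")); // "FOO" means FOO=1
  std::string Name = Macro.substr(0, Eq);
  std::string Body = Macro.substr(Eq + 1);
  // Note: GCC drops anything following an end-of-line character.
  std::string::size_type End = Body.find_first_of("\n\r");
  if (End != std::string::npos)
    Body.erase(End);
  return std::make_pair(Name, Body);
}
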
+
+/// \brief Check the preprocessor options deserialized from the control block
+/// against the preprocessor options in an existing preprocessor.
+///
+/// \param Diags If non-null, produce diagnostics for any mismatches incurred.
+static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ const PreprocessorOptions &ExistingPPOpts,
+ DiagnosticsEngine *Diags,
+ FileManager &FileMgr,
+ std::string &SuggestedPredefines) {
+ // Check macro definitions.
+ MacroDefinitionsMap ASTFileMacros;
+ collectMacroDefinitions(PPOpts, ASTFileMacros);
+ MacroDefinitionsMap ExistingMacros;
+ SmallVector<StringRef, 4> ExistingMacroNames;
+ collectMacroDefinitions(ExistingPPOpts, ExistingMacros, &ExistingMacroNames);
+
+ for (unsigned I = 0, N = ExistingMacroNames.size(); I != N; ++I) {
+ // Dig out the macro definition in the existing preprocessor options.
+ StringRef MacroName = ExistingMacroNames[I];
+ std::pair<StringRef, bool> Existing = ExistingMacros[MacroName];
+
+ // Check whether we know anything about this macro name or not.
+ llvm::StringMap<std::pair<StringRef, bool /*IsUndef*/> >::iterator Known
+ = ASTFileMacros.find(MacroName);
+ if (Known == ASTFileMacros.end()) {
+ // FIXME: Check whether this identifier was referenced anywhere in the
+ // AST file. If so, we should reject the AST file. Unfortunately, this
+ // information isn't in the control block. What shall we do about it?
+
+ if (Existing.second) {
+ SuggestedPredefines += "#undef ";
+ SuggestedPredefines += MacroName.str();
+ SuggestedPredefines += '\n';
+ } else {
+ SuggestedPredefines += "#define ";
+ SuggestedPredefines += MacroName.str();
+ SuggestedPredefines += ' ';
+ SuggestedPredefines += Existing.first.str();
+ SuggestedPredefines += '\n';
+ }
+ continue;
+ }
- if (ConflictingDefines)
- return true;
-
- // Determine what predefines were introduced based on command-line
- // parameters that were not present when building the PCH
- // file. Extra #defines are okay, so long as the identifiers being
- // defined were not used within the precompiled header.
- std::vector<StringRef> ExtraPredefines;
- std::set_difference(CmdLineLines.begin(), CmdLineLines.end(),
- PCHLines.begin(), PCHLines.end(),
- std::back_inserter(ExtraPredefines));
- for (unsigned I = 0, N = ExtraPredefines.size(); I != N; ++I) {
- StringRef &Extra = ExtraPredefines[I];
- if (!Extra.startswith("#define ")) {
- Reader.Diag(diag::warn_pch_compiler_options_mismatch);
+ // If the macro was defined in one but undef'd in the other, we have a
+ // conflict.
+ if (Existing.second != Known->second.second) {
+ if (Diags) {
+ Diags->Report(diag::err_pch_macro_def_undef)
+ << MacroName << Known->second.second;
+ }
return true;
}
- // This is an extra macro definition. Determine the name of the
- // macro we're defining.
- std::string::size_type StartOfMacroName = strlen("#define ");
- std::string::size_type EndOfMacroName
- = Extra.find_first_of("( \n\r", StartOfMacroName);
- assert(EndOfMacroName != std::string::npos &&
- "Couldn't find the end of the macro name");
- StringRef MacroName = Extra.slice(StartOfMacroName, EndOfMacroName);
-
- // Check whether this name was used somewhere in the PCH file. If
- // so, defining it as a macro could change behavior, so we reject
- // the PCH file.
- if (IdentifierInfo *II = Reader.get(MacroName)) {
- Reader.Diag(diag::warn_macro_name_used_in_pch) << II;
- return true;
+ // If the macro was #undef'd in both, or if the macro bodies are identical,
+ // it's fine.
+ if (Existing.second || Existing.first == Known->second.first)
+ continue;
+
+ // The macro bodies differ; complain.
+ if (Diags) {
+ Diags->Report(diag::err_pch_macro_def_conflict)
+ << MacroName << Known->second.first << Existing.first;
}
+ return true;
+ }
- // Add this definition to the suggested predefines buffer.
- SuggestedPredefines += Extra;
- SuggestedPredefines += '\n';
+ // Check whether we're using predefines.
+ if (PPOpts.UsePredefines != ExistingPPOpts.UsePredefines) {
+ if (Diags) {
+ Diags->Report(diag::err_pch_undef) << ExistingPPOpts.UsePredefines;
+ }
+ return true;
+ }
+
+ // Compute the #include and #include_macros lines we need.
+ for (unsigned I = 0, N = ExistingPPOpts.Includes.size(); I != N; ++I) {
+ StringRef File = ExistingPPOpts.Includes[I];
+ if (File == ExistingPPOpts.ImplicitPCHInclude)
+ continue;
+
+ if (std::find(PPOpts.Includes.begin(), PPOpts.Includes.end(), File)
+ != PPOpts.Includes.end())
+ continue;
+
+ SuggestedPredefines += "#include \"";
+ SuggestedPredefines +=
+ HeaderSearch::NormalizeDashIncludePath(File, FileMgr);
+ SuggestedPredefines += "\"\n";
+ }
+
+ for (unsigned I = 0, N = ExistingPPOpts.MacroIncludes.size(); I != N; ++I) {
+ StringRef File = ExistingPPOpts.MacroIncludes[I];
+ if (std::find(PPOpts.MacroIncludes.begin(), PPOpts.MacroIncludes.end(),
+ File)
+ != PPOpts.MacroIncludes.end())
+ continue;
+
+ SuggestedPredefines += "#__include_macros \"";
+ SuggestedPredefines +=
+ HeaderSearch::NormalizeDashIncludePath(File, FileMgr);
+ SuggestedPredefines += "\"\n##\n";
}
- // If we get here, it's because the predefines buffer had compatible
- // contents. Accept the PCH file.
return false;
}
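
// A sketch (not part of the patch) of the recovery step above: every -include
// from the current invocation that the AST file does not already provide is
// re-emitted into the predefines buffer, skipping the PCH's own implicit
// include. Plain std types stand in for the clang option classes, and path
// normalization is omitted.
#include <algorithm>
#include <string>
#include <vector>

static std::string suggestIncludes(const std::vector<std::string> &Existing,
                                   const std::vector<std::string> &InASTFile,
                                   const std::string &ImplicitPCHInclude) {
  std::string Suggested;
  for (size_t I = 0, N = Existing.size(); I != N; ++I) {
    const std::string &File = Existing[I];
    if (File == ImplicitPCHInclude) // the PCH itself needs no #include line
      continue;
    if (std::find(InASTFile.begin(), InASTFile.end(), File) != InASTFile.end())
      continue; // already covered by the AST file
    Suggested += "#include \"" + File + "\"\n";
  }
  return Suggested;
}
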
+bool PCHValidator::ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool Complain,
+ std::string &SuggestedPredefines) {
+ const PreprocessorOptions &ExistingPPOpts = PP.getPreprocessorOpts();
+
+ return checkPreprocessorOptions(PPOpts, ExistingPPOpts,
+ Complain? &Reader.Diags : 0,
+ PP.getFileManager(),
+ SuggestedPredefines);
+}
+
void PCHValidator::ReadHeaderFileInfo(const HeaderFileInfo &HFI,
unsigned ID) {
PP.getHeaderSearchInfo().setHeaderFileInfoForUID(HFI, ID);
++NumHeaderInfos;
}
-void PCHValidator::ReadCounter(unsigned Value) {
+void PCHValidator::ReadCounter(const ModuleFile &M, unsigned Value) {
PP.setCounterValue(Value);
}
@@ -527,6 +483,7 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
return II;
}
+ unsigned ObjCOrBuiltinID = ReadUnalignedLE16(d);
unsigned Bits = ReadUnalignedLE16(d);
bool CPlusPlusOperatorKeyword = Bits & 0x01;
Bits >>= 1;
@@ -536,13 +493,11 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
Bits >>= 1;
bool ExtensionToken = Bits & 0x01;
Bits >>= 1;
- bool hasMacroDefinition = Bits & 0x01;
+ bool hadMacroDefinition = Bits & 0x01;
Bits >>= 1;
- unsigned ObjCOrBuiltinID = Bits & 0x7FF;
- Bits >>= 11;
assert(Bits == 0 && "Extra bits in the identifier?");
- DataLen -= 6;
+ DataLen -= 8;
// Build the IdentifierInfo itself and link the identifier ID with
// the new IdentifierInfo.
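
// A sketch (not part of the patch) of the flag decoding in this hunk: the
// record packs several booleans into one little-endian 16-bit word that is
// consumed low bit first, and the trailing assert verifies no unknown bits
// remain. Only the flags visible in the hunk are shown here.
#include <cassert>
#include <cstdint>

static void decodeIdentifierBits(uint16_t Bits) {
  bool CPlusPlusOperatorKeyword = Bits & 0x01; Bits >>= 1;
  // ... further single-bit flags elided in the hunk ...
  bool ExtensionToken = Bits & 0x01; Bits >>= 1;
  bool HadMacroDefinition = Bits & 0x01; Bits >>= 1;
  (void)CPlusPlusOperatorKeyword; (void)ExtensionToken; (void)HadMacroDefinition;
  assert(Bits == 0 && "Extra bits in the identifier?");
}
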
@@ -570,31 +525,14 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
// If this identifier is a macro, deserialize the macro
// definition.
- if (hasMacroDefinition) {
- // FIXME: Check for conflicts?
- uint32_t Offset = ReadUnalignedLE32(d);
- unsigned LocalSubmoduleID = ReadUnalignedLE32(d);
-
- // Determine whether this macro definition should be visible now, or
- // whether it is in a hidden submodule.
- bool Visible = true;
- if (SubmoduleID GlobalSubmoduleID
- = Reader.getGlobalSubmoduleID(F, LocalSubmoduleID)) {
- if (Module *Owner = Reader.getSubmodule(GlobalSubmoduleID)) {
- if (Owner->NameVisibility == Module::Hidden) {
- // The owning module is not visible, and this macro definition should
- // not be, either.
- Visible = false;
-
- // Note that this macro definition was hidden because its owning
- // module is not yet visible.
- Reader.HiddenNamesMap[Owner].push_back(II);
- }
- }
+ if (hadMacroDefinition) {
+ SmallVector<MacroID, 4> MacroIDs;
+ while (uint32_t LocalID = ReadUnalignedLE32(d)) {
+ MacroIDs.push_back(Reader.getGlobalMacroID(F, LocalID));
+ DataLen -= 4;
}
-
- Reader.setIdentifierIsMacro(II, F, Offset, Visible);
- DataLen -= 8;
+ DataLen -= 4;
+ Reader.setIdentifierIsMacro(II, MacroIDs);
}
Reader.SetIdentifierInfo(ID, II);
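
// A sketch (not part of the patch) of the new on-disk encoding read above:
// macro IDs are stored as a zero-terminated run of unaligned little-endian
// 32-bit values, and DataLen shrinks by 4 per entry plus 4 for the
// terminator. readLE32 is a stand-in for clang::io::ReadUnalignedLE32, and a
// well-formed, zero-terminated buffer is assumed.
#include <cstdint>
#include <vector>

static uint32_t readLE32(const unsigned char *&D) {
  uint32_t V = (uint32_t)D[0] | ((uint32_t)D[1] << 8) |
               ((uint32_t)D[2] << 16) | ((uint32_t)D[3] << 24);
  D += 4;
  return V;
}

static std::vector<uint32_t> readMacroIDList(const unsigned char *&D,
                                             unsigned &DataLen) {
  std::vector<uint32_t> IDs;
  while (uint32_t LocalID = readLE32(D)) {
    IDs.push_back(LocalID);
    DataLen -= 4;
  }
  DataLen -= 4; // the zero terminator also occupies four bytes
  return IDs;
}
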
@@ -780,16 +718,6 @@ void ASTReader::Error(unsigned DiagID,
Diag(DiagID) << Arg1 << Arg2;
}
-/// \brief Tell the AST listener about the predefines buffers in the chain.
-bool ASTReader::CheckPredefinesBuffers() {
- if (Listener)
- return Listener->ReadPredefinesBuffer(PCHPredefinesBuffers,
- ActualOriginalFileName,
- SuggestedPredefines,
- FileMgr);
- return false;
-}
-
//===----------------------------------------------------------------------===//
// Source Manager Deserialization
//===----------------------------------------------------------------------===//
@@ -808,7 +736,7 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
unsigned FilenameLen = Record[Idx++];
std::string Filename(&Record[Idx], &Record[Idx] + FilenameLen);
Idx += FilenameLen;
- MaybeAddSystemRootToFilename(Filename);
+ MaybeAddSystemRootToFilename(F, Filename);
FileIDs[I] = LineTable.getLineTableFilenameID(Filename);
}
@@ -841,106 +769,8 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
return false;
}
-namespace {
-
-class ASTStatData {
-public:
- const ino_t ino;
- const dev_t dev;
- const mode_t mode;
- const time_t mtime;
- const off_t size;
-
- ASTStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s)
- : ino(i), dev(d), mode(mo), mtime(m), size(s) {}
-};
-
-class ASTStatLookupTrait {
- public:
- typedef const char *external_key_type;
- typedef const char *internal_key_type;
-
- typedef ASTStatData data_type;
-
- static unsigned ComputeHash(const char *path) {
- return llvm::HashString(path);
- }
-
- static internal_key_type GetInternalKey(const char *path) { return path; }
-
- static bool EqualKey(internal_key_type a, internal_key_type b) {
- return strcmp(a, b) == 0;
- }
-
- static std::pair<unsigned, unsigned>
- ReadKeyDataLength(const unsigned char*& d) {
- unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d);
- unsigned DataLen = (unsigned) *d++;
- return std::make_pair(KeyLen + 1, DataLen);
- }
-
- static internal_key_type ReadKey(const unsigned char *d, unsigned) {
- return (const char *)d;
- }
-
- static data_type ReadData(const internal_key_type, const unsigned char *d,
- unsigned /*DataLen*/) {
- using namespace clang::io;
-
- ino_t ino = (ino_t) ReadUnalignedLE32(d);
- dev_t dev = (dev_t) ReadUnalignedLE32(d);
- mode_t mode = (mode_t) ReadUnalignedLE16(d);
- time_t mtime = (time_t) ReadUnalignedLE64(d);
- off_t size = (off_t) ReadUnalignedLE64(d);
- return data_type(ino, dev, mode, mtime, size);
- }
-};
-
-/// \brief stat() cache for precompiled headers.
-///
-/// This cache is very similar to the stat cache used by pretokenized
-/// headers.
-class ASTStatCache : public FileSystemStatCache {
- typedef OnDiskChainedHashTable<ASTStatLookupTrait> CacheTy;
- CacheTy *Cache;
-
- unsigned &NumStatHits, &NumStatMisses;
-public:
- ASTStatCache(const unsigned char *Buckets, const unsigned char *Base,
- unsigned &NumStatHits, unsigned &NumStatMisses)
- : Cache(0), NumStatHits(NumStatHits), NumStatMisses(NumStatMisses) {
- Cache = CacheTy::Create(Buckets, Base);
- }
-
- ~ASTStatCache() { delete Cache; }
-
- LookupResult getStat(const char *Path, struct stat &StatBuf,
- int *FileDescriptor) {
- // Do the lookup for the file's data in the AST file.
- CacheTy::iterator I = Cache->find(Path);
-
- // If we don't get a hit in the AST file just forward to 'stat'.
- if (I == Cache->end()) {
- ++NumStatMisses;
- return statChained(Path, StatBuf, FileDescriptor);
- }
-
- ++NumStatHits;
- ASTStatData Data = *I;
-
- StatBuf.st_ino = Data.ino;
- StatBuf.st_dev = Data.dev;
- StatBuf.st_mtime = Data.mtime;
- StatBuf.st_mode = Data.mode;
- StatBuf.st_size = Data.size;
- return CacheExists;
- }
-};
-} // end anonymous namespace
-
-
/// \brief Read a source manager block
-ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
+bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
using namespace SrcMgr;
llvm::BitstreamCursor &SLocEntryCursor = F.SLocEntryCursor;
@@ -954,13 +784,13 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
// The stream itself is going to skip over the source manager block.
if (F.Stream.SkipBlock()) {
Error("malformed block record in AST file");
- return Failure;
+ return true;
}
// Enter the source manager block.
if (SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID)) {
Error("malformed source manager block record in AST file");
- return Failure;
+ return true;
}
RecordData Record;
@@ -969,9 +799,9 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
if (Code == llvm::bitc::END_BLOCK) {
if (SLocEntryCursor.ReadBlockEnd()) {
Error("error at end of Source Manager block in AST file");
- return Failure;
+ return true;
}
- return Success;
+ return false;
}
if (Code == llvm::bitc::ENTER_SUBBLOCK) {
@@ -979,7 +809,7 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
SLocEntryCursor.ReadSubBlockID();
if (SLocEntryCursor.SkipBlock()) {
Error("malformed block record in AST file");
- return Failure;
+ return true;
}
continue;
}
@@ -1001,7 +831,7 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
case SM_SLOC_BUFFER_ENTRY:
case SM_SLOC_EXPANSION_ENTRY:
// Once we hit one of the source location entries, we're done.
- return Success;
+ return false;
}
}
}
@@ -1039,14 +869,13 @@ resolveFileRelativeToOriginalDir(const std::string &Filename,
return currPCHPath.str();
}
-/// \brief Read in the source location entry with the given ID.
-ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
+bool ASTReader::ReadSLocEntry(int ID) {
if (ID == 0)
- return Success;
+ return false;
if (unsigned(-ID) - 2 >= getTotalNumSLocs() || ID > 0) {
Error("source location entry ID out-of-range for AST file");
- return Failure;
+ return true;
}
ModuleFile *F = GlobalSLocEntryMap.find(-ID)->second;
@@ -1060,7 +889,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
Code == llvm::bitc::ENTER_SUBBLOCK ||
Code == llvm::bitc::DEFINE_ABBREV) {
Error("incorrectly-formatted source location entry in AST file");
- return Failure;
+ return true;
}
RecordData Record;
@@ -1069,58 +898,18 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
default:
Error("incorrectly-formatted source location entry in AST file");
- return Failure;
+ return true;
case SM_SLOC_FILE_ENTRY: {
- if (Record.size() < 7) {
- Error("source location entry is incorrect");
- return Failure;
- }
-
// We will detect whether a file changed and return 'Failure' for it, but
// we will also try to fail gracefully by setting up the SLocEntry.
- ASTReader::ASTReadResult Result = Success;
-
- bool OverriddenBuffer = Record[6];
-
- std::string OrigFilename(BlobStart, BlobStart + BlobLen);
- std::string Filename = OrigFilename;
- MaybeAddSystemRootToFilename(Filename);
- const FileEntry *File =
- OverriddenBuffer? FileMgr.getVirtualFile(Filename, (off_t)Record[4],
- (time_t)Record[5])
- : FileMgr.getFile(Filename, /*OpenFile=*/false);
- if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() &&
- OriginalDir != CurrentDir) {
- std::string resolved = resolveFileRelativeToOriginalDir(Filename,
- OriginalDir,
- CurrentDir);
- if (!resolved.empty())
- File = FileMgr.getFile(resolved);
- }
- if (File == 0)
- File = FileMgr.getVirtualFile(Filename, (off_t)Record[4],
- (time_t)Record[5]);
- if (File == 0) {
- std::string ErrorStr = "could not find file '";
- ErrorStr += Filename;
- ErrorStr += "' referenced by AST file";
- Error(ErrorStr.c_str());
- return Failure;
- }
+ unsigned InputID = Record[4];
+ InputFile IF = getInputFile(*F, InputID);
+ const FileEntry *File = IF.getPointer();
+ bool OverriddenBuffer = IF.getInt();
- if (!DisableValidation &&
- ((off_t)Record[4] != File->getSize()
-#if !defined(LLVM_ON_WIN32)
- // In our regression testing, the Windows file system seems to
- // have inconsistent modification times that sometimes
- // erroneously trigger this error-handling path.
- || (time_t)Record[5] != File->getModificationTime()
-#endif
- )) {
- Error(diag::err_fe_pch_file_modified, Filename);
- Result = Failure;
- }
+ if (!IF.getPointer())
+ return true;
SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
if (IncludeLoc.isInvalid() && F->Kind != MK_MainFile) {
@@ -1133,12 +922,12 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
ID, BaseOffset + Record[0]);
SrcMgr::FileInfo &FileInfo =
const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile());
- FileInfo.NumCreatedFIDs = Record[7];
+ FileInfo.NumCreatedFIDs = Record[5];
if (Record[3])
FileInfo.setHasLineDirectives();
- const DeclID *FirstDecl = F->FileSortedDecls + Record[8];
- unsigned NumFileDecls = Record[9];
+ const DeclID *FirstDecl = F->FileSortedDecls + Record[6];
+ unsigned NumFileDecls = Record[7];
if (NumFileDecls) {
assert(F->FileSortedDecls && "FILE_SORTED_DECLS not encountered yet ?");
FileDeclIDs[FID] = FileDeclsInfo(F, llvm::makeArrayRef(FirstDecl,
@@ -1157,23 +946,24 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
if (RecCode != SM_SLOC_BUFFER_BLOB) {
Error("AST record has invalid code");
- return Failure;
+ return true;
}
llvm::MemoryBuffer *Buffer
= llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1),
- Filename);
+ File->getName());
SourceMgr.overrideFileContents(File, Buffer);
}
- if (Result == Failure)
- return Failure;
break;
}
case SM_SLOC_BUFFER_ENTRY: {
const char *Name = BlobStart;
unsigned Offset = Record[0];
+ SrcMgr::CharacteristicKind
+ FileCharacter = (SrcMgr::CharacteristicKind)Record[2];
+ SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
unsigned Code = SLocEntryCursor.ReadCode();
Record.clear();
unsigned RecCode
@@ -1181,23 +971,14 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
if (RecCode != SM_SLOC_BUFFER_BLOB) {
Error("AST record has invalid code");
- return Failure;
+ return true;
}
llvm::MemoryBuffer *Buffer
= llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1),
Name);
- FileID BufferID = SourceMgr.createFileIDForMemBuffer(Buffer, ID,
- BaseOffset + Offset);
-
- if (strcmp(Name, "<built-in>") == 0 && F->Kind == MK_PCH) {
- PCHPredefinesBlock Block = {
- BufferID,
- StringRef(BlobStart, BlobLen - 1)
- };
- PCHPredefinesBuffers.push_back(Block);
- }
-
+ SourceMgr.createFileIDForMemBuffer(Buffer, FileCharacter, ID,
+ BaseOffset + Offset, IncludeLoc);
break;
}
@@ -1213,7 +994,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
}
}
- return Success;
+ return false;
}
/// \brief Find the location where the module F is imported.
@@ -1258,7 +1039,8 @@ bool ASTReader::ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor,
}
}
-void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
+void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset,
+ MacroInfo *Hint) {
llvm::BitstreamCursor &Stream = F.MacroCursor;
// Keep track of where we are in the stream, then jump back there
@@ -1270,6 +1052,24 @@ void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
SmallVector<IdentifierInfo*, 16> MacroArgs;
MacroInfo *Macro = 0;
+ // RAII object to add the loaded macro information once we're done
+ // adding tokens.
+ struct AddLoadedMacroInfoRAII {
+ Preprocessor &PP;
+ MacroInfo *Hint;
+ MacroInfo *MI;
+ IdentifierInfo *II;
+
+ AddLoadedMacroInfoRAII(Preprocessor &PP, MacroInfo *Hint)
+ : PP(PP), Hint(Hint), MI(), II() { }
+ ~AddLoadedMacroInfoRAII() {
+ if (MI) {
+ // Finally, install the macro.
+ PP.addLoadedMacroInfo(II, MI, Hint);
+ }
+ }
+ } AddLoadedMacroInfo(PP, Hint);
+
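
// The AddLoadedMacroInfoRAII object above is an instance of a common idiom:
// defer a final action to destruction so that every early 'return' in the
// long record-reading loop still installs the macro. A generic sketch (not
// part of the patch, nothing clang-specific):
#include <functional>

struct ScopeExit {
  std::function<void()> Fn;
  explicit ScopeExit(std::function<void()> F) : Fn(F) {}
  ~ScopeExit() { if (Fn) Fn(); }
};
// Usage, mirroring the struct above:
//   ScopeExit Install([&] { if (MI) PP.addLoadedMacroInfo(II, MI, Hint); });
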
while (true) {
unsigned Code = Stream.ReadCode();
switch (Code) {
@@ -1312,18 +1112,31 @@ void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
Error("macro must have a name in AST file");
return;
}
-
- SourceLocation Loc = ReadSourceLocation(F, Record[1]);
- bool isUsed = Record[2];
+ unsigned GlobalID = getGlobalMacroID(F, Record[1]);
+
+ // If this macro has already been loaded, don't do so again.
+ if (MacrosLoaded[GlobalID - NUM_PREDEF_MACRO_IDS])
+ return;
+
+ SubmoduleID GlobalSubmoduleID = getGlobalSubmoduleID(F, Record[2]);
+ unsigned NextIndex = 3;
+ SourceLocation Loc = ReadSourceLocation(F, Record, NextIndex);
MacroInfo *MI = PP.AllocateMacroInfo(Loc);
- MI->setIsUsed(isUsed);
+
+ // Record this macro.
+ MacrosLoaded[GlobalID - NUM_PREDEF_MACRO_IDS] = MI;
+
+ SourceLocation UndefLoc = ReadSourceLocation(F, Record, NextIndex);
+ if (UndefLoc.isValid())
+ MI->setUndefLoc(UndefLoc);
+
+ MI->setIsUsed(Record[NextIndex++]);
MI->setIsFromAST();
- bool IsPublic = Record[3];
- unsigned NextIndex = 4;
+ bool IsPublic = Record[NextIndex++];
MI->setVisibility(IsPublic, ReadSourceLocation(F, Record, NextIndex));
-
+
if (RecType == PP_MACRO_FUNCTION_LIKE) {
// Decode function-like macro info.
bool isC99VarArgs = Record[NextIndex++];
@@ -1341,8 +1154,60 @@ void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
PP.getPreprocessorAllocator());
}
- // Finally, install the macro.
- PP.setMacroInfo(II, MI, /*LoadedFromAST=*/true);
+ if (DeserializationListener)
+ DeserializationListener->MacroRead(GlobalID, MI);
+
+ // If an update record marked this as undefined, do so now.
+ // FIXME: Only if the submodule this update came from is visible?
+ MacroUpdatesMap::iterator Update = MacroUpdates.find(GlobalID);
+ if (Update != MacroUpdates.end()) {
+ if (MI->getUndefLoc().isInvalid()) {
+ for (unsigned I = 0, N = Update->second.size(); I != N; ++I) {
+ bool Hidden = false;
+ if (unsigned SubmoduleID = Update->second[I].first) {
+ if (Module *Owner = getSubmodule(SubmoduleID)) {
+ if (Owner->NameVisibility == Module::Hidden) {
+ // Note that this #undef is hidden.
+ Hidden = true;
+
+ // Record this hiding for later.
+ HiddenNamesMap[Owner].push_back(
+ HiddenName(II, MI, Update->second[I].second.UndefLoc));
+ }
+ }
+ }
+
+ if (!Hidden) {
+ MI->setUndefLoc(Update->second[I].second.UndefLoc);
+ if (PPMutationListener *Listener = PP.getPPMutationListener())
+ Listener->UndefinedMacro(MI);
+ break;
+ }
+ }
+ }
+ MacroUpdates.erase(Update);
+ }
+
+ // Determine whether this macro definition is visible.
+ bool Hidden = !MI->isPublic();
+ if (!Hidden && GlobalSubmoduleID) {
+ if (Module *Owner = getSubmodule(GlobalSubmoduleID)) {
+ if (Owner->NameVisibility == Module::Hidden) {
+ // The owning module is not visible, and this macro definition
+ // should not be, either.
+ Hidden = true;
+
+ // Note that this macro definition was hidden because its owning
+ // module is not yet visible.
+ HiddenNamesMap[Owner].push_back(HiddenName(II, MI));
+ }
+ }
+ }
+ MI->setHidden(Hidden);
+
+ // Make sure we install the macro once we're done.
+ AddLoadedMacroInfo.MI = MI;
+ AddLoadedMacroInfo.II = II;
// Remember that we saw this macro last so that we add the tokens that
// form its body to it.
@@ -1451,18 +1316,16 @@ HeaderFileInfoTrait::ReadData(const internal_key_type, const unsigned char *d,
return HFI;
}
-void ASTReader::setIdentifierIsMacro(IdentifierInfo *II, ModuleFile &F,
- uint64_t LocalOffset, bool Visible) {
- if (Visible) {
- // Note that this identifier has a macro definition.
- II->setHasMacroDefinition(true);
- }
-
- // Adjust the offset to a global offset.
- UnreadMacroRecordOffsets[II] = F.GlobalBitOffset + LocalOffset;
+void ASTReader::setIdentifierIsMacro(IdentifierInfo *II, ArrayRef<MacroID> IDs){
+ II->setHadMacroDefinition(true);
+ assert(NumCurrentElementsDeserializing > 0 && "Missing deserialization guard");
+ PendingMacroIDs[II].append(IDs.begin(), IDs.end());
}
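
// setIdentifierIsMacro no longer deserializes macro definitions eagerly; it
// just records the IDs for later resolution. A sketch (not part of the patch)
// of that deferral pattern, with std::map and std::vector standing in for the
// DenseMap-backed PendingMacroIDs:
#include <map>
#include <vector>

typedef unsigned MacroID;
struct Ident { bool HadMacroDefinition; };

static std::map<Ident *, std::vector<MacroID> > PendingMacroIDs;

static void noteMacroIDs(Ident *II, const std::vector<MacroID> &IDs) {
  II->HadMacroDefinition = true;
  std::vector<MacroID> &Pending = PendingMacroIDs[II];
  Pending.insert(Pending.end(), IDs.begin(), IDs.end()); // resolved later
}
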
void ASTReader::ReadDefinedMacros() {
+ // Note that we are loading defined macros.
+ Deserializing Macros(this);
+
for (ModuleReverseIterator I = ModuleMgr.rbegin(),
E = ModuleMgr.rend(); I != E; ++I) {
llvm::BitstreamCursor &MacroCursor = (*I)->MacroCursor;
@@ -1514,26 +1377,6 @@ void ASTReader::ReadDefinedMacros() {
}
}
}
-
- // Drain the unread macro-record offsets map.
- while (!UnreadMacroRecordOffsets.empty())
- LoadMacroDefinition(UnreadMacroRecordOffsets.begin());
-}
-
-void ASTReader::LoadMacroDefinition(
- llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos) {
- assert(Pos != UnreadMacroRecordOffsets.end() && "Unknown macro definition");
- uint64_t Offset = Pos->second;
- UnreadMacroRecordOffsets.erase(Pos);
-
- RecordLocation Loc = getLocalBitOffset(Offset);
- ReadMacroRecord(*Loc.F, Loc.Offset);
-}
-
-void ASTReader::LoadMacroDefinition(IdentifierInfo *II) {
- llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos
- = UnreadMacroRecordOffsets.find(II);
- LoadMacroDefinition(Pos);
}
namespace {
@@ -1582,6 +1425,9 @@ namespace {
}
void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
+ // Note that we are loading an identifier.
+ Deserializing AnIdentifier(this);
+
unsigned PriorGeneration = 0;
if (getContext().getLangOpts().Modules)
PriorGeneration = IdentifierGeneration[&II];
@@ -1602,14 +1448,132 @@ void ASTReader::markIdentifierUpToDate(IdentifierInfo *II) {
IdentifierGeneration[II] = CurrentGeneration;
}
+llvm::PointerIntPair<const FileEntry *, 1, bool>
+ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
+ // If this ID is bogus, just return an empty input file.
+ if (ID == 0 || ID > F.InputFilesLoaded.size())
+ return InputFile();
+
+ // If we've already loaded this input file, return it.
+ if (F.InputFilesLoaded[ID-1].getPointer())
+ return F.InputFilesLoaded[ID-1];
+
+ // Go find this input file.
+ llvm::BitstreamCursor &Cursor = F.InputFilesCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(F.InputFileOffsets[ID-1]);
+
+ unsigned Code = Cursor.ReadCode();
+ RecordData Record;
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ switch ((InputFileRecordTypes)Cursor.ReadRecord(Code, Record,
+ &BlobStart, &BlobLen)) {
+ case INPUT_FILE: {
+ unsigned StoredID = Record[0];
+ assert(ID == StoredID && "Bogus stored ID or offset");
+ (void)StoredID;
+ off_t StoredSize = (off_t)Record[1];
+ time_t StoredTime = (time_t)Record[2];
+ bool Overridden = (bool)Record[3];
+
+ // Get the file entry for this input file.
+ StringRef OrigFilename(BlobStart, BlobLen);
+ std::string Filename = OrigFilename;
+ MaybeAddSystemRootToFilename(F, Filename);
+ const FileEntry *File
+ = Overridden? FileMgr.getVirtualFile(Filename, StoredSize, StoredTime)
+ : FileMgr.getFile(Filename, /*OpenFile=*/false);
+
+ // If we didn't find the file, resolve it relative to the
+ // original directory from which this AST file was created.
+ if (File == 0 && !F.OriginalDir.empty() && !CurrentDir.empty() &&
+ F.OriginalDir != CurrentDir) {
+ std::string Resolved = resolveFileRelativeToOriginalDir(Filename,
+ F.OriginalDir,
+ CurrentDir);
+ if (!Resolved.empty())
+ File = FileMgr.getFile(Resolved);
+ }
+
+ // For an overridden file, create a virtual file with the stored
+ // size/timestamp.
+ if (Overridden && File == 0) {
+ File = FileMgr.getVirtualFile(Filename, StoredSize, StoredTime);
+ }
+
+ if (File == 0) {
+ if (Complain) {
+ std::string ErrorStr = "could not find file '";
+ ErrorStr += Filename;
+ ErrorStr += "' referenced by AST file";
+ Error(ErrorStr.c_str());
+ }
+ return InputFile();
+ }
+
+ // Note that we've loaded this input file.
+ F.InputFilesLoaded[ID-1] = InputFile(File, Overridden);
+
+ // Check if there was a request to override the contents of the file
+ // that was part of the precompiled header. Overriding such a file
+ // can lead to problems when lexing using the source locations from the
+ // PCH.
+ SourceManager &SM = getSourceManager();
+ if (!Overridden && SM.isFileOverridden(File)) {
+ Error(diag::err_fe_pch_file_overridden, Filename);
+ // After emitting the diagnostic, recover by disabling the override so
+ // that the original file will be used.
+ SM.disableFileContentsOverride(File);
+ // The FileEntry is a virtual file entry with the size of the contents
+ // that would override the original contents. Set it to the original's
+ // size/time.
+ FileMgr.modifyFileEntry(const_cast<FileEntry*>(File),
+ StoredSize, StoredTime);
+ }
+
+ // For an overridden file, there is nothing to validate.
+ if (Overridden)
+ return InputFile(File, Overridden);
+
+ // The stat info from the FileEntry came from the cached stat
+ // info of the PCH, so we cannot trust it.
+ struct stat StatBuf;
+ if (::stat(File->getName(), &StatBuf) != 0) {
+ StatBuf.st_size = File->getSize();
+ StatBuf.st_mtime = File->getModificationTime();
+ }
+
+ if ((StoredSize != StatBuf.st_size
+#if !defined(LLVM_ON_WIN32)
+ // In our regression testing, the Windows file system seems to
+ // have inconsistent modification times that sometimes
+ // erroneously trigger this error-handling path.
+ || StoredTime != StatBuf.st_mtime
+#endif
+ )) {
+ if (Complain)
+ Error(diag::err_fe_pch_file_modified, Filename);
+
+ return InputFile();
+ }
+
+ return InputFile(File, Overridden);
+ }
+ }
+
+ return InputFile();
+}
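
// A standalone sketch (not part of the patch) of the validation at the end of
// getInputFile: compare the size/mtime recorded in the AST file against a
// fresh ::stat of the on-disk file, with the same Windows mtime caveat.
// POSIX-only as written; on stat failure the reader above falls back to the
// FileEntry's cached values instead of failing outright.
#include <sys/stat.h>
#include <ctime>

static bool fileChangedSinceAST(const char *Path, off_t StoredSize,
                                time_t StoredTime) {
  struct stat StatBuf;
  if (::stat(Path, &StatBuf) != 0)
    return true; // simplified: treat an unreadable file as changed
  if (StatBuf.st_size != StoredSize)
    return true;
#if !defined(LLVM_ON_WIN32)
  // Windows mtimes proved unreliable in regression testing, so the check
  // is skipped there.
  if (StatBuf.st_mtime != StoredTime)
    return true;
#endif
  return false;
}
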
+
const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) {
+ ModuleFile &M = ModuleMgr.getPrimaryModule();
std::string Filename = filenameStrRef;
- MaybeAddSystemRootToFilename(Filename);
+ MaybeAddSystemRootToFilename(M, Filename);
const FileEntry *File = FileMgr.getFile(Filename);
- if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() &&
- OriginalDir != CurrentDir) {
+ if (File == 0 && !M.OriginalDir.empty() && !CurrentDir.empty() &&
+ M.OriginalDir != CurrentDir) {
std::string resolved = resolveFileRelativeToOriginalDir(Filename,
- OriginalDir,
+ M.OriginalDir,
CurrentDir);
if (!resolved.empty())
File = FileMgr.getFile(resolved);
@@ -1621,9 +1585,10 @@ const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) {
/// \brief If we are loading a relocatable PCH file, and the filename is
/// not an absolute path, add the system root to the beginning of the file
/// name.
-void ASTReader::MaybeAddSystemRootToFilename(std::string &Filename) {
+void ASTReader::MaybeAddSystemRootToFilename(ModuleFile &M,
+ std::string &Filename) {
// If this is not a relocatable PCH file, there's nothing to do.
- if (!RelocatablePCH)
+ if (!M.RelocatablePCH)
return;
if (Filename.empty() || llvm::sys::path::is_absolute(Filename))
@@ -1643,29 +1608,226 @@ void ASTReader::MaybeAddSystemRootToFilename(std::string &Filename) {
}
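
// A sketch (not part of the patch) of the per-module relocation now done by
// MaybeAddSystemRootToFilename: only relocatable PCH files get the system
// root prepended, and only for relative paths. isAbsolute approximates
// llvm::sys::path::is_absolute, and SysRoot is assumed to end with '/'.
#include <string>

static bool isAbsolute(const std::string &P) {
  return !P.empty() && P[0] == '/'; // POSIX-only approximation
}

static void maybeAddSystemRoot(bool RelocatablePCH, const std::string &SysRoot,
                               std::string &Filename) {
  if (!RelocatablePCH)
    return;
  if (Filename.empty() || isAbsolute(Filename))
    return;
  Filename.insert(0, SysRoot); // "usr/include/x.h" -> "<sysroot>/usr/include/x.h"
}
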
ASTReader::ASTReadResult
-ASTReader::ReadASTBlock(ModuleFile &F) {
+ASTReader::ReadControlBlock(ModuleFile &F,
+ llvm::SmallVectorImpl<ModuleFile *> &Loaded,
+ unsigned ClientLoadCapabilities) {
llvm::BitstreamCursor &Stream = F.Stream;
- if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
+ if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) {
Error("malformed block record in AST file");
return Failure;
}
- // Read all of the records and blocks for the ASt file.
+ // Read all of the records and blocks in the control block.
RecordData Record;
while (!Stream.AtEndOfStream()) {
unsigned Code = Stream.ReadCode();
if (Code == llvm::bitc::END_BLOCK) {
if (Stream.ReadBlockEnd()) {
- Error("error at end of module block in AST file");
+ Error("error at end of control block in AST file");
return Failure;
}
+ // Validate all of the input files.
+ if (!DisableValidation) {
+ bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
+ for (unsigned I = 0, N = Record[0]; I < N; ++I)
+ if (!getInputFile(F, I+1, Complain).getPointer())
+ return OutOfDate;
+ }
+
return Success;
}
if (Code == llvm::bitc::ENTER_SUBBLOCK) {
switch (Stream.ReadSubBlockID()) {
+ case INPUT_FILES_BLOCK_ID:
+ F.InputFilesCursor = Stream;
+ if (Stream.SkipBlock() || // Skip with the main cursor
+ // Read the abbreviations
+ ReadBlockAbbrevs(F.InputFilesCursor, INPUT_FILES_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
+
+ default:
+ if (!Stream.SkipBlock())
+ continue;
+ break;
+ }
+
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read and process a record.
+ Record.clear();
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ switch ((ControlRecordTypes)Stream.ReadRecord(Code, Record,
+ &BlobStart, &BlobLen)) {
+ case METADATA: {
+ if (Record[0] != VERSION_MAJOR && !DisableValidation) {
+ if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
+ Diag(Record[0] < VERSION_MAJOR? diag::warn_pch_version_too_old
+ : diag::warn_pch_version_too_new);
+ return VersionMismatch;
+ }
+
+ bool hasErrors = Record[5];
+ if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
+ Diag(diag::err_pch_with_compiler_errors);
+ return HadErrors;
+ }
+
+ F.RelocatablePCH = Record[4];
+
+ const std::string &CurBranch = getClangFullRepositoryVersion();
+ StringRef ASTBranch(BlobStart, BlobLen);
+ if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
+ if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
+ Diag(diag::warn_pch_different_branch) << ASTBranch << CurBranch;
+ return VersionMismatch;
+ }
+ break;
+ }
+
+ case IMPORTS: {
+ // Load each of the imported PCH files.
+ unsigned Idx = 0, N = Record.size();
+ while (Idx < N) {
+ // Read information about the AST file.
+ ModuleKind ImportedKind = (ModuleKind)Record[Idx++];
+ unsigned Length = Record[Idx++];
+ SmallString<128> ImportedFile(Record.begin() + Idx,
+ Record.begin() + Idx + Length);
+ Idx += Length;
+
+ // Load the AST file.
+ switch(ReadASTCore(ImportedFile, ImportedKind, &F, Loaded,
+ ClientLoadCapabilities)) {
+ case Failure: return Failure;
+ // If we have to ignore the dependency, we'll have to ignore this too.
+ case OutOfDate: return OutOfDate;
+ case VersionMismatch: return VersionMismatch;
+ case ConfigurationMismatch: return ConfigurationMismatch;
+ case HadErrors: return HadErrors;
+ case Success: break;
+ }
+ }
+ break;
+ }
+
+ case LANGUAGE_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (Listener && &F == *ModuleMgr.begin() &&
+ ParseLanguageOptions(Record, Complain, *Listener) &&
+ !DisableValidation)
+ return ConfigurationMismatch;
+ break;
+ }
+
+ case TARGET_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (Listener && &F == *ModuleMgr.begin() &&
+ ParseTargetOptions(Record, Complain, *Listener) &&
+ !DisableValidation)
+ return ConfigurationMismatch;
+ break;
+ }
+
+ case DIAGNOSTIC_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (Listener && &F == *ModuleMgr.begin() &&
+ ParseDiagnosticOptions(Record, Complain, *Listener) &&
+ !DisableValidation)
+ return ConfigurationMismatch;
+ break;
+ }
+
+ case FILE_SYSTEM_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (Listener && &F == *ModuleMgr.begin() &&
+ ParseFileSystemOptions(Record, Complain, *Listener) &&
+ !DisableValidation)
+ return ConfigurationMismatch;
+ break;
+ }
+
+ case HEADER_SEARCH_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (Listener && &F == *ModuleMgr.begin() &&
+ ParseHeaderSearchOptions(Record, Complain, *Listener) &&
+ !DisableValidation)
+ return ConfigurationMismatch;
+ break;
+ }
+
+ case PREPROCESSOR_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (Listener && &F == *ModuleMgr.begin() &&
+ ParsePreprocessorOptions(Record, Complain, *Listener,
+ SuggestedPredefines) &&
+ !DisableValidation)
+ return ConfigurationMismatch;
+ break;
+ }
+
+ case ORIGINAL_FILE:
+ F.OriginalSourceFileID = FileID::get(Record[0]);
+ F.ActualOriginalSourceFileName.assign(BlobStart, BlobLen);
+ F.OriginalSourceFileName = F.ActualOriginalSourceFileName;
+ MaybeAddSystemRootToFilename(F, F.OriginalSourceFileName);
+ break;
+
+ case ORIGINAL_PCH_DIR:
+ F.OriginalDir.assign(BlobStart, BlobLen);
+ break;
+
+ case INPUT_FILE_OFFSETS:
+ F.InputFileOffsets = (const uint32_t *)BlobStart;
+ F.InputFilesLoaded.resize(Record[0]);
+ break;
+ }
+ }
+
+ Error("premature end of bitstream in AST file");
+ return Failure;
+}
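
// ClientLoadCapabilities is used as a bitmask throughout ReadControlBlock:
// each ARR_* bit says the caller can recover from that failure kind, so the
// matching diagnostics are suppressed when the bit is set. A sketch (not part
// of the patch) of the convention, with illustrative bit values:
enum LoadCapability {
  ARR_None = 0,
  ARR_OutOfDateExample = 1 << 0,
  ARR_VersionMismatchExample = 1 << 1,
  ARR_ConfigurationMismatchExample = 1 << 2
};

static bool shouldComplain(unsigned ClientLoadCapabilities, unsigned Bit) {
  // Complain only when the client cannot recover from this failure itself.
  return (ClientLoadCapabilities & Bit) == 0;
}
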
+
+bool ASTReader::ReadASTBlock(ModuleFile &F) {
+ llvm::BitstreamCursor &Stream = F.Stream;
+
+ if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return true;
+ }
+
+ // Read all of the records and blocks for the AST file.
+ RecordData Record;
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (Stream.ReadBlockEnd()) {
+ Error("error at end of module block in AST file");
+ return true;
+ }
+
+ DeclContext *DC = Context.getTranslationUnitDecl();
+ if (!DC->hasExternalVisibleStorage() && DC->hasExternalLexicalStorage())
+ DC->setMustBuildLookupTable();
+
+ return false;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ switch (Stream.ReadSubBlockID()) {
case DECLTYPES_BLOCK_ID:
// We lazily load the decls block, but we want to set up the
// DeclsCursor cursor to point into it. Clone our current bitcode
@@ -1676,14 +1838,14 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
// Read the abbrevs.
ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) {
Error("malformed block record in AST file");
- return Failure;
+ return true;
}
break;
case DECL_UPDATES_BLOCK_ID:
if (Stream.SkipBlock()) {
Error("malformed block record in AST file");
- return Failure;
+ return true;
}
break;
@@ -1695,7 +1857,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
if (Stream.SkipBlock() ||
ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
Error("malformed block record in AST file");
- return Failure;
+ return true;
}
F.MacroStartOffset = F.MacroCursor.GetCurrentBitNo();
break;
@@ -1706,7 +1868,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
ReadBlockAbbrevs(F.PreprocessorDetailCursor,
PREPROCESSOR_DETAIL_BLOCK_ID)) {
Error("malformed preprocessor detail record in AST file");
- return Failure;
+ return true;
}
F.PreprocessorDetailStartOffset
= F.PreprocessorDetailCursor.GetCurrentBitNo();
@@ -1718,31 +1880,13 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
break;
case SOURCE_MANAGER_BLOCK_ID:
- switch (ReadSourceManagerBlock(F)) {
- case Success:
- break;
-
- case Failure:
- Error("malformed source manager block in AST file");
- return Failure;
-
- case IgnorePCH:
- return IgnorePCH;
- }
+ if (ReadSourceManagerBlock(F))
+ return true;
break;
case SUBMODULE_BLOCK_ID:
- switch (ReadSubmoduleBlock(F)) {
- case Success:
- break;
-
- case Failure:
- Error("malformed submodule block in AST file");
- return Failure;
-
- case IgnorePCH:
- return IgnorePCH;
- }
+ if (ReadSubmoduleBlock(F))
+ return true;
break;
case COMMENTS_BLOCK_ID: {
@@ -1750,7 +1894,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
if (Stream.SkipBlock() ||
ReadBlockAbbrevs(C, COMMENTS_BLOCK_ID)) {
Error("malformed comments block in AST file");
- return Failure;
+ return true;
}
CommentsCursors.push_back(std::make_pair(C, &F));
break;
@@ -1760,7 +1904,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
if (!Stream.SkipBlock())
break;
Error("malformed block record in AST file");
- return Failure;
+ return true;
}
continue;
}
@@ -1779,54 +1923,10 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
default: // Default behavior: ignore.
break;
- case METADATA: {
- if (Record[0] != VERSION_MAJOR && !DisableValidation) {
- Diag(Record[0] < VERSION_MAJOR? diag::warn_pch_version_too_old
- : diag::warn_pch_version_too_new);
- return IgnorePCH;
- }
-
- bool hasErrors = Record[5];
- if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
- Diag(diag::err_pch_with_compiler_errors);
- return IgnorePCH;
- }
-
- RelocatablePCH = Record[4];
- if (Listener) {
- std::string TargetTriple(BlobStart, BlobLen);
- if (Listener->ReadTargetTriple(TargetTriple))
- return IgnorePCH;
- }
- break;
- }
-
- case IMPORTS: {
- // Load each of the imported PCH files.
- unsigned Idx = 0, N = Record.size();
- while (Idx < N) {
- // Read information about the AST file.
- ModuleKind ImportedKind = (ModuleKind)Record[Idx++];
- unsigned Length = Record[Idx++];
- SmallString<128> ImportedFile(Record.begin() + Idx,
- Record.begin() + Idx + Length);
- Idx += Length;
-
- // Load the AST file.
- switch(ReadASTCore(ImportedFile, ImportedKind, &F)) {
- case Failure: return Failure;
- // If we have to ignore the dependency, we'll have to ignore this too.
- case IgnorePCH: return IgnorePCH;
- case Success: break;
- }
- }
- break;
- }
-
case TYPE_OFFSET: {
if (F.LocalNumTypes != 0) {
Error("duplicate TYPE_OFFSET record in AST file");
- return Failure;
+ return true;
}
F.TypeOffsets = (const uint32_t *)BlobStart;
F.LocalNumTypes = Record[0];
@@ -1850,7 +1950,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case DECL_OFFSET: {
if (F.LocalNumDecls != 0) {
Error("duplicate DECL_OFFSET record in AST file");
- return Failure;
+ return true;
}
F.DeclOffsets = (const DeclOffset *)BlobStart;
F.LocalNumDecls = Record[0];
@@ -1904,11 +2004,6 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
break;
}
- case LANGUAGE_OPTIONS:
- if (ParseLanguageOptions(Record) && !DisableValidation)
- return IgnorePCH;
- break;
-
case IDENTIFIER_TABLE:
F.IdentifierTableData = BlobStart;
if (Record[0]) {
@@ -1925,7 +2020,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case IDENTIFIER_OFFSET: {
if (F.LocalNumIdentifiers != 0) {
Error("duplicate IDENTIFIER_OFFSET record in AST file");
- return Failure;
+ return true;
}
F.IdentifierOffsets = (const uint32_t *)BlobStart;
F.LocalNumIdentifiers = Record[0];
@@ -1949,7 +2044,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
}
break;
}
-
+
case EXTERNAL_DEFINITIONS:
for (unsigned I = 0, N = Record.size(); I != N; ++I)
ExternalDefinitions.push_back(getGlobalDeclID(F, Record[I]));
@@ -1980,7 +2075,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case WEAK_UNDECLARED_IDENTIFIERS:
if (Record.size() % 4 != 0) {
Error("invalid weak identifiers record");
- return Failure;
+ return true;
}
// FIXME: Ignore weak undeclared identifiers from non-original PCH
@@ -2050,11 +2145,12 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case PP_COUNTER_VALUE:
if (!Record.empty() && Listener)
- Listener->ReadCounter(Record[0]);
+ Listener->ReadCounter(F, Record[0]);
break;
case FILE_SORTED_DECLS:
F.FileSortedDecls = (const DeclID *)BlobStart;
+ F.NumFileSortedDecls = Record[0];
break;
case SOURCE_LOCATION_OFFSETS: {
@@ -2098,7 +2194,9 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
ContinuousRangeMap<uint32_t, int, 2>::Builder SLocRemap(F.SLocRemap);
ContinuousRangeMap<uint32_t, int, 2>::Builder
IdentifierRemap(F.IdentifierRemap);
- ContinuousRangeMap<uint32_t, int, 2>::Builder
+ ContinuousRangeMap<uint32_t, int, 2>::Builder
+ MacroRemap(F.MacroRemap);
+ ContinuousRangeMap<uint32_t, int, 2>::Builder
PreprocessedEntityRemap(F.PreprocessedEntityRemap);
ContinuousRangeMap<uint32_t, int, 2>::Builder
SubmoduleRemap(F.SubmoduleRemap);
@@ -2114,11 +2212,12 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
ModuleFile *OM = ModuleMgr.lookup(Name);
if (!OM) {
Error("SourceLocation remap refers to unknown module");
- return Failure;
+ return true;
}
uint32_t SLocOffset = io::ReadUnalignedLE32(Data);
uint32_t IdentifierIDOffset = io::ReadUnalignedLE32(Data);
+ uint32_t MacroIDOffset = io::ReadUnalignedLE32(Data);
uint32_t PreprocessedEntityIDOffset = io::ReadUnalignedLE32(Data);
uint32_t SubmoduleIDOffset = io::ReadUnalignedLE32(Data);
uint32_t SelectorIDOffset = io::ReadUnalignedLE32(Data);
@@ -2131,6 +2230,8 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
IdentifierRemap.insert(
std::make_pair(IdentifierIDOffset,
OM->BaseIdentifierID - IdentifierIDOffset));
+ MacroRemap.insert(std::make_pair(MacroIDOffset,
+ OM->BaseMacroID - MacroIDOffset));
PreprocessedEntityRemap.insert(
std::make_pair(PreprocessedEntityIDOffset,
OM->BasePreprocessedEntityID - PreprocessedEntityIDOffset));
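
// The remap builders above store (FirstLocalID, GlobalBase - FirstLocalID)
// pairs, so translating a local ID means finding the nearest entry at or
// below it and adding the offset. A sketch (not part of the patch) with
// std::map standing in for ContinuousRangeMap:
#include <map>

static unsigned remapID(const std::map<unsigned, int> &Remap,
                        unsigned LocalID) {
  // Find the last entry whose key is <= LocalID.
  std::map<unsigned, int>::const_iterator I = Remap.upper_bound(LocalID);
  if (I == Remap.begin())
    return LocalID; // no remapping recorded for this range
  --I;
  return (unsigned)((int)LocalID + I->second);
}
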
@@ -2152,12 +2253,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case SOURCE_MANAGER_LINE_TABLE:
if (ParseLineTable(F, Record))
- return Failure;
- break;
-
- case FILE_SOURCE_LOCATION_OFFSETS:
- F.SLocFileOffsets = (const uint32_t *)BlobStart;
- F.LocalNumSLocFileEntries = Record[0];
+ return true;
break;
case SOURCE_LOCATION_PRELOADS: {
@@ -2165,25 +2261,13 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
// which is based off F.SLocEntryBaseID.
if (!F.PreloadSLocEntries.empty()) {
Error("Multiple SOURCE_LOCATION_PRELOADS records in AST file");
- return Failure;
+ return true;
}
F.PreloadSLocEntries.swap(Record);
break;
}
- case STAT_CACHE: {
- if (!DisableStatCache) {
- ASTStatCache *MyStatCache =
- new ASTStatCache((const unsigned char *)BlobStart + Record[0],
- (const unsigned char *)BlobStart,
- NumStatHits, NumStatMisses);
- FileMgr.addStatCache(MyStatCache);
- F.StatCache = MyStatCache;
- }
- break;
- }
-
case EXT_VECTOR_DECLS:
for (unsigned I = 0, N = Record.size(); I != N; ++I)
ExtVectorDecls.push_back(getGlobalDeclID(F, Record[I]));
@@ -2192,7 +2276,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case VTABLE_USES:
if (Record.size() % 3 != 0) {
Error("Invalid VTABLE_USES record");
- return Failure;
+ return true;
}
// Later tables overwrite earlier ones.
@@ -2215,13 +2299,15 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case PENDING_IMPLICIT_INSTANTIATIONS:
if (PendingInstantiations.size() % 2 != 0) {
+ Error("Invalid existing PendingInstantiations");
+ return true;
+ }
+
+ if (Record.size() % 2 != 0) {
Error("Invalid PENDING_IMPLICIT_INSTANTIATIONS block");
- return Failure;
+ return true;
}
-
- // Later lists of pending instantiations overwrite earlier ones.
- // FIXME: This is most certainly wrong for modules.
- PendingInstantiations.clear();
+
for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
PendingInstantiations.push_back(getGlobalDeclID(F, Record[I++]));
PendingInstantiations.push_back(
@@ -2237,34 +2323,6 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
SemaDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
break;
- case ORIGINAL_FILE_NAME:
- // The primary AST will be the last to get here, so it will be the one
- // that's used.
- ActualOriginalFileName.assign(BlobStart, BlobLen);
- OriginalFileName = ActualOriginalFileName;
- MaybeAddSystemRootToFilename(OriginalFileName);
- break;
-
- case ORIGINAL_FILE_ID:
- OriginalFileID = FileID::get(Record[0]);
- break;
-
- case ORIGINAL_PCH_DIR:
- // The primary AST will be the last to get here, so it will be the one
- // that's used.
- OriginalDir.assign(BlobStart, BlobLen);
- break;
-
- case VERSION_CONTROL_BRANCH_REVISION: {
- const std::string &CurBranch = getClangFullRepositoryVersion();
- StringRef ASTBranch(BlobStart, BlobLen);
- if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
- Diag(diag::warn_pch_different_branch) << ASTBranch << CurBranch;
- return IgnorePCH;
- }
- break;
- }
-
case PPD_ENTITIES_OFFSETS: {
F.PreprocessedEntityOffsets = (const PPEntityOffset *)BlobStart;
assert(BlobLen % sizeof(PPEntityOffset) == 0);
@@ -2300,7 +2358,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case DECL_UPDATE_OFFSETS: {
if (Record.size() % 2 != 0) {
Error("invalid DECL_UPDATE_OFFSETS block in AST file");
- return Failure;
+ return true;
}
for (unsigned I = 0, N = Record.size(); I != N; I += 2)
DeclUpdateOffsets[getGlobalDeclID(F, Record[I])]
@@ -2311,7 +2369,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case DECL_REPLACEMENTS: {
if (Record.size() % 3 != 0) {
Error("invalid DECL_REPLACEMENTS block in AST file");
- return Failure;
+ return true;
}
for (unsigned I = 0, N = Record.size(); I != N; I += 3)
ReplacedDecls[getGlobalDeclID(F, Record[I])]
@@ -2322,7 +2380,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case OBJC_CATEGORIES_MAP: {
if (F.LocalNumObjCCategoriesInMap != 0) {
Error("duplicate OBJC_CATEGORIES_MAP record in AST file");
- return Failure;
+ return true;
}
F.LocalNumObjCCategoriesInMap = Record[0];
@@ -2337,7 +2395,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case CXX_BASE_SPECIFIER_OFFSETS: {
if (F.LocalNumCXXBaseSpecifiers != 0) {
Error("duplicate CXX_BASE_SPECIFIER_OFFSETS record in AST file");
- return Failure;
+ return true;
}
F.LocalNumCXXBaseSpecifiers = Record[0];
@@ -2347,11 +2405,6 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
}
case DIAG_PRAGMA_MAPPINGS:
- if (Record.size() % 2 != 0) {
- Error("invalid DIAG_USER_MAPPINGS block in AST file");
- return Failure;
- }
-
if (F.PragmaDiagMappings.empty())
F.PragmaDiagMappings.swap(Record);
else
@@ -2428,7 +2481,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
case LOCAL_REDECLARATIONS_MAP: {
if (F.LocalNumRedeclarationsInMap != 0) {
Error("duplicate LOCAL_REDECLARATIONS_MAP record in AST file");
- return Failure;
+ return true;
}
F.LocalNumRedeclarationsInMap = Record[0];
@@ -2445,113 +2498,77 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
}
break;
}
- }
- }
- Error("premature end of bitstream in AST file");
- return Failure;
-}
-
-ASTReader::ASTReadResult ASTReader::validateFileEntries(ModuleFile &M) {
- llvm::BitstreamCursor &SLocEntryCursor = M.SLocEntryCursor;
-
- for (unsigned i = 0, e = M.LocalNumSLocFileEntries; i != e; ++i) {
- SLocEntryCursor.JumpToBit(M.SLocFileOffsets[i]);
- unsigned Code = SLocEntryCursor.ReadCode();
- if (Code == llvm::bitc::END_BLOCK ||
- Code == llvm::bitc::ENTER_SUBBLOCK ||
- Code == llvm::bitc::DEFINE_ABBREV) {
- Error("incorrectly-formatted source location entry in AST file");
- return Failure;
- }
- RecordData Record;
- const char *BlobStart;
- unsigned BlobLen;
- switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
- default:
- Error("incorrectly-formatted source location entry in AST file");
- return Failure;
+ case MACRO_OFFSET: {
+ if (F.LocalNumMacros != 0) {
+ Error("duplicate MACRO_OFFSET record in AST file");
+ return true;
+ }
+ F.MacroOffsets = (const uint32_t *)BlobStart;
+ F.LocalNumMacros = Record[0];
+ unsigned LocalBaseMacroID = Record[1];
+ F.BaseMacroID = getTotalNumMacros();
- case SM_SLOC_FILE_ENTRY: {
- // If the buffer was overridden, the file need not exist.
- if (Record[6])
- break;
-
- StringRef Filename(BlobStart, BlobLen);
- const FileEntry *File = getFileEntry(Filename);
+ if (F.LocalNumMacros > 0) {
+ // Introduce the global -> local mapping for macros within this module.
+ GlobalMacroMap.insert(std::make_pair(getTotalNumMacros() + 1, &F));
- if (File == 0) {
- std::string ErrorStr = "could not find file '";
- ErrorStr += Filename;
- ErrorStr += "' referenced by AST file";
- Error(ErrorStr.c_str());
- return IgnorePCH;
- }
+ // Introduce the local -> global mapping for macros within this module.
+ F.MacroRemap.insertOrReplace(
+ std::make_pair(LocalBaseMacroID,
+ F.BaseMacroID - LocalBaseMacroID));
- if (Record.size() < 7) {
- Error("source location entry is incorrect");
- return Failure;
- }
-
- off_t StoredSize = (off_t)Record[4];
- time_t StoredTime = (time_t)Record[5];
-
- // Check if there was a request to override the contents of the file
- // that was part of the precompiled header. Overridding such a file
- // can lead to problems when lexing using the source locations from the
- // PCH.
- SourceManager &SM = getSourceManager();
- if (SM.isFileOverridden(File)) {
- Error(diag::err_fe_pch_file_overridden, Filename);
- // After emitting the diagnostic, recover by disabling the override so
- // that the original file will be used.
- SM.disableFileContentsOverride(File);
- // The FileEntry is a virtual file entry with the size of the contents
- // that would override the original contents. Set it to the original's
- // size/time.
- FileMgr.modifyFileEntry(const_cast<FileEntry*>(File),
- StoredSize, StoredTime);
+ MacrosLoaded.resize(MacrosLoaded.size() + F.LocalNumMacros);
}
+ break;
+ }
- // The stat info from the FileEntry came from the cached stat
- // info of the PCH, so we cannot trust it.
- struct stat StatBuf;
- if (::stat(File->getName(), &StatBuf) != 0) {
- StatBuf.st_size = File->getSize();
- StatBuf.st_mtime = File->getModificationTime();
- }
+ case MACRO_UPDATES: {
+ for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
+ MacroID ID = getGlobalMacroID(F, Record[I++]);
+ if (I == N)
+ break;
- if ((StoredSize != StatBuf.st_size
-#if !defined(LLVM_ON_WIN32)
- // In our regression testing, the Windows file system seems to
- // have inconsistent modification times that sometimes
- // erroneously trigger this error-handling path.
- || StoredTime != StatBuf.st_mtime
-#endif
- )) {
- Error(diag::err_fe_pch_file_modified, Filename);
- return IgnorePCH;
+ SourceLocation UndefLoc = ReadSourceLocation(F, Record, I);
+      SubmoduleID SubmoduleID = getGlobalSubmoduleID(F, Record[I++]);
+ MacroUpdate Update;
+ Update.UndefLoc = UndefLoc;
+ MacroUpdates[ID].push_back(std::make_pair(SubmoduleID, Update));
}
-
break;
}
}
}
-
- return Success;
+ Error("premature end of bitstream in AST file");
+ return true;
}
void ASTReader::makeNamesVisible(const HiddenNames &Names) {
for (unsigned I = 0, N = Names.size(); I != N; ++I) {
- if (Decl *D = Names[I].dyn_cast<Decl *>())
- D->Hidden = false;
- else {
- IdentifierInfo *II = Names[I].get<IdentifierInfo *>();
- if (!II->hasMacroDefinition()) {
- II->setHasMacroDefinition(true);
- if (DeserializationListener)
- DeserializationListener->MacroVisible(II);
+ switch (Names[I].getKind()) {
+ case HiddenName::Declaration:
+ Names[I].getDecl()->Hidden = false;
+ break;
+
+ case HiddenName::MacroVisibility: {
+ std::pair<IdentifierInfo *, MacroInfo *> Macro = Names[I].getMacro();
+ Macro.second->setHidden(!Macro.second->isPublic());
+ if (Macro.second->isDefined()) {
+ PP.makeLoadedMacroInfoVisible(Macro.first, Macro.second);
+ }
+ break;
+ }
+
+ case HiddenName::MacroUndef: {
+ std::pair<IdentifierInfo *, MacroInfo *> Macro = Names[I].getMacro();
+ if (Macro.second->isDefined()) {
+ Macro.second->setUndefLoc(Names[I].getMacroUndefLoc());
+ if (PPMutationListener *Listener = PP.getPPMutationListener())
+ Listener->UndefinedMacro(Macro.second);
+ PP.makeLoadedMacroInfoVisible(Macro.first, Macro.second);
}
+ break;
+ }
}
}
}
@@ -2631,7 +2648,7 @@ void ASTReader::makeModuleVisible(Module *Mod,
for (unsigned I = 0, N = Mod->Imports.size(); I != N; ++I) {
Module *Imported = Mod->Imports[I];
- if (Visited.count(Imported))
+ if (!Visited.insert(Imported))
continue;
bool Acceptable = UnrestrictedWildcard;
@@ -2649,32 +2666,62 @@ void ASTReader::makeModuleVisible(Module *Mod,
if (!Acceptable)
continue;
- Visited.insert(Imported);
Stack.push_back(Imported);
}
}
}
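// --- Editorial sketch (not part of the patch). The hunk above folds the old
// Visited.count(Imported) check plus the later Visited.insert(Imported) into
// a single call: in this era of the LLVM ADT, SmallPtrSet::insert returns a
// bool that is true only for a genuinely new element, so one probe both
// tests and records membership. A minimal standalone illustration; only
// pointer identity matters here, so clang::Module is forward-declared.
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
namespace clang { class Module; }
static void scheduleUnvisited(
    const llvm::SmallVectorImpl<clang::Module *> &Imports,
    llvm::SmallVectorImpl<clang::Module *> &Stack) {
  llvm::SmallPtrSet<clang::Module *, 4> Visited;
  for (unsigned I = 0, N = Imports.size(); I != N; ++I) {
    if (!Visited.insert(Imports[I])) // false: already seen, skip it
      continue;
    Stack.push_back(Imports[I]);     // first visit: schedule traversal
  }
}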
ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
- ModuleKind Type) {
+ ModuleKind Type,
+ unsigned ClientLoadCapabilities) {
// Bump the generation number.
unsigned PreviousGeneration = CurrentGeneration++;
-
- switch(ReadASTCore(FileName, Type, /*ImportedBy=*/0)) {
- case Failure: return Failure;
- case IgnorePCH: return IgnorePCH;
- case Success: break;
+
+ unsigned NumModules = ModuleMgr.size();
+ llvm::SmallVector<ModuleFile *, 4> Loaded;
+ switch(ASTReadResult ReadResult = ReadASTCore(FileName, Type,
+ /*ImportedBy=*/0, Loaded,
+ ClientLoadCapabilities)) {
+ case Failure:
+ case OutOfDate:
+ case VersionMismatch:
+ case ConfigurationMismatch:
+ case HadErrors:
+ ModuleMgr.removeModules(ModuleMgr.begin() + NumModules, ModuleMgr.end());
+ return ReadResult;
+
+ case Success:
+ break;
}
// Here comes stuff that we only do once the entire chain is loaded.
- // Check the predefines buffers.
- if (!DisableValidation && Type == MK_PCH &&
- // FIXME: CheckPredefinesBuffers also sets the SuggestedPredefines;
- // if DisableValidation is true, defines that were set on command-line
- // but not in the PCH file will not be added to SuggestedPredefines.
- CheckPredefinesBuffers())
- return IgnorePCH;
+ // Load the AST blocks of all of the modules that we loaded.
+ for (llvm::SmallVectorImpl<ModuleFile *>::iterator M = Loaded.begin(),
+ MEnd = Loaded.end();
+ M != MEnd; ++M) {
+ ModuleFile &F = **M;
+
+ // Read the AST block.
+ if (ReadASTBlock(F))
+ return Failure;
+
+ // Once read, set the ModuleFile bit base offset and update the size in
+ // bits of all files we've seen.
+ F.GlobalBitOffset = TotalModulesSizeInBits;
+ TotalModulesSizeInBits += F.SizeInBits;
+ GlobalBitOffsetsMap.insert(std::make_pair(F.GlobalBitOffset, &F));
+
+ // Preload SLocEntries.
+ for (unsigned I = 0, N = F.PreloadSLocEntries.size(); I != N; ++I) {
+ int Index = int(F.PreloadSLocEntries[I] - 1) + F.SLocEntryBaseID;
+      // Load it through the SourceManager rather than calling
+      // ReadSLocEntry() directly: the entry may already have been loaded,
+      // in which case a direct ReadSLocEntry() call would trigger an
+      // assertion in SourceManager.
+ SourceMgr.getLoadedSLocEntryByID(Index);
+ }
+ }
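// Editorial note: the rebasing arithmetic above, worked through. Preload IDs
// are recorded 1-based and local to their module file; adding the module's
// SLocEntryBaseID shifts them into the reader's global loaded-entry space.
// Assuming SLocEntryBaseID == 100 and PreloadSLocEntries == {1, 5}:
//   Index = int(1 - 1) + 100 == 100   (the module's first loaded entry)
//   Index = int(5 - 1) + 100 == 104   (its fifth)
// Routing the load through getLoadedSLocEntryByID() keeps the "is it already
// loaded?" bookkeeping inside SourceManager itself.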
// Mark all of the identifiers in the identifier table as being out of date,
// so that various accessors know to check the loaded modules when the
@@ -2707,17 +2754,19 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
if (DeserializationListener)
DeserializationListener->ReaderInitialized(this);
- if (!OriginalFileID.isInvalid()) {
- OriginalFileID = FileID::get(ModuleMgr.getPrimaryModule().SLocEntryBaseID
- + OriginalFileID.getOpaqueValue() - 1);
+ ModuleFile &PrimaryModule = ModuleMgr.getPrimaryModule();
+ if (!PrimaryModule.OriginalSourceFileID.isInvalid()) {
+ PrimaryModule.OriginalSourceFileID
+ = FileID::get(PrimaryModule.SLocEntryBaseID
+ + PrimaryModule.OriginalSourceFileID.getOpaqueValue() - 1);
- // If this AST file is a precompiled preamble, then set the preamble file ID
- // of the source manager to the file source file from which the preamble was
- // built.
+    // If this AST file is a precompiled preamble, then set the preamble
+    // file ID of the source manager to the source file from which the
+    // preamble was built.
if (Type == MK_Preamble) {
- SourceMgr.setPreambleFileID(OriginalFileID);
+ SourceMgr.setPreambleFileID(PrimaryModule.OriginalSourceFileID);
} else if (Type == MK_MainFile) {
- SourceMgr.setMainFileID(OriginalFileID);
+ SourceMgr.setMainFileID(PrimaryModule.OriginalSourceFileID);
}
}
@@ -2732,9 +2781,12 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
return Success;
}
-ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
- ModuleKind Type,
- ModuleFile *ImportedBy) {
+ASTReader::ASTReadResult
+ASTReader::ReadASTCore(StringRef FileName,
+ ModuleKind Type,
+ ModuleFile *ImportedBy,
+ llvm::SmallVectorImpl<ModuleFile *> &Loaded,
+ unsigned ClientLoadCapabilities) {
ModuleFile *M;
bool NewModule;
std::string ErrorStr;
@@ -2785,7 +2837,7 @@ ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
unsigned BlockID = Stream.ReadSubBlockID();
- // We only know the AST subblock ID.
+ // We only know the control subblock ID.
switch (BlockID) {
case llvm::bitc::BLOCKINFO_BLOCK_ID:
if (Stream.ReadBlockInfoBlock()) {
@@ -2793,29 +2845,23 @@ ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
return Failure;
}
break;
- case AST_BLOCK_ID:
- switch (ReadASTBlock(F)) {
+ case CONTROL_BLOCK_ID:
+ switch (ReadControlBlock(F, Loaded, ClientLoadCapabilities)) {
case Success:
break;
- case Failure:
- return Failure;
-
- case IgnorePCH:
- // FIXME: We could consider reading through to the end of this
- // AST block, skipping subblocks, to see if there are other
- // AST blocks elsewhere.
-
- // FIXME: We can't clear loaded slocentries anymore.
- //SourceMgr.ClearPreallocatedSLocEntries();
-
- // Remove the stat cache.
- if (F.StatCache)
- FileMgr.removeStatCache((ASTStatCache*)F.StatCache);
-
- return IgnorePCH;
+ case Failure: return Failure;
+ case OutOfDate: return OutOfDate;
+ case VersionMismatch: return VersionMismatch;
+ case ConfigurationMismatch: return ConfigurationMismatch;
+ case HadErrors: return HadErrors;
}
break;
+ case AST_BLOCK_ID:
+ // Record that we've loaded this module.
+ Loaded.push_back(M);
+ return Success;
+
default:
if (Stream.SkipBlock()) {
Error("malformed block record in AST file");
@@ -2825,32 +2871,6 @@ ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
}
}
- // Once read, set the ModuleFile bit base offset and update the size in
- // bits of all files we've seen.
- F.GlobalBitOffset = TotalModulesSizeInBits;
- TotalModulesSizeInBits += F.SizeInBits;
- GlobalBitOffsetsMap.insert(std::make_pair(F.GlobalBitOffset, &F));
-
- // Make sure that the files this module was built against are still available.
- if (!DisableValidation) {
- switch(validateFileEntries(*M)) {
- case Failure: return Failure;
- case IgnorePCH: return IgnorePCH;
- case Success: break;
- }
- }
-
- // Preload SLocEntries.
- for (unsigned I = 0, N = M->PreloadSLocEntries.size(); I != N; ++I) {
- int Index = int(M->PreloadSLocEntries[I] - 1) + F.SLocEntryBaseID;
- // Load it through the SourceManager and don't call ReadSLocEntryRecord()
- // directly because the entry may have already been loaded in which case
- // calling ReadSLocEntryRecord() directly would trigger an assertion in
- // SourceManager.
- SourceMgr.getLoadedSLocEntryByID(Index);
- }
-
-
return Success;
}
@@ -3038,8 +3058,8 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
// We only know the AST subblock ID.
switch (BlockID) {
- case AST_BLOCK_ID:
- if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
+ case CONTROL_BLOCK_ID:
+ if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) {
Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
return std::string();
}
@@ -3071,19 +3091,191 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
Record.clear();
const char *BlobStart = 0;
unsigned BlobLen = 0;
- if (Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen)
- == ORIGINAL_FILE_NAME)
+ if (Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen) == ORIGINAL_FILE)
return std::string(BlobStart, BlobLen);
}
return std::string();
}
-ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
+namespace {
+ class SimplePCHValidator : public ASTReaderListener {
+ const LangOptions &ExistingLangOpts;
+ const TargetOptions &ExistingTargetOpts;
+ const PreprocessorOptions &ExistingPPOpts;
+ FileManager &FileMgr;
+
+ public:
+ SimplePCHValidator(const LangOptions &ExistingLangOpts,
+ const TargetOptions &ExistingTargetOpts,
+ const PreprocessorOptions &ExistingPPOpts,
+ FileManager &FileMgr)
+ : ExistingLangOpts(ExistingLangOpts),
+ ExistingTargetOpts(ExistingTargetOpts),
+ ExistingPPOpts(ExistingPPOpts),
+ FileMgr(FileMgr)
+ {
+ }
+
+ virtual bool ReadLanguageOptions(const LangOptions &LangOpts,
+ bool Complain) {
+ return checkLanguageOptions(ExistingLangOpts, LangOpts, 0);
+ }
+ virtual bool ReadTargetOptions(const TargetOptions &TargetOpts,
+ bool Complain) {
+ return checkTargetOptions(ExistingTargetOpts, TargetOpts, 0);
+ }
+ virtual bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool Complain,
+ std::string &SuggestedPredefines) {
+ return checkPreprocessorOptions(ExistingPPOpts, PPOpts, 0, FileMgr,
+ SuggestedPredefines);
+ }
+ };
+}
+
+bool ASTReader::readASTFileControlBlock(StringRef Filename,
+ FileManager &FileMgr,
+ ASTReaderListener &Listener) {
+ // Open the AST file.
+ std::string ErrStr;
+ OwningPtr<llvm::MemoryBuffer> Buffer;
+ Buffer.reset(FileMgr.getBufferForFile(Filename, &ErrStr));
+ if (!Buffer) {
+ return true;
+ }
+
+ // Initialize the stream
+ llvm::BitstreamReader StreamFile;
+ llvm::BitstreamCursor Stream;
+ StreamFile.init((const unsigned char *)Buffer->getBufferStart(),
+ (const unsigned char *)Buffer->getBufferEnd());
+ Stream.init(StreamFile);
+
+ // Sniff for the signature.
+ if (Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'P' ||
+ Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'H') {
+ return true;
+ }
+
+ RecordData Record;
+ bool InControlBlock = false;
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ unsigned BlockID = Stream.ReadSubBlockID();
+
+ // We only know the control subblock ID.
+ switch (BlockID) {
+ case CONTROL_BLOCK_ID:
+ if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) {
+ return true;
+ } else {
+ InControlBlock = true;
+ }
+ break;
+
+ default:
+ if (Stream.SkipBlock())
+ return true;
+ break;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (Stream.ReadBlockEnd()) {
+ return true;
+ }
+
+ InControlBlock = false;
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ Record.clear();
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ unsigned RecCode = Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen);
+ if (InControlBlock) {
+ switch ((ControlRecordTypes)RecCode) {
+ case METADATA: {
+ if (Record[0] != VERSION_MAJOR) {
+ return true;
+ }
+
+ const std::string &CurBranch = getClangFullRepositoryVersion();
+ StringRef ASTBranch(BlobStart, BlobLen);
+ if (StringRef(CurBranch) != ASTBranch)
+ return true;
+
+ break;
+ }
+ case LANGUAGE_OPTIONS:
+ if (ParseLanguageOptions(Record, false, Listener))
+ return true;
+ break;
+
+ case TARGET_OPTIONS:
+ if (ParseTargetOptions(Record, false, Listener))
+ return true;
+ break;
+
+ case DIAGNOSTIC_OPTIONS:
+ if (ParseDiagnosticOptions(Record, false, Listener))
+ return true;
+ break;
+
+ case FILE_SYSTEM_OPTIONS:
+ if (ParseFileSystemOptions(Record, false, Listener))
+ return true;
+ break;
+
+ case HEADER_SEARCH_OPTIONS:
+ if (ParseHeaderSearchOptions(Record, false, Listener))
+ return true;
+ break;
+
+ case PREPROCESSOR_OPTIONS: {
+ std::string IgnoredSuggestedPredefines;
+ if (ParsePreprocessorOptions(Record, false, Listener,
+ IgnoredSuggestedPredefines))
+ return true;
+ break;
+ }
+
+ default:
+ // No other validation to perform.
+ break;
+ }
+ }
+ }
+
+ return false;
+}
+
+
+bool ASTReader::isAcceptableASTFile(StringRef Filename,
+ FileManager &FileMgr,
+ const LangOptions &LangOpts,
+ const TargetOptions &TargetOpts,
+ const PreprocessorOptions &PPOpts) {
+ SimplePCHValidator validator(LangOpts, TargetOpts, PPOpts, FileMgr);
+ return !readASTFileControlBlock(Filename, FileMgr, validator);
+}
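// Editorial sketch: how a client might use the two new entry points above to
// probe a PCH before committing to load it. This assumes the surrounding
// file's includes and that isAcceptableASTFile is a static member (its
// unqualified definition above suggests so); canReusePCH itself is a
// hypothetical caller, not part of the patch.
static bool canReusePCH(StringRef Path, FileManager &FileMgr,
                        const LangOptions &LangOpts,
                        const TargetOptions &TargetOpts,
                        const PreprocessorOptions &PPOpts) {
  // True only if the control block parses and every SimplePCHValidator hook
  // accepts the stored language/target/preprocessor options.
  return ASTReader::isAcceptableASTFile(Path, FileMgr, LangOpts,
                                        TargetOpts, PPOpts);
}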
+
+bool ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
// Enter the submodule block.
if (F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID)) {
Error("malformed submodule block record in AST file");
- return Failure;
+ return true;
}
ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
@@ -3095,9 +3287,9 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
if (Code == llvm::bitc::END_BLOCK) {
if (F.Stream.ReadBlockEnd()) {
Error("error at end of submodule block in AST file");
- return Failure;
+ return true;
}
- return Success;
+ return false;
}
if (Code == llvm::bitc::ENTER_SUBBLOCK) {
@@ -3105,7 +3297,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
F.Stream.ReadSubBlockID();
if (F.Stream.SkipBlock()) {
Error("malformed block record in AST file");
- return Failure;
+ return true;
}
continue;
}
@@ -3126,12 +3318,12 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
case SUBMODULE_DEFINITION: {
if (First) {
Error("missing submodule metadata record at beginning of block");
- return Failure;
+ return true;
}
if (Record.size() < 7) {
Error("malformed module definition");
- return Failure;
+ return true;
}
StringRef Name(BlobStart, BlobLen);
@@ -3157,9 +3349,10 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
if (GlobalIndex >= SubmodulesLoaded.size() ||
SubmodulesLoaded[GlobalIndex]) {
Error("too many submodules");
- return Failure;
+ return true;
}
+ CurrentModule->setASTFile(F.File);
CurrentModule->IsFromModuleFile = true;
CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem;
CurrentModule->InferSubmodules = InferSubmodules;
@@ -3175,7 +3368,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
case SUBMODULE_UMBRELLA_HEADER: {
if (First) {
Error("missing submodule metadata record at beginning of block");
- return Failure;
+ return true;
}
if (!CurrentModule)
@@ -3187,7 +3380,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
ModMap.setUmbrellaHeader(CurrentModule, Umbrella);
else if (CurrentModule->getUmbrellaHeader() != Umbrella) {
Error("mismatched umbrella headers in submodule");
- return Failure;
+ return true;
}
}
break;
@@ -3196,7 +3389,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
case SUBMODULE_HEADER: {
if (First) {
Error("missing submodule metadata record at beginning of block");
- return Failure;
+ return true;
}
if (!CurrentModule)
@@ -3208,15 +3401,51 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
if (std::find(CurrentModule->Headers.begin(),
CurrentModule->Headers.end(),
File) == CurrentModule->Headers.end())
- ModMap.addHeader(CurrentModule, File);
+ ModMap.addHeader(CurrentModule, File, false);
}
break;
}
-
+
+ case SUBMODULE_EXCLUDED_HEADER: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return true;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ // FIXME: Be more lazy about this!
+ StringRef FileName(BlobStart, BlobLen);
+ if (const FileEntry *File = PP.getFileManager().getFile(FileName)) {
+ if (std::find(CurrentModule->Headers.begin(),
+ CurrentModule->Headers.end(),
+ File) == CurrentModule->Headers.end())
+ ModMap.addHeader(CurrentModule, File, true);
+ }
+ break;
+ }
+
+ case SUBMODULE_TOPHEADER: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return true;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ // FIXME: Be more lazy about this!
+ StringRef FileName(BlobStart, BlobLen);
+ if (const FileEntry *File = PP.getFileManager().getFile(FileName))
+ CurrentModule->TopHeaders.insert(File);
+ break;
+ }
+
case SUBMODULE_UMBRELLA_DIR: {
if (First) {
Error("missing submodule metadata record at beginning of block");
- return Failure;
+ return true;
}
if (!CurrentModule)
@@ -3229,7 +3458,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
ModMap.setUmbrellaDir(CurrentModule, Umbrella);
else if (CurrentModule->getUmbrellaDir() != Umbrella) {
Error("mismatched umbrella directories in submodule");
- return Failure;
+ return true;
}
}
break;
@@ -3238,7 +3467,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
case SUBMODULE_METADATA: {
if (!First) {
Error("submodule metadata record not at beginning of block");
- return Failure;
+ return true;
}
First = false;
@@ -3264,7 +3493,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
case SUBMODULE_IMPORTS: {
if (First) {
Error("missing submodule metadata record at beginning of block");
- return Failure;
+ return true;
}
if (!CurrentModule)
@@ -3285,7 +3514,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
case SUBMODULE_EXPORTS: {
if (First) {
Error("missing submodule metadata record at beginning of block");
- return Failure;
+ return true;
}
if (!CurrentModule)
@@ -3309,7 +3538,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
case SUBMODULE_REQUIRES: {
if (First) {
Error("missing submodule metadata record at beginning of block");
- return Failure;
+ return true;
}
if (!CurrentModule)
@@ -3331,27 +3560,144 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
/// them to the AST listener if one is set.
///
/// \returns true if the listener deems the file unacceptable, false otherwise.
-bool ASTReader::ParseLanguageOptions(const RecordData &Record) {
- if (Listener) {
- LangOptions LangOpts;
- unsigned Idx = 0;
+bool ASTReader::ParseLanguageOptions(const RecordData &Record,
+ bool Complain,
+ ASTReaderListener &Listener) {
+ LangOptions LangOpts;
+ unsigned Idx = 0;
#define LANGOPT(Name, Bits, Default, Description) \
LangOpts.Name = Record[Idx++];
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
LangOpts.set##Name(static_cast<LangOptions::Type>(Record[Idx++]));
#include "clang/Basic/LangOptions.def"
- ObjCRuntime::Kind runtimeKind = (ObjCRuntime::Kind) Record[Idx++];
- VersionTuple runtimeVersion = ReadVersionTuple(Record, Idx);
- LangOpts.ObjCRuntime = ObjCRuntime(runtimeKind, runtimeVersion);
-
- unsigned Length = Record[Idx++];
- LangOpts.CurrentModule.assign(Record.begin() + Idx,
- Record.begin() + Idx + Length);
- return Listener->ReadLanguageOptions(LangOpts);
+ ObjCRuntime::Kind runtimeKind = (ObjCRuntime::Kind) Record[Idx++];
+ VersionTuple runtimeVersion = ReadVersionTuple(Record, Idx);
+ LangOpts.ObjCRuntime = ObjCRuntime(runtimeKind, runtimeVersion);
+
+ unsigned Length = Record[Idx++];
+ LangOpts.CurrentModule.assign(Record.begin() + Idx,
+ Record.begin() + Idx + Length);
+ return Listener.ReadLanguageOptions(LangOpts, Complain);
+}
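// Editorial note: the #include of LangOptions.def above is the usual X-macro
// idiom. The .def file is a flat list of LANGOPT(...) and ENUM_LANGOPT(...)
// invocations, and each client supplies macro bodies that turn every entry
// into one Record slot. A toy version of the pattern, using a hypothetical
// Options.def containing OPTION(Foo) OPTION(Bar):
//
//   struct Options { unsigned Foo, Bar; } Opts;
//   unsigned Idx = 0;
//   #define OPTION(Name) Opts.Name = Record[Idx++];
//   #include "Options.def"   // expands to: Opts.Foo = Record[Idx++]; ...
//   #undef OPTION
//
// Because the writer expands the same .def with a push_back body, the field
// order of serialization and deserialization cannot drift apart.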
+
+bool ASTReader::ParseTargetOptions(const RecordData &Record,
+ bool Complain,
+ ASTReaderListener &Listener) {
+ unsigned Idx = 0;
+ TargetOptions TargetOpts;
+ TargetOpts.Triple = ReadString(Record, Idx);
+ TargetOpts.CPU = ReadString(Record, Idx);
+ TargetOpts.ABI = ReadString(Record, Idx);
+ TargetOpts.CXXABI = ReadString(Record, Idx);
+ TargetOpts.LinkerVersion = ReadString(Record, Idx);
+ for (unsigned N = Record[Idx++]; N; --N) {
+ TargetOpts.FeaturesAsWritten.push_back(ReadString(Record, Idx));
+ }
+ for (unsigned N = Record[Idx++]; N; --N) {
+ TargetOpts.Features.push_back(ReadString(Record, Idx));
}
- return false;
+ return Listener.ReadTargetOptions(TargetOpts, Complain);
+}
+
+bool ASTReader::ParseDiagnosticOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener) {
+ DiagnosticOptions DiagOpts;
+ unsigned Idx = 0;
+#define DIAGOPT(Name, Bits, Default) DiagOpts.Name = Record[Idx++];
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) \
+ DiagOpts.set##Name(static_cast<Type>(Record[Idx++]));
+#include "clang/Basic/DiagnosticOptions.def"
+
+ for (unsigned N = Record[Idx++]; N; --N) {
+ DiagOpts.Warnings.push_back(ReadString(Record, Idx));
+ }
+
+ return Listener.ReadDiagnosticOptions(DiagOpts, Complain);
+}
+
+bool ASTReader::ParseFileSystemOptions(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener) {
+ FileSystemOptions FSOpts;
+ unsigned Idx = 0;
+ FSOpts.WorkingDir = ReadString(Record, Idx);
+ return Listener.ReadFileSystemOptions(FSOpts, Complain);
+}
+
+bool ASTReader::ParseHeaderSearchOptions(const RecordData &Record,
+ bool Complain,
+ ASTReaderListener &Listener) {
+ HeaderSearchOptions HSOpts;
+ unsigned Idx = 0;
+ HSOpts.Sysroot = ReadString(Record, Idx);
+
+ // Include entries.
+ for (unsigned N = Record[Idx++]; N; --N) {
+ std::string Path = ReadString(Record, Idx);
+ frontend::IncludeDirGroup Group
+ = static_cast<frontend::IncludeDirGroup>(Record[Idx++]);
+ bool IsUserSupplied = Record[Idx++];
+ bool IsFramework = Record[Idx++];
+ bool IgnoreSysRoot = Record[Idx++];
+ bool IsInternal = Record[Idx++];
+ bool ImplicitExternC = Record[Idx++];
+ HSOpts.UserEntries.push_back(
+ HeaderSearchOptions::Entry(Path, Group, IsUserSupplied, IsFramework,
+ IgnoreSysRoot, IsInternal, ImplicitExternC));
+ }
+
+ // System header prefixes.
+ for (unsigned N = Record[Idx++]; N; --N) {
+ std::string Prefix = ReadString(Record, Idx);
+ bool IsSystemHeader = Record[Idx++];
+ HSOpts.SystemHeaderPrefixes.push_back(
+ HeaderSearchOptions::SystemHeaderPrefix(Prefix, IsSystemHeader));
+ }
+
+ HSOpts.ResourceDir = ReadString(Record, Idx);
+ HSOpts.ModuleCachePath = ReadString(Record, Idx);
+ HSOpts.DisableModuleHash = Record[Idx++];
+ HSOpts.UseBuiltinIncludes = Record[Idx++];
+ HSOpts.UseStandardSystemIncludes = Record[Idx++];
+ HSOpts.UseStandardCXXIncludes = Record[Idx++];
+ HSOpts.UseLibcxx = Record[Idx++];
+
+ return Listener.ReadHeaderSearchOptions(HSOpts, Complain);
+}
+
+bool ASTReader::ParsePreprocessorOptions(const RecordData &Record,
+ bool Complain,
+ ASTReaderListener &Listener,
+ std::string &SuggestedPredefines) {
+ PreprocessorOptions PPOpts;
+ unsigned Idx = 0;
+
+ // Macro definitions/undefs
+ for (unsigned N = Record[Idx++]; N; --N) {
+ std::string Macro = ReadString(Record, Idx);
+ bool IsUndef = Record[Idx++];
+ PPOpts.Macros.push_back(std::make_pair(Macro, IsUndef));
+ }
+
+ // Includes
+ for (unsigned N = Record[Idx++]; N; --N) {
+ PPOpts.Includes.push_back(ReadString(Record, Idx));
+ }
+
+ // Macro Includes
+ for (unsigned N = Record[Idx++]; N; --N) {
+ PPOpts.MacroIncludes.push_back(ReadString(Record, Idx));
+ }
+
+ PPOpts.UsePredefines = Record[Idx++];
+ PPOpts.ImplicitPCHInclude = ReadString(Record, Idx);
+ PPOpts.ImplicitPTHInclude = ReadString(Record, Idx);
+ PPOpts.ObjCXXARCStandardLibrary =
+ static_cast<ObjCXXARCStandardLibraryKind>(Record[Idx++]);
+ SuggestedPredefines.clear();
+ return Listener.ReadPreprocessorOptions(PPOpts, Complain,
+ SuggestedPredefines);
}
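// Editorial sketch: every variable-length list in these option records uses
// the same count-prefixed layout (one Record element holding the length,
// then the payload), which is what the "N = Record[Idx++]; N; --N" loops
// above consume. A writer emitting a matching layout; the helper names are
// hypothetical (the real emission lives in ASTWriter), this assumes the
// surrounding file's includes, and strings are stored one character per
// Record element, as ReadString expects:
static void emitString(StringRef Str, ASTReader::RecordData &Record) {
  Record.push_back(Str.size());            // length prefix
  for (unsigned I = 0, N = Str.size(); I != N; ++I)
    Record.push_back(Str[I]);              // one element per character
}
static void emitMacros(
    const std::vector<std::pair<std::string, bool> > &Macros,
    ASTReader::RecordData &Record) {
  Record.push_back(Macros.size());         // the count the reader loops on
  for (unsigned I = 0, N = Macros.size(); I != N; ++I) {
    emitString(Macros[I].first, Record);   // macro text
    Record.push_back(Macros[I].second);    // IsUndef flag
  }
}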
std::pair<ModuleFile *, unsigned>
@@ -3365,6 +3711,23 @@ ASTReader::getModulePreprocessedEntity(unsigned GlobalIndex) {
return std::make_pair(M, LocalIndex);
}
+std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator>
+ASTReader::getModulePreprocessedEntities(ModuleFile &Mod) const {
+ if (PreprocessingRecord *PPRec = PP.getPreprocessingRecord())
+ return PPRec->getIteratorsForLoadedRange(Mod.BasePreprocessedEntityID,
+ Mod.NumPreprocessedEntities);
+
+ return std::make_pair(PreprocessingRecord::iterator(),
+ PreprocessingRecord::iterator());
+}
+
+std::pair<ASTReader::ModuleDeclIterator, ASTReader::ModuleDeclIterator>
+ASTReader::getModuleFileLevelDecls(ModuleFile &Mod) {
+ return std::make_pair(ModuleDeclIterator(this, &Mod, Mod.FileSortedDecls),
+ ModuleDeclIterator(this, &Mod,
+ Mod.FileSortedDecls + Mod.NumFileSortedDecls));
+}
+
PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
PreprocessedEntityID PPID = Index+1;
std::pair<ModuleFile *, unsigned> PPInfo = getModulePreprocessedEntity(Index);
@@ -3455,7 +3818,7 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
InclusionDirective *ID
= new (PPRec) InclusionDirective(PPRec, Kind,
StringRef(BlobStart, Record[0]),
- Record[1],
+ Record[1], Record[3],
File,
Range);
return ID;
@@ -3476,7 +3839,7 @@ PreprocessedEntityID ASTReader::findNextPreprocessedEntity(
EndI = GlobalSLocOffsetMap.end(); SLocMapI != EndI; ++SLocMapI) {
ModuleFile &M = *SLocMapI->second;
if (M.NumPreprocessedEntities)
- return getGlobalPreprocessedEntityID(M, M.BasePreprocessedEntityID);
+ return M.BasePreprocessedEntityID;
}
return getTotalNumPreprocessedEntities();
@@ -3559,8 +3922,7 @@ ASTReader::findBeginPreprocessedEntity(SourceLocation BLoc) const {
if (PPI == pp_end)
return findNextPreprocessedEntity(SLocMapI);
- return getGlobalPreprocessedEntityID(M,
- M.BasePreprocessedEntityID + (PPI - pp_begin));
+ return M.BasePreprocessedEntityID + (PPI - pp_begin);
}
/// \brief Returns the first preprocessed entity ID that begins after \arg ELoc.
@@ -3589,8 +3951,7 @@ ASTReader::findEndPreprocessedEntity(SourceLocation ELoc) const {
if (PPI == pp_end)
return findNextPreprocessedEntity(SLocMapI);
- return getGlobalPreprocessedEntityID(M,
- M.BasePreprocessedEntityID + (PPI - pp_begin));
+ return M.BasePreprocessedEntityID + (PPI - pp_begin);
}
/// \brief Returns a pair of [Begin, End) indices of preallocated
@@ -3681,14 +4042,31 @@ HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
}
void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
+ // FIXME: Make it work properly with modules.
+ llvm::SmallVector<DiagnosticsEngine::DiagState *, 32> DiagStates;
for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
ModuleFile &F = *(*I);
unsigned Idx = 0;
+ DiagStates.clear();
+ assert(!Diag.DiagStates.empty());
+ DiagStates.push_back(&Diag.DiagStates.front()); // the command-line one.
while (Idx < F.PragmaDiagMappings.size()) {
SourceLocation Loc = ReadSourceLocation(F, F.PragmaDiagMappings[Idx++]);
+ unsigned DiagStateID = F.PragmaDiagMappings[Idx++];
+ if (DiagStateID != 0) {
+ Diag.DiagStatePoints.push_back(
+ DiagnosticsEngine::DiagStatePoint(DiagStates[DiagStateID-1],
+ FullSourceLoc(Loc, SourceMgr)));
+ continue;
+ }
+
+ assert(DiagStateID == 0);
+ // A new DiagState was created here.
Diag.DiagStates.push_back(*Diag.GetCurDiagState());
+ DiagnosticsEngine::DiagState *NewState = &Diag.DiagStates.back();
+ DiagStates.push_back(NewState);
Diag.DiagStatePoints.push_back(
- DiagnosticsEngine::DiagStatePoint(&Diag.DiagStates.back(),
+ DiagnosticsEngine::DiagStatePoint(NewState,
FullSourceLoc(Loc, SourceMgr)));
while (1) {
assert(Idx < F.PragmaDiagMappings.size() &&
@@ -4258,6 +4636,8 @@ void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
}
void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
TL.setLocalRangeBegin(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx));
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) {
TL.setArg(i, ReadDeclAs<ParmVarDecl>(Record, Idx));
@@ -4467,6 +4847,10 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_VA_LIST_TAG:
T = Context.getVaListTagType();
break;
+
+ case PREDEF_TYPE_BUILTIN_FN:
+ T = Context.BuiltinFnTy;
+ break;
}
assert(!T.isNull() && "Unknown predefined type");
@@ -4537,7 +4921,9 @@ ASTReader::GetTemplateArgumentLocInfo(ModuleFile &F,
case TemplateArgument::Null:
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
case TemplateArgument::Pack:
+ // FIXME: Is this right?
return TemplateArgumentLocInfo();
}
llvm_unreachable("unexpected template argument loc");
@@ -4593,7 +4979,7 @@ CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
}
serialization::DeclID
-ASTReader::getGlobalDeclID(ModuleFile &F, unsigned LocalID) const {
+ASTReader::getGlobalDeclID(ModuleFile &F, LocalDeclID LocalID) const {
if (LocalID < NUM_PREDEF_DECL_IDS)
return LocalID;
@@ -4930,8 +5316,10 @@ namespace {
continue;
if (ND->getDeclName() != This->Name) {
- assert(!This->Name.getCXXNameType().isNull() &&
- "Name mismatch without a type");
+      // A name mismatch can occur because a decl's redeclarable part is
+      // read before its name. In that case the lookup was triggered while
+      // building that decl (likely indirectly), so relative to the
+      // "already existing" decls it comes later and can be ignored here.
continue;
}
@@ -5132,13 +5520,15 @@ void ASTReader::PrintStats() {
= IdentifiersLoaded.size() - std::count(IdentifiersLoaded.begin(),
IdentifiersLoaded.end(),
(IdentifierInfo *)0);
+ unsigned NumMacrosLoaded
+ = MacrosLoaded.size() - std::count(MacrosLoaded.begin(),
+ MacrosLoaded.end(),
+ (MacroInfo *)0);
unsigned NumSelectorsLoaded
= SelectorsLoaded.size() - std::count(SelectorsLoaded.begin(),
SelectorsLoaded.end(),
Selector());
- std::fprintf(stderr, " %u stat cache hits\n", NumStatHits);
- std::fprintf(stderr, " %u stat cache misses\n", NumStatMisses);
if (unsigned TotalNumSLocEntries = getTotalNumSLocs())
std::fprintf(stderr, " %u/%u source location entries read (%f%%)\n",
NumSLocEntriesRead, TotalNumSLocEntries,
@@ -5155,6 +5545,10 @@ void ASTReader::PrintStats() {
std::fprintf(stderr, " %u/%u identifiers read (%f%%)\n",
NumIdentifiersLoaded, (unsigned)IdentifiersLoaded.size(),
((float)NumIdentifiersLoaded/IdentifiersLoaded.size() * 100));
+ if (!MacrosLoaded.empty())
+ std::fprintf(stderr, " %u/%u macros read (%f%%)\n",
+ NumMacrosLoaded, (unsigned)MacrosLoaded.size(),
+ ((float)NumMacrosLoaded/MacrosLoaded.size() * 100));
if (!SelectorsLoaded.empty())
std::fprintf(stderr, " %u/%u selectors read (%f%%)\n",
NumSelectorsLoaded, (unsigned)SelectorsLoaded.size(),
@@ -5213,6 +5607,7 @@ void ASTReader::dump() {
dumpModuleIDMap("Global type map", GlobalTypeMap);
dumpModuleIDMap("Global declaration map", GlobalDeclMap);
dumpModuleIDMap("Global identifier map", GlobalIdentifierMap);
+ dumpModuleIDMap("Global macro map", GlobalMacroMap);
dumpModuleIDMap("Global submodule map", GlobalSubmoduleMap);
dumpModuleIDMap("Global selector map", GlobalSelectorMap);
dumpModuleIDMap("Global preprocessed entity map",
@@ -5246,7 +5641,7 @@ void ASTReader::getMemoryBufferSizes(MemoryBufferSizes &sizes) const {
void ASTReader::InitializeSema(Sema &S) {
SemaObj = &S;
- S.ExternalSource = this;
+ S.addExternalSource(this);
// Makes sure any declarations that were deserialized "too early"
// still get added to the identifier's declaration chains.
@@ -5281,6 +5676,9 @@ void ASTReader::InitializeSema(Sema &S) {
}
IdentifierInfo* ASTReader::get(const char *NameStart, const char *NameEnd) {
+ // Note that we are loading an identifier.
+ Deserializing AnIdentifier(this);
+
IdentifierLookupVisitor Visitor(StringRef(NameStart, NameEnd - NameStart),
/*PriorGeneration=*/0);
ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor);
@@ -5570,6 +5968,7 @@ void ASTReader::ReadPendingInstantiations(
ValueDecl *D = cast<ValueDecl>(GetDecl(PendingInstantiations[Idx++]));
SourceLocation Loc
= SourceLocation::getFromRawEncoding(PendingInstantiations[Idx++]);
+
Pending.push_back(std::make_pair(D, Loc));
}
PendingInstantiations.clear();
@@ -5682,8 +6081,37 @@ IdentifierID ASTReader::getGlobalIdentifierID(ModuleFile &M, unsigned LocalID) {
return LocalID + I->second;
}
-bool ASTReader::ReadSLocEntry(int ID) {
- return ReadSLocEntryRecord(ID) != Success;
+MacroInfo *ASTReader::getMacro(MacroID ID, MacroInfo *Hint) {
+ if (ID == 0)
+ return 0;
+
+ if (MacrosLoaded.empty()) {
+ Error("no macro table in AST file");
+ return 0;
+ }
+
+ ID -= NUM_PREDEF_MACRO_IDS;
+ if (!MacrosLoaded[ID]) {
+ GlobalMacroMapType::iterator I
+ = GlobalMacroMap.find(ID + NUM_PREDEF_MACRO_IDS);
+ assert(I != GlobalMacroMap.end() && "Corrupted global macro map");
+ ModuleFile *M = I->second;
+ unsigned Index = ID - M->BaseMacroID;
+ ReadMacroRecord(*M, M->MacroOffsets[Index], Hint);
+ }
+
+ return MacrosLoaded[ID];
+}
+
+MacroID ASTReader::getGlobalMacroID(ModuleFile &M, unsigned LocalID) {
+ if (LocalID < NUM_PREDEF_MACRO_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.MacroRemap.find(LocalID - NUM_PREDEF_MACRO_IDS);
+ assert(I != M.MacroRemap.end() && "Invalid index into macro index remap");
+
+ return LocalID + I->second;
}
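// Editorial note: a worked example of the remap above. When MACRO_OFFSET was
// read, the module recorded the signed offset (F.BaseMacroID -
// LocalBaseMacroID) keyed by its first local macro ID. Assuming
// NUM_PREDEF_MACRO_IDS == 1, LocalBaseMacroID == 1, and BaseMacroID == 500
// assigned at load time:
//   stored offset          = 500 - 1 = 499
//   getGlobalMacroID(M, 7) = 7 + 499 = 506
// ContinuousRangeMap::find returns the entry whose start key is the largest
// one not exceeding the query, so a single (start, offset) pair rebases a
// module's whole contiguous run of local IDs.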
serialization::SubmoduleID
@@ -5694,7 +6122,7 @@ ASTReader::getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID) {
ContinuousRangeMap<uint32_t, int, 2>::iterator I
= M.SubmoduleRemap.find(LocalID - NUM_PREDEF_SUBMODULE_IDS);
assert(I != M.SubmoduleRemap.end()
- && "Invalid index into identifier index remap");
+ && "Invalid index into submodule index remap");
return LocalID + I->second;
}
@@ -5759,7 +6187,7 @@ ASTReader::getGlobalSelectorID(ModuleFile &M, unsigned LocalID) const {
ContinuousRangeMap<uint32_t, int, 2>::iterator I
= M.SelectorRemap.find(LocalID - NUM_PREDEF_SELECTOR_IDS);
assert(I != M.SelectorRemap.end()
- && "Invalid index into identifier index remap");
+ && "Invalid index into selector index remap");
return LocalID + I->second;
}
@@ -5926,8 +6354,13 @@ ASTReader::ReadTemplateArgument(ModuleFile &F,
return TemplateArgument();
case TemplateArgument::Type:
return TemplateArgument(readType(F, Record, Idx));
- case TemplateArgument::Declaration:
- return TemplateArgument(ReadDecl(F, Record, Idx));
+ case TemplateArgument::Declaration: {
+ ValueDecl *D = ReadDeclAs<ValueDecl>(F, Record, Idx);
+ bool ForReferenceParam = Record[Idx++];
+ return TemplateArgument(D, ForReferenceParam);
+ }
+ case TemplateArgument::NullPtr:
+ return TemplateArgument(readType(F, Record, Idx), /*isNullPtr*/true);
case TemplateArgument::Integral: {
llvm::APSInt Value = ReadAPSInt(Record, Idx);
QualType T = readType(F, Record, Idx);
@@ -6340,7 +6773,8 @@ void ASTReader::ReadComments() {
}
void ASTReader::finishPendingActions() {
- while (!PendingIdentifierInfos.empty() || !PendingDeclChains.empty()) {
+ while (!PendingIdentifierInfos.empty() || !PendingDeclChains.empty() ||
+ !PendingMacroIDs.empty()) {
// If any identifiers with corresponding top-level declarations have
// been loaded, load those declarations now.
while (!PendingIdentifierInfos.empty()) {
@@ -6355,6 +6789,18 @@ void ASTReader::finishPendingActions() {
PendingDeclChainsKnown.erase(PendingDeclChains[I]);
}
PendingDeclChains.clear();
+
+ // Load any pending macro definitions.
+ for (unsigned I = 0; I != PendingMacroIDs.size(); ++I) {
+ // FIXME: std::move here
+ SmallVector<MacroID, 2> GlobalIDs = PendingMacroIDs.begin()[I].second;
+ MacroInfo *Hint = 0;
+ for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
+ ++IDIdx) {
+ Hint = getMacro(GlobalIDs[IDIdx], Hint);
+ }
+ }
+ PendingMacroIDs.clear();
}
// If we deserialized any C++ or Objective-C class definitions, any
@@ -6408,9 +6854,29 @@ void ASTReader::finishPendingActions() {
for (RedeclarableTemplateDecl::redecl_iterator R = RTD->redecls_begin(),
REnd = RTD->redecls_end();
R != REnd; ++R)
- R->Common = RTD->Common;
+ R->Common = RTD->Common;
}
PendingDefinitions.clear();
+
+ // Load the bodies of any functions or methods we've encountered. We do
+ // this now (delayed) so that we can be sure that the declaration chains
+ // have been fully wired up.
+ for (PendingBodiesMap::iterator PB = PendingBodies.begin(),
+ PBEnd = PendingBodies.end();
+ PB != PBEnd; ++PB) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(PB->first)) {
+ // FIXME: Check for =delete/=default?
+ // FIXME: Complain about ODR violations here?
+ if (!getContext().getLangOpts().Modules || !FD->hasBody())
+ FD->setLazyBody(PB->second);
+ continue;
+ }
+
+ ObjCMethodDecl *MD = cast<ObjCMethodDecl>(PB->first);
+ if (!getContext().getLangOpts().Modules || !MD->hasBody())
+ MD->setLazyBody(PB->second);
+ }
+ PendingBodies.clear();
}
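// Editorial note: the record-then-drain shape used above, reduced to its
// essentials. While decls stream in, ASTDeclReader only notes where each
// body lives (see the PendingBodies insertions in ASTReaderDecl.cpp below);
// finishPendingActions() then attaches the offsets once redeclaration chains
// are fully wired, and setLazyBody() defers the actual deserialization until
// a body is first requested:
//   PendingBodies[FD] = GetCurrentCursorOffset(); // during deserialization
//   ...
//   FD->setLazyBody(PB->second);                  // here, once chains exist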
void ASTReader::FinishedDeserializing() {
@@ -6442,17 +6908,14 @@ void ASTReader::FinishedDeserializing() {
ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
StringRef isysroot, bool DisableValidation,
- bool DisableStatCache, bool AllowASTWithCompilerErrors)
+ bool AllowASTWithCompilerErrors)
: Listener(new PCHValidator(PP, *this)), DeserializationListener(0),
SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
Diags(PP.getDiagnostics()), SemaObj(0), PP(PP), Context(Context),
- Consumer(0), ModuleMgr(FileMgr.getFileSystemOptions()),
- RelocatablePCH(false), isysroot(isysroot),
- DisableValidation(DisableValidation),
- DisableStatCache(DisableStatCache),
+ Consumer(0), ModuleMgr(PP.getFileManager()),
+ isysroot(isysroot), DisableValidation(DisableValidation),
AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
CurrentGeneration(0), CurrSwitchCaseStmts(&SwitchCaseStmts),
- NumStatHits(0), NumStatMisses(0),
NumSLocEntriesRead(0), TotalNumSLocEntries(0),
NumStatementsRead(0), TotalNumStatements(0), NumMacrosRead(0),
TotalNumMacros(0), NumSelectorsRead(0), NumMethodPoolEntriesRead(0),
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
index cb21f82..c42944d 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -37,7 +37,6 @@ namespace clang {
class ASTDeclReader : public DeclVisitor<ASTDeclReader, void> {
ASTReader &Reader;
ModuleFile &F;
- llvm::BitstreamCursor &Cursor;
const DeclID ThisDeclID;
const unsigned RawLocation;
typedef ASTReader::RecordData RecordData;
@@ -48,6 +47,8 @@ namespace clang {
DeclID DeclContextIDForTemplateParmDecl;
DeclID LexicalDeclContextIDForTemplateParmDecl;
+ bool HasPendingBody;
+
uint64_t GetCurrentCursorOffset();
SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) {
@@ -116,7 +117,7 @@ namespace clang {
GlobalDeclID FirstID;
mutable bool Owning;
- RedeclarableResult &operator=(RedeclarableResult&); // DO NOT IMPLEMENT
+ void operator=(RedeclarableResult &) LLVM_DELETED_FUNCTION;
public:
RedeclarableResult(ASTReader &Reader, GlobalDeclID FirstID)
@@ -162,7 +163,7 @@ namespace clang {
NamedDecl *Existing;
mutable bool AddResult;
- FindExistingResult &operator=(FindExistingResult&); // DO NOT IMPLEMENT
+ void operator=(FindExistingResult&) LLVM_DELETED_FUNCTION;
public:
FindExistingResult(ASTReader &Reader)
@@ -194,16 +195,19 @@ namespace clang {
public:
ASTDeclReader(ASTReader &Reader, ModuleFile &F,
- llvm::BitstreamCursor &Cursor, DeclID thisDeclID,
+ DeclID thisDeclID,
unsigned RawLocation,
const RecordData &Record, unsigned &Idx)
- : Reader(Reader), F(F), Cursor(Cursor), ThisDeclID(thisDeclID),
+ : Reader(Reader), F(F), ThisDeclID(thisDeclID),
RawLocation(RawLocation), Record(Record), Idx(Idx),
- TypeIDForTypeDecl(0) { }
+ TypeIDForTypeDecl(0), HasPendingBody(false) { }
static void attachPreviousDecl(Decl *D, Decl *previous);
static void attachLatestDecl(Decl *D, Decl *latest);
+ /// \brief Determine whether this declaration has a pending body.
+ bool hasPendingBody() const { return HasPendingBody; }
+
void Visit(Decl *D);
void UpdateDecl(Decl *D, ModuleFile &ModuleFile,
@@ -321,8 +325,14 @@ void ASTDeclReader::Visit(Decl *D) {
ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull();
} else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// FunctionDecl's body was written last after all other Stmts/Exprs.
- if (Record[Idx++])
- FD->setLazyBody(GetCurrentCursorOffset());
+ // We only read it if FD doesn't already have a body (e.g., from another
+ // module).
+ // FIXME: Also consider = default and = delete.
+ // FIXME: Can we diagnose ODR violations somehow?
+ if (Record[Idx++]) {
+ Reader.PendingBodies[FD] = GetCurrentCursorOffset();
+ HasPendingBody = true;
+ }
} else if (D->isTemplateParameter()) {
// If we have a fully initialized template parameter, we can now
// set its DeclContext.
@@ -590,8 +600,10 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
TemplArgs.size(), C);
void *InsertPos = 0;
CanonTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
- assert(InsertPos && "Another specialization already inserted!");
- CanonTemplate->getSpecializations().InsertNode(FTInfo, InsertPos);
+ if (InsertPos)
+ CanonTemplate->getSpecializations().InsertNode(FTInfo, InsertPos);
+ else
+ assert(0 && "Another specialization already inserted!");
}
break;
}
@@ -628,19 +640,16 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
VisitNamedDecl(MD);
if (Record[Idx++]) {
- // In practice, this won't be executed (since method definitions
- // don't occur in header files).
- // Switch case IDs for this method body.
- ASTReader::SwitchCaseMapTy SwitchCaseStmtsForObjCMethod;
- SaveAndRestore<ASTReader::SwitchCaseMapTy *>
- SCFOM(Reader.CurrSwitchCaseStmts, &SwitchCaseStmtsForObjCMethod);
- MD->setBody(Reader.ReadStmt(F));
+ // Load the body on-demand. Most clients won't care, because method
+ // definitions rarely show up in headers.
+ Reader.PendingBodies[MD] = GetCurrentCursorOffset();
+ HasPendingBody = true;
MD->setSelfDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
MD->setCmdDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
}
MD->setInstanceMethod(Record[Idx++]);
MD->setVariadic(Record[Idx++]);
- MD->setSynthesized(Record[Idx++]);
+ MD->setPropertyAccessor(Record[Idx++]);
MD->setDefined(Record[Idx++]);
MD->IsOverriding = Record[Idx++];
@@ -846,6 +855,8 @@ void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
D->setSuperClass(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
D->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
+ D->setHasNonZeroConstructors(Record[Idx++]);
+ D->setHasDestructors(Record[Idx++]);
llvm::tie(D->IvarInitializers, D->NumIvarInitializers)
= Reader.ReadCXXCtorInitializers(F, Record, Idx);
}
@@ -897,7 +908,8 @@ void ASTDeclReader::VisitVarDecl(VarDecl *VD) {
VD->VarDeclBits.NRVOVariable = Record[Idx++];
VD->VarDeclBits.CXXForRangeDecl = Record[Idx++];
VD->VarDeclBits.ARCPseudoStrong = Record[Idx++];
-
+ VD->VarDeclBits.IsConstexpr = Record[Idx++];
+
// Only true variables (not parameters or implicit parameters) can be merged.
if (VD->getKind() == Decl::Var)
mergeRedeclarable(VD, Redecl);
@@ -1135,6 +1147,7 @@ void ASTDeclReader::ReadCXXDefinitionData(
Lambda.Captures
= (Capture*)Reader.Context.Allocate(sizeof(Capture)*Lambda.NumCaptures);
Capture *ToCapture = Lambda.Captures;
+ Lambda.MethodTyInfo = GetTypeSourceInfo(Record, Idx);
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
SourceLocation Loc = ReadSourceLocation(Record, Idx);
bool IsImplicit = Record[Idx++];
@@ -1155,7 +1168,8 @@ void ASTDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) {
// allocate the appropriate DefinitionData structure.
bool IsLambda = Record[Idx++];
if (IsLambda)
- D->DefinitionData = new (C) CXXRecordDecl::LambdaDefinitionData(D, false);
+ D->DefinitionData = new (C) CXXRecordDecl::LambdaDefinitionData(D, 0,
+ false);
else
D->DefinitionData = new (C) struct CXXRecordDecl::DefinitionData(D);
@@ -1242,6 +1256,7 @@ void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(D + 1);
for (unsigned I = 0, N = Record.back(); I != N; ++I)
StoredLocs[I] = ReadSourceLocation(Record, Idx);
+ ++Idx; // The number of stored source locations.
}
void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) {
@@ -1308,10 +1323,12 @@ ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
D->setMemberSpecialization();
}
}
-
+
VisitTemplateDecl(D);
D->IdentifierNamespace = Record[Idx++];
-
+
+ mergeRedeclarable(D, Redecl);
+
return Redecl;
}
@@ -1336,10 +1353,10 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
for (unsigned I = 0; I != Size; ++I)
SpecIDs.push_back(ReadDeclID(Record, Idx));
+ ClassTemplateDecl::Common *CommonPtr = D->getCommonPtr();
if (SpecIDs[0]) {
typedef serialization::DeclID DeclID;
- ClassTemplateDecl::Common *CommonPtr = D->getCommonPtr();
// FIXME: Append specializations!
CommonPtr->LazySpecializations
= new (Reader.getContext()) DeclID [SpecIDs.size()];
@@ -1347,7 +1364,7 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
SpecIDs.size() * sizeof(DeclID));
}
- // InjectedClassNameType is computed.
+ CommonPtr->InjectedClassNameType = Reader.readType(F, Record, Idx);
}
}
@@ -1391,14 +1408,17 @@ void ASTDeclReader::VisitClassTemplateSpecializationDecl(
TemplArgs.size());
D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
D->SpecializationKind = (TemplateSpecializationKind)Record[Idx++];
-
- if (D->isCanonicalDecl()) { // It's kept in the folding set.
+
+ bool writtenAsCanonicalDecl = Record[Idx++];
+ if (writtenAsCanonicalDecl) {
ClassTemplateDecl *CanonPattern = ReadDeclAs<ClassTemplateDecl>(Record,Idx);
- if (ClassTemplatePartialSpecializationDecl *Partial
- = dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
- CanonPattern->getCommonPtr()->PartialSpecializations.InsertNode(Partial);
- } else {
- CanonPattern->getCommonPtr()->Specializations.InsertNode(D);
+ if (D->isCanonicalDecl()) { // It's kept in the folding set.
+ if (ClassTemplatePartialSpecializationDecl *Partial
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
+ CanonPattern->getCommonPtr()->PartialSpecializations.GetOrInsertNode(Partial);
+ } else {
+ CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D);
+ }
}
}
}
@@ -1486,11 +1506,18 @@ void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
// TemplateParmPosition.
D->setDepth(Record[Idx++]);
D->setPosition(Record[Idx++]);
- // Rest of TemplateTemplateParmDecl.
- TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(F, Record, Idx);
- bool IsInherited = Record[Idx++];
- D->setDefaultArgument(Arg, IsInherited);
- D->ParameterPack = Record[Idx++];
+ if (D->isExpandedParameterPack()) {
+ void **Data = reinterpret_cast<void **>(D + 1);
+ for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
+ I != N; ++I)
+ Data[I] = Reader.ReadTemplateParameterList(F, Record, Idx);
+ } else {
+ // Rest of TemplateTemplateParmDecl.
+ TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(F, Record, Idx);
+ bool IsInherited = Record[Idx++];
+ D->setDefaultArgument(Arg, IsInherited);
+ D->ParameterPack = Record[Idx++];
+ }
}
void ASTDeclReader::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
@@ -1640,7 +1667,7 @@ inline void ASTReader::LoadedDecl(unsigned Index, Decl *D) {
/// This routine should return true for anything that might affect
/// code generation, e.g., inline function definitions, Objective-C
/// declarations with metadata, etc.
-static bool isConsumerInterestedIn(Decl *D) {
+static bool isConsumerInterestedIn(Decl *D, bool HasBody) {
// An ObjCMethodDecl is never considered as "interesting" because its
// implementation container always is.
@@ -1652,7 +1679,7 @@ static bool isConsumerInterestedIn(Decl *D) {
return Var->isFileVarDecl() &&
Var->isThisDeclarationADefinition() == VarDecl::Definition;
if (FunctionDecl *Func = dyn_cast<FunctionDecl>(D))
- return Func->doesThisDeclarationHaveABody();
+ return Func->doesThisDeclarationHaveABody() || HasBody;
return false;
}
@@ -1719,8 +1746,10 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
if (TagDecl *TagX = dyn_cast<TagDecl>(X)) {
TagDecl *TagY = cast<TagDecl>(Y);
return (TagX->getTagKind() == TagY->getTagKind()) ||
- ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class) &&
- (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class));
+ ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class ||
+ TagX->getTagKind() == TTK_Interface) &&
+ (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class ||
+ TagY->getTagKind() == TTK_Interface));
}
// Functions with the same type and linkage match.
@@ -1731,7 +1760,7 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
return (FuncX->getLinkage() == FuncY->getLinkage()) &&
FuncX->getASTContext().hasSameType(FuncX->getType(), FuncY->getType());
}
-
+
// Variables with the same type and linkage match.
if (VarDecl *VarX = dyn_cast<VarDecl>(X)) {
VarDecl *VarY = cast<VarDecl>(Y);
@@ -1744,7 +1773,11 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
NamespaceDecl *NamespaceY = cast<NamespaceDecl>(Y);
return NamespaceX->isInline() == NamespaceY->isInline();
}
-
+
+ // Identical template names and kinds match.
+ if (isa<TemplateDecl>(X))
+ return true;
+
// FIXME: Many other cases to implement.
return false;
}
@@ -1753,11 +1786,13 @@ ASTDeclReader::FindExistingResult::~FindExistingResult() {
if (!AddResult || Existing)
return;
- DeclContext *DC = New->getDeclContext()->getRedeclContext();
- if (DC->isTranslationUnit() && Reader.SemaObj) {
+ if (New->getDeclContext()->getRedeclContext()->isTranslationUnit()
+ && Reader.SemaObj) {
Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, New->getDeclName());
- } else if (DC->isNamespace()) {
- DC->addDecl(New);
+ } else {
+ DeclContext *DC = New->getLexicalDeclContext();
+ if (DC->isNamespace())
+ DC->addDecl(New);
}
}
@@ -1899,7 +1934,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
RecordData Record;
unsigned Code = DeclsCursor.ReadCode();
unsigned Idx = 0;
- ASTDeclReader Reader(*this, *Loc.F, DeclsCursor, ID, RawLocation, Record,Idx);
+ ASTDeclReader Reader(*this, *Loc.F, ID, RawLocation, Record,Idx);
Decl *D = 0;
switch ((DeclCode)DeclsCursor.ReadRecord(Code, Record)) {
@@ -2002,6 +2037,10 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_TEMPLATE_TEMPLATE_PARM:
D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK:
+ D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID,
+ Record[Idx++]);
+ break;
case DECL_TYPE_ALIAS_TEMPLATE:
D = TypeAliasTemplateDecl::CreateDeserialized(Context, ID);
break;
@@ -2110,6 +2149,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
}
PendingVisibleUpdates.erase(I);
}
+
+ if (!DC->hasExternalVisibleStorage() && DC->hasExternalLexicalStorage())
+ DC->setMustBuildLookupTable();
}
assert(Idx == Record.size());
@@ -2125,9 +2167,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
// AST consumer might need to know about, queue it.
// We don't pass it to the consumer immediately because we may be in recursive
// loading, and some declarations may still be initializing.
- if (isConsumerInterestedIn(D))
+ if (isConsumerInterestedIn(D, Reader.hasPendingBody()))
InterestingDecls.push_back(D);
-
+
return D;
}
@@ -2152,7 +2194,7 @@ void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");
unsigned Idx = 0;
- ASTDeclReader Reader(*this, *F, Cursor, ID, 0, Record, Idx);
+ ASTDeclReader Reader(*this, *F, ID, 0, Record, Idx);
Reader.UpdateDecl(D, *F, Record);
}
}
@@ -2207,10 +2249,8 @@ namespace {
if (!D)
return;
- if (Deserialized.count(D)) {
- Deserialized.erase(D);
+ if (Deserialized.erase(D))
Chain.push_back(D);
- }
}
void searchForID(ModuleFile &M, GlobalDeclID GlobalID) {
@@ -2275,7 +2315,7 @@ void ASTReader::loadPendingDeclChain(serialization::GlobalDeclID ID) {
}
MergedDeclsMap::iterator MergedPos = combineStoredMergedDecls(CanonDecl, ID);
if (MergedPos != MergedDecls.end())
- SearchDecls.append(MergedPos->second.begin(), MergedPos->second.end());
+ SearchDecls.append(MergedPos->second.begin(), MergedPos->second.end());
// Build up the list of redeclarations.
RedeclChainVisitor Visitor(*this, SearchDecls, RedeclsDeserialized, CanonID);
@@ -2331,9 +2371,8 @@ namespace {
void add(ObjCCategoryDecl *Cat) {
// Only process each category once.
- if (!Deserialized.count(Cat))
+ if (!Deserialized.erase(Cat))
return;
- Deserialized.erase(Cat);
// Check for duplicate categories.
if (Cat->getDeclName()) {
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
index c5325b5..367f75f 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -288,7 +288,7 @@ void ASTStmtReader::VisitDeclStmt(DeclStmt *S) {
}
}
-void ASTStmtReader::VisitAsmStmt(AsmStmt *S) {
+void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) {
VisitStmt(S);
unsigned NumOutputs = Record[Idx++];
unsigned NumInputs = Record[Idx++];
@@ -297,7 +297,6 @@ void ASTStmtReader::VisitAsmStmt(AsmStmt *S) {
S->setRParenLoc(ReadSourceLocation(Record, Idx));
S->setVolatile(Record[Idx++]);
S->setSimple(Record[Idx++]);
- S->setMSAsm(Record[Idx++]);
S->setAsmString(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
@@ -566,6 +565,7 @@ void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
E->setRHS(Reader.ReadSubExpr());
E->setOpcode((BinaryOperator::Opcode)Record[Idx++]);
E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setFPContractable((bool)Record[Idx++]);
}
void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
@@ -627,7 +627,8 @@ void ASTStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
VisitExpr(E);
- E->setSyntacticForm(cast_or_null<InitListExpr>(Reader.ReadSubStmt()));
+ if (InitListExpr *SyntForm = cast_or_null<InitListExpr>(Reader.ReadSubStmt()))
+ E->setSyntacticForm(SyntForm);
E->setLBraceLoc(ReadSourceLocation(Record, Idx));
E->setRBraceLoc(ReadSourceLocation(Record, Idx));
bool isArrayFiller = Record[Idx++];
@@ -1087,6 +1088,7 @@ void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
E->Operator = (OverloadedOperatorKind)Record[Idx++];
E->Range = Reader.ReadSourceRange(F, Record, Idx);
+ E->setFPContractable((bool)Record[Idx++]);
}
void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
@@ -1242,7 +1244,7 @@ void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
E->setOperatorDelete(ReadDeclAs<FunctionDecl>(Record, Idx));
E->AllocatedTypeInfo = GetTypeSourceInfo(Record, Idx);
E->TypeIdParens = ReadSourceRange(Record, Idx);
- E->StartLoc = ReadSourceLocation(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
E->DirectInitRange = ReadSourceRange(Record, Idx);
E->AllocateArgsArray(Reader.getContext(), isArray, NumPlacementArgs,
@@ -1367,8 +1369,6 @@ void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
VisitOverloadExpr(E);
E->RequiresADL = Record[Idx++];
- if (E->RequiresADL)
- E->StdIsAssociatedNamespace = Record[Idx++];
E->Overloaded = Record[Idx++];
E->NamingClass = ReadDeclAs<CXXRecordDecl>(Record, Idx);
}
@@ -1469,6 +1469,16 @@ void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr(
E->NameLoc = ReadSourceLocation(Record, Idx);
}
+void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ VisitExpr(E);
+ E->NumParameters = Record[Idx++];
+ E->ParamPack = ReadDeclAs<ParmVarDecl>(Record, Idx);
+ E->NameLoc = ReadSourceLocation(Record, Idx);
+ ParmVarDecl **Parms = reinterpret_cast<ParmVarDecl**>(E+1);
+ for (unsigned i = 0, n = E->NumParameters; i != n; ++i)
+ Parms[i] = ReadDeclAs<ParmVarDecl>(Record, Idx);
+}
+
void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
VisitExpr(E);
E->Temporary = Reader.ReadSubExpr();
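[Note: VisitFunctionParmPackExpr above pulls the parameter array out of memory placed directly after the expression object (reinterpret_cast<ParmVarDecl**>(E+1)), i.e. the object and its variable-length array share one allocation. A minimal sketch of that trailing-array pattern, using illustrative types, not Clang's:]

    #include <new>

    struct alignas(void *) PackNode { // over-align so this + 1 is pointer-aligned
      unsigned NumElts = 0;
      // The element array lives immediately after the object itself.
      int **elts() { return reinterpret_cast<int **>(this + 1); }

      static PackNode *create(unsigned N) {
        void *Mem = ::operator new(sizeof(PackNode) + N * sizeof(int *));
        PackNode *P = new (Mem) PackNode;
        P->NumElts = N;
        return P;
      }
    };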
@@ -1701,8 +1711,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) DeclStmt(Empty);
break;
- case STMT_ASM:
- S = new (Context) AsmStmt(Empty);
+ case STMT_GCCASM:
+ S = new (Context) GCCAsmStmt(Empty);
+ break;
+
+ case STMT_MSASM:
+ S = new (Context) MSAsmStmt(Empty);
break;
case EXPR_PREDEFINED:
@@ -2180,6 +2194,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK:
S = new (Context) SubstNonTypeTemplateParmPackExpr(Empty);
break;
+
+ case EXPR_FUNCTION_PARM_PACK:
+ S = FunctionParmPackExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
case EXPR_MATERIALIZE_TEMPORARY:
S = new (Context) MaterializeTemporaryExpr(Empty);
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
index 425d2e3..a2e8b71 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
@@ -25,9 +25,11 @@
#include "clang/AST/Type.h"
#include "clang/AST/TypeLocVisitor.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemStatCache.h"
@@ -35,6 +37,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/Version.h"
#include "clang/Basic/VersionTuple.h"
#include "llvm/ADT/APFloat.h"
@@ -485,6 +488,8 @@ void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
}
void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
Writer.AddSourceLocation(TL.getLocalRangeBegin(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
Writer.AddSourceLocation(TL.getLocalRangeEnd(), Record);
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
Writer.AddDeclRef(TL.getArg(i), Record);
@@ -667,7 +672,8 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(STMT_BREAK);
RECORD(STMT_RETURN);
RECORD(STMT_DECL);
- RECORD(STMT_ASM);
+ RECORD(STMT_GCCASM);
+ RECORD(STMT_MSASM);
RECORD(EXPR_PREDEFINED);
RECORD(EXPR_DECL_REF);
RECORD(EXPR_INTEGER_LITERAL);
@@ -763,14 +769,27 @@ void ASTWriter::WriteBlockInfoBlock() {
#define BLOCK(X) EmitBlockID(X ## _ID, #X, Stream, Record)
#define RECORD(X) EmitRecordID(X, #X, Stream, Record)
+ // Control Block.
+ BLOCK(CONTROL_BLOCK);
+ RECORD(METADATA);
+ RECORD(IMPORTS);
+ RECORD(LANGUAGE_OPTIONS);
+ RECORD(TARGET_OPTIONS);
+ RECORD(ORIGINAL_FILE);
+ RECORD(ORIGINAL_PCH_DIR);
+ RECORD(INPUT_FILE_OFFSETS);
+ RECORD(DIAGNOSTIC_OPTIONS);
+ RECORD(FILE_SYSTEM_OPTIONS);
+ RECORD(HEADER_SEARCH_OPTIONS);
+ RECORD(PREPROCESSOR_OPTIONS);
+
+ BLOCK(INPUT_FILES_BLOCK);
+ RECORD(INPUT_FILE);
+
// AST Top-Level Block.
BLOCK(AST_BLOCK);
- RECORD(ORIGINAL_FILE_NAME);
- RECORD(ORIGINAL_FILE_ID);
RECORD(TYPE_OFFSET);
RECORD(DECL_OFFSET);
- RECORD(LANGUAGE_OPTIONS);
- RECORD(METADATA);
RECORD(IDENTIFIER_OFFSET);
RECORD(IDENTIFIER_TABLE);
RECORD(EXTERNAL_DEFINITIONS);
@@ -784,11 +803,8 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(PP_COUNTER_VALUE);
RECORD(SOURCE_LOCATION_OFFSETS);
RECORD(SOURCE_LOCATION_PRELOADS);
- RECORD(STAT_CACHE);
RECORD(EXT_VECTOR_DECLS);
- RECORD(VERSION_CONTROL_BRANCH_REVISION);
RECORD(PPD_ENTITIES_OFFSETS);
- RECORD(IMPORTS);
RECORD(REFERENCED_SELECTOR_POOL);
RECORD(TU_UPDATE_LEXICAL);
RECORD(LOCAL_REDECLARATIONS_MAP);
@@ -803,11 +819,9 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DIAG_PRAGMA_MAPPINGS);
RECORD(CUDA_SPECIAL_DECL_REFS);
RECORD(HEADER_SEARCH_TABLE);
- RECORD(ORIGINAL_PCH_DIR);
RECORD(FP_PRAGMA_OPTIONS);
RECORD(OPENCL_EXTENSIONS);
RECORD(DELEGATING_CTORS);
- RECORD(FILE_SOURCE_LOCATION_OFFSETS);
RECORD(KNOWN_NAMESPACES);
RECORD(MODULE_OFFSET_MAP);
RECORD(SOURCE_MANAGER_LINE_TABLE);
@@ -817,6 +831,8 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(MERGED_DECLARATIONS);
RECORD(LOCAL_REDECLARATIONS);
RECORD(OBJC_CATEGORIES);
+ RECORD(MACRO_OFFSET);
+ RECORD(MACRO_UPDATES);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -972,25 +988,25 @@ adjustFilenameForRelocatablePCH(const char *Filename, StringRef isysroot) {
return Filename + Pos;
}
-/// \brief Write the AST metadata (e.g., i686-apple-darwin9).
-void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
- const std::string &OutputFile) {
+/// \brief Write the control block.
+void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
+ StringRef isysroot,
+ const std::string &OutputFile) {
using namespace llvm;
-
- // Metadata
- const TargetInfo &Target = Context.getTargetInfo();
- BitCodeAbbrev *MetaAbbrev = new BitCodeAbbrev();
- MetaAbbrev->Add(BitCodeAbbrevOp(METADATA));
- MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // AST major
- MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // AST minor
- MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang major
- MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang minor
- MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
- MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Has errors
- MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Target triple
- unsigned MetaAbbrevCode = Stream.EmitAbbrev(MetaAbbrev);
-
+ Stream.EnterSubblock(CONTROL_BLOCK_ID, 5);
RecordData Record;
+
+ // Metadata
+ BitCodeAbbrev *MetadataAbbrev = new BitCodeAbbrev();
+ MetadataAbbrev->Add(BitCodeAbbrevOp(METADATA));
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Major
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Minor
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang maj.
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang min.
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Errors
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
+ unsigned MetadataAbbrevCode = Stream.EmitAbbrev(MetadataAbbrev);
Record.push_back(METADATA);
Record.push_back(VERSION_MAJOR);
Record.push_back(VERSION_MINOR);
@@ -998,9 +1014,10 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
Record.push_back(CLANG_VERSION_MINOR);
Record.push_back(!isysroot.empty());
Record.push_back(ASTHasCompilerErrors);
- const std::string &Triple = Target.getTriple().getTriple();
- Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, Triple);
+ Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
+ getClangFullRepositoryVersion());
+ // Imports
if (Chain) {
serialization::ModuleManager &Mgr = Chain->getModuleManager();
llvm::SmallVector<char, 128> ModulePaths;
@@ -1022,11 +1039,131 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
Stream.EmitRecord(IMPORTS, Record);
}
+ // Language options.
+ Record.clear();
+ const LangOptions &LangOpts = Context.getLangOpts();
+#define LANGOPT(Name, Bits, Default, Description) \
+ Record.push_back(LangOpts.Name);
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ Record.push_back(static_cast<unsigned>(LangOpts.get##Name()));
+#include "clang/Basic/LangOptions.def"
+
+ Record.push_back((unsigned) LangOpts.ObjCRuntime.getKind());
+ AddVersionTuple(LangOpts.ObjCRuntime.getVersion(), Record);
+
+ Record.push_back(LangOpts.CurrentModule.size());
+ Record.append(LangOpts.CurrentModule.begin(), LangOpts.CurrentModule.end());
+ Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
+
+ // Target options.
+ Record.clear();
+ const TargetInfo &Target = Context.getTargetInfo();
+ const TargetOptions &TargetOpts = Target.getTargetOpts();
+ AddString(TargetOpts.Triple, Record);
+ AddString(TargetOpts.CPU, Record);
+ AddString(TargetOpts.ABI, Record);
+ AddString(TargetOpts.CXXABI, Record);
+ AddString(TargetOpts.LinkerVersion, Record);
+ Record.push_back(TargetOpts.FeaturesAsWritten.size());
+ for (unsigned I = 0, N = TargetOpts.FeaturesAsWritten.size(); I != N; ++I) {
+ AddString(TargetOpts.FeaturesAsWritten[I], Record);
+ }
+ Record.push_back(TargetOpts.Features.size());
+ for (unsigned I = 0, N = TargetOpts.Features.size(); I != N; ++I) {
+ AddString(TargetOpts.Features[I], Record);
+ }
+ Stream.EmitRecord(TARGET_OPTIONS, Record);
+
+ // Diagnostic options.
+ Record.clear();
+ const DiagnosticOptions &DiagOpts
+ = Context.getDiagnostics().getDiagnosticOptions();
+#define DIAGOPT(Name, Bits, Default) Record.push_back(DiagOpts.Name);
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) \
+ Record.push_back(static_cast<unsigned>(DiagOpts.get##Name()));
+#include "clang/Basic/DiagnosticOptions.def"
+ Record.push_back(DiagOpts.Warnings.size());
+ for (unsigned I = 0, N = DiagOpts.Warnings.size(); I != N; ++I)
+ AddString(DiagOpts.Warnings[I], Record);
+ // Note: we don't serialize the log or serialization file names, because they
+ // are generally transient files and will almost always be overridden.
+ Stream.EmitRecord(DIAGNOSTIC_OPTIONS, Record);
+
+ // File system options.
+ Record.clear();
+ const FileSystemOptions &FSOpts
+ = Context.getSourceManager().getFileManager().getFileSystemOptions();
+ AddString(FSOpts.WorkingDir, Record);
+ Stream.EmitRecord(FILE_SYSTEM_OPTIONS, Record);
+
+ // Header search options.
+ Record.clear();
+ const HeaderSearchOptions &HSOpts
+ = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+ AddString(HSOpts.Sysroot, Record);
+
+ // Include entries.
+ Record.push_back(HSOpts.UserEntries.size());
+ for (unsigned I = 0, N = HSOpts.UserEntries.size(); I != N; ++I) {
+ const HeaderSearchOptions::Entry &Entry = HSOpts.UserEntries[I];
+ AddString(Entry.Path, Record);
+ Record.push_back(static_cast<unsigned>(Entry.Group));
+ Record.push_back(Entry.IsUserSupplied);
+ Record.push_back(Entry.IsFramework);
+ Record.push_back(Entry.IgnoreSysRoot);
+ Record.push_back(Entry.IsInternal);
+ Record.push_back(Entry.ImplicitExternC);
+ }
+
+ // System header prefixes.
+ Record.push_back(HSOpts.SystemHeaderPrefixes.size());
+ for (unsigned I = 0, N = HSOpts.SystemHeaderPrefixes.size(); I != N; ++I) {
+ AddString(HSOpts.SystemHeaderPrefixes[I].Prefix, Record);
+ Record.push_back(HSOpts.SystemHeaderPrefixes[I].IsSystemHeader);
+ }
+
+ AddString(HSOpts.ResourceDir, Record);
+ AddString(HSOpts.ModuleCachePath, Record);
+ Record.push_back(HSOpts.DisableModuleHash);
+ Record.push_back(HSOpts.UseBuiltinIncludes);
+ Record.push_back(HSOpts.UseStandardSystemIncludes);
+ Record.push_back(HSOpts.UseStandardCXXIncludes);
+ Record.push_back(HSOpts.UseLibcxx);
+ Stream.EmitRecord(HEADER_SEARCH_OPTIONS, Record);
+
+ // Preprocessor options.
+ Record.clear();
+ const PreprocessorOptions &PPOpts = PP.getPreprocessorOpts();
+
+ // Macro definitions.
+ Record.push_back(PPOpts.Macros.size());
+ for (unsigned I = 0, N = PPOpts.Macros.size(); I != N; ++I) {
+ AddString(PPOpts.Macros[I].first, Record);
+ Record.push_back(PPOpts.Macros[I].second);
+ }
+
+ // Includes
+ Record.push_back(PPOpts.Includes.size());
+ for (unsigned I = 0, N = PPOpts.Includes.size(); I != N; ++I)
+ AddString(PPOpts.Includes[I], Record);
+
+ // Macro includes
+ Record.push_back(PPOpts.MacroIncludes.size());
+ for (unsigned I = 0, N = PPOpts.MacroIncludes.size(); I != N; ++I)
+ AddString(PPOpts.MacroIncludes[I], Record);
+
+ Record.push_back(PPOpts.UsePredefines);
+ AddString(PPOpts.ImplicitPCHInclude, Record);
+ AddString(PPOpts.ImplicitPTHInclude, Record);
+ Record.push_back(static_cast<unsigned>(PPOpts.ObjCXXARCStandardLibrary));
+ Stream.EmitRecord(PREPROCESSOR_OPTIONS, Record);
+
// Original file name and file ID
SourceManager &SM = Context.getSourceManager();
if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
BitCodeAbbrev *FileAbbrev = new BitCodeAbbrev();
- FileAbbrev->Add(BitCodeAbbrevOp(ORIGINAL_FILE_NAME));
+ FileAbbrev->Add(BitCodeAbbrevOp(ORIGINAL_FILE));
+ FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // File ID
FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
unsigned FileAbbrevCode = Stream.EmitAbbrev(FileAbbrev);
@@ -1037,13 +1174,10 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
const char *MainFileNameStr = MainFilePath.c_str();
MainFileNameStr = adjustFilenameForRelocatablePCH(MainFileNameStr,
isysroot);
- RecordData Record;
- Record.push_back(ORIGINAL_FILE_NAME);
- Stream.EmitRecordWithBlob(FileAbbrevCode, Record, MainFileNameStr);
-
Record.clear();
+ Record.push_back(ORIGINAL_FILE);
Record.push_back(SM.getMainFileID().getOpaqueValue());
- Stream.EmitRecord(ORIGINAL_FILE_ID, Record);
+ Stream.EmitRecordWithBlob(FileAbbrevCode, Record, MainFileNameStr);
}
// Original PCH directory
@@ -1063,32 +1197,86 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
Stream.EmitRecordWithBlob(AbbrevCode, Record, origDir);
}
- // Repository branch/version information.
- BitCodeAbbrev *RepoAbbrev = new BitCodeAbbrev();
- RepoAbbrev->Add(BitCodeAbbrevOp(VERSION_CONTROL_BRANCH_REVISION));
- RepoAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
- unsigned RepoAbbrevCode = Stream.EmitAbbrev(RepoAbbrev);
- Record.clear();
- Record.push_back(VERSION_CONTROL_BRANCH_REVISION);
- Stream.EmitRecordWithBlob(RepoAbbrevCode, Record,
- getClangFullRepositoryVersion());
+ WriteInputFiles(Context.SourceMgr, isysroot);
+ Stream.ExitBlock();
}
-/// \brief Write the LangOptions structure.
-void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
+void ASTWriter::WriteInputFiles(SourceManager &SourceMgr, StringRef isysroot) {
+ using namespace llvm;
+ Stream.EnterSubblock(INPUT_FILES_BLOCK_ID, 4);
RecordData Record;
-#define LANGOPT(Name, Bits, Default, Description) \
- Record.push_back(LangOpts.Name);
-#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
- Record.push_back(static_cast<unsigned>(LangOpts.get##Name()));
-#include "clang/Basic/LangOptions.def"
+
+ // Create input-file abbreviation.
+ BitCodeAbbrev *IFAbbrev = new BitCodeAbbrev();
+ IFAbbrev->Add(BitCodeAbbrevOp(INPUT_FILE));
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Overridden
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ unsigned IFAbbrevCode = Stream.EmitAbbrev(IFAbbrev);
+
+ // Write out all of the input files.
+ std::vector<uint32_t> InputFileOffsets;
+ for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size(); I != N; ++I) {
+ // Get this source location entry.
+ const SrcMgr::SLocEntry *SLoc = &SourceMgr.getLocalSLocEntry(I);
+ assert(&SourceMgr.getSLocEntry(FileID::get(I)) == SLoc);
- Record.push_back((unsigned) LangOpts.ObjCRuntime.getKind());
- AddVersionTuple(LangOpts.ObjCRuntime.getVersion(), Record);
+ // We only care about file entries that were not overridden.
+ if (!SLoc->isFile())
+ continue;
+ const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache();
+ if (!Cache->OrigEntry)
+ continue;
+
+ // Record this entry's offset.
+ InputFileOffsets.push_back(Stream.GetCurrentBitNo());
+ InputFileIDs[Cache->OrigEntry] = InputFileOffsets.size();
+
+ Record.clear();
+ Record.push_back(INPUT_FILE);
+ Record.push_back(InputFileOffsets.size());
+
+ // Emit size/modification time for this file.
+ Record.push_back(Cache->OrigEntry->getSize());
+ Record.push_back(Cache->OrigEntry->getModificationTime());
+
+ // Whether this file was overridden.
+ Record.push_back(Cache->BufferOverridden);
+
+ // Turn the file name into an absolute path, if it isn't already.
+ const char *Filename = Cache->OrigEntry->getName();
+ SmallString<128> FilePath(Filename);
+
+ // Ask the file manager to fixup the relative path for us. This will
+ // honor the working directory.
+ SourceMgr.getFileManager().FixupRelativePath(FilePath);
+
+ // FIXME: This call to make_absolute shouldn't be necessary, the
+ // call to FixupRelativePath should always return an absolute path.
+ llvm::sys::fs::make_absolute(FilePath);
+ Filename = FilePath.c_str();
+
+ Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
+
+ Stream.EmitRecordWithBlob(IFAbbrevCode, Record, Filename);
+ }
- Record.push_back(LangOpts.CurrentModule.size());
- Record.append(LangOpts.CurrentModule.begin(), LangOpts.CurrentModule.end());
- Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
+ Stream.ExitBlock();
+
+ // Create input file offsets abbreviation.
+ BitCodeAbbrev *OffsetsAbbrev = new BitCodeAbbrev();
+ OffsetsAbbrev->Add(BitCodeAbbrevOp(INPUT_FILE_OFFSETS));
+ OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # input files
+ OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Array
+ unsigned OffsetsAbbrevCode = Stream.EmitAbbrev(OffsetsAbbrev);
+
+ // Write input file offsets.
+ Record.clear();
+ Record.push_back(INPUT_FILE_OFFSETS);
+ Record.push_back(InputFileOffsets.size());
+ Stream.EmitRecordWithBlob(OffsetsAbbrevCode, Record, data(InputFileOffsets));
}
//===----------------------------------------------------------------------===//
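[Note on a pattern that repeats throughout WriteControlBlock and WriteInputFiles above: a BitCodeAbbrev pre-declares a record's field layout (a literal record code, fixed-width and VBR integers, an optional trailing blob), and EmitAbbrev returns an abbreviation ID that later EmitRecordWithBlob calls reference. A hedged sketch of the shape; the include paths are assumed for the LLVM of this vintage:]

    #include "llvm/Bitcode/BitCodes.h"        // BitCodeAbbrev, BitCodeAbbrevOp
    #include "llvm/Bitcode/BitstreamWriter.h" // BitstreamWriter (assumed paths)

    static unsigned defineExampleAbbrev(llvm::BitstreamWriter &Stream,
                                        unsigned RecordCode) {
      using namespace llvm;
      BitCodeAbbrev *Abbrev = new BitCodeAbbrev(); // EmitAbbrev takes ownership
      Abbrev->Add(BitCodeAbbrevOp(RecordCode));                // literal record code
      Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // small integer field
      Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // one-bit flag
      Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));     // raw byte payload
      return Stream.EmitAbbrev(Abbrev); // ID to pass to EmitRecordWithBlob
    }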
@@ -1139,46 +1327,6 @@ public:
};
} // end anonymous namespace
-/// \brief Write the stat() system call cache to the AST file.
-void ASTWriter::WriteStatCache(MemorizeStatCalls &StatCalls) {
- // Build the on-disk hash table containing information about every
- // stat() call.
- OnDiskChainedHashTableGenerator<ASTStatCacheTrait> Generator;
- unsigned NumStatEntries = 0;
- for (MemorizeStatCalls::iterator Stat = StatCalls.begin(),
- StatEnd = StatCalls.end();
- Stat != StatEnd; ++Stat, ++NumStatEntries) {
- StringRef Filename = Stat->first();
- Generator.insert(Filename.data(), Stat->second);
- }
-
- // Create the on-disk hash table in a buffer.
- SmallString<4096> StatCacheData;
- uint32_t BucketOffset;
- {
- llvm::raw_svector_ostream Out(StatCacheData);
- // Make sure that no bucket is at offset 0
- clang::io::Emit32(Out, 0);
- BucketOffset = Generator.Emit(Out);
- }
-
- // Create a blob abbreviation
- using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
- Abbrev->Add(BitCodeAbbrevOp(STAT_CACHE));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
- unsigned StatCacheAbbrev = Stream.EmitAbbrev(Abbrev);
-
- // Write the stat cache
- RecordData Record;
- Record.push_back(STAT_CACHE);
- Record.push_back(BucketOffset);
- Record.push_back(NumStatEntries);
- Stream.EmitRecordWithBlob(StatCacheAbbrev, Record, StatCacheData.str());
-}
-
//===----------------------------------------------------------------------===//
// Source Manager Serialization
//===----------------------------------------------------------------------===//
@@ -1194,13 +1342,10 @@ static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives
// FileEntry fields.
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // BufferOverridden
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Input File ID
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumCreatedFIDs
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 24)); // FirstDeclIndex
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumDecls
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
return Stream.EmitAbbrev(Abbrev);
}
@@ -1329,8 +1474,6 @@ namespace {
/// \brief Write the header search block for the list of files that
///
/// \param HS The header search structure to save.
-///
-/// \param Chain Whether we're creating a chained AST file.
void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot) {
SmallVector<const FileEntry *, 16> FilesByUID;
HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
@@ -1427,15 +1570,14 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Write out the source location entry table. We skip the first
// entry, which is always the same dummy entry.
std::vector<uint32_t> SLocEntryOffsets;
- // Write out the offsets of only source location file entries.
- // We will go through them in ASTReader::validateFileEntries().
- std::vector<uint32_t> SLocFileEntryOffsets;
RecordData PreloadSLocs;
SLocEntryOffsets.reserve(SourceMgr.local_sloc_entry_size() - 1);
for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size();
I != N; ++I) {
// Get this source location entry.
const SrcMgr::SLocEntry *SLoc = &SourceMgr.getLocalSLocEntry(I);
+ FileID FID = FileID::get(I);
+ assert(&SourceMgr.getSLocEntry(FID) == SLoc);
// Record the offset of this source-location entry.
SLocEntryOffsets.push_back(Stream.GetCurrentBitNo());
@@ -1446,7 +1588,6 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache();
if (Cache->OrigEntry) {
Code = SM_SLOC_FILE_ENTRY;
- SLocFileEntryOffsets.push_back(Stream.GetCurrentBitNo());
} else
Code = SM_SLOC_BUFFER_ENTRY;
} else
@@ -1467,16 +1608,13 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
assert(Content->OrigEntry == Content->ContentsEntry &&
"Writing to AST an overridden file is not supported");
- // The source location entry is a file. The blob associated
- // with this entry is the file name.
+ // The source location entry is a file. Emit input file ID.
+ assert(InputFileIDs[Content->OrigEntry] != 0 && "Missed file entry");
+ Record.push_back(InputFileIDs[Content->OrigEntry]);
- // Emit size/modification time for this file.
- Record.push_back(Content->OrigEntry->getSize());
- Record.push_back(Content->OrigEntry->getModificationTime());
- Record.push_back(Content->BufferOverridden);
Record.push_back(File.NumCreatedFIDs);
- FileDeclIDsTy::iterator FDI = FileDeclIDs.find(SLoc);
+ FileDeclIDsTy::iterator FDI = FileDeclIDs.find(FID);
if (FDI != FileDeclIDs.end()) {
Record.push_back(FDI->second->FirstDeclIndex);
Record.push_back(FDI->second->DeclIDs.size());
@@ -1485,21 +1623,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.push_back(0);
}
- // Turn the file name into an absolute path, if it isn't already.
- const char *Filename = Content->OrigEntry->getName();
- SmallString<128> FilePath(Filename);
-
- // Ask the file manager to fixup the relative path for us. This will
- // honor the working directory.
- SourceMgr.getFileManager().FixupRelativePath(FilePath);
-
- // FIXME: This call to make_absolute shouldn't be necessary, the
- // call to FixupRelativePath should always return an absolute path.
- llvm::sys::fs::make_absolute(FilePath);
- Filename = FilePath.c_str();
-
- Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
- Stream.EmitRecordWithBlob(SLocFileAbbrv, Record, Filename);
+ Stream.EmitRecordWithAbbrev(SLocFileAbbrv, Record);
if (Content->BufferOverridden) {
Record.clear();
@@ -1570,18 +1694,6 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.push_back(SourceMgr.getNextLocalOffset() - 1); // skip dummy
Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record, data(SLocEntryOffsets));
- Abbrev = new BitCodeAbbrev();
- Abbrev->Add(BitCodeAbbrevOp(FILE_SOURCE_LOCATION_OFFSETS));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets
- unsigned SLocFileOffsetsAbbrev = Stream.EmitAbbrev(Abbrev);
-
- Record.clear();
- Record.push_back(FILE_SOURCE_LOCATION_OFFSETS);
- Record.push_back(SLocFileEntryOffsets.size());
- Stream.EmitRecordWithBlob(SLocFileOffsetsAbbrev, Record,
- data(SLocFileEntryOffsets));
-
// Write the source location entry preloads array, telling the AST
// reader which source locations entries it should load eagerly.
Stream.EmitRecord(SOURCE_LOCATION_PRELOADS, PreloadSLocs);
@@ -1675,102 +1787,129 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
SmallVector<std::pair<const IdentifierInfo *, MacroInfo *>, 2>
MacrosToEmit;
llvm::SmallPtrSet<const IdentifierInfo*, 4> MacroDefinitionsSeen;
- for (Preprocessor::macro_iterator I = PP.macro_begin(Chain == 0),
+ for (Preprocessor::macro_iterator I = PP.macro_begin(Chain == 0),
E = PP.macro_end(Chain == 0);
I != E; ++I) {
- const IdentifierInfo *Name = I->first;
if (!IsModule || I->second->isPublic()) {
- MacroDefinitionsSeen.insert(Name);
+ MacroDefinitionsSeen.insert(I->first);
MacrosToEmit.push_back(std::make_pair(I->first, I->second));
}
}
-
+
// Sort the set of macro definitions that need to be serialized by the
// name of the macro, to provide a stable ordering.
- llvm::array_pod_sort(MacrosToEmit.begin(), MacrosToEmit.end(),
+ llvm::array_pod_sort(MacrosToEmit.begin(), MacrosToEmit.end(),
&compareMacroDefinitions);
-
- // Resolve any identifiers that defined macros at the time they were
- // deserialized, adding them to the list of macros to emit (if appropriate).
- for (unsigned I = 0, N = DeserializedMacroNames.size(); I != N; ++I) {
- IdentifierInfo *Name
- = const_cast<IdentifierInfo *>(DeserializedMacroNames[I]);
- if (Name->hasMacroDefinition() && MacroDefinitionsSeen.insert(Name))
- MacrosToEmit.push_back(std::make_pair(Name, PP.getMacroInfo(Name)));
- }
-
+
+ /// \brief Offsets of each of the macros into the bitstream, indexed by
+ /// the local macro ID
+ ///
+ /// For each identifier that is associated with a macro, this map
+ /// provides the offset into the bitstream where that macro is
+ /// defined.
+ std::vector<uint32_t> MacroOffsets;
+
for (unsigned I = 0, N = MacrosToEmit.size(); I != N; ++I) {
const IdentifierInfo *Name = MacrosToEmit[I].first;
- MacroInfo *MI = MacrosToEmit[I].second;
- if (!MI)
- continue;
-
- // Don't emit builtin macros like __LINE__ to the AST file unless they have
- // been redefined by the header (in which case they are not isBuiltinMacro).
- // Also skip macros from a AST file if we're chaining.
-
- // FIXME: There is a (probably minor) optimization we could do here, if
- // the macro comes from the original PCH but the identifier comes from a
- // chained PCH, by storing the offset into the original PCH rather than
- // writing the macro definition a second time.
- if (MI->isBuiltinMacro() ||
- (Chain &&
- Name->isFromAST() && !Name->hasChangedSinceDeserialization() &&
- MI->isFromAST() && !MI->hasChangedAfterLoad()))
- continue;
- AddIdentifierRef(Name, Record);
- MacroOffsets[Name] = Stream.GetCurrentBitNo();
- Record.push_back(MI->getDefinitionLoc().getRawEncoding());
- Record.push_back(MI->isUsed());
- Record.push_back(MI->isPublic());
- AddSourceLocation(MI->getVisibilityLocation(), Record);
- unsigned Code;
- if (MI->isObjectLike()) {
- Code = PP_MACRO_OBJECT_LIKE;
- } else {
- Code = PP_MACRO_FUNCTION_LIKE;
+ for (MacroInfo *MI = MacrosToEmit[I].second; MI;
+ MI = MI->getPreviousDefinition()) {
+ MacroID ID = getMacroRef(MI);
+ if (!ID)
+ continue;
- Record.push_back(MI->isC99Varargs());
- Record.push_back(MI->isGNUVarargs());
- Record.push_back(MI->getNumArgs());
- for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end();
- I != E; ++I)
- AddIdentifierRef(*I, Record);
- }
+ // Skip macros from an AST file if we're chaining.
+ if (Chain && MI->isFromAST() && !MI->hasChangedAfterLoad())
+ continue;
- // If we have a detailed preprocessing record, record the macro definition
- // ID that corresponds to this macro.
- if (PPRec)
- Record.push_back(MacroDefinitions[PPRec->findMacroDefinition(MI)]);
+ if (ID < FirstMacroID) {
+ // This will have been dealt with via an update record.
+ assert(MacroUpdates.count(MI) > 0 && "Missing macro update");
+ continue;
+ }
- Stream.EmitRecord(Code, Record);
- Record.clear();
+ // Record the local offset of this macro.
+ unsigned Index = ID - FirstMacroID;
+ if (Index == MacroOffsets.size())
+ MacroOffsets.push_back(Stream.GetCurrentBitNo());
+ else {
+ if (Index > MacroOffsets.size())
+ MacroOffsets.resize(Index + 1);
- // Emit the tokens array.
- for (unsigned TokNo = 0, e = MI->getNumTokens(); TokNo != e; ++TokNo) {
- // Note that we know that the preprocessor does not have any annotation
- // tokens in it because they are created by the parser, and thus can't be
- // in a macro definition.
- const Token &Tok = MI->getReplacementToken(TokNo);
-
- Record.push_back(Tok.getLocation().getRawEncoding());
- Record.push_back(Tok.getLength());
-
- // FIXME: When reading literal tokens, reconstruct the literal pointer if
- // it is needed.
- AddIdentifierRef(Tok.getIdentifierInfo(), Record);
- // FIXME: Should translate token kind to a stable encoding.
- Record.push_back(Tok.getKind());
- // FIXME: Should translate token flags to a stable encoding.
- Record.push_back(Tok.getFlags());
-
- Stream.EmitRecord(PP_TOKEN, Record);
+ MacroOffsets[Index] = Stream.GetCurrentBitNo();
+ }
+
+ AddIdentifierRef(Name, Record);
+ addMacroRef(MI, Record);
+ Record.push_back(inferSubmoduleIDFromLocation(MI->getDefinitionLoc()));
+ AddSourceLocation(MI->getDefinitionLoc(), Record);
+ AddSourceLocation(MI->getUndefLoc(), Record);
+ Record.push_back(MI->isUsed());
+ Record.push_back(MI->isPublic());
+ AddSourceLocation(MI->getVisibilityLocation(), Record);
+ unsigned Code;
+ if (MI->isObjectLike()) {
+ Code = PP_MACRO_OBJECT_LIKE;
+ } else {
+ Code = PP_MACRO_FUNCTION_LIKE;
+
+ Record.push_back(MI->isC99Varargs());
+ Record.push_back(MI->isGNUVarargs());
+ Record.push_back(MI->getNumArgs());
+ for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end();
+ I != E; ++I)
+ AddIdentifierRef(*I, Record);
+ }
+
+ // If we have a detailed preprocessing record, record the macro definition
+ // ID that corresponds to this macro.
+ if (PPRec)
+ Record.push_back(MacroDefinitions[PPRec->findMacroDefinition(MI)]);
+
+ Stream.EmitRecord(Code, Record);
Record.clear();
+
+ // Emit the tokens array.
+ for (unsigned TokNo = 0, e = MI->getNumTokens(); TokNo != e; ++TokNo) {
+ // Note that we know that the preprocessor does not have any annotation
+ // tokens in it because they are created by the parser, and thus can't
+ // be in a macro definition.
+ const Token &Tok = MI->getReplacementToken(TokNo);
+
+ Record.push_back(Tok.getLocation().getRawEncoding());
+ Record.push_back(Tok.getLength());
+
+ // FIXME: When reading literal tokens, reconstruct the literal pointer
+ // if it is needed.
+ AddIdentifierRef(Tok.getIdentifierInfo(), Record);
+ // FIXME: Should translate token kind to a stable encoding.
+ Record.push_back(Tok.getKind());
+ // FIXME: Should translate token flags to a stable encoding.
+ Record.push_back(Tok.getFlags());
+
+ Stream.EmitRecord(PP_TOKEN, Record);
+ Record.clear();
+ }
+ ++NumMacros;
}
- ++NumMacros;
}
Stream.ExitBlock();
+
+ // Write the offsets table for macro IDs.
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(MACRO_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of macros
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+
+ unsigned MacroOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+ Record.clear();
+ Record.push_back(MACRO_OFFSET);
+ Record.push_back(MacroOffsets.size());
+ Record.push_back(FirstMacroID - NUM_PREDEF_MACRO_IDS);
+ Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record,
+ data(MacroOffsets));
}
void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
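[Note: the MACRO_OFFSET table emitted above uses the same offset-index scheme as SLocEntryOffsets and InputFileOffsets: record the current bit position as each entity is written, then emit the positions as one blob so a reader can seek straight to entity N instead of scanning the block. A minimal sketch of the bookkeeping, independent of the LLVM types:]

    #include <cstdint>
    #include <vector>

    struct OffsetIndex {
      std::vector<uint32_t> Offsets; // bit offset of each locally written entity

      // Call just before emitting the entity with local ID Offsets.size().
      void recordNext(uint64_t CurrentBitNo) {
        Offsets.push_back(static_cast<uint32_t>(CurrentBitNo));
      }

      // A reader seeks straight to this bit position for a given local ID.
      uint32_t offsetFor(unsigned LocalID) const { return Offsets[LocalID]; }
    };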
@@ -1794,6 +1933,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // filename length
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // in quotes
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // imported module
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
InclusionAbbrev = Stream.EmitAbbrev(Abbrev);
}
@@ -1836,6 +1976,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
Record.push_back(ID->getFileName().size());
Record.push_back(ID->wasInQuotes());
Record.push_back(static_cast<unsigned>(ID->getKind()));
+ Record.push_back(ID->importedModule());
SmallString<64> Buffer;
Buffer += ID->getFileName();
// Check that the FileEntry is not null because it was not resolved and
@@ -1935,6 +2076,11 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
unsigned HeaderAbbrev = Stream.EmitAbbrev(Abbrev);
Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_TOPHEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned TopHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_UMBRELLA_DIR));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
unsigned UmbrellaDirAbbrev = Stream.EmitAbbrev(Abbrev);
@@ -1944,6 +2090,11 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Feature
unsigned RequiresAbbrev = Stream.EmitAbbrev(Abbrev);
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_EXCLUDED_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned ExcludedHeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
// Write the submodule metadata block.
RecordData Record;
Record.push_back(getNumberOfModules(WritingModule));
@@ -2005,6 +2156,19 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Stream.EmitRecordWithBlob(HeaderAbbrev, Record,
Mod->Headers[I]->getName());
}
+ // Emit the excluded headers.
+ for (unsigned I = 0, N = Mod->ExcludedHeaders.size(); I != N; ++I) {
+ Record.clear();
+ Record.push_back(SUBMODULE_EXCLUDED_HEADER);
+ Stream.EmitRecordWithBlob(ExcludedHeaderAbbrev, Record,
+ Mod->ExcludedHeaders[I]->getName());
+ }
+ for (unsigned I = 0, N = Mod->TopHeaders.size(); I != N; ++I) {
+ Record.clear();
+ Record.push_back(SUBMODULE_TOPHEADER);
+ Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record,
+ Mod->TopHeaders[I]->getName());
+ }
// Emit the imports.
if (!Mod->Imports.empty()) {
@@ -2067,24 +2231,35 @@ ASTWriter::inferSubmoduleIDFromLocation(SourceLocation Loc) {
}
void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag) {
+ // FIXME: Make it work properly with modules.
+ llvm::SmallDenseMap<const DiagnosticsEngine::DiagState *, unsigned, 64>
+ DiagStateIDMap;
+ unsigned CurrID = 0;
+ DiagStateIDMap[&Diag.DiagStates.front()] = ++CurrID; // the command-line one.
RecordData Record;
for (DiagnosticsEngine::DiagStatePointsTy::const_iterator
I = Diag.DiagStatePoints.begin(), E = Diag.DiagStatePoints.end();
I != E; ++I) {
- const DiagnosticsEngine::DiagStatePoint &point = *I;
+ const DiagnosticsEngine::DiagStatePoint &point = *I;
if (point.Loc.isInvalid())
continue;
Record.push_back(point.Loc.getRawEncoding());
- for (DiagnosticsEngine::DiagState::const_iterator
- I = point.State->begin(), E = point.State->end(); I != E; ++I) {
- if (I->second.isPragma()) {
- Record.push_back(I->first);
- Record.push_back(I->second.getMapping());
+ unsigned &DiagStateID = DiagStateIDMap[point.State];
+ Record.push_back(DiagStateID);
+
+ if (DiagStateID == 0) {
+ DiagStateID = ++CurrID;
+ for (DiagnosticsEngine::DiagState::const_iterator
+ I = point.State->begin(), E = point.State->end(); I != E; ++I) {
+ if (I->second.isPragma()) {
+ Record.push_back(I->first);
+ Record.push_back(I->second.getMapping());
+ }
}
+ Record.push_back(-1); // mark the end of the diag/map pairs for this
+ // location.
}
- Record.push_back(-1); // mark the end of the diag/map pairs for this
- // location.
}
if (!Record.empty())
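[Note: the DiagStateIDMap code above relies on a map idiom worth calling out: operator[] default-constructs the mapped value to 0 on first access, so reading 0 through the returned reference means "first sighting", and the fresh ID is written back through that same reference. A sketch with illustrative types:]

    #include <map>

    static unsigned internState(std::map<const void *, unsigned> &IDs,
                                unsigned &CurrID, const void *State) {
      unsigned &ID = IDs[State]; // default-constructed to 0 on first access
      if (ID == 0)
        ID = ++CurrID;           // assign a fresh nonzero ID exactly once
      return ID;
    }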
@@ -2238,9 +2413,11 @@ void ASTWriter::WriteFileDeclIDsMap() {
BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(FILE_SORTED_DECLS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
Record.push_back(FILE_SORTED_DECLS);
+ Record.push_back(FileSortedIDs.size());
Stream.EmitRecordWithBlob(AbbrevCode, Record, data(FileSortedIDs));
}
@@ -2495,17 +2672,17 @@ class ASTIdentifierTableTrait {
II->getFETokenInfo<void>())
return true;
- return hasMacroDefinition(II, Macro);
+ return hadMacroDefinition(II, Macro);
}
-
- bool hasMacroDefinition(IdentifierInfo *II, MacroInfo *&Macro) {
- if (!II->hasMacroDefinition())
+
+ bool hadMacroDefinition(IdentifierInfo *II, MacroInfo *&Macro) {
+ if (!II->hadMacroDefinition())
return false;
-
- if (Macro || (Macro = PP.getMacroInfo(II)))
+
+ if (Macro || (Macro = PP.getMacroInfoHistory(II)))
return !Macro->isBuiltinMacro() && (!IsModule || Macro->isPublic());
-
- return false;
+
+ return false;
}
public:
@@ -2529,10 +2706,17 @@ public:
unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
MacroInfo *Macro = 0;
if (isInterestingIdentifier(II, Macro)) {
- DataLen += 2; // 2 bytes for builtin ID, flags
- if (hasMacroDefinition(II, Macro))
- DataLen += 8;
-
+ DataLen += 2; // 2 bytes for builtin ID
+ DataLen += 2; // 2 bytes for flags
+ if (hadMacroDefinition(II, Macro)) {
+ for (MacroInfo *M = Macro; M; M = M->getPreviousDefinition()) {
+ if (Writer.getMacroRef(M) != 0)
+ DataLen += 4;
+ }
+
+ DataLen += 4;
+ }
+
for (IdentifierResolver::iterator D = IdResolver.begin(II),
DEnd = IdResolver.end();
D != DEnd; ++D)
@@ -2563,23 +2747,28 @@ public:
}
clang::io::Emit32(Out, (ID << 1) | 0x01);
- uint32_t Bits = 0;
- bool HasMacroDefinition = hasMacroDefinition(II, Macro);
- Bits = (uint32_t)II->getObjCOrBuiltinID();
- assert((Bits & 0x7ff) == Bits && "ObjCOrBuiltinID too big for ASTReader.");
- Bits = (Bits << 1) | unsigned(HasMacroDefinition);
+ uint32_t Bits = (uint32_t)II->getObjCOrBuiltinID();
+ assert((Bits & 0xffff) == Bits && "ObjCOrBuiltinID too big for ASTReader.");
+ clang::io::Emit16(Out, Bits);
+ Bits = 0;
+ bool HadMacroDefinition = hadMacroDefinition(II, Macro);
+ Bits = (Bits << 1) | unsigned(HadMacroDefinition);
Bits = (Bits << 1) | unsigned(II->isExtensionToken());
Bits = (Bits << 1) | unsigned(II->isPoisoned());
Bits = (Bits << 1) | unsigned(II->hasRevertedTokenIDToIdentifier());
Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword());
clang::io::Emit16(Out, Bits);
- if (HasMacroDefinition) {
- clang::io::Emit32(Out, Writer.getMacroOffset(II));
- clang::io::Emit32(Out,
- Writer.inferSubmoduleIDFromLocation(Macro->getDefinitionLoc()));
+ if (HadMacroDefinition) {
+ // Write all of the macro IDs associated with this identifier.
+ for (MacroInfo *M = Macro; M; M = M->getPreviousDefinition()) {
+ if (MacroID ID = Writer.getMacroRef(M))
+ clang::io::Emit32(Out, ID);
+ }
+
+ clang::io::Emit32(Out, 0);
}
-
+
// Emit the declaration IDs in reverse order, because the
// IdentifierResolver provides the declarations as they would be
// visible (e.g., the function "stat" would come before the struct
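[Note: the identifier-table writer above packs several booleans into a single 16-bit word by shifting each flag in, in a fixed order; the reader must peel them off in mirror order. A small sketch of the round trip, using the same flag order as the hunk:]

    #include <cstdint>

    static uint16_t packFlags(bool HadMacro, bool Ext, bool Poisoned,
                              bool Reverted, bool CPlusPlusOpKeyword) {
      uint32_t Bits = 0;
      Bits = (Bits << 1) | unsigned(HadMacro);
      Bits = (Bits << 1) | unsigned(Ext);
      Bits = (Bits << 1) | unsigned(Poisoned);
      Bits = (Bits << 1) | unsigned(Reverted);
      Bits = (Bits << 1) | unsigned(CPlusPlusOpKeyword);
      return static_cast<uint16_t>(Bits);
    }

    static bool unpackCPlusPlusOpKeyword(uint16_t Bits) {
      return Bits & 0x1; // the last flag shifted in lands in the low bit
    }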
@@ -2756,30 +2945,32 @@ public:
void EmitKey(raw_ostream& Out, DeclarationName Name, unsigned) {
using namespace clang::io;
- assert(Name.getNameKind() < 0x100 && "Invalid name kind ?");
Emit8(Out, Name.getNameKind());
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
Emit32(Out, Writer.getIdentifierRef(Name.getAsIdentifierInfo()));
- break;
+ return;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
Emit32(Out, Writer.getSelectorRef(Name.getObjCSelector()));
- break;
+ return;
case DeclarationName::CXXOperatorName:
- assert(Name.getCXXOverloadedOperator() < 0x100 && "Invalid operator ?");
+ assert(Name.getCXXOverloadedOperator() < NUM_OVERLOADED_OPERATORS &&
+ "Invalid operator?");
Emit8(Out, Name.getCXXOverloadedOperator());
- break;
+ return;
case DeclarationName::CXXLiteralOperatorName:
Emit32(Out, Writer.getIdentifierRef(Name.getCXXLiteralIdentifier()));
- break;
+ return;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXUsingDirective:
- break;
+ return;
}
+
+ llvm_unreachable("Invalid name kind?");
}
void EmitData(raw_ostream& Out, key_type_ref,
@@ -3147,7 +3338,8 @@ ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream)
ASTHasCompilerErrors(false),
FirstDeclID(NUM_PREDEF_DECL_IDS), NextDeclID(FirstDeclID),
FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID),
- FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID),
+ FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID),
+ FirstMacroID(NUM_PREDEF_MACRO_IDS), NextMacroID(FirstMacroID),
FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS),
NextSubmoduleID(FirstSubmoduleID),
FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID),
@@ -3171,7 +3363,7 @@ ASTWriter::~ASTWriter() {
delete I->second;
}
-void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+void ASTWriter::WriteAST(Sema &SemaRef,
const std::string &OutputFile,
Module *WritingModule, StringRef isysroot,
bool hasErrors) {
@@ -3190,7 +3382,7 @@ void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Context = &SemaRef.Context;
PP = &SemaRef.PP;
this->WritingModule = WritingModule;
- WriteASTCore(SemaRef, StatCalls, isysroot, OutputFile, WritingModule);
+ WriteASTCore(SemaRef, isysroot, OutputFile, WritingModule);
Context = 0;
PP = 0;
this->WritingModule = 0;
@@ -3207,7 +3399,7 @@ static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec,
}
}
-void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+void ASTWriter::WriteASTCore(Sema &SemaRef,
StringRef isysroot,
const std::string &OutputFile,
Module *WritingModule) {
@@ -3357,14 +3549,13 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
if (!I->second)
AddDeclRef(I->first, KnownNamespaces);
}
-
+
+ // Write the control block
+ WriteControlBlock(PP, Context, isysroot, OutputFile);
+
// Write the remaining AST contents.
RecordData Record;
Stream.EnterSubblock(AST_BLOCK_ID, 5);
- WriteMetadata(Context, isysroot, OutputFile);
- WriteLanguageOptions(Context.getLangOpts());
- if (StatCalls && isysroot.empty())
- WriteStatCache(*StatCalls);
// Create a lexical update block containing all of the declarations in the
// translation unit that do not come from other AST files.
@@ -3482,6 +3673,7 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Out.write(FileName.data(), FileName.size());
io::Emit32(Out, (*M)->SLocEntryBaseOffset);
io::Emit32(Out, (*M)->BaseIdentifierID);
+ io::Emit32(Out, (*M)->BaseMacroID);
io::Emit32(Out, (*M)->BasePreprocessedEntityID);
io::Emit32(Out, (*M)->BaseSubmoduleID);
io::Emit32(Out, (*M)->BaseSelectorID);
@@ -3595,7 +3787,8 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Stream.EmitRecord(IMPORTED_MODULES, ImportedModules);
}
}
-
+
+ WriteMacroUpdates();
WriteDeclUpdatesBlocks();
WriteDeclReplacementsBlock();
WriteMergedDecls();
@@ -3612,6 +3805,21 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Stream.ExitBlock();
}
+void ASTWriter::WriteMacroUpdates() {
+ if (MacroUpdates.empty())
+ return;
+
+ RecordData Record;
+ for (MacroUpdatesMap::iterator I = MacroUpdates.begin(),
+ E = MacroUpdates.end();
+ I != E; ++I) {
+ addMacroRef(I->first, Record);
+ AddSourceLocation(I->second.UndefLoc, Record);
+ Record.push_back(inferSubmoduleIDFromLocation(I->second.UndefLoc));
+ }
+ Stream.EmitRecord(MACRO_UPDATES, Record);
+}
+
/// \brief Go through the declaration update blocks and resolve declaration
/// pointers into declaration IDs.
void ASTWriter::ResolveDeclUpdatesBlocks() {
@@ -3707,6 +3915,10 @@ void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Recor
Record.push_back(getIdentifierRef(II));
}
+void ASTWriter::addMacroRef(MacroInfo *MI, RecordDataImpl &Record) {
+ Record.push_back(getMacroRef(MI));
+}
+
IdentID ASTWriter::getIdentifierRef(const IdentifierInfo *II) {
if (II == 0)
return 0;
@@ -3717,6 +3929,19 @@ IdentID ASTWriter::getIdentifierRef(const IdentifierInfo *II) {
return ID;
}
+MacroID ASTWriter::getMacroRef(MacroInfo *MI) {
+ // Don't emit builtin macros like __LINE__ to the AST file unless they
+ // have been redefined by the header (in which case they are not
+ // isBuiltinMacro).
+ if (MI == 0 || MI->isBuiltinMacro())
+ return 0;
+
+ MacroID &ID = MacroIDs[MI];
+ if (ID == 0)
+ ID = NextMacroID++;
+ return ID;
+}
+
void ASTWriter::AddSelectorRef(const Selector SelRef, RecordDataImpl &Record) {
Record.push_back(getSelectorRef(SelRef));
}
@@ -3774,7 +3999,9 @@ void ASTWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
case TemplateArgument::Null:
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
case TemplateArgument::Pack:
+ // FIXME: Is this right?
break;
}
}
@@ -3931,10 +4158,9 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
llvm::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
if (FID.isInvalid())
return;
- const SrcMgr::SLocEntry *Entry = &SM.getSLocEntry(FID);
- assert(Entry->isFile());
+ assert(SM.getSLocEntry(FID).isFile());
- DeclIDInFileInfo *&Info = FileDeclIDs[Entry];
+ DeclIDInFileInfo *&Info = FileDeclIDs[FID];
if (!Info)
Info = new DeclIDInFileInfo();
@@ -4191,6 +4417,10 @@ void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
break;
case TemplateArgument::Declaration:
AddDeclRef(Arg.getAsDecl(), Record);
+ Record.push_back(Arg.isDeclForReferenceParam());
+ break;
+ case TemplateArgument::NullPtr:
+ AddTypeRef(Arg.getNullPtrType(), Record);
break;
case TemplateArgument::Integral:
AddAPSInt(Arg.getAsIntegral(), Record);
@@ -4403,6 +4633,7 @@ void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Rec
Record.push_back(Lambda.NumExplicitCaptures);
Record.push_back(Lambda.ManglingNumber);
AddDeclRef(Lambda.ContextDecl, Record);
+ AddTypeSourceInfo(Lambda.MethodTyInfo, Record);
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
LambdaExpr::Capture &Capture = Lambda.Captures[I];
AddSourceLocation(Capture.getLocation(), Record);
@@ -4423,6 +4654,7 @@ void ASTWriter::ReaderInitialized(ASTReader *Reader) {
assert(FirstDeclID == NextDeclID &&
FirstTypeID == NextTypeID &&
FirstIdentID == NextIdentID &&
+ FirstMacroID == NextMacroID &&
FirstSubmoduleID == NextSubmoduleID &&
FirstSelectorID == NextSelectorID &&
"Setting chain after writing has started.");
@@ -4432,19 +4664,23 @@ void ASTWriter::ReaderInitialized(ASTReader *Reader) {
FirstDeclID = NUM_PREDEF_DECL_IDS + Chain->getTotalNumDecls();
FirstTypeID = NUM_PREDEF_TYPE_IDS + Chain->getTotalNumTypes();
FirstIdentID = NUM_PREDEF_IDENT_IDS + Chain->getTotalNumIdentifiers();
+ FirstMacroID = NUM_PREDEF_MACRO_IDS + Chain->getTotalNumMacros();
FirstSubmoduleID = NUM_PREDEF_SUBMODULE_IDS + Chain->getTotalNumSubmodules();
FirstSelectorID = NUM_PREDEF_SELECTOR_IDS + Chain->getTotalNumSelectors();
NextDeclID = FirstDeclID;
NextTypeID = FirstTypeID;
NextIdentID = FirstIdentID;
+ NextMacroID = FirstMacroID;
NextSelectorID = FirstSelectorID;
NextSubmoduleID = FirstSubmoduleID;
}
void ASTWriter::IdentifierRead(IdentID ID, IdentifierInfo *II) {
IdentifierIDs[II] = ID;
- if (II->hasMacroDefinition())
- DeserializedMacroNames.push_back(II);
+}
+
+void ASTWriter::MacroRead(serialization::MacroID ID, MacroInfo *MI) {
+ MacroIDs[MI] = ID;
}
void ASTWriter::TypeRead(TypeIdx Idx, QualType T) {
@@ -4468,15 +4704,15 @@ void ASTWriter::MacroDefinitionRead(serialization::PreprocessedEntityID ID,
MacroDefinitions[MD] = ID;
}
-void ASTWriter::MacroVisible(IdentifierInfo *II) {
- DeserializedMacroNames.push_back(II);
-}
-
void ASTWriter::ModuleRead(serialization::SubmoduleID ID, Module *Mod) {
assert(SubmoduleIDs.find(Mod) == SubmoduleIDs.end());
SubmoduleIDs[Mod] = ID;
}
+void ASTWriter::UndefinedMacro(MacroInfo *MI) {
+ MacroUpdates[MI].UndefLoc = MI->getUndefLoc();
+}
+
void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
assert(D->isCompleteDefinition());
assert(!WritingAST && "Already writing the AST!");
@@ -4490,6 +4726,7 @@ void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
}
}
}
+
void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
assert(!WritingAST && "Already writing the AST!");
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
index 602943b..7486565 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -416,7 +416,7 @@ void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
}
Record.push_back(D->isInstanceMethod());
Record.push_back(D->isVariadic());
- Record.push_back(D->isSynthesized());
+ Record.push_back(D->isPropertyAccessor());
Record.push_back(D->isDefined());
Record.push_back(D->IsOverriding);
@@ -606,6 +606,8 @@ void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
Writer.AddDeclRef(D->getSuperClass(), Record);
Writer.AddSourceLocation(D->getIvarLBraceLoc(), Record);
Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
+ Record.push_back(D->hasNonZeroConstructors());
+ Record.push_back(D->hasDestructors());
Writer.AddCXXCtorInitializers(D->IvarInitializers, D->NumIvarInitializers,
Record);
Code = serialization::DECL_OBJC_IMPLEMENTATION;
@@ -675,6 +677,8 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(D->isNRVOVariable());
Record.push_back(D->isCXXForRangeDecl());
Record.push_back(D->isARCPseudoStrong());
+ Record.push_back(D->isConstexpr());
+
if (D->getInit()) {
Record.push_back(!D->isInitKnownICE() ? 1 : (D->isInitICE() ? 3 : 2));
Writer.AddStmt(D->getInit());
@@ -705,6 +709,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
D->getInitStyle() == VarDecl::CInit &&
D->getInit() == 0 &&
!isa<ParmVarDecl>(D) &&
+ !D->isConstexpr() &&
!SpecInfo)
AbbrevToUse = Writer.getDeclVarAbbrev();
@@ -945,11 +950,16 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
VisitFunctionDecl(D);
- Record.push_back(D->size_overridden_methods());
- for (CXXMethodDecl::method_iterator
- I = D->begin_overridden_methods(), E = D->end_overridden_methods();
- I != E; ++I)
- Writer.AddDeclRef(*I, Record);
+ if (D->isCanonicalDecl()) {
+ Record.push_back(D->size_overridden_methods());
+ for (CXXMethodDecl::method_iterator
+ I = D->begin_overridden_methods(), E = D->end_overridden_methods();
+ I != E; ++I)
+ Writer.AddDeclRef(*I, Record);
+ } else {
+ // We only need to record overridden methods once for the canonical decl.
+ Record.push_back(0);
+ }
Code = serialization::DECL_CXX_METHOD;
}
@@ -981,6 +991,7 @@ void ASTDeclWriter::VisitCXXConversionDecl(CXXConversionDecl *D) {
void ASTDeclWriter::VisitImportDecl(ImportDecl *D) {
VisitDecl(D);
+ Record.push_back(Writer.getSubmoduleID(D->getImportedModule()));
ArrayRef<SourceLocation> IdentifierLocs = D->getIdentifierLocs();
Record.push_back(!IdentifierLocs.empty());
if (IdentifierLocs.empty()) {
@@ -1073,7 +1084,7 @@ void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
Writer.AddDeclRef(&*I, Record);
}
- // InjectedClassNameType is computed, no need to write it.
+ Writer.AddTypeRef(D->getCommonPtr()->InjectedClassNameType, Record);
}
Code = serialization::DECL_CLASS_TEMPLATE;
}
@@ -1103,6 +1114,7 @@ void ASTDeclWriter::VisitClassTemplateSpecializationDecl(
Writer.AddTemplateArgumentList(&D->getTemplateArgs(), Record);
Writer.AddSourceLocation(D->getPointOfInstantiation(), Record);
Record.push_back(D->getSpecializationKind());
+ Record.push_back(D->isCanonicalDecl());
if (D->isCanonicalDecl()) {
// When reading, we'll add it to the folding set of the following template.
@@ -1172,7 +1184,8 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
// For an expanded parameter pack, record the number of expansion types here
- // so that it's easier for
+ // so that it's easier for deserialization to allocate the right amount of
+ // memory.
if (D->isExpandedParameterPack())
Record.push_back(D->getNumExpansionTypes());
@@ -1201,15 +1214,30 @@ void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
}
void ASTDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ // For an expanded parameter pack, record the number of expansion types here
+ // so that it's easier for deserialization to allocate the right amount of
+ // memory.
+ if (D->isExpandedParameterPack())
+ Record.push_back(D->getNumExpansionTemplateParameters());
+
VisitTemplateDecl(D);
// TemplateParmPosition.
Record.push_back(D->getDepth());
Record.push_back(D->getPosition());
- // Rest of TemplateTemplateParmDecl.
- Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record);
- Record.push_back(D->defaultArgumentWasInherited());
- Record.push_back(D->isParameterPack());
- Code = serialization::DECL_TEMPLATE_TEMPLATE_PARM;
+
+ if (D->isExpandedParameterPack()) {
+ for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
+ I != N; ++I)
+ Writer.AddTemplateParameterList(D->getExpansionTemplateParameters(I),
+ Record);
+ Code = serialization::DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK;
+ } else {
+ // Rest of TemplateTemplateParmDecl.
+ Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record);
+ Record.push_back(D->defaultArgumentWasInherited());
+ Record.push_back(D->isParameterPack());
+ Code = serialization::DECL_TEMPLATE_TEMPLATE_PARM;
+ }
}
void ASTDeclWriter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
@@ -1462,6 +1490,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isNRVOVariable
Abv->Add(BitCodeAbbrevOp(0)); // isCXXForRangeDecl
Abv->Add(BitCodeAbbrevOp(0)); // isARCPseudoStrong
+ Abv->Add(BitCodeAbbrevOp(0)); // isConstexpr
Abv->Add(BitCodeAbbrevOp(0)); // HasInit
Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
// ParmVarDecl
@@ -1540,6 +1569,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
+ Abv->Add(BitCodeAbbrevOp(0)); // isConstexpr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasInit
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasMemberSpecInfo
// Type Source Info
@@ -1603,7 +1633,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
//Character Literal
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getValue
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsWide
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // getKind
CharacterLiteralAbbrev = Stream.EmitAbbrev(Abv);
Abv = new BitCodeAbbrev();
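The CharacterLiteral abbreviation change above is driven by C++11: a single IsWide bit can no longer distinguish the literal kinds, so the field grows to two bits and records CharacterLiteral::getKind() instead. A minimal illustration (enumerator names follow clang's CharacterLiteral::CharacterKind of this era):

    // Four kinds must now round-trip: 'a', L'a', u'a' (char16_t), U'a' (char32_t).
    enum CharacterKind { Ascii, Wide, UTF16, UTF32 };
    static_assert(UTF32 <= 3, "all four kinds fit in a 2-bit fixed field");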
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
index f63388f..7e8ce42 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -218,7 +218,7 @@ void ASTStmtWriter::VisitDeclStmt(DeclStmt *S) {
Code = serialization::STMT_DECL;
}
-void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) {
+void ASTStmtWriter::VisitGCCAsmStmt(GCCAsmStmt *S) {
VisitStmt(S);
Record.push_back(S->getNumOutputs());
Record.push_back(S->getNumInputs());
@@ -227,7 +227,6 @@ void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) {
Writer.AddSourceLocation(S->getRParenLoc(), Record);
Record.push_back(S->isVolatile());
Record.push_back(S->isSimple());
- Record.push_back(S->isMSAsm());
Writer.AddStmt(S->getAsmString());
// Outputs
@@ -246,14 +245,16 @@ void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) {
// Clobbers
for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
- Writer.AddStmt(S->getClobber(I));
+ Writer.AddStmt(S->getClobberStringLiteral(I));
- Code = serialization::STMT_ASM;
+ Code = serialization::STMT_GCCASM;
}
void ASTStmtWriter::VisitMSAsmStmt(MSAsmStmt *S) {
// FIXME: Statement writer not yet implemented for MS style inline asm.
VisitStmt(S);
+
+ Code = serialization::STMT_MSASM;
}
void ASTStmtWriter::VisitExpr(Expr *E) {
@@ -535,6 +536,7 @@ void ASTStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
Writer.AddStmt(E->getRHS());
Record.push_back(E->getOpcode()); // FIXME: stable encoding
Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Record.push_back(E->isFPContractable());
Code = serialization::EXPR_BINARY_OPERATOR;
}
@@ -604,6 +606,8 @@ void ASTStmtWriter::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
void ASTStmtWriter::VisitInitListExpr(InitListExpr *E) {
VisitExpr(E);
+ // NOTE: only add the (possibly null) syntactic form.
+ // No need to serialize the isSemanticForm flag and the semantic form.
Writer.AddStmt(E->getSyntacticForm());
Writer.AddSourceLocation(E->getLBraceLoc(), Record);
Writer.AddSourceLocation(E->getRBraceLoc(), Record);
@@ -1054,6 +1058,7 @@ void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
Record.push_back(E->getOperator());
Writer.AddSourceRange(E->Range, Record);
+ Record.push_back(E->isFPContractable());
Code = serialization::EXPR_CXX_OPERATOR_CALL;
}
@@ -1233,7 +1238,7 @@ void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
Writer.AddDeclRef(E->getOperatorDelete(), Record);
Writer.AddTypeSourceInfo(E->getAllocatedTypeSourceInfo(), Record);
Writer.AddSourceRange(E->getTypeIdParens(), Record);
- Writer.AddSourceLocation(E->getStartLoc(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
Writer.AddSourceRange(E->getDirectInitRange(), Record);
for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), e = E->raw_arg_end();
I != e; ++I)
@@ -1382,8 +1387,6 @@ void ASTStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
void ASTStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
VisitOverloadExpr(E);
Record.push_back(E->requiresADL());
- if (E->requiresADL())
- Record.push_back(E->isStdAssociatedNamespace());
Record.push_back(E->isOverloaded());
Writer.AddDeclRef(E->getNamingClass(), Record);
Code = serialization::EXPR_CXX_UNRESOLVED_LOOKUP;
@@ -1480,6 +1483,17 @@ void ASTStmtWriter::VisitSubstNonTypeTemplateParmPackExpr(
Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK;
}
+void ASTStmtWriter::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumExpansions());
+ Writer.AddDeclRef(E->getParameterPack(), Record);
+ Writer.AddSourceLocation(E->getParameterPackLocation(), Record);
+ for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end();
+ I != End; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = serialization::EXPR_FUNCTION_PARM_PACK;
+}
+
void ASTStmtWriter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
VisitExpr(E);
Writer.AddStmt(E->Temporary);
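The new VisitFunctionParmPackExpr above follows the writer's usual count-first layout: the number of expansions goes into the record before the expansion decls themselves, so the reader can size its storage before pulling entries back out in the same order. A self-contained sketch of that layout (plain C++; the Record type and function names are illustrative, not clang's):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef std::vector<std::uint64_t> Record;

    void writePack(Record &R, const std::vector<std::uint64_t> &Ids) {
      R.push_back(Ids.size());     // count first, as with getNumExpansions()
      for (std::size_t I = 0; I != Ids.size(); ++I)
        R.push_back(Ids[I]);       // then one entry per expansion
    }

    std::vector<std::uint64_t> readPack(const Record &R, std::size_t &Idx) {
      std::size_t N = R[Idx++];    // the count comes back first...
      std::vector<std::uint64_t> Ids;
      Ids.reserve(N);              // ...so allocation can happen up front
      for (std::size_t I = 0; I != N; ++I)
        Ids.push_back(R[Idx++]);
      return Ids;
    }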
diff --git a/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
index 02aed10..870d654 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
@@ -18,7 +18,6 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/FileSystemStatCache.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
@@ -32,11 +31,7 @@ PCHGenerator::PCHGenerator(const Preprocessor &PP,
raw_ostream *OS)
: PP(PP), OutputFile(OutputFile), Module(Module),
isysroot(isysroot.str()), Out(OS),
- SemaPtr(0), StatCalls(0), Stream(Buffer), Writer(Stream) {
- // Install a stat() listener to keep track of all of the stat()
- // calls.
- StatCalls = new MemorizeStatCalls();
- PP.getFileManager().addStatCache(StatCalls, /*AtBeginning=*/false);
+ SemaPtr(0), Stream(Buffer), Writer(Stream) {
}
PCHGenerator::~PCHGenerator() {
@@ -48,7 +43,7 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
// Emit the PCH file
assert(SemaPtr && "No Sema?");
- Writer.WriteAST(*SemaPtr, StatCalls, OutputFile, Module, isysroot);
+ Writer.WriteAST(*SemaPtr, OutputFile, Module, isysroot);
// Write the generated bitstream to "Out".
Out->write((char *)&Buffer.front(), Buffer.size());
@@ -60,6 +55,10 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
Buffer.clear();
}
+PPMutationListener *PCHGenerator::GetPPMutationListener() {
+ return &Writer;
+}
+
ASTMutationListener *PCHGenerator::GetASTMutationListener() {
return &Writer;
}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/Module.cpp b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
index ff241d3..5e42ab4 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
@@ -21,12 +21,15 @@ using namespace serialization;
using namespace reader;
ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation)
- : Kind(Kind), DirectlyImported(false), Generation(Generation), SizeInBits(0),
+ : Kind(Kind), File(0), DirectlyImported(false),
+ Generation(Generation), SizeInBits(0),
LocalNumSLocEntries(0), SLocEntryBaseID(0),
SLocEntryBaseOffset(0), SLocEntryOffsets(0),
- SLocFileOffsets(0), LocalNumIdentifiers(0),
+ LocalNumIdentifiers(0),
IdentifierOffsets(0), BaseIdentifierID(0), IdentifierTableData(0),
- IdentifierLookupTable(0), BasePreprocessedEntityID(0),
+ IdentifierLookupTable(0),
+ LocalNumMacros(0), MacroOffsets(0),
+ BasePreprocessedEntityID(0),
PreprocessedEntityOffsets(0), NumPreprocessedEntities(0),
LocalNumHeaderFileInfos(0),
HeaderFileInfoTableData(0), HeaderFileInfoTable(0),
@@ -35,9 +38,10 @@ ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation)
SelectorLookupTableData(0), SelectorLookupTable(0), LocalNumDecls(0),
DeclOffsets(0), BaseDeclID(0),
LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(0),
- FileSortedDecls(0), RedeclarationsMap(0), LocalNumRedeclarationsInMap(0),
+ FileSortedDecls(0), NumFileSortedDecls(0),
+ RedeclarationsMap(0), LocalNumRedeclarationsInMap(0),
ObjCCategoriesMap(0), LocalNumObjCCategoriesInMap(0),
- LocalNumTypes(0), TypeOffsets(0), BaseTypeIndex(0), StatCache(0)
+ LocalNumTypes(0), TypeOffsets(0), BaseTypeIndex(0)
{}
ModuleFile::~ModuleFile() {
@@ -89,6 +93,10 @@ void ModuleFile::dump() {
<< " Number of identifiers: " << LocalNumIdentifiers << '\n';
dumpLocalRemap("Identifier ID local -> global map", IdentifierRemap);
+ llvm::errs() << " Base macro ID: " << BaseMacroID << '\n'
+ << " Number of macros: " << LocalNumMacros << '\n';
+ dumpLocalRemap("Macro ID local -> global map", MacroRemap);
+
llvm::errs() << " Base submodule ID: " << BaseSubmoduleID << '\n'
<< " Number of submodules: " << LocalNumSubmodules << '\n';
dumpLocalRemap("Submodule ID local -> global map", SubmoduleRemap);
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
index ab364b7..efe4421 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
@@ -50,6 +50,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// Allocate a new module.
ModuleFile *New = new ModuleFile(Type, Generation);
New->FileName = FileName.str();
+ New->File = Entry;
Chain.push_back(New);
NewModule = true;
ModuleEntry = New;
@@ -87,6 +88,45 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
return std::make_pair(ModuleEntry, NewModule);
}
+namespace {
+ /// \brief Predicate that checks whether a module file occurs within
+ /// the given set.
+ class IsInModuleFileSet : public std::unary_function<ModuleFile *, bool> {
+ llvm::SmallPtrSet<ModuleFile *, 4> &Removed;
+
+ public:
+ IsInModuleFileSet(llvm::SmallPtrSet<ModuleFile *, 4> &Removed)
+ : Removed(Removed) { }
+
+ bool operator()(ModuleFile *MF) const {
+ return Removed.count(MF);
+ }
+ };
+}
+
+void ModuleManager::removeModules(ModuleIterator first, ModuleIterator last) {
+ if (first == last)
+ return;
+
+ // Collect the set of module file pointers that we'll be removing.
+ llvm::SmallPtrSet<ModuleFile *, 4> victimSet(first, last);
+
+ // Remove any references to the now-destroyed modules.
+ IsInModuleFileSet checkInSet(victimSet);
+ for (unsigned i = 0, n = Chain.size(); i != n; ++i) {
+ Chain[i]->ImportedBy.remove_if(checkInSet);
+ }
+
+ // Delete the modules and erase them from the various structures.
+ for (ModuleIterator victim = first; victim != last; ++victim) {
+ Modules.erase((*victim)->File);
+ delete *victim;
+ }
+
+ // Remove the modules from the chain.
+ Chain.erase(first, last);
+}
+
void ModuleManager::addInMemoryBuffer(StringRef FileName,
llvm::MemoryBuffer *Buffer) {
@@ -95,7 +135,7 @@ void ModuleManager::addInMemoryBuffer(StringRef FileName,
InMemoryBuffers[Entry] = Buffer;
}
-ModuleManager::ModuleManager(const FileSystemOptions &FSO) : FileMgr(FSO) { }
+ModuleManager::ModuleManager(FileManager &FileMgr) : FileMgr(FileMgr) { }
ModuleManager::~ModuleManager() {
for (unsigned i = 0, e = Chain.size(); i != e; ++i)
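The new removeModules() above is a three-step teardown: snapshot the doomed ModuleFile pointers into a set, strip dangling back-references out of every survivor's ImportedBy list, and only then delete and erase the range. The same shape in self-contained STL form (illustrative names; a lambda stands in for the IsInModuleFileSet functor):

    #include <algorithm>
    #include <cstddef>
    #include <set>
    #include <vector>

    struct Node { std::vector<Node *> ImportedBy; };

    void removeRange(std::vector<Node *> &Chain,
                     std::vector<Node *>::iterator First,
                     std::vector<Node *>::iterator Last) {
      if (First == Last)
        return;
      std::set<Node *> Victims(First, Last);        // 1. collect the victims
      for (std::size_t I = 0; I != Chain.size(); ++I) { // 2. purge back-refs
        std::vector<Node *> &IB = Chain[I]->ImportedBy;
        IB.erase(std::remove_if(IB.begin(), IB.end(),
                                [&Victims](Node *M) {
                                  return Victims.count(M) != 0;
                                }),
                 IB.end());
      }
      for (std::vector<Node *>::iterator It = First; It != Last; ++It)
        delete *It;                                 // 3. destroy...
      Chain.erase(First, Last);                     // ...and drop from the chain
    }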
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
deleted file mode 100644
index 84ea8c7..0000000
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-//== AdjustedReturnValueChecker.cpp -----------------------------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines AdjustedReturnValueChecker, a simple check to see if the
-// return value of a function call is different than the one the caller thinks
-// it is.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ClangSACheckers.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
-
-using namespace clang;
-using namespace ento;
-
-namespace {
-class AdjustedReturnValueChecker :
- public Checker< check::PostStmt<CallExpr> > {
-public:
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
-};
-}
-
-void AdjustedReturnValueChecker::checkPostStmt(const CallExpr *CE,
- CheckerContext &C) const {
-
- // Get the result type of the call.
- QualType expectedResultTy = CE->getType();
-
- // Fetch the signature of the called function.
- ProgramStateRef state = C.getState();
- const LocationContext *LCtx = C.getLocationContext();
-
- SVal V = state->getSVal(CE, LCtx);
-
- if (V.isUnknown())
- return;
-
- // Casting to void? Discard the value.
- if (expectedResultTy->isVoidType()) {
- C.addTransition(state->BindExpr(CE, LCtx, UnknownVal()));
- return;
- }
-
- const MemRegion *callee = state->getSVal(CE->getCallee(), LCtx).getAsRegion();
- if (!callee)
- return;
-
- QualType actualResultTy;
-
- if (const FunctionTextRegion *FT = dyn_cast<FunctionTextRegion>(callee)) {
- const FunctionDecl *FD = FT->getDecl();
- actualResultTy = FD->getResultType();
- }
- else if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(callee)) {
- const BlockTextRegion *BR = BD->getCodeRegion();
- const BlockPointerType *BT=BR->getLocationType()->getAs<BlockPointerType>();
- const FunctionType *FT = BT->getPointeeType()->getAs<FunctionType>();
- actualResultTy = FT->getResultType();
- }
-
- // Can this happen?
- if (actualResultTy.isNull())
- return;
-
- // For now, ignore references.
- if (actualResultTy->getAs<ReferenceType>())
- return;
-
-
- // Are they the same?
- if (expectedResultTy != actualResultTy) {
- // FIXME: Do more checking and actual emit an error. At least performing
- // the cast avoids some assertion failures elsewhere.
- SValBuilder &svalBuilder = C.getSValBuilder();
- V = svalBuilder.evalCast(V, expectedResultTy, actualResultTy);
- C.addTransition(state->BindExpr(CE, LCtx, V));
- }
-}
-
-void ento::registerAdjustedReturnValueChecker(CheckerManager &mgr) {
- mgr.registerChecker<AdjustedReturnValueChecker>();
-}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index b2ad184..535d8ee 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -78,7 +78,7 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
new BugReport(*BT, BT->getDescription(), N);
report->addRange(LoadS->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
return;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index c6efe94..457c870 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -208,7 +208,7 @@ void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
break;
}
- checkerContext.EmitReport(new BugReport(*BT, os.str(), errorNode));
+ checkerContext.emitReport(new BugReport(*BT, os.str(), errorNode));
}
void RegionRawOffsetV2::dump() const {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
index c582cfc..81e8dd8 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
@@ -105,9 +105,9 @@ void AttrNonNullChecker::checkPreCall(const CallEvent &Call,
// Highlight the range of the argument that was null.
R->addRange(Call.getArgSourceRange(idx));
if (const Expr *ArgE = Call.getArgExpr(idx))
- bugreporter::addTrackNullOrUndefValueVisitor(errorNode, ArgE, R);
+ bugreporter::trackNullOrUndefValue(errorNode, ArgE, *R);
// Emit the bug report.
- C.EmitReport(R);
+ C.emitReport(R);
}
// Always return. Either we cached out or we just emitted an error.
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 955e79a..eba534e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -117,7 +117,7 @@ void NilArgChecker::WarnNilArg(CheckerContext &C,
BugReport *R = new BugReport(*BT, os.str(), N);
R->addRange(msg.getArgSourceRange(Arg));
- C.EmitReport(R);
+ C.emitReport(R);
}
}
@@ -358,20 +358,20 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
BugReport *report = new BugReport(*BT, os.str(), N);
report->addRange(CE->getArg(2)->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
}
//===----------------------------------------------------------------------===//
-// CFRetain/CFRelease checking for null arguments.
+// CFRetain/CFRelease/CFMakeCollectable checking for null arguments.
//===----------------------------------------------------------------------===//
namespace {
class CFRetainReleaseChecker : public Checker< check::PreStmt<CallExpr> > {
mutable OwningPtr<APIMisuse> BT;
- mutable IdentifierInfo *Retain, *Release;
+ mutable IdentifierInfo *Retain, *Release, *MakeCollectable;
public:
- CFRetainReleaseChecker(): Retain(0), Release(0) {}
+ CFRetainReleaseChecker(): Retain(0), Release(0), MakeCollectable(0) {}
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
};
} // end anonymous namespace
@@ -392,12 +392,14 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
ASTContext &Ctx = C.getASTContext();
Retain = &Ctx.Idents.get("CFRetain");
Release = &Ctx.Idents.get("CFRelease");
- BT.reset(new APIMisuse("null passed to CFRetain/CFRelease"));
+ MakeCollectable = &Ctx.Idents.get("CFMakeCollectable");
+ BT.reset(
+ new APIMisuse("null passed to CFRetain/CFRelease/CFMakeCollectable"));
}
- // Check if we called CFRetain/CFRelease.
+ // Check if we called CFRetain/CFRelease/CFMakeCollectable.
const IdentifierInfo *FuncII = FD->getIdentifier();
- if (!(FuncII == Retain || FuncII == Release))
+ if (!(FuncII == Retain || FuncII == Release || FuncII == MakeCollectable))
return;
// FIXME: The rest of this just checks that the argument is non-null.
@@ -426,14 +428,20 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
if (!N)
return;
- const char *description = (FuncII == Retain)
- ? "Null pointer argument in call to CFRetain"
- : "Null pointer argument in call to CFRelease";
+ const char *description;
+ if (FuncII == Retain)
+ description = "Null pointer argument in call to CFRetain";
+ else if (FuncII == Release)
+ description = "Null pointer argument in call to CFRelease";
+ else if (FuncII == MakeCollectable)
+ description = "Null pointer argument in call to CFMakeCollectable";
+ else
+ llvm_unreachable("impossible case");
BugReport *report = new BugReport(*BT, description, N);
report->addRange(Arg->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, Arg, report);
- C.EmitReport(report);
+ bugreporter::trackNullOrUndefValue(N, Arg, *report);
+ C.emitReport(report);
return;
}
@@ -491,7 +499,7 @@ void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
BugReport *report = new BugReport(*BT, os.str(), N);
report->addRange(msg.getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
}
@@ -644,7 +652,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
BugReport *R = new BugReport(*BT, os.str(), errorNode.getValue());
R->addRange(msg.getArgSourceRange(I));
- C.EmitReport(R);
+ C.emitReport(R);
}
}
@@ -716,6 +724,73 @@ void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
C.addTransition(State);
}
+namespace {
+/// \class ObjCNonNilReturnValueChecker
+/// \brief The checker restricts the return values of APIs known to
+/// never (or almost never) return 'nil'.
+class ObjCNonNilReturnValueChecker
+ : public Checker<check::PostObjCMessage> {
+ mutable bool Initialized;
+ mutable Selector ObjectAtIndex;
+ mutable Selector ObjectAtIndexedSubscript;
+
+public:
+ ObjCNonNilReturnValueChecker() : Initialized(false) {}
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+};
+}
+
+static ProgramStateRef assumeExprIsNonNull(const Expr *NonNullExpr,
+ ProgramStateRef State,
+ CheckerContext &C) {
+ SVal Val = State->getSVal(NonNullExpr, C.getLocationContext());
+ if (DefinedOrUnknownSVal *DV = dyn_cast<DefinedOrUnknownSVal>(&Val))
+ return State->assume(*DV, true);
+ return State;
+}
+
+void ObjCNonNilReturnValueChecker::checkPostObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C)
+ const {
+ ProgramStateRef State = C.getState();
+
+ if (!Initialized) {
+ ASTContext &Ctx = C.getASTContext();
+ ObjectAtIndex = GetUnarySelector("objectAtIndex", Ctx);
+ ObjectAtIndexedSubscript = GetUnarySelector("objectAtIndexedSubscript", Ctx);
+ }
+
+ // Check the receiver type.
+ if (const ObjCInterfaceDecl *Interface = M.getReceiverInterface()) {
+
+ // Assume that object returned from '[self init]' or '[super init]' is not
+ // 'nil' if we are processing an inlined function/method.
+ //
+ // A defensive callee will (and should) check if the object returned by
+ // '[super init]' is 'nil' before doing its own initialization. However,
+ // since 'nil' is rarely returned in practice, we should not warn when the
+ // caller to the defensive constructor uses the object in contexts where
+ // 'nil' is not accepted.
+ if (!C.inTopFrame() && M.getDecl() &&
+ M.getDecl()->getMethodFamily() == OMF_init &&
+ M.isReceiverSelfOrSuper()) {
+ State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+ }
+
+ // Objects returned from
+ // [NSArray|NSOrderedSet]::[ObjectAtIndex|ObjectAtIndexedSubscript]
+ // are never 'nil'.
+ FoundationClass Cl = findKnownClass(Interface);
+ if (Cl == FC_NSArray || Cl == FC_NSOrderedSet) {
+ Selector Sel = M.getSelector();
+ if (Sel == ObjectAtIndex || Sel == ObjectAtIndexedSubscript) {
+ // Go ahead and assume the value is non-nil.
+ State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+ }
+ }
+ }
+ C.addTransition(State);
+}
//===----------------------------------------------------------------------===//
// Check registration.
@@ -744,3 +819,7 @@ void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
void ento::registerObjCLoopChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCLoopChecker>();
}
+
+void ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCNonNilReturnValueChecker>();
+}
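The assumeExprIsNonNull() helper added above leans on ProgramState::assume(): asking for the "true" (non-null) branch yields a state in which the nil possibility has been pruned, or no state at all if the value is already known to be null. A toy stand-in for that contract (not clang's API, just its shape):

    // Two flags model a value's feasible null-ness; "assume non-null" keeps
    // the non-null world and reports infeasibility by returning a null
    // pointer, the way State->assume(*DV, true) can return a null state.
    struct ToyState { bool MayBeNull; bool MayBeNonNull; };

    const ToyState *assumeNonNull(const ToyState &In, ToyState &Out) {
      if (!In.MayBeNonNull)
        return 0;              // contradiction: the value is definitely null
      Out = In;
      Out.MayBeNull = false;   // nil branch pruned, as for objectAtIndex:
      return &Out;
    }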
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index a4fc396..92edefe 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -35,7 +35,7 @@ void BoolAssignmentChecker::emitReport(ProgramStateRef state,
if (ExplodedNode *N = C.addTransition(state)) {
if (!BT)
BT.reset(new BuiltinBug("Assignment of a non-Boolean value"));
- C.EmitReport(new BugReport(*BT, BT->getDescription(), N));
+ C.emitReport(new BugReport(*BT, BT->getDescription(), N));
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 509bc79..6ef022b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -55,7 +55,7 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
// FIXME: Refactor into StoreManager itself?
MemRegionManager& RM = C.getStoreManager().getRegionManager();
const AllocaRegion* R =
- RM.getAllocaRegion(CE, C.getCurrentBlockCount(), C.getLocationContext());
+ RM.getAllocaRegion(CE, C.blockCount(), C.getLocationContext());
// Set the extent of the region in bytes. This enables us to use the
// SVal of the argument directly. If we save the extent in bits, we
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 483082a..eae9ddf 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -188,21 +188,9 @@ public:
NonLoc right) const;
};
-class CStringLength {
-public:
- typedef llvm::ImmutableMap<const MemRegion *, SVal> EntryMap;
-};
} //end anonymous namespace
-namespace clang {
-namespace ento {
- template <>
- struct ProgramStateTrait<CStringLength>
- : public ProgramStatePartialTrait<CStringLength::EntryMap> {
- static void *GDMIndex() { return CStringChecker::getTag(); }
- };
-}
-}
+REGISTER_MAP_WITH_PROGRAMSTATE(CStringLength, const MemRegion *, SVal)
//===----------------------------------------------------------------------===//
// Individual checks and utility methods.
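The REGISTER_MAP_WITH_PROGRAMSTATE line above replaces the hand-written trait boilerplate deleted in this hunk. Roughly (paraphrased, not the exact expansion; the generated GDMIndex in particular differs in detail), it produces a tag type, a CStringLengthTy typedef for the underlying ImmutableMap, and the ProgramStateTrait specialization, which is why the later hunks switch from CStringLength::EntryMap to CStringLengthTy:

    namespace {
    class CStringLength {};                                    // the trait tag
    typedef llvm::ImmutableMap<const MemRegion *, SVal> CStringLengthTy;
    }
    namespace clang {
    namespace ento {
    template <>
    struct ProgramStateTrait<CStringLength>
        : public ProgramStatePartialTrait<CStringLengthTy> {
      static void *GDMIndex() { static int Index; return &Index; } // unique key
    };
    }
    }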
@@ -252,8 +240,8 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
BugReport *report = new BugReport(*BT, os.str(), N);
report->addRange(S->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, S, report);
- C.EmitReport(report);
+ bugreporter::trackNullOrUndefValue(N, S, *report);
+ C.emitReport(report);
return NULL;
}
@@ -327,7 +315,7 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
// reference is outside the range.
report->addRange(S->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
return NULL;
}
@@ -544,7 +532,7 @@ void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
report->addRange(First->getSourceRange());
report->addRange(Second->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
@@ -607,7 +595,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
// Generate a report for this bug.
BugReport *report = new BugReport(*BT_AdditionOverflow, warning, N);
- C.EmitReport(report);
+ C.emitReport(report);
return NULL;
}
@@ -673,11 +661,11 @@ SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
}
// Otherwise, get a new symbol and update the state.
- unsigned Count = C.getCurrentBlockCount();
SValBuilder &svalBuilder = C.getSValBuilder();
QualType sizeTy = svalBuilder.getContext().getSizeType();
SVal strLength = svalBuilder.getMetadataSymbolVal(CStringChecker::getTag(),
- MR, Ex, sizeTy, Count);
+ MR, Ex, sizeTy,
+ C.blockCount());
if (!hypothetical)
state = state->set<CStringLength>(MR, strLength);
@@ -714,7 +702,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
os.str(), N);
report->addRange(Ex->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
return UndefinedVal();
@@ -778,7 +766,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
os.str(), N);
report->addRange(Ex->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
return UndefinedVal();
@@ -826,15 +814,14 @@ ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
}
// Invalidate this region.
- unsigned Count = C.getCurrentBlockCount();
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
- return state->invalidateRegions(R, E, Count, LCtx);
+ return state->invalidateRegions(R, E, C.blockCount(), LCtx);
}
// If we have a non-region value by chance, just remove the binding.
// FIXME: is this necessary or correct? This handles the non-Region
// cases. Is it ever valid to store to these?
- return state->unbindLoc(*L);
+ return state->killBinding(*L);
}
bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
@@ -843,7 +830,7 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
switch (MR->getKind()) {
case MemRegion::FunctionTextRegionKind: {
- const FunctionDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+ const NamedDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
if (FD)
os << "the address of the function '" << *FD << '\'';
else
@@ -957,9 +944,8 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
} else {
// If we don't know how much we copied, we can at least
// conjure a return value for later.
- unsigned Count = C.getCurrentBlockCount();
- SVal result =
- C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ SVal result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx,
+ C.blockCount());
state = state->BindExpr(CE, LCtx, result);
}
@@ -1093,8 +1079,7 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
state = CheckBufferAccess(C, state, Size, Left, Right);
if (state) {
// The return value is the comparison result, which we don't know.
- unsigned Count = C.getCurrentBlockCount();
- SVal CmpV = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ SVal CmpV = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount());
state = state->BindExpr(CE, LCtx, CmpV);
C.addTransition(state);
}
@@ -1206,8 +1191,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// no guarantee the full string length will actually be returned.
// All we know is the return value is the min of the string length
// and the limit. This is better than nothing.
- unsigned Count = C.getCurrentBlockCount();
- result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx, C.blockCount());
NonLoc *resultNL = cast<NonLoc>(&result);
if (strLengthNL) {
@@ -1234,8 +1218,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// If we don't know the length of the string, conjure a return
// value, so it can be used in constraints, at least.
if (result.isUnknown()) {
- unsigned Count = C.getCurrentBlockCount();
- result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx, C.blockCount());
}
}
@@ -1612,8 +1595,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If this is a stpcpy-style copy, but we were unable to check for a buffer
// overflow, we still need a result. Conjure a return value.
if (returnEnd && Result.isUnknown()) {
- unsigned Count = C.getCurrentBlockCount();
- Result = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ Result = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount());
}
// Set the return value.
@@ -1770,8 +1752,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
if (!canComputeResult) {
// Conjure a symbolic value. It's the best we can do.
- unsigned Count = C.getCurrentBlockCount();
- SVal resultVal = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ SVal resultVal = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount());
state = state->BindExpr(CE, LCtx, resultVal);
}
@@ -1885,7 +1866,7 @@ void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
}
bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const {
- CStringLength::EntryMap Entries = state->get<CStringLength>();
+ CStringLengthTy Entries = state->get<CStringLength>();
return !Entries.isEmpty();
}
@@ -1895,7 +1876,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call) const {
- CStringLength::EntryMap Entries = state->get<CStringLength>();
+ CStringLengthTy Entries = state->get<CStringLength>();
if (Entries.isEmpty())
return state;
@@ -1915,10 +1896,10 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
}
}
- CStringLength::EntryMap::Factory &F = state->get_context<CStringLength>();
+ CStringLengthTy::Factory &F = state->get_context<CStringLength>();
// Then loop over the entries in the current state.
- for (CStringLength::EntryMap::iterator I = Entries.begin(),
+ for (CStringLengthTy::iterator I = Entries.begin(),
E = Entries.end(); I != E; ++I) {
const MemRegion *MR = I.getKey();
@@ -1945,9 +1926,9 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
void CStringChecker::checkLiveSymbols(ProgramStateRef state,
SymbolReaper &SR) const {
// Mark all symbols in our string length map as valid.
- CStringLength::EntryMap Entries = state->get<CStringLength>();
+ CStringLengthTy Entries = state->get<CStringLength>();
- for (CStringLength::EntryMap::iterator I = Entries.begin(), E = Entries.end();
+ for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
I != E; ++I) {
SVal Len = I.getData();
@@ -1963,12 +1944,12 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
return;
ProgramStateRef state = C.getState();
- CStringLength::EntryMap Entries = state->get<CStringLength>();
+ CStringLengthTy Entries = state->get<CStringLength>();
if (Entries.isEmpty())
return;
- CStringLength::EntryMap::Factory &F = state->get_context<CStringLength>();
- for (CStringLength::EntryMap::iterator I = Entries.begin(), E = Entries.end();
+ CStringLengthTy::Factory &F = state->get_context<CStringLength>();
+ for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
I != E; ++I) {
SVal Len = I.getData();
if (SymbolRef Sym = Len.getAsSymbol()) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
index befc935..f1a3aac 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -33,7 +33,6 @@ namespace {
class WalkAST: public StmtVisitor<WalkAST> {
BugReporter &BR;
AnalysisDeclContext* AC;
- ASTContext &ASTC;
/// Check if two expressions refer to the same declaration.
inline bool sameDecl(const Expr *A1, const Expr *A2) {
@@ -58,8 +57,8 @@ class WalkAST: public StmtVisitor<WalkAST> {
const FunctionDecl *FD = CE->getDirectCallee();
if (!FD)
return false;
- return (CheckerContext::isCLibraryFunction(FD, "strlen", ASTC)
- && sameDecl(CE->getArg(0), WithArg));
+ return (CheckerContext::isCLibraryFunction(FD, "strlen") &&
+ sameDecl(CE->getArg(0), WithArg));
}
return false;
}
@@ -83,7 +82,7 @@ class WalkAST: public StmtVisitor<WalkAST> {
public:
WalkAST(BugReporter &br, AnalysisDeclContext* ac) :
- BR(br), AC(ac), ASTC(AC->getASTContext()) {
+ BR(br), AC(ac) {
}
// Statement visitor methods.
@@ -136,7 +135,7 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
if (!FD)
return;
- if (CheckerContext::isCLibraryFunction(FD, "strncat", ASTC)) {
+ if (CheckerContext::isCLibraryFunction(FD, "strncat")) {
if (containsBadStrncatPattern(CE)) {
const Expr *DstArg = CE->getArg(0);
const Expr *LenArg = CE->getArg(2);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 5edcf09..82bc136 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -75,13 +75,13 @@ void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C,
BugReport *R = new BugReport(*BT, BT->getName(), N);
if (BadE) {
R->addRange(BadE->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, BadE, R);
+ bugreporter::trackNullOrUndefValue(N, BadE, *R);
}
- C.EmitReport(R);
+ C.emitReport(R);
}
-StringRef describeUninitializedArgumentInCall(const CallEvent &Call,
- bool IsFirstArgument) {
+static StringRef describeUninitializedArgumentInCall(const CallEvent &Call,
+ bool IsFirstArgument) {
switch (Call.getKind()) {
case CE_ObjCMessage: {
const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
@@ -122,8 +122,8 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
BugReport *R = new BugReport(*BT, Desc, N);
R->addRange(argRange);
if (argEx)
- bugreporter::addTrackNullOrUndefValueVisitor(N, argEx, R);
- C.EmitReport(R);
+ bugreporter::trackNullOrUndefValue(N, argEx, *R);
+ C.emitReport(R);
}
return true;
}
@@ -207,7 +207,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
// FIXME: enhance track back for uninitialized value for arbitrary
// memregions
- C.EmitReport(R);
+ C.emitReport(R);
}
return true;
}
@@ -335,8 +335,8 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
// FIXME: getTrackNullOrUndefValueVisitor can't handle "super" yet.
if (const Expr *ReceiverE = ME->getInstanceReceiver())
- bugreporter::addTrackNullOrUndefValueVisitor(N, ReceiverE, R);
- C.EmitReport(R);
+ bugreporter::trackNullOrUndefValue(N, ReceiverE, *R);
+ C.emitReport(R);
}
return;
} else {
@@ -377,9 +377,9 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
report->addRange(ME->getReceiverRange());
// FIXME: This won't track "self" in messages to super.
if (const Expr *receiver = ME->getInstanceReceiver()) {
- bugreporter::addTrackNullOrUndefValueVisitor(N, receiver, report);
+ bugreporter::trackNullOrUndefValue(N, receiver, *report);
}
- C.EmitReport(report);
+ C.emitReport(report);
}
static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 2e184fb..1cb8a8d 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -75,7 +75,7 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
BugReport *R = new BugReport(*BT, BT->getDescription(),
errorNode);
R->addRange(CE->getSourceRange());
- C.EmitReport(R);
+ C.emitReport(R);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index 1407638..d6d0e3c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -64,7 +64,7 @@ void CastToStructChecker::checkPreStmt(const CastExpr *CE,
"errors or data corruption."));
BugReport *R = new BugReport(*BT,BT->getDescription(), N);
R->addRange(CE->getSourceRange());
- C.EmitReport(R);
+ C.emitReport(R);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 7a25865..9087205 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -85,7 +85,7 @@ static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID,
Expr::NPC_ValueDependentIsNull)) {
// This is only a 'release' if the property kind is not
// 'assign'.
- return PD->getSetterKind() != ObjCPropertyDecl::Assign;;
+ return PD->getSetterKind() != ObjCPropertyDecl::Assign;
}
// Recurse to children.
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index b8b7c36..5cd6194 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -270,6 +270,7 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
// Emit the error. First figure out which DeclRefExpr in the condition
// referenced the compared variable.
+ assert(drInc->getDecl());
const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
SmallVector<SourceRange, 2> ranges;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 0e9efaa..efaec2b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -25,6 +25,7 @@ using namespace ento;
// All checkers should be placed into anonymous namespace.
// We place the CheckerDocumentation inside ento namespace to make
// it visible in doxygen.
+namespace clang {
namespace ento {
/// This checker documents the callback functions checkers can use to implement
@@ -33,8 +34,8 @@ namespace ento {
/// checking.
///
/// \sa CheckerContext
-class CheckerDocumentation : public Checker< check::PreStmt<DeclStmt>,
- check::PostStmt<CallExpr>,
+class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
+ check::PostStmt<DeclStmt>,
check::PreObjCMessage,
check::PostObjCMessage,
check::PreCall,
@@ -64,8 +65,8 @@ public:
/// See checkBranchCondition() callback for performing custom processing of
/// the branching statements.
///
- /// check::PreStmt<DeclStmt>
- void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {}
+ /// check::PreStmt<ReturnStmt>
+ void checkPreStmt(const ReturnStmt *DS, CheckerContext &C) const {}
/// \brief Post-visit the Statement.
///
@@ -74,8 +75,8 @@ public:
/// which does not include the control flow statements such as IfStmt. The
/// callback can be specialized to be called with any subclass of Stmt.
///
- /// check::PostStmt<CallExpr>
- void checkPostStmt(const CallExpr *DS, CheckerContext &C) const;
+ /// check::PostStmt<DeclStmt>
+ void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
/// \brief Pre-visit the Objective C message.
///
@@ -98,8 +99,8 @@ public:
/// behavior for functions and methods no matter how they are being invoked.
///
/// Note that this includes ALL cross-body invocations, so if you want to
- /// limit your checks to, say, function calls, you can either test for that
- /// or fall back to the explicit callback (i.e. check::PreStmt).
+ /// limit your checks to, say, function calls, you should test for that at the
+ /// beginning of your callback function.
///
/// check::PreCall
void checkPreCall(const CallEvent &Call, CheckerContext &C) const {}
@@ -151,9 +152,8 @@ public:
/// check::DeadSymbols
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {}
- /// \brief Called when an end of path is reached in the ExplodedGraph.
- ///
- /// This callback should be used to check if the allocated resources are freed.
+ /// \brief Called when the analyzer core reaches the end of the top-level
+ /// function being analyzed.
///
/// check::EndPath
void checkEndPath(CheckerContext &Ctx) const {}
@@ -213,21 +213,35 @@ public:
/// check::LiveSymbols
void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const {}
-
+ /// \brief Called to determine if the checker currently needs to know when
+ /// the contents of any regions change.
+ ///
+ /// Since it is not necessarily cheap to compute which regions are being
+ /// changed, this allows the analyzer core to skip the more expensive
+ /// #checkRegionChanges when no checkers are tracking any state.
bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }
- /// \brief Allows tracking regions which get invalidated.
+ /// \brief Called when the contents of one or more regions change.
+ ///
+ /// This can occur in many different ways: an explicit bind, a blanket
+ /// invalidation of the region contents, or by passing a region to a function
+ /// call whose behavior the analyzer cannot model perfectly.
///
/// \param State The current program state.
/// \param Invalidated A set of all symbols potentially touched by the change.
/// \param ExplicitRegions The regions explicitly requested for invalidation.
- /// For example, in the case of a function call, these would be arguments.
- /// \param Regions The transitive closure of accessible regions,
- /// i.e. all regions that may have been touched by this change.
- /// \param Call The call expression wrapper if the regions are invalidated
- /// by a call, 0 otherwise.
- /// Note, in order to be notified, the checker should also implement the
- /// wantsRegionChangeUpdate callback.
+ /// For a function call, this would be the arguments. For a bind, this
+ /// would be the region being bound to.
+ /// \param Regions The transitive closure of regions accessible from
+ /// \p ExplicitRegions, i.e. all regions that may have been touched
+ /// by this change. For a simple bind, this list will be the same as
+ /// \p ExplicitRegions, since a bind does not affect the contents of
+ /// anything accessible through the base region.
+ /// \param Call The opaque call triggering this invalidation. Will be 0 if the
+ /// change was not triggered by a call.
+ ///
+ /// Note that this callback will not be invoked unless
+ /// #wantsRegionChangeUpdate returns \c true.
///
/// check::RegionChanges
ProgramStateRef
@@ -256,9 +270,10 @@ public:
};
-void CheckerDocumentation::checkPostStmt(const CallExpr *DS,
+void CheckerDocumentation::checkPostStmt(const DeclStmt *DS,
CheckerContext &C) const {
return;
}
-} // end namespace
+} // end namespace ento
+} // end namespace clang
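Putting the two region-change callbacks documented above together, a minimal checker under that contract looks roughly like the sketch below. Everything named RegionWatcher or TrackedRegions is hypothetical, and the exact checkRegionChanges parameter list may differ across releases; the documented point is the gating: return false from wantsRegionChangeUpdate while nothing is tracked so the core can skip the expensive callback.

    // Assumes: REGISTER_MAP_WITH_PROGRAMSTATE(TrackedRegions,
    //                                         const MemRegion *, SVal)
    class RegionWatcher : public Checker<check::RegionChanges> {
    public:
      // Cheap gate: only request the callback while any state is tracked.
      bool wantsRegionChangeUpdate(ProgramStateRef St) const {
        return !St->get<TrackedRegions>().isEmpty();
      }

      // Drop tracked entries whose regions may have been touched.
      ProgramStateRef
      checkRegionChanges(ProgramStateRef St,
                         const StoreManager::InvalidatedSymbols *Invalidated,
                         ArrayRef<const MemRegion *> ExplicitRegions,
                         ArrayRef<const MemRegion *> Regions,
                         const CallEvent *Call) const {
        for (unsigned I = 0, N = Regions.size(); I != N; ++I)
          St = St->remove<TrackedRegions>(Regions[I]);
        return St;
      }
    };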
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
index 8110bd0..235e633 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -13,33 +13,33 @@ include "clang/StaticAnalyzer/Checkers/CheckerBase.td"
// Packages.
//===----------------------------------------------------------------------===//
-def Experimental : Package<"experimental">;
+def Alpha : Package<"alpha">;
def Core : Package<"core">;
def CoreBuiltin : Package<"builtin">, InPackage<Core>;
def CoreUninitialized : Package<"uninitialized">, InPackage<Core>;
-def CoreExperimental : Package<"core">, InPackage<Experimental>, Hidden;
+def CoreAlpha : Package<"core">, InPackage<Alpha>, Hidden;
def Cplusplus : Package<"cplusplus">;
-def CplusplusExperimental : Package<"cplusplus">, InPackage<Experimental>, Hidden;
+def CplusplusAlpha : Package<"cplusplus">, InPackage<Alpha>, Hidden;
def DeadCode : Package<"deadcode">;
-def DeadCodeExperimental : Package<"deadcode">, InPackage<Experimental>, Hidden;
+def DeadCodeAlpha : Package<"deadcode">, InPackage<Alpha>, Hidden;
def Security : Package <"security">;
def InsecureAPI : Package<"insecureAPI">, InPackage<Security>;
-def SecurityExperimental : Package<"security">, InPackage<Experimental>, Hidden;
-def Taint : Package<"taint">, InPackage<SecurityExperimental>, Hidden;
+def SecurityAlpha : Package<"security">, InPackage<Alpha>, Hidden;
+def Taint : Package<"taint">, InPackage<SecurityAlpha>, Hidden;
def Unix : Package<"unix">;
-def UnixExperimental : Package<"unix">, InPackage<Experimental>, Hidden;
+def UnixAlpha : Package<"unix">, InPackage<Alpha>, Hidden;
def CString : Package<"cstring">, InPackage<Unix>, Hidden;
-def CStringExperimental : Package<"cstring">, InPackage<UnixExperimental>, Hidden;
+def CStringAlpha : Package<"cstring">, InPackage<UnixAlpha>, Hidden;
def OSX : Package<"osx">;
-def OSXExperimental : Package<"osx">, InPackage<Experimental>, Hidden;
+def OSXAlpha : Package<"osx">, InPackage<Alpha>, Hidden;
def Cocoa : Package<"cocoa">, InPackage<OSX>;
-def CocoaExperimental : Package<"cocoa">, InPackage<OSXExperimental>, Hidden;
+def CocoaAlpha : Package<"cocoa">, InPackage<OSXAlpha>, Hidden;
def CoreFoundation : Package<"coreFoundation">, InPackage<OSX>;
def Containers : Package<"containers">, InPackage<CoreFoundation>;
@@ -60,10 +60,6 @@ def CallAndMessageChecker : Checker<"CallAndMessage">,
HelpText<"Check for logical errors for function calls and Objective-C message expressions (e.g., uninitialized arguments, null function pointers)">,
DescFile<"CallAndMessageChecker.cpp">;
-def AdjustedReturnValueChecker : Checker<"AdjustedReturnValue">,
- HelpText<"Check to see if the return value of a function call is different than the caller expects (e.g., from calls through function pointers)">,
- DescFile<"AdjustedReturnValueChecker.cpp">;
-
def AttrNonNullChecker : Checker<"AttributeNonNull">,
HelpText<"Check for null pointers passed as arguments to a function whose arguments are marked with the 'nonnull' attribute">,
DescFile<"AttrNonNullChecker.cpp">;
@@ -90,7 +86,7 @@ def DynamicTypePropagation : Checker<"DynamicTypePropagation">,
} // end "core"
-let ParentPackage = CoreExperimental in {
+let ParentPackage = CoreAlpha in {
def BoolAssignmentChecker : Checker<"BoolAssignment">,
HelpText<"Warn about assigning non-{0,1} values to Boolean variables">,
@@ -120,7 +116,7 @@ def SizeofPointerChecker : Checker<"SizeofPtr">,
HelpText<"Warn about unintended use of sizeof() on pointer expressions">,
DescFile<"CheckSizeofPointer.cpp">;
-} // end "core.experimental"
+} // end "alpha.core"
//===----------------------------------------------------------------------===//
// Evaluate "builtin" functions.
@@ -170,13 +166,13 @@ def ReturnUndefChecker : Checker<"UndefReturn">,
// C++ checkers.
//===----------------------------------------------------------------------===//
-let ParentPackage = CplusplusExperimental in {
+let ParentPackage = CplusplusAlpha in {
def VirtualCallChecker : Checker<"VirtualCall">,
HelpText<"Check virtual function calls during construction or destruction">,
DescFile<"VirtualCallChecker.cpp">;
-} // end: "cplusplus.experimental"
+} // end: "alpha.cplusplus"
//===----------------------------------------------------------------------===//
// Deadcode checkers.
@@ -189,7 +185,7 @@ def DeadStoresChecker : Checker<"DeadStores">,
DescFile<"DeadStoresChecker.cpp">;
} // end DeadCode
-let ParentPackage = DeadCodeExperimental in {
+let ParentPackage = DeadCodeAlpha in {
def IdempotentOperationChecker : Checker<"IdempotentOperations">,
HelpText<"Warn about idempotent operations">,
@@ -199,7 +195,7 @@ def UnreachableCodeChecker : Checker<"UnreachableCode">,
HelpText<"Check unreachable code">,
DescFile<"UnreachableCodeChecker.cpp">;
-} // end "deadcode.experimental"
+} // end "alpha.deadcode"
//===----------------------------------------------------------------------===//
// Security checkers.
@@ -237,7 +233,7 @@ let ParentPackage = Security in {
DescFile<"CheckSecuritySyntaxOnly.cpp">;
}
-let ParentPackage = SecurityExperimental in {
+let ParentPackage = SecurityAlpha in {
def ArrayBoundChecker : Checker<"ArrayBound">,
HelpText<"Warn about buffer overflows (older checker)">,
@@ -255,7 +251,7 @@ def MallocOverflowSecurityChecker : Checker<"MallocOverflow">,
HelpText<"Check for overflows in the arguments to malloc()">,
DescFile<"MallocOverflowSecurityChecker.cpp">;
-} // end "security.experimental"
+} // end "alpha.security"
//===----------------------------------------------------------------------===//
// Taint checkers.
@@ -267,7 +263,7 @@ def GenericTaintChecker : Checker<"TaintPropagation">,
HelpText<"Generate taint information used by other checkers">,
DescFile<"GenericTaintChecker.cpp">;
-} // end "experimental.security.taint"
+} // end "alpha.security.taint"
//===----------------------------------------------------------------------===//
// Unix API checkers.
@@ -289,7 +285,7 @@ def MallocSizeofChecker : Checker<"MallocSizeof">,
} // end "unix"
-let ParentPackage = UnixExperimental in {
+let ParentPackage = UnixAlpha in {
def ChrootChecker : Checker<"Chroot">,
HelpText<"Check improper use of chroot">,
@@ -307,7 +303,11 @@ def StreamChecker : Checker<"Stream">,
HelpText<"Check stream handling functions">,
DescFile<"StreamChecker.cpp">;
-} // end "unix.experimental"
+def SimpleStreamChecker : Checker<"SimpleStream">,
+ HelpText<"Check for misuses of stream APIs">,
+ DescFile<"SimpleStreamChecker.cpp">;
+
+} // end "alpha.unix"
let ParentPackage = CString in {
@@ -320,7 +320,7 @@ def CStringSyntaxChecker : Checker<"BadSizeArg">,
DescFile<"CStringSyntaxChecker.cpp">;
}
-let ParentPackage = CStringExperimental in {
+let ParentPackage = CStringAlpha in {
def CStringOutOfBounds : Checker<"OutOfBounds">,
HelpText<"Check for out-of-bounds access in string functions">,
@@ -346,11 +346,6 @@ def MacOSXAPIChecker : Checker<"API">,
HelpText<"Check for proper uses of various Mac OS X APIs">,
DescFile<"MacOSXAPIChecker.cpp">;
-def OSAtomicChecker : Checker<"AtomicCAS">,
- InPackage<OSX>,
- HelpText<"Evaluate calls to OSAtomic functions">,
- DescFile<"OSAtomicChecker.cpp">;
-
def MacOSKeychainAPIChecker : Checker<"SecKeychainAPI">,
InPackage<OSX>,
HelpText<"Check for proper uses of Secure Keychain APIs">,
@@ -397,6 +392,10 @@ def ObjCLoopChecker : Checker<"Loops">,
HelpText<"Improved modeling of loops using Cocoa collection types">,
DescFile<"BasicObjCFoundationChecks.cpp">;
+def ObjCNonNilReturnValueChecker : Checker<"NonNilReturnValue">,
+ HelpText<"Model the APIs that are guaranteed to return a non-nil value">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
def NSErrorChecker : Checker<"NSError">,
HelpText<"Check usage of NSError** parameters">,
DescFile<"NSErrorChecker.cpp">;
@@ -407,13 +406,25 @@ def RetainCountChecker : Checker<"RetainCount">,
} // end "osx.cocoa"
-let ParentPackage = CocoaExperimental in {
+let ParentPackage = CocoaAlpha in {
def ObjCDeallocChecker : Checker<"Dealloc">,
HelpText<"Warn about Objective-C classes that lack a correct implementation of -dealloc">,
DescFile<"CheckObjCDealloc.cpp">;
-} // end "cocoa.experimental"
+def IvarInvalidationChecker : Checker<"InstanceVariableInvalidation">,
+ HelpText<"Check that the invalidatable instance variables are invalidated in the methods annotated with objc_instance_variable_invalidator">,
+ DescFile<"IvarInvalidationChecker.cpp">;
+
+def DirectIvarAssignment : Checker<"DirectIvarAssignment">,
+ HelpText<"Check that the invalidatable instance variables are invalidated in the methods annotated with objc_instance_variable_invalidator">,
+ DescFile<"DirectIvarAssignment.cpp">;
+
+def ObjCSuperCallChecker : Checker<"MissingSuperCall">,
+ HelpText<"Warn about Objective-C methods that lack a necessary call to super">,
+ DescFile<"ObjCMissingSuperCallChecker.cpp">;
+
+} // end "alpha.osx.cocoa"
let ParentPackage = CoreFoundation in {
@@ -422,7 +433,7 @@ def CFNumberCreateChecker : Checker<"CFNumber">,
DescFile<"BasicObjCFoundationChecks.cpp">;
def CFRetainReleaseChecker : Checker<"CFRetainRelease">,
- HelpText<"Check for null arguments to CFRetain/CFRelease">,
+ HelpText<"Check for null arguments to CFRetain/CFRelease/CFMakeCollectable">,
DescFile<"BasicObjCFoundationChecks.cpp">;
def CFErrorChecker : Checker<"CFError">,
@@ -479,6 +490,10 @@ def CallGraphDumper : Checker<"DumpCallGraph">,
HelpText<"Display Call Graph">,
DescFile<"DebugCheckers.cpp">;
+def ConfigDumper : Checker<"ConfigDumper">,
+ HelpText<"Dump config table">,
+ DescFile<"DebugCheckers.cpp">;
+
def TraversalDumper : Checker<"DumpTraversal">,
HelpText<"Print branch conditions as they are traversed by the engine">,
DescFile<"TraversalChecker.cpp">;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 30d0609..c885616 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -147,7 +147,7 @@ void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
"after chroot"));
BugReport *R = new BugReport(*BT_BreakJail,
BT_BreakJail->getDescription(), N);
- C.EmitReport(R);
+ C.emitReport(R);
}
return;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 510e8cd..59e03ec 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -13,22 +13,54 @@
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/Analysis/Analyses/LiveVariables.h"
-#include "clang/Analysis/Visitors/CFGRecStmtVisitor.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
-#include "clang/Basic/Diagnostic.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ParentMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace ento;
-namespace {
+namespace {
+
+/// A simple visitor to record what VarDecls occur in EH-handling code.
+class EHCodeVisitor : public RecursiveASTVisitor<EHCodeVisitor> {
+public:
+ bool inEH;
+ llvm::DenseSet<const VarDecl *> &S;
+
+ bool TraverseObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ SaveAndRestore<bool> inFinally(inEH, true);
+ return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtFinallyStmt(S);
+ }
+
+ bool TraverseObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ SaveAndRestore<bool> inCatch(inEH, true);
+ return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtCatchStmt(S);
+ }
+
+ bool TraverseCXXCatchStmt(CXXCatchStmt *S) {
+ SaveAndRestore<bool> inCatch(inEH, true);
+ return TraverseStmt(S->getHandlerBlock());
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *DR) {
+ if (inEH)
+ if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
+ S.insert(D);
+ return true;
+ }
+
+ EHCodeVisitor(llvm::DenseSet<const VarDecl *> &S) :
+ inEH(false), S(S) {}
+};
// FIXME: Eventually migrate into its own file, and have it managed by
// AnalysisManager.
@@ -93,6 +125,7 @@ class DeadStoreObs : public LiveVariables::Observer {
llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
OwningPtr<ReachableCode> reachableCode;
const CFGBlock *currentBlock;
+ llvm::OwningPtr<llvm::DenseSet<const VarDecl *> > InEH;
enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
@@ -105,6 +138,23 @@ public:
virtual ~DeadStoreObs() {}
+ bool isLive(const LiveVariables::LivenessValues &Live, const VarDecl *D) {
+ if (Live.isLive(D))
+ return true;
+ // Lazily construct the set that records which VarDecls are in
+ // EH code.
+ if (!InEH.get()) {
+ InEH.reset(new llvm::DenseSet<const VarDecl *>());
+ EHCodeVisitor V(*InEH.get());
+ V.TraverseStmt(AC->getBody());
+ }
+ // Treat all VarDecls that occur in EH code as being "always live"
+ // when deciding whether to suppress dead stores. Frequently stores
+ // are followed by reads in EH code, but we don't have the ability
+ // to analyze that yet.
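+ // For example, given code such as:
+ //   int fd = open(path, O_RDONLY);
+ //   @try { ... } @finally { close(fd); }
+ // the store to 'fd' is not flagged as dead, because 'fd' is referenced
+ // inside the @finally block, which this analysis cannot see.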
+ return InEH->count(D);
+ }
+
void Report(const VarDecl *V, DeadStoreKind dsk,
PathDiagnosticLocation L, SourceRange R) {
if (Escaped.count(V))
@@ -159,7 +209,7 @@ public:
if (VD->getType()->getAs<ReferenceType>())
return;
- if (!Live.isLive(VD) &&
+ if (!isLive(Live, VD) &&
!(VD->getAttr<UnusedAttr>() || VD->getAttr<BlocksAttr>())) {
PathDiagnosticLocation ExLoc =
@@ -285,7 +335,7 @@ public:
// A dead initialization is a variable that is dead after it
// is initialized. We don't flag warnings for those variables
// marked 'unused'.
- if (!Live.isLive(V) && V->getAttr<UnusedAttr>() == 0) {
+ if (!isLive(Live, V) && V->getAttr<UnusedAttr>() == 0) {
// Special case: check for initializations with constants.
//
// e.g. : int x = 0;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 34053cd..7ad9c59 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -144,3 +144,38 @@ public:
void ento::registerCallGraphDumper(CheckerManager &mgr) {
mgr.registerChecker<CallGraphDumper>();
}
+
+
+//===----------------------------------------------------------------------===//
+// ConfigDumper
+//===----------------------------------------------------------------------===//
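+//
+// A hypothetical invocation (sketch; exact flag spellings may vary):
+//
+//   clang -cc1 -analyze -analyzer-checker=debug.ConfigDumper \
+//     -analyzer-config ipa=inlining t.c
+//
+// would print something like:
+//
+//   [config]
+//   ipa = inlining
+//   [stats]
+//   num-entries = 1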
+
+namespace {
+class ConfigDumper : public Checker< check::EndOfTranslationUnit > {
+public:
+ void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+ AnalysisManager& mgr,
+ BugReporter &BR) const {
+
+ const AnalyzerOptions::ConfigTable &Config = mgr.options.Config;
+ AnalyzerOptions::ConfigTable::const_iterator I =
+ Config.begin(), E = Config.end();
+
+ std::vector<StringRef> Keys;
+ for (; I != E; ++I) { Keys.push_back(I->getKey()); }
+ std::sort(Keys.begin(), Keys.end());
+
+ llvm::errs() << "[config]\n";
+ for (unsigned i = 0, n = Keys.size(); i < n; ++i) {
+ StringRef Key = Keys[i];
+ I = Config.find(Key);
+ llvm::errs() << Key << " = " << I->second << '\n';
+ }
+ llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n';
+ }
+};
+}
+
+void ento::registerConfigDumper(CheckerManager &mgr) {
+ mgr.registerChecker<ConfigDumper>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index e98c131..3ace4be 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -39,7 +39,7 @@ public:
CheckerContext &C) const;
void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
- static const MemRegion *AddDerefSource(raw_ostream &os,
+ static void AddDerefSource(raw_ostream &os,
SmallVectorImpl<SourceRange> &Ranges,
const Expr *Ex, const ProgramState *state,
const LocationContext *LCtx,
@@ -47,7 +47,7 @@ public:
};
} // end anonymous namespace
-const MemRegion *
+void
DereferenceChecker::AddDerefSource(raw_ostream &os,
SmallVectorImpl<SourceRange> &Ranges,
const Expr *Ex,
@@ -55,7 +55,6 @@ DereferenceChecker::AddDerefSource(raw_ostream &os,
const LocationContext *LCtx,
bool loadedFrom) {
Ex = Ex->IgnoreParenLValueCasts();
- const MemRegion *sourceR = 0;
switch (Ex->getStmtClass()) {
default:
break;
@@ -65,7 +64,6 @@ DereferenceChecker::AddDerefSource(raw_ostream &os,
os << " (" << (loadedFrom ? "loaded from" : "from")
<< " variable '" << VD->getName() << "')";
Ranges.push_back(DR->getSourceRange());
- sourceR = state->getLValue(VD, LCtx).getAsRegion();
}
break;
}
@@ -78,7 +76,6 @@ DereferenceChecker::AddDerefSource(raw_ostream &os,
break;
}
}
- return sourceR;
}
void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
@@ -94,6 +91,8 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
BT_null.reset(new BuiltinBug("Dereference of null pointer"));
SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
SmallVector<SourceRange, 2> Ranges;
// Walk through lvalue casts to get the original expression
@@ -101,8 +100,6 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
if (const Expr *expr = dyn_cast<Expr>(S))
S = expr->IgnoreParenLValueCasts();
- const MemRegion *sourceR = 0;
-
if (IsBind) {
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
if (BO->isAssignmentOp())
@@ -117,68 +114,55 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
switch (S->getStmtClass()) {
case Stmt::ArraySubscriptExprClass: {
- llvm::raw_svector_ostream os(buf);
os << "Array access";
const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
- sourceR = AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
- State.getPtr(), N->getLocationContext());
+ AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
+ State.getPtr(), N->getLocationContext());
os << " results in a null pointer dereference";
break;
}
case Stmt::UnaryOperatorClass: {
- llvm::raw_svector_ostream os(buf);
os << "Dereference of null pointer";
const UnaryOperator *U = cast<UnaryOperator>(S);
- sourceR = AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
- State.getPtr(), N->getLocationContext(), true);
+ AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
+ State.getPtr(), N->getLocationContext(), true);
break;
}
case Stmt::MemberExprClass: {
const MemberExpr *M = cast<MemberExpr>(S);
- if (M->isArrow()) {
- llvm::raw_svector_ostream os(buf);
+ if (M->isArrow() || bugreporter::isDeclRefExprToReference(M->getBase())) {
os << "Access to field '" << M->getMemberNameInfo()
<< "' results in a dereference of a null pointer";
- sourceR = AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
- State.getPtr(), N->getLocationContext(), true);
+ AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
+ State.getPtr(), N->getLocationContext(), true);
}
break;
}
case Stmt::ObjCIvarRefExprClass: {
const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
- if (const DeclRefExpr *DR =
- dyn_cast<DeclRefExpr>(IV->getBase()->IgnoreParenCasts())) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- llvm::raw_svector_ostream os(buf);
- os << "Instance variable access (via '" << VD->getName()
- << "') results in a null pointer dereference";
- }
- }
- Ranges.push_back(IV->getSourceRange());
+ os << "Access to instance variable '" << *IV->getDecl()
+ << "' results in a dereference of a null pointer";
+ AddDerefSource(os, Ranges, IV->getBase()->IgnoreParenCasts(),
+ State.getPtr(), N->getLocationContext(), true);
break;
}
default:
break;
}
+ os.flush();
BugReport *report =
new BugReport(*BT_null,
buf.empty() ? BT_null->getDescription() : buf.str(),
N);
- bugreporter::addTrackNullOrUndefValueVisitor(N, bugreporter::GetDerefExpr(N),
- report);
+ bugreporter::trackNullOrUndefValue(N, bugreporter::GetDerefExpr(N), *report);
for (SmallVectorImpl<SourceRange>::iterator
I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
report->addRange(*I);
- if (sourceR) {
- report->markInteresting(sourceR);
- report->markInteresting(State->getRawSVal(loc::MemRegionVal(sourceR)));
- }
-
- C.EmitReport(report);
+ C.emitReport(report);
}
void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
@@ -191,11 +175,9 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
BugReport *report =
new BugReport(*BT_undef, BT_undef->getDescription(), N);
- bugreporter::addTrackNullOrUndefValueVisitor(N,
- bugreporter::GetDerefExpr(N),
- report);
- report->disablePathPruning();
- C.EmitReport(report);
+ bugreporter::trackNullOrUndefValue(N, bugreporter::GetDerefExpr(N),
+ *report);
+ C.emitReport(report);
}
return;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
new file mode 100644
index 0000000..dc90b67
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -0,0 +1,180 @@
+//=- DirectIvarAssignment.cpp - Check rules on ObjC properties -*- C++ ----*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Check that Objective-C properties follow these rules:
+// - The property should be set with the setter, not through a direct
+// assignment.
+//
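+// For illustration, a hypothetical use (user code, not part of this file):
+//
+//   - (void)reset {
+//     _bar = nil;   // warning: '_bar' backs the property 'bar';
+//                   // use 'self.bar = nil' instead
+//   }
+//   - (void)setBar:(Bar *)b {
+//     _bar = b;     // not flagged: this is the property's own setter
+//   }
+//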
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class DirectIvarAssignment :
+ public Checker<check::ASTDecl<ObjCImplementationDecl> > {
+
+ typedef llvm::DenseMap<const ObjCIvarDecl*,
+ const ObjCPropertyDecl*> IvarToPropertyMapTy;
+
+ /// A helper class, which walks the AST and locates all assignments to ivars
+ /// in the given function.
+ class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
+ const IvarToPropertyMapTy &IvarToPropMap;
+ const ObjCMethodDecl *MD;
+ const ObjCInterfaceDecl *InterfD;
+ BugReporter &BR;
+ LocationOrAnalysisDeclContext DCtx;
+
+ public:
+ MethodCrawler(const IvarToPropertyMapTy &InMap, const ObjCMethodDecl *InMD,
+ const ObjCInterfaceDecl *InID,
+ BugReporter &InBR, AnalysisDeclContext *InDCtx)
+ : IvarToPropMap(InMap), MD(InMD), InterfD(InID), BR(InBR), DCtx(InDCtx) {}
+
+ void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+ void VisitBinaryOperator(const BinaryOperator *BO);
+
+ void VisitChildren(const Stmt *S) {
+ for (Stmt::const_child_range I = S->children(); I; ++I)
+ if (*I)
+ this->Visit(*I);
+ }
+ };
+
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const;
+};
+
+static const ObjCIvarDecl *findPropertyBackingIvar(const ObjCPropertyDecl *PD,
+ const ObjCInterfaceDecl *InterD,
+ ASTContext &Ctx) {
+ // Check for synthesized ivars.
+ ObjCIvarDecl *ID = PD->getPropertyIvarDecl();
+ if (ID)
+ return ID;
+
+ ObjCInterfaceDecl *NonConstInterD = const_cast<ObjCInterfaceDecl*>(InterD);
+
+ // Check for existing "_PropName".
+ ID = NonConstInterD->lookupInstanceVariable(PD->getDefaultSynthIvarName(Ctx));
+ if (ID)
+ return ID;
+
+ // Check for existing "PropName".
+ IdentifierInfo *PropIdent = PD->getIdentifier();
+ ID = NonConstInterD->lookupInstanceVariable(PropIdent);
+
+ return ID;
+}
+
+void DirectIvarAssignment::checkASTDecl(const ObjCImplementationDecl *D,
+ AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ const ObjCInterfaceDecl *InterD = D->getClassInterface();
+
+ IvarToPropertyMapTy IvarToPropMap;
+
+ // Find all properties for this class.
+ for (ObjCInterfaceDecl::prop_iterator I = InterD->prop_begin(),
+ E = InterD->prop_end(); I != E; ++I) {
+ ObjCPropertyDecl *PD = *I;
+
+ // Find the corresponding IVar.
+ const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterD,
+ Mgr.getASTContext());
+
+ if (!ID)
+ continue;
+
+ // Store the IVar to property mapping.
+ IvarToPropMap[ID] = PD;
+ }
+
+ if (IvarToPropMap.empty())
+ return;
+
+ for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
+ E = D->instmeth_end(); I != E; ++I) {
+
+ ObjCMethodDecl *M = *I;
+ AnalysisDeclContext *DCtx = Mgr.getAnalysisDeclContext(M);
+
+ // Skip the init, dealloc, copy and mutableCopy method families, as well as
+ // any methods whose name suggests they perform initialization.
+ if (M->getMethodFamily() == OMF_init ||
+ M->getMethodFamily() == OMF_dealloc ||
+ M->getMethodFamily() == OMF_copy ||
+ M->getMethodFamily() == OMF_mutableCopy ||
+ M->getSelector().getNameForSlot(0).find("init") != StringRef::npos ||
+ M->getSelector().getNameForSlot(0).find("Init") != StringRef::npos)
+ continue;
+
+ const Stmt *Body = M->getBody();
+ assert(Body);
+
+ MethodCrawler MC(IvarToPropMap, M->getCanonicalDecl(), InterD, BR, DCtx);
+ MC.VisitStmt(Body);
+ }
+}
+
+void DirectIvarAssignment::MethodCrawler::VisitBinaryOperator(
+ const BinaryOperator *BO) {
+ if (!BO->isAssignmentOp())
+ return;
+
+ const ObjCIvarRefExpr *IvarRef =
+ dyn_cast<ObjCIvarRefExpr>(BO->getLHS()->IgnoreParenCasts());
+
+ if (!IvarRef)
+ return;
+
+ if (const ObjCIvarDecl *D = IvarRef->getDecl()) {
+ IvarToPropertyMapTy::const_iterator I = IvarToPropMap.find(D);
+ if (I != IvarToPropMap.end()) {
+ const ObjCPropertyDecl *PD = I->second;
+
+ ObjCMethodDecl *GetterMethod =
+ InterfD->getInstanceMethod(PD->getGetterName());
+ ObjCMethodDecl *SetterMethod =
+ InterfD->getInstanceMethod(PD->getSetterName());
+
+ if (SetterMethod && SetterMethod->getCanonicalDecl() == MD)
+ return;
+
+ if (GetterMethod && GetterMethod->getCanonicalDecl() == MD)
+ return;
+
+ BR.EmitBasicReport(MD,
+ "Property access",
+ categories::CoreFoundationObjectiveC,
+ "Direct assignment to an instance variable backing a property; "
+ "use the setter instead", PathDiagnosticLocation(IvarRef,
+ BR.getSourceManager(),
+ DCtx));
+ }
+ }
+}
+}
+
+void ento::registerDirectIvarAssignment(CheckerManager &mgr) {
+ mgr.registerChecker<DirectIvarAssignment>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index dcf6a86..76fb3f2 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -39,13 +39,9 @@ void DivZeroChecker::reportBug(const char *Msg,
if (!BT)
BT.reset(new BuiltinBug("Division by zero"));
- BugReport *R =
- new BugReport(*BT, Msg, N);
-
- bugreporter::addTrackNullOrUndefValueVisitor(N,
- bugreporter::GetDenomExpr(N),
- R);
- C.EmitReport(R);
+ BugReport *R = new BugReport(*BT, Msg, N);
+ bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R);
+ C.emitReport(R);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index b636efb..b0a4bc6 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -83,14 +83,14 @@ void DynamicTypePropagation::checkPreCall(const CallEvent &Call,
if (const CXXDestructorCall *Dtor = dyn_cast<CXXDestructorCall>(&Call)) {
// C++11 [class.cdtor]p4 (see above)
+ if (!Dtor->isBaseDestructor())
+ return;
const MemRegion *Target = Dtor->getCXXThisVal().getAsRegion();
if (!Target)
return;
- // FIXME: getRuntimeDefinition() can be expensive. It would be better to do
- // this when we are entering the stack frame for the destructor.
- const Decl *D = Dtor->getRuntimeDefinition().getDecl();
+ const Decl *D = Dtor->getDecl();
if (!D)
return;
@@ -105,8 +105,7 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
// Get the returned value if it's a region.
- SVal Result = C.getSVal(Call.getOriginExpr());
- const MemRegion *RetReg = Result.getAsRegion();
+ const MemRegion *RetReg = Call.getReturnValue().getAsRegion();
if (!RetReg)
return;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 7acf223..e7e3162 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -93,7 +93,7 @@ void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
BT.reset(new BugType("Checking analyzer assumptions", "debug"));
BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N);
- C.EmitReport(R);
+ C.emitReport(R);
}
void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
@@ -113,7 +113,7 @@ void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
BT.reset(new BugType("Checking analyzer assumptions", "debug"));
BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N);
- C.EmitReport(R);
+ C.emitReport(R);
}
void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index a1f2f3b..7fde689 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -58,7 +58,7 @@ void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
"environments or platforms."));
BugReport *R = new BugReport(*BT, BT->getDescription(), N);
R->addRange(B->getRHS()->getSourceRange());
- C.EmitReport(R);
+ C.emitReport(R);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index afb862c..a9e0217 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -192,13 +192,7 @@ const char GenericTaintChecker::MsgTaintedBufferSize[] =
/// to the call post-visit. The values are unsigned integers, which are either
/// ReturnValueIndex, or indexes of the pointer/reference argument, which
/// points to data, which should be tainted on return.
-namespace { struct TaintArgsOnPostVisit{}; }
-namespace clang { namespace ento {
-template<> struct ProgramStateTrait<TaintArgsOnPostVisit>
- : public ProgramStatePartialTrait<llvm::ImmutableSet<unsigned> > {
- static void *GDMIndex() { return GenericTaintChecker::getTag(); }
-};
-}}
+REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
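+// (The macro also defines the TaintArgsOnPostVisitTy typedef used in
+// propagateFromPre() below.)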
GenericTaintChecker::TaintPropagationRule
GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
@@ -337,7 +331,7 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
// Depending on what was tainted at pre-visit, we determined a set of
// arguments which should be tainted after the function returns. These are
// stored in the state as TaintArgsOnPostVisit set.
- llvm::ImmutableSet<unsigned> TaintArgs = State->get<TaintArgsOnPostVisit>();
+ TaintArgsOnPostVisitTy TaintArgs = State->get<TaintArgsOnPostVisit>();
if (TaintArgs.isEmpty())
return false;
@@ -653,7 +647,7 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
initBugType();
BugReport *report = new BugReport(*BT, Msg, N);
report->addRange(E->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
return true;
}
return false;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
index 9d0b83f..ffbbb8b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
@@ -430,7 +430,7 @@ void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
FindLastStoreBRVisitor::registerStatementVarDecls(*report, RHS);
}
- BR.EmitReport(report);
+ BR.emitReport(report);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
new file mode 100644
index 0000000..bf256cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -0,0 +1,550 @@
+//=- IvarInvalidationChecker.cpp - -*- C++ -------------------------------*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker implements annotation driven invalidation checking. If a class
+// contains a method annotated with 'objc_instance_variable_invalidator',
+// - (void) foo
+// __attribute__((annotate("objc_instance_variable_invalidator")));
+// all the "ivalidatable" instance variables of this class should be
+// invalidated. We call an instance variable ivalidatable if it is an object of
+// a class which contains an invalidation method. There could be multiple
+// methods annotated with such annotations per class, either one can be used
+// to invalidate the ivar. An ivar or property are considered to be
+// invalidated if they are being assigned 'nil' or an invalidation method has
+// been called on them. An invalidation method should either invalidate all
+// the ivars or call another invalidation method (on self).
+//
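+// For illustration, a hypothetical invalidation method (user code):
+//
+//   - (void)invalidate
+//       __attribute__((annotate("objc_instance_variable_invalidator"))) {
+//     [_child invalidate];  // ok: invalidation method called on the ivar
+//     _other = nil;         // ok: ivar assigned 'nil'
+//   }  // any tracked ivar not handled above is reported
+//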
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class IvarInvalidationChecker :
+ public Checker<check::ASTDecl<ObjCMethodDecl> > {
+
+ typedef llvm::DenseSet<const ObjCMethodDecl*> MethodSet;
+ typedef llvm::DenseMap<const ObjCMethodDecl*,
+ const ObjCIvarDecl*> MethToIvarMapTy;
+ typedef llvm::DenseMap<const ObjCPropertyDecl*,
+ const ObjCIvarDecl*> PropToIvarMapTy;
+ typedef llvm::DenseMap<const ObjCIvarDecl*,
+ const ObjCPropertyDecl*> IvarToPropMapTy;
+
+
+ struct IvarInfo {
+ /// Has the ivar been invalidated?
+ bool IsInvalidated;
+
+ /// The methods which can be used to invalidate the ivar.
+ MethodSet InvalidationMethods;
+
+ IvarInfo() : IsInvalidated(false) {}
+ void addInvalidationMethod(const ObjCMethodDecl *MD) {
+ InvalidationMethods.insert(MD);
+ }
+
+ bool needsInvalidation() const {
+ return !InvalidationMethods.empty();
+ }
+
+ void markInvalidated() {
+ IsInvalidated = true;
+ }
+
+ bool markInvalidated(const ObjCMethodDecl *MD) {
+ if (IsInvalidated)
+ return true;
+ for (MethodSet::iterator I = InvalidationMethods.begin(),
+ E = InvalidationMethods.end(); I != E; ++I) {
+ if (*I == MD) {
+ IsInvalidated = true;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool isInvalidated() const {
+ return IsInvalidated;
+ }
+ };
+
+ typedef llvm::DenseMap<const ObjCIvarDecl*, IvarInfo> IvarSet;
+
+ /// Statement visitor, which walks the method body and flags the ivars
+ /// referenced in it (either directly or via property).
+ class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
+ /// The set of Ivars which need to be invalidated.
+ IvarSet &IVars;
+
+ /// Flag is set as the result of a message send to another
+ /// invalidation method.
+ bool &CalledAnotherInvalidationMethod;
+
+ /// Property setter to ivar mapping.
+ const MethToIvarMapTy &PropertySetterToIvarMap;
+
+ /// Property getter to ivar mapping.
+ const MethToIvarMapTy &PropertyGetterToIvarMap;
+
+ /// Property to ivar mapping.
+ const PropToIvarMapTy &PropertyToIvarMap;
+
+ /// The invalidation method being currently processed.
+ const ObjCMethodDecl *InvalidationMethod;
+
+ ASTContext &Ctx;
+
+ /// Peel off parens, casts, OpaqueValueExpr, and PseudoObjectExpr.
+ const Expr *peel(const Expr *E) const;
+
+ /// Does this expression represent zero: '0'?
+ bool isZero(const Expr *E) const;
+
+ /// Mark the given ivar as invalidated.
+ void markInvalidated(const ObjCIvarDecl *Iv);
+
+ /// Checks if IvarRef refers to a tracked ivar; if so, marks it as
+ /// invalidated.
+ void checkObjCIvarRefExpr(const ObjCIvarRefExpr *IvarRef);
+
+ /// Checks if the ObjCPropertyRefExpr refers to a tracked ivar; if so,
+ /// marks it as invalidated.
+ void checkObjCPropertyRefExpr(const ObjCPropertyRefExpr *PA);
+
+ /// Checks if the ObjCMessageExpr is a getter for a tracked ivar; if so,
+ /// marks it as invalidated.
+ void checkObjCMessageExpr(const ObjCMessageExpr *ME);
+
+ /// Checks if the Expr refers to a tracked ivar; if so, marks it as
+ /// invalidated.
+ void check(const Expr *E);
+
+ public:
+ MethodCrawler(IvarSet &InIVars,
+ bool &InCalledAnotherInvalidationMethod,
+ const MethToIvarMapTy &InPropertySetterToIvarMap,
+ const MethToIvarMapTy &InPropertyGetterToIvarMap,
+ const PropToIvarMapTy &InPropertyToIvarMap,
+ ASTContext &InCtx)
+ : IVars(InIVars),
+ CalledAnotherInvalidationMethod(InCalledAnotherInvalidationMethod),
+ PropertySetterToIvarMap(InPropertySetterToIvarMap),
+ PropertyGetterToIvarMap(InPropertyGetterToIvarMap),
+ PropertyToIvarMap(InPropertyToIvarMap),
+ InvalidationMethod(0),
+ Ctx(InCtx) {}
+
+ void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+ void VisitBinaryOperator(const BinaryOperator *BO);
+
+ void VisitObjCMessageExpr(const ObjCMessageExpr *ME);
+
+ void VisitChildren(const Stmt *S) {
+ for (Stmt::const_child_range I = S->children(); I; ++I) {
+ if (*I)
+ this->Visit(*I);
+ if (CalledAnotherInvalidationMethod)
+ return;
+ }
+ }
+ };
+
+ /// Check if any of the methods inside the interface are annotated with
+ /// the invalidation annotation and update the IvarInfo accordingly.
+ static void containsInvalidationMethod(const ObjCContainerDecl *D,
+ IvarInfo &Out);
+
+ /// Check if the ivar should be tracked; if so, add it to TrackedIvars.
+ /// Returns true if the ivar is tracked.
+ static bool trackIvar(const ObjCIvarDecl *Iv, IvarSet &TrackedIvars);
+
+ /// Given the property declaration, and the list of tracked ivars, finds
+ /// the ivar backing the property when possible. Returns '0' when no such
+ /// ivar could be found.
+ static const ObjCIvarDecl *findPropertyBackingIvar(
+ const ObjCPropertyDecl *Prop,
+ const ObjCInterfaceDecl *InterfaceD,
+ IvarSet &TrackedIvars);
+
+public:
+ void checkASTDecl(const ObjCMethodDecl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const;
+
+ // TODO: We are currently ignoring the ivars coming from class extensions.
+};
+
+static bool isInvalidationMethod(const ObjCMethodDecl *M) {
+ for (specific_attr_iterator<AnnotateAttr>
+ AI = M->specific_attr_begin<AnnotateAttr>(),
+ AE = M->specific_attr_end<AnnotateAttr>(); AI != AE; ++AI) {
+ const AnnotateAttr *Ann = *AI;
+ if (Ann->getAnnotation() == "objc_instance_variable_invalidator")
+ return true;
+ }
+ return false;
+}
+
+void IvarInvalidationChecker::containsInvalidationMethod(
+ const ObjCContainerDecl *D, IvarInfo &OutInfo) {
+
+ // TODO: Cache the results.
+
+ if (!D)
+ return;
+
+ // Check all methods.
+ for (ObjCContainerDecl::method_iterator
+ I = D->meth_begin(),
+ E = D->meth_end(); I != E; ++I) {
+ const ObjCMethodDecl *MDI = *I;
+ if (isInvalidationMethod(MDI))
+ OutInfo.addInvalidationMethod(
+ cast<ObjCMethodDecl>(MDI->getCanonicalDecl()));
+ }
+
+ // If interface, check all parent protocols and super.
+ // TODO: Visit all categories in case the invalidation method is declared in
+ // a category.
+ if (const ObjCInterfaceDecl *InterfaceD = dyn_cast<ObjCInterfaceDecl>(D)) {
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = InterfaceD->protocol_begin(),
+ E = InterfaceD->protocol_end(); I != E; ++I) {
+ containsInvalidationMethod(*I, OutInfo);
+ }
+ containsInvalidationMethod(InterfaceD->getSuperClass(), OutInfo);
+ return;
+ }
+
+ // If protocol, check all parent protocols.
+ if (const ObjCProtocolDecl *ProtD = dyn_cast<ObjCProtocolDecl>(D)) {
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = ProtD->protocol_begin(),
+ E = ProtD->protocol_end(); I != E; ++I) {
+ containsInvalidationMethod(*I, OutInfo);
+ }
+ return;
+ }
+
+ llvm_unreachable("One of the casts above should have succeeded.");
+}
+
+bool IvarInvalidationChecker::trackIvar(const ObjCIvarDecl *Iv,
+ IvarSet &TrackedIvars) {
+ QualType IvQTy = Iv->getType();
+ const ObjCObjectPointerType *IvTy = IvQTy->getAs<ObjCObjectPointerType>();
+ if (!IvTy)
+ return false;
+ const ObjCInterfaceDecl *IvInterf = IvTy->getInterfaceDecl();
+
+ IvarInfo Info;
+ containsInvalidationMethod(IvInterf, Info);
+ if (Info.needsInvalidation()) {
+ TrackedIvars[cast<ObjCIvarDecl>(Iv->getCanonicalDecl())] = Info;
+ return true;
+ }
+ return false;
+}
+
+const ObjCIvarDecl *IvarInvalidationChecker::findPropertyBackingIvar(
+ const ObjCPropertyDecl *Prop,
+ const ObjCInterfaceDecl *InterfaceD,
+ IvarSet &TrackedIvars) {
+ const ObjCIvarDecl *IvarD = 0;
+
+ // Look up the synthesized ivar first.
+ IvarD = Prop->getPropertyIvarDecl();
+ if (IvarD) {
+ if (TrackedIvars.count(IvarD)) {
+ return IvarD;
+ }
+ // If the ivar is synthesized we still want to track it.
+ if (trackIvar(IvarD, TrackedIvars))
+ return IvarD;
+ }
+
+ // Look up ivars named "_PropName" or "PropName" among the tracked ivars.
+ StringRef PropName = Prop->getIdentifier()->getName();
+ for (IvarSet::const_iterator I = TrackedIvars.begin(),
+ E = TrackedIvars.end(); I != E; ++I) {
+ const ObjCIvarDecl *Iv = I->first;
+ StringRef IvarName = Iv->getName();
+
+ if (IvarName == PropName)
+ return Iv;
+
+ SmallString<128> PropNameWithUnderscore;
+ {
+ llvm::raw_svector_ostream os(PropNameWithUnderscore);
+ os << '_' << PropName;
+ }
+ if (IvarName == PropNameWithUnderscore.str())
+ return Iv;
+ }
+
+ // Note, this is a possible source of false positives. We could look at the
+ // getter implementation to find the ivar when its name is not derived from
+ // the property name.
+ return 0;
+}
+
+void IvarInvalidationChecker::checkASTDecl(const ObjCMethodDecl *D,
+ AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ // We are only interested in checking the cleanup methods.
+ if (!D->hasBody() || !isInvalidationMethod(D))
+ return;
+
+ // Collect all ivars that need cleanup.
+ IvarSet Ivars;
+ const ObjCInterfaceDecl *InterfaceD = D->getClassInterface();
+
+ // Collect ivars declared in this class, its extensions and its implementation.
+ ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(InterfaceD);
+ for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
+ Iv= Iv->getNextIvar())
+ trackIvar(Iv, Ivars);
+
+ // Construct Property/Property Accessor to Ivar maps to assist checking if an
+ // ivar which is backing a property has been reset.
+ MethToIvarMapTy PropSetterToIvarMap;
+ MethToIvarMapTy PropGetterToIvarMap;
+ PropToIvarMapTy PropertyToIvarMap;
+ IvarToPropMapTy IvarToPropertyMap;
+
+ ObjCInterfaceDecl::PropertyMap PropMap;
+ InterfaceD->collectPropertiesToImplement(PropMap);
+
+ for (ObjCInterfaceDecl::PropertyMap::iterator
+ I = PropMap.begin(), E = PropMap.end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = I->second;
+
+ const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterfaceD, Ivars);
+ if (!ID) {
+ continue;
+ }
+
+ // Store the mappings.
+ PD = cast<ObjCPropertyDecl>(PD->getCanonicalDecl());
+ PropertyToIvarMap[PD] = ID;
+ IvarToPropertyMap[ID] = PD;
+
+ // Find the setter and the getter.
+ const ObjCMethodDecl *SetterD = PD->getSetterMethodDecl();
+ if (SetterD) {
+ SetterD = cast<ObjCMethodDecl>(SetterD->getCanonicalDecl());
+ PropSetterToIvarMap[SetterD] = ID;
+ }
+
+ const ObjCMethodDecl *GetterD = PD->getGetterMethodDecl();
+ if (GetterD) {
+ GetterD = cast<ObjCMethodDecl>(GetterD->getCanonicalDecl());
+ PropGetterToIvarMap[GetterD] = ID;
+ }
+ }
+
+ // Check which ivars have been invalidated in the method body.
+ bool CalledAnotherInvalidationMethod = false;
+ MethodCrawler(Ivars,
+ CalledAnotherInvalidationMethod,
+ PropSetterToIvarMap,
+ PropGetterToIvarMap,
+ PropertyToIvarMap,
+ BR.getContext()).VisitStmt(D->getBody());
+
+ if (CalledAnotherInvalidationMethod)
+ return;
+
+ // Warn on the ivars that were not accessed by the method.
+ for (IvarSet::const_iterator I = Ivars.begin(), E = Ivars.end(); I != E; ++I){
+ if (!I->second.isInvalidated()) {
+ const ObjCIvarDecl *IvarDecl = I->first;
+
+ PathDiagnosticLocation IvarDecLocation =
+ PathDiagnosticLocation::createEnd(D->getBody(), BR.getSourceManager(),
+ Mgr.getAnalysisDeclContext(D));
+
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ // Construct the warning message.
+ if (IvarDecl->getSynthesize()) {
+ const ObjCPropertyDecl *PD = IvarToPropertyMap[IvarDecl];
+ assert(PD &&
+ "Do we synthesize ivars for something other than properties?");
+ os << "Property "<< PD->getName() <<
+ " needs to be invalidated or set to nil";
+ } else {
+ os << "Instance variable "<< IvarDecl->getName()
+ << " needs to be invalidated or set to nil";
+ }
+
+ BR.EmitBasicReport(D,
+ "Incomplete invalidation",
+ categories::CoreFoundationObjectiveC, os.str(),
+ IvarDecLocation);
+ }
+ }
+}
+
+void IvarInvalidationChecker::MethodCrawler::markInvalidated(
+ const ObjCIvarDecl *Iv) {
+ IvarSet::iterator I = IVars.find(Iv);
+ if (I != IVars.end()) {
+ // If InvalidationMethod is present, we are processing a message send and
+ // should ensure we are invalidating with the appropriate method;
+ // otherwise, we are processing an assignment to 'nil'.
+ if (InvalidationMethod)
+ I->second.markInvalidated(InvalidationMethod);
+ else
+ I->second.markInvalidated();
+ }
+}
+
+const Expr *IvarInvalidationChecker::MethodCrawler::peel(const Expr *E) const {
+ E = E->IgnoreParenCasts();
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E))
+ E = POE->getSyntacticForm()->IgnoreParenCasts();
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E))
+ E = OVE->getSourceExpr()->IgnoreParenCasts();
+ return E;
+}
+
+void IvarInvalidationChecker::MethodCrawler::checkObjCIvarRefExpr(
+ const ObjCIvarRefExpr *IvarRef) {
+ if (const Decl *D = IvarRef->getDecl())
+ markInvalidated(cast<ObjCIvarDecl>(D->getCanonicalDecl()));
+}
+
+void IvarInvalidationChecker::MethodCrawler::checkObjCMessageExpr(
+ const ObjCMessageExpr *ME) {
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (MD) {
+ MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD);
+ if (IvI != PropertyGetterToIvarMap.end())
+ markInvalidated(IvI->second);
+ }
+}
+
+void IvarInvalidationChecker::MethodCrawler::checkObjCPropertyRefExpr(
+ const ObjCPropertyRefExpr *PA) {
+
+ if (PA->isExplicitProperty()) {
+ const ObjCPropertyDecl *PD = PA->getExplicitProperty();
+ if (PD) {
+ PD = cast<ObjCPropertyDecl>(PD->getCanonicalDecl());
+ PropToIvarMapTy::const_iterator IvI = PropertyToIvarMap.find(PD);
+ if (IvI != PropertyToIvarMap.end())
+ markInvalidated(IvI->second);
+ return;
+ }
+ }
+
+ if (PA->isImplicitProperty()) {
+ const ObjCMethodDecl *MD = PA->getImplicitPropertySetter();
+ if (MD) {
+ MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD);
+ if (IvI != PropertyGetterToIvarMap.end())
+ markInvalidated(IvI->second);
+ return;
+ }
+ }
+}
+
+bool IvarInvalidationChecker::MethodCrawler::isZero(const Expr *E) const {
+ E = peel(E);
+
+ return (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull)
+ != Expr::NPCK_NotNull);
+}
+
+void IvarInvalidationChecker::MethodCrawler::check(const Expr *E) {
+ E = peel(E);
+
+ if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
+ checkObjCIvarRefExpr(IvarRef);
+ return;
+ }
+
+ if (const ObjCPropertyRefExpr *PropRef = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ checkObjCPropertyRefExpr(PropRef);
+ return;
+ }
+
+ if (const ObjCMessageExpr *MsgExpr = dyn_cast<ObjCMessageExpr>(E)) {
+ checkObjCMessageExpr(MsgExpr);
+ return;
+ }
+}
+
+void IvarInvalidationChecker::MethodCrawler::VisitBinaryOperator(
+ const BinaryOperator *BO) {
+ VisitStmt(BO);
+
+ if (BO->getOpcode() != BO_Assign)
+ return;
+
+ // Do we assign zero?
+ if (!isZero(BO->getRHS()))
+ return;
+
+ // Check the variable we are assigning to.
+ check(BO->getLHS());
+}
+
+void IvarInvalidationChecker::MethodCrawler::VisitObjCMessageExpr(
+ const ObjCMessageExpr *ME) {
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ const Expr *Receiver = ME->getInstanceReceiver();
+
+ // Stop if we are calling '[self invalidate]'.
+ if (Receiver && isInvalidationMethod(MD))
+ if (Receiver->isObjCSelfExpr()) {
+ CalledAnotherInvalidationMethod = true;
+ return;
+ }
+
+ // Check if we call a setter and set the property to 'nil'.
+ if (MD && (ME->getNumArgs() == 1) && isZero(ME->getArg(0))) {
+ MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MethToIvarMapTy::const_iterator IvI = PropertySetterToIvarMap.find(MD);
+ if (IvI != PropertySetterToIvarMap.end()) {
+ markInvalidated(IvI->second);
+ return;
+ }
+ }
+
+ // Check if we call the 'invalidation' routine on the ivar.
+ if (Receiver) {
+ InvalidationMethod = MD;
+ check(Receiver->IgnoreParenCasts());
+ InvalidationMethod = 0;
+ }
+
+ VisitStmt(ME);
+}
+}
+
+// Register the checker.
+void ento::registerIvarInvalidationChecker(CheckerManager &mgr) {
+ mgr.registerChecker<IvarInvalidationChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index 969f2dd..76f20b6 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -158,16 +158,9 @@ private:
/// ProgramState traits to store the currently allocated (and not yet freed)
/// symbols. This is a map from the allocated content symbol to the
/// corresponding AllocationState.
-typedef llvm::ImmutableMap<SymbolRef,
- MacOSKeychainAPIChecker::AllocationState> AllocatedSetTy;
-
-namespace { struct AllocatedData {}; }
-namespace clang { namespace ento {
-template<> struct ProgramStateTrait<AllocatedData>
- : public ProgramStatePartialTrait<AllocatedSetTy > {
- static void *GDMIndex() { static int index = 0; return &index; }
-};
-}}
+REGISTER_MAP_WITH_PROGRAMSTATE(AllocatedData,
+ SymbolRef,
+ MacOSKeychainAPIChecker::AllocationState)
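+// (The macro also defines the AllocatedDataTy typedef that replaces the old
+// AllocatedSetTy below.)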
static bool isEnclosingFunctionParam(const Expr *E) {
E = E->IgnoreParenCasts();
@@ -282,7 +275,7 @@ void MacOSKeychainAPIChecker::
Report->addVisitor(new SecKeychainBugVisitor(AP.first));
Report->addRange(ArgExpr->getSourceRange());
markInteresting(Report, AP);
- C.EmitReport(Report);
+ C.emitReport(Report);
}
void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
@@ -323,7 +316,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
Report->addVisitor(new SecKeychainBugVisitor(V));
Report->addRange(ArgExpr->getSourceRange());
Report->markInteresting(AS->Region);
- C.EmitReport(Report);
+ C.emitReport(Report);
}
}
return;
@@ -376,7 +369,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
Report->addRange(ArgExpr->getSourceRange());
if (AS)
Report->markInteresting(AS->Region);
- C.EmitReport(Report);
+ C.emitReport(Report);
return;
}
@@ -440,7 +433,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
Report->addVisitor(new SecKeychainBugVisitor(ArgSM));
Report->addRange(ArgExpr->getSourceRange());
Report->markInteresting(AS->Region);
- C.EmitReport(Report);
+ C.emitReport(Report);
return;
}
@@ -571,13 +564,13 @@ BugReport *MacOSKeychainAPIChecker::
void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- AllocatedSetTy ASet = State->get<AllocatedData>();
+ AllocatedDataTy ASet = State->get<AllocatedData>();
if (ASet.isEmpty())
return;
bool Changed = false;
AllocationPairVec Errors;
- for (AllocatedSetTy::iterator I = ASet.begin(), E = ASet.end(); I != E; ++I) {
+ for (AllocatedDataTy::iterator I = ASet.begin(), E = ASet.end(); I != E; ++I) {
if (SR.isLive(I->first))
continue;
@@ -585,7 +578,9 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
State = State->remove<AllocatedData>(I->first);
// If the allocated symbol is null or if the allocation call might have
// returned an error, do not report.
- if (State->getSymVal(I->first) ||
+ ConstraintManager &CMgr = State->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(State, I.getKey());
+ if (AllocFailed.isConstrainedTrue() ||
definitelyReturnedError(I->second.Region, State, C.getSValBuilder()))
continue;
Errors.push_back(std::make_pair(I->first, &I->second));
@@ -602,7 +597,7 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
// Generate the error reports.
for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end();
I != E; ++I) {
- C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
+ C.emitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
}
// Generate the new, cleaned up state.
@@ -617,7 +612,7 @@ void MacOSKeychainAPIChecker::checkEndPath(CheckerContext &C) const {
if (C.getLocationContext()->getParent() != 0)
return;
- AllocatedSetTy AS = state->get<AllocatedData>();
+ AllocatedDataTy AS = state->get<AllocatedData>();
if (AS.isEmpty())
return;
@@ -625,12 +620,14 @@ void MacOSKeychainAPIChecker::checkEndPath(CheckerContext &C) const {
// found here, so report it.
bool Changed = false;
AllocationPairVec Errors;
- for (AllocatedSetTy::iterator I = AS.begin(), E = AS.end(); I != E; ++I ) {
+ for (AllocatedDataTy::iterator I = AS.begin(), E = AS.end(); I != E; ++I ) {
Changed = true;
state = state->remove<AllocatedData>(I->first);
// If the allocated symbol is null or if error code was returned at
// allocation, do not report.
- if (state->getSymVal(I.getKey()) ||
+ ConstraintManager &CMgr = state->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ if (AllocFailed.isConstrainedTrue() ||
definitelyReturnedError(I->second.Region, state,
C.getSValBuilder())) {
continue;
@@ -650,7 +647,7 @@ void MacOSKeychainAPIChecker::checkEndPath(CheckerContext &C) const {
// Generate the error reports.
for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end();
I != E; ++I) {
- C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
+ C.emitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
}
C.addTransition(state, N);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index cfdb55d..467b8b1 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -70,6 +70,16 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
BT_dispatchOnce.reset(new BugType("Improper use of 'dispatch_once'",
"Mac OS X API"));
+ // Handle _dispatch_once. In some versions of the OS X SDK we have the case
+ // that dispatch_once is a macro that wraps a call to _dispatch_once.
+ // _dispatch_once is then a function that calls the real dispatch_once.
+ // Users do not care; they just want the warning at the top-level call.
+ if (CE->getLocStart().isMacroID()) {
+ StringRef TrimmedFName = FName.ltrim("_");
+ if (TrimmedFName != FName)
+ FName = TrimmedFName;
+ }
+
SmallString<256> S;
llvm::raw_svector_ostream os(S);
os << "Call to '" << FName << "' uses";
@@ -84,7 +94,7 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
BugReport *report = new BugReport(*BT_dispatchOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
//===----------------------------------------------------------------------===//
@@ -99,7 +109,9 @@ void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE,
SubChecker SC =
llvm::StringSwitch<SubChecker>(Name)
- .Cases("dispatch_once", "dispatch_once_f",
+ .Cases("dispatch_once",
+ "_dispatch_once",
+ "dispatch_once_f",
&MacOSXAPIChecker::CheckDispatchOnce)
.Default(NULL);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index dfcedf6..caf70ca 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include <climits>
using namespace clang;
@@ -70,17 +71,31 @@ public:
}
};
+enum ReallocPairKind {
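+ // The symbol still needs to be freed when reallocation fails.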
+ RPToBeFreedAfterFailure,
+ // The symbol has been freed when reallocation failed.
+ RPIsFreeOnFailure,
+ // The symbol does not need to be freed after reallocation fails.
+ RPDoNotTrackAfterFailure
+};
+
+/// \class ReallocPair
+/// \brief Stores information about the symbol being reallocated by a call to
+/// 'realloc' to allow modeling failed reallocation later in the path.
struct ReallocPair {
+ /// The symbol which realloc reallocated.
SymbolRef ReallocatedSym;
- bool IsFreeOnFailure;
- ReallocPair(SymbolRef S, bool F) : ReallocatedSym(S), IsFreeOnFailure(F) {}
+ ReallocPairKind Kind;
+
+ ReallocPair(SymbolRef S, ReallocPairKind K) :
+ ReallocatedSym(S), Kind(K) {}
void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(IsFreeOnFailure);
+ ID.AddInteger(Kind);
ID.AddPointer(ReallocatedSym);
}
bool operator==(const ReallocPair &X) const {
return ReallocatedSym == X.ReallocatedSym &&
- IsFreeOnFailure == X.IsFreeOnFailure;
+ Kind == X.Kind;
}
};
@@ -92,7 +107,7 @@ class MallocChecker : public Checker<check::DeadSymbols,
check::PreStmt<CallExpr>,
check::PostStmt<CallExpr>,
check::PostStmt<BlockExpr>,
- check::PreObjCMessage,
+ check::PostObjCMessage,
check::Location,
check::Bind,
eval::Assume,
@@ -120,7 +135,7 @@ public:
void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPreObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkEndPath(CheckerContext &C) const;
@@ -177,11 +192,15 @@ private:
const OwnershipAttr* Att) const;
ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
ProgramStateRef state, unsigned Num,
- bool Hold) const;
+ bool Hold,
+ bool &ReleasedAllocated,
+ bool ReturnsNullOnFailure = false) const;
ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *Arg,
const Expr *ParentExpr,
- ProgramStateRef state,
- bool Hold) const;
+ ProgramStateRef State,
+ bool Hold,
+ bool &ReleasedAllocated,
+ bool ReturnsNullOnFailure = false) const;
ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
bool FreesMemOnFailure) const;
@@ -301,13 +320,14 @@ private:
: StackHintGeneratorForSymbol(S, M) {}
virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex) {
+ // Printed parameters start at 1, not 0.
+ ++ArgIndex;
+
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
- os << "Reallocation of ";
- // Printed parameters start at 1, not 0.
- printOrdinal(++ArgIndex, os);
- os << " parameter failed";
+ os << "Reallocation of " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
+ << " parameter failed";
return os.str();
}
@@ -320,25 +340,12 @@ private:
};
} // end anonymous namespace
-typedef llvm::ImmutableMap<SymbolRef, RefState> RegionStateTy;
-typedef llvm::ImmutableMap<SymbolRef, ReallocPair > ReallocMap;
-class RegionState {};
-class ReallocPairs {};
-namespace clang {
-namespace ento {
- template <>
- struct ProgramStateTrait<RegionState>
- : public ProgramStatePartialTrait<RegionStateTy> {
- static void *GDMIndex() { static int x; return &x; }
- };
+REGISTER_MAP_WITH_PROGRAMSTATE(RegionState, SymbolRef, RefState)
+REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair)
- template <>
- struct ProgramStateTrait<ReallocPairs>
- : public ProgramStatePartialTrait<ReallocMap> {
- static void *GDMIndex() { static int x; return &x; }
- };
-}
-}
+// A map from the freed symbol to the symbol representing the return value of
+// the free function.
+REGISTER_MAP_WITH_PROGRAMSTATE(FreeReturnValue, SymbolRef, SymbolRef)
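+// (Used by didPreviousFreeFail() below: if the return value of the free-like
+// call is constrained to null, the call failed and the memory was not
+// actually released, so a subsequent free is not a double free.)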
namespace {
class StopTrackingCallback : public SymbolVisitor {
@@ -426,11 +433,15 @@ bool MallocChecker::isFreeFunction(const FunctionDecl *FD, ASTContext &C) const
}
void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
+ if (C.wasInlined)
+ return;
+
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
ProgramStateRef State = C.getState();
+ bool ReleasedAllocatedMemory = false;
if (FD->getKind() == Decl::Function) {
initIdentifierInfo(C.getASTContext());
@@ -447,7 +458,7 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
} else if (FunI == II_calloc) {
State = CallocMem(C, CE);
} else if (FunI == II_free) {
- State = FreeMemAux(C, CE, State, 0, false);
+ State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
} else if (FunI == II_strdup) {
State = MallocUpdateRefState(C, CE, State);
} else if (FunI == II_strndup) {
@@ -487,21 +498,26 @@ static bool isFreeWhenDoneSetToZero(const ObjCMethodCall &Call) {
return false;
}
-void MallocChecker::checkPreObjCMessage(const ObjCMethodCall &Call,
- CheckerContext &C) const {
+void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
+ CheckerContext &C) const {
// If the first selector is dataWithBytesNoCopy, assume that the memory will
// be released with 'free' by the new object.
// Ex: [NSData dataWithBytesNoCopy:bytes length:10];
// Unless 'freeWhenDone' param set to 0.
// TODO: Check that the memory was allocated with malloc.
+ bool ReleasedAllocatedMemory = false;
Selector S = Call.getSelector();
if ((S.getNameForSlot(0) == "dataWithBytesNoCopy" ||
S.getNameForSlot(0) == "initWithBytesNoCopy" ||
S.getNameForSlot(0) == "initWithCharactersNoCopy") &&
!isFreeWhenDoneSetToZero(Call)){
unsigned int argIdx = 0;
- C.addTransition(FreeMemAux(C, Call.getArgExpr(argIdx),
- Call.getOriginExpr(), C.getState(), true));
+ ProgramStateRef State = FreeMemAux(C, Call.getArgExpr(argIdx),
+ Call.getOriginExpr(), C.getState(), true,
+ ReleasedAllocatedMemory,
+ /* RetNullOnFailure */ true);
+
+ C.addTransition(State);
}
}
@@ -526,7 +542,7 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
// Bind the return value to the symbolic value from the heap region.
// TODO: We could rewrite post visit to eval call; 'malloc' does not have
// side effects other than what we model here.
- unsigned Count = C.getCurrentBlockCount();
+ unsigned Count = C.blockCount();
SValBuilder &svalBuilder = C.getSValBuilder();
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
DefinedSVal RetVal =
@@ -584,11 +600,13 @@ ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
return 0;
ProgramStateRef State = C.getState();
+ bool ReleasedAllocated = false;
for (OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
I != E; ++I) {
ProgramStateRef StateI = FreeMemAux(C, CE, State, *I,
- Att->getOwnKind() == OwnershipAttr::Holds);
+ Att->getOwnKind() == OwnershipAttr::Holds,
+ ReleasedAllocated);
if (StateI)
State = StateI;
}
@@ -599,20 +617,40 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
const CallExpr *CE,
ProgramStateRef state,
unsigned Num,
- bool Hold) const {
+ bool Hold,
+ bool &ReleasedAllocated,
+ bool ReturnsNullOnFailure) const {
if (CE->getNumArgs() < (Num + 1))
return 0;
- return FreeMemAux(C, CE->getArg(Num), CE, state, Hold);
+ return FreeMemAux(C, CE->getArg(Num), CE, state, Hold,
+ ReleasedAllocated, ReturnsNullOnFailure);
+}
+
+/// Checks whether the previous call to free on the given symbol failed.
+/// Returns true if it did, and sets RetStatusSymbol to the symbol of that
+/// call's return value.
+static bool didPreviousFreeFail(ProgramStateRef State,
+ SymbolRef Sym, SymbolRef &RetStatusSymbol) {
+ const SymbolRef *Ret = State->get<FreeReturnValue>(Sym);
+ if (Ret) {
+ assert(*Ret && "We should not store the null return symbol");
+ ConstraintManager &CMgr = State->getConstraintManager();
+ ConditionTruthVal FreeFailed = CMgr.isNull(State, *Ret);
+ RetStatusSymbol = *Ret;
+ return FreeFailed.isConstrainedTrue();
+ }
+ return false;
}
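// Minimal usage sketch for the helper above, assuming a tracked symbol 'Sym'
// and a 'FreeReturnValue' entry recorded by FreeMemAux below:
//
//   SymbolRef RetStatus = 0;
//   if (didPreviousFreeFail(State, Sym, RetStatus)) {
//     // The prior free's return value is constrained to NULL, so the memory
//     // was never actually released; a second free is not a double free.
//   }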
ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
const Expr *ArgExpr,
const Expr *ParentExpr,
- ProgramStateRef state,
- bool Hold) const {
+ ProgramStateRef State,
+ bool Hold,
+ bool &ReleasedAllocated,
+ bool ReturnsNullOnFailure) const {
- SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
+ SVal ArgVal = State->getSVal(ArgExpr, C.getLocationContext());
if (!isa<DefinedOrUnknownSVal>(ArgVal))
return 0;
DefinedOrUnknownSVal location = cast<DefinedOrUnknownSVal>(ArgVal);
@@ -623,7 +661,7 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// The explicit NULL case, no operation is performed.
ProgramStateRef notNullState, nullState;
- llvm::tie(notNullState, nullState) = state->assume(location);
+ llvm::tie(notNullState, nullState) = State->assume(location);
if (nullState && !notNullState)
return 0;
@@ -672,10 +710,14 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
return 0;
SymbolRef Sym = SR->getSymbol();
- const RefState *RS = state->get<RegionState>(Sym);
+ const RefState *RS = State->get<RegionState>(Sym);
+ SymbolRef PreviousRetStatusSymbol = 0;
// Check double free.
- if (RS && (RS->isReleased() || RS->isRelinquished())) {
+ if (RS &&
+ (RS->isReleased() || RS->isRelinquished()) &&
+ !didPreviousFreeFail(State, Sym, PreviousRetStatusSymbol)) {
+
if (ExplodedNode *N = C.generateSink()) {
if (!BT_DoubleFree)
BT_DoubleFree.reset(
@@ -685,16 +727,34 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
"Attempt to free non-owned memory"), N);
R->addRange(ArgExpr->getSourceRange());
R->markInteresting(Sym);
+ if (PreviousRetStatusSymbol)
+ R->markInteresting(PreviousRetStatusSymbol);
R->addVisitor(new MallocBugVisitor(Sym));
- C.EmitReport(R);
+ C.emitReport(R);
}
return 0;
}
+ ReleasedAllocated = (RS != 0);
+
+ // Clean out the info on the previous free call's return value.
+ State = State->remove<FreeReturnValue>(Sym);
+
+ // Keep track of the return value. If it is NULL, we will know that free
+ // failed.
+ if (ReturnsNullOnFailure) {
+ SVal RetVal = C.getSVal(ParentExpr);
+ SymbolRef RetStatusSymbol = RetVal.getAsSymbol();
+ if (RetStatusSymbol) {
+ C.getSymbolManager().addSymbolDependency(Sym, RetStatusSymbol);
+ State = State->set<FreeReturnValue>(Sym, RetStatusSymbol);
+ }
+ }
+
// Normal free.
if (Hold)
- return state->set<RegionState>(Sym, RefState::getRelinquished(ParentExpr));
- return state->set<RegionState>(Sym, RefState::getReleased(ParentExpr));
+ return State->set<RegionState>(Sym, RefState::getRelinquished(ParentExpr));
+ return State->set<RegionState>(Sym, RefState::getReleased(ParentExpr));
}
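// For contrast, the classic case the double-free branch above still flags
// (hypothetical user code):
//
//   char *p = (char *)malloc(8);
//   free(p);
//   free(p);  // warning: attempt to free released memory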
bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
@@ -714,7 +774,7 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
const MemRegion *MR) {
switch (MR->getKind()) {
case MemRegion::FunctionTextRegionKind: {
- const FunctionDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+ const NamedDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
if (FD)
os << "the address of the function '" << *FD << '\'';
else
@@ -819,7 +879,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
BugReport *R = new BugReport(*BT_BadFree, os.str(), N);
R->markInteresting(MR);
R->addRange(range);
- C.EmitReport(R);
+ C.emitReport(R);
}
}
@@ -886,9 +946,12 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
if (!FromPtr || !ToPtr)
return 0;
+ bool ReleasedAllocated = false;
+
// If the size is 0, free the memory.
if (SizeIsZero)
- if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero,0,false)){
+ if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero, 0,
+ false, ReleasedAllocated)){
// The semantics of the return value are:
// If size was equal to 0, either NULL or a pointer suitable to be passed
// to free() is returned. We just free the input pointer and do not add
@@ -897,14 +960,25 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
}
// Default behavior.
- if (ProgramStateRef stateFree = FreeMemAux(C, CE, state, 0, false)) {
- // FIXME: We should copy the content of the original buffer.
+ if (ProgramStateRef stateFree =
+ FreeMemAux(C, CE, state, 0, false, ReleasedAllocated)) {
+
ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
UnknownVal(), stateFree);
if (!stateRealloc)
return 0;
+
+ ReallocPairKind Kind = RPToBeFreedAfterFailure;
+ if (FreesOnFail)
+ Kind = RPIsFreeOnFailure;
+ else if (!ReleasedAllocated)
+ Kind = RPDoNotTrackAfterFailure;
+
+ // Record the info about the reallocated symbol so that we can properly
+ // process a failed reallocation.
stateRealloc = stateRealloc->set<ReallocPairs>(ToPtr,
- ReallocPair(FromPtr, FreesOnFail));
+ ReallocPair(FromPtr, Kind));
+ // The reallocated symbol should stay alive for as long as the new symbol.
C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
return stateRealloc;
}
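// The kinds recorded above encode what must happen when realloc fails and
// returns NULL (hypothetical user code):
//
//   char *buf = (char *)malloc(16);
//   char *tmp = (char *)realloc(buf, 32);
//   if (!tmp) {    // reallocation failed, 'buf' is still owned...
//     free(buf);   // ...so it must be freed (RPToBeFreedAfterFailure)
//     return;
//   }
//   buf = tmp;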
@@ -1004,7 +1078,7 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
BugReport *R = new BugReport(*BT_Leak, os.str(), N, LocUsedForUniqueing);
R->markInteresting(Sym);
R->addVisitor(new MallocBugVisitor(Sym, true));
- C.EmitReport(R);
+ C.emitReport(R);
}
void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
@@ -1017,14 +1091,11 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
RegionStateTy RS = state->get<RegionState>();
RegionStateTy::Factory &F = state->get_context<RegionState>();
- bool generateReport = false;
llvm::SmallVector<SymbolRef, 2> Errors;
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
if (SymReaper.isDead(I->first)) {
- if (I->second.isAllocated()) {
- generateReport = true;
+ if (I->second.isAllocated())
Errors.push_back(I->first);
- }
// Remove the dead symbol from the map.
RS = F.remove(RS, I->first);
@@ -1032,24 +1103,34 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
}
// Cleanup the Realloc Pairs Map.
- ReallocMap RP = state->get<ReallocPairs>();
- for (ReallocMap::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ ReallocPairsTy RP = state->get<ReallocPairs>();
+ for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
if (SymReaper.isDead(I->first) ||
SymReaper.isDead(I->second.ReallocatedSym)) {
state = state->remove<ReallocPairs>(I->first);
}
}
- // Generate leak node.
- static SimpleProgramPointTag Tag("MallocChecker : DeadSymbolsLeak");
- ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+ // Cleanup the FreeReturnValue Map.
+ FreeReturnValueTy FR = state->get<FreeReturnValue>();
+ for (FreeReturnValueTy::iterator I = FR.begin(), E = FR.end(); I != E; ++I) {
+ if (SymReaper.isDead(I->first) ||
+ SymReaper.isDead(I->second)) {
+ state = state->remove<FreeReturnValue>(I->first);
+ }
+ }
- if (generateReport) {
+ // Generate leak node.
+ ExplodedNode *N = C.getPredecessor();
+ if (!Errors.empty()) {
+ static SimpleProgramPointTag Tag("MallocChecker : DeadSymbolsLeak");
+ N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
for (llvm::SmallVector<SymbolRef, 2>::iterator
- I = Errors.begin(), E = Errors.end(); I != E; ++I) {
+ I = Errors.begin(), E = Errors.end(); I != E; ++I) {
reportLeak(*I, N, C);
}
}
+
C.addTransition(state->set<RegionState>(RS), N);
}
@@ -1182,7 +1263,7 @@ bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
R->addRange(S->getSourceRange());
R->markInteresting(Sym);
R->addVisitor(new MallocBugVisitor(Sym));
- C.EmitReport(R);
+ C.emitReport(R);
return true;
}
}
@@ -1249,28 +1330,36 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
bool Assumption) const {
RegionStateTy RS = state->get<RegionState>();
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
- // If the symbol is assumed to NULL or another constant, this will
- // return an APSInt*.
- if (state->getSymVal(I.getKey()))
+ // If the symbol is assumed to be NULL, remove it from consideration.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ if (AllocFailed.isConstrainedTrue())
state = state->remove<RegionState>(I.getKey());
}
// Realloc returns 0 when reallocation fails, which means that we should
// restore the state of the pointer being reallocated.
- ReallocMap RP = state->get<ReallocPairs>();
- for (ReallocMap::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
- // If the symbol is assumed to NULL or another constant, this will
- // return an APSInt*.
- if (state->getSymVal(I.getKey())) {
- SymbolRef ReallocSym = I.getData().ReallocatedSym;
- const RefState *RS = state->get<RegionState>(ReallocSym);
- if (RS) {
- if (RS->isReleased() && ! I.getData().IsFreeOnFailure)
+ ReallocPairsTy RP = state->get<ReallocPairs>();
+ for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ // If the symbol is assumed to be NULL, remove it from consideration.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ if (!AllocFailed.isConstrainedTrue())
+ continue;
+
+ SymbolRef ReallocSym = I.getData().ReallocatedSym;
+ if (const RefState *RS = state->get<RegionState>(ReallocSym)) {
+ if (RS->isReleased()) {
+ if (I.getData().Kind == RPToBeFreedAfterFailure)
state = state->set<RegionState>(ReallocSym,
- RefState::getAllocated(RS->getStmt()));
+ RefState::getAllocated(RS->getStmt()));
+ else if (I.getData().Kind == RPDoNotTrackAfterFailure)
+ state = state->remove<RegionState>(ReallocSym);
+ else
+ assert(I.getData().Kind == RPIsFreeOnFailure);
}
- state = state->remove<ReallocPairs>(I.getKey());
}
+ state = state->remove<ReallocPairs>(I.getKey());
}
return state;
@@ -1463,10 +1552,10 @@ MallocChecker::checkRegionChanges(ProgramStateRef State,
static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
ProgramStateRef prevState) {
- ReallocMap currMap = currState->get<ReallocPairs>();
- ReallocMap prevMap = prevState->get<ReallocPairs>();
+ ReallocPairsTy currMap = currState->get<ReallocPairs>();
+ ReallocPairsTy prevMap = prevState->get<ReallocPairs>();
- for (ReallocMap::iterator I = prevMap.begin(), E = prevMap.end();
+ for (ReallocPairsTy::iterator I = prevMap.begin(), E = prevMap.end();
I != E; ++I) {
SymbolRef sym = I.getKey();
if (!currMap.lookup(sym))
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 6292a47..fb40f22 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -146,9 +146,9 @@ static bool typesCompatible(ASTContext &C, QualType A, QualType B) {
if (const PointerType *ptrA = A->getAs<PointerType>())
if (const PointerType *ptrB = B->getAs<PointerType>()) {
- A = ptrA->getPointeeType();
- B = ptrB->getPointeeType();
- continue;
+ A = ptrA->getPointeeType();
+ B = ptrB->getPointeeType();
+ continue;
}
break;
@@ -157,6 +157,18 @@ static bool typesCompatible(ASTContext &C, QualType A, QualType B) {
return false;
}
+static bool compatibleWithArrayType(ASTContext &C, QualType PT, QualType T) {
+ // Ex: 'int a[10][2]' is compatible with 'int', 'int[2]', 'int[10][2]'.
+ while (const ArrayType *AT = T->getAsArrayTypeUnsafe()) {
+ QualType ElemType = AT->getElementType();
+ if (typesCompatible(C, PT, AT->getElementType()))
+ return true;
+ T = ElemType;
+ }
+
+ return false;
+}
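// Illustrative cases for the rule above (hypothetical user code):
//
//   int (*p)[2] = malloc(sizeof(int[10][2])); // OK: peels to 'int[2]'
//   int *q = malloc(sizeof(int[10]));         // OK: element type is 'int'
//   short *r = malloc(sizeof(int[10]));       // flagged: 'short' vs 'int'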
+
class MallocSizeofChecker : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
@@ -184,38 +196,49 @@ public:
continue;
QualType SizeofType = SFinder.Sizeofs[0]->getTypeOfArgument();
- if (!typesCompatible(BR.getContext(), PointeeType, SizeofType)) {
- const TypeSourceInfo *TSI = 0;
- if (i->CastedExprParent.is<const VarDecl *>()) {
- TSI =
+
+ if (typesCompatible(BR.getContext(), PointeeType, SizeofType))
+ continue;
+
+ // If the argument to sizeof is an array, the result could be a
+ // pointer to an element at any level of array nesting.
+ if (compatibleWithArrayType(BR.getContext(), PointeeType, SizeofType))
+ continue;
+
+ const TypeSourceInfo *TSI = 0;
+ if (i->CastedExprParent.is<const VarDecl *>()) {
+ TSI =
i->CastedExprParent.get<const VarDecl *>()->getTypeSourceInfo();
- } else {
- TSI = i->ExplicitCastType;
- }
-
- SmallString<64> buf;
- llvm::raw_svector_ostream OS(buf);
-
- OS << "Result of '"
- << i->AllocCall->getDirectCallee()->getIdentifier()->getName()
- << "' is converted to a pointer of type '"
- << PointeeType.getAsString() << "', which is incompatible with "
- << "sizeof operand type '" << SizeofType.getAsString() << "'";
- llvm::SmallVector<SourceRange, 4> Ranges;
- Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
- Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
- if (TSI)
- Ranges.push_back(TSI->getTypeLoc().getSourceRange());
-
- PathDiagnosticLocation L =
+ } else {
+ TSI = i->ExplicitCastType;
+ }
+
+ SmallString<64> buf;
+ llvm::raw_svector_ostream OS(buf);
+
+ OS << "Result of ";
+ const FunctionDecl *Callee = i->AllocCall->getDirectCallee();
+ if (Callee && Callee->getIdentifier())
+ OS << '\'' << Callee->getIdentifier()->getName() << '\'';
+ else
+ OS << "call";
+ OS << " is converted to a pointer of type '"
+ << PointeeType.getAsString() << "', which is incompatible with "
+ << "sizeof operand type '" << SizeofType.getAsString() << "'";
+ llvm::SmallVector<SourceRange, 4> Ranges;
+ Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
+ Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
+ if (TSI)
+ Ranges.push_back(TSI->getTypeLoc().getSourceRange());
+
+ PathDiagnosticLocation L =
PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
- BR.getSourceManager(), ADC);
+ BR.getSourceManager(), ADC);
- BR.EmitBasicReport(D, "Allocator sizeof operand mismatch",
- categories::UnixAPI,
- OS.str(),
- L, Ranges.data(), Ranges.size());
- }
+ BR.EmitBasicReport(D, "Allocator sizeof operand mismatch",
+ categories::UnixAPI,
+ OS.str(),
+ L, Ranges.data(), Ranges.size());
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index aad3b0f..3331bc8 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -71,7 +71,7 @@ void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
BugReport *Report = new BugReport(*BT, "Use -drain instead of -release when "
"using NSAutoreleasePool and garbage collection", N);
Report->addRange(msg.getSourceRange());
- C.EmitReport(Report);
+ C.emitReport(Report);
}
void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index f826573..7a66ec3 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -163,23 +163,9 @@ public:
};
}
-namespace { struct NSErrorOut {}; }
-namespace { struct CFErrorOut {}; }
-
typedef llvm::ImmutableMap<SymbolRef, unsigned> ErrorOutFlag;
-
-namespace clang {
-namespace ento {
- template <>
- struct ProgramStateTrait<NSErrorOut> : public ProgramStatePartialTrait<ErrorOutFlag> {
- static void *GDMIndex() { static int index = 0; return &index; }
- };
- template <>
- struct ProgramStateTrait<CFErrorOut> : public ProgramStatePartialTrait<ErrorOutFlag> {
- static void *GDMIndex() { static int index = 0; return &index; }
- };
-}
-}
+REGISTER_TRAIT_WITH_PROGRAMSTATE(NSErrorOut, ErrorOutFlag)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(CFErrorOut, ErrorOutFlag)
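// The macro expands to the same ProgramStateTrait specialization that was
// spelled out by hand above; lookups keep their old shape. A minimal sketch,
// assuming a ProgramStateRef 'state' and a SymbolRef 'sym':
//
//   ErrorOutFlag NSOut = state->get<NSErrorOut>();
//   if (const unsigned *flag = NSOut.lookup(sym)) { /* sym is flagged */ }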
template <typename T>
static bool hasFlag(SVal val, ProgramStateRef state) {
@@ -285,7 +271,7 @@ void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
bug = new CFErrorDerefBug();
BugReport *report = new BugReport(*bug, os.str(),
event.SinkNode);
- BR.EmitReport(report);
+ BR.emitReport(report);
}
static bool IsNSError(QualType T, IdentifierInfo *II) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
deleted file mode 100644
index 7b724d2..0000000
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
+++ /dev/null
@@ -1,218 +0,0 @@
-//=== OSAtomicChecker.cpp - OSAtomic functions evaluator --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This checker evaluates OSAtomic functions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ClangSACheckers.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/Basic/Builtins.h"
-
-using namespace clang;
-using namespace ento;
-
-namespace {
-
-class OSAtomicChecker : public Checker<eval::InlineCall> {
-public:
- bool inlineCall(const CallExpr *CE, ExprEngine &Eng,
- ExplodedNode *Pred, ExplodedNodeSet &Dst) const;
-
-private:
- bool evalOSAtomicCompareAndSwap(const CallExpr *CE,
- ExprEngine &Eng,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) const;
-};
-}
-
-static StringRef getCalleeName(ProgramStateRef State,
- const CallExpr *CE,
- const LocationContext *LCtx) {
- const Expr *Callee = CE->getCallee();
- SVal L = State->getSVal(Callee, LCtx);
- const FunctionDecl *funDecl = L.getAsFunctionDecl();
- if (!funDecl)
- return StringRef();
- IdentifierInfo *funI = funDecl->getIdentifier();
- if (!funI)
- return StringRef();
- return funI->getName();
-}
-
-bool OSAtomicChecker::inlineCall(const CallExpr *CE,
- ExprEngine &Eng,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) const {
- StringRef FName = getCalleeName(Pred->getState(),
- CE, Pred->getLocationContext());
- if (FName.empty())
- return false;
-
- // Check for compare and swap.
- if (FName.startswith("OSAtomicCompareAndSwap") ||
- FName.startswith("objc_atomicCompareAndSwap"))
- return evalOSAtomicCompareAndSwap(CE, Eng, Pred, Dst);
-
- // FIXME: Other atomics.
- return false;
-}
-
-bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
- ExprEngine &Eng,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) const {
- // Not enough arguments to match OSAtomicCompareAndSwap?
- if (CE->getNumArgs() != 3)
- return false;
-
- ASTContext &Ctx = Eng.getContext();
- const Expr *oldValueExpr = CE->getArg(0);
- QualType oldValueType = Ctx.getCanonicalType(oldValueExpr->getType());
-
- const Expr *newValueExpr = CE->getArg(1);
- QualType newValueType = Ctx.getCanonicalType(newValueExpr->getType());
-
- // Do the types of 'oldValue' and 'newValue' match?
- if (oldValueType != newValueType)
- return false;
-
- const Expr *theValueExpr = CE->getArg(2);
- const PointerType *theValueType=theValueExpr->getType()->getAs<PointerType>();
-
- // theValueType not a pointer?
- if (!theValueType)
- return false;
-
- QualType theValueTypePointee =
- Ctx.getCanonicalType(theValueType->getPointeeType()).getUnqualifiedType();
-
- // The pointee must match newValueType and oldValueType.
- if (theValueTypePointee != newValueType)
- return false;
-
- static SimpleProgramPointTag OSAtomicLoadTag("OSAtomicChecker : Load");
- static SimpleProgramPointTag OSAtomicStoreTag("OSAtomicChecker : Store");
-
- // Load 'theValue'.
- ProgramStateRef state = Pred->getState();
- const LocationContext *LCtx = Pred->getLocationContext();
- ExplodedNodeSet Tmp;
- SVal location = state->getSVal(theValueExpr, LCtx);
- // Here we should use the value type of the region as the load type, because
- // we are simulating the semantics of the function, not the semantics of
- // passing argument. So the type of theValue expr is not we are loading.
- // But usually the type of the varregion is not the type we want either,
- // we still need to do a CastRetrievedVal in store manager. So actually this
- // LoadTy specifying can be omitted. But we put it here to emphasize the
- // semantics.
- QualType LoadTy;
- if (const TypedValueRegion *TR =
- dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
- LoadTy = TR->getValueType();
- }
- Eng.evalLoad(Tmp, CE, theValueExpr, Pred,
- state, location, &OSAtomicLoadTag, LoadTy);
-
- if (Tmp.empty()) {
- // If no nodes were generated, other checkers must have generated sinks.
- // We return an empty Dst.
- return true;
- }
-
- for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end();
- I != E; ++I) {
-
- ExplodedNode *N = *I;
- ProgramStateRef stateLoad = N->getState();
-
- // Use direct bindings from the environment since we are forcing a load
- // from a location that the Environment would typically not be used
- // to bind a value.
- SVal theValueVal_untested = stateLoad->getSVal(theValueExpr, LCtx, true);
-
- SVal oldValueVal_untested = stateLoad->getSVal(oldValueExpr, LCtx);
-
- // FIXME: Issue an error.
- if (theValueVal_untested.isUndef() || oldValueVal_untested.isUndef()) {
- return false;
- }
-
- DefinedOrUnknownSVal theValueVal =
- cast<DefinedOrUnknownSVal>(theValueVal_untested);
- DefinedOrUnknownSVal oldValueVal =
- cast<DefinedOrUnknownSVal>(oldValueVal_untested);
-
- SValBuilder &svalBuilder = Eng.getSValBuilder();
-
- // Perform the comparison.
- DefinedOrUnknownSVal Cmp =
- svalBuilder.evalEQ(stateLoad,theValueVal,oldValueVal);
-
- ProgramStateRef stateEqual = stateLoad->assume(Cmp, true);
-
- // Were they equal?
- if (stateEqual) {
- // Perform the store.
- ExplodedNodeSet TmpStore;
- SVal val = stateEqual->getSVal(newValueExpr, LCtx);
-
- // Handle implicit value casts.
- if (const TypedValueRegion *R =
- dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
- val = svalBuilder.evalCast(val,R->getValueType(), newValueExpr->getType());
- }
-
- Eng.evalStore(TmpStore, CE, theValueExpr, N,
- stateEqual, location, val, &OSAtomicStoreTag);
-
- if (TmpStore.empty()) {
- // If no nodes were generated, other checkers must have generated sinks.
- // We return an empty Dst.
- return true;
- }
-
- StmtNodeBuilder B(TmpStore, Dst, Eng.getBuilderContext());
- // Now bind the result of the comparison.
- for (ExplodedNodeSet::iterator I2 = TmpStore.begin(),
- E2 = TmpStore.end(); I2 != E2; ++I2) {
- ExplodedNode *predNew = *I2;
- ProgramStateRef stateNew = predNew->getState();
- // Check for 'void' return type if we have a bogus function prototype.
- SVal Res = UnknownVal();
- QualType T = CE->getType();
- if (!T->isVoidType())
- Res = Eng.getSValBuilder().makeTruthVal(true, T);
- B.generateNode(CE, predNew, stateNew->BindExpr(CE, LCtx, Res),
- false, this);
- }
- }
-
- // Were they not equal?
- if (ProgramStateRef stateNotEqual = stateLoad->assume(Cmp, false)) {
- // Check for 'void' return type if we have a bogus function prototype.
- SVal Res = UnknownVal();
- QualType T = CE->getType();
- if (!T->isVoidType())
- Res = Eng.getSValBuilder().makeTruthVal(false, CE->getType());
- StmtNodeBuilder B(N, Dst, Eng.getBuilderContext());
- B.generateNode(CE, N, stateNotEqual->BindExpr(CE, LCtx, Res),
- false, this);
- }
- }
-
- return true;
-}
-
-void ento::registerOSAtomicChecker(CheckerManager &mgr) {
- mgr.registerChecker<OSAtomicChecker>();
-}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 4cc92ce..9d84f52 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -18,7 +18,6 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
-#include "clang/StaticAnalyzer/Checkers/DereferenceChecker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
@@ -50,8 +49,8 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"for @synchronized"));
BugReport *report =
new BugReport(*BT_undef, BT_undef->getDescription(), N);
- bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report);
- C.EmitReport(report);
+ bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ C.emitReport(report);
}
return;
}
@@ -73,9 +72,9 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"(no synchronization will occur)"));
BugReport *report =
new BugReport(*BT_null, BT_null->getDescription(), N);
- bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report);
+ bugreporter::trackNullOrUndefValue(N, Ex, *report);
- C.EmitReport(report);
+ C.emitReport(report);
return;
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index f2929c0..63a8480 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -31,8 +31,6 @@ class WalkAST : public StmtVisitor<WalkAST> {
ASTContext &ASTC;
uint64_t PtrWidth;
- static const unsigned InvalidArgIndex = UINT_MAX;
-
/// Check if the type has pointer size (very conservative).
inline bool isPointerSize(const Type *T) {
if (!T)
@@ -102,16 +100,18 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
return;
const Expr *Arg = 0;
- unsigned ArgNum = InvalidArgIndex;
+ unsigned ArgNum;
if (Name.equals("CFArrayCreate") || Name.equals("CFSetCreate")) {
+ if (CE->getNumArgs() != 4)
+ return;
ArgNum = 1;
Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
if (hasPointerToPointerSizedType(Arg))
return;
- }
-
- if (Arg == 0 && Name.equals("CFDictionaryCreate")) {
+ } else if (Name.equals("CFDictionaryCreate")) {
+ if (CE->getNumArgs() != 6)
+ return;
// Check first argument.
ArgNum = 1;
Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
@@ -125,17 +125,18 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
}
}
- if (ArgNum != InvalidArgIndex) {
+ if (Arg) {
assert(ArgNum == 1 || ArgNum == 2);
- SmallString<256> BufName;
+ SmallString<64> BufName;
llvm::raw_svector_ostream OsName(BufName);
- assert(ArgNum == 1 || ArgNum == 2);
OsName << " Invalid use of '" << Name << "'" ;
SmallString<256> Buf;
llvm::raw_svector_ostream Os(Buf);
- Os << " The "<< ((ArgNum == 1) ? "first" : "second") << " argument to '"
+ // Use "second" and "third" since users will expect 1-based indexing
+ // for parameter names when mentioned in prose.
+ Os << " The "<< ((ArgNum == 1) ? "second" : "third") << " argument to '"
<< Name << "' must be a C array of pointer-sized values, not '"
<< Arg->getType().getAsString() << "'";
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 2ab49ed..999c994 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -55,16 +55,8 @@ public:
};
} // end anonymous namespace
-// ProgramState trait - a map from array symbol to it's state.
-typedef llvm::ImmutableMap<SymbolRef, DefinedSVal> ArraySizeM;
-
-namespace { struct ArraySizeMap {}; }
-namespace clang { namespace ento {
-template<> struct ProgramStateTrait<ArraySizeMap>
- : public ProgramStatePartialTrait<ArraySizeM > {
- static void *GDMIndex() { return ObjCContainersChecker::getTag(); }
-};
-}}
+// ProgramState trait - a map from array symbol to its state.
+REGISTER_MAP_WITH_PROGRAMSTATE(ArraySizeMap, SymbolRef, DefinedSVal)
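// With the map-style macro, per-symbol lookups are unchanged (sketch):
// 'State->get<ArraySizeMap>(ArraySym)' yields a pointer to the recorded
// DefinedSVal size, or null when no size has been seen for the array.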
void ObjCContainersChecker::addSizeInfo(const Expr *Array, const Expr *Size,
CheckerContext &C) const {
@@ -146,7 +138,7 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
initBugType();
BugReport *R = new BugReport(*BT, "Index is out of bounds", N);
R->addRange(IdxExpr->getSourceRange());
- C.EmitReport(R);
+ C.emitReport(R);
return;
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
new file mode 100644
index 0000000..e906e8a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -0,0 +1,203 @@
+//==- ObjCMissingSuperCallChecker.cpp - Check missing super-calls in ObjC --==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ObjCMissingSuperCallChecker, a checker that
+// analyzes a UIViewController implementation to determine if it
+// correctly calls super in the methods where this is mandatory.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool isUIViewControllerSubclass(ASTContext &Ctx,
+ const ObjCImplementationDecl *D) {
+ IdentifierInfo *ViewControllerII = &Ctx.Idents.get("UIViewController");
+ const ObjCInterfaceDecl *ID = D->getClassInterface();
+
+ for ( ; ID; ID = ID->getSuperClass())
+ if (ID->getIdentifier() == ViewControllerII)
+ return true;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// FindSuperCallVisitor - Identify specific calls to the superclass.
+//===----------------------------------------------------------------------===//
+
+class FindSuperCallVisitor : public RecursiveASTVisitor<FindSuperCallVisitor> {
+public:
+ explicit FindSuperCallVisitor(Selector S) : DoesCallSuper(false), Sel(S) {}
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getSelector() == Sel)
+ if (E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+ DoesCallSuper = true;
+
+ // Recurse if we didn't find the super call yet.
+ return !DoesCallSuper;
+ }
+
+ bool DoesCallSuper;
+
+private:
+ Selector Sel;
+};
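// Driving sketch (mirrors its use in checkASTDecl below). Returning false
// from VisitObjCMessageExpr aborts the traversal as soon as the super call
// is found:
//
//   FindSuperCallVisitor Visitor(S);
//   Visitor.TraverseDecl(MD);
//   if (!Visitor.DoesCallSuper) { /* diagnose the missing call */ }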
+
+//===----------------------------------------------------------------------===//
+// ObjCSuperCallChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCSuperCallChecker : public Checker<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const;
+};
+}
+
+void ObjCSuperCallChecker::checkASTDecl(const ObjCImplementationDecl *D,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ ASTContext &Ctx = BR.getContext();
+
+ if (!isUIViewControllerSubclass(Ctx, D))
+ return;
+
+ const char *SelectorNames[] =
+ {"addChildViewController", "viewDidAppear", "viewDidDisappear",
+ "viewWillAppear", "viewWillDisappear", "removeFromParentViewController",
+ "didReceiveMemoryWarning", "viewDidUnload", "viewWillUnload",
+ "viewDidLoad"};
+ const unsigned SelectorArgumentCounts[] =
+ {1, 1, 1, 1, 1, 0, 0, 0, 0, 0};
+ const size_t SelectorCount = llvm::array_lengthof(SelectorNames);
+ assert(llvm::array_lengthof(SelectorArgumentCounts) == SelectorCount);
+
+ // Fill the Selectors SmallSet with all selectors we want to check.
+ llvm::SmallSet<Selector, 16> Selectors;
+ for (size_t i = 0; i < SelectorCount; i++) {
+ unsigned ArgumentCount = SelectorArgumentCounts[i];
+ const char *SelectorCString = SelectorNames[i];
+
+ // Get the selector.
+ IdentifierInfo *II = &Ctx.Idents.get(SelectorCString);
+ Selectors.insert(Ctx.Selectors.getSelector(ArgumentCount, &II));
+ }
+
+ // Iterate over all instance methods.
+ for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
+ E = D->instmeth_end();
+ I != E; ++I) {
+ Selector S = (*I)->getSelector();
+ // Find out whether this is a selector that we want to check.
+ if (!Selectors.count(S))
+ continue;
+
+ ObjCMethodDecl *MD = *I;
+
+ // Check if the method calls its superclass implementation.
+ if (MD->getBody())
+ {
+ FindSuperCallVisitor Visitor(S);
+ Visitor.TraverseDecl(MD);
+
+ // It doesn't call super; emit a diagnostic.
+ if (!Visitor.DoesCallSuper) {
+ PathDiagnosticLocation DLoc =
+ PathDiagnosticLocation::createEnd(MD->getBody(),
+ BR.getSourceManager(),
+ Mgr.getAnalysisDeclContext(D));
+
+ const char *Name = "Missing call to superclass";
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream os(Buf);
+
+ os << "The '" << S.getAsString()
+ << "' instance method in UIViewController subclass '" << *D
+ << "' is missing a [super " << S.getAsString() << "] call";
+
+ BR.EmitBasicReport(MD, Name, categories::CoreFoundationObjectiveC,
+ os.str(), DLoc);
+ }
+ }
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerObjCSuperCallChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ObjCSuperCallChecker>();
+}
+
+
+/*
+ ToDo list for expanding this check in the future; the list is not exhaustive.
+ There are also cases where calling super is suggested but not "mandatory".
+ In addition to being able to check the classes and methods below, architectural
+ improvements like allowing the super-call to be done in a called method would
+ be good too.
+
+*** trivial cases:
+UIResponder subclasses
+- resignFirstResponder
+
+NSResponder subclasses
+- cursorUpdate
+
+*** more difficult cases:
+
+UIDocument subclasses
+- finishedHandlingError:recovered: (is multi-arg)
+
+UIViewController subclasses
+- loadView (should *never* call super)
+- transitionFromViewController:toViewController:
+ duration:options:animations:completion: (is multi-arg)
+
+UICollectionViewController subclasses
+- loadView (take care because UIViewController subclasses should NOT call super
+ in loadView, but UICollectionViewController subclasses should)
+
+NSObject subclasses
+- doesNotRecognizeSelector (it only has to call super if it doesn't throw)
+
+UIPopoverBackgroundView subclasses (some of those are class methods)
+- arrowDirection (should *never* call super)
+- arrowOffset (should *never* call super)
+- arrowBase (should *never* call super)
+- arrowHeight (should *never* call super)
+- contentViewInsets (should *never* call super)
+
+UITextSelectionRect subclasses (some of those are properties)
+- rect (should *never* call super)
+- range (should *never* call super)
+- writingDirection (should *never* call super)
+- isVertical (should *never* call super)
+- containsStart (should *never* call super)
+- containsEnd (should *never* call super)
+*/
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index be45da1..98d2a85a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -72,6 +72,8 @@ public:
void checkPreCall(const CallEvent &CE, CheckerContext &C) const;
void checkPostCall(const CallEvent &CE, CheckerContext &C) const;
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const;
};
} // end anonymous namespace
@@ -97,31 +99,14 @@ enum SelfFlagEnum {
};
}
-typedef llvm::ImmutableMap<SymbolRef, unsigned> SelfFlag;
-namespace { struct CalledInit {}; }
-namespace { struct PreCallSelfFlags {}; }
-
-namespace clang {
-namespace ento {
- template<>
- struct ProgramStateTrait<SelfFlag> : public ProgramStatePartialTrait<SelfFlag> {
- static void *GDMIndex() { static int index = 0; return &index; }
- };
- template <>
- struct ProgramStateTrait<CalledInit> : public ProgramStatePartialTrait<bool> {
- static void *GDMIndex() { static int index = 0; return &index; }
- };
-
- /// \brief A call receiving a reference to 'self' invalidates the object that
- /// 'self' contains. This keeps the "self flags" assigned to the 'self'
- /// object before the call so we can assign them to the new object that 'self'
- /// points to after the call.
- template <>
- struct ProgramStateTrait<PreCallSelfFlags> : public ProgramStatePartialTrait<unsigned> {
- static void *GDMIndex() { static int index = 0; return &index; }
- };
-}
-}
+REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, unsigned)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(CalledInit, bool)
+
+/// \brief A call receiving a reference to 'self' invalidates the object that
+/// 'self' contains. This keeps the "self flags" assigned to the 'self'
+/// object before the call so we can assign them to the new object that 'self'
+/// points to after the call.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PreCallSelfFlags, unsigned)
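// Save/restore sketch for PreCallSelfFlags, a simplification of the
// checkPreCall/checkPostCall pair below:
//
//   // before the call: stash the flags of the 'self' argument
//   state = state->set<PreCallSelfFlags>(getSelfFlags(ArgVal, state));
//   // after the call: re-attach them to the value 'self' now holds
//   unsigned Saved = state->get<PreCallSelfFlags>();
//   addSelfFlag(state, CE.getReturnValue(), (SelfFlagEnum)Saved, C);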
static SelfFlagEnum getSelfFlags(SVal val, ProgramStateRef state) {
if (SymbolRef sym = val.getAsSymbol())
@@ -138,7 +123,8 @@ static void addSelfFlag(ProgramStateRef state, SVal val,
SelfFlagEnum flag, CheckerContext &C) {
// We tag the symbol that the SVal wraps.
if (SymbolRef sym = val.getAsSymbol())
- C.addTransition(state->set<SelfFlag>(sym, getSelfFlags(val, C) | flag));
+ state = state->set<SelfFlag>(sym, getSelfFlags(val, state) | flag);
+ C.addTransition(state);
}
static bool hasSelfFlag(SVal val, SelfFlagEnum flag, CheckerContext &C) {
@@ -176,7 +162,7 @@ static void checkForInvalidSelf(const Expr *E, CheckerContext &C,
BugReport *report =
new BugReport(*new InitSelfBug(), errorStr, N);
- C.EmitReport(report);
+ C.emitReport(report);
}
void ObjCSelfInitChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
@@ -305,13 +291,12 @@ void ObjCSelfInitChecker::checkPostCall(const CallEvent &CE,
// returns 'self'. So assign the flags, which were set on 'self' to the
// return value.
// EX: self = performMoreInitialization(self)
- const Expr *CallExpr = CE.getOriginExpr();
- if (CallExpr)
- addSelfFlag(state, state->getSVal(CallExpr, C.getLocationContext()),
- prevFlags, C);
+ addSelfFlag(state, CE.getReturnValue(), prevFlags, C);
return;
}
}
+
+ C.addTransition(state);
}
void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad,
@@ -346,6 +331,53 @@ void ObjCSelfInitChecker::checkBind(SVal loc, SVal val, const Stmt *S,
}
}
+void ObjCSelfInitChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ SelfFlagTy FlagMap = State->get<SelfFlag>();
+ bool DidCallInit = State->get<CalledInit>();
+ SelfFlagEnum PreCallFlags = (SelfFlagEnum)State->get<PreCallSelfFlags>();
+
+ if (FlagMap.isEmpty() && !DidCallInit && !PreCallFlags)
+ return;
+
+ Out << Sep << NL << "ObjCSelfInitChecker:" << NL;
+
+ if (DidCallInit)
+ Out << " An init method has been called." << NL;
+
+ if (PreCallFlags != SelfFlag_None) {
+ if (PreCallFlags & SelfFlag_Self) {
+ Out << " An argument of the current call came from the 'self' variable."
+ << NL;
+ }
+ if (PreCallFlags & SelfFlag_InitRes) {
+ Out << " An argument of the current call came from an init method."
+ << NL;
+ }
+ }
+
+ Out << NL;
+ for (SelfFlagTy::iterator I = FlagMap.begin(), E = FlagMap.end();
+ I != E; ++I) {
+ Out << I->first << " : ";
+
+ if (I->second == SelfFlag_None)
+ Out << "none";
+
+ if (I->second & SelfFlag_Self)
+ Out << "self variable";
+
+ if (I->second & SelfFlag_InitRes) {
+ if (I->second != SelfFlag_InitRes)
+ Out << " | ";
+ Out << "result of init method";
+ }
+
+ Out << NL;
+ }
+}
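// Illustrative shape of the debug dump this produces:
//
//   ObjCSelfInitChecker:
//    An init method has been called.
//
//   <self symbol> : self variable | result of init method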
+
+
// FIXME: A callback should disable checkers at the start of functions.
static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
if (!ND)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index fe4845b..b5d9959 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -59,7 +59,7 @@ void PointerArithChecker::checkPreStmt(const BinaryOperator *B,
"dangerous."));
BugReport *R = new BugReport(*BT, BT->getDescription(), N);
R->addRange(B->getSourceRange());
- C.EmitReport(R);
+ C.emitReport(R);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index fa5c6a3..47da87f 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -67,7 +67,7 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
"the same memory chunk may cause incorrect result."));
BugReport *R = new BugReport(*BT, BT->getDescription(), N);
R->addRange(B->getSourceRange());
- C.EmitReport(R);
+ C.emitReport(R);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 2d018ef..d9b6384 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -43,15 +43,7 @@ public:
} // end anonymous namespace
// GDM Entry for tracking lock state.
-namespace { class LockSet {}; }
-namespace clang {
-namespace ento {
-template <> struct ProgramStateTrait<LockSet> :
- public ProgramStatePartialTrait<llvm::ImmutableList<const MemRegion*> > {
- static void *GDMIndex() { static int x = 0; return &x; }
-};
-} // end of ento (ProgramState) namespace
-} // end clang namespace
+REGISTER_LIST_WITH_PROGRAMSTATE(LockSet, const MemRegion *)
void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
@@ -118,7 +110,7 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
"This lock has already "
"been acquired", N);
report->addRange(CE->getArg(0)->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
return;
}
@@ -163,7 +155,7 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
return;
ProgramStateRef state = C.getState();
- llvm::ImmutableList<const MemRegion*> LS = state->get<LockSet>();
+ LockSetTy LS = state->get<LockSet>();
// FIXME: Better analysis requires IPA for wrappers.
// FIXME: check for double unlocks
@@ -183,7 +175,7 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
"Possible lock order "
"reversal", N);
report->addRange(CE->getArg(0)->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
return;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index 3c00d99..304051c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -40,24 +40,6 @@ using namespace clang;
using namespace ento;
using llvm::StrInStrNoCase;
-namespace {
-/// Wrapper around different kinds of node builder, so that helper functions
-/// can have a common interface.
-class GenericNodeBuilderRefCount {
- CheckerContext *C;
- const ProgramPointTag *tag;
-public:
- GenericNodeBuilderRefCount(CheckerContext &c,
- const ProgramPointTag *t = 0)
- : C(&c), tag(t){}
-
- ExplodedNode *MakeNode(ProgramStateRef state, ExplodedNode *Pred,
- bool MarkAsSink = false) {
- return C->addTransition(state, Pred, tag, MarkAsSink);
- }
-};
-} // end anonymous namespace
-
//===----------------------------------------------------------------------===//
// Primitives used for constructing summaries for function/method calls.
//===----------------------------------------------------------------------===//
@@ -66,9 +48,23 @@ public:
/// particular argument.
enum ArgEffect { DoNothing, Autorelease, Dealloc, DecRef, DecRefMsg,
DecRefBridgedTransfered,
- DecRefAndStopTracking, DecRefMsgAndStopTracking,
IncRefMsg, IncRef, MakeCollectable, MayEscape,
- NewAutoreleasePool, StopTracking };
+ NewAutoreleasePool,
+
+ // Stop tracking the argument - the effect of the call is
+ // unknown.
+ StopTracking,
+
+ // In some cases, we obtain a better summary for this checker
+ // by looking at the call site than by inlining the function.
+ // Signifies that we should stop tracking the symbol even if
+ // the function is inlined.
+ StopTrackingHard,
+
+ // The function decrements the reference count and the checker
+ // should stop tracking the argument.
+ DecRefAndStopTrackingHard, DecRefMsgAndStopTrackingHard
+ };
namespace llvm {
template <> struct FoldingSetTrait<ArgEffect> {
@@ -90,7 +86,13 @@ class RetEffect {
public:
enum Kind { NoRet, OwnedSymbol, OwnedAllocatedSymbol,
NotOwnedSymbol, GCNotOwnedSymbol, ARCNotOwnedSymbol,
- OwnedWhenTrackedReceiver };
+ OwnedWhenTrackedReceiver,
+ // Treat this function as returning a non-tracked symbol even if
+ // the function has been inlined. This is used where the call
+ // site summary is more precise than the summary indirectly produced
+ // by inlining the function.
+ NoRetHard
+ };
enum ObjKind { CF, ObjC, AnyObj };
@@ -133,6 +135,9 @@ public:
static RetEffect MakeNoRet() {
return RetEffect(NoRet);
}
+ static RetEffect MakeNoRetHard() {
+ return RetEffect(NoRetHard);
+ }
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned) K);
@@ -337,20 +342,7 @@ void RefVal::print(raw_ostream &Out) const {
// RefBindings - State used to track object reference counts.
//===----------------------------------------------------------------------===//
-typedef llvm::ImmutableMap<SymbolRef, RefVal> RefBindings;
-
-namespace clang {
-namespace ento {
-template<>
-struct ProgramStateTrait<RefBindings>
- : public ProgramStatePartialTrait<RefBindings> {
- static void *GDMIndex() {
- static int RefBIndex = 0;
- return &RefBIndex;
- }
-};
-}
-}
+REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal)
static inline const RefVal *getRefBinding(ProgramStateRef State,
SymbolRef Sym) {
@@ -893,7 +885,7 @@ static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) {
return FName.find("MakeCollectable") != StringRef::npos;
}
-static ArgEffect getStopTrackingEquivalent(ArgEffect E) {
+static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) {
switch (E) {
case DoNothing:
case Autorelease:
@@ -904,13 +896,14 @@ static ArgEffect getStopTrackingEquivalent(ArgEffect E) {
case MayEscape:
case NewAutoreleasePool:
case StopTracking:
- return StopTracking;
+ case StopTrackingHard:
+ return StopTrackingHard;
case DecRef:
- case DecRefAndStopTracking:
- return DecRefAndStopTracking;
+ case DecRefAndStopTrackingHard:
+ return DecRefAndStopTrackingHard;
case DecRefMsg:
- case DecRefMsgAndStopTracking:
- return DecRefMsgAndStopTracking;
+ case DecRefMsgAndStopTrackingHard:
+ return DecRefMsgAndStopTrackingHard;
case Dealloc:
return Dealloc;
}
@@ -921,33 +914,65 @@ static ArgEffect getStopTrackingEquivalent(ArgEffect E) {
void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
const CallEvent &Call) {
if (Call.hasNonZeroCallbackArg()) {
- ArgEffect RecEffect = getStopTrackingEquivalent(S->getReceiverEffect());
- ArgEffect DefEffect = getStopTrackingEquivalent(S->getDefaultArgEffect());
+ ArgEffect RecEffect =
+ getStopTrackingHardEquivalent(S->getReceiverEffect());
+ ArgEffect DefEffect =
+ getStopTrackingHardEquivalent(S->getDefaultArgEffect());
ArgEffects CustomArgEffects = S->getArgEffects();
for (ArgEffects::iterator I = CustomArgEffects.begin(),
E = CustomArgEffects.end();
I != E; ++I) {
- ArgEffect Translated = getStopTrackingEquivalent(I->second);
+ ArgEffect Translated = getStopTrackingHardEquivalent(I->second);
if (Translated != DefEffect)
ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
}
- RetEffect RE = RetEffect::MakeNoRet();
+ RetEffect RE = RetEffect::MakeNoRetHard();
// Special cases where the callback argument CANNOT free the return value.
// This can generally only happen if we know that the callback will only be
// called when the return value is already being deallocated.
if (const FunctionCall *FC = dyn_cast<FunctionCall>(&Call)) {
- IdentifierInfo *Name = FC->getDecl()->getIdentifier();
-
- // This callback frees the associated buffer.
- if (Name->isStr("CGBitmapContextCreateWithData"))
- RE = S->getRetEffect();
+ if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
+ // When the CGBitmapContext is deallocated, the callback here will free
+ // the associated data buffer.
+ if (Name->isStr("CGBitmapContextCreateWithData"))
+ RE = S->getRetEffect();
+ }
}
S = getPersistentSummary(RE, RecEffect, DefEffect);
}
+
+ // Special case '[super init];' and '[self init];'
+ //
+ // Even though calling '[super init]' without assigning the result to self
+ // and checking if the parent returns 'nil' is a bad pattern, it is common.
+ // Additionally, our Self Init checker already warns about it. To avoid
+ // overwhelming the user with messages from both checkers, we model the case
+ // of '[super init]' in cases when it is not consumed by another expression
+ // as if the call preserves the value of 'self'; essentially, assuming it can
+ // never fail and return 'nil'.
+ // Note, we don't want to just stop tracking the value since we want the
+ // RetainCount checker to report leaks and use-after-free if SelfInit checker
+ // is turned off.
+ if (const ObjCMethodCall *MC = dyn_cast<ObjCMethodCall>(&Call)) {
+ if (MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper()) {
+
+ // Check if the message is not consumed, we know it will not be used in
+ // an assignment, ex: "self = [super init]".
+ const Expr *ME = MC->getOriginExpr();
+ const LocationContext *LCtx = MC->getLocationContext();
+ ParentMap &PM = LCtx->getAnalysisDeclContext()->getParentMap();
+ if (!PM.isConsumedExpr(ME)) {
+ RetainSummaryTemplate ModifiableSummaryTemplate(S, *this);
+ ModifiableSummaryTemplate->setReceiverEffect(DoNothing);
+ ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet());
+ }
+ }
+
+ }
}
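// The unconsumed-init pattern handled above, in user code (illustrative):
//
//   - (id)init {
//     [super init];   // result not assigned to 'self' nor checked for nil
//     return self;    // modeled as if 'self' survived the call unchanged
//   }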
const RetainSummary *
@@ -1036,6 +1061,8 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
// The headers on OS X 10.8 use cf_consumed/ns_returns_retained,
// but we can fully model NSMakeCollectable ourselves.
AllowAnnotations = false;
+ } else if (FName == "CFPlugInInstanceCreate") {
+ S = getPersistentSummary(RetEffect::MakeNoRet());
} else if (FName == "IOBSDNameMatching" ||
FName == "IOServiceMatching" ||
FName == "IOServiceNameMatching" ||
@@ -1108,6 +1135,11 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
break;
if (RetTy->isPointerType()) {
+ if (FD->getAttr<CFAuditedTransferAttr>()) {
+ S = getCFCreateGetRuleSummary(FD);
+ break;
+ }
+
// For CoreFoundation ('CF') types.
if (cocoa::isRefType(RetTy, "CF", FName)) {
if (isRetain(FD, FName))
@@ -1347,22 +1379,6 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
const RetainSummary *
RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
Selector S, QualType RetTy) {
-
- if (MD) {
- // Scan the method decl for 'void*' arguments. These should be treated
- // as 'StopTracking' because they are often used with delegates.
- // Delegates are a frequent form of false positives with the retain
- // count checker.
- unsigned i = 0;
- for (ObjCMethodDecl::param_const_iterator I = MD->param_begin(),
- E = MD->param_end(); I != E; ++I, ++i)
- if (const ParmVarDecl *PD = *I) {
- QualType Ty = Ctx.getCanonicalType(PD->getType());
- if (Ty.getLocalUnqualifiedType() == Ctx.VoidPtrTy)
- ScratchArgs = AF.add(ScratchArgs, i, StopTracking);
- }
- }
-
// Any special effects?
ArgEffect ReceiverEff = DoNothing;
RetEffect ResultEff = RetEffect::MakeNoRet();
@@ -1441,9 +1457,9 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
StringRef Slot = S.getNameForSlot(i);
if (Slot.substr(Slot.size() - 8).equals_lower("delegate")) {
if (ResultEff == ObjCInitRetE)
- ResultEff = RetEffect::MakeNoRet();
+ ResultEff = RetEffect::MakeNoRetHard();
else
- ReceiverEff = StopTracking;
+ ReceiverEff = StopTrackingHard;
}
}
}
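// The delegate pattern motivating the "Hard" variants here (illustrative):
//
//   [scrollView setDelegate:controller];
//
// The receiver may reference 'controller' later without retaining it, so
// tracking must stop even when the setter's body has been inlined.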
@@ -2174,6 +2190,7 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
// If allocation happened in a function different from the leak node context,
// do not report the binding.
+ assert(N && "Could not find allocation node");
if (N->getLocationContext() != LeakContext) {
FirstBinding = 0;
}
@@ -2229,27 +2246,36 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
// Get the retain count.
const RefVal* RV = getRefBinding(EndN->getState(), Sym);
+ assert(RV);
if (RV->getKind() == RefVal::ErrorLeakReturned) {
// FIXME: Per comments in rdar://6320065, "create" only applies to CF
// objects. Only "copy", "alloc", "retain" and "new" transfer ownership
// to the caller for NS objects.
const Decl *D = &EndN->getCodeDecl();
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- os << " is returned from a method whose name ('"
- << MD->getSelector().getAsString()
- << "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'."
- " This violates the naming convention rules"
- " given in the Memory Management Guide for Cocoa";
- }
+
+ os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
+ : " is returned from a function ");
+
+ if (D->getAttr<CFReturnsNotRetainedAttr>())
+ os << "that is annotated as CF_RETURNS_NOT_RETAINED";
+ else if (D->getAttr<NSReturnsNotRetainedAttr>())
+ os << "that is annotated as NS_RETURNS_NOT_RETAINED";
else {
- const FunctionDecl *FD = cast<FunctionDecl>(D);
- os << " is returned from a function whose name ('"
- << *FD
- << "') does not contain 'Copy' or 'Create'. This violates the naming"
- " convention rules given in the Memory Management Guide for Core"
- " Foundation";
- }
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ os << "whose name ('" << MD->getSelector().getAsString()
+ << "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'."
+ " This violates the naming convention rules"
+ " given in the Memory Management Guide for Cocoa";
+ }
+ else {
+ const FunctionDecl *FD = cast<FunctionDecl>(D);
+ os << "whose name ('" << *FD
+ << "') does not contain 'Copy' or 'Create'. This violates the naming"
+ " convention rules given in the Memory Management Guide for Core"
+ " Foundation";
+ }
+ }
}
else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
ObjCMethodDecl &MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
@@ -2474,6 +2500,10 @@ public:
void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
CheckerContext &C) const;
+ void processSummaryOfInlined(const RetainSummary &Summ,
+ const CallEvent &Call,
+ CheckerContext &C) const;
+
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
@@ -2499,8 +2529,8 @@ public:
void checkEndPath(CheckerContext &C) const;
ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
- RefVal V, ArgEffect E, RefVal::Kind &hasErr,
- CheckerContext &C) const;
+ RefVal V, ArgEffect E, RefVal::Kind &hasErr,
+ CheckerContext &C) const;
void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
RefVal::Kind ErrorKind, SymbolRef Sym,
@@ -2515,13 +2545,12 @@ public:
SmallVectorImpl<SymbolRef> &Leaked) const;
std::pair<ExplodedNode *, ProgramStateRef >
- handleAutoreleaseCounts(ProgramStateRef state,
- GenericNodeBuilderRefCount Bd, ExplodedNode *Pred,
- CheckerContext &Ctx, SymbolRef Sym, RefVal V) const;
+ handleAutoreleaseCounts(ProgramStateRef state, ExplodedNode *Pred,
+ const ProgramPointTag *Tag, CheckerContext &Ctx,
+ SymbolRef Sym, RefVal V) const;
ExplodedNode *processLeaks(ProgramStateRef state,
SmallVectorImpl<SymbolRef> &Leaked,
- GenericNodeBuilderRefCount &Builder,
CheckerContext &Ctx,
ExplodedNode *Pred = 0) const;
};
@@ -2685,11 +2714,13 @@ void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
void RetainCountChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- if (C.wasInlined)
- return;
-
RetainSummaryManager &Summaries = getSummaryManager(C);
const RetainSummary *Summ = Summaries.getSummary(Call, C.getState());
+
+ if (C.wasInlined) {
+ processSummaryOfInlined(*Summ, Call, C);
+ return;
+ }
checkSummary(*Summ, Call, C);
}
@@ -2721,6 +2752,45 @@ static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
return RetTy;
}
+// Even when the function is inlined, we may not get modeling that exactly
+// matches what the retain count checker expects. For example, we still need
+// to stop tracking the symbols which were marked with StopTrackingHard.
+void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
+ const CallEvent &CallOrMsg,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ // Evaluate the effect of the arguments.
+ for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
+ if (Summ.getArg(idx) == StopTrackingHard) {
+ SVal V = CallOrMsg.getArgSVal(idx);
+ if (SymbolRef Sym = V.getAsLocSymbol()) {
+ state = removeRefBinding(state, Sym);
+ }
+ }
+ }
+
+ // Evaluate the effect on the message receiver.
+ const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
+ if (MsgInvocation) {
+ if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
+ if (Summ.getReceiverEffect() == StopTrackingHard) {
+ state = removeRefBinding(state, Sym);
+ }
+ }
+ }
+
+ // Consult the summary for the return value.
+ RetEffect RE = Summ.getRetEffect();
+ if (RE.getKind() == RetEffect::NoRetHard) {
+ SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
+ if (Sym)
+ state = removeRefBinding(state, Sym);
+ }
+
+ C.addTransition(state);
+}
+
void RetainCountChecker::checkSummary(const RetainSummary &Summ,
const CallEvent &CallOrMsg,
CheckerContext &C) const {
@@ -2755,7 +2825,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
if (const RefVal *T = getRefBinding(state, Sym)) {
ReceiverIsTracked = true;
state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(),
- hasErr, C);
+ hasErr, C);
if (hasErr) {
ErrorRange = MsgInvocation->getOriginExpr()->getReceiverRange();
ErrorSym = Sym;
@@ -2786,13 +2856,13 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
llvm_unreachable("Unhandled RetEffect.");
case RetEffect::NoRet:
+ case RetEffect::NoRetHard:
// No work necessary.
break;
case RetEffect::OwnedAllocatedSymbol:
case RetEffect::OwnedSymbol: {
- SymbolRef Sym = state->getSVal(CallOrMsg.getOriginExpr(),
- C.getLocationContext()).getAsSymbol();
+ SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
if (!Sym)
break;
@@ -2811,10 +2881,10 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
case RetEffect::ARCNotOwnedSymbol:
case RetEffect::NotOwnedSymbol: {
const Expr *Ex = CallOrMsg.getOriginExpr();
- SymbolRef Sym = state->getSVal(Ex, C.getLocationContext()).getAsSymbol();
+ SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
if (!Sym)
break;
-
+ assert(Ex);
// Use GetReturnType in order to give [NSFoo alloc] the type NSFoo *.
QualType ResultTy = GetReturnType(Ex, C.getASTContext());
state = setRefBinding(state, Sym, RefVal::makeNotOwned(RE.getObjKind(),
@@ -2864,8 +2934,8 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
case DecRefMsg:
E = IgnoreRetainMsg ? DoNothing : DecRef;
break;
- case DecRefMsgAndStopTracking:
- E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTracking;
+ case DecRefMsgAndStopTrackingHard:
+ E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTrackingHard;
break;
case MakeCollectable:
E = C.isObjCGCEnabled() ? DecRef : DoNothing;
@@ -2886,7 +2956,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
case DecRefMsg:
case IncRefMsg:
case MakeCollectable:
- case DecRefMsgAndStopTracking:
+ case DecRefMsgAndStopTrackingHard:
llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted");
case Dealloc:
@@ -2935,6 +3005,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
break;
case StopTracking:
+ case StopTrackingHard:
return removeRefBinding(state, sym);
case IncRef:
@@ -2955,7 +3026,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
case DecRef:
case DecRefBridgedTransfered:
- case DecRefAndStopTracking:
+ case DecRefAndStopTrackingHard:
switch (V.getKind()) {
default:
// case 'RefVal::Released' handled above.
@@ -2966,7 +3037,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
if (V.getCount() == 1)
V = V ^ (E == DecRefBridgedTransfered ?
RefVal::NotOwned : RefVal::Released);
- else if (E == DecRefAndStopTracking)
+ else if (E == DecRefAndStopTrackingHard)
return removeRefBinding(state, sym);
V = V - 1;
@@ -2974,7 +3045,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
case RefVal::NotOwned:
if (V.getCount() > 0) {
- if (E == DecRefAndStopTracking)
+ if (E == DecRefAndStopTrackingHard)
return removeRefBinding(state, sym);
V = V - 1;
} else {
@@ -3035,7 +3106,7 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St,
C.isObjCGCEnabled(), SummaryLog,
N, Sym);
report->addRange(ErrorRange);
- C.EmitReport(report);
+ C.emitReport(report);
}
//===----------------------------------------------------------------------===//
@@ -3090,8 +3161,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
if (RetVal.isUnknown()) {
// If the receiver is unknown, conjure a return value.
SValBuilder &SVB = C.getSValBuilder();
- unsigned Count = C.getCurrentBlockCount();
- RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count);
+ RetVal = SVB.conjureSymbolVal(0, CE, LCtx, ResultTy, C.blockCount());
}
state = state->BindExpr(CE, LCtx, RetVal, false);
@@ -3105,8 +3175,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
Binding = getRefBinding(state, Sym);
// Invalidate the argument region.
- unsigned Count = C.getCurrentBlockCount();
- state = state->invalidateRegions(ArgRegion, CE, Count, LCtx);
+ state = state->invalidateRegions(ArgRegion, CE, C.blockCount(), LCtx);
// Restore the refcount status of the argument.
if (Binding)
@@ -3121,12 +3190,6 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Handle return statements.
//===----------------------------------------------------------------------===//
-// Return true if the current LocationContext has no caller context.
-static bool inTopFrame(CheckerContext &C) {
- const LocationContext *LC = C.getLocationContext();
- return LC->getParent() == 0;
-}
-
void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
CheckerContext &C) const {
@@ -3135,7 +3198,7 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
// better checking even for inlined calls, and see if they match
// with their expected semantics (e.g., the method should return a retained
// object, etc.).
- if (!inTopFrame(C))
+ if (!C.inTopFrame())
return;
const Expr *RetE = S->getRetValue();
@@ -3196,8 +3259,8 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
// Update the autorelease counts.
static SimpleProgramPointTag
AutoreleaseTag("RetainCountChecker : Autorelease");
- GenericNodeBuilderRefCount Bd(C, &AutoreleaseTag);
- llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, C, Sym, X);
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Pred, &AutoreleaseTag,
+ C, Sym, X);
// Did we cache out?
if (!Pred)
@@ -3267,7 +3330,7 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
new CFRefLeakReport(*getLeakAtReturnBug(LOpts, GCEnabled),
LOpts, GCEnabled, SummaryLog,
N, Sym, C);
- C.EmitReport(report);
+ C.emitReport(report);
}
}
}
@@ -3288,7 +3351,7 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
new CFRefReport(*returnNotOwnedForOwned,
C.getASTContext().getLangOpts(),
C.isObjCGCEnabled(), SummaryLog, N, Sym);
- C.EmitReport(report);
+ C.emitReport(report);
}
}
}
@@ -3354,18 +3417,19 @@ ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
// too bad since the number of symbols we will track in practice are
// probably small and evalAssume is only called at branches and a few
// other places.
- RefBindings B = state->get<RefBindings>();
+ RefBindingsTy B = state->get<RefBindings>();
if (B.isEmpty())
return state;
bool changed = false;
- RefBindings::Factory &RefBFactory = state->get_context<RefBindings>();
+ RefBindingsTy::Factory &RefBFactory = state->get_context<RefBindings>();
- for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- // Check if the symbol is null (or equal to any constant).
- // If this is the case, stop tracking the symbol.
- if (state->getSymVal(I.getKey())) {
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ // Check if the symbol is null; if so, stop tracking the symbol.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ if (AllocFailed.isConstrainedTrue()) {
changed = true;
B = RefBFactory.remove(B, I.getKey());
}
@@ -3410,8 +3474,8 @@ RetainCountChecker::checkRegionChanges(ProgramStateRef state,
std::pair<ExplodedNode *, ProgramStateRef >
RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
- GenericNodeBuilderRefCount Bd,
ExplodedNode *Pred,
+ const ProgramPointTag *Tag,
CheckerContext &Ctx,
SymbolRef Sym, RefVal V) const {
unsigned ACnt = V.getAutoreleaseCount();
@@ -3440,7 +3504,7 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
V.setAutoreleaseCount(0);
}
state = setRefBinding(state, Sym, V);
- ExplodedNode *N = Bd.MakeNode(state, Pred);
+ ExplodedNode *N = Ctx.addTransition(state, Pred, Tag);
if (N == 0)
state = 0;
return std::make_pair(N, state);
@@ -3451,7 +3515,8 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
V = V ^ RefVal::ErrorOverAutorelease;
state = setRefBinding(state, Sym, V);
- if (ExplodedNode *N = Bd.MakeNode(state, Pred, true)) {
+ ExplodedNode *N = Ctx.generateSink(state, Pred, Tag);
+ if (N) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << "Object over-autoreleased: object was sent -autorelease ";
@@ -3466,7 +3531,7 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
CFRefReport *report =
new CFRefReport(*overAutorelease, LOpts, /* GCEnabled = */ false,
SummaryLog, N, Sym, os.str());
- Ctx.EmitReport(report);
+ Ctx.emitReport(report);
}
return std::make_pair((ExplodedNode *)0, (ProgramStateRef )0);
@@ -3492,14 +3557,13 @@ RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
ExplodedNode *
RetainCountChecker::processLeaks(ProgramStateRef state,
SmallVectorImpl<SymbolRef> &Leaked,
- GenericNodeBuilderRefCount &Builder,
CheckerContext &Ctx,
ExplodedNode *Pred) const {
if (Leaked.empty())
return Pred;
// Generate an intermediate node representing the leak point.
- ExplodedNode *N = Builder.MakeNode(state, Pred);
+ ExplodedNode *N = Ctx.addTransition(state, Pred);
if (N) {
for (SmallVectorImpl<SymbolRef>::iterator
@@ -3513,7 +3577,7 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
CFRefLeakReport *report = new CFRefLeakReport(*BT, LOpts, GCEnabled,
SummaryLog, N, *I, Ctx);
- Ctx.EmitReport(report);
+ Ctx.emitReport(report);
}
}
@@ -3522,13 +3586,12 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
void RetainCountChecker::checkEndPath(CheckerContext &Ctx) const {
ProgramStateRef state = Ctx.getState();
- GenericNodeBuilderRefCount Bd(Ctx);
- RefBindings B = state->get<RefBindings>();
+ RefBindingsTy B = state->get<RefBindings>();
ExplodedNode *Pred = Ctx.getPredecessor();
- for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, Ctx,
- I->first, I->second);
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Pred, /*Tag=*/0,
+ Ctx, I->first, I->second);
if (!state)
return;
}
@@ -3543,10 +3606,10 @@ void RetainCountChecker::checkEndPath(CheckerContext &Ctx) const {
B = state->get<RefBindings>();
SmallVector<SymbolRef, 10> Leaked;
- for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I)
state = handleSymbolDeath(state, I->first, I->second, Leaked);
- processLeaks(state, Leaked, Bd, Ctx, Pred);
+ processLeaks(state, Leaked, Ctx, Pred);
}
const ProgramPointTag *
@@ -3567,7 +3630,7 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
ExplodedNode *Pred = C.getPredecessor();
ProgramStateRef state = C.getState();
- RefBindings B = state->get<RefBindings>();
+ RefBindingsTy B = state->get<RefBindings>();
// Update counts from autorelease pools
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
@@ -3576,8 +3639,8 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
if (const RefVal *T = B.lookup(Sym)){
// Use the symbol as the tag.
// FIXME: This might not be as unique as we would like.
- GenericNodeBuilderRefCount Bd(C, getDeadSymbolTag(Sym));
- llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, C,
+ const ProgramPointTag *Tag = getDeadSymbolTag(Sym);
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Pred, Tag, C,
Sym, *T);
if (!state)
return;
@@ -3593,17 +3656,14 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
state = handleSymbolDeath(state, *I, *T, Leaked);
}
- {
- GenericNodeBuilderRefCount Bd(C, this);
- Pred = processLeaks(state, Leaked, Bd, C, Pred);
- }
+ Pred = processLeaks(state, Leaked, C, Pred);
// Did we cache out?
if (!Pred)
return;
// Now generate a new node that nukes the old bindings.
- RefBindings::Factory &F = state->get_context<RefBindings>();
+ RefBindingsTy::Factory &F = state->get_context<RefBindings>();
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I)
@@ -3616,12 +3676,12 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
- RefBindings B = State->get<RefBindings>();
+ RefBindingsTy B = State->get<RefBindings>();
if (!B.isEmpty())
Out << Sep << NL;
- for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
Out << I->first << " : ";
I->second.print(Out);
Out << NL;
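
For context on the RetainCountChecker hunks above: the removed GenericNodeBuilderRefCount wrapper is superseded by CheckerContext's tag-taking transition APIs. Below is a minimal sketch of the new calling convention; the helper itself is hypothetical, but it mirrors the pattern in handleAutoreleaseCounts and assumes the same names (Ctx, state, Pred, Tag).

// Hypothetical helper mirroring the pattern used in handleAutoreleaseCounts:
// addTransition for an ordinary tagged node, generateSink when the path must
// be cut off, and a null state when the engine caches out.
static std::pair<ExplodedNode *, ProgramStateRef>
transitionOrSink(CheckerContext &Ctx, ProgramStateRef state,
                 ExplodedNode *Pred, const ProgramPointTag *Tag, bool IsError) {
  ExplodedNode *N = IsError ? Ctx.generateSink(state, Pred, Tag)
                            : Ctx.addTransition(state, Pred, Tag);
  if (!N)      // cached out: this node was already reached on another path
    state = 0;
  return std::make_pair(N, state);
}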
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index 6e56593..f3560aa 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -82,7 +82,7 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
new BugReport(*BT, BT->getDescription(), N);
report->addRange(RetE->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index ca2a55d..37ec1aa 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -16,6 +16,7 @@
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -41,6 +42,19 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
if (!C.getState()->getSVal(RetE, C.getLocationContext()).isUndef())
return;
+ // "return;" is modeled to evaluate to an UndefinedValue. Allow UndefinedValue
+ // to be returned in functions returning void to support the following pattern:
+ // void foo() {
+ // return;
+ // }
+ // void test() {
+ // return foo();
+ // }
+ const StackFrameContext *SFC = C.getStackFrame();
+ QualType RT = CallEvent::getDeclaredResultType(SFC->getDecl());
+ if (!RT.isNull() && RT->isSpecificBuiltinType(BuiltinType::Void))
+ return;
+
ExplodedNode *N = C.generateSink();
if (!N)
@@ -53,11 +67,10 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
BugReport *report =
new BugReport(*BT, BT->getDescription(), N);
- report->disablePathPruning();
report->addRange(RetE->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, RetE, report);
+ bugreporter::trackNullOrUndefValue(N, RetE, *report);
- C.EmitReport(report);
+ C.emitReport(report);
}
void ento::registerReturnUndefChecker(CheckerManager &mgr) {
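
Two API renames recur through the rest of this patch: CheckerContext::EmitReport becomes emitReport, and bugreporter::addTrackNullOrUndefValueVisitor becomes bugreporter::trackNullOrUndefValue, now taking the report by reference. A sketch of the updated reporting sequence, assuming a checker with BugType BT, an ExplodedNode *N, and an expression RetE as above:

// Sketch of the post-rename reporting sequence (names as in the hunks above):
BugReport *report = new BugReport(*BT, BT->getDescription(), N);
report->addRange(RetE->getSourceRange());
bugreporter::trackNullOrUndefValue(N, RetE, *report); // takes the report by ref
C.emitReport(report);                                 // was C.EmitReport(report)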
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
new file mode 100644
index 0000000..ee055ad
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -0,0 +1,348 @@
+//===-- SimpleStreamChecker.cpp ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a checker for proper use of fopen/fclose APIs.
+// - If a file has been closed with fclose, it should not be accessed again.
+// Accessing a closed file results in undefined behavior.
+// - If a file was opened with fopen, it must be closed with fclose before
+// the execution ends. Failing to do so results in a resource leak.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+typedef llvm::SmallVector<SymbolRef, 2> SymbolVector;
+
+struct StreamState {
+private:
+ enum Kind { Opened, Closed } K;
+ StreamState(Kind InK) : K(InK) { }
+
+public:
+ bool isOpened() const { return K == Opened; }
+ bool isClosed() const { return K == Closed; }
+
+ static StreamState getOpened() { return StreamState(Opened); }
+ static StreamState getClosed() { return StreamState(Closed); }
+
+ bool operator==(const StreamState &X) const {
+ return K == X.K;
+ }
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ }
+};
+
+class SimpleStreamChecker : public Checker<check::PostCall,
+ check::PreCall,
+ check::DeadSymbols,
+ check::Bind,
+ check::RegionChanges> {
+
+ mutable IdentifierInfo *IIfopen, *IIfclose;
+
+ OwningPtr<BugType> DoubleCloseBugType;
+ OwningPtr<BugType> LeakBugType;
+
+ void initIdentifierInfo(ASTContext &Ctx) const;
+
+ void reportDoubleClose(SymbolRef FileDescSym,
+ const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void reportLeaks(SymbolVector LeakedStreams,
+ CheckerContext &C,
+ ExplodedNode *ErrNode) const;
+
+ bool guaranteedNotToCloseFile(const CallEvent &Call) const;
+
+public:
+ SimpleStreamChecker();
+
+ /// Process fopen.
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ /// Process fclose.
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+
+ /// Deal with symbol escape as a byproduct of a bind.
+ void checkBind(SVal location, SVal val, const Stmt*S,
+ CheckerContext &C) const;
+
+ /// Deal with symbol escape as a byproduct of a region change.
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) const;
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const {
+ return true;
+ }
+};
+
+} // end anonymous namespace
+
+/// The state of the checker is a map from tracked stream symbols to their
+/// state. Let's store it in the ProgramState.
+REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+
+namespace {
+class StopTrackingCallback : public SymbolVisitor {
+ ProgramStateRef state;
+public:
+ StopTrackingCallback(ProgramStateRef st) : state(st) {}
+ ProgramStateRef getState() const { return state; }
+
+ bool VisitSymbol(SymbolRef sym) {
+ state = state->remove<StreamMap>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+
+SimpleStreamChecker::SimpleStreamChecker() : IIfopen(0), IIfclose(0) {
+ // Initialize the bug types.
+ DoubleCloseBugType.reset(new BugType("Double fclose",
+ "Unix Stream API Error"));
+
+ LeakBugType.reset(new BugType("Resource Leak",
+ "Unix Stream API Error"));
+ // Sinks are higher-importance bugs, as are calls to assert() or exit(0);
+ // suppress the leak report on paths that end in one.
+ LeakBugType->setSuppressOnSink(true);
+}
+
+void SimpleStreamChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ initIdentifierInfo(C.getASTContext());
+
+ if (!Call.isGlobalCFunction())
+ return;
+
+ if (Call.getCalleeIdentifier() != IIfopen)
+ return;
+
+ // Get the symbolic value corresponding to the file handle.
+ SymbolRef FileDesc = Call.getReturnValue().getAsSymbol();
+ if (!FileDesc)
+ return;
+
+ // Generate the next transition (an edge in the exploded graph).
+ ProgramStateRef State = C.getState();
+ State = State->set<StreamMap>(FileDesc, StreamState::getOpened());
+ C.addTransition(State);
+}
+
+void SimpleStreamChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ initIdentifierInfo(C.getASTContext());
+
+ if (!Call.isGlobalCFunction())
+ return;
+
+ if (Call.getCalleeIdentifier() != IIfclose)
+ return;
+
+ if (Call.getNumArgs() != 1)
+ return;
+
+ // Get the symbolic value corresponding to the file handle.
+ SymbolRef FileDesc = Call.getArgSVal(0).getAsSymbol();
+ if (!FileDesc)
+ return;
+
+ // Check if the stream has already been closed.
+ ProgramStateRef State = C.getState();
+ const StreamState *SS = State->get<StreamMap>(FileDesc);
+ if (SS && SS->isClosed()) {
+ reportDoubleClose(FileDesc, Call, C);
+ return;
+ }
+
+ // Generate the next transition, in which the stream is closed.
+ State = State->set<StreamMap>(FileDesc, StreamState::getClosed());
+ C.addTransition(State);
+}
+
+static bool isLeaked(SymbolRef Sym, const StreamState &SS,
+ bool IsSymDead, ProgramStateRef State) {
+ if (IsSymDead && SS.isOpened()) {
+ // If a symbol is NULL, assume that fopen failed on this path.
+ // A symbol should only be considered leaked if it is non-null.
+ ConstraintManager &CMgr = State->getConstraintManager();
+ ConditionTruthVal OpenFailed = CMgr.isNull(State, Sym);
+ return !OpenFailed.isConstrainedTrue();
+ }
+ return false;
+}
+
+void SimpleStreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolVector LeakedStreams;
+ StreamMapTy TrackedStreams = State->get<StreamMap>();
+ for (StreamMapTy::iterator I = TrackedStreams.begin(),
+ E = TrackedStreams.end(); I != E; ++I) {
+ SymbolRef Sym = I->first;
+ bool IsSymDead = SymReaper.isDead(Sym);
+
+ // Collect leaked symbols.
+ if (isLeaked(Sym, I->second, IsSymDead, State))
+ LeakedStreams.push_back(Sym);
+
+ // Remove the dead symbol from the streams map.
+ if (IsSymDead)
+ State = State->remove<StreamMap>(Sym);
+ }
+
+ ExplodedNode *N = C.addTransition(State);
+ reportLeaks(LeakedStreams, C, N);
+}
+
+void SimpleStreamChecker::reportDoubleClose(SymbolRef FileDescSym,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ // We reached a bug, stop exploring the path here by generating a sink.
+ ExplodedNode *ErrNode = C.generateSink();
+ // If we've already reached this node on another path, return.
+ if (!ErrNode)
+ return;
+
+ // Generate the report.
+ BugReport *R = new BugReport(*DoubleCloseBugType,
+ "Closing a previously closed file stream", ErrNode);
+ R->addRange(Call.getSourceRange());
+ R->markInteresting(FileDescSym);
+ C.emitReport(R);
+}
+
+void SimpleStreamChecker::reportLeaks(SymbolVector LeakedStreams,
+ CheckerContext &C,
+ ExplodedNode *ErrNode) const {
+ // Attach bug reports to the leak node.
+ // TODO: Identify the leaked file descriptor.
+ for (llvm::SmallVector<SymbolRef, 2>::iterator
+ I = LeakedStreams.begin(), E = LeakedStreams.end(); I != E; ++I) {
+ BugReport *R = new BugReport(*LeakBugType,
+ "Opened file is never closed; potential resource leak", ErrNode);
+ R->markInteresting(*I);
+ C.emitReport(R);
+ }
+}
+
+// Check various ways a symbol can be invalidated.
+// Stop tracking symbols when a value escapes as a result of checkBind.
+// A value escapes in three possible cases:
+// (1) We are binding to something that is not a memory region.
+// (2) We are binding to a MemRegion that does not have stack storage.
+// (3) We are binding to a MemRegion with stack storage that the store
+// does not understand.
+void SimpleStreamChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = true;
+ ProgramStateRef state = C.getState();
+
+ if (loc::MemRegionVal *regionLoc = dyn_cast<loc::MemRegionVal>(&loc)) {
+ escapes = !regionLoc->getRegion()->hasStackStorage();
+
+ if (!escapes) {
+ // To test (3), generate a new state with the binding added. If it is
+ // the same state, then it escapes (since the store cannot represent
+ // the binding). Do this only if we know that the store is not supposed
+ // to generate the same state.
+ SVal StoredVal = state->getSVal(regionLoc->getRegion());
+ if (StoredVal != val)
+ escapes = (state == (state->bindLoc(*regionLoc, val)));
+ }
+ }
+
+ // If the store can represent the binding and we are storing to something
+ // that has local storage, the value has not escaped; just return and
+ // continue as-is.
+ if (!escapes)
+ return;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
+ C.addTransition(state);
+}
+
+bool SimpleStreamChecker::guaranteedNotToCloseFile(const CallEvent &Call) const {
+ // If it's not in a system header, assume it might close a file.
+ if (!Call.isInSystemHeader())
+ return false;
+
+ // Handle cases where we know a buffer's /address/ can escape.
+ if (Call.argumentsMayEscape())
+ return false;
+
+ // Note that even though fclose closes the file, we do not list it here,
+ // since the checker itself models that call.
+
+ return true;
+}
+
+// If the symbol we are tracking is invalidated, do not track the symbol as
+// we cannot reason about it anymore.
+ProgramStateRef
+SimpleStreamChecker::checkRegionChanges(ProgramStateRef State,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallEvent *Call) const {
+
+ if (!invalidated || invalidated->empty())
+ return State;
+
+ // If it's a call which might close the file, we assume that all regions
+ // (explicit and implicit) escaped. Otherwise, whitelist explicit pointers
+ // (the parameters to the call); we still can track them.
+ llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
+ if (!Call || guaranteedNotToCloseFile(*Call)) {
+ for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+ E = ExplicitRegions.end(); I != E; ++I) {
+ if (const SymbolicRegion *R = (*I)->StripCasts()->getAs<SymbolicRegion>())
+ WhitelistedSymbols.insert(R->getSymbol());
+ }
+ }
+
+ for (StoreManager::InvalidatedSymbols::const_iterator I=invalidated->begin(),
+ E = invalidated->end(); I!=E; ++I) {
+ SymbolRef sym = *I;
+ if (WhitelistedSymbols.count(sym))
+ continue;
+ // The symbol escaped. Optimistically, assume that the corresponding file
+ // handle will be closed somewhere else.
+ State = State->remove<StreamMap>(sym);
+ }
+ return State;
+}
+
+void SimpleStreamChecker::initIdentifierInfo(ASTContext &Ctx) const {
+ if (IIfopen)
+ return;
+ IIfopen = &Ctx.Idents.get("fopen");
+ IIfclose = &Ctx.Idents.get("fclose");
+}
+
+void ento::registerSimpleStreamChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SimpleStreamChecker>();
+}
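
A hypothetical translation unit that exercises both diagnostics defined by this new checker; the warning texts are the ones constructed in reportDoubleClose and reportLeaks above:

#include <stdio.h>

void doubleClose(void) {
  FILE *F = fopen("log.txt", "r");
  if (!F)
    return;
  fclose(F);
  fclose(F);  // "Closing a previously closed file stream"
}

void leak(void) {
  FILE *F = fopen("log.txt", "r");
  if (!F)
    return;
  // F dies here while still in the Opened state:
  // "Opened file is never closed; potential resource leak"
}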
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 54cf569..0c2f266 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -109,7 +109,7 @@ void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *
if (range.isValid())
report->addRange(range);
- C.EmitReport(report);
+ C.emitReport(report);
}
void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
@@ -118,8 +118,10 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
const Expr *RetE = RS->getRetValue();
if (!RetE)
return;
-
- SVal V = C.getState()->getSVal(RetE, C.getLocationContext());
+ RetE = RetE->IgnoreParens();
+
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal V = C.getState()->getSVal(RetE, LCtx);
const MemRegion *R = V.getAsRegion();
if (!R)
@@ -132,8 +134,9 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
return;
// Return stack memory in an ancestor stack frame is fine.
- const StackFrameContext *SFC = SS->getStackFrame();
- if (SFC != C.getLocationContext()->getCurrentStackFrame())
+ const StackFrameContext *CurFrame = LCtx->getCurrentStackFrame();
+ const StackFrameContext *MemFrame = SS->getStackFrame();
+ if (MemFrame != CurFrame)
return;
// Automatic reference counting automatically copies blocks.
@@ -141,6 +144,14 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
isa<BlockDataRegion>(R))
return;
+ // Returning a record by value is fine. (In this case, the returned
+ // expression will be a copy-constructor, possibly wrapped in an
+ // ExprWithCleanups node.)
+ if (const ExprWithCleanups *Cleanup = dyn_cast<ExprWithCleanups>(RetE))
+ RetE = Cleanup->getSubExpr();
+ if (isa<CXXConstructExpr>(RetE) && RetE->getType()->isRecordType())
+ return;
+
EmitStackError(C, R, RetE);
}
@@ -221,7 +232,7 @@ void StackAddrEscapeChecker::checkEndPath(CheckerContext &Ctx) const {
if (range.isValid())
report->addRange(range);
- Ctx.EmitReport(report);
+ Ctx.emitReport(report);
}
}
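
The new ExprWithCleanups/CXXConstructExpr case separates by-value record returns from genuine stack-address escapes. A hypothetical input showing both sides of that distinction:

struct Big { int data[16]; };

Big byValue() {
  Big B = Big();
  return B;        // copy-constructed return value: no longer flagged
}

int *byAddress() {
  int Local = 0;
  return &Local;   // still flagged: address of stack memory escapes the frame
}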
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 731dd66..c06ba7c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -104,15 +104,8 @@ private:
} // end anonymous namespace
-namespace clang {
-namespace ento {
- template <>
- struct ProgramStateTrait<StreamState>
- : public ProgramStatePartialTrait<llvm::ImmutableMap<SymbolRef, StreamState> > {
- static void *GDMIndex() { static int x; return &x; }
- };
-}
-}
+REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+
bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
const FunctionDecl *FD = C.getCalleeDecl(CE);
@@ -219,11 +212,11 @@ void StreamChecker::Tmpfile(CheckerContext &C, const CallExpr *CE) const {
void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- unsigned Count = C.getCurrentBlockCount();
SValBuilder &svalBuilder = C.getSValBuilder();
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
DefinedSVal RetVal =
- cast<DefinedSVal>(svalBuilder.getConjuredSymbolVal(0, CE, LCtx, Count));
+ cast<DefinedSVal>(svalBuilder.conjureSymbolVal(0, CE, LCtx,
+ C.blockCount()));
state = state->BindExpr(CE, C.getLocationContext(), RetVal);
ConstraintManager &CM = C.getConstraintManager();
@@ -235,9 +228,9 @@ void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
if (SymbolRef Sym = RetVal.getAsSymbol()) {
// if RetVal is not NULL, set the symbol's state to Opened.
stateNotNull =
- stateNotNull->set<StreamState>(Sym,StreamState::getOpened(CE));
+ stateNotNull->set<StreamMap>(Sym,StreamState::getOpened(CE));
stateNull =
- stateNull->set<StreamState>(Sym, StreamState::getOpenFailed(CE));
+ stateNull->set<StreamMap>(Sym, StreamState::getOpenFailed(CE));
C.addTransition(stateNotNull);
C.addTransition(stateNull);
@@ -287,7 +280,7 @@ void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
"SEEK_SET, SEEK_END, or SEEK_CUR."));
BugReport *R = new BugReport(*BT_illegalwhence,
BT_illegalwhence->getDescription(), N);
- C.EmitReport(R);
+ C.emitReport(R);
}
}
@@ -363,7 +356,7 @@ ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state,
BT_nullfp.reset(new BuiltinBug("NULL stream pointer",
"Stream pointer might be NULL."));
BugReport *R =new BugReport(*BT_nullfp, BT_nullfp->getDescription(), N);
- C.EmitReport(R);
+ C.emitReport(R);
}
return 0;
}
@@ -378,7 +371,7 @@ ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
if (!Sym)
return state;
- const StreamState *SS = state->get<StreamState>(Sym);
+ const StreamState *SS = state->get<StreamMap>(Sym);
// If the file stream is not tracked, return.
if (!SS)
@@ -395,22 +388,24 @@ ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
" closed. Cause undefined behaviour."));
BugReport *R = new BugReport(*BT_doubleclose,
BT_doubleclose->getDescription(), N);
- C.EmitReport(R);
+ C.emitReport(R);
}
return NULL;
}
// Close the File Descriptor.
- return state->set<StreamState>(Sym, StreamState::getClosed(CE));
+ return state->set<StreamMap>(Sym, StreamState::getClosed(CE));
}
void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
+ // TODO: Clean up the state.
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
SymbolRef Sym = *I;
ProgramStateRef state = C.getState();
- const StreamState *SS = state->get<StreamState>(Sym);
+ const StreamState *SS = state->get<StreamMap>(Sym);
+ // TODO: Shouldn't we have a continue here?
if (!SS)
return;
@@ -422,7 +417,7 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
"Opened File never closed. Potential Resource leak."));
BugReport *R = new BugReport(*BT_ResourceLeak,
BT_ResourceLeak->getDescription(), N);
- C.EmitReport(R);
+ C.emitReport(R);
}
}
}
@@ -430,10 +425,9 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
void StreamChecker::checkEndPath(CheckerContext &Ctx) const {
ProgramStateRef state = Ctx.getState();
- typedef llvm::ImmutableMap<SymbolRef, StreamState> SymMap;
- SymMap M = state->get<StreamState>();
+ StreamMapTy M = state->get<StreamMap>();
- for (SymMap::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ for (StreamMapTy::iterator I = M.begin(), E = M.end(); I != E; ++I) {
StreamState SS = I->second;
if (SS.isOpened()) {
ExplodedNode *N = Ctx.addTransition(state);
@@ -443,7 +437,7 @@ void StreamChecker::checkEndPath(CheckerContext &Ctx) const {
"Opened File never closed. Potential Resource leak."));
BugReport *R = new BugReport(*BT_ResourceLeak,
BT_ResourceLeak->getDescription(), N);
- Ctx.EmitReport(R);
+ Ctx.emitReport(R);
}
}
}
@@ -460,12 +454,12 @@ void StreamChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
if (!Sym)
return;
- const StreamState *SS = state->get<StreamState>(Sym);
+ const StreamState *SS = state->get<StreamMap>(Sym);
if(!SS)
return;
if (SS->isOpened())
- state = state->set<StreamState>(Sym, StreamState::getEscaped(S));
+ state = state->set<StreamMap>(Sym, StreamState::getEscaped(S));
C.addTransition(state);
}
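
The REGISTER_MAP_WITH_PROGRAMSTATE macro used above generates roughly the trait boilerplate this hunk deletes, plus the StreamMapTy typedef the new code relies on. An approximate, simplified expansion for reference (the authoritative definition lives in ProgramStateTrait.h):

// Approximate expansion of
//   REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
namespace { class StreamMap {}; }
typedef llvm::ImmutableMap<SymbolRef, StreamState> StreamMapTy;
namespace clang {
namespace ento {
template <>
struct ProgramStateTrait<StreamMap>
    : public ProgramStatePartialTrait<StreamMapTy> {
  static void *GDMIndex() { static int Index; return &Index; }
};
}
}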
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index 1133682..382be84 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -52,7 +52,7 @@ void TaintTesterChecker::checkPostStmt(const Expr *E,
initBugType();
BugReport *report = new BugReport(*BT, "tainted",N);
report->addRange(E->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 70a33c7..70e141e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -99,11 +99,10 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
// Emit the bug report.
BugReport *R = new BugReport(*BT, BT->getDescription(), N);
- bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, R);
+ bugreporter::trackNullOrUndefValue(N, Ex, *R);
R->addRange(Ex->getSourceRange());
- R->disablePathPruning();
- Ctx.EmitReport(R);
+ Ctx.emitReport(R);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 675b38a..30ccffa 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -96,7 +96,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
R->addVisitor(new FindLastStoreBRVisitor(VRVal, VR));
R->disablePathPruning();
// need location of block
- C.EmitReport(R);
+ C.emitReport(R);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index e220499..415bab5 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -76,13 +76,12 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
BugReport *report = new BugReport(*BT, OS.str(), N);
if (Ex) {
report->addRange(Ex->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report);
+ bugreporter::trackNullOrUndefValue(N, Ex, *report);
}
else
- bugreporter::addTrackNullOrUndefValueVisitor(N, B, report);
+ bugreporter::trackNullOrUndefValue(N, B, *report);
- report->disablePathPruning();
- C.EmitReport(report);
+ C.emitReport(report);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index 6ae3c18..b3a83e8 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -42,8 +42,8 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
// Generate a report for this bug.
BugReport *R = new BugReport(*BT, BT->getName(), N);
R->addRange(A->getIdx()->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, A->getIdx(), R);
- C.EmitReport(R);
+ bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R);
+ C.emitReport(R);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 14a884e..410010a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -78,10 +78,9 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
BugReport *R = new BugReport(*BT, str, N);
if (ex) {
R->addRange(ex->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, ex, R);
+ bugreporter::trackNullOrUndefValue(N, ex, *R);
}
- R->disablePathPruning();
- C.EmitReport(R);
+ C.emitReport(R);
}
void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index d35455c..171e15b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -41,6 +41,7 @@ public:
void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckReallocfZero(CheckerContext &C, const CallExpr *CE) const;
void CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const;
void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const;
@@ -138,7 +139,7 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
"Call to 'open' requires a third argument when "
"the 'O_CREAT' flag is set", N);
report->addRange(oflagsEx->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
}
@@ -183,11 +184,12 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
BugReport *report = new BugReport(*BT_pthreadOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
- C.EmitReport(report);
+ C.emitReport(report);
}
//===----------------------------------------------------------------------===//
-// "calloc", "malloc", "realloc", "alloca" and "valloc" with allocation size 0
+// "calloc", "malloc", "realloc", "reallocf", "alloca" and "valloc"
+// with allocation size 0
//===----------------------------------------------------------------------===//
// FIXME: Eventually these should be rolled into the MallocChecker, but right now
// they're more basic and valuable for widespread use.
@@ -224,8 +226,8 @@ bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
BugReport *report = new BugReport(*BT_mallocZero, os.str(), N);
report->addRange(arg->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, arg, report);
- C.EmitReport(report);
+ bugreporter::trackNullOrUndefValue(N, arg, *report);
+ C.emitReport(report);
return true;
}
@@ -307,6 +309,11 @@ void UnixAPIChecker::CheckReallocZero(CheckerContext &C,
BasicAllocationCheck(C, CE, 2, 1, "realloc");
}
+void UnixAPIChecker::CheckReallocfZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 2, 1, "reallocf");
+}
+
void UnixAPIChecker::CheckAllocaZero(CheckerContext &C,
const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 1, 0, "alloca");
@@ -339,6 +346,7 @@ void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
.Case("calloc", &UnixAPIChecker::CheckCallocZero)
.Case("malloc", &UnixAPIChecker::CheckMallocZero)
.Case("realloc", &UnixAPIChecker::CheckReallocZero)
+ .Case("reallocf", &UnixAPIChecker::CheckReallocfZero)
.Cases("alloca", "__builtin_alloca", &UnixAPIChecker::CheckAllocaZero)
.Case("valloc", &UnixAPIChecker::CheckVallocZero)
.Default(NULL);
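
A hypothetical input for the newly covered call; reallocf is the BSD extension that frees the buffer when reallocation fails, and BasicAllocationCheck above treats its size argument exactly like realloc's:

#include <stdlib.h>

void shrinkToNothing(void *p) {
  p = reallocf(p, 0);  // flagged: zero-byte allocation, same as realloc(p, 0)
}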
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index fab4adf..58f9ec0 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -69,8 +69,8 @@ void VLASizeChecker::reportBug(VLASize_Kind Kind,
BugReport *report = new BugReport(*BT, os.str(), N);
report->addRange(SizeE->getSourceRange());
- bugreporter::addTrackNullOrUndefValueVisitor(N, SizeE, report);
- C.EmitReport(report);
+ bugreporter::trackNullOrUndefValue(N, SizeE, *report);
+ C.emitReport(report);
return;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index efeba17..011d4c09 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -20,33 +20,19 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
CheckerManager *checkerMgr,
- unsigned maxnodes, unsigned maxvisit,
- bool vizdot, bool vizubi,
- AnalysisPurgeMode purge,
- bool eager, bool trim,
- bool useUnoptimizedCFG,
- bool addImplicitDtors,
- bool eagerlyTrimEGraph,
- AnalysisIPAMode ipa,
- unsigned inlineMaxStack,
- unsigned inlineMaxFunctionSize,
- AnalysisInliningMode IMode,
- bool NoRetry)
- : AnaCtxMgr(useUnoptimizedCFG, addImplicitDtors, /*addInitializers=*/true),
- Ctx(ctx), Diags(diags), LangOpts(lang),
+ AnalyzerOptions &Options)
+ : AnaCtxMgr(Options.UnoptimizedCFG,
+ /*AddImplicitDtors=*/true,
+ /*AddInitializers=*/true,
+ Options.includeTemporaryDtorsInCFG(),
+ Options.shouldSynthesizeBodies()),
+ Ctx(ctx),
+ Diags(diags),
+ LangOpts(lang),
PathConsumers(PDC),
CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
- CheckerMgr(checkerMgr),
- MaxNodes(maxnodes), MaxVisit(maxvisit),
- VisualizeEGDot(vizdot), VisualizeEGUbi(vizubi), PurgeDead(purge),
- EagerlyAssume(eager), TrimGraph(trim),
- EagerlyTrimEGraph(eagerlyTrimEGraph),
- IPAMode(ipa),
- InlineMaxStackDepth(inlineMaxStack),
- InlineMaxFunctionSize(inlineMaxFunctionSize),
- InliningMode(IMode),
- NoRetryExhausted(NoRetry)
-{
+ CheckerMgr(checkerMgr),
+ options(Options) {
AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
}
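
The net effect of this constructor change at a call site, in a sketch where the setup values (Ctx, Diags, and so on) are placeholders matching the parameter names above:

// Before: a dozen loose knobs (maxnodes, maxvisit, vizdot, vizubi, ...).
// After: a single AnalyzerOptions& carries the whole configuration.
AnalysisManager AMgr(Ctx, Diags, LangOpts, PathConsumers,
                     CreateStoreMgr, CreateConstraintMgr,
                     CheckerMgr, Opts);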
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
new file mode 100644
index 0000000..da88589
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -0,0 +1,138 @@
+//===-- AnalyzerOptions.cpp - Analysis Engine Options -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains special accessors for analyzer configuration options
+// with string representations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace llvm;
+
+bool
+AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) {
+ if (IPAMode < Inlining)
+ return false;
+
+ if (!CXXMemberInliningMode) {
+ static const char *ModeKey = "c++-inlining";
+
+ StringRef ModeStr(Config.GetOrCreateValue(ModeKey,
+ "methods").getValue());
+
+ CXXInlineableMemberKind &MutableMode =
+ const_cast<CXXInlineableMemberKind &>(CXXMemberInliningMode);
+
+ MutableMode = llvm::StringSwitch<CXXInlineableMemberKind>(ModeStr)
+ .Case("constructors", CIMK_Constructors)
+ .Case("destructors", CIMK_Destructors)
+ .Case("none", CIMK_None)
+ .Case("methods", CIMK_MemberFunctions)
+ .Default(CXXInlineableMemberKind());
+
+ if (!MutableMode) {
+ // FIXME: We should emit a warning here about an unknown inlining kind,
+ // but the AnalyzerOptions doesn't have access to a diagnostic engine.
+ MutableMode = CIMK_None;
+ }
+ }
+
+ return CXXMemberInliningMode >= K;
+}
+
+static StringRef toString(bool b) { return b ? "true" : "false"; }
+
+bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal) {
+ // FIXME: We should emit a warning here if the value is something other than
+ // "true", "false", or the empty string (meaning the default value),
+ // but the AnalyzerOptions doesn't have access to a diagnostic engine.
+ StringRef V(Config.GetOrCreateValue(Name, toString(DefaultVal)).getValue());
+ return llvm::StringSwitch<bool>(V)
+ .Case("true", true)
+ .Case("false", false)
+ .Default(DefaultVal);
+}
+
+bool AnalyzerOptions::getBooleanOption(llvm::Optional<bool> &V,
+ StringRef Name,
+ bool DefaultVal) {
+ if (!V.hasValue())
+ V = getBooleanOption(Name, DefaultVal);
+ return V.getValue();
+}
+
+bool AnalyzerOptions::includeTemporaryDtorsInCFG() {
+ return getBooleanOption(IncludeTemporaryDtorsInCFG,
+ "cfg-temporary-dtors",
+ /* Default = */ false);
+}
+
+bool AnalyzerOptions::mayInlineCXXStandardLibrary() {
+ return getBooleanOption(InlineCXXStandardLibrary,
+ "c++-stdlib-inlining",
+ /*Default=*/true);
+}
+
+bool AnalyzerOptions::mayInlineTemplateFunctions() {
+ return getBooleanOption(InlineTemplateFunctions,
+ "c++-template-inlining",
+ /*Default=*/true);
+}
+
+bool AnalyzerOptions::mayInlineObjCMethod() {
+ return getBooleanOption(ObjCInliningMode,
+ "objc-inlining",
+ /* Default = */ true);
+}
+
+bool AnalyzerOptions::shouldPruneNullReturnPaths() {
+ return getBooleanOption(PruneNullReturnPaths,
+ "suppress-null-return-paths",
+ /* Default = */ true);
+}
+
+bool AnalyzerOptions::shouldAvoidSuppressingNullArgumentPaths() {
+ return getBooleanOption(AvoidSuppressingNullArgumentPaths,
+ "avoid-suppressing-null-argument-paths",
+ /* Default = */ false);
+}
+
+int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal) {
+ llvm::SmallString<10> StrBuf;
+ llvm::raw_svector_ostream OS(StrBuf);
+ OS << DefaultVal;
+
+ StringRef V(Config.GetOrCreateValue(Name, OS.str()).getValue());
+ int Res = DefaultVal;
+ bool b = V.getAsInteger(10, Res);
+ assert(!b && "analyzer-config option should be numeric");
+ (void) b;
+ return Res;
+}
+
+unsigned AnalyzerOptions::getAlwaysInlineSize() {
+ if (!AlwaysInlineSize.hasValue())
+ AlwaysInlineSize = getOptionAsInteger("ipa-always-inline-size", 3);
+ return AlwaysInlineSize.getValue();
+}
+
+unsigned AnalyzerOptions::getGraphTrimInterval() {
+ if (!GraphTrimInterval.hasValue())
+ GraphTrimInterval = getOptionAsInteger("graph-trim-interval", 1000);
+ return GraphTrimInterval.getValue();
+}
+
+bool AnalyzerOptions::shouldSynthesizeBodies() {
+ return getBooleanOption("faux-bodies", true);
+}
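
Each accessor above seeds Config with the option's default on first use and parses the stored string thereafter, so repeated queries return stable answers. A sketch of client-side use, assuming an AnalyzerOptions &Opts is in scope (e.g. obtained from AnalysisManager):

// "faux-bodies" defaults to true per shouldSynthesizeBodies() above; the user
// can override it via the analyzer-config key of the same name.
if (Opts.shouldSynthesizeBodies()) {
  // body synthesis enabled
}
// "ipa-always-inline-size" defaults to 3 per getAlwaysInlineSize() above.
unsigned InlineBudget = Opts.getAlwaysInlineSize();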
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
deleted file mode 100644
index 8897756..0000000
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
+++ /dev/null
@@ -1,446 +0,0 @@
-//== BasicConstraintManager.cpp - Manage basic constraints.------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines BasicConstraintManager, a class that tracks simple
-// equality and inequality constraints on symbolic values of ProgramState.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SimpleConstraintManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace clang;
-using namespace ento;
-
-
-namespace { class ConstNotEq {}; }
-namespace { class ConstEq {}; }
-
-typedef llvm::ImmutableMap<SymbolRef,ProgramState::IntSetTy> ConstNotEqTy;
-typedef llvm::ImmutableMap<SymbolRef,const llvm::APSInt*> ConstEqTy;
-
-static int ConstEqIndex = 0;
-static int ConstNotEqIndex = 0;
-
-namespace clang {
-namespace ento {
-template<>
-struct ProgramStateTrait<ConstNotEq> :
- public ProgramStatePartialTrait<ConstNotEqTy> {
- static inline void *GDMIndex() { return &ConstNotEqIndex; }
-};
-
-template<>
-struct ProgramStateTrait<ConstEq> : public ProgramStatePartialTrait<ConstEqTy> {
- static inline void *GDMIndex() { return &ConstEqIndex; }
-};
-}
-}
-
-namespace {
-// BasicConstraintManager only tracks equality and inequality constraints of
-// constants and integer variables.
-class BasicConstraintManager
- : public SimpleConstraintManager {
- ProgramState::IntSetTy::Factory ISetFactory;
-public:
- BasicConstraintManager(ProgramStateManager &statemgr, SubEngine &subengine)
- : SimpleConstraintManager(subengine, statemgr.getBasicVals()),
- ISetFactory(statemgr.getAllocator()) {}
-
- ProgramStateRef assumeSymEquality(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment,
- bool Assumption);
-
- ProgramStateRef assumeSymNE(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) {
- return assumeSymEquality(State, Sym, V, Adjustment, false);
- }
-
- ProgramStateRef assumeSymEQ(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) {
- return assumeSymEquality(State, Sym, V, Adjustment, true);
- }
-
- ProgramStateRef assumeSymLT(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment);
-
- ProgramStateRef assumeSymGT(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment);
-
- ProgramStateRef assumeSymGE(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment);
-
- ProgramStateRef assumeSymLE(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment);
-
- ProgramStateRef AddEQ(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V);
-
- ProgramStateRef AddNE(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V);
-
- const llvm::APSInt* getSymVal(ProgramStateRef state,
- SymbolRef sym) const;
-
- bool isNotEqual(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V) const;
-
- bool isEqual(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V) const;
-
- ProgramStateRef removeDeadBindings(ProgramStateRef state,
- SymbolReaper& SymReaper);
-
- bool performTest(llvm::APSInt SymVal, llvm::APSInt Adjustment,
- BinaryOperator::Opcode Op, llvm::APSInt ComparisonVal);
-
- void print(ProgramStateRef state,
- raw_ostream &Out,
- const char* nl,
- const char *sep);
-};
-
-} // end anonymous namespace
-
-ConstraintManager*
-ento::CreateBasicConstraintManager(ProgramStateManager& statemgr,
- SubEngine &subengine) {
- return new BasicConstraintManager(statemgr, subengine);
-}
-
-// FIXME: This is a more general utility and should live somewhere else.
-bool BasicConstraintManager::performTest(llvm::APSInt SymVal,
- llvm::APSInt Adjustment,
- BinaryOperator::Opcode Op,
- llvm::APSInt ComparisonVal) {
- APSIntType Type(Adjustment);
- Type.apply(SymVal);
- Type.apply(ComparisonVal);
- SymVal += Adjustment;
-
- assert(BinaryOperator::isComparisonOp(Op));
- BasicValueFactory &BVF = getBasicVals();
- const llvm::APSInt *Result = BVF.evalAPSInt(Op, SymVal, ComparisonVal);
- assert(Result && "Comparisons should always have valid results.");
-
- return Result->getBoolValue();
-}
-
-ProgramStateRef
-BasicConstraintManager::assumeSymEquality(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment,
- bool Assumption) {
- // Before we do any real work, see if the value can even show up.
- APSIntType AdjustmentType(Adjustment);
- if (AdjustmentType.testInRange(V) != APSIntType::RTR_Within)
- return Assumption ? NULL : State;
-
- // Get the symbol type.
- BasicValueFactory &BVF = getBasicVals();
- ASTContext &Ctx = BVF.getContext();
- APSIntType SymbolType = BVF.getAPSIntType(Sym->getType(Ctx));
-
- // First, see if the adjusted value is within range for the symbol.
- llvm::APSInt Adjusted = AdjustmentType.convert(V) - Adjustment;
- if (SymbolType.testInRange(Adjusted) != APSIntType::RTR_Within)
- return Assumption ? NULL : State;
-
- // Now we can do things properly in the symbol space.
- SymbolType.apply(Adjusted);
-
- // Second, determine if sym == X, where X+Adjustment != V.
- if (const llvm::APSInt *X = getSymVal(State, Sym)) {
- bool IsFeasible = (*X == Adjusted);
- return (IsFeasible == Assumption) ? State : NULL;
- }
-
- // Third, determine if we already know sym+Adjustment != V.
- if (isNotEqual(State, Sym, Adjusted))
- return Assumption ? NULL : State;
-
- // If we reach here, sym is not a constant and we don't know if it is != V.
- // Make the correct assumption.
- if (Assumption)
- return AddEQ(State, Sym, Adjusted);
- else
- return AddNE(State, Sym, Adjusted);
-}
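
assumeSymEquality begins with two representability checks: if V cannot be expressed in the adjustment's type, or the adjusted value V - Adjustment does not fit the symbol's type, the equality is infeasible and the disequality changes nothing. A toy version of that pre-check, using <limits> in place of APSIntType::testInRange:

// Illustrative sketch only: an out-of-range comparison value makes
// 'sym == V' infeasible and 'sym != V' a no-op, with uint8_t standing in
// for the symbol's type.
#include <cstdint>
#include <iostream>
#include <limits>

static bool fitsInUInt8(int64_t V) {
  return V >= 0 && V <= std::numeric_limits<uint8_t>::max();
}

int main() {
  const int64_t V = 300; // can never show up in an 8-bit unsigned symbol
  if (!fitsInUInt8(V))
    std::cout << "assume(sym == 300): infeasible; "
                 "assume(sym != 300): state unchanged\n";
}
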
-
-// The logic for these will be handled in another ConstraintManager.
-// Approximate it here anyway by handling some edge cases.
-ProgramStateRef
-BasicConstraintManager::assumeSymLT(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) {
- APSIntType ComparisonType(V), AdjustmentType(Adjustment);
-
- // Is 'V' out of range above the type?
- llvm::APSInt Max = AdjustmentType.getMaxValue();
- if (V > ComparisonType.convert(Max)) {
- // This path is trivially feasible.
- return state;
- }
-
- // Is 'V' the smallest possible value, or out of range below the type?
- llvm::APSInt Min = AdjustmentType.getMinValue();
- if (V <= ComparisonType.convert(Min)) {
- // sym cannot be any value less than 'V'. This path is infeasible.
- return NULL;
- }
-
- // Reject a path if the value of sym is a constant X and !(X+Adj < V).
- if (const llvm::APSInt *X = getSymVal(state, sym)) {
- bool isFeasible = performTest(*X, Adjustment, BO_LT, V);
- return isFeasible ? state : NULL;
- }
-
-  // FIXME: For now, treat assuming x < y the same as assuming sym != V.
- return assumeSymNE(state, sym, V, Adjustment);
-}
-
-ProgramStateRef
-BasicConstraintManager::assumeSymGT(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) {
- APSIntType ComparisonType(V), AdjustmentType(Adjustment);
-
- // Is 'V' the largest possible value, or out of range above the type?
- llvm::APSInt Max = AdjustmentType.getMaxValue();
- if (V >= ComparisonType.convert(Max)) {
- // sym cannot be any value greater than 'V'. This path is infeasible.
- return NULL;
- }
-
- // Is 'V' out of range below the type?
- llvm::APSInt Min = AdjustmentType.getMinValue();
- if (V < ComparisonType.convert(Min)) {
- // This path is trivially feasible.
- return state;
- }
-
- // Reject a path if the value of sym is a constant X and !(X+Adj > V).
- if (const llvm::APSInt *X = getSymVal(state, sym)) {
- bool isFeasible = performTest(*X, Adjustment, BO_GT, V);
- return isFeasible ? state : NULL;
- }
-
-  // FIXME: For now, treat assuming x > y the same as assuming sym != V.
- return assumeSymNE(state, sym, V, Adjustment);
-}
-
-ProgramStateRef
-BasicConstraintManager::assumeSymGE(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) {
- APSIntType ComparisonType(V), AdjustmentType(Adjustment);
-
- // Is 'V' the largest possible value, or out of range above the type?
- llvm::APSInt Max = AdjustmentType.getMaxValue();
- ComparisonType.apply(Max);
-
- if (V > Max) {
-    // sym cannot be any value greater than or equal to 'V'. This path is
-    // infeasible.
- return NULL;
- } else if (V == Max) {
- // If the path is feasible then as a consequence we know that
- // 'sym+Adjustment == V' because there are no larger values.
- // Add this constraint.
- return assumeSymEQ(state, sym, V, Adjustment);
- }
-
- // Is 'V' out of range below the type?
- llvm::APSInt Min = AdjustmentType.getMinValue();
- if (V < ComparisonType.convert(Min)) {
- // This path is trivially feasible.
- return state;
- }
-
- // Reject a path if the value of sym is a constant X and !(X+Adj >= V).
- if (const llvm::APSInt *X = getSymVal(state, sym)) {
- bool isFeasible = performTest(*X, Adjustment, BO_GE, V);
- return isFeasible ? state : NULL;
- }
-
- return state;
-}
-
-ProgramStateRef
-BasicConstraintManager::assumeSymLE(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) {
- APSIntType ComparisonType(V), AdjustmentType(Adjustment);
-
- // Is 'V' out of range above the type?
- llvm::APSInt Max = AdjustmentType.getMaxValue();
- if (V > ComparisonType.convert(Max)) {
- // This path is trivially feasible.
- return state;
- }
-
- // Is 'V' the smallest possible value, or out of range below the type?
- llvm::APSInt Min = AdjustmentType.getMinValue();
- ComparisonType.apply(Min);
-
- if (V < Min) {
-    // sym cannot be any value less than or equal to 'V'. This path is
-    // infeasible.
- return NULL;
- } else if (V == Min) {
- // If the path is feasible then as a consequence we know that
- // 'sym+Adjustment == V' because there are no smaller values.
- // Add this constraint.
- return assumeSymEQ(state, sym, V, Adjustment);
- }
-
-  // Reject a path if the value of sym is a constant X and !(X+Adj <= V).
- if (const llvm::APSInt *X = getSymVal(state, sym)) {
- bool isFeasible = performTest(*X, Adjustment, BO_LE, V);
- return isFeasible ? state : NULL;
- }
-
- return state;
-}
-
-ProgramStateRef BasicConstraintManager::AddEQ(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V) {
- // Now that we have an actual value, we can throw out the NE-set.
- // Create a new state with the old bindings replaced.
- state = state->remove<ConstNotEq>(sym);
- return state->set<ConstEq>(sym, &getBasicVals().getValue(V));
-}
-
-ProgramStateRef BasicConstraintManager::AddNE(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V) {
-
- // First, retrieve the NE-set associated with the given symbol.
- ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
- ProgramState::IntSetTy S = T ? *T : ISetFactory.getEmptySet();
-
- // Now add V to the NE set.
- S = ISetFactory.add(S, &getBasicVals().getValue(V));
-
- // Create a new state with the old binding replaced.
- return state->set<ConstNotEq>(sym, S);
-}
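
AddEQ and AddNE above maintain the manager's two constraint maps: ConstEq binds a symbol to one concrete value, ConstNotEq binds it to a set of excluded values, and learning an equality discards the now-redundant NE-set. The same bookkeeping with std::map and std::set standing in for the immutable containers:

// Illustrative sketch only: models the ConstEq/ConstNotEq maps with
// mutable standard containers keyed by a toy symbol id.
#include <cassert>
#include <cstdint>
#include <map>
#include <set>

using Sym = int;
std::map<Sym, int64_t> ConstEq;
std::map<Sym, std::set<int64_t>> ConstNotEq;

void addNE(Sym S, int64_t V) { ConstNotEq[S].insert(V); }

void addEQ(Sym S, int64_t V) {
  ConstNotEq.erase(S); // an exact value makes the NE-set redundant
  ConstEq[S] = V;
}

int main() {
  addNE(1, 0);
  addNE(1, 5);
  addEQ(1, 7);
  assert(ConstEq[1] == 7 && ConstNotEq.count(1) == 0);
}
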
-
-const llvm::APSInt* BasicConstraintManager::getSymVal(ProgramStateRef state,
- SymbolRef sym) const {
- const ConstEqTy::data_type* T = state->get<ConstEq>(sym);
- return T ? *T : NULL;
-}
-
-bool BasicConstraintManager::isNotEqual(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V) const {
-
- // Retrieve the NE-set associated with the given symbol.
- const ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
-
- // See if V is present in the NE-set.
- return T ? T->contains(&getBasicVals().getValue(V)) : false;
-}
-
-bool BasicConstraintManager::isEqual(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V) const {
- // Retrieve the EQ-set associated with the given symbol.
- const ConstEqTy::data_type* T = state->get<ConstEq>(sym);
- // See if V is present in the EQ-set.
- return T ? **T == V : false;
-}
-
-/// Scan all symbols referenced by the constraints. If a symbol is no longer
-/// alive (per the SymbolReaper), remove its constraints from the state.
-ProgramStateRef
-BasicConstraintManager::removeDeadBindings(ProgramStateRef state,
- SymbolReaper& SymReaper) {
-
- ConstEqTy CE = state->get<ConstEq>();
- ConstEqTy::Factory& CEFactory = state->get_context<ConstEq>();
-
- for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) {
- SymbolRef sym = I.getKey();
- if (SymReaper.maybeDead(sym))
- CE = CEFactory.remove(CE, sym);
- }
- state = state->set<ConstEq>(CE);
-
- ConstNotEqTy CNE = state->get<ConstNotEq>();
- ConstNotEqTy::Factory& CNEFactory = state->get_context<ConstNotEq>();
-
- for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) {
- SymbolRef sym = I.getKey();
- if (SymReaper.maybeDead(sym))
- CNE = CNEFactory.remove(CNE, sym);
- }
-
- return state->set<ConstNotEq>(CNE);
-}
-
-void BasicConstraintManager::print(ProgramStateRef state,
- raw_ostream &Out,
- const char* nl, const char *sep) {
- // Print equality constraints.
-
- ConstEqTy CE = state->get<ConstEq>();
-
- if (!CE.isEmpty()) {
- Out << nl << sep << "'==' constraints:";
- for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I)
- Out << nl << " $" << I.getKey() << " : " << *I.getData();
- }
-
- // Print != constraints.
-
- ConstNotEqTy CNE = state->get<ConstNotEq>();
-
- if (!CNE.isEmpty()) {
- Out << nl << sep << "'!=' constraints:";
-
- for (ConstNotEqTy::iterator I = CNE.begin(), EI = CNE.end(); I!=EI; ++I) {
- Out << nl << " $" << I.getKey() << " : ";
- bool isFirst = true;
-
- ProgramState::IntSetTy::iterator J = I.getData().begin(),
- EJ = I.getData().end();
-
- for ( ; J != EJ; ++J) {
- if (isFirst) isFirst = false;
- else Out << ", ";
-
- Out << (*J)->getSExtValue(); // Hack: should print to raw_ostream.
- }
- }
- }
-}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 20c7361..a6c400f 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -101,11 +101,7 @@ const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
- unsigned bits = Ctx.getTypeSize(T);
- llvm::APSInt V(bits,
- T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T));
- V = X;
- return getValue(V);
+ return getValue(getAPSIntType(T).getValue(X));
}
const CompoundValData*
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index 571baec..c898d65 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -118,10 +118,82 @@ GetCurrentOrNextStmt(const ExplodedNode *N) {
// Diagnostic cleanup.
//===----------------------------------------------------------------------===//
+static PathDiagnosticEventPiece *
+eventsDescribeSameCondition(PathDiagnosticEventPiece *X,
+ PathDiagnosticEventPiece *Y) {
+ // Prefer diagnostics that come from ConditionBRVisitor over
+ // those that came from TrackConstraintBRVisitor.
+ const void *tagPreferred = ConditionBRVisitor::getTag();
+ const void *tagLesser = TrackConstraintBRVisitor::getTag();
+
+ if (X->getLocation() != Y->getLocation())
+ return 0;
+
+ if (X->getTag() == tagPreferred && Y->getTag() == tagLesser)
+ return X;
+
+ if (Y->getTag() == tagPreferred && X->getTag() == tagLesser)
+ return Y;
+
+ return 0;
+}
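
eventsDescribeSameCondition merges two event pieces only when they share a location and exactly one of them carries the preferred tag. The rule restated over a toy Event type, with string tags standing in for the visitors' tag pointers:

// Illustrative sketch only: the tag-preference rule, detached from the
// PathDiagnostic classes.
#include <cassert>
#include <string>

struct Event { int Loc; std::string Tag; };

static const Event *preferredOf(const Event &X, const Event &Y) {
  const std::string Preferred = "ConditionBRVisitor";
  const std::string Lesser = "TrackConstraintBRVisitor";
  if (X.Loc != Y.Loc)
    return nullptr; // different locations never merge
  if (X.Tag == Preferred && Y.Tag == Lesser)
    return &X;
  if (Y.Tag == Preferred && X.Tag == Lesser)
    return &Y;
  return nullptr;
}

int main() {
  Event A{42, "ConditionBRVisitor"}, B{42, "TrackConstraintBRVisitor"};
  assert(preferredOf(A, B) == &A);
}
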
+
+/// An optimization pass over PathPieces that removes redundant diagnostics
+/// generated by both ConditionBRVisitor and TrackConstraintBRVisitor. The
+/// two visitors generate their diagnostics in different ways, and each can
+/// emit a diagnostic in some cases where the other cannot. This can lead to
+/// redundant diagnostic pieces at the same point in a path.
+static void removeRedundantMsgs(PathPieces &path) {
+ unsigned N = path.size();
+ if (N < 2)
+ return;
+  // NOTE: This loop intentionally does not use an iterator. Instead, we
+  // stream the path and modify it in place: grab the front piece, process
+  // it, and if we decide to keep it, append it to the end of the path.
+  // The entire path is processed this way.
+ for (unsigned i = 0; i < N; ++i) {
+ IntrusiveRefCntPtr<PathDiagnosticPiece> piece(path.front());
+ path.pop_front();
+
+ switch (piece->getKind()) {
+ case clang::ento::PathDiagnosticPiece::Call:
+ removeRedundantMsgs(cast<PathDiagnosticCallPiece>(piece)->path);
+ break;
+ case clang::ento::PathDiagnosticPiece::Macro:
+ removeRedundantMsgs(cast<PathDiagnosticMacroPiece>(piece)->subPieces);
+ break;
+ case clang::ento::PathDiagnosticPiece::ControlFlow:
+ break;
+ case clang::ento::PathDiagnosticPiece::Event: {
+ if (i == N-1)
+ break;
+
+ if (PathDiagnosticEventPiece *nextEvent =
+ dyn_cast<PathDiagnosticEventPiece>(path.front().getPtr())) {
+ PathDiagnosticEventPiece *event =
+ cast<PathDiagnosticEventPiece>(piece);
+ // Check to see if we should keep one of the two pieces. If we
+ // come up with a preference, record which piece to keep, and consume
+ // another piece from the path.
+ if (PathDiagnosticEventPiece *pieceToKeep =
+ eventsDescribeSameCondition(event, nextEvent)) {
+ piece = pieceToKeep;
+ path.pop_front();
+ ++i;
+ }
+ }
+ break;
+ }
+ }
+ path.push_back(piece);
+ }
+}
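
removeRedundantMsgs filters the path by streaming it, as the NOTE above describes: each piece is popped from the front and, if kept, appended to the back, so after exactly N steps the container holds the filtered path in its original order. The idiom on a std::deque<int>, merging equal neighbors much as the pass merges events describing the same condition:

// Illustrative sketch only: the stream-and-rotate filtering idiom from
// removeRedundantMsgs. One pop per element, one push per survivor.
#include <cassert>
#include <deque>

static void dropAdjacentDuplicates(std::deque<int> &Path) {
  const unsigned N = Path.size();
  for (unsigned I = 0; I < N; ++I) {
    int Piece = Path.front();
    Path.pop_front();
    // Merge a pair of equal neighbors by consuming the second one, just
    // as the real pass consumes the less-preferred of two event pieces.
    if (I != N - 1 && Path.front() == Piece) {
      Path.pop_front();
      ++I;
    }
    Path.push_back(Piece);
  }
}

int main() {
  std::deque<int> Path = {1, 1, 2, 3, 3};
  dropAdjacentDuplicates(Path);
  assert((Path == std::deque<int>{1, 2, 3}));
}
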
+
 /// Recursively scan through a path and prune out call and macro pieces
 /// that aren't needed. Return true if afterwards the path contains
 /// "interesting stuff", which means it shouldn't be pruned from the parent
 /// path.
-static bool RemoveUneededCalls(PathPieces &pieces) {
+bool BugReporter::RemoveUneededCalls(PathPieces &pieces, BugReport *R,
+ PathDiagnosticCallPiece *CallWithLoc) {
bool containsSomethingInteresting = false;
const unsigned N = pieces.size();
@@ -131,30 +203,49 @@ static bool RemoveUneededCalls(PathPieces &pieces) {
IntrusiveRefCntPtr<PathDiagnosticPiece> piece(pieces.front());
pieces.pop_front();
+ // Throw away pieces with invalid locations.
+ if (piece->getKind() != PathDiagnosticPiece::Call &&
+ piece->getLocation().asLocation().isInvalid())
+ continue;
+
switch (piece->getKind()) {
case PathDiagnosticPiece::Call: {
PathDiagnosticCallPiece *call = cast<PathDiagnosticCallPiece>(piece);
+ // Check if the location context is interesting.
+ assert(LocationContextMap.count(call));
+ if (R->isInteresting(LocationContextMap[call])) {
+ containsSomethingInteresting = true;
+ break;
+ }
       // Recursively clean out the sub-path. Keep this call around if
       // it contains any informative diagnostics.
- if (!RemoveUneededCalls(call->path))
+ PathDiagnosticCallPiece *NewCallWithLoc =
+ call->getLocation().asLocation().isValid()
+ ? call : CallWithLoc;
+
+ if (!RemoveUneededCalls(call->path, R, NewCallWithLoc))
continue;
+
+ if (NewCallWithLoc == CallWithLoc && CallWithLoc) {
+ call->callEnter = CallWithLoc->callEnter;
+ }
+
containsSomethingInteresting = true;
break;
}
case PathDiagnosticPiece::Macro: {
PathDiagnosticMacroPiece *macro = cast<PathDiagnosticMacroPiece>(piece);
- if (!RemoveUneededCalls(macro->subPieces))
+ if (!RemoveUneededCalls(macro->subPieces, R))
continue;
containsSomethingInteresting = true;
break;
}
case PathDiagnosticPiece::Event: {
PathDiagnosticEventPiece *event = cast<PathDiagnosticEventPiece>(piece);
+
// We never throw away an event, but we do throw it away wholesale
// as part of a path if we throw the entire path away.
- if (event->isPrunable())
- continue;
- containsSomethingInteresting = true;
+ containsSomethingInteresting |= !event->isPrunable();
break;
}
case PathDiagnosticPiece::ControlFlow:
@@ -382,6 +473,35 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
}
//===----------------------------------------------------------------------===//
+// "Visitors only" path diagnostic generation algorithm.
+//===----------------------------------------------------------------------===//
+static bool GenerateVisitorsOnlyPathDiagnostic(PathDiagnostic &PD,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode *N,
+ ArrayRef<BugReporterVisitor *> visitors) {
+ // All path generation skips the very first node (the error node).
+ // This is because there is special handling for the end-of-path note.
+ N = N->getFirstPred();
+ if (!N)
+ return true;
+
+ BugReport *R = PDB.getBugReport();
+ while (const ExplodedNode *Pred = N->getFirstPred()) {
+ for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+ E = visitors.end();
+ I != E; ++I) {
+ // Visit all the node pairs, but throw the path pieces away.
+ PathDiagnosticPiece *Piece = (*I)->VisitNode(N, Pred, PDB, *R);
+ delete Piece;
+ }
+
+ N = Pred;
+ }
+
+ return R->isValid();
+}
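
GenerateVisitorsOnlyPathDiagnostic runs the visitors solely for their side effects and discards every piece they produce; all that matters is whether the report is still valid afterwards. A toy version over a hand-rolled predecessor chain:

// Illustrative sketch only: the visitors-only walk reduced to a linked
// list. Visitors run purely for side effects (such as marking the report
// invalid); any diagnostic piece they would produce is thrown away.
#include <iostream>

struct Node { const Node *Pred; };
struct Report { bool Valid = true; };

static void visit(const Node &, const Node &, Report &R) {
  R.Valid = false; // a visitor may decide the report is a false positive
}

static bool runVisitorsOnly(const Node *N, Report &R) {
  N = N->Pred; // skip the error node; it gets end-of-path handling
  if (!N)
    return true;
  while (const Node *Pred = N->Pred) {
    visit(*N, *Pred, R); // result discarded, side effects kept
    N = Pred;
  }
  return R.Valid;
}

int main() {
  Node Root{nullptr}, Mid{&Root}, Error{&Mid};
  Report R;
  std::cout << (runVisitorsOnly(&Error, R) ? "valid\n" : "invalidated\n");
}
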
+
+//===----------------------------------------------------------------------===//
// "Minimal" path diagnostic generation algorithm.
//===----------------------------------------------------------------------===//
typedef std::pair<PathDiagnosticCallPiece*, const ExplodedNode*> StackDiagPair;
@@ -412,7 +532,7 @@ static void updateStackPiecesWithMessage(PathDiagnosticPiece *P,
static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
-static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
+static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
const ExplodedNode *N,
ArrayRef<BugReporterVisitor *> visitors) {
@@ -430,55 +550,60 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
NextNode = GetPredecessorNode(N);
ProgramPoint P = N->getLocation();
-
- if (const CallExitEnd *CE = dyn_cast<CallExitEnd>(&P)) {
- PathDiagnosticCallPiece *C =
- PathDiagnosticCallPiece::construct(N, *CE, SMgr);
- PD.getActivePath().push_front(C);
- PD.pushActivePath(&C->path);
- CallStack.push_back(StackDiagPair(C, N));
- continue;
- }
-
- if (const CallEnter *CE = dyn_cast<CallEnter>(&P)) {
- // Flush all locations, and pop the active path.
- bool VisitedEntireCall = PD.isWithinCall();
- PD.popActivePath();
-
- // Either we just added a bunch of stuff to the top-level path, or
- // we have a previous CallExitEnd. If the former, it means that the
- // path terminated within a function call. We must then take the
- // current contents of the active path and place it within
- // a new PathDiagnosticCallPiece.
- PathDiagnosticCallPiece *C;
- if (VisitedEntireCall) {
- C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
- } else {
- const Decl *Caller = CE->getLocationContext()->getDecl();
- C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+
+ do {
+ if (const CallExitEnd *CE = dyn_cast<CallExitEnd>(&P)) {
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SMgr);
+ GRBugReporter& BR = PDB.getBugReporter();
+ BR.addCallPieceLocationContextPair(C, CE->getCalleeContext());
+ PD.getActivePath().push_front(C);
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ break;
}
- C->setCallee(*CE, SMgr);
- if (!CallStack.empty()) {
- assert(CallStack.back().first == C);
- CallStack.pop_back();
+ if (const CallEnter *CE = dyn_cast<CallEnter>(&P)) {
+ // Flush all locations, and pop the active path.
+ bool VisitedEntireCall = PD.isWithinCall();
+ PD.popActivePath();
+
+ // Either we just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExitEnd. If the former, it means that the
+ // path terminated within a function call. We must then take the
+ // current contents of the active path and place it within
+ // a new PathDiagnosticCallPiece.
+ PathDiagnosticCallPiece *C;
+ if (VisitedEntireCall) {
+ C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ } else {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ GRBugReporter& BR = PDB.getBugReporter();
+ BR.addCallPieceLocationContextPair(C, CE->getCalleeContext());
+ }
+
+ C->setCallee(*CE, SMgr);
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ break;
}
- continue;
- }
- if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
- const CFGBlock *Src = BE->getSrc();
- const CFGBlock *Dst = BE->getDst();
- const Stmt *T = Src->getTerminator();
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ const CFGBlock *Src = BE->getSrc();
+ const CFGBlock *Dst = BE->getDst();
+ const Stmt *T = Src->getTerminator();
- if (!T)
- continue;
+ if (!T)
+ break;
- PathDiagnosticLocation Start =
- PathDiagnosticLocation::createBegin(T, SMgr,
- N->getLocationContext());
+ PathDiagnosticLocation Start =
+ PathDiagnosticLocation::createBegin(T, SMgr,
+ N->getLocationContext());
- switch (T->getStmtClass()) {
+ switch (T->getStmtClass()) {
default:
break;
@@ -487,16 +612,16 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
const Stmt *S = GetNextStmt(N);
if (!S)
- continue;
+ break;
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
os << "Control jumps to line "
- << End.asLocation().getExpansionLineNumber();
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ << End.asLocation().getExpansionLineNumber();
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
break;
}
@@ -509,52 +634,52 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PathDiagnosticLocation End(S, SMgr, LC);
switch (S->getStmtClass()) {
- default:
- os << "No cases match in the switch statement. "
- "Control jumps to line "
- << End.asLocation().getExpansionLineNumber();
- break;
- case Stmt::DefaultStmtClass:
- os << "Control jumps to the 'default' case at line "
- << End.asLocation().getExpansionLineNumber();
- break;
-
- case Stmt::CaseStmtClass: {
- os << "Control jumps to 'case ";
- const CaseStmt *Case = cast<CaseStmt>(S);
- const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
-
- // Determine if it is an enum.
- bool GetRawInt = true;
-
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
- // FIXME: Maybe this should be an assertion. Are there cases
-      // where it is not an EnumConstantDecl?
- const EnumConstantDecl *D =
+ default:
+ os << "No cases match in the switch statement. "
+ "Control jumps to line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+ case Stmt::DefaultStmtClass:
+ os << "Control jumps to the 'default' case at line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+
+ case Stmt::CaseStmtClass: {
+ os << "Control jumps to 'case ";
+ const CaseStmt *Case = cast<CaseStmt>(S);
+ const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
+
+ // Determine if it is an enum.
+ bool GetRawInt = true;
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
+ // FIXME: Maybe this should be an assertion. Are there cases
+          // where it is not an EnumConstantDecl?
+ const EnumConstantDecl *D =
dyn_cast<EnumConstantDecl>(DR->getDecl());
- if (D) {
- GetRawInt = false;
- os << *D;
- }
+ if (D) {
+ GetRawInt = false;
+ os << *D;
}
+ }
- if (GetRawInt)
- os << LHS->EvaluateKnownConstInt(PDB.getASTContext());
+ if (GetRawInt)
+ os << LHS->EvaluateKnownConstInt(PDB.getASTContext());
- os << ":' at line "
- << End.asLocation().getExpansionLineNumber();
- break;
- }
+ os << ":' at line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
}
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ }
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
}
else {
os << "'Default' branch taken. ";
const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
}
break;
@@ -565,12 +690,12 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
break;
}
- // Determine control-flow for ternary '?'.
+ // Determine control-flow for ternary '?'.
case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: {
std::string sbuf;
@@ -587,12 +712,12 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
break;
}
- // Determine control-flow for short-circuited '&&' and '||'.
+ // Determine control-flow for short-circuited '&&' and '||'.
case Stmt::BinaryOperatorClass: {
if (!PDB.supportsLogicalOpControlFlow())
break;
@@ -609,16 +734,16 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
os << "false";
PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
PathDiagnosticLocation Start =
- PathDiagnosticLocation::createOperatorLoc(B, SMgr);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PathDiagnosticLocation::createOperatorLoc(B, SMgr);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
}
else {
os << "true";
PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
}
}
else {
@@ -629,16 +754,16 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
os << "false";
PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
}
else {
os << "true";
PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
PathDiagnosticLocation Start =
- PathDiagnosticLocation::createOperatorLoc(B, SMgr);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PathDiagnosticLocation::createOperatorLoc(B, SMgr);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
}
}
@@ -656,8 +781,8 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
}
else {
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
@@ -665,8 +790,8 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- "Loop condition is false. Exiting loop"));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, "Loop condition is false. Exiting loop"));
}
break;
@@ -683,16 +808,16 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, os.str()));
}
else {
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- "Loop condition is true. Entering loop body"));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, "Loop condition is true. Entering loop body"));
}
break;
@@ -705,16 +830,17 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
End = PDB.getEnclosingStmtLocation(S);
if (*(Src->succ_begin()+1) == Dst)
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- "Taking false branch"));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, "Taking false branch"));
else
- PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
- "Taking true branch"));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+ Start, End, "Taking true branch"));
break;
}
+ }
}
- }
+ } while(0);
if (NextNode) {
// Add diagnostic pieces from custom visitors.
@@ -730,9 +856,13 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
}
}
+ if (!PDB.getBugReport()->isValid())
+ return false;
+
// After constructing the full PathDiagnostic, do a pass over it to compact
// PathDiagnosticPieces that occur within a macro.
CompactPathDiagnostic(PD.getMutablePieces(), PDB.getSourceManager());
+ return true;
}
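
The restructuring above wraps the program-point dispatch in do { ... } while (0) so the former 'continue' statements become 'break': control now falls through to the visitor-handling code later in the same outer loop instead of skipping it. The idiom in isolation:

// Illustrative sketch only: the do { ... } while (0) one-shot block. A
// 'break' inside the block behaves like the old 'continue' for the
// dispatch, but the code after the block still runs every iteration.
#include <iostream>

int main() {
  for (int I = 0; I < 3; ++I) {
    do {
      if (I == 1)
        break; // previously 'continue': would have skipped the line below
      std::cout << "handled " << I << "\n";
    } while (0);
    std::cout << "visitors run for " << I << "\n"; // now always reached
  }
}
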
//===----------------------------------------------------------------------===//
@@ -944,6 +1074,11 @@ void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc);
const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc);
+ if (PrevLocClean.asLocation().isInvalid()) {
+ PrevLoc = NewLoc;
+ return;
+ }
+
if (NewLocClean.asLocation() == PrevLocClean.asLocation())
return;
@@ -1133,7 +1268,7 @@ static void reversePropagateInterestingSymbols(BugReport &R,
}
}
-static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
+static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
const ExplodedNode *N,
ArrayRef<BugReporterVisitor *> visitors) {
@@ -1166,6 +1301,8 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticCallPiece *C =
PathDiagnosticCallPiece::construct(N, *CE, SM);
+ GRBugReporter& BR = PDB.getBugReporter();
+ BR.addCallPieceLocationContextPair(C, CE->getCalleeContext());
EB.addEdge(C->callReturn, true);
EB.flushLocations();
@@ -1202,6 +1339,8 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
} else {
const Decl *Caller = CE->getLocationContext()->getDecl();
C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ GRBugReporter& BR = PDB.getBugReporter();
+ BR.addCallPieceLocationContextPair(C, CE->getCalleeContext());
}
C->setCallee(*CE, SM);
@@ -1234,20 +1373,15 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
}
}
- const CFGBlock &Blk = *BE->getSrc();
- const Stmt *Term = Blk.getTerminator();
-
// Are we jumping to the head of a loop? Add a special diagnostic.
- if (const Stmt *Loop = BE->getDst()->getLoopTarget()) {
+ if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
PathDiagnosticLocation L(Loop, SM, PDB.LC);
const CompoundStmt *CS = NULL;
- if (!Term) {
- if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
- CS = dyn_cast<CompoundStmt>(FS->getBody());
- else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
- CS = dyn_cast<CompoundStmt>(WS->getBody());
- }
+ if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(FS->getBody());
+ else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(WS->getBody());
PathDiagnosticEventPiece *p =
new PathDiagnosticEventPiece(L,
@@ -1263,15 +1397,16 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
EB.addEdge(BL);
}
}
-
- if (Term)
+
+ if (const Stmt *Term = BE->getSrc()->getTerminator())
EB.addContext(Term);
break;
}
if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
- if (const CFGStmt *S = BE->getFirstElement().getAs<CFGStmt>()) {
+ CFGElement First = BE->getFirstElement();
+ if (const CFGStmt *S = First.getAs<CFGStmt>()) {
const Stmt *stmt = S->getStmt();
if (IsControlFlowExpr(stmt)) {
// Add the proper context for '&&', '||', and '?'.
@@ -1306,6 +1441,8 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
}
}
}
+
+ return PDB.getBugReport()->isValid();
}
//===----------------------------------------------------------------------===//
@@ -1414,6 +1551,12 @@ void BugReport::markInteresting(SVal V) {
markInteresting(V.getAsSymbol());
}
+void BugReport::markInteresting(const LocationContext *LC) {
+ if (!LC)
+ return;
+ InterestingLocationContexts.insert(LC);
+}
+
bool BugReport::isInteresting(SVal V) {
return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol());
}
@@ -1438,6 +1581,12 @@ bool BugReport::isInteresting(const MemRegion *R) {
return false;
}
+bool BugReport::isInteresting(const LocationContext *LC) {
+ if (!LC)
+ return false;
+ return InterestingLocationContexts.count(LC);
+}
+
void BugReport::lazyInitializeInterestingSets() {
if (interestingSymbols.empty()) {
interestingSymbols.push_back(new Symbols());
@@ -1823,17 +1972,27 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
path.push_back(*I);
}
-void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
+bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticConsumer &PC,
ArrayRef<BugReport *> &bugReports) {
-
assert(!bugReports.empty());
+
+ bool HasValid = false;
SmallVector<const ExplodedNode *, 10> errorNodes;
for (ArrayRef<BugReport*>::iterator I = bugReports.begin(),
E = bugReports.end(); I != E; ++I) {
+ if ((*I)->isValid()) {
+ HasValid = true;
errorNodes.push_back((*I)->getErrorNode());
+ } else {
+ errorNodes.push_back(0);
+ }
}
+ // If all the reports have been marked invalid, we're done.
+ if (!HasValid)
+ return false;
+
// Construct a new graph that contains only a single path from the error
// node to a root.
const std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
@@ -1844,6 +2003,7 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
assert(GPair.second.second < bugReports.size());
BugReport *R = bugReports[GPair.second.second];
assert(R && "No original report found for sliced graph.");
+ assert(R->isValid() && "Report selected from trimmed graph marked invalid.");
OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
OwningPtr<NodeBackMap> BackMap(GPair.first.second);
@@ -1870,36 +2030,53 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
visitors.push_back((*I)->clone());
// Clear out the active path from any previous work.
- PD.getActivePath().clear();
+ PD.resetPath();
originalReportConfigToken = R->getConfigurationChangeToken();
// Generate the very last diagnostic piece - the piece is visible before
// the trace is expanded.
- PathDiagnosticPiece *LastPiece = 0;
- for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
- I != E; ++I) {
- if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
- assert (!LastPiece &&
- "There can only be one final piece in a diagnostic.");
- LastPiece = Piece;
+ if (PDB.getGenerationScheme() != PathDiagnosticConsumer::None) {
+ PathDiagnosticPiece *LastPiece = 0;
+ for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
+ I != E; ++I) {
+ if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
+ assert (!LastPiece &&
+ "There can only be one final piece in a diagnostic.");
+ LastPiece = Piece;
+ }
}
+ if (!LastPiece)
+ LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
+ if (LastPiece)
+ PD.setEndOfPath(LastPiece);
+ else
+ return false;
}
- if (!LastPiece)
- LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
- if (LastPiece)
- PD.getActivePath().push_back(LastPiece);
- else
- return;
switch (PDB.getGenerationScheme()) {
case PathDiagnosticConsumer::Extensive:
- GenerateExtensivePathDiagnostic(PD, PDB, N, visitors);
+ if (!GenerateExtensivePathDiagnostic(PD, PDB, N, visitors)) {
+ assert(!R->isValid() && "Failed on valid report");
+ // Try again. We'll filter out the bad report when we trim the graph.
+ // FIXME: It would be more efficient to use the same intermediate
+ // trimmed graph, and just repeat the shortest-path search.
+ return generatePathDiagnostic(PD, PC, bugReports);
+ }
break;
case PathDiagnosticConsumer::Minimal:
- GenerateMinimalPathDiagnostic(PD, PDB, N, visitors);
+ if (!GenerateMinimalPathDiagnostic(PD, PDB, N, visitors)) {
+ assert(!R->isValid() && "Failed on valid report");
+ // Try again. We'll filter out the bad report when we trim the graph.
+ return generatePathDiagnostic(PD, PC, bugReports);
+ }
break;
case PathDiagnosticConsumer::None:
- llvm_unreachable("PathDiagnosticConsumer::None should never appear here");
+ if (!GenerateVisitorsOnlyPathDiagnostic(PD, PDB, N, visitors)) {
+ assert(!R->isValid() && "Failed on valid report");
+ // Try again. We'll filter out the bad report when we trim the graph.
+ return generatePathDiagnostic(PD, PC, bugReports);
+ }
+ break;
}
// Clean up the visitors we used.
@@ -1910,18 +2087,26 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
} while(finalReportConfigToken != originalReportConfigToken);
// Finally, prune the diagnostic path of uninteresting stuff.
- if (R->shouldPrunePath()) {
- bool hasSomethingInteresting = RemoveUneededCalls(PD.getMutablePieces());
- assert(hasSomethingInteresting);
- (void) hasSomethingInteresting;
+ if (!PD.path.empty()) {
+ // Remove messages that are basically the same.
+ removeRedundantMsgs(PD.getMutablePieces());
+
+ if (R->shouldPrunePath()) {
+ bool hasSomethingInteresting = RemoveUneededCalls(PD.getMutablePieces(),
+ R);
+ assert(hasSomethingInteresting);
+ (void) hasSomethingInteresting;
+ }
}
+
+ return true;
}
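
generatePathDiagnostic now pre-filters the reports: invalid ones keep their slot as a null error node so indexes stay aligned with bugReports, and the whole function bails out when nothing valid remains. A sketch of that pre-filter:

// Illustrative sketch only: the validity pre-filter, with ints standing in
// for ExplodedNodes. Returns false when every report is already invalid.
#include <cassert>
#include <vector>

struct Report { bool Valid; int ErrorNode; };

static bool collectErrorNodes(const std::vector<Report> &Reports,
                              std::vector<const int *> &ErrorNodes) {
  bool HasValid = false;
  for (const Report &R : Reports) {
    if (R.Valid) {
      HasValid = true;
      ErrorNodes.push_back(&R.ErrorNode);
    } else {
      ErrorNodes.push_back(nullptr); // placeholder keeps indexes aligned
    }
  }
  return HasValid;
}

int main() {
  std::vector<Report> Reports = {{false, 1}, {true, 2}};
  std::vector<const int *> Nodes;
  assert(collectErrorNodes(Reports, Nodes) && Nodes[0] == nullptr);
}
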
void BugReporter::Register(BugType *BT) {
BugTypes = F.add(BugTypes, BT);
}
-void BugReporter::EmitReport(BugReport* R) {
+void BugReporter::emitReport(BugReport* R) {
// Compute the bug report's hash to determine its equivalence class.
llvm::FoldingSetNodeID ID;
R->Profile(ID);
@@ -2078,17 +2263,17 @@ void BugReporter::FlushReport(BugReport *exampleReport,
OwningPtr<PathDiagnostic>
D(new PathDiagnostic(exampleReport->getDeclWithIssue(),
exampleReport->getBugType().getName(),
- PD.useVerboseDescription()
- ? exampleReport->getDescription()
- : exampleReport->getShortDescription(),
+ exampleReport->getDescription(),
+ exampleReport->getShortDescription(/*Fallback=*/false),
BT.getCategory()));
// Generate the full path diagnostic, using the generation scheme
- // specified by the PathDiagnosticConsumer.
- if (PD.getGenerationScheme() != PathDiagnosticConsumer::None) {
- if (!bugReports.empty())
- GeneratePathDiagnostic(*D.get(), PD, bugReports);
- }
+ // specified by the PathDiagnosticConsumer. Note that we have to generate
+ // path diagnostics even for consumers which do not support paths, because
+ // the BugReporterVisitors may mark this bug as a false positive.
+ if (!bugReports.empty())
+ if (!generatePathDiagnostic(*D.get(), PD, bugReports))
+ return;
// If the path is empty, generate a single step path with the location
// of the issue.
@@ -2100,7 +2285,7 @@ void BugReporter::FlushReport(BugReport *exampleReport,
llvm::tie(Beg, End) = exampleReport->getRanges();
for ( ; Beg != End; ++Beg)
piece->addRange(*Beg);
- D->getActivePath().push_back(piece);
+ D->setEndOfPath(piece);
}
// Get the meta data.
@@ -2124,7 +2309,7 @@ void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
BugReport *R = new BugReport(*BT, str, Loc);
R->setDeclWithIssue(DeclWithIssue);
for ( ; NumRanges > 0 ; --NumRanges, ++RBeg) R->addRange(*RBeg);
- EmitReport(R);
+ emitReport(R);
}
BugType *BugReporter::getBugTypeForName(StringRef name,
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index e729587..328e8a6 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -17,10 +17,13 @@
#include "clang/AST/ExprObjC.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace ento;
@@ -29,10 +32,24 @@ using namespace ento;
// Utility functions.
//===----------------------------------------------------------------------===//
+bool bugreporter::isDeclRefExprToReference(const Expr *E) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ return DRE->getDecl()->getType()->isReferenceType();
+ }
+ return false;
+}
+
const Stmt *bugreporter::GetDerefExpr(const ExplodedNode *N) {
// Pattern match for a few useful cases (do something smarter later):
// a[0], p->f, *p
- const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ const PostStmt *Loc = N->getLocationAs<PostStmt>();
+ if (!Loc)
+ return 0;
+
+ const Expr *S = dyn_cast<Expr>(Loc->getStmt());
+ if (!S)
+ return 0;
+ S = S->IgnoreParenCasts();
while (true) {
if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S)) {
@@ -45,7 +62,12 @@ const Stmt *bugreporter::GetDerefExpr(const ExplodedNode *N) {
return U->getSubExpr()->IgnoreParenCasts();
}
else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
- return ME->getBase()->IgnoreParenCasts();
+ if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) {
+ return ME->getBase()->IgnoreParenCasts();
+ }
+ }
+ else if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(S)) {
+ return IvarRef->getBase()->IgnoreParenCasts();
}
else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) {
return AE->getBase();
@@ -103,6 +125,228 @@ BugReporterVisitor::getDefaultEndPath(BugReporterContext &BRC,
}
+namespace {
+/// Emits an extra note at the return statement of an interesting stack frame.
+///
+/// The returned value is marked as interesting, and if it is null, a
+/// visitor is added to track where it became null.
+///
+/// This visitor is intended to be used when another visitor discovers that an
+/// interesting value comes from an inlined function call.
+class ReturnVisitor : public BugReporterVisitorImpl<ReturnVisitor> {
+ const StackFrameContext *StackFrame;
+ enum {
+ Initial,
+ MaybeSuppress,
+ Satisfied
+ } Mode;
+
+public:
+ ReturnVisitor(const StackFrameContext *Frame)
+ : StackFrame(Frame), Mode(Initial) {}
+
+ static void *getTag() {
+ static int Tag = 0;
+ return static_cast<void *>(&Tag);
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(ReturnVisitor::getTag());
+ ID.AddPointer(StackFrame);
+ }
+
+ /// Adds a ReturnVisitor if the given statement represents a call that was
+ /// inlined.
+ ///
+ /// This will search back through the ExplodedGraph, starting from the given
+ /// node, looking for when the given statement was processed. If it turns out
+ /// the statement is a call that was inlined, we add the visitor to the
+ /// bug report, so it can print a note later.
+ static void addVisitorIfNecessary(const ExplodedNode *Node, const Stmt *S,
+ BugReport &BR) {
+ if (!CallEvent::isCallStmt(S))
+ return;
+
+ // First, find when we processed the statement.
+ do {
+ if (const CallExitEnd *CEE = Node->getLocationAs<CallExitEnd>())
+ if (CEE->getCalleeContext()->getCallSite() == S)
+ break;
+ if (const StmtPoint *SP = Node->getLocationAs<StmtPoint>())
+ if (SP->getStmt() == S)
+ break;
+
+ Node = Node->getFirstPred();
+ } while (Node);
+
+ // Next, step over any post-statement checks.
+ while (Node && isa<PostStmt>(Node->getLocation()))
+ Node = Node->getFirstPred();
+
+ // Finally, see if we inlined the call.
+ if (Node) {
+ if (const CallExitEnd *CEE = Node->getLocationAs<CallExitEnd>()) {
+ const StackFrameContext *CalleeContext = CEE->getCalleeContext();
+ if (CalleeContext->getCallSite() == S) {
+ BR.markInteresting(CalleeContext);
+ BR.addVisitor(new ReturnVisitor(CalleeContext));
+ }
+ }
+ }
+ }
+
+ /// Returns true if any counter-suppression heuristics are enabled for
+ /// ReturnVisitor.
+ static bool hasCounterSuppression(AnalyzerOptions &Options) {
+ return Options.shouldAvoidSuppressingNullArgumentPaths();
+ }
+
+ PathDiagnosticPiece *visitNodeInitial(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ // Only print a message at the interesting return statement.
+ if (N->getLocationContext() != StackFrame)
+ return 0;
+
+ const StmtPoint *SP = N->getLocationAs<StmtPoint>();
+ if (!SP)
+ return 0;
+
+ const ReturnStmt *Ret = dyn_cast<ReturnStmt>(SP->getStmt());
+ if (!Ret)
+ return 0;
+
+ // Okay, we're at the right return statement, but do we have the return
+ // value available?
+ ProgramStateRef State = N->getState();
+ SVal V = State->getSVal(Ret, StackFrame);
+ if (V.isUnknownOrUndef())
+ return 0;
+
+ // Don't print any more notes after this one.
+ Mode = Satisfied;
+
+ const Expr *RetE = Ret->getRetValue();
+ assert(RetE && "Tracking a return value for a void function");
+ RetE = RetE->IgnoreParenCasts();
+
+ // If we can't prove the return value is 0, just mark it interesting, and
+ // make sure to track it into any further inner functions.
+ if (State->assume(cast<DefinedSVal>(V), true)) {
+ BR.markInteresting(V);
+ ReturnVisitor::addVisitorIfNecessary(N, RetE, BR);
+ return 0;
+ }
+
+ // If we're returning 0, we should track where that 0 came from.
+ bugreporter::trackNullOrUndefValue(N, RetE, BR);
+
+ // Build an appropriate message based on the return value.
+ SmallString<64> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+
+ if (isa<Loc>(V)) {
+ // If we are pruning null-return paths as unlikely error paths, mark the
+ // report invalid. We still want to emit a path note, however, in case
+ // the report is resurrected as valid later on.
+ ExprEngine &Eng = BRC.getBugReporter().getEngine();
+ AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ if (Options.shouldPruneNullReturnPaths()) {
+ if (hasCounterSuppression(Options))
+ Mode = MaybeSuppress;
+ else
+ BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
+ }
+
+ if (RetE->getType()->isObjCObjectPointerType())
+ Out << "Returning nil";
+ else
+ Out << "Returning null pointer";
+ } else {
+ Out << "Returning zero";
+ }
+
+ // FIXME: We should have a more generalized location printing mechanism.
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(RetE))
+ if (const DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(DR->getDecl()))
+ Out << " (loaded from '" << *DD << "')";
+
+ PathDiagnosticLocation L(Ret, BRC.getSourceManager(), StackFrame);
+ return new PathDiagnosticEventPiece(L, Out.str());
+ }
+
+ PathDiagnosticPiece *visitNodeMaybeSuppress(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ // Are we at the entry node for this call?
+ const CallEnter *CE = N->getLocationAs<CallEnter>();
+ if (!CE)
+ return 0;
+
+ if (CE->getCalleeContext() != StackFrame)
+ return 0;
+
+ Mode = Satisfied;
+
+ ExprEngine &Eng = BRC.getBugReporter().getEngine();
+ AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ if (Options.shouldAvoidSuppressingNullArgumentPaths()) {
+ // Don't automatically suppress a report if one of the arguments is
+ // known to be a null pointer. Instead, start tracking /that/ null
+ // value back to its origin.
+ ProgramStateManager &StateMgr = BRC.getStateManager();
+ CallEventManager &CallMgr = StateMgr.getCallEventManager();
+
+ ProgramStateRef State = N->getState();
+ CallEventRef<> Call = CallMgr.getCaller(StackFrame, State);
+ for (unsigned I = 0, E = Call->getNumArgs(); I != E; ++I) {
+ SVal ArgV = Call->getArgSVal(I);
+ if (!isa<Loc>(ArgV))
+ continue;
+
+ const Expr *ArgE = Call->getArgExpr(I);
+ if (!ArgE)
+ continue;
+
+ // Is it possible for this argument to be non-null?
+ if (State->assume(cast<Loc>(ArgV), true))
+ continue;
+
+ if (bugreporter::trackNullOrUndefValue(N, ArgE, BR, /*IsArg=*/true))
+ return 0;
+
+ // If we /can't/ track the null pointer, we should err on the side of
+ // false negatives, and continue towards marking this report invalid.
+ // (We will still look at the other arguments, though.)
+ }
+ }
+
+ // There is no reason not to suppress this report; go ahead and do it.
+ BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
+ return 0;
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ switch (Mode) {
+ case Initial:
+ return visitNodeInitial(N, PrevN, BRC, BR);
+ case MaybeSuppress:
+ return visitNodeMaybeSuppress(N, PrevN, BRC, BR);
+ case Satisfied:
+ return 0;
+ }
+
+ llvm_unreachable("Invalid visit mode!");
+ }
+};
+} // end anonymous namespace
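
ReturnVisitor is a small state machine: Initial emits the note at the interesting return statement, MaybeSuppress defers the suppression decision until the call-entry node, and Satisfied turns every later visit into a no-op. The three-phase shape, detached from the ExplodedGraph:

// Illustrative sketch only: the mode-dispatch pattern from ReturnVisitor,
// with bools standing in for the real graph queries.
#include <iostream>

class ModalVisitor {
  enum { Initial, MaybeSuppress, Satisfied } Mode = Initial;

  const char *visitInitial(bool SawNullReturn) {
    if (!SawNullReturn)
      return nullptr;
    Mode = MaybeSuppress; // defer the suppression decision
    return "Returning null pointer";
  }

  const char *visitMaybeSuppress(bool NullArgument) {
    Mode = Satisfied;
    if (NullArgument)
      return nullptr; // counter-suppression: keep the report alive
    std::cout << "(report marked invalid)\n";
    return nullptr;
  }

public:
  const char *visit(bool SawNullReturn, bool NullArgument) {
    switch (Mode) {
    case Initial:       return visitInitial(SawNullReturn);
    case MaybeSuppress: return visitMaybeSuppress(NullArgument);
    case Satisfied:     return nullptr;
    }
    return nullptr;
  }
};

int main() {
  ModalVisitor V;
  if (const char *Msg = V.visit(true, false))
    std::cout << Msg << "\n";
  V.visit(false, false); // second call lands in MaybeSuppress
}
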
+
void FindLastStoreBRVisitor ::Profile(llvm::FoldingSetNodeID &ID) const {
static int tag = 0;
ID.AddPointer(&tag);
@@ -110,59 +354,90 @@ void FindLastStoreBRVisitor ::Profile(llvm::FoldingSetNodeID &ID) const {
ID.Add(V);
}
-PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) {
+PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
+ const ExplodedNode *Pred,
+ BugReporterContext &BRC,
+ BugReport &BR) {
if (satisfied)
return NULL;
- if (!StoreSite) {
- // Make sure the region is actually bound to value V here.
- // This is necessary because the region may not actually be live at the
- // report's error node.
- if (N->getState()->getSVal(R) != V)
- return NULL;
-
- const ExplodedNode *Node = N, *Last = N;
-
- // Now look for the store of V.
- for ( ; Node ; Node = Node->getFirstPred()) {
- if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- if (const PostStmt *P = Node->getLocationAs<PostStmt>())
- if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
- if (DS->getSingleDecl() == VR->getDecl()) {
- // Record the last seen initialization point.
- Last = Node;
- break;
- }
+ const ExplodedNode *StoreSite = 0;
+ const Expr *InitE = 0;
+ bool IsParam = false;
+
+ // First see if we reached the declaration of the region.
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ if (const PostStmt *P = Pred->getLocationAs<PostStmt>()) {
+ if (const DeclStmt *DS = P->getStmtAs<DeclStmt>()) {
+ if (DS->getSingleDecl() == VR->getDecl()) {
+ StoreSite = Pred;
+ InitE = VR->getDecl()->getInit();
+ }
}
-
- // Does the region still bind to value V? If not, we are done
- // looking for store sites.
- if (Node->getState()->getSVal(R) != V)
- break;
-
- Last = Node;
}
+ }
- if (!Node) {
- satisfied = true;
+ // Otherwise, check that Succ has this binding and Pred does not, i.e. this is
+ // where the binding first occurred.
+ if (!StoreSite) {
+ if (Succ->getState()->getSVal(R) != V)
+ return NULL;
+ if (Pred->getState()->getSVal(R) == V)
return NULL;
- }
- StoreSite = Last;
+ StoreSite = Succ;
+
+ // If this is an assignment expression, we can track the value
+ // being assigned.
+ if (const PostStmt *P = Succ->getLocationAs<PostStmt>())
+ if (const BinaryOperator *BO = P->getStmtAs<BinaryOperator>())
+ if (BO->isAssignmentOp())
+ InitE = BO->getRHS();
+
+ // If this is a call entry, the variable should be a parameter.
+ // FIXME: Handle CXXThisRegion as well. (This is not a priority because
+ // 'this' should never be NULL, but this visitor isn't just for NULL and
+ // UndefinedVal.)
+ if (const CallEnter *CE = Succ->getLocationAs<CallEnter>()) {
+ const VarRegion *VR = cast<VarRegion>(R);
+ const ParmVarDecl *Param = cast<ParmVarDecl>(VR->getDecl());
+
+ ProgramStateManager &StateMgr = BRC.getStateManager();
+ CallEventManager &CallMgr = StateMgr.getCallEventManager();
+
+ CallEventRef<> Call = CallMgr.getCaller(CE->getCalleeContext(),
+ Succ->getState());
+ InitE = Call->getArgExpr(Param->getFunctionScopeIndex());
+ IsParam = true;
+ }
}
- if (StoreSite != N)
+ if (!StoreSite)
return NULL;
-
satisfied = true;
+
+ // If we have an expression that provided the value, try to track where it
+ // came from.
+ if (InitE) {
+ if (V.isUndef() || isa<loc::ConcreteInt>(V)) {
+ if (!IsParam)
+ InitE = InitE->IgnoreParenCasts();
+ bugreporter::trackNullOrUndefValue(StoreSite, InitE, BR, IsParam);
+ } else {
+ ReturnVisitor::addVisitorIfNecessary(StoreSite, InitE->IgnoreParenCasts(),
+ BR);
+ }
+ }
+
+ if (!R->canPrintPretty())
+ return 0;
+
+ // Okay, we've found the binding. Emit an appropriate message.
SmallString<256> sbuf;
llvm::raw_svector_ostream os(sbuf);
- if (const PostStmt *PS = N->getLocationAs<PostStmt>()) {
+ if (const PostStmt *PS = StoreSite->getLocationAs<PostStmt>()) {
if (const DeclStmt *DS = PS->getStmtAs<DeclStmt>()) {
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
@@ -201,6 +476,30 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
os << "initialized here";
}
}
+ } else if (isa<CallEnter>(StoreSite->getLocation())) {
+ const ParmVarDecl *Param = cast<ParmVarDecl>(cast<VarRegion>(R)->getDecl());
+
+ os << "Passing ";
+
+ if (isa<loc::ConcreteInt>(V)) {
+ if (Param->getType()->isObjCObjectPointerType())
+ os << "nil object reference";
+ else
+ os << "null pointer value";
+ } else if (V.isUndef()) {
+ os << "uninitialized value";
+ } else if (isa<nonloc::ConcreteInt>(V)) {
+ os << "the value " << cast<nonloc::ConcreteInt>(V).getValue();
+ } else {
+ os << "value";
+ }
+
+ // Printed parameter indexes are 1-based, not 0-based.
+ unsigned Idx = Param->getFunctionScopeIndex() + 1;
+ os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter '";
+
+ R->printPretty(os);
+ os << '\'';
}
if (os.str().empty()) {
@@ -228,17 +527,19 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
else
os << "Value assigned to ";
- if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- os << '\'' << *VR->getDecl() << '\'';
- }
- else
- return NULL;
+ os << '\'';
+ R->printPretty(os);
+ os << '\'';
}
// Construct a new PathDiagnosticPiece.
- ProgramPoint P = N->getLocation();
- PathDiagnosticLocation L =
- PathDiagnosticLocation::create(P, BRC.getSourceManager());
+ ProgramPoint P = StoreSite->getLocation();
+ PathDiagnosticLocation L;
+ if (isa<CallEnter>(P))
+ L = PathDiagnosticLocation(InitE, BRC.getSourceManager(),
+ P.getLocationContext());
+ else
+ L = PathDiagnosticLocation::create(P, BRC.getSourceManager());
if (!L.isValid())
return NULL;
return new PathDiagnosticEventPiece(L, os.str());
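
The rewritten FindLastStoreBRVisitor::VisitNode identifies the store site structurally: it is the first node whose state carries the binding while its predecessor's state does not. The test in miniature:

// Illustrative sketch only: the store-site test on a toy two-node window,
// with a plain int standing in for the bound SVal.
#include <cassert>

struct State { int BoundValue; };

static bool isStoreSite(const State &Pred, const State &Succ, int V) {
  return Succ.BoundValue == V && Pred.BoundValue != V;
}

int main() {
  State Before{0}, After{42};
  assert(isStoreSite(Before, After, 42));
  assert(!isStoreSite(After, After, 42)); // binding already present earlier
}
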
@@ -251,6 +552,12 @@ void TrackConstraintBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
ID.Add(Constraint);
}
+/// Return the tag associated with this visitor. This tag will be used
+/// to mark all PathDiagnosticPieces created by this visitor.
+const char *TrackConstraintBRVisitor::getTag() {
+ return "TrackConstraintBRVisitor";
+}
+
PathDiagnosticPiece *
TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
@@ -290,62 +597,97 @@ TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
PathDiagnosticLocation::create(P, BRC.getSourceManager());
if (!L.isValid())
return NULL;
- return new PathDiagnosticEventPiece(L, os.str());
+
+ PathDiagnosticEventPiece *X = new PathDiagnosticEventPiece(L, os.str());
+ X->setTag(getTag());
+ return X;
}
return NULL;
}
-void bugreporter::addTrackNullOrUndefValueVisitor(const ExplodedNode *N,
- const Stmt *S,
- BugReport *report) {
+bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N, const Stmt *S,
+ BugReport &report, bool IsArg) {
if (!S || !N)
- return;
+ return false;
- ProgramStateManager &StateMgr = N->getState()->getStateManager();
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S))
+ S = OVE->getSourceExpr();
+
+ if (IsArg) {
+ assert(isa<CallEnter>(N->getLocation()) && "Tracking arg but not at call");
+ } else {
+ // Walk through nodes until we get one that matches the statement exactly.
+ do {
+ const ProgramPoint &pp = N->getLocation();
+ if (const PostStmt *ps = dyn_cast<PostStmt>(&pp)) {
+ if (ps->getStmt() == S)
+ break;
+ } else if (const CallExitEnd *CEE = dyn_cast<CallExitEnd>(&pp)) {
+ if (CEE->getCalleeContext()->getCallSite() == S)
+ break;
+ }
+ N = N->getFirstPred();
+ } while (N);
- // Walk through nodes until we get one that matches the statement
- // exactly.
- while (N) {
- const ProgramPoint &pp = N->getLocation();
- if (const PostStmt *ps = dyn_cast<PostStmt>(&pp)) {
- if (ps->getStmt() == S)
- break;
- }
- N = N->getFirstPred();
+ if (!N)
+ return false;
}
-
- if (!N)
- return;
ProgramStateRef state = N->getState();
- // Walk through lvalue-to-rvalue conversions.
- const Expr *Ex = dyn_cast<Expr>(S);
- if (Ex) {
+  // See if the expression we're interested in refers to a variable.
+ // If so, we can track both its contents and constraints on its value.
+ if (const Expr *Ex = dyn_cast<Expr>(S)) {
+ // Strip off parens and casts. Note that this will never have issues with
+ // C++ user-defined implicit conversions, because those have a constructor
+ // or function call inside.
Ex = Ex->IgnoreParenCasts();
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
+ // FIXME: Right now we only track VarDecls because it's non-trivial to
+ // get a MemRegion for any other DeclRefExprs. <rdar://problem/12114812>
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- const VarRegion *R =
- StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+ ProgramStateManager &StateMgr = state->getStateManager();
+ MemRegionManager &MRMgr = StateMgr.getRegionManager();
+ const VarRegion *R = MRMgr.getVarRegion(VD, N->getLocationContext());
- // What did we load?
+ // Mark both the variable region and its contents as interesting.
SVal V = state->getRawSVal(loc::MemRegionVal(R));
- report->markInteresting(R);
- report->markInteresting(V);
+ // If the value matches the default for the variable region, that
+ // might mean that it's been cleared out of the state. Fall back to
+ // the full argument expression (with casts and such intact).
+ if (IsArg) {
+ bool UseArgValue = V.isUnknownOrUndef() || V.isZeroConstant();
+ if (!UseArgValue) {
+ const SymbolRegionValue *SRV =
+ dyn_cast_or_null<SymbolRegionValue>(V.getAsLocSymbol());
+ if (SRV)
+ UseArgValue = (SRV->getRegion() == R);
+ }
+ if (UseArgValue)
+ V = state->getSValAsScalarOrLoc(S, N->getLocationContext());
+ }
+
+ report.markInteresting(R);
+ report.markInteresting(V);
+ report.addVisitor(new UndefOrNullArgVisitor(R));
+
+ // If the contents are symbolic, find out when they became null.
if (V.getAsLocSymbol()) {
BugReporterVisitor *ConstraintTracker
- = new TrackConstraintBRVisitor(cast<loc::MemRegionVal>(V), false);
- report->addVisitor(ConstraintTracker);
+ = new TrackConstraintBRVisitor(cast<DefinedSVal>(V), false);
+ report.addVisitor(ConstraintTracker);
}
- report->addVisitor(new FindLastStoreBRVisitor(V, R));
- return;
+ report.addVisitor(new FindLastStoreBRVisitor(V, R));
+ return true;
}
}
}
+ // If the expression does NOT refer to a variable, we can still track
+ // constraints on its contents.
SVal V = state->getSValAsScalarOrLoc(S, N->getLocationContext());
// Uncomment this to find cases where we aren't properly getting the
@@ -354,17 +696,27 @@ void bugreporter::addTrackNullOrUndefValueVisitor(const ExplodedNode *N,
// Is it a symbolic value?
if (loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&V)) {
- const SubRegion *R = cast<SubRegion>(L->getRegion());
- while (R && !isa<SymbolicRegion>(R)) {
- R = dyn_cast<SubRegion>(R->getSuperRegion());
- }
+ // At this point we are dealing with the region's LValue.
+ // However, if the rvalue is a symbolic region, we should track it as well.
+ SVal RVal = state->getSVal(L->getRegion());
+ const MemRegion *RegionRVal = RVal.getAsRegion();
+ report.addVisitor(new UndefOrNullArgVisitor(L->getRegion()));
+
- if (R) {
- report->markInteresting(R);
- report->addVisitor(new TrackConstraintBRVisitor(loc::MemRegionVal(R),
- false));
+ if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
+ report.markInteresting(RegionRVal);
+ report.addVisitor(new TrackConstraintBRVisitor(
+ loc::MemRegionVal(RegionRVal), false));
}
+ } else {
+ // Otherwise, if the value came from an inlined function call,
+ // we should at least make sure that function isn't pruned in our output.
+ if (const Expr *E = dyn_cast<Expr>(S))
+ S = E->IgnoreParenCasts();
+ ReturnVisitor::addVisitorIfNecessary(N, S, report);
}
+
+ return true;
}
BugReporterVisitor *
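The predecessor walk at the top of trackNullOrUndefValue is the load-bearing step: the exploded graph is searched backwards until a node whose program point matches the tracked statement (or the call site of a CallExitEnd) is found. A minimal standalone sketch of that walk, with Node and findNodeFor as illustrative stand-ins rather than analyzer classes:

    struct Node {
      const void *Stmt;  // statement attached to this node's program point
      Node *Pred;        // single predecessor along this path
    };

    // Walk back through the path until a node matching S is found;
    // returns null if S was never visited on this path.
    const Node *findNodeFor(const Node *N, const void *S) {
      while (N && N->Stmt != S)
        N = N->Pred;
      return N;
    }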
@@ -406,7 +758,7 @@ PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
// The receiver was nil, and hence the method was skipped.
// Register a BugReporterVisitor to issue a message telling us how
// the receiver was null.
- bugreporter::addTrackNullOrUndefValueVisitor(N, Receiver, &BR);
+ bugreporter::trackNullOrUndefValue(N, Receiver, BR);
// Issue a message saying that the method was skipped.
PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
N->getLocationContext());
@@ -452,14 +804,23 @@ void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
//===----------------------------------------------------------------------===//
// Visitor that tries to report interesting diagnostics from conditions.
//===----------------------------------------------------------------------===//
+
+/// Return the tag associated with this visitor. This tag will be used
+/// to mark all PathDiagnosticPieces created by this visitor.
+const char *ConditionBRVisitor::getTag() {
+ return "ConditionBRVisitor";
+}
+
PathDiagnosticPiece *ConditionBRVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *Prev,
BugReporterContext &BRC,
BugReport &BR) {
PathDiagnosticPiece *piece = VisitNodeImpl(N, Prev, BRC, BR);
- if (PathDiagnosticEventPiece *ev =
- dyn_cast_or_null<PathDiagnosticEventPiece>(piece))
- ev->setPrunable(true, /* override */ false);
+ if (piece) {
+ piece->setTag(getTag());
+ if (PathDiagnosticEventPiece *ev=dyn_cast<PathDiagnosticEventPiece>(piece))
+ ev->setPrunable(true, /* override */ false);
+ }
return piece;
}
@@ -468,8 +829,7 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
BugReporterContext &BRC,
BugReport &BR) {
- const ProgramPoint &progPoint = N->getLocation();
-
+ ProgramPoint progPoint = N->getLocation();
ProgramStateRef CurrentState = N->getState();
ProgramStateRef PrevState = Prev->getState();
@@ -494,7 +854,7 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
// violation.
const std::pair<const ProgramPointTag *, const ProgramPointTag *> &tags =
cast<GRBugReporter>(BRC.getBugReporter()).
- getEngine().getEagerlyAssumeTags();
+ getEngine().geteagerlyAssumeBinOpBifurcationTags();
const ProgramPointTag *tag = PS->getTag();
if (tag == tags.first)
@@ -533,8 +893,7 @@ ConditionBRVisitor::VisitTerminator(const Stmt *Term,
assert(Cond);
assert(srcBlk->succ_size() == 2);
const bool tookTrue = *(srcBlk->succ_begin()) == dstBlk;
- return VisitTrueTest(Cond->IgnoreParenNoopCasts(BRC.getASTContext()),
- tookTrue, BRC, R, N);
+ return VisitTrueTest(Cond, tookTrue, BRC, R, N);
}
PathDiagnosticPiece *
@@ -547,7 +906,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
const Expr *Ex = Cond;
while (true) {
- Ex = Ex->IgnoreParens();
+ Ex = Ex->IgnoreParenCasts();
switch (Ex->getStmtClass()) {
default:
return 0;
@@ -561,7 +920,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
const UnaryOperator *UO = cast<UnaryOperator>(Ex);
if (UO->getOpcode() == UO_LNot) {
tookTrue = !tookTrue;
- Ex = UO->getSubExpr()->IgnoreParenNoopCasts(BRC.getASTContext());
+ Ex = UO->getSubExpr();
continue;
}
return 0;
@@ -802,3 +1161,54 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
return event;
}
+PathDiagnosticPiece *
+UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+
+ ProgramStateRef State = N->getState();
+ ProgramPoint ProgLoc = N->getLocation();
+
+ // We are only interested in visiting CallEnter nodes.
+ CallEnter *CEnter = dyn_cast<CallEnter>(&ProgLoc);
+ if (!CEnter)
+ return 0;
+
+ // Check if one of the arguments is the region the visitor is tracking.
+ CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
+ CallEventRef<> Call = CEMgr.getCaller(CEnter->getCalleeContext(), State);
+ unsigned Idx = 0;
+ for (CallEvent::param_iterator I = Call->param_begin(),
+ E = Call->param_end(); I != E; ++I, ++Idx) {
+ const MemRegion *ArgReg = Call->getArgSVal(Idx).getAsRegion();
+
+ // Are we tracking the argument or its subregion?
+ if (!ArgReg || (ArgReg != R && !R->isSubRegionOf(ArgReg->StripCasts())))
+ continue;
+
+ // Check the function parameter type.
+ const ParmVarDecl *ParamDecl = *I;
+ assert(ParamDecl && "Formal parameter has no decl?");
+ QualType T = ParamDecl->getType();
+
+ if (!(T->isAnyPointerType() || T->isReferenceType())) {
+ // The function can only change a value that is passed in by address.
+ continue;
+ }
+
+ // If it is a const pointer value, the function does not intend to
+ // change the value.
+ if (T->getPointeeType().isConstQualified())
+ continue;
+
+ // Mark the call site (LocationContext) as interesting if the value of the
+ // argument is undefined or '0'/'NULL'.
+ SVal BoundVal = State->getSVal(R);
+ if (BoundVal.isUndef() || BoundVal.isZeroConstant()) {
+ BR.markInteresting(CEnter->getCalleeContext());
+ return 0;
+ }
+ }
+ return 0;
+}
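To make UndefOrNullArgVisitor's effect concrete, here is a plausible source-level case (reset and test are hypothetical names, not from this patch): while walking back through CallEnter nodes, the visitor notices that q's region is passed by non-const address into a callee that stores null through it, and marks that call's LocationContext as interesting so the inlined callee is not pruned from the report.

    void reset(int **p) { *p = 0; }  // stores null through its argument

    void test() {
      int *q;
      reset(&q);  // this call stays in the path: q becomes null inside
      *q = 1;     // null dereference reported here
    }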
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 5345bd5..c5cb317 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/Analysis/ProgramPoint.h"
#include "clang/AST/ParentMap.h"
#include "llvm/ADT/SmallSet.h"
@@ -23,10 +24,26 @@ using namespace clang;
using namespace ento;
QualType CallEvent::getResultType() const {
- QualType ResultTy = getDeclaredResultType();
+ const Expr *E = getOriginExpr();
+ assert(E && "Calls without origin expressions do not have results");
+ QualType ResultTy = E->getType();
- if (ResultTy.isNull())
- ResultTy = getOriginExpr()->getType();
+ ASTContext &Ctx = getState()->getStateManager().getContext();
+
+ // A function that returns a reference to 'int' will have a result type
+ // of simply 'int'. Check the origin expr's value kind to recover the
+ // proper type.
+ switch (E->getValueKind()) {
+ case VK_LValue:
+ ResultTy = Ctx.getLValueReferenceType(ResultTy);
+ break;
+ case VK_XValue:
+ ResultTy = Ctx.getRValueReferenceType(ResultTy);
+ break;
+ case VK_RValue:
+ // No adjustment is necessary.
+ break;
+ }
return ResultTy;
}
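The value-kind switch compensates for a standard AST quirk: a call to a reference-returning function is an lvalue of the non-reference type, so the reference must be re-derived from the (type, value kind) pair. A small illustration, with f and use as hypothetical names:

    int  g;
    int &f() { return g; }
    // The CallExpr 'f()' has type 'int' with value kind VK_LValue;
    // combining the two recovers the declared result type 'int &'.
    void use() { f() = 42; }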
@@ -45,7 +62,7 @@ static bool isCallbackArg(SVal V, QualType T) {
// Check if a callback is passed inside a struct (both for structs passed by
// reference and by value). Dig just one level into the struct for now.
- if (isa<PointerType>(T) || isa<ReferenceType>(T))
+ if (T->isAnyPointerType() || T->isReferenceType())
T = T->getPointeeType();
if (const RecordType *RT = T->getAsStructureType()) {
@@ -83,6 +100,14 @@ bool CallEvent::hasNonZeroCallbackArg() const {
return false;
}
+bool CallEvent::isGlobalCFunction(StringRef FunctionName) const {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(getDecl());
+ if (!FD)
+ return false;
+
+ return CheckerContext::isCLibraryFunction(FD, FunctionName);
+}
+
/// \brief Returns true if a type is a pointer-to-const or reference-to-const
/// with no further indirection.
static bool isPointerToConst(QualType Ty) {
@@ -207,6 +232,13 @@ SourceRange CallEvent::getArgSourceRange(unsigned Index) const {
return ArgE->getSourceRange();
}
+SVal CallEvent::getReturnValue() const {
+ const Expr *E = getOriginExpr();
+ if (!E)
+ return UndefinedVal();
+ return getSVal(E);
+}
+
void CallEvent::dump() const {
dump(llvm::errs());
}
@@ -230,10 +262,20 @@ void CallEvent::dump(raw_ostream &Out) const {
}
-bool CallEvent::mayBeInlined(const Stmt *S) {
- // FIXME: Kill this.
+bool CallEvent::isCallStmt(const Stmt *S) {
return isa<CallExpr>(S) || isa<ObjCMessageExpr>(S)
- || isa<CXXConstructExpr>(S);
+ || isa<CXXConstructExpr>(S)
+ || isa<CXXNewExpr>(S);
+}
+
+/// \brief Returns the result type, adjusted for references.
+QualType CallEvent::getDeclaredResultType(const Decl *D) {
+ assert(D);
+ if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D))
+ return FD->getResultType();
+ else if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getResultType();
+ return QualType();
}
static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
@@ -285,14 +327,6 @@ void AnyFunctionCall::getInitialStackFrameContents(
D->param_begin(), D->param_end());
}
-QualType AnyFunctionCall::getDeclaredResultType() const {
- const FunctionDecl *D = getDecl();
- if (!D)
- return QualType();
-
- return D->getResultType();
-}
-
bool AnyFunctionCall::argumentsMayEscape() const {
if (hasNonZeroCallbackArg())
return true;
@@ -303,7 +337,7 @@ bool AnyFunctionCall::argumentsMayEscape() const {
const IdentifierInfo *II = D->getIdentifier();
if (!II)
- return true;
+ return false;
// This set of "escaping" APIs is
@@ -376,6 +410,17 @@ void CXXInstanceCall::getExtraInvalidatedRegions(RegionList &Regions) const {
Regions.push_back(R);
}
+SVal CXXInstanceCall::getCXXThisVal() const {
+ const Expr *Base = getCXXThisExpr();
+ // FIXME: This doesn't handle an overloaded ->* operator.
+ if (!Base)
+ return UnknownVal();
+
+ SVal ThisVal = getSVal(Base);
+ assert(ThisVal.isUnknownOrUndef() || isa<Loc>(ThisVal));
+ return ThisVal;
+}
+
RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
// Do we have a decl at all?
@@ -400,13 +445,30 @@ RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
// Is the type a C++ class? (This is mostly a defensive check.)
QualType RegionType = DynType.getType()->getPointeeType();
+ assert(!RegionType.isNull() && "DynamicTypeInfo should always be a pointer.");
+
const CXXRecordDecl *RD = RegionType->getAsCXXRecordDecl();
if (!RD || !RD->hasDefinition())
return RuntimeDefinition();
// Find the decl for this method in that class.
const CXXMethodDecl *Result = MD->getCorrespondingMethodInClass(RD, true);
- assert(Result && "At the very least the static decl should show up.");
+ if (!Result) {
+ // We might not even get the original statically-resolved method due to
+ // some particularly nasty casting (e.g. casts to sister classes).
+ // However, we should at least be able to search up and down our own class
+ // hierarchy, and some real bugs have been caught by checking this.
+ assert(!RD->isDerivedFrom(MD->getParent()) && "Couldn't find known method");
+
+ // FIXME: This is checking that our DynamicTypeInfo is at least as good as
+ // the static type. However, because we currently don't update
+ // DynamicTypeInfo when an object is cast, we can't actually be sure the
+ // DynamicTypeInfo is up to date. This assert should be re-enabled once
+ // this is fixed. <rdar://problem/12287087>
+ //assert(!MD->getParent()->isDerivedFrom(RD) && "Bad DynamicTypeInfo");
+
+ return RuntimeDefinition();
+ }
// Does the decl that we found have an implementation?
const FunctionDecl *Definition;
@@ -459,6 +521,18 @@ const Expr *CXXMemberCall::getCXXThisExpr() const {
return getOriginExpr()->getImplicitObjectArgument();
}
+RuntimeDefinition CXXMemberCall::getRuntimeDefinition() const {
+ // C++11 [expr.call]p1: ...If the selected function is non-virtual, or if the
+ // id-expression in the class member access expression is a qualified-id,
+ // that function is called. Otherwise, its final overrider in the dynamic type
+ // of the object expression is called.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(getOriginExpr()->getCallee()))
+ if (ME->hasQualifier())
+ return AnyFunctionCall::getRuntimeDefinition();
+
+ return CXXInstanceCall::getRuntimeDefinition();
+}
+
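The C++11 wording quoted above is exactly what the two call forms below exercise (Base and Derived are hypothetical); the new override skips devirtualization only for the second, qualified form:

    struct Base    { virtual void f() {} };
    struct Derived : Base { void f() {} };

    void call(Derived &D) {
      D.f();        // virtual: dispatches to the final overrider, Derived::f
      D.Base::f();  // qualified-id: Base::f is called non-virtually
    }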
const Expr *CXXMemberOperatorCall::getCXXThisExpr() const {
return getOriginExpr()->getArg(0);
@@ -501,15 +575,6 @@ void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
}
-QualType BlockCall::getDeclaredResultType() const {
- const BlockDataRegion *BR = getBlockRegion();
- if (!BR)
- return QualType();
- QualType BlockTy = BR->getCodeRegion()->getLocationType();
- return cast<FunctionType>(BlockTy->getPointeeType())->getResultType();
-}
-
-
SVal CXXConstructorCall::getCXXThisVal() const {
if (Data)
return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
@@ -539,10 +604,19 @@ void CXXConstructorCall::getInitialStackFrameContents(
SVal CXXDestructorCall::getCXXThisVal() const {
if (Data)
- return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
+ return loc::MemRegionVal(DtorDataTy::getFromOpaqueValue(Data).getPointer());
return UnknownVal();
}
+RuntimeDefinition CXXDestructorCall::getRuntimeDefinition() const {
+ // Base destructors are always called non-virtually.
+ // Skip CXXInstanceCall's devirtualization logic in this case.
+ if (isBaseDestructor())
+ return AnyFunctionCall::getRuntimeDefinition();
+
+ return CXXInstanceCall::getRuntimeDefinition();
+}
+
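Why base destructors must bypass the usual devirtualization: while the derived destructor is running, it invokes each base subobject's destructor directly, and resolving that inner call through the region's dynamic type would wrongly land back on the derived destructor. A sketch under hypothetical class names:

    struct Base    { virtual ~Base() {} };
    struct Derived : Base { ~Derived() {} };
    // Destroying a Derived runs ~Derived(), which then calls ~Base()
    // non-virtually on the base subobject; dispatching that call by
    // dynamic type would incorrectly select ~Derived() again.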
CallEvent::param_iterator ObjCMethodCall::param_begin() const {
const ObjCMethodDecl *D = getDecl();
@@ -566,12 +640,12 @@ ObjCMethodCall::getExtraInvalidatedRegions(RegionList &Regions) const {
Regions.push_back(R);
}
-QualType ObjCMethodCall::getDeclaredResultType() const {
- const ObjCMethodDecl *D = getDecl();
- if (!D)
- return QualType();
-
- return D->getResultType();
+SVal ObjCMethodCall::getSelfSVal() const {
+ const LocationContext *LCtx = getLocationContext();
+ const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
+ if (!SelfDecl)
+ return SVal();
+ return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx));
}
SVal ObjCMethodCall::getReceiverSVal() const {
@@ -584,10 +658,23 @@ SVal ObjCMethodCall::getReceiverSVal() const {
// An instance message with no expression means we are sending to super.
// In this case the object reference is the same as 'self'.
- const LocationContext *LCtx = getLocationContext();
- const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
- assert(SelfDecl && "No message receiver Expr, but not in an ObjC method");
- return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx));
+ assert(getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance);
+ SVal SelfVal = getSelfSVal();
+ assert(SelfVal.isValid() && "Calling super but not in ObjC method");
+ return SelfVal;
+}
+
+bool ObjCMethodCall::isReceiverSelfOrSuper() const {
+ if (getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance ||
+ getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperClass)
+ return true;
+
+ if (!isInstanceMessage())
+ return false;
+
+ SVal RecVal = getSVal(getOriginExpr()->getInstanceReceiver());
+
+ return (RecVal == getSelfSVal());
}
SourceRange ObjCMethodCall::getSourceRange() const {
@@ -820,7 +907,8 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
return getSimpleCall(CE, State, CallerCtx);
switch (CallSite->getStmtClass()) {
- case Stmt::CXXConstructExprClass: {
+ case Stmt::CXXConstructExprClass:
+ case Stmt::CXXTemporaryObjectExprClass: {
SValBuilder &SVB = State->getStateManager().getSValBuilder();
const CXXMethodDecl *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
@@ -858,5 +946,5 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
Trigger = Dtor->getBody();
return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(),
- State, CallerCtx);
+ isa<CFGBaseDtor>(E), State, CallerCtx);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 0a047d9..74eeef1 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -38,17 +38,14 @@ StringRef CheckerContext::getCalleeName(const FunctionDecl *FunDecl) const {
bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
StringRef Name) {
- return isCLibraryFunction(FD, Name, getASTContext());
-}
-
-bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
- StringRef Name, ASTContext &Context) {
// To avoid false positives (e.g., finding user-defined functions with
// similar names), only perform fuzzy name matching when it's a builtin.
// Using a string compare is slow; we might want to switch on BuiltinID here.
unsigned BId = FD->getBuiltinID();
if (BId != 0) {
- StringRef BName = Context.BuiltinInfo.GetName(BId);
+ if (Name.empty())
+ return true;
+ StringRef BName = FD->getASTContext().BuiltinInfo.GetName(BId);
if (BName.find(Name) != StringRef::npos)
return true;
}
@@ -59,6 +56,24 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
if (!II)
return false;
+ // Look through 'extern "C"' and anything similar invented in the future.
+ const DeclContext *DC = FD->getDeclContext();
+ while (DC->isTransparentContext())
+ DC = DC->getParent();
+
+ // If this function is in a namespace, it is not a C library function.
+ if (!DC->isTranslationUnit())
+ return false;
+
+ // If this function is not externally visible, it is not a C library function.
+ // Note that we make an exception for inline functions, which may be
+ // declared in header files without external linkage.
+ if (!FD->isInlined() && FD->getLinkage() != ExternalLinkage)
+ return false;
+
+ if (Name.empty())
+ return true;
+
StringRef FName = II->getName();
if (FName.equals(Name))
return true;
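Under the tightened rules, a declaration qualifies as a C library function only if it ends up at translation-unit scope (extern "C" blocks are transparent) and is either inline or externally visible. A few hypothetical declarations and how they would fare:

    // Accepted: extern "C" is a transparent context, so this is TU scope.
    extern "C" int strcmp(const char *s1, const char *s2);

    // Accepted: inline functions in headers are explicitly exempted from
    // the external-linkage requirement.
    inline int isblank(int c) { return c == ' ' || c == '\t'; }

    // Rejected: declared inside a namespace.
    namespace mine { int strcmp(const char *s1, const char *s2); }

    // Rejected: internal linkage and not inline.
    static int helper(const char *s);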
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index c786655..3672952 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -36,8 +36,7 @@ bool CheckerManager::hasPathSensitiveCheckers() const {
!DeadSymbolsCheckers.empty() ||
!RegionChangesCheckers.empty() ||
!EvalAssumeCheckers.empty() ||
- !EvalCallCheckers.empty() ||
- !InlineCallCheckers.empty();
+ !EvalCallCheckers.empty();
}
void CheckerManager::finishedCheckerRegistration() {
@@ -314,20 +313,19 @@ namespace {
SVal Val;
const Stmt *S;
ExprEngine &Eng;
- ProgramPoint::Kind PointKind;
+ const ProgramPoint &PP;
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
CheckBindContext(const CheckersTy &checkers,
SVal loc, SVal val, const Stmt *s, ExprEngine &eng,
- ProgramPoint::Kind PK)
- : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PointKind(PK) {}
+ const ProgramPoint &pp)
+ : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp) {}
void runChecker(CheckerManager::CheckBindFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
- const ProgramPoint &L = ProgramPoint::getProgramPoint(S, PointKind,
- Pred->getLocationContext(), checkFn.Checker);
+ const ProgramPoint &L = PP.withTag(checkFn.Checker);
CheckerContext C(Bldr, Eng, Pred, L);
checkFn(Loc, Val, S, C);
@@ -340,8 +338,8 @@ void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
SVal location, SVal val,
const Stmt *S, ExprEngine &Eng,
- ProgramPoint::Kind PointKind) {
- CheckBindContext C(BindCheckers, location, val, S, Eng, PointKind);
+ const ProgramPoint &PP) {
+ CheckBindContext C(BindCheckers, location, val, S, Eng, PP);
expandGraphWithCheckers(C, Dst, Src);
}
@@ -357,8 +355,8 @@ void CheckerManager::runCheckersForEndAnalysis(ExplodedGraph &G,
// for this callback since end of path nodes are expected to be final.
void CheckerManager::runCheckersForEndPath(NodeBuilderContext &BC,
ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
ExprEngine &Eng) {
- ExplodedNode *Pred = BC.Pred;
// We define the builder outside of the loop because if at least one checker
// creates a successor for Pred, we do not need to generate an
@@ -509,41 +507,13 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
for (ExplodedNodeSet::iterator
NI = Src.begin(), NE = Src.end(); NI != NE; ++NI) {
-
ExplodedNode *Pred = *NI;
bool anyEvaluated = false;
- // First, check if any of the InlineCall callbacks can evaluate the call.
- assert(InlineCallCheckers.size() <= 1 &&
- "InlineCall is a special hacky callback to allow intrusive"
- "evaluation of the call (which simulates inlining). It is "
- "currently only used by OSAtomicChecker and should go away "
- "at some point.");
- for (std::vector<InlineCallFunc>::iterator
- EI = InlineCallCheckers.begin(), EE = InlineCallCheckers.end();
- EI != EE; ++EI) {
- ExplodedNodeSet checkDst;
- bool evaluated = (*EI)(CE, Eng, Pred, checkDst);
- assert(!(evaluated && anyEvaluated)
- && "There are more than one checkers evaluating the call");
- if (evaluated) {
- anyEvaluated = true;
- Dst.insert(checkDst);
-#ifdef NDEBUG
- break; // on release don't check that no other checker also evals.
-#endif
- }
- }
-
-#ifdef NDEBUG // on release don't check that no other checker also evals.
- if (anyEvaluated) {
- break;
- }
-#endif
-
ExplodedNodeSet checkDst;
NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
- // Next, check if any of the EvalCall callbacks can evaluate the call.
+
+ // Check if any of the EvalCall callbacks can evaluate the call.
for (std::vector<EvalCallFunc>::iterator
EI = EvalCallCheckers.begin(), EE = EvalCallCheckers.end();
EI != EE; ++EI) {
@@ -679,10 +649,6 @@ void CheckerManager::_registerForEvalCall(EvalCallFunc checkfn) {
EvalCallCheckers.push_back(checkfn);
}
-void CheckerManager::_registerForInlineCall(InlineCallFunc checkfn) {
- InlineCallCheckers.push_back(checkfn);
-}
-
void CheckerManager::_registerForEndOfTranslationUnit(
CheckEndOfTranslationUnit checkfn) {
EndOfTranslationUnitCheckers.push_back(checkfn);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
new file mode 100644
index 0000000..4dec526
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
@@ -0,0 +1,39 @@
+//== ConstraintManager.cpp - Constraints on symbolic values -----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to manage constraints on symbolic values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+ConstraintManager::~ConstraintManager() {}
+
+static DefinedSVal getLocFromSymbol(const ProgramStateRef &State,
+ SymbolRef Sym) {
+ const MemRegion *R = State->getStateManager().getRegionManager()
+ .getSymbolicRegion(Sym);
+ return loc::MemRegionVal(R);
+}
+
+ConditionTruthVal ConstraintManager::checkNull(ProgramStateRef State,
+ SymbolRef Sym) {
+ QualType Ty = Sym->getType();
+ DefinedSVal V = Loc::isLocType(Ty) ? getLocFromSymbol(State, Sym)
+ : nonloc::SymbolVal(Sym);
+ const ProgramStatePair &P = assumeDual(State, V);
+ if (P.first && !P.second)
+ return ConditionTruthVal(false);
+ if (!P.first && P.second)
+ return ConditionTruthVal(true);
+ return ConditionTruthVal();
+}
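checkNull turns two feasibility tests into a three-valued answer: assume the value is non-zero, assume it is zero, and see which assumptions yield a consistent state. A standalone model of that logic, where State, StatePair, and the optional result are stand-ins for the analyzer's ProgramState machinery:

    #include <optional>
    #include <utility>

    struct State;  // opaque stand-in for ProgramState
    using StatePair = std::pair<const State *, const State *>;

    // Mirrors assumeDual's result: first = feasible when non-zero,
    // second = feasible when zero; a null pointer means infeasible.
    std::optional<bool> isNull(const StatePair &P) {
      if (P.first && !P.second)
        return false;        // provably non-null
      if (!P.first && P.second)
        return true;         // provably null
      return std::nullopt;   // underconstrained: could be either
    }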
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 1f13742..ec23792 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -243,11 +243,6 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
case ProgramPoint::CallEnterKind: {
CallEnter CEnter = cast<CallEnter>(Loc);
- if (AnalyzedCallees)
- if (const CallExpr* CE =
- dyn_cast_or_null<CallExpr>(CEnter.getCallExpr()))
- if (const Decl *CD = CE->getCalleeDecl())
- AnalyzedCallees->insert(CD);
SubEng.processCallEnter(CEnter, Pred);
break;
}
@@ -303,7 +298,7 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
&& "EXIT block cannot contain Stmts.");
// Process the final state transition.
- SubEng.processEndOfFunction(BuilderCtx);
+ SubEng.processEndOfFunction(BuilderCtx, Pred);
// This path is done. Don't enqueue any more nodes.
return;
@@ -313,7 +308,7 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
ExplodedNodeSet dstNodes;
BlockEntrance BE(Blk, Pred->getLocationContext());
NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
- SubEng.processCFGBlockEntrance(L, nodeBuilder);
+ SubEng.processCFGBlockEntrance(L, nodeBuilder, Pred);
// Auto-generate a node.
if (!nodeBuilder.hasGeneratedNodes()) {
@@ -519,9 +514,9 @@ void CoreEngine::enqueueStmtNode(ExplodedNode *N,
return;
}
- const CFGStmt *CS = (*Block)[Idx].getAs<CFGStmt>();
- const Stmt *St = CS ? CS->getStmt() : 0;
- PostStmt Loc(St, N->getLocationContext());
+ // At this point, we know we're processing a normal statement.
+ CFGStmt CS = cast<CFGStmt>((*Block)[Idx]);
+ PostStmt Loc(CS.getStmt(), N->getLocationContext());
if (Loc == N->getLocation()) {
// Note: 'N' should be a fresh node because otherwise it shouldn't be
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
index 52644f7..bab89c5 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -20,6 +20,44 @@
using namespace clang;
using namespace ento;
+static const Expr *ignoreTransparentExprs(const Expr *E) {
+ E = E->IgnoreParens();
+
+ switch (E->getStmtClass()) {
+ case Stmt::OpaqueValueExprClass:
+ E = cast<OpaqueValueExpr>(E)->getSourceExpr();
+ break;
+ case Stmt::ExprWithCleanupsClass:
+ E = cast<ExprWithCleanups>(E)->getSubExpr();
+ break;
+ case Stmt::CXXBindTemporaryExprClass:
+ E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
+ break;
+ case Stmt::SubstNonTypeTemplateParmExprClass:
+ E = cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement();
+ break;
+ case Stmt::CXXDefaultArgExprClass:
+ E = cast<CXXDefaultArgExpr>(E)->getExpr();
+ break;
+ default:
+ // This is the base case: we can't look through more than we already have.
+ return E;
+ }
+
+ return ignoreTransparentExprs(E);
+}
+
+static const Stmt *ignoreTransparentExprs(const Stmt *S) {
+ if (const Expr *E = dyn_cast<Expr>(S))
+ return ignoreTransparentExprs(E);
+ return S;
+}
+
+EnvironmentEntry::EnvironmentEntry(const Stmt *S, const LocationContext *L)
+ : std::pair<const Stmt *,
+ const StackFrameContext *>(ignoreTransparentExprs(S),
+ L ? L->getCurrentStackFrame() : 0) {}
+
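A concrete case of a transparent wrapper: GNU's binary conditional evaluates its left operand once and shares it through an OpaqueValueExpr, so keying environment entries on the underlying source expression lets both uses resolve to the same binding. Sketch with hypothetical names:

    int *pick(int *q, int *r) {
      // 'q ?: r' models the shared operand as an OpaqueValueExpr wrapping
      // 'q'; lookups on the opaque value now fall through to q's binding.
      return q ?: r;
    }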
SVal Environment::lookupExpr(const EnvironmentEntry &E) const {
const SVal* X = ExprBindings.lookup(E);
if (X) {
@@ -30,101 +68,72 @@ SVal Environment::lookupExpr(const EnvironmentEntry &E) const {
}
SVal Environment::getSVal(const EnvironmentEntry &Entry,
- SValBuilder& svalBuilder,
- bool useOnlyDirectBindings) const {
-
- if (useOnlyDirectBindings) {
- // This branch is rarely taken, but can be exercised by
- // checkers that explicitly bind values to arbitrary
- // expressions. It is crucial that we do not ignore any
- // expression here, and do a direct lookup.
- return lookupExpr(Entry);
+ SValBuilder& svalBuilder) const {
+ const Stmt *S = Entry.getStmt();
+ const LocationContext *LCtx = Entry.getLocationContext();
+
+ switch (S->getStmtClass()) {
+ case Stmt::CXXBindTemporaryExprClass:
+ case Stmt::CXXDefaultArgExprClass:
+ case Stmt::ExprWithCleanupsClass:
+ case Stmt::GenericSelectionExprClass:
+ case Stmt::OpaqueValueExprClass:
+ case Stmt::ParenExprClass:
+ case Stmt::SubstNonTypeTemplateParmExprClass:
+ llvm_unreachable("Should have been handled by ignoreTransparentExprs");
+
+ case Stmt::AddrLabelExprClass:
+ return svalBuilder.makeLoc(cast<AddrLabelExpr>(S));
+
+ case Stmt::CharacterLiteralClass: {
+ const CharacterLiteral *C = cast<CharacterLiteral>(S);
+ return svalBuilder.makeIntVal(C->getValue(), C->getType());
}
- const Stmt *E = Entry.getStmt();
- const LocationContext *LCtx = Entry.getLocationContext();
-
- for (;;) {
- if (const Expr *Ex = dyn_cast<Expr>(E))
- E = Ex->IgnoreParens();
-
- switch (E->getStmtClass()) {
- case Stmt::AddrLabelExprClass:
- return svalBuilder.makeLoc(cast<AddrLabelExpr>(E));
- case Stmt::OpaqueValueExprClass: {
- const OpaqueValueExpr *ope = cast<OpaqueValueExpr>(E);
- E = ope->getSourceExpr();
- continue;
- }
- case Stmt::ParenExprClass:
- case Stmt::GenericSelectionExprClass:
- llvm_unreachable("ParenExprs and GenericSelectionExprs should "
- "have been handled by IgnoreParens()");
- case Stmt::CharacterLiteralClass: {
- const CharacterLiteral* C = cast<CharacterLiteral>(E);
- return svalBuilder.makeIntVal(C->getValue(), C->getType());
- }
- case Stmt::CXXBoolLiteralExprClass: {
- const SVal *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx));
- if (X)
- return *X;
- else
- return svalBuilder.makeBoolVal(cast<CXXBoolLiteralExpr>(E));
- }
- case Stmt::CXXScalarValueInitExprClass:
- case Stmt::ImplicitValueInitExprClass: {
- QualType Ty = cast<Expr>(E)->getType();
- return svalBuilder.makeZeroVal(Ty);
- }
- case Stmt::IntegerLiteralClass: {
- // In C++, this expression may have been bound to a temporary object.
- SVal const *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx));
- if (X)
- return *X;
- else
- return svalBuilder.makeIntVal(cast<IntegerLiteral>(E));
- }
- case Stmt::ObjCBoolLiteralExprClass:
- return svalBuilder.makeBoolVal(cast<ObjCBoolLiteralExpr>(E));
-
- // For special C0xx nullptr case, make a null pointer SVal.
- case Stmt::CXXNullPtrLiteralExprClass:
- return svalBuilder.makeNull();
- case Stmt::ExprWithCleanupsClass:
- E = cast<ExprWithCleanups>(E)->getSubExpr();
- continue;
- case Stmt::CXXBindTemporaryExprClass:
- E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
- continue;
- case Stmt::SubstNonTypeTemplateParmExprClass:
- E = cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement();
- continue;
- case Stmt::ObjCStringLiteralClass: {
- MemRegionManager &MRMgr = svalBuilder.getRegionManager();
- const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(E);
- return svalBuilder.makeLoc(MRMgr.getObjCStringRegion(SL));
- }
- case Stmt::StringLiteralClass: {
- MemRegionManager &MRMgr = svalBuilder.getRegionManager();
- const StringLiteral *SL = cast<StringLiteral>(E);
- return svalBuilder.makeLoc(MRMgr.getStringRegion(SL));
- }
- case Stmt::ReturnStmtClass: {
- const ReturnStmt *RS = cast<ReturnStmt>(E);
- if (const Expr *RE = RS->getRetValue()) {
- E = RE;
- continue;
- }
- return UndefinedVal();
- }
-
- // Handle all other Stmt* using a lookup.
- default:
- break;
- };
+ case Stmt::CXXBoolLiteralExprClass:
+ return svalBuilder.makeBoolVal(cast<CXXBoolLiteralExpr>(S));
+
+ case Stmt::CXXScalarValueInitExprClass:
+ case Stmt::ImplicitValueInitExprClass: {
+ QualType Ty = cast<Expr>(S)->getType();
+ return svalBuilder.makeZeroVal(Ty);
+ }
+
+ case Stmt::IntegerLiteralClass:
+ return svalBuilder.makeIntVal(cast<IntegerLiteral>(S));
+
+ case Stmt::ObjCBoolLiteralExprClass:
+ return svalBuilder.makeBoolVal(cast<ObjCBoolLiteralExpr>(S));
+
+ // For the special C++0x nullptr case, make a null pointer SVal.
+ case Stmt::CXXNullPtrLiteralExprClass:
+ return svalBuilder.makeNull();
+
+ case Stmt::ObjCStringLiteralClass: {
+ MemRegionManager &MRMgr = svalBuilder.getRegionManager();
+ const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(S);
+ return svalBuilder.makeLoc(MRMgr.getObjCStringRegion(SL));
+ }
+
+ case Stmt::StringLiteralClass: {
+ MemRegionManager &MRMgr = svalBuilder.getRegionManager();
+ const StringLiteral *SL = cast<StringLiteral>(S);
+ return svalBuilder.makeLoc(MRMgr.getStringRegion(SL));
+ }
+
+ case Stmt::ReturnStmtClass: {
+ const ReturnStmt *RS = cast<ReturnStmt>(S);
+ if (const Expr *RE = RS->getRetValue())
+ return getSVal(EnvironmentEntry(RE, LCtx), svalBuilder);
+ return UndefinedVal();
+ }
+
+ // Handle all other Stmt* using a lookup.
+ default:
break;
}
- return lookupExpr(EnvironmentEntry(E, LCtx));
+
+ return lookupExpr(EnvironmentEntry(S, LCtx));
}
Environment EnvironmentManager::bindExpr(Environment Env,
@@ -140,16 +149,16 @@ Environment EnvironmentManager::bindExpr(Environment Env,
return Environment(F.add(Env.ExprBindings, E, V));
}
-static inline EnvironmentEntry MakeLocation(const EnvironmentEntry &E) {
- const Stmt *S = E.getStmt();
- S = (const Stmt*) (((uintptr_t) S) | 0x1);
- return EnvironmentEntry(S, E.getLocationContext());
+EnvironmentEntry EnvironmentEntry::makeLocation() const {
+ EnvironmentEntry Result = *this;
+ reinterpret_cast<uintptr_t &>(Result.first) |= 0x1;
+ return Result;
}
Environment EnvironmentManager::bindExprAndLocation(Environment Env,
const EnvironmentEntry &E,
SVal location, SVal V) {
- return Environment(F.add(F.add(Env.ExprBindings, MakeLocation(E), location),
+ return Environment(F.add(F.add(Env.ExprBindings, E.makeLocation(), location),
E, V));
}
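makeLocation relies on a classic alignment trick: Stmt pointers are at least word-aligned, so bit 0 is always free to distinguish the "location" binding from the ordinary value binding (the printAux hunk below masks the bit back off before printing). A self-contained sketch of the tag/untag pair:

    #include <cassert>
    #include <cstdint>

    // Aligned pointers have zero low bits; borrow bit 0 as a discriminator.
    inline void *setTag(void *P) {
      return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 0x1);
    }
    inline void *clearTag(void *P) {
      return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) &
                                      ~uintptr_t(0x1));
    }

    int main() {
      int X = 0;
      void *Tagged = setTag(&X);
      assert(Tagged != &X && clearTag(Tagged) == &X);
    }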
@@ -286,7 +295,8 @@ void Environment::printAux(raw_ostream &Out, bool printLocations,
S = (Stmt*) (((uintptr_t) S) & ((uintptr_t) ~0x1));
}
- Out << " (" << (void*) En.getLocationContext() << ',' << (void*) S << ") ";
+ Out << " (" << (const void*) En.getLocationContext() << ','
+ << (const void*) S << ") ";
LangOptions LO; // FIXME.
S->printPretty(Out, 0, PrintingPolicy(LO));
Out << " : " << I.getData();
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index b79f3f5..c284bd7 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -47,10 +47,8 @@ void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
// Cleanup.
//===----------------------------------------------------------------------===//
-static const unsigned CounterTop = 1000;
-
ExplodedGraph::ExplodedGraph()
- : NumNodes(0), reclaimNodes(false), reclaimCounter(CounterTop) {}
+ : NumNodes(0), ReclaimNodeInterval(0) {}
ExplodedGraph::~ExplodedGraph() {}
@@ -63,12 +61,12 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
//
// (1) 1 predecessor (that has one successor)
// (2) 1 successor (that has one predecessor)
- // (3) The ProgramPoint is for a PostStmt.
+ // (3) The ProgramPoint is for a PostStmt, but not a PostStore.
// (4) There is no 'tag' for the ProgramPoint.
// (5) The 'store' is the same as the predecessor.
// (6) The 'GDM' is the same as the predecessor.
// (7) The LocationContext is the same as the predecessor.
- // (8) The PostStmt is for a non-consumed Stmt or Expr.
+ // (8) The PostStmt isn't for a non-consumed Stmt or Expr.
// (9) The successor is not a CallExpr StmtPoint (so that we would be able to
// find it when retrying a call with no inlining).
// FIXME: It may be safe to reclaim PreCall and PostCall nodes as well.
@@ -87,16 +85,13 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
// Condition 3.
ProgramPoint progPoint = node->getLocation();
- if (!isa<PostStmt>(progPoint))
+ if (!isa<PostStmt>(progPoint) || isa<PostStore>(progPoint))
return false;
// Condition 4.
PostStmt ps = cast<PostStmt>(progPoint);
if (ps.getTag())
return false;
-
- if (isa<BinaryOperator>(ps.getStmt()))
- return false;
// Conditions 5, 6, and 7.
ProgramStateRef state = node->getState();
@@ -106,6 +101,12 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
return false;
// Condition 8.
+ // Do not collect nodes for non-consumed Stmt or Expr to ensure precise
+ // diagnostic generation; specifically, so that we can anchor arrows
+ // pointing to the beginning of statements (as written in code).
+ if (!isa<Expr>(ps.getStmt()))
+ return false;
+
if (const Expr *Ex = dyn_cast<Expr>(ps.getStmt())) {
ParentMap &PM = progPoint.getLocationContext()->getParentMap();
if (!PM.isConsumedExpr(Ex))
@@ -115,7 +116,7 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
// Condition 9.
const ProgramPoint SuccLoc = succ->getLocation();
if (const StmtPoint *SP = dyn_cast<StmtPoint>(&SuccLoc))
- if (CallEvent::mayBeInlined(SP->getStmt()))
+ if (CallEvent::isCallStmt(SP->getStmt()))
return false;
return true;
@@ -141,13 +142,13 @@ void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
if (ChangedNodes.empty())
return;
- // Only periodically relcaim nodes so that we can build up a set of
+ // Only periodically reclaim nodes so that we can build up a set of
// nodes that meet the reclamation criteria. Freshly created nodes
// by definition have no successor, and thus cannot be reclaimed (see below).
- assert(reclaimCounter > 0);
- if (--reclaimCounter != 0)
+ assert(ReclaimCounter > 0);
+ if (--ReclaimCounter != 0)
return;
- reclaimCounter = CounterTop;
+ ReclaimCounter = ReclaimNodeInterval;
for (NodeVector::iterator it = ChangedNodes.begin(), et = ChangedNodes.end();
it != et; ++it) {
@@ -162,9 +163,18 @@ void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
// ExplodedNode.
//===----------------------------------------------------------------------===//
-static inline BumpVector<ExplodedNode*>& getVector(void *P) {
- return *reinterpret_cast<BumpVector<ExplodedNode*>*>(P);
-}
+// A NodeGroup's storage type is actually very much like a TinyPtrVector:
+// it can be either a pointer to a single ExplodedNode, or a pointer to a
+// BumpVector allocated with the ExplodedGraph's allocator. This allows the
+// common case of single-node NodeGroups to be implemented with no extra memory.
+//
+// Consequently, each of the NodeGroup methods has up to four cases to handle:
+// 1. The flag is set and this group does not actually contain any nodes.
+// 2. The group is empty, in which case the storage value is null.
+// 3. The group contains a single node.
+// 4. The group contains more than one node.
+typedef BumpVector<ExplodedNode *> ExplodedNodeVector;
+typedef llvm::PointerUnion<ExplodedNode *, ExplodedNodeVector *> GroupStorage;
void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) {
assert (!V->isSink());
@@ -176,71 +186,77 @@ void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) {
}
void ExplodedNode::NodeGroup::replaceNode(ExplodedNode *node) {
- assert(getKind() == Size1);
- P = reinterpret_cast<uintptr_t>(node);
- assert(getKind() == Size1);
+ assert(!getFlag());
+
+ GroupStorage &Storage = reinterpret_cast<GroupStorage&>(P);
+ assert(Storage.is<ExplodedNode *>());
+ Storage = node;
+ assert(Storage.is<ExplodedNode *>());
}
void ExplodedNode::NodeGroup::addNode(ExplodedNode *N, ExplodedGraph &G) {
- assert((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0);
assert(!getFlag());
- if (getKind() == Size1) {
- if (ExplodedNode *NOld = getNode()) {
- BumpVectorContext &Ctx = G.getNodeAllocator();
- BumpVector<ExplodedNode*> *V =
- G.getAllocator().Allocate<BumpVector<ExplodedNode*> >();
- new (V) BumpVector<ExplodedNode*>(Ctx, 4);
-
- assert((reinterpret_cast<uintptr_t>(V) & Mask) == 0x0);
- V->push_back(NOld, Ctx);
- V->push_back(N, Ctx);
- P = reinterpret_cast<uintptr_t>(V) | SizeOther;
- assert(getPtr() == (void*) V);
- assert(getKind() == SizeOther);
- }
- else {
- P = reinterpret_cast<uintptr_t>(N);
- assert(getKind() == Size1);
- }
+ GroupStorage &Storage = reinterpret_cast<GroupStorage&>(P);
+ if (Storage.isNull()) {
+ Storage = N;
+ assert(Storage.is<ExplodedNode *>());
+ return;
}
- else {
- assert(getKind() == SizeOther);
- getVector(getPtr()).push_back(N, G.getNodeAllocator());
+
+ ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>();
+
+ if (!V) {
+ // Switch from single-node to multi-node representation.
+ ExplodedNode *Old = Storage.get<ExplodedNode *>();
+
+ BumpVectorContext &Ctx = G.getNodeAllocator();
+ V = G.getAllocator().Allocate<ExplodedNodeVector>();
+ new (V) ExplodedNodeVector(Ctx, 4);
+ V->push_back(Old, Ctx);
+
+ Storage = V;
+ assert(!getFlag());
+ assert(Storage.is<ExplodedNodeVector *>());
}
+
+ V->push_back(N, G.getNodeAllocator());
}
unsigned ExplodedNode::NodeGroup::size() const {
if (getFlag())
return 0;
- if (getKind() == Size1)
- return getNode() ? 1 : 0;
- else
- return getVector(getPtr()).size();
+ const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+ if (Storage.isNull())
+ return 0;
+ if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+ return V->size();
+ return 1;
}
-ExplodedNode **ExplodedNode::NodeGroup::begin() const {
+ExplodedNode * const *ExplodedNode::NodeGroup::begin() const {
if (getFlag())
- return NULL;
+ return 0;
- if (getKind() == Size1)
- return (ExplodedNode**) (getPtr() ? &P : NULL);
- else
- return const_cast<ExplodedNode**>(&*(getVector(getPtr()).begin()));
+ const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+ if (Storage.isNull())
+ return 0;
+ if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+ return V->begin();
+ return Storage.getAddrOfPtr1();
}
-ExplodedNode** ExplodedNode::NodeGroup::end() const {
+ExplodedNode * const *ExplodedNode::NodeGroup::end() const {
if (getFlag())
- return NULL;
-
- if (getKind() == Size1)
- return (ExplodedNode**) (getPtr() ? &P+1 : NULL);
- else {
- // Dereferencing end() is undefined behaviour. The vector is not empty, so
- // we can dereference the last elem and then add 1 to the result.
- return const_cast<ExplodedNode**>(getVector(getPtr()).end());
- }
+ return 0;
+
+ const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+ if (Storage.isNull())
+ return 0;
+ if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+ return V->end();
+ return Storage.getAddrOfPtr1() + 1;
}
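The representation the NodeGroup comment describes can be modeled without LLVM at all. The sketch below uses std::variant where the real code packs the discriminator into the pointer's spare low bits via llvm::PointerUnion; either way, the common one-node case costs no heap allocation (the sink-flag case is left out of the model):

    #include <cstddef>
    #include <variant>
    #include <vector>

    struct Node {};
    // empty | single inline node | out-of-line vector of nodes
    using Group = std::variant<std::monostate, Node *, std::vector<Node *>>;

    void addNode(Group &G, Node *N) {
      if (std::holds_alternative<std::monostate>(G)) {
        G = N;                                 // first node: stay inline
      } else if (Node **Single = std::get_if<Node *>(&G)) {
        std::vector<Node *> V = {*Single, N};  // grow to the vector form
        G = std::move(V);
      } else {
        std::get<std::vector<Node *>>(G).push_back(N);
      }
    }

    std::size_t size(const Group &G) {
      if (std::holds_alternative<std::monostate>(G)) return 0;
      if (std::holds_alternative<Node *>(G))         return 1;
      return std::get<std::vector<Node *>>(G).size();
    }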
ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
@@ -266,7 +282,7 @@ ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
new (V) NodeTy(L, State, IsSink);
- if (reclaimNodes)
+ if (ReclaimNodeInterval)
ChangedNodes.push_back(V);
// Insert the node into the node set and return it.
@@ -314,8 +330,8 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
// ===- Pass 1 (reverse DFS) -===
for (const ExplodedNode* const* I = BeginSources; I != EndSources; ++I) {
- assert(*I);
- WL1.push_back(*I);
+ if (*I)
+ WL1.push_back(*I);
}
// Process the first worklist until it is empty. Because it is a std::list
@@ -338,7 +354,8 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
}
// Visit our predecessors and enqueue them.
- for (ExplodedNode** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I)
+ for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end();
+ I != E; ++I)
WL1.push_back(*I);
}
@@ -375,7 +392,8 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
// Walk through the predecessors of 'N' and hook up their corresponding
// nodes in the new graph (if any) to the freshly created node.
- for (ExplodedNode **I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) {
+ for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end();
+ I != E; ++I) {
Pass2Ty::iterator PI = Pass2.find(*I);
if (PI == Pass2.end())
continue;
@@ -387,7 +405,8 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
// been created, we should hook them up as successors. Otherwise, enqueue
// the new nodes from the original graph that should have nodes created
// in the new graph.
- for (ExplodedNode **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) {
+ for (ExplodedNode::succ_iterator I = N->Succs.begin(), E = N->Succs.end();
+ I != E; ++I) {
Pass2Ty::iterator PI = Pass2.find(*I);
if (PI != Pass2.end()) {
PI->second->addPredecessor(NewN, *G);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index b0435fb..045591c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -55,32 +55,32 @@ STATISTIC(NumTimesRetriedWithoutInlining,
//===----------------------------------------------------------------------===//
ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled,
- SetOfConstDecls *VisitedCallees,
+ SetOfConstDecls *VisitedCalleesIn,
FunctionSummariesTy *FS)
: AMgr(mgr),
AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
- Engine(*this, VisitedCallees, FS),
+ Engine(*this, FS),
G(Engine.getGraph()),
StateMgr(getContext(), mgr.getStoreManagerCreator(),
mgr.getConstraintManagerCreator(), G.getAllocator(),
- *this),
+ this),
SymMgr(StateMgr.getSymbolManager()),
svalBuilder(StateMgr.getSValBuilder()),
EntryNode(NULL),
- currentStmt(NULL), currentStmtIdx(0), currentBuilderContext(0),
- NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
- RaiseSel(GetNullarySelector("raise", getContext())),
- ObjCGCEnabled(gcEnabled), BR(mgr, *this) {
-
- if (mgr.shouldEagerlyTrimExplodedGraph()) {
- // Enable eager node reclaimation when constructing the ExplodedGraph.
- G.enableNodeReclamation();
+ currStmt(NULL), currStmtIdx(0), currBldrCtx(0),
+ ObjCNoRet(mgr.getASTContext()),
+ ObjCGCEnabled(gcEnabled), BR(mgr, *this),
+ VisitedCallees(VisitedCalleesIn)
+{
+ unsigned TrimInterval = mgr.options.getGraphTrimInterval();
+ if (TrimInterval != 0) {
+ // Enable eager node reclamation when constructing the ExplodedGraph.
+ G.enableNodeReclamation(TrimInterval);
}
}
ExprEngine::~ExprEngine() {
BR.FlushReports();
- delete [] NSExceptionInstanceRaiseSelectors;
}
//===----------------------------------------------------------------------===//
@@ -164,6 +164,23 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
return state;
}
+/// If the value of the given expression is a NonLoc, copy it into a new
+/// temporary region, and replace the value of the expression with that.
+static ProgramStateRef createTemporaryRegionIfNeeded(ProgramStateRef State,
+ const LocationContext *LC,
+ const Expr *E) {
+ SVal V = State->getSVal(E, LC);
+
+ if (isa<NonLoc>(V)) {
+ MemRegionManager &MRMgr = State->getStateManager().getRegionManager();
+ const MemRegion *R = MRMgr.getCXXTempObjectRegion(E, LC);
+ State = State->bindLoc(loc::MemRegionVal(R), V);
+ State = State->BindExpr(E, LC, loc::MemRegionVal(R));
+ }
+
+ return State;
+}
+
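A plausible situation this helper addresses (S and make are hypothetical): the struct rvalue produced by make() is a NonLoc compound value, but member access needs something with an address, so the value is spilled into a fresh temporary region and the expression rebound to that region's location.

    struct S { int x; };
    S make();
    int f() {
      return make().x;  // 'make().x' needs an lvalue base: the NonLoc
                        // result of make() gets a temporary region
    }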
//===----------------------------------------------------------------------===//
// Top-level transfer function logic (Dispatcher).
//===----------------------------------------------------------------------===//
@@ -200,8 +217,8 @@ void ExprEngine::processEndWorklist(bool hasWorkRemaining) {
void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
unsigned StmtIdx, NodeBuilderContext *Ctx) {
- currentStmtIdx = StmtIdx;
- currentBuilderContext = Ctx;
+ currStmtIdx = StmtIdx;
+ currBldrCtx = Ctx;
switch (E.getKind()) {
case CFGElement::Invalid:
@@ -219,7 +236,7 @@ void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
ProcessImplicitDtor(*E.getAs<CFGImplicitDtor>(), Pred);
return;
}
- currentBuilderContext = 0;
+ currBldrCtx = 0;
}
static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
@@ -228,7 +245,7 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
const LocationContext *LC) {
// Are we never purging state values?
- if (AMgr.getPurgeMode() == PurgeNone)
+ if (AMgr.options.AnalysisPurgeOpt == PurgeNone)
return false;
// Is this the beginning of a basic block?
@@ -240,7 +257,7 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
return true;
// Run before processing a call.
- if (CallEvent::mayBeInlined(S.getStmt()))
+ if (CallEvent::isCallStmt(S.getStmt()))
return true;
// Is this an expression that is consumed by another expression? If so,
@@ -251,12 +268,12 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
const Stmt *ReferenceStmt,
- const LocationContext *LC,
+ const StackFrameContext *LC,
const Stmt *DiagnosticStmt,
ProgramPoint::Kind K) {
assert((K == ProgramPoint::PreStmtPurgeDeadSymbolsKind ||
- ReferenceStmt == 0) && "PreStmt is not generally supported by "
- "the SymbolReaper yet");
+ ReferenceStmt == 0)
+ && "PostStmt is not generally supported by the SymbolReaper yet");
NumRemoveDeadBindings++;
CleanedState = Pred->getState();
SymbolReaper SymReaper(LC, ReferenceStmt, SymMgr, getStoreManager());
@@ -276,8 +293,8 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
// Generate a CleanedNode that has the environment and store cleaned
// up. Since no symbols are dead, we can optimize and not clean out
// the constraint manager.
- StmtNodeBuilder Bldr(Pred, Out, *currentBuilderContext);
- Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, false, &cleanupTag,K);
+ StmtNodeBuilder Bldr(Pred, Out, *currBldrCtx);
+ Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, &cleanupTag, K);
} else {
// Call checkers with the non-cleaned state so that they could query the
@@ -289,7 +306,7 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
// For each node in CheckedSet, generate CleanedNodes that have the
// environment, the store, and the constraints cleaned up but have the
// user-supplied states as the predecessors.
- StmtNodeBuilder Bldr(CheckedSet, Out, *currentBuilderContext);
+ StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx);
for (ExplodedNodeSet::const_iterator
I = CheckedSet.begin(), E = CheckedSet.end(); I != E; ++I) {
ProgramStateRef CheckerState = (*I)->getState();
@@ -309,8 +326,7 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
// generate a transition to that state.
ProgramStateRef CleanedCheckerSt =
StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
- Bldr.generateNode(DiagnosticStmt, *I, CleanedCheckerSt, false,
- &cleanupTag, K);
+ Bldr.generateNode(DiagnosticStmt, *I, CleanedCheckerSt, &cleanupTag, K);
}
}
}
@@ -320,17 +336,17 @@ void ExprEngine::ProcessStmt(const CFGStmt S,
// Reclaim any unnecessary nodes in the ExplodedGraph.
G.reclaimRecentlyAllocatedNodes();
- currentStmt = S.getStmt();
+ currStmt = S.getStmt();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- currentStmt->getLocStart(),
+ currStmt->getLocStart(),
"Error evaluating statement");
// Remove dead bindings and symbols.
EntryNode = Pred;
ExplodedNodeSet CleanedStates;
if (shouldRemoveDeadBindings(AMgr, S, Pred, EntryNode->getLocationContext())){
- removeDead(EntryNode, CleanedStates, currentStmt,
- Pred->getLocationContext(), currentStmt);
+ removeDead(EntryNode, CleanedStates, currStmt,
+ Pred->getStackFrame(), currStmt);
} else
CleanedStates.Add(EntryNode);
@@ -340,44 +356,45 @@ void ExprEngine::ProcessStmt(const CFGStmt S,
E = CleanedStates.end(); I != E; ++I) {
ExplodedNodeSet DstI;
// Visit the statement.
- Visit(currentStmt, *I, DstI);
+ Visit(currStmt, *I, DstI);
Dst.insert(DstI);
}
// Enqueue the new nodes onto the work list.
- Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
+ Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
// NULL out these variables to cleanup.
CleanedState = NULL;
EntryNode = NULL;
- currentStmt = 0;
+ currStmt = 0;
}
void ExprEngine::ProcessInitializer(const CFGInitializer Init,
ExplodedNode *Pred) {
- ExplodedNodeSet Dst;
- NodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
-
- ProgramStateRef State = Pred->getState();
-
const CXXCtorInitializer *BMI = Init.getInitializer();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
BMI->getSourceLocation(),
"Error evaluating initializer");
- // We don't set EntryNode and currentStmt. And we don't clean up state.
+ // We don't set EntryNode and currStmt. And we don't clean up state.
const StackFrameContext *stackFrame =
cast<StackFrameContext>(Pred->getLocationContext());
const CXXConstructorDecl *decl =
cast<CXXConstructorDecl>(stackFrame->getDecl());
+
+ ProgramStateRef State = Pred->getState();
SVal thisVal = State->getSVal(svalBuilder.getCXXThis(decl, stackFrame));
+ PostInitializer PP(BMI, stackFrame);
+ ExplodedNodeSet Tmp(Pred);
+
// Evaluate the initializer, if necessary
if (BMI->isAnyMemberInitializer()) {
// Constructors build the object directly in the field,
// but non-objects must be copied in from the initializer.
- if (!isa<CXXConstructExpr>(BMI->getInit())) {
+ const Expr *Init = BMI->getInit();
+ if (!isa<CXXConstructExpr>(Init)) {
SVal FieldLoc;
if (BMI->isIndirectMemberInitializer())
FieldLoc = State->getLValue(BMI->getIndirectMember(), thisVal);
@@ -385,22 +402,26 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
FieldLoc = State->getLValue(BMI->getMember(), thisVal);
SVal InitVal = State->getSVal(BMI->getInit(), stackFrame);
- State = State->bindLoc(FieldLoc, InitVal);
+
+ Tmp.clear();
+ evalBind(Tmp, Init, Pred, FieldLoc, InitVal, /*isInit=*/true, &PP);
}
} else {
assert(BMI->isBaseInitializer() || BMI->isDelegatingInitializer());
// We already did all the work when visiting the CXXConstructExpr.
}
- // Construct a PostInitializer node whether the state changed or not,
+ // Construct PostInitializer nodes whether the state changed or not,
// so that the diagnostics don't get confused.
- PostInitializer PP(BMI, stackFrame);
- // Builder automatically add the generated node to the deferred set,
- // which are processed in the builder's dtor.
- Bldr.generateNode(PP, State, Pred);
+ ExplodedNodeSet Dst;
+ NodeBuilder Bldr(Tmp, Dst, *currBldrCtx);
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ ExplodedNode *N = *I;
+ Bldr.generateNode(PP, N->getState(), N);
+ }
// Enqueue the new nodes onto the work list.
- Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
+ Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
}
void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
@@ -424,7 +445,7 @@ void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
}
// Enqueue the new nodes onto the work list.
- Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
+ Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
}
void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
@@ -441,7 +462,7 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
Loc dest = state->getLValue(varDecl, Pred->getLocationContext());
VisitCXXDestructor(varType, cast<loc::MemRegionVal>(dest).getRegion(),
- Dtor.getTriggerStmt(), Pred, Dst);
+ Dtor.getTriggerStmt(), /*IsBase=*/false, Pred, Dst);
}
void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
@@ -459,7 +480,7 @@ void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, BaseTy);
VisitCXXDestructor(BaseTy, cast<loc::MemRegionVal>(BaseVal).getRegion(),
- CurDtor->getBody(), Pred, Dst);
+ CurDtor->getBody(), /*IsBase=*/true, Pred, Dst);
}
void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
@@ -475,7 +496,7 @@ void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
VisitCXXDestructor(Member->getType(),
cast<loc::MemRegionVal>(FieldVal).getRegion(),
- CurDtor->getBody(), Pred, Dst);
+ CurDtor->getBody(), /*IsBase=*/false, Pred, Dst);
}
void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
@@ -488,7 +509,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
S->getLocStart(),
"Error evaluating statement");
ExplodedNodeSet Dst;
- StmtNodeBuilder Bldr(Pred, DstTop, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, DstTop, *currBldrCtx);
// Expressions to ignore.
if (const Expr *Ex = dyn_cast<Expr>(S))
@@ -498,7 +519,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// this check when we KNOW that there is no block-level subexpression.
// The motivation is that this check requires a hashtable lookup.
- if (S != currentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S))
+ if (S != currStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S))
return;
switch (S->getStmtClass()) {
@@ -521,21 +542,16 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXNoexceptExprClass:
case Stmt::PackExpansionExprClass:
case Stmt::SubstNonTypeTemplateParmPackExprClass:
+ case Stmt::FunctionParmPackExprClass:
case Stmt::SEHTryStmtClass:
case Stmt::SEHExceptStmtClass:
case Stmt::LambdaExprClass:
case Stmt::SEHFinallyStmtClass: {
- const ExplodedNode *node = Bldr.generateNode(S, Pred, Pred->getState(),
- /* sink */ true);
- Engine.addAbortedBlock(node, currentBuilderContext->getBlock());
+ const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
+ Engine.addAbortedBlock(node, currBldrCtx->getBlock());
break;
}
- // We don't handle default arguments either yet, but we can fake it
- // for now by just skipping them.
- case Stmt::CXXDefaultArgExprClass:
- break;
-
case Stmt::ParenExprClass:
llvm_unreachable("ParenExprs already handled.");
case Stmt::GenericSelectionExprClass:
@@ -607,11 +623,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::AtomicExprClass:
// Fall through.
- // Currently all handling of 'throw' just falls to the CFG. We
- // can consider doing more if necessary.
- case Stmt::CXXThrowExprClass:
- // Fall through.
-
// Cases we intentionally don't evaluate, since they don't need
// to be explicitly evaluated.
case Stmt::AddrLabelExprClass:
@@ -626,6 +637,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::StringLiteralClass:
case Stmt::ObjCStringLiteralClass:
case Stmt::CXXBindTemporaryExprClass:
+ case Stmt::CXXDefaultArgExprClass:
case Stmt::SubstNonTypeTemplateParmExprClass:
case Stmt::CXXNullPtrLiteralExprClass: {
Bldr.takeNodes(Pred);
@@ -647,7 +659,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
ExplodedNodeSet Tmp;
- StmtNodeBuilder Bldr2(preVisit, Tmp, *currentBuilderContext);
+ StmtNodeBuilder Bldr2(preVisit, Tmp, *currBldrCtx);
const Expr *Ex = cast<Expr>(S);
QualType resultType = Ex->getType();
@@ -656,9 +668,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
it != et; ++it) {
ExplodedNode *N = *it;
const LocationContext *LCtx = N->getLocationContext();
- SVal result =
- svalBuilder.getConjuredSymbolVal(0, Ex, LCtx, resultType,
- currentBuilderContext->getCurrentBlockCount());
+ SVal result = svalBuilder.conjureSymbolVal(0, Ex, LCtx, resultType,
+ currBldrCtx->blockCount());
ProgramStateRef state = N->getState()->BindExpr(Ex, LCtx, result);
Bldr2.generateNode(S, N, state);
}
@@ -674,9 +685,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
- case Stmt::AsmStmtClass:
+ case Stmt::GCCAsmStmtClass:
Bldr.takeNodes(Pred);
- VisitAsmStmt(cast<AsmStmt>(S), Pred, Dst);
+ VisitGCCAsmStmt(cast<GCCAsmStmt>(S), Pred, Dst);
Bldr.addNodes(Dst);
break;
@@ -711,11 +722,11 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.takeNodes(Pred);
- if (AMgr.shouldEagerlyAssume() &&
+ if (AMgr.options.eagerlyAssumeBinOpBifurcation &&
(B->isRelationalOp() || B->isEqualityOp())) {
ExplodedNodeSet Tmp;
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
- evalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
+ evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, cast<Expr>(S));
}
else
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
@@ -724,8 +735,26 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
break;
}
+ case Stmt::CXXOperatorCallExprClass: {
+ const CXXOperatorCallExpr *OCE = cast<CXXOperatorCallExpr>(S);
+
+ // For instance method operators, make sure the 'this' argument has a
+ // valid region.
+ const Decl *Callee = OCE->getCalleeDecl();
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Callee)) {
+ if (MD->isInstance()) {
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef NewState =
+ createTemporaryRegionIfNeeded(State, LCtx, OCE->getArg(0));
+ if (NewState != State)
+ Pred = Bldr.generateNode(OCE, Pred, NewState, /*Tag=*/0,
+ ProgramPoint::PreStmtKind);
+ }
+ }
+ // FALLTHROUGH
+ }
case Stmt::CallExprClass:
- case Stmt::CXXOperatorCallExprClass:
case Stmt::CXXMemberCallExprClass:
case Stmt::UserDefinedLiteralClass: {
Bldr.takeNodes(Pred);
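The new CXXOperatorCallExprClass case gives 'this' a real region when the receiver is a prvalue before falling through to the ordinary call handling. A minimal sketch of code that exercises it (hypothetical, for illustration):

    struct Str {
      Str operator+(const Str &RHS) const { return RHS; }
    };
    Str twice(Str a, Str b) {
      // '(a + b)' is a prvalue; invoking operator+ on it again requires
      // createTemporaryRegionIfNeeded to materialize a region for 'this'.
      return (a + b) + b;
    }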
@@ -846,12 +875,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Expr::MaterializeTemporaryExprClass: {
Bldr.takeNodes(Pred);
- const MaterializeTemporaryExpr *Materialize
- = cast<MaterializeTemporaryExpr>(S);
- if (Materialize->getType()->isRecordType())
- Dst.Add(Pred);
- else
- CreateCXXTemporaryObject(Materialize, Pred, Dst);
+ const MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(S);
+ CreateCXXTemporaryObject(MTE, Pred, Dst);
Bldr.addNodes(Dst);
break;
}
@@ -886,12 +911,12 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
- case Stmt::ObjCAtThrowStmtClass: {
+ case Stmt::ObjCAtThrowStmtClass:
+ case Stmt::CXXThrowExprClass:
// FIXME: This is not complete. We basically treat @throw as
// an abort.
- Bldr.generateNode(S, Pred, Pred->getState());
+ Bldr.generateSink(S, Pred, Pred->getState());
break;
- }
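With this change, C++ 'throw' joins Objective-C '@throw' in producing a sink node. Illustration only (hypothetical code):

    int pick(bool fail) {
      if (fail)
        throw -1;  // path ends in a sink, modeled like an abort
      return 0;    // only the non-throwing branch is explored further
    }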
case Stmt::ReturnStmtClass:
Bldr.takeNodes(Pred);
@@ -935,10 +960,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::UnaryOperatorClass: {
Bldr.takeNodes(Pred);
const UnaryOperator *U = cast<UnaryOperator>(S);
- if (AMgr.shouldEagerlyAssume() && (U->getOpcode() == UO_LNot)) {
+ if (AMgr.options.eagerlyAssumeBinOpBifurcation && (U->getOpcode() == UO_LNot)) {
ExplodedNodeSet Tmp;
VisitUnaryOperator(U, Pred, Tmp);
- evalEagerlyAssume(Dst, Tmp, U);
+ evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, U);
}
else
VisitUnaryOperator(U, Pred, Dst);
@@ -1030,19 +1055,18 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
/// Block entrance. (Update counters).
void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
- NodeBuilderWithSinks &nodeBuilder) {
+ NodeBuilderWithSinks &nodeBuilder,
+ ExplodedNode *Pred) {
// FIXME: Refactor this into a checker.
- ExplodedNode *pred = nodeBuilder.getContext().getPred();
-
- if (nodeBuilder.getContext().getCurrentBlockCount() >= AMgr.getMaxVisit()) {
+ if (nodeBuilder.getContext().blockCount() >= AMgr.options.maxBlockVisitOnPath) {
static SimpleProgramPointTag tag("ExprEngine : Block count exceeded");
const ExplodedNode *Sink =
- nodeBuilder.generateNode(pred->getState(), pred, &tag, true);
+ nodeBuilder.generateSink(Pred->getState(), Pred, &tag);
// Check if we stopped at the top level function or not.
// Root node should have the location context of the top most function.
- const LocationContext *CalleeLC = pred->getLocation().getLocationContext();
+ const LocationContext *CalleeLC = Pred->getLocation().getLocationContext();
const LocationContext *CalleeSF = CalleeLC->getCurrentStackFrame();
const LocationContext *RootLC =
(*G.roots_begin())->getLocation().getLocationContext();
@@ -1053,7 +1077,8 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
// no-inlining policy in the state and enqueuing the new work item on
// the list. Replay should almost never fail. Use the stats to catch it
// if it does.
- if ((!AMgr.NoRetryExhausted && replayWithoutInlining(pred, CalleeLC)))
+ if ((!AMgr.options.NoRetryExhausted &&
+ replayWithoutInlining(Pred, CalleeLC)))
return;
NumMaxBlockCountReachedInInlined++;
} else
@@ -1155,7 +1180,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
ExplodedNodeSet &Dst,
const CFGBlock *DstT,
const CFGBlock *DstF) {
- currentBuilderContext = &BldCtx;
+ currBldrCtx = &BldCtx;
// Check for NULL conditions; e.g. "for(;;)"
if (!Condition) {
@@ -1238,7 +1263,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
builder.markInfeasible(false);
}
}
- currentBuilderContext = 0;
+ currBldrCtx = 0;
}
/// processIndirectGoto - Called by CoreEngine. Used to generate successor
@@ -1287,10 +1312,25 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
/// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
-void ExprEngine::processEndOfFunction(NodeBuilderContext& BC) {
- StateMgr.EndPath(BC.Pred->getState());
+void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
+ ExplodedNode *Pred) {
+ StateMgr.EndPath(Pred->getState());
+
ExplodedNodeSet Dst;
- getCheckerManager().runCheckersForEndPath(BC, Dst, *this);
+ if (Pred->getLocationContext()->inTopFrame()) {
+ // Remove dead symbols.
+ ExplodedNodeSet AfterRemovedDead;
+ removeDeadOnEndOfFunction(BC, Pred, AfterRemovedDead);
+
+ // Notify checkers.
+ for (ExplodedNodeSet::iterator I = AfterRemovedDead.begin(),
+ E = AfterRemovedDead.end(); I != E; ++I) {
+ getCheckerManager().runCheckersForEndPath(BC, Dst, *I, *this);
+ }
+ } else {
+ getCheckerManager().runCheckersForEndPath(BC, Dst, Pred, *this);
+ }
+
Engine.enqueueEndOfFunction(Dst);
}
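In the top frame, dead symbols are now removed before the end-of-path checkers run. A sketch of why that ordering matters (hypothetical code, assuming the usual malloc checker is enabled):

    #include <stdlib.h>
    void topFrameLeak(void) {
      void *p = malloc(16);
      // 'p' dies at the end of the top frame; removing dead symbols first
      // lets end-of-path checkers report the leak with correct liveness.
    }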
@@ -1404,7 +1444,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
@@ -1422,7 +1462,7 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
V = UnknownVal();
}
- Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), false, 0,
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0,
ProgramPoint::PostLValueKind);
return;
}
@@ -1434,19 +1474,23 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
SVal V = svalBuilder.getFunctionPointer(FD);
- Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), false, 0,
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0,
ProgramPoint::PostLValueKind);
return;
}
if (isa<FieldDecl>(D)) {
- // FIXME: Compute lvalue of fields.
- Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, UnknownVal()),
- false, 0, ProgramPoint::PostLValueKind);
+ // FIXME: Compute lvalue of field pointers-to-member.
+ // Right now we just use a non-null void pointer, so that it gives proper
+ // results in boolean contexts.
+ SVal V = svalBuilder.conjureSymbolVal(Ex, LCtx, getContext().VoidPtrTy,
+ currBldrCtx->blockCount());
+ state = state->assume(cast<DefinedOrUnknownSVal>(V), true);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0,
+ ProgramPoint::PostLValueKind);
return;
}
- assert (false &&
- "ValueDecl support for this ValueDecl not implemented.");
+ llvm_unreachable("Support for this Decl not implemented.");
}
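The FieldDecl branch now conjures a non-null void pointer instead of UnknownVal. A hypothetical illustration of the boolean-context behavior the comment refers to:

    struct S { int field; };
    bool fieldPointer() {
      int S::*pm = &S::field;
      // The reference to 'field' is modeled as a conjured non-null void
      // pointer, so this test evaluates to true rather than unknown.
      return pm != 0;
    }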
/// VisitArraySubscriptExpr - Transfer function for array accesses
@@ -1461,7 +1505,7 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
ExplodedNodeSet checkerPreStmt;
getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this);
- StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currBldrCtx);
for (ExplodedNodeSet::iterator it = checkerPreStmt.begin(),
ei = checkerPreStmt.end(); it != ei; ++it) {
@@ -1471,8 +1515,8 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
state->getSVal(Idx, LCtx),
state->getSVal(Base, LCtx));
assert(A->isGLValue());
- Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V),
- false, 0, ProgramPoint::PostLValueKind);
+ Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V), 0,
+ ProgramPoint::PostLValueKind);
}
}
@@ -1480,52 +1524,40 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
ExplodedNodeSet &TopDst) {
- StmtNodeBuilder Bldr(Pred, TopDst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, TopDst, *currBldrCtx);
ExplodedNodeSet Dst;
- Decl *member = M->getMemberDecl();
+ ValueDecl *Member = M->getMemberDecl();
- if (VarDecl *VD = dyn_cast<VarDecl>(member)) {
- assert(M->isGLValue());
+ // Handle static member variables and enum constants accessed via
+ // member syntax.
+ if (isa<VarDecl>(Member) || isa<EnumConstantDecl>(Member)) {
Bldr.takeNodes(Pred);
- VisitCommonDeclRefExpr(M, VD, Pred, Dst);
+ VisitCommonDeclRefExpr(M, Member, Pred, Dst);
Bldr.addNodes(Dst);
return;
}
- // Handle C++ method calls.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(member)) {
- Bldr.takeNodes(Pred);
- SVal MDVal = svalBuilder.getFunctionPointer(MD);
- ProgramStateRef state =
- Pred->getState()->BindExpr(M, Pred->getLocationContext(), MDVal);
- Bldr.generateNode(M, Pred, state);
- return;
- }
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ Expr *BaseExpr = M->getBase();
+ // Handle C++ method calls.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member)) {
+ if (MD->isInstance())
+ state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
- FieldDecl *field = dyn_cast<FieldDecl>(member);
- if (!field) // FIXME: skipping member expressions for non-fields
- return;
+ SVal MDVal = svalBuilder.getFunctionPointer(MD);
+ state = state->BindExpr(M, LCtx, MDVal);
- Expr *baseExpr = M->getBase()->IgnoreParens();
- ProgramStateRef state = Pred->getState();
- const LocationContext *LCtx = Pred->getLocationContext();
- SVal baseExprVal = state->getSVal(baseExpr, Pred->getLocationContext());
- if (isa<nonloc::LazyCompoundVal>(baseExprVal) ||
- isa<nonloc::CompoundVal>(baseExprVal) ||
- // FIXME: This can originate by conjuring a symbol for an unknown
- // temporary struct object, see test/Analysis/fields.c:
- // (p = getit()).x
- isa<nonloc::SymbolVal>(baseExprVal)) {
- Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, UnknownVal()));
+ Bldr.generateNode(M, Pred, state);
return;
}
- // FIXME: Should we insert some assumption logic in here to determine
- // if "Base" is a valid piece of memory? Before we put this assumption
- // later when using FieldOffset lvals (which we no longer have).
+ // Handle regular struct fields / member variables.
+ state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
+ SVal baseExprVal = state->getSVal(BaseExpr, LCtx);
- // For all other cases, compute an lvalue.
+ FieldDecl *field = cast<FieldDecl>(Member);
SVal L = state->getLValue(field, baseExprVal);
if (M->isGLValue()) {
if (field->getType()->isReferenceType()) {
@@ -1535,7 +1567,7 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
L = UnknownVal();
}
- Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), false, 0,
+ Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), 0,
ProgramPoint::PostLValueKind);
} else {
Bldr.takeNodes(Pred);
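A sketch of the member-expression forms the rewritten VisitMemberExpr distinguishes (hypothetical code, for illustration):

    struct T {
      static int Static;
      enum { Constant = 2 };
      int member;
    };
    T makeT();
    int use(T t) {
      // 't.Static' (a VarDecl) and 't.Constant' (an EnumConstantDecl) go
      // through VisitCommonDeclRefExpr; 'makeT().member' gets a temporary
      // region for its prvalue base via createTemporaryRegionIfNeeded.
      return t.Static + t.Constant + t.member + makeT().member;
    }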
@@ -1548,40 +1580,48 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
/// This method is used by evalStore and (soon) VisitDeclStmt, and others.
void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
ExplodedNode *Pred,
- SVal location, SVal Val, bool atDeclInit) {
+ SVal location, SVal Val,
+ bool atDeclInit, const ProgramPoint *PP) {
+
+ const LocationContext *LC = Pred->getLocationContext();
+ PostStmt PS(StoreE, LC);
+ if (!PP)
+ PP = &PS;
// Do a previsit of the bind.
ExplodedNodeSet CheckedSet;
getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val,
- StoreE, *this,
- ProgramPoint::PostStmtKind);
+ StoreE, *this, *PP);
+ // If the location is not a 'Loc', it will already be handled by
+ // the checkers. There is nothing left to do.
+ if (!isa<Loc>(location)) {
+ Dst = CheckedSet;
+ return;
+ }
+
ExplodedNodeSet TmpDst;
- StmtNodeBuilder Bldr(CheckedSet, TmpDst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(CheckedSet, TmpDst, *currBldrCtx);
- const LocationContext *LC = Pred->getLocationContext();
for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
I!=E; ++I) {
ExplodedNode *PredI = *I;
ProgramStateRef state = PredI->getState();
-
- if (atDeclInit) {
- const VarRegion *VR =
- cast<VarRegion>(cast<loc::MemRegionVal>(location).getRegion());
-
- state = state->bindDecl(VR, Val);
- } else {
- state = state->bindLoc(location, Val);
- }
-
+
+      // When binding the value, pass on the hint that this is an initialization.
+ // For initializations, we do not need to inform clients of region
+ // changes.
+ state = state->bindLoc(cast<Loc>(location),
+ Val, /* notifyChanges = */ !atDeclInit);
+
const MemRegion *LocReg = 0;
- if (loc::MemRegionVal *LocRegVal = dyn_cast<loc::MemRegionVal>(&location))
+ if (loc::MemRegionVal *LocRegVal = dyn_cast<loc::MemRegionVal>(&location)) {
LocReg = LocRegVal->getRegion();
-
+ }
+
const ProgramPoint L = PostStore(StoreE, LC, LocReg, 0);
- Bldr.generateNode(L, PredI, state, false);
+ Bldr.generateNode(L, state, PredI);
}
-
Dst.insert(TmpDst);
}
@@ -1671,7 +1711,7 @@ void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
if (Tmp.empty())
return;
- StmtNodeBuilder Bldr(Tmp, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Tmp, Dst, *currBldrCtx);
if (location.isUndef())
return;
@@ -1684,8 +1724,7 @@ void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
// This is important. We must nuke the old binding.
Bldr.generateNode(NodeEx, *NI,
state->BindExpr(BoundEx, LCtx, UnknownVal()),
- false, tag,
- ProgramPoint::PostLoadKind);
+ tag, ProgramPoint::PostLoadKind);
}
else {
if (LoadTy.isNull())
@@ -1693,7 +1732,7 @@ void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
SVal V = state->getSVal(cast<Loc>(location), LoadTy);
Bldr.generateNode(NodeEx, *NI,
state->bindExprAndLocation(BoundEx, LCtx, location, V),
- false, tag, ProgramPoint::PostLoadKind);
+ tag, ProgramPoint::PostLoadKind);
}
}
}
@@ -1706,7 +1745,7 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
SVal location,
const ProgramPointTag *tag,
bool isLoad) {
- StmtNodeBuilder BldrTop(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder BldrTop(Pred, Dst, *currBldrCtx);
// Early checks for performance reason.
if (location.isUnknown()) {
return;
@@ -1714,7 +1753,7 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
ExplodedNodeSet Src;
BldrTop.takeNodes(Pred);
- StmtNodeBuilder Bldr(Pred, Src, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Src, *currBldrCtx);
if (Pred->getState() != state) {
// Associate this new state with an ExplodedNode.
// FIXME: If I pass null tag, the graph is incorrect, e.g for
@@ -1725,9 +1764,8 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
// instead "int *p" is noted as
// "Variable 'p' initialized to a null pointer value"
- // FIXME: why is 'tag' not used instead of etag?
- static SimpleProgramPointTag etag("ExprEngine: Location");
- Bldr.generateNode(NodeEx, Pred, state, false, &etag);
+ static SimpleProgramPointTag tag("ExprEngine: Location");
+ Bldr.generateNode(NodeEx, Pred, state, &tag);
}
ExplodedNodeSet Tmp;
getCheckerManager().runCheckersForLocation(Tmp, Src, location, isLoad,
@@ -1736,16 +1774,18 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
}
std::pair<const ProgramPointTag *, const ProgramPointTag*>
-ExprEngine::getEagerlyAssumeTags() {
+ExprEngine::geteagerlyAssumeBinOpBifurcationTags() {
static SimpleProgramPointTag
- EagerlyAssumeTrue("ExprEngine : Eagerly Assume True"),
- EagerlyAssumeFalse("ExprEngine : Eagerly Assume False");
- return std::make_pair(&EagerlyAssumeTrue, &EagerlyAssumeFalse);
+ eagerlyAssumeBinOpBifurcationTrue("ExprEngine : Eagerly Assume True"),
+ eagerlyAssumeBinOpBifurcationFalse("ExprEngine : Eagerly Assume False");
+ return std::make_pair(&eagerlyAssumeBinOpBifurcationTrue,
+ &eagerlyAssumeBinOpBifurcationFalse);
}
-void ExprEngine::evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
- const Expr *Ex) {
- StmtNodeBuilder Bldr(Src, Dst, *currentBuilderContext);
+void ExprEngine::evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst,
+ ExplodedNodeSet &Src,
+ const Expr *Ex) {
+ StmtNodeBuilder Bldr(Src, Dst, *currBldrCtx);
for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
ExplodedNode *Pred = *I;
@@ -1762,28 +1802,28 @@ void ExprEngine::evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
nonloc::SymbolVal *SEV = dyn_cast<nonloc::SymbolVal>(&V);
if (SEV && SEV->isExpression()) {
const std::pair<const ProgramPointTag *, const ProgramPointTag*> &tags =
- getEagerlyAssumeTags();
+ geteagerlyAssumeBinOpBifurcationTags();
// First assume that the condition is true.
if (ProgramStateRef StateTrue = state->assume(*SEV, true)) {
SVal Val = svalBuilder.makeIntVal(1U, Ex->getType());
StateTrue = StateTrue->BindExpr(Ex, Pred->getLocationContext(), Val);
- Bldr.generateNode(Ex, Pred, StateTrue, false, tags.first);
+ Bldr.generateNode(Ex, Pred, StateTrue, tags.first);
}
// Next, assume that the condition is false.
if (ProgramStateRef StateFalse = state->assume(*SEV, false)) {
SVal Val = svalBuilder.makeIntVal(0U, Ex->getType());
StateFalse = StateFalse->BindExpr(Ex, Pred->getLocationContext(), Val);
- Bldr.generateNode(Ex, Pred, StateFalse, false, tags.second);
+ Bldr.generateNode(Ex, Pred, StateFalse, tags.second);
}
}
}
}
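The renamed evalEagerlyAssumeBinOpBifurcation splits the state as soon as a comparison result is bound. Illustration (hypothetical code):

    int branches(int x) {
      int b = (x == 0);
      // With eager bifurcation enabled, two successors are generated here:
      // one assumes 'x == 0' and binds b = 1, the other assumes 'x != 0'
      // and binds b = 0, tagged with the true/false program-point tags.
      return b;
    }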
-void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
// We have processed both the inputs and the outputs. All of the outputs
// should evaluate to Locs. Nuke all of their values.
@@ -1793,7 +1833,7 @@ void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred,
ProgramStateRef state = Pred->getState();
- for (AsmStmt::const_outputs_iterator OI = A->begin_outputs(),
+ for (GCCAsmStmt::const_outputs_iterator OI = A->begin_outputs(),
OE = A->end_outputs(); OI != OE; ++OI) {
SVal X = state->getSVal(*OI, Pred->getLocationContext());
assert (!isa<NonLoc>(X)); // Should be an Lval, or unknown, undef.
@@ -1807,7 +1847,7 @@ void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred,
void ExprEngine::VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateNode(A, Pred, Pred->getState());
}
@@ -1932,7 +1972,7 @@ struct DOTGraphTraits<ExplodedNode*> :
if (StmtPoint *L = dyn_cast<StmtPoint>(&Loc)) {
const Stmt *S = L->getStmt();
- Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
+ Out << S->getStmtClassName() << ' ' << (const void*) S << ' ';
LangOptions LO; // FIXME.
S->printPretty(Out, 0, PrintingPolicy(LO));
printLocation(Out, S->getLocStart());
@@ -2038,8 +2078,8 @@ struct DOTGraphTraits<ExplodedNode*> :
}
ProgramStateRef state = N->getState();
- Out << "\\|StateID: " << (void*) state.getPtr()
- << " NodeID: " << (void*) N << "\\|";
+ Out << "\\|StateID: " << (const void*) state.getPtr()
+ << " NodeID: " << (const void*) N << "\\|";
state->printDOT(Out);
Out << "\\l";
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 46cba81..00b2f4a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -45,8 +45,8 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
if (RightV.isUnknown()) {
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
- RightV = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LCtx, Count);
+ unsigned Count = currBldrCtx->blockCount();
+ RightV = svalBuilder.conjureSymbolVal(0, B->getRHS(), LCtx, Count);
}
// Simulate the effects of a "store": bind the value of the RHS
// to the L-Value represented by the LHS.
@@ -57,7 +57,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
}
if (!B->isAssignmentOp()) {
- StmtNodeBuilder Bldr(*it, Tmp2, *currentBuilderContext);
+ StmtNodeBuilder Bldr(*it, Tmp2, *currBldrCtx);
if (B->isAdditiveOp()) {
// If one of the operands is a location, conjure a symbol for the other
@@ -65,16 +65,16 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// results in an ElementRegion.
// TODO: This can be removed after we enable history tracking with
// SymSymExpr.
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ unsigned Count = currBldrCtx->blockCount();
if (isa<Loc>(LeftV) &&
RHS->getType()->isIntegerType() && RightV.isUnknown()) {
- RightV = svalBuilder.getConjuredSymbolVal(RHS, LCtx,
- RHS->getType(), Count);
+ RightV = svalBuilder.conjureSymbolVal(RHS, LCtx, RHS->getType(),
+ Count);
}
if (isa<Loc>(RightV) &&
LHS->getType()->isIntegerType() && LeftV.isUnknown()) {
- LeftV = svalBuilder.getConjuredSymbolVal(LHS, LCtx,
- LHS->getType(), Count);
+ LeftV = svalBuilder.conjureSymbolVal(LHS, LCtx, LHS->getType(),
+ Count);
}
}
@@ -145,15 +145,11 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
SVal LHSVal;
if (Result.isUnknown()) {
-
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
-
// The symbolic value is actually for the type of the left-hand side
// expression, not the computation type, as this is the value the
// LValue on the LHS will bind to.
- LHSVal = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LCtx,
- LTy, Count);
-
+ LHSVal = svalBuilder.conjureSymbolVal(0, B->getRHS(), LCtx, LTy,
+ currBldrCtx->blockCount());
// However, we need to convert the symbol to the computation type.
Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
}
@@ -208,11 +204,10 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
}
ExplodedNodeSet Tmp;
- StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
Bldr.generateNode(BE, Pred,
State->BindExpr(BE, Pred->getLocationContext(), V),
- false, 0,
- ProgramPoint::PostLValueKind);
+ 0, ProgramPoint::PostLValueKind);
// FIXME: Move all post/pre visits to ::Visit().
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, BE, *this);
@@ -242,12 +237,14 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
T = ExCast->getTypeAsWritten();
- StmtNodeBuilder Bldr(dstPreStmt, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(dstPreStmt, Dst, *currBldrCtx);
for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
I != E; ++I) {
Pred = *I;
-
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
switch (CastE->getCastKind()) {
case CK_LValueToRValue:
llvm_unreachable("LValueToRValue casts handled earlier.");
@@ -267,7 +264,10 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_NonAtomicToAtomic:
// True no-ops.
case CK_NoOp:
- case CK_FunctionToPointerDecay: {
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+ case CK_FunctionToPointerDecay:
+ case CK_BuiltinFnToFnPtr: {
// Copy the SVal of Ex to CastE.
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
@@ -276,6 +276,9 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
Bldr.generateNode(CastE, Pred, state);
continue;
}
+ case CK_MemberPointerToBoolean:
+ // FIXME: For now, member pointers are represented by void *.
+ // FALLTHROUGH
case CK_Dependent:
case CK_ArrayToPointerDecay:
case CK_BitCast:
@@ -304,8 +307,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast: {
// Delegate to SValBuilder to process.
- ProgramStateRef state = Pred->getState();
- const LocationContext *LCtx = Pred->getLocationContext();
SVal V = state->getSVal(Ex, LCtx);
V = svalBuilder.evalCast(V, T, ExTy);
state = state->BindExpr(CastE, LCtx, V);
@@ -315,8 +316,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase: {
// For DerivedToBase cast, delegate to the store manager.
- ProgramStateRef state = Pred->getState();
- const LocationContext *LCtx = Pred->getLocationContext();
SVal val = state->getSVal(Ex, LCtx);
val = getStoreManager().evalDerivedToBase(val, CastE);
state = state->BindExpr(CastE, LCtx, val);
@@ -325,8 +324,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
}
// Handle C++ dyn_cast.
case CK_Dynamic: {
- ProgramStateRef state = Pred->getState();
- const LocationContext *LCtx = Pred->getLocationContext();
SVal val = state->getSVal(Ex, LCtx);
// Compute the type of the result.
@@ -347,7 +344,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (T->isReferenceType()) {
// A bad_cast exception is thrown if input value is a reference.
// Currently, we model this, by generating a sink.
- Bldr.generateNode(CastE, Pred, state, true);
+ Bldr.generateSink(CastE, Pred, state);
continue;
} else {
// If the cast fails on a pointer, bind to 0.
@@ -356,9 +353,9 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
} else {
// If we don't know if the cast succeeded, conjure a new symbol.
if (val.isUnknown()) {
- DefinedOrUnknownSVal NewSym = svalBuilder.getConjuredSymbolVal(NULL,
- CastE, LCtx, resultType,
- currentBuilderContext->getCurrentBlockCount());
+ DefinedOrUnknownSVal NewSym =
+ svalBuilder.conjureSymbolVal(0, CastE, LCtx, resultType,
+ currBldrCtx->blockCount());
state = state->BindExpr(CastE, LCtx, NewSym);
} else
// Else, bind to the derived region value.
@@ -367,27 +364,29 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
Bldr.generateNode(CastE, Pred, state);
continue;
}
+ case CK_NullToMemberPointer: {
+ // FIXME: For now, member pointers are represented by void *.
+ SVal V = svalBuilder.makeIntValWithPtrWidth(0, true);
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
// Various C++ casts that are not handled yet.
case CK_ToUnion:
case CK_BaseToDerived:
- case CK_NullToMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_ReinterpretMemberPointer:
- case CK_UserDefinedConversion:
- case CK_ConstructorConversion:
case CK_VectorSplat:
- case CK_MemberPointerToBoolean:
case CK_LValueBitCast: {
// Recover some path-sensitivity by conjuring a new value.
QualType resultType = CastE->getType();
if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
- const LocationContext *LCtx = Pred->getLocationContext();
- SVal result = svalBuilder.getConjuredSymbolVal(NULL, CastE, LCtx,
- resultType, currentBuilderContext->getCurrentBlockCount());
- ProgramStateRef state = Pred->getState()->BindExpr(CastE, LCtx,
- result);
+ SVal result = svalBuilder.conjureSymbolVal(0, CastE, LCtx,
+ resultType,
+ currBldrCtx->blockCount());
+ state = state->BindExpr(CastE, LCtx, result);
Bldr.generateNode(CastE, Pred, state);
continue;
}
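Together with the new CK_NullToMemberPointer case above, member pointers are now modeled as pointer-width integers. A hypothetical example of both casts:

    struct S { int field; };
    bool nullMember() {
      int S::*pm = 0;  // CK_NullToMemberPointer: bound to a zero value of
                       // pointer width
      return pm;       // CK_MemberPointerToBoolean: handled like an
                       // ordinary cast of that void*-style value
    }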
@@ -398,7 +397,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
const InitListExpr *ILE
= cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
@@ -442,7 +441,7 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
ExplodedNodeSet dstPreVisit;
getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, DS, *this);
- StmtNodeBuilder B(dstPreVisit, Dst, *currentBuilderContext);
+ StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
const VarDecl *VD = dyn_cast<VarDecl>(D);
for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
I!=E; ++I) {
@@ -478,8 +477,8 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
Ty = getContext().getPointerType(Ty);
}
- InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx, LC, Ty,
- currentBuilderContext->getCurrentBlockCount());
+ InitVal = svalBuilder.conjureSymbolVal(0, InitEx, LC, Ty,
+ currBldrCtx->blockCount());
}
B.takeNodes(N);
ExplodedNodeSet Dst2;
@@ -488,7 +487,7 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
}
}
else {
- B.generateNode(DS, N,state->bindDeclWithNoInit(state->getRegion(VD, LC)));
+ B.generateNode(DS, N, state);
}
}
}
@@ -498,7 +497,7 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
assert(B->getOpcode() == BO_LAnd ||
B->getOpcode() == BO_LOr);
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
ProgramStateRef state = Pred->getState();
ExplodedNode *N = Pred;
@@ -531,10 +530,28 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
else {
// If there is no terminator, by construction the last statement
// in SrcBlock is the value of the enclosing expression.
+ // However, we still need to constrain that value to be 0 or 1.
assert(!SrcBlock->empty());
CFGStmt Elem = cast<CFGStmt>(*SrcBlock->rbegin());
- const Stmt *S = Elem.getStmt();
- X = N->getState()->getSVal(S, Pred->getLocationContext());
+ const Expr *RHS = cast<Expr>(Elem.getStmt());
+ SVal RHSVal = N->getState()->getSVal(RHS, Pred->getLocationContext());
+
+ DefinedOrUnknownSVal DefinedRHS = cast<DefinedOrUnknownSVal>(RHSVal);
+ ProgramStateRef StTrue, StFalse;
+ llvm::tie(StTrue, StFalse) = N->getState()->assume(DefinedRHS);
+ if (StTrue) {
+ if (StFalse) {
+ // We can't constrain the value to 0 or 1; the best we can do is a cast.
+ X = getSValBuilder().evalCast(RHSVal, B->getType(), RHS->getType());
+ } else {
+ // The value is known to be true.
+ X = getSValBuilder().makeIntVal(1, B->getType());
+ }
+ } else {
+ // The value is known to be false.
+ assert(StFalse && "Infeasible path!");
+ X = getSValBuilder().makeIntVal(0, B->getType());
+ }
}
Bldr.generateNode(B, Pred, state->BindExpr(B, Pred->getLocationContext(), X));
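A short illustration of the constraint this adds (hypothetical code): when the last element of the source block is the RHS value itself, the result of the logical operator is pinned to 0 or 1, or merely cast when both branches remain feasible:

    int both(int a, int b) {
      int r = a && b;  // r is now constrained to 0 or 1, not left as the
                       // raw value of 'b'
      return r;
    }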
@@ -543,14 +560,15 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
QualType T = getContext().getCanonicalType(IE->getType());
unsigned NumInitElements = IE->getNumInits();
- if (T->isArrayType() || T->isRecordType() || T->isVectorType()) {
+ if (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
+ T->isAnyComplexType()) {
llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
// Handle base case where the initializer has no elements.
@@ -590,7 +608,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
const Expr *R,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
const CFGBlock *SrcBlock = 0;
@@ -631,7 +649,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
void ExprEngine::
VisitOffsetOfExpr(const OffsetOfExpr *OOE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
APSInt IV;
if (OOE->EvaluateAsInt(IV, getContext())) {
assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
@@ -650,7 +668,7 @@ void ExprEngine::
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
QualType T = Ex->getTypeOfArgument();
@@ -683,7 +701,7 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
switch (U->getOpcode()) {
default: {
Bldr.takeNodes(Pred);
@@ -816,7 +834,7 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
evalLoad(Tmp, U, Ex, Pred, state, loc);
ExplodedNodeSet Dst2;
- StmtNodeBuilder Bldr(Tmp, Dst2, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Tmp, Dst2, *currBldrCtx);
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end();I!=E;++I) {
state = (*I)->getState();
@@ -840,16 +858,17 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
if (U->getType()->isAnyPointerType())
RHS = svalBuilder.makeArrayIndex(1);
- else
+ else if (U->getType()->isIntegralOrEnumerationType())
RHS = svalBuilder.makeIntVal(1, U->getType());
+ else
+ RHS = UnknownVal();
SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
// Conjure a new symbol if necessary to recover precision.
if (Result.isUnknown()){
DefinedOrUnknownSVal SymVal =
- svalBuilder.getConjuredSymbolVal(NULL, Ex, LCtx,
- currentBuilderContext->getCurrentBlockCount());
+ svalBuilder.conjureSymbolVal(0, Ex, LCtx, currBldrCtx->blockCount());
Result = SymVal;
// If the value is a location, ++/-- should always preserve
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 44a860f..b3baa79 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -25,20 +25,28 @@ using namespace ento;
void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
const Expr *tempExpr = ME->GetTemporaryExpr()->IgnoreParens();
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
// Bind the temporary object to the value of the expression. Then bind
// the expression to the location of the object.
- SVal V = state->getSVal(tempExpr, Pred->getLocationContext());
-
- const MemRegion *R =
- svalBuilder.getRegionManager().getCXXTempObjectRegion(ME, LCtx);
+ SVal V = state->getSVal(tempExpr, LCtx);
+
+ // If the value is already a CXXTempObjectRegion, it is fine as it is.
+ // Otherwise, create a new CXXTempObjectRegion, and copy the value into it.
+ const MemRegion *MR = V.getAsRegion();
+ if (!MR || !isa<CXXTempObjectRegion>(MR)) {
+ const MemRegion *R =
+ svalBuilder.getRegionManager().getCXXTempObjectRegion(ME, LCtx);
+
+ SVal L = loc::MemRegionVal(R);
+ state = state->bindLoc(L, V);
+ V = L;
+ }
- state = state->bindLoc(loc::MemRegionVal(R), V);
- Bldr.generateNode(ME, Pred, state->BindExpr(ME, LCtx, loc::MemRegionVal(R)));
+ Bldr.generateNode(ME, Pred, state->BindExpr(ME, LCtx, V));
}
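CreateCXXTemporaryObject now reuses an existing CXXTempObjectRegion instead of unconditionally allocating a new one. Hypothetical code that reaches this path:

    struct Big { int data[4]; };
    Big make();
    void bindTemporary() {
      // Materializing 'make()' keeps its existing temporary region when
      // there is one; otherwise the value is copied into a fresh region.
      const Big &ref = make();
      (void)ref;
    }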
void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
@@ -53,9 +61,9 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
case CXXConstructExpr::CK_Complete: {
// See if we're constructing an existing region by looking at the next
// element in the CFG.
- const CFGBlock *B = currentBuilderContext->getBlock();
- if (currentStmtIdx + 1 < B->size()) {
- CFGElement Next = (*B)[currentStmtIdx+1];
+ const CFGBlock *B = currBldrCtx->getBlock();
+ if (currStmtIdx + 1 < B->size()) {
+ CFGElement Next = (*B)[currStmtIdx+1];
// Is this a constructor for a local variable?
if (const CFGStmt *StmtElem = dyn_cast<CFGStmt>(&Next)) {
@@ -101,8 +109,12 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
// FIXME: This will eventually need to handle new-expressions as well.
}
- // If we couldn't find an existing region to construct into, we'll just
- // generate a symbolic region, which is fine.
+ // If we couldn't find an existing region to construct into, assume we're
+ // constructing a temporary.
+ if (!Target) {
+ MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
+ Target = MRMgr.getCXXTempObjectRegion(CE, LCtx);
+ }
break;
}
@@ -137,7 +149,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
*Call, *this);
ExplodedNodeSet DstInvalidated;
- StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currentBuilderContext);
+ StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
I != E; ++I)
defaultEvalCall(Bldr, *I, *Call);
@@ -151,6 +163,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
void ExprEngine::VisitCXXDestructor(QualType ObjectType,
const MemRegion *Dest,
const Stmt *S,
+ bool IsBaseDtor,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
const LocationContext *LCtx = Pred->getLocationContext();
@@ -171,7 +184,7 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXDestructorCall> Call =
- CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, State, LCtx);
+ CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, IsBaseDtor, State, LCtx);
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
Call->getSourceRange().getBegin(),
@@ -182,7 +195,7 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
*Call, *this);
ExplodedNodeSet DstInvalidated;
- StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currentBuilderContext);
+ StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
I != E; ++I)
defaultEvalCall(Bldr, *I, *Call);
@@ -198,12 +211,13 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// Also, we need to decide how allocators actually work -- they're not
// really part of the CXXNewExpr because they happen BEFORE the
// CXXConstructExpr subexpression. See PR12014 for some discussion.
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
- unsigned blockCount = currentBuilderContext->getCurrentBlockCount();
+ unsigned blockCount = currBldrCtx->blockCount();
const LocationContext *LCtx = Pred->getLocationContext();
- DefinedOrUnknownSVal symVal =
- svalBuilder.getConjuredSymbolVal(0, CNE, LCtx, CNE->getType(), blockCount);
+ DefinedOrUnknownSVal symVal = svalBuilder.conjureSymbolVal(0, CNE, LCtx,
+ CNE->getType(),
+ blockCount);
ProgramStateRef State = Pred->getState();
CallEventManager &CEMgr = getStateManager().getCallEventManager();
@@ -215,6 +229,18 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// we should be using the usual pre-/(default-)eval-/post-call checks here.
State = Call->invalidateRegions(blockCount);
+ // If we're compiling with exceptions enabled, and this allocation function
+ // is not declared as non-throwing, failures /must/ be signalled by
+ // exceptions, and thus the return value will never be NULL.
+ // C++11 [basic.stc.dynamic.allocation]p3.
+ FunctionDecl *FD = CNE->getOperatorNew();
+ if (FD && getContext().getLangOpts().CXXExceptions) {
+ QualType Ty = FD->getType();
+ if (const FunctionProtoType *ProtoType = Ty->getAs<FunctionProtoType>())
+ if (!ProtoType->isNothrow(getContext()))
+ State = State->assume(symVal, true);
+ }
+
if (CNE->isArray()) {
// FIXME: allocating an array requires simulating the constructors.
// For now, just return a symbolicated region.
@@ -232,11 +258,12 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// CXXNewExpr, we need to make sure that the constructed object is not
// immediately invalidated here. (The placement call should happen before
// the constructor call anyway.)
- FunctionDecl *FD = CNE->getOperatorNew();
if (FD && FD->isReservedGlobalPlacementOperator()) {
// Non-array placement new should always return the placement location.
SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
- State = State->BindExpr(CNE, LCtx, PlacementLoc);
+ SVal Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
+ CNE->getPlacementArg(0)->getType());
+ State = State->BindExpr(CNE, LCtx, Result);
} else {
State = State->BindExpr(CNE, LCtx, symVal);
}
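A combined sketch of the two operator-new refinements above (hypothetical code, assuming exceptions are enabled):

    #include <new>
    void allocations(char *buf) {
      int *p = new int(1);        // a throwing operator new cannot return
                                  // NULL, so 'p' is assumed non-null
                                  // (C++11 [basic.stc.dynamic.allocation]p3)
      int *q = new (buf) int(2);  // placement new yields the placement
                                  // pointer, cast from 'char *' to 'int *'
      (void)p; (void)q;
    }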
@@ -259,7 +286,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
ProgramStateRef state = Pred->getState();
Bldr.generateNode(CDE, Pred, state);
}
@@ -274,18 +301,18 @@ void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS,
}
const LocationContext *LCtx = Pred->getLocationContext();
- SVal V = svalBuilder.getConjuredSymbolVal(CS, LCtx, VD->getType(),
- currentBuilderContext->getCurrentBlockCount());
+ SVal V = svalBuilder.conjureSymbolVal(CS, LCtx, VD->getType(),
+ currBldrCtx->blockCount());
ProgramStateRef state = Pred->getState();
state = state->bindLoc(state->getLValue(VD, LCtx), V);
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateNode(CS, Pred, state);
}
void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
// Get the this object region from StoreManager.
const LocationContext *LCtx = Pred->getLocationContext();
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 3b2e4ec..3ead081 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -17,19 +17,22 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ParentMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"
-#define CXX_INLINING_ENABLED 1
-
using namespace clang;
using namespace ento;
STATISTIC(NumOfDynamicDispatchPathSplits,
"The # of times we split the path due to imprecise dynamic dispatch info");
+STATISTIC(NumInlinedCalls,
+ "The # of times we inlined a call");
+
void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
// Get the entry block in the CFG of the callee.
const StackFrameContext *calleeCtx = CE.getCalleeContext();
@@ -64,35 +67,47 @@ static std::pair<const Stmt*,
const StackFrameContext *SF =
Node->getLocation().getLocationContext()->getCurrentStackFrame();
- // Back up through the ExplodedGraph until we reach a statement node.
+ // Back up through the ExplodedGraph until we reach a statement node in this
+ // stack frame.
while (Node) {
const ProgramPoint &PP = Node->getLocation();
- if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP)) {
- S = SP->getStmt();
- break;
- } else if (const CallExitEnd *CEE = dyn_cast<CallExitEnd>(&PP)) {
- S = CEE->getCalleeContext()->getCallSite();
- if (S)
+ if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
+ if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP)) {
+ S = SP->getStmt();
break;
- // If we have an implicit call, we'll probably end up with a
- // StmtPoint inside the callee, which is acceptable.
- // (It's possible a function ONLY contains implicit calls -- such as an
- // implicitly-generated destructor -- so we shouldn't just skip back to
- // the CallEnter node and keep going.)
+ } else if (const CallExitEnd *CEE = dyn_cast<CallExitEnd>(&PP)) {
+ S = CEE->getCalleeContext()->getCallSite();
+ if (S)
+ break;
+
+ // If there is no statement, this is an implicitly-generated call.
+ // We'll walk backwards over it and then continue the loop to find
+ // an actual statement.
+ const CallEnter *CE;
+ do {
+ Node = Node->getFirstPred();
+ CE = Node->getLocationAs<CallEnter>();
+ } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
+
+ // Continue searching the graph.
+ }
} else if (const CallEnter *CE = dyn_cast<CallEnter>(&PP)) {
// If we reached the CallEnter for this function, it has no statements.
if (CE->getCalleeContext() == SF)
break;
}
+ if (Node->pred_empty())
+ return std::pair<const Stmt*, const CFGBlock*>((Stmt*)0, (CFGBlock*)0);
+
Node = *Node->pred_begin();
}
const CFGBlock *Blk = 0;
if (S) {
// Now, get the enclosing basic block.
- while (Node && Node->pred_size() >=1 ) {
+ while (Node) {
const ProgramPoint &PP = Node->getLocation();
if (isa<BlockEdge>(PP) &&
(PP.getLocationContext()->getCurrentStackFrame() == SF)) {
@@ -100,6 +115,9 @@ static std::pair<const Stmt*,
Blk = EPP.getDst();
break;
}
+ if (Node->pred_empty())
+ return std::pair<const Stmt*, const CFGBlock*>(S, (CFGBlock*)0);
+
Node = *Node->pred_begin();
}
}
@@ -107,6 +125,82 @@ static std::pair<const Stmt*,
return std::pair<const Stmt*, const CFGBlock*>(S, Blk);
}
+/// Adjusts a return value when the called function's return type does not
+/// match the caller's expression type. This can happen when a dynamic call
+/// is devirtualized, and the overridding method has a covariant (more specific)
+/// return type than the parent's method. For C++ objects, this means we need
+/// to add base casts.
+static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
+ StoreManager &StoreMgr) {
+ // For now, the only adjustments we handle apply only to locations.
+ if (!isa<Loc>(V))
+ return V;
+
+ // If the types already match, don't do any unnecessary work.
+ ExpectedTy = ExpectedTy.getCanonicalType();
+ ActualTy = ActualTy.getCanonicalType();
+ if (ExpectedTy == ActualTy)
+ return V;
+
+ // No adjustment is needed between Objective-C pointer types.
+ if (ExpectedTy->isObjCObjectPointerType() &&
+ ActualTy->isObjCObjectPointerType())
+ return V;
+
+ // C++ object pointers may need "derived-to-base" casts.
+ const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
+ const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
+ if (ExpectedClass && ActualClass) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
+ !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
+ return StoreMgr.evalDerivedToBase(V, Paths.front());
+ }
+ }
+
+ // Unfortunately, Objective-C does not enforce that overridden methods have
+ // covariant return types, so we can't assert that that never happens.
+ // Be safe and return UnknownVal().
+ return UnknownVal();
+}
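Hypothetical code showing the covariant-return situation adjustReturnValue handles:

    struct Base { virtual Base *clone() const; };
    struct Derived : public Base {
      virtual Derived *clone() const;  // covariant return type
    };
    Base *copyOf(const Base &B) {
      // If dynamic dispatch is devirtualized to Derived::clone, the
      // returned 'Derived *' is cast back to the expected 'Base *'.
      return B.clone();
    }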
+
+void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ NodeBuilder Bldr(Pred, Dst, BC);
+
+ // Find the last statement in the function and the corresponding basic block.
+ const Stmt *LastSt = 0;
+ const CFGBlock *Blk = 0;
+ llvm::tie(LastSt, Blk) = getLastStmt(Pred);
+ if (!Blk || !LastSt) {
+ return;
+ }
+
+  // If the last statement is a return, everything it references should stay live.
+ if (isa<ReturnStmt>(LastSt))
+ return;
+
+  // Here, we call the Symbol Reaper with 0 stack context, telling it to clean up
+ // everything on the stack. We use LastStmt as a diagnostic statement, with
+ // which the PreStmtPurgeDead point will be associated.
+ currBldrCtx = &BC;
+ removeDead(Pred, Dst, 0, 0, LastSt,
+ ProgramPoint::PostStmtPurgeDeadSymbolsKind);
+ currBldrCtx = 0;
+}
+
+static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
+ const StackFrameContext *calleeCtx) {
+ const Decl *RuntimeCallee = calleeCtx->getDecl();
+ const Decl *StaticDecl = Call->getDecl();
+ assert(RuntimeCallee);
+ if (!StaticDecl)
+ return true;
+ return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
+}
+
/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
@@ -133,6 +227,11 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
const CFGBlock *Blk = 0;
llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);
+ // Generate a CallEvent /before/ cleaning the state, so that we can get the
+ // correct value for 'this' (if necessary).
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
+
// Step 2: generate node with bound return value: CEBNode -> BindedRetNode.
// If the callee returns an expression, bind its value to CallExpr.
@@ -140,6 +239,19 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
const LocationContext *LCtx = CEBNode->getLocationContext();
SVal V = state->getSVal(RS, LCtx);
+
+ // Ensure that the return type matches the type of the returned Expr.
+ if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
+ QualType ReturnedTy =
+ CallEvent::getDeclaredResultType(calleeCtx->getDecl());
+ if (!ReturnedTy.isNull()) {
+ if (const Expr *Ex = dyn_cast<Expr>(CE)) {
+ V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
+ getStoreManager());
+ }
+ }
+ }
+
state = state->BindExpr(CE, callerCtx, V);
}
@@ -149,23 +261,25 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
SVal ThisV = state->getSVal(This);
- // Always bind the region to the CXXConstructExpr.
+ // If the constructed object is a prvalue, get its bindings.
+ // Note that we have to be careful here because constructors embedded
+ // in DeclStmts are not marked as lvalues.
+ if (!CCE->isGLValue())
+ if (const MemRegion *MR = ThisV.getAsRegion())
+ if (isa<CXXTempObjectRegion>(MR))
+ ThisV = state->getSVal(cast<Loc>(ThisV));
+
state = state->BindExpr(CCE, callerCtx, ThisV);
}
}
- // Generate a CallEvent /before/ cleaning the state, so that we can get the
- // correct value for 'this' (if necessary).
- CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
-
// Step 3: BindedRetNode -> CleanedNodes
// If we can find a statement and a block in the inlined function, run remove
// dead bindings before returning from the call. This is important to ensure
// that we report the issues such as leaks in the stack contexts in which
// they occurred.
ExplodedNodeSet CleanedNodes;
- if (LastSt && Blk) {
+ if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
PostStmt Loc(LastSt, calleeCtx, &retValBind);
bool isNew;
@@ -175,14 +289,14 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
return;
NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
- currentBuilderContext = &Ctx;
+ currBldrCtx = &Ctx;
// Here, we call the Symbol Reaper with 0 statement and caller location
// context, telling it to clean up everything in the callee's context
// (and its children). We use LastStmt as a diagnostic statement, with
// which the PreStmtPurgeDead point will be associated.
removeDead(BindedRetNode, CleanedNodes, 0, callerCtx, LastSt,
ProgramPoint::PostStmtPurgeDeadSymbolsKind);
- currentBuilderContext = 0;
+ currBldrCtx = 0;
} else {
CleanedNodes.Add(CEBNode);
}
@@ -204,9 +318,9 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
// result onto the work list.
// CEENode -> Dst -> WorkList
NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
- SaveAndRestore<const NodeBuilderContext*> NBCSave(currentBuilderContext,
+ SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
&Ctx);
- SaveAndRestore<unsigned> CBISave(currentStmtIdx, calleeCtx->getIndex());
+ SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());
CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
@@ -236,14 +350,48 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
}
}
-static unsigned getNumberStackFrames(const LocationContext *LCtx) {
- unsigned count = 0;
+void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
+ bool &IsRecursive, unsigned &StackDepth) {
+ IsRecursive = false;
+ StackDepth = 0;
+
while (LCtx) {
- if (isa<StackFrameContext>(LCtx))
- ++count;
+ if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
+ const Decl *DI = SFC->getDecl();
+
+ // Mark recursive (and mutually recursive) functions and always count
+ // them when measuring the stack depth.
+ if (DI == D) {
+ IsRecursive = true;
+ ++StackDepth;
+ LCtx = LCtx->getParent();
+ continue;
+ }
+
+ // Do not count small functions when determining the stack depth.
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
+ const CFG *CalleeCFG = CalleeADC->getCFG();
+ if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
+ ++StackDepth;
+ }
LCtx = LCtx->getParent();
}
- return count;
+}
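(A standalone sketch of the depth policy above, with plain structs standing in for LocationContext and the CFG; AlwaysInlineSize mirrors the getAlwaysInlineSize() option and is an assumption of this sketch.)

    // Recursive frames always count toward the depth; other frames count
    // only when their CFG exceeds the always-inline size threshold.
    struct Frame {
      const void *Decl;      // identity of the function for this frame
      unsigned NumCFGBlocks; // stand-in for CalleeCFG->getNumBlockIDs()
      const Frame *Parent;
    };

    void examineFrames(const void *D, const Frame *F, unsigned AlwaysInlineSize,
                       bool &IsRecursive, unsigned &Depth) {
      IsRecursive = false;
      Depth = 0;
      for (; F; F = F->Parent) {
        if (F->Decl == D) {
          IsRecursive = true;
          ++Depth;                                  // recursion always counts
        } else if (F->NumCFGBlocks > AlwaysInlineSize) {
          ++Depth;                                  // small frames are free
        }
      }
    }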
+
+static bool IsInStdNamespace(const FunctionDecl *FD) {
+ const DeclContext *DC = FD->getEnclosingNamespaceContext();
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND)
+ return false;
+
+ while (const DeclContext *Parent = ND->getParent()) {
+ if (!isa<NamespaceDecl>(Parent))
+ break;
+ ND = cast<NamespaceDecl>(Parent);
+ }
+
+ return ND->getName() == "std";
}
// Determine if we should inline the call.
@@ -256,14 +404,18 @@ bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
if (!CalleeCFG)
return false;
- if (getNumberStackFrames(Pred->getLocationContext())
- == AMgr.InlineMaxStackDepth)
+ bool IsRecursive = false;
+ unsigned StackDepth = 0;
+ examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
+ if ((StackDepth >= AMgr.options.InlineMaxStackDepth) &&
+ ((CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
+ || IsRecursive))
return false;
if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
return false;
- if (CalleeCFG->getNumBlockIDs() > AMgr.InlineMaxFunctionSize)
+ if (CalleeCFG->getNumBlockIDs() > AMgr.options.InlineMaxFunctionSize)
return false;
// Do not inline variadic calls (for now).
@@ -276,6 +428,21 @@ bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
return false;
}
+ if (getContext().getLangOpts().CPlusPlus) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Conditionally allow the inlining of template functions.
+ if (!getAnalysisManager().options.mayInlineTemplateFunctions())
+ if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
+ return false;
+
+ // Conditionally allow the inlining of C++ standard library functions.
+ if (!getAnalysisManager().options.mayInlineCXXStandardLibrary())
+ if (getContext().getSourceManager().isInSystemHeader(FD->getLocation()))
+ if (IsInStdNamespace(FD))
+ return false;
+ }
+ }
+
// It is possible that the live variables analysis cannot be
// run. If so, bail out.
if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
@@ -284,26 +451,21 @@ bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
return true;
}
-/// The GDM component containing the dynamic dispatch bifurcation info. When
-/// the exact type of the receiver is not known, we want to explore both paths -
-/// one on which we do inline it and the other one on which we don't. This is
-/// done to ensure we do not drop coverage.
-/// This is the map from the receiver region to a bool, specifying either we
-/// consider this region's information precise or not along the given path.
-namespace clang {
-namespace ento {
-enum DynamicDispatchMode { DynamicDispatchModeInlined = 1,
- DynamicDispatchModeConservative };
-
-struct DynamicDispatchBifurcationMap {};
-typedef llvm::ImmutableMap<const MemRegion*,
- unsigned int> DynamicDispatchBifur;
-template<> struct ProgramStateTrait<DynamicDispatchBifurcationMap>
- : public ProgramStatePartialTrait<DynamicDispatchBifur> {
- static void *GDMIndex() { static int index; return &index; }
-};
-
-}}
+// The GDM component containing the dynamic dispatch bifurcation info. When
+// the exact type of the receiver is not known, we want to explore both
+// paths: one on which we inline the call and one on which we don't. This is
+// done to ensure we do not drop coverage.
+// This is a map from the receiver region to a bool specifying whether we
+// consider this region's information precise along the given path.
+namespace {
+ enum DynamicDispatchMode {
+ DynamicDispatchModeInlined = 1,
+ DynamicDispatchModeConservative
+ };
+}
+REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
+ CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
+ unsigned))
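(A standalone sketch of the bookkeeping this trait enables, using std::map in place of the immutable map that actually lives in the program state; the names here are illustrative only. BifurcateCall below shows the real State->get<DynamicDispatchBifurcationMap>() lookup.)

    #include <map>

    enum DispatchMode { ModeInlined = 1, ModeConservative };
    typedef std::map<const void *, unsigned> BifurMap; // receiver region -> mode

    // Returns the mode to follow for this receiver region, recording the
    // decision the first time the region is encountered so each path splits
    // at most once per region.
    unsigned modeFor(BifurMap &M, const void *Region, unsigned FirstChoice) {
      BifurMap::const_iterator It = M.find(Region);
      if (It != M.end())
        return It->second;     // already split here: stay on the chosen path
      M[Region] = FirstChoice; // first encounter: record and split
      return FirstChoice;
    }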
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
NodeBuilder &Bldr, ExplodedNode *Pred,
@@ -314,24 +476,19 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
const LocationContext *ParentOfCallee = 0;
+ AnalyzerOptions &Opts = getAnalysisManager().options;
+
// FIXME: Refactor this check into a hypothetical CallEvent::canInline.
switch (Call.getKind()) {
case CE_Function:
break;
case CE_CXXMember:
case CE_CXXMemberOperator:
- if (!CXX_INLINING_ENABLED)
+ if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
return false;
break;
case CE_CXXConstructor: {
- if (!CXX_INLINING_ENABLED)
- return false;
-
- // Only inline constructors and destructors if we built the CFGs for them
- // properly.
- const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
- if (!ADC->getCFGBuildOptions().AddImplicitDtors ||
- !ADC->getCFGBuildOptions().AddInitializers)
+ if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
return false;
const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
@@ -341,9 +498,31 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
if (Target && isa<ElementRegion>(Target))
return false;
+ // FIXME: This is a hack. We don't use the correct region for a new
+ // expression, so if we inline the constructor its result will just be
+ // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
+ // and the longer-term possible fix is discussed in PR12014.
+ const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
+ if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
+ if (isa<CXXNewExpr>(Parent))
+ return false;
+
+ // Inlining constructors requires including initializers in the CFG.
+ const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+ assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
+ (void)ADC;
+
+ // If the destructor is trivial, it's always safe to inline the constructor.
+ if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
+ break;
+
+ // For other types, only inline constructors if destructor inlining is
+ // also enabled.
+ if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
+ return false;
+
// FIXME: This is a hack. We don't handle temporary destructors
// right now, so we shouldn't inline their constructors.
- const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
if (!Target || !isa<DeclRegion>(Target))
return false;
@@ -351,15 +530,13 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
break;
}
case CE_CXXDestructor: {
- if (!CXX_INLINING_ENABLED)
+ if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
return false;
- // Only inline constructors and destructors if we built the CFGs for them
- // properly.
+ // Inlining destructors requires building the CFG correctly.
const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
- if (!ADC->getCFGBuildOptions().AddImplicitDtors ||
- !ADC->getCFGBuildOptions().AddInitializers)
- return false;
+ assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
+ (void)ADC;
const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);
@@ -371,9 +548,6 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
break;
}
case CE_CXXAllocator:
- if (!CXX_INLINING_ENABLED)
- return false;
-
// Do not inline allocators until we model deallocators.
// This is unfortunate, but basically necessary for smart pointers and such.
return false;
@@ -387,8 +561,10 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
break;
}
case CE_ObjCMessage:
- if (!(getAnalysisManager().IPAMode == DynamicDispatch ||
- getAnalysisManager().IPAMode == DynamicDispatchBifurcate))
+ if (!Opts.mayInlineObjCMethod())
+ return false;
+ if (!(getAnalysisManager().options.IPAMode == DynamicDispatch ||
+ getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate))
return false;
break;
}
@@ -406,8 +582,8 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
const StackFrameContext *CalleeSFC =
CalleeADC->getStackFrame(ParentOfCallee, CallE,
- currentBuilderContext->getBlock(),
- currentStmtIdx);
+ currBldrCtx->getBlock(),
+ currStmtIdx);
CallEnter Loc(CallE, CalleeSFC, CurLC);
@@ -426,6 +602,12 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
// added onto the work list so remove it from the node builder.
Bldr.takeNodes(Pred);
+ NumInlinedCalls++;
+
+ // Mark the decl as visited.
+ if (VisitedCallees)
+ VisitedCallees->insert(D);
+
return true;
}
@@ -520,8 +702,8 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
// Conjure a symbol if the return value is unknown.
QualType ResultTy = Call.getResultType();
SValBuilder &SVB = getSValBuilder();
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
- SVal R = SVB.getConjuredSymbolVal(0, E, LCtx, ResultTy, Count);
+ unsigned Count = currBldrCtx->blockCount();
+ SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count);
return State->BindExpr(E, LCtx, R);
}
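(A standalone sketch of why blockCount() participates in conjuring: the visit count keeps symbols from different passes over the same call site distinct, so a call in a loop does not appear to return one frozen value. The Symbol struct below is a stand-in, not the analyzer's SymbolRef.)

    #include <cstdio>

    struct Symbol {
      const void *Expr;    // the call expression being given a value
      unsigned BlockCount; // how many times its block has been visited
    };

    bool sameSymbol(const Symbol &A, const Symbol &B) {
      return A.Expr == B.Expr && A.BlockCount == B.BlockCount;
    }

    int main() {
      int callSite; // address stands in for the CallExpr
      Symbol FirstPass = { &callSite, 1 };
      Symbol SecondPass = { &callSite, 2 }; // fresh unknown on the next pass
      std::printf("%d\n", sameSymbol(FirstPass, SecondPass)); // prints 0
      return 0;
    }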
@@ -529,8 +711,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
ExplodedNode *Pred, ProgramStateRef State) {
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
- State = Call.invalidateRegions(Count, State);
+ State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
State = bindReturnValue(Call, Pred->getLocationContext(), State);
// And make the result node.
@@ -562,13 +743,13 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
if (D) {
if (RD.mayHaveOtherDefinitions()) {
// Explore with and without inlining the call.
- if (getAnalysisManager().IPAMode == DynamicDispatchBifurcate) {
+ if (getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate) {
BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
return;
}
// Don't inline if we're not in any dynamic dispatch mode.
- if (getAnalysisManager().IPAMode != DynamicDispatch) {
+ if (getAnalysisManager().options.IPAMode != DynamicDispatch) {
conservativeEvalCall(*Call, Bldr, Pred, State);
return;
}
@@ -593,7 +774,7 @@ void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
// Check if we've performed the split already - note, we only want
// to split the path once per memory region.
ProgramStateRef State = Pred->getState();
- const unsigned int *BState =
+ const unsigned *BState =
State->get<DynamicDispatchBifurcationMap>(BifurReg);
if (BState) {
// If we are on "inline path", keep inlining if possible.
@@ -630,7 +811,7 @@ void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
ExplodedNodeSet dstPreVisit;
getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
- StmtNodeBuilder B(dstPreVisit, Dst, *currentBuilderContext);
+ StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
if (RS->getRetValue()) {
for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index e3bc498..51dda19b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -28,7 +28,7 @@ void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *Ex,
SVal location = state->getLValue(Ex->getDecl(), baseVal);
ExplodedNodeSet dstIvar;
- StmtNodeBuilder Bldr(Pred, dstIvar, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, dstIvar, *currBldrCtx);
Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, location));
// Perform the post-condition check of the ObjCIvarRefExpr and store
@@ -88,7 +88,7 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
evalLocation(dstLocation, S, elem, Pred, state, elementV, NULL, false);
ExplodedNodeSet Tmp;
- StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
+ StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
for (ExplodedNodeSet::iterator NI = dstLocation.begin(),
NE = dstLocation.end(); NI!=NE; ++NI) {
@@ -112,8 +112,8 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
// For now, just 'conjure' up a symbolic value.
QualType T = R->getValueType();
assert(Loc::isLocType(T));
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
- SymbolRef Sym = SymMgr.getConjuredSymbol(elem, LCtx, T, Count);
+ SymbolRef Sym = SymMgr.conjureSymbol(elem, LCtx, T,
+ currBldrCtx->blockCount());
SVal V = svalBuilder.makeLoc(Sym);
hasElems = hasElems->bindLoc(elementV, V);
@@ -132,14 +132,6 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
}
-static bool isSubclass(const ObjCInterfaceDecl *Class, IdentifierInfo *II) {
- if (!Class)
- return false;
- if (Class->getIdentifier() == II)
- return true;
- return isSubclass(Class->getSuperClass(), II);
-}
-
void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
@@ -157,7 +149,7 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
// Proceed with evaluating the message expression.
ExplodedNodeSet dstEval;
- StmtNodeBuilder Bldr(dstGenericPrevisit, dstEval, *currentBuilderContext);
+ StmtNodeBuilder Bldr(dstGenericPrevisit, dstEval, *currBldrCtx);
for (ExplodedNodeSet::iterator DI = dstGenericPrevisit.begin(),
DE = dstGenericPrevisit.end(); DI != DE; ++DI) {
@@ -184,68 +176,30 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
// Check if the "raise" message was sent.
assert(notNilState);
- if (Msg->getSelector() == RaiseSel) {
+ if (ObjCNoRet.isImplicitNoReturn(ME)) {
// If we raise an exception, for now treat it as a sink.
// Eventually we will want to handle exceptions properly.
- Bldr.generateNode(currentStmt, Pred, State, true);
+ Bldr.generateSink(currStmt, Pred, State);
continue;
}
// Generate a transition to non-Nil state.
- if (notNilState != State)
- Pred = Bldr.generateNode(currentStmt, Pred, notNilState);
+ if (notNilState != State) {
+ Pred = Bldr.generateNode(currStmt, Pred, notNilState);
+ assert(Pred && "Should have cached out already!");
+ }
}
} else {
- // Check for special class methods.
- if (const ObjCInterfaceDecl *Iface = Msg->getReceiverInterface()) {
- if (!NSExceptionII) {
- ASTContext &Ctx = getContext();
- NSExceptionII = &Ctx.Idents.get("NSException");
- }
-
- if (isSubclass(Iface, NSExceptionII)) {
- enum { NUM_RAISE_SELECTORS = 2 };
-
- // Lazily create a cache of the selectors.
- if (!NSExceptionInstanceRaiseSelectors) {
- ASTContext &Ctx = getContext();
- NSExceptionInstanceRaiseSelectors =
- new Selector[NUM_RAISE_SELECTORS];
- SmallVector<IdentifierInfo*, NUM_RAISE_SELECTORS> II;
- unsigned idx = 0;
-
- // raise:format:
- II.push_back(&Ctx.Idents.get("raise"));
- II.push_back(&Ctx.Idents.get("format"));
- NSExceptionInstanceRaiseSelectors[idx++] =
- Ctx.Selectors.getSelector(II.size(), &II[0]);
-
- // raise:format:arguments:
- II.push_back(&Ctx.Idents.get("arguments"));
- NSExceptionInstanceRaiseSelectors[idx++] =
- Ctx.Selectors.getSelector(II.size(), &II[0]);
- }
-
- Selector S = Msg->getSelector();
- bool RaisesException = false;
- for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i) {
- if (S == NSExceptionInstanceRaiseSelectors[i]) {
- RaisesException = true;
- break;
- }
- }
- if (RaisesException) {
- // If we raise an exception, for now treat it as a sink.
- // Eventually we will want to handle exceptions properly.
- Bldr.generateNode(currentStmt, Pred, Pred->getState(), true);
- continue;
- }
-
- }
+ // Check for special class methods that are known not to return
+ // and that we should treat as a sink.
+ if (ObjCNoRet.isImplicitNoReturn(ME)) {
+ // If we raise an exception, for now treat it as a sink.
+ // Eventually we will want to handle exceptions properly.
+ Bldr.generateSink(currStmt, Pred, Pred->getState());
+ continue;
}
}
- // Evaluate the call.
defaultEvalCall(Bldr, Pred, *UpdatedMsg);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 982bcbf..fd875f6 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -17,8 +17,8 @@
#include "clang/AST/Decl.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Rewrite/Rewriter.h"
-#include "clang/Rewrite/HTMLRewrite.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/FileSystem.h"
@@ -189,7 +189,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
<< (*path.rbegin())->getLocation().asLocation().getExpansionColumnNumber()
<< "</a></td></tr>\n"
"<tr><td class=\"rowname\">Description:</td><td>"
- << D.getDescription() << "</td></tr>\n";
+ << D.getVerboseDescription() << "</td></tr>\n";
// Output any other meta data.
@@ -209,15 +209,15 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
std::string s;
llvm::raw_string_ostream os(s);
- const std::string& BugDesc = D.getDescription();
+ StringRef BugDesc = D.getVerboseDescription();
if (!BugDesc.empty())
os << "\n<!-- BUGDESC " << BugDesc << " -->\n";
- const std::string& BugType = D.getBugType();
+ StringRef BugType = D.getBugType();
if (!BugType.empty())
os << "\n<!-- BUGTYPE " << BugType << " -->\n";
- const std::string& BugCategory = D.getCategory();
+ StringRef BugCategory = D.getCategory();
if (!BugCategory.empty())
os << "\n<!-- BUGCATEGORY " << BugCategory << " -->\n";
@@ -267,8 +267,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
}
if (filesMade) {
- filesMade->push_back(std::make_pair(StringRef(getName()),
- llvm::sys::path::filename(H.str())));
+ filesMade->addDiagnostic(D, getName(), llvm::sys::path::filename(H.str()));
}
// Emit the HTML to disk.
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 62e602a..fab10cf 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -352,7 +352,7 @@ void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const {
}
void FunctionTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
- const FunctionDecl *FD,
+ const NamedDecl *FD,
const MemRegion*) {
ID.AddInteger(MemRegion::FunctionTextRegionKind);
ID.AddPointer(FD);
@@ -444,7 +444,7 @@ void MemRegion::dumpToStream(raw_ostream &os) const {
}
void AllocaRegion::dumpToStream(raw_ostream &os) const {
- os << "alloca{" << (void*) Ex << ',' << Cnt << '}';
+ os << "alloca{" << (const void*) Ex << ',' << Cnt << '}';
}
void FunctionTextRegion::dumpToStream(raw_ostream &os) const {
@@ -452,7 +452,7 @@ void FunctionTextRegion::dumpToStream(raw_ostream &os) const {
}
void BlockTextRegion::dumpToStream(raw_ostream &os) const {
- os << "block_code{" << (void*) this << '}';
+ os << "block_code{" << (const void*) this << '}';
}
void BlockDataRegion::dumpToStream(raw_ostream &os) const {
@@ -461,12 +461,12 @@ void BlockDataRegion::dumpToStream(raw_ostream &os) const {
void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const {
// FIXME: More elaborate pretty-printing.
- os << "{ " << (void*) CL << " }";
+ os << "{ " << (const void*) CL << " }";
}
void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
os << "temp_object{" << getValueType().getAsString() << ','
- << (void*) Ex << '}';
+ << (const void*) Ex << '}';
}
void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const {
@@ -748,11 +748,11 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
}
else {
assert(D->isStaticLocal());
- const Decl *D = STC->getDecl();
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ const Decl *STCD = STC->getDecl();
+ if (isa<FunctionDecl>(STCD) || isa<ObjCMethodDecl>(STCD))
sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
- getFunctionTextRegion(FD));
- else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ getFunctionTextRegion(cast<NamedDecl>(STCD)));
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(STCD)) {
const BlockTextRegion *BTR =
getBlockTextRegion(BD,
C.getCanonicalType(BD->getSignatureAsWritten()->getType()),
@@ -761,8 +761,6 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
BTR);
}
else {
- // FIXME: For ObjC-methods, we need a new CodeTextRegion. For now
- // just use the main global memspace.
sReg = getGlobalsRegion();
}
}
@@ -845,7 +843,7 @@ MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
}
const FunctionTextRegion *
-MemRegionManager::getFunctionTextRegion(const FunctionDecl *FD) {
+MemRegionManager::getFunctionTextRegion(const NamedDecl *FD) {
return getSubRegion<FunctionTextRegion>(FD, getCodeRegion());
}
@@ -990,6 +988,10 @@ const MemRegion *MemRegion::getBaseRegion() const {
return R;
}
+bool MemRegion::isSubRegionOf(const MemRegion *R) const {
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// View handling.
//===----------------------------------------------------------------------===//
@@ -1107,7 +1109,7 @@ RegionOffset MemRegion::getAsOffset() const {
// If our base region is symbolic, we don't know what type it really is.
// Pretend the type of the symbol is the true dynamic type.
// (This will at least be self-consistent for the life of the symbol.)
- Ty = SR->getSymbol()->getType(getContext())->getPointeeType();
+ Ty = SR->getSymbol()->getType()->getPointeeType();
}
const CXXRecordDecl *Child = Ty->getAsCXXRecordDecl();
@@ -1166,8 +1168,12 @@ RegionOffset MemRegion::getAsOffset() const {
R = FR->getSuperRegion();
const RecordDecl *RD = FR->getDecl()->getParent();
- if (!RD->isCompleteDefinition()) {
+ if (RD->isUnion() || !RD->isCompleteDefinition()) {
// We cannot compute the offset for an incomplete type.
+ // For unions, we could treat everything as offset 0, but we'd rather
+ // treat each field as a symbolic offset so they aren't stored on top
+ // of each other, since we depend on things in typed regions actually
+ // matching their types.
SymbolicOffsetBase = R;
}
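(A two-line illustration, as an assumed example, of the aliasing the comment above guards against: with concrete offsets both union members would land at offset 0 and be stored on top of each other.)

    union Pun { int i; float f; }; // i and f share offset 0
    // Binding Pun::i and then reading Pun::f must not type-confuse the
    // store, hence the symbolic per-field offsets.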
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index c849778..0f48d1e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtCXX.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace ento;
@@ -104,11 +105,12 @@ void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
PathDiagnostic::~PathDiagnostic() {}
PathDiagnostic::PathDiagnostic(const Decl *declWithIssue,
- StringRef bugtype, StringRef desc,
- StringRef category)
+ StringRef bugtype, StringRef verboseDesc,
+ StringRef shortDesc, StringRef category)
: DeclWithIssue(declWithIssue),
BugType(StripTrailingDots(bugtype)),
- Desc(StripTrailingDots(desc)),
+ VerboseDesc(StripTrailingDots(verboseDesc)),
+ ShortDesc(StripTrailingDots(shortDesc)),
Category(StripTrailingDots(category)),
path(pathImpl) {}
@@ -198,6 +200,7 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
if (orig_size <= new_size)
return;
+ assert(orig != D);
Diags.RemoveNode(orig);
delete orig;
}
@@ -205,39 +208,151 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
Diags.InsertNode(OwningD.take());
}
+static llvm::Optional<bool> comparePath(const PathPieces &X,
+ const PathPieces &Y);
+static llvm::Optional<bool>
+compareControlFlow(const PathDiagnosticControlFlowPiece &X,
+ const PathDiagnosticControlFlowPiece &Y) {
+ FullSourceLoc XSL = X.getStartLocation().asLocation();
+ FullSourceLoc YSL = Y.getStartLocation().asLocation();
+ if (XSL != YSL)
+ return XSL.isBeforeInTranslationUnitThan(YSL);
+ FullSourceLoc XEL = X.getEndLocation().asLocation();
+ FullSourceLoc YEL = Y.getEndLocation().asLocation();
+ if (XEL != YEL)
+ return XEL.isBeforeInTranslationUnitThan(YEL);
+ return llvm::Optional<bool>();
+}
+
+static llvm::Optional<bool>
+compareMacro(const PathDiagnosticMacroPiece &X,
+ const PathDiagnosticMacroPiece &Y) {
+ return comparePath(X.subPieces, Y.subPieces);
+}
+
+static llvm::Optional<bool>
+compareCall(const PathDiagnosticCallPiece &X,
+ const PathDiagnosticCallPiece &Y) {
+ FullSourceLoc X_CEL = X.callEnter.asLocation();
+ FullSourceLoc Y_CEL = Y.callEnter.asLocation();
+ if (X_CEL != Y_CEL)
+ return X_CEL.isBeforeInTranslationUnitThan(Y_CEL);
+ FullSourceLoc X_CEWL = X.callEnterWithin.asLocation();
+ FullSourceLoc Y_CEWL = Y.callEnterWithin.asLocation();
+ if (X_CEWL != Y_CEWL)
+ return X_CEWL.isBeforeInTranslationUnitThan(Y_CEWL);
+ FullSourceLoc X_CRL = X.callReturn.asLocation();
+ FullSourceLoc Y_CRL = Y.callReturn.asLocation();
+ if (X_CRL != Y_CRL)
+ return X_CRL.isBeforeInTranslationUnitThan(Y_CRL);
+ return comparePath(X.path, Y.path);
+}
+
+static llvm::Optional<bool> comparePiece(const PathDiagnosticPiece &X,
+ const PathDiagnosticPiece &Y) {
+ if (X.getKind() != Y.getKind())
+ return X.getKind() < Y.getKind();
+
+ FullSourceLoc XL = X.getLocation().asLocation();
+ FullSourceLoc YL = Y.getLocation().asLocation();
+ if (XL != YL)
+ return XL.isBeforeInTranslationUnitThan(YL);
+
+ if (X.getString() != Y.getString())
+ return X.getString() < Y.getString();
+
+ if (X.getRanges().size() != Y.getRanges().size())
+ return X.getRanges().size() < Y.getRanges().size();
+
+ const SourceManager &SM = XL.getManager();
+
+ for (unsigned i = 0, n = X.getRanges().size(); i < n; ++i) {
+ SourceRange XR = X.getRanges()[i];
+ SourceRange YR = Y.getRanges()[i];
+ if (XR != YR) {
+ if (XR.getBegin() != YR.getBegin())
+ return SM.isBeforeInTranslationUnit(XR.getBegin(), YR.getBegin());
+ return SM.isBeforeInTranslationUnit(XR.getEnd(), YR.getEnd());
+ }
+ }
+
+ switch (X.getKind()) {
+ case clang::ento::PathDiagnosticPiece::ControlFlow:
+ return compareControlFlow(cast<PathDiagnosticControlFlowPiece>(X),
+ cast<PathDiagnosticControlFlowPiece>(Y));
+ case clang::ento::PathDiagnosticPiece::Event:
+ return llvm::Optional<bool>();
+ case clang::ento::PathDiagnosticPiece::Macro:
+ return compareMacro(cast<PathDiagnosticMacroPiece>(X),
+ cast<PathDiagnosticMacroPiece>(Y));
+ case clang::ento::PathDiagnosticPiece::Call:
+ return compareCall(cast<PathDiagnosticCallPiece>(X),
+ cast<PathDiagnosticCallPiece>(Y));
+ }
+ llvm_unreachable("all cases handled");
+}
+
+static llvm::Optional<bool> comparePath(const PathPieces &X,
+ const PathPieces &Y) {
+ if (X.size() != Y.size())
+ return X.size() < Y.size();
+ for (unsigned i = 0, n = X.size(); i != n; ++i) {
+ llvm::Optional<bool> b = comparePiece(*X[i], *Y[i]);
+ if (b.hasValue())
+ return b.getValue();
+ }
+ return llvm::Optional<bool>();
+}
+
+static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
+ FullSourceLoc XL = X.getLocation().asLocation();
+ FullSourceLoc YL = Y.getLocation().asLocation();
+ if (XL != YL)
+ return XL.isBeforeInTranslationUnitThan(YL);
+ if (X.getBugType() != Y.getBugType())
+ return X.getBugType() < Y.getBugType();
+ if (X.getCategory() != Y.getCategory())
+ return X.getCategory() < Y.getCategory();
+ if (X.getVerboseDescription() != Y.getVerboseDescription())
+ return X.getVerboseDescription() < Y.getVerboseDescription();
+ if (X.getShortDescription() != Y.getShortDescription())
+ return X.getShortDescription() < Y.getShortDescription();
+ if (X.getDeclWithIssue() != Y.getDeclWithIssue()) {
+ const Decl *XD = X.getDeclWithIssue();
+ if (!XD)
+ return true;
+ const Decl *YD = Y.getDeclWithIssue();
+ if (!YD)
+ return false;
+ SourceLocation XDL = XD->getLocation();
+ SourceLocation YDL = YD->getLocation();
+ if (XDL != YDL) {
+ const SourceManager &SM = XL.getManager();
+ return SM.isBeforeInTranslationUnit(XDL, YDL);
+ }
+ }
+ PathDiagnostic::meta_iterator XI = X.meta_begin(), XE = X.meta_end();
+ PathDiagnostic::meta_iterator YI = Y.meta_begin(), YE = Y.meta_end();
+ if (XE - XI != YE - YI)
+ return (XE - XI) < (YE - YI);
+ for ( ; XI != XE ; ++XI, ++YI) {
+ if (*XI != *YI)
+ return (*XI) < (*YI);
+ }
+ llvm::Optional<bool> b = comparePath(X.path, Y.path);
+ assert(b.hasValue());
+ return b.getValue();
+}
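(A standalone sketch of the comparison pattern used above: each component comparator may abstain, and the caller falls through to the next criterion. MaybeBool is a stand-in for llvm::Optional<bool>, and ints stand in for path pieces.)

    #include <cstddef>
    #include <vector>

    struct MaybeBool {
      bool Known, Value;
      MaybeBool() : Known(false), Value(false) {}           // tie: undecided
      explicit MaybeBool(bool B) : Known(true), Value(B) {}
    };

    MaybeBool comparePieceSketch(int X, int Y) {
      if (X != Y)
        return MaybeBool(X < Y);
      return MaybeBool(); // equal: defer to the next criterion
    }

    MaybeBool comparePathSketch(const std::vector<int> &X,
                                const std::vector<int> &Y) {
      if (X.size() != Y.size())
        return MaybeBool(X.size() < Y.size());
      for (std::size_t i = 0, n = X.size(); i != n; ++i) {
        MaybeBool B = comparePieceSketch(X[i], Y[i]);
        if (B.Known)
          return B; // first decisive component wins
      }
      return MaybeBool(); // completely tied: caller must break the tie
    }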
namespace {
struct CompareDiagnostics {
// Return true if 'X' is "less than" 'Y'.
bool operator()(const PathDiagnostic *X, const PathDiagnostic *Y) const {
- // First compare by location
- const FullSourceLoc &XLoc = X->getLocation().asLocation();
- const FullSourceLoc &YLoc = Y->getLocation().asLocation();
- if (XLoc < YLoc)
- return true;
- if (XLoc != YLoc)
+ if (X == Y)
return false;
-
- // Next, compare by bug type.
- StringRef XBugType = X->getBugType();
- StringRef YBugType = Y->getBugType();
- if (XBugType < YBugType)
- return true;
- if (XBugType != YBugType)
- return false;
-
- // Next, compare by bug description.
- StringRef XDesc = X->getDescription();
- StringRef YDesc = Y->getDescription();
- if (XDesc < YDesc)
- return true;
- if (XDesc != YDesc)
- return false;
-
- // FIXME: Further refine by comparing PathDiagnosticPieces?
- return false;
- }
-};
+ return compare(*X, *Y);
+ }
+};
}
void PathDiagnosticConsumer::FlushDiagnostics(
@@ -250,11 +365,9 @@ void PathDiagnosticConsumer::FlushDiagnostics(
std::vector<const PathDiagnostic *> BatchDiags;
for (llvm::FoldingSet<PathDiagnostic>::iterator it = Diags.begin(),
et = Diags.end(); it != et; ++it) {
- BatchDiags.push_back(&*it);
+ const PathDiagnostic *D = &*it;
+ BatchDiags.push_back(D);
}
-
- // Clear out the FoldingSet.
- Diags.clear();
// Sort the diagnostics so that they are always emitted in a deterministic
// order.
@@ -269,6 +382,42 @@ void PathDiagnosticConsumer::FlushDiagnostics(
const PathDiagnostic *D = *it;
delete D;
}
+
+ // Clear out the FoldingSet.
+ Diags.clear();
+}
+
+void PathDiagnosticConsumer::FilesMade::addDiagnostic(const PathDiagnostic &PD,
+ StringRef ConsumerName,
+ StringRef FileName) {
+ llvm::FoldingSetNodeID NodeID;
+ NodeID.Add(PD);
+ void *InsertPos;
+ PDFileEntry *Entry = FindNodeOrInsertPos(NodeID, InsertPos);
+ if (!Entry) {
+ Entry = Alloc.Allocate<PDFileEntry>();
+ Entry = new (Entry) PDFileEntry(NodeID);
+ InsertNode(Entry, InsertPos);
+ }
+
+ // Allocate persistent storage for the file name.
+ char *FileName_cstr = (char*) Alloc.Allocate(FileName.size(), 1);
+ memcpy(FileName_cstr, FileName.data(), FileName.size());
+
+ Entry->files.push_back(std::make_pair(ConsumerName,
+ StringRef(FileName_cstr,
+ FileName.size())));
+}
+
+PathDiagnosticConsumer::PDFileEntry::ConsumerFiles *
+PathDiagnosticConsumer::FilesMade::getFiles(const PathDiagnostic &PD) {
+ llvm::FoldingSetNodeID NodeID;
+ NodeID.Add(PD);
+ void *InsertPos;
+ PDFileEntry *Entry = FindNodeOrInsertPos(NodeID, InsertPos);
+ if (!Entry)
+ return 0;
+ return &Entry->files;
}
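(A standalone sketch of the FilesMade bookkeeping, with std::map and std::string standing in for the profiled FoldingSet and the bump-allocated StringRefs; the diagnostic's FoldingSetNodeID is reduced to a string key here purely for illustration.)

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    typedef std::pair<std::string, std::string> ConsumerFile; // (consumer, file)
    typedef std::vector<ConsumerFile> ConsumerFiles;
    typedef std::map<std::string, ConsumerFiles> FilesMadeSketch; // key: profile

    void addDiagnostic(FilesMadeSketch &FM, const std::string &Profile,
                       const std::string &Consumer, const std::string &File) {
      FM[Profile].push_back(std::make_pair(Consumer, File));
    }

    const ConsumerFiles *getFiles(const FilesMadeSketch &FM,
                                  const std::string &Profile) {
      FilesMadeSketch::const_iterator It = FM.find(Profile);
      return It == FM.end() ? 0 : &It->second;
    }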
//===----------------------------------------------------------------------===//
@@ -437,8 +586,8 @@ PathDiagnosticLocation
const CFGBlock *BSrc = BE->getSrc();
S = BSrc->getTerminatorCondition();
}
- else if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
- S = PS->getStmt();
+ else if (const StmtPoint *SP = dyn_cast<StmtPoint>(&P)) {
+ S = SP->getStmt();
}
else if (const PostImplicitCall *PIE = dyn_cast<PostImplicitCall>(&P)) {
return PathDiagnosticLocation(PIE->getLocation(), SMng);
@@ -453,6 +602,9 @@ PathDiagnosticLocation
CEE->getLocationContext(),
SMng);
}
+ else {
+ llvm_unreachable("Unexpected ProgramPoint");
+ }
return PathDiagnosticLocation(S, SMng, P.getLocationContext());
}
@@ -463,21 +615,26 @@ PathDiagnosticLocation
assert(N && "Cannot create a location with a null node.");
const ExplodedNode *NI = N;
+ const Stmt *S = 0;
while (NI) {
ProgramPoint P = NI->getLocation();
- const LocationContext *LC = P.getLocationContext();
if (const StmtPoint *PS = dyn_cast<StmtPoint>(&P))
- return PathDiagnosticLocation(PS->getStmt(), SM, LC);
- else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
- const Stmt *Term = BE->getSrc()->getTerminator();
- if (Term) {
- return PathDiagnosticLocation(Term, SM, LC);
- }
- }
+ S = PS->getStmt();
+ else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P))
+ S = BE->getSrc()->getTerminator();
+ if (S)
+ break;
NI = NI->succ_empty() ? 0 : *(NI->succ_begin());
}
+ if (S) {
+ const LocationContext *LC = NI->getLocationContext();
+ if (S->getLocStart().isValid())
+ return PathDiagnosticLocation(S, SM, LC);
+ return PathDiagnosticLocation(getValidSourceLocation(S, LC), SM);
+ }
+
return createDeclEnd(N->getLocationContext(), SM);
}
@@ -587,24 +744,6 @@ void PathDiagnosticLocation::flatten() {
}
}
-PathDiagnosticLocation PathDiagnostic::getLocation() const {
- assert(path.size() > 0 &&
- "getLocation() requires a non-empty PathDiagnostic.");
-
- PathDiagnosticPiece *p = path.rbegin()->getPtr();
-
- while (true) {
- if (PathDiagnosticCallPiece *cp = dyn_cast<PathDiagnosticCallPiece>(p)) {
- assert(!cp->path.empty());
- p = cp->path.rbegin()->getPtr();
- continue;
- }
- break;
- }
-
- return p->getLocation();
-}
-
//===----------------------------------------------------------------------===//
// Manipulation of PathDiagnosticCallPieces.
//===----------------------------------------------------------------------===//
@@ -753,10 +892,9 @@ void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
}
void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
- if (!path.empty())
- getLocation().Profile(ID);
+ ID.Add(getLocation());
ID.AddString(BugType);
- ID.AddString(Desc);
+ ID.AddString(VerboseDesc);
ID.AddString(Category);
}
@@ -818,42 +956,16 @@ std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
return getMessageForSymbolNotFound();
}
-/// TODO: This is copied from clang diagnostics. Maybe we could just move it to
-/// some common place. (Same as HandleOrdinalModifier.)
-void StackHintGeneratorForSymbol::printOrdinal(unsigned ValNo,
- llvm::raw_svector_ostream &Out) {
- assert(ValNo != 0 && "ValNo must be strictly positive!");
-
- // We could use text forms for the first N ordinals, but the numeric
- // forms are actually nicer in diagnostics because they stand out.
- Out << ValNo;
-
- // It is critically important that we do this perfectly for
- // user-written sequences with over 100 elements.
- switch (ValNo % 100) {
- case 11:
- case 12:
- case 13:
- Out << "th"; return;
- default:
- switch (ValNo % 10) {
- case 1: Out << "st"; return;
- case 2: Out << "nd"; return;
- case 3: Out << "rd"; return;
- default: Out << "th"; return;
- }
- }
-}
-
std::string StackHintGeneratorForSymbol::getMessageForArg(const Expr *ArgE,
- unsigned ArgIndex) {
+ unsigned ArgIndex) {
+ // Printed parameters start at 1, not 0.
+ ++ArgIndex;
+
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
- os << Msg << " via ";
- // Printed parameters start at 1, not 0.
- printOrdinal(++ArgIndex, os);
- os << " parameter";
+ os << Msg << " via " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
+ << " parameter";
return os.str();
}
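(A standalone restatement of the suffix rule that llvm::getOrdinalSuffix now encapsulates, matching the printOrdinal logic deleted above: 11, 12, and 13 are the exceptions to the usual 1st/2nd/3rd endings.)

    const char *ordinalSuffix(unsigned N) {
      switch (N % 100) {
      case 11: case 12: case 13:
        return "th"; // 11th, 112th, ...
      default:
        switch (N % 10) {
        case 1: return "st"; // 1st, 21st, ...
        case 2: return "nd";
        case 3: return "rd";
        default: return "th";
        }
      }
    }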
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index d5fdd9d..17ef4cf 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -13,8 +13,9 @@
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
-#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Casting.h"
@@ -47,7 +48,6 @@ namespace {
PathGenerationScheme getGenerationScheme() const { return Extensive; }
bool supportsLogicalOpControlFlow() const { return true; }
bool supportsAllBlockEdges() const { return true; }
- virtual bool useVerboseDescription() const { return false; }
virtual bool supportsCrossFileDiagnostics() const {
return SupportsCrossFileDiagnostics;
}
@@ -247,6 +247,7 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
// Output the short text.
// FIXME: Really use a short string.
Indent(o, indent) << "<key>message</key>\n";
+ Indent(o, indent);
EmitString(o, P.getString()) << '\n';
// Finish up.
@@ -409,10 +410,13 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
"<plist version=\"1.0\">\n";
// Write the root object: a <dict> containing...
+ // - "clang_version", the string representation of clang version
// - "files", an <array> mapping from FIDs to file names
// - "diagnostics", an <array> containing the path diagnostics
- o << "<dict>\n"
- " <key>files</key>\n"
+ o << "<dict>\n" <<
+ " <key>clang_version</key>\n";
+ EmitString(o, getClangFullVersion()) << '\n';
+ o << " <key>files</key>\n"
" <array>\n";
for (SmallVectorImpl<FileID>::iterator I=Fids.begin(), E=Fids.end();
@@ -443,7 +447,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Output the bug type and bug category.
o << " <key>description</key>";
- EmitString(o, D->getDescription()) << '\n';
+ EmitString(o, D->getShortDescription()) << '\n';
o << " <key>category</key>";
EmitString(o, D->getCategory()) << '\n';
o << " <key>type</key>";
@@ -499,19 +503,23 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Output the diagnostic to the sub-diagnostic client, if any.
if (!filesMade->empty()) {
StringRef lastName;
- for (FilesMade::iterator I = filesMade->begin(), E = filesMade->end();
- I != E; ++I) {
- StringRef newName = I->first;
- if (newName != lastName) {
- if (!lastName.empty())
- o << " </array>\n";
- lastName = newName;
- o << " <key>" << lastName << "_files</key>\n";
- o << " <array>\n";
+ PDFileEntry::ConsumerFiles *files = filesMade->getFiles(*D);
+ if (files) {
+ for (PDFileEntry::ConsumerFiles::const_iterator CI = files->begin(),
+ CE = files->end(); CI != CE; ++CI) {
+ StringRef newName = CI->first;
+ if (newName != lastName) {
+ if (!lastName.empty()) {
+ o << " </array>\n";
+ }
+ lastName = newName;
+ o << " <key>" << lastName << "_files</key>\n";
+ o << " <array>\n";
+ }
+ o << " <string>" << CI->second << "</string>\n";
}
- o << " <string>" << I->second << "</string>\n";
+ o << " </array>\n";
}
- o << " </array>\n";
}
// Close up the entry.
@@ -521,10 +529,5 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " </array>\n";
// Finish.
- o << "</dict>\n</plist>";
-
- if (filesMade) {
- StringRef Name(getName());
- filesMade->push_back(std::make_pair(Name, OutputFile));
- }
+ o << "</dict>\n</plist>";
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 2000338..b49a11e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -22,10 +22,6 @@
using namespace clang;
using namespace ento;
-// Give the vtable for ConstraintManager somewhere to live.
-// FIXME: Move this elsewhere.
-ConstraintManager::~ConstraintManager() {}
-
namespace clang { namespace ento {
/// Increments the number of times this state is referenced.
@@ -75,8 +71,8 @@ ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
StoreManagerCreator CreateSMgr,
ConstraintManagerCreator CreateCMgr,
llvm::BumpPtrAllocator &alloc,
- SubEngine &SubEng)
- : Eng(&SubEng), EnvMgr(alloc), GDMFactory(alloc),
+ SubEngine *SubEng)
+ : Eng(SubEng), EnvMgr(alloc), GDMFactory(alloc),
svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
CallEventMgr(new CallEventManager(alloc)), Alloc(alloc) {
StoreMgr.reset((*CreateSMgr)(*this));
@@ -110,47 +106,25 @@ ProgramStateManager::removeDeadBindings(ProgramStateRef state,
SymReaper);
NewState.setStore(newStore);
SymReaper.setReapedStore(newStore);
-
- return getPersistentState(NewState);
-}
-
-ProgramStateRef ProgramStateManager::MarshalState(ProgramStateRef state,
- const StackFrameContext *InitLoc) {
- // make up an empty state for now.
- ProgramState State(this,
- EnvMgr.getInitialEnvironment(),
- StoreMgr->getInitialStore(InitLoc),
- GDMFactory.getEmptyMap());
- return getPersistentState(State);
+ ProgramStateRef Result = getPersistentState(NewState);
+ return ConstraintMgr->removeDeadBindings(Result, SymReaper);
}
ProgramStateRef ProgramState::bindCompoundLiteral(const CompoundLiteralExpr *CL,
const LocationContext *LC,
SVal V) const {
const StoreRef &newStore =
- getStateManager().StoreMgr->BindCompoundLiteral(getStore(), CL, LC, V);
+ getStateManager().StoreMgr->bindCompoundLiteral(getStore(), CL, LC, V);
return makeWithStore(newStore);
}
-ProgramStateRef ProgramState::bindDecl(const VarRegion* VR, SVal IVal) const {
- const StoreRef &newStore =
- getStateManager().StoreMgr->BindDecl(getStore(), VR, IVal);
- return makeWithStore(newStore);
-}
-
-ProgramStateRef ProgramState::bindDeclWithNoInit(const VarRegion* VR) const {
- const StoreRef &newStore =
- getStateManager().StoreMgr->BindDeclWithNoInit(getStore(), VR);
- return makeWithStore(newStore);
-}
-
-ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V) const {
+ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V, bool notifyChanges) const {
ProgramStateManager &Mgr = getStateManager();
ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
LV, V));
const MemRegion *MR = LV.getAsRegion();
- if (MR && Mgr.getOwningEngine())
+ if (MR && Mgr.getOwningEngine() && notifyChanges)
return Mgr.getOwningEngine()->processRegionChange(newState, MR);
return newState;
@@ -204,11 +178,12 @@ ProgramState::invalidateRegionsImpl(ArrayRef<const MemRegion *> Regions,
return makeWithStore(newStore);
}
-ProgramStateRef ProgramState::unbindLoc(Loc LV) const {
+ProgramStateRef ProgramState::killBinding(Loc LV) const {
assert(!isa<loc::MemRegionVal>(LV) && "Use invalidateRegion instead.");
Store OldStore = getStore();
- const StoreRef &newStore = getStateManager().StoreMgr->Remove(OldStore, LV);
+ const StoreRef &newStore =
+ getStateManager().StoreMgr->killBinding(OldStore, LV);
if (newStore.getStore() == OldStore)
return this;
@@ -249,7 +224,9 @@ SVal ProgramState::getSVal(Loc location, QualType T) const {
// about).
if (!T.isNull()) {
if (SymbolRef sym = V.getAsSymbol()) {
- if (const llvm::APSInt *Int = getSymVal(sym)) {
+ if (const llvm::APSInt *Int = getStateManager()
+ .getConstraintManager()
+ .getSymVal(this, sym)) {
// FIXME: Because we don't correctly model (yet) sign-extension
// and truncation of symbolic values, we need to convert
// the integer value to the correct signedness and bitwidth.
@@ -710,7 +687,9 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
bool Tainted = false;
for (SymExpr::symbol_iterator SI = Sym->symbol_begin(), SE =Sym->symbol_end();
SI != SE; ++SI) {
- assert(isa<SymbolData>(*SI));
+ if (!isa<SymbolData>(*SI))
+ continue;
+
const TaintTagType *Tag = get<TaintMap>(*SI);
Tainted = (Tag && *Tag == Kind);
@@ -734,15 +713,10 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
}
/// The GDM component containing the dynamic type info. This is a map from a
-/// symbol to it's most likely type.
-namespace clang {
-namespace ento {
-typedef llvm::ImmutableMap<const MemRegion *, DynamicTypeInfo> DynamicTypeMap;
-template<> struct ProgramStateTrait<DynamicTypeMap>
- : public ProgramStatePartialTrait<DynamicTypeMap> {
- static void *GDMIndex() { static int index; return &index; }
-};
-}}
+/// symbol to its most likely type.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicTypeMap,
+ CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
+ DynamicTypeInfo))
DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const {
Reg = Reg->StripCasts();
@@ -758,7 +732,7 @@ DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const {
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
SymbolRef Sym = SR->getSymbol();
- return DynamicTypeInfo(Sym->getType(getStateManager().getContext()));
+ return DynamicTypeInfo(Sym->getType());
}
return DynamicTypeInfo();
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 550404a..411094b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -24,9 +24,6 @@
using namespace clang;
using namespace ento;
-namespace { class ConstraintRange {}; }
-static int ConstraintRangeIndex = 0;
-
/// A Range represents the closed range [from, to]. The caller must
/// guarantee that from <= to. Note that Range is immutable, so as not
/// to subvert RangeSet's immutability.
@@ -280,23 +277,15 @@ public:
};
} // end anonymous namespace
-typedef llvm::ImmutableMap<SymbolRef,RangeSet> ConstraintRangeTy;
-
-namespace clang {
-namespace ento {
-template<>
-struct ProgramStateTrait<ConstraintRange>
- : public ProgramStatePartialTrait<ConstraintRangeTy> {
- static inline void *GDMIndex() { return &ConstraintRangeIndex; }
-};
-}
-}
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintRange,
+ CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef,
+ RangeSet))
namespace {
class RangeConstraintManager : public SimpleConstraintManager{
RangeSet GetRange(ProgramStateRef state, SymbolRef sym);
public:
- RangeConstraintManager(SubEngine &subengine, BasicValueFactory &BVF)
+ RangeConstraintManager(SubEngine *subengine, BasicValueFactory &BVF)
: SimpleConstraintManager(subengine, BVF) {}
ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
@@ -324,12 +313,7 @@ public:
const llvm::APSInt& Adjustment);
const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) const;
-
- // FIXME: Refactor into SimpleConstraintManager?
- bool isEqual(ProgramStateRef St, SymbolRef sym, const llvm::APSInt& V) const {
- const llvm::APSInt *i = getSymVal(St, sym);
- return i ? *i == V : false;
- }
+ ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym);
ProgramStateRef removeDeadBindings(ProgramStateRef St, SymbolReaper& SymReaper);
@@ -343,7 +327,7 @@ private:
} // end anonymous namespace
ConstraintManager *
-ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine &Eng) {
+ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
return new RangeConstraintManager(Eng, StMgr.getBasicVals());
}
@@ -353,6 +337,30 @@ const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
return T ? T->getConcreteValue() : NULL;
}
+ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
+ SymbolRef Sym) {
+ const RangeSet *Ranges = State->get<ConstraintRange>(Sym);
+
+ // If we don't have any information about this symbol, it's underconstrained.
+ if (!Ranges)
+ return ConditionTruthVal();
+
+ // If we have a concrete value, see if it's zero.
+ if (const llvm::APSInt *Value = Ranges->getConcreteValue())
+ return *Value == 0;
+
+ BasicValueFactory &BV = getBasicVals();
+ APSIntType IntType = BV.getAPSIntType(Sym->getType());
+ llvm::APSInt Zero = IntType.getZeroValue();
+
+ // Check if zero is in the set of possible values.
+ if (Ranges->Intersect(BV, F, Zero, Zero).isEmpty())
+ return false;
+
+ // Zero is a possible value, but it is not the /only/ possible value.
+ return ConditionTruthVal();
+}
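(A standalone sketch of the three-way answer above, with a closed interval [Lo, Hi] standing in for the symbol's RangeSet and TruthVal mirroring ConditionTruthVal's constrained/unconstrained states.)

    struct TruthVal {
      bool Constrained, Value;
      TruthVal() : Constrained(false), Value(false) {}       // underconstrained
      explicit TruthVal(bool B) : Constrained(true), Value(B) {}
    };

    TruthVal isNullIn(long Lo, long Hi) {
      if (Lo == Hi)
        return TruthVal(Lo == 0); // concrete value: definite yes or no
      if (Lo > 0 || Hi < 0)
        return TruthVal(false);   // zero cannot be in the interval
      return TruthVal();          // zero is possible but not forced
    }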
+
/// Scan all symbols referenced by the constraints. If the symbol is not alive
/// as marked in LSymbols, mark it as dead in DSymbols.
ProgramStateRef
@@ -379,8 +387,18 @@ RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
// Lazily generate a new RangeSet representing all possible values for the
// given symbol type.
BasicValueFactory &BV = getBasicVals();
- QualType T = sym->getType(BV.getContext());
- return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T));
+ QualType T = sym->getType();
+
+ RangeSet Result(F, BV.getMinValue(T), BV.getMaxValue(T));
+
+ // Special case: references are known to be non-zero.
+ if (T->isReferenceType()) {
+ APSIntType IntType = BV.getAPSIntType(T);
+ Result = Result.Intersect(BV, F, ++IntType.getZeroValue(),
+ --IntType.getZeroValue());
+ }
+
+ return Result;
}
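(A short note on the ++zero/--zero trick above, assuming RangeSet's wrapped-interval semantics: for a signed type the intersection interval is [1, -1], which wraps around the value space and therefore covers everything except zero.)

    #include <climits>
    #include <cstdio>

    int main() {
      int Lo = 0 + 1; // lower bound kept: ++zero
      int Hi = 0 - 1; // upper bound kept: --zero, "before" Lo, so it wraps
      // The wrapped interval [1, -1] is the union [INT_MIN, -1] and
      // [1, INT_MAX], i.e. every value except 0, which is exactly the
      // non-null guarantee a C++ reference carries.
      std::printf("[%d, %d] and [%d, %d]\n", INT_MIN, Hi, Lo, INT_MAX);
      return 0;
    }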
//===------------------------------------------------------------------------===
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index bc4e4bb..aed994d 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -15,9 +15,6 @@
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CharUnits.h"
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/ExprCXX.h"
-#include "clang/AST/CXXInheritance.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Basic/TargetInfo.h"
@@ -193,19 +190,6 @@ public:
/// casts from arrays to pointers.
SVal ArrayToPointer(Loc Array);
- /// For DerivedToBase casts, create a CXXBaseObjectRegion and return it.
- virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType);
-
- /// \brief Evaluates C++ dynamic_cast cast.
- /// The callback may result in the following 3 scenarios:
- /// - Successful cast (ex: derived is subclass of base).
- /// - Failed cast (ex: derived is definitely not a subclass of base).
- /// - We don't know (base is a symbolic region and we don't have
- /// enough info to determine if the cast will succeed at run time).
- /// The function returns an SVal representing the derived class; it's
- /// valid only if Failed flag is set to false.
- virtual SVal evalDynamicCast(SVal base, QualType derivedPtrType,bool &Failed);
-
StoreRef getInitialStore(const LocationContext *InitLoc) {
return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this);
}
@@ -244,7 +228,7 @@ public: // Made public for helper classes.
RegionBindings removeBinding(RegionBindings B, BindingKey K);
RegionBindings removeBinding(RegionBindings B, const MemRegion *R,
- BindingKey::Kind k);
+ BindingKey::Kind k);
RegionBindings removeBinding(RegionBindings B, const MemRegion *R) {
return removeBinding(removeBinding(B, R, BindingKey::Direct), R,
@@ -266,15 +250,20 @@ public: // Part of public interface to class.
.getRootWithoutRetain(), *this);
}
- StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr *CL,
+ /// \brief Create a new store that binds a value to a compound literal.
+ ///
+ /// \param ST The original store whose bindings are the basis for the new
+ /// store.
+ ///
+ /// \param CL The compound literal to bind (the binding key).
+ ///
+ /// \param LC The LocationContext for the binding.
+ ///
+ /// \param V The value to bind to the compound literal.
+ StoreRef bindCompoundLiteral(Store ST,
+ const CompoundLiteralExpr *CL,
const LocationContext *LC, SVal V);
- StoreRef BindDecl(Store store, const VarRegion *VR, SVal InitVal);
-
- StoreRef BindDeclWithNoInit(Store store, const VarRegion *) {
- return StoreRef(store, *this);
- }
-
/// BindStruct - Bind a compound value to a structure.
StoreRef BindStruct(Store store, const TypedValueRegion* R, SVal V);
@@ -287,7 +276,10 @@ public: // Part of public interface to class.
/// as a Default binding.
StoreRef BindAggregate(Store store, const TypedRegion *R, SVal DefaultVal);
- StoreRef Remove(Store store, Loc LV);
+ /// \brief Create a new store with the specified binding removed.
+ /// \param ST the original store that is the basis for the new store.
+ /// \param L the location whose binding should be removed.
+ StoreRef killBinding(Store ST, Loc L);
void incrementReferenceCount(Store store) {
GetRegionBindings(store).manualRetain();
@@ -477,12 +469,8 @@ public:
}
bool AddToWorkList(const MemRegion *R, const ClusterBindings *C) {
- if (C) {
- if (Visited.count(C))
- return false;
- Visited.insert(C);
- }
-
+ if (C && !Visited.insert(C))
+ return false;
WL.push_back(R);
return true;
}
@@ -534,6 +522,46 @@ bool RegionStoreManager::scanReachableSymbols(Store S, const MemRegion *R,
return true;
}
+static inline bool isUnionField(const FieldRegion *FR) {
+ return FR->getDecl()->getParent()->isUnion();
+}
+
+typedef SmallVector<const FieldDecl *, 8> FieldVector;
+
+void getSymbolicOffsetFields(BindingKey K, FieldVector &Fields) {
+ assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys");
+
+ const MemRegion *Base = K.getConcreteOffsetRegion();
+ const MemRegion *R = K.getRegion();
+
+ while (R != Base) {
+ if (const FieldRegion *FR = dyn_cast<FieldRegion>(R))
+ if (!isUnionField(FR))
+ Fields.push_back(FR->getDecl());
+
+ R = cast<SubRegion>(R)->getSuperRegion();
+ }
+}
+
+static bool isCompatibleWithFields(BindingKey K, const FieldVector &Fields) {
+ assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys");
+
+ if (Fields.empty())
+ return true;
+
+ FieldVector FieldsInBindingKey;
+ getSymbolicOffsetFields(K, FieldsInBindingKey);
+
+ ptrdiff_t Delta = FieldsInBindingKey.size() - Fields.size();
+ if (Delta >= 0)
+ return std::equal(FieldsInBindingKey.begin() + Delta,
+ FieldsInBindingKey.end(),
+ Fields.begin());
+ else
+ return std::equal(FieldsInBindingKey.begin(), FieldsInBindingKey.end(),
+ Fields.begin() - Delta);
+}
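(A standalone sketch of the suffix test above: two field paths are compatible when, after aligning their ends, the shorter path matches the corresponding segment of the longer one. Strings stand in for FieldDecls.)

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <vector>

    bool compatibleFields(const std::vector<std::string> &InKey,
                          const std::vector<std::string> &Fields) {
      if (Fields.empty())
        return true; // no constraint to violate
      std::ptrdiff_t Delta = static_cast<std::ptrdiff_t>(InKey.size()) -
                             static_cast<std::ptrdiff_t>(Fields.size());
      if (Delta >= 0) // key path is longer: compare its trailing segment
        return std::equal(InKey.begin() + Delta, InKey.end(), Fields.begin());
      // key path is shorter: compare it against the offset Fields segment
      return std::equal(InKey.begin(), InKey.end(), Fields.begin() - Delta);
    }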
+
RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B,
const SubRegion *R) {
BindingKey SRKey = BindingKey::Make(R, BindingKey::Default);
@@ -543,10 +571,12 @@ RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B,
return RBFactory.remove(B, R);
}
- if (SRKey.hasSymbolicOffset()) {
- const SubRegion *Base = cast<SubRegion>(SRKey.getConcreteOffsetRegion());
- B = removeSubRegionBindings(B, Base);
- return addBinding(B, Base, BindingKey::Default, UnknownVal());
+ FieldVector FieldsInSymbolicSubregions;
+ bool HasSymbolicOffset = SRKey.hasSymbolicOffset();
+ if (HasSymbolicOffset) {
+ getSymbolicOffsetFields(SRKey, FieldsInSymbolicSubregions);
+ R = cast<SubRegion>(SRKey.getConcreteOffsetRegion());
+ SRKey = BindingKey::Make(R, BindingKey::Default);
}
// This assumes the region being invalidated is char-aligned. This isn't
@@ -574,11 +604,17 @@ RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B,
I != E; ++I) {
BindingKey NextKey = I.getKey();
if (NextKey.getRegion() == SRKey.getRegion()) {
+ // FIXME: This doesn't catch the case where we're really invalidating a
+ // region with a symbolic offset. Example:
+ // R: points[i].y
+ // Next: points[0].x
+
if (NextKey.getOffset() > SRKey.getOffset() &&
NextKey.getOffset() - SRKey.getOffset() < Length) {
// Case 1: The next binding is inside the region we're invalidating.
// Remove it.
Result = CBFactory.remove(Result, NextKey);
+
} else if (NextKey.getOffset() == SRKey.getOffset()) {
// Case 2: The next binding is at the same offset as the region we're
// invalidating. In this case, we need to leave default bindings alone,
@@ -589,6 +625,7 @@ RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B,
if (NextKey.isDirect())
Result = CBFactory.remove(Result, NextKey);
}
+
} else if (NextKey.hasSymbolicOffset()) {
const MemRegion *Base = NextKey.getConcreteOffsetRegion();
if (R->isSubRegionOf(Base)) {
@@ -596,16 +633,24 @@ RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B,
// its concrete region. We don't know if the binding is still valid, so
// we'll be conservative and remove it.
if (NextKey.isDirect())
- Result = CBFactory.remove(Result, NextKey);
+ if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
+ Result = CBFactory.remove(Result, NextKey);
} else if (const SubRegion *BaseSR = dyn_cast<SubRegion>(Base)) {
// Case 4: The next key is symbolic, but we changed a known
// super-region. In this case the binding is certainly no longer valid.
if (R == Base || BaseSR->isSubRegionOf(R))
- Result = CBFactory.remove(Result, NextKey);
+ if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
+ Result = CBFactory.remove(Result, NextKey);
}
}
}
+ // If we're invalidating a region with a symbolic offset, we need to make sure
+ // we don't treat the base region as uninitialized anymore.
+ // FIXME: This isn't very precise; see the example in the loop.
+ if (HasSymbolicOffset)
+ Result = CBFactory.add(Result, SRKey, UnknownVal());
+
if (Result.isEmpty())
return RBFactory.remove(B, ClusterHead);
return RBFactory.add(B, ClusterHead, Result);
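
Case 1 above is a plain half-open interval test: a direct binding at NextOff lies inside the invalidated region [BaseOff, BaseOff + Length) iff BaseOff < NextOff < BaseOff + Length, while bindings exactly at BaseOff fall into Case 2. With concrete numbers, assuming (as the char-alignment comment suggests) that offsets and Length are measured in bits:

    #include <cassert>
    #include <cstdint>

    static bool insideInvalidated(int64_t NextOff, int64_t BaseOff,
                                  int64_t LengthBits) {
      return NextOff > BaseOff && NextOff - BaseOff < LengthBits;
    }

    int main() {
      // Invalidating 8 bytes (64 bits) starting at bit offset 64:
      assert(insideInvalidated(96, 64, 64));   // removed (Case 1)
      assert(!insideInvalidated(64, 64, 64));  // same offset: handled as Case 2
      assert(!insideInvalidated(128, 64, 64)); // past the end: untouched
    }
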
@@ -724,7 +769,7 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
DefinedOrUnknownSVal V =
- svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
+ svalBuilder.conjureSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
}
@@ -739,8 +784,8 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
if (T->isStructureOrClassType()) {
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
- DefinedOrUnknownSVal V =
- svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
+ DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
+ Ctx.IntTy, Count);
B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
}
@@ -748,7 +793,7 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
// Set the default value of the array to conjured symbol.
DefinedOrUnknownSVal V =
- svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx,
+ svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
AT->getElementType(), Count);
B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
@@ -764,8 +809,8 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
}
- DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx,
- T,Count);
+ DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
+ T,Count);
assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
B = RM.addBinding(B, baseR, BindingKey::Direct, V);
}
@@ -779,10 +824,9 @@ RegionBindings RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
// Bind the globals memory space to a new symbol that we will use to derive
// the bindings for all globals.
const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K);
- SVal V =
- svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex, LCtx,
- /* symbol type, doesn't matter */ Ctx.IntTy,
- Count);
+ SVal V = svalBuilder.conjureSymbolVal(/* SymbolTag = */ (const void*) GS, Ex, LCtx,
+ /* type does not matter */ Ctx.IntTy,
+ Count);
B = removeBinding(B, GS);
B = addBinding(B, BindingKey::Make(GS, BindingKey::Default), V);
@@ -897,103 +941,6 @@ SVal RegionStoreManager::ArrayToPointer(Loc Array) {
return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, ArrayR, Ctx));
}
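
ArrayToPointer models the standard array-to-pointer decay: an array used in pointer context evaluates to the address of its first element, which the store expresses as an ElementRegion at index zero over the array region. The source-level behavior being modeled:

    #include <cassert>

    int main() {
      int a[4] = {10, 11, 12, 13};
      int *p = a;          // decay: a becomes &a[0]
      assert(p == &a[0]);  // modeled as ElementRegion(index 0) over a's region
    }
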
-// This mirrors Type::getCXXRecordDeclForPointerType(), but there doesn't
-// appear to be another need for this in the rest of the codebase.
-static const CXXRecordDecl *GetCXXRecordDeclForReferenceType(QualType Ty) {
- if (const ReferenceType *RT = Ty->getAs<ReferenceType>())
- if (const RecordType *RCT = RT->getPointeeType()->getAs<RecordType>())
- return dyn_cast<CXXRecordDecl>(RCT->getDecl());
- return 0;
-}
-
-SVal RegionStoreManager::evalDerivedToBase(SVal derived, QualType baseType) {
- const CXXRecordDecl *baseDecl;
-
- if (baseType->isPointerType())
- baseDecl = baseType->getCXXRecordDeclForPointerType();
- else if (baseType->isReferenceType())
- baseDecl = GetCXXRecordDeclForReferenceType(baseType);
- else
- baseDecl = baseType->getAsCXXRecordDecl();
-
- assert(baseDecl && "not a CXXRecordDecl?");
-
- loc::MemRegionVal *derivedRegVal = dyn_cast<loc::MemRegionVal>(&derived);
- if (!derivedRegVal)
- return derived;
-
- const MemRegion *baseReg =
- MRMgr.getCXXBaseObjectRegion(baseDecl, derivedRegVal->getRegion());
-
- return loc::MemRegionVal(baseReg);
-}
-
-SVal RegionStoreManager::evalDynamicCast(SVal base, QualType derivedType,
- bool &Failed) {
- Failed = false;
-
- loc::MemRegionVal *baseRegVal = dyn_cast<loc::MemRegionVal>(&base);
- if (!baseRegVal)
- return UnknownVal();
- const MemRegion *BaseRegion = baseRegVal->stripCasts(/*StripBases=*/false);
-
- // Assume the derived class is a pointer or a reference to a CXX record.
- derivedType = derivedType->getPointeeType();
- assert(!derivedType.isNull());
- const CXXRecordDecl *DerivedDecl = derivedType->getAsCXXRecordDecl();
- if (!DerivedDecl && !derivedType->isVoidType())
- return UnknownVal();
-
- // Drill down the CXXBaseObject chains, which represent upcasts (casts from
- // derived to base).
- const MemRegion *SR = BaseRegion;
- while (const TypedRegion *TSR = dyn_cast_or_null<TypedRegion>(SR)) {
- QualType BaseType = TSR->getLocationType()->getPointeeType();
- assert(!BaseType.isNull());
- const CXXRecordDecl *SRDecl = BaseType->getAsCXXRecordDecl();
- if (!SRDecl)
- return UnknownVal();
-
- // If found the derived class, the cast succeeds.
- if (SRDecl == DerivedDecl)
- return loc::MemRegionVal(TSR);
-
- if (!derivedType->isVoidType()) {
- // Static upcasts are marked as DerivedToBase casts by Sema, so this will
- // only happen when multiple or virtual inheritance is involved.
- CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
- /*DetectVirtual=*/false);
- if (SRDecl->isDerivedFrom(DerivedDecl, Paths)) {
- SVal Result = loc::MemRegionVal(TSR);
- const CXXBasePath &Path = *Paths.begin();
- for (CXXBasePath::const_iterator I = Path.begin(), E = Path.end();
- I != E; ++I) {
- Result = evalDerivedToBase(Result, I->Base->getType());
- }
- return Result;
- }
- }
-
- if (const CXXBaseObjectRegion *R = dyn_cast<CXXBaseObjectRegion>(TSR))
- // Drill down the chain to get the derived classes.
- SR = R->getSuperRegion();
- else {
- // We reached the bottom of the hierarchy.
-
- // If this is a cast to void*, return the region.
- if (derivedType->isVoidType())
- return loc::MemRegionVal(TSR);
-
-    // We did not find the derived class. We must be casting the base to
- // derived, so the cast should fail.
- Failed = true;
- return UnknownVal();
- }
- }
-
- return UnknownVal();
-}
-
//===----------------------------------------------------------------------===//
// Loading values from regions.
//===----------------------------------------------------------------------===//
@@ -1047,7 +994,7 @@ SVal RegionStoreManager::getBinding(Store store, Loc L, QualType T) {
T = TR->getLocationType();
else {
const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
- T = SR->getSymbol()->getType(Ctx);
+ T = SR->getSymbol()->getType();
}
}
MR = GetElementZeroRegion(MR, T);
@@ -1540,14 +1487,14 @@ bool RegionStoreManager::includedInBindings(Store store,
// Binding values to regions.
//===----------------------------------------------------------------------===//
-StoreRef RegionStoreManager::Remove(Store store, Loc L) {
+StoreRef RegionStoreManager::killBinding(Store ST, Loc L) {
if (isa<loc::MemRegionVal>(L))
if (const MemRegion* R = cast<loc::MemRegionVal>(L).getRegion())
- return StoreRef(removeBinding(GetRegionBindings(store),
+ return StoreRef(removeBinding(GetRegionBindings(ST),
R).getRootWithoutRetain(),
*this);
- return StoreRef(store, *this);
+ return StoreRef(ST, *this);
}
StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) {
@@ -1560,6 +1507,8 @@ StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) {
// Check if the region is a struct region.
if (const TypedValueRegion* TR = dyn_cast<TypedValueRegion>(R)) {
QualType Ty = TR->getValueType();
+ if (Ty->isArrayType())
+ return BindArray(store, TR, V);
if (Ty->isStructureOrClassType())
return BindStruct(store, TR, V);
if (Ty->isVectorType())
@@ -1569,13 +1518,9 @@ StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) {
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
// Binding directly to a symbolic region should be treated as binding
// to element 0.
- QualType T = SR->getSymbol()->getType(Ctx);
-
- // FIXME: Is this the right way to handle symbols that are references?
- if (const PointerType *PT = T->getAs<PointerType>())
- T = PT->getPointeeType();
- else
- T = T->getAs<ReferenceType>()->getPointeeType();
+ QualType T = SR->getSymbol()->getType();
+ if (T->isAnyPointerType() || T->isReferenceType())
+ T = T->getPointeeType();
R = GetElementZeroRegion(SR, T);
}
@@ -1589,26 +1534,12 @@ StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) {
return StoreRef(addBinding(B, Key, V).getRootWithoutRetain(), *this);
}
-StoreRef RegionStoreManager::BindDecl(Store store, const VarRegion *VR,
- SVal InitVal) {
-
- QualType T = VR->getDecl()->getType();
-
- if (T->isArrayType())
- return BindArray(store, VR, InitVal);
- if (T->isStructureOrClassType())
- return BindStruct(store, VR, InitVal);
-
- return Bind(store, svalBuilder.makeLoc(VR), InitVal);
-}
-
// FIXME: this method should be merged into Bind().
-StoreRef RegionStoreManager::BindCompoundLiteral(Store store,
+StoreRef RegionStoreManager::bindCompoundLiteral(Store ST,
const CompoundLiteralExpr *CL,
const LocationContext *LC,
SVal V) {
- return Bind(store, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)),
- V);
+ return Bind(ST, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)), V);
}
StoreRef RegionStoreManager::setImplicitDefaultValue(Store store,
@@ -1864,7 +1795,7 @@ RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
const MemRegion *R,
- BindingKey::Kind k){
+ BindingKey::Kind k){
return removeBinding(B, BindingKey::Make(R, k));
}
@@ -1897,7 +1828,6 @@ public:
void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C);
void VisitCluster(const MemRegion *baseR, const ClusterBindings &C);
- void VisitBindingKey(BindingKey K);
bool UpdatePostponed();
void VisitBinding(SVal V);
};
@@ -1932,17 +1862,21 @@ void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
const StackArgumentsSpaceRegion *StackReg =
cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
const StackFrameContext *RegCtx = StackReg->getStackFrame();
- if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx))
+ if (CurrentLCtx &&
+ (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx)))
AddToWorkList(TR, &C);
}
}
void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
const ClusterBindings &C) {
- for (ClusterBindings::iterator I = C.begin(), E = C.end(); I != E; ++I) {
- VisitBindingKey(I.getKey());
+ // Mark the symbol for any SymbolicRegion with live bindings as live itself.
+ // This means we should continue to track that symbol.
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(baseR))
+ SymReaper.markLive(SymR->getSymbol());
+
+ for (ClusterBindings::iterator I = C.begin(), E = C.end(); I != E; ++I)
VisitBinding(I.getData());
- }
}
void removeDeadBindingsWorker::VisitBinding(SVal V) {
@@ -1979,8 +1913,8 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) {
if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
BlockDataRegion::referenced_vars_iterator I = BR->referenced_vars_begin(),
E = BR->referenced_vars_end();
- for ( ; I != E; ++I)
- AddToWorkList(I.getCapturedRegion());
+ for ( ; I != E; ++I)
+ AddToWorkList(I.getCapturedRegion());
}
}
@@ -1991,20 +1925,6 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) {
SymReaper.markLive(*SI);
}
-void removeDeadBindingsWorker::VisitBindingKey(BindingKey K) {
- const MemRegion *R = K.getRegion();
-
- // Mark this region "live" by adding it to the worklist. This will cause
- // use to visit all regions in the cluster (if we haven't visited them
- // already).
- if (AddToWorkList(R)) {
- // Mark the symbol for any live SymbolicRegion as "live". This means we
- // should continue to track that symbol.
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
- SymReaper.markLive(SymR->getSymbol());
- }
-}
-
bool removeDeadBindingsWorker::UpdatePostponed() {
// See if any postponed SymbolicRegions are actually live now, after
// having done a scan.
@@ -2012,7 +1932,7 @@ bool removeDeadBindingsWorker::UpdatePostponed() {
for (SmallVectorImpl<const SymbolicRegion*>::iterator
I = Postponed.begin(), E = Postponed.end() ; I != E ; ++I) {
- if (const SymbolicRegion *SR = cast_or_null<SymbolicRegion>(*I)) {
+ if (const SymbolicRegion *SR = *I) {
if (SymReaper.isLive(SR->getSymbol())) {
changed |= AddToWorkList(SR);
*I = NULL;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index d1936cd..b87169a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -106,25 +106,23 @@ SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
return nonloc::SymbolVal(sym);
}
-DefinedOrUnknownSVal
-SValBuilder::getConjuredSymbolVal(const void *symbolTag,
- const Expr *expr,
- const LocationContext *LCtx,
- unsigned count) {
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned count) {
QualType T = expr->getType();
- return getConjuredSymbolVal(symbolTag, expr, LCtx, T, count);
+ return conjureSymbolVal(symbolTag, expr, LCtx, T, count);
}
-DefinedOrUnknownSVal
-SValBuilder::getConjuredSymbolVal(const void *symbolTag,
- const Expr *expr,
- const LocationContext *LCtx,
- QualType type,
- unsigned count) {
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned count) {
if (!SymbolManager::canSymbolicate(type))
return UnknownVal();
- SymbolRef sym = SymMgr.getConjuredSymbol(expr, LCtx, type, count, symbolTag);
+ SymbolRef sym = SymMgr.conjureSymbol(expr, LCtx, type, count, symbolTag);
if (Loc::isLocType(type))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
@@ -133,15 +131,14 @@ SValBuilder::getConjuredSymbolVal(const void *symbolTag,
}
-DefinedOrUnknownSVal
-SValBuilder::getConjuredSymbolVal(const Stmt *stmt,
- const LocationContext *LCtx,
- QualType type,
- unsigned visitCount) {
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount) {
if (!SymbolManager::canSymbolicate(type))
return UnknownVal();
- SymbolRef sym = SymMgr.getConjuredSymbol(stmt, LCtx, type, visitCount);
+ SymbolRef sym = SymMgr.conjureSymbol(stmt, LCtx, type, visitCount);
if (Loc::isLocType(type))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
@@ -157,7 +154,7 @@ SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
assert(Loc::isLocType(T));
assert(SymbolManager::canSymbolicate(T));
- SymbolRef sym = SymMgr.getConjuredSymbol(E, LCtx, T, VisitCount);
+ SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, T, VisitCount);
return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym));
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
index 8437f50..e34ab6a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -51,7 +51,8 @@ const FunctionDecl *SVal::getAsFunctionDecl() const {
if (const loc::MemRegionVal* X = dyn_cast<loc::MemRegionVal>(this)) {
const MemRegion* R = X->getRegion();
if (const FunctionTextRegion *CTR = R->getAs<FunctionTextRegion>())
- return CTR->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CTR->getDecl()))
+ return FD;
}
return 0;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index 5568f1c..4236ee4 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -67,7 +67,9 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state, Loc cond,
bool assumption) {
state = assumeAux(state, cond, assumption);
- return SU.processAssume(state, cond, assumption);
+ if (NotifyAssumeClients && SU)
+ return SU->processAssume(state, cond, assumption);
+ return state;
}
ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
@@ -113,7 +115,9 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
NonLoc cond,
bool assumption) {
state = assumeAux(state, cond, assumption);
- return SU.processAssume(state, cond, assumption);
+ if (NotifyAssumeClients && SU)
+ return SU->processAssume(state, cond, assumption);
+ return state;
}
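
Turning SU into a pointer lets a SimpleConstraintManager exist without a SubEngine, and both assume() overloads now guard the callback identically, gated additionally on the NotifyAssumeClients flag (presumably a new ConstraintManager member). The guard pattern in miniature, with toy types rather than analyzer API:

    struct Engine { int processAssume(int St) { return St + 1; } };

    int assumeAndNotify(int St, Engine *SU, bool NotifyAssumeClients) {
      if (NotifyAssumeClients && SU)
        return SU->processAssume(St);  // give checker clients a chance to react
      return St;                       // no client: the constraint still holds
    }
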
static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
@@ -136,7 +140,7 @@ ProgramStateRef
SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State,
SymbolRef Sym, bool Assumption) {
BasicValueFactory &BVF = getBasicVals();
- QualType T = Sym->getType(BVF.getContext());
+ QualType T = Sym->getType();
// None of the constraint solvers currently support non-integer types.
if (!T->isIntegerType())
@@ -186,7 +190,7 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
BinaryOperator::Opcode op = SE->getOpcode();
// Implicitly compare non-comparison expressions to 0.
if (!BinaryOperator::isComparisonOp(op)) {
- QualType T = SE->getType(BasicVals.getContext());
+ QualType T = SE->getType();
const llvm::APSInt &zero = BasicVals.getValue(0, T);
op = (Assumption ? BO_NE : BO_EQ);
return assumeSymRel(state, SE, op, zero);
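
That is, assuming a non-comparison symbolic expression e in boolean context is rewritten as a comparison against zero: assume(e, true) becomes e != 0 and assume(e, false) becomes e == 0. In source terms:

    #include <cassert>

    int main() {
      int x = 5, y = -5;
      if (x + y)
        assert(0 && "unreachable here");  // true branch assumes (x + y) != 0
      else
        assert(x + y == 0);               // false branch assumes (x + y) == 0
    }
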
@@ -235,11 +239,9 @@ ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
assert(BinaryOperator::isComparisonOp(op) &&
"Non-comparison ops should be rewritten as comparisons to zero.");
- BasicValueFactory &BVF = getBasicVals();
- ASTContext &Ctx = BVF.getContext();
-
// Get the type used for calculating wraparound.
- APSIntType WraparoundType = BVF.getAPSIntType(LHS->getType(Ctx));
+ BasicValueFactory &BVF = getBasicVals();
+ APSIntType WraparoundType = BVF.getAPSIntType(LHS->getType());
// We only handle simple comparisons of the form "$sym == constant"
// or "($sym+constant1) == constant2".
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index 088d70c..01f0b4e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -22,10 +22,10 @@ namespace clang {
namespace ento {
class SimpleConstraintManager : public ConstraintManager {
- SubEngine &SU;
+ SubEngine *SU;
BasicValueFactory &BVF;
public:
- SimpleConstraintManager(SubEngine &subengine, BasicValueFactory &BV)
+ SimpleConstraintManager(SubEngine *subengine, BasicValueFactory &BV)
: SU(subengine), BVF(BV) {}
virtual ~SimpleConstraintManager();
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index ad58a07..fbc6ba0 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -81,7 +81,7 @@ SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
}
if (const SymExpr *se = val.getAsSymbolicExpression()) {
- QualType T = Context.getCanonicalType(se->getType(Context));
+ QualType T = Context.getCanonicalType(se->getType());
// If types are the same or both are integers, ignore the cast.
// FIXME: Remove this hack when we support symbolic truncation/extension.
// HACK: If both castTy and T are integers, ignore the cast. This is
@@ -276,7 +276,7 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
// with the given constant.
// FIXME: This is an approximation of Sema::UsualArithmeticConversions.
ASTContext &Ctx = getContext();
- QualType SymbolType = LHS->getType(Ctx);
+ QualType SymbolType = LHS->getType();
uint64_t ValWidth = RHS.getBitWidth();
uint64_t TypeWidth = Ctx.getTypeSize(SymbolType);
@@ -318,7 +318,9 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
return makeTruthVal(false, resultTy);
case BO_Xor:
case BO_Sub:
- return makeIntVal(0, resultTy);
+ if (resultTy->isIntegralOrEnumerationType())
+ return makeIntVal(0, resultTy);
+ return evalCastFromNonLoc(makeIntVal(0, /*Unsigned=*/false), resultTy);
case BO_Or:
case BO_And:
return evalCastFromNonLoc(lhs, resultTy);
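
The identities folded here are x - x == 0 and x ^ x == 0 for any value x; the change only affects how the zero is materialized, adding a cast when the expression's result type is not integral instead of minting an integer of the wrong type. The underlying arithmetic fact, concretely:

    #include <cassert>

    int main() {
      int x = 42;  // stands in for a symbolic value in the analyzer
      assert((x - x) == 0);
      assert((x ^ x) == 0);
    }
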
@@ -459,7 +461,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_NE:
// Negate the comparison and make a value.
opc = NegateComparison(opc);
- assert(symIntExpr->getType(Context) == resultTy);
+ assert(symIntExpr->getType() == resultTy);
return makeNonLoc(symIntExpr->getLHS(), opc,
symIntExpr->getRHS(), resultTy);
}
@@ -505,7 +507,8 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
} else if (isa<SymbolData>(Sym)) {
// Does the symbol simplify to a constant? If so, "fold" the constant
// by setting 'lhs' to a ConcreteInt and try again.
- if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
+ if (const llvm::APSInt *Constant = state->getConstraintManager()
+ .getSymVal(state, Sym)) {
lhs = nonloc::ConcreteInt(*Constant);
continue;
}
@@ -916,14 +919,8 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
else if (isa<SubRegion>(region)) {
superR = region;
index = rhs;
- if (const PointerType *PT = resultTy->getAs<PointerType>()) {
- elementType = PT->getPointeeType();
- }
- else {
- const ObjCObjectPointerType *OT =
- resultTy->getAs<ObjCObjectPointerType>();
- elementType = OT->getPointeeType();
- }
+ if (resultTy->isAnyPointerType())
+ elementType = resultTy->getPointeeType();
}
if (NonLoc *indexV = dyn_cast<NonLoc>(&index)) {
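
In this path evalBinOpLN turns pointer-plus-index into an ElementRegion over the pointee type; the isAnyPointerType() rewrite covers Objective-C object pointers with a single getPointeeType() call instead of two explicit casts. What the modeled arithmetic means at source level:

    #include <cassert>

    int main() {
      int a[4] = {0, 1, 2, 3};
      int *p = a;
      assert(*(p + 2) == 2);  // p + 2 is modeled as ElementRegion(index 2) over a
    }
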
@@ -946,7 +943,7 @@ const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
return &X->getValue();
if (SymbolRef Sym = V.getAsSymbol())
- return state->getSymVal(Sym);
+ return state->getConstraintManager().getSymVal(state, Sym);
// FIXME: Add support for SymExprs.
return NULL;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
index 3af60a1..939ae54 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -15,6 +15,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
using namespace clang;
@@ -233,6 +234,91 @@ SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
return Result;
}
+SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
+ // Walk through the path to create nested CXXBaseRegions.
+ SVal Result = Derived;
+ for (CXXBasePath::const_iterator I = Path.begin(), E = Path.end();
+ I != E; ++I) {
+ Result = evalDerivedToBase(Result, I->Base->getType());
+ }
+ return Result;
+}
+
+SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType) {
+ loc::MemRegionVal *DerivedRegVal = dyn_cast<loc::MemRegionVal>(&Derived);
+ if (!DerivedRegVal)
+ return Derived;
+
+ const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl();
+ if (!BaseDecl)
+ BaseDecl = BaseType->getAsCXXRecordDecl();
+ assert(BaseDecl && "not a C++ object?");
+
+ const MemRegion *BaseReg =
+ MRMgr.getCXXBaseObjectRegion(BaseDecl, DerivedRegVal->getRegion());
+
+ return loc::MemRegionVal(BaseReg);
+}
+
+SVal StoreManager::evalDynamicCast(SVal Base, QualType DerivedType,
+ bool &Failed) {
+ Failed = false;
+
+ loc::MemRegionVal *BaseRegVal = dyn_cast<loc::MemRegionVal>(&Base);
+ if (!BaseRegVal)
+ return UnknownVal();
+ const MemRegion *BaseRegion = BaseRegVal->stripCasts(/*StripBases=*/false);
+
+ // Assume the derived class is a pointer or a reference to a CXX record.
+ DerivedType = DerivedType->getPointeeType();
+ assert(!DerivedType.isNull());
+ const CXXRecordDecl *DerivedDecl = DerivedType->getAsCXXRecordDecl();
+ if (!DerivedDecl && !DerivedType->isVoidType())
+ return UnknownVal();
+
+ // Drill down the CXXBaseObject chains, which represent upcasts (casts from
+ // derived to base).
+ const MemRegion *SR = BaseRegion;
+ while (const TypedRegion *TSR = dyn_cast_or_null<TypedRegion>(SR)) {
+ QualType BaseType = TSR->getLocationType()->getPointeeType();
+ assert(!BaseType.isNull());
+ const CXXRecordDecl *SRDecl = BaseType->getAsCXXRecordDecl();
+ if (!SRDecl)
+ return UnknownVal();
+
+ // If found the derived class, the cast succeeds.
+ if (SRDecl == DerivedDecl)
+ return loc::MemRegionVal(TSR);
+
+ if (!DerivedType->isVoidType()) {
+ // Static upcasts are marked as DerivedToBase casts by Sema, so this will
+ // only happen when multiple or virtual inheritance is involved.
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (SRDecl->isDerivedFrom(DerivedDecl, Paths))
+ return evalDerivedToBase(loc::MemRegionVal(TSR), Paths.front());
+ }
+
+ if (const CXXBaseObjectRegion *R = dyn_cast<CXXBaseObjectRegion>(TSR))
+ // Drill down the chain to get the derived classes.
+ SR = R->getSuperRegion();
+ else {
+ // We reached the bottom of the hierarchy.
+
+ // If this is a cast to void*, return the region.
+ if (DerivedType->isVoidType())
+ return loc::MemRegionVal(TSR);
+
+      // We did not find the derived class. We must be casting the base to
+ // derived, so the cast should fail.
+ Failed = true;
+ return UnknownVal();
+ }
+ }
+
+ return UnknownVal();
+}
+
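
The loop walks the layered CXXBaseObjectRegions that earlier upcasts deposited, so a downcast succeeds by peeling those layers back to a region of the derived type, and fails once the chain bottoms out. The source-level situation, for a hypothetical hierarchy:

    #include <cassert>

    struct A { virtual ~A() {} };
    struct B : A {};

    int main() {
      B b;
      A *a = &b;  // upcast: a CXXBaseObjectRegion(A) is layered over b's region
      assert(dynamic_cast<B *>(a) == &b);     // peeling the base layer succeeds
      A plain;
      assert(dynamic_cast<B *>(&plain) == 0); // bottoms out at A: Failed is set
    }
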
/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 0bc192d..0c5098b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -117,21 +117,17 @@ bool SymExpr::symbol_iterator::operator!=(const symbol_iterator &X) const {
SymExpr::symbol_iterator::symbol_iterator(const SymExpr *SE) {
itr.push_back(SE);
- while (!isa<SymbolData>(itr.back())) expand();
}
SymExpr::symbol_iterator &SymExpr::symbol_iterator::operator++() {
assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
- assert(isa<SymbolData>(itr.back()));
- itr.pop_back();
- if (!itr.empty())
- while (!isa<SymbolData>(itr.back())) expand();
+ expand();
return *this;
}
SymbolRef SymExpr::symbol_iterator::operator*() {
assert(!itr.empty() && "attempting to dereference an 'end' iterator");
- return cast<SymbolData>(itr.back());
+ return itr.back();
}
void SymExpr::symbol_iterator::expand() {
@@ -187,11 +183,11 @@ SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
return cast<SymbolRegionValue>(SD);
}
-const SymbolConjured*
-SymbolManager::getConjuredSymbol(const Stmt *E, const LocationContext *LCtx,
- QualType T, unsigned Count,
- const void *SymbolTag) {
-
+const SymbolConjured* SymbolManager::conjureSymbol(const Stmt *E,
+ const LocationContext *LCtx,
+ QualType T,
+ unsigned Count,
+ const void *SymbolTag) {
llvm::FoldingSetNodeID profile;
SymbolConjured::Profile(profile, E, T, Count, LCtx, SymbolTag);
void *InsertPos;
@@ -328,23 +324,24 @@ const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
return cast<SymSymExpr>(data);
}
-QualType SymbolConjured::getType(ASTContext&) const {
+QualType SymbolConjured::getType() const {
return T;
}
-QualType SymbolDerived::getType(ASTContext &Ctx) const {
+QualType SymbolDerived::getType() const {
return R->getValueType();
}
-QualType SymbolExtent::getType(ASTContext &Ctx) const {
+QualType SymbolExtent::getType() const {
+ ASTContext &Ctx = R->getMemRegionManager()->getContext();
return Ctx.getSizeType();
}
-QualType SymbolMetadata::getType(ASTContext&) const {
+QualType SymbolMetadata::getType() const {
return T;
}
-QualType SymbolRegionValue::getType(ASTContext &C) const {
+QualType SymbolRegionValue::getType() const {
return R->getValueType();
}
@@ -466,41 +463,56 @@ bool SymbolReaper::isLive(SymbolRef sym) {
markDependentsLive(sym);
return true;
}
-
- if (const SymbolDerived *derived = dyn_cast<SymbolDerived>(sym)) {
- if (isLive(derived->getParentSymbol())) {
- markLive(sym);
- return true;
- }
- return false;
- }
-
- if (const SymbolExtent *extent = dyn_cast<SymbolExtent>(sym)) {
- if (isLiveRegion(extent->getRegion())) {
- markLive(sym);
- return true;
- }
- return false;
+
+ bool KnownLive;
+
+ switch (sym->getKind()) {
+ case SymExpr::RegionValueKind:
+ // FIXME: We should be able to use isLiveRegion here (this behavior
+ // predates isLiveRegion), but doing so causes test failures. Investigate.
+ KnownLive = true;
+ break;
+ case SymExpr::ConjuredKind:
+ KnownLive = false;
+ break;
+ case SymExpr::DerivedKind:
+ KnownLive = isLive(cast<SymbolDerived>(sym)->getParentSymbol());
+ break;
+ case SymExpr::ExtentKind:
+ KnownLive = isLiveRegion(cast<SymbolExtent>(sym)->getRegion());
+ break;
+ case SymExpr::MetadataKind:
+ KnownLive = MetadataInUse.count(sym) &&
+ isLiveRegion(cast<SymbolMetadata>(sym)->getRegion());
+ if (KnownLive)
+ MetadataInUse.erase(sym);
+ break;
+ case SymExpr::SymIntKind:
+ KnownLive = isLive(cast<SymIntExpr>(sym)->getLHS());
+ break;
+ case SymExpr::IntSymKind:
+ KnownLive = isLive(cast<IntSymExpr>(sym)->getRHS());
+ break;
+ case SymExpr::SymSymKind:
+ KnownLive = isLive(cast<SymSymExpr>(sym)->getLHS()) &&
+ isLive(cast<SymSymExpr>(sym)->getRHS());
+ break;
+ case SymExpr::CastSymbolKind:
+ KnownLive = isLive(cast<SymbolCast>(sym)->getOperand());
+ break;
}
- if (const SymbolMetadata *metadata = dyn_cast<SymbolMetadata>(sym)) {
- if (MetadataInUse.count(sym)) {
- if (isLiveRegion(metadata->getRegion())) {
- markLive(sym);
- MetadataInUse.erase(sym);
- return true;
- }
- }
- return false;
- }
+ if (KnownLive)
+ markLive(sym);
-  // Interrogate the symbol. It may derive from an input value to
- // the analyzed function/method.
- return isa<SymbolRegionValue>(sym);
+ return KnownLive;
}
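
Rewritten as a switch, the rule is compositional: data-like symbols have intrinsic liveness, and derived or compound symbols are live only if everything they are built from is live (SymSymExpr requiring both operands). A toy version of the same kind-driven recursion over a two-kind expression tree (hypothetical names, not analyzer types):

    #include <cassert>

    struct Sym {
      enum Kind { Data, BinOp } K;
      bool DataLive;           // meaningful when K == Data
      const Sym *L, *R;        // meaningful when K == BinOp (both non-null)
    };

    static bool isLive(const Sym *S) {
      switch (S->K) {
      case Sym::Data:  return S->DataLive;
      case Sym::BinOp: return isLive(S->L) && isLive(S->R);  // both must be live
      }
      return false;
    }

    int main() {
      Sym x   = { Sym::Data,  true,  0,  0 };
      Sym y   = { Sym::Data,  false, 0,  0 };
      Sym sum = { Sym::BinOp, false, &x, &y };
      assert(!isLive(&sum));  // a dead operand kills the compound symbol
    }
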
bool
SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
+ if (LCtx == 0)
+ return false;
+
if (LCtx != ELCtx) {
// If the reaper's location context is a parent of the expression's
// location context, then the expression value is now "out of scope".
@@ -508,6 +520,7 @@ SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
return false;
return true;
}
+
// If no statement is provided, everything in this and parent contexts is live.
if (!Loc)
return true;
@@ -517,10 +530,16 @@ SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
const StackFrameContext *VarContext = VR->getStackFrame();
+
+ if (!VarContext)
+ return true;
+
+ if (!LCtx)
+ return false;
const StackFrameContext *CurrentContext = LCtx->getCurrentStackFrame();
if (VarContext == CurrentContext) {
- // If no statemetnt is provided, everything is live.
+ // If no statement is provided, everything is live.
if (!Loc)
return true;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
index 66bf4bb..e09f4e3 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
@@ -41,7 +41,6 @@ public:
PathGenerationScheme getGenerationScheme() const { return Minimal; }
bool supportsLogicalOpControlFlow() const { return true; }
bool supportsAllBlockEdges() const { return true; }
- virtual bool useVerboseDescription() const { return true; }
virtual bool supportsCrossFileDiagnostics() const { return true; }
};
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 34b5266..7dbac3c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -34,7 +34,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Path.h"
@@ -78,7 +78,6 @@ public:
ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag) : Diag(Diag) {}
virtual ~ClangDiagPathDiagConsumer() {}
virtual StringRef getName() const { return "ClangDiags"; }
- virtual bool useVerboseDescription() const { return false; }
virtual PathGenerationScheme getGenerationScheme() const { return None; }
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
@@ -86,7 +85,7 @@ public:
for (std::vector<const PathDiagnostic*>::iterator I = Diags.begin(),
E = Diags.end(); I != E; ++I) {
const PathDiagnostic *PD = *I;
- StringRef desc = PD->getDescription();
+ StringRef desc = PD->getShortDescription();
SmallString<512> TmpStr;
llvm::raw_svector_ostream Out(TmpStr);
for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) {
@@ -121,11 +120,12 @@ namespace {
class AnalysisConsumer : public ASTConsumer,
public RecursiveASTVisitor<AnalysisConsumer> {
- enum AnalysisMode {
- ANALYSIS_SYNTAX,
- ANALYSIS_PATH,
- ANALYSIS_ALL
+ enum {
+ AM_None = 0,
+ AM_Syntax = 0x1,
+ AM_Path = 0x2
};
+ typedef unsigned AnalysisMode;
/// Mode of the analyses performed while recursively visiting Decls.
AnalysisMode RecVisitorMode;
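
Turning AnalysisMode into a bitmask lets a single decl carry both kinds of analysis and lets getModeForDecl() strip just the path-sensitive bit for header code. The flag algebra in isolation:

    #include <cassert>

    enum { AM_None = 0, AM_Syntax = 0x1, AM_Path = 0x2 };
    typedef unsigned AnalysisMode;

    int main() {
      AnalysisMode M = AM_Syntax | AM_Path;  // main-file decl: run everything
      M &= ~AM_Path;                         // header decl: keep syntax checks only
      assert(M & AM_Syntax);
      assert(!(M & AM_Path));
    }
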
@@ -136,7 +136,7 @@ public:
ASTContext *Ctx;
const Preprocessor &PP;
const std::string OutDir;
- AnalyzerOptions Opts;
+ AnalyzerOptionsRef Opts;
ArrayRef<std::string> Plugins;
/// \brief Stores the declarations from the local translation unit.
@@ -164,19 +164,19 @@ public:
AnalysisConsumer(const Preprocessor& pp,
const std::string& outdir,
- const AnalyzerOptions& opts,
+ AnalyzerOptionsRef opts,
ArrayRef<std::string> plugins)
- : RecVisitorMode(ANALYSIS_ALL), RecVisitorBR(0),
+ : RecVisitorMode(0), RecVisitorBR(0),
Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins) {
DigestAnalyzerOptions();
- if (Opts.PrintStats) {
+ if (Opts->PrintStats) {
llvm::EnableStatistics();
TUTotalTimer = new llvm::Timer("Analyzer Total Time");
}
}
~AnalysisConsumer() {
- if (Opts.PrintStats)
+ if (Opts->PrintStats)
delete TUTotalTimer;
}
@@ -185,49 +185,52 @@ public:
PathConsumers.push_back(new ClangDiagPathDiagConsumer(PP.getDiagnostics()));
if (!OutDir.empty()) {
- switch (Opts.AnalysisDiagOpt) {
+ switch (Opts->AnalysisDiagOpt) {
default:
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE) \
case PD_##NAME: CREATEFN(PathConsumers, OutDir, PP); break;
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
}
- } else if (Opts.AnalysisDiagOpt == PD_TEXT) {
+ } else if (Opts->AnalysisDiagOpt == PD_TEXT) {
// Create the text client even without a specified output file since
// it just uses diagnostic notes.
createTextPathDiagnosticConsumer(PathConsumers, "", PP);
}
// Create the analyzer component creators.
- switch (Opts.AnalysisStoreOpt) {
+ switch (Opts->AnalysisStoreOpt) {
default:
llvm_unreachable("Unknown store manager.");
#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \
case NAME##Model: CreateStoreMgr = CREATEFN; break;
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
}
- switch (Opts.AnalysisConstraintsOpt) {
+ switch (Opts->AnalysisConstraintsOpt) {
default:
llvm_unreachable("Unknown store manager.");
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN) \
case NAME##Model: CreateConstraintMgr = CREATEFN; break;
-#include "clang/Frontend/Analyses.def"
+#include "clang/StaticAnalyzer/Core/Analyses.def"
}
}
void DisplayFunction(const Decl *D, AnalysisMode Mode) {
- if (!Opts.AnalyzerDisplayProgress)
+ if (!Opts->AnalyzerDisplayProgress)
return;
SourceManager &SM = Mgr->getASTContext().getSourceManager();
PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
if (Loc.isValid()) {
llvm::errs() << "ANALYZE";
- switch (Mode) {
- case ANALYSIS_SYNTAX: llvm::errs() << "(Syntax)"; break;
- case ANALYSIS_PATH: llvm::errs() << "(Path Sensitive)"; break;
- case ANALYSIS_ALL: break;
- };
+
+ if (Mode == AM_Syntax)
+ llvm::errs() << " (Syntax)";
+ else if (Mode == AM_Path)
+ llvm::errs() << " (Path)";
+ else
+ assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!");
+
llvm::errs() << ": " << Loc.getFilename();
if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
const NamedDecl *ND = cast<NamedDecl>(D);
@@ -246,7 +249,7 @@ public:
virtual void Initialize(ASTContext &Context) {
Ctx = &Context;
- checkerMgr.reset(createCheckerManager(Opts, PP.getLangOpts(), Plugins,
+ checkerMgr.reset(createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
PP.getDiagnostics()));
Mgr.reset(new AnalysisManager(*Ctx,
PP.getDiagnostics(),
@@ -255,17 +258,7 @@ public:
CreateStoreMgr,
CreateConstraintMgr,
checkerMgr.get(),
- Opts.MaxNodes, Opts.MaxLoop,
- Opts.VisualizeEGDot, Opts.VisualizeEGUbi,
- Opts.AnalysisPurgeOpt, Opts.EagerlyAssume,
- Opts.TrimGraph,
- Opts.UnoptimizedCFG, Opts.CFGAddImplicitDtors,
- Opts.EagerlyTrimEGraph,
- Opts.IPAMode,
- Opts.InlineMaxStackDepth,
- Opts.InlineMaxFunctionSize,
- Opts.InliningMode,
- Opts.NoRetryExhausted));
+ *Opts));
}
/// \brief Store the top level decls in the set to be processed later on.
@@ -277,7 +270,7 @@ public:
/// \brief Build the call graph for all the top level decls of this TU and
/// use it to define the order in which the functions should be visited.
- void HandleDeclsGallGraph(const unsigned LocalTUDeclsSize);
+ void HandleDeclsCallGraph(const unsigned LocalTUDeclsSize);
/// \brief Run analyses (syntax or path-sensitive) on the given function.
/// \param Mode - determines if we are requesting syntax only or path
@@ -297,7 +290,9 @@ public:
/// Handle callbacks for arbitrary Decls.
bool VisitDecl(Decl *D) {
- checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
+ AnalysisMode Mode = getModeForDecl(D, RecVisitorMode);
+ if (Mode & AM_Syntax)
+ checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
return true;
}
@@ -316,7 +311,6 @@ public:
}
bool VisitObjCMethodDecl(ObjCMethodDecl *MD) {
- checkerMgr->runCheckersOnASTDecl(MD, *Mgr, *RecVisitorBR);
if (MD->isThisDeclarationADefinition())
HandleCode(MD, RecVisitorMode);
return true;
@@ -326,7 +320,7 @@ private:
void storeTopLevelDecls(DeclGroupRef DG);
/// \brief Check if we should skip (not analyze) the given function.
- bool skipFunction(Decl *D);
+ AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode);
};
} // end anonymous namespace
@@ -358,7 +352,23 @@ void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
}
}
-void AnalysisConsumer::HandleDeclsGallGraph(const unsigned LocalTUDeclsSize) {
+static bool shouldSkipFunction(CallGraphNode *N,
+ SmallPtrSet<CallGraphNode*,24> Visited) {
+  // We want to re-analyze the functions as top level in several cases:
+  // - The 'init' methods should be reanalyzed because
+  //   ObjCNonNilReturnValueChecker assumes that '[super init]' never returns
+  //   'nil'; unless we analyze the 'init' functions as top level, we will not
+  //   catch errors within defensive code.
+ // - We want to reanalyze all ObjC methods as top level to report Retain
+ // Count naming convention errors more aggressively.
+ if (isa<ObjCMethodDecl>(N->getDecl()))
+ return false;
+
+ // Otherwise, if we visited the function before, do not reanalyze it.
+ return Visited.count(N);
+}
+
+void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
// Otherwise, use the Callgraph to derive the order.
// Build the Call Graph.
CallGraph CG;
@@ -407,21 +417,21 @@ void AnalysisConsumer::HandleDeclsGallGraph(const unsigned LocalTUDeclsSize) {
// Push the children into the queue.
for (CallGraphNode::const_iterator CI = N->begin(),
CE = N->end(); CI != CE; ++CI) {
- if (!Visited.count(*CI))
+ if (!shouldSkipFunction(*CI, Visited))
BFSQueue.push_back(*CI);
}
// Skip the functions which have been processed already or previously
// inlined.
- if (Visited.count(N))
+ if (shouldSkipFunction(N, Visited))
continue;
// Analyze the function.
SetOfConstDecls VisitedCallees;
Decl *D = N->getDecl();
assert(D);
- HandleCode(D, ANALYSIS_PATH,
- (Mgr->InliningMode == All ? 0 : &VisitedCallees));
+ HandleCode(D, AM_Path,
+ (Mgr->options.InliningMode == All ? 0 : &VisitedCallees));
// Add the visited callees to the global visited set.
for (SetOfConstDecls::iterator I = VisitedCallees.begin(),
@@ -451,7 +461,9 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
// Run the AST-only checks using the order in which functions are defined.
// If inlining is not turned on, use the simplest function order for path
// sensitive analyses as well.
- RecVisitorMode = (Mgr->shouldInlineCall() ? ANALYSIS_SYNTAX : ANALYSIS_ALL);
+ RecVisitorMode = AM_Syntax;
+ if (!Mgr->shouldInlineCall())
+ RecVisitorMode |= AM_Path;
RecVisitorBR = &BR;
// Process all the top level declarations.
@@ -466,7 +478,7 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
}
if (Mgr->shouldInlineCall())
- HandleDeclsGallGraph(LocalTUDeclsSize);
+ HandleDeclsCallGraph(LocalTUDeclsSize);
// After all decls handled, run checkers on the entire TranslationUnit.
checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
@@ -513,24 +525,32 @@ static std::string getFunctionName(const Decl *D) {
return "";
}
-bool AnalysisConsumer::skipFunction(Decl *D) {
- if (!Opts.AnalyzeSpecificFunction.empty() &&
- getFunctionName(D) != Opts.AnalyzeSpecificFunction)
- return true;
-
- // Don't run the actions on declarations in header files unless
- // otherwise specified.
+AnalysisConsumer::AnalysisMode
+AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
+ if (!Opts->AnalyzeSpecificFunction.empty() &&
+ getFunctionName(D) != Opts->AnalyzeSpecificFunction)
+ return AM_None;
+
+ // Unless -analyze-all is specified, treat decls differently depending on
+ // where they came from:
+ // - Main source file: run both path-sensitive and non-path-sensitive checks.
+ // - Header files: run non-path-sensitive checks only.
+ // - System headers: don't run any checks.
SourceManager &SM = Ctx->getSourceManager();
SourceLocation SL = SM.getExpansionLoc(D->getLocation());
- if (!Opts.AnalyzeAll && !SM.isFromMainFile(SL))
- return true;
+ if (!Opts->AnalyzeAll && !SM.isFromMainFile(SL)) {
+ if (SL.isInvalid() || SM.isInSystemHeader(SL))
+ return AM_None;
+ return Mode & ~AM_Path;
+ }
- return false;
+ return Mode;
}
void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
SetOfConstDecls *VisitedCallees) {
- if (skipFunction(D))
+ Mode = getModeForDecl(D, Mode);
+ if (Mode == AM_None)
return;
DisplayFunction(D, Mode);
@@ -548,16 +568,16 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
SmallVector<Decl*, 10> WL;
WL.push_back(D);
- if (D->hasBody() && Opts.AnalyzeNestedBlocks)
+ if (D->hasBody() && Opts->AnalyzeNestedBlocks)
FindBlocks(cast<DeclContext>(D), WL);
BugReporter BR(*Mgr);
for (SmallVectorImpl<Decl*>::iterator WI=WL.begin(), WE=WL.end();
WI != WE; ++WI)
if ((*WI)->hasBody()) {
- if (Mode != ANALYSIS_PATH)
+ if (Mode & AM_Syntax)
checkerMgr->runCheckersOnASTBody(*WI, *Mgr, BR);
- if (Mode != ANALYSIS_SYNTAX && checkerMgr->hasPathSensitiveCheckers()) {
+ if ((Mode & AM_Path) && checkerMgr->hasPathSensitiveCheckers()) {
RunPathSensitiveChecks(*WI, VisitedCallees);
NumFunctionsAnalyzed++;
}
@@ -583,22 +603,22 @@ void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
// Set the graph auditor.
OwningPtr<ExplodedNode::Auditor> Auditor;
- if (Mgr->shouldVisualizeUbigraph()) {
+ if (Mgr->options.visualizeExplodedGraphWithUbiGraph) {
Auditor.reset(CreateUbiViz());
ExplodedNode::SetAuditor(Auditor.get());
}
// Execute the worklist algorithm.
Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
- Mgr->getMaxNodes());
+ Mgr->options.MaxNodes);
// Release the auditor (if any) so that it doesn't monitor the graph
// created BugReporter.
ExplodedNode::SetAuditor(0);
// Visualize the exploded graph.
- if (Mgr->shouldVisualizeGraphviz())
- Eng.ViewGraph(Mgr->shouldTrimGraph());
+ if (Mgr->options.visualizeExplodedGraphWithGraphViz)
+ Eng.ViewGraph(Mgr->options.TrimGraph);
// Display warnings.
Eng.getBugReporter().FlushReports();
@@ -629,7 +649,7 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
ASTConsumer* ento::CreateAnalysisConsumer(const Preprocessor& pp,
const std::string& outDir,
- const AnalyzerOptions& opts,
+ AnalyzerOptionsRef opts,
ArrayRef<std::string> plugins) {
// Disable the effects of '-Werror' when using the AnalysisConsumer.
pp.getDiagnostics().setWarningsAsErrors(false);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
index 5a16bff..b75220b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
@@ -16,11 +16,11 @@
#define LLVM_CLANG_GR_ANALYSISCONSUMER_H
#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include <string>
namespace clang {
-class AnalyzerOptions;
class ASTConsumer;
class Preprocessor;
class DiagnosticsEngine;
@@ -33,7 +33,7 @@ class CheckerManager;
/// options.)
ASTConsumer* CreateAnalysisConsumer(const Preprocessor &pp,
const std::string &output,
- const AnalyzerOptions& opts,
+ AnalyzerOptionsRef opts,
ArrayRef<std::string> plugins);
} // end GR namespace
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index 0229aed..e8daa65 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -17,7 +17,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
-#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/Support/DynamicLibrary.h"
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CommandLineClangTool.cpp b/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp
index 8da2a33..15091c7 100644
--- a/contrib/llvm/tools/clang/lib/Tooling/CommandLineClangTool.cpp
+++ b/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp
@@ -1,4 +1,4 @@
-//===--- CommandLineClangTool.cpp - command-line clang tools driver -------===//
+//===--- CommonOptionsParser.cpp - common options for clang tools ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,28 +7,31 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the CommandLineClangTool class used to run clang
-// tools as separate command-line applications with a consistent common
-// interface for handling compilation database and input files.
+// This file implements the CommonOptionsParser class used to parse common
+// command-line options for clang tools, so that they can be run as separate
+// command-line applications with a consistent common interface for handling
+// compilation database and input files.
//
// It provides a common subset of command-line options, common algorithm
// for locating a compilation database and source files, and help messages
// for the basic command-line interface.
//
-// It creates a CompilationDatabase, initializes a ClangTool and runs a
-// user-specified FrontendAction over all TUs in which the given files are
-// compiled.
+// It creates a CompilationDatabase and reads common command-line options.
+//
+// This class uses the Clang Tooling infrastructure, see
+// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html
+// for details on setting it up with LLVM source tree.
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/FrontendActions.h"
-#include "clang/Tooling/CommandLineClangTool.h"
+#include "llvm/Support/CommandLine.h"
+#include "clang/Tooling/CommonOptionsParser.h"
#include "clang/Tooling/Tooling.h"
using namespace clang::tooling;
using namespace llvm;
-static const char *MoreHelpText =
+const char *const CommonOptionsParser::HelpMessage =
"\n"
"-p <build-path> is used to read a compile command database.\n"
"\n"
@@ -40,26 +43,27 @@ static const char *MoreHelpText =
"\thttp://clang.llvm.org/docs/HowToSetupToolingForLLVM.html for an\n"
"\texample of setting up Clang Tooling on a source tree.\n"
"\n"
- "<source0> ... specify the paths of source files. These paths are looked\n"
- "\tup in the compile command database. If the path of a file is absolute,\n"
- "\tit needs to point into CMake's source tree. If the path is relative,\n"
- "\tthe current working directory needs to be in the CMake source tree and\n"
- "\tthe file must be in a subdirectory of the current working directory.\n"
- "\t\"./\" prefixes in the relative files will be automatically removed,\n"
- "\tbut the rest of a relative path must be a suffix of a path in the\n"
- "\tcompile command database.\n"
+ "<source0> ... specify the paths of source files. These paths are\n"
+ "\tlooked up in the compile command database. If the path of a file is\n"
+ "\tabsolute, it needs to point into CMake's source tree. If the path is\n"
+ "\trelative, the current working directory needs to be in the CMake\n"
+ "\tsource tree and the file must be in a subdirectory of the current\n"
+ "\tworking directory. \"./\" prefixes in the relative files will be\n"
+ "\tautomatically removed, but the rest of a relative path must be a\n"
+ "\tsuffix of a path in the compile command database.\n"
"\n";
-CommandLineClangTool::CommandLineClangTool() :
- BuildPath("p", cl::desc("Build path"), cl::Optional),
- SourcePaths(cl::Positional, cl::desc("<source0> [... <sourceN>]"),
- cl::OneOrMore),
- MoreHelp(MoreHelpText) {
-}
+CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv) {
+ static cl::opt<std::string> BuildPath(
+ "p", cl::desc("Build path"), cl::Optional);
+
+ static cl::list<std::string> SourcePaths(
+ cl::Positional, cl::desc("<source0> [... <sourceN>]"), cl::OneOrMore);
-void CommandLineClangTool::initialize(int argc, const char **argv) {
- Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc, argv));
+ Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc,
+ argv));
cl::ParseCommandLineOptions(argc, argv);
+ SourcePathList = SourcePaths;
if (!Compilations) {
std::string ErrorMessage;
if (!BuildPath.empty()) {
@@ -73,8 +77,3 @@ void CommandLineClangTool::initialize(int argc, const char **argv) {
llvm::report_fatal_error(ErrorMessage);
}
}
-
-int CommandLineClangTool::run(FrontendActionFactory *ActionFactory) {
- ClangTool Tool(*Compilations, SourcePaths);
- return Tool.run(ActionFactory);
-}
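
The boilerplate that the removed run() method wrapped now lives in each tool's main(). A sketch of the intended replacement, matching the Tooling interfaces of this period (SyntaxOnlyAction is just an example action; verify the accessor names against your tree):

    #include "clang/Frontend/FrontendActions.h"
    #include "clang/Tooling/CommonOptionsParser.h"
    #include "clang/Tooling/Tooling.h"

    using namespace clang::tooling;

    int main(int argc, const char **argv) {
      CommonOptionsParser OptionsParser(argc, argv);  // parses -p and <source0>...
      ClangTool Tool(OptionsParser.getCompilations(),
                     OptionsParser.getSourcePathList());
      return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
    }
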
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
index 3139cc2..4149cda 100644
--- a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
@@ -7,132 +7,49 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains multiple implementations for CompilationDatabases.
+// This file contains implementations of the CompilationDatabase base class
+// and the FixedCompilationDatabase.
//
//===----------------------------------------------------------------------===//
+#include <sstream>
#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/system_error.h"
-#ifdef USE_CUSTOM_COMPILATION_DATABASE
-#include "CustomCompilationDatabase.h"
-#endif
-
namespace clang {
namespace tooling {
-namespace {
-
-/// \brief A parser for escaped strings of command line arguments.
-///
-/// Assumes \-escaping for quoted arguments (see the documentation of
-/// unescapeCommandLine(...)).
-class CommandLineArgumentParser {
- public:
- CommandLineArgumentParser(StringRef CommandLine)
- : Input(CommandLine), Position(Input.begin()-1) {}
-
- std::vector<std::string> parse() {
- bool HasMoreInput = true;
- while (HasMoreInput && nextNonWhitespace()) {
- std::string Argument;
- HasMoreInput = parseStringInto(Argument);
- CommandLine.push_back(Argument);
- }
- return CommandLine;
- }
-
- private:
- // All private methods return true if there is more input available.
-
- bool parseStringInto(std::string &String) {
- do {
- if (*Position == '"') {
- if (!parseQuotedStringInto(String)) return false;
- } else {
- if (!parseFreeStringInto(String)) return false;
- }
- } while (*Position != ' ');
- return true;
- }
-
- bool parseQuotedStringInto(std::string &String) {
- if (!next()) return false;
- while (*Position != '"') {
- if (!skipEscapeCharacter()) return false;
- String.push_back(*Position);
- if (!next()) return false;
- }
- return next();
- }
-
- bool parseFreeStringInto(std::string &String) {
- do {
- if (!skipEscapeCharacter()) return false;
- String.push_back(*Position);
- if (!next()) return false;
- } while (*Position != ' ' && *Position != '"');
- return true;
- }
-
- bool skipEscapeCharacter() {
- if (*Position == '\\') {
- return next();
- }
- return true;
- }
-
- bool nextNonWhitespace() {
- do {
- if (!next()) return false;
- } while (*Position == ' ');
- return true;
- }
-
- bool next() {
- ++Position;
- return Position != Input.end();
- }
-
- const StringRef Input;
- StringRef::iterator Position;
- std::vector<std::string> CommandLine;
-};
-
-std::vector<std::string> unescapeCommandLine(
- StringRef EscapedCommandLine) {
- CommandLineArgumentParser parser(EscapedCommandLine);
- return parser.parse();
-}
-
-} // end namespace
-
CompilationDatabase::~CompilationDatabase() {}
CompilationDatabase *
CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
std::string &ErrorMessage) {
- llvm::SmallString<1024> JSONDatabasePath(BuildDirectory);
- llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
- llvm::OwningPtr<CompilationDatabase> Database(
- JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage));
- if (!Database) {
- return NULL;
+ std::stringstream ErrorStream;
+ for (CompilationDatabasePluginRegistry::iterator
+ It = CompilationDatabasePluginRegistry::begin(),
+ Ie = CompilationDatabasePluginRegistry::end();
+ It != Ie; ++It) {
+ std::string DatabaseErrorMessage;
+ OwningPtr<CompilationDatabasePlugin> Plugin(It->instantiate());
+ if (CompilationDatabase *DB =
+ Plugin->loadFromDirectory(BuildDirectory, DatabaseErrorMessage))
+ return DB;
+ else
+ ErrorStream << It->getName() << ": " << DatabaseErrorMessage << "\n";
}
- return Database.take();
+ ErrorMessage = ErrorStream.str();
+ return NULL;
}
static CompilationDatabase *
-findCompilationDatabaseFromDirectory(StringRef Directory) {
-#ifdef USE_CUSTOM_COMPILATION_DATABASE
- if (CompilationDatabase *DB =
- ::clang::tooling::findCompilationDatabaseForDirectory(Directory))
- return DB;
-#endif
+findCompilationDatabaseFromDirectory(StringRef Directory,
+ std::string &ErrorMessage) {
+ std::stringstream ErrorStream;
+ bool HasErrorMessage = false;
while (!Directory.empty()) {
std::string LoadErrorMessage;
@@ -140,8 +57,15 @@ findCompilationDatabaseFromDirectory(StringRef Directory) {
CompilationDatabase::loadFromDirectory(Directory, LoadErrorMessage))
return DB;
+ if (!HasErrorMessage) {
+ ErrorStream << "No compilation database found in " << Directory.str()
+ << " or any parent directory\n" << LoadErrorMessage;
+ HasErrorMessage = true;
+ }
+
Directory = llvm::sys::path::parent_path(Directory);
}
+ ErrorMessage = ErrorStream.str();
return NULL;
}
@@ -151,11 +75,12 @@ CompilationDatabase::autoDetectFromSource(StringRef SourceFile,
llvm::SmallString<1024> AbsolutePath(getAbsolutePath(SourceFile));
StringRef Directory = llvm::sys::path::parent_path(AbsolutePath);
- CompilationDatabase *DB = findCompilationDatabaseFromDirectory(Directory);
+ CompilationDatabase *DB = findCompilationDatabaseFromDirectory(Directory,
+ ErrorMessage);
if (!DB)
ErrorMessage = ("Could not auto-detect compilation database for file \"" +
- SourceFile + "\"").str();
+ SourceFile + "\"\n" + ErrorMessage).str();
return DB;
}
@@ -164,14 +89,17 @@ CompilationDatabase::autoDetectFromDirectory(StringRef SourceDir,
std::string &ErrorMessage) {
llvm::SmallString<1024> AbsolutePath(getAbsolutePath(SourceDir));
- CompilationDatabase *DB = findCompilationDatabaseFromDirectory(AbsolutePath);
+ CompilationDatabase *DB = findCompilationDatabaseFromDirectory(AbsolutePath,
+ ErrorMessage);
if (!DB)
ErrorMessage = ("Could not auto-detect compilation database from directory \"" +
- SourceDir + "\"").str();
+ SourceDir + "\"\n" + ErrorMessage).str();
return DB;
}
+CompilationDatabasePlugin::~CompilationDatabasePlugin() {}
+
FixedCompilationDatabase *
FixedCompilationDatabase::loadFromCommandLine(int &Argc,
const char **Argv,
@@ -204,153 +132,10 @@ FixedCompilationDatabase::getAllFiles() const {
return std::vector<std::string>();
}
-JSONCompilationDatabase *
-JSONCompilationDatabase::loadFromFile(StringRef FilePath,
- std::string &ErrorMessage) {
- llvm::OwningPtr<llvm::MemoryBuffer> DatabaseBuffer;
- llvm::error_code Result =
- llvm::MemoryBuffer::getFile(FilePath, DatabaseBuffer);
- if (Result != 0) {
- ErrorMessage = "Error while opening JSON database: " + Result.message();
- return NULL;
- }
- llvm::OwningPtr<JSONCompilationDatabase> Database(
- new JSONCompilationDatabase(DatabaseBuffer.take()));
- if (!Database->parse(ErrorMessage))
- return NULL;
- return Database.take();
-}
-
-JSONCompilationDatabase *
-JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
- std::string &ErrorMessage) {
- llvm::OwningPtr<llvm::MemoryBuffer> DatabaseBuffer(
- llvm::MemoryBuffer::getMemBuffer(DatabaseString));
- llvm::OwningPtr<JSONCompilationDatabase> Database(
- new JSONCompilationDatabase(DatabaseBuffer.take()));
- if (!Database->parse(ErrorMessage))
- return NULL;
- return Database.take();
-}
-
-std::vector<CompileCommand>
-JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
- llvm::SmallString<128> NativeFilePath;
- llvm::sys::path::native(FilePath, NativeFilePath);
- llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
- CommandsRefI = IndexByFile.find(NativeFilePath);
- if (CommandsRefI == IndexByFile.end())
- return std::vector<CompileCommand>();
- const std::vector<CompileCommandRef> &CommandsRef = CommandsRefI->getValue();
- std::vector<CompileCommand> Commands;
- for (int I = 0, E = CommandsRef.size(); I != E; ++I) {
- llvm::SmallString<8> DirectoryStorage;
- llvm::SmallString<1024> CommandStorage;
- Commands.push_back(CompileCommand(
- // FIXME: Escape correctly:
- CommandsRef[I].first->getValue(DirectoryStorage),
- unescapeCommandLine(CommandsRef[I].second->getValue(CommandStorage))));
- }
- return Commands;
-}
-
-std::vector<std::string>
-JSONCompilationDatabase::getAllFiles() const {
- std::vector<std::string> Result;
-
- llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
- CommandsRefI = IndexByFile.begin();
- const llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
- CommandsRefEnd = IndexByFile.end();
- for (; CommandsRefI != CommandsRefEnd; ++CommandsRefI) {
- Result.push_back(CommandsRefI->first().str());
- }
-
- return Result;
-}
-
-bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
- llvm::yaml::document_iterator I = YAMLStream.begin();
- if (I == YAMLStream.end()) {
- ErrorMessage = "Error while parsing YAML.";
- return false;
- }
- llvm::yaml::Node *Root = I->getRoot();
- if (Root == NULL) {
- ErrorMessage = "Error while parsing YAML.";
- return false;
- }
- llvm::yaml::SequenceNode *Array =
- llvm::dyn_cast<llvm::yaml::SequenceNode>(Root);
- if (Array == NULL) {
- ErrorMessage = "Expected array.";
- return false;
- }
- for (llvm::yaml::SequenceNode::iterator AI = Array->begin(),
- AE = Array->end();
- AI != AE; ++AI) {
- llvm::yaml::MappingNode *Object =
- llvm::dyn_cast<llvm::yaml::MappingNode>(&*AI);
- if (Object == NULL) {
- ErrorMessage = "Expected object.";
- return false;
- }
- llvm::yaml::ScalarNode *Directory = NULL;
- llvm::yaml::ScalarNode *Command = NULL;
- llvm::yaml::ScalarNode *File = NULL;
- for (llvm::yaml::MappingNode::iterator KVI = Object->begin(),
- KVE = Object->end();
- KVI != KVE; ++KVI) {
- llvm::yaml::Node *Value = (*KVI).getValue();
- if (Value == NULL) {
- ErrorMessage = "Expected value.";
- return false;
- }
- llvm::yaml::ScalarNode *ValueString =
- llvm::dyn_cast<llvm::yaml::ScalarNode>(Value);
- if (ValueString == NULL) {
- ErrorMessage = "Expected string as value.";
- return false;
- }
- llvm::yaml::ScalarNode *KeyString =
- llvm::dyn_cast<llvm::yaml::ScalarNode>((*KVI).getKey());
- if (KeyString == NULL) {
- ErrorMessage = "Expected strings as key.";
- return false;
- }
- llvm::SmallString<8> KeyStorage;
- if (KeyString->getValue(KeyStorage) == "directory") {
- Directory = ValueString;
- } else if (KeyString->getValue(KeyStorage) == "command") {
- Command = ValueString;
- } else if (KeyString->getValue(KeyStorage) == "file") {
- File = ValueString;
- } else {
- ErrorMessage = ("Unknown key: \"" +
- KeyString->getRawValue() + "\"").str();
- return false;
- }
- }
- if (!File) {
- ErrorMessage = "Missing key: \"file\".";
- return false;
- }
- if (!Command) {
- ErrorMessage = "Missing key: \"command\".";
- return false;
- }
- if (!Directory) {
- ErrorMessage = "Missing key: \"directory\".";
- return false;
- }
- llvm::SmallString<8> FileStorage;
- llvm::SmallString<128> NativeFilePath;
- llvm::sys::path::native(File->getValue(FileStorage), NativeFilePath);
- IndexByFile[NativeFilePath].push_back(
- CompileCommandRef(Directory, Command));
- }
- return true;
-}
+// This anchor is used to force the linker to link in the generated object file
+// and thus register the JSONCompilationDatabasePlugin.
+extern volatile int JSONAnchorSource;
+static int JSONAnchorDest = JSONAnchorSource;
} // end namespace tooling
} // end namespace clang
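With this change, loadFromDirectory() no longer hard-codes the JSON reader; it asks every plugin registered in the CompilationDatabasePluginRegistry and aggregates one error line per plugin that fails. A minimal sketch of the consumer side, assuming only the headers shown above (the source path is a made-up example):

    #include "clang/Tooling/CompilationDatabase.h"
    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    int main() {
      std::string ErrorMessage;
      // Walks up from the file's directory, asking each registered plugin.
      llvm::OwningPtr<clang::tooling::CompilationDatabase> DB(
          clang::tooling::CompilationDatabase::autoDetectFromSource(
              "/project/src/main.cc", ErrorMessage));
      if (!DB) {
        llvm::errs() << ErrorMessage;  // one line per plugin that failed
        return 1;
      }
      return 0;
    }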
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CustomCompilationDatabase.h b/contrib/llvm/tools/clang/lib/Tooling/CustomCompilationDatabase.h
deleted file mode 100644
index b375f8d..0000000
--- a/contrib/llvm/tools/clang/lib/Tooling/CustomCompilationDatabase.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//===--- CustomCompilationDatabase.h --------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a hook to supply a custom \c CompilationDatabase
-// implementation.
-//
-// The mechanism can be used by IDEs or non-public code bases to integrate with
-// their build system. Currently we support statically linking in an
-// implementation of \c findCompilationDatabaseForDirectory and enabling it
-// with -DUSE_CUSTOM_COMPILATION_DATABASE when compiling the Tooling library.
-//
-// FIXME: The strategy forward is to provide a plugin system that can load
-// custom compilation databases and make enabling that a build option.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H
-#define LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H
-
-#include "llvm/ADT/StringRef.h"
-
-namespace clang {
-namespace tooling {
-class CompilationDatabase;
-
-/// \brief Returns a CompilationDatabase for the given \c Directory.
-///
-/// \c Directory can be any directory within a project. This methods will
-/// then try to find compilation database files in \c Directory or any of its
-/// parents. If a compilation database cannot be found or loaded, returns NULL.
-clang::tooling::CompilationDatabase *findCompilationDatabaseForDirectory(
- llvm::StringRef Directory);
-
-} // namespace tooling
-} // namespace clang
-
-#endif // LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H
diff --git a/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp b/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp
new file mode 100644
index 0000000..8f25a8c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp
@@ -0,0 +1,188 @@
+//===--- FileMatchTrie.cpp - ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of a FileMatchTrie.
+//
+//===----------------------------------------------------------------------===//
+
+#include <sstream>
+#include "clang/Tooling/FileMatchTrie.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/PathV2.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace tooling {
+
+/// \brief Default \c PathComparator using \c llvm::sys::fs::equivalent().
+struct DefaultPathComparator : public PathComparator {
+ virtual ~DefaultPathComparator() {}
+ virtual bool equivalent(StringRef FileA, StringRef FileB) const {
+ return FileA == FileB || llvm::sys::fs::equivalent(FileA, FileB);
+ }
+};
+
+/// \brief A node of the \c FileMatchTrie.
+///
+/// Each node has storage for up to one path and a map mapping a path segment to
+/// child nodes. The trie starts with an empty root node.
+class FileMatchTrieNode {
+public:
+ /// \brief Inserts 'NewPath' into this trie. \c ConsumedLength denotes
+ /// the number of \c NewPath's trailing characters already consumed during
+ /// recursion.
+ ///
+ /// An insert of a path
+  /// 'p' starts at the root node and does the following:
+ /// - If the node is empty, insert 'p' into its storage and abort.
+ /// - If the node has a path 'p2' but no children, take the last path segment
+  ///   's' of 'p2', put a new child into the map at 's' and insert the rest of
+ /// 'p2' there.
+ /// - Insert a new child for the last segment of 'p' and insert the rest of
+ /// 'p' there.
+ ///
+ /// An insert operation is linear in the number of a path's segments.
+ void insert(StringRef NewPath, unsigned ConsumedLength = 0) {
+ // We cannot put relative paths into the FileMatchTrie as then a path can be
+ // a postfix of another path, violating a core assumption of the trie.
+ if (llvm::sys::path::is_relative(NewPath))
+ return;
+ if (Path.empty()) {
+ // This is an empty leaf. Store NewPath and return.
+ Path = NewPath;
+ return;
+ }
+ if (Children.empty()) {
+      // This is a leaf. Ignore the duplicate entry if 'Path' equals 'NewPath'.
+ if (NewPath == Path)
+ return;
+ // Make this a node and create a child-leaf with 'Path'.
+ StringRef Element(llvm::sys::path::filename(
+ StringRef(Path).drop_back(ConsumedLength)));
+ Children[Element].Path = Path;
+ }
+ StringRef Element(llvm::sys::path::filename(
+ StringRef(NewPath).drop_back(ConsumedLength)));
+ Children[Element].insert(NewPath, ConsumedLength + Element.size() + 1);
+ }
+
+ /// \brief Tries to find the node under this \c FileMatchTrieNode that best
+ /// matches 'FileName'.
+ ///
+ /// If multiple paths fit 'FileName' equally well, \c IsAmbiguous is set to
+ /// \c true and an empty string is returned. If no path fits 'FileName', an
+ /// empty string is returned. \c ConsumedLength denotes the number of
+  /// \c FileName's trailing characters already consumed during recursion.
+ ///
+ /// To find the best matching node for a given path 'p', the
+ /// \c findEquivalent() function is called recursively for each path segment
+  /// (back to front) of 'p' until a node 'n' is reached that does not ..
+ /// - .. have children. In this case it is checked
+ /// whether the stored path is equivalent to 'p'. If yes, the best match is
+ /// found. Otherwise continue with the parent node as if this node did not
+ /// exist.
+  /// - .. have a child matching the next path segment. In this case, all children
+  ///   of 'n' are an equally good match for 'p'. All children of 'n' are found
+ /// recursively and their equivalence to 'p' is determined. If none are
+ /// equivalent, continue with the parent node as if 'n' didn't exist. If one
+  ///   is equivalent, the best match is found. Otherwise, report an ambiguity
+ /// error.
+ StringRef findEquivalent(const PathComparator& Comparator,
+ StringRef FileName,
+ bool &IsAmbiguous,
+ unsigned ConsumedLength = 0) const {
+ if (Children.empty()) {
+ if (Comparator.equivalent(StringRef(Path), FileName))
+ return StringRef(Path);
+ return StringRef();
+ }
+ StringRef Element(llvm::sys::path::filename(FileName.drop_back(
+ ConsumedLength)));
+ llvm::StringMap<FileMatchTrieNode>::const_iterator MatchingChild =
+ Children.find(Element);
+ if (MatchingChild != Children.end()) {
+ StringRef Result = MatchingChild->getValue().findEquivalent(
+ Comparator, FileName, IsAmbiguous,
+ ConsumedLength + Element.size() + 1);
+ if (!Result.empty() || IsAmbiguous)
+ return Result;
+ }
+ std::vector<StringRef> AllChildren;
+ getAll(AllChildren, MatchingChild);
+ StringRef Result;
+ for (unsigned i = 0; i < AllChildren.size(); i++) {
+ if (Comparator.equivalent(AllChildren[i], FileName)) {
+ if (Result.empty()) {
+ Result = AllChildren[i];
+ } else {
+ IsAmbiguous = true;
+ return StringRef();
+ }
+ }
+ }
+ return Result;
+ }
+
+private:
+ /// \brief Gets all paths under this FileMatchTrieNode.
+ void getAll(std::vector<StringRef> &Results,
+ llvm::StringMap<FileMatchTrieNode>::const_iterator Except) const {
+ if (Path.empty())
+ return;
+ if (Children.empty()) {
+ Results.push_back(StringRef(Path));
+ return;
+ }
+ for (llvm::StringMap<FileMatchTrieNode>::const_iterator
+ It = Children.begin(), E = Children.end();
+ It != E; ++It) {
+ if (It == Except)
+ continue;
+ It->getValue().getAll(Results, Children.end());
+ }
+ }
+
+ // The stored absolute path in this node. Only valid for leaf nodes, i.e.
+ // nodes where Children.empty().
+ std::string Path;
+
+ // The children of this node stored in a map based on the next path segment.
+ llvm::StringMap<FileMatchTrieNode> Children;
+};
+
+FileMatchTrie::FileMatchTrie()
+ : Root(new FileMatchTrieNode), Comparator(new DefaultPathComparator()) {}
+
+FileMatchTrie::FileMatchTrie(PathComparator *Comparator)
+ : Root(new FileMatchTrieNode), Comparator(Comparator) {}
+
+FileMatchTrie::~FileMatchTrie() {
+ delete Root;
+}
+
+void FileMatchTrie::insert(StringRef NewPath) {
+ Root->insert(NewPath);
+}
+
+StringRef FileMatchTrie::findEquivalent(StringRef FileName,
+ llvm::raw_ostream &Error) const {
+ if (llvm::sys::path::is_relative(FileName)) {
+ Error << "Cannot resolve relative paths";
+ return StringRef();
+ }
+ bool IsAmbiguous = false;
+ StringRef Result = Root->findEquivalent(*Comparator, FileName, IsAmbiguous);
+ if (IsAmbiguous)
+ Error << "Path is ambiguous";
+ return Result;
+}
+
+} // end namespace tooling
+} // end namespace clang
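A short usage sketch for the new trie, assuming the header above (the paths are invented): paths are inserted as absolute strings, lookups match by trailing path segments, and PathComparator::equivalent() resolves symlinked or otherwise aliased spellings.

    #include "clang/Tooling/FileMatchTrie.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    void demo() {
      clang::tooling::FileMatchTrie Trie;
      Trie.insert("/home/user/project/lib/foo.cc");
      Trie.insert("/home/user/project/tool/foo.cc");
      std::string Error;
      llvm::raw_string_ostream ES(Error);
      // Unambiguous: only one stored path ends in "lib/foo.cc".
      llvm::StringRef Match =
          Trie.findEquivalent("/home/user/project/lib/foo.cc", ES);
      (void)Match;  // empty, plus an error message, if ambiguous or relative
    }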
diff --git a/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp
new file mode 100644
index 0000000..cf35a25
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -0,0 +1,303 @@
+//===--- JSONCompilationDatabase.cpp - ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the JSONCompilationDatabase.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/JSONCompilationDatabase.h"
+
+#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
+
+namespace clang {
+namespace tooling {
+
+namespace {
+
+/// \brief A parser for escaped strings of command line arguments.
+///
+/// Assumes \-escaping for quoted arguments (see the documentation of
+/// unescapeCommandLine(...)).
+class CommandLineArgumentParser {
+ public:
+ CommandLineArgumentParser(StringRef CommandLine)
+ : Input(CommandLine), Position(Input.begin()-1) {}
+
+ std::vector<std::string> parse() {
+ bool HasMoreInput = true;
+ while (HasMoreInput && nextNonWhitespace()) {
+ std::string Argument;
+ HasMoreInput = parseStringInto(Argument);
+ CommandLine.push_back(Argument);
+ }
+ return CommandLine;
+ }
+
+ private:
+ // All private methods return true if there is more input available.
+
+ bool parseStringInto(std::string &String) {
+ do {
+ if (*Position == '"') {
+ if (!parseQuotedStringInto(String)) return false;
+ } else {
+ if (!parseFreeStringInto(String)) return false;
+ }
+ } while (*Position != ' ');
+ return true;
+ }
+
+ bool parseQuotedStringInto(std::string &String) {
+ if (!next()) return false;
+ while (*Position != '"') {
+ if (!skipEscapeCharacter()) return false;
+ String.push_back(*Position);
+ if (!next()) return false;
+ }
+ return next();
+ }
+
+ bool parseFreeStringInto(std::string &String) {
+ do {
+ if (!skipEscapeCharacter()) return false;
+ String.push_back(*Position);
+ if (!next()) return false;
+ } while (*Position != ' ' && *Position != '"');
+ return true;
+ }
+
+ bool skipEscapeCharacter() {
+ if (*Position == '\\') {
+ return next();
+ }
+ return true;
+ }
+
+ bool nextNonWhitespace() {
+ do {
+ if (!next()) return false;
+ } while (*Position == ' ');
+ return true;
+ }
+
+ bool next() {
+ ++Position;
+ return Position != Input.end();
+ }
+
+ const StringRef Input;
+ StringRef::iterator Position;
+ std::vector<std::string> CommandLine;
+};
+
+std::vector<std::string> unescapeCommandLine(
+ StringRef EscapedCommandLine) {
+ CommandLineArgumentParser parser(EscapedCommandLine);
+ return parser.parse();
+}
+
+} // end namespace
+
+class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
+ virtual CompilationDatabase *loadFromDirectory(
+ StringRef Directory, std::string &ErrorMessage) {
+ llvm::SmallString<1024> JSONDatabasePath(Directory);
+ llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
+ llvm::OwningPtr<CompilationDatabase> Database(
+ JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage));
+ if (!Database)
+ return NULL;
+ return Database.take();
+ }
+};
+
+// Register the JSONCompilationDatabasePlugin with the
+// CompilationDatabasePluginRegistry using this statically initialized variable.
+static CompilationDatabasePluginRegistry::Add<JSONCompilationDatabasePlugin>
+X("json-compilation-database", "Reads JSON formatted compilation databases");
+
+// This anchor is used to force the linker to link in the generated object file
+// and thus register the JSONCompilationDatabasePlugin.
+volatile int JSONAnchorSource = 0;
+
+JSONCompilationDatabase *
+JSONCompilationDatabase::loadFromFile(StringRef FilePath,
+ std::string &ErrorMessage) {
+ llvm::OwningPtr<llvm::MemoryBuffer> DatabaseBuffer;
+ llvm::error_code Result =
+ llvm::MemoryBuffer::getFile(FilePath, DatabaseBuffer);
+ if (Result != 0) {
+ ErrorMessage = "Error while opening JSON database: " + Result.message();
+ return NULL;
+ }
+ llvm::OwningPtr<JSONCompilationDatabase> Database(
+ new JSONCompilationDatabase(DatabaseBuffer.take()));
+ if (!Database->parse(ErrorMessage))
+ return NULL;
+ return Database.take();
+}
+
+JSONCompilationDatabase *
+JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
+ std::string &ErrorMessage) {
+ llvm::OwningPtr<llvm::MemoryBuffer> DatabaseBuffer(
+ llvm::MemoryBuffer::getMemBuffer(DatabaseString));
+ llvm::OwningPtr<JSONCompilationDatabase> Database(
+ new JSONCompilationDatabase(DatabaseBuffer.take()));
+ if (!Database->parse(ErrorMessage))
+ return NULL;
+ return Database.take();
+}
+
+std::vector<CompileCommand>
+JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
+ llvm::SmallString<128> NativeFilePath;
+ llvm::sys::path::native(FilePath, NativeFilePath);
+ std::vector<StringRef> PossibleMatches;
+ std::string Error;
+ llvm::raw_string_ostream ES(Error);
+ StringRef Match = MatchTrie.findEquivalent(NativeFilePath.str(), ES);
+ if (Match.empty()) {
+ if (Error.empty())
+ Error = "No match found.";
+ llvm::outs() << Error << "\n";
+ return std::vector<CompileCommand>();
+ }
+ llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefI = IndexByFile.find(Match);
+ if (CommandsRefI == IndexByFile.end())
+ return std::vector<CompileCommand>();
+ const std::vector<CompileCommandRef> &CommandsRef = CommandsRefI->getValue();
+ std::vector<CompileCommand> Commands;
+ for (int I = 0, E = CommandsRef.size(); I != E; ++I) {
+ llvm::SmallString<8> DirectoryStorage;
+ llvm::SmallString<1024> CommandStorage;
+ Commands.push_back(CompileCommand(
+ // FIXME: Escape correctly:
+ CommandsRef[I].first->getValue(DirectoryStorage),
+ unescapeCommandLine(CommandsRef[I].second->getValue(CommandStorage))));
+ }
+ return Commands;
+}
+
+std::vector<std::string>
+JSONCompilationDatabase::getAllFiles() const {
+ std::vector<std::string> Result;
+
+ llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefI = IndexByFile.begin();
+ const llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefEnd = IndexByFile.end();
+ for (; CommandsRefI != CommandsRefEnd; ++CommandsRefI) {
+ Result.push_back(CommandsRefI->first().str());
+ }
+
+ return Result;
+}
+
+bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
+ llvm::yaml::document_iterator I = YAMLStream.begin();
+ if (I == YAMLStream.end()) {
+ ErrorMessage = "Error while parsing YAML.";
+ return false;
+ }
+ llvm::yaml::Node *Root = I->getRoot();
+ if (Root == NULL) {
+ ErrorMessage = "Error while parsing YAML.";
+ return false;
+ }
+ llvm::yaml::SequenceNode *Array =
+ llvm::dyn_cast<llvm::yaml::SequenceNode>(Root);
+ if (Array == NULL) {
+ ErrorMessage = "Expected array.";
+ return false;
+ }
+ for (llvm::yaml::SequenceNode::iterator AI = Array->begin(),
+ AE = Array->end();
+ AI != AE; ++AI) {
+ llvm::yaml::MappingNode *Object =
+ llvm::dyn_cast<llvm::yaml::MappingNode>(&*AI);
+ if (Object == NULL) {
+ ErrorMessage = "Expected object.";
+ return false;
+ }
+ llvm::yaml::ScalarNode *Directory = NULL;
+ llvm::yaml::ScalarNode *Command = NULL;
+ llvm::yaml::ScalarNode *File = NULL;
+ for (llvm::yaml::MappingNode::iterator KVI = Object->begin(),
+ KVE = Object->end();
+ KVI != KVE; ++KVI) {
+ llvm::yaml::Node *Value = (*KVI).getValue();
+ if (Value == NULL) {
+ ErrorMessage = "Expected value.";
+ return false;
+ }
+ llvm::yaml::ScalarNode *ValueString =
+ llvm::dyn_cast<llvm::yaml::ScalarNode>(Value);
+ if (ValueString == NULL) {
+ ErrorMessage = "Expected string as value.";
+ return false;
+ }
+ llvm::yaml::ScalarNode *KeyString =
+ llvm::dyn_cast<llvm::yaml::ScalarNode>((*KVI).getKey());
+ if (KeyString == NULL) {
+ ErrorMessage = "Expected strings as key.";
+ return false;
+ }
+ llvm::SmallString<8> KeyStorage;
+ if (KeyString->getValue(KeyStorage) == "directory") {
+ Directory = ValueString;
+ } else if (KeyString->getValue(KeyStorage) == "command") {
+ Command = ValueString;
+ } else if (KeyString->getValue(KeyStorage) == "file") {
+ File = ValueString;
+ } else {
+ ErrorMessage = ("Unknown key: \"" +
+ KeyString->getRawValue() + "\"").str();
+ return false;
+ }
+ }
+ if (!File) {
+ ErrorMessage = "Missing key: \"file\".";
+ return false;
+ }
+ if (!Command) {
+ ErrorMessage = "Missing key: \"command\".";
+ return false;
+ }
+ if (!Directory) {
+ ErrorMessage = "Missing key: \"directory\".";
+ return false;
+ }
+ llvm::SmallString<8> FileStorage;
+ StringRef FileName = File->getValue(FileStorage);
+ llvm::SmallString<128> NativeFilePath;
+ if (llvm::sys::path::is_relative(FileName)) {
+ llvm::SmallString<8> DirectoryStorage;
+ llvm::SmallString<128> AbsolutePath(
+ Directory->getValue(DirectoryStorage));
+ llvm::sys::path::append(AbsolutePath, FileName);
+ llvm::sys::path::native(AbsolutePath.str(), NativeFilePath);
+ } else {
+ llvm::sys::path::native(FileName, NativeFilePath);
+ }
+ IndexByFile[NativeFilePath].push_back(
+ CompileCommandRef(Directory, Command));
+ MatchTrie.insert(NativeFilePath.str());
+ }
+ return true;
+}
+
+} // end namespace tooling
+} // end namespace clang
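The parse() loop above now also absolutizes relative "file" entries against their "directory" and feeds every native path into the MatchTrie. A hedged sketch of loading a database from memory (the JSON content is illustrative):

    #include "clang/Tooling/JSONCompilationDatabase.h"
    #include "llvm/ADT/OwningPtr.h"
    #include <string>
    #include <vector>

    void demo() {
      std::string Err;
      const char *JSON =
          "[{\"directory\":\"/tmp\","
          "\"command\":\"clang++ -c foo.cc\","
          "\"file\":\"/tmp/foo.cc\"}]";
      llvm::OwningPtr<clang::tooling::JSONCompilationDatabase> DB(
          clang::tooling::JSONCompilationDatabase::loadFromBuffer(JSON, Err));
      if (DB) {
        // One entry: Directory == "/tmp", CommandLine == {"clang++", "-c",
        // "foo.cc"} after unescaping.
        std::vector<clang::tooling::CompileCommand> Cmds =
            DB->getCompileCommands("/tmp/foo.cc");
      }
    }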
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp b/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp
index 6284353..c5002ef 100644
--- a/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp
+++ b/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Lex/Lexer.h"
-#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Tooling/Refactoring.h"
#include "llvm/Support/raw_os_ostream.h"
@@ -164,12 +164,11 @@ Replacements &RefactoringTool::getReplacements() { return Replace; }
int RefactoringTool::run(FrontendActionFactory *ActionFactory) {
int Result = Tool.run(ActionFactory);
LangOptions DefaultLangOptions;
- DiagnosticOptions DefaultDiagnosticOptions;
- TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(),
- DefaultDiagnosticOptions);
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(), &*DiagOpts);
DiagnosticsEngine Diagnostics(
llvm::IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()),
- &DiagnosticPrinter, false);
+ &*DiagOpts, &DiagnosticPrinter, false);
SourceManager Sources(Diagnostics, Tool.getFiles());
Rewriter Rewrite(Sources, DefaultLangOptions);
if (!applyAllReplacements(Replace, Rewrite)) {
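The same mechanical change recurs in Tooling.cpp, cc1_main.cpp, cc1as_main.cpp and driver.cpp below: DiagnosticOptions is now IntrusiveRefCntPtr-managed and must be handed to both the diagnostic client and the DiagnosticsEngine. The pattern, condensed (a sketch of the post-change API, not a complete program):

    IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
    TextDiagnosticPrinter Printer(llvm::errs(), &*DiagOpts);
    DiagnosticsEngine Diags(
        IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()),
        &*DiagOpts, &Printer, /*ShouldOwnClient=*/false);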
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
index e93e0c9..af20254 100644
--- a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
+++ b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
@@ -97,17 +97,22 @@ static clang::CompilerInvocation *newInvocation(
bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
const Twine &FileName) {
+ return runToolOnCodeWithArgs(
+ ToolAction, Code, std::vector<std::string>(), FileName);
+}
+
+bool runToolOnCodeWithArgs(clang::FrontendAction *ToolAction, const Twine &Code,
+ const std::vector<std::string> &Args,
+ const Twine &FileName) {
SmallString<16> FileNameStorage;
StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
- const char *const CommandLine[] = {
- "clang-tool", "-fsyntax-only", FileNameRef.data()
- };
+ std::vector<std::string> Commands;
+ Commands.push_back("clang-tool");
+ Commands.push_back("-fsyntax-only");
+ Commands.insert(Commands.end(), Args.begin(), Args.end());
+ Commands.push_back(FileNameRef.data());
FileManager Files((FileSystemOptions()));
- ToolInvocation Invocation(
- std::vector<std::string>(
- CommandLine,
- CommandLine + llvm::array_lengthof(CommandLine)),
- ToolAction, &Files);
+ ToolInvocation Invocation(Commands, ToolAction, &Files);
SmallString<1024> CodeStorage;
Invocation.mapVirtualFile(FileNameRef,
@@ -154,11 +159,12 @@ bool ToolInvocation::run() {
for (int I = 0, E = CommandLine.size(); I != E; ++I)
Argv.push_back(CommandLine[I].c_str());
const char *const BinaryName = Argv[0];
- DiagnosticOptions DefaultDiagnosticOptions;
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
TextDiagnosticPrinter DiagnosticPrinter(
- llvm::errs(), DefaultDiagnosticOptions);
- DiagnosticsEngine Diagnostics(llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs>(
- new DiagnosticIDs()), &DiagnosticPrinter, false);
+ llvm::errs(), &*DiagOpts);
+ DiagnosticsEngine Diagnostics(
+ llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs>(new DiagnosticIDs()),
+ &*DiagOpts, &DiagnosticPrinter, false);
const llvm::OwningPtr<clang::driver::Driver> Driver(
newDriver(&Diagnostics, BinaryName));
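The new runToolOnCodeWithArgs() entry point lets tests inject extra flags without building a compilation database. A minimal sketch, assuming SyntaxOnlyAction from clang/Frontend/FrontendActions.h (the flag and file name are invented):

    #include "clang/Frontend/FrontendActions.h"
    #include "clang/Tooling/Tooling.h"
    #include <string>
    #include <vector>

    bool demo() {
      std::vector<std::string> Args;
      Args.push_back("-std=c++11");
      // Runs "clang-tool -fsyntax-only -std=c++11 input.cc" over in-memory code.
      return clang::tooling::runToolOnCodeWithArgs(
          new clang::SyntaxOnlyAction, "auto x = 0;", Args, "input.cc");
    }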
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
index f8e8a6b..f196856 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
@@ -47,86 +47,11 @@ static void LLVMErrorHandler(void *UserData, const std::string &Message) {
exit(1);
}
-// FIXME: Define the need for this testing away.
-static int cc1_test(DiagnosticsEngine &Diags,
- const char **ArgBegin, const char **ArgEnd) {
- using namespace clang::driver;
-
- llvm::errs() << "cc1 argv:";
- for (const char **i = ArgBegin; i != ArgEnd; ++i)
- llvm::errs() << " \"" << *i << '"';
- llvm::errs() << "\n";
-
- // Parse the arguments.
- OptTable *Opts = createDriverOptTable();
- unsigned MissingArgIndex, MissingArgCount;
- InputArgList *Args = Opts->ParseArgs(ArgBegin, ArgEnd,
- MissingArgIndex, MissingArgCount);
-
- // Check for missing argument error.
- if (MissingArgCount)
- Diags.Report(clang::diag::err_drv_missing_argument)
- << Args->getArgString(MissingArgIndex) << MissingArgCount;
-
- // Dump the parsed arguments.
- llvm::errs() << "cc1 parsed options:\n";
- for (ArgList::const_iterator it = Args->begin(), ie = Args->end();
- it != ie; ++it)
- (*it)->dump();
-
- // Create a compiler invocation.
- llvm::errs() << "cc1 creating invocation.\n";
- CompilerInvocation Invocation;
- if (!CompilerInvocation::CreateFromArgs(Invocation, ArgBegin, ArgEnd, Diags))
- return 1;
-
- // Convert the invocation back to argument strings.
- std::vector<std::string> InvocationArgs;
- Invocation.toArgs(InvocationArgs);
-
- // Dump the converted arguments.
- SmallVector<const char*, 32> Invocation2Args;
- llvm::errs() << "invocation argv :";
- for (unsigned i = 0, e = InvocationArgs.size(); i != e; ++i) {
- Invocation2Args.push_back(InvocationArgs[i].c_str());
- llvm::errs() << " \"" << InvocationArgs[i] << '"';
- }
- llvm::errs() << "\n";
-
- // Convert those arguments to another invocation, and check that we got the
- // same thing.
- CompilerInvocation Invocation2;
- if (!CompilerInvocation::CreateFromArgs(Invocation2, Invocation2Args.begin(),
- Invocation2Args.end(), Diags))
- return 1;
-
- // FIXME: Implement CompilerInvocation comparison.
- if (true) {
- //llvm::errs() << "warning: Invocations differ!\n";
-
- std::vector<std::string> Invocation2Args;
- Invocation2.toArgs(Invocation2Args);
- llvm::errs() << "invocation2 argv:";
- for (unsigned i = 0, e = Invocation2Args.size(); i != e; ++i)
- llvm::errs() << " \"" << Invocation2Args[i] << '"';
- llvm::errs() << "\n";
- }
-
- return 0;
-}
-
int cc1_main(const char **ArgBegin, const char **ArgEnd,
const char *Argv0, void *MainAddr) {
OwningPtr<CompilerInstance> Clang(new CompilerInstance());
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- // Run clang -cc1 test.
- if (ArgBegin != ArgEnd && StringRef(ArgBegin[0]) == "-cc1test") {
- DiagnosticsEngine Diags(DiagID, new TextDiagnosticPrinter(llvm::errs(),
- DiagnosticOptions()));
- return cc1_test(Diags, ArgBegin + 1, ArgEnd);
- }
-
// Initialize targets first, so that --version shows registered targets.
llvm::InitializeAllTargets();
llvm::InitializeAllTargetMCs();
@@ -135,8 +60,9 @@ int cc1_main(const char **ArgBegin, const char **ArgEnd,
// Buffer diagnostics from argument parsing so that we can output them using a
// well formed diagnostic object.
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
- DiagnosticsEngine Diags(DiagID, DiagsBuffer);
+ DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagsBuffer);
bool Success;
Success = CompilerInvocation::CreateFromArgs(Clang->getInvocation(),
ArgBegin, ArgEnd, Diags);
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
index 5502a35..5587e40 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
@@ -19,9 +19,9 @@
#include "clang/Driver/CC1AsOptions.h"
#include "clang/Driver/OptTable.h"
#include "clang/Driver/Options.h"
-#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
@@ -51,7 +51,7 @@
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace clang::driver;
using namespace llvm;
@@ -189,7 +189,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
ie = Args->filtered_end(); it != ie; ++it, First=false) {
const Arg *A = it;
if (First)
- Opts.InputFile = A->getValue(*Args);
+ Opts.InputFile = A->getValue();
else {
Diags.Report(diag::err_drv_unknown_argument) << A->getAsString(*Args);
Success = false;
@@ -201,7 +201,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
Opts.LLVMArgs.push_back("-fatal-assembler-warnings");
Opts.OutputPath = Args->getLastArgValue(OPT_o);
if (Arg *A = Args->getLastArg(OPT_filetype)) {
- StringRef Name = A->getValue(*Args);
+ StringRef Name = A->getValue();
unsigned OutputType = StringSwitch<unsigned>(Name)
.Case("asm", FT_Asm)
.Case("null", FT_Null)
@@ -394,11 +394,12 @@ int cc1as_main(const char **ArgBegin, const char **ArgEnd,
InitializeAllAsmParsers();
// Construct our diagnostic client.
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
TextDiagnosticPrinter *DiagClient
- = new TextDiagnosticPrinter(errs(), DiagnosticOptions());
+ = new TextDiagnosticPrinter(errs(), &*DiagOpts);
DiagClient->setPrefix("clang -cc1as");
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- DiagnosticsEngine Diags(DiagID, DiagClient);
+ DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagClient);
// Set an error handler, so that any LLVM backend diagnostics go through our
// error handler.
diff --git a/contrib/llvm/tools/clang/tools/driver/driver.cpp b/contrib/llvm/tools/clang/tools/driver/driver.cpp
index 12a9329..81979ec 100644
--- a/contrib/llvm/tools/clang/tools/driver/driver.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/driver.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Compilation.h"
@@ -19,7 +20,6 @@
#include "clang/Driver/Option.h"
#include "clang/Driver/OptTable.h"
#include "clang/Frontend/CompilerInvocation.h"
-#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/Utils.h"
@@ -375,7 +375,7 @@ int main(int argc_, const char **argv_) {
llvm::sys::Path Path = GetExecutablePath(argv[0], CanonicalPrefixes);
- DiagnosticOptions DiagOpts;
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions;
{
// Note that ParseDiagnosticArgs() uses the cc1 option table.
OwningPtr<OptTable> CC1Opts(createDriverOptTable());
@@ -385,17 +385,17 @@ int main(int argc_, const char **argv_) {
// We ignore MissingArgCount and the return value of ParseDiagnosticArgs.
// Any errors that would be diagnosed here will also be diagnosed later,
// when the DiagnosticsEngine actually exists.
- (void) ParseDiagnosticArgs(DiagOpts, *Args);
+ (void) ParseDiagnosticArgs(*DiagOpts, *Args);
}
// Now we can create the DiagnosticsEngine with a properly-filled-out
// DiagnosticOptions instance.
TextDiagnosticPrinter *DiagClient
- = new TextDiagnosticPrinter(llvm::errs(), DiagOpts);
+ = new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
DiagClient->setPrefix(llvm::sys::path::stem(Path.str()));
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- DiagnosticsEngine Diags(DiagID, DiagClient);
- ProcessWarningOptions(Diags, DiagOpts);
+ DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagClient);
+ ProcessWarningOptions(Diags, *DiagOpts);
#ifdef CLANG_IS_PRODUCTION
const bool IsProduction = true;
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
index ef1ad3e..521f604 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -33,7 +33,7 @@ getValueAsListOfStrings(Record &R, StringRef FieldName) {
i != e;
++i) {
assert(*i && "Got a null element in a ListInit");
- if (StringInit *S = dynamic_cast<StringInit *>(*i))
+ if (StringInit *S = dyn_cast<StringInit>(*i))
Strings.push_back(S->getValue());
else
assert(false && "Got a non-string, non-code element in a ListInit");
@@ -743,8 +743,6 @@ void EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
OS << " static bool classof(const Attr *A) { return A->getKind() == "
<< "attr::" << R.getName() << "; }\n";
- OS << " static bool classof(const " << R.getName()
- << "Attr *) { return true; }\n";
bool LateParsed = R.getValueAsBit("LateParsed");
OS << " virtual bool isLateParsed() const { return "
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
new file mode 100644
index 0000000..36fbcd4
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
@@ -0,0 +1,72 @@
+//===--- ClangCommentCommandInfoEmitter.cpp - Generate command lists -----====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits command lists and efficient matchers for
+// command names that are used in documentation comments.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringMatcher.h"
+#include <vector>
+
+using namespace llvm;
+
+namespace clang {
+void EmitClangCommentCommandInfo(RecordKeeper &Records, raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "namespace {\n"
+ "const CommandInfo Commands[] = {\n";
+ std::vector<Record *> Tags = Records.getAllDerivedDefinitions("Command");
+ for (size_t i = 0, e = Tags.size(); i != e; ++i) {
+ Record &Tag = *Tags[i];
+ OS << " { "
+ << "\"" << Tag.getValueAsString("Name") << "\", "
+ << "\"" << Tag.getValueAsString("EndCommandName") << "\", "
+ << i << ", "
+ << Tag.getValueAsInt("NumArgs") << ", "
+ << Tag.getValueAsBit("IsInlineCommand") << ", "
+ << Tag.getValueAsBit("IsBlockCommand") << ", "
+ << Tag.getValueAsBit("IsBriefCommand") << ", "
+ << Tag.getValueAsBit("IsReturnsCommand") << ", "
+ << Tag.getValueAsBit("IsParamCommand") << ", "
+ << Tag.getValueAsBit("IsTParamCommand") << ", "
+ << Tag.getValueAsBit("IsDeprecatedCommand") << ", "
+ << Tag.getValueAsBit("IsEmptyParagraphAllowed") << ", "
+ << Tag.getValueAsBit("IsVerbatimBlockCommand") << ", "
+ << Tag.getValueAsBit("IsVerbatimBlockEndCommand") << ", "
+ << Tag.getValueAsBit("IsVerbatimLineCommand") << ", "
+ << Tag.getValueAsBit("IsDeclarationCommand") << ", "
+ << /* IsUnknownCommand = */ "0"
+ << " }";
+ if (i + 1 != e)
+ OS << ",";
+ OS << "\n";
+ }
+ OS << "};\n"
+ "} // unnamed namespace\n\n";
+
+ std::vector<StringMatcher::StringPair> Matches;
+ for (size_t i = 0, e = Tags.size(); i != e; ++i) {
+ Record &Tag = *Tags[i];
+ std::string Name = Tag.getValueAsString("Name");
+ std::string Return;
+ raw_string_ostream(Return) << "return &Commands[" << i << "];";
+ Matches.push_back(StringMatcher::StringPair(Name, Return));
+ }
+
+ OS << "const CommandInfo *CommandTraits::getBuiltinCommandInfo(\n"
+ << " StringRef Name) {\n";
+ StringMatcher("Name", Matches, OS).Emit();
+ OS << " return NULL;\n"
+ << "}\n\n";
+}
+} // end namespace clang
+
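This emitter and the HTML-tag emitter that follows both lean on llvm::StringMatcher, which expands a list of (string, code) pairs into a nested switch over the subject string's length and characters. A self-contained sketch of the mechanism (not part of the patch):

    #include "llvm/Support/raw_ostream.h"
    #include "llvm/TableGen/StringMatcher.h"
    #include <vector>

    int main() {
      std::vector<llvm::StringMatcher::StringPair> Matches;
      Matches.push_back(
          llvm::StringMatcher::StringPair("brief", "return true;"));
      Matches.push_back(
          llvm::StringMatcher::StringPair("param", "return true;"));
      // Emits C++ that matches the variable 'Name' against both strings.
      llvm::StringMatcher("Name", Matches, llvm::outs()).Emit();
      return 0;
    }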
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp
new file mode 100644
index 0000000..0ae23b2
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp
@@ -0,0 +1,69 @@
+//===--- ClangCommentHTMLTagsEmitter.cpp - Generate HTML tag list for Clang -=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits efficient matchers for HTML tags that are used
+// in documentation comments.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringMatcher.h"
+#include <vector>
+
+using namespace llvm;
+
+namespace clang {
+void EmitClangCommentHTMLTags(RecordKeeper &Records, raw_ostream &OS) {
+ std::vector<Record *> Tags = Records.getAllDerivedDefinitions("Tag");
+ std::vector<StringMatcher::StringPair> Matches;
+ for (std::vector<Record *>::iterator I = Tags.begin(), E = Tags.end();
+ I != E; ++I) {
+ Record &Tag = **I;
+ std::string Spelling = Tag.getValueAsString("Spelling");
+ Matches.push_back(StringMatcher::StringPair(Spelling, "return true;"));
+ }
+
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "bool isHTMLTagName(StringRef Name) {\n";
+ StringMatcher("Name", Matches, OS).Emit();
+ OS << " return false;\n"
+ << "}\n\n";
+}
+
+void EmitClangCommentHTMLTagsProperties(RecordKeeper &Records,
+ raw_ostream &OS) {
+ std::vector<Record *> Tags = Records.getAllDerivedDefinitions("Tag");
+ std::vector<StringMatcher::StringPair> MatchesEndTagOptional;
+ std::vector<StringMatcher::StringPair> MatchesEndTagForbidden;
+ for (std::vector<Record *>::iterator I = Tags.begin(), E = Tags.end();
+ I != E; ++I) {
+ Record &Tag = **I;
+ std::string Spelling = Tag.getValueAsString("Spelling");
+ StringMatcher::StringPair Match(Spelling, "return true;");
+ if (Tag.getValueAsBit("EndTagOptional"))
+ MatchesEndTagOptional.push_back(Match);
+ if (Tag.getValueAsBit("EndTagForbidden"))
+ MatchesEndTagForbidden.push_back(Match);
+ }
+
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "bool isHTMLEndTagOptional(StringRef Name) {\n";
+ StringMatcher("Name", MatchesEndTagOptional, OS).Emit();
+ OS << " return false;\n"
+ << "}\n\n";
+
+ OS << "bool isHTMLEndTagForbidden(StringRef Name) {\n";
+ StringMatcher("Name", MatchesEndTagForbidden, OS).Emit();
+ OS << " return false;\n"
+ << "}\n\n";
+}
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 8615d2d..b1472a8 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
@@ -75,7 +76,7 @@ getCategoryFromDiagGroup(const Record *Group,
static std::string getDiagnosticCategory(const Record *R,
DiagGroupParentMap &DiagGroupParents) {
// If the diagnostic is in a group, and that group has a category, use it.
- if (DefInit *Group = dynamic_cast<DefInit*>(R->getValueInit("Group"))) {
+ if (DefInit *Group = dyn_cast<DefInit>(R->getValueInit("Group"))) {
// Check the diagnostic's diag group for a category.
std::string CatName = getCategoryFromDiagGroup(Group->getDef(),
DiagGroupParents);
@@ -136,7 +137,7 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
std::map<std::string, GroupInfo> &DiagsInGroup) {
for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
const Record *R = Diags[i];
- DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group"));
+ DefInit *DI = dyn_cast<DefInit>(R->getValueInit("Group"));
if (DI == 0) continue;
assert(R->getValueAsDef("Class")->getName() != "CLASS_NOTE" &&
"Note can't be in a DiagGroup");
@@ -280,7 +281,7 @@ void InferPedantic::compute(VecOrSet DiagsInPedantic,
Record *R = Diags[i];
if (isExtension(R) && isOffByDefault(R)) {
DiagsSet.insert(R);
- if (DefInit *Group = dynamic_cast<DefInit*>(R->getValueInit("Group"))) {
+ if (DefInit *Group = dyn_cast<DefInit>(R->getValueInit("Group"))) {
const Record *GroupRec = Group->getDef();
if (!isSubGroupOfGroup(GroupRec, "pedantic")) {
markGroup(GroupRec);
@@ -299,7 +300,7 @@ void InferPedantic::compute(VecOrSet DiagsInPedantic,
// Check if the group is implicitly in -Wpedantic. If so,
// the diagnostic should not be directly included in the -Wpedantic
// diagnostic group.
- if (DefInit *Group = dynamic_cast<DefInit*>(R->getValueInit("Group")))
+ if (DefInit *Group = dyn_cast<DefInit>(R->getValueInit("Group")))
if (groupInPedantic(Group->getDef()))
continue;
@@ -391,11 +392,11 @@ void EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
// Check if this is an error that is accidentally in a warning
// group.
if (isError(R)) {
- if (DefInit *Group = dynamic_cast<DefInit*>(R.getValueInit("Group"))) {
+ if (DefInit *Group = dyn_cast<DefInit>(R.getValueInit("Group"))) {
const Record *GroupRec = Group->getDef();
const std::string &GroupName = GroupRec->getValueAsString("GroupName");
- throw "Error " + R.getName() + " cannot be in a warning group [" +
- GroupName + "]";
+ PrintFatalError(R.getLoc(), "Error " + R.getName() +
+ " cannot be in a warning group [" + GroupName + "]");
}
}
@@ -413,7 +414,7 @@ void EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
// Warning associated with the diagnostic. This is stored as an index into
// the alphabetically sorted warning table.
- if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group"))) {
+ if (DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group"))) {
std::map<std::string, GroupInfo>::iterator I =
DiagsInGroup.find(DI->getDef()->getValueAsString("GroupName"));
assert(I != DiagsInGroup.end());
@@ -556,7 +557,8 @@ void EmitClangDiagGroups(RecordKeeper &Records, raw_ostream &OS) {
if (I->first.find_first_not_of("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#$%^*-+=:?")!=std::string::npos)
- throw "Invalid character in diagnostic group '" + I->first + "'";
+ PrintFatalError("Invalid character in diagnostic group '" +
+ I->first + "'");
OS.write_escaped(I->first) << "\","
<< std::string(MaxLen-I->first.size()+1, ' ');
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp
index 5a0db50..8c74064 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseSet.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <map>
@@ -28,7 +29,7 @@ static bool isHidden(const Record &R) {
if (R.getValueAsBit("Hidden"))
return true;
// Not declared as hidden, check the parent package if it is hidden.
- if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("ParentPackage")))
+ if (DefInit *DI = dyn_cast<DefInit>(R.getValueInit("ParentPackage")))
return isHidden(*DI->getDef());
return false;
@@ -42,7 +43,7 @@ static std::string getPackageFullName(const Record *R);
static std::string getParentPackageFullName(const Record *R) {
std::string name;
- if (DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("ParentPackage")))
+ if (DefInit *DI = dyn_cast<DefInit>(R->getValueInit("ParentPackage")))
name = getPackageFullName(DI->getDef());
return name;
}
@@ -63,8 +64,7 @@ static std::string getCheckerFullName(const Record *R) {
}
static std::string getStringValue(const Record &R, StringRef field) {
- if (StringInit *
- SI = dynamic_cast<StringInit*>(R.getValueInit(field)))
+ if (StringInit *SI = dyn_cast<StringInit>(R.getValueInit(field)))
return SI->getValue();
return std::string();
}
@@ -131,10 +131,11 @@ void EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS) {
Record *R = checkers[i];
Record *package = 0;
if (DefInit *
- DI = dynamic_cast<DefInit*>(R->getValueInit("ParentPackage")))
+ DI = dyn_cast<DefInit>(R->getValueInit("ParentPackage")))
package = DI->getDef();
if (!isCheckerNamed(R) && !package)
- throw "Checker '" + R->getName() + "' is neither named, nor in a package!";
+ PrintFatalError(R->getLoc(), "Checker '" + R->getName() +
+ "' is neither named, nor in a package!");
if (isCheckerNamed(R)) {
// Create a pseudo-group to hold this checker.
@@ -151,20 +152,20 @@ void EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS) {
// Insert the checker and its parent packages into the subgroups set of
// the corresponding parent package.
while (DefInit *DI
- = dynamic_cast<DefInit*>(currR->getValueInit("ParentPackage"))) {
+ = dyn_cast<DefInit>(currR->getValueInit("ParentPackage"))) {
Record *parentPackage = DI->getDef();
recordGroupMap[parentPackage]->SubGroups.insert(currR);
currR = parentPackage;
}
// Insert the checker into the set of its group.
- if (DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group")))
+ if (DefInit *DI = dyn_cast<DefInit>(R->getValueInit("Group")))
recordGroupMap[DI->getDef()]->Checkers.insert(R);
}
// If a package is in group, add all its checkers and its sub-packages
// checkers into the group.
for (unsigned i = 0, e = packages.size(); i != e; ++i)
- if (DefInit *DI = dynamic_cast<DefInit*>(packages[i]->getValueInit("Group")))
+ if (DefInit *DI = dyn_cast<DefInit>(packages[i]->getValueInit("Group")))
addPackageToCheckerGroup(packages[i], DI->getDef(), recordGroupMap);
typedef std::map<std::string, const Record *> SortedRecords;
@@ -205,7 +206,7 @@ void EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS) {
OS << "PACKAGE(" << "\"";
OS.write_escaped(getPackageFullName(&R)) << "\", ";
// Group index
- if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ if (DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group")))
OS << groupToSortIndex[DI->getDef()] << ", ";
else
OS << "-1, ";
@@ -233,7 +234,7 @@ void EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS) {
OS << "\"";
OS.write_escaped(getStringValue(R, "HelpText")) << "\", ";
// Group index
- if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ if (DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group")))
OS << groupToSortIndex[DI->getDef()] << ", ";
else
OS << "-1, ";
diff --git a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
index 6837306..d453ede 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
@@ -245,7 +245,7 @@ static void ParseTypes(Record *r, std::string &s,
case 'f':
break;
default:
- throw TGError(r->getLoc(),
+ PrintFatalError(r->getLoc(),
"Unexpected letter: " + std::string(data + len, 1));
}
TV.push_back(StringRef(data, len + 1));
@@ -266,7 +266,8 @@ static char Widen(const char t) {
return 'l';
case 'h':
return 'f';
- default: throw "unhandled type in widen!";
+ default:
+ PrintFatalError("unhandled type in widen!");
}
}
@@ -282,7 +283,8 @@ static char Narrow(const char t) {
return 'i';
case 'f':
return 'h';
- default: throw "unhandled type in narrow!";
+ default:
+ PrintFatalError("unhandled type in narrow!");
}
}
@@ -453,7 +455,7 @@ static std::string TypeString(const char mod, StringRef typestr) {
s += quad ? "x4" : "x2";
break;
default:
- throw "unhandled type!";
+ PrintFatalError("unhandled type!");
}
if (mod == '2')
@@ -635,7 +637,7 @@ static std::string MangleName(const std::string &name, StringRef typestr,
}
break;
default:
- throw "unhandled type!";
+ PrintFatalError("unhandled type!");
}
if (ck == ClassB)
s += "_v";
@@ -773,7 +775,7 @@ static unsigned GetNumElements(StringRef typestr, bool &quad) {
case 'h': nElts = 4; break;
case 'f': nElts = 2; break;
default:
- throw "unhandled type!";
+ PrintFatalError("unhandled type!");
}
if (quad) nElts <<= 1;
return nElts;
@@ -1004,7 +1006,7 @@ static std::string GenOpString(OpKind op, const std::string &proto,
break;
}
default:
- throw "unknown OpKind!";
+ PrintFatalError("unknown OpKind!");
}
return s;
}
@@ -1049,7 +1051,7 @@ static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
ET = NeonTypeFlags::Float32;
break;
default:
- throw "unhandled type!";
+ PrintFatalError("unhandled type!");
}
NeonTypeFlags Flags(ET, usgn, quad && proto[1] != 'g');
return Flags.getFlags();
@@ -1381,7 +1383,7 @@ void NeonEmitter::emitIntrinsic(raw_ostream &OS, Record *R) {
if (R->getSuperClasses().size() >= 2)
classKind = ClassMap[R->getSuperClasses()[1]];
if (classKind == ClassNone && kind == OpNone)
- throw TGError(R->getLoc(), "Builtin has no class kind");
+ PrintFatalError(R->getLoc(), "Builtin has no class kind");
for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
if (kind == OpReinterpret) {
@@ -1423,7 +1425,7 @@ static unsigned RangeFromType(const char mod, StringRef typestr) {
case 'l':
return (1 << (int)quad) - 1;
default:
- throw "unhandled type!";
+ PrintFatalError("unhandled type!");
}
}
@@ -1456,7 +1458,7 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
ParseTypes(R, Types, TypeVec);
if (R->getSuperClasses().size() < 2)
- throw TGError(R->getLoc(), "Builtin has no class kind");
+ PrintFatalError(R->getLoc(), "Builtin has no class kind");
std::string name = R->getValueAsString("Name");
ClassKind ck = ClassMap[R->getSuperClasses()[1]];
@@ -1501,7 +1503,7 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
ParseTypes(R, Types, TypeVec);
if (R->getSuperClasses().size() < 2)
- throw TGError(R->getLoc(), "Builtin has no class kind");
+ PrintFatalError(R->getLoc(), "Builtin has no class kind");
int si = -1, qi = -1;
uint64_t mask = 0, qmask = 0;
@@ -1600,7 +1602,7 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
ParseTypes(R, Types, TypeVec);
if (R->getSuperClasses().size() < 2)
- throw TGError(R->getLoc(), "Builtin has no class kind");
+ PrintFatalError(R->getLoc(), "Builtin has no class kind");
ClassKind ck = ClassMap[R->getSuperClasses()[1]];
diff --git a/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp
index b0431a9..674c89a 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp
@@ -7,9 +7,15 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+
+#include <map>
+
using namespace llvm;
static int StrCmpOptionName(const char *A, const char *B) {
@@ -32,8 +38,8 @@ static int StrCmpOptionName(const char *A, const char *B) {
}
static int CompareOptionRecords(const void *Av, const void *Bv) {
- const Record *A = *(Record**) Av;
- const Record *B = *(Record**) Bv;
+ const Record *A = *(const Record*const*) Av;
+ const Record *B = *(const Record*const*) Bv;
// Sentinel options precede all others and are only ordered by precedence.
bool ASent = A->getValueAsDef("Kind")->getValueAsBit("Sentinel");
@@ -47,16 +53,38 @@ static int CompareOptionRecords(const void *Av, const void *Bv) {
B->getValueAsString("Name").c_str()))
return Cmp;
+ if (!ASent) {
+ std::vector<std::string> APrefixes = A->getValueAsListOfStrings("Prefixes");
+ std::vector<std::string> BPrefixes = B->getValueAsListOfStrings("Prefixes");
+
+ for (std::vector<std::string>::const_iterator APre = APrefixes.begin(),
+ AEPre = APrefixes.end(),
+ BPre = BPrefixes.begin(),
+ BEPre = BPrefixes.end();
+ APre != AEPre &&
+ BPre != BEPre;
+ ++APre, ++BPre) {
+ if (int Cmp = StrCmpOptionName(APre->c_str(), BPre->c_str()))
+ return Cmp;
+ }
+ }
+
// Then by the kind precedence;
int APrec = A->getValueAsDef("Kind")->getValueAsInt("Precedence");
int BPrec = B->getValueAsDef("Kind")->getValueAsInt("Precedence");
- assert(APrec != BPrec && "Options are equivalent!");
+ if (APrec == BPrec &&
+ A->getValueAsListOfStrings("Prefixes") ==
+ B->getValueAsListOfStrings("Prefixes")) {
+ PrintError(A->getLoc(), Twine("Option is equivalent to"));
+ PrintError(B->getLoc(), Twine("Other defined here"));
+ PrintFatalError("Equivalent Options found.");
+ }
return APrec < BPrec ? -1 : 1;
}
static const std::string getOptionName(const Record &R) {
// Use the record name unless EnumName is defined.
- if (dynamic_cast<UnsetInit*>(R.getValueInit("EnumName")))
+ if (isa<UnsetInit>(R.getValueInit("EnumName")))
return R.getName();
return R.getValueAsString("EnumName");
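
The casts fixed above matter because array_pod_sort hands the comparator qsort-style void pointers to the array elements; for a buffer of Record*, each argument is really a Record *const *. A hypothetical stand-alone sketch of that contract:

#include "llvm/ADT/STLExtras.h"
#include "llvm/TableGen/Record.h"
#include <vector>
using namespace llvm;

// Av and Bv point at the Record* slots themselves, so unwrap them with a
// const-preserving double-pointer cast before comparing.
static int CompareByRecordName(const void *Av, const void *Bv) {
  const Record *A = *(const Record *const *)Av;
  const Record *B = *(const Record *const *)Bv;
  return A->getName().compare(B->getName());
}

static void SortByName(std::vector<const Record *> &Opts) {
  array_pod_sort(Opts.begin(), Opts.end(), CompareByRecordName);
}
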
@@ -86,6 +114,48 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
array_pod_sort(Opts.begin(), Opts.end(), CompareOptionRecords);
if (GenDefs) {
+ // Generate prefix groups.
+ typedef SmallVector<SmallString<2>, 2> PrefixKeyT;
+ typedef std::map<PrefixKeyT, std::string> PrefixesT;
+ PrefixesT Prefixes;
+ Prefixes.insert(std::make_pair(PrefixKeyT(), "prefix_0"));
+ unsigned CurPrefix = 0;
+ for (unsigned i = 0, e = Opts.size(); i != e; ++i) {
+ const Record &R = *Opts[i];
+ std::vector<std::string> prf = R.getValueAsListOfStrings("Prefixes");
+ PrefixKeyT prfkey(prf.begin(), prf.end());
+ unsigned NewPrefix = CurPrefix + 1;
+ if (Prefixes.insert(std::make_pair(prfkey, (Twine("prefix_") +
+ Twine(NewPrefix)).str())).second)
+ CurPrefix = NewPrefix;
+ }
+
+ OS << "#ifndef PREFIX\n";
+ OS << "#error \"Define PREFIX prior to including this file!\"\n";
+ OS << "#endif\n\n";
+
+ // Dump prefixes.
+ OS << "/////////\n";
+ OS << "// Prefixes\n\n";
+ OS << "#define COMMA ,\n";
+ for (PrefixesT::const_iterator I = Prefixes.begin(), E = Prefixes.end();
+ I != E; ++I) {
+ OS << "PREFIX(";
+
+ // Prefix name.
+ OS << I->second;
+
+ // Prefix values.
+ OS << ", {";
+ for (PrefixKeyT::const_iterator PI = I->first.begin(),
+ PE = I->first.end(); PI != PE; ++PI) {
+ OS << "\"" << *PI << "\" COMMA ";
+ }
+ OS << "0})\n";
+ }
+ OS << "#undef COMMA\n";
+ OS << "\n";
+
OS << "#ifndef OPTION\n";
OS << "#error \"Define OPTION prior to including this file!\"\n";
OS << "#endif\n\n";
@@ -98,8 +168,11 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
// Start a single option entry.
OS << "OPTION(";
+ // The option prefix.
+ OS << "0";
+
// The option string.
- OS << '"' << R.getValueAsString("Name") << '"';
+ OS << ", \"" << R.getValueAsString("Name") << '"';
// The option identifier name.
OS << ", "<< getOptionName(R);
@@ -109,7 +182,7 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
// The containing option group (if any).
OS << ", ";
- if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group")))
OS << getOptionName(*DI->getDef());
else
OS << "INVALID";
@@ -118,7 +191,7 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
OS << ", INVALID, 0, 0";
// The option help text.
- if (!dynamic_cast<UnsetInit*>(R.getValueInit("HelpText"))) {
+ if (!isa<UnsetInit>(R.getValueInit("HelpText"))) {
OS << ",\n";
OS << " ";
write_cstring(OS, R.getValueAsString("HelpText"));
@@ -138,6 +211,10 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
// Start a single option entry.
OS << "OPTION(";
+ // The option prefix.
+ std::vector<std::string> prf = R.getValueAsListOfStrings("Prefixes");
+ OS << Prefixes[PrefixKeyT(prf.begin(), prf.end())] << ", ";
+
// The option string.
write_cstring(OS, R.getValueAsString("Name"));
@@ -149,14 +226,14 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
// The containing option group (if any).
OS << ", ";
- if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group")))
OS << getOptionName(*DI->getDef());
else
OS << "INVALID";
// The option alias (if any).
OS << ", ";
- if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Alias")))
+ if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Alias")))
OS << getOptionName(*DI->getDef());
else
OS << "INVALID";
@@ -170,7 +247,7 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
for (unsigned i = 0, e = LI->size(); i != e; ++i) {
if (i)
OS << " | ";
- OS << dynamic_cast<DefInit*>(LI->getElement(i))->getDef()->getName();
+ OS << cast<DefInit>(LI->getElement(i))->getDef()->getName();
}
}
@@ -178,7 +255,7 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
OS << ", " << R.getValueAsInt("NumArgs");
// The option help text.
- if (!dynamic_cast<UnsetInit*>(R.getValueInit("HelpText"))) {
+ if (!isa<UnsetInit>(R.getValueInit("HelpText"))) {
OS << ",\n";
OS << " ";
write_cstring(OS, R.getValueAsString("HelpText"));
@@ -187,7 +264,7 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
// The option meta-variable name.
OS << ", ";
- if (!dynamic_cast<UnsetInit*>(R.getValueInit("MetaVarName")))
+ if (!isa<UnsetInit>(R.getValueInit("MetaVarName")))
write_cstring(OS, R.getValueAsString("MetaVarName"));
else
OS << "0";
diff --git a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
index d3408ed..41471a4 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
@@ -19,7 +19,6 @@
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"
-#include "llvm/TableGen/TableGenAction.h"
using namespace llvm;
using namespace clang;
@@ -42,6 +41,9 @@ enum ActionType {
GenClangDeclNodes,
GenClangStmtNodes,
GenClangSACheckers,
+ GenClangCommentHTMLTags,
+ GenClangCommentHTMLTagsProperties,
+ GenClangCommentCommandInfo,
GenOptParserDefs, GenOptParserImpl,
GenArmNeon,
GenArmNeonSema,
@@ -95,6 +97,18 @@ namespace {
"Generate Clang AST statement nodes"),
clEnumValN(GenClangSACheckers, "gen-clang-sa-checkers",
"Generate Clang Static Analyzer checkers"),
+ clEnumValN(GenClangCommentHTMLTags,
+ "gen-clang-comment-html-tags",
+ "Generate efficient matchers for HTML tag "
+ "names that are used in documentation comments"),
+ clEnumValN(GenClangCommentHTMLTagsProperties,
+ "gen-clang-comment-html-tags-properties",
+ "Generate efficient matchers for HTML tag "
+ "properties"),
+ clEnumValN(GenClangCommentCommandInfo,
+ "gen-clang-comment-command-info",
+ "Generate list of commands that are used in "
+ "documentation comments"),
clEnumValN(GenArmNeon, "gen-arm-neon",
"Generate arm_neon.h for clang"),
clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
@@ -108,82 +122,88 @@ namespace {
cl::desc("Only use warnings from specified component"),
cl::value_desc("component"), cl::Hidden);
-class ClangTableGenAction : public TableGenAction {
-public:
- bool operator()(raw_ostream &OS, RecordKeeper &Records) {
- switch (Action) {
- case GenClangAttrClasses:
- EmitClangAttrClass(Records, OS);
- break;
- case GenClangAttrImpl:
- EmitClangAttrImpl(Records, OS);
- break;
- case GenClangAttrList:
- EmitClangAttrList(Records, OS);
- break;
- case GenClangAttrPCHRead:
- EmitClangAttrPCHRead(Records, OS);
- break;
- case GenClangAttrPCHWrite:
- EmitClangAttrPCHWrite(Records, OS);
- break;
- case GenClangAttrSpellingList:
- EmitClangAttrSpellingList(Records, OS);
- break;
- case GenClangAttrLateParsedList:
- EmitClangAttrLateParsedList(Records, OS);
- break;
- case GenClangAttrTemplateInstantiate:
- EmitClangAttrTemplateInstantiate(Records, OS);
- break;
- case GenClangAttrParsedAttrList:
- EmitClangAttrParsedAttrList(Records, OS);
- break;
- case GenClangAttrParsedAttrKinds:
- EmitClangAttrParsedAttrKinds(Records, OS);
- break;
- case GenClangDiagsDefs:
- EmitClangDiagsDefs(Records, OS, ClangComponent);
- break;
- case GenClangDiagGroups:
- EmitClangDiagGroups(Records, OS);
- break;
- case GenClangDiagsIndexName:
- EmitClangDiagsIndexName(Records, OS);
- break;
- case GenClangCommentNodes:
- EmitClangASTNodes(Records, OS, "Comment", "");
- break;
- case GenClangDeclNodes:
- EmitClangASTNodes(Records, OS, "Decl", "Decl");
- EmitClangDeclContext(Records, OS);
- break;
- case GenClangStmtNodes:
- EmitClangASTNodes(Records, OS, "Stmt", "");
- break;
- case GenClangSACheckers:
- EmitClangSACheckers(Records, OS);
- break;
- case GenOptParserDefs:
- EmitOptParser(Records, OS, true);
- break;
- case GenOptParserImpl:
- EmitOptParser(Records, OS, false);
- break;
- case GenArmNeon:
- EmitNeon(Records, OS);
- break;
- case GenArmNeonSema:
- EmitNeonSema(Records, OS);
- break;
- case GenArmNeonTest:
- EmitNeonTest(Records, OS);
- break;
- }
-
- return false;
+bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
+ switch (Action) {
+ case GenClangAttrClasses:
+ EmitClangAttrClass(Records, OS);
+ break;
+ case GenClangAttrImpl:
+ EmitClangAttrImpl(Records, OS);
+ break;
+ case GenClangAttrList:
+ EmitClangAttrList(Records, OS);
+ break;
+ case GenClangAttrPCHRead:
+ EmitClangAttrPCHRead(Records, OS);
+ break;
+ case GenClangAttrPCHWrite:
+ EmitClangAttrPCHWrite(Records, OS);
+ break;
+ case GenClangAttrSpellingList:
+ EmitClangAttrSpellingList(Records, OS);
+ break;
+ case GenClangAttrLateParsedList:
+ EmitClangAttrLateParsedList(Records, OS);
+ break;
+ case GenClangAttrTemplateInstantiate:
+ EmitClangAttrTemplateInstantiate(Records, OS);
+ break;
+ case GenClangAttrParsedAttrList:
+ EmitClangAttrParsedAttrList(Records, OS);
+ break;
+ case GenClangAttrParsedAttrKinds:
+ EmitClangAttrParsedAttrKinds(Records, OS);
+ break;
+ case GenClangDiagsDefs:
+ EmitClangDiagsDefs(Records, OS, ClangComponent);
+ break;
+ case GenClangDiagGroups:
+ EmitClangDiagGroups(Records, OS);
+ break;
+ case GenClangDiagsIndexName:
+ EmitClangDiagsIndexName(Records, OS);
+ break;
+ case GenClangCommentNodes:
+ EmitClangASTNodes(Records, OS, "Comment", "");
+ break;
+ case GenClangDeclNodes:
+ EmitClangASTNodes(Records, OS, "Decl", "Decl");
+ EmitClangDeclContext(Records, OS);
+ break;
+ case GenClangStmtNodes:
+ EmitClangASTNodes(Records, OS, "Stmt", "");
+ break;
+ case GenClangSACheckers:
+ EmitClangSACheckers(Records, OS);
+ break;
+ case GenClangCommentHTMLTags:
+ EmitClangCommentHTMLTags(Records, OS);
+ break;
+ case GenClangCommentHTMLTagsProperties:
+ EmitClangCommentHTMLTagsProperties(Records, OS);
+ break;
+ case GenClangCommentCommandInfo:
+ EmitClangCommentCommandInfo(Records, OS);
+ break;
+ case GenOptParserDefs:
+ EmitOptParser(Records, OS, true);
+ break;
+ case GenOptParserImpl:
+ EmitOptParser(Records, OS, false);
+ break;
+ case GenArmNeon:
+ EmitNeon(Records, OS);
+ break;
+ case GenArmNeonSema:
+ EmitNeonSema(Records, OS);
+ break;
+ case GenArmNeonTest:
+ EmitNeonTest(Records, OS);
+ break;
}
-};
+
+ return false;
+}
}
int main(int argc, char **argv) {
@@ -191,6 +211,5 @@ int main(int argc, char **argv) {
PrettyStackTraceProgram X(argc, argv);
cl::ParseCommandLineOptions(argc, argv);
- ClangTableGenAction Action;
- return TableGenMain(argv[0], Action);
+ return TableGenMain(argv[0], &ClangTableGenMain);
}
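
This rewrite tracks an upstream interface change: TableGenMain now takes a plain function pointer (bool (*)(raw_ostream &, RecordKeeper &)) instead of a TableGenAction subclass. A minimal stand-alone backend under that contract, with illustrative names:

#include "llvm/Support/CommandLine.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Signals.h"
#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;

static bool MyTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
  OS << "// parsed " << Records.getDefs().size() << " records\n";
  return false; // false signals success, as in ClangTableGenMain above
}

int main(int argc, char **argv) {
  sys::PrintStackTraceOnErrorSignal();
  PrettyStackTraceProgram X(argc, argv);
  cl::ParseCommandLineOptions(argc, argv);
  return TableGenMain(argv[0], &MyTableGenMain);
}
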
diff --git a/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h b/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h
index 779de7c..838fc84 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h
+++ b/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h
@@ -47,6 +47,11 @@ void EmitClangDiagsIndexName(RecordKeeper &Records, raw_ostream &OS);
void EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangCommentHTMLTags(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangCommentHTMLTagsProperties(RecordKeeper &Records, raw_ostream &OS);
+
+void EmitClangCommentCommandInfo(RecordKeeper &Records, raw_ostream &OS);
+
void EmitNeon(RecordKeeper &Records, raw_ostream &OS);
void EmitNeonSema(RecordKeeper &Records, raw_ostream &OS);
void EmitNeonTest(RecordKeeper &Records, raw_ostream &OS);
diff --git a/contrib/llvm/tools/llc/llc.cpp b/contrib/llvm/tools/llc/llc.cpp
index 8951050..4d4a74c 100644
--- a/contrib/llvm/tools/llc/llc.cpp
+++ b/contrib/llvm/tools/llc/llc.cpp
@@ -14,12 +14,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/LLVMContext.h"
+#include "llvm/DataLayout.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Support/IRReader.h"
+#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/LinkAllAsmWriterComponents.h"
#include "llvm/CodeGen/LinkAllCodegenComponents.h"
#include "llvm/MC/SubtargetFeature.h"
@@ -34,7 +36,6 @@
#include "llvm/Support/Signals.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <memory>
@@ -62,211 +63,13 @@ OptLevel("O",
static cl::opt<std::string>
TargetTriple("mtriple", cl::desc("Override target triple for module"));
-static cl::opt<std::string>
-MArch("march", cl::desc("Architecture to generate code for (see --version)"));
-
-static cl::opt<std::string>
-MCPU("mcpu",
- cl::desc("Target a specific cpu type (-mcpu=help for details)"),
- cl::value_desc("cpu-name"),
- cl::init(""));
-
-static cl::list<std::string>
-MAttrs("mattr",
- cl::CommaSeparated,
- cl::desc("Target specific attributes (-mattr=help for details)"),
- cl::value_desc("a1,+a2,-a3,..."));
-
-static cl::opt<Reloc::Model>
-RelocModel("relocation-model",
- cl::desc("Choose relocation model"),
- cl::init(Reloc::Default),
- cl::values(
- clEnumValN(Reloc::Default, "default",
- "Target default relocation model"),
- clEnumValN(Reloc::Static, "static",
- "Non-relocatable code"),
- clEnumValN(Reloc::PIC_, "pic",
- "Fully relocatable, position independent code"),
- clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
- "Relocatable external references, non-relocatable code"),
- clEnumValEnd));
-
-static cl::opt<llvm::CodeModel::Model>
-CMModel("code-model",
- cl::desc("Choose code model"),
- cl::init(CodeModel::Default),
- cl::values(clEnumValN(CodeModel::Default, "default",
- "Target default code model"),
- clEnumValN(CodeModel::Small, "small",
- "Small code model"),
- clEnumValN(CodeModel::Kernel, "kernel",
- "Kernel code model"),
- clEnumValN(CodeModel::Medium, "medium",
- "Medium code model"),
- clEnumValN(CodeModel::Large, "large",
- "Large code model"),
- clEnumValEnd));
-
-static cl::opt<bool>
-RelaxAll("mc-relax-all",
- cl::desc("When used with filetype=obj, "
- "relax all fixups in the emitted object file"));
-
-cl::opt<TargetMachine::CodeGenFileType>
-FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
- cl::desc("Choose a file type (not all types are supported by all targets):"),
- cl::values(
- clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
- "Emit an assembly ('.s') file"),
- clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
- "Emit a native object ('.o') file"),
- clEnumValN(TargetMachine::CGFT_Null, "null",
- "Emit nothing, for performance testing"),
- clEnumValEnd));
-
cl::opt<bool> NoVerify("disable-verify", cl::Hidden,
cl::desc("Do not verify input module"));
-cl::opt<bool> DisableDotLoc("disable-dot-loc", cl::Hidden,
- cl::desc("Do not use .loc entries"));
-
-cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
- cl::desc("Do not use .cfi_* directives"));
-
-cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
- cl::desc("Use .file directives with an explicit directory."));
-
-static cl::opt<bool>
-DisableRedZone("disable-red-zone",
- cl::desc("Do not emit code that uses the red zone."),
- cl::init(false));
-
-static cl::opt<bool>
-EnableFPMAD("enable-fp-mad",
- cl::desc("Enable less precise MAD instructions to be generated"),
- cl::init(false));
-
-static cl::opt<bool>
-DisableFPElim("disable-fp-elim",
- cl::desc("Disable frame pointer elimination optimization"),
- cl::init(false));
-
-static cl::opt<bool>
-DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
- cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableUnsafeFPMath("enable-unsafe-fp-math",
- cl::desc("Enable optimizations that may decrease FP precision"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableNoInfsFPMath("enable-no-infs-fp-math",
- cl::desc("Enable FP math optimizations that assume no +-Infs"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableNoNaNsFPMath("enable-no-nans-fp-math",
- cl::desc("Enable FP math optimizations that assume no NaNs"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
- cl::Hidden,
- cl::desc("Force codegen to assume rounding mode can change dynamically"),
- cl::init(false));
-
-static cl::opt<bool>
-GenerateSoftFloatCalls("soft-float",
- cl::desc("Generate software floating point library calls"),
- cl::init(false));
-
-static cl::opt<llvm::FloatABI::ABIType>
-FloatABIForCalls("float-abi",
- cl::desc("Choose float ABI type"),
- cl::init(FloatABI::Default),
- cl::values(
- clEnumValN(FloatABI::Default, "default",
- "Target default float ABI type"),
- clEnumValN(FloatABI::Soft, "soft",
- "Soft float ABI (implied by -soft-float)"),
- clEnumValN(FloatABI::Hard, "hard",
- "Hard float ABI (uses FP registers)"),
- clEnumValEnd));
-
-static cl::opt<llvm::FPOpFusion::FPOpFusionMode>
-FuseFPOps("fp-contract",
- cl::desc("Enable aggresive formation of fused FP ops"),
- cl::init(FPOpFusion::Standard),
- cl::values(
- clEnumValN(FPOpFusion::Fast, "fast",
- "Fuse FP ops whenever profitable"),
- clEnumValN(FPOpFusion::Standard, "on",
- "Only fuse 'blessed' FP ops."),
- clEnumValN(FPOpFusion::Strict, "off",
- "Only fuse FP ops when the result won't be effected."),
- clEnumValEnd));
-
-static cl::opt<bool>
-DontPlaceZerosInBSS("nozero-initialized-in-bss",
- cl::desc("Don't place zero-initialized symbols into bss section"),
- cl::init(false));
-
-static cl::opt<bool>
+cl::opt<bool>
DisableSimplifyLibCalls("disable-simplify-libcalls",
- cl::desc("Disable simplify-libcalls"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableGuaranteedTailCallOpt("tailcallopt",
- cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
- cl::init(false));
-
-static cl::opt<bool>
-DisableTailCalls("disable-tail-calls",
- cl::desc("Never emit tail calls"),
- cl::init(false));
-
-static cl::opt<unsigned>
-OverrideStackAlignment("stack-alignment",
- cl::desc("Override default stack alignment"),
- cl::init(0));
-
-static cl::opt<bool>
-EnableRealignStack("realign-stack",
- cl::desc("Realign stack if needed"),
- cl::init(true));
-
-static cl::opt<std::string>
-TrapFuncName("trap-func", cl::Hidden,
- cl::desc("Emit a call to trap function rather than a trap instruction"),
- cl::init(""));
-
-static cl::opt<bool>
-EnablePIE("enable-pie",
- cl::desc("Assume the creation of a position independent executable."),
- cl::init(false));
-
-static cl::opt<bool>
-SegmentedStacks("segmented-stacks",
- cl::desc("Use segmented stacks if possible."),
- cl::init(false));
-
-static cl::opt<bool>
-UseInitArray("use-init-array",
- cl::desc("Use .init_array instead of .ctors."),
- cl::init(false));
-
-static cl::opt<std::string> StopAfter("stop-after",
- cl::desc("Stop compilation after a specific pass"),
- cl::value_desc("pass-name"),
- cl::init(""));
-static cl::opt<std::string> StartAfter("start-after",
- cl::desc("Resume compilation after a specific pass"),
- cl::value_desc("pass-name"),
- cl::init(""));
+ cl::desc("Disable simplify-libcalls"),
+ cl::init(false));
// GetFileNameRoot - Helper function to get the basename of a filename.
static inline std::string
@@ -459,6 +262,7 @@ int main(int argc, char **argv) {
Options.PositionIndependentExecutable = EnablePIE;
Options.EnableSegmentedStacks = SegmentedStacks;
Options.UseInitArray = UseInitArray;
+ Options.SSPBufferSize = SSPBufferSize;
std::auto_ptr<TargetMachine>
target(TheTarget->createTargetMachine(TheTriple.getTriple(),
@@ -499,11 +303,16 @@ int main(int argc, char **argv) {
TLI->disableAllFunctions();
PM.add(TLI);
+ if (target.get()) {
+ PM.add(new TargetTransformInfo(target->getScalarTargetTransformInfo(),
+ target->getVectorTargetTransformInfo()));
+ }
+
// Add the target data from the target machine, if it exists, or the module.
- if (const TargetData *TD = Target.getTargetData())
- PM.add(new TargetData(*TD));
+ if (const DataLayout *TD = Target.getDataLayout())
+ PM.add(new DataLayout(*TD));
else
- PM.add(new TargetData(mod));
+ PM.add(new DataLayout(mod));
// Override default to generate verbose assembly.
Target.setAsmVerbosityDefault(true);
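
The llc changes above are largely mechanical fallout from renaming TargetData to DataLayout (header llvm/DataLayout.h in this release). The pass-setup idiom, isolated as a sketch:

#include "llvm/DataLayout.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

static void addDataLayout(PassManager &PM, TargetMachine &TM, Module *M) {
  // Prefer the target machine's layout; fall back to the module's
  // datalayout string when the target does not provide one.
  if (const DataLayout *DL = TM.getDataLayout())
    PM.add(new DataLayout(*DL));
  else
    PM.add(new DataLayout(M));
}
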
diff --git a/contrib/llvm/tools/lli/RecordingMemoryManager.cpp b/contrib/llvm/tools/lli/RecordingMemoryManager.cpp
new file mode 100644
index 0000000..9e1cff5
--- /dev/null
+++ b/contrib/llvm/tools/lli/RecordingMemoryManager.cpp
@@ -0,0 +1,87 @@
+//===- RecordingMemoryManager.cpp - Recording memory manager --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This memory manager allocates local storage and keeps a record of each
+// allocation. Iterators are provided for all data and code allocations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RecordingMemoryManager.h"
+using namespace llvm;
+
+uint8_t *RecordingMemoryManager::
+allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) {
+ // The recording memory manager is just a local copy of the remote target.
+ // The alignment requirement is just stored here for later use. Regular
+ // heap storage is sufficient here.
+ void *Addr = malloc(Size);
+ assert(Addr && "malloc() failure!");
+ sys::MemoryBlock Block(Addr, Size);
+ AllocatedCodeMem.push_back(Allocation(Block, Alignment));
+ return (uint8_t*)Addr;
+}
+
+uint8_t *RecordingMemoryManager::
+allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) {
+ // The recording memory manager is just a local copy of the remote target.
+ // The alignment requirement is just stored here for later use. Regular
+ // heap storage is sufficient here.
+ void *Addr = malloc(Size);
+ assert(Addr && "malloc() failure!");
+ sys::MemoryBlock Block(Addr, Size);
+ AllocatedDataMem.push_back(Allocation(Block, Alignment));
+ return (uint8_t*)Addr;
+}
+void RecordingMemoryManager::setMemoryWritable() { llvm_unreachable("Unexpected!"); }
+void RecordingMemoryManager::setMemoryExecutable() { llvm_unreachable("Unexpected!"); }
+void RecordingMemoryManager::setPoisonMemory(bool poison) { llvm_unreachable("Unexpected!"); }
+void RecordingMemoryManager::AllocateGOT() { llvm_unreachable("Unexpected!"); }
+uint8_t *RecordingMemoryManager::getGOTBase() const {
+ llvm_unreachable("Unexpected!");
+ return 0;
+}
+uint8_t *RecordingMemoryManager::startFunctionBody(const Function *F, uintptr_t &ActualSize){
+ llvm_unreachable("Unexpected!");
+ return 0;
+}
+uint8_t *RecordingMemoryManager::allocateStub(const GlobalValue* F, unsigned StubSize,
+ unsigned Alignment) {
+ llvm_unreachable("Unexpected!");
+ return 0;
+}
+void RecordingMemoryManager::endFunctionBody(const Function *F, uint8_t *FunctionStart,
+ uint8_t *FunctionEnd) {
+ llvm_unreachable("Unexpected!");
+}
+uint8_t *RecordingMemoryManager::allocateSpace(intptr_t Size, unsigned Alignment) {
+ llvm_unreachable("Unexpected!");
+ return 0;
+}
+uint8_t *RecordingMemoryManager::allocateGlobal(uintptr_t Size, unsigned Alignment) {
+ llvm_unreachable("Unexpected!");
+ return 0;
+}
+void RecordingMemoryManager::deallocateFunctionBody(void *Body) {
+ llvm_unreachable("Unexpected!");
+}
+uint8_t* RecordingMemoryManager::startExceptionTable(const Function* F, uintptr_t &ActualSize) {
+ llvm_unreachable("Unexpected!");
+ return 0;
+}
+void RecordingMemoryManager::endExceptionTable(const Function *F, uint8_t *TableStart,
+ uint8_t *TableEnd, uint8_t* FrameRegister) {
+ llvm_unreachable("Unexpected!");
+}
+void RecordingMemoryManager::deallocateExceptionTable(void *ET) {
+ llvm_unreachable("Unexpected!");
+}
+void *RecordingMemoryManager::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure) {
+ return NULL;
+}
diff --git a/contrib/llvm/tools/lli/RecordingMemoryManager.h b/contrib/llvm/tools/lli/RecordingMemoryManager.h
new file mode 100644
index 0000000..1590235
--- /dev/null
+++ b/contrib/llvm/tools/lli/RecordingMemoryManager.h
@@ -0,0 +1,78 @@
+//===- RecordingMemoryManager.h - LLI MCJIT recording memory manager ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This memory manager allocates local storage and keeps a record of each
+// allocation. Iterators are provided for all data and code allocations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef RECORDINGMEMORYMANAGER_H
+#define RECORDINGMEMORYMANAGER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Memory.h"
+#include <utility>
+
+namespace llvm {
+
+class RecordingMemoryManager : public JITMemoryManager {
+public:
+ typedef std::pair<sys::MemoryBlock, unsigned> Allocation;
+
+private:
+ SmallVector<Allocation, 16> AllocatedDataMem;
+ SmallVector<Allocation, 16> AllocatedCodeMem;
+
+public:
+ RecordingMemoryManager() {}
+ virtual ~RecordingMemoryManager() {}
+
+ typedef SmallVectorImpl<Allocation>::const_iterator const_data_iterator;
+ typedef SmallVectorImpl<Allocation>::const_iterator const_code_iterator;
+
+ const_data_iterator data_begin() const { return AllocatedDataMem.begin(); }
+ const_data_iterator data_end() const { return AllocatedDataMem.end(); }
+ const_code_iterator code_begin() const { return AllocatedCodeMem.begin(); }
+ const_code_iterator code_end() const { return AllocatedCodeMem.end(); }
+
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID);
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID);
+
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
+ // The following obsolete JITMemoryManager calls are stubbed out for
+ // this model.
+ void setMemoryWritable();
+ void setMemoryExecutable();
+ void setPoisonMemory(bool poison);
+ void AllocateGOT();
+ uint8_t *getGOTBase() const;
+ uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize);
+ uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
+ unsigned Alignment);
+ void endFunctionBody(const Function *F, uint8_t *FunctionStart,
+ uint8_t *FunctionEnd);
+ uint8_t *allocateSpace(intptr_t Size, unsigned Alignment);
+ uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment);
+ void deallocateFunctionBody(void *Body);
+ uint8_t* startExceptionTable(const Function* F, uintptr_t &ActualSize);
+ void endExceptionTable(const Function *F, uint8_t *TableStart,
+ uint8_t *TableEnd, uint8_t* FrameRegister);
+ void deallocateExceptionTable(void *ET);
+
+};
+
+} // end namespace llvm
+
+#endif
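
A hedged usage sketch for the recorder: each Allocation pairs a sys::MemoryBlock with the alignment the JIT asked for, and the iterators expose both for the later remote-layout pass (the dump function is illustrative):

#include "RecordingMemoryManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void dumpRecorded(const RecordingMemoryManager &MM) {
  for (RecordingMemoryManager::const_code_iterator I = MM.code_begin(),
         E = MM.code_end(); I != E; ++I)
    errs() << "code: " << I->first.size() << "B align " << I->second << "\n";
  for (RecordingMemoryManager::const_data_iterator I = MM.data_begin(),
         E = MM.data_end(); I != E; ++I)
    errs() << "data: " << I->first.size() << "B align " << I->second << "\n";
}
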
diff --git a/contrib/llvm/tools/lli/RemoteTarget.cpp b/contrib/llvm/tools/lli/RemoteTarget.cpp
new file mode 100644
index 0000000..212bdfd
--- /dev/null
+++ b/contrib/llvm/tools/lli/RemoteTarget.cpp
@@ -0,0 +1,61 @@
+//===- RemoteTarget.cpp - LLVM Remote process JIT execution --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the RemoteTarget class which executes JITed code in a
+// separate address range from where it was built.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RemoteTarget.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Memory.h"
+#include <stdlib.h>
+#include <string>
+using namespace llvm;
+
+bool RemoteTarget::allocateSpace(size_t Size, unsigned Alignment,
+ uint64_t &Address) {
+ sys::MemoryBlock *Prev = Allocations.size() ? &Allocations.back() : NULL;
+ sys::MemoryBlock Mem = sys::Memory::AllocateRWX(Size, Prev, &ErrorMsg);
+ if (Mem.base() == NULL)
+ return true;
+ if ((uintptr_t)Mem.base() % Alignment) {
+ ErrorMsg = "unable to allocate sufficiently aligned memory";
+ return true;
+ }
+ Address = reinterpret_cast<uint64_t>(Mem.base());
+ return false;
+}
+
+bool RemoteTarget::loadData(uint64_t Address, const void *Data, size_t Size) {
+ memcpy ((void*)Address, Data, Size);
+ return false;
+}
+
+bool RemoteTarget::loadCode(uint64_t Address, const void *Data, size_t Size) {
+ memcpy ((void*)Address, Data, Size);
+ sys::MemoryBlock Mem((void*)Address, Size);
+ sys::Memory::setExecutable(Mem, &ErrorMsg);
+ return false;
+}
+
+bool RemoteTarget::executeCode(uint64_t Address, int &RetVal) {
+ int (*fn)(void) = (int(*)(void))Address;
+ RetVal = fn();
+ return false;
+}
+
+void RemoteTarget::create() {
+}
+
+void RemoteTarget::stop() {
+ for (unsigned i = 0, e = Allocations.size(); i != e; ++i)
+ sys::Memory::ReleaseRWX(Allocations[i]);
+}
diff --git a/contrib/llvm/tools/lli/RemoteTarget.h b/contrib/llvm/tools/lli/RemoteTarget.h
new file mode 100644
index 0000000..d05d3c6
--- /dev/null
+++ b/contrib/llvm/tools/lli/RemoteTarget.h
@@ -0,0 +1,101 @@
+//===- RemoteTarget.h - LLVM Remote process JIT execution ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definition of the RemoteTarget class which executes JITed code in a
+// separate address range from where it was built.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef REMOTEPROCESS_H
+#define REMOTEPROCESS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Memory.h"
+#include <stdlib.h>
+#include <string>
+
+namespace llvm {
+
+class RemoteTarget {
+ std::string ErrorMsg;
+ bool IsRunning;
+
+ SmallVector<sys::MemoryBlock, 16> Allocations;
+
+public:
+ StringRef getErrorMsg() const { return ErrorMsg; }
+
+ /// Allocate space in the remote target address space.
+ ///
+ /// @param Size Amount of space, in bytes, to allocate.
+ /// @param Alignment Required minimum alignment for allocated space.
+ /// @param[out] Address Remote address of the allocated memory.
+ ///
+ /// @returns False on success. On failure, ErrorMsg is updated with
+ /// descriptive text of the encountered error.
+ bool allocateSpace(size_t Size, unsigned Alignment, uint64_t &Address);
+
+ /// Load data into the target address space.
+ ///
+ /// @param Address Destination address in the target process.
+ /// @param Data Source address in the host process.
+ /// @param Size Number of bytes to copy.
+ ///
+ /// @returns False on success. On failure, ErrorMsg is updated with
+ /// descriptive text of the encountered error.
+ bool loadData(uint64_t Address, const void *Data, size_t Size);
+
+ /// Load code into the target address space and prepare it for execution.
+ ///
+ /// @param Address Destination address in the target process.
+ /// @param Data Source address in the host process.
+ /// @param Size Number of bytes to copy.
+ ///
+ /// @returns False on success. On failure, ErrorMsg is updated with
+ /// descriptive text of the encountered error.
+ bool loadCode(uint64_t Address, const void *Data, size_t Size);
+
+ /// Execute code in the target process. The called function is required
+ /// to be of signature int "(*)(void)".
+ ///
+ /// @param Address Address of the loaded function in the target
+ /// process.
+ /// @param[out] RetVal The integer return value of the called function.
+ ///
+ /// @returns False on success. On failure, ErrorMsg is updated with
+ /// descriptive text of the encountered error.
+ bool executeCode(uint64_t Address, int &RetVal);
+
+ /// Minimum alignment for memory permissions. Used to separate code and
+ /// data regions to make sure data doesn't get marked as code or vice
+ /// versa.
+ ///
+ /// @returns Page alignment return value. Default of 4k.
+ unsigned getPageAlignment() { return 4096; }
+
+ /// Start the remote process.
+ void create();
+
+ /// Terminate the remote process.
+ void stop();
+
+ RemoteTarget() : ErrorMsg(""), IsRunning(false) {}
+ ~RemoteTarget() { if (IsRunning) stop(); }
+
+private:
+ // Main processing function for the remote target process. Command messages
+ // are received on file descriptor CmdFD and responses come back on OutFD.
+ static void doRemoteTargeting(int CmdFD, int OutFD);
+};
+
+} // end namespace llvm
+
+#endif
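
End to end, the class is meant to be driven roughly like this (a hypothetical helper; note the inverted convention in which false means success):

#include "RemoteTarget.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static int runBlob(const void *Code, size_t Size) {
  RemoteTarget T;
  T.create();
  uint64_t Addr;
  int RetVal = -1;
  // Each step returns true on failure and leaves a message in ErrorMsg.
  if (T.allocateSpace(Size, T.getPageAlignment(), Addr) ||
      T.loadCode(Addr, Code, Size) ||
      T.executeCode(Addr, RetVal))
    errs() << "remote error: " << T.getErrorMsg() << "\n";
  T.stop();
  return RetVal;
}
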
diff --git a/contrib/llvm/tools/lli/lli.cpp b/contrib/llvm/tools/lli/lli.cpp
index b6c9299..d41a595 100644
--- a/contrib/llvm/tools/lli/lli.cpp
+++ b/contrib/llvm/tools/lli/lli.cpp
@@ -13,6 +13,9 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "lli"
+#include "RecordingMemoryManager.h"
+#include "RemoteTarget.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
@@ -32,11 +35,14 @@
#include "llvm/Support/PluginLoader.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Memory.h"
+#include "llvm/Support/MathExtras.h"
#include <cerrno>
#ifdef __linux__
@@ -73,6 +79,13 @@ namespace {
"use-mcjit", cl::desc("Enable use of the MC-based JIT (if available)"),
cl::init(false));
+ // The MCJIT supports building for a target address space separate from
+ // the JIT compilation process. Use a forked process and a copying
+ // memory manager with IPC to execute using this functionality.
+ cl::opt<bool> RemoteMCJIT("remote-mcjit",
+ cl::desc("Execute MCJIT'ed code in a separate process."),
+ cl::init(false));
+
// Determine optimization level.
cl::opt<char>
OptLevel("O",
@@ -159,6 +172,23 @@ namespace {
cl::init(false));
cl::opt<bool>
+ GenerateSoftFloatCalls("soft-float",
+ cl::desc("Generate software floating point library calls"),
+ cl::init(false));
+
+ cl::opt<llvm::FloatABI::ABIType>
+ FloatABIForCalls("float-abi",
+ cl::desc("Choose float ABI type"),
+ cl::init(FloatABI::Default),
+ cl::values(
+ clEnumValN(FloatABI::Default, "default",
+ "Target default float ABI type"),
+ clEnumValN(FloatABI::Soft, "soft",
+ "Soft float ABI (implied by -soft-float)"),
+ clEnumValN(FloatABI::Hard, "hard",
+ "Hard float ABI (uses FP registers)"),
+ clEnumValEnd));
+ cl::opt<bool>
// In debug builds, make this default to true.
#ifdef NDEBUG
#define EMIT_DEBUG false
@@ -212,7 +242,7 @@ public:
// the data cache but not to the instruction cache.
virtual void invalidateInstructionCache();
- // The MCJITMemoryManager doesn't use the following functions, so we don't
+ // The RTDyldMemoryManager doesn't use the following functions, so we don't
// need implement them.
virtual void setMemoryWritable() {
llvm_unreachable("Unexpected call!");
@@ -274,9 +304,16 @@ uint8_t *LLIMCJITMemoryManager::allocateDataSection(uintptr_t Size,
unsigned SectionID) {
if (!Alignment)
Alignment = 16;
- uint8_t *Addr = (uint8_t*)calloc((Size + Alignment - 1)/Alignment, Alignment);
- AllocatedDataMem.push_back(sys::MemoryBlock(Addr, Size));
- return Addr;
+ // Ensure that enough memory is requested to allow aligning.
+ size_t NumElementsAligned = 1 + (Size + Alignment - 1)/Alignment;
+ uint8_t *Addr = (uint8_t*)calloc(NumElementsAligned, Alignment);
+
+ // Honour the alignment requirement.
+ uint8_t *AlignedAddr = (uint8_t*)RoundUpToAlignment((uint64_t)Addr, Alignment);
+
+ // Store the original address from calloc so we can free it later.
+ AllocatedDataMem.push_back(sys::MemoryBlock(Addr, NumElementsAligned*Alignment));
+ return AlignedAddr;
}
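
The fix above is the standard over-allocate-then-align idiom: request one extra Alignment-sized element, then round the returned pointer up. In isolation (RoundUpToAlignment comes from llvm/Support/MathExtras.h; the helper name is illustrative):

#include "llvm/Support/MathExtras.h"
#include <cstdlib>
#include <stdint.h>

static uint8_t *callocAligned(size_t Size, unsigned Alignment, void *&Raw) {
  // The extra element guarantees Size usable bytes remain after rounding.
  size_t N = 1 + (Size + Alignment - 1) / Alignment;
  Raw = calloc(N, Alignment); // keep Raw so it can be free()'d later
  uint64_t P = llvm::RoundUpToAlignment((uint64_t)(uintptr_t)Raw, Alignment);
  return (uint8_t *)(uintptr_t)P;
}
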
uint8_t *LLIMCJITMemoryManager::allocateCodeSection(uintptr_t Size,
@@ -326,6 +363,10 @@ void LLIMCJITMemoryManager::invalidateInstructionCache() {
AllocatedCodeMem[i].size());
}
+static int jit_noop() {
+ return 0;
+}
+
void *LLIMCJITMemoryManager::getPointerToNamedFunction(const std::string &Name,
bool AbortOnFailure) {
#if defined(__linux__)
@@ -348,6 +389,14 @@ void *LLIMCJITMemoryManager::getPointerToNamedFunction(const std::string &Name,
if (Name == "mknod") return (void*)(intptr_t)&mknod;
#endif // __linux__
+ // We should not invoke parent's ctors/dtors from generated main()!
+ // On MinGW and Cygwin, the symbol __main resolves to the callee's
+ // (e.g. tools/lli's), invoking the wrong duplicated ctors (and
+ // registering the wrong dtors with atexit(3)).
+ // We expect ExecutionEngine::runStaticConstructorsDestructors()
+ // is called before ExecutionEngine::runFunctionAsMain() is called.
+ if (Name == "__main") return (void*)(intptr_t)&jit_noop;
+
const char *NameStr = Name.c_str();
void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
if (Ptr) return Ptr;
@@ -372,6 +421,83 @@ LLIMCJITMemoryManager::~LLIMCJITMemoryManager() {
free(AllocatedDataMem[i].base());
}
+
+void layoutRemoteTargetMemory(RemoteTarget *T, RecordingMemoryManager *JMM) {
+ // Lay out our sections in order, with all the code sections first, then
+ // all the data sections.
+ uint64_t CurOffset = 0;
+ unsigned MaxAlign = T->getPageAlignment();
+ SmallVector<std::pair<const void*, uint64_t>, 16> Offsets;
+ SmallVector<unsigned, 16> Sizes;
+ for (RecordingMemoryManager::const_code_iterator I = JMM->code_begin(),
+ E = JMM->code_end();
+ I != E; ++I) {
+ DEBUG(dbgs() << "code region: size " << I->first.size()
+ << ", alignment " << I->second << "\n");
+ // Align the current offset up to whatever is needed for the next
+ // section.
+ unsigned Align = I->second;
+ CurOffset = (CurOffset + Align - 1) / Align * Align;
+ // Save off the address of the new section and allocate its space.
+ Offsets.push_back(std::pair<const void*,uint64_t>(I->first.base(), CurOffset));
+ Sizes.push_back(I->first.size());
+ CurOffset += I->first.size();
+ }
+ // Adjust to keep code and data aligned on separate pages.
+ CurOffset = (CurOffset + MaxAlign - 1) / MaxAlign * MaxAlign;
+ unsigned FirstDataIndex = Offsets.size();
+ for (RecordingMemoryManager::const_data_iterator I = JMM->data_begin(),
+ E = JMM->data_end();
+ I != E; ++I) {
+ DEBUG(dbgs() << "data region: size " << I->first.size()
+ << ", alignment " << I->second << "\n");
+ // Align the current offset up to whatever is needed for the next
+ // section.
+ unsigned Align = I->second;
+ CurOffset = (CurOffset + Align - 1) / Align * Align;
+ // Save off the address of the new section and allocate its space.
+ Offsets.push_back(std::pair<const void*,uint64_t>(I->first.base(), CurOffset));
+ Sizes.push_back(I->first.size());
+ CurOffset += I->first.size();
+ }
+
+ // Allocate space in the remote target.
+ uint64_t RemoteAddr;
+ if (T->allocateSpace(CurOffset, MaxAlign, RemoteAddr))
+ report_fatal_error(T->getErrorMsg());
+ // Map the section addresses so relocations will get updated in the local
+ // copies of the sections.
+ for (unsigned i = 0, e = Offsets.size(); i != e; ++i) {
+ uint64_t Addr = RemoteAddr + Offsets[i].second;
+ EE->mapSectionAddress(const_cast<void*>(Offsets[i].first), Addr);
+
+ DEBUG(dbgs() << " Mapping local: " << Offsets[i].first
+ << " to remote: " << format("%p", Addr) << "\n");
+
+ }
+
+ // Trigger application of relocations
+ EE->finalizeObject();
+
+ // Now load it all to the target.
+ for (unsigned i = 0, e = Offsets.size(); i != e; ++i) {
+ uint64_t Addr = RemoteAddr + Offsets[i].second;
+
+ if (i < FirstDataIndex) {
+ T->loadCode(Addr, Offsets[i].first, Sizes[i]);
+
+ DEBUG(dbgs() << " loading code: " << Offsets[i].first
+ << " to remote: " << format("%p", Addr) << "\n");
+ } else {
+ T->loadData(Addr, Offsets[i].first, Sizes[i]);
+
+ DEBUG(dbgs() << " loading data: " << Offsets[i].first
+ << " to remote: " << format("%p", Addr) << "\n");
+ }
+
+ }
+}
+
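
The recurring expression in the layout loops above is plain round-up arithmetic, worth seeing in isolation (illustrative helper):

#include <stdint.h>

// Round Offset up to the next multiple of Align, e.g. alignTo(13, 8) == 16.
static uint64_t alignTo(uint64_t Offset, unsigned Align) {
  return (Offset + Align - 1) / Align * Align;
}
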
//===----------------------------------------------------------------------===//
// main Driver function
//
@@ -386,6 +512,7 @@ int main(int argc, char **argv, char * const *envp) {
// usable by the JIT.
InitializeNativeTarget();
InitializeNativeTargetAsmPrinter();
+ InitializeNativeTargetAsmParser();
cl::ParseCommandLineOptions(argc, argv,
"llvm interpreter & dynamic compiler\n");
@@ -428,12 +555,19 @@ int main(int argc, char **argv, char * const *envp) {
Mod->setTargetTriple(Triple::normalize(TargetTriple));
// Enable MCJIT if desired.
- LLIMCJITMemoryManager *JMM = 0;
+ JITMemoryManager *JMM = 0;
if (UseMCJIT && !ForceInterpreter) {
builder.setUseMCJIT(true);
- JMM = new LLIMCJITMemoryManager();
+ if (RemoteMCJIT)
+ JMM = new RecordingMemoryManager();
+ else
+ JMM = new LLIMCJITMemoryManager();
builder.setJITMemoryManager(JMM);
} else {
+ if (RemoteMCJIT) {
+ errs() << "error: Remote process execution requires -use-mcjit\n";
+ exit(1);
+ }
builder.setJITMemoryManager(ForceInterpreter ? 0 :
JITMemoryManager::CreateDefaultMemManager());
}
@@ -452,9 +586,19 @@ int main(int argc, char **argv, char * const *envp) {
builder.setOptLevel(OLvl);
TargetOptions Options;
- Options.JITExceptionHandling = EnableJITExceptionHandling;
- Options.JITEmitDebugInfo = EmitJitDebugInfo;
- Options.JITEmitDebugInfoToDisk = EmitJitDebugInfoToDisk;
+ Options.UseSoftFloat = GenerateSoftFloatCalls;
+ if (FloatABIForCalls != FloatABI::Default)
+ Options.FloatABIType = FloatABIForCalls;
+ if (GenerateSoftFloatCalls)
+ FloatABIForCalls = FloatABI::Soft;
+
+ // Remote target execution doesn't handle EH or debug registration.
+ if (!RemoteMCJIT) {
+ Options.JITExceptionHandling = EnableJITExceptionHandling;
+ Options.JITEmitDebugInfo = EmitJitDebugInfo;
+ Options.JITEmitDebugInfoToDisk = EmitJitDebugInfoToDisk;
+ }
+
builder.setTargetOptions(Options);
EE = builder.create();
@@ -466,10 +610,6 @@ int main(int argc, char **argv, char * const *envp) {
exit(1);
}
- // Clear instruction cache before code will be executed.
- if (JMM)
- JMM->invalidateInstructionCache();
-
// The following functions have no effect if their respective profiling
// support wasn't enabled in the build configuration.
EE->RegisterJITEventListener(
@@ -477,6 +617,10 @@ int main(int argc, char **argv, char * const *envp) {
EE->RegisterJITEventListener(
JITEventListener::createIntelJITEventListener());
+ if (!NoLazyCompilation && RemoteMCJIT) {
+ errs() << "warning: remote mcjit does not support lazy compilation\n";
+ NoLazyCompilation = true;
+ }
EE->DisableLazyCompilation(NoLazyCompilation);
// If the user specifically requested an argv[0] to pass into the program,
@@ -513,8 +657,13 @@ int main(int argc, char **argv, char * const *envp) {
// Reset errno to zero on entry to main.
errno = 0;
+ // Remote target MCJIT doesn't (yet) support static constructors. No reason
+ // it couldn't. This is a limitation of the LLI implementation, not the
+ // MCJIT itself. FIXME.
+ //
// Run static constructors.
- EE->runStaticConstructorsDestructors(false);
+ if (!RemoteMCJIT)
+ EE->runStaticConstructorsDestructors(false);
if (NoLazyCompilation) {
for (Module::iterator I = Mod->begin(), E = Mod->end(); I != E; ++I) {
@@ -524,24 +673,69 @@ int main(int argc, char **argv, char * const *envp) {
}
}
- // Run main.
- int Result = EE->runFunctionAsMain(EntryFn, InputArgv, envp);
-
- // Run static destructors.
- EE->runStaticConstructorsDestructors(true);
-
- // If the program didn't call exit explicitly, we should call it now.
- // This ensures that any atexit handlers get called correctly.
- if (Function *ExitF = dyn_cast<Function>(Exit)) {
- std::vector<GenericValue> Args;
- GenericValue ResultGV;
- ResultGV.IntVal = APInt(32, Result);
- Args.push_back(ResultGV);
- EE->runFunction(ExitF, Args);
- errs() << "ERROR: exit(" << Result << ") returned!\n";
- abort();
+ int Result;
+ if (RemoteMCJIT) {
+ RecordingMemoryManager *MM = static_cast<RecordingMemoryManager*>(JMM);
+ // Everything is prepared now, so lay out our program for the target
+ // address space, assign the section addresses to resolve any relocations,
+ // and send it to the target.
+ RemoteTarget Target;
+ Target.create();
+
+ // Ask for a pointer to the entry function. This triggers the actual
+ // compilation.
+ (void)EE->getPointerToFunction(EntryFn);
+
+ // Enough has been compiled to execute the entry function now, so
+ // layout the target memory.
+ layoutRemoteTargetMemory(&Target, MM);
+
+ // Since we're executing in a (at least simulated) remote address space,
+ // we can't use the ExecutionEngine::runFunctionAsMain(). We have to
+ // grab the function address directly here and tell the remote target
+ // to execute the function.
+ // FIXME: argv and envp handling.
+ uint64_t Entry = (uint64_t)EE->getPointerToFunction(EntryFn);
+
+ DEBUG(dbgs() << "Executing '" << EntryFn->getName() << "' at "
+ << format("%p", Entry) << "\n");
+
+ if (Target.executeCode(Entry, Result))
+ errs() << "ERROR: " << Target.getErrorMsg() << "\n";
+
+ Target.stop();
} else {
- errs() << "ERROR: exit defined with wrong prototype!\n";
- abort();
+ // Trigger compilation separately so code regions that need to be
+ // invalidated will be known.
+ (void)EE->getPointerToFunction(EntryFn);
+ // Clear instruction cache before code will be executed.
+ if (JMM)
+ static_cast<LLIMCJITMemoryManager*>(JMM)->invalidateInstructionCache();
+
+ // Run main.
+ Result = EE->runFunctionAsMain(EntryFn, InputArgv, envp);
+ }
+
+ // Like static constructors, the remote target MCJIT support doesn't handle
+ // this yet. It could. FIXME.
+ if (!RemoteMCJIT) {
+ // Run static destructors.
+ EE->runStaticConstructorsDestructors(true);
+
+ // If the program didn't call exit explicitly, we should call it now.
+ // This ensures that any atexit handlers get called correctly.
+ if (Function *ExitF = dyn_cast<Function>(Exit)) {
+ std::vector<GenericValue> Args;
+ GenericValue ResultGV;
+ ResultGV.IntVal = APInt(32, Result);
+ Args.push_back(ResultGV);
+ EE->runFunction(ExitF, Args);
+ errs() << "ERROR: exit(" << Result << ") returned!\n";
+ abort();
+ } else {
+ errs() << "ERROR: exit defined with wrong prototype!\n";
+ abort();
+ }
}
+ return Result;
}
diff --git a/contrib/llvm/tools/llvm-ar/llvm-ar.cpp b/contrib/llvm/tools/llvm-ar/llvm-ar.cpp
index 7c53701..a8a5013a 100644
--- a/contrib/llvm/tools/llvm-ar/llvm-ar.cpp
+++ b/contrib/llvm/tools/llvm-ar/llvm-ar.cpp
@@ -23,6 +23,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Signals.h"
#include <algorithm>
+#include <cstdlib>
#include <memory>
#include <fstream>
using namespace llvm;
@@ -126,40 +127,57 @@ std::set<sys::Path> Paths;
// The Archive object to which all the editing operations will be sent.
Archive* TheArchive = 0;
+// The name this program was invoked as.
+static const char *program_name;
+
+// show_help - Show the error message, the help message and exit.
+LLVM_ATTRIBUTE_NORETURN static void
+show_help(const std::string &msg) {
+ errs() << program_name << ": " << msg << "\n\n";
+ cl::PrintHelpMessage();
+ if (TheArchive)
+ delete TheArchive;
+ std::exit(1);
+}
+
+// fail - Show the error message and exit.
+LLVM_ATTRIBUTE_NORETURN static void
+fail(const std::string &msg) {
+ errs() << program_name << ": " << msg << "\n\n";
+ if (TheArchive)
+ delete TheArchive;
+ std::exit(1);
+}
+
// getRelPos - Extract the member filename from the command line for
// the [relpos] argument associated with a, b, and i modifiers
void getRelPos() {
- if(RestOfArgs.size() > 0) {
- RelPos = RestOfArgs[0];
- RestOfArgs.erase(RestOfArgs.begin());
- }
- else
- throw "Expected [relpos] for a, b, or i modifier";
+ if(RestOfArgs.size() == 0)
+ show_help("Expected [relpos] for a, b, or i modifier");
+ RelPos = RestOfArgs[0];
+ RestOfArgs.erase(RestOfArgs.begin());
}
// getCount - Extract the [count] argument associated with the N modifier
// from the command line and check its value.
void getCount() {
- if(RestOfArgs.size() > 0) {
- Count = atoi(RestOfArgs[0].c_str());
- RestOfArgs.erase(RestOfArgs.begin());
- }
- else
- throw "Expected [count] value with N modifier";
+ if(RestOfArgs.size() == 0)
+ show_help("Expected [count] value with N modifier");
+
+ Count = atoi(RestOfArgs[0].c_str());
+ RestOfArgs.erase(RestOfArgs.begin());
// Non-positive counts are not allowed
if (Count < 1)
- throw "Invalid [count] value (not a positive integer)";
+ show_help("Invalid [count] value (not a positive integer)");
}
// getArchive - Get the archive file name from the command line
void getArchive() {
- if(RestOfArgs.size() > 0) {
- ArchiveName = RestOfArgs[0];
- RestOfArgs.erase(RestOfArgs.begin());
- }
- else
- throw "An archive name must be specified.";
+ if(RestOfArgs.size() == 0)
+ show_help("An archive name must be specified");
+ ArchiveName = RestOfArgs[0];
+ RestOfArgs.erase(RestOfArgs.begin());
}
// getMembers - Copy over remaining items in RestOfArgs to our Members vector
@@ -240,25 +258,27 @@ ArchiveOperation parseCommandLine() {
// Perform various checks on the operation/modifier specification
// to make sure we are dealing with a legal request.
if (NumOperations == 0)
- throw "You must specify at least one of the operations";
+ show_help("You must specify at least one of the operations");
if (NumOperations > 1)
- throw "Only one operation may be specified";
+ show_help("Only one operation may be specified");
if (NumPositional > 1)
- throw "You may only specify one of a, b, and i modifiers";
- if (AddAfter || AddBefore || InsertBefore)
+ show_help("You may only specify one of a, b, and i modifiers");
+ if (AddAfter || AddBefore || InsertBefore) {
if (Operation != Move && Operation != ReplaceOrInsert)
- throw "The 'a', 'b' and 'i' modifiers can only be specified with "
- "the 'm' or 'r' operations";
+ show_help("The 'a', 'b' and 'i' modifiers can only be specified with "
+ "the 'm' or 'r' operations");
+ }
if (RecurseDirectories && Operation != ReplaceOrInsert)
- throw "The 'R' modifiers is only applicabe to the 'r' operation";
+ show_help("The 'R' modifiers is only applicabe to the 'r' operation");
if (OriginalDates && Operation != Extract)
- throw "The 'o' modifier is only applicable to the 'x' operation";
+ show_help("The 'o' modifier is only applicable to the 'x' operation");
if (TruncateNames && Operation!=QuickAppend && Operation!=ReplaceOrInsert)
- throw "The 'f' modifier is only applicable to the 'q' and 'r' operations";
+ show_help("The 'f' modifier is only applicable to the 'q' and 'r' "
+ "operations");
if (OnlyUpdate && Operation != ReplaceOrInsert)
- throw "The 'u' modifier is only applicable to the 'r' operation";
+ show_help("The 'u' modifier is only applicable to the 'r' operation");
if (Count > 1 && Members.size() > 1)
- throw "Only one member name may be specified with the 'N' modifier";
+ show_help("Only one member name may be specified with the 'N' modifier");
// Return the parsed operation to the caller
return Operation;
@@ -304,16 +324,16 @@ bool buildPaths(bool checkExistence, std::string* ErrMsg) {
for (unsigned i = 0; i < Members.size(); i++) {
sys::Path aPath;
if (!aPath.set(Members[i]))
- throw std::string("File member name invalid: ") + Members[i];
+ fail(std::string("File member name invalid: ") + Members[i]);
if (checkExistence) {
bool Exists;
if (sys::fs::exists(aPath.str(), Exists) || !Exists)
- throw std::string("File does not exist: ") + Members[i];
+ fail(std::string("File does not exist: ") + Members[i]);
std::string Err;
sys::PathWithStatus PwS(aPath);
const sys::FileStatus *si = PwS.getFileStatus(false, &Err);
if (!si)
- throw Err;
+ fail(Err);
if (si->isDir) {
std::set<sys::Path> dirpaths;
if (recurseDirectories(aPath, dirpaths, ErrMsg))
@@ -683,6 +703,7 @@ doReplaceOrInsert(std::string* ErrMsg) {
// main - main program for llvm-ar .. see comments in the code
int main(int argc, char **argv) {
+ program_name = argv[0];
// Print a stack trace if we signal out.
sys::PrintStackTraceOnErrorSignal();
PrettyStackTraceProgram X(argc, argv);
@@ -698,77 +719,61 @@ int main(int argc, char **argv) {
int exitCode = 0;
- // Make sure we don't exit with "unhandled exception".
- try {
- // Do our own parsing of the command line because the CommandLine utility
- // can't handle the grouped positional parameters without a dash.
- ArchiveOperation Operation = parseCommandLine();
-
- // Check the path name of the archive
- sys::Path ArchivePath;
- if (!ArchivePath.set(ArchiveName))
- throw std::string("Archive name invalid: ") + ArchiveName;
-
- // Create or open the archive object.
- bool Exists;
- if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists) {
- // Produce a warning if we should and we're creating the archive
- if (!Create)
- errs() << argv[0] << ": creating " << ArchivePath.str() << "\n";
- TheArchive = Archive::CreateEmpty(ArchivePath, Context);
- TheArchive->writeToDisk();
- } else {
- std::string Error;
- TheArchive = Archive::OpenAndLoad(ArchivePath, Context, &Error);
- if (TheArchive == 0) {
- errs() << argv[0] << ": error loading '" << ArchivePath.str() << "': "
- << Error << "!\n";
- return 1;
- }
- }
+ // Do our own parsing of the command line because the CommandLine utility
+ // can't handle the grouped positional parameters without a dash.
+ ArchiveOperation Operation = parseCommandLine();
- // Make sure we're not fooling ourselves.
- assert(TheArchive && "Unable to instantiate the archive");
-
- // Make sure we clean up the archive even on failure.
- std::auto_ptr<Archive> AutoArchive(TheArchive);
-
- // Perform the operation
- std::string ErrMsg;
- bool haveError = false;
- switch (Operation) {
- case Print: haveError = doPrint(&ErrMsg); break;
- case Delete: haveError = doDelete(&ErrMsg); break;
- case Move: haveError = doMove(&ErrMsg); break;
- case QuickAppend: haveError = doQuickAppend(&ErrMsg); break;
- case ReplaceOrInsert: haveError = doReplaceOrInsert(&ErrMsg); break;
- case DisplayTable: haveError = doDisplayTable(&ErrMsg); break;
- case Extract: haveError = doExtract(&ErrMsg); break;
- case NoOperation:
- errs() << argv[0] << ": No operation was selected.\n";
- break;
- }
- if (haveError) {
- errs() << argv[0] << ": " << ErrMsg << "\n";
+ // Check the path name of the archive
+ sys::Path ArchivePath;
+ if (!ArchivePath.set(ArchiveName)) {
+ errs() << argv[0] << ": Archive name invalid: " << ArchiveName << "\n";
+ return 1;
+ }
+
+ // Create or open the archive object.
+ bool Exists;
+ if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists) {
+ // Produce a warning if we should and we're creating the archive
+ if (!Create)
+ errs() << argv[0] << ": creating " << ArchivePath.str() << "\n";
+ TheArchive = Archive::CreateEmpty(ArchivePath, Context);
+ TheArchive->writeToDisk();
+ } else {
+ std::string Error;
+ TheArchive = Archive::OpenAndLoad(ArchivePath, Context, &Error);
+ if (TheArchive == 0) {
+ errs() << argv[0] << ": error loading '" << ArchivePath.str() << "': "
+ << Error << "!\n";
return 1;
}
- } catch (const char*msg) {
- // These errors are usage errors, thrown only by the various checks in the
- // code above.
- errs() << argv[0] << ": " << msg << "\n\n";
- cl::PrintHelpMessage();
- exitCode = 1;
- } catch (const std::string& msg) {
- // These errors are thrown by LLVM libraries (e.g. lib System) and represent
- // a more serious error so we bump the exitCode and don't print the usage.
- errs() << argv[0] << ": " << msg << "\n";
- exitCode = 2;
- } catch (...) {
- // This really shouldn't happen, but just in case ....
- errs() << argv[0] << ": An unexpected unknown exception occurred.\n";
- exitCode = 3;
}
+ // Make sure we're not fooling ourselves.
+ assert(TheArchive && "Unable to instantiate the archive");
+
+ // Perform the operation
+ std::string ErrMsg;
+ bool haveError = false;
+ switch (Operation) {
+ case Print: haveError = doPrint(&ErrMsg); break;
+ case Delete: haveError = doDelete(&ErrMsg); break;
+ case Move: haveError = doMove(&ErrMsg); break;
+ case QuickAppend: haveError = doQuickAppend(&ErrMsg); break;
+ case ReplaceOrInsert: haveError = doReplaceOrInsert(&ErrMsg); break;
+ case DisplayTable: haveError = doDisplayTable(&ErrMsg); break;
+ case Extract: haveError = doExtract(&ErrMsg); break;
+ case NoOperation:
+ errs() << argv[0] << ": No operation was selected.\n";
+ break;
+ }
+ if (haveError) {
+ errs() << argv[0] << ": " << ErrMsg << "\n";
+ return 1;
+ }
+
+ delete TheArchive;
+ TheArchive = 0;
+
// Return result code back to operating system.
return exitCode;
}
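
The llvm-ar hunks above drop try/throw/catch in favor of a fail() helper and explicit early returns, the pattern needed once LLVM is built with -fno-exceptions. The patch only shows the call sites, so the following is a minimal sketch of what such a helper might look like, assuming the program_name global registered at the top of main():

    #include <cstdlib>
    #include <string>
    #include "llvm/Support/raw_ostream.h"

    static const char *program_name;

    // Hypothetical fail() mirroring the calls above: print the message
    // under the tool name and exit rather than unwinding.
    static void fail(const std::string &Msg) {
      llvm::errs() << program_name << ": " << Msg << "\n";
      std::exit(1);
    }

    int main(int argc, char **argv) {
      program_name = argv[0];
      fail("File does not exist: example.o");  // never returns
    }
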
diff --git a/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp b/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
index d630087..8109ca4 100644
--- a/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
+++ b/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
@@ -40,7 +40,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/system_error.h"
-#include <cstdio>
+
#include <map>
#include <algorithm>
using namespace llvm;
@@ -463,11 +463,11 @@ static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
}
static void PrintSize(double Bits) {
- fprintf(stderr, "%.2f/%.2fB/%luW", Bits, Bits/8,(unsigned long)(Bits/32));
+ outs() << format("%.2f/%.2fB/%luW", Bits, Bits/8,(unsigned long)(Bits/32));
}
static void PrintSize(uint64_t Bits) {
- fprintf(stderr, "%lub/%.2fB/%luW", (unsigned long)Bits,
- (double)Bits/8, (unsigned long)(Bits/32));
+ outs() << format("%lub/%.2fB/%luW", (unsigned long)Bits,
+ (double)Bits/8, (unsigned long)(Bits/32));
}
@@ -483,7 +483,7 @@ static int AnalyzeBitcode() {
if (MemBuf->getBufferSize() & 3)
return Error("Bitcode stream should be a multiple of 4 bytes in length");
- const unsigned char *BufPtr = (unsigned char *)MemBuf->getBufferStart();
+ const unsigned char *BufPtr = (const unsigned char *)MemBuf->getBufferStart();
const unsigned char *EndBufPtr = BufPtr+MemBuf->getBufferSize();
// If we have a wrapper header, parse it and ignore the non-bc file contents.
@@ -556,7 +556,7 @@ static int AnalyzeBitcode() {
PrintSize(Stats.NumBits);
outs() << "\n";
double pct = (Stats.NumBits * 100.0) / BufferSizeBits;
- errs() << " Percent of file: " << format("%2.4f%%", pct) << "\n";
+ outs() << " Percent of file: " << format("%2.4f%%", pct) << "\n";
if (Stats.NumInstances > 1) {
outs() << " Average Size: ";
PrintSize(Stats.NumBits/(double)Stats.NumInstances);
@@ -588,24 +588,26 @@ static int AnalyzeBitcode() {
std::reverse(FreqPairs.begin(), FreqPairs.end());
outs() << "\tRecord Histogram:\n";
- fprintf(stderr, "\t\t Count # Bits %% Abv Record Kind\n");
+ outs() << "\t\t Count # Bits %% Abv Record Kind\n";
for (unsigned i = 0, e = FreqPairs.size(); i != e; ++i) {
const PerRecordStats &RecStats = Stats.CodeFreq[FreqPairs[i].second];
- fprintf(stderr, "\t\t%7d %9lu ", RecStats.NumInstances,
- (unsigned long)RecStats.TotalBits);
+ outs() << format("\t\t%7d %9lu",
+ RecStats.NumInstances,
+ (unsigned long)RecStats.TotalBits);
if (RecStats.NumAbbrev)
- fprintf(stderr, "%7.2f ",
- (double)RecStats.NumAbbrev/RecStats.NumInstances*100);
+ outs() <<
+ format("%7.2f ",
+ (double)RecStats.NumAbbrev/RecStats.NumInstances*100);
else
- fprintf(stderr, " ");
+ outs() << " ";
if (const char *CodeName =
GetCodeName(FreqPairs[i].second, I->first, StreamFile))
- fprintf(stderr, "%s\n", CodeName);
+ outs() << CodeName << "\n";
else
- fprintf(stderr, "UnknownCode%d\n", FreqPairs[i].second);
+ outs() << "UnknownCode" << FreqPairs[i].second << "\n";
}
outs() << "\n";
diff --git a/contrib/llvm/tools/llvm-extract/llvm-extract.cpp b/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
index 2ed11c5..ac82d98 100644
--- a/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
+++ b/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
@@ -18,7 +18,7 @@
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Transforms/IPO.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/IRReader.h"
#include "llvm/Support/ManagedStatic.h"
@@ -59,6 +59,19 @@ ExtractRegExpFuncs("rfunc", cl::desc("Specify function(s) to extract using a "
"regular expression"),
cl::ZeroOrMore, cl::value_desc("rfunction"));
+// ExtractAlias - The alias to extract from the module.
+static cl::list<std::string>
+ExtractAliases("alias", cl::desc("Specify alias to extract"),
+ cl::ZeroOrMore, cl::value_desc("alias"));
+
+
+// ExtractRegExpAliases - The aliases, matched via regular expression, to
+// extract from the module.
+static cl::list<std::string>
+ExtractRegExpAliases("ralias", cl::desc("Specify alias(es) to extract using a "
+ "regular expression"),
+ cl::ZeroOrMore, cl::value_desc("ralias"));
+
// ExtractGlobals - The globals to extract from the module.
static cl::list<std::string>
ExtractGlobals("glob", cl::desc("Specify global to extract"),
@@ -97,6 +110,40 @@ int main(int argc, char **argv) {
// Use SetVector to avoid duplicates.
SetVector<GlobalValue *> GVs;
+ // Figure out which aliases we should extract.
+ for (size_t i = 0, e = ExtractAliases.size(); i != e; ++i) {
+ GlobalAlias *GA = M->getNamedAlias(ExtractAliases[i]);
+ if (!GA) {
+ errs() << argv[0] << ": program doesn't contain alias named '"
+ << ExtractAliases[i] << "'!\n";
+ return 1;
+ }
+ GVs.insert(GA);
+ }
+
+ // Extract aliases via regular expression matching.
+ for (size_t i = 0, e = ExtractRegExpAliases.size(); i != e; ++i) {
+ std::string Error;
+ Regex RegEx(ExtractRegExpAliases[i]);
+    if (!RegEx.isValid(Error)) {
+      errs() << argv[0] << ": '" << ExtractRegExpAliases[i] << "' "
+        "invalid regex: " << Error << "\n";
+      return 1;
+    }
+ bool match = false;
+ for (Module::alias_iterator GA = M->alias_begin(), E = M->alias_end();
+ GA != E; GA++) {
+ if (RegEx.match(GA->getName())) {
+ GVs.insert(&*GA);
+ match = true;
+ }
+ }
+ if (!match) {
+ errs() << argv[0] << ": program doesn't contain global named '"
+ << ExtractRegExpAliases[i] << "'!\n";
+ return 1;
+ }
+ }
+
// Figure out which globals we should extract.
for (size_t i = 0, e = ExtractGlobals.size(); i != e; ++i) {
GlobalValue *GV = M->getNamedGlobal(ExtractGlobals[i]);
@@ -206,7 +253,7 @@ int main(int argc, char **argv) {
// In addition to deleting all other functions, we also want to spiff it
// up a little bit. Do this now.
PassManager Passes;
- Passes.add(new TargetData(M.get())); // Use correct TargetData
+ Passes.add(new DataLayout(M.get())); // Use correct DataLayout
std::vector<GlobalValue*> Gvs(GVs.begin(), GVs.end());
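
The new -alias/-ralias support leans on llvm::Regex. Its API reports pattern errors through isValid() rather than at match time, which is why the loop above validates first. A reduced sketch of that idiom, outside any Module context (names here are illustrative):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Regex.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static bool matchesPattern(StringRef Name, StringRef Pattern) {
      std::string Error;
      Regex RE(Pattern);
      // An invalid pattern silently matches nothing, so check first.
      if (!RE.isValid(Error)) {
        errs() << "invalid regex '" << Pattern << "': " << Error << "\n";
        return false;
      }
      return RE.match(Name);
    }

    int main() {
      return matchesPattern("my_alias", "my_.*") ? 0 : 1;
    }
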
diff --git a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
index 756221b..f7c3748 100644
--- a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
+++ b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
@@ -158,7 +158,8 @@ enum ActionType {
AC_AsLex,
AC_Assemble,
AC_Disassemble,
- AC_EDisassemble
+ AC_EDisassemble,
+ AC_MDisassemble
};
static cl::opt<ActionType>
@@ -172,6 +173,8 @@ Action(cl::desc("Action to perform:"),
"Disassemble strings of hex bytes"),
clEnumValN(AC_EDisassemble, "edis",
"Enhanced disassembly of strings of hex bytes"),
+ clEnumValN(AC_MDisassemble, "mdis",
+ "Marked up disassembly of strings of hex bytes"),
clEnumValEnd));
static const Target *GetTarget(const char *ProgName) {
@@ -402,8 +405,9 @@ int main(int argc, char **argv) {
OwningPtr<MCSubtargetInfo>
STI(TheTarget->createMCSubtargetInfo(TripleName, MCPU, FeaturesStr));
+ MCInstPrinter *IP;
if (FileType == OFT_AssemblyFile) {
- MCInstPrinter *IP =
+ IP =
TheTarget->createMCInstPrinter(OutputAsmVariant, *MAI, *MCII, *MRI, *STI);
MCCodeEmitter *CE = 0;
MCAsmBackend *MAB = 0;
@@ -436,6 +440,9 @@ int main(int argc, char **argv) {
case AC_Assemble:
Res = AssembleInput(ProgName, TheTarget, SrcMgr, Ctx, *Str, *MAI, *STI);
break;
+ case AC_MDisassemble:
+ IP->setUseMarkup(1);
+ // Fall through to do disassembly.
case AC_Disassemble:
Res = Disassembler::disassemble(*TheTarget, TripleName, *STI, *Str,
*Buffer, SrcMgr, Out->os());
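
The -mdis action is a one-line flag flip followed by a deliberate fall-through into the ordinary disassembly case. One caveat worth noting: IP is only assigned on the OFT_AssemblyFile path above, so whether the fall-through can ever see an uninitialized pointer depends on option combinations not visible in this hunk. A tiny self-contained illustration of the shared-handler fall-through shape (not llvm-mc code):

    #include <cstdio>

    enum Action { Disassemble, MarkedUpDisassemble };

    static void run(Action A) {
      bool UseMarkup = false;
      switch (A) {
      case MarkedUpDisassemble:
        UseMarkup = true;
        // FALL THROUGH: share the ordinary disassembly path.
      case Disassemble:
        std::printf("disassembling%s\n", UseMarkup ? " with markup" : "");
        break;
      }
    }

    int main() { run(MarkedUpDisassemble); return 0; }
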
diff --git a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
index 9afbd4d..0543e83 100644
--- a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
+++ b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
@@ -110,6 +110,9 @@ namespace {
cl::opt<bool> SizeSort("size-sort", cl::desc("Sort symbols by size"));
+ cl::opt<bool> WithoutAliases("without-aliases", cl::Hidden,
+ cl::desc("Exclude aliases from output"));
+
bool PrintAddress = true;
bool MultipleFiles = false;
@@ -256,7 +259,6 @@ static void DumpSymbolNameForGlobalValue(GlobalValue &GV) {
if (GV.hasPrivateLinkage() ||
GV.hasLinkerPrivateLinkage() ||
GV.hasLinkerPrivateWeakLinkage() ||
- GV.hasLinkerPrivateWeakDefAutoLinkage() ||
GV.hasAvailableExternallyLinkage())
return;
char TypeChar = TypeCharForSymbol(GV);
@@ -276,8 +278,9 @@ static void DumpSymbolNamesFromModule(Module *M) {
std::for_each (M->begin(), M->end(), DumpSymbolNameForGlobalValue);
std::for_each (M->global_begin(), M->global_end(),
DumpSymbolNameForGlobalValue);
- std::for_each (M->alias_begin(), M->alias_end(),
- DumpSymbolNameForGlobalValue);
+ if (!WithoutAliases)
+ std::for_each (M->alias_begin(), M->alias_end(),
+ DumpSymbolNameForGlobalValue);
SortAndPrintSymbolList();
}
diff --git a/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp b/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
index b431c76..13ea4e3 100644
--- a/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -94,6 +94,12 @@ static cl::alias
SectionHeadersShorter("h", cl::desc("Alias for --section-headers"),
cl::aliasopt(SectionHeaders));
+static cl::list<std::string>
+MAttrs("mattr",
+ cl::CommaSeparated,
+ cl::desc("Target specific attributes"),
+ cl::value_desc("a1,+a2,-a3,..."));
+
static StringRef ToolName;
static bool error(error_code ec) {
@@ -169,6 +175,15 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
if (!TheTarget)
return;
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (MAttrs.size()) {
+ SubtargetFeatures Features;
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
error_code ec;
for (section_iterator i = Obj->begin_sections(),
e = Obj->end_sections();
@@ -233,7 +248,7 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
}
OwningPtr<const MCSubtargetInfo> STI(
- TheTarget->createMCSubtargetInfo(TripleName, "", ""));
+ TheTarget->createMCSubtargetInfo(TripleName, "", FeaturesStr));
if (!STI) {
errs() << "error: no subtarget info for target " << TripleName << "\n";
diff --git a/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp b/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp
index 4006765..d2f5f0f 100644
--- a/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp
+++ b/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp
@@ -61,41 +61,38 @@ int main(int argc, char **argv) {
int exitCode = 0;
- // Make sure we don't exit with "unhandled exception".
- try {
-
- // Check the path name of the archive
- sys::Path ArchivePath;
- if (!ArchivePath.set(ArchiveName))
- throw std::string("Archive name invalid: ") + ArchiveName;
+ // Check the path name of the archive
+ sys::Path ArchivePath;
+ if (!ArchivePath.set(ArchiveName)) {
+ errs() << argv[0] << ": " << "Archive name invalid: " << ArchiveName <<
+ "\n";
+ return 1;
+ }
- // Make sure it exists, we don't create empty archives
- bool Exists;
- if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists)
- throw std::string("Archive file does not exist");
+  // Make sure it exists; we don't create empty archives.
+ bool Exists;
+ if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists) {
+    errs() << argv[0] << ": " << "Archive file does not exist: " <<
+            ArchivePath.str() << "\n";
+ return 1;
+ }
- std::string err_msg;
- std::auto_ptr<Archive>
- AutoArchive(Archive::OpenAndLoad(ArchivePath, Context, &err_msg));
- Archive* TheArchive = AutoArchive.get();
- if (!TheArchive)
- throw err_msg;
+ std::string err_msg;
+ std::auto_ptr<Archive>
+ AutoArchive(Archive::OpenAndLoad(ArchivePath, Context, &err_msg));
+ Archive* TheArchive = AutoArchive.get();
+ if (!TheArchive) {
+ errs() << argv[0] << ": " << err_msg << "\n";
+ return 1;
+ }
- if (TheArchive->writeToDisk(true, false, &err_msg ))
- throw err_msg;
+  if (TheArchive->writeToDisk(true, false, &err_msg)) {
+ errs() << argv[0] << ": " << err_msg << "\n";
+ return 1;
+ }
- if (Verbose)
- printSymbolTable(TheArchive);
+ if (Verbose)
+ printSymbolTable(TheArchive);
- } catch (const char* msg) {
- errs() << argv[0] << ": " << msg << "\n\n";
- exitCode = 1;
- } catch (const std::string& msg) {
- errs() << argv[0] << ": " << msg << "\n";
- exitCode = 2;
- } catch (...) {
- errs() << argv[0] << ": An unexpected unknown exception occurred.\n";
- exitCode = 3;
- }
return exitCode;
}
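
Note the difference from llvm-ar: here the archive stays in a std::auto_ptr, so each early return still releases it, while llvm-ar deletes manually at the end. auto_ptr was the pre-C++11 tool for this scope-bound cleanup (std::unique_ptr is its modern replacement). A stand-alone sketch of the shape, with Archive as a stand-in type:

    #include <memory>

    struct Archive { };                                      // stand-in type
    static Archive *openArchive() { return new Archive(); }  // hypothetical

    static int run() {
      // Scope-bound ownership: every early return below still deletes
      // the archive, replacing the old catch-all exception cleanup.
      std::auto_ptr<Archive> AutoArchive(openArchive());
      if (!AutoArchive.get())
        return 1;
      // ... operate on AutoArchive.get() ...
      return 0;
    }

    int main() { return run(); }
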
diff --git a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
index 95de8d8..7b5bd03 100644
--- a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
+++ b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
@@ -14,6 +14,8 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/ObjectImage.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
#include "llvm/Object/MachOObject.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
@@ -120,12 +122,14 @@ static int executeInput() {
for(unsigned i = 0, e = InputFileList.size(); i != e; ++i) {
// Load the input memory buffer.
OwningPtr<MemoryBuffer> InputBuffer;
+ OwningPtr<ObjectImage> LoadedObject;
if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFileList[i],
InputBuffer))
return Error("unable to read input: '" + ec.message() + "'");
- // Load the object file into it.
- if (Dyld.loadObject(InputBuffer.take())) {
+ // Load the object file
+ LoadedObject.reset(Dyld.loadObject(new ObjectBuffer(InputBuffer.take())));
+ if (!LoadedObject) {
return Error(Dyld.getErrorString());
}
}
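
The RuntimeDyld change is an ownership handoff: loadObject() now consumes an ObjectBuffer wrapping the raw MemoryBuffer and returns an ObjectImage that the caller must keep alive, hence the OwningPtr above. The call shape, reduced to one function (error handling elided):

    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/ExecutionEngine/ObjectBuffer.h"
    #include "llvm/ExecutionEngine/ObjectImage.h"
    #include "llvm/ExecutionEngine/RuntimeDyld.h"
    #include "llvm/Support/MemoryBuffer.h"
    using namespace llvm;

    static bool loadOne(RuntimeDyld &Dyld, MemoryBuffer *Input) {
      // ObjectBuffer takes ownership of Input; the returned ObjectImage
      // (owned here) keeps the loaded sections alive after the call.
      OwningPtr<ObjectImage> Loaded(Dyld.loadObject(new ObjectBuffer(Input)));
      return Loaded.get() != 0;
    }
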
diff --git a/contrib/llvm/tools/llvm-stress/llvm-stress.cpp b/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
index 31252dd..8473d94 100644
--- a/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
+++ b/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
@@ -126,6 +126,10 @@ public:
/// C'tor
Modifier(BasicBlock *Block, PieceTable *PT, Random *R):
BB(Block),PT(PT),Ran(R),Context(BB->getContext()) {}
+
+ /// virtual D'tor to silence warnings.
+ virtual ~Modifier() {}
+
/// Add a new instruction.
virtual void Act() = 0;
  /// Add N new instructions.
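
The new destructor addresses a general C++ rule: a class with virtual member functions that may be deleted through a base pointer needs a virtual destructor, and compilers warn about the pattern (-Wnon-virtual-dtor and friends). A minimal demonstration, unrelated to llvm-stress itself:

    struct Base {
      virtual void Act() = 0;
      virtual ~Base() {}  // without this, `delete P` below is undefined
    };

    struct Derived : Base {
      virtual void Act() {}
    };

    int main() {
      Base *P = new Derived();
      P->Act();
      delete P;  // dispatches to ~Derived via the virtual destructor
      return 0;
    }
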
diff --git a/contrib/llvm/tools/opt/opt.cpp b/contrib/llvm/tools/opt/opt.cpp
index 4ada7d1..bac0d46 100644
--- a/contrib/llvm/tools/opt/opt.cpp
+++ b/contrib/llvm/tools/opt/opt.cpp
@@ -13,17 +13,18 @@
//===----------------------------------------------------------------------===//
#include "llvm/LLVMContext.h"
+#include "llvm/DataLayout.h"
#include "llvm/DebugInfo.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/CallGraphSCCPass.h"
+#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/RegionPass.h"
#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/StringSet.h"
@@ -36,7 +37,10 @@
#include "llvm/Support/PluginLoader.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/MC/SubtargetFeature.h"
#include "llvm/LinkAllPasses.h"
#include "llvm/LinkAllVMCore.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
@@ -478,6 +482,75 @@ static void AddStandardLinkPasses(PassManagerBase &PM) {
/*RunInliner=*/ !DisableInline);
}
+//===----------------------------------------------------------------------===//
+// CodeGen-related helper functions.
+//
+static TargetOptions GetTargetOptions() {
+ TargetOptions Options;
+ Options.LessPreciseFPMADOption = EnableFPMAD;
+ Options.NoFramePointerElim = DisableFPElim;
+ Options.NoFramePointerElimNonLeaf = DisableFPElimNonLeaf;
+ Options.AllowFPOpFusion = FuseFPOps;
+ Options.UnsafeFPMath = EnableUnsafeFPMath;
+ Options.NoInfsFPMath = EnableNoInfsFPMath;
+ Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+ Options.HonorSignDependentRoundingFPMathOption =
+ EnableHonorSignDependentRoundingFPMath;
+ Options.UseSoftFloat = GenerateSoftFloatCalls;
+ if (FloatABIForCalls != FloatABI::Default)
+ Options.FloatABIType = FloatABIForCalls;
+ Options.NoZerosInBSS = DontPlaceZerosInBSS;
+ Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+ Options.DisableTailCalls = DisableTailCalls;
+ Options.StackAlignmentOverride = OverrideStackAlignment;
+ Options.RealignStack = EnableRealignStack;
+ Options.TrapFuncName = TrapFuncName;
+ Options.PositionIndependentExecutable = EnablePIE;
+ Options.EnableSegmentedStacks = SegmentedStacks;
+ Options.UseInitArray = UseInitArray;
+ Options.SSPBufferSize = SSPBufferSize;
+ return Options;
+}
+
+CodeGenOpt::Level GetCodeGenOptLevel() {
+ if (OptLevelO1)
+ return CodeGenOpt::Less;
+ if (OptLevelO2)
+ return CodeGenOpt::Default;
+ if (OptLevelO3)
+ return CodeGenOpt::Aggressive;
+ return CodeGenOpt::None;
+}
+
+// Returns the TargetMachine instance or zero if no triple is provided.
+static TargetMachine* GetTargetMachine(std::string TripleStr) {
+ if (TripleStr.empty())
+ return 0;
+
+ // Get the target specific parser.
+ std::string Error;
+ Triple TheTriple(Triple::normalize(TargetTriple));
+
+ const Target *TheTarget = TargetRegistry::lookupTarget(MArch, TheTriple,
+ Error);
+ if (!TheTarget) {
+ return 0;
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (MAttrs.size()) {
+ SubtargetFeatures Features;
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ return TheTarget->createTargetMachine(TheTriple.getTriple(),
+ MCPU, FeaturesStr, GetTargetOptions(),
+ RelocModel, CMModel,
+ GetCodeGenOptLevel());
+}
//===----------------------------------------------------------------------===//
// main for opt
@@ -492,6 +565,9 @@ int main(int argc, char **argv) {
llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
LLVMContext &Context = getGlobalContext();
+ InitializeAllTargets();
+ InitializeAllTargetMCs();
+
// Initialize passes
PassRegistry &Registry = *PassRegistry::getPassRegistry();
initializeCore(Registry);
@@ -513,10 +589,6 @@ int main(int argc, char **argv) {
return 1;
}
- // Allocate a full target machine description only if necessary.
- // FIXME: The choice of target should be controllable on the command line.
- std::auto_ptr<TargetMachine> target;
-
SMDiagnostic Err;
// Load the input module...
@@ -572,22 +644,28 @@ int main(int argc, char **argv) {
TLI->disableAllFunctions();
Passes.add(TLI);
- // Add an appropriate TargetData instance for this module.
- TargetData *TD = 0;
+ // Add an appropriate DataLayout instance for this module.
+ DataLayout *TD = 0;
const std::string &ModuleDataLayout = M.get()->getDataLayout();
if (!ModuleDataLayout.empty())
- TD = new TargetData(ModuleDataLayout);
+ TD = new DataLayout(ModuleDataLayout);
else if (!DefaultDataLayout.empty())
- TD = new TargetData(DefaultDataLayout);
+ TD = new DataLayout(DefaultDataLayout);
if (TD)
Passes.add(TD);
+ std::auto_ptr<TargetMachine> TM(GetTargetMachine(TargetTriple));
+ if (TM.get()) {
+ Passes.add(new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo()));
+ }
+
OwningPtr<FunctionPassManager> FPasses;
if (OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz || OptLevelO3) {
FPasses.reset(new FunctionPassManager(M.get()));
if (TD)
- FPasses->add(new TargetData(*TD));
+ FPasses->add(new DataLayout(*TD));
}
if (PrintBreakpoints) {
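
opt previously never allocated a TargetMachine; it now builds one from the triple so scalar and vector target transform info can reach the optimizer. The lookup in GetTargetMachine() only works after the targets are registered, which is what the new InitializeAllTargets()/InitializeAllTargetMCs() calls in main() do. A reduced sketch of that registration-then-lookup sequence:

    #include <string>
    #include "llvm/ADT/Triple.h"
    #include "llvm/Support/TargetRegistry.h"
    #include "llvm/Support/TargetSelect.h"
    using namespace llvm;

    static const Target *findTarget(const std::string &TripleStr,
                                    std::string &Error) {
      // Register every compiled-in backend with the TargetRegistry;
      // lookupTarget() finds nothing until these run.
      InitializeAllTargets();
      InitializeAllTargetMCs();
      Triple TheTriple(Triple::normalize(TripleStr));
      return TargetRegistry::lookupTarget(/*ArchName=*/"", TheTriple, Error);
    }
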
diff --git a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 026d47f..ee83311 100644
--- a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -77,7 +77,7 @@
//
// Some targets need a custom way to parse operands, some specific instructions
// can contain arguments that can represent processor flags and other kinds of
-// identifiers that need to be mapped to specific valeus in the final encoded
+// identifiers that need to be mapped to specific values in the final encoded
// instructions. The target specific custom operand parsing works in the
// following way:
//
@@ -199,7 +199,7 @@ public:
return Kind >= UserClass0;
}
- /// isRelatedTo - Check whether this class is "related" to \arg RHS. Classes
+ /// isRelatedTo - Check whether this class is "related" to \p RHS. Classes
/// are related if they are in the same class hierarchy.
bool isRelatedTo(const ClassInfo &RHS) const {
// Tokens are only related to tokens.
@@ -238,7 +238,7 @@ public:
return Root == RHSRoot;
}
- /// isSubsetOf - Test whether this class is a subset of \arg RHS;
+ /// isSubsetOf - Test whether this class is a subset of \p RHS.
bool isSubsetOf(const ClassInfo &RHS) const {
// This is a subset of RHS if it is the same class...
if (this == &RHS)
@@ -279,6 +279,15 @@ public:
}
};
+namespace {
+/// Sort ClassInfo pointers independently of pointer value.
+struct LessClassInfoPtr {
+ bool operator()(const ClassInfo *LHS, const ClassInfo *RHS) const {
+ return *LHS < *RHS;
+ }
+};
+}
+
/// MatchableInfo - Helper class for storing the necessary information for an
/// instruction or alias which is capable of being matched.
struct MatchableInfo {
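
LessClassInfoPtr, and the LessRecordByID comparator used for RegisterClasses below, exist for determinism: an ordinary std::map keyed on pointers iterates in address order, which varies from run to run and made TableGen's output unstable. Comparing through the pointee gives a reproducible order. The idiom in isolation:

    #include <map>
    #include <string>

    struct Info { std::string Name; int ID; };

    // Order keys by a stable property of the pointee, not by address,
    // so map iteration order is reproducible across runs.
    struct LessInfoPtr {
      bool operator()(const Info *LHS, const Info *RHS) const {
        return LHS->ID < RHS->ID;
      }
    };

    typedef std::map<const Info *, unsigned, LessInfoPtr> InfoMap;
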
@@ -416,7 +425,7 @@ struct MatchableInfo {
SmallVector<SubtargetFeatureInfo*, 4> RequiredFeatures;
/// ConversionFnKind - The enum value which is passed to the generated
- /// ConvertToMCInst to convert parsed operands into an MCInst for this
+ /// convertToMCInst to convert parsed operands into an MCInst for this
/// function.
std::string ConversionFnKind;
@@ -488,11 +497,20 @@ struct MatchableInfo {
return false;
}
+ // Give matches that require more features higher precedence. This is useful
+ // because we cannot define AssemblerPredicates with the negation of
+ // processor features. For example, ARM v6 "nop" may be either a HINT or
+ // MOV. With v6, we want to match HINT. The assembler has no way to
+ // predicate MOV under "NoV6", but HINT will always match first because it
+ // requires V6 while MOV does not.
+ if (RequiredFeatures.size() != RHS.RequiredFeatures.size())
+ return RequiredFeatures.size() > RHS.RequiredFeatures.size();
+
return false;
}
/// couldMatchAmbiguouslyWith - Check whether this matchable could
- /// ambiguously match the same set of operands as \arg RHS (without being a
+ /// ambiguously match the same set of operands as \p RHS (without being a
/// strictly superior match).
bool couldMatchAmbiguouslyWith(const MatchableInfo &RHS) {
// The primary comparator is the instruction mnemonic.
@@ -590,7 +608,8 @@ public:
std::vector<OperandMatchEntry> OperandMatchInfo;
/// Map of Register records to their class information.
- std::map<Record*, ClassInfo*> RegisterClasses;
+ typedef std::map<Record*, ClassInfo*, LessRecordByID> RegisterClassesTy;
+ RegisterClassesTy RegisterClasses;
/// Map of Predicate records to their subtarget information.
std::map<Record*, SubtargetFeatureInfo*> SubtargetFeatures;
@@ -666,22 +685,22 @@ void MatchableInfo::dump() {
}
static std::pair<StringRef, StringRef>
-parseTwoOperandConstraint(StringRef S, SMLoc Loc) {
+parseTwoOperandConstraint(StringRef S, ArrayRef<SMLoc> Loc) {
// Split via the '='.
std::pair<StringRef, StringRef> Ops = S.split('=');
if (Ops.second == "")
- throw TGError(Loc, "missing '=' in two-operand alias constraint");
+ PrintFatalError(Loc, "missing '=' in two-operand alias constraint");
// Trim whitespace and the leading '$' on the operand names.
size_t start = Ops.first.find_first_of('$');
if (start == std::string::npos)
- throw TGError(Loc, "expected '$' prefix on asm operand name");
+ PrintFatalError(Loc, "expected '$' prefix on asm operand name");
Ops.first = Ops.first.slice(start + 1, std::string::npos);
size_t end = Ops.first.find_last_of(" \t");
Ops.first = Ops.first.slice(0, end);
// Now the second operand.
start = Ops.second.find_first_of('$');
if (start == std::string::npos)
- throw TGError(Loc, "expected '$' prefix on asm operand name");
+ PrintFatalError(Loc, "expected '$' prefix on asm operand name");
Ops.second = Ops.second.slice(start + 1, std::string::npos);
end = Ops.second.find_last_of(" \t");
Ops.first = Ops.first.slice(0, end);
@@ -697,11 +716,11 @@ void MatchableInfo::formTwoOperandAlias(StringRef Constraint) {
int SrcAsmOperand = findAsmOperandNamed(Ops.first);
int DstAsmOperand = findAsmOperandNamed(Ops.second);
if (SrcAsmOperand == -1)
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"unknown source two-operand alias operand '" +
Ops.first.str() + "'.");
if (DstAsmOperand == -1)
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"unknown destination two-operand alias operand '" +
Ops.second.str() + "'.");
@@ -833,15 +852,15 @@ void MatchableInfo::tokenizeAsmString(const AsmMatcherInfo &Info) {
// The first token of the instruction is the mnemonic, which must be a
// simple string, not a $foo variable or a singleton register.
if (AsmOperands.empty())
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"Instruction '" + TheDef->getName() + "' has no tokens");
Mnemonic = AsmOperands[0].Token;
if (Mnemonic.empty())
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"Missing instruction mnemonic");
// FIXME : Check and raise an error if it is a register.
if (Mnemonic[0] == '$')
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"Invalid instruction mnemonic '" + Mnemonic.str() + "'!");
// Remove the first operand, it is tracked in the mnemonic field.
@@ -851,12 +870,12 @@ void MatchableInfo::tokenizeAsmString(const AsmMatcherInfo &Info) {
bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const {
// Reject matchables with no .s string.
if (AsmString.empty())
- throw TGError(TheDef->getLoc(), "instruction with empty asm string");
+ PrintFatalError(TheDef->getLoc(), "instruction with empty asm string");
// Reject any matchables with a newline in them, they should be marked
// isCodeGenOnly if they are pseudo instructions.
if (AsmString.find('\n') != std::string::npos)
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"multiline instruction is not valid for the asmparser, "
"mark it isCodeGenOnly");
@@ -864,7 +883,7 @@ bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const {
// has one line.
if (!CommentDelimiter.empty() &&
StringRef(AsmString).find(CommentDelimiter) != StringRef::npos)
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"asmstring for instruction has comment character in it, "
"mark it isCodeGenOnly");
@@ -878,7 +897,7 @@ bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const {
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
StringRef Tok = AsmOperands[i].Token;
if (Tok[0] == '$' && Tok.find(':') != StringRef::npos)
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"matchable with operand modifier '" + Tok.str() +
"' not supported by asm matcher. Mark isCodeGenOnly!");
@@ -886,7 +905,7 @@ bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const {
// We reject aliases and ignore instructions for now.
if (Tok[0] == '$' && !OperandNames.insert(Tok).second) {
if (!Hack)
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"ERROR: matchable with tied operand '" + Tok.str() +
"' can never be matched!");
// FIXME: Should reject these. The ARM backend hits this with $lane in a
@@ -974,7 +993,7 @@ AsmMatcherInfo::getOperandClass(const CGIOperandList::OperandInfo &OI,
int SubOpIdx) {
Record *Rec = OI.Rec;
if (SubOpIdx != -1)
- Rec = dynamic_cast<DefInit*>(OI.MIOperandInfo->getArg(SubOpIdx))->getDef();
+ Rec = cast<DefInit>(OI.MIOperandInfo->getArg(SubOpIdx))->getDef();
return getOperandClass(Rec, SubOpIdx);
}
@@ -985,10 +1004,10 @@ AsmMatcherInfo::getOperandClass(Record *Rec, int SubOpIdx) {
// use it, else just fall back to the underlying register class.
const RecordVal *R = Rec->getValue("ParserMatchClass");
if (R == 0 || R->getValue() == 0)
- throw "Record `" + Rec->getName() +
- "' does not have a ParserMatchClass!\n";
+ PrintFatalError("Record `" + Rec->getName() +
+ "' does not have a ParserMatchClass!\n");
- if (DefInit *DI= dynamic_cast<DefInit*>(R->getValue())) {
+ if (DefInit *DI= dyn_cast<DefInit>(R->getValue())) {
Record *MatchClass = DI->getDef();
if (ClassInfo *CI = AsmOperandClasses[MatchClass])
return CI;
@@ -997,26 +1016,28 @@ AsmMatcherInfo::getOperandClass(Record *Rec, int SubOpIdx) {
// No custom match class. Just use the register class.
Record *ClassRec = Rec->getValueAsDef("RegClass");
if (!ClassRec)
- throw TGError(Rec->getLoc(), "RegisterOperand `" + Rec->getName() +
+ PrintFatalError(Rec->getLoc(), "RegisterOperand `" + Rec->getName() +
"' has no associated register class!\n");
if (ClassInfo *CI = RegisterClassClasses[ClassRec])
return CI;
- throw TGError(Rec->getLoc(), "register class has no class info!");
+ PrintFatalError(Rec->getLoc(), "register class has no class info!");
}
if (Rec->isSubClassOf("RegisterClass")) {
if (ClassInfo *CI = RegisterClassClasses[Rec])
return CI;
- throw TGError(Rec->getLoc(), "register class has no class info!");
+ PrintFatalError(Rec->getLoc(), "register class has no class info!");
}
- assert(Rec->isSubClassOf("Operand") && "Unexpected operand!");
+ if (!Rec->isSubClassOf("Operand"))
+ PrintFatalError(Rec->getLoc(), "Operand `" + Rec->getName() +
+ "' does not derive from class Operand!\n");
Record *MatchClass = Rec->getValueAsDef("ParserMatchClass");
if (ClassInfo *CI = AsmOperandClasses[MatchClass])
return CI;
- throw TGError(Rec->getLoc(), "operand has no match class!");
+ PrintFatalError(Rec->getLoc(), "operand has no match class!");
}
void AsmMatcherInfo::
@@ -1164,7 +1185,7 @@ void AsmMatcherInfo::buildOperandClasses() {
ListInit *Supers = (*it)->getValueAsListInit("SuperClasses");
for (unsigned i = 0, e = Supers->getSize(); i != e; ++i) {
- DefInit *DI = dynamic_cast<DefInit*>(Supers->getElement(i));
+ DefInit *DI = dyn_cast<DefInit>(Supers->getElement(i));
if (!DI) {
PrintError((*it)->getLoc(), "Invalid super class reference!");
continue;
@@ -1182,33 +1203,31 @@ void AsmMatcherInfo::buildOperandClasses() {
// Get or construct the predicate method name.
Init *PMName = (*it)->getValueInit("PredicateMethod");
- if (StringInit *SI = dynamic_cast<StringInit*>(PMName)) {
+ if (StringInit *SI = dyn_cast<StringInit>(PMName)) {
CI->PredicateMethod = SI->getValue();
} else {
- assert(dynamic_cast<UnsetInit*>(PMName) &&
- "Unexpected PredicateMethod field!");
+ assert(isa<UnsetInit>(PMName) && "Unexpected PredicateMethod field!");
CI->PredicateMethod = "is" + CI->ClassName;
}
// Get or construct the render method name.
Init *RMName = (*it)->getValueInit("RenderMethod");
- if (StringInit *SI = dynamic_cast<StringInit*>(RMName)) {
+ if (StringInit *SI = dyn_cast<StringInit>(RMName)) {
CI->RenderMethod = SI->getValue();
} else {
- assert(dynamic_cast<UnsetInit*>(RMName) &&
- "Unexpected RenderMethod field!");
+ assert(isa<UnsetInit>(RMName) && "Unexpected RenderMethod field!");
CI->RenderMethod = "add" + CI->ClassName + "Operands";
}
// Get the parse method name or leave it as empty.
Init *PRMName = (*it)->getValueInit("ParserMethod");
- if (StringInit *SI = dynamic_cast<StringInit*>(PRMName))
+ if (StringInit *SI = dyn_cast<StringInit>(PRMName))
CI->ParserMethod = SI->getValue();
// Get the diagnostic type or leave it as empty.
// Get the parse method name or leave it as empty.
Init *DiagnosticType = (*it)->getValueInit("DiagnosticType");
- if (StringInit *SI = dynamic_cast<StringInit*>(DiagnosticType))
+ if (StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
CI->DiagnosticType = SI->getValue();
AsmOperandClasses[*it] = CI;
@@ -1228,7 +1247,8 @@ void AsmMatcherInfo::buildOperandMatchInfo() {
/// Map containing a mask with all operands indices that can be found for
/// that class inside a instruction.
- std::map<ClassInfo*, unsigned> OpClassMask;
+ typedef std::map<ClassInfo*, unsigned, LessClassInfoPtr> OpClassMaskTy;
+ OpClassMaskTy OpClassMask;
for (std::vector<MatchableInfo*>::const_iterator it =
Matchables.begin(), ie = Matchables.end();
@@ -1247,7 +1267,7 @@ void AsmMatcherInfo::buildOperandMatchInfo() {
}
// Generate operand match info for each mnemonic/operand class pair.
- for (std::map<ClassInfo*, unsigned>::iterator iit = OpClassMask.begin(),
+ for (OpClassMaskTy::iterator iit = OpClassMask.begin(),
iie = OpClassMask.end(); iit != iie; ++iit) {
unsigned OpMask = iit->second;
ClassInfo *CI = iit->first;
@@ -1267,7 +1287,7 @@ void AsmMatcherInfo::buildInfo() {
continue;
if (Pred->getName().empty())
- throw TGError(Pred->getLoc(), "Predicate has no name!");
+ PrintFatalError(Pred->getLoc(), "Predicate has no name!");
unsigned FeatureNo = SubtargetFeatures.size();
SubtargetFeatures[Pred] = new SubtargetFeatureInfo(Pred, FeatureNo);
@@ -1448,7 +1468,7 @@ void AsmMatcherInfo::buildInfo() {
ClassInfo *FromClass = getTokenClass(Rec->getValueAsString("FromToken"));
ClassInfo *ToClass = getTokenClass(Rec->getValueAsString("ToToken"));
if (FromClass == ToClass)
- throw TGError(Rec->getLoc(),
+ PrintFatalError(Rec->getLoc(),
"error: Destination value identical to source value.");
FromClass->SuperClasses.push_back(ToClass);
}
@@ -1470,7 +1490,7 @@ buildInstructionOperandReference(MatchableInfo *II,
// Map this token to an operand.
unsigned Idx;
if (!Operands.hasOperandNamed(OperandName, Idx))
- throw TGError(II->TheDef->getLoc(), "error: unable to find operand: '" +
+ PrintFatalError(II->TheDef->getLoc(), "error: unable to find operand: '" +
OperandName.str() + "'");
// If the instruction operand has multiple suboperands, but the parser
@@ -1541,7 +1561,7 @@ void AsmMatcherInfo::buildAliasOperandReference(MatchableInfo *II,
return;
}
- throw TGError(II->TheDef->getLoc(), "error: unable to find operand: '" +
+ PrintFatalError(II->TheDef->getLoc(), "error: unable to find operand: '" +
OperandName.str() + "'");
}
@@ -1563,7 +1583,7 @@ void MatchableInfo::buildInstructionResultOperands() {
// Find out what operand from the asmparser this MCInst operand comes from.
int SrcOperand = findAsmOperandNamed(OpInfo.Name);
if (OpInfo.Name.empty() || SrcOperand == -1)
- throw TGError(TheDef->getLoc(), "Instruction '" +
+ PrintFatalError(TheDef->getLoc(), "Instruction '" +
TheDef->getName() + "' has operand '" + OpInfo.Name +
"' that doesn't appear in asm string!");
@@ -1615,7 +1635,7 @@ void MatchableInfo::buildAliasResultOperands() {
StringRef Name = CGA.ResultOperands[AliasOpNo].getName();
int SrcOperand = findAsmOperand(Name, SubIdx);
if (SrcOperand == -1)
- throw TGError(TheDef->getLoc(), "Instruction '" +
+ PrintFatalError(TheDef->getLoc(), "Instruction '" +
TheDef->getName() + "' has operand '" + OpName +
"' that doesn't appear in asm string!");
unsigned NumOperands = (SubIdx == -1 ? OpInfo->MINumOperands : 1);
@@ -1638,35 +1658,85 @@ void MatchableInfo::buildAliasResultOperands() {
}
}
-static void emitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName,
- std::vector<MatchableInfo*> &Infos,
- raw_ostream &OS) {
- // Write the convert function to a separate stream, so we can drop it after
- // the enum.
- std::string ConvertFnBody;
- raw_string_ostream CvtOS(ConvertFnBody);
+static unsigned getConverterOperandID(const std::string &Name,
+ SetVector<std::string> &Table,
+ bool &IsNew) {
+ IsNew = Table.insert(Name);
- // Function we have already generated.
- std::set<std::string> GeneratedFns;
+ unsigned ID = IsNew ? Table.size() - 1 :
+ std::find(Table.begin(), Table.end(), Name) - Table.begin();
- // Start the unified conversion function.
- CvtOS << "bool " << Target.getName() << ClassName << "::\n";
- CvtOS << "ConvertToMCInst(unsigned Kind, MCInst &Inst, "
- << "unsigned Opcode,\n"
- << " const SmallVectorImpl<MCParsedAsmOperand*"
- << "> &Operands) {\n";
- CvtOS << " Inst.setOpcode(Opcode);\n";
- CvtOS << " switch (Kind) {\n";
- CvtOS << " default:\n";
+ assert(ID < Table.size());
+
+ return ID;
+}
- // Start the enum, which we will generate inline.
- OS << "// Unified function for converting operands to MCInst instances.\n\n";
- OS << "enum ConversionKind {\n";
+static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
+ std::vector<MatchableInfo*> &Infos,
+ raw_ostream &OS) {
+ SetVector<std::string> OperandConversionKinds;
+ SetVector<std::string> InstructionConversionKinds;
+ std::vector<std::vector<uint8_t> > ConversionTable;
+ size_t MaxRowLength = 2; // minimum is custom converter plus terminator.
// TargetOperandClass - This is the target's operand class, like X86Operand.
std::string TargetOperandClass = Target.getName() + "Operand";
+ // Write the convert function to a separate stream, so we can drop it after
+ // the enum. We'll build up the conversion handlers for the individual
+ // operand types opportunistically as we encounter them.
+ std::string ConvertFnBody;
+ raw_string_ostream CvtOS(ConvertFnBody);
+ // Start the unified conversion function.
+ CvtOS << "void " << Target.getName() << ClassName << "::\n"
+ << "convertToMCInst(unsigned Kind, MCInst &Inst, "
+ << "unsigned Opcode,\n"
+ << " const SmallVectorImpl<MCParsedAsmOperand*"
+ << "> &Operands) {\n"
+ << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
+ << " const uint8_t *Converter = ConversionTable[Kind];\n"
+ << " Inst.setOpcode(Opcode);\n"
+ << " for (const uint8_t *p = Converter; *p; p+= 2) {\n"
+ << " switch (*p) {\n"
+ << " default: llvm_unreachable(\"invalid conversion entry!\");\n"
+ << " case CVT_Reg:\n"
+ << " static_cast<" << TargetOperandClass
+ << "*>(Operands[*(p + 1)])->addRegOperands(Inst, 1);\n"
+ << " break;\n"
+ << " case CVT_Tied:\n"
+ << " Inst.addOperand(Inst.getOperand(*(p + 1)));\n"
+ << " break;\n";
+
+ std::string OperandFnBody;
+ raw_string_ostream OpOS(OperandFnBody);
+ // Start the operand number lookup function.
+ OpOS << "void " << Target.getName() << ClassName << "::\n"
+ << "convertToMapAndConstraints(unsigned Kind,\n";
+ OpOS.indent(27);
+ OpOS << "const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {\n"
+ << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
+ << " unsigned NumMCOperands = 0;\n"
+ << " const uint8_t *Converter = ConversionTable[Kind];\n"
+ << " for (const uint8_t *p = Converter; *p; p+= 2) {\n"
+ << " switch (*p) {\n"
+ << " default: llvm_unreachable(\"invalid conversion entry!\");\n"
+ << " case CVT_Reg:\n"
+ << " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+ << " Operands[*(p + 1)]->setConstraint(\"m\");\n"
+ << " ++NumMCOperands;\n"
+ << " break;\n"
+ << " case CVT_Tied:\n"
+ << " ++NumMCOperands;\n"
+ << " break;\n";
+
+ // Pre-populate the operand conversion kinds with the standard always
+ // available entries.
+ OperandConversionKinds.insert("CVT_Done");
+ OperandConversionKinds.insert("CVT_Reg");
+ OperandConversionKinds.insert("CVT_Tied");
+ enum { CVT_Done, CVT_Reg, CVT_Tied };
+
for (std::vector<MatchableInfo*>::const_iterator it = Infos.begin(),
ie = Infos.end(); it != ie; ++it) {
MatchableInfo &II = **it;
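
This is the heart of the emitter rewrite: instead of generating one conversion function per signature, it emits a byte table with one row per instruction conversion kind, each row holding (operand-converter, operand-index) pairs terminated by CVT_Done, plus a single interpreter loop that walks the row. A reduced model of what the generated code looks like at runtime:

    #include <cstdio>

    enum { CVT_Done, CVT_Reg, CVT_Tied };  // mirrors the emitted enum

    // One row per conversion signature: (kind, index) pairs, terminated
    // by CVT_Done (0), exactly like the emitted ConversionTable.
    static const unsigned char ConversionTable[][5] = {
      { CVT_Reg, 1, CVT_Tied, 0, CVT_Done },
    };

    static void convertToMCInst(unsigned Kind) {
      for (const unsigned char *p = ConversionTable[Kind]; *p; p += 2) {
        switch (*p) {
        case CVT_Reg:  std::printf("render reg operand %u\n", p[1]); break;
        case CVT_Tied: std::printf("copy MCOperand %u\n", p[1]);     break;
        }
      }
    }

    int main() { convertToMCInst(0); return 0; }
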
@@ -1679,24 +1749,35 @@ static void emitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName,
II.ConversionFnKind = Signature;
// Check if we have already generated this signature.
- if (!GeneratedFns.insert(Signature).second)
+ if (!InstructionConversionKinds.insert(Signature))
continue;
- // If not, emit it now. Add to the enum list.
- OS << " " << Signature << ",\n";
+ // Remember this converter for the kind enum.
+ unsigned KindID = OperandConversionKinds.size();
+ OperandConversionKinds.insert("CVT_" + AsmMatchConverter);
+
+ // Add the converter row for this instruction.
+ ConversionTable.push_back(std::vector<uint8_t>());
+ ConversionTable.back().push_back(KindID);
+ ConversionTable.back().push_back(CVT_Done);
+
+ // Add the handler to the conversion driver function.
+ CvtOS << " case CVT_" << AsmMatchConverter << ":\n"
+ << " " << AsmMatchConverter << "(Inst, Operands);\n"
+ << " break;\n";
- CvtOS << " case " << Signature << ":\n";
- CvtOS << " return " << AsmMatchConverter
- << "(Inst, Opcode, Operands);\n";
+ // FIXME: Handle the operand number lookup for custom match functions.
continue;
}
// Build the conversion function signature.
std::string Signature = "Convert";
- std::string CaseBody;
- raw_string_ostream CaseOS(CaseBody);
+
+ std::vector<uint8_t> ConversionRow;
// Compute the convert enum and the case body.
+    MaxRowLength = std::max(MaxRowLength, II.ResOperands.size()*2 + 1);
+
for (unsigned i = 0, e = II.ResOperands.size(); i != e; ++i) {
const MatchableInfo::ResOperand &OpInfo = II.ResOperands[i];
@@ -1709,74 +1790,180 @@ static void emitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName,
// Registers are always converted the same, don't duplicate the
// conversion function based on them.
Signature += "__";
- if (Op.Class->isRegisterClass())
- Signature += "Reg";
- else
- Signature += Op.Class->ClassName;
+ std::string Class;
+ Class = Op.Class->isRegisterClass() ? "Reg" : Op.Class->ClassName;
+ Signature += Class;
Signature += utostr(OpInfo.MINumOperands);
Signature += "_" + itostr(OpInfo.AsmOperandNum);
- CaseOS << " ((" << TargetOperandClass << "*)Operands["
- << (OpInfo.AsmOperandNum+1) << "])->" << Op.Class->RenderMethod
- << "(Inst, " << OpInfo.MINumOperands << ");\n";
+ // Add the conversion kind, if necessary, and get the associated ID
+          // (the index of its entry in the vector).
+ std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" :
+ Op.Class->RenderMethod);
+
+ bool IsNewConverter = false;
+ unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+ IsNewConverter);
+
+ // Add the operand entry to the instruction kind conversion row.
+ ConversionRow.push_back(ID);
+ ConversionRow.push_back(OpInfo.AsmOperandNum + 1);
+
+ if (!IsNewConverter)
+ break;
+
+ // This is a new operand kind. Add a handler for it to the
+ // converter driver.
+ CvtOS << " case " << Name << ":\n"
+ << " static_cast<" << TargetOperandClass
+ << "*>(Operands[*(p + 1)])->"
+ << Op.Class->RenderMethod << "(Inst, " << OpInfo.MINumOperands
+ << ");\n"
+ << " break;\n";
+
+ // Add a handler for the operand number lookup.
+ OpOS << " case " << Name << ":\n"
+ << " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+ << " Operands[*(p + 1)]->setConstraint(\"m\");\n"
+ << " NumMCOperands += " << OpInfo.MINumOperands << ";\n"
+ << " break;\n";
break;
}
-
case MatchableInfo::ResOperand::TiedOperand: {
// If this operand is tied to a previous one, just copy the MCInst
      // operand from the earlier one. We can only tie single MCOperand values.
//assert(OpInfo.MINumOperands == 1 && "Not a singular MCOperand");
unsigned TiedOp = OpInfo.TiedOperandNum;
assert(i > TiedOp && "Tied operand precedes its target!");
- CaseOS << " Inst.addOperand(Inst.getOperand(" << TiedOp << "));\n";
Signature += "__Tie" + utostr(TiedOp);
+ ConversionRow.push_back(CVT_Tied);
+ ConversionRow.push_back(TiedOp);
+ // FIXME: Handle the operand number lookup for tied operands.
break;
}
case MatchableInfo::ResOperand::ImmOperand: {
int64_t Val = OpInfo.ImmVal;
- CaseOS << " Inst.addOperand(MCOperand::CreateImm(" << Val << "));\n";
- Signature += "__imm" + itostr(Val);
+ std::string Ty = "imm_" + itostr(Val);
+ Signature += "__" + Ty;
+
+ std::string Name = "CVT_" + Ty;
+ bool IsNewConverter = false;
+ unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+ IsNewConverter);
+ // Add the operand entry to the instruction kind conversion row.
+ ConversionRow.push_back(ID);
+ ConversionRow.push_back(0);
+
+ if (!IsNewConverter)
+ break;
+
+ CvtOS << " case " << Name << ":\n"
+ << " Inst.addOperand(MCOperand::CreateImm(" << Val << "));\n"
+ << " break;\n";
+
+ OpOS << " case " << Name << ":\n"
+ << " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+ << " Operands[*(p + 1)]->setConstraint(\"\");\n"
+ << " ++NumMCOperands;\n"
+ << " break;\n";
break;
}
case MatchableInfo::ResOperand::RegOperand: {
+ std::string Reg, Name;
if (OpInfo.Register == 0) {
- CaseOS << " Inst.addOperand(MCOperand::CreateReg(0));\n";
- Signature += "__reg0";
+ Name = "reg0";
+ Reg = "0";
} else {
- std::string N = getQualifiedName(OpInfo.Register);
- CaseOS << " Inst.addOperand(MCOperand::CreateReg(" << N << "));\n";
- Signature += "__reg" + OpInfo.Register->getName();
+ Reg = getQualifiedName(OpInfo.Register);
+ Name = "reg" + OpInfo.Register->getName();
}
+ Signature += "__" + Name;
+ Name = "CVT_" + Name;
+ bool IsNewConverter = false;
+ unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+ IsNewConverter);
+ // Add the operand entry to the instruction kind conversion row.
+ ConversionRow.push_back(ID);
+ ConversionRow.push_back(0);
+
+ if (!IsNewConverter)
+ break;
+ CvtOS << " case " << Name << ":\n"
+ << " Inst.addOperand(MCOperand::CreateReg(" << Reg << "));\n"
+ << " break;\n";
+
+ OpOS << " case " << Name << ":\n"
+ << " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+ << " Operands[*(p + 1)]->setConstraint(\"m\");\n"
+ << " ++NumMCOperands;\n"
+ << " break;\n";
}
}
}
+    // If there were no operands, note that in the signature.
+ if (Signature == "Convert")
+ Signature += "_NoOperands";
+
II.ConversionFnKind = Signature;
- // Check if we have already generated this signature.
- if (!GeneratedFns.insert(Signature).second)
+ // Save the signature. If we already have it, don't add a new row
+ // to the table.
+ if (!InstructionConversionKinds.insert(Signature))
continue;
- // If not, emit it now. Add to the enum list.
- OS << " " << Signature << ",\n";
-
- CvtOS << " case " << Signature << ":\n";
- CvtOS << CaseOS.str();
- CvtOS << " return true;\n";
+ // Add the row to the table.
+ ConversionTable.push_back(ConversionRow);
}
- // Finish the convert function.
+ // Finish up the converter driver function.
+ CvtOS << " }\n }\n}\n\n";
+
+ // Finish up the operand number lookup function.
+ OpOS << " }\n }\n}\n\n";
- CvtOS << " }\n";
- CvtOS << " return false;\n";
- CvtOS << "}\n\n";
+ OS << "namespace {\n";
+
+ // Output the operand conversion kind enum.
+ OS << "enum OperatorConversionKind {\n";
+ for (unsigned i = 0, e = OperandConversionKinds.size(); i != e; ++i)
+ OS << " " << OperandConversionKinds[i] << ",\n";
+ OS << " CVT_NUM_CONVERTERS\n";
+ OS << "};\n\n";
+
+ // Output the instruction conversion kind enum.
+ OS << "enum InstructionConversionKind {\n";
+ for (SetVector<std::string>::const_iterator
+ i = InstructionConversionKinds.begin(),
+ e = InstructionConversionKinds.end(); i != e; ++i)
+ OS << " " << *i << ",\n";
+ OS << " CVT_NUM_SIGNATURES\n";
+ OS << "};\n\n";
+
+
+ OS << "} // end anonymous namespace\n\n";
- // Finish the enum, and drop the convert function after it.
+ // Output the conversion table.
+ OS << "static const uint8_t ConversionTable[CVT_NUM_SIGNATURES]["
+ << MaxRowLength << "] = {\n";
+
+ for (unsigned Row = 0, ERow = ConversionTable.size(); Row != ERow; ++Row) {
+ assert(ConversionTable[Row].size() % 2 == 0 && "bad conversion row!");
+ OS << " // " << InstructionConversionKinds[Row] << "\n";
+ OS << " { ";
+ for (unsigned i = 0, e = ConversionTable[Row].size(); i != e; i += 2)
+ OS << OperandConversionKinds[ConversionTable[Row][i]] << ", "
+ << (unsigned)(ConversionTable[Row][i + 1]) << ", ";
+ OS << "CVT_Done },\n";
+ }
- OS << " NumConversionVariants\n";
OS << "};\n\n";
+ // Spit out the conversion driver function.
OS << CvtOS.str();
+
+ // Spit out the operand number lookup function.
+ OS << OpOS.str();
}
/// emitMatchClassEnumeration - Emit the enumeration for match class kinds.
@@ -1853,7 +2040,7 @@ static void emitValidateOperandClass(AsmMatcherInfo &Info,
OS << " MatchClassKind OpKind;\n";
OS << " switch (Operand.getReg()) {\n";
OS << " default: OpKind = InvalidMatchClass; break;\n";
- for (std::map<Record*, ClassInfo*>::iterator
+ for (AsmMatcherInfo::RegisterClassesTy::iterator
it = Info.RegisterClasses.begin(), ie = Info.RegisterClasses.end();
it != ie; ++it)
OS << " case " << Info.Target.getName() << "::"
@@ -1874,7 +2061,7 @@ static void emitValidateOperandClass(AsmMatcherInfo &Info,
static void emitIsSubclass(CodeGenTarget &Target,
std::vector<ClassInfo*> &Infos,
raw_ostream &OS) {
- OS << "/// isSubclass - Compute whether \\arg A is a subclass of \\arg B.\n";
+ OS << "/// isSubclass - Compute whether \\p A is a subclass of \\p B.\n";
OS << "static bool isSubclass(MatchClassKind A, MatchClassKind B) {\n";
OS << " if (A == B)\n";
OS << " return true;\n\n";
@@ -2083,7 +2270,7 @@ static std::string GetAliasRequiredFeatures(Record *R,
SubtargetFeatureInfo *F = Info.getSubtargetFeature(ReqFeatures[i]);
if (F == 0)
- throw TGError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() +
+ PrintFatalError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() +
"' is not marked as an AssemblerPredicate!");
if (NumFeatures)
@@ -2146,14 +2333,14 @@ static bool emitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info) {
// We can't have two aliases from the same mnemonic with no predicate.
PrintError(ToVec[AliasWithNoPredicate]->getLoc(),
"two MnemonicAliases with the same 'from' mnemonic!");
- throw TGError(R->getLoc(), "this is the other MnemonicAlias.");
+ PrintFatalError(R->getLoc(), "this is the other MnemonicAlias.");
}
AliasWithNoPredicate = i;
continue;
}
if (R->getValueAsString("ToMnemonic") == I->first)
- throw TGError(R->getLoc(), "MnemonicAlias to the same string");
+ PrintFatalError(R->getLoc(), "MnemonicAlias to the same string");
if (!MatchCode.empty())
MatchCode += "else ";
@@ -2189,17 +2376,27 @@ static const char *getMinimalTypeForRange(uint64_t Range) {
}
static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
- const AsmMatcherInfo &Info, StringRef ClassName) {
+ const AsmMatcherInfo &Info, StringRef ClassName,
+ StringToOffsetTable &StringTable,
+ unsigned MaxMnemonicIndex) {
+ unsigned MaxMask = 0;
+ for (std::vector<OperandMatchEntry>::const_iterator it =
+ Info.OperandMatchInfo.begin(), ie = Info.OperandMatchInfo.end();
+ it != ie; ++it) {
+ MaxMask |= it->OperandMask;
+ }
+
  // Emit the static custom operand parsing table.
OS << "namespace {\n";
OS << " struct OperandMatchEntry {\n";
- OS << " static const char *const MnemonicTable;\n";
- OS << " uint32_t OperandMask;\n";
- OS << " uint32_t Mnemonic;\n";
OS << " " << getMinimalTypeForRange(1ULL << Info.SubtargetFeatures.size())
<< " RequiredFeatures;\n";
+ OS << " " << getMinimalTypeForRange(MaxMnemonicIndex)
+ << " Mnemonic;\n";
OS << " " << getMinimalTypeForRange(Info.Classes.size())
- << " Class;\n\n";
+ << " Class;\n";
+ OS << " " << getMinimalTypeForRange(MaxMask)
+ << " OperandMask;\n\n";
OS << " StringRef getMnemonic() const {\n";
OS << " return StringRef(MnemonicTable + Mnemonic + 1,\n";
OS << " MnemonicTable[Mnemonic]);\n";
@@ -2222,8 +2419,6 @@ static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
OS << "} // end anonymous namespace.\n\n";
- StringToOffsetTable StringTable;
-
OS << "static const OperandMatchEntry OperandMatchTable["
<< Info.OperandMatchInfo.size() << "] = {\n";
@@ -2234,8 +2429,25 @@ static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
const OperandMatchEntry &OMI = *it;
const MatchableInfo &II = *OMI.MI;
- OS << " { " << OMI.OperandMask;
+ OS << " { ";
+ // Write the required features mask.
+ if (!II.RequiredFeatures.empty()) {
+ for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) {
+ if (i) OS << "|";
+ OS << II.RequiredFeatures[i]->getEnumName();
+ }
+ } else
+ OS << "0";
+
+ // Store a pascal-style length byte in the mnemonic.
+ std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
+ OS << ", " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
+ << " /* " << II.Mnemonic << " */, ";
+
+ OS << OMI.CI->Name;
+
+ OS << ", " << OMI.OperandMask;
OS << " /* ";
bool printComma = false;
for (int i = 0, e = 31; i !=e; ++i)
@@ -2247,30 +2459,10 @@ static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
}
OS << " */";
- // Store a pascal-style length byte in the mnemonic.
- std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
- OS << ", " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
- << " /* " << II.Mnemonic << " */, ";
-
- // Write the required features mask.
- if (!II.RequiredFeatures.empty()) {
- for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) {
- if (i) OS << "|";
- OS << II.RequiredFeatures[i]->getEnumName();
- }
- } else
- OS << "0";
-
- OS << ", " << OMI.CI->Name;
-
OS << " },\n";
}
OS << "};\n\n";
- OS << "const char *const OperandMatchEntry::MnemonicTable =\n";
- StringTable.EmitString(OS);
- OS << ";\n\n";
-
// Emit the operand class switch to call the correct custom parser for
// the found operand class.
OS << Target.getName() << ClassName << "::OperandMatchResultTy "
@@ -2407,14 +2599,20 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " // This should be included into the middle of the declaration of\n";
OS << " // your subclasses implementation of MCTargetAsmParser.\n";
OS << " unsigned ComputeAvailableFeatures(uint64_t FeatureBits) const;\n";
- OS << " bool ConvertToMCInst(unsigned Kind, MCInst &Inst, "
+ OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
<< "unsigned Opcode,\n"
<< " const SmallVectorImpl<MCParsedAsmOperand*> "
<< "&Operands);\n";
- OS << " bool MnemonicIsValid(StringRef Mnemonic);\n";
+ OS << " void convertToMapAndConstraints(unsigned Kind,\n ";
+ OS << " const SmallVectorImpl<MCParsedAsmOperand*> &Operands);\n";
+ OS << " bool mnemonicIsValid(StringRef Mnemonic);\n";
OS << " unsigned MatchInstructionImpl(\n";
- OS << " const SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n";
- OS << " MCInst &Inst, unsigned &ErrorInfo, unsigned VariantID = 0);\n";
+ OS.indent(27);
+ OS << "const SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n"
+ << " MCInst &Inst,\n"
+ << " unsigned &ErrorInfo,"
+ << " bool matchingInlineAsm,\n"
+ << " unsigned VariantID = 0);\n";
if (Info.OperandMatchInfo.size()) {
OS << "\n enum OperandMatchResultTy {\n";
@@ -2447,7 +2645,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
emitSubtargetFeatureFlagEnumeration(Info, OS);
// Emit the function to match a register name to number.
- emitMatchRegisterName(Target, AsmParser, OS);
+ // This should be omitted for Mips target
+ if (AsmParser->getValueAsBit("ShouldEmitMatchRegisterName"))
+ emitMatchRegisterName(Target, AsmParser, OS);
OS << "#endif // GET_REGISTER_MATCHER\n\n";
@@ -2465,8 +2665,10 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
// Generate the function that remaps for mnemonic aliases.
bool HasMnemonicAliases = emitMnemonicAliases(OS, Info);
- // Generate the unified function to convert operands into an MCInst.
- emitConvertToMCInst(Target, ClassName, Info.Matchables, OS);
+ // Generate the convertToMCInst function to convert operands into an MCInst.
+ // Also, generate the convertToMapAndConstraints function for MS-style inline
+ // assembly. The latter doesn't actually generate a MCInst.
+ emitConvertFuncs(Target, ClassName, Info.Matchables, OS);
// Emit the enumeration for classes which participate in matching.
emitMatchClassEnumeration(Target, Info.Classes, OS);
@@ -2484,11 +2686,25 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
emitComputeAvailableFeatures(Info, OS);
+ StringToOffsetTable StringTable;
+
size_t MaxNumOperands = 0;
+ unsigned MaxMnemonicIndex = 0;
for (std::vector<MatchableInfo*>::const_iterator it =
Info.Matchables.begin(), ie = Info.Matchables.end();
- it != ie; ++it)
- MaxNumOperands = std::max(MaxNumOperands, (*it)->AsmOperands.size());
+ it != ie; ++it) {
+ MatchableInfo &II = **it;
+ MaxNumOperands = std::max(MaxNumOperands, II.AsmOperands.size());
+
+ // Store a pascal-style length byte in the mnemonic.
+ std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
+ MaxMnemonicIndex = std::max(MaxMnemonicIndex,
+ StringTable.GetOrAddStringOffset(LenMnemonic, false));
+ }
+
+ OS << "static const char *const MnemonicTable =\n";
+ StringTable.EmitString(OS);
+ OS << ";\n\n";
// Emit the static match table; unused classes get initialized to 0 which is
// guaranteed to be InvalidMatchClass.
@@ -2502,8 +2718,8 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
// following the mnemonic.
OS << "namespace {\n";
OS << " struct MatchEntry {\n";
- OS << " static const char *const MnemonicTable;\n";
- OS << " uint32_t Mnemonic;\n";
+ OS << " " << getMinimalTypeForRange(MaxMnemonicIndex)
+ << " Mnemonic;\n";
OS << " uint16_t Opcode;\n";
OS << " " << getMinimalTypeForRange(Info.Matchables.size())
<< " ConvertFn;\n";
@@ -2533,8 +2749,6 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << "} // end anonymous namespace.\n\n";
- StringToOffsetTable StringTable;
-
OS << "static const MatchEntry MatchTable["
<< Info.Matchables.size() << "] = {\n";
@@ -2573,13 +2787,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << "};\n\n";
- OS << "const char *const MatchEntry::MnemonicTable =\n";
- StringTable.EmitString(OS);
- OS << ";\n\n";
-
// A method to determine if a mnemonic is in the list.
OS << "bool " << Target.getName() << ClassName << "::\n"
- << "MnemonicIsValid(StringRef Mnemonic) {\n";
+ << "mnemonicIsValid(StringRef Mnemonic) {\n";
OS << " // Search the table.\n";
OS << " std::pair<const MatchEntry*, const MatchEntry*> MnemonicRange =\n";
OS << " std::equal_range(MatchTable, MatchTable+"
@@ -2592,8 +2802,14 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
<< Target.getName() << ClassName << "::\n"
<< "MatchInstructionImpl(const SmallVectorImpl<MCParsedAsmOperand*>"
<< " &Operands,\n";
- OS << " MCInst &Inst, unsigned &ErrorInfo, ";
- OS << "unsigned VariantID) {\n";
+ OS << " MCInst &Inst,\n"
+ << "unsigned &ErrorInfo, bool matchingInlineAsm, unsigned VariantID) {\n";
+
+ OS << " // Eliminate obvious mismatches.\n";
+ OS << " if (Operands.size() > " << (MaxNumOperands+1) << ") {\n";
+ OS << " ErrorInfo = " << (MaxNumOperands+1) << ";\n";
+ OS << " return Match_InvalidOperand;\n";
+ OS << " }\n\n";
// Emit code to get the available features.
OS << " // Get the current feature set.\n";
@@ -2611,12 +2827,6 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
}
// Emit code to compute the class list for this operand vector.
- OS << " // Eliminate obvious mismatches.\n";
- OS << " if (Operands.size() > " << (MaxNumOperands+1) << ") {\n";
- OS << " ErrorInfo = " << (MaxNumOperands+1) << ";\n";
- OS << " return Match_InvalidOperand;\n";
- OS << " }\n\n";
-
OS << " // Some state to try to produce better error messages.\n";
OS << " bool HadMatchOtherThanFeatures = false;\n";
OS << " bool HadMatchOtherThanPredicate = false;\n";
@@ -2681,17 +2891,20 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " HadMatchOtherThanFeatures = true;\n";
OS << " unsigned NewMissingFeatures = it->RequiredFeatures & "
"~AvailableFeatures;\n";
- OS << " if (CountPopulation_32(NewMissingFeatures) <= "
- "CountPopulation_32(MissingFeatures))\n";
+ OS << " if (CountPopulation_32(NewMissingFeatures) <=\n"
+ " CountPopulation_32(MissingFeatures))\n";
OS << " MissingFeatures = NewMissingFeatures;\n";
OS << " continue;\n";
OS << " }\n";
OS << "\n";
+ OS << " if (matchingInlineAsm) {\n";
+ OS << " Inst.setOpcode(it->Opcode);\n";
+ OS << " convertToMapAndConstraints(it->ConvertFn, Operands);\n";
+ OS << " return Match_Success;\n";
+ OS << " }\n\n";
OS << " // We have selected a definite instruction, convert the parsed\n"
<< " // operands into the appropriate MCInst.\n";
- OS << " if (!ConvertToMCInst(it->ConvertFn, Inst,\n"
- << " it->Opcode, Operands))\n";
- OS << " return Match_ConversionFail;\n";
+ OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
OS << "\n";
// Verify the instruction with the target-specific match predicate function.
@@ -2716,15 +2929,16 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " }\n\n";
OS << " // Okay, we had no match. Try to return a useful error code.\n";
- OS << " if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)";
- OS << " return RetCode;\n";
+ OS << " if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)\n";
+ OS << " return RetCode;\n\n";
OS << " // Missing feature matches return which features were missing\n";
OS << " ErrorInfo = MissingFeatures;\n";
OS << " return Match_MissingFeature;\n";
OS << "}\n\n";
if (Info.OperandMatchInfo.size())
- emitCustomOperandParsing(OS, Target, Info, ClassName);
+ emitCustomOperandParsing(OS, Target, Info, ClassName, StringTable,
+ MaxMnemonicIndex);
OS << "#endif // GET_MATCHER_IMPLEMENTATION\n\n";
}
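[Editorial note] The recurring LenMnemonic idiom above stores each mnemonic as a Pascal-style entry: one length byte followed by the characters, all concatenated into a single MnemonicTable that both MatchTable and the custom-operand table now index by offset. A simplified sketch; the real StringToOffsetTable also deduplicates entries, which is omitted here:

    #include <cstdio>
    #include <string>

    struct LengthPrefixedTable {
      std::string Data;
      // Append "length byte + characters", return the entry's offset.
      unsigned add(const std::string &S) {
        unsigned Off = (unsigned)Data.size();
        Data += char(S.size());
        Data += S;
        return Off;
      }
    };

    int main() {
      LengthPrefixedTable T;
      unsigned Off = T.add("addl");
      unsigned Len = (unsigned char)T.Data[Off];          // length byte
      printf("%.*s\n", (int)Len, T.Data.c_str() + Off + 1); // "addl"
      return 0;
    }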
diff --git a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
index 57979b3..a4114d9 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -313,7 +313,9 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
/// OpcodeInfo - This encodes the index of the string to use for the first
/// chunk of the output as well as indices used for operand printing.
- std::vector<unsigned> OpcodeInfo;
+  /// To reduce the number of unhandled cases, we expand the encoding from
+  /// 32 bits to 64 bits; any bits beyond the first 32 go into a second table.
+ std::vector<uint64_t> OpcodeInfo;
// Add all strings to the string table upfront so it can generate an optimized
// representation.
@@ -362,7 +364,7 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
// To reduce code size, we compactify common instructions into a few bits
// in the opcode-indexed table.
- unsigned BitsLeft = 32-AsmStrBits;
+ unsigned BitsLeft = 64-AsmStrBits;
std::vector<std::vector<std::string> > TableDrivenOperandPrinters;
@@ -388,10 +390,11 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
}
// Otherwise, we can include this in the initial lookup table. Add it in.
- BitsLeft -= NumBits;
for (unsigned i = 0, e = InstIdxs.size(); i != e; ++i)
- if (InstIdxs[i] != ~0U)
- OpcodeInfo[i] |= InstIdxs[i] << (BitsLeft+AsmStrBits);
+ if (InstIdxs[i] != ~0U) {
+ OpcodeInfo[i] |= (uint64_t)InstIdxs[i] << (64-BitsLeft);
+ }
+ BitsLeft -= NumBits;
// Remove the info about this operand.
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
@@ -410,16 +413,32 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
}
-
- O<<" static const unsigned OpInfo[] = {\n";
+ // We always emit at least one 32-bit table. A second table is emitted if
+ // more bits are needed.
+ O<<" static const uint32_t OpInfo[] = {\n";
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
- O << " " << OpcodeInfo[i] << "U,\t// "
+ O << " " << (OpcodeInfo[i] & 0xffffffff) << "U,\t// "
<< NumberedInstructions[i]->TheDef->getName() << "\n";
}
// Add a dummy entry so the array init doesn't end with a comma.
O << " 0U\n";
O << " };\n\n";
+ if (BitsLeft < 32) {
+ // Add a second OpInfo table only when it is necessary.
+ // Adjust the type of the second table based on the number of bits needed.
+ O << " static const uint"
+ << ((BitsLeft < 16) ? "32" : (BitsLeft < 24) ? "16" : "8")
+ << "_t OpInfo2[] = {\n";
+ for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
+ O << " " << (OpcodeInfo[i] >> 32) << "U,\t// "
+ << NumberedInstructions[i]->TheDef->getName() << "\n";
+ }
+ // Add a dummy entry so the array init doesn't end with a comma.
+ O << " 0U\n";
+ O << " };\n\n";
+ }
+
// Emit the string itself.
O << " const char AsmStrs[] = {\n";
StringTable.emit(O, printChar);
@@ -427,13 +446,22 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
O << " O << \"\\t\";\n\n";
- O << " // Emit the opcode for the instruction.\n"
- << " unsigned Bits = OpInfo[MI->getOpcode()];\n"
- << " assert(Bits != 0 && \"Cannot print this instruction.\");\n"
+ O << " // Emit the opcode for the instruction.\n";
+ if (BitsLeft < 32) {
+ // If we have two tables then we need to perform two lookups and combine
+ // the results into a single 64-bit value.
+ O << " uint64_t Bits1 = OpInfo[MI->getOpcode()];\n"
+ << " uint64_t Bits2 = OpInfo2[MI->getOpcode()];\n"
+ << " uint64_t Bits = (Bits2 << 32) | Bits1;\n";
+ } else {
+ // If only one table is used we just need to perform a single lookup.
+ O << " uint32_t Bits = OpInfo[MI->getOpcode()];\n";
+ }
+ O << " assert(Bits != 0 && \"Cannot print this instruction.\");\n"
<< " O << AsmStrs+(Bits & " << (1 << AsmStrBits)-1 << ")-1;\n\n";
// Output the table driven operand information.
- BitsLeft = 32-AsmStrBits;
+ BitsLeft = 64-AsmStrBits;
for (unsigned i = 0, e = TableDrivenOperandPrinters.size(); i != e; ++i) {
std::vector<std::string> &Commands = TableDrivenOperandPrinters[i];
@@ -443,14 +471,13 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
assert(NumBits <= BitsLeft && "consistency error");
// Emit code to extract this field from Bits.
- BitsLeft -= NumBits;
-
O << "\n // Fragment " << i << " encoded into " << NumBits
<< " bits for " << Commands.size() << " unique commands.\n";
if (Commands.size() == 2) {
// Emit two possibilities with if/else.
- O << " if ((Bits >> " << (BitsLeft+AsmStrBits) << ") & "
+ O << " if ((Bits >> "
+ << (64-BitsLeft) << ") & "
<< ((1 << NumBits)-1) << ") {\n"
<< Commands[1]
<< " } else {\n"
@@ -460,7 +487,8 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
// Emit a single possibility.
O << Commands[0] << "\n\n";
} else {
- O << " switch ((Bits >> " << (BitsLeft+AsmStrBits) << ") & "
+ O << " switch ((Bits >> "
+ << (64-BitsLeft) << ") & "
<< ((1 << NumBits)-1) << ") {\n"
<< " default: // unreachable.\n";
@@ -472,6 +500,7 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
}
O << " }\n\n";
}
+ BitsLeft -= NumBits;
}
// Okay, delete instructions with no operand info left.
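[Editorial note] Both the encoder and the decoder above moved the BitsLeft decrement to after the shift, so each side computes the field position as 64 - BitsLeft before consuming the field's bits. A small sketch of that packing discipline under the same convention (simplified: the real code reserves the low AsmStrBits for the string index first):

    #include <cassert>
    #include <cstdint>

    struct BitPacker {
      uint64_t Bits;
      unsigned BitsLeft;   // bits still unallocated
      BitPacker() : Bits(0), BitsLeft(64) {}

      void pack(uint64_t Val, unsigned NumBits) {
        assert(NumBits <= BitsLeft && "consistency error");
        Bits |= Val << (64 - BitsLeft);  // position before consuming
        BitsLeft -= NumBits;             // then consume the field's bits
      }
    };

    // Decode with the BitsLeft value the encoder had *before* packing.
    uint64_t unpack(uint64_t Bits, unsigned BitsLeftBefore, unsigned NumBits) {
      return (Bits >> (64 - BitsLeftBefore)) & ((1ULL << NumBits) - 1);
    }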
@@ -537,9 +566,9 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName,
std::vector<std::string> AltNames =
Reg.TheDef->getValueAsListOfStrings("AltNames");
if (AltNames.size() <= Idx)
- throw TGError(Reg.TheDef->getLoc(),
- (Twine("Register definition missing alt name for '") +
- AltName + "'.").str());
+ PrintFatalError(Reg.TheDef->getLoc(),
+ (Twine("Register definition missing alt name for '") +
+ AltName + "'.").str());
AsmName = AltNames[Idx];
}
}
@@ -551,7 +580,7 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName,
StringTable.emit(O, printChar);
O << " };\n\n";
- O << " static const unsigned RegAsmOffset" << AltName << "[] = {";
+ O << " static const uint32_t RegAsmOffset" << AltName << "[] = {";
for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
if ((i % 14) == 0)
O << "\n ";
@@ -590,7 +619,7 @@ void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
emitRegisterNameString(O, "", Registers);
if (hasAltNames) {
- O << " const unsigned *RegAsmOffset;\n"
+ O << " const uint32_t *RegAsmOffset;\n"
<< " const char *AsmStrs;\n"
<< " switch(AltIdx) {\n"
<< " default: llvm_unreachable(\"Invalid register alt name index!\");\n";
@@ -763,7 +792,7 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
if (!R->getValueAsBit("EmitAlias"))
continue; // We were told not to emit the alias, but to emit the aliasee.
const DagInit *DI = R->getValueAsDag("ResultInst");
- const DefInit *Op = dynamic_cast<const DefInit*>(DI->getOperator());
+ const DefInit *Op = cast<DefInit>(DI->getOperator());
AliasMap[getQualifiedName(Op->getDef())].push_back(Alias);
}
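[Editorial note] A second theme running through these files is the switch from C++ dynamic_cast<> to LLVM's casting templates from llvm/Support/Casting.h: isa<T> tests a value's kind, cast<T> asserts and converts, and dyn_cast<T> returns null on mismatch, all driven by a classof() predicate rather than RTTI. A self-contained imitation of the mechanism, using a hypothetical Init hierarchy rather than the real one:

    #include <cassert>
    #include <cstddef>

    struct Init {
      enum Kind { IK_IntInit, IK_DefInit };
      Kind K;
      explicit Init(Kind K) : K(K) {}
    };

    struct IntInit : Init {
      int Value;
      explicit IntInit(int V) : Init(IK_IntInit), Value(V) {}
      static bool classof(const Init *I) { return I->K == IK_IntInit; }
    };

    template <class To, class From> bool isa(const From *V) {
      return To::classof(V);
    }
    template <class To, class From> To *dyn_cast(From *V) {
      return isa<To>(V) ? static_cast<To *>(V) : NULL;
    }
    template <class To, class From> To *cast(From *V) {
      assert(isa<To>(V) && "cast to incompatible type");
      return static_cast<To *>(V);
    }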
diff --git a/contrib/llvm/utils/TableGen/AsmWriterInst.cpp b/contrib/llvm/utils/TableGen/AsmWriterInst.cpp
index 350a2cc..fe1f756 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterInst.cpp
+++ b/contrib/llvm/utils/TableGen/AsmWriterInst.cpp
@@ -14,6 +14,7 @@
#include "AsmWriterInst.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;
@@ -123,8 +124,8 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI,
!= std::string::npos) {
AddLiteralString(std::string(1, AsmString[DollarPos+1]));
} else {
- throw "Non-supported escaped character found in instruction '" +
- CGI.TheDef->getName() + "'!";
+ PrintFatalError("Non-supported escaped character found in instruction '" +
+ CGI.TheDef->getName() + "'!");
}
LastEmitted = DollarPos+2;
continue;
@@ -162,15 +163,15 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI,
// brace.
if (hasCurlyBraces) {
if (VarEnd >= AsmString.size())
- throw "Reached end of string before terminating curly brace in '"
- + CGI.TheDef->getName() + "'";
+ PrintFatalError("Reached end of string before terminating curly brace in '"
+ + CGI.TheDef->getName() + "'");
// Look for a modifier string.
if (AsmString[VarEnd] == ':') {
++VarEnd;
if (VarEnd >= AsmString.size())
- throw "Reached end of string before terminating curly brace in '"
- + CGI.TheDef->getName() + "'";
+ PrintFatalError("Reached end of string before terminating curly brace in '"
+ + CGI.TheDef->getName() + "'");
unsigned ModifierStart = VarEnd;
while (VarEnd < AsmString.size() && isIdentChar(AsmString[VarEnd]))
@@ -178,17 +179,17 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI,
Modifier = std::string(AsmString.begin()+ModifierStart,
AsmString.begin()+VarEnd);
if (Modifier.empty())
- throw "Bad operand modifier name in '"+ CGI.TheDef->getName() + "'";
+ PrintFatalError("Bad operand modifier name in '"+ CGI.TheDef->getName() + "'");
}
if (AsmString[VarEnd] != '}')
- throw "Variable name beginning with '{' did not end with '}' in '"
- + CGI.TheDef->getName() + "'";
+ PrintFatalError("Variable name beginning with '{' did not end with '}' in '"
+ + CGI.TheDef->getName() + "'");
++VarEnd;
}
if (VarName.empty() && Modifier.empty())
- throw "Stray '$' in '" + CGI.TheDef->getName() +
- "' asm string, maybe you want $$?";
+ PrintFatalError("Stray '$' in '" + CGI.TheDef->getName() +
+ "' asm string, maybe you want $$?");
if (VarName.empty()) {
// Just a modifier, pass this into PrintSpecial.
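[Editorial note] The throw-a-string idiom removed here (and from the emitters below) is replaced by PrintFatalError from llvm/TableGen/Error.h, which reports the diagnostic and terminates instead of unwinding. Roughly, and only as an illustration of the semantics:

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Illustrative stand-in for llvm::PrintFatalError(): print and exit;
    // unlike a throw, control never returns to the caller.
    static void printFatalError(const std::string &Msg) {
      fprintf(stderr, "error: %s\n", Msg.c_str());
      exit(1);
    }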
diff --git a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
index e9c4bd3..94f3c65 100644
--- a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <cassert>
@@ -93,7 +94,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
O << Action->getValueAsString("Predicate");
} else {
Action->dump();
- throw "Unknown CCPredicateAction!";
+ PrintFatalError("Unknown CCPredicateAction!");
}
O << ") {\n";
@@ -131,7 +132,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
ListInit *ShadowRegList = Action->getValueAsListInit("ShadowRegList");
if (ShadowRegList->getSize() > 0 &&
ShadowRegList->getSize() != RegList->getSize())
- throw "Invalid length of list of shadowed registers";
+ PrintFatalError("Invalid length of list of shadowed registers");
if (RegList->getSize() == 1) {
O << IndentStr << "if (unsigned Reg = State.AllocateReg(";
@@ -177,12 +178,12 @@ void CallingConvEmitter::EmitAction(Record *Action,
if (Size)
O << Size << ", ";
else
- O << "\n" << IndentStr << " State.getTarget().getTargetData()"
+ O << "\n" << IndentStr << " State.getTarget().getDataLayout()"
"->getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())), ";
if (Align)
O << Align;
else
- O << "\n" << IndentStr << " State.getTarget().getTargetData()"
+ O << "\n" << IndentStr << " State.getTarget().getDataLayout()"
"->getABITypeAlignment(EVT(LocVT).getTypeForEVT(State.getContext()))";
if (Action->isSubClassOf("CCAssignToStackWithShadow"))
O << ", " << getQualifiedName(Action->getValueAsDef("ShadowReg"));
@@ -221,7 +222,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
O << IndentStr << IndentStr << "return false;\n";
} else {
Action->dump();
- throw "Unknown CCAction!";
+ PrintFatalError("Unknown CCAction!");
}
}
}
diff --git a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
index 31a39b1..3e4f626 100644
--- a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -91,11 +91,11 @@ void CodeEmitterGen::reverseBits(std::vector<Record*> &Insts) {
// return the variable bit position. Otherwise return -1.
int CodeEmitterGen::getVariableBit(const std::string &VarName,
BitsInit *BI, int bit) {
- if (VarBitInit *VBI = dynamic_cast<VarBitInit*>(BI->getBit(bit))) {
- if (VarInit *VI = dynamic_cast<VarInit*>(VBI->getVariable()))
+ if (VarBitInit *VBI = dyn_cast<VarBitInit>(BI->getBit(bit))) {
+ if (VarInit *VI = dyn_cast<VarInit>(VBI->getBitVar()))
if (VI->getName() == VarName)
return VBI->getBitNum();
- } else if (VarInit *VI = dynamic_cast<VarInit*>(BI->getBit(bit))) {
+ } else if (VarInit *VI = dyn_cast<VarInit>(BI->getBit(bit))) {
if (VI->getName() == VarName)
return 0;
}
@@ -134,10 +134,13 @@ AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName,
assert(!CGI.Operands.isFlatOperandNotEmitted(OpIdx) &&
"Explicitly used operand also marked as not emitted!");
} else {
+ unsigned NumberOps = CGI.Operands.size();
/// If this operand is not supposed to be emitted by the
/// generated emitter, skip it.
- while (CGI.Operands.isFlatOperandNotEmitted(NumberedOp))
+ while (NumberedOp < NumberOps &&
+ CGI.Operands.isFlatOperandNotEmitted(NumberedOp))
++NumberedOp;
+
OpIdx = NumberedOp++;
}
@@ -269,7 +272,7 @@ void CodeEmitterGen::run(raw_ostream &o) {
// Start by filling in fixed values.
uint64_t Value = 0;
for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) {
- if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(e-i-1)))
+ if (BitInit *B = dyn_cast<BitInit>(BI->getBit(e-i-1)))
Value |= (uint64_t)B->getValue() << (e-i-1);
}
o << " UINT64_C(" << Value << ")," << '\t' << "// " << R->getName() << "\n";
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index 34f8a34..d5b581b 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -79,14 +79,19 @@ bool EEVT::TypeSet::FillWithPossibleTypes(TreePattern &TP,
const std::vector<MVT::SimpleValueType> &LegalTypes =
TP.getDAGPatterns().getTargetInfo().getLegalValueTypes();
+ if (TP.hasError())
+ return false;
+
for (unsigned i = 0, e = LegalTypes.size(); i != e; ++i)
if (Pred == 0 || Pred(LegalTypes[i]))
TypeVec.push_back(LegalTypes[i]);
// If we have nothing that matches the predicate, bail out.
- if (TypeVec.empty())
+ if (TypeVec.empty()) {
TP.error("Type inference contradiction found, no " +
std::string(PredicateName) + " types found");
+ return false;
+ }
// No need to sort with one element.
if (TypeVec.size() == 1) return true;
@@ -146,9 +151,9 @@ std::string EEVT::TypeSet::getName() const {
/// MergeInTypeInfo - This merges in type information from the specified
/// argument. If 'this' changes, it returns true. If the two types are
-/// contradictory (e.g. merge f32 into i32) then this throws an exception.
+/// contradictory (e.g. merge f32 into i32) then this flags an error.
bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
- if (InVT.isCompletelyUnknown() || *this == InVT)
+ if (InVT.isCompletelyUnknown() || *this == InVT || TP.hasError())
return false;
if (isCompletelyUnknown()) {
@@ -224,11 +229,13 @@ bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
// FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, merging '" +
InVT.getName() + "' into '" + InputSet.getName() + "'");
- return true; // unreachable
+ return false;
}
/// EnforceInteger - Remove all non-integer types from this set.
bool EEVT::TypeSet::EnforceInteger(TreePattern &TP) {
+ if (TP.hasError())
+ return false;
// If we know nothing, then get the full set.
if (TypeVec.empty())
return FillWithPossibleTypes(TP, isInteger, "integer");
@@ -242,14 +249,18 @@ bool EEVT::TypeSet::EnforceInteger(TreePattern &TP) {
if (!isInteger(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
- if (TypeVec.empty())
+ if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be integer");
+ return false;
+ }
return true;
}
/// EnforceFloatingPoint - Remove all integer types from this set.
bool EEVT::TypeSet::EnforceFloatingPoint(TreePattern &TP) {
+ if (TP.hasError())
+ return false;
// If we know nothing, then get the full set.
if (TypeVec.empty())
return FillWithPossibleTypes(TP, isFloatingPoint, "floating point");
@@ -264,14 +275,19 @@ bool EEVT::TypeSet::EnforceFloatingPoint(TreePattern &TP) {
if (!isFloatingPoint(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
- if (TypeVec.empty())
+ if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be floating point");
+ return false;
+ }
return true;
}
/// EnforceScalar - Remove all vector types from this.
bool EEVT::TypeSet::EnforceScalar(TreePattern &TP) {
+ if (TP.hasError())
+ return false;
+
// If we know nothing, then get the full set.
if (TypeVec.empty())
return FillWithPossibleTypes(TP, isScalar, "scalar");
@@ -286,14 +302,19 @@ bool EEVT::TypeSet::EnforceScalar(TreePattern &TP) {
if (!isScalar(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
- if (TypeVec.empty())
+ if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be scalar");
+ return false;
+ }
return true;
}
/// EnforceVector - Remove all non-vector types from this.
bool EEVT::TypeSet::EnforceVector(TreePattern &TP) {
+ if (TP.hasError())
+ return false;
+
// If we know nothing, then get the full set.
if (TypeVec.empty())
return FillWithPossibleTypes(TP, isVector, "vector");
@@ -308,9 +329,11 @@ bool EEVT::TypeSet::EnforceVector(TreePattern &TP) {
MadeChange = true;
}
- if (TypeVec.empty())
+ if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be a vector");
+ return false;
+ }
return MadeChange;
}
@@ -319,6 +342,9 @@ bool EEVT::TypeSet::EnforceVector(TreePattern &TP) {
/// EnforceSmallerThan - 'this' must be a smaller VT than Other. Update
/// this and Other based on this information.
bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
+ if (TP.hasError())
+ return false;
+
// Both operands must be integer or FP, but we don't care which.
bool MadeChange = false;
@@ -365,19 +391,22 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
if (hasVectorTypes() && Other.hasVectorTypes()) {
if (Type.getSizeInBits() >= OtherType.getSizeInBits())
if (Type.getVectorElementType().getSizeInBits()
- >= OtherType.getVectorElementType().getSizeInBits())
+ >= OtherType.getVectorElementType().getSizeInBits()) {
TP.error("Type inference contradiction found, '" +
getName() + "' element type not smaller than '" +
Other.getName() +"'!");
+ return false;
+ }
}
else
// For scalar types, the bitsize of this type must be smaller
// than that of the other.
- if (Type.getSizeInBits() >= OtherType.getSizeInBits())
+ if (Type.getSizeInBits() >= OtherType.getSizeInBits()) {
TP.error("Type inference contradiction found, '" +
getName() + "' is not smaller than '" +
Other.getName() +"'!");
-
+ return false;
+ }
}
@@ -437,9 +466,11 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
// If this is the only type in the large set, the constraint can never be
// satisfied.
if ((Other.hasIntegerTypes() && OtherIntSize == 0)
- || (Other.hasFloatingPointTypes() && OtherFPSize == 0))
+ || (Other.hasFloatingPointTypes() && OtherFPSize == 0)) {
TP.error("Type inference contradiction found, '" +
Other.getName() + "' has nothing larger than '" + getName() +"'!");
+ return false;
+ }
// Okay, find the largest type in the Other set and remove it from the
// current set.
@@ -493,9 +524,11 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
// If this is the only type in the small set, the constraint can never be
// satisfied.
if ((hasIntegerTypes() && IntSize == 0)
- || (hasFloatingPointTypes() && FPSize == 0))
+ || (hasFloatingPointTypes() && FPSize == 0)) {
TP.error("Type inference contradiction found, '" +
getName() + "' has nothing smaller than '" + Other.getName()+"'!");
+ return false;
+ }
return MadeChange;
}
@@ -504,6 +537,9 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
/// whose element is specified by VTOperand.
bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand,
TreePattern &TP) {
+ if (TP.hasError())
+ return false;
+
// "This" must be a vector and "VTOperand" must be a scalar.
bool MadeChange = false;
MadeChange |= EnforceVector(TP);
@@ -535,9 +571,11 @@ bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand,
}
}
- if (TypeVec.empty()) // FIXME: Really want an SMLoc here!
+ if (TypeVec.empty()) { // FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, forcing '" +
InputSet.getName() + "' to have a vector element");
+ return false;
+ }
return MadeChange;
}
@@ -574,10 +612,6 @@ bool EEVT::TypeSet::EnforceVectorSubVectorTypeIs(EEVT::TypeSet &VTOperand,
//===----------------------------------------------------------------------===//
// Helpers for working with extended types.
-bool RecordPtrCmp::operator()(const Record *LHS, const Record *RHS) const {
- return LHS->getID() < RHS->getID();
-}
-
/// Dependent variable map for CodeGenDAGPattern variant generation
typedef std::map<std::string, int> DepVarMap;
@@ -586,7 +620,7 @@ typedef DepVarMap::const_iterator DepVarMap_citer;
static void FindDepVarsOf(TreePatternNode *N, DepVarMap &DepMap) {
if (N->isLeaf()) {
- if (dynamic_cast<DefInit*>(N->getLeafValue()) != NULL)
+ if (isa<DefInit>(N->getLeafValue()))
DepMap[N->getName()]++;
} else {
for (size_t i = 0, e = N->getNumChildren(); i != e; ++i)
@@ -695,7 +729,7 @@ static unsigned getPatternSize(const TreePatternNode *P,
unsigned Size = 3; // The node itself.
// If the root node is a ConstantSDNode, increase its size.
// e.g. (set R32:$dst, 0).
- if (P->isLeaf() && dynamic_cast<IntInit*>(P->getLeafValue()))
+ if (P->isLeaf() && isa<IntInit>(P->getLeafValue()))
Size += 2;
// FIXME: This is a hack to statically increase the priority of patterns
@@ -719,7 +753,7 @@ static unsigned getPatternSize(const TreePatternNode *P,
Child->getType(0) != MVT::Other)
Size += getPatternSize(Child, CGP);
else if (Child->isLeaf()) {
- if (dynamic_cast<IntInit*>(Child->getLeafValue()))
+ if (isa<IntInit>(Child->getLeafValue()))
Size += 5; // Matches a ConstantSDNode (+3) and a specific value (+2).
else if (Child->getComplexPatternInfo(CGP))
Size += getPatternSize(Child, CGP);
@@ -745,7 +779,7 @@ getPatternComplexity(const CodeGenDAGPatterns &CGP) const {
std::string PatternToMatch::getPredicateCheck() const {
std::string PredicateCheck;
for (unsigned i = 0, e = Predicates->getSize(); i != e; ++i) {
- if (DefInit *Pred = dynamic_cast<DefInit*>(Predicates->getElement(i))) {
+ if (DefInit *Pred = dyn_cast<DefInit>(Predicates->getElement(i))) {
Record *Def = Pred->getDef();
if (!Def->isSubClassOf("Predicate")) {
#ifndef NDEBUG
@@ -773,7 +807,7 @@ SDTypeConstraint::SDTypeConstraint(Record *R) {
ConstraintType = SDTCisVT;
x.SDTCisVT_Info.VT = getValueType(R->getValueAsDef("VT"));
if (x.SDTCisVT_Info.VT == MVT::isVoid)
- throw TGError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT");
+ PrintFatalError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT");
} else if (R->isSubClassOf("SDTCisPtrTy")) {
ConstraintType = SDTCisPtrTy;
@@ -833,11 +867,13 @@ static TreePatternNode *getOperandNum(unsigned OpNo, TreePatternNode *N,
/// ApplyTypeConstraint - Given a node in a pattern, apply this type
/// constraint to the nodes operands. This returns true if it makes a
-/// change, false otherwise. If a type contradiction is found, throw an
-/// exception.
+/// change, false otherwise. If a type contradiction is found, flag an error.
bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
const SDNodeInfo &NodeInfo,
TreePattern &TP) const {
+ if (TP.hasError())
+ return false;
+
unsigned ResNo = 0; // The result number being referenced.
TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NodeInfo, ResNo);
@@ -868,10 +904,12 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
// The NodeToApply must be a leaf node that is a VT. OtherOperandNum must
// have an integer type that is smaller than the VT.
if (!NodeToApply->isLeaf() ||
- !dynamic_cast<DefInit*>(NodeToApply->getLeafValue()) ||
+ !isa<DefInit>(NodeToApply->getLeafValue()) ||
!static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef()
- ->isSubClassOf("ValueType"))
+ ->isSubClassOf("ValueType")) {
TP.error(N->getOperator()->getName() + " expects a VT operand!");
+ return false;
+ }
MVT::SimpleValueType VT =
getValueType(static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef());
@@ -1025,8 +1063,9 @@ static unsigned GetNumNodeResults(Record *Operator, CodeGenDAGPatterns &CDP) {
// Get the result tree.
DagInit *Tree = Operator->getValueAsDag("Fragment");
Record *Op = 0;
- if (Tree && dynamic_cast<DefInit*>(Tree->getOperator()))
- Op = dynamic_cast<DefInit*>(Tree->getOperator())->getDef();
+ if (Tree)
+ if (DefInit *DI = dyn_cast<DefInit>(Tree->getOperator()))
+ Op = DI->getDef();
assert(Op && "Invalid Fragment");
return GetNumNodeResults(Op, CDP);
}
@@ -1100,8 +1139,8 @@ bool TreePatternNode::isIsomorphicTo(const TreePatternNode *N,
return false;
if (isLeaf()) {
- if (DefInit *DI = dynamic_cast<DefInit*>(getLeafValue())) {
- if (DefInit *NDI = dynamic_cast<DefInit*>(N->getLeafValue())) {
+ if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
+ if (DefInit *NDI = dyn_cast<DefInit>(N->getLeafValue())) {
return ((DI->getDef() == NDI->getDef())
&& (DepVars.find(getName()) == DepVars.end()
|| getName() == N->getName()));
@@ -1158,8 +1197,8 @@ SubstituteFormalArguments(std::map<std::string, TreePatternNode*> &ArgMap) {
TreePatternNode *Child = getChild(i);
if (Child->isLeaf()) {
Init *Val = Child->getLeafValue();
- if (dynamic_cast<DefInit*>(Val) &&
- static_cast<DefInit*>(Val)->getDef()->getName() == "node") {
+ if (isa<DefInit>(Val) &&
+ cast<DefInit>(Val)->getDef()->getName() == "node") {
// We found a use of a formal argument, replace it with its value.
TreePatternNode *NewChild = ArgMap[Child->getName()];
assert(NewChild && "Couldn't find formal argument!");
@@ -1179,7 +1218,11 @@ SubstituteFormalArguments(std::map<std::string, TreePatternNode*> &ArgMap) {
/// fragments, inline them into place, giving us a pattern without any
/// PatFrag references.
TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
- if (isLeaf()) return this; // nothing to do.
+ if (TP.hasError())
+ return 0;
+
+ if (isLeaf())
+ return this; // nothing to do.
Record *Op = getOperator();
if (!Op->isSubClassOf("PatFrag")) {
@@ -1202,9 +1245,11 @@ TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
TreePattern *Frag = TP.getDAGPatterns().getPatternFragment(Op);
// Verify that we are passing the right number of operands.
- if (Frag->getNumArgs() != Children.size())
+ if (Frag->getNumArgs() != Children.size()) {
TP.error("'" + Op->getName() + "' fragment requires " +
utostr(Frag->getNumArgs()) + " operands!");
+ return 0;
+ }
TreePatternNode *FragTree = Frag->getOnlyTree()->clone();
@@ -1320,8 +1365,7 @@ getIntrinsicInfo(const CodeGenDAGPatterns &CDP) const {
getOperator() != CDP.get_intrinsic_wo_chain_sdnode())
return 0;
- unsigned IID =
- dynamic_cast<IntInit*>(getChild(0)->getLeafValue())->getValue();
+ unsigned IID = cast<IntInit>(getChild(0)->getLeafValue())->getValue();
return &CDP.getIntrinsicInfo(IID);
}
@@ -1331,7 +1375,7 @@ const ComplexPattern *
TreePatternNode::getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const {
if (!isLeaf()) return 0;
- DefInit *DI = dynamic_cast<DefInit*>(getLeafValue());
+ DefInit *DI = dyn_cast<DefInit>(getLeafValue());
if (DI && DI->getDef()->isSubClassOf("ComplexPattern"))
return &CGP.getComplexPattern(DI->getDef());
return 0;
@@ -1379,12 +1423,14 @@ TreePatternNode::isCommutativeIntrinsic(const CodeGenDAGPatterns &CDP) const {
/// ApplyTypeConstraints - Apply all of the type constraints relevant to
/// this node and its children in the tree. This returns true if it makes a
-/// change, false otherwise. If a type contradiction is found, throw an
-/// exception.
+/// change, false otherwise. If a type contradiction is found, flag an error.
bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
+ if (TP.hasError())
+ return false;
+
CodeGenDAGPatterns &CDP = TP.getDAGPatterns();
if (isLeaf()) {
- if (DefInit *DI = dynamic_cast<DefInit*>(getLeafValue())) {
+ if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
// If it's a regclass or something else known, include the type.
bool MadeChange = false;
for (unsigned i = 0, e = Types.size(); i != e; ++i)
@@ -1393,7 +1439,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
return MadeChange;
}
- if (IntInit *II = dynamic_cast<IntInit*>(getLeafValue())) {
+ if (IntInit *II = dyn_cast<IntInit>(getLeafValue())) {
assert(Types.size() == 1 && "Invalid IntInit");
// Int inits are always integers. :)
@@ -1410,21 +1456,15 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// Make sure that the value is representable for this type.
if (Size >= 32) return MadeChange;
- int Val = (II->getValue() << (32-Size)) >> (32-Size);
- if (Val == II->getValue()) return MadeChange;
-
- // If sign-extended doesn't fit, does it fit as unsigned?
- unsigned ValueMask;
- unsigned UnsignedVal;
- ValueMask = unsigned(~uint32_t(0UL) >> (32-Size));
- UnsignedVal = unsigned(II->getValue());
-
- if ((ValueMask & UnsignedVal) == UnsignedVal)
+ // Check that the value doesn't use more bits than we have. It must either
+ // be a sign- or zero-extended equivalent of the original.
+ int64_t SignBitAndAbove = II->getValue() >> (Size - 1);
+ if (SignBitAndAbove == -1 || SignBitAndAbove == 0 || SignBitAndAbove == 1)
return MadeChange;
- TP.error("Integer value '" + itostr(II->getValue())+
+ TP.error("Integer value '" + itostr(II->getValue()) +
"' is out of range for type '" + getEnumName(getType(0)) + "'!");
- return MadeChange;
+ return false;
}
return false;
}
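[Editorial note] The new range check above relies on arithmetic right shift: V >> (Size-1) keeps the sign bit and everything above it. For a literal representable in Size bits that residue can only be -1 (negative, sign-extends cleanly), 0 (small non-negative), or 1 (only the top bit set, still fine as a zero-extended constant). A worked sketch of the same test:

    #include <cstdint>

    bool fitsInBits(int64_t V, unsigned Size) {
      int64_t SignBitAndAbove = V >> (Size - 1);  // arithmetic shift
      return SignBitAndAbove == -1 || SignBitAndAbove == 0 ||
             SignBitAndAbove == 1;
    }

    // fitsInBits(255, 8) and fitsInBits(-128, 8) hold;
    // fitsInBits(256, 8) fails, since 256 >> 7 == 2.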
@@ -1487,10 +1527,12 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
for (unsigned i = 0, e = NumRetVTs; i != e; ++i)
MadeChange |= UpdateNodeType(i, Int->IS.RetVTs[i], TP);
- if (getNumChildren() != NumParamVTs + 1)
+ if (getNumChildren() != NumParamVTs + 1) {
TP.error("Intrinsic '" + Int->Name + "' expects " +
utostr(NumParamVTs) + " operands, not " +
utostr(getNumChildren() - 1) + " operands!");
+ return false;
+ }
// Apply type info to the intrinsic ID.
MadeChange |= getChild(0)->UpdateNodeType(0, MVT::iPTR, TP);
@@ -1510,9 +1552,11 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// Check that the number of operands is sane. Negative operands -> varargs.
if (NI.getNumOperands() >= 0 &&
- getNumChildren() != (unsigned)NI.getNumOperands())
+ getNumChildren() != (unsigned)NI.getNumOperands()) {
TP.error(getOperator()->getName() + " node requires exactly " +
itostr(NI.getNumOperands()) + " operands!");
+ return false;
+ }
bool MadeChange = NI.ApplyTypeConstraints(this, TP);
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
@@ -1541,7 +1585,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
const CodeGenRegisterClass &RC =
CDP.getTargetInfo().getRegisterClass(RegClass);
MadeChange |= UpdateNodeType(ResNo, RC.getValueTypes(), TP);
- } else if (ResultNode->getName() == "unknown") {
+ } else if (ResultNode->isSubClassOf("unknown_class")) {
// Nothing to do.
} else {
assert(ResultNode->isSubClassOf("RegisterClass") &&
@@ -1581,15 +1625,16 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// If the instruction expects a predicate or optional def operand, we
// codegen this by setting the operand to it's default value if it has a
// non-empty DefaultOps field.
- if ((OperandNode->isSubClassOf("PredicateOperand") ||
- OperandNode->isSubClassOf("OptionalDefOperand")) &&
+ if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
!CDP.getDefaultOperand(OperandNode).DefaultOps.empty())
continue;
// Verify that we didn't run out of provided operands.
- if (ChildNo >= getNumChildren())
+ if (ChildNo >= getNumChildren()) {
TP.error("Instruction '" + getOperator()->getName() +
"' expects more operands than were provided.");
+ return false;
+ }
MVT::SimpleValueType VT;
TreePatternNode *Child = getChild(ChildNo++);
@@ -1609,7 +1654,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
MadeChange |= Child->UpdateNodeType(ChildResNo, VT, TP);
} else if (OperandNode->isSubClassOf("PointerLikeRegClass")) {
MadeChange |= Child->UpdateNodeType(ChildResNo, MVT::iPTR, TP);
- } else if (OperandNode->getName() == "unknown") {
+ } else if (OperandNode->isSubClassOf("unknown_class")) {
// Nothing to do.
} else
llvm_unreachable("Unknown operand type!");
@@ -1617,9 +1662,11 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
MadeChange |= Child->ApplyTypeConstraints(TP, NotRegisters);
}
- if (ChildNo != getNumChildren())
+ if (ChildNo != getNumChildren()) {
TP.error("Instruction '" + getOperator()->getName() +
"' was provided too many operands!");
+ return false;
+ }
return MadeChange;
}
@@ -1627,9 +1674,11 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
assert(getOperator()->isSubClassOf("SDNodeXForm") && "Unknown node type!");
// Node transforms always take one operand.
- if (getNumChildren() != 1)
+ if (getNumChildren() != 1) {
TP.error("Node transform '" + getOperator()->getName() +
"' requires one operand!");
+ return false;
+ }
bool MadeChange = getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
@@ -1652,7 +1701,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
static bool OnlyOnRHSOfCommutative(TreePatternNode *N) {
if (!N->isLeaf() && N->getOperator()->getName() == "imm")
return true;
- if (N->isLeaf() && dynamic_cast<IntInit*>(N->getLeafValue()))
+ if (N->isLeaf() && isa<IntInit>(N->getLeafValue()))
return true;
return false;
}
@@ -1703,27 +1752,30 @@ bool TreePatternNode::canPatternMatch(std::string &Reason,
//
TreePattern::TreePattern(Record *TheRec, ListInit *RawPat, bool isInput,
- CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){
- isInputPattern = isInput;
+ CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
+ isInputPattern(isInput), HasError(false) {
for (unsigned i = 0, e = RawPat->getSize(); i != e; ++i)
Trees.push_back(ParseTreePattern(RawPat->getElement(i), ""));
}
TreePattern::TreePattern(Record *TheRec, DagInit *Pat, bool isInput,
- CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){
- isInputPattern = isInput;
+ CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
+ isInputPattern(isInput), HasError(false) {
Trees.push_back(ParseTreePattern(Pat, ""));
}
TreePattern::TreePattern(Record *TheRec, TreePatternNode *Pat, bool isInput,
- CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){
- isInputPattern = isInput;
+ CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
+ isInputPattern(isInput), HasError(false) {
Trees.push_back(Pat);
}
-void TreePattern::error(const std::string &Msg) const {
+void TreePattern::error(const std::string &Msg) {
+ if (HasError)
+ return;
dump();
- throw TGError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg);
+ PrintError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg);
+ HasError = true;
}
void TreePattern::ComputeNamedNodes() {
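[Editorial note] With exceptions gone, TreePattern latches the first diagnostic in a HasError flag, and every type-inference entry point now begins with an early-out guard, as the hunks above and below show. The shape of the pattern, boiled down to a simplified stand-in rather than the real class:

    struct TreePatternSketch {
      bool HasError;
      TreePatternSketch() : HasError(false) {}

      bool hasError() const { return HasError; }
      void resetError() { HasError = false; }

      void error(const char *Msg) {
        if (HasError) return;   // keep only the first contradiction
        (void)Msg;              // real code: PrintError(Loc, Msg)
        HasError = true;
      }

      bool enforceSomething() {
        if (hasError()) return false;  // guard added at each entry point
        // ... inference work; on contradiction: error("..."); return false;
        return true;
      }
    };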
@@ -1741,7 +1793,7 @@ void TreePattern::ComputeNamedNodes(TreePatternNode *N) {
TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
- if (DefInit *DI = dynamic_cast<DefInit*>(TheInit)) {
+ if (DefInit *DI = dyn_cast<DefInit>(TheInit)) {
Record *R = DI->getDef();
// Direct reference to a leaf DagNode or PatFrag? Turn it into a
@@ -1765,26 +1817,26 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
return Res;
}
- if (IntInit *II = dynamic_cast<IntInit*>(TheInit)) {
+ if (IntInit *II = dyn_cast<IntInit>(TheInit)) {
if (!OpName.empty())
error("Constant int argument should not have a name!");
return new TreePatternNode(II, 1);
}
- if (BitsInit *BI = dynamic_cast<BitsInit*>(TheInit)) {
+ if (BitsInit *BI = dyn_cast<BitsInit>(TheInit)) {
// Turn this into an IntInit.
Init *II = BI->convertInitializerTo(IntRecTy::get());
- if (II == 0 || !dynamic_cast<IntInit*>(II))
+ if (II == 0 || !isa<IntInit>(II))
error("Bits value must be constants!");
return ParseTreePattern(II, OpName);
}
- DagInit *Dag = dynamic_cast<DagInit*>(TheInit);
+ DagInit *Dag = dyn_cast<DagInit>(TheInit);
if (!Dag) {
TheInit->dump();
error("Pattern has unexpected init kind!");
}
- DefInit *OpDef = dynamic_cast<DefInit*>(Dag->getOperator());
+ DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
if (!OpDef) error("Pattern has unexpected operator type!");
Record *Operator = OpDef->getDef();
@@ -1912,7 +1964,7 @@ static bool SimplifyTree(TreePatternNode *&N) {
/// InferAllTypes - Infer/propagate as many types throughout the expression
/// patterns as possible. Return true if all types are inferred, false
-/// otherwise. Throw an exception if a type contradiction is found.
+/// otherwise. Flags an error if a type contradiction is found.
bool TreePattern::
InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
if (NamedNodes.empty())
@@ -1949,7 +2001,7 @@ InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
// us to match things like:
// def : Pat<(v1i64 (bitconvert(v2i32 DPR:$src))), (v1i64 DPR:$src)>;
if (Nodes[i] == Trees[0] && Nodes[i]->isLeaf()) {
- DefInit *DI = dynamic_cast<DefInit*>(Nodes[i]->getLeafValue());
+ DefInit *DI = dyn_cast<DefInit>(Nodes[i]->getLeafValue());
if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
DI->getDef()->isSubClassOf("RegisterOperand")))
continue;
@@ -2033,6 +2085,9 @@ CodeGenDAGPatterns::CodeGenDAGPatterns(RecordKeeper &R) :
// stores, and side effects in many cases by examining an
// instruction's pattern.
InferInstructionFlags();
+
+ // Verify that instruction flags match the patterns.
+ VerifyInstructionFlags();
}
CodeGenDAGPatterns::~CodeGenDAGPatterns() {
@@ -2111,7 +2166,7 @@ void CodeGenDAGPatterns::ParsePatternFragments() {
// Parse the operands list.
DagInit *OpsList = Fragments[i]->getValueAsDag("Operands");
- DefInit *OpsOp = dynamic_cast<DefInit*>(OpsList->getOperator());
+ DefInit *OpsOp = dyn_cast<DefInit>(OpsList->getOperator());
// Special cases: ops == outs == ins. Different names are used to
// improve readability.
if (!OpsOp ||
@@ -2123,9 +2178,8 @@ void CodeGenDAGPatterns::ParsePatternFragments() {
// Copy over the arguments.
Args.clear();
for (unsigned j = 0, e = OpsList->getNumArgs(); j != e; ++j) {
- if (!dynamic_cast<DefInit*>(OpsList->getArg(j)) ||
- static_cast<DefInit*>(OpsList->getArg(j))->
- getDef()->getName() != "node")
+ if (!isa<DefInit>(OpsList->getArg(j)) ||
+ cast<DefInit>(OpsList->getArg(j))->getDef()->getName() != "node")
P->error("Operands list should all be 'node' values.");
if (OpsList->getArgName(j).empty())
P->error("Operands list should have names for each operand!");
@@ -2161,14 +2215,8 @@ void CodeGenDAGPatterns::ParsePatternFragments() {
// Infer as many types as possible. Don't worry about it if we don't infer
// all of them, some may depend on the inputs of the pattern.
- try {
- ThePat->InferAllTypes();
- } catch (...) {
- // If this pattern fragment is not supported by this target (no types can
- // satisfy its constraints), just ignore it. If the bogus pattern is
- // actually used by instructions, the type consistency error will be
- // reported there.
- }
+ ThePat->InferAllTypes();
+ ThePat->resetError();
// If debugging, print out the pattern fragment result.
DEBUG(ThePat->dump());
@@ -2176,53 +2224,46 @@ void CodeGenDAGPatterns::ParsePatternFragments() {
}
void CodeGenDAGPatterns::ParseDefaultOperands() {
- std::vector<Record*> DefaultOps[2];
- DefaultOps[0] = Records.getAllDerivedDefinitions("PredicateOperand");
- DefaultOps[1] = Records.getAllDerivedDefinitions("OptionalDefOperand");
+ std::vector<Record*> DefaultOps;
+ DefaultOps = Records.getAllDerivedDefinitions("OperandWithDefaultOps");
// Find some SDNode.
assert(!SDNodes.empty() && "No SDNodes parsed?");
Init *SomeSDNode = DefInit::get(SDNodes.begin()->first);
- for (unsigned iter = 0; iter != 2; ++iter) {
- for (unsigned i = 0, e = DefaultOps[iter].size(); i != e; ++i) {
- DagInit *DefaultInfo = DefaultOps[iter][i]->getValueAsDag("DefaultOps");
-
- // Clone the DefaultInfo dag node, changing the operator from 'ops' to
- // SomeSDnode so that we can parse this.
- std::vector<std::pair<Init*, std::string> > Ops;
- for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op)
- Ops.push_back(std::make_pair(DefaultInfo->getArg(op),
- DefaultInfo->getArgName(op)));
- DagInit *DI = DagInit::get(SomeSDNode, "", Ops);
-
- // Create a TreePattern to parse this.
- TreePattern P(DefaultOps[iter][i], DI, false, *this);
- assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!");
-
- // Copy the operands over into a DAGDefaultOperand.
- DAGDefaultOperand DefaultOpInfo;
-
- TreePatternNode *T = P.getTree(0);
- for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) {
- TreePatternNode *TPN = T->getChild(op);
- while (TPN->ApplyTypeConstraints(P, false))
- /* Resolve all types */;
-
- if (TPN->ContainsUnresolvedType()) {
- if (iter == 0)
- throw "Value #" + utostr(i) + " of PredicateOperand '" +
- DefaultOps[iter][i]->getName() +"' doesn't have a concrete type!";
- else
- throw "Value #" + utostr(i) + " of OptionalDefOperand '" +
- DefaultOps[iter][i]->getName() +"' doesn't have a concrete type!";
- }
- DefaultOpInfo.DefaultOps.push_back(TPN);
+ for (unsigned i = 0, e = DefaultOps.size(); i != e; ++i) {
+ DagInit *DefaultInfo = DefaultOps[i]->getValueAsDag("DefaultOps");
+
+ // Clone the DefaultInfo dag node, changing the operator from 'ops' to
+ // SomeSDnode so that we can parse this.
+ std::vector<std::pair<Init*, std::string> > Ops;
+ for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op)
+ Ops.push_back(std::make_pair(DefaultInfo->getArg(op),
+ DefaultInfo->getArgName(op)));
+ DagInit *DI = DagInit::get(SomeSDNode, "", Ops);
+
+ // Create a TreePattern to parse this.
+ TreePattern P(DefaultOps[i], DI, false, *this);
+ assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!");
+
+ // Copy the operands over into a DAGDefaultOperand.
+ DAGDefaultOperand DefaultOpInfo;
+
+ TreePatternNode *T = P.getTree(0);
+ for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) {
+ TreePatternNode *TPN = T->getChild(op);
+ while (TPN->ApplyTypeConstraints(P, false))
+ /* Resolve all types */;
+
+ if (TPN->ContainsUnresolvedType()) {
+ PrintFatalError("Value #" + utostr(i) + " of OperandWithDefaultOps '" +
+ DefaultOps[i]->getName() +"' doesn't have a concrete type!");
}
-
- // Insert it into the DefaultOperands map so we can find it later.
- DefaultOperands[DefaultOps[iter][i]] = DefaultOpInfo;
+ DefaultOpInfo.DefaultOps.push_back(TPN);
}
+
+ // Insert it into the DefaultOperands map so we can find it later.
+ DefaultOperands[DefaultOps[i]] = DefaultOpInfo;
}
}
@@ -2233,7 +2274,7 @@ static bool HandleUse(TreePattern *I, TreePatternNode *Pat,
// No name -> not interesting.
if (Pat->getName().empty()) {
if (Pat->isLeaf()) {
- DefInit *DI = dynamic_cast<DefInit*>(Pat->getLeafValue());
+ DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
DI->getDef()->isSubClassOf("RegisterOperand")))
I->error("Input " + DI->getDef()->getName() + " must be named!");
@@ -2243,7 +2284,7 @@ static bool HandleUse(TreePattern *I, TreePatternNode *Pat,
Record *Rec;
if (Pat->isLeaf()) {
- DefInit *DI = dynamic_cast<DefInit*>(Pat->getLeafValue());
+ DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
if (!DI) I->error("Input $" + Pat->getName() + " must be an identifier!");
Rec = DI->getDef();
} else {
@@ -2261,7 +2302,7 @@ static bool HandleUse(TreePattern *I, TreePatternNode *Pat,
}
Record *SlotRec;
if (Slot->isLeaf()) {
- SlotRec = dynamic_cast<DefInit*>(Slot->getLeafValue())->getDef();
+ SlotRec = cast<DefInit>(Slot->getLeafValue())->getDef();
} else {
assert(Slot->getNumChildren() == 0 && "can't be a use with children!");
SlotRec = Slot->getOperator();
@@ -2296,7 +2337,7 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
if (!Dest->isLeaf())
I->error("implicitly defined value should be a register!");
- DefInit *Val = dynamic_cast<DefInit*>(Dest->getLeafValue());
+ DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
if (!Val || !Val->getDef()->isSubClassOf("Register"))
I->error("implicitly defined value should be a register!");
InstImpResults.push_back(Val->getDef());
@@ -2337,7 +2378,7 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
if (!Dest->isLeaf())
I->error("set destination should be a register!");
- DefInit *Val = dynamic_cast<DefInit*>(Dest->getLeafValue());
+ DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
if (!Val)
I->error("set destination should be a register!");
@@ -2367,43 +2408,36 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
class InstAnalyzer {
const CodeGenDAGPatterns &CDP;
- bool &mayStore;
- bool &mayLoad;
- bool &IsBitcast;
- bool &HasSideEffects;
- bool &IsVariadic;
public:
- InstAnalyzer(const CodeGenDAGPatterns &cdp,
- bool &maystore, bool &mayload, bool &isbc, bool &hse, bool &isv)
- : CDP(cdp), mayStore(maystore), mayLoad(mayload), IsBitcast(isbc),
- HasSideEffects(hse), IsVariadic(isv) {
- }
+ bool hasSideEffects;
+ bool mayStore;
+ bool mayLoad;
+ bool isBitcast;
+ bool isVariadic;
- /// Analyze - Analyze the specified instruction, returning true if the
- /// instruction had a pattern.
- bool Analyze(Record *InstRecord) {
- const TreePattern *Pattern = CDP.getInstruction(InstRecord).getPattern();
- if (Pattern == 0) {
- HasSideEffects = 1;
- return false; // No pattern.
- }
+ InstAnalyzer(const CodeGenDAGPatterns &cdp)
+ : CDP(cdp), hasSideEffects(false), mayStore(false), mayLoad(false),
+ isBitcast(false), isVariadic(false) {}
- // FIXME: Assume only the first tree is the pattern. The others are clobber
- // nodes.
- AnalyzeNode(Pattern->getTree(0));
- return true;
+ void Analyze(const TreePattern *Pat) {
+ // Assume only the first tree is the pattern. The others are clobber nodes.
+ AnalyzeNode(Pat->getTree(0));
+ }
+
+ void Analyze(const PatternToMatch *Pat) {
+ AnalyzeNode(Pat->getSrcPattern());
}
private:
bool IsNodeBitcast(const TreePatternNode *N) const {
- if (HasSideEffects || mayLoad || mayStore || IsVariadic)
+ if (hasSideEffects || mayLoad || mayStore || isVariadic)
return false;
if (N->getNumChildren() != 2)
return false;
const TreePatternNode *N0 = N->getChild(0);
- if (!N0->isLeaf() || !dynamic_cast<DefInit*>(N0->getLeafValue()))
+ if (!N0->isLeaf() || !isa<DefInit>(N0->getLeafValue()))
return false;
const TreePatternNode *N1 = N->getChild(1);
@@ -2418,16 +2452,17 @@ private:
return OpInfo.getEnumName() == "ISD::BITCAST";
}
+public:
void AnalyzeNode(const TreePatternNode *N) {
if (N->isLeaf()) {
- if (DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue())) {
+ if (DefInit *DI = dyn_cast<DefInit>(N->getLeafValue())) {
Record *LeafRec = DI->getDef();
// Handle ComplexPattern leaves.
if (LeafRec->isSubClassOf("ComplexPattern")) {
const ComplexPattern &CP = CDP.getComplexPattern(LeafRec);
if (CP.hasProperty(SDNPMayStore)) mayStore = true;
if (CP.hasProperty(SDNPMayLoad)) mayLoad = true;
- if (CP.hasProperty(SDNPSideEffect)) HasSideEffects = true;
+ if (CP.hasProperty(SDNPSideEffect)) hasSideEffects = true;
}
}
return;
@@ -2439,7 +2474,7 @@ private:
// Ignore set nodes, which are not SDNodes.
if (N->getOperator()->getName() == "set") {
- IsBitcast = IsNodeBitcast(N);
+ isBitcast = IsNodeBitcast(N);
return;
}
@@ -2449,8 +2484,8 @@ private:
// Notice properties of the node.
if (OpInfo.hasProperty(SDNPMayStore)) mayStore = true;
if (OpInfo.hasProperty(SDNPMayLoad)) mayLoad = true;
- if (OpInfo.hasProperty(SDNPSideEffect)) HasSideEffects = true;
- if (OpInfo.hasProperty(SDNPVariadic)) IsVariadic = true;
+ if (OpInfo.hasProperty(SDNPSideEffect)) hasSideEffects = true;
+ if (OpInfo.hasProperty(SDNPVariadic)) isVariadic = true;
if (const CodeGenIntrinsic *IntInfo = N->getIntrinsicInfo(CDP)) {
// If this is an intrinsic, analyze it.
@@ -2462,68 +2497,70 @@ private:
if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteMem)
// WriteMem intrinsics can have other strange effects.
- HasSideEffects = true;
+ hasSideEffects = true;
}
}
};
-static void InferFromPattern(const CodeGenInstruction &Inst,
- bool &MayStore, bool &MayLoad,
- bool &IsBitcast,
- bool &HasSideEffects, bool &IsVariadic,
- const CodeGenDAGPatterns &CDP) {
- MayStore = MayLoad = IsBitcast = HasSideEffects = IsVariadic = false;
-
- bool HadPattern =
- InstAnalyzer(CDP, MayStore, MayLoad, IsBitcast, HasSideEffects, IsVariadic)
- .Analyze(Inst.TheDef);
-
- // InstAnalyzer only correctly analyzes mayStore/mayLoad so far.
- if (Inst.mayStore) { // If the .td file explicitly sets mayStore, use it.
- // If we decided that this is a store from the pattern, then the .td file
- // entry is redundant.
- if (MayStore)
- PrintWarning(Inst.TheDef->getLoc(),
- "mayStore flag explicitly set on "
- "instruction, but flag already inferred from pattern.");
- MayStore = true;
+static bool InferFromPattern(CodeGenInstruction &InstInfo,
+ const InstAnalyzer &PatInfo,
+ Record *PatDef) {
+ bool Error = false;
+
+ // Remember where InstInfo got its flags.
+ if (InstInfo.hasUndefFlags())
+ InstInfo.InferredFrom = PatDef;
+
+ // Check explicitly set flags for consistency.
+ if (InstInfo.hasSideEffects != PatInfo.hasSideEffects &&
+ !InstInfo.hasSideEffects_Unset) {
+ // Allow explicitly setting hasSideEffects = 1 on instructions, even when
+ // the pattern has no side effects. That could be useful for div/rem
+ // instructions that may trap.
+ if (!InstInfo.hasSideEffects) {
+ Error = true;
+ PrintError(PatDef->getLoc(), "Pattern doesn't match hasSideEffects = " +
+ Twine(InstInfo.hasSideEffects));
+ }
}
- if (Inst.mayLoad) { // If the .td file explicitly sets mayLoad, use it.
- // If we decided that this is a load from the pattern, then the .td file
- // entry is redundant.
- if (MayLoad)
- PrintWarning(Inst.TheDef->getLoc(),
- "mayLoad flag explicitly set on "
- "instruction, but flag already inferred from pattern.");
- MayLoad = true;
+ if (InstInfo.mayStore != PatInfo.mayStore && !InstInfo.mayStore_Unset) {
+ Error = true;
+ PrintError(PatDef->getLoc(), "Pattern doesn't match mayStore = " +
+ Twine(InstInfo.mayStore));
}
- if (Inst.neverHasSideEffects) {
- if (HadPattern)
- PrintWarning(Inst.TheDef->getLoc(),
- "neverHasSideEffects flag explicitly set on "
- "instruction, but flag already inferred from pattern.");
- HasSideEffects = false;
+ if (InstInfo.mayLoad != PatInfo.mayLoad && !InstInfo.mayLoad_Unset) {
+ // Allow explicitly setting mayLoad = 1, even when the pattern has no loads.
+    // Some targets translate immediates to loads.
+ if (!InstInfo.mayLoad) {
+ Error = true;
+ PrintError(PatDef->getLoc(), "Pattern doesn't match mayLoad = " +
+ Twine(InstInfo.mayLoad));
+ }
}
- if (Inst.hasSideEffects) {
- if (HasSideEffects)
- PrintWarning(Inst.TheDef->getLoc(),
- "hasSideEffects flag explicitly set on "
- "instruction, but flag already inferred from pattern.");
- HasSideEffects = true;
- }
+ // Transfer inferred flags.
+ InstInfo.hasSideEffects |= PatInfo.hasSideEffects;
+ InstInfo.mayStore |= PatInfo.mayStore;
+ InstInfo.mayLoad |= PatInfo.mayLoad;
+
+ // These flags are silently added without any verification.
+ InstInfo.isBitcast |= PatInfo.isBitcast;
+
+ // Don't infer isVariadic. This flag means something different on SDNodes and
+ // instructions. For example, a CALL SDNode is variadic because it has the
+ // call arguments as operands, but a CALL instruction is not variadic - it
+ // has argument registers as implicit, not explicit uses.
- if (Inst.Operands.isVariadic)
- IsVariadic = true; // Can warn if we want.
+ return Error;
}
/// hasNullFragReference - Return true if the DAG has any reference to the
/// null_frag operator.
static bool hasNullFragReference(DagInit *DI) {
- DefInit *OpDef = dynamic_cast<DefInit*>(DI->getOperator());
+ DefInit *OpDef = dyn_cast<DefInit>(DI->getOperator());
if (!OpDef) return false;
Record *Operator = OpDef->getDef();
@@ -2531,7 +2568,7 @@ static bool hasNullFragReference(DagInit *DI) {
if (Operator->getName() == "null_frag") return true;
// If any of the arguments reference the null fragment, return true.
for (unsigned i = 0, e = DI->getNumArgs(); i != e; ++i) {
- DagInit *Arg = dynamic_cast<DagInit*>(DI->getArg(i));
+ DagInit *Arg = dyn_cast<DagInit>(DI->getArg(i));
if (Arg && hasNullFragReference(Arg))
return true;
}
@@ -2543,7 +2580,7 @@ static bool hasNullFragReference(DagInit *DI) {
/// the null_frag operator.
static bool hasNullFragReference(ListInit *LI) {
for (unsigned i = 0, e = LI->getSize(); i != e; ++i) {
- DagInit *DI = dynamic_cast<DagInit*>(LI->getElement(i));
+ DagInit *DI = dyn_cast<DagInit>(LI->getElement(i));
assert(DI && "non-dag in an instruction Pattern list?!");
if (hasNullFragReference(DI))
return true;
@@ -2551,6 +2588,17 @@ static bool hasNullFragReference(ListInit *LI) {
return false;
}
+/// Get all the instructions in a tree.
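+/// For example (instruction names are hypothetical), a destination pattern
+/// such as (STW (ADD $a, $b), addr:$p) yields both the STW and ADD records.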
+static void
+getInstructionsInTree(TreePatternNode *Tree, SmallVectorImpl<Record*> &Instrs) {
+ if (Tree->isLeaf())
+ return;
+ if (Tree->getOperator()->isSubClassOf("Instruction"))
+ Instrs.push_back(Tree->getOperator());
+ for (unsigned i = 0, e = Tree->getNumChildren(); i != e; ++i)
+ getInstructionsInTree(Tree->getChild(i), Instrs);
+}
+
/// ParseInstructions - Parse all of the instructions, inlining and resolving
/// any fragments involved. This populates the Instructions list with fully
/// resolved instructions.
@@ -2560,7 +2608,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
ListInit *LI = 0;
- if (dynamic_cast<ListInit*>(Instrs[i]->getValueInit("Pattern")))
+ if (isa<ListInit>(Instrs[i]->getValueInit("Pattern")))
LI = Instrs[i]->getValueAsListInit("Pattern");
// If there is no pattern, only collect minimal information about the
@@ -2655,7 +2703,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
if (i == 0)
Res0Node = RNode;
- Record *R = dynamic_cast<DefInit*>(RNode->getLeafValue())->getDef();
+ Record *R = cast<DefInit>(RNode->getLeafValue())->getDef();
if (R == 0)
I->error("Operand $" + OpName + " should be a set destination: all "
"outputs must occur before inputs in operand list!");
@@ -2683,11 +2731,9 @@ void CodeGenDAGPatterns::ParseInstructions() {
I->error("Operand #" + utostr(i) + " in operands list has no name!");
if (!InstInputsCheck.count(OpName)) {
- // If this is an predicate operand or optional def operand with an
- // DefaultOps set filled in, we can ignore this. When we codegen it,
- // we will do so as always executed.
- if (Op.Rec->isSubClassOf("PredicateOperand") ||
- Op.Rec->isSubClassOf("OptionalDefOperand")) {
+ // If this is an operand with a DefaultOps set filled in, we can ignore
+ // this. When we codegen it, we will do so as always executed.
+ if (Op.Rec->isSubClassOf("OperandWithDefaultOps")) {
// Does it have a non-empty DefaultOps field? If so, ignore this
// operand.
if (!getDefaultOperand(Op.Rec).DefaultOps.empty())
@@ -2699,8 +2745,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
TreePatternNode *InVal = InstInputsCheck[OpName];
InstInputsCheck.erase(OpName); // It occurred, remove from map.
- if (InVal->isLeaf() &&
- dynamic_cast<DefInit*>(InVal->getLeafValue())) {
+ if (InVal->isLeaf() && isa<DefInit>(InVal->getLeafValue())) {
Record *InRec = static_cast<DefInit*>(InVal->getLeafValue())->getDef();
if (Op.Rec != InRec && !InRec->isSubClassOf("ComplexPattern"))
I->error("Operand $" + OpName + "'s register class disagrees"
@@ -2754,11 +2799,11 @@ void CodeGenDAGPatterns::ParseInstructions() {
}
// If we can, convert the instructions to be patterns that are matched!
- for (std::map<Record*, DAGInstruction, RecordPtrCmp>::iterator II =
+ for (std::map<Record*, DAGInstruction, LessRecordByID>::iterator II =
Instructions.begin(),
E = Instructions.end(); II != E; ++II) {
DAGInstruction &TheInst = II->second;
- const TreePattern *I = TheInst.getPattern();
+ TreePattern *I = TheInst.getPattern();
if (I == 0) continue; // No pattern.
// FIXME: Assume only the first tree is the pattern. The others are clobber
@@ -2789,7 +2834,7 @@ typedef std::pair<const TreePatternNode*, unsigned> NameRecord;
static void FindNames(const TreePatternNode *P,
std::map<std::string, NameRecord> &Names,
- const TreePattern *PatternTop) {
+ TreePattern *PatternTop) {
if (!P->getName().empty()) {
NameRecord &Rec = Names[P->getName()];
// If this is the first instance of the name, remember the node.
@@ -2806,12 +2851,15 @@ static void FindNames(const TreePatternNode *P,
}
}
-void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern,
+void CodeGenDAGPatterns::AddPatternToMatch(TreePattern *Pattern,
const PatternToMatch &PTM) {
// Do some sanity checking on the pattern we're about to match.
std::string Reason;
- if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this))
- Pattern->error("Pattern can never match: " + Reason);
+ if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this)) {
+ PrintWarning(Pattern->getRecord()->getLoc(),
+ Twine("Pattern can never match: ") + Reason);
+ return;
+ }
// If the source pattern's root is a complex pattern, that complex pattern
// must specify the nodes it can potentially match.
@@ -2852,25 +2900,156 @@ void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern,
void CodeGenDAGPatterns::InferInstructionFlags() {
const std::vector<const CodeGenInstruction*> &Instructions =
Target.getInstructionsByEnumValue();
+
+ // First try to infer flags from the primary instruction pattern, if any.
+ SmallVector<CodeGenInstruction*, 8> Revisit;
+ unsigned Errors = 0;
for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
CodeGenInstruction &InstInfo =
const_cast<CodeGenInstruction &>(*Instructions[i]);
- // Determine properties of the instruction from its pattern.
- bool MayStore, MayLoad, IsBitcast, HasSideEffects, IsVariadic;
- InferFromPattern(InstInfo, MayStore, MayLoad, IsBitcast,
- HasSideEffects, IsVariadic, *this);
- InstInfo.mayStore = MayStore;
- InstInfo.mayLoad = MayLoad;
- InstInfo.isBitcast = IsBitcast;
- InstInfo.hasSideEffects = HasSideEffects;
- InstInfo.Operands.isVariadic = IsVariadic;
- // Sanity checks.
- if (InstInfo.isReMaterializable && InstInfo.hasSideEffects)
- throw TGError(InstInfo.TheDef->getLoc(), "The instruction " +
- InstInfo.TheDef->getName() +
- " is rematerializable AND has unmodeled side effects?");
+ // Treat neverHasSideEffects = 1 as the equivalent of hasSideEffects = 0.
+ // This flag is obsolete and will be removed.
+ if (InstInfo.neverHasSideEffects) {
+ assert(!InstInfo.hasSideEffects);
+ InstInfo.hasSideEffects_Unset = false;
+ }
+
+ // Get the primary instruction pattern.
+ const TreePattern *Pattern = getInstruction(InstInfo.TheDef).getPattern();
+ if (!Pattern) {
+ if (InstInfo.hasUndefFlags())
+ Revisit.push_back(&InstInfo);
+ continue;
+ }
+ InstAnalyzer PatInfo(*this);
+ PatInfo.Analyze(Pattern);
+ Errors += InferFromPattern(InstInfo, PatInfo, InstInfo.TheDef);
+ }
+
+ // Second, look for single-instruction patterns defined outside the
+ // instruction.
+ for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) {
+ const PatternToMatch &PTM = *I;
+
+ // We can only infer from single-instruction patterns, otherwise we won't
+ // know which instruction should get the flags.
+ SmallVector<Record*, 8> PatInstrs;
+ getInstructionsInTree(PTM.getDstPattern(), PatInstrs);
+ if (PatInstrs.size() != 1)
+ continue;
+
+ // Get the single instruction.
+ CodeGenInstruction &InstInfo = Target.getInstruction(PatInstrs.front());
+
+ // Only infer properties from the first pattern. We'll verify the others.
+ if (InstInfo.InferredFrom)
+ continue;
+
+ InstAnalyzer PatInfo(*this);
+ PatInfo.Analyze(&PTM);
+ Errors += InferFromPattern(InstInfo, PatInfo, PTM.getSrcRecord());
+ }
+
+ if (Errors)
+ PrintFatalError("pattern conflicts");
+
+ // Revisit instructions with undefined flags and no pattern.
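+  // A target can opt out of this guessing from its InstrInfo record with a
+  // .td fragment along these lines (the field is declared in Target.td):
+  //   let guessInstructionProperties = 0;
+  // in which case the "Can't infer ..." errors below are emitted instead.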
+ if (Target.guessInstructionProperties()) {
+ for (unsigned i = 0, e = Revisit.size(); i != e; ++i) {
+ CodeGenInstruction &InstInfo = *Revisit[i];
+ if (InstInfo.InferredFrom)
+ continue;
+ // The mayLoad and mayStore flags default to false.
+ // Conservatively assume hasSideEffects if it wasn't explicit.
+ if (InstInfo.hasSideEffects_Unset)
+ InstInfo.hasSideEffects = true;
+ }
+ return;
}
+
+ // Complain about any flags that are still undefined.
+ for (unsigned i = 0, e = Revisit.size(); i != e; ++i) {
+ CodeGenInstruction &InstInfo = *Revisit[i];
+ if (InstInfo.InferredFrom)
+ continue;
+ if (InstInfo.hasSideEffects_Unset)
+ PrintError(InstInfo.TheDef->getLoc(),
+ "Can't infer hasSideEffects from patterns");
+ if (InstInfo.mayStore_Unset)
+ PrintError(InstInfo.TheDef->getLoc(),
+ "Can't infer mayStore from patterns");
+ if (InstInfo.mayLoad_Unset)
+ PrintError(InstInfo.TheDef->getLoc(),
+ "Can't infer mayLoad from patterns");
+ }
+}
+
+
+/// Verify instruction flags against pattern node properties.
+void CodeGenDAGPatterns::VerifyInstructionFlags() {
+ unsigned Errors = 0;
+ for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) {
+ const PatternToMatch &PTM = *I;
+ SmallVector<Record*, 8> Instrs;
+ getInstructionsInTree(PTM.getDstPattern(), Instrs);
+ if (Instrs.empty())
+ continue;
+
+ // Count the number of instructions with each flag set.
+ unsigned NumSideEffects = 0;
+ unsigned NumStores = 0;
+ unsigned NumLoads = 0;
+ for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
+ const CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
+ NumSideEffects += InstInfo.hasSideEffects;
+ NumStores += InstInfo.mayStore;
+ NumLoads += InstInfo.mayLoad;
+ }
+
+ // Analyze the source pattern.
+ InstAnalyzer PatInfo(*this);
+ PatInfo.Analyze(&PTM);
+
+ // Collect error messages.
+ SmallVector<std::string, 4> Msgs;
+
+ // Check for missing flags in the output.
+    // Extra flags are permitted, at least for now.
+ if (PatInfo.hasSideEffects && !NumSideEffects)
+ Msgs.push_back("pattern has side effects, but hasSideEffects isn't set");
+
+ // Don't verify store flags on instructions with side effects. At least for
+    // intrinsics, having side effects implies mayStore.
+ if (!PatInfo.hasSideEffects && PatInfo.mayStore && !NumStores)
+ Msgs.push_back("pattern may store, but mayStore isn't set");
+
+ // Similarly, mayStore implies mayLoad on intrinsics.
+ if (!PatInfo.mayStore && PatInfo.mayLoad && !NumLoads)
+ Msgs.push_back("pattern may load, but mayLoad isn't set");
+
+ // Print error messages.
+ if (Msgs.empty())
+ continue;
+ ++Errors;
+
+ for (unsigned i = 0, e = Msgs.size(); i != e; ++i)
+ PrintError(PTM.getSrcRecord()->getLoc(), Twine(Msgs[i]) + " on the " +
+ (Instrs.size() == 1 ?
+ "instruction" : "output instructions"));
+ // Provide the location of the relevant instruction definitions.
+ for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
+ if (Instrs[i] != PTM.getSrcRecord())
+ PrintError(Instrs[i]->getLoc(), "defined here");
+ const CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
+ if (InstInfo.InferredFrom &&
+ InstInfo.InferredFrom != InstInfo.TheDef &&
+ InstInfo.InferredFrom != PTM.getSrcRecord())
+      PrintError(InstInfo.InferredFrom->getLoc(), "inferred from pattern");
+ }
+ }
+ if (Errors)
+ PrintFatalError("Errors in DAG patterns");
}
/// Given a pattern result with an unresolved type, see if we can find one
@@ -3230,7 +3409,7 @@ static void GenerateVariantsOf(TreePatternNode *N,
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i) {
TreePatternNode *Child = N->getChild(i);
if (Child->isLeaf())
- if (DefInit *DI = dynamic_cast<DefInit*>(Child->getLeafValue())) {
+ if (DefInit *DI = dyn_cast<DefInit>(Child->getLeafValue())) {
Record *RR = DI->getDef();
if (RR->isSubClassOf("Register"))
continue;
@@ -3330,4 +3509,3 @@ void CodeGenDAGPatterns::GenerateVariants() {
DEBUG(errs() << "\n");
}
}
-
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
index 5a2d40a..9be763f 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
@@ -105,7 +105,7 @@ namespace EEVT {
/// MergeInTypeInfo - This merges in type information from the specified
/// argument. If 'this' changes, it returns true. If the two types are
- /// contradictory (e.g. merge f32 into i32) then this throws an exception.
+ /// contradictory (e.g. merge f32 into i32) then this flags an error.
bool MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP);
bool MergeInTypeInfo(MVT::SimpleValueType InVT, TreePattern &TP) {
@@ -187,8 +187,8 @@ struct SDTypeConstraint {
/// ApplyTypeConstraint - Given a node in a pattern, apply this type
/// constraint to the nodes operands. This returns true if it makes a
- /// change, false otherwise. If a type contradiction is found, throw an
- /// exception.
+ /// change, false otherwise. If a type contradiction is found, an error
+ /// is flagged.
bool ApplyTypeConstraint(TreePatternNode *N, const SDNodeInfo &NodeInfo,
TreePattern &TP) const;
};
@@ -232,7 +232,7 @@ public:
/// ApplyTypeConstraints - Given a node in a pattern, apply the type
/// constraints for this node to the operands of the node. This returns
/// true if it makes a change, false otherwise. If a type contradiction is
- /// found, throw an exception.
+ /// found, an error is flagged.
bool ApplyTypeConstraints(TreePatternNode *N, TreePattern &TP) const {
bool MadeChange = false;
for (unsigned i = 0, e = TypeConstraints.size(); i != e; ++i)
@@ -446,13 +446,12 @@ public: // Higher level manipulation routines.
/// ApplyTypeConstraints - Apply all of the type constraints relevant to
/// this node and its children in the tree. This returns true if it makes a
- /// change, false otherwise. If a type contradiction is found, throw an
- /// exception.
+ /// change, false otherwise. If a type contradiction is found, flag an error.
bool ApplyTypeConstraints(TreePattern &TP, bool NotRegisters);
/// UpdateNodeType - Set the node type of N to VT if VT contains
- /// information. If N already contains a conflicting type, then throw an
- /// exception. This returns true if any information was updated.
+ /// information. If N already contains a conflicting type, then flag an
+ /// error. This returns true if any information was updated.
///
bool UpdateNodeType(unsigned ResNo, const EEVT::TypeSet &InTy,
TreePattern &TP) {
@@ -514,6 +513,10 @@ class TreePattern {
/// isInputPattern - True if this is an input pattern, something to match.
/// False if this is an output pattern, something to emit.
bool isInputPattern;
+
+ /// hasError - True if the currently processed nodes have unresolvable types
+  /// or other non-fatal errors.
+ bool HasError;
public:
/// TreePattern constructor - Parse the specified DagInits into the
@@ -565,13 +568,19 @@ public:
/// InferAllTypes - Infer/propagate as many types throughout the expression
/// patterns as possible. Return true if all types are inferred, false
- /// otherwise. Throw an exception if a type contradiction is found.
+ /// otherwise. Bail out if a type contradiction is found.
bool InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> >
*NamedTypes=0);
- /// error - Throw an exception, prefixing it with information about this
- /// pattern.
- void error(const std::string &Msg) const;
+ /// error - If this is the first error in the current resolution step,
+ /// print it and set the error flag. Otherwise, continue silently.
+ void error(const std::string &Msg);
+ bool hasError() const {
+ return HasError;
+ }
+ void resetError() {
+ HasError = false;
+ }
void print(raw_ostream &OS) const;
void dump() const;
@@ -582,8 +591,8 @@ private:
void ComputeNamedNodes(TreePatternNode *N);
};
-/// DAGDefaultOperand - One of these is created for each PredicateOperand
-/// or OptionalDefOperand that has a set ExecuteAlways / DefaultOps field.
+/// DAGDefaultOperand - One of these is created for each OperandWithDefaultOps
+/// that has a set ExecuteAlways / DefaultOps field.
struct DAGDefaultOperand {
std::vector<TreePatternNode*> DefaultOps;
};
@@ -602,7 +611,7 @@ public:
: Pattern(TP), Results(results), Operands(operands),
ImpResults(impresults), ResultPattern(0) {}
- const TreePattern *getPattern() const { return Pattern; }
+ TreePattern *getPattern() const { return Pattern; }
unsigned getNumResults() const { return Results.size(); }
unsigned getNumOperands() const { return Operands.size(); }
unsigned getNumImpResults() const { return ImpResults.size(); }
@@ -661,23 +670,18 @@ public:
unsigned getPatternComplexity(const CodeGenDAGPatterns &CGP) const;
};
-// Deterministic comparison of Record*.
-struct RecordPtrCmp {
- bool operator()(const Record *LHS, const Record *RHS) const;
-};
-
class CodeGenDAGPatterns {
RecordKeeper &Records;
CodeGenTarget Target;
std::vector<CodeGenIntrinsic> Intrinsics;
std::vector<CodeGenIntrinsic> TgtIntrinsics;
- std::map<Record*, SDNodeInfo, RecordPtrCmp> SDNodes;
- std::map<Record*, std::pair<Record*, std::string>, RecordPtrCmp> SDNodeXForms;
- std::map<Record*, ComplexPattern, RecordPtrCmp> ComplexPatterns;
- std::map<Record*, TreePattern*, RecordPtrCmp> PatternFragments;
- std::map<Record*, DAGDefaultOperand, RecordPtrCmp> DefaultOperands;
- std::map<Record*, DAGInstruction, RecordPtrCmp> Instructions;
+ std::map<Record*, SDNodeInfo, LessRecordByID> SDNodes;
+ std::map<Record*, std::pair<Record*, std::string>, LessRecordByID> SDNodeXForms;
+ std::map<Record*, ComplexPattern, LessRecordByID> ComplexPatterns;
+ std::map<Record*, TreePattern*, LessRecordByID> PatternFragments;
+ std::map<Record*, DAGDefaultOperand, LessRecordByID> DefaultOperands;
+ std::map<Record*, DAGInstruction, LessRecordByID> Instructions;
// Specific SDNode definitions:
Record *intrinsic_void_sdnode;
@@ -708,7 +712,7 @@ public:
return SDNodeXForms.find(R)->second;
}
- typedef std::map<Record*, NodeXForm, RecordPtrCmp>::const_iterator
+ typedef std::map<Record*, NodeXForm, LessRecordByID>::const_iterator
nx_iterator;
nx_iterator nx_begin() const { return SDNodeXForms.begin(); }
nx_iterator nx_end() const { return SDNodeXForms.end(); }
@@ -758,7 +762,7 @@ public:
return PatternFragments.find(R)->second;
}
- typedef std::map<Record*, TreePattern*, RecordPtrCmp>::const_iterator
+ typedef std::map<Record*, TreePattern*, LessRecordByID>::const_iterator
pf_iterator;
pf_iterator pf_begin() const { return PatternFragments.begin(); }
pf_iterator pf_end() const { return PatternFragments.end(); }
@@ -797,8 +801,9 @@ private:
void ParsePatterns();
void InferInstructionFlags();
void GenerateVariants();
+ void VerifyInstructionFlags();
- void AddPatternToMatch(const TreePattern *Pattern, const PatternToMatch &PTM);
+ void AddPatternToMatch(TreePattern *Pattern, const PatternToMatch &PTM);
void FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
std::map<std::string,
TreePatternNode*> &InstInputs,
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
index 12e153a..0a8684d 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -32,20 +32,20 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
DagInit *OutDI = R->getValueAsDag("OutOperandList");
- if (DefInit *Init = dynamic_cast<DefInit*>(OutDI->getOperator())) {
+ if (DefInit *Init = dyn_cast<DefInit>(OutDI->getOperator())) {
if (Init->getDef()->getName() != "outs")
- throw R->getName() + ": invalid def name for output list: use 'outs'";
+ PrintFatalError(R->getName() + ": invalid def name for output list: use 'outs'");
} else
- throw R->getName() + ": invalid output list: use 'outs'";
+ PrintFatalError(R->getName() + ": invalid output list: use 'outs'");
NumDefs = OutDI->getNumArgs();
DagInit *InDI = R->getValueAsDag("InOperandList");
- if (DefInit *Init = dynamic_cast<DefInit*>(InDI->getOperator())) {
+ if (DefInit *Init = dyn_cast<DefInit>(InDI->getOperator())) {
if (Init->getDef()->getName() != "ins")
- throw R->getName() + ": invalid def name for input list: use 'ins'";
+ PrintFatalError(R->getName() + ": invalid def name for input list: use 'ins'");
} else
- throw R->getName() + ": invalid input list: use 'ins'";
+ PrintFatalError(R->getName() + ": invalid input list: use 'ins'");
unsigned MIOperandNo = 0;
std::set<std::string> OperandNames;
@@ -60,9 +60,9 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
ArgName = InDI->getArgName(i-NumDefs);
}
- DefInit *Arg = dynamic_cast<DefInit*>(ArgInit);
+ DefInit *Arg = dyn_cast<DefInit>(ArgInit);
if (!Arg)
- throw "Illegal operand for the '" + R->getName() + "' instruction!";
+ PrintFatalError("Illegal operand for the '" + R->getName() + "' instruction!");
Record *Rec = Arg->getDef();
std::string PrintMethod = "printOperand";
@@ -80,11 +80,10 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
// Verify that MIOpInfo has an 'ops' root value.
- if (!dynamic_cast<DefInit*>(MIOpInfo->getOperator()) ||
- dynamic_cast<DefInit*>(MIOpInfo->getOperator())
- ->getDef()->getName() != "ops")
- throw "Bad value for MIOperandInfo in operand '" + Rec->getName() +
- "'\n";
+ if (!isa<DefInit>(MIOpInfo->getOperator()) ||
+ cast<DefInit>(MIOpInfo->getOperator())->getDef()->getName() != "ops")
+ PrintFatalError("Bad value for MIOperandInfo in operand '" + Rec->getName() +
+ "'\n");
// If we have MIOpInfo, then we have #operands equal to number of entries
// in MIOperandInfo.
@@ -101,17 +100,17 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
} else if (Rec->isSubClassOf("RegisterClass")) {
OperandType = "OPERAND_REGISTER";
} else if (!Rec->isSubClassOf("PointerLikeRegClass") &&
- Rec->getName() != "unknown")
- throw "Unknown operand class '" + Rec->getName() +
- "' in '" + R->getName() + "' instruction!";
+ !Rec->isSubClassOf("unknown_class"))
+ PrintFatalError("Unknown operand class '" + Rec->getName() +
+ "' in '" + R->getName() + "' instruction!");
// Check that the operand has a name and that it's unique.
if (ArgName.empty())
- throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
- " has no name!";
+ PrintFatalError("In instruction '" + R->getName() + "', operand #" + utostr(i) +
+ " has no name!");
if (!OperandNames.insert(ArgName).second)
- throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
- " has the same name as a previous operand!";
+ PrintFatalError("In instruction '" + R->getName() + "', operand #" + utostr(i) +
+ " has the same name as a previous operand!");
OperandList.push_back(OperandInfo(Rec, ArgName, PrintMethod, EncoderMethod,
OperandType, MIOperandNo, NumOps,
@@ -129,13 +128,13 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
/// getOperandNamed - Return the index of the operand with the specified
/// non-empty name. If the instruction does not have an operand with the
-/// specified name, throw an exception.
+/// specified name, abort.
///
unsigned CGIOperandList::getOperandNamed(StringRef Name) const {
unsigned OpIdx;
if (hasOperandNamed(Name, OpIdx)) return OpIdx;
- throw "'" + TheDef->getName() + "' does not have an operand named '$" +
- Name.str() + "'!";
+ PrintFatalError("'" + TheDef->getName() + "' does not have an operand named '$" +
+ Name.str() + "'!");
}
/// hasOperandNamed - Query whether the instruction has an operand of the
@@ -154,7 +153,7 @@ bool CGIOperandList::hasOperandNamed(StringRef Name, unsigned &OpIdx) const {
std::pair<unsigned,unsigned>
CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
if (Op.empty() || Op[0] != '$')
- throw TheDef->getName() + ": Illegal operand name: '" + Op + "'";
+ PrintFatalError(TheDef->getName() + ": Illegal operand name: '" + Op + "'");
std::string OpName = Op.substr(1);
std::string SubOpName;
@@ -164,7 +163,7 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
if (DotIdx != std::string::npos) {
SubOpName = OpName.substr(DotIdx+1);
if (SubOpName.empty())
- throw TheDef->getName() + ": illegal empty suboperand name in '" +Op +"'";
+ PrintFatalError(TheDef->getName() + ": illegal empty suboperand name in '" +Op +"'");
OpName = OpName.substr(0, DotIdx);
}
@@ -174,8 +173,8 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
// If one was needed, throw.
if (OperandList[OpIdx].MINumOperands > 1 && !AllowWholeOp &&
SubOpName.empty())
- throw TheDef->getName() + ": Illegal to refer to"
- " whole operand part of complex operand '" + Op + "'";
+ PrintFatalError(TheDef->getName() + ": Illegal to refer to"
+ " whole operand part of complex operand '" + Op + "'");
// Otherwise, return the operand.
return std::make_pair(OpIdx, 0U);
@@ -184,7 +183,7 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
// Find the suboperand number involved.
DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo;
if (MIOpInfo == 0)
- throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
+ PrintFatalError(TheDef->getName() + ": unknown suboperand name in '" + Op + "'");
// Find the operand with the right name.
for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i)
@@ -192,7 +191,7 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
return std::make_pair(OpIdx, i);
// Otherwise, didn't find it!
- throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
+ PrintFatalError(TheDef->getName() + ": unknown suboperand name in '" + Op + "'");
}
static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
@@ -204,13 +203,13 @@ static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
std::string Name = CStr.substr(wpos+1);
wpos = Name.find_first_not_of(" \t");
if (wpos == std::string::npos)
- throw "Illegal format for @earlyclobber constraint: '" + CStr + "'";
+ PrintFatalError("Illegal format for @earlyclobber constraint: '" + CStr + "'");
Name = Name.substr(wpos);
std::pair<unsigned,unsigned> Op = Ops.ParseOperandName(Name, false);
// Build the string for the operand
if (!Ops[Op.first].Constraints[Op.second].isNone())
- throw "Operand '" + Name + "' cannot have multiple constraints!";
+ PrintFatalError("Operand '" + Name + "' cannot have multiple constraints!");
Ops[Op.first].Constraints[Op.second] =
CGIOperandList::ConstraintInfo::getEarlyClobber();
return;
@@ -225,25 +224,27 @@ static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
// TIED_TO: $src1 = $dst
wpos = Name.find_first_of(" \t");
if (wpos == std::string::npos)
- throw "Illegal format for tied-to constraint: '" + CStr + "'";
+ PrintFatalError("Illegal format for tied-to constraint: '" + CStr + "'");
std::string DestOpName = Name.substr(0, wpos);
std::pair<unsigned,unsigned> DestOp = Ops.ParseOperandName(DestOpName, false);
Name = CStr.substr(pos+1);
wpos = Name.find_first_not_of(" \t");
if (wpos == std::string::npos)
- throw "Illegal format for tied-to constraint: '" + CStr + "'";
-
- std::pair<unsigned,unsigned> SrcOp =
- Ops.ParseOperandName(Name.substr(wpos), false);
- if (SrcOp > DestOp)
- throw "Illegal tied-to operand constraint '" + CStr + "'";
+ PrintFatalError("Illegal format for tied-to constraint: '" + CStr + "'");
+ std::string SrcOpName = Name.substr(wpos);
+ std::pair<unsigned,unsigned> SrcOp = Ops.ParseOperandName(SrcOpName, false);
+ if (SrcOp > DestOp) {
+ std::swap(SrcOp, DestOp);
+ std::swap(SrcOpName, DestOpName);
+ }
unsigned FlatOpNo = Ops.getFlattenedOperandNumber(SrcOp);
if (!Ops[DestOp.first].Constraints[DestOp.second].isNone())
- throw "Operand '" + DestOpName + "' cannot have multiple constraints!";
+ PrintFatalError("Operand '" + DestOpName +
+ "' cannot have multiple constraints!");
Ops[DestOp.first].Constraints[DestOp.second] =
CGIOperandList::ConstraintInfo::getTied(FlatOpNo);
}
@@ -287,7 +288,8 @@ void CGIOperandList::ProcessDisableEncoding(std::string DisableEncoding) {
// CodeGenInstruction Implementation
//===----------------------------------------------------------------------===//
-CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) {
+CodeGenInstruction::CodeGenInstruction(Record *R)
+ : TheDef(R), Operands(R), InferredFrom(0) {
Namespace = R->getValueAsString("Namespace");
AsmString = R->getValueAsString("AsmString");
@@ -301,8 +303,6 @@ CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) {
isBarrier = R->getValueAsBit("isBarrier");
isCall = R->getValueAsBit("isCall");
canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
- mayLoad = R->getValueAsBit("mayLoad");
- mayStore = R->getValueAsBit("mayStore");
isPredicable = Operands.isPredicable || R->getValueAsBit("isPredicable");
isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress");
isCommutable = R->getValueAsBit("isCommutable");
@@ -313,8 +313,13 @@ CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) {
hasPostISelHook = R->getValueAsBit("hasPostISelHook");
hasCtrlDep = R->getValueAsBit("hasCtrlDep");
isNotDuplicable = R->getValueAsBit("isNotDuplicable");
- hasSideEffects = R->getValueAsBit("hasSideEffects");
+
+ mayLoad = R->getValueAsBitOrUnset("mayLoad", mayLoad_Unset);
+ mayStore = R->getValueAsBitOrUnset("mayStore", mayStore_Unset);
+ hasSideEffects = R->getValueAsBitOrUnset("hasSideEffects",
+ hasSideEffects_Unset);
neverHasSideEffects = R->getValueAsBit("neverHasSideEffects");
+
isAsCheapAsAMove = R->getValueAsBit("isAsCheapAsAMove");
hasExtraSrcRegAllocReq = R->getValueAsBit("hasExtraSrcRegAllocReq");
hasExtraDefRegAllocReq = R->getValueAsBit("hasExtraDefRegAllocReq");
@@ -324,7 +329,7 @@ CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) {
ImplicitUses = R->getValueAsListOfDefs("Uses");
if (neverHasSideEffects + hasSideEffects > 1)
- throw R->getName() + ": multiple conflicting side-effect flags set!";
+ PrintFatalError(R->getName() + ": multiple conflicting side-effect flags set!");
// Parse Constraints.
ParseConstraints(R->getValueAsString("Constraints"), Operands);
@@ -409,16 +414,16 @@ FlattenAsmStringVariants(StringRef Cur, unsigned Variant) {
/// successful match, with ResOp set to the result operand to be used.
bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
Record *InstOpRec, bool hasSubOps,
- SMLoc Loc, CodeGenTarget &T,
+ ArrayRef<SMLoc> Loc, CodeGenTarget &T,
ResultOperand &ResOp) {
Init *Arg = Result->getArg(AliasOpNo);
- DefInit *ADI = dynamic_cast<DefInit*>(Arg);
+ DefInit *ADI = dyn_cast<DefInit>(Arg);
if (ADI && ADI->getDef() == InstOpRec) {
// If the operand is a record, it must have a name, and the record type
// must match up with the instruction's argument type.
if (Result->getArgName(AliasOpNo).empty())
- throw TGError(Loc, "result argument #" + utostr(AliasOpNo) +
+ PrintFatalError(Loc, "result argument #" + utostr(AliasOpNo) +
" must have a name!");
ResOp = ResultOperand(Result->getArgName(AliasOpNo), ADI->getDef());
return true;
@@ -442,7 +447,7 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
DagInit *DI = InstOpRec->getValueAsDag("MIOperandInfo");
// The operand info should only have a single (register) entry. We
// want the register class of it.
- InstOpRec = dynamic_cast<DefInit*>(DI->getArg(0))->getDef();
+ InstOpRec = cast<DefInit>(DI->getArg(0))->getDef();
}
if (InstOpRec->isSubClassOf("RegisterOperand"))
@@ -453,13 +458,13 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
if (!T.getRegisterClass(InstOpRec)
.contains(T.getRegBank().getReg(ADI->getDef())))
- throw TGError(Loc, "fixed register " + ADI->getDef()->getName() +
- " is not a member of the " + InstOpRec->getName() +
- " register class!");
+ PrintFatalError(Loc, "fixed register " + ADI->getDef()->getName() +
+ " is not a member of the " + InstOpRec->getName() +
+ " register class!");
if (!Result->getArgName(AliasOpNo).empty())
- throw TGError(Loc, "result fixed register argument must "
- "not have a name!");
+ PrintFatalError(Loc, "result fixed register argument must "
+ "not have a name!");
ResOp = ResultOperand(ADI->getDef());
return true;
@@ -482,13 +487,13 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
}
// Literal integers.
- if (IntInit *II = dynamic_cast<IntInit*>(Arg)) {
+ if (IntInit *II = dyn_cast<IntInit>(Arg)) {
if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
return false;
// Integer arguments can't have names.
if (!Result->getArgName(AliasOpNo).empty())
- throw TGError(Loc, "result argument #" + utostr(AliasOpNo) +
- " must not have a name!");
+ PrintFatalError(Loc, "result argument #" + utostr(AliasOpNo) +
+ " must not have a name!");
ResOp = ResultOperand(II->getValue());
return true;
}
@@ -514,9 +519,10 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
Result = R->getValueAsDag("ResultInst");
// Verify that the root of the result is an instruction.
- DefInit *DI = dynamic_cast<DefInit*>(Result->getOperator());
+ DefInit *DI = dyn_cast<DefInit>(Result->getOperator());
if (DI == 0 || !DI->getDef()->isSubClassOf("Instruction"))
- throw TGError(R->getLoc(), "result of inst alias should be an instruction");
+ PrintFatalError(R->getLoc(),
+ "result of inst alias should be an instruction");
ResultInst = &T.getInstruction(DI->getDef());
@@ -524,7 +530,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
// the same class.
StringMap<Record*> NameClass;
for (unsigned i = 0, e = Result->getNumArgs(); i != e; ++i) {
- DefInit *ADI = dynamic_cast<DefInit*>(Result->getArg(i));
+ DefInit *ADI = dyn_cast<DefInit>(Result->getArg(i));
if (!ADI || Result->getArgName(i).empty())
continue;
// Verify we don't have something like: (someinst GR16:$foo, GR32:$foo)
@@ -532,9 +538,9 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
// same type.
Record *&Entry = NameClass[Result->getArgName(i)];
if (Entry && Entry != ADI->getDef())
- throw TGError(R->getLoc(), "result value $" + Result->getArgName(i) +
- " is both " + Entry->getName() + " and " +
- ADI->getDef()->getName() + "!");
+ PrintFatalError(R->getLoc(), "result value $" + Result->getArgName(i) +
+ " is both " + Entry->getName() + " and " +
+ ADI->getDef()->getName() + "!");
Entry = ADI->getDef();
}
@@ -550,7 +556,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
continue;
if (AliasOpNo >= Result->getNumArgs())
- throw TGError(R->getLoc(), "not enough arguments for instruction!");
+ PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
Record *InstOpRec = ResultInst->Operands[i].Rec;
unsigned NumSubOps = ResultInst->Operands[i].MINumOperands;
@@ -571,7 +577,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
} else {
DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
- Record *SubRec = dynamic_cast<DefInit*>(MIOI->getArg(SubOp))->getDef();
+ Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
// Take care to instantiate each of the suboperands with the correct
// nomenclature: $foo.bar
@@ -591,26 +597,26 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
if (AliasOpNo >= Result->getNumArgs())
- throw TGError(R->getLoc(), "not enough arguments for instruction!");
- Record *SubRec = dynamic_cast<DefInit*>(MIOI->getArg(SubOp))->getDef();
+ PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
+ Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false,
R->getLoc(), T, ResOp)) {
ResultOperands.push_back(ResOp);
ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
++AliasOpNo;
} else {
- throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
+ PrintFatalError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
" does not match instruction operand class " +
(SubOp == 0 ? InstOpRec->getName() :SubRec->getName()));
}
}
continue;
}
- throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
- " does not match instruction operand class " +
- InstOpRec->getName());
+ PrintFatalError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
+ " does not match instruction operand class " +
+ InstOpRec->getName());
}
if (AliasOpNo != Result->getNumArgs())
- throw TGError(R->getLoc(), "too many operands for instruction!");
+ PrintFatalError(R->getLoc(), "too many operands for instruction!");
}
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.h b/contrib/llvm/utils/TableGen/CodeGenInstruction.h
index 95b572d..55d4439 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.h
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.h
@@ -152,7 +152,7 @@ namespace llvm {
/// getOperandNamed - Return the index of the operand with the specified
/// non-empty name. If the instruction does not have an operand with the
- /// specified name, throw an exception.
+ /// specified name, abort.
unsigned getOperandNamed(StringRef Name) const;
/// hasOperandNamed - Query whether the instruction has an operand of the
@@ -162,9 +162,8 @@ namespace llvm {
/// ParseOperandName - Parse an operand name like "$foo" or "$foo.bar",
/// where $foo is a whole operand and $foo.bar refers to a suboperand.
- /// This throws an exception if the name is invalid. If AllowWholeOp is
- /// true, references to operands with suboperands are allowed, otherwise
- /// not.
+ /// This aborts if the name is invalid. If AllowWholeOp is true, references
+ /// to operands with suboperands are allowed, otherwise not.
std::pair<unsigned,unsigned> ParseOperandName(const std::string &Op,
bool AllowWholeOp = true);
@@ -226,7 +225,10 @@ namespace llvm {
bool isBarrier;
bool isCall;
bool canFoldAsLoad;
- bool mayLoad, mayStore;
+ bool mayLoad;
+ bool mayLoad_Unset;
+ bool mayStore;
+ bool mayStore_Unset;
bool isPredicable;
bool isConvertibleToThreeAddress;
bool isCommutable;
@@ -238,6 +240,7 @@ namespace llvm {
bool hasCtrlDep;
bool isNotDuplicable;
bool hasSideEffects;
+ bool hasSideEffects_Unset;
bool neverHasSideEffects;
bool isAsCheapAsAMove;
bool hasExtraSrcRegAllocReq;
@@ -245,6 +248,14 @@ namespace llvm {
bool isCodeGenOnly;
bool isPseudo;
+ /// Are there any undefined flags?
+ bool hasUndefFlags() const {
+ return mayLoad_Unset || mayStore_Unset || hasSideEffects_Unset;
+ }
+
+ // The record used to infer instruction flags, or NULL if no flag values
+ // have been inferred.
+ Record *InferredFrom;
CodeGenInstruction(Record *R);
@@ -319,7 +330,7 @@ namespace llvm {
CodeGenInstAlias(Record *R, CodeGenTarget &T);
bool tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
- Record *InstOpRec, bool hasSubOps, SMLoc Loc,
+ Record *InstOpRec, bool hasSubOps, ArrayRef<SMLoc> Loc,
CodeGenTarget &T, ResultOperand &ResOp);
};
}
diff --git a/contrib/llvm/utils/TableGen/CodeGenMapTable.cpp b/contrib/llvm/utils/TableGen/CodeGenMapTable.cpp
new file mode 100644
index 0000000..1653d67
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/CodeGenMapTable.cpp
@@ -0,0 +1,606 @@
+//===- CodeGenMapTable.cpp - Instruction Mapping Table Generator ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// CodeGenMapTable provides functionality for TableGen to create relation
+// mappings between instructions. Relation models are defined using
+// InstrMapping as a base class. This file implements the functionality which
+// parses these definitions and generates relation maps using the information
+// specified there. These maps are emitted as tables in the XXXGenInstrInfo.inc
+// file along with the functions to query them.
+//
+// A relationship model to relate non-predicate instructions with their
+// predicated true/false forms can be defined as follows:
+//
+// def getPredOpcode : InstrMapping {
+// let FilterClass = "PredRel";
+// let RowFields = ["BaseOpcode"];
+// let ColFields = ["PredSense"];
+// let KeyCol = ["none"];
+// let ValueCols = [["true"], ["false"]]; }
+//
+// CodeGenMapTable parses this map and generates a table in XXXGenInstrInfo.inc
+// file that contains the instructions modeling this relationship. The table
+// is queried through the function
+// "int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense)"
+// which can be used to retrieve the predicated form of the instruction by
+// passing its opcode value and the predicate sense (true/false) of the desired
+// instruction as arguments.
+//
+// Short description of the algorithm:
+//
+// 1) Iterate through all the records that derive from "InstrMapping" class.
+// 2) For each record, filter out instructions based on the FilterClass value.
+// 3) Iterate through this set of instructions and insert them into
+// RowInstrMap map based on their RowFields values. RowInstrMap is keyed by the
+// vector of RowFields values and contains vectors of Records (instructions) as
+// values. RowFields is a list of fields that are required to have the same
+// values for all the instructions appearing in the same row of the relation
+// table. All the instructions in a given row of the relation table have some
+// sort of relationship with the key instruction defined by the corresponding
+// relationship model.
+//
+// Ex: RowInstrMap(RowVal1, RowVal2, ...) -> [Instr1, Instr2, Instr3, ... ]
+// Here Instr1, Instr2, Instr3 have the same values (RowVal1, RowVal2) for
+// RowFields. These groups of instructions are later matched against ValueCols
+// to determine the column they belong to, if any.
+//
+// While building the RowInstrMap map, collect all the key instructions in
+// KeyInstrVec. These are the instructions having the same values as KeyCol
+// for all the fields listed in ColFields.
+//
+// For Example:
+//
+// Relate non-predicate instructions with their predicated true/false forms.
+//
+// def getPredOpcode : InstrMapping {
+// let FilterClass = "PredRel";
+// let RowFields = ["BaseOpcode"];
+// let ColFields = ["PredSense"];
+// let KeyCol = ["none"];
+// let ValueCols = [["true"], ["false"]]; }
+//
+// Here, only instructions that have "none" as PredSense will be selected as key
+// instructions.
+//
+// 4) For each key instruction, get the group of instructions that share the
+// same key-value as the key instruction from RowInstrMap. Iterate over the list
+// of columns in ValueCols (it is defined as a list<list<string> >, so it
+// can specify multi-column relationships). For each column, find the
+// instruction from the group that matches all the values for the column.
+// Multiple matches are not allowed.
+//
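+// As an illustration only (target, opcode, and enum value names below are
+// hypothetical), the getPredOpcode map above could emit a table and query
+// function of roughly this shape into XXXGenInstrInfo.inc:
+//
+//   static const uint16_t getPredOpcodeTable[][3] = {
+//     { MyTarget::ADD, MyTarget::ADD_pt, MyTarget::ADD_pf },
+//     { MyTarget::SUB, MyTarget::SUB_pt, MyTarget::SUB_pf },
+//   };
+//
+//   int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense) {
+//     // Binary-search Opcode in column 0; if found, return the entry from
+//     // the column selected by inPredSense, otherwise return -1.
+//   }
+//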
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTarget.h"
+#include "llvm/Support/Format.h"
+#include "llvm/TableGen/Error.h"
+using namespace llvm;
+typedef std::map<std::string, std::vector<Record*> > InstrRelMapTy;
+
+typedef std::map<std::vector<Init*>, std::vector<Record*> > RowInstrMapTy;
+
+namespace {
+
+//===----------------------------------------------------------------------===//
+// This class is used to represent InstrMapping class defined in Target.td file.
+class InstrMap {
+private:
+ std::string Name;
+ std::string FilterClass;
+ ListInit *RowFields;
+ ListInit *ColFields;
+ ListInit *KeyCol;
+ std::vector<ListInit*> ValueCols;
+
+public:
+ InstrMap(Record* MapRec) {
+ Name = MapRec->getName();
+
+    // FilterClass - It's used to reduce the search space to only those
+ // instructions that define the kind of relationship modeled by
+ // this InstrMapping object/record.
+ const RecordVal *Filter = MapRec->getValue("FilterClass");
+ FilterClass = Filter->getValue()->getAsUnquotedString();
+
+ // List of fields/attributes that need to be same across all the
+ // instructions in a row of the relation table.
+ RowFields = MapRec->getValueAsListInit("RowFields");
+
+    // List of fields/attributes that are constant across all the instructions
+ // in a column of the relation table. Ex: ColFields = 'predSense'
+ ColFields = MapRec->getValueAsListInit("ColFields");
+
+ // Values for the fields/attributes listed in 'ColFields'.
+ // Ex: KeyCol = 'noPred' -- key instruction is non predicated
+ KeyCol = MapRec->getValueAsListInit("KeyCol");
+
+ // List of values for the fields/attributes listed in 'ColFields', one for
+ // each column in the relation table.
+ //
+    // Ex: ValueCols = [['true'],['false']] -- it results in two columns in the
+ // table. First column requires all the instructions to have predSense
+ // set to 'true' and second column requires it to be 'false'.
+ ListInit *ColValList = MapRec->getValueAsListInit("ValueCols");
+
+ // Each instruction map must specify at least one column for it to be valid.
+ if (ColValList->getSize() == 0)
+ PrintFatalError(MapRec->getLoc(), "InstrMapping record `" +
+        MapRec->getName() + "' has empty `ValueCols' field!");
+
+ for (unsigned i = 0, e = ColValList->getSize(); i < e; i++) {
+ ListInit *ColI = dyn_cast<ListInit>(ColValList->getElement(i));
+
+      // Make sure that all the sub-lists in 'ValueCols' have the same number
+      // of elements as the fields in 'ColFields'.
+ // elements as the fields in 'ColFields'.
+ if (ColI->getSize() != ColFields->getSize())
+ PrintFatalError(MapRec->getLoc(), "Record `" + MapRec->getName() +
+ "', field `ValueCols' entries don't match with " +
+ " the entries in 'ColFields'!");
+ ValueCols.push_back(ColI);
+ }
+ }
+
+ std::string getName() const {
+ return Name;
+ }
+
+ std::string getFilterClass() {
+ return FilterClass;
+ }
+
+ ListInit *getRowFields() const {
+ return RowFields;
+ }
+
+ ListInit *getColFields() const {
+ return ColFields;
+ }
+
+ ListInit *getKeyCol() const {
+ return KeyCol;
+ }
+
+ const std::vector<ListInit*> &getValueCols() const {
+ return ValueCols;
+ }
+};
+} // End anonymous namespace.
+
+
+//===----------------------------------------------------------------------===//
+// class MapTableEmitter : It builds the instruction relation maps using
+// the information provided in InstrMapping records. It outputs these
+// relationship maps as tables into XXXGenInstrInfo.inc file along with the
+// functions to query them.
+
+namespace {
+class MapTableEmitter {
+private:
+// std::string TargetName;
+ const CodeGenTarget &Target;
+ // InstrMapDesc - InstrMapping record to be processed.
+ InstrMap InstrMapDesc;
+
+ // InstrDefs - list of instructions filtered using FilterClass defined
+ // in InstrMapDesc.
+ std::vector<Record*> InstrDefs;
+
+ // RowInstrMap - maps RowFields values to the instructions. It's keyed by the
+ // values of the row fields and contains vector of records as values.
+ RowInstrMapTy RowInstrMap;
+
+ // KeyInstrVec - list of key instructions.
+ std::vector<Record*> KeyInstrVec;
+ DenseMap<Record*, std::vector<Record*> > MapTable;
+
+public:
+ MapTableEmitter(CodeGenTarget &Target, RecordKeeper &Records, Record *IMRec):
+ Target(Target), InstrMapDesc(IMRec) {
+ const std::string FilterClass = InstrMapDesc.getFilterClass();
+ InstrDefs = Records.getAllDerivedDefinitions(FilterClass);
+ }
+
+ void buildRowInstrMap();
+
+ // Returns true if an instruction is a key instruction, i.e., its ColFields
+  // have the same values as KeyCol.
+ bool isKeyColInstr(Record* CurInstr);
+
+ // Find column instruction corresponding to a key instruction based on the
+ // constraints for that column.
+ Record *getInstrForColumn(Record *KeyInstr, ListInit *CurValueCol);
+
+ // Find column instructions for each key instruction based
+ // on ValueCols and store them into MapTable.
+ void buildMapTable();
+
+ void emitBinSearch(raw_ostream &OS, unsigned TableSize);
+ void emitTablesWithFunc(raw_ostream &OS);
+ unsigned emitBinSearchTable(raw_ostream &OS);
+
+ // Lookup functions to query binary search tables.
+ void emitMapFuncBody(raw_ostream &OS, unsigned TableSize);
+
+};
+} // End anonymous namespace.
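+
+// A sketch of the intended call sequence (the actual driver appears later in
+// this file): for each InstrMapping record, construct a MapTableEmitter, then
+// call buildRowInstrMap(), buildMapTable(), and emitTablesWithFunc() in turn.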
+
+
+//===----------------------------------------------------------------------===//
+// Process all the instructions that model this relation (already present in
+// InstrDefs) and insert them into RowInstrMap, which is keyed by the values of
+// the fields listed as RowFields. It stores vectors of records as values.
+// All the related instructions have the same values for the RowFields and are
+// thus part of the same key-value pair.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::buildRowInstrMap() {
+ for (unsigned i = 0, e = InstrDefs.size(); i < e; i++) {
+ std::vector<Record*> InstrList;
+ Record *CurInstr = InstrDefs[i];
+ std::vector<Init*> KeyValue;
+ ListInit *RowFields = InstrMapDesc.getRowFields();
+ for (unsigned j = 0, endRF = RowFields->getSize(); j < endRF; j++) {
+ Init *RowFieldsJ = RowFields->getElement(j);
+ Init *CurInstrVal = CurInstr->getValue(RowFieldsJ)->getValue();
+ KeyValue.push_back(CurInstrVal);
+ }
+
+ // Collect key instructions into KeyInstrVec. Later, these instructions are
+ // processed to assign column position to the instructions sharing
+ // their KeyValue in RowInstrMap.
+ if (isKeyColInstr(CurInstr))
+ KeyInstrVec.push_back(CurInstr);
+
+ RowInstrMap[KeyValue].push_back(CurInstr);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Return true if an instruction is a KeyCol instruction.
+//===----------------------------------------------------------------------===//
+
+bool MapTableEmitter::isKeyColInstr(Record* CurInstr) {
+ ListInit *ColFields = InstrMapDesc.getColFields();
+ ListInit *KeyCol = InstrMapDesc.getKeyCol();
+
+ // Check if the instruction is a KeyCol instruction.
+ bool MatchFound = true;
+ for (unsigned j = 0, endCF = ColFields->getSize();
+ (j < endCF) && MatchFound; j++) {
+ RecordVal *ColFieldName = CurInstr->getValue(ColFields->getElement(j));
+ std::string CurInstrVal = ColFieldName->getValue()->getAsUnquotedString();
+ std::string KeyColValue = KeyCol->getElement(j)->getAsUnquotedString();
+ MatchFound = (CurInstrVal == KeyColValue);
+ }
+ return MatchFound;
+}
+
+//===----------------------------------------------------------------------===//
+// Build a map to link key instructions with the column instructions arranged
+// according to their column positions.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::buildMapTable() {
+ // Find column instructions for a given key based on the ColField
+ // constraints.
+ const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
+ unsigned NumOfCols = ValueCols.size();
+ for (unsigned j = 0, endKI = KeyInstrVec.size(); j < endKI; j++) {
+ Record *CurKeyInstr = KeyInstrVec[j];
+ std::vector<Record*> ColInstrVec(NumOfCols);
+
+ // Find the column instruction based on the constraints for the column.
+ for (unsigned ColIdx = 0; ColIdx < NumOfCols; ColIdx++) {
+ ListInit *CurValueCol = ValueCols[ColIdx];
+ Record *ColInstr = getInstrForColumn(CurKeyInstr, CurValueCol);
+ ColInstrVec[ColIdx] = ColInstr;
+ }
+ MapTable[CurKeyInstr] = ColInstrVec;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Find column instruction based on the constraints for that column.
+//===----------------------------------------------------------------------===//
+
+Record *MapTableEmitter::getInstrForColumn(Record *KeyInstr,
+ ListInit *CurValueCol) {
+ ListInit *RowFields = InstrMapDesc.getRowFields();
+ std::vector<Init*> KeyValue;
+
+ // Construct KeyValue using KeyInstr's values for RowFields.
+ for (unsigned j = 0, endRF = RowFields->getSize(); j < endRF; j++) {
+ Init *RowFieldsJ = RowFields->getElement(j);
+ Init *KeyInstrVal = KeyInstr->getValue(RowFieldsJ)->getValue();
+ KeyValue.push_back(KeyInstrVal);
+ }
+
+ // Get all the instructions that share the same KeyValue as the KeyInstr
+ // in RowInstrMap. We search through these instructions to find a match
+ // for the current column, i.e., the instruction which has the same values
+ // as CurValueCol for all the fields in ColFields.
+ const std::vector<Record*> &RelatedInstrVec = RowInstrMap[KeyValue];
+
+ ListInit *ColFields = InstrMapDesc.getColFields();
+ Record *MatchInstr = NULL;
+
+ for (unsigned i = 0, e = RelatedInstrVec.size(); i < e; i++) {
+ bool MatchFound = true;
+ Record *CurInstr = RelatedInstrVec[i];
+ for (unsigned j = 0, endCF = ColFields->getSize();
+ (j < endCF) && MatchFound; j++) {
+ Init *ColFieldJ = ColFields->getElement(j);
+ Init *CurInstrInit = CurInstr->getValue(ColFieldJ)->getValue();
+ std::string CurInstrVal = CurInstrInit->getAsUnquotedString();
+      Init *ColFieldJValue = CurValueCol->getElement(j);
+      MatchFound = (CurInstrVal == ColFieldJValue->getAsUnquotedString());
+ }
+
+ if (MatchFound) {
+ if (MatchInstr) // Already had a match
+ // Error if multiple matches are found for a column.
+ PrintFatalError("Multiple matches found for `" + KeyInstr->getName() +
+ "', for the relation `" + InstrMapDesc.getName());
+ MatchInstr = CurInstr;
+ }
+ }
+ return MatchInstr;
+}
+
+//===----------------------------------------------------------------------===//
+// Emit one table per relation. Only instructions with a valid relation of a
+// given type are included in the table sorted by their enum values (opcodes).
+// Binary search is used for locating instructions in the table.
+//===----------------------------------------------------------------------===//
+
+unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) {
+
+ const std::vector<const CodeGenInstruction*> &NumberedInstructions =
+ Target.getInstructionsByEnumValue();
+ std::string TargetName = Target.getName();
+ const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
+ unsigned NumCol = ValueCols.size();
+ unsigned TotalNumInstr = NumberedInstructions.size();
+ unsigned TableSize = 0;
+
+ OS << "static const uint16_t "<<InstrMapDesc.getName();
+  // The number of columns in the table is NumCol+1 because the key
+  // instructions are emitted as the first column.
+ OS << "Table[]["<< NumCol+1 << "] = {\n";
+ for (unsigned i = 0; i < TotalNumInstr; i++) {
+ Record *CurInstr = NumberedInstructions[i]->TheDef;
+ std::vector<Record*> ColInstrs = MapTable[CurInstr];
+ std::string OutStr("");
+ unsigned RelExists = 0;
+ if (ColInstrs.size()) {
+ for (unsigned j = 0; j < NumCol; j++) {
+ if (ColInstrs[j] != NULL) {
+ RelExists = 1;
+ OutStr += ", ";
+ OutStr += TargetName;
+ OutStr += "::";
+ OutStr += ColInstrs[j]->getName();
+ } else { OutStr += ", -1";}
+ }
+
+ if (RelExists) {
+ OS << " { " << TargetName << "::" << CurInstr->getName();
+ OS << OutStr <<" },\n";
+ TableSize++;
+ }
+ }
+ }
+ if (!TableSize) {
+ OS << " { " << TargetName << "::" << "INSTRUCTION_LIST_END, ";
+ OS << TargetName << "::" << "INSTRUCTION_LIST_END }";
+ }
+ OS << "}; // End of " << InstrMapDesc.getName() << "Table\n\n";
+ return TableSize;
+}
+
+//===----------------------------------------------------------------------===//
+// Emit binary search algorithm as part of the functions used to query
+// relation tables.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::emitBinSearch(raw_ostream &OS, unsigned TableSize) {
+ OS << " unsigned mid;\n";
+ OS << " unsigned start = 0;\n";
+ OS << " unsigned end = " << TableSize << ";\n";
+ OS << " while (start < end) {\n";
+ OS << " mid = start + (end - start)/2;\n";
+ OS << " if (Opcode == " << InstrMapDesc.getName() << "Table[mid][0]) {\n";
+ OS << " break;\n";
+ OS << " }\n";
+ OS << " if (Opcode < " << InstrMapDesc.getName() << "Table[mid][0])\n";
+ OS << " end = mid;\n";
+ OS << " else\n";
+ OS << " start = mid + 1;\n";
+ OS << " }\n";
+ OS << " if (start == end)\n";
+ OS << " return -1; // Instruction doesn't exist in this table.\n\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Emit functions to query relation tables.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::emitMapFuncBody(raw_ostream &OS,
+ unsigned TableSize) {
+
+ ListInit *ColFields = InstrMapDesc.getColFields();
+ const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
+
+ // Emit binary search algorithm to locate instructions in the
+ // relation table. If found, return opcode value from the appropriate column
+ // of the table.
+ emitBinSearch(OS, TableSize);
+
+ if (ValueCols.size() > 1) {
+ for (unsigned i = 0, e = ValueCols.size(); i < e; i++) {
+ ListInit *ColumnI = ValueCols[i];
+ for (unsigned j = 0, ColSize = ColumnI->getSize(); j < ColSize; j++) {
+ std::string ColName = ColFields->getElement(j)->getAsUnquotedString();
+ OS << " if (in" << ColName;
+ OS << " == ";
+ OS << ColName << "_" << ColumnI->getElement(j)->getAsUnquotedString();
+ if (j < ColumnI->getSize() - 1) OS << " && ";
+ else OS << ")\n";
+ }
+ OS << " return " << InstrMapDesc.getName();
+ OS << "Table[mid]["<<i+1<<"];\n";
+ }
+ OS << " return -1;";
+ }
+ else
+ OS << " return " << InstrMapDesc.getName() << "Table[mid][1];\n";
+
+ OS <<"}\n\n";
+}
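Combining emitBinSearch and emitMapFuncBody, the emitted query function for a single-value-column relation mirrors the strings above. A hypothetical instance, paired with the two-row getPredOpcodeTable sketched earlier:

int getPredOpcode(uint16_t Opcode) {
  unsigned mid;
  unsigned start = 0;
  unsigned end = 2; // TableSize
  while (start < end) {
    mid = start + (end - start)/2;
    if (Opcode == getPredOpcodeTable[mid][0]) {
      break;
    }
    if (Opcode < getPredOpcodeTable[mid][0])
      end = mid;
    else
      start = mid + 1;
  }
  if (start == end)
    return -1; // Instruction doesn't exist in this table.
  return getPredOpcodeTable[mid][1];
}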
+
+//===----------------------------------------------------------------------===//
+// Emit relation tables and the functions to query them.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::emitTablesWithFunc(raw_ostream &OS) {
+
+ // Emit the function name and input parameters: usually just the opcode of
+ // the current instruction. However, if a table has multiple value columns
+ // (more than two columns total, since the first column holds the key
+ // instructions), we also need extra inputs to select the column.
+
+ ListInit *ColFields = InstrMapDesc.getColFields();
+ const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
+ OS << "// "<< InstrMapDesc.getName() << "\n";
+ OS << "int "<< InstrMapDesc.getName() << "(uint16_t Opcode";
+ if (ValueCols.size() > 1) {
+ for (unsigned i = 0, e = ColFields->getSize(); i < e; i++) {
+ std::string ColName = ColFields->getElement(i)->getAsUnquotedString();
+ OS << ", enum " << ColName << " in" << ColName << ") {\n";
+ }
+ } else { OS << ") {\n"; }
+
+ // Emit map table.
+ unsigned TableSize = emitBinSearchTable(OS);
+
+ // Emit rest of the function body.
+ emitMapFuncBody(OS, TableSize);
+}
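When a relation has more than one value column, the emitted signature gains one enum parameter per column field; a hypothetical shape, assuming a single column field named PredSense:

// int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense);
// The body then returns Table[mid][i+1] for the first column i whose
// values match all of the in<ColName> arguments.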
+
+//===----------------------------------------------------------------------===//
+// Emit enums for the column fields across all the instruction maps.
+//===----------------------------------------------------------------------===//
+
+static void emitEnums(raw_ostream &OS, RecordKeeper &Records) {
+
+ std::vector<Record*> InstrMapVec;
+ InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
+ std::map<std::string, std::vector<Init*> > ColFieldValueMap;
+
+ // Iterate over all InstrMapping records and create a map between column
+ // fields and their possible values across all records.
+ for (unsigned i = 0, e = InstrMapVec.size(); i < e; i++) {
+ Record *CurMap = InstrMapVec[i];
+ ListInit *ColFields;
+ ColFields = CurMap->getValueAsListInit("ColFields");
+ ListInit *List = CurMap->getValueAsListInit("ValueCols");
+ std::vector<ListInit*> ValueCols;
+ unsigned ListSize = List->getSize();
+
+ for (unsigned j = 0; j < ListSize; j++) {
+ ListInit *ListJ = dyn_cast<ListInit>(List->getElement(j));
+
+ if (!ListJ || ListJ->getSize() != ColFields->getSize())
+ PrintFatalError("Record `" + CurMap->getName() + "', field "
+ "`ValueCols' entries don't match with the entries in 'ColFields'!");
+ ValueCols.push_back(ListJ);
+ }
+
+ for (unsigned j = 0, endCF = ColFields->getSize(); j < endCF; j++) {
+ for (unsigned k = 0; k < ListSize; k++){
+ std::string ColName = ColFields->getElement(j)->getAsUnquotedString();
+ ColFieldValueMap[ColName].push_back((ValueCols[k])->getElement(j));
+ }
+ }
+ }
+
+ for (std::map<std::string, std::vector<Init*> >::iterator
+ II = ColFieldValueMap.begin(), IE = ColFieldValueMap.end();
+ II != IE; II++) {
+ std::vector<Init*> FieldValues = (*II).second;
+
+ // Delete duplicate entries from ColFieldValueMap.
+ for (unsigned i = 0; i + 1 < FieldValues.size(); i++) {
+ Init *CurVal = FieldValues[i];
+ for (unsigned j = i+1; j < FieldValues.size(); j++) {
+ if (CurVal == FieldValues[j]) {
+ FieldValues.erase(FieldValues.begin()+j);
+ --j;
+ }
+ }
+ }
+
+ // Emit enumerated values for the column fields.
+ OS << "enum " << (*II).first << " {\n";
+ for (unsigned i = 0, e = FieldValues.size(); i < e; i++) {
+ OS << "\t" << (*II).first << "_" << FieldValues[i]->getAsUnquotedString();
+ if (i != e - 1)
+ OS << ",\n";
+ else
+ OS << "\n};\n\n";
+ }
+ }
+}
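A sketch of the output for a hypothetical column field named PredSense whose values across all maps are true and false (note the literal tab the emitter prints before each enumerator):

enum PredSense {
	PredSense_true,
	PredSense_false
};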
+
+namespace llvm {
+//===----------------------------------------------------------------------===//
+// Parse 'InstrMapping' records and use the information to form relationships
+// between instructions. These relations are emitted as tables along with the
+// functions to query them.
+//===----------------------------------------------------------------------===//
+void EmitMapTable(RecordKeeper &Records, raw_ostream &OS) {
+ CodeGenTarget Target(Records);
+ std::string TargetName = Target.getName();
+ std::vector<Record*> InstrMapVec;
+ InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
+
+ if (!InstrMapVec.size())
+ return;
+
+ OS << "#ifdef GET_INSTRMAP_INFO\n";
+ OS << "#undef GET_INSTRMAP_INFO\n";
+ OS << "namespace llvm {\n\n";
+ OS << "namespace " << TargetName << " {\n\n";
+
+ // Emit column field names and their values as enums.
+ emitEnums(OS, Records);
+
+ // Iterate over all instruction mapping records and construct relationship
+ // maps based on the information specified there.
+ //
+ for (unsigned i = 0, e = InstrMapVec.size(); i < e; i++) {
+ MapTableEmitter IMap(Target, Records, InstrMapVec[i]);
+
+ // Build RowInstrMap to group instructions based on their values for
+ // RowFields. In the process, also collect key instructions into
+ // KeyInstrVec.
+ IMap.buildRowInstrMap();
+
+ // Build MapTable to map key instructions with the corresponding column
+ // instructions.
+ IMap.buildMapTable();
+
+ // Emit map tables and the functions to query them.
+ IMap.emitTablesWithFunc(OS);
+ }
+ OS << "} // End " << TargetName << " namespace\n";
+ OS << "} // End llvm namespace\n";
+ OS << "#endif // GET_INSTRMAP_INFO\n\n";
+}
+
+} // End llvm namespace
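Since the whole block is guarded by GET_INSTRMAP_INFO, a target consumes it the same way as the other generated fragments; a sketch with a hypothetical .inc file name:

#define GET_INSTRMAP_INFO
#include "XYZGenInstrInfo.inc" // hypothetical generated file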
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp b/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
index 011f4b7..580e319 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
@@ -28,7 +28,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
- : TheDef(R), EnumValue(Enum) {
+ : TheDef(R), EnumValue(Enum), LaneMask(0) {
Name = R->getName();
if (R->getValue("Namespace"))
Namespace = R->getValueAsString("Namespace");
@@ -36,7 +36,7 @@ CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace,
unsigned Enum)
- : TheDef(0), Name(N), Namespace(Nspace), EnumValue(Enum) {
+ : TheDef(0), Name(N), Namespace(Nspace), EnumValue(Enum), LaneMask(0) {
}
std::string CodeGenSubRegIndex::getQualifiedName() const {
@@ -54,19 +54,20 @@ void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
if (!Comps.empty()) {
if (Comps.size() != 2)
- throw TGError(TheDef->getLoc(), "ComposedOf must have exactly two entries");
+ PrintFatalError(TheDef->getLoc(),
+ "ComposedOf must have exactly two entries");
CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
CodeGenSubRegIndex *X = A->addComposite(B, this);
if (X)
- throw TGError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
+ PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
}
std::vector<Record*> Parts =
TheDef->getValueAsListOfDefs("CoveringSubRegIndices");
if (!Parts.empty()) {
if (Parts.size() < 2)
- throw TGError(TheDef->getLoc(),
+ PrintFatalError(TheDef->getLoc(),
"CoveredBySubRegs must have two or more entries");
SmallVector<CodeGenSubRegIndex*, 8> IdxParts;
for (unsigned i = 0, e = Parts.size(); i != e; ++i)
@@ -75,14 +76,21 @@ void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
}
}
-void CodeGenSubRegIndex::cleanComposites() {
- // Clean out redundant mappings of the form this+X -> X.
- for (CompMap::iterator i = Composed.begin(), e = Composed.end(); i != e;) {
- CompMap::iterator j = i;
- ++i;
- if (j->first == j->second)
- Composed.erase(j);
- }
+unsigned CodeGenSubRegIndex::computeLaneMask() {
+ // Already computed?
+ if (LaneMask)
+ return LaneMask;
+
+ // Recursion guard; this shouldn't be required.
+ LaneMask = ~0u;
+
+ // The lane mask is simply the union of all sub-indices.
+ unsigned M = 0;
+ for (CompMap::iterator I = Composed.begin(), E = Composed.end(); I != E; ++I)
+ M |= I->second->computeLaneMask();
+ assert(M && "Missing lane mask, sub-register cycle?");
+ LaneMask = M;
+ return LaneMask;
}
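A tiny worked example of the union rule above, with hypothetical leaf indices ssub_0 and ssub_1 composing into dsub:

#include <cassert>

int main() {
  unsigned LaneSsub0 = 0x1; // leaf index, own bit
  unsigned LaneSsub1 = 0x2; // leaf index, own bit
  // The composite's mask is the union of its sub-indices' masks.
  unsigned LaneDsub = LaneSsub0 | LaneSsub1;
  assert(LaneDsub == 0x3);
  return 0;
}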
//===----------------------------------------------------------------------===//
@@ -105,8 +113,8 @@ void CodeGenRegister::buildObjectGraph(CodeGenRegBank &RegBank) {
std::vector<Record*> SRs = TheDef->getValueAsListOfDefs("SubRegs");
if (SRIs.size() != SRs.size())
- throw TGError(TheDef->getLoc(),
- "SubRegs and SubRegIndices must have the same size");
+ PrintFatalError(TheDef->getLoc(),
+ "SubRegs and SubRegIndices must have the same size");
for (unsigned i = 0, e = SRIs.size(); i != e; ++i) {
ExplicitSubRegIndices.push_back(RegBank.getSubRegIdx(SRIs[i]));
@@ -217,8 +225,8 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
CodeGenRegister *SR = ExplicitSubRegs[i];
CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i];
if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
- throw TGError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
- " appears twice in Register " + getName());
+ PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
+ " appears twice in Register " + getName());
// Map explicit sub-registers first, so the names take precedence.
// The inherited sub-registers are mapped below.
SubReg2Idx.insert(std::make_pair(SR, Idx));
@@ -298,11 +306,11 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
for (SubRegMap::const_iterator SI = SubRegs.begin(), SE = SubRegs.end();
SI != SE; ++SI) {
if (SI->second == this) {
- SMLoc Loc;
+ ArrayRef<SMLoc> Loc;
if (TheDef)
Loc = TheDef->getLoc();
- throw TGError(Loc, "Register " + getName() +
- " has itself as a sub-register");
+ PrintFatalError(Loc, "Register " + getName() +
+ " has itself as a sub-register");
}
// Ensure that every sub-register has a unique name.
DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*>::iterator Ins =
@@ -310,10 +318,10 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
if (Ins->second == SI->first)
continue;
// Trouble: Two different names for SI->second.
- SMLoc Loc;
+ ArrayRef<SMLoc> Loc;
if (TheDef)
Loc = TheDef->getLoc();
- throw TGError(Loc, "Sub-register can't have two names: " +
+ PrintFatalError(Loc, "Sub-register can't have two names: " +
SI->second->getName() + " available as " +
SI->first->getName() + " and " + Ins->second->getName());
}
@@ -460,8 +468,8 @@ void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
SE = NewSubReg->SubRegs.end(); SI != SE; ++SI) {
CodeGenSubRegIndex *SubIdx = getSubRegIndex(SI->second);
if (!SubIdx)
- throw TGError(TheDef->getLoc(), "No SubRegIndex for " +
- SI->second->getName() + " in " + getName());
+ PrintFatalError(TheDef->getLoc(), "No SubRegIndex for " +
+ SI->second->getName() + " in " + getName());
NewIdx->addComposite(SI->first, SubIdx);
}
}
@@ -585,15 +593,16 @@ struct TupleExpander : SetTheory::Expander {
unsigned Dim = Indices.size();
ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
if (Dim != SubRegs->getSize())
- throw TGError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
+ PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
if (Dim < 2)
- throw TGError(Def->getLoc(), "Tuples must have at least 2 sub-registers");
+ PrintFatalError(Def->getLoc(),
+ "Tuples must have at least 2 sub-registers");
// Evaluate the sub-register lists to be zipped.
unsigned Length = ~0u;
SmallVector<SetTheory::RecSet, 4> Lists(Dim);
for (unsigned i = 0; i != Dim; ++i) {
- ST.evaluate(SubRegs->getElement(i), Lists[i]);
+ ST.evaluate(SubRegs->getElement(i), Lists[i], Def->getLoc());
Length = std::min(Length, unsigned(Lists[i].size()));
}
@@ -699,8 +708,8 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
Record *Type = TypeList[i];
if (!Type->isSubClassOf("ValueType"))
- throw "RegTypes list member '" + Type->getName() +
- "' does not derive from the ValueType class!";
+ PrintFatalError("RegTypes list member '" + Type->getName() +
+ "' does not derive from the ValueType class!");
VTs.push_back(getValueType(Type));
}
assert(!VTs.empty() && "RegisterClass must contain at least one ValueType!");
@@ -721,14 +730,14 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
// Alternative allocation orders may be subsets.
SetTheory::RecSet Order;
for (unsigned i = 0, e = AltOrders->size(); i != e; ++i) {
- RegBank.getSets().evaluate(AltOrders->getElement(i), Order);
+ RegBank.getSets().evaluate(AltOrders->getElement(i), Order, R->getLoc());
Orders[1 + i].append(Order.begin(), Order.end());
// Verify that all altorder members are regclass members.
while (!Order.empty()) {
CodeGenRegister *Reg = RegBank.getReg(Order.back());
Order.pop_back();
if (!contains(Reg))
- throw TGError(R->getLoc(), " AltOrder register " + Reg->getName() +
+ PrintFatalError(R->getLoc(), " AltOrder register " + Reg->getName() +
" is not a class member");
}
}
@@ -986,6 +995,12 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) {
for (unsigned i = 0, e = Registers.size(); i != e; ++i)
Registers[i]->buildObjectGraph(*this);
+ // Compute register name map.
+ for (unsigned i = 0, e = Registers.size(); i != e; ++i)
+ RegistersByName.GetOrCreateValue(
+ Registers[i]->TheDef->getValueAsString("AsmName"),
+ Registers[i]);
+
// Precompute all sub-register maps.
// This will create Composite entries for all inferred sub-register indices.
for (unsigned i = 0, e = Registers.size(); i != e; ++i)
@@ -1008,7 +1023,7 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) {
// Read in register class definitions.
std::vector<Record*> RCs = Records.getAllDerivedDefinitions("RegisterClass");
if (RCs.empty())
- throw std::string("No 'RegisterClass' subclasses defined!");
+ PrintFatalError(std::string("No 'RegisterClass' subclasses defined!"));
// Allocate user-defined register classes.
RegClasses.reserve(RCs.size());
@@ -1085,7 +1100,7 @@ CodeGenRegisterClass *CodeGenRegBank::getRegClass(Record *Def) {
if (CodeGenRegisterClass *RC = Def2RC[Def])
return RC;
- throw TGError(Def->getLoc(), "Not a known RegisterClass!");
+ PrintFatalError(Def->getLoc(), "Not a known RegisterClass!");
}
CodeGenSubRegIndex*
@@ -1164,11 +1179,35 @@ void CodeGenRegBank::computeComposites() {
}
}
}
+}
+
+// Compute lane masks. This is similar to register units, but at the
+// sub-register index level. Each bit in the lane mask is like a register unit
+// class, and two lane masks will have a bit in common if two sub-register
+// indices overlap in some register.
+//
+// Conservatively share a lane mask bit if two sub-register indices overlap in
+// some registers, but not in others. That shouldn't happen a lot.
+void CodeGenRegBank::computeSubRegIndexLaneMasks() {
+ // First assign individual bits to all the leaf indices.
+ unsigned Bit = 0;
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+ CodeGenSubRegIndex *Idx = SubRegIndices[i];
+ if (Idx->getComposites().empty()) {
+ Idx->LaneMask = 1u << Bit;
+ // Share bit 31 in the unlikely case there are more than 32 leafs.
+ if (Bit < 31) ++Bit;
+ } else {
+ Idx->LaneMask = 0;
+ }
+ }
+
+ // FIXME: What if ad-hoc aliasing introduces overlaps that aren't represented
+ // by the sub-register graph? This doesn't occur in any known targets.
- // We don't care about the difference between (Idx1, Idx2) -> Idx2 and invalid
- // compositions, so remove any mappings of that form.
+ // Inherit lanes from composites.
for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
- SubRegIndices[i]->cleanComposites();
+ SubRegIndices[i]->computeLaneMask();
}
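A standalone sketch of the leaf-bit assignment above, with the CodeGenSubRegIndex objects reduced to a plain leaf/composite flag (assumption: same policy, including the shared bit 31):

#include <cstdio>
#include <vector>

int main() {
  // IsLeaf[i]: sub-register index i has no composites.
  std::vector<bool> IsLeaf;
  IsLeaf.push_back(true);
  IsLeaf.push_back(true);
  IsLeaf.push_back(false); // composite: mask filled in later by inheritance
  unsigned Bit = 0;
  for (unsigned i = 0, e = IsLeaf.size(); i != e; ++i) {
    unsigned LaneMask = 0;
    if (IsLeaf[i]) {
      LaneMask = 1u << Bit;
      if (Bit < 31) ++Bit; // share bit 31 past 32 leaves
    }
    std::printf("index %u -> mask 0x%x\n", i, LaneMask);
  }
  return 0;
}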
namespace {
@@ -1554,6 +1593,7 @@ void CodeGenRegBank::computeRegUnitSets() {
void CodeGenRegBank::computeDerivedInfo() {
computeComposites();
+ computeSubRegIndexLaneMasks();
// Compute a weight for each register unit created during getSubRegs.
// This may create adopted register units (with unit # >= NumNativeRegUnits).
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.h b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
index 827063e..e411074 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
@@ -40,6 +40,7 @@ namespace llvm {
public:
const unsigned EnumValue;
+ unsigned LaneMask;
CodeGenSubRegIndex(Record *R, unsigned Enum);
CodeGenSubRegIndex(StringRef N, StringRef Nspace, unsigned Enum);
@@ -80,12 +81,12 @@ namespace llvm {
// Update the composite maps of components specified in 'ComposedOf'.
void updateComponents(CodeGenRegBank&);
- // Clean out redundant composite mappings.
- void cleanComposites();
-
// Return the map of composites.
const CompMap &getComposites() const { return Composed; }
+ // Compute LaneMask from Composed. Return LaneMask.
+ unsigned computeLaneMask();
+
private:
CompMap Composed;
};
@@ -439,6 +440,7 @@ namespace llvm {
// Registers.
std::vector<CodeGenRegister*> Registers;
+ StringMap<CodeGenRegister*> RegistersByName;
DenseMap<Record*, CodeGenRegister*> Def2Reg;
unsigned NumNativeRegUnits;
@@ -489,6 +491,9 @@ namespace llvm {
// Populate the Composite map from sub-register relationships.
void computeComposites();
+ // Compute a lane mask for each sub-register index.
+ void computeSubRegIndexLaneMasks();
+
public:
CodeGenRegBank(RecordKeeper&);
@@ -518,6 +523,9 @@ namespace llvm {
}
const std::vector<CodeGenRegister*> &getRegisters() { return Registers; }
+ const StringMap<CodeGenRegister*> &getRegistersByName() {
+ return RegistersByName;
+ }
// Find a register from its Record def.
CodeGenRegister *getReg(Record*);
diff --git a/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp b/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp
index f57fd18..63cc97a 100644
--- a/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp
@@ -16,41 +16,505 @@
#include "CodeGenSchedule.h"
#include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/ADT/STLExtras.h"
using namespace llvm;
-// CodeGenModels ctor interprets machine model records and populates maps.
+#ifndef NDEBUG
+static void dumpIdxVec(const IdxVec &V) {
+ for (unsigned i = 0, e = V.size(); i < e; ++i) {
+ dbgs() << V[i] << ", ";
+ }
+}
+static void dumpIdxVec(const SmallVectorImpl<unsigned> &V) {
+ for (unsigned i = 0, e = V.size(); i < e; ++i) {
+ dbgs() << V[i] << ", ";
+ }
+}
+#endif
+
+// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
+struct InstrsOp : public SetTheory::Operator {
+ void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
+ ArrayRef<SMLoc> Loc) {
+ ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
+ }
+};
+
+// (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
+//
+// TODO: Since this is a prefix match, perform a binary search over the
+// instruction names using lower_bound. Note that the predefined instrs must be
+// scanned linearly first. However, this is only safe if the regex pattern has
+// no top-level bars. The DAG already has a list of patterns, so there's no
+// reason to use top-level bars, but we need a way to verify they don't exist
+// before implementing the optimization.
+struct InstRegexOp : public SetTheory::Operator {
+ const CodeGenTarget &Target;
+ InstRegexOp(const CodeGenTarget &t): Target(t) {}
+
+ void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
+ ArrayRef<SMLoc> Loc) {
+ SmallVector<Regex*, 4> RegexList;
+ for (DagInit::const_arg_iterator
+ AI = Expr->arg_begin(), AE = Expr->arg_end(); AI != AE; ++AI) {
+ StringInit *SI = dyn_cast<StringInit>(*AI);
+ if (!SI)
+ PrintFatalError(Loc, "instregex requires pattern string: "
+ + Expr->getAsString());
+ std::string pat = SI->getValue();
+ // Implement a python-style prefix match.
+ if (pat[0] != '^') {
+ pat.insert(0, "^(");
+ pat.insert(pat.end(), ')');
+ }
+ RegexList.push_back(new Regex(pat));
+ }
+ for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+ E = Target.inst_end(); I != E; ++I) {
+ for (SmallVectorImpl<Regex*>::iterator
+ RI = RegexList.begin(), RE = RegexList.end(); RI != RE; ++RI) {
+ if ((*RI)->match((*I)->TheDef->getName()))
+ Elts.insert((*I)->TheDef);
+ }
+ }
+ DeleteContainerPointers(RegexList);
+ }
+};
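The prefix wrapping above can be checked in isolation; this sketch uses std::regex instead of llvm::Regex (an assumption: the two agree for simple patterns like this, though llvm::Regex is POSIX ERE and std::regex defaults to ECMAScript):

#include <cassert>
#include <regex>
#include <string>

int main() {
  // Same python-style wrapping as InstRegexOp: "ADD.*" -> "^(ADD.*)".
  std::string pat = "ADD.*";
  if (pat[0] != '^') {
    pat.insert(0, "^(");
    pat.insert(pat.end(), ')');
  }
  std::regex re(pat);
  assert(std::regex_search(std::string("ADDri"), re));  // prefix matches
  assert(!std::regex_search(std::string("FADD"), re));  // '^' rejects infix
  return 0;
}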
+
+/// CodeGenSchedModels ctor interprets machine model records and populates maps.
CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
const CodeGenTarget &TGT):
- Records(RK), Target(TGT), NumItineraryClasses(0), HasProcItineraries(false) {
+ Records(RK), Target(TGT), NumItineraryClasses(0) {
+
+ Sets.addFieldExpander("InstRW", "Instrs");
+
+ // Allow Set evaluation to recognize the dags used in InstRW records:
+ // (instrs Op1, Op1...)
+ Sets.addOperator("instrs", new InstrsOp);
+ Sets.addOperator("instregex", new InstRegexOp(Target));
+
+ // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
+ // that are explicitly referenced in tablegen records. Resources associated
+ // with each processor will be derived later. Populate ProcModelMap with the
+ // CodeGenProcModel instances.
+ collectProcModels();
+
+ // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
+ // defined, and populate SchedReads and SchedWrites vectors. Implicit
+ // SchedReadWrites that represent sequences derived from expanded variants
+ // be inferred later.
+ collectSchedRW();
+
+ // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
+ // required by an instruction definition, and populate SchedClassIdxMap. Set
+ // NumItineraryClasses to the number of explicit itinerary classes referenced
+ // by instructions. Set NumInstrSchedClasses to the number of itinerary
+ // classes plus any classes implied by instructions that derive from class
+ // Sched and provide SchedRW list. This does not infer any new classes from
+ // SchedVariant.
+ collectSchedClasses();
+
+ // Find instruction itineraries for each processor. Sort and populate
+ // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
+ // all itinerary classes to be discovered.
+ collectProcItins();
+
+ // Find ItinRW records for each processor and itinerary class.
+ // (For per-operand resources mapped to itinerary classes).
+ collectProcItinRW();
+
+ // Infer new SchedClasses from SchedVariant.
+ inferSchedClasses();
+
+ // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
+ // ProcResourceDefs.
+ collectProcResources();
+}
+
+/// Gather all processor models.
+void CodeGenSchedModels::collectProcModels() {
+ RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
+ std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
+
+ // Reserve space because we can. Reallocation would be ok.
+ ProcModels.reserve(ProcRecords.size()+1);
+
+ // Use idx=0 for NoModel/NoItineraries.
+ Record *NoModelDef = Records.getDef("NoSchedModel");
+ Record *NoItinsDef = Records.getDef("NoItineraries");
+ ProcModels.push_back(CodeGenProcModel(0, "NoSchedModel",
+ NoModelDef, NoItinsDef));
+ ProcModelMap[NoModelDef] = 0;
+
+ // For each processor, find a unique machine model.
+ for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
+ addProcModel(ProcRecords[i]);
+}
+
+/// Get a unique processor model based on the defined MachineModel and
+/// ProcessorItineraries.
+void CodeGenSchedModels::addProcModel(Record *ProcDef) {
+ Record *ModelKey = getModelOrItinDef(ProcDef);
+ if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
+ return;
+
+ std::string Name = ModelKey->getName();
+ if (ModelKey->isSubClassOf("SchedMachineModel")) {
+ Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
+ ProcModels.push_back(
+ CodeGenProcModel(ProcModels.size(), Name, ModelKey, ItinsDef));
+ }
+ else {
+ // An itinerary is defined without a machine model. Infer a new model.
+ if (!ModelKey->getValueAsListOfDefs("IID").empty())
+ Name = Name + "Model";
+ ProcModels.push_back(
+ CodeGenProcModel(ProcModels.size(), Name,
+ ProcDef->getValueAsDef("SchedModel"), ModelKey));
+ }
+ DEBUG(ProcModels.back().dump());
+}
+
+// Recursively find all reachable SchedReadWrite records.
+static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
+ SmallPtrSet<Record*, 16> &RWSet) {
+ if (!RWSet.insert(RWDef))
+ return;
+ RWDefs.push_back(RWDef);
+ // Reads don't currently have sequence records, but they can be added later.
+ if (RWDef->isSubClassOf("WriteSequence")) {
+ RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
+ for (RecIter I = Seq.begin(), E = Seq.end(); I != E; ++I)
+ scanSchedRW(*I, RWDefs, RWSet);
+ }
+ else if (RWDef->isSubClassOf("SchedVariant")) {
+ // Visit each variant (guarded by a different predicate).
+ RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
+ for (RecIter VI = Vars.begin(), VE = Vars.end(); VI != VE; ++VI) {
+ // Visit each RW in the sequence selected by the current variant.
+ RecVec Selected = (*VI)->getValueAsListOfDefs("Selected");
+ for (RecIter I = Selected.begin(), E = Selected.end(); I != E; ++I)
+ scanSchedRW(*I, RWDefs, RWSet);
+ }
+ }
+}
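The same visit-once-then-recurse shape, reduced to integers and an explicit member map (illustrative only; in the real code the members come from the Writes/Variants record fields):

#include <map>
#include <set>
#include <vector>

// Record each RW the first time it is seen, then recurse into the members
// of sequences and variants.
static void scan(int RW, const std::map<int, std::vector<int> > &Members,
                 std::vector<int> &Out, std::set<int> &Seen) {
  if (!Seen.insert(RW).second)
    return; // already visited
  std::map<int, std::vector<int> >::const_iterator It = Members.find(RW);
  Out.push_back(RW);
  if (It == Members.end())
    return; // a leaf SchedReadWrite
  for (size_t i = 0; i != It->second.size(); ++i)
    scan(It->second[i], Members, Out, Seen);
}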
+
+// Collect and sort all SchedReadWrites reachable via tablegen records.
+// More may be inferred later when inferring new SchedClasses from variants.
+void CodeGenSchedModels::collectSchedRW() {
+ // Reserve idx=0 for invalid writes/reads.
+ SchedWrites.resize(1);
+ SchedReads.resize(1);
+
+ SmallPtrSet<Record*, 16> RWSet;
+
+ // Find all SchedReadWrites referenced by instruction defs.
+ RecVec SWDefs, SRDefs;
+ for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+ E = Target.inst_end(); I != E; ++I) {
+ Record *SchedDef = (*I)->TheDef;
+ if (!SchedDef->isSubClassOf("Sched"))
+ continue;
+ RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
+ for (RecIter RWI = RWs.begin(), RWE = RWs.end(); RWI != RWE; ++RWI) {
+ if ((*RWI)->isSubClassOf("SchedWrite"))
+ scanSchedRW(*RWI, SWDefs, RWSet);
+ else {
+ assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+ scanSchedRW(*RWI, SRDefs, RWSet);
+ }
+ }
+ }
+ // Find all ReadWrites referenced by InstRW.
+ RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
+ for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI) {
+ // For all OperandReadWrites.
+ RecVec RWDefs = (*OI)->getValueAsListOfDefs("OperandReadWrites");
+ for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
+ RWI != RWE; ++RWI) {
+ if ((*RWI)->isSubClassOf("SchedWrite"))
+ scanSchedRW(*RWI, SWDefs, RWSet);
+ else {
+ assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+ scanSchedRW(*RWI, SRDefs, RWSet);
+ }
+ }
+ }
+ // Find all ReadWrites referenced by ItinRW.
+ RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
+ for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
+ // For all OperandReadWrites.
+ RecVec RWDefs = (*II)->getValueAsListOfDefs("OperandReadWrites");
+ for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
+ RWI != RWE; ++RWI) {
+ if ((*RWI)->isSubClassOf("SchedWrite"))
+ scanSchedRW(*RWI, SWDefs, RWSet);
+ else {
+ assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+ scanSchedRW(*RWI, SRDefs, RWSet);
+ }
+ }
+ }
+ // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
+ // for the loop below that initializes Alias vectors.
+ RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
+ std::sort(AliasDefs.begin(), AliasDefs.end(), LessRecord());
+ for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
+ Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
+ Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
+ if (MatchDef->isSubClassOf("SchedWrite")) {
+ if (!AliasDef->isSubClassOf("SchedWrite"))
+ PrintFatalError((*AI)->getLoc(), "SchedWrite Alias must be SchedWrite");
+ scanSchedRW(AliasDef, SWDefs, RWSet);
+ }
+ else {
+ assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+ if (!AliasDef->isSubClassOf("SchedRead"))
+ PrintFatalError((*AI)->getLoc(), "SchedRead Alias must be SchedRead");
+ scanSchedRW(AliasDef, SRDefs, RWSet);
+ }
+ }
+ // Sort and add the SchedReadWrites directly referenced by instructions or
+ // itinerary resources. Index reads and writes in separate domains.
+ std::sort(SWDefs.begin(), SWDefs.end(), LessRecord());
+ for (RecIter SWI = SWDefs.begin(), SWE = SWDefs.end(); SWI != SWE; ++SWI) {
+ assert(!getSchedRWIdx(*SWI, /*IsRead=*/false) && "duplicate SchedWrite");
+ SchedWrites.push_back(CodeGenSchedRW(SchedWrites.size(), *SWI));
+ }
+ std::sort(SRDefs.begin(), SRDefs.end(), LessRecord());
+ for (RecIter SRI = SRDefs.begin(), SRE = SRDefs.end(); SRI != SRE; ++SRI) {
+ assert(!getSchedRWIdx(*SRI, /*IsRead=*/true) && "duplicate SchedRead");
+ SchedReads.push_back(CodeGenSchedRW(SchedReads.size(), *SRI));
+ }
+ // Initialize WriteSequence vectors.
+ for (std::vector<CodeGenSchedRW>::iterator WI = SchedWrites.begin(),
+ WE = SchedWrites.end(); WI != WE; ++WI) {
+ if (!WI->IsSequence)
+ continue;
+ findRWs(WI->TheDef->getValueAsListOfDefs("Writes"), WI->Sequence,
+ /*IsRead=*/false);
+ }
+ // Initialize Aliases vectors.
+ for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
+ Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
+ getSchedRW(AliasDef).IsAlias = true;
+ Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
+ CodeGenSchedRW &RW = getSchedRW(MatchDef);
+ if (RW.IsAlias)
+ PrintFatalError((*AI)->getLoc(), "Cannot Alias an Alias");
+ RW.Aliases.push_back(*AI);
+ }
+ DEBUG(
+ for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
+ dbgs() << WIdx << ": ";
+ SchedWrites[WIdx].dump();
+ dbgs() << '\n';
+ }
+ for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; ++RIdx) {
+ dbgs() << RIdx << ": ";
+ SchedReads[RIdx].dump();
+ dbgs() << '\n';
+ }
+ RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
+ for (RecIter RI = RWDefs.begin(), RE = RWDefs.end();
+ RI != RE; ++RI) {
+ if (!getSchedRWIdx(*RI, (*RI)->isSubClassOf("SchedRead"))) {
+ const std::string &Name = (*RI)->getName();
+ if (Name != "NoWrite" && Name != "ReadDefault")
+ dbgs() << "Unused SchedReadWrite " << (*RI)->getName() << '\n';
+ }
+ });
+}
+
+/// Compute a SchedWrite name from a sequence of writes.
+std::string CodeGenSchedModels::genRWName(const IdxVec& Seq, bool IsRead) {
+ std::string Name("(");
+ for (IdxIter I = Seq.begin(), E = Seq.end(); I != E; ++I) {
+ if (I != Seq.begin())
+ Name += '_';
+ Name += getSchedRW(*I, IsRead).Name;
+ }
+ Name += ')';
+ return Name;
+}
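For example, a sequence of the writes WriteA and WriteB is named "(WriteA_WriteB)". A standalone check of the formatting, with the index-to-name lookup replaced by a string vector (names hypothetical):

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Seq;
  Seq.push_back("WriteA");
  Seq.push_back("WriteB");
  std::string Name("(");
  for (size_t i = 0; i != Seq.size(); ++i) {
    if (i != 0)
      Name += '_';
    Name += Seq[i];
  }
  Name += ')';
  assert(Name == "(WriteA_WriteB)");
  return 0;
}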
+
+unsigned CodeGenSchedModels::getSchedRWIdx(Record *Def, bool IsRead,
+ unsigned After) const {
+ const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
+ assert(After < RWVec.size() && "start position out of bounds");
+ for (std::vector<CodeGenSchedRW>::const_iterator I = RWVec.begin() + After,
+ E = RWVec.end(); I != E; ++I) {
+ if (I->TheDef == Def)
+ return I - RWVec.begin();
+ }
+ return 0;
+}
+
+bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
+ for (unsigned i = 0, e = SchedReads.size(); i < e; ++i) {
+ Record *ReadDef = SchedReads[i].TheDef;
+ if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance"))
+ continue;
+
+ RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites");
+ if (std::find(ValidWrites.begin(), ValidWrites.end(), WriteDef)
+ != ValidWrites.end()) {
+ return true;
+ }
+ }
+ return false;
+}
- // Populate SchedClassIdxMap and set NumItineraryClasses.
- CollectSchedClasses();
+namespace llvm {
+void splitSchedReadWrites(const RecVec &RWDefs,
+ RecVec &WriteDefs, RecVec &ReadDefs) {
+ for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) {
+ if ((*RWI)->isSubClassOf("SchedWrite"))
+ WriteDefs.push_back(*RWI);
+ else {
+ assert((*RWI)->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
+ ReadDefs.push_back(*RWI);
+ }
+ }
+}
+} // namespace llvm
+
+// Split the SchedReadWrites defs and call findRWs for each list.
+void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
+ IdxVec &Writes, IdxVec &Reads) const {
+ RecVec WriteDefs;
+ RecVec ReadDefs;
+ splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
+ findRWs(WriteDefs, Writes, false);
+ findRWs(ReadDefs, Reads, true);
+}
+
+// Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
+void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
+ bool IsRead) const {
+ for (RecIter RI = RWDefs.begin(), RE = RWDefs.end(); RI != RE; ++RI) {
+ unsigned Idx = getSchedRWIdx(*RI, IsRead);
+ assert(Idx && "failed to collect SchedReadWrite");
+ RWs.push_back(Idx);
+ }
+}
+
+void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
+ bool IsRead) const {
+ const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
+ if (!SchedRW.IsSequence) {
+ RWSeq.push_back(RWIdx);
+ return;
+ }
+ int Repeat =
+ SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
+ for (int i = 0; i < Repeat; ++i) {
+ for (IdxIter I = SchedRW.Sequence.begin(), E = SchedRW.Sequence.end();
+ I != E; ++I) {
+ expandRWSequence(*I, RWSeq, IsRead);
+ }
+ }
+}
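A worked example of the Repeat handling above: a WriteSequence of two non-sequence writes W1, W2 with Repeat = 2 flattens to W1, W2, W1, W2 (indices hypothetical):

#include <cassert>
#include <vector>

int main() {
  std::vector<unsigned> Sequence; // indices of W1 and W2
  Sequence.push_back(1);
  Sequence.push_back(2);
  int Repeat = 2;
  std::vector<unsigned> RWSeq;
  for (int i = 0; i < Repeat; ++i)
    RWSeq.insert(RWSeq.end(), Sequence.begin(), Sequence.end());
  assert(RWSeq.size() == 4);
  assert(RWSeq[0] == 1 && RWSeq[1] == 2 && RWSeq[2] == 1 && RWSeq[3] == 2);
  return 0;
}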
+
+// Expand a SchedWrite as a sequence following any aliases that coincide with
+// the given processor model.
+void CodeGenSchedModels::expandRWSeqForProc(
+ unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
+ const CodeGenProcModel &ProcModel) const {
+
+ const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
+ Record *AliasDef = 0;
+ for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end();
+ AI != AE; ++AI) {
+ const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
+ if ((*AI)->getValueInit("SchedModel")->isComplete()) {
+ Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
+ if (&getProcModel(ModelDef) != &ProcModel)
+ continue;
+ }
+ if (AliasDef)
+ PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
+ "defined for processor " + ProcModel.ModelName +
+ ". Ensure only one SchedAlias exists per RW.");
+ AliasDef = AliasRW.TheDef;
+ }
+ if (AliasDef) {
+ expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
+ RWSeq, IsRead, ProcModel);
+ return;
+ }
+ if (!SchedWrite.IsSequence) {
+ RWSeq.push_back(RWIdx);
+ return;
+ }
+ int Repeat =
+ SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
+ for (int i = 0; i < Repeat; ++i) {
+ for (IdxIter I = SchedWrite.Sequence.begin(), E = SchedWrite.Sequence.end();
+ I != E; ++I) {
+ expandRWSeqForProc(*I, RWSeq, IsRead, ProcModel);
+ }
+ }
+}
+
+// Find the existing SchedWrite that models this sequence of writes.
+unsigned CodeGenSchedModels::findRWForSequence(const IdxVec &Seq,
+ bool IsRead) {
+ std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
- // Populate ProcModelMap.
- CollectProcModels();
+ for (std::vector<CodeGenSchedRW>::iterator I = RWVec.begin(), E = RWVec.end();
+ I != E; ++I) {
+ if (I->Sequence == Seq)
+ return I - RWVec.begin();
+ }
+ // Index zero reserved for invalid RW.
+ return 0;
}
-// Visit all the instruction definitions for this target to gather and enumerate
-// the itinerary classes. These are the explicitly specified SchedClasses. More
-// SchedClasses may be inferred.
-void CodeGenSchedModels::CollectSchedClasses() {
+/// Add this ReadWrite if it doesn't already exist.
+unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
+ bool IsRead) {
+ assert(!Seq.empty() && "cannot insert empty sequence");
+ if (Seq.size() == 1)
+ return Seq.back();
- // NoItinerary is always the first class at Index=0
+ unsigned Idx = findRWForSequence(Seq, IsRead);
+ if (Idx)
+ return Idx;
+
+ unsigned RWIdx = IsRead ? SchedReads.size() : SchedWrites.size();
+ CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
+ if (IsRead)
+ SchedReads.push_back(SchedRW);
+ else
+ SchedWrites.push_back(SchedRW);
+ return RWIdx;
+}
+
+/// Visit all the instruction definitions for this target to gather and
+/// enumerate the itinerary classes. These are the explicitly specified
+/// SchedClasses. More SchedClasses may be inferred.
+void CodeGenSchedModels::collectSchedClasses() {
+
+ // NoItinerary is always the first class at Idx=0
SchedClasses.resize(1);
SchedClasses.back().Name = "NoItinerary";
+ SchedClasses.back().ProcIndices.push_back(0);
SchedClassIdxMap[SchedClasses.back().Name] = 0;
// Gather and sort all itinerary classes used by instruction descriptions.
- std::vector<Record*> ItinClassList;
+ RecVec ItinClassList;
for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
E = Target.inst_end(); I != E; ++I) {
- Record *SchedDef = (*I)->TheDef->getValueAsDef("Itinerary");
+ Record *ItinDef = (*I)->TheDef->getValueAsDef("Itinerary");
// Map a new SchedClass with no index.
- if (!SchedClassIdxMap.count(SchedDef->getName())) {
- SchedClassIdxMap[SchedDef->getName()] = 0;
- ItinClassList.push_back(SchedDef);
+ if (!SchedClassIdxMap.count(ItinDef->getName())) {
+ SchedClassIdxMap[ItinDef->getName()] = 0;
+ ItinClassList.push_back(ItinDef);
}
}
// Assign each itinerary class unique number, skipping NoItinerary==0
@@ -61,91 +525,1139 @@ void CodeGenSchedModels::CollectSchedClasses() {
SchedClassIdxMap[ItinDef->getName()] = SchedClasses.size();
SchedClasses.push_back(CodeGenSchedClass(ItinDef));
}
+ // Infer classes from SchedReadWrite resources listed for each
+ // instruction definition that inherits from class Sched.
+ for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+ E = Target.inst_end(); I != E; ++I) {
+ if (!(*I)->TheDef->isSubClassOf("Sched"))
+ continue;
+ IdxVec Writes, Reads;
+ findRWs((*I)->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);
+ // ProcIdx == 0 indicates the class applies to all processors.
+ IdxVec ProcIndices(1, 0);
+ addSchedClass(Writes, Reads, ProcIndices);
+ }
+ // Create classes for InstRW defs.
+ RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
+ std::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord());
+ for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI)
+ createInstRWClass(*OI);
+
+ NumInstrSchedClasses = SchedClasses.size();
+
+ bool EnableDump = false;
+ DEBUG(EnableDump = true);
+ if (!EnableDump)
+ return;
+ for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+ E = Target.inst_end(); I != E; ++I) {
+ Record *SchedDef = (*I)->TheDef;
+ std::string InstName = (*I)->TheDef->getName();
+ if (SchedDef->isSubClassOf("Sched")) {
+ IdxVec Writes;
+ IdxVec Reads;
+ findRWs((*I)->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);
+ dbgs() << "SchedRW machine model for " << InstName;
+ for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
+ dbgs() << " " << SchedWrites[*WI].Name;
+ for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
+ dbgs() << " " << SchedReads[*RI].Name;
+ dbgs() << '\n';
+ }
+ unsigned SCIdx = InstrClassMap.lookup((*I)->TheDef);
+ if (SCIdx) {
+ const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
+ for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
+ RWI != RWE; ++RWI) {
+ const CodeGenProcModel &ProcModel =
+ getProcModel((*RWI)->getValueAsDef("SchedModel"));
+ dbgs() << "InstRW on " << ProcModel.ModelName << " for " << InstName;
+ IdxVec Writes;
+ IdxVec Reads;
+ findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"),
+ Writes, Reads);
+ for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
+ dbgs() << " " << SchedWrites[*WI].Name;
+ for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
+ dbgs() << " " << SchedReads[*RI].Name;
+ dbgs() << '\n';
+ }
+ continue;
+ }
+ if (!SchedDef->isSubClassOf("Sched")
+ && (SchedDef->getValueAsDef("Itinerary")->getName() == "NoItinerary")) {
+ dbgs() << "No machine model for " << (*I)->TheDef->getName() << '\n';
+ }
+ }
+}
+
+unsigned CodeGenSchedModels::getSchedClassIdx(
+ const RecVec &RWDefs) const {
- // TODO: Infer classes from non-itinerary scheduler resources.
+ IdxVec Writes, Reads;
+ findRWs(RWDefs, Writes, Reads);
+ return findSchedClassIdx(Writes, Reads);
}
-// Gather all processor models.
-void CodeGenSchedModels::CollectProcModels() {
- std::vector<Record*> ProcRecords =
- Records.getAllDerivedDefinitions("Processor");
- std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
+/// Find an SchedClass that has been inferred from a per-operand list of
+/// SchedWrites and SchedReads.
+unsigned CodeGenSchedModels::findSchedClassIdx(const IdxVec &Writes,
+ const IdxVec &Reads) const {
+ for (SchedClassIter I = schedClassBegin(), E = schedClassEnd(); I != E; ++I) {
+ // Classes with InstRWs may have the same Writes/Reads as a class originally
+ // produced by a SchedRW definition. We need to be able to recover the
+ // original class index for processors that don't match any InstRWs.
+ if (I->ItinClassDef || !I->InstRWs.empty())
+ continue;
- // Reserve space because we can. Reallocation would be ok.
- ProcModels.reserve(ProcRecords.size());
+ if (I->Writes == Writes && I->Reads == Reads) {
+ return I - schedClassBegin();
+ }
+ }
+ return 0;
+}
- // For each processor, find a unique machine model.
- for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
- addProcModel(ProcRecords[i]);
+// Get the SchedClass index for an instruction.
+unsigned CodeGenSchedModels::getSchedClassIdx(
+ const CodeGenInstruction &Inst) const {
+
+ unsigned SCIdx = InstrClassMap.lookup(Inst.TheDef);
+ if (SCIdx)
+ return SCIdx;
+
+ // If this opcode isn't mapped by the subtarget fallback to the instruction
+ // definition's SchedRW or ItinDef values.
+ if (Inst.TheDef->isSubClassOf("Sched")) {
+ RecVec RWs = Inst.TheDef->getValueAsListOfDefs("SchedRW");
+ return getSchedClassIdx(RWs);
+ }
+ Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary");
+ assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
+ unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
+ assert(Idx <= NumItineraryClasses && "bad ItinClass index");
+ return Idx;
}
-// Get a unique processor model based on the defined MachineModel and
-// ProcessorItineraries.
-void CodeGenSchedModels::addProcModel(Record *ProcDef) {
- unsigned Idx = getProcModelIdx(ProcDef);
- if (Idx < ProcModels.size())
- return;
+std::string CodeGenSchedModels::createSchedClassName(
+ const IdxVec &OperWrites, const IdxVec &OperReads) {
+
+ std::string Name;
+ for (IdxIter WI = OperWrites.begin(), WE = OperWrites.end(); WI != WE; ++WI) {
+ if (WI != OperWrites.begin())
+ Name += '_';
+ Name += SchedWrites[*WI].Name;
+ }
+ for (IdxIter RI = OperReads.begin(), RE = OperReads.end(); RI != RE; ++RI) {
+ Name += '_';
+ Name += SchedReads[*RI].Name;
+ }
+ return Name;
+}
+
+std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {
+
+ std::string Name;
+ for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
+ if (I != InstDefs.begin())
+ Name += '_';
+ Name += (*I)->getName();
+ }
+ return Name;
+}
+
+/// Add an inferred sched class from a per-operand list of SchedWrites and
+/// SchedReads. ProcIndices contains the set of IDs of processors that may
+/// utilize this class.
+unsigned CodeGenSchedModels::addSchedClass(const IdxVec &OperWrites,
+ const IdxVec &OperReads,
+ const IdxVec &ProcIndices)
+{
+ assert(!ProcIndices.empty() && "expect at least one ProcIdx");
+
+ unsigned Idx = findSchedClassIdx(OperWrites, OperReads);
+ if (Idx) {
+ IdxVec PI;
+ std::set_union(SchedClasses[Idx].ProcIndices.begin(),
+ SchedClasses[Idx].ProcIndices.end(),
+ ProcIndices.begin(), ProcIndices.end(),
+ std::back_inserter(PI));
+ SchedClasses[Idx].ProcIndices.swap(PI);
+ return Idx;
+ }
+ Idx = SchedClasses.size();
+ SchedClasses.resize(Idx+1);
+ CodeGenSchedClass &SC = SchedClasses.back();
+ SC.Name = createSchedClassName(OperWrites, OperReads);
+ SC.Writes = OperWrites;
+ SC.Reads = OperReads;
+ SC.ProcIndices = ProcIndices;
+
+ return Idx;
+}
+
+// Create classes for each set of opcodes that are in the same InstReadWrite
+// definition across all processors.
+void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
+ // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
+ // intersects with an existing class via a previous InstRWDef. Instrs that do
+ // not intersect with an existing class refer back to their former class as
+ // determined from ItinDef or SchedRW.
+ SmallVector<std::pair<unsigned, SmallVector<Record *, 8> >, 4> ClassInstrs;
+ // Sort Instrs into sets.
+ const RecVec *InstDefs = Sets.expand(InstRWDef);
+ if (InstDefs->empty())
+ PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");
+
+ for (RecIter I = InstDefs->begin(), E = InstDefs->end(); I != E; ++I) {
+ unsigned SCIdx = 0;
+ InstClassMapTy::const_iterator Pos = InstrClassMap.find(*I);
+ if (Pos != InstrClassMap.end())
+ SCIdx = Pos->second;
+ else {
+ // This instruction has not been mapped yet. Get the original class. All
+ // instructions in the same InstrRW class must be from the same original
+ // class because that is the fall-back class for other processors.
+ Record *ItinDef = (*I)->getValueAsDef("Itinerary");
+ SCIdx = SchedClassIdxMap.lookup(ItinDef->getName());
+ if (!SCIdx && (*I)->isSubClassOf("Sched"))
+ SCIdx = getSchedClassIdx((*I)->getValueAsListOfDefs("SchedRW"));
+ }
+ unsigned CIdx = 0, CEnd = ClassInstrs.size();
+ for (; CIdx != CEnd; ++CIdx) {
+ if (ClassInstrs[CIdx].first == SCIdx)
+ break;
+ }
+ if (CIdx == CEnd) {
+ ClassInstrs.resize(CEnd + 1);
+ ClassInstrs[CIdx].first = SCIdx;
+ }
+ ClassInstrs[CIdx].second.push_back(*I);
+ }
+ // For each set of Instrs, create a new class if necessary, and map or remap
+ // the Instrs to it.
+ unsigned CIdx = 0, CEnd = ClassInstrs.size();
+ for (; CIdx != CEnd; ++CIdx) {
+ unsigned OldSCIdx = ClassInstrs[CIdx].first;
+ ArrayRef<Record*> InstDefs = ClassInstrs[CIdx].second;
+ // If all of the instrs in the current class are accounted for, then leave
+ // them mapped to their old class.
+ if (SchedClasses[OldSCIdx].InstRWs.size() == InstDefs.size()) {
+ assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
+ "expected a generic SchedClass");
+ continue;
+ }
+ unsigned SCIdx = SchedClasses.size();
+ SchedClasses.resize(SCIdx+1);
+ CodeGenSchedClass &SC = SchedClasses.back();
+ SC.Name = createSchedClassName(InstDefs);
+ // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
+ SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
+ SC.Writes = SchedClasses[OldSCIdx].Writes;
+ SC.Reads = SchedClasses[OldSCIdx].Reads;
+ SC.ProcIndices.push_back(0);
+ // Map each Instr to this new class.
+ // Note that InstDefs may be a smaller list than InstRWDef's "Instrs".
+ Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
+ SmallSet<unsigned, 4> RemappedClassIDs;
+ for (ArrayRef<Record*>::const_iterator
+ II = InstDefs.begin(), IE = InstDefs.end(); II != IE; ++II) {
+ unsigned OldSCIdx = InstrClassMap[*II];
+ if (OldSCIdx && RemappedClassIDs.insert(OldSCIdx)) {
+ for (RecIter RI = SchedClasses[OldSCIdx].InstRWs.begin(),
+ RE = SchedClasses[OldSCIdx].InstRWs.end(); RI != RE; ++RI) {
+ if ((*RI)->getValueAsDef("SchedModel") == RWModelDef) {
+ PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " +
+ (*II)->getName() + " also matches " +
+ (*RI)->getValue("Instrs")->getValue()->getAsString());
+ }
+ assert(*RI != InstRWDef && "SchedClass has duplicate InstRW def");
+ SC.InstRWs.push_back(*RI);
+ }
+ }
+ InstrClassMap[*II] = SCIdx;
+ }
+ SC.InstRWs.push_back(InstRWDef);
+ }
+}
+
+// Gather the processor itineraries.
+void CodeGenSchedModels::collectProcItins() {
+ for (std::vector<CodeGenProcModel>::iterator PI = ProcModels.begin(),
+ PE = ProcModels.end(); PI != PE; ++PI) {
+ CodeGenProcModel &ProcModel = *PI;
+ RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
+ // Skip empty itinerary.
+ if (ItinRecords.empty())
+ continue;
+
+ ProcModel.ItinDefList.resize(NumItineraryClasses+1);
+
+ // Insert each itinerary data record in the correct position within
+ // the processor model's ItinDefList.
+ for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
+ Record *ItinData = ItinRecords[i];
+ Record *ItinDef = ItinData->getValueAsDef("TheClass");
+ if (!SchedClassIdxMap.count(ItinDef->getName())) {
+ DEBUG(dbgs() << ProcModel.ItinsDef->getName()
+ << " has unused itinerary class " << ItinDef->getName() << '\n');
+ continue;
+ }
+ assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
+ unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
+ assert(Idx <= NumItineraryClasses && "bad ItinClass index");
+ ProcModel.ItinDefList[Idx] = ItinData;
+ }
+ // Check for missing itinerary entries.
+ assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
+ DEBUG(
+ for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
+ if (!ProcModel.ItinDefList[i])
+ dbgs() << ProcModel.ItinsDef->getName()
+ << " missing itinerary for class "
+ << SchedClasses[i].Name << '\n';
+ });
+ }
+}
+
+// Gather the read/write types for each itinerary class.
+void CodeGenSchedModels::collectProcItinRW() {
+ RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
+ std::sort(ItinRWDefs.begin(), ItinRWDefs.end(), LessRecord());
+ for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
+ if (!(*II)->getValueInit("SchedModel")->isComplete())
+ PrintFatalError((*II)->getLoc(), "SchedModel is undefined");
+ Record *ModelDef = (*II)->getValueAsDef("SchedModel");
+ ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+ if (I == ProcModelMap.end()) {
+ PrintFatalError((*II)->getLoc(), "Undefined SchedMachineModel "
+ + ModelDef->getName());
+ }
+ ProcModels[I->second].ItinRWDefs.push_back(*II);
+ }
+}
+
+/// Infer new classes from existing classes. In the process, this may create new
+/// SchedWrites from sequences of existing SchedWrites.
+void CodeGenSchedModels::inferSchedClasses() {
+ // Visit all existing classes and newly created classes.
+ for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
+ if (SchedClasses[Idx].ItinClassDef)
+ inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
+ else if (!SchedClasses[Idx].InstRWs.empty())
+ inferFromInstRWs(Idx);
+ else {
+ inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
+ Idx, SchedClasses[Idx].ProcIndices);
+ }
+ assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
+ "too many SchedVariants");
+ }
+}
+
+/// Infer classes from per-processor itinerary resources.
+void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
+ unsigned FromClassIdx) {
+ for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
+ const CodeGenProcModel &PM = ProcModels[PIdx];
+ // For all ItinRW entries.
+ bool HasMatch = false;
+ for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
+ II != IE; ++II) {
+ RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
+ if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
+ continue;
+ if (HasMatch)
+ PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
+ + ItinClassDef->getName()
+ + " in ItinResources for " + PM.ModelName);
+ HasMatch = true;
+ IdxVec Writes, Reads;
+ findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+ IdxVec ProcIndices(1, PIdx);
+ inferFromRW(Writes, Reads, FromClassIdx, ProcIndices);
+ }
+ }
+}
+
+/// Infer classes from per-processor InstReadWrite definitions.
+void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
+ const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
+ for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) {
+ const RecVec *InstDefs = Sets.expand(*RWI);
+ RecIter II = InstDefs->begin(), IE = InstDefs->end();
+ for (; II != IE; ++II) {
+ if (InstrClassMap[*II] == SCIdx)
+ break;
+ }
+ // If this class no longer has any instructions mapped to it, it has become
+ // irrelevant.
+ if (II == IE)
+ continue;
+ IdxVec Writes, Reads;
+ findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+ unsigned PIdx = getProcModel((*RWI)->getValueAsDef("SchedModel")).Index;
+ IdxVec ProcIndices(1, PIdx);
+ inferFromRW(Writes, Reads, SCIdx, ProcIndices);
+ }
+}
+
+namespace {
+// Helper for substituteVariantOperand.
+struct TransVariant {
+ Record *VarOrSeqDef; // Variant or sequence.
+ unsigned RWIdx; // Index of this variant or sequence's matched type.
+ unsigned ProcIdx; // Processor model index or zero for any.
+ unsigned TransVecIdx; // Index into PredTransitions::TransVec.
+
+ TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
+ VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
+};
+
+// Associate a predicate with the SchedReadWrite that it guards.
+// RWIdx is the index of the read/write variant.
+struct PredCheck {
+ bool IsRead;
+ unsigned RWIdx;
+ Record *Predicate;
+
+ PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
+};
+
+// A Predicate transition is a list of RW sequences guarded by a PredTerm.
+struct PredTransition {
+ // A predicate term is a conjunction of PredChecks.
+ SmallVector<PredCheck, 4> PredTerm;
+ SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
+ SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
+ SmallVector<unsigned, 4> ProcIndices;
+};
+
+// Encapsulate a set of partially constructed transitions.
+// The results are built by repeated calls to substituteVariants.
+class PredTransitions {
+ CodeGenSchedModels &SchedModels;
+
+public:
+ std::vector<PredTransition> TransVec;
+
+ PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}
+
+ void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
+ bool IsRead, unsigned StartIdx);
+
+ void substituteVariants(const PredTransition &Trans);
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+
+private:
+ bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
+ void getIntersectingVariants(
+ const CodeGenSchedRW &SchedRW, unsigned TransIdx,
+ std::vector<TransVariant> &IntersectingVariants);
+ void pushVariant(const TransVariant &VInfo, bool IsRead);
+};
+} // anonymous
+
+// Return true if this predicate is mutually exclusive with a PredTerm. This
+// degenerates into checking if the predicate is mutually exclusive with any
+// predicate in the Term's conjunction.
+//
+// All predicates associated with a given SchedRW are considered mutually
+// exclusive. This should work even if the conditions expressed by the
+// predicates are not exclusive because the predicates for a given SchedWrite
+// are always checked in the order they are defined in the .td file. Later
+// conditions implicitly negate any prior condition.
+bool PredTransitions::mutuallyExclusive(Record *PredDef,
+ ArrayRef<PredCheck> Term) {
+
+ for (ArrayRef<PredCheck>::iterator I = Term.begin(), E = Term.end();
+ I != E; ++I) {
+ if (I->Predicate == PredDef)
+ return false;
- Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
- Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
+ const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(I->RWIdx, I->IsRead);
+ assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
+ RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
+ for (RecIter VI = Variants.begin(), VE = Variants.end(); VI != VE; ++VI) {
+ if ((*VI)->getValueAsDef("Predicate") == PredDef)
+ return true;
+ }
+ }
+ return false;
+}
- std::string ModelName = ModelDef->getName();
- const std::string &ItinName = ItinsDef->getName();
+static bool hasAliasedVariants(const CodeGenSchedRW &RW,
+ CodeGenSchedModels &SchedModels) {
+ if (RW.HasVariants)
+ return true;
- bool NoModel = ModelDef->getValueAsBit("NoModel");
- bool hasTopLevelItin = !ItinsDef->getValueAsListOfDefs("IID").empty();
- if (NoModel) {
- // If an itinerary is defined without a machine model, infer a new model.
- if (NoModel && hasTopLevelItin) {
- ModelName = ItinName + "Model";
- ModelDef = NULL;
+ for (RecIter I = RW.Aliases.begin(), E = RW.Aliases.end(); I != E; ++I) {
+ const CodeGenSchedRW &AliasRW =
+ SchedModels.getSchedRW((*I)->getValueAsDef("AliasRW"));
+ if (AliasRW.HasVariants)
+ return true;
+ if (AliasRW.IsSequence) {
+ IdxVec ExpandedRWs;
+ SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
+ for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
+ SI != SE; ++SI) {
+ if (hasAliasedVariants(SchedModels.getSchedRW(*SI, AliasRW.IsRead),
+ SchedModels)) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
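+
+// Illustrative sketch (hypothetical names): an alias can introduce variants
+// indirectly, which is what the recursion above chases through sequences.
+//
+//   def MyWrite    : SchedWrite;
+//   def MyWriteVar : SchedWriteVariant<[...]>;
+//   def : SchedAlias<MyWrite, MyWriteVar>; // MyWrite now has aliased variants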
+
+static bool hasVariant(ArrayRef<PredTransition> Transitions,
+ CodeGenSchedModels &SchedModels) {
+ for (ArrayRef<PredTransition>::iterator
+ PTI = Transitions.begin(), PTE = Transitions.end();
+ PTI != PTE; ++PTI) {
+ for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+ WSI = PTI->WriteSequences.begin(), WSE = PTI->WriteSequences.end();
+ WSI != WSE; ++WSI) {
+ for (SmallVectorImpl<unsigned>::const_iterator
+ WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
+ if (hasAliasedVariants(SchedModels.getSchedWrite(*WI), SchedModels))
+ return true;
+ }
+ }
+ for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+ RSI = PTI->ReadSequences.begin(), RSE = PTI->ReadSequences.end();
+ RSI != RSE; ++RSI) {
+ for (SmallVectorImpl<unsigned>::const_iterator
+ RI = RSI->begin(), RE = RSI->end(); RI != RE; ++RI) {
+ if (hasAliasedVariants(SchedModels.getSchedRead(*RI), SchedModels))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+// Populate IntersectingVariants with any variants or aliased sequences of the
+// given SchedRW whose processor indices and predicates are not mutually
+// exclusive with the given transition.
+void PredTransitions::getIntersectingVariants(
+ const CodeGenSchedRW &SchedRW, unsigned TransIdx,
+ std::vector<TransVariant> &IntersectingVariants) {
+
+ std::vector<TransVariant> Variants;
+ if (SchedRW.HasVariants) {
+ unsigned VarProcIdx = 0;
+ if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
+ Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
+ VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
+ }
+ // Push each variant. Assign TransVecIdx later.
+ const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
+ for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
+ Variants.push_back(TransVariant(*RI, SchedRW.Index, VarProcIdx, 0));
+ }
+ for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
+ AI != AE; ++AI) {
+ // If either the SchedAlias itself or the SchedReadWrite that it aliases
+ // to is defined within a processor model, constrain all variants to
+ // that processor.
+ unsigned AliasProcIdx = 0;
+ if ((*AI)->getValueInit("SchedModel")->isComplete()) {
+ Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
+ AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
+ }
+ const CodeGenSchedRW &AliasRW =
+ SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
+
+ if (AliasRW.HasVariants) {
+ const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
+ for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
+ Variants.push_back(TransVariant(*RI, AliasRW.Index, AliasProcIdx, 0));
+ }
+ if (AliasRW.IsSequence) {
+ Variants.push_back(
+ TransVariant(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0));
+ }
+ }
+ for (unsigned VIdx = 0, VEnd = Variants.size(); VIdx != VEnd; ++VIdx) {
+ TransVariant &Variant = Variants[VIdx];
+ // Don't expand variants if the processor models don't intersect.
+ // A zero processor index means any processor.
+ SmallVector<unsigned, 4> &ProcIndices = TransVec[TransIdx].ProcIndices;
+ if (ProcIndices[0] && Variants[VIdx].ProcIdx) {
+ unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
+ Variant.ProcIdx);
+ if (!Cnt)
+ continue;
+ if (Cnt > 1) {
+ const CodeGenProcModel &PM =
+ *(SchedModels.procModelBegin() + Variant.ProcIdx);
+ PrintFatalError(Variant.VarOrSeqDef->getLoc(),
+ "Multiple variants defined for processor " +
+ PM.ModelName +
+ " Ensure only one SchedAlias exists per RW.");
+ }
+ }
+ if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
+ Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
+ if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
+ continue;
+ }
+ if (IntersectingVariants.empty()) {
+ // The first variant builds on the existing transition.
+ Variant.TransVecIdx = TransIdx;
+ IntersectingVariants.push_back(Variant);
+ }
+ else {
+ // Push another copy of the current transition for more variants.
+ Variant.TransVecIdx = TransVec.size();
+ IntersectingVariants.push_back(Variant);
+ TransVec.push_back(TransVec[TransIdx]);
}
}
+}
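+
+// Illustrative sketch (hypothetical names): giving a SchedAlias a SchedModel
+// binds its variants to that processor, which is what AliasProcIdx captures.
+//
+//   let SchedModel = MyProcModel in
+//   def : SchedAlias<MyWrite, MyProcWriteVar>;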
+
+// Push the Reads/Writes selected by this variant onto the PredTransition
+// specified by VInfo.
+void PredTransitions::
+pushVariant(const TransVariant &VInfo, bool IsRead) {
+
+ PredTransition &Trans = TransVec[VInfo.TransVecIdx];
+
+ // If this operand transition is reached through a processor-specific alias,
+ // then the whole transition is specific to this processor.
+ if (VInfo.ProcIdx != 0)
+ Trans.ProcIndices.assign(1, VInfo.ProcIdx);
+
+ IdxVec SelectedRWs;
+ if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
+ Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
+ Trans.PredTerm.push_back(PredCheck(IsRead, VInfo.RWIdx,PredDef));
+ RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
+ SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
+ }
else {
- // If a machine model is defined, the itinerary must be defined within it
- // rather than in the Processor definition itself.
- assert(!hasTopLevelItin && "Itinerary must be defined in SchedModel");
- ItinsDef = ModelDef->getValueAsDef("Itineraries");
+ assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
+ "variant must be a SchedVariant or aliased WriteSequence");
+ SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
}
- ProcModelMap[getProcModelKey(ProcDef)]= ProcModels.size();
+ const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);
- ProcModels.push_back(CodeGenProcModel(ModelName, ModelDef, ItinsDef));
+ SmallVectorImpl<SmallVector<unsigned,4> > &RWSequences = IsRead
+ ? Trans.ReadSequences : Trans.WriteSequences;
+ if (SchedRW.IsVariadic) {
+ unsigned OperIdx = RWSequences.size()-1;
+ // Make N-1 copies of this transition's last sequence.
+ for (unsigned i = 1, e = SelectedRWs.size(); i != e; ++i) {
+ RWSequences.push_back(RWSequences[OperIdx]);
+ }
+ // Push each of the N elements of the SelectedRWs onto a copy of the last
+ // sequence (split the current operand into N operands).
+ // Note that write sequences should be expanded within this loop--the entire
+ // sequence belongs to a single operand.
+ for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
+ RWI != RWE; ++RWI, ++OperIdx) {
+ IdxVec ExpandedRWs;
+ if (IsRead)
+ ExpandedRWs.push_back(*RWI);
+ else
+ SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
+ RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
+ ExpandedRWs.begin(), ExpandedRWs.end());
+ }
+ assert(OperIdx == RWSequences.size() && "missed a sequence");
+ }
+ else {
+ // Push this transition's expanded sequence onto this transition's last
+ // sequence (add to the current operand's sequence).
+ SmallVectorImpl<unsigned> &Seq = RWSequences.back();
+ IdxVec ExpandedRWs;
+ for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
+ RWI != RWE; ++RWI) {
+ if (IsRead)
+ ExpandedRWs.push_back(*RWI);
+ else
+ SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
+ }
+ Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
+ }
+}
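+
+// Illustrative sketch (hypothetical names): a variant may select a
+// WriteSequence, which pushVariant expands onto a single operand's sequence.
+//
+//   def MyWriteAB : WriteSequence<[MyWriteA, MyWriteB]>;
+//   def MyWriteVar : SchedWriteVariant<[
+//     SchedVar<MyPred, [MyWriteAB]>  // expands to MyWriteA, MyWriteB
+//   ]>;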
+
+// RWSeq is a sequence of all Reads or all Writes for the next read or write
+// operand. StartIdx is an index into TransVec where partial results
+// start. RWSeq must be applied to all transitions between StartIdx and the end
+// of TransVec.
+void PredTransitions::substituteVariantOperand(
+ const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {
- std::vector<Record*> ItinRecords = ItinsDef->getValueAsListOfDefs("IID");
- CollectProcItin(ProcModels.back(), ItinRecords);
+ // Visit each original RW within the current sequence.
+ for (SmallVectorImpl<unsigned>::const_iterator
+ RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
+ const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
+ // Push this RW on all partial PredTransitions or distribute variants.
+ // New PredTransitions may be pushed within this loop, and they should not
+ // be revisited (TransEnd must be loop invariant).
+ for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
+ TransIdx != TransEnd; ++TransIdx) {
+ // In the common case, push RW onto the current operand's sequence.
+ if (!hasAliasedVariants(SchedRW, SchedModels)) {
+ if (IsRead)
+ TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
+ else
+ TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
+ continue;
+ }
+ // Distribute this partial PredTransition across intersecting variants.
+ // This will push copies of TransVec[TransIdx] onto the back of TransVec.
+ std::vector<TransVariant> IntersectingVariants;
+ getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
+ if (IntersectingVariants.empty())
+ PrintFatalError(SchedRW.TheDef->getLoc(),
+ "No variant of this type has "
+ "a matching predicate on any processor");
+ // Now expand each variant on top of its copy of the transition.
+ for (std::vector<TransVariant>::const_iterator
+ IVI = IntersectingVariants.begin(),
+ IVE = IntersectingVariants.end();
+ IVI != IVE; ++IVI) {
+ pushVariant(*IVI, IsRead);
+ }
+ }
+ }
}
-// Gather the processor itineraries.
-void CodeGenSchedModels::CollectProcItin(CodeGenProcModel &ProcModel,
- std::vector<Record*> ItinRecords) {
- // Skip empty itinerary.
- if (ItinRecords.empty())
+// For each variant of a Read/Write in Trans, substitute the sequence of
+// Read/Writes guarded by the variant. This is exponential in the number of
+// variant Read/Writes, but in practice detection of mutually exclusive
+// predicates should result in linear growth in the total number of variants.
+//
+// This is one step in a breadth-first search of nested variants.
+void PredTransitions::substituteVariants(const PredTransition &Trans) {
+ // Build up a set of partial results starting at the back of
+ // PredTransitions. Remember the first new transition.
+ unsigned StartIdx = TransVec.size();
+ TransVec.resize(TransVec.size() + 1);
+ TransVec.back().PredTerm = Trans.PredTerm;
+ TransVec.back().ProcIndices = Trans.ProcIndices;
+
+ // Visit each original write sequence.
+ for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+ WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
+ WSI != WSE; ++WSI) {
+ // Push a new (empty) write sequence onto all partial Transitions.
+ for (std::vector<PredTransition>::iterator I =
+ TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
+ I->WriteSequences.resize(I->WriteSequences.size() + 1);
+ }
+ substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
+ }
+ // Visit each original read sequence.
+ for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+ RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
+ RSI != RSE; ++RSI) {
+ // Push a new (empty) read sequence onto all partial Transitions.
+ for (std::vector<PredTransition>::iterator I =
+ TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
+ I->ReadSequences.resize(I->ReadSequences.size() + 1);
+ }
+ substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
+ }
+}
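+
+// Worked illustration (hypothetical): a transition with two variant writes,
+// guarded by {P, !P} and {Q, !Q} respectively, expands into four transitions
+// with PredTerms {P,Q}, {P,!Q}, {!P,Q}, {!P,!Q}. Predicates that are mutually
+// exclusive on the same write prune this product and keep growth linear.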
+
+// Create a new SchedClass for each variant found by inferFromRW.
+static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
+ unsigned FromClassIdx,
+ CodeGenSchedModels &SchedModels) {
+ // For each PredTransition, create a new CodeGenSchedTransition, which usually
+ // requires creating a new SchedClass.
+ for (ArrayRef<PredTransition>::iterator
+ I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
+ IdxVec OperWritesVariant;
+ for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+ WSI = I->WriteSequences.begin(), WSE = I->WriteSequences.end();
+ WSI != WSE; ++WSI) {
+ // Create a new write representing the expanded sequence.
+ OperWritesVariant.push_back(
+ SchedModels.findOrInsertRW(*WSI, /*IsRead=*/false));
+ }
+ IdxVec OperReadsVariant;
+ for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+ RSI = I->ReadSequences.begin(), RSE = I->ReadSequences.end();
+ RSI != RSE; ++RSI) {
+ // Create a new read representing the expanded sequence.
+ OperReadsVariant.push_back(
+ SchedModels.findOrInsertRW(*RSI, /*IsRead=*/true));
+ }
+ IdxVec ProcIndices(I->ProcIndices.begin(), I->ProcIndices.end());
+ CodeGenSchedTransition SCTrans;
+ SCTrans.ToClassIdx =
+ SchedModels.addSchedClass(OperWritesVariant, OperReadsVariant,
+ ProcIndices);
+ SCTrans.ProcIndices = ProcIndices;
+ // The final PredTerm is the unique set of predicates guarding the transition.
+ RecVec Preds;
+ for (SmallVectorImpl<PredCheck>::const_iterator
+ PI = I->PredTerm.begin(), PE = I->PredTerm.end(); PI != PE; ++PI) {
+ Preds.push_back(PI->Predicate);
+ }
+ RecIter PredsEnd = std::unique(Preds.begin(), Preds.end());
+ Preds.resize(PredsEnd - Preds.begin());
+ SCTrans.PredTerm = Preds;
+ SchedModels.getSchedClass(FromClassIdx).Transitions.push_back(SCTrans);
+ }
+}
+
+// Create new SchedClasses for the given ReadWrite list. If any of the
+// ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
+// of the ReadWrite list, following Aliases if necessary.
+void CodeGenSchedModels::inferFromRW(const IdxVec &OperWrites,
+ const IdxVec &OperReads,
+ unsigned FromClassIdx,
+ const IdxVec &ProcIndices) {
+ DEBUG(dbgs() << "INFER RW: ");
+
+ // Create a seed transition with an empty PredTerm and the expanded sequences
+ // of SchedWrites for the current SchedClass.
+ std::vector<PredTransition> LastTransitions;
+ LastTransitions.resize(1);
+ LastTransitions.back().ProcIndices.append(ProcIndices.begin(),
+ ProcIndices.end());
+
+ for (IdxIter I = OperWrites.begin(), E = OperWrites.end(); I != E; ++I) {
+ IdxVec WriteSeq;
+ expandRWSequence(*I, WriteSeq, /*IsRead=*/false);
+ unsigned Idx = LastTransitions[0].WriteSequences.size();
+ LastTransitions[0].WriteSequences.resize(Idx + 1);
+ SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences[Idx];
+ for (IdxIter WI = WriteSeq.begin(), WE = WriteSeq.end(); WI != WE; ++WI)
+ Seq.push_back(*WI);
+ DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
+ }
+ DEBUG(dbgs() << " Reads: ");
+ for (IdxIter I = OperReads.begin(), E = OperReads.end(); I != E; ++I) {
+ IdxVec ReadSeq;
+ expandRWSequence(*I, ReadSeq, /*IsRead=*/true);
+ unsigned Idx = LastTransitions[0].ReadSequences.size();
+ LastTransitions[0].ReadSequences.resize(Idx + 1);
+ SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences[Idx];
+ for (IdxIter RI = ReadSeq.begin(), RE = ReadSeq.end(); RI != RE; ++RI)
+ Seq.push_back(*RI);
+ DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
+ }
+ DEBUG(dbgs() << '\n');
+
+ // Collect all PredTransitions for individual operands.
+ // Iterate until no variant writes remain.
+ while (hasVariant(LastTransitions, *this)) {
+ PredTransitions Transitions(*this);
+ for (std::vector<PredTransition>::const_iterator
+ I = LastTransitions.begin(), E = LastTransitions.end();
+ I != E; ++I) {
+ Transitions.substituteVariants(*I);
+ }
+ DEBUG(Transitions.dump());
+ LastTransitions.swap(Transitions.TransVec);
+ }
+ // If the first transition has no variants, nothing to do.
+ if (LastTransitions[0].PredTerm.empty())
return;
- HasProcItineraries = true;
+ // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
+ // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
+ inferFromTransitions(LastTransitions, FromClassIdx, *this);
+}
- ProcModel.ItinDefList.resize(NumItineraryClasses+1);
+// Collect and sort WriteRes, ReadAdvance, and ProcResources.
+void CodeGenSchedModels::collectProcResources() {
+ // Add any subtarget-specific SchedReadWrites that are directly associated
+ // with processor resources. Refer to the parent SchedClass's ProcIndices to
+ // determine which processors they apply to.
+ for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd();
+ SCI != SCE; ++SCI) {
+ if (SCI->ItinClassDef)
+ collectItinProcResources(SCI->ItinClassDef);
+ else
+ collectRWResources(SCI->Writes, SCI->Reads, SCI->ProcIndices);
+ }
+ // Add resources separately defined by each subtarget.
+ RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
+ for (RecIter WRI = WRDefs.begin(), WRE = WRDefs.end(); WRI != WRE; ++WRI) {
+ Record *ModelDef = (*WRI)->getValueAsDef("SchedModel");
+ addWriteRes(*WRI, getProcModel(ModelDef).Index);
+ }
+ RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
+ for (RecIter RAI = RADefs.begin(), RAE = RADefs.end(); RAI != RAE; ++RAI) {
+ Record *ModelDef = (*RAI)->getValueAsDef("SchedModel");
+ addReadAdvance(*RAI, getProcModel(ModelDef).Index);
+ }
+ // Finalize each ProcModel by sorting the record arrays.
+ for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
+ CodeGenProcModel &PM = ProcModels[PIdx];
+ std::sort(PM.WriteResDefs.begin(), PM.WriteResDefs.end(),
+ LessRecord());
+ std::sort(PM.ReadAdvanceDefs.begin(), PM.ReadAdvanceDefs.end(),
+ LessRecord());
+ std::sort(PM.ProcResourceDefs.begin(), PM.ProcResourceDefs.end(),
+ LessRecord());
+ DEBUG(
+ PM.dump();
+ dbgs() << "WriteResDefs: ";
+ for (RecIter RI = PM.WriteResDefs.begin(),
+ RE = PM.WriteResDefs.end(); RI != RE; ++RI) {
+ if ((*RI)->isSubClassOf("WriteRes"))
+ dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
+ else
+ dbgs() << (*RI)->getName() << " ";
+ }
+ dbgs() << "\nReadAdvanceDefs: ";
+ for (RecIter RI = PM.ReadAdvanceDefs.begin(),
+ RE = PM.ReadAdvanceDefs.end(); RI != RE; ++RI) {
+ if ((*RI)->isSubClassOf("ReadAdvance"))
+ dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
+ else
+ dbgs() << (*RI)->getName() << " ";
+ }
+ dbgs() << "\nProcResourceDefs: ";
+ for (RecIter RI = PM.ProcResourceDefs.begin(),
+ RE = PM.ProcResourceDefs.end(); RI != RE; ++RI) {
+ dbgs() << (*RI)->getName() << " ";
+ }
+ dbgs() << '\n');
+ }
+}
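+
+// Illustrative sketch of the per-subtarget records gathered above
+// (hypothetical names):
+//
+//   let SchedModel = MyProcModel in {
+//     def : WriteRes<MyWriteALU, [MyALUUnit]> { let Latency = 2; }
+//     def : ReadAdvance<MyReadMul, 3>;
+//   }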
- // Insert each itinerary data record in the correct position within
- // the processor model's ItinDefList.
- for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
- Record *ItinData = ItinRecords[i];
- Record *ItinDef = ItinData->getValueAsDef("TheClass");
- if (!SchedClassIdxMap.count(ItinDef->getName())) {
- DEBUG(dbgs() << ProcModel.ItinsDef->getName()
- << " has unused itinerary class " << ItinDef->getName() << '\n');
- continue;
+// Collect itinerary class resources for each processor.
+void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
+ for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
+ const CodeGenProcModel &PM = ProcModels[PIdx];
+ // For all ItinRW entries.
+ bool HasMatch = false;
+ for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
+ II != IE; ++II) {
+ RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
+ if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
+ continue;
+ if (HasMatch)
+ PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
+ + ItinClassDef->getName()
+ + " in ItinResources for " + PM.ModelName);
+ HasMatch = true;
+ IdxVec Writes, Reads;
+ findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+ IdxVec ProcIndices(1, PIdx);
+ collectRWResources(Writes, Reads, ProcIndices);
+ }
+ }
+}
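+
+// Illustrative sketch (hypothetical names): an ItinRW maps itinerary classes
+// to per-operand reads/writes for one processor; a class may match only once.
+//
+//   let SchedModel = MyProcModel in
+//   def : ItinRW<[MyItinALU], [MyWriteALU, MyReadALU]>;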
+
+void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
+ const IdxVec &ProcIndices) {
+ const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
+ if (SchedRW.TheDef) {
+ if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
+ for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
+ PI != PE; ++PI) {
+ addWriteRes(SchedRW.TheDef, *PI);
+ }
+ }
+ else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
+ for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
+ PI != PE; ++PI) {
+ addReadAdvance(SchedRW.TheDef, *PI);
+ }
+ }
+ }
+ for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
+ AI != AE; ++AI) {
+ IdxVec AliasProcIndices;
+ if ((*AI)->getValueInit("SchedModel")->isComplete()) {
+ AliasProcIndices.push_back(
+ getProcModel((*AI)->getValueAsDef("SchedModel")).Index);
+ }
+ else
+ AliasProcIndices = ProcIndices;
+ const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
+ assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");
+
+ IdxVec ExpandedRWs;
+ expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
+ for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
+ SI != SE; ++SI) {
+ collectRWResources(*SI, IsRead, AliasProcIndices);
+ }
+ }
+}
+
+// Collect resources for a set of read/write types and processor indices.
+void CodeGenSchedModels::collectRWResources(const IdxVec &Writes,
+ const IdxVec &Reads,
+ const IdxVec &ProcIndices) {
+
+ for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
+ collectRWResources(*WI, /*IsRead=*/false, ProcIndices);
+
+ for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
+ collectRWResources(*RI, /*IsRead=*/true, ProcIndices);
+}
+
+
+// Find the processor's resource units for this kind of resource.
+Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
+ const CodeGenProcModel &PM) const {
+ if (ProcResKind->isSubClassOf("ProcResourceUnits"))
+ return ProcResKind;
+
+ Record *ProcUnitDef = 0;
+ RecVec ProcResourceDefs =
+ Records.getAllDerivedDefinitions("ProcResourceUnits");
+
+ for (RecIter RI = ProcResourceDefs.begin(), RE = ProcResourceDefs.end();
+ RI != RE; ++RI) {
+
+ if ((*RI)->getValueAsDef("Kind") == ProcResKind
+ && (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) {
+ if (ProcUnitDef) {
+ PrintFatalError((*RI)->getLoc(),
+ "Multiple ProcessorResourceUnits associated with "
+ + ProcResKind->getName());
+ }
+ ProcUnitDef = *RI;
}
- ProcModel.ItinDefList[getItinClassIdx(ItinDef)] = ItinData;
}
+ if (!ProcUnitDef) {
+ PrintFatalError(ProcResKind->getLoc(),
+ "No ProcessorResources associated with "
+ + ProcResKind->getName());
+ }
+ return ProcUnitDef;
+}
+
+// Iteratively add a resource and its super resources.
+void CodeGenSchedModels::addProcResource(Record *ProcResKind,
+ CodeGenProcModel &PM) {
+ for (;;) {
+ Record *ProcResUnits = findProcResUnits(ProcResKind, PM);
+
+ // See if this ProcResource is already associated with this processor.
+ RecIter I = std::find(PM.ProcResourceDefs.begin(),
+ PM.ProcResourceDefs.end(), ProcResUnits);
+ if (I != PM.ProcResourceDefs.end())
+ return;
+
+ PM.ProcResourceDefs.push_back(ProcResUnits);
+ if (!ProcResUnits->getValueInit("Super")->isComplete())
+ return;
+
+ ProcResKind = ProcResUnits->getValueAsDef("Super");
+ }
+}
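+
+// Illustrative sketch (hypothetical names): setting Super chains resources,
+// so adding the sub-resource also registers its parent via the loop above.
+//
+//   def MyUnitPair : ProcResource<2>;
+//   def MyUnit0    : ProcResource<1> { let Super = MyUnitPair; }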
+
+// Add resources for a SchedWrite to this processor if they don't exist.
+void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) {
+ assert(PIdx && "don't add resources to an invalid Processor model");
+
+ RecVec &WRDefs = ProcModels[PIdx].WriteResDefs;
+ RecIter WRI = std::find(WRDefs.begin(), WRDefs.end(), ProcWriteResDef);
+ if (WRI != WRDefs.end())
+ return;
+ WRDefs.push_back(ProcWriteResDef);
+
+ // Visit ProcResourceKinds referenced by the newly discovered WriteRes.
+ RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources");
+ for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end();
+ WritePRI != WritePRE; ++WritePRI) {
+ addProcResource(*WritePRI, ProcModels[PIdx]);
+ }
+}
+
+// Add resources for a ReadAdvance to this processor if they don't exist.
+void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
+ unsigned PIdx) {
+ RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs;
+ RecIter I = std::find(RADefs.begin(), RADefs.end(), ProcReadAdvanceDef);
+ if (I != RADefs.end())
+ return;
+ RADefs.push_back(ProcReadAdvanceDef);
+}
+
+unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
+ RecIter PRPos = std::find(ProcResourceDefs.begin(), ProcResourceDefs.end(),
+ PRDef);
+ if (PRPos == ProcResourceDefs.end())
+ PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
+ "the ProcResources list for " + ModelName);
+ // Idx=0 is reserved for invalid.
+ return 1 + (PRPos - ProcResourceDefs.begin());
+}
+
#ifndef NDEBUG
- // Check for missing itinerary entries.
- assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
- for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
- if (!ProcModel.ItinDefList[i])
- DEBUG(dbgs() << ProcModel.ItinsDef->getName()
- << " missing itinerary for class " << SchedClasses[i].Name << '\n');
+void CodeGenProcModel::dump() const {
+ dbgs() << Index << ": " << ModelName << " "
+ << (ModelDef ? ModelDef->getName() : "inferred") << " "
+ << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
+}
+
+void CodeGenSchedRW::dump() const {
+ dbgs() << Name << (IsVariadic ? " (V) " : " ");
+ if (IsSequence) {
+ dbgs() << "(";
+ dumpIdxVec(Sequence);
+ dbgs() << ")";
+ }
+}
+
+void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const {
+ dbgs() << "SCHEDCLASS " << Name << '\n'
+ << " Writes: ";
+ for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
+ SchedModels->getSchedWrite(Writes[i]).dump();
+ if (i < N-1) {
+ dbgs() << '\n';
+ dbgs().indent(10);
+ }
+ }
+ dbgs() << "\n Reads: ";
+ for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
+ SchedModels->getSchedRead(Reads[i]).dump();
+ if (i < N-1) {
+ dbgs() << '\n';
+ dbgs().indent(10);
+ }
+ }
+ dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n';
+}
+
+void PredTransitions::dump() const {
+ dbgs() << "Expanded Variants:\n";
+ for (std::vector<PredTransition>::const_iterator
+ TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) {
+ dbgs() << "{";
+ for (SmallVectorImpl<PredCheck>::const_iterator
+ PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end();
+ PCI != PCE; ++PCI) {
+ if (PCI != TI->PredTerm.begin())
+ dbgs() << ", ";
+ dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name
+ << ":" << PCI->Predicate->getName();
+ }
+ dbgs() << "},\n => {";
+ for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
+ WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end();
+ WSI != WSE; ++WSI) {
+ dbgs() << "(";
+ for (SmallVectorImpl<unsigned>::const_iterator
+ WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
+ if (WI != WSI->begin())
+ dbgs() << ", ";
+ dbgs() << SchedModels.getSchedWrite(*WI).Name;
+ }
+ dbgs() << "),";
+ }
+ dbgs() << "}\n";
}
-#endif
}
+#endif // NDEBUG
diff --git a/contrib/llvm/utils/TableGen/CodeGenSchedule.h b/contrib/llvm/utils/TableGen/CodeGenSchedule.h
index 9da0145..eed0589 100644
--- a/contrib/llvm/utils/TableGen/CodeGenSchedule.h
+++ b/contrib/llvm/utils/TableGen/CodeGenSchedule.h
@@ -15,6 +15,7 @@
#ifndef CODEGEN_SCHEDULE_H
#define CODEGEN_SCHEDULE_H
+#include "SetTheory.h"
#include "llvm/TableGen/Record.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/DenseMap.h"
@@ -23,21 +24,131 @@
namespace llvm {
class CodeGenTarget;
+class CodeGenSchedModels;
+class CodeGenInstruction;
-// Scheduling class.
-//
-// Each instruction description will be mapped to a scheduling class. It may be
-// an explicitly defined itinerary class, or an inferred class in which case
-// ItinClassDef == NULL.
+typedef std::vector<Record*> RecVec;
+typedef std::vector<Record*>::const_iterator RecIter;
+
+typedef std::vector<unsigned> IdxVec;
+typedef std::vector<unsigned>::const_iterator IdxIter;
+
+void splitSchedReadWrites(const RecVec &RWDefs,
+ RecVec &WriteDefs, RecVec &ReadDefs);
+
+/// We have two kinds of SchedReadWrites. Explicitly defined and inferred
+/// sequences. TheDef is nonnull for explicit SchedWrites, but Sequence may or
+/// may not be empty. TheDef is null for inferred sequences, and Sequence must
+/// be nonempty.
+///
+/// IsVariadic controls whether the variants are expanded into multiple operands
+/// or a sequence of writes on one operand.
+struct CodeGenSchedRW {
+ unsigned Index;
+ std::string Name;
+ Record *TheDef;
+ bool IsRead;
+ bool IsAlias;
+ bool HasVariants;
+ bool IsVariadic;
+ bool IsSequence;
+ IdxVec Sequence;
+ RecVec Aliases;
+
+ CodeGenSchedRW(): Index(0), TheDef(0), IsAlias(false), HasVariants(false),
+ IsVariadic(false), IsSequence(false) {}
+ CodeGenSchedRW(unsigned Idx, Record *Def): Index(Idx), TheDef(Def),
+ IsAlias(false), IsVariadic(false) {
+ Name = Def->getName();
+ IsRead = Def->isSubClassOf("SchedRead");
+ HasVariants = Def->isSubClassOf("SchedVariant");
+ if (HasVariants)
+ IsVariadic = Def->getValueAsBit("Variadic");
+
+ // Read records don't currently have sequences, but support can easily be
+ // added. Note that implicit Reads (from ReadVariant) may have a Sequence
+ // (but no record).
+ IsSequence = Def->isSubClassOf("WriteSequence");
+ }
+
+ CodeGenSchedRW(unsigned Idx, bool Read, const IdxVec &Seq,
+ const std::string &Name):
+ Index(Idx), Name(Name), TheDef(0), IsRead(Read), IsAlias(false),
+ HasVariants(false), IsVariadic(false), IsSequence(true), Sequence(Seq) {
+ assert(Sequence.size() > 1 && "implied sequence needs >1 RWs");
+ }
+
+ bool isValid() const {
+ assert((!HasVariants || TheDef) && "Variant write needs record def");
+ assert((!IsVariadic || HasVariants) && "Variadic write needs variants");
+ assert((!IsSequence || !HasVariants) && "Sequence can't have variant");
+ assert((!IsSequence || !Sequence.empty()) && "Sequence should be nonempty");
+ assert((!IsAlias || Aliases.empty()) && "Alias cannot have aliases");
+ return TheDef || !Sequence.empty();
+ }
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+};
+
+/// Represent a transition between SchedClasses induced by SchedVariant.
+struct CodeGenSchedTransition {
+ unsigned ToClassIdx;
+ IdxVec ProcIndices;
+ RecVec PredTerm;
+};
+
+/// Scheduling class.
+///
+/// Each instruction description will be mapped to a scheduling class. There are
+/// four types of classes:
+///
+/// 1) An explicitly defined itinerary class with ItinClassDef set.
+/// Writes and Reads are empty. ProcIndices contains 0 for any processor.
+///
+/// 2) An implied class with a list of SchedWrites and SchedReads that are
+/// defined in an instruction definition and which are common across all
+/// subtargets. ProcIndices contains 0 for any processor.
+///
+/// 3) An implied class with a list of InstRW records that map instructions to
+/// SchedWrites and SchedReads per-processor. InstrClassMap should map the same
+/// instructions to this class. ProcIndices contains all the processors that
+/// provided InstRW records for this class. ItinClassDef or Writes/Reads may
+/// still be defined for processors with no InstRW entry.
+///
+/// 4) An inferred class represents a variant of another class that may be
+/// resolved at runtime. ProcIndices contains the set of processors that may
+/// require the class. ProcIndices are propagated through SchedClasses as
+/// variants are expanded. Multiple SchedClasses may be inferred from an
+/// itinerary class. Each inherits the processor index from the ItinRW record
+/// that mapped the itinerary class to the variant Writes or Reads.
struct CodeGenSchedClass {
std::string Name;
- unsigned Index;
Record *ItinClassDef;
- CodeGenSchedClass(): Index(0), ItinClassDef(0) {}
- CodeGenSchedClass(Record *rec): Index(0), ItinClassDef(rec) {
+ IdxVec Writes;
+ IdxVec Reads;
+ // Sorted list of ProcIdx, where ProcIdx==0 implies any processor.
+ IdxVec ProcIndices;
+
+ std::vector<CodeGenSchedTransition> Transitions;
+
+ // InstRW records associated with this class. These records may refer to an
+ // Instruction no longer mapped to this class by InstrClassMap. These
+ // Instructions should be ignored by this class because they have been split
+ // off to join another inferred class.
+ RecVec InstRWs;
+
+ CodeGenSchedClass(): ItinClassDef(0) {}
+ CodeGenSchedClass(Record *rec): ItinClassDef(rec) {
Name = rec->getName();
+ ProcIndices.push_back(0);
}
+
+#ifndef NDEBUG
+ void dump(const CodeGenSchedModels *SchedModels) const;
+#endif
};
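+
+// Illustrative sketch (hypothetical names) of the records behind class kinds
+// (3) and (4) above: a per-processor InstRW remaps matching instructions.
+//
+//   let SchedModel = MyProcModel in
+//   def : InstRW<[MyWriteALU], (instregex "MYADD.*")>;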
// Processor model.
@@ -55,28 +166,69 @@ struct CodeGenSchedClass {
//
// ItinDefList orders this processor's InstrItinData records by SchedClass idx.
struct CodeGenProcModel {
+ unsigned Index;
std::string ModelName;
Record *ModelDef;
Record *ItinsDef;
- // Array of InstrItinData records indexed by CodeGenSchedClass::Index.
- // The list is empty if the subtarget has no itineraries.
- std::vector<Record *> ItinDefList;
+ // Derived members...
- CodeGenProcModel(const std::string &Name, Record *MDef, Record *IDef):
- ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {}
+ // Array of InstrItinData records indexed by a CodeGenSchedClass index.
+ // This list is empty if the Processor has no value for Itineraries.
+ // Initialized by collectProcItins().
+ RecVec ItinDefList;
+
+ // Map itinerary classes to per-operand resources.
+ // This list is empty if no ItinRW refers to this Processor.
+ RecVec ItinRWDefs;
+
+ // All read/write resources associated with this processor.
+ RecVec WriteResDefs;
+ RecVec ReadAdvanceDefs;
+
+ // Per-operand machine model resources associated with this processor.
+ RecVec ProcResourceDefs;
+
+ CodeGenProcModel(unsigned Idx, const std::string &Name, Record *MDef,
+ Record *IDef) :
+ Index(Idx), ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {}
+
+ bool hasInstrSchedModel() const {
+ return !WriteResDefs.empty() || !ItinRWDefs.empty();
+ }
+
+ unsigned getProcResourceIdx(Record *PRDef) const;
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
};
-// Top level container for machine model data.
+/// Top level container for machine model data.
class CodeGenSchedModels {
RecordKeeper &Records;
const CodeGenTarget &Target;
+ // Map dag expressions to Instruction lists.
+ SetTheory Sets;
+
+ // List of unique processor models.
+ std::vector<CodeGenProcModel> ProcModels;
+
+ // Map Processor's MachineModel or ProcItin to a CodeGenProcModel index.
+ typedef DenseMap<Record*, unsigned> ProcModelMapTy;
+ ProcModelMapTy ProcModelMap;
+
+ // Per-operand SchedReadWrite types.
+ std::vector<CodeGenSchedRW> SchedWrites;
+ std::vector<CodeGenSchedRW> SchedReads;
+
// List of unique SchedClasses.
std::vector<CodeGenSchedClass> SchedClasses;
// Map SchedClass name to itinerary index.
- // These are either explicit itinerary classes or inferred classes.
+ // These are either explicit itinerary classes or classes implied by
+ // instruction definitions with SchedReadWrite lists.
StringMap<unsigned> SchedClassIdxMap;
// SchedClass indices 1 up to and including NumItineraryClasses identify
@@ -84,22 +236,80 @@ class CodeGenSchedModels {
// definitions. NoItinerary always has index 0 regardless of whether it is
// explicitly referenced.
//
- // Any inferred SchedClass have a index greater than NumItineraryClasses.
+ // Any implied SchedClass has an index greater than NumItineraryClasses.
unsigned NumItineraryClasses;
- // List of unique processor models.
- std::vector<CodeGenProcModel> ProcModels;
-
- // Map Processor's MachineModel + ProcItin fields to a CodeGenProcModel index.
- typedef DenseMap<std::pair<Record*, Record*>, unsigned> ProcModelMapTy;
- ProcModelMapTy ProcModelMap;
+ // Any inferred SchedClass has an index greater than NumInstrSchedClasses.
+ unsigned NumInstrSchedClasses;
- // True if any processors have nonempty itineraries.
- bool HasProcItineraries;
+ // Map Instruction to SchedClass index. Only for Instructions mentioned in
+ // InstRW records.
+ typedef DenseMap<Record*, unsigned> InstClassMapTy;
+ InstClassMapTy InstrClassMap;
public:
CodeGenSchedModels(RecordKeeper& RK, const CodeGenTarget &TGT);
+ Record *getModelOrItinDef(Record *ProcDef) const {
+ Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
+ Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
+ if (!ItinsDef->getValueAsListOfDefs("IID").empty()) {
+ assert(ModelDef->getValueAsBit("NoModel")
+ && "Itineraries must be defined within SchedMachineModel");
+ return ItinsDef;
+ }
+ return ModelDef;
+ }
+
+ const CodeGenProcModel &getModelForProc(Record *ProcDef) const {
+ Record *ModelDef = getModelOrItinDef(ProcDef);
+ ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+ assert(I != ProcModelMap.end() && "missing machine model");
+ return ProcModels[I->second];
+ }
+
+ const CodeGenProcModel &getProcModel(Record *ModelDef) const {
+ ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+ assert(I != ProcModelMap.end() && "missing machine model");
+ return ProcModels[I->second];
+ }
+
+ // Iterate over the unique processor models.
+ typedef std::vector<CodeGenProcModel>::const_iterator ProcIter;
+ ProcIter procModelBegin() const { return ProcModels.begin(); }
+ ProcIter procModelEnd() const { return ProcModels.end(); }
+
+ // Get a SchedWrite from its index.
+ const CodeGenSchedRW &getSchedWrite(unsigned Idx) const {
+ assert(Idx < SchedWrites.size() && "bad SchedWrite index");
+ assert(SchedWrites[Idx].isValid() && "invalid SchedWrite");
+ return SchedWrites[Idx];
+ }
+ // Get a SchedRead from its index.
+ const CodeGenSchedRW &getSchedRead(unsigned Idx) const {
+ assert(Idx < SchedReads.size() && "bad SchedRead index");
+ assert(SchedReads[Idx].isValid() && "invalid SchedRead");
+ return SchedReads[Idx];
+ }
+
+ const CodeGenSchedRW &getSchedRW(unsigned Idx, bool IsRead) const {
+ return IsRead ? getSchedRead(Idx) : getSchedWrite(Idx);
+ }
+ CodeGenSchedRW &getSchedRW(Record *Def) {
+ bool IsRead = Def->isSubClassOf("SchedRead");
+ unsigned Idx = getSchedRWIdx(Def, IsRead);
+ return const_cast<CodeGenSchedRW&>(
+ IsRead ? getSchedRead(Idx) : getSchedWrite(Idx));
+ }
+ const CodeGenSchedRW &getSchedRW(Record *Def) const {
+ return const_cast<CodeGenSchedModels&>(*this).getSchedRW(Def);
+ }
+
+ unsigned getSchedRWIdx(Record *Def, bool IsRead, unsigned After = 0) const;
+
+ // Return true if the given write record is referenced by a ReadAdvance.
+ bool hasReadOfWrite(Record *WriteDef) const;
+
// Check if any instructions are assigned to an explicit itinerary class other
// than NoItinerary.
bool hasItineraryClasses() const { return NumItineraryClasses > 0; }
@@ -111,60 +321,90 @@ public:
}
// Get a SchedClass from its index.
- const CodeGenSchedClass &getSchedClass(unsigned Idx) {
+ CodeGenSchedClass &getSchedClass(unsigned Idx) {
assert(Idx < SchedClasses.size() && "bad SchedClass index");
return SchedClasses[Idx];
}
-
- // Get an itinerary class's index. Value indices are '0' for NoItinerary up to
- // and including numItineraryClasses().
- unsigned getItinClassIdx(Record *ItinDef) const {
- assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
- unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
- assert(Idx <= NumItineraryClasses && "bad ItinClass index");
- return Idx;
+ const CodeGenSchedClass &getSchedClass(unsigned Idx) const {
+ assert(Idx < SchedClasses.size() && "bad SchedClass index");
+ return SchedClasses[Idx];
}
- bool hasProcessorItineraries() const {
- return HasProcItineraries;
- }
+ // Get the SchedClass index for an instruction. Instructions with no
+ // itinerary, no SchedReadWrites, and no InstRW references return 0
+ // for NoItinerary.
+ unsigned getSchedClassIdx(const CodeGenInstruction &Inst) const;
+
+ unsigned getSchedClassIdx(const RecVec &RWDefs) const;
- // Get an existing machine model for a processor definition.
- const CodeGenProcModel &getProcModel(Record *ProcDef) const {
- unsigned idx = getProcModelIdx(ProcDef);
- assert(idx < ProcModels.size() && "missing machine model");
- return ProcModels[idx];
+ unsigned getSchedClassIdxForItin(const Record *ItinDef) {
+ return SchedClassIdxMap[ItinDef->getName()];
}
- // Iterate over the unique processor models.
- typedef std::vector<CodeGenProcModel>::const_iterator ProcIter;
- ProcIter procModelBegin() const { return ProcModels.begin(); }
- ProcIter procModelEnd() const { return ProcModels.end(); }
+ typedef std::vector<CodeGenSchedClass>::const_iterator SchedClassIter;
+ SchedClassIter schedClassBegin() const { return SchedClasses.begin(); }
+ SchedClassIter schedClassEnd() const { return SchedClasses.end(); }
-private:
- // Get a key that can uniquely identify a machine model.
- ProcModelMapTy::key_type getProcModelKey(Record *ProcDef) const {
- Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
- Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
- return std::make_pair(ModelDef, ItinsDef);
- }
+ void findRWs(const RecVec &RWDefs, IdxVec &Writes, IdxVec &Reads) const;
+ void findRWs(const RecVec &RWDefs, IdxVec &RWs, bool IsRead) const;
+ void expandRWSequence(unsigned RWIdx, IdxVec &RWSeq, bool IsRead) const;
+ void expandRWSeqForProc(unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
+ const CodeGenProcModel &ProcModel) const;
- // Get the unique index of a machine model.
- unsigned getProcModelIdx(Record *ProcDef) const {
- ProcModelMapTy::const_iterator I =
- ProcModelMap.find(getProcModelKey(ProcDef));
- if (I == ProcModelMap.end())
- return ProcModels.size();
- return I->second;
- }
+ unsigned addSchedClass(const IdxVec &OperWrites, const IdxVec &OperReads,
+ const IdxVec &ProcIndices);
+
+ unsigned findOrInsertRW(ArrayRef<unsigned> Seq, bool IsRead);
+
+ unsigned findSchedClassIdx(const IdxVec &Writes, const IdxVec &Reads) const;
+
+ Record *findProcResUnits(Record *ProcResKind,
+ const CodeGenProcModel &PM) const;
+
+private:
+ void collectProcModels();
// Initialize a new processor model if it is unique.
void addProcModel(Record *ProcDef);
- void CollectSchedClasses();
- void CollectProcModels();
- void CollectProcItin(CodeGenProcModel &ProcModel,
- std::vector<Record*> ItinRecords);
+ void collectSchedRW();
+
+ std::string genRWName(const IdxVec& Seq, bool IsRead);
+ unsigned findRWForSequence(const IdxVec &Seq, bool IsRead);
+
+ void collectSchedClasses();
+
+ std::string createSchedClassName(const IdxVec &OperWrites,
+ const IdxVec &OperReads);
+ std::string createSchedClassName(const RecVec &InstDefs);
+ void createInstRWClass(Record *InstRWDef);
+
+ void collectProcItins();
+
+ void collectProcItinRW();
+
+ void inferSchedClasses();
+
+ void inferFromRW(const IdxVec &OperWrites, const IdxVec &OperReads,
+ unsigned FromClassIdx, const IdxVec &ProcIndices);
+ void inferFromItinClass(Record *ItinClassDef, unsigned FromClassIdx);
+ void inferFromInstRWs(unsigned SCIdx);
+
+ void collectProcResources();
+
+ void collectItinProcResources(Record *ItinClassDef);
+
+ void collectRWResources(unsigned RWIdx, bool IsRead,
+ const IdxVec &ProcIndices);
+
+ void collectRWResources(const IdxVec &Writes, const IdxVec &Reads,
+ const IdxVec &ProcIndices);
+
+ void addProcResource(Record *ProcResourceKind, CodeGenProcModel &PM);
+
+ void addWriteRes(Record *ProcWriteResDef, unsigned PIdx);
+
+ void addReadAdvance(Record *ProcReadAdvanceDef, unsigned PIdx);
};
} // namespace llvm
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
index 1dd2efc..c9992eb 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -10,13 +10,14 @@
// This class wraps target description classes used by the various code
// generation TableGen backends. This makes it easier to access the data and
// provides a single place that needs to check it for validity. All of these
-// classes throw exceptions on error conditions.
+// classes abort on error conditions.
//
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
#include "CodeGenIntrinsics.h"
#include "CodeGenSchedule.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
@@ -68,22 +69,30 @@ std::string llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::x86mmx: return "MVT::x86mmx";
case MVT::Glue: return "MVT::Glue";
case MVT::isVoid: return "MVT::isVoid";
+ case MVT::v2i1: return "MVT::v2i1";
+ case MVT::v4i1: return "MVT::v4i1";
+ case MVT::v8i1: return "MVT::v8i1";
+ case MVT::v16i1: return "MVT::v16i1";
case MVT::v2i8: return "MVT::v2i8";
case MVT::v4i8: return "MVT::v4i8";
case MVT::v8i8: return "MVT::v8i8";
case MVT::v16i8: return "MVT::v16i8";
case MVT::v32i8: return "MVT::v32i8";
+ case MVT::v1i16: return "MVT::v1i16";
case MVT::v2i16: return "MVT::v2i16";
case MVT::v4i16: return "MVT::v4i16";
case MVT::v8i16: return "MVT::v8i16";
case MVT::v16i16: return "MVT::v16i16";
+ case MVT::v1i32: return "MVT::v1i32";
case MVT::v2i32: return "MVT::v2i32";
case MVT::v4i32: return "MVT::v4i32";
case MVT::v8i32: return "MVT::v8i32";
+ case MVT::v16i32: return "MVT::v16i32";
case MVT::v1i64: return "MVT::v1i64";
case MVT::v2i64: return "MVT::v2i64";
case MVT::v4i64: return "MVT::v4i64";
case MVT::v8i64: return "MVT::v8i64";
+ case MVT::v16i64: return "MVT::v16i64";
case MVT::v2f16: return "MVT::v2f16";
case MVT::v2f32: return "MVT::v2f32";
case MVT::v4f32: return "MVT::v4f32";
@@ -116,9 +125,9 @@ CodeGenTarget::CodeGenTarget(RecordKeeper &records)
: Records(records), RegBank(0), SchedModels(0) {
std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target");
if (Targets.size() == 0)
- throw std::string("ERROR: No 'Target' subclasses defined!");
+ PrintFatalError("ERROR: No 'Target' subclasses defined!");
if (Targets.size() != 1)
- throw std::string("ERROR: Multiple subclasses of Target defined!");
+ PrintFatalError("ERROR: Multiple subclasses of Target defined!");
TargetRec = Targets[0];
}
@@ -152,7 +161,7 @@ Record *CodeGenTarget::getInstructionSet() const {
Record *CodeGenTarget::getAsmParser() const {
std::vector<Record*> LI = TargetRec->getValueAsListOfDefs("AssemblyParsers");
if (AsmParserNum >= LI.size())
- throw "Target does not have an AsmParser #" + utostr(AsmParserNum) + "!";
+ PrintFatalError("Target does not have an AsmParser #" + utostr(AsmParserNum) + "!");
return LI[AsmParserNum];
}
@@ -163,7 +172,7 @@ Record *CodeGenTarget::getAsmParserVariant(unsigned i) const {
std::vector<Record*> LI =
TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
if (i >= LI.size())
- throw "Target does not have an AsmParserVariant #" + utostr(i) + "!";
+ PrintFatalError("Target does not have an AsmParserVariant #" + utostr(i) + "!");
return LI[i];
}
@@ -181,7 +190,7 @@ unsigned CodeGenTarget::getAsmParserVariantCount() const {
Record *CodeGenTarget::getAsmWriter() const {
std::vector<Record*> LI = TargetRec->getValueAsListOfDefs("AssemblyWriters");
if (AsmWriterNum >= LI.size())
- throw "Target does not have an AsmWriter #" + utostr(AsmWriterNum) + "!";
+ PrintFatalError("Target does not have an AsmWriter #" + utostr(AsmWriterNum) + "!");
return LI[AsmWriterNum];
}
@@ -199,12 +208,11 @@ void CodeGenTarget::ReadRegAltNameIndices() const {
/// getRegisterByName - If there is a register with the specific AsmName,
/// return it.
const CodeGenRegister *CodeGenTarget::getRegisterByName(StringRef Name) const {
- const std::vector<CodeGenRegister*> &Regs = getRegBank().getRegisters();
- for (unsigned i = 0, e = Regs.size(); i != e; ++i)
- if (Regs[i]->TheDef->getValueAsString("AsmName") == Name)
- return Regs[i];
-
- return 0;
+ const StringMap<CodeGenRegister*> &Regs = getRegBank().getRegistersByName();
+ StringMap<CodeGenRegister*>::const_iterator I = Regs.find(Name);
+ if (I == Regs.end())
+ return 0;
+ return I->second;
}
std::vector<MVT::SimpleValueType> CodeGenTarget::
@@ -249,7 +257,7 @@ CodeGenSchedModels &CodeGenTarget::getSchedModels() const {
void CodeGenTarget::ReadInstructions() const {
std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
if (Insts.size() <= 2)
- throw std::string("No 'Instruction' subclasses defined!");
+ PrintFatalError("No 'Instruction' subclasses defined!");
// Parse the instructions defined in the .td file.
for (unsigned i = 0, e = Insts.size(); i != e; ++i)
@@ -265,7 +273,7 @@ GetInstByName(const char *Name,
DenseMap<const Record*, CodeGenInstruction*>::const_iterator
I = Insts.find(Rec);
if (Rec == 0 || I == Insts.end())
- throw std::string("Could not find '") + Name + "' instruction!";
+ PrintFatalError(std::string("Could not find '") + Name + "' instruction!");
return I->second;
}
@@ -300,6 +308,8 @@ void CodeGenTarget::ComputeInstrsByEnum() const {
"REG_SEQUENCE",
"COPY",
"BUNDLE",
+ "LIFETIME_START",
+ "LIFETIME_END",
0
};
const DenseMap<const Record*, CodeGenInstruction*> &Insts = getInstructions();
@@ -334,6 +344,15 @@ bool CodeGenTarget::isLittleEndianEncoding() const {
return getInstructionSet()->getValueAsBit("isLittleEndianEncoding");
}
+/// guessInstructionProperties - Return true if it's OK to guess instruction
+/// properties instead of raising an error.
+///
+/// This is configurable as a temporary migration aid. It will eventually be
+/// permanently false.
+bool CodeGenTarget::guessInstructionProperties() const {
+ return getInstructionSet()->getValueAsBit("guessInstructionProperties");
+}
+
//===----------------------------------------------------------------------===//
// ComplexPattern implementation
//
@@ -401,7 +420,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
if (DefName.size() <= 4 ||
std::string(DefName.begin(), DefName.begin() + 4) != "int_")
- throw "Intrinsic '" + DefName + "' does not start with 'int_'!";
+ PrintFatalError("Intrinsic '" + DefName + "' does not start with 'int_'!");
EnumName = std::string(DefName.begin()+4, DefName.end());
@@ -421,7 +440,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
// Verify it starts with "llvm.".
if (Name.size() <= 5 ||
std::string(Name.begin(), Name.begin() + 5) != "llvm.")
- throw "Intrinsic '" + DefName + "'s name does not start with 'llvm.'!";
+ PrintFatalError("Intrinsic '" + DefName + "'s name does not start with 'llvm.'!");
}
// If TargetPrefix is specified, make sure that Name starts with
@@ -430,8 +449,8 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
if (Name.size() < 6+TargetPrefix.size() ||
std::string(Name.begin() + 5, Name.begin() + 6 + TargetPrefix.size())
!= (TargetPrefix + "."))
- throw "Intrinsic '" + DefName + "' does not start with 'llvm." +
- TargetPrefix + ".'!";
+ PrintFatalError("Intrinsic '" + DefName + "' does not start with 'llvm." +
+ TargetPrefix + ".'!");
}
// Parse the list of return types.
@@ -463,7 +482,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
// Reject invalid types.
if (VT == MVT::isVoid)
- throw "Intrinsic '" + DefName + " has void in result type list!";
+ PrintFatalError("Intrinsic '" + DefName + " has void in result type list!");
IS.RetVTs.push_back(VT);
IS.RetTypeDefs.push_back(TyEl);
@@ -497,7 +516,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
// Reject invalid types.
if (VT == MVT::isVoid && i != e-1 /*void at end means varargs*/)
- throw "Intrinsic '" + DefName + " has void in result type list!";
+ PrintFatalError("Intrinsic '" + DefName + " has void in result type list!");
IS.ParamVTs.push_back(VT);
IS.ParamTypeDefs.push_back(TyEl);
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.h b/contrib/llvm/utils/TableGen/CodeGenTarget.h
index 2f8cee4..ddeecee 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.h
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.h
@@ -9,8 +9,8 @@
//
// This file defines wrappers for the Target class and related global
// functionality. This makes it easier to access the data and provides a single
-// place that needs to check it for validity. All of these classes throw
-// exceptions on error conditions.
+// place that needs to check it for validity. All of these classes abort
+// on error conditions.
//
//===----------------------------------------------------------------------===//
@@ -177,6 +177,10 @@ public:
///
bool isLittleEndianEncoding() const;
+ /// guessInstructionProperties - should we just guess unset instruction
+ /// properties?
+ bool guessInstructionProperties() const;
+
private:
void ComputeInstrsByEnum() const;
};
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcher.h b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
index 3ca16f0..7c6ce3b 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcher.h
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
@@ -99,8 +99,6 @@ public:
OwningPtr<Matcher> &getNextPtr() { return Next; }
- static inline bool classof(const Matcher *) { return true; }
-
bool isEqual(const Matcher *M) const {
if (getKind() != M->getKind()) return false;
return isEqualImpl(M);
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index 1445edb..713f174 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -598,7 +598,7 @@ EmitMatcherList(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit pattern predicates.
if (!PatternPredicates.empty()) {
- OS << "bool CheckPatternPredicate(unsigned PredNo) const {\n";
+ OS << "virtual bool CheckPatternPredicate(unsigned PredNo) const {\n";
OS << " switch (PredNo) {\n";
OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n";
for (unsigned i = 0, e = PatternPredicates.size(); i != e; ++i)
@@ -616,7 +616,8 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
PFsByName[I->first->getName()] = I->second;
if (!NodePredicates.empty()) {
- OS << "bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {\n";
+ OS << "virtual bool CheckNodePredicate(SDNode *Node,\n";
+ OS << " unsigned PredNo) const {\n";
OS << " switch (PredNo) {\n";
OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n";
for (unsigned i = 0, e = NodePredicates.size(); i != e; ++i) {
@@ -635,8 +636,8 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit CompletePattern matchers.
// FIXME: This should be const.
if (!ComplexPatterns.empty()) {
- OS << "bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,\n";
- OS << " unsigned PatternNo,\n";
+ OS << "virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent,\n";
+ OS << " SDValue N, unsigned PatternNo,\n";
OS << " SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {\n";
OS << " unsigned NextRes = Result.size();\n";
OS << " switch (PatternNo) {\n";
@@ -676,7 +677,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit SDNodeXForm handlers.
// FIXME: This should be const.
if (!NodeXForms.empty()) {
- OS << "SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {\n";
+ OS << "virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {\n";
OS << " switch (XFormNo) {\n";
OS << " default: llvm_unreachable(\"Invalid xform # in table?\");\n";
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index aed222c..573f558 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -10,6 +10,7 @@
#include "DAGISelMatcher.h"
#include "CodeGenDAGPatterns.h"
#include "CodeGenRegisters.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -172,15 +173,10 @@ void MatcherGen::InferPossibleTypes() {
// diagnostics, which we know are impossible at this point.
TreePattern &TP = *CGP.pf_begin()->second;
- try {
- bool MadeChange = true;
- while (MadeChange)
- MadeChange = PatWithNoTypes->ApplyTypeConstraints(TP,
- true/*Ignore reg constraints*/);
- } catch (...) {
- errs() << "Type constraint application shouldn't fail!";
- abort();
- }
+ bool MadeChange = true;
+ while (MadeChange)
+ MadeChange = PatWithNoTypes->ApplyTypeConstraints(TP,
+ true/*Ignore reg constraints*/);
}
@@ -203,7 +199,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
assert(N->isLeaf() && "Not a leaf?");
// Direct match against an integer constant.
- if (IntInit *II = dynamic_cast<IntInit*>(N->getLeafValue())) {
+ if (IntInit *II = dyn_cast<IntInit>(N->getLeafValue())) {
// If this is the root of the dag we're matching, we emit a redundant opcode
// check to ensure that this gets folded into the normal top-level
// OpcodeSwitch.
@@ -215,7 +211,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
return AddMatcher(new CheckIntegerMatcher(II->getValue()));
}
- DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue());
+ DefInit *DI = dyn_cast<DefInit>(N->getLeafValue());
if (DI == 0) {
errs() << "Unknown leaf kind: " << *N << "\n";
abort();
@@ -283,7 +279,7 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
N->getOperator()->getName() == "or") &&
N->getChild(1)->isLeaf() && N->getChild(1)->getPredicateFns().empty() &&
N->getPredicateFns().empty()) {
- if (IntInit *II = dynamic_cast<IntInit*>(N->getChild(1)->getLeafValue())) {
+ if (IntInit *II = dyn_cast<IntInit>(N->getChild(1)->getLeafValue())) {
if (!isPowerOf2_32(II->getValue())) { // Don't bother with single bits.
// If this is at the root of the pattern, we emit a redundant
// CheckOpcode so that the following checks get factored properly under
@@ -572,14 +568,14 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps) {
assert(N->isLeaf() && "Must be a leaf");
- if (IntInit *II = dynamic_cast<IntInit*>(N->getLeafValue())) {
+ if (IntInit *II = dyn_cast<IntInit>(N->getLeafValue())) {
AddMatcher(new EmitIntegerMatcher(II->getValue(), N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
// If this is an explicit register reference, handle it.
- if (DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue())) {
+ if (DefInit *DI = dyn_cast<DefInit>(N->getLeafValue())) {
Record *Def = DI->getDef();
if (Def->isSubClassOf("Register")) {
const CodeGenRegister *Reg =
@@ -727,8 +723,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
// Determine what to emit for this operand.
Record *OperandNode = II.Operands[InstOpNo].Rec;
- if ((OperandNode->isSubClassOf("PredicateOperand") ||
- OperandNode->isSubClassOf("OptionalDefOperand")) &&
+ if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
!CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
// This is a predicate or optional def operand; emit the
// 'default ops' operands.
@@ -877,7 +872,7 @@ void MatcherGen::EmitResultOperand(const TreePatternNode *N,
if (OpRec->isSubClassOf("SDNodeXForm"))
return EmitResultSDNodeXFormAsOperand(N, ResultOps);
errs() << "Unknown result node to emit code for: " << *N << '\n';
- throw std::string("Unknown node in result pattern!");
+ PrintFatalError("Unknown node in result pattern!");
}
void MatcherGen::EmitResultCode() {
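
The dynamic_cast-to-dyn_cast rewrites in this file (and throughout the patch) swap compiler RTTI for LLVM's hand-rolled kind system, which works under -fno-rtti by dispatching on a stored kind via classof(). A self-contained sketch of the pattern; dyn_cast/cast here are simplified stand-ins for the real llvm/Support/Casting.h templates:

// Minimal model of LLVM-style RTTI: dyn_cast<> returns null on a kind
// mismatch, while cast<> asserts that the kind is already known.
#include <cassert>
#include <cstdio>

struct Init {
  enum InitKind { IK_IntInit, IK_DefInit } Kind;
  Init(InitKind K) : Kind(K) {}
};

struct IntInit : Init {
  long Value;
  IntInit(long V) : Init(IK_IntInit), Value(V) {}
  static bool classof(const Init *I) { return I->Kind == IK_IntInit; }
};

// Hand-rolled stand-ins for llvm::dyn_cast / llvm::cast.
template <typename To, typename From> To *dyn_cast(From *F) {
  return To::classof(F) ? static_cast<To *>(F) : 0;
}
template <typename To, typename From> To *cast(From *F) {
  assert(To::classof(F) && "cast<> on wrong type");
  return static_cast<To *>(F);
}

int main() {
  IntInit II(42);
  Init *Leaf = &II;
  if (IntInit *I = dyn_cast<IntInit>(Leaf))   // checked: null on mismatch
    std::printf("int leaf: %ld\n", I->Value);
  cast<IntInit>(Leaf);                        // unchecked: asserts instead
}
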
diff --git a/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
index 8bfecea..0ad25a5 100644
--- a/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
@@ -17,6 +17,7 @@
#include "CodeGenTarget.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <list>
@@ -74,6 +75,8 @@ public:
// Another way of thinking about this transition is we are mapping an NDFA with
// two states [0x01] and [0x10] into a DFA with a single state [0x01, 0x10].
//
+// A State instance also contains a collection of transitions from that state:
+// a map from inputs to new states.
//
namespace {
class State {
@@ -82,10 +85,16 @@ class State {
int stateNum;
bool isInitial;
std::set<unsigned> stateInfo;
+ typedef std::map<unsigned, State *> TransitionMap;
+ TransitionMap Transitions;
State();
State(const State &S);
+ bool operator<(const State &s) const {
+ return stateNum < s.stateNum;
+ }
+
//
// canAddInsnClass - Returns true if an instruction of type InsnClass is a
// valid transition from this state, i.e., can an instruction of type InsnClass
@@ -100,38 +109,18 @@ class State {
// which are possible from this state (PossibleStates).
//
void AddInsnClass(unsigned InsnClass, std::set<unsigned> &PossibleStates);
+ //
+ // addTransition - Add a transition from this state given the input InsnClass
+ //
+ void addTransition(unsigned InsnClass, State *To);
+ //
+ // hasTransition - Returns true if there is a transition from this state
+ // given the input InsnClass
+ //
+ bool hasTransition(unsigned InsnClass);
};
} // End anonymous namespace.
-
-namespace {
-struct Transition {
- public:
- static int currentTransitionNum;
- int transitionNum;
- State *from;
- unsigned input;
- State *to;
-
- Transition(State *from_, unsigned input_, State *to_);
-};
-} // End anonymous namespace.
-
-
-//
-// Comparators to keep set of states sorted.
-//
-namespace {
-struct ltState {
- bool operator()(const State *s1, const State *s2) const;
-};
-
-struct ltTransition {
- bool operator()(const Transition *s1, const Transition *s2) const;
-};
-} // End anonymous namespace.
-
-
//
// class DFA: deterministic finite automaton for processor resource tracking.
//
@@ -139,36 +128,19 @@ namespace {
class DFA {
public:
DFA();
+ ~DFA();
// Set of states. Need to keep this sorted to emit the transition table.
- std::set<State*, ltState> states;
+ typedef std::set<State *, less_ptr<State> > StateSet;
+ StateSet states;
- // Map from a state to the list of transitions with that state as source.
- std::map<State*, std::set<Transition*, ltTransition>, ltState>
- stateTransitions;
State *currentState;
- // Highest valued Input seen.
- unsigned LargestInput;
-
//
// Modify the DFA.
//
void initialize();
void addState(State *);
- void addTransition(Transition *);
-
- //
- // getTransition - Return the state when a transition is made from
- // State From with Input I. If a transition is not found, return NULL.
- //
- State *getTransition(State *, unsigned);
-
- //
- // isValidTransition: Predicate that checks if there is a valid transition
- // from state From on input InsnClass.
- //
- bool isValidTransition(State *From, unsigned InsnClass);
//
// writeTable: Print out a table representing the DFA.
@@ -179,7 +151,7 @@ public:
//
-// Constructors for State, Transition, and DFA
+// Constructors and destructors for State and DFA
//
State::State() :
stateNum(currentStateNum++), isInitial(false) {}
@@ -189,22 +161,27 @@ State::State(const State &S) :
stateNum(currentStateNum++), isInitial(S.isInitial),
stateInfo(S.stateInfo) {}
+DFA::DFA(): currentState(NULL) {}
-Transition::Transition(State *from_, unsigned input_, State *to_) :
- transitionNum(currentTransitionNum++), from(from_), input(input_),
- to(to_) {}
-
-
-DFA::DFA() :
- LargestInput(0) {}
-
+DFA::~DFA() {
+ DeleteContainerPointers(states);
+}
-bool ltState::operator()(const State *s1, const State *s2) const {
- return (s1->stateNum < s2->stateNum);
+//
+// addTransition - Add a transition from this state given the input InsnClass
+//
+void State::addTransition(unsigned InsnClass, State *To) {
+ assert(!Transitions.count(InsnClass) &&
+ "Cannot have multiple transitions for the same input");
+ Transitions[InsnClass] = To;
}
-bool ltTransition::operator()(const Transition *s1, const Transition *s2) const {
- return (s1->input < s2->input);
+//
+// hasTransition - Returns true if there is a transition from this state
+// given the input InsnClass
+//
+bool State::hasTransition(unsigned InsnClass) {
+ return Transitions.count(InsnClass) > 0;
}
//
@@ -272,6 +249,7 @@ bool State::canAddInsnClass(unsigned InsnClass) const {
void DFA::initialize() {
+ assert(currentState && "Missing current state");
currentState->isInitial = true;
}
@@ -282,47 +260,7 @@ void DFA::addState(State *S) {
}
-void DFA::addTransition(Transition *T) {
- // Update LargestInput.
- if (T->input > LargestInput)
- LargestInput = T->input;
-
- // Add the new transition.
- bool Added = stateTransitions[T->from].insert(T).second;
- assert(Added && "Cannot have multiple states for the same input");
- (void)Added;
-}
-
-
-//
-// getTransition - Return the state when a transition is made from
-// State From with Input I. If a transition is not found, return NULL.
-//
-State *DFA::getTransition(State *From, unsigned I) {
- // Do we have a transition from state From?
- if (!stateTransitions.count(From))
- return NULL;
-
- // Do we have a transition from state From with Input I?
- Transition TVal(NULL, I, NULL);
- // Do not count this temporal instance
- Transition::currentTransitionNum--;
- std::set<Transition*, ltTransition>::iterator T =
- stateTransitions[From].find(&TVal);
- if (T != stateTransitions[From].end())
- return (*T)->to;
-
- return NULL;
-}
-
-
-bool DFA::isValidTransition(State *From, unsigned InsnClass) {
- return (getTransition(From, InsnClass) != NULL);
-}
-
-
int State::currentStateNum = 0;
-int Transition::currentTransitionNum = 0;
DFAPacketizerEmitter::DFAPacketizerEmitter(RecordKeeper &R):
TargetName(CodeGenTarget(R).getName()),
@@ -341,7 +279,7 @@ DFAPacketizerEmitter::DFAPacketizerEmitter(RecordKeeper &R):
//
//
void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) {
- std::set<State*, ltState>::iterator SI = states.begin();
+ DFA::StateSet::iterator SI = states.begin();
// This table provides a map to the beginning of the transitions for State s
// in DFAStateInputTable.
std::vector<int> StateEntry(states.size());
@@ -353,18 +291,16 @@ void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) {
// to construct the StateEntry table.
int ValidTransitions = 0;
for (unsigned i = 0; i < states.size(); ++i, ++SI) {
+ assert (((*SI)->stateNum == (int) i) && "Mismatch in state numbers");
StateEntry[i] = ValidTransitions;
- for (unsigned j = 0; j <= LargestInput; ++j) {
- assert (((*SI)->stateNum == (int) i) && "Mismatch in state numbers");
- State *To = getTransition(*SI, j);
- if (To == NULL)
- continue;
-
- OS << "{" << j << ", "
- << To->stateNum
+ for (State::TransitionMap::iterator
+ II = (*SI)->Transitions.begin(), IE = (*SI)->Transitions.end();
+ II != IE; ++II) {
+ OS << "{" << II->first << ", "
+ << II->second->stateNum
<< "}, ";
- ++ValidTransitions;
}
+ ValidTransitions += (*SI)->Transitions.size();
// If there are no valid transitions from this state, we need a sentinel
// transition.
@@ -539,7 +475,7 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) {
// If we haven't already created a transition for this input
// and the state can accommodate this InsnClass, create a transition.
//
- if (!D.getTransition(current, InsnClass) &&
+ if (!current->hasTransition(InsnClass) &&
current->canAddInsnClass(InsnClass)) {
State *NewState = NULL;
current->AddInsnClass(InsnClass, NewStateResources);
@@ -559,10 +495,8 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) {
Visited[NewStateResources] = NewState;
WorkList.push_back(NewState);
}
-
- Transition *NewTransition = new Transition(current, InsnClass,
- NewState);
- D.addTransition(NewTransition);
+
+ current->addTransition(InsnClass, NewState);
}
}
}
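
The net shape of the new bookkeeping, as a self-contained toy: each State now owns its outgoing edges in a map keyed by input class, replacing the global Transition objects and the ltState/ltTransition comparators (names below mirror the patch; the states themselves are hypothetical).

// Toy version of the reworked DFA bookkeeping: transition lookup is one
// map query on the source state instead of a search through a shared set.
#include <cassert>
#include <cstdio>
#include <map>

struct State {
  int stateNum;
  std::map<unsigned, State *> Transitions; // input class -> successor

  void addTransition(unsigned InsnClass, State *To) {
    assert(!Transitions.count(InsnClass) &&
           "Cannot have multiple transitions for the same input");
    Transitions[InsnClass] = To;
  }
  bool hasTransition(unsigned InsnClass) const {
    return Transitions.count(InsnClass) > 0;
  }
};

int main() {
  State S0 = {0, std::map<unsigned, State *>()};
  State S1 = {1, std::map<unsigned, State *>()};
  S0.addTransition(0x01, &S1);
  std::printf("has 0x01: %d, has 0x10: %d\n",
              (int)S0.hasTransition(0x01), (int)S0.hasTransition(0x10));
}
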
diff --git a/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp b/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
index 826465a..2d11d24 100644
--- a/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
@@ -117,11 +117,9 @@ void EmitDisassembler(RecordKeeper &Records, raw_ostream &OS) {
for (unsigned i = 0, e = numberedInstructions.size(); i != e; ++i)
RecognizableInstr::processInstr(Tables, *numberedInstructions[i], i);
- // FIXME: As long as we are using exceptions, might as well drop this to the
- // actual conflict site.
if (Tables.hasConflicts())
- throw TGError(Target.getTargetRecord()->getLoc(),
- "Primary decode conflict");
+ PrintFatalError(Target.getTargetRecord()->getLoc(),
+ "Primary decode conflict");
Tables.emit(OS);
return;
diff --git a/contrib/llvm/utils/TableGen/EDEmitter.cpp b/contrib/llvm/utils/TableGen/EDEmitter.cpp
index 0c8b28d..ea25450 100644
--- a/contrib/llvm/utils/TableGen/EDEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/EDEmitter.cpp
@@ -19,6 +19,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <string>
@@ -358,8 +359,8 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
/// X86PopulateOperands - Handles all the operands in an X86 instruction, adding
/// the appropriate flags to their descriptors
///
-/// @operandFlags - A reference the array of operand flag objects
-/// @inst - The instruction to use as a source of information
+/// \param operandTypes A reference to the array of operand type objects
+/// \param inst The instruction to use as a source of information
static void X86PopulateOperands(
LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
const CodeGenInstruction &inst) {
@@ -385,11 +386,12 @@ static void X86PopulateOperands(
/// decorate1 - Decorates a named operand with a new flag
///
-/// @operandFlags - The array of operand flag objects, which don't have names
-/// @inst - The CodeGenInstruction, which provides a way to translate
-/// between names and operand indices
-/// @opName - The name of the operand
-/// @flag - The name of the flag to add
+/// \param operandFlags The array of operand flag objects, which don't have
+/// names
+/// \param inst The CodeGenInstruction, which provides a way to
+///                     translate between names and operand indices
+/// \param opName The name of the operand
+/// \param opFlag The name of the flag to add
static inline void decorate1(
FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
const CodeGenInstruction &inst,
@@ -438,9 +440,9 @@ static inline void decorate1(
/// instruction to determine what sort of an instruction it is and then adds
/// the appropriate flags to the instruction and its operands
///
-/// @arg instType - A reference to the type for the instruction as a whole
-/// @arg operandFlags - A reference to the array of operand flag object pointers
-/// @arg inst - A reference to the original instruction
+/// \param instType A reference to the type for the instruction as a whole
+/// \param operandFlags A reference to the array of operand flag object pointers
+/// \param inst A reference to the original instruction
static void X86ExtractSemantics(
LiteralConstantEmitter &instType,
FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
@@ -567,8 +569,8 @@ static void X86ExtractSemantics(
/// ARMFlagFromOpName - Processes the name of a single ARM operand (which is
/// actually its type) and translates it into an operand type
///
-/// @arg type - The type object to set
-/// @arg name - The name of the operand
+/// \param type The type object to set
+/// \param name The name of the operand
static int ARMFlagFromOpName(LiteralConstantEmitter *type,
const std::string &name) {
REG("GPR");
@@ -750,8 +752,8 @@ static int ARMFlagFromOpName(LiteralConstantEmitter *type,
/// ARMPopulateOperands - Handles all the operands in an ARM instruction, adding
/// the appropriate flags to their descriptors
///
-/// @operandFlags - A reference the array of operand flag objects
-/// @inst - The instruction to use as a source of information
+/// \param operandTypes A reference to the array of operand type objects
+/// \param inst The instruction to use as a source of information
static void ARMPopulateOperands(
LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
const CodeGenInstruction &inst) {
@@ -776,7 +778,7 @@ static void ARMPopulateOperands(
errs() << "Operand type: " << rec.getName() << '\n';
errs() << "Operand name: " << operandInfo.Name << '\n';
errs() << "Instruction name: " << inst.TheDef->getName() << '\n';
- throw("Unhandled type in EDEmitter");
+ PrintFatalError("Unhandled type in EDEmitter");
}
}
}
@@ -790,10 +792,10 @@ static void ARMPopulateOperands(
/// instruction to determine what sort of an instruction it is and then adds
/// the appropriate flags to the instruction and its operands
///
-/// @arg instType - A reference to the type for the instruction as a whole
-/// @arg operandTypes - A reference to the array of operand type object pointers
-/// @arg operandFlags - A reference to the array of operand flag object pointers
-/// @arg inst - A reference to the original instruction
+/// \param instType A reference to the type for the instruction as a whole
+/// \param operandTypes A reference to the array of operand type object pointers
+/// \param operandFlags A reference to the array of operand flag object pointers
+/// \param inst A reference to the original instruction
static void ARMExtractSemantics(
LiteralConstantEmitter &instType,
LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
@@ -831,8 +833,8 @@ static void ARMExtractSemantics(
/// populateInstInfo - Fills an array of InstInfos with information about each
/// instruction in a target
///
-/// @arg infoArray - The array of InstInfo objects to populate
-/// @arg target - The CodeGenTarget to use as a source of instructions
+/// \param infoArray The array of InstInfo objects to populate
+/// \param target The CodeGenTarget to use as a source of instructions
static void populateInstInfo(CompoundConstantEmitter &infoArray,
CodeGenTarget &target) {
const std::vector<const CodeGenInstruction*> &numberedInstructions =
diff --git a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
index ca784d0..8b1e7f9 100644
--- a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -245,7 +245,7 @@ struct OperandsSignature {
if (Op->getType(0) != VT)
return false;
- DefInit *OpDI = dynamic_cast<DefInit*>(Op->getLeafValue());
+ DefInit *OpDI = dyn_cast<DefInit>(Op->getLeafValue());
if (!OpDI)
return false;
Record *OpLeafRec = OpDI->getDef();
@@ -406,13 +406,12 @@ static std::string PhyRegForNode(TreePatternNode *Op,
if (!Op->isLeaf())
return PhysReg;
- DefInit *OpDI = dynamic_cast<DefInit*>(Op->getLeafValue());
- Record *OpLeafRec = OpDI->getDef();
+ Record *OpLeafRec = cast<DefInit>(Op->getLeafValue())->getDef();
if (!OpLeafRec->isSubClassOf("Register"))
return PhysReg;
- PhysReg += static_cast<StringInit*>(OpLeafRec->getValue( \
- "Namespace")->getValue())->getValue();
+ PhysReg += cast<StringInit>(OpLeafRec->getValue("Namespace")->getValue())
+ ->getValue();
PhysReg += "::";
PhysReg += Target.getRegBank().getReg(OpLeafRec)->getName();
return PhysReg;
@@ -473,7 +472,7 @@ void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) {
// a bit too complicated for now.
if (!Dst->getChild(1)->isLeaf()) continue;
- DefInit *SR = dynamic_cast<DefInit*>(Dst->getChild(1)->getLeafValue());
+ DefInit *SR = dyn_cast<DefInit>(Dst->getChild(1)->getLeafValue());
if (SR)
SubRegNo = getQualifiedName(SR->getDef());
else
@@ -550,7 +549,7 @@ void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) {
};
if (SimplePatterns[Operands][OpcodeName][VT][RetVT].count(PredicateCheck))
- throw TGError(Pattern.getSrcRecord()->getLoc(),
+ PrintFatalError(Pattern.getSrcRecord()->getLoc(),
"Duplicate record in FastISel table!");
SimplePatterns[Operands][OpcodeName][VT][RetVT][PredicateCheck] = Memo;
diff --git a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
index e89c393..5cabcad 100644
--- a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
@@ -15,6 +15,7 @@
#define DEBUG_TYPE "decoder-emitter"
#include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallString.h"
@@ -142,7 +143,7 @@ static int Value(bit_value_t V) {
return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1);
}
static bit_value_t bitFromBits(const BitsInit &bits, unsigned index) {
- if (BitInit *bit = dynamic_cast<BitInit*>(bits.getBit(index)))
+ if (BitInit *bit = dyn_cast<BitInit>(bits.getBit(index)))
return bit->getValue() ? BIT_TRUE : BIT_FALSE;
// The bit is uninitialized.
@@ -741,7 +742,7 @@ void FixedLenDecoderEmitter::emitTable(formatted_raw_ostream &OS,
switch (*I) {
default:
- throw "invalid decode table opcode";
+ PrintFatalError("invalid decode table opcode");
case MCD::OPC_ExtractField: {
++I;
unsigned Start = *I++;
@@ -1757,8 +1758,8 @@ static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc,
// for decoding register classes.
// FIXME: This need to be extended to handle instructions with custom
// decoder methods, and operands with (simple) MIOperandInfo's.
- TypedInit *TI = dynamic_cast<TypedInit*>(NI->first);
- RecordRecTy *Type = dynamic_cast<RecordRecTy*>(TI->getType());
+ TypedInit *TI = cast<TypedInit>(NI->first);
+ RecordRecTy *Type = cast<RecordRecTy>(TI->getType());
Record *TypeRecord = Type->getRecord();
bool isReg = false;
if (TypeRecord->isSubClassOf("RegisterOperand"))
@@ -1770,7 +1771,7 @@ static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc,
RecordVal *DecoderString = TypeRecord->getValue("DecoderMethod");
StringInit *String = DecoderString ?
- dynamic_cast<StringInit*>(DecoderString->getValue()) : 0;
+ dyn_cast<StringInit>(DecoderString->getValue()) : 0;
if (!isReg && String && String->getValue() != "")
Decoder = String->getValue();
@@ -1781,11 +1782,11 @@ static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc,
for (unsigned bi = 0; bi < Bits.getNumBits(); ++bi) {
VarInit *Var = 0;
- VarBitInit *BI = dynamic_cast<VarBitInit*>(Bits.getBit(bi));
+ VarBitInit *BI = dyn_cast<VarBitInit>(Bits.getBit(bi));
if (BI)
- Var = dynamic_cast<VarInit*>(BI->getVariable());
+ Var = dyn_cast<VarInit>(BI->getBitVar());
else
- Var = dynamic_cast<VarInit*>(Bits.getBit(bi));
+ Var = dyn_cast<VarInit>(Bits.getBit(bi));
if (!Var) {
if (Base != ~0U) {
@@ -1882,7 +1883,7 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) {
<< " uint64_t Bits = STI.getFeatureBits();\n"
<< "\n"
<< " const uint8_t *Ptr = DecodeTable;\n"
- << " uint32_t CurFieldValue;\n"
+ << " uint32_t CurFieldValue = 0;\n"
<< " DecodeStatus S = MCDisassembler::Success;\n"
<< " for (;;) {\n"
<< " ptrdiff_t Loc = Ptr - DecodeTable;\n"
diff --git a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
index b41ad94..48d41d7 100644
--- a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -16,8 +16,10 @@
#include "CodeGenDAGPatterns.h"
#include "CodeGenSchedule.h"
#include "CodeGenTarget.h"
+#include "TableGenBackends.h"
#include "SequenceToOffsetTable.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
@@ -89,7 +91,7 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
for (unsigned j = 0, e = Inst.Operands[i].MINumOperands; j != e; ++j) {
OperandList.push_back(Inst.Operands[i]);
- Record *OpR = dynamic_cast<DefInit*>(MIOI->getArg(j))->getDef();
+ Record *OpR = cast<DefInit>(MIOI->getArg(j))->getDef();
OperandList.back().Rec = OpR;
}
}
@@ -299,16 +301,15 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
const OperandInfoMapTy &OpInfo,
raw_ostream &OS) {
int MinOperands = 0;
- if (!Inst.Operands.size() == 0)
+ if (!Inst.Operands.empty())
// Each logical operand can be multiple MI operands.
MinOperands = Inst.Operands.back().MIOperandNo +
Inst.Operands.back().MINumOperands;
- Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary");
OS << " { ";
OS << Num << ",\t" << MinOperands << ",\t"
<< Inst.Operands.NumDefs << ",\t"
- << SchedModels.getItinClassIdx(ItinDef) << ",\t"
+ << SchedModels.getSchedClassIdx(Inst) << ",\t"
<< Inst.TheDef->getValueAsInt("Size") << ",\t0";
// Emit all of the target-independent flags...
@@ -343,13 +344,14 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
// Emit all of the target-specific flags...
BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags");
- if (!TSF) throw "no TSFlags?";
+ if (!TSF)
+ PrintFatalError("no TSFlags?");
uint64_t Value = 0;
for (unsigned i = 0, e = TSF->getNumBits(); i != e; ++i) {
- if (BitInit *Bit = dynamic_cast<BitInit*>(TSF->getBit(i)))
+ if (BitInit *Bit = dyn_cast<BitInit>(TSF->getBit(i)))
Value |= uint64_t(Bit->getValue()) << i;
else
- throw "Invalid TSFlags bit in " + Inst.TheDef->getName();
+ PrintFatalError("Invalid TSFlags bit in " + Inst.TheDef->getName());
}
OS << ", 0x";
OS.write_hex(Value);
@@ -416,6 +418,7 @@ namespace llvm {
void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS) {
InstrInfoEmitter(RK).run(OS);
+ EmitMapTable(RK, OS);
}
} // End llvm namespace
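
One hunk above deserves a closer look: the old `!Inst.Operands.size() == 0` parses as `(!Inst.Operands.size()) == 0`, which happens to evaluate to the intended `!empty()`, so this is a readability and compiler-warning fix rather than a behavior change. A quick standalone demonstration:

// `!x.size() == 0` negates first, then compares: (!3) == 0 -> 0 == 0 -> true.
// It accidentally equals `size() != 0`, but the intent only survives by
// luck, and modern compilers flag the construct.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Ops;
  Ops.push_back(1); Ops.push_back(2); Ops.push_back(3);
  bool Suspicious = !Ops.size() == 0;  // (!3) == 0  ->  true
  bool Intended   = !Ops.empty();      // true, and says what it means
  std::printf("%d %d\n", (int)Suspicious, (int)Intended);
}
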
diff --git a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
index 155d1ab..fe55242 100644
--- a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -15,6 +15,7 @@
#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/StringMatcher.h"
#include "llvm/TableGen/TableGenBackend.h"
@@ -249,7 +250,7 @@ static void EncodeFixedValueType(MVT::SimpleValueType VT,
if (EVT(VT).isInteger()) {
unsigned BitWidth = EVT(VT).getSizeInBits();
switch (BitWidth) {
- default: throw "unhandled integer type width in intrinsic!";
+ default: PrintFatalError("unhandled integer type width in intrinsic!");
case 1: return Sig.push_back(IIT_I1);
case 8: return Sig.push_back(IIT_I8);
case 16: return Sig.push_back(IIT_I16);
@@ -259,7 +260,7 @@ static void EncodeFixedValueType(MVT::SimpleValueType VT,
}
switch (VT) {
- default: throw "unhandled MVT in intrinsic!";
+ default: PrintFatalError("unhandled MVT in intrinsic!");
case MVT::f32: return Sig.push_back(IIT_F32);
case MVT::f64: return Sig.push_back(IIT_F64);
case MVT::Metadata: return Sig.push_back(IIT_METADATA);
@@ -328,7 +329,7 @@ static void EncodeFixedType(Record *R, std::vector<unsigned char> &ArgCodes,
if (EVT(VT).isVector()) {
EVT VVT = VT;
switch (VVT.getVectorNumElements()) {
- default: throw "unhandled vector type width in intrinsic!";
+ default: PrintFatalError("unhandled vector type width in intrinsic!");
case 2: Sig.push_back(IIT_V2); break;
case 4: Sig.push_back(IIT_V4); break;
case 8: Sig.push_back(IIT_V8); break;
@@ -510,10 +511,10 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
OS << "// Add parameter attributes that are not common to all intrinsics.\n";
OS << "#ifdef GET_INTRINSIC_ATTRIBUTES\n";
if (TargetOnly)
- OS << "static AttrListPtr getAttributes(" << TargetPrefix
+ OS << "static AttrListPtr getAttributes(LLVMContext &C, " << TargetPrefix
<< "Intrinsic::ID id) {\n";
else
- OS << "AttrListPtr Intrinsic::getAttributes(ID id) {\n";
+ OS << "AttrListPtr Intrinsic::getAttributes(LLVMContext &C, ID id) {\n";
// Compute the maximum number of attribute arguments and the map
typedef std::map<const CodeGenIntrinsic*, unsigned,
@@ -547,6 +548,7 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
OS << " AttributeWithIndex AWI[" << maxArgAttrs+1 << "];\n";
OS << " unsigned NumAttrs = 0;\n";
OS << " if (id != 0) {\n";
+ OS << " SmallVector<Attributes::AttrVal, 8> AttrVec;\n";
OS << " switch(IntrinsicsToAttributesMap[id - ";
if (TargetOnly)
OS << "Intrinsic::num_intrinsics";
@@ -564,58 +566,49 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
unsigned numAttrs = 0;
// The argument attributes are already sorted by argument index.
- for (unsigned ai = 0, ae = intrinsic.ArgumentAttributes.size(); ai != ae;) {
- unsigned argNo = intrinsic.ArgumentAttributes[ai].first;
+ unsigned ai = 0, ae = intrinsic.ArgumentAttributes.size();
+ if (ae) {
+ while (ai != ae) {
+ unsigned argNo = intrinsic.ArgumentAttributes[ai].first;
- OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get("
- << argNo+1 << ", ";
+ OS << " AttrVec.clear();\n";
- bool moreThanOne = false;
+ do {
+ switch (intrinsic.ArgumentAttributes[ai].second) {
+ case CodeGenIntrinsic::NoCapture:
+ OS << " AttrVec.push_back(Attributes::NoCapture);\n";
+ break;
+ }
- do {
- if (moreThanOne) OS << '|';
+ ++ai;
+ } while (ai != ae && intrinsic.ArgumentAttributes[ai].first == argNo);
- switch (intrinsic.ArgumentAttributes[ai].second) {
- case CodeGenIntrinsic::NoCapture:
- OS << "Attribute::NoCapture";
- break;
- }
-
- ++ai;
- moreThanOne = true;
- } while (ai != ae && intrinsic.ArgumentAttributes[ai].first == argNo);
-
- OS << ");\n";
+ OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(C, "
+ << argNo+1 << ", AttrVec);\n";
+ }
}
ModRefKind modRef = getModRefKind(intrinsic);
if (!intrinsic.canThrow || modRef || intrinsic.isNoReturn) {
- OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(~0, ";
- bool Emitted = false;
- if (!intrinsic.canThrow) {
- OS << "Attribute::NoUnwind";
- Emitted = true;
- }
-
- if (intrinsic.isNoReturn) {
- if (Emitted) OS << '|';
- OS << "Attribute::NoReturn";
- Emitted = true;
- }
+ OS << " AttrVec.clear();\n";
+
+ if (!intrinsic.canThrow)
+ OS << " AttrVec.push_back(Attributes::NoUnwind);\n";
+ if (intrinsic.isNoReturn)
+ OS << " AttrVec.push_back(Attributes::NoReturn);\n";
switch (modRef) {
case MRK_none: break;
case MRK_readonly:
- if (Emitted) OS << '|';
- OS << "Attribute::ReadOnly";
+ OS << " AttrVec.push_back(Attributes::ReadOnly);\n";
break;
case MRK_readnone:
- if (Emitted) OS << '|';
- OS << "Attribute::ReadNone";
+ OS << " AttrVec.push_back(Attributes::ReadNone);\n";
break;
}
- OS << ");\n";
+ OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(C, "
+ << "AttrListPtr::FunctionIndex, AttrVec);\n";
}
if (numAttrs) {
@@ -628,7 +621,7 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
OS << " }\n";
OS << " }\n";
- OS << " return AttrListPtr::get(ArrayRef<AttributeWithIndex>(AWI, "
+ OS << " return AttrListPtr::get(C, ArrayRef<AttributeWithIndex>(AWI, "
"NumAttrs));\n";
OS << "}\n";
OS << "#endif // GET_INTRINSIC_ATTRIBUTES\n\n";
@@ -700,8 +693,8 @@ EmitIntrinsicToGCCBuiltinMap(const std::vector<CodeGenIntrinsic> &Ints,
if (!BIM.insert(std::make_pair(Ints[i].GCCBuiltinName,
Ints[i].EnumName)).second)
- throw "Intrinsic '" + Ints[i].TheDef->getName() +
- "': duplicate GCC builtin name!";
+ PrintFatalError("Intrinsic '" + Ints[i].TheDef->getName() +
+ "': duplicate GCC builtin name!");
}
}
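
The effect of the EmitAttributes rewrite above is clearest in the code it prints. The following is reconstructed from the OS << strings in the hunks (the concrete attribute values are chosen for illustration):

  // New scheme: build a vector per attribute position, then construct.
  SmallVector<Attributes::AttrVal, 8> AttrVec;
  AttrVec.clear();
  AttrVec.push_back(Attributes::NoCapture);
  AWI[0] = AttributeWithIndex::get(C, 1, AttrVec);        // argument 1
  AttrVec.clear();
  AttrVec.push_back(Attributes::NoUnwind);
  AttrVec.push_back(Attributes::ReadOnly);
  AWI[1] = AttributeWithIndex::get(C, AttrListPtr::FunctionIndex, AttrVec);
  return AttrListPtr::get(C, ArrayRef<AttributeWithIndex>(AWI, NumAttrs));

  // Old scheme: OR-ed enum bits, no LLVMContext.
  AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
  AWI[1] = AttributeWithIndex::get(~0, Attribute::NoUnwind|Attribute::ReadOnly);
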
diff --git a/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp b/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
index 8d9d419..64aaee7 100644
--- a/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
@@ -74,7 +74,7 @@ addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn,
IndexedMap<OpData> &OperandMap, unsigned BaseIdx) {
unsigned OpsAdded = 0;
for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i) {
- if (DefInit *DI = dynamic_cast<DefInit*>(Dag->getArg(i))) {
+ if (DefInit *DI = dyn_cast<DefInit>(Dag->getArg(i))) {
// Physical register reference. Explicit check for the special case
// "zero_reg" definition.
if (DI->getDef()->isSubClassOf("Register") ||
@@ -90,7 +90,7 @@ addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn,
// FIXME: We probably shouldn't ever get a non-zero BaseIdx here.
assert(BaseIdx == 0 && "Named subargument in pseudo expansion?!");
if (DI->getDef() != Insn.Operands[BaseIdx + i].Rec)
- throw TGError(Rec->getLoc(),
+ PrintFatalError(Rec->getLoc(),
"Pseudo operand type '" + DI->getDef()->getName() +
"' does not match expansion operand type '" +
Insn.Operands[BaseIdx + i].Rec->getName() + "'");
@@ -100,11 +100,11 @@ addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn,
for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I)
OperandMap[BaseIdx + i + I].Kind = OpData::Operand;
OpsAdded += Insn.Operands[i].MINumOperands;
- } else if (IntInit *II = dynamic_cast<IntInit*>(Dag->getArg(i))) {
+ } else if (IntInit *II = dyn_cast<IntInit>(Dag->getArg(i))) {
OperandMap[BaseIdx + i].Kind = OpData::Imm;
OperandMap[BaseIdx + i].Data.Imm = II->getValue();
++OpsAdded;
- } else if (DagInit *SubDag = dynamic_cast<DagInit*>(Dag->getArg(i))) {
+ } else if (DagInit *SubDag = dyn_cast<DagInit>(Dag->getArg(i))) {
// Just add the operands recursively. This is almost certainly
// a constant value for a complex operand (> 1 MI operand).
unsigned NewOps =
@@ -127,24 +127,24 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
assert(Dag && "Missing result instruction in pseudo expansion!");
DEBUG(dbgs() << " Result: " << *Dag << "\n");
- DefInit *OpDef = dynamic_cast<DefInit*>(Dag->getOperator());
+ DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
if (!OpDef)
- throw TGError(Rec->getLoc(), Rec->getName() +
+ PrintFatalError(Rec->getLoc(), Rec->getName() +
" has unexpected operator type!");
Record *Operator = OpDef->getDef();
if (!Operator->isSubClassOf("Instruction"))
- throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
- "' is not an instruction!");
+ PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
+ "' is not an instruction!");
CodeGenInstruction Insn(Operator);
if (Insn.isCodeGenOnly || Insn.isPseudo)
- throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
- "' cannot be another pseudo instruction!");
+ PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
+ "' cannot be another pseudo instruction!");
if (Insn.Operands.size() != Dag->getNumArgs())
- throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
- "' operand count mismatch");
+ PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
+ "' operand count mismatch");
unsigned NumMIOperands = 0;
for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i)
@@ -156,7 +156,7 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
// If there are more operands that weren't in the DAG, they have to
// be operands that have default values, or we have an error. Currently,
- // PredicateOperand and OptionalDefOperand both have default values.
+  // operands that are a subclass of OperandWithDefaultOps have default values.
// Validate that each result pattern argument has a matching (by name)
@@ -179,9 +179,9 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
StringMap<unsigned>::iterator SourceOp =
SourceOperands.find(Dag->getArgName(i));
if (SourceOp == SourceOperands.end())
- throw TGError(Rec->getLoc(),
- "Pseudo output operand '" + Dag->getArgName(i) +
- "' has no matching source operand.");
+ PrintFatalError(Rec->getLoc(),
+ "Pseudo output operand '" + Dag->getArgName(i) +
+ "' has no matching source operand.");
// Map the source operand to the destination operand index for each
// MachineInstr operand.
for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I)
@@ -267,7 +267,7 @@ void PseudoLoweringEmitter::emitLoweringEmitter(raw_ostream &o) {
void PseudoLoweringEmitter::run(raw_ostream &o) {
Record *ExpansionClass = Records.getClass("PseudoInstExpansion");
- Record *InstructionClass = Records.getClass("PseudoInstExpansion");
+ Record *InstructionClass = Records.getClass("Instruction");
assert(ExpansionClass && "PseudoInstExpansion class definition missing!");
assert(InstructionClass && "Instruction class definition missing!");
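
All of the throw-to-PrintFatalError conversions in this patch rely on PrintFatalError never returning: it reports the diagnostic and exits, so call sites keep their no-fall-through shape without C++ exceptions. A standalone sketch with a simplified stand-in (the real function takes SMLoc/Twine arguments and calls exit(1)):

// Simplified stand-in for llvm::PrintFatalError; the checked helper and
// its index arithmetic below are hypothetical.
#include <cstdio>
#include <cstdlib>
#include <string>

static void PrintFatalError(const std::string &Msg) {
  std::fprintf(stderr, "error: %s\n", Msg.c_str());
  std::exit(1); // never returns, so callers need no catch or fallback path
}

static unsigned checkedIndex(unsigned Idx, unsigned Size) {
  if (Idx >= Size)
    PrintFatalError("index out of range"); // was: throw TGError(Loc, ...)
  return Idx;
}

int main() { std::printf("%u\n", checkedIndex(2, 4)); }
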
diff --git a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index 02546df..95b6267 100644
--- a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -62,6 +62,8 @@ private:
void EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
const std::string &ClassName);
+ void emitComposeSubRegIndices(raw_ostream &OS, CodeGenRegBank &RegBank,
+ const std::string &ClassName);
};
} // End anonymous namespace
@@ -325,7 +327,7 @@ RegisterInfoEmitter::EmitRegMappingTables(raw_ostream &OS,
if (!V || !V->getValue())
continue;
- DefInit *DI = dynamic_cast<DefInit*>(V->getValue());
+ DefInit *DI = cast<DefInit>(V->getValue());
Record *Alias = DI->getDef();
DwarfRegNums[Reg] = DwarfRegNums[Alias];
}
@@ -530,6 +532,102 @@ static void printDiff16(raw_ostream &OS, uint16_t Val) {
OS << Val;
}
+// Try to combine Idx's compose map into Vec if it is compatible.
+// Return false if it's not possible.
+static bool combine(const CodeGenSubRegIndex *Idx,
+ SmallVectorImpl<CodeGenSubRegIndex*> &Vec) {
+ const CodeGenSubRegIndex::CompMap &Map = Idx->getComposites();
+ for (CodeGenSubRegIndex::CompMap::const_iterator
+ I = Map.begin(), E = Map.end(); I != E; ++I) {
+ CodeGenSubRegIndex *&Entry = Vec[I->first->EnumValue - 1];
+ if (Entry && Entry != I->second)
+ return false;
+ }
+
+ // All entries are compatible. Make it so.
+ for (CodeGenSubRegIndex::CompMap::const_iterator
+ I = Map.begin(), E = Map.end(); I != E; ++I)
+ Vec[I->first->EnumValue - 1] = I->second;
+ return true;
+}
+
+static const char *getMinimalTypeForRange(uint64_t Range) {
+ assert(Range < 0xFFFFFFFFULL && "Enum too large");
+ if (Range > 0xFFFF)
+ return "uint32_t";
+ if (Range > 0xFF)
+ return "uint16_t";
+ return "uint8_t";
+}
+
+void
+RegisterInfoEmitter::emitComposeSubRegIndices(raw_ostream &OS,
+ CodeGenRegBank &RegBank,
+ const std::string &ClName) {
+ ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
+ OS << "unsigned " << ClName
+ << "::composeSubRegIndicesImpl(unsigned IdxA, unsigned IdxB) const {\n";
+
+ // Many sub-register indexes are composition-compatible, meaning that
+ //
+ // compose(IdxA, IdxB) == compose(IdxA', IdxB)
+ //
+ // for many IdxA, IdxA' pairs. Not all sub-register indexes can be composed.
+  // The illegal entries can be used as wildcards to compress the table further.
+
+ // Map each Sub-register index to a compatible table row.
+ SmallVector<unsigned, 4> RowMap;
+ SmallVector<SmallVector<CodeGenSubRegIndex*, 4>, 4> Rows;
+
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+ unsigned Found = ~0u;
+ for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
+ if (combine(SubRegIndices[i], Rows[r])) {
+ Found = r;
+ break;
+ }
+ }
+ if (Found == ~0u) {
+ Found = Rows.size();
+ Rows.resize(Found + 1);
+ Rows.back().resize(SubRegIndices.size());
+ combine(SubRegIndices[i], Rows.back());
+ }
+ RowMap.push_back(Found);
+ }
+
+  // Output the row map if there are multiple rows.
+ if (Rows.size() > 1) {
+ OS << " static const " << getMinimalTypeForRange(Rows.size())
+ << " RowMap[" << SubRegIndices.size() << "] = {\n ";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
+ OS << RowMap[i] << ", ";
+ OS << "\n };\n";
+ }
+
+ // Output the rows.
+ OS << " static const " << getMinimalTypeForRange(SubRegIndices.size()+1)
+ << " Rows[" << Rows.size() << "][" << SubRegIndices.size() << "] = {\n";
+ for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
+ OS << " { ";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
+ if (Rows[r][i])
+ OS << Rows[r][i]->EnumValue << ", ";
+ else
+ OS << "0, ";
+ OS << "},\n";
+ }
+ OS << " };\n\n";
+
+ OS << " --IdxA; assert(IdxA < " << SubRegIndices.size() << ");\n"
+ << " --IdxB; assert(IdxB < " << SubRegIndices.size() << ");\n";
+ if (Rows.size() > 1)
+ OS << " return Rows[RowMap[IdxA]][IdxB];\n";
+ else
+ OS << " return Rows[0][IdxB];\n";
+ OS << "}\n\n";
+}
+
//
// runMCDesc - Print out MC register descriptions.
//
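
The row-sharing scheme in emitComposeSubRegIndices above is worth a small worked example. Two rows of the IdxA-by-IdxB composition table merge when their defined entries never disagree; entries for illegal compositions stay 0 and act as wildcards. A toy model of the combine step, with hypothetical index values:

// Two composition rows compress into one when every position where both
// are defined agrees; 0 entries (illegal compositions) match anything.
#include <cstdio>
#include <vector>

typedef std::vector<int> Row; // 0 = illegal composition (wildcard)

static bool combine(const Row &Idx, Row &Vec) {
  for (size_t i = 0; i != Idx.size(); ++i)
    if (Idx[i] && Vec[i] && Vec[i] != Idx[i])
      return false;               // genuine conflict: rows cannot merge
  for (size_t i = 0; i != Idx.size(); ++i)
    if (Idx[i])
      Vec[i] = Idx[i];            // compatible: fold Idx into Vec
  return true;
}

int main() {
  Row A(3, 0); A[1] = 3;          // defines only column 1
  Row B(3, 0); B[0] = 2;          // defines only column 0: disjoint, merges
  Row Merged(3, 0);
  bool Ok = combine(A, Merged) && combine(B, Merged);
  std::printf("merged=%d row={%d,%d,%d}\n",
              (int)Ok, Merged[0], Merged[1], Merged[2]);
}
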
@@ -751,7 +849,7 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
uint64_t Value = 0;
for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
- if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(b)))
+ if (BitInit *B = dyn_cast<BitInit>(BI->getBit(b)))
Value |= (uint64_t)B->getValue() << b;
}
OS << " " << Value << ",\n";
@@ -770,7 +868,7 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
<< TargetName << "RegDiffLists, "
<< TargetName << "RegStrings, "
<< TargetName << "SubRegIdxLists, "
- << SubRegIndices.size() << ",\n"
+ << (SubRegIndices.size() + 1) << ",\n"
<< " " << TargetName << "RegEncodingTable);\n\n";
EmitRegMapping(OS, Regs, false);
@@ -802,16 +900,17 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
<< " virtual bool needsStackRealignment(const MachineFunction &) const\n"
<< " { return false; }\n";
if (!RegBank.getSubRegIndices().empty()) {
- OS << " unsigned composeSubRegIndices(unsigned, unsigned) const;\n"
- << " const TargetRegisterClass *"
+ OS << " virtual unsigned composeSubRegIndicesImpl"
+ << "(unsigned, unsigned) const;\n"
+ << " virtual const TargetRegisterClass *"
"getSubClassWithSubReg(const TargetRegisterClass*, unsigned) const;\n";
}
- OS << " const RegClassWeight &getRegClassWeight("
+ OS << " virtual const RegClassWeight &getRegClassWeight("
<< "const TargetRegisterClass *RC) const;\n"
- << " unsigned getNumRegPressureSets() const;\n"
- << " const char *getRegPressureSetName(unsigned Idx) const;\n"
- << " unsigned getRegPressureSetLimit(unsigned Idx) const;\n"
- << " const int *getRegClassPressureSets("
+ << " virtual unsigned getNumRegPressureSets() const;\n"
+ << " virtual const char *getRegPressureSetName(unsigned Idx) const;\n"
+ << " virtual unsigned getRegPressureSetLimit(unsigned Idx) const;\n"
+ << " virtual const int *getRegClassPressureSets("
<< "const TargetRegisterClass *RC) const;\n"
<< "};\n\n";
@@ -876,15 +975,23 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
VTSeqs.emit(OS, printSimpleValueType, "MVT::Other");
OS << "};\n";
- // Emit SubRegIndex names, skipping 0
- OS << "\nstatic const char *const SubRegIndexTable[] = { \"";
+ // Emit SubRegIndex names, skipping 0.
+ OS << "\nstatic const char *const SubRegIndexNameTable[] = { \"";
for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
OS << SubRegIndices[i]->getName();
- if (i+1 != e)
+ if (i + 1 != e)
OS << "\", \"";
}
OS << "\" };\n\n";
+ // Emit SubRegIndex lane masks, including 0.
+ OS << "\nstatic const unsigned SubRegIndexLaneMaskTable[] = {\n ~0u,\n";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+ OS << format(" 0x%08x, // ", SubRegIndices[i]->LaneMask)
+ << SubRegIndices[i]->getName() << '\n';
+ }
+ OS << " };\n\n";
+
OS << "\n";
// Now that all of the structs have been emitted, emit the instances.
@@ -1046,31 +1153,8 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
std::string ClassName = Target.getName() + "GenRegisterInfo";
- // Emit composeSubRegIndices
- if (!SubRegIndices.empty()) {
- OS << "unsigned " << ClassName
- << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
- << " switch (IdxA) {\n"
- << " default:\n return IdxB;\n";
- for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
- bool Open = false;
- for (unsigned j = 0; j != e; ++j) {
- if (CodeGenSubRegIndex *Comp =
- SubRegIndices[i]->compose(SubRegIndices[j])) {
- if (!Open) {
- OS << " case " << SubRegIndices[i]->getQualifiedName()
- << ": switch(IdxB) {\n default: return IdxB;\n";
- Open = true;
- }
- OS << " case " << SubRegIndices[j]->getQualifiedName()
- << ": return " << Comp->getQualifiedName() << ";\n";
- }
- }
- if (Open)
- OS << " }\n";
- }
- OS << " }\n}\n\n";
- }
+ if (!SubRegIndices.empty())
+ emitComposeSubRegIndices(OS, RegBank, ClassName);
// Emit getSubClassWithSubReg.
if (!SubRegIndices.empty()) {
@@ -1084,7 +1168,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
else if (RegisterClasses.size() < UINT16_MAX)
OS << " static const uint16_t Table[";
else
- throw "Too many register classes.";
+ PrintFatalError("Too many register classes.");
OS << RegisterClasses.size() << "][" << SubRegIndices.size() << "] = {\n";
for (unsigned rci = 0, rce = RegisterClasses.size(); rci != rce; ++rci) {
const CodeGenRegisterClass &RC = *RegisterClasses[rci];
@@ -1122,7 +1206,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
<< "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour)\n"
<< " : TargetRegisterInfo(" << TargetName << "RegInfoDesc"
<< ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n"
- << " SubRegIndexTable) {\n"
+ << " SubRegIndexNameTable, SubRegIndexLaneMaskTable) {\n"
<< " InitMCRegisterInfo(" << TargetName << "RegDesc, "
<< Regs.size()+1 << ", RA,\n " << TargetName
<< "MCRegisterClasses, " << RegisterClasses.size() << ",\n"
@@ -1131,7 +1215,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
<< " " << TargetName << "RegDiffLists,\n"
<< " " << TargetName << "RegStrings,\n"
<< " " << TargetName << "SubRegIdxLists,\n"
- << " " << SubRegIndices.size() << ",\n"
+ << " " << SubRegIndices.size() + 1 << ",\n"
<< " " << TargetName << "RegEncodingTable);\n\n";
EmitRegMapping(OS, Regs, true);
diff --git a/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h b/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h
index d8ab2ee..d4db152 100644
--- a/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h
+++ b/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h
@@ -29,8 +29,8 @@ namespace llvm {
/// Compute the layout of a table that contains all the sequences, possibly by
/// reusing entries.
///
-/// @param SeqT The sequence container. (vector or string).
-/// @param Less A stable comparator for SeqT elements.
+/// @tparam SeqT The sequence container (vector or string).
+/// @tparam Less A stable comparator for SeqT elements.
template<typename SeqT, typename Less = std::less<typename SeqT::value_type> >
class SequenceToOffsetTable {
typedef typename SeqT::value_type ElemT;
@@ -82,7 +82,7 @@ public:
}
bool empty() const { return Seqs.empty(); }
-
+
/// layout - Computes the final table layout.
void layout() {
assert(Entries == 0 && "Can only call layout() once");
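
As background for the @tparam fix: SequenceToOffsetTable packs many sequences into one flat array, and a sequence that is a suffix of an already-added one reuses the existing tail, needing only an offset. A hand-rolled illustration of the layout idea, not the real API; the strings are hypothetical:

// If "ldrb" is already stored, "rb" needs no new bytes: it is the suffix
// starting at offset 2 of the shared buffer.
#include <cstdio>
#include <string>

int main() {
  std::string Flat = "ldrb";       // one buffer holds both sequences
  size_t OffLdrb = 0;              // "ldrb" starts at offset 0
  size_t OffRb = Flat.find("rb");  // "rb" reuses bytes 2..3
  std::printf("flat=\"%s\" ldrb@%zu rb@%zu\n",
              Flat.c_str(), OffLdrb, OffRb);
}
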
diff --git a/contrib/llvm/utils/TableGen/SetTheory.cpp b/contrib/llvm/utils/TableGen/SetTheory.cpp
index 46e6db1..0dd9853 100644
--- a/contrib/llvm/utils/TableGen/SetTheory.cpp
+++ b/contrib/llvm/utils/TableGen/SetTheory.cpp
@@ -27,20 +27,20 @@ typedef SetTheory::RecVec RecVec;
// (add a, b, ...) Evaluate and union all arguments.
struct AddOp : public SetTheory::Operator {
- void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
- ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts);
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
+ ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
}
};
// (sub Add, Sub, ...) Set difference.
struct SubOp : public SetTheory::Operator {
- void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
if (Expr->arg_size() < 2)
- throw "Set difference needs at least two arguments: " +
- Expr->getAsString();
+ PrintFatalError(Loc, "Set difference needs at least two arguments: " +
+ Expr->getAsString());
RecSet Add, Sub;
- ST.evaluate(*Expr->arg_begin(), Add);
- ST.evaluate(Expr->arg_begin() + 1, Expr->arg_end(), Sub);
+ ST.evaluate(*Expr->arg_begin(), Add, Loc);
+ ST.evaluate(Expr->arg_begin() + 1, Expr->arg_end(), Sub, Loc);
for (RecSet::iterator I = Add.begin(), E = Add.end(); I != E; ++I)
if (!Sub.count(*I))
Elts.insert(*I);
@@ -49,12 +49,13 @@ struct SubOp : public SetTheory::Operator {
// (and S1, S2) Set intersection.
struct AndOp : public SetTheory::Operator {
- void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
if (Expr->arg_size() != 2)
- throw "Set intersection requires two arguments: " + Expr->getAsString();
+ PrintFatalError(Loc, "Set intersection requires two arguments: " +
+ Expr->getAsString());
RecSet S1, S2;
- ST.evaluate(Expr->arg_begin()[0], S1);
- ST.evaluate(Expr->arg_begin()[1], S2);
+ ST.evaluate(Expr->arg_begin()[0], S1, Loc);
+ ST.evaluate(Expr->arg_begin()[1], S2, Loc);
for (RecSet::iterator I = S1.begin(), E = S1.end(); I != E; ++I)
if (S2.count(*I))
Elts.insert(*I);
@@ -65,17 +66,19 @@ struct AndOp : public SetTheory::Operator {
struct SetIntBinOp : public SetTheory::Operator {
virtual void apply2(SetTheory &ST, DagInit *Expr,
RecSet &Set, int64_t N,
- RecSet &Elts) =0;
+ RecSet &Elts, ArrayRef<SMLoc> Loc) =0;
- void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
if (Expr->arg_size() != 2)
- throw "Operator requires (Op Set, Int) arguments: " + Expr->getAsString();
+ PrintFatalError(Loc, "Operator requires (Op Set, Int) arguments: " +
+ Expr->getAsString());
RecSet Set;
- ST.evaluate(Expr->arg_begin()[0], Set);
- IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[1]);
+ ST.evaluate(Expr->arg_begin()[0], Set, Loc);
+ IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[1]);
if (!II)
- throw "Second argument must be an integer: " + Expr->getAsString();
- apply2(ST, Expr, Set, II->getValue(), Elts);
+ PrintFatalError(Loc, "Second argument must be an integer: " +
+ Expr->getAsString());
+ apply2(ST, Expr, Set, II->getValue(), Elts, Loc);
}
};
@@ -83,9 +86,10 @@ struct SetIntBinOp : public SetTheory::Operator {
struct ShlOp : public SetIntBinOp {
void apply2(SetTheory &ST, DagInit *Expr,
RecSet &Set, int64_t N,
- RecSet &Elts) {
+ RecSet &Elts, ArrayRef<SMLoc> Loc) {
if (N < 0)
- throw "Positive shift required: " + Expr->getAsString();
+ PrintFatalError(Loc, "Positive shift required: " +
+ Expr->getAsString());
if (unsigned(N) < Set.size())
Elts.insert(Set.begin() + N, Set.end());
}
@@ -95,9 +99,10 @@ struct ShlOp : public SetIntBinOp {
struct TruncOp : public SetIntBinOp {
void apply2(SetTheory &ST, DagInit *Expr,
RecSet &Set, int64_t N,
- RecSet &Elts) {
+ RecSet &Elts, ArrayRef<SMLoc> Loc) {
if (N < 0)
- throw "Positive length required: " + Expr->getAsString();
+ PrintFatalError(Loc, "Positive length required: " +
+ Expr->getAsString());
if (unsigned(N) > Set.size())
N = Set.size();
Elts.insert(Set.begin(), Set.begin() + N);
@@ -112,7 +117,7 @@ struct RotOp : public SetIntBinOp {
void apply2(SetTheory &ST, DagInit *Expr,
RecSet &Set, int64_t N,
- RecSet &Elts) {
+ RecSet &Elts, ArrayRef<SMLoc> Loc) {
if (Reverse)
N = -N;
// N > 0 -> rotate left, N < 0 -> rotate right.
@@ -131,9 +136,10 @@ struct RotOp : public SetIntBinOp {
struct DecimateOp : public SetIntBinOp {
void apply2(SetTheory &ST, DagInit *Expr,
RecSet &Set, int64_t N,
- RecSet &Elts) {
+ RecSet &Elts, ArrayRef<SMLoc> Loc) {
if (N <= 0)
- throw "Positive stride required: " + Expr->getAsString();
+ PrintFatalError(Loc, "Positive stride required: " +
+ Expr->getAsString());
for (unsigned I = 0; I < Set.size(); I += N)
Elts.insert(Set[I]);
}
@@ -141,12 +147,12 @@ struct DecimateOp : public SetIntBinOp {
// (interleave S1, S2, ...) Interleave elements of the arguments.
struct InterleaveOp : public SetTheory::Operator {
- void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
// Evaluate the arguments individually.
SmallVector<RecSet, 4> Args(Expr->getNumArgs());
unsigned MaxSize = 0;
for (unsigned i = 0, e = Expr->getNumArgs(); i != e; ++i) {
- ST.evaluate(Expr->getArg(i), Args[i]);
+ ST.evaluate(Expr->getArg(i), Args[i], Loc);
MaxSize = std::max(MaxSize, unsigned(Args[i].size()));
}
// Interleave arguments into Elts.
@@ -159,41 +165,42 @@ struct InterleaveOp : public SetTheory::Operator {
// (sequence "Format", From, To) Generate a sequence of records by name.
struct SequenceOp : public SetTheory::Operator {
- void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
int Step = 1;
if (Expr->arg_size() > 4)
- throw "Bad args to (sequence \"Format\", From, To): " +
- Expr->getAsString();
+ PrintFatalError(Loc, "Bad args to (sequence \"Format\", From, To): " +
+ Expr->getAsString());
else if (Expr->arg_size() == 4) {
- if (IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[3])) {
+ if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[3])) {
Step = II->getValue();
} else
- throw "Stride must be an integer: " + Expr->getAsString();
+ PrintFatalError(Loc, "Stride must be an integer: " +
+ Expr->getAsString());
}
std::string Format;
- if (StringInit *SI = dynamic_cast<StringInit*>(Expr->arg_begin()[0]))
+ if (StringInit *SI = dyn_cast<StringInit>(Expr->arg_begin()[0]))
Format = SI->getValue();
else
- throw "Format must be a string: " + Expr->getAsString();
+ PrintFatalError(Loc, "Format must be a string: " + Expr->getAsString());
int64_t From, To;
- if (IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[1]))
+ if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[1]))
From = II->getValue();
else
- throw "From must be an integer: " + Expr->getAsString();
+ PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString());
if (From < 0 || From >= (1 << 30))
- throw "From out of range";
+ PrintFatalError(Loc, "From out of range");
- if (IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[2]))
+ if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[2]))
To = II->getValue();
else
- throw "From must be an integer: " + Expr->getAsString();
+ PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString());
if (To < 0 || To >= (1 << 30))
- throw "To out of range";
+ PrintFatalError(Loc, "To out of range");
RecordKeeper &Records =
- dynamic_cast<DefInit&>(*Expr->getOperator()).getDef()->getRecords();
+ cast<DefInit>(Expr->getOperator())->getDef()->getRecords();
Step *= From <= To ? 1 : -1;
while (true) {
@@ -206,7 +213,8 @@ struct SequenceOp : public SetTheory::Operator {
OS << format(Format.c_str(), unsigned(From));
Record *Rec = Records.getDef(OS.str());
if (!Rec)
- throw "No def named '" + Name + "': " + Expr->getAsString();
+ PrintFatalError(Loc, "No def named '" + Name + "': " +
+ Expr->getAsString());
// Try to reevaluate Rec in case it is a set.
if (const RecVec *Result = ST.expand(Rec))
Elts.insert(Result->begin(), Result->end());
@@ -225,7 +233,7 @@ struct FieldExpander : public SetTheory::Expander {
FieldExpander(StringRef fn) : FieldName(fn) {}
void expand(SetTheory &ST, Record *Def, RecSet &Elts) {
- ST.evaluate(Def->getValueInit(FieldName), Elts);
+ ST.evaluate(Def->getValueInit(FieldName), Elts, Def->getLoc());
}
};
} // end anonymous namespace
@@ -259,9 +267,9 @@ void SetTheory::addFieldExpander(StringRef ClassName, StringRef FieldName) {
addExpander(ClassName, new FieldExpander(FieldName));
}
-void SetTheory::evaluate(Init *Expr, RecSet &Elts) {
+void SetTheory::evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
// A def in a list can be just an element, or it may expand.
- if (DefInit *Def = dynamic_cast<DefInit*>(Expr)) {
+ if (DefInit *Def = dyn_cast<DefInit>(Expr)) {
if (const RecVec *Result = expand(Def->getDef()))
return Elts.insert(Result->begin(), Result->end());
Elts.insert(Def->getDef());
@@ -269,20 +277,20 @@ void SetTheory::evaluate(Init *Expr, RecSet &Elts) {
}
// Lists simply expand.
- if (ListInit *LI = dynamic_cast<ListInit*>(Expr))
- return evaluate(LI->begin(), LI->end(), Elts);
+ if (ListInit *LI = dyn_cast<ListInit>(Expr))
+ return evaluate(LI->begin(), LI->end(), Elts, Loc);
// Anything else must be a DAG.
- DagInit *DagExpr = dynamic_cast<DagInit*>(Expr);
+ DagInit *DagExpr = dyn_cast<DagInit>(Expr);
if (!DagExpr)
- throw "Invalid set element: " + Expr->getAsString();
- DefInit *OpInit = dynamic_cast<DefInit*>(DagExpr->getOperator());
+ PrintFatalError(Loc, "Invalid set element: " + Expr->getAsString());
+ DefInit *OpInit = dyn_cast<DefInit>(DagExpr->getOperator());
if (!OpInit)
- throw "Bad set expression: " + Expr->getAsString();
+ PrintFatalError(Loc, "Bad set expression: " + Expr->getAsString());
Operator *Op = Operators.lookup(OpInit->getDef()->getName());
if (!Op)
- throw "Unknown set operator: " + Expr->getAsString();
- Op->apply(*this, DagExpr, Elts);
+ PrintFatalError(Loc, "Unknown set operator: " + Expr->getAsString());
+ Op->apply(*this, DagExpr, Elts, Loc);
}
const RecVec *SetTheory::expand(Record *Set) {
@@ -292,19 +300,19 @@ const RecVec *SetTheory::expand(Record *Set) {
return &I->second;
// This is the first time we see Set. Find a suitable expander.
- try {
- const std::vector<Record*> &SC = Set->getSuperClasses();
- for (unsigned i = 0, e = SC.size(); i != e; ++i)
- if (Expander *Exp = Expanders.lookup(SC[i]->getName())) {
- // This breaks recursive definitions.
- RecVec &EltVec = Expansions[Set];
- RecSet Elts;
- Exp->expand(*this, Set, Elts);
- EltVec.assign(Elts.begin(), Elts.end());
- return &EltVec;
- }
- } catch (const std::string &Error) {
- throw TGError(Set->getLoc(), Error);
+ const std::vector<Record*> &SC = Set->getSuperClasses();
+ for (unsigned i = 0, e = SC.size(); i != e; ++i) {
+ // Skip unnamed superclasses.
+ if (!dyn_cast<StringInit>(SC[i]->getNameInit()))
+ continue;
+ if (Expander *Exp = Expanders.lookup(SC[i]->getName())) {
+ // This breaks recursive definitions.
+ RecVec &EltVec = Expansions[Set];
+ RecSet Elts;
+ Exp->expand(*this, Set, Elts);
+ EltVec.assign(Elts.begin(), Elts.end());
+ return &EltVec;
+ }
}
// Set is not expandable.
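
With the signature change above, every Operator::apply now receives the evaluated record's locations, so a custom operator can aim its diagnostics at the offending .td line. A schematic subclass using only calls visible in this patch; it compiles against SetTheory.h and the TableGen headers rather than standing alone, and the operator itself is hypothetical:

// (first S) -> the single leading element of S. Note how Loc is forwarded
// both into PrintFatalError and into the recursive evaluate() call, exactly
// as the built-in operators above do.
struct FirstOp : public SetTheory::Operator {
  void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts,
             ArrayRef<SMLoc> Loc) {
    if (Expr->arg_size() != 1)
      PrintFatalError(Loc, "first requires one argument: " +
                      Expr->getAsString());
    RecSet Set;
    ST.evaluate(Expr->arg_begin()[0], Set, Loc);
    if (Set.empty())
      PrintFatalError(Loc, "first of an empty set: " + Expr->getAsString());
    Elts.insert(*Set.begin());
  }
};

// Registration mirrors the built-in operators:
//   ST.addOperator("first", new FirstOp());
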
diff --git a/contrib/llvm/utils/TableGen/SetTheory.h b/contrib/llvm/utils/TableGen/SetTheory.h
index b394058..122372a 100644
--- a/contrib/llvm/utils/TableGen/SetTheory.h
+++ b/contrib/llvm/utils/TableGen/SetTheory.h
@@ -49,6 +49,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/SourceMgr.h"
#include <map>
#include <vector>
@@ -72,7 +73,8 @@ public:
/// apply - Apply this operator to Expr's arguments and insert the result
/// in Elts.
- virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts) =0;
+ virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts,
+ ArrayRef<SMLoc> Loc) =0;
};
/// Expander - A callback function that can transform a Record representing a
@@ -119,13 +121,13 @@ public:
void addOperator(StringRef Name, Operator*);
/// evaluate - Evaluate Expr and append the resulting set to Elts.
- void evaluate(Init *Expr, RecSet &Elts);
+ void evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc);
/// evaluate - Evaluate a sequence of Inits and append to Elts.
template<typename Iter>
- void evaluate(Iter begin, Iter end, RecSet &Elts) {
+ void evaluate(Iter begin, Iter end, RecSet &Elts, ArrayRef<SMLoc> Loc) {
while (begin != end)
- evaluate(*begin++, Elts);
+ evaluate(*begin++, Elts, Loc);
}
/// expand - Expand a record into a set of elements if possible. Return a
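A minimal sketch of an Operator written against the revised signature; the operator name and semantics are invented for illustration. Each DAG argument is evaluated with the caller's location so diagnostics stay anchored:

    struct UnionOp : public SetTheory::Operator {
      // Union every argument of (myunion a, b, ...) into Elts.
      void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
                 ArrayRef<SMLoc> Loc) {
        for (unsigned i = 0, e = Expr->getNumArgs(); i != e; ++i)
          ST.evaluate(Expr->getArg(i), Elts, Loc);
      }
    };
    // Registered once, e.g.: ST.addOperator("myunion", new UnionOp());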
diff --git a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
index 3472343..f1a06bb 100644
--- a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -11,13 +11,18 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "subtarget-emitter"
+
#include "CodeGenTarget.h"
#include "CodeGenSchedule.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCInstrItineraries.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
#include <algorithm>
#include <map>
#include <string>
@@ -26,6 +31,32 @@ using namespace llvm;
namespace {
class SubtargetEmitter {
+ // Each processor has a SchedClassDesc table with an entry for each SchedClass.
+ // The SchedClassDesc table indexes into a global write resource table, write
+ // latency table, and read advance table.
+ struct SchedClassTables {
+ std::vector<std::vector<MCSchedClassDesc> > ProcSchedClasses;
+ std::vector<MCWriteProcResEntry> WriteProcResources;
+ std::vector<MCWriteLatencyEntry> WriteLatencies;
+ std::vector<std::string> WriterNames;
+ std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
+
+ // Reserve an invalid entry at index 0
+ SchedClassTables() {
+ ProcSchedClasses.resize(1);
+ WriteProcResources.resize(1);
+ WriteLatencies.resize(1);
+ WriterNames.push_back("InvalidWrite");
+ ReadAdvanceEntries.resize(1);
+ }
+ };
+
+ struct LessWriteProcResources {
+ bool operator()(const MCWriteProcResEntry &LHS,
+ const MCWriteProcResEntry &RHS) {
+ return LHS.ProcResourceIdx < RHS.ProcResourceIdx;
+ }
+ };
RecordKeeper &Records;
CodeGenSchedModels &SchedModels;
@@ -50,8 +81,18 @@ class SubtargetEmitter {
&ProcItinLists);
void EmitProcessorProp(raw_ostream &OS, const Record *R, const char *Name,
char Separator);
+ void EmitProcessorResources(const CodeGenProcModel &ProcModel,
+ raw_ostream &OS);
+ Record *FindWriteResources(const CodeGenSchedRW &SchedWrite,
+ const CodeGenProcModel &ProcModel);
+ Record *FindReadAdvance(const CodeGenSchedRW &SchedRead,
+ const CodeGenProcModel &ProcModel);
+ void GenSchedClassTables(const CodeGenProcModel &ProcModel,
+ SchedClassTables &SchedTables);
+ void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
void EmitProcessorModels(raw_ostream &OS);
void EmitProcessorLookup(raw_ostream &OS);
+ void EmitSchedModelHelpers(std::string ClassName, raw_ostream &OS);
void EmitSchedModel(raw_ostream &OS);
void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures,
unsigned NumProcs);
@@ -521,7 +562,7 @@ EmitItineraries(raw_ostream &OS,
std::vector<std::vector<InstrItinerary> >::iterator
ProcItinListsIter = ProcItinLists.begin();
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
- PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+ PE = SchedModels.procModelEnd(); PI != PE; ++PI, ++ProcItinListsIter) {
Record *ItinsDef = PI->ItinsDef;
if (!ItinsDefSet.insert(ItinsDef))
@@ -532,7 +573,7 @@ EmitItineraries(raw_ostream &OS,
// Get the itinerary list for the processor.
assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
- std::vector<InstrItinerary> &ItinList = *ProcItinListsIter++;
+ std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;
OS << "\n";
OS << "static const llvm::InstrItinerary ";
@@ -578,11 +619,488 @@ void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R,
OS << '\n';
}
+void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel,
+ raw_ostream &OS) {
+ char Sep = ProcModel.ProcResourceDefs.empty() ? ' ' : ',';
+
+ OS << "\n// {Name, NumUnits, SuperIdx, IsBuffered}\n";
+ OS << "static const llvm::MCProcResourceDesc "
+ << ProcModel.ModelName << "ProcResources" << "[] = {\n"
+ << " {DBGFIELD(\"InvalidUnit\") 0, 0, 0}" << Sep << "\n";
+
+ for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
+ Record *PRDef = ProcModel.ProcResourceDefs[i];
+
+ // Find the SuperIdx
+ unsigned SuperIdx = 0;
+ Record *SuperDef = 0;
+ if (PRDef->getValueInit("Super")->isComplete()) {
+ SuperDef =
+ SchedModels.findProcResUnits(PRDef->getValueAsDef("Super"), ProcModel);
+ SuperIdx = ProcModel.getProcResourceIdx(SuperDef);
+ }
+ // Emit the ProcResourceDesc
+ if (i+1 == e)
+ Sep = ' ';
+ OS << " {DBGFIELD(\"" << PRDef->getName() << "\") ";
+ if (PRDef->getName().size() < 15)
+ OS.indent(15 - PRDef->getName().size());
+ OS << PRDef->getValueAsInt("NumUnits") << ", " << SuperIdx << ", "
+ << PRDef->getValueAsBit("Buffered") << "}" << Sep << " // #" << i+1;
+ if (SuperDef)
+ OS << ", Super=" << SuperDef->getName();
+ OS << "\n";
+ }
+ OS << "};\n";
+}
+
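Illustrative output only -- the model and unit names below are invented. The emitter produces one such array per SchedMachineModel, with the invalid sentinel at index 0:

    // {Name, NumUnits, SuperIdx, IsBuffered}
    static const llvm::MCProcResourceDesc MyModelProcResources[] = {
      {DBGFIELD("InvalidUnit")    0, 0, 0},
      {DBGFIELD("MyALU")          2, 0, 1}, // #1
      {DBGFIELD("MyALUPipe0")     1, 1, 1}  // #2, Super=MyALU
    };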
+// Find the WriteRes Record that defines processor resources for this
+// SchedWrite.
+Record *SubtargetEmitter::FindWriteResources(
+ const CodeGenSchedRW &SchedWrite, const CodeGenProcModel &ProcModel) {
+
+ // Check if the SchedWrite is already subtarget-specific and directly
+ // specifies a set of processor resources.
+ if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes"))
+ return SchedWrite.TheDef;
+
+ Record *AliasDef = 0;
+ for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end();
+ AI != AE; ++AI) {
+ const CodeGenSchedRW &AliasRW =
+ SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
+ if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
+ Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
+ if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
+ continue;
+ }
+ if (AliasDef)
+ PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
+ "defined for processor " + ProcModel.ModelName +
+ " Ensure only one SchedAlias exists per RW.");
+ AliasDef = AliasRW.TheDef;
+ }
+ if (AliasDef && AliasDef->isSubClassOf("SchedWriteRes"))
+ return AliasDef;
+
+ // Check this processor's list of write resources.
+ Record *ResDef = 0;
+ for (RecIter WRI = ProcModel.WriteResDefs.begin(),
+ WRE = ProcModel.WriteResDefs.end(); WRI != WRE; ++WRI) {
+ if (!(*WRI)->isSubClassOf("WriteRes"))
+ continue;
+ if (AliasDef == (*WRI)->getValueAsDef("WriteType")
+ || SchedWrite.TheDef == (*WRI)->getValueAsDef("WriteType")) {
+ if (ResDef) {
+ PrintFatalError((*WRI)->getLoc(), "Resources are defined for both "
+ "SchedWrite and its alias on processor " +
+ ProcModel.ModelName);
+ }
+ ResDef = *WRI;
+ }
+ }
+ // TODO: If ProcModel has a base model (previous generation processor),
+ // then call FindWriteResources recursively with that model here.
+ if (!ResDef) {
+ PrintFatalError(ProcModel.ModelDef->getLoc(),
+ std::string("Processor does not define resources for ")
+ + SchedWrite.TheDef->getName());
+ }
+ return ResDef;
+}
+
+/// Find the ReadAdvance record for the given SchedRead on this processor or
+/// return NULL.
+Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead,
+ const CodeGenProcModel &ProcModel) {
+ // Check for SchedReads that directly specify a ReadAdvance.
+ if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance"))
+ return SchedRead.TheDef;
+
+ // Check this processor's list of aliases for SchedRead.
+ Record *AliasDef = 0;
+ for (RecIter AI = SchedRead.Aliases.begin(), AE = SchedRead.Aliases.end();
+ AI != AE; ++AI) {
+ const CodeGenSchedRW &AliasRW =
+ SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
+ if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
+ Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
+ if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
+ continue;
+ }
+ if (AliasDef)
+ PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
+ "defined for processor " + ProcModel.ModelName +
+ " Ensure only one SchedAlias exists per RW.");
+ AliasDef = AliasRW.TheDef;
+ }
+ if (AliasDef && AliasDef->isSubClassOf("SchedReadAdvance"))
+ return AliasDef;
+
+ // Check this processor's ReadAdvanceList.
+ Record *ResDef = 0;
+ for (RecIter RAI = ProcModel.ReadAdvanceDefs.begin(),
+ RAE = ProcModel.ReadAdvanceDefs.end(); RAI != RAE; ++RAI) {
+ if (!(*RAI)->isSubClassOf("ReadAdvance"))
+ continue;
+ if (AliasDef == (*RAI)->getValueAsDef("ReadType")
+ || SchedRead.TheDef == (*RAI)->getValueAsDef("ReadType")) {
+ if (ResDef) {
+ PrintFatalError((*RAI)->getLoc(), "Resources are defined for both "
+ "SchedRead and its alias on processor " +
+ ProcModel.ModelName);
+ }
+ ResDef = *RAI;
+ }
+ }
+ // TODO: If ProcModel has a base model (previous generation processor),
+ // then call FindReadAdvance recursively with that model here.
+ if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") {
+ PrintFatalError(ProcModel.ModelDef->getLoc(),
+ std::string("Processor does not define resources for ")
+ + SchedRead.TheDef->getName());
+ }
+ return ResDef;
+}
+
+// Generate the SchedClass table for this processor and update global
+// tables. Must be called for each processor in order.
+void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
+ SchedClassTables &SchedTables) {
+ SchedTables.ProcSchedClasses.resize(SchedTables.ProcSchedClasses.size() + 1);
+ if (!ProcModel.hasInstrSchedModel())
+ return;
+
+ std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
+ for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(),
+ SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
+ DEBUG(SCI->dump(&SchedModels));
+
+ SCTab.resize(SCTab.size() + 1);
+ MCSchedClassDesc &SCDesc = SCTab.back();
+ // SCDesc.Name is guarded by NDEBUG
+ SCDesc.NumMicroOps = 0;
+ SCDesc.BeginGroup = false;
+ SCDesc.EndGroup = false;
+ SCDesc.WriteProcResIdx = 0;
+ SCDesc.WriteLatencyIdx = 0;
+ SCDesc.ReadAdvanceIdx = 0;
+
+ // A Variant SchedClass has no resources of its own.
+ if (!SCI->Transitions.empty()) {
+ SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps;
+ continue;
+ }
+
+ // Determine if the SchedClass is actually reachable on this processor. If
+ // not, don't try to locate the processor resources; it will fail.
+ // If ProcIndices contains 0, this class applies to all processors.
+ assert(!SCI->ProcIndices.empty() && "expect at least one procidx");
+ if (SCI->ProcIndices[0] != 0) {
+ IdxIter PIPos = std::find(SCI->ProcIndices.begin(),
+ SCI->ProcIndices.end(), ProcModel.Index);
+ if (PIPos == SCI->ProcIndices.end())
+ continue;
+ }
+ IdxVec Writes = SCI->Writes;
+ IdxVec Reads = SCI->Reads;
+ if (SCI->ItinClassDef) {
+ assert(SCI->InstRWs.empty() && "ItinClass should not have InstRWs");
+ // Check this processor's itinerary class resources.
+ for (RecIter II = ProcModel.ItinRWDefs.begin(),
+ IE = ProcModel.ItinRWDefs.end(); II != IE; ++II) {
+ RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
+ if (std::find(Matched.begin(), Matched.end(), SCI->ItinClassDef)
+ != Matched.end()) {
+ SchedModels.findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"),
+ Writes, Reads);
+ break;
+ }
+ }
+ if (Writes.empty()) {
+ DEBUG(dbgs() << ProcModel.ItinsDef->getName()
+ << " does not have resources for itinerary class "
+ << SCI->ItinClassDef->getName() << '\n');
+ }
+ }
+ else if (!SCI->InstRWs.empty()) {
+ // This class may have a default ReadWrite list which can be overridden by
+ // InstRW definitions.
+ Record *RWDef = 0;
+ for (RecIter RWI = SCI->InstRWs.begin(), RWE = SCI->InstRWs.end();
+ RWI != RWE; ++RWI) {
+ Record *RWModelDef = (*RWI)->getValueAsDef("SchedModel");
+ if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) {
+ RWDef = *RWI;
+ break;
+ }
+ }
+ if (RWDef) {
+ Writes.clear();
+ Reads.clear();
+ SchedModels.findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
+ Writes, Reads);
+ }
+ }
+ // Sum resources across all operand writes.
+ std::vector<MCWriteProcResEntry> WriteProcResources;
+ std::vector<MCWriteLatencyEntry> WriteLatencies;
+ std::vector<std::string> WriterNames;
+ std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
+ for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI) {
+ IdxVec WriteSeq;
+ SchedModels.expandRWSeqForProc(*WI, WriteSeq, /*IsRead=*/false,
+ ProcModel);
+
+ // For each operand, create a latency entry.
+ MCWriteLatencyEntry WLEntry;
+ WLEntry.Cycles = 0;
+ unsigned WriteID = WriteSeq.back();
+ WriterNames.push_back(SchedModels.getSchedWrite(WriteID).Name);
+ // If this Write is not referenced by a ReadAdvance, don't distinguish it
+ // from other WriteLatency entries.
+ if (!SchedModels.hasReadOfWrite(SchedModels.getSchedWrite(WriteID).TheDef)) {
+ WriteID = 0;
+ }
+ WLEntry.WriteResourceID = WriteID;
+
+ for (IdxIter WSI = WriteSeq.begin(), WSE = WriteSeq.end();
+ WSI != WSE; ++WSI) {
+
+ Record *WriteRes =
+ FindWriteResources(SchedModels.getSchedWrite(*WSI), ProcModel);
+
+ // Mark the parent class as invalid for unsupported write types.
+ if (WriteRes->getValueAsBit("Unsupported")) {
+ SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
+ break;
+ }
+ WLEntry.Cycles += WriteRes->getValueAsInt("Latency");
+ SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps");
+ SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup");
+ SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup");
+
+ // Create an entry for each ProcResource listed in WriteRes.
+ RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources");
+ std::vector<int64_t> Cycles =
+ WriteRes->getValueAsListOfInts("ResourceCycles");
+ for (unsigned PRIdx = 0, PREnd = PRVec.size();
+ PRIdx != PREnd; ++PRIdx) {
+ MCWriteProcResEntry WPREntry;
+ WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRVec[PRIdx]);
+ assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx");
+ if (Cycles.size() > PRIdx)
+ WPREntry.Cycles = Cycles[PRIdx];
+ else
+ WPREntry.Cycles = 1;
+ WriteProcResources.push_back(WPREntry);
+ }
+ }
+ WriteLatencies.push_back(WLEntry);
+ }
+ // Create an entry for each operand Read in this SchedClass.
+ // Entries must be sorted first by UseIdx then by WriteResourceID.
+ for (unsigned UseIdx = 0, EndIdx = Reads.size();
+ UseIdx != EndIdx; ++UseIdx) {
+ Record *ReadAdvance =
+ FindReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel);
+ if (!ReadAdvance)
+ continue;
+
+ // Mark the parent class as invalid for unsupported read types.
+ if (ReadAdvance->getValueAsBit("Unsupported")) {
+ SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
+ break;
+ }
+ RecVec ValidWrites = ReadAdvance->getValueAsListOfDefs("ValidWrites");
+ IdxVec WriteIDs;
+ if (ValidWrites.empty())
+ WriteIDs.push_back(0);
+ else {
+ for (RecIter VWI = ValidWrites.begin(), VWE = ValidWrites.end();
+ VWI != VWE; ++VWI) {
+ WriteIDs.push_back(SchedModels.getSchedRWIdx(*VWI, /*IsRead=*/false));
+ }
+ }
+ std::sort(WriteIDs.begin(), WriteIDs.end());
+ for(IdxIter WI = WriteIDs.begin(), WE = WriteIDs.end(); WI != WE; ++WI) {
+ MCReadAdvanceEntry RAEntry;
+ RAEntry.UseIdx = UseIdx;
+ RAEntry.WriteResourceID = *WI;
+ RAEntry.Cycles = ReadAdvance->getValueAsInt("Cycles");
+ ReadAdvanceEntries.push_back(RAEntry);
+ }
+ }
+ if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
+ WriteProcResources.clear();
+ WriteLatencies.clear();
+ ReadAdvanceEntries.clear();
+ }
+ // Add the information for this SchedClass to the global tables using basic
+ // compression.
+ //
+ // WriteProcRes entries are sorted by ProcResourceIdx.
+ std::sort(WriteProcResources.begin(), WriteProcResources.end(),
+ LessWriteProcResources());
+
+ SCDesc.NumWriteProcResEntries = WriteProcResources.size();
+ std::vector<MCWriteProcResEntry>::iterator WPRPos =
+ std::search(SchedTables.WriteProcResources.begin(),
+ SchedTables.WriteProcResources.end(),
+ WriteProcResources.begin(), WriteProcResources.end());
+ if (WPRPos != SchedTables.WriteProcResources.end())
+ SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin();
+ else {
+ SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size();
+ SchedTables.WriteProcResources.insert(WPRPos, WriteProcResources.begin(),
+ WriteProcResources.end());
+ }
+ // Latency entries must remain in operand order.
+ SCDesc.NumWriteLatencyEntries = WriteLatencies.size();
+ std::vector<MCWriteLatencyEntry>::iterator WLPos =
+ std::search(SchedTables.WriteLatencies.begin(),
+ SchedTables.WriteLatencies.end(),
+ WriteLatencies.begin(), WriteLatencies.end());
+ if (WLPos != SchedTables.WriteLatencies.end()) {
+ unsigned idx = WLPos - SchedTables.WriteLatencies.begin();
+ SCDesc.WriteLatencyIdx = idx;
+ for (unsigned i = 0, e = WriteLatencies.size(); i < e; ++i)
+ if (SchedTables.WriterNames[idx + i].find(WriterNames[i]) ==
+ std::string::npos) {
+ SchedTables.WriterNames[idx + i] += std::string("_") + WriterNames[i];
+ }
+ }
+ else {
+ SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
+ SchedTables.WriteLatencies.insert(SchedTables.WriteLatencies.end(),
+ WriteLatencies.begin(),
+ WriteLatencies.end());
+ SchedTables.WriterNames.insert(SchedTables.WriterNames.end(),
+ WriterNames.begin(), WriterNames.end());
+ }
+ // ReadAdvanceEntries must remain in operand order.
+ SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size();
+ std::vector<MCReadAdvanceEntry>::iterator RAPos =
+ std::search(SchedTables.ReadAdvanceEntries.begin(),
+ SchedTables.ReadAdvanceEntries.end(),
+ ReadAdvanceEntries.begin(), ReadAdvanceEntries.end());
+ if (RAPos != SchedTables.ReadAdvanceEntries.end())
+ SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin();
+ else {
+ SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size();
+ SchedTables.ReadAdvanceEntries.insert(RAPos, ReadAdvanceEntries.begin(),
+ ReadAdvanceEntries.end());
+ }
+ }
+}
+
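The table sharing above boils down to reuse-by-subsequence: search the global vector for an existing run equal to the new entries and reuse its offset, otherwise append. A standalone sketch of that idiom:

    #include <algorithm>
    #include <vector>

    static unsigned internRun(std::vector<int> &Table,
                              const std::vector<int> &Run) {
      std::vector<int>::iterator Pos =
          std::search(Table.begin(), Table.end(), Run.begin(), Run.end());
      if (Pos != Table.end())
        return Pos - Table.begin();   // share the existing entries
      unsigned Idx = Table.size();
      Table.insert(Table.end(), Run.begin(), Run.end());
      return Idx;                     // appended at the end
    }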
+// Emit SchedClass tables for all processors and associated global tables.
+void SubtargetEmitter::EmitSchedClassTables(SchedClassTables &SchedTables,
+ raw_ostream &OS) {
+ // Emit global WriteProcResTable.
+ OS << "\n// {ProcResourceIdx, Cycles}\n"
+ << "extern const llvm::MCWriteProcResEntry "
+ << Target << "WriteProcResTable[] = {\n"
+ << " { 0, 0}, // Invalid\n";
+ for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size();
+ WPRIdx != WPREnd; ++WPRIdx) {
+ MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx];
+ OS << " {" << format("%2d", WPREntry.ProcResourceIdx) << ", "
+ << format("%2d", WPREntry.Cycles) << "}";
+ if (WPRIdx + 1 < WPREnd)
+ OS << ',';
+ OS << " // #" << WPRIdx << '\n';
+ }
+ OS << "}; // " << Target << "WriteProcResTable\n";
+
+ // Emit global WriteLatencyTable.
+ OS << "\n// {Cycles, WriteResourceID}\n"
+ << "extern const llvm::MCWriteLatencyEntry "
+ << Target << "WriteLatencyTable[] = {\n"
+ << " { 0, 0}, // Invalid\n";
+ for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size();
+ WLIdx != WLEnd; ++WLIdx) {
+ MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx];
+ OS << " {" << format("%2d", WLEntry.Cycles) << ", "
+ << format("%2d", WLEntry.WriteResourceID) << "}";
+ if (WLIdx + 1 < WLEnd)
+ OS << ',';
+ OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n';
+ }
+ OS << "}; // " << Target << "WriteLatencyTable\n";
+
+ // Emit global ReadAdvanceTable.
+ OS << "\n// {UseIdx, WriteResourceID, Cycles}\n"
+ << "extern const llvm::MCReadAdvanceEntry "
+ << Target << "ReadAdvanceTable[] = {\n"
+ << " {0, 0, 0}, // Invalid\n";
+ for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size();
+ RAIdx != RAEnd; ++RAIdx) {
+ MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx];
+ OS << " {" << RAEntry.UseIdx << ", "
+ << format("%2d", RAEntry.WriteResourceID) << ", "
+ << format("%2d", RAEntry.Cycles) << "}";
+ if (RAIdx + 1 < RAEnd)
+ OS << ',';
+ OS << " // #" << RAIdx << '\n';
+ }
+ OS << "}; // " << Target << "ReadAdvanceTable\n";
+
+ // Emit a SchedClass table for each processor.
+ for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+ PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+ if (!PI->hasInstrSchedModel())
+ continue;
+
+ std::vector<MCSchedClassDesc> &SCTab =
+ SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];
+
+ OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup,"
+ << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
+ OS << "static const llvm::MCSchedClassDesc "
+ << PI->ModelName << "SchedClasses[] = {\n";
+
+ // The first class is always invalid. We have no way to distinguish it except by
+ // name and position.
+ assert(SchedModels.getSchedClass(0).Name == "NoItinerary"
+ && "invalid class not first");
+ OS << " {DBGFIELD(\"InvalidSchedClass\") "
+ << MCSchedClassDesc::InvalidNumMicroOps
+ << ", 0, 0, 0, 0, 0, 0, 0, 0},\n";
+
+ for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) {
+ MCSchedClassDesc &MCDesc = SCTab[SCIdx];
+ const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(SCIdx);
+ OS << " {DBGFIELD(\"" << SchedClass.Name << "\") ";
+ if (SchedClass.Name.size() < 18)
+ OS.indent(18 - SchedClass.Name.size());
+ OS << MCDesc.NumMicroOps
+ << ", " << MCDesc.BeginGroup << ", " << MCDesc.EndGroup
+ << ", " << format("%2d", MCDesc.WriteProcResIdx)
+ << ", " << MCDesc.NumWriteProcResEntries
+ << ", " << format("%2d", MCDesc.WriteLatencyIdx)
+ << ", " << MCDesc.NumWriteLatencyEntries
+ << ", " << format("%2d", MCDesc.ReadAdvanceIdx)
+ << ", " << MCDesc.NumReadAdvanceEntries << "}";
+ if (SCIdx + 1 < SCEnd)
+ OS << ',';
+ OS << " // #" << SCIdx << '\n';
+ }
+ OS << "}; // " << PI->ModelName << "SchedClasses\n";
+ }
+}
+
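Illustrative output only (target name and entries invented), showing the shape of the emitted global tables that the per-processor MCSchedClassDesc rows index into:

    // {ProcResourceIdx, Cycles}
    extern const llvm::MCWriteProcResEntry MyTargetWriteProcResTable[] = {
      { 0,  0}, // Invalid
      { 1,  1}  // #1
    }; // MyTargetWriteProcResTable

    // {Cycles, WriteResourceID}
    extern const llvm::MCWriteLatencyEntry MyTargetWriteLatencyTable[] = {
      { 0,  0}, // Invalid
      { 3,  0}  // #1 WriteALU
    }; // MyTargetWriteLatencyTable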
void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
// For each processor model.
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
- // Skip default
+ // Emit processor resource table.
+ if (PI->hasInstrSchedModel())
+ EmitProcessorResources(*PI, OS);
+ else if (!PI->ProcResourceDefs.empty())
+ PrintFatalError(PI->ModelDef->getLoc(), "SchedMachineModel defines "
+ "ProcResources without defining WriteRes SchedWriteRes");
+
// Begin processor itinerary properties
OS << "\n";
OS << "static const llvm::MCSchedModel " << PI->ModelName << "(\n";
@@ -591,11 +1109,19 @@ void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
EmitProcessorProp(OS, PI->ModelDef, "LoadLatency", ',');
EmitProcessorProp(OS, PI->ModelDef, "HighLatency", ',');
EmitProcessorProp(OS, PI->ModelDef, "MispredictPenalty", ',');
+ OS << " " << PI->Index << ", // Processor ID\n";
+ if (PI->hasInstrSchedModel())
+ OS << " " << PI->ModelName << "ProcResources" << ",\n"
+ << " " << PI->ModelName << "SchedClasses" << ",\n"
+ << " " << PI->ProcResourceDefs.size()+1 << ",\n"
+ << " " << (SchedModels.schedClassEnd()
+ - SchedModels.schedClassBegin()) << ",\n";
+ else
+ OS << " 0, 0, 0, 0, // No instruction-level machine model.\n";
if (SchedModels.hasItineraryClasses())
- OS << " " << PI->ItinsDef->getName();
+ OS << " " << PI->ItinsDef->getName() << ");\n";
else
- OS << " 0";
- OS << ");\n";
+ OS << " 0); // No Itinerary\n";
}
}
@@ -621,14 +1147,10 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
const std::string &Name = Processor->getValueAsString("Name");
const std::string &ProcModelName =
- SchedModels.getProcModel(Processor).ModelName;
+ SchedModels.getModelForProc(Processor).ModelName;
// Emit as { "cpu", procinit },
- OS << " { "
- << "\"" << Name << "\", "
- << "(void *)&" << ProcModelName;
-
- OS << " }";
+ OS << " { \"" << Name << "\", (const void *)&" << ProcModelName << " }";
// Emit a trailing comma if more entries follow in the list.
if (++i < N) OS << ",";
@@ -644,16 +1166,116 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
//
void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
+ OS << "#ifdef DBGFIELD\n"
+ << "#error \"<target>GenSubtargetInfo.inc requires a DBGFIELD macro\"\n"
+ << "#endif\n"
+ << "#ifndef NDEBUG\n"
+ << "#define DBGFIELD(x) x,\n"
+ << "#else\n"
+ << "#define DBGFIELD(x)\n"
+ << "#endif\n";
+
if (SchedModels.hasItineraryClasses()) {
std::vector<std::vector<InstrItinerary> > ProcItinLists;
// Emit the stage data
EmitStageAndOperandCycleData(OS, ProcItinLists);
EmitItineraries(OS, ProcItinLists);
}
+ OS << "\n// ===============================================================\n"
+ << "// Data tables for the new per-operand machine model.\n";
+
+ SchedClassTables SchedTables;
+ for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+ PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+ GenSchedClassTables(*PI, SchedTables);
+ }
+ EmitSchedClassTables(SchedTables, OS);
+
// Emit the processor machine model
EmitProcessorModels(OS);
// Emit the processor lookup data
EmitProcessorLookup(OS);
+
+ OS << "#undef DBGFIELD";
+}
+
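The DBGFIELD wrapper emitted above makes the name fields conditional; a sketch of its expansion under the two build modes:

    #ifndef NDEBUG
    #define DBGFIELD(x) x,
    #else
    #define DBGFIELD(x)
    #endif
    // With assertions: {DBGFIELD("WriteALU") 3, 0} -> {"WriteALU", 3, 0}
    // With NDEBUG:     {DBGFIELD("WriteALU") 3, 0} -> {3, 0}
    // so the Name member only exists (and is initialized) in +Asserts builds.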
+void SubtargetEmitter::EmitSchedModelHelpers(std::string ClassName,
+ raw_ostream &OS) {
+ OS << "unsigned " << ClassName
+ << "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
+ << " const TargetSchedModel *SchedModel) const {\n";
+
+ std::vector<Record*> Prologs = Records.getAllDerivedDefinitions("PredicateProlog");
+ std::sort(Prologs.begin(), Prologs.end(), LessRecord());
+ for (std::vector<Record*>::const_iterator
+ PI = Prologs.begin(), PE = Prologs.end(); PI != PE; ++PI) {
+ OS << (*PI)->getValueAsString("Code") << '\n';
+ }
+ IdxVec VariantClasses;
+ for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(),
+ SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
+ if (SCI->Transitions.empty())
+ continue;
+ VariantClasses.push_back(SCI - SchedModels.schedClassBegin());
+ }
+ if (!VariantClasses.empty()) {
+ OS << " switch (SchedClass) {\n";
+ for (IdxIter VCI = VariantClasses.begin(), VCE = VariantClasses.end();
+ VCI != VCE; ++VCI) {
+ const CodeGenSchedClass &SC = SchedModels.getSchedClass(*VCI);
+ OS << " case " << *VCI << ": // " << SC.Name << '\n';
+ IdxVec ProcIndices;
+ for (std::vector<CodeGenSchedTransition>::const_iterator
+ TI = SC.Transitions.begin(), TE = SC.Transitions.end();
+ TI != TE; ++TI) {
+ IdxVec PI;
+ std::set_union(TI->ProcIndices.begin(), TI->ProcIndices.end(),
+ ProcIndices.begin(), ProcIndices.end(),
+ std::back_inserter(PI));
+ ProcIndices.swap(PI);
+ }
+ for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
+ PI != PE; ++PI) {
+ OS << " ";
+ if (*PI != 0)
+ OS << "if (SchedModel->getProcessorID() == " << *PI << ") ";
+ OS << "{ // " << (SchedModels.procModelBegin() + *PI)->ModelName
+ << '\n';
+ for (std::vector<CodeGenSchedTransition>::const_iterator
+ TI = SC.Transitions.begin(), TE = SC.Transitions.end();
+ TI != TE; ++TI) {
+ OS << " if (";
+ if (*PI != 0 && !std::count(TI->ProcIndices.begin(),
+ TI->ProcIndices.end(), *PI)) {
+ continue;
+ }
+ for (RecIter RI = TI->PredTerm.begin(), RE = TI->PredTerm.end();
+ RI != RE; ++RI) {
+ if (RI != TI->PredTerm.begin())
+ OS << "\n && ";
+ OS << "(" << (*RI)->getValueAsString("Predicate") << ")";
+ }
+ OS << ")\n"
+ << " return " << TI->ToClassIdx << "; // "
+ << SchedModels.getSchedClass(TI->ToClassIdx).Name << '\n';
+ }
+ OS << " }\n";
+ if (*PI == 0)
+ break;
+ }
+ unsigned SCIdx = 0;
+ if (SC.ItinClassDef)
+ SCIdx = SchedModels.getSchedClassIdxForItin(SC.ItinClassDef);
+ else
+ SCIdx = SchedModels.findSchedClassIdx(SC.Writes, SC.Reads);
+ if (SCIdx != *VCI)
+ OS << " return " << SCIdx << ";\n";
+ OS << " break;\n";
+ }
+ OS << " };\n";
+ }
+ OS << " report_fatal_error(\"Expected a variant SchedClass\");\n"
+ << "} // " << ClassName << "::resolveSchedClass\n";
}
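The shape of the generated hook, with class indices, model name, and predicate invented for illustration:

    unsigned MyTargetGenSubtargetInfo
    ::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,
                        const TargetSchedModel *SchedModel) const {
      switch (SchedClass) {
      case 42: // WriteVarShift
        if (SchedModel->getProcessorID() == 1) { // MyProcModel
          if ((isRegShift(MI)))
            return 43; // WriteShiftReg
        }
        break;
      };
      report_fatal_error("Expected a variant SchedClass");
    }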
//
@@ -680,7 +1302,8 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
return;
}
- OS << " uint64_t Bits = ReInitMCSubtargetInfo(CPU, FS);\n";
+ OS << " InitMCProcessorInfo(CPU, FS);\n"
+ << " uint64_t Bits = getFeatureBits();\n";
for (unsigned i = 0; i < Features.size(); i++) {
// Next record
@@ -747,13 +1370,18 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << Target << "SubTypeKV, ";
else
OS << "0, ";
+ OS << '\n'; OS.indent(22);
+ OS << Target << "ProcSchedKV, "
+ << Target << "WriteProcResTable, "
+ << Target << "WriteLatencyTable, "
+ << Target << "ReadAdvanceTable, ";
if (SchedModels.hasItineraryClasses()) {
- OS << Target << "ProcSchedKV, "
- << Target << "Stages, "
+ OS << '\n'; OS.indent(22);
+ OS << Target << "Stages, "
<< Target << "OperandCycles, "
<< Target << "ForwardingPaths, ";
} else
- OS << "0, 0, 0, 0, ";
+ OS << "0, 0, 0, ";
OS << NumFeatures << ", " << NumProcs << ");\n}\n\n";
OS << "} // End llvm namespace \n";
@@ -780,6 +1408,8 @@ void SubtargetEmitter::run(raw_ostream &OS) {
<< " explicit " << ClassName << "(StringRef TT, StringRef CPU, "
<< "StringRef FS);\n"
<< "public:\n"
+ << " unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *DefMI,"
+ << " const TargetSchedModel *SchedModel) const;\n"
<< " DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
<< " const;\n"
<< "};\n";
@@ -790,11 +1420,19 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n";
OS << "#undef GET_SUBTARGETINFO_CTOR\n";
+ OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n";
OS << "namespace llvm {\n";
OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
OS << "extern const llvm::SubtargetFeatureKV " << Target << "SubTypeKV[];\n";
+ OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n";
+ OS << "extern const llvm::MCWriteProcResEntry "
+ << Target << "WriteProcResTable[];\n";
+ OS << "extern const llvm::MCWriteLatencyEntry "
+ << Target << "WriteLatencyTable[];\n";
+ OS << "extern const llvm::MCReadAdvanceEntry "
+ << Target << "ReadAdvanceTable[];\n";
+
if (SchedModels.hasItineraryClasses()) {
- OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n";
OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
OS << "extern const unsigned " << Target << "OperandCycles[];\n";
OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
@@ -812,14 +1450,22 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << Target << "SubTypeKV, ";
else
OS << "0, ";
+ OS << '\n'; OS.indent(22);
+ OS << Target << "ProcSchedKV, "
+ << Target << "WriteProcResTable, "
+ << Target << "WriteLatencyTable, "
+ << Target << "ReadAdvanceTable, ";
+ OS << '\n'; OS.indent(22);
if (SchedModels.hasItineraryClasses()) {
- OS << Target << "ProcSchedKV, "
- << Target << "Stages, "
+ OS << Target << "Stages, "
<< Target << "OperandCycles, "
<< Target << "ForwardingPaths, ";
} else
- OS << "0, 0, 0, 0, ";
+ OS << "0, 0, 0, ";
OS << NumFeatures << ", " << NumProcs << ");\n}\n\n";
+
+ EmitSchedModelHelpers(ClassName, OS);
+
OS << "} // End llvm namespace \n";
OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";
diff --git a/contrib/llvm/utils/TableGen/TGValueTypes.cpp b/contrib/llvm/utils/TableGen/TGValueTypes.cpp
index af0d9f4..3ac71a4 100644
--- a/contrib/llvm/utils/TableGen/TGValueTypes.cpp
+++ b/contrib/llvm/utils/TableGen/TGValueTypes.cpp
@@ -15,13 +15,25 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/Casting.h"
#include <map>
using namespace llvm;
namespace llvm {
class Type {
+protected:
+ enum TypeKind {
+ TK_ExtendedIntegerType,
+ TK_ExtendedVectorType
+ };
+private:
+ TypeKind Kind;
public:
+ TypeKind getKind() const {
+ return Kind;
+ }
+ Type(TypeKind K) : Kind(K) {}
virtual unsigned getSizeInBits() const = 0;
virtual ~Type() {}
};
@@ -32,7 +44,10 @@ class ExtendedIntegerType : public Type {
unsigned BitWidth;
public:
explicit ExtendedIntegerType(unsigned bits)
- : BitWidth(bits) {}
+ : Type(TK_ExtendedIntegerType), BitWidth(bits) {}
+ static bool classof(const Type *T) {
+ return T->getKind() == TK_ExtendedIntegerType;
+ }
unsigned getSizeInBits() const {
return getBitWidth();
}
@@ -46,7 +61,10 @@ class ExtendedVectorType : public Type {
unsigned NumElements;
public:
ExtendedVectorType(EVT elty, unsigned num)
- : ElementType(elty), NumElements(num) {}
+ : Type(TK_ExtendedVectorType), ElementType(elty), NumElements(num) {}
+ static bool classof(const Type *T) {
+ return T->getKind() == TK_ExtendedVectorType;
+ }
unsigned getSizeInBits() const {
return getNumElements() * getElementType().getSizeInBits();
}
@@ -71,12 +89,12 @@ bool EVT::isExtendedFloatingPoint() const {
bool EVT::isExtendedInteger() const {
assert(isExtended() && "Type is not extended!");
- return dynamic_cast<const ExtendedIntegerType *>(LLVMTy) != 0;
+ return isa<ExtendedIntegerType>(LLVMTy);
}
bool EVT::isExtendedVector() const {
assert(isExtended() && "Type is not extended!");
- return dynamic_cast<const ExtendedVectorType *>(LLVMTy) != 0;
+ return isa<ExtendedVectorType>(LLVMTy);
}
bool EVT::isExtended64BitVector() const {
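The change above is the standard LLVM hand-rolled RTTI idiom: a kind tag plus a classof() hook lets llvm::isa<> replace dynamic_cast and keeps working under -fno-rtti. A self-contained sketch of the same pattern:

    #include "llvm/Support/Casting.h"

    class Shape {
    public:
      enum ShapeKind { SK_Circle, SK_Square };
      Shape(ShapeKind K) : Kind(K) {}
      ShapeKind getKind() const { return Kind; }
    private:
      const ShapeKind Kind;
    };

    class Circle : public Shape {
    public:
      Circle() : Shape(SK_Circle) {}
      static bool classof(const Shape *S) { return S->getKind() == SK_Circle; }
    };

    static bool isCircle(const Shape *S) { return llvm::isa<Circle>(S); }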
diff --git a/contrib/llvm/utils/TableGen/TableGen.cpp b/contrib/llvm/utils/TableGen/TableGen.cpp
index 9695b4a..49efe7e 100644
--- a/contrib/llvm/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/utils/TableGen/TableGen.cpp
@@ -20,7 +20,6 @@
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"
-#include "llvm/TableGen/TableGenAction.h"
using namespace llvm;
@@ -90,86 +89,83 @@ namespace {
Class("class", cl::desc("Print Enum list for this class"),
cl::value_desc("class name"));
- class LLVMTableGenAction : public TableGenAction {
- public:
- bool operator()(raw_ostream &OS, RecordKeeper &Records) {
- switch (Action) {
- case PrintRecords:
- OS << Records; // No argument, dump all contents
- break;
- case GenEmitter:
- EmitCodeEmitter(Records, OS);
- break;
- case GenRegisterInfo:
- EmitRegisterInfo(Records, OS);
- break;
- case GenInstrInfo:
- EmitInstrInfo(Records, OS);
- break;
- case GenCallingConv:
- EmitCallingConv(Records, OS);
- break;
- case GenAsmWriter:
- EmitAsmWriter(Records, OS);
- break;
- case GenAsmMatcher:
- EmitAsmMatcher(Records, OS);
- break;
- case GenDisassembler:
- EmitDisassembler(Records, OS);
- break;
- case GenPseudoLowering:
- EmitPseudoLowering(Records, OS);
- break;
- case GenDAGISel:
- EmitDAGISel(Records, OS);
- break;
- case GenDFAPacketizer:
- EmitDFAPacketizer(Records, OS);
- break;
- case GenFastISel:
- EmitFastISel(Records, OS);
- break;
- case GenSubtarget:
- EmitSubtarget(Records, OS);
- break;
- case GenIntrinsic:
- EmitIntrinsics(Records, OS);
- break;
- case GenTgtIntrinsic:
- EmitIntrinsics(Records, OS, true);
- break;
- case GenEDInfo:
- EmitEnhancedDisassemblerInfo(Records, OS);
- break;
- case PrintEnums:
- {
- std::vector<Record*> Recs = Records.getAllDerivedDefinitions(Class);
- for (unsigned i = 0, e = Recs.size(); i != e; ++i)
- OS << Recs[i]->getName() << ", ";
- OS << "\n";
- break;
- }
- case PrintSets:
- {
- SetTheory Sets;
- Sets.addFieldExpander("Set", "Elements");
- std::vector<Record*> Recs = Records.getAllDerivedDefinitions("Set");
- for (unsigned i = 0, e = Recs.size(); i != e; ++i) {
- OS << Recs[i]->getName() << " = [";
- const std::vector<Record*> *Elts = Sets.expand(Recs[i]);
- assert(Elts && "Couldn't expand Set instance");
- for (unsigned ei = 0, ee = Elts->size(); ei != ee; ++ei)
- OS << ' ' << (*Elts)[ei]->getName();
- OS << " ]\n";
- }
- break;
- }
- }
-
- return false;
+bool LLVMTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
+ switch (Action) {
+ case PrintRecords:
+ OS << Records; // No argument, dump all contents
+ break;
+ case GenEmitter:
+ EmitCodeEmitter(Records, OS);
+ break;
+ case GenRegisterInfo:
+ EmitRegisterInfo(Records, OS);
+ break;
+ case GenInstrInfo:
+ EmitInstrInfo(Records, OS);
+ break;
+ case GenCallingConv:
+ EmitCallingConv(Records, OS);
+ break;
+ case GenAsmWriter:
+ EmitAsmWriter(Records, OS);
+ break;
+ case GenAsmMatcher:
+ EmitAsmMatcher(Records, OS);
+ break;
+ case GenDisassembler:
+ EmitDisassembler(Records, OS);
+ break;
+ case GenPseudoLowering:
+ EmitPseudoLowering(Records, OS);
+ break;
+ case GenDAGISel:
+ EmitDAGISel(Records, OS);
+ break;
+ case GenDFAPacketizer:
+ EmitDFAPacketizer(Records, OS);
+ break;
+ case GenFastISel:
+ EmitFastISel(Records, OS);
+ break;
+ case GenSubtarget:
+ EmitSubtarget(Records, OS);
+ break;
+ case GenIntrinsic:
+ EmitIntrinsics(Records, OS);
+ break;
+ case GenTgtIntrinsic:
+ EmitIntrinsics(Records, OS, true);
+ break;
+ case GenEDInfo:
+ EmitEnhancedDisassemblerInfo(Records, OS);
+ break;
+ case PrintEnums:
+ {
+ std::vector<Record*> Recs = Records.getAllDerivedDefinitions(Class);
+ for (unsigned i = 0, e = Recs.size(); i != e; ++i)
+ OS << Recs[i]->getName() << ", ";
+ OS << "\n";
+ break;
+ }
+ case PrintSets:
+ {
+ SetTheory Sets;
+ Sets.addFieldExpander("Set", "Elements");
+ std::vector<Record*> Recs = Records.getAllDerivedDefinitions("Set");
+ for (unsigned i = 0, e = Recs.size(); i != e; ++i) {
+ OS << Recs[i]->getName() << " = [";
+ const std::vector<Record*> *Elts = Sets.expand(Recs[i]);
+ assert(Elts && "Couldn't expand Set instance");
+ for (unsigned ei = 0, ee = Elts->size(); ei != ee; ++ei)
+ OS << ' ' << (*Elts)[ei]->getName();
+ OS << " ]\n";
}
- };
+ break;
+ }
+ }
+
+ return false;
+}
}
int main(int argc, char **argv) {
@@ -177,6 +173,5 @@ int main(int argc, char **argv) {
PrettyStackTraceProgram X(argc, argv);
cl::ParseCommandLineOptions(argc, argv);
- LLVMTableGenAction Action;
- return TableGenMain(argv[0], Action);
+ return TableGenMain(argv[0], &LLVMTableGenMain);
}
diff --git a/contrib/llvm/utils/TableGen/TableGenBackends.h b/contrib/llvm/utils/TableGen/TableGenBackends.h
index 2c00c40..f0d25d8 100644
--- a/contrib/llvm/utils/TableGen/TableGenBackends.h
+++ b/contrib/llvm/utils/TableGen/TableGenBackends.h
@@ -74,5 +74,6 @@ void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS);
void EmitPseudoLowering(RecordKeeper &RK, raw_ostream &OS);
void EmitRegisterInfo(RecordKeeper &RK, raw_ostream &OS);
void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS);
+void EmitMapTable(RecordKeeper &RK, raw_ostream &OS);
} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
index f3bd373..468a1f8 100644
--- a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -209,6 +209,7 @@ static ModRMDecisionType getDecisionType(ModRMDecision &decision) {
bool satisfiesOneEntry = true;
bool satisfiesSplitRM = true;
bool satisfiesSplitReg = true;
+ bool satisfiesSplitMisc = true;
for (unsigned index = 0; index < 256; ++index) {
if (decision.instructionIDs[index] != decision.instructionIDs[0])
@@ -228,7 +229,7 @@ static ModRMDecisionType getDecisionType(ModRMDecision &decision) {
if (((index & 0xc0) != 0xc0) &&
(decision.instructionIDs[index] != decision.instructionIDs[index&0x38]))
- satisfiesSplitReg = false;
+ satisfiesSplitMisc = false;
}
if (satisfiesOneEntry)
@@ -237,9 +238,12 @@ static ModRMDecisionType getDecisionType(ModRMDecision &decision) {
if (satisfiesSplitRM)
return MODRM_SPLITRM;
- if (satisfiesSplitReg)
+ if (satisfiesSplitReg && satisfiesSplitMisc)
return MODRM_SPLITREG;
+ if (satisfiesSplitMisc)
+ return MODRM_SPLITMISC;
+
return MODRM_FULL;
}
@@ -332,6 +336,12 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1, raw_ostream &o2,
for (unsigned index = 0xc0; index < 256; index += 8)
emitOneID(o1, i1, decision.instructionIDs[index], true);
break;
+ case MODRM_SPLITMISC:
+ for (unsigned index = 0; index < 64; index += 8)
+ emitOneID(o1, i1, decision.instructionIDs[index], true);
+ for (unsigned index = 0xc0; index < 256; ++index)
+ emitOneID(o1, i1, decision.instructionIDs[index], true);
+ break;
case MODRM_FULL:
for (unsigned index = 0; index < 256; ++index)
emitOneID(o1, i1, decision.instructionIDs[index], true);
@@ -361,11 +371,18 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1, raw_ostream &o2,
case MODRM_SPLITREG:
sEntryNumber += 16;
break;
+ case MODRM_SPLITMISC:
+ sEntryNumber += 8 + 64;
+ break;
case MODRM_FULL:
sEntryNumber += 256;
break;
}
+ // We assume that the index can fit into uint16_t.
+ assert(sEntryNumber < 65536U &&
+ "Index into ModRMDecision is too large for uint16_t!");
+
++sTableNumber;
}
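A sketch of how a consumer would index a MODRM_SPLITMISC table laid out by the loops above: eight entries keyed by the reg field cover all mod != 11 bytes, followed by one entry per byte in 0xc0-0xff:

    static unsigned splitMiscIndex(unsigned modRM) {
      if ((modRM & 0xc0) == 0xc0)
        return 8 + (modRM - 0xc0);  // one slot per register-form byte
      return (modRM & 0x38) >> 3;   // memory forms collapse onto the reg field
    }
    // Total table size: 8 + 64 entries, matching sEntryNumber above.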
diff --git a/contrib/llvm/utils/TableGen/X86ModRMFilters.h b/contrib/llvm/utils/TableGen/X86ModRMFilters.h
index 19fecbc..2cbaf79 100644
--- a/contrib/llvm/utils/TableGen/X86ModRMFilters.h
+++ b/contrib/llvm/utils/TableGen/X86ModRMFilters.h
@@ -70,7 +70,7 @@ class ModFilter : public ModRMFilter {
public:
/// Constructor
///
- /// @r - True if the mod bits of the ModR/M byte must be 11; false
+ /// \param r True if the mod bits of the ModR/M byte must be 11; false
/// otherwise. The name r derives from the fact that the mod
/// bits indicate whether the R/M bits [bits 2-0] signify a
/// register or a memory operand.
@@ -98,11 +98,12 @@ class EscapeFilter : public ModRMFilter {
public:
/// Constructor
///
- /// @c0_ff - True if the ModR/M byte must fall between 0xc0 and 0xff;
- /// false otherwise.
- /// @nnn_or_modRM - If c0_ff is true, the required value of the entire ModR/M
- /// byte. If c0_ff is false, the required value of the nnn
- /// field.
+ /// \param c0_ff True if the ModR/M byte must fall between 0xc0 and 0xff;
+ /// false otherwise.
+ ///
+ /// \param nnn_or_modRM If c0_ff is true, the required value of the entire
+ /// ModR/M byte. If c0_ff is false, the required value
+ /// of the nnn field.
EscapeFilter(bool c0_ff, uint8_t nnn_or_modRM) :
ModRMFilter(),
C0_FF(c0_ff),
@@ -128,8 +129,8 @@ class AddRegEscapeFilter : public ModRMFilter {
public:
/// Constructor
///
- /// @modRM - The value of the ModR/M byte when the register operand
- /// refers to the first register in the register set.
+ /// \param modRM The value of the ModR/M byte when the register operand
+ /// refers to the first register in the register set.
AddRegEscapeFilter(uint8_t modRM) : ModRM(modRM) {
}
@@ -150,9 +151,9 @@ class ExtendedFilter : public ModRMFilter {
public:
/// Constructor
///
- /// @r - True if the mod field must be set to 11; false otherwise.
- /// The name is explained at ModFilter.
- /// @nnn - The required value of the nnn field.
+ /// \param r True if the mod field must be set to 11; false otherwise.
+ /// The name is explained at ModFilter.
+ /// \param nnn The required value of the nnn field.
ExtendedFilter(bool r, uint8_t nnn) :
ModRMFilter(),
R(r),
@@ -177,7 +178,7 @@ class ExactFilter : public ModRMFilter {
public:
/// Constructor
///
- /// @modRM - The required value of the full ModR/M byte.
+ /// \param modRM The required value of the full ModR/M byte.
ExactFilter(uint8_t modRM) :
ModRMFilter(),
ModRM(modRM) {
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
index 7ac2336..d6ed2fe 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -38,14 +38,15 @@ using namespace llvm;
MAP(D0, 45) \
MAP(D1, 46) \
MAP(D4, 47) \
- MAP(D8, 48) \
- MAP(D9, 49) \
- MAP(DA, 50) \
- MAP(DB, 51) \
- MAP(DC, 52) \
- MAP(DD, 53) \
- MAP(DE, 54) \
- MAP(DF, 55)
+ MAP(D5, 48) \
+ MAP(D8, 49) \
+ MAP(D9, 50) \
+ MAP(DA, 51) \
+ MAP(DB, 52) \
+ MAP(DC, 53) \
+ MAP(DD, 54) \
+ MAP(DE, 55) \
+ MAP(DF, 56)
// A clone of X86 since we can't depend on something that is generated.
namespace X86Local {
@@ -244,7 +245,7 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
IsSSE = (HasOpSizePrefix && (Name.find("16") == Name.npos)) ||
(Name.find("CRC32") != Name.npos);
HasFROperands = hasFROperands();
- HasVEX_LPrefix = has256BitOperands() || Rec->getValueAsBit("hasVEX_L");
+ HasVEX_LPrefix = Rec->getValueAsBit("hasVEX_L");
// Check for 64-bit inst which does not require REX
Is32Bit = false;
@@ -479,20 +480,6 @@ bool RecognizableInstr::hasFROperands() const {
return false;
}
-bool RecognizableInstr::has256BitOperands() const {
- const std::vector<CGIOperandList::OperandInfo> &OperandList = *Operands;
- unsigned numOperands = OperandList.size();
-
- for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
- const std::string &recName = OperandList[operandIndex].Rec->getName();
-
- if (!recName.compare("VR256")) {
- return true;
- }
- }
- return false;
-}
-
void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
unsigned &physicalOperandIndex,
unsigned &numPhysicalOperands,
@@ -1145,6 +1132,8 @@ OperandEncoding RecognizableInstr::immediateEncodingFromString
// register IDs in 8-bit immediates nowadays.
ENCODING("VR256", ENCODING_IB)
ENCODING("VR128", ENCODING_IB)
+ ENCODING("FR32", ENCODING_IB)
+ ENCODING("FR64", ENCODING_IB)
errs() << "Unhandled immediate encoding " << s << "\n";
llvm_unreachable("Unhandled immediate encoding");
}
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
index 542e510..9feb3c3 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -127,10 +127,7 @@ private:
/// hasFROperands - Returns true if any operand is a FR operand.
bool hasFROperands() const;
-
- /// has256BitOperands - Returns true if any operand is a 256-bit SSE operand.
- bool has256BitOperands() const;
-
+
/// typeFromString - Translates an operand type from the string provided in
/// the LLVM tables to an OperandType for use in the operand specifier.
///
@@ -143,7 +140,7 @@ private:
/// @param hasREX_WPrefix - Indicates whether the instruction has a REX.W
/// prefix. If it does, 32-bit register operands stay
/// 32-bit regardless of the operand size.
- /// @param hasOpSizePrefix- Indicates whether the instruction has an OpSize
+ /// @param hasOpSizePrefix Indicates whether the instruction has an OpSize
/// prefix. If it does not, then 16-bit register
/// operands stay 16-bit.
/// @return - The operand's type.
@@ -225,23 +222,23 @@ private:
/// emitInstructionSpecifier - Loads the instruction specifier for the current
/// instruction into a DisassemblerTables.
///
- /// @arg tables - The DisassemblerTables to populate with the specifier for
+ /// \param tables The DisassemblerTables to populate with the specifier for
/// the current instruction.
void emitInstructionSpecifier(DisassemblerTables &tables);
/// emitDecodePath - Populates the proper fields in the decode tables
/// corresponding to the decode paths for this instruction.
///
- /// @arg tables - The DisassemblerTables to populate with the decode
+ /// \param tables The DisassemblerTables to populate with the
/// decode information for the current instruction.
void emitDecodePath(DisassemblerTables &tables) const;
/// Constructor - Initializes a RecognizableInstr with the appropriate fields
/// from a CodeGenInstruction.
///
- /// @arg tables - The DisassemblerTables that the specifier will be added to.
- /// @arg insn - The CodeGenInstruction to extract information from.
- /// @arg uid - The unique ID of the current instruction.
+ /// \param tables The DisassemblerTables that the specifier will be added to.
+ /// \param insn The CodeGenInstruction to extract information from.
+ /// \param uid The unique ID of the current instruction.
RecognizableInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid);
@@ -249,11 +246,11 @@ public:
/// processInstr - Accepts a CodeGenInstruction and loads decode information
/// for it into a DisassemblerTables if appropriate.
///
- /// @arg tables - The DiassemblerTables to be populated with decode
+ /// \param tables The DisassemblerTables to be populated with decode
/// information.
- /// @arg insn - The CodeGenInstruction to be used as a source for this
+ /// \param insn The CodeGenInstruction to be used as a source for this
/// information.
- /// @uid - The unique ID of the instruction.
+ /// \param uid The unique ID of the instruction.
static void processInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid);
diff --git a/lib/clang/Makefile b/lib/clang/Makefile
index dde515e..1d993cd 100644
--- a/lib/clang/Makefile
+++ b/lib/clang/Makefile
@@ -14,7 +14,8 @@ SUBDIR= libclanganalysis \
libclangfrontendtool \
libclanglex \
libclangparse \
- libclangrewrite \
+ libclangrewritecore \
+ libclangrewritefrontend \
libclangsema \
libclangserialization \
libclangstaticanalyzercheckers \
diff --git a/lib/clang/clang.build.mk b/lib/clang/clang.build.mk
index 40dc4ab..298a2eb 100644
--- a/lib/clang/clang.build.mk
+++ b/lib/clang/clang.build.mk
@@ -16,20 +16,9 @@ BUILD_ARCH?= ${MACHINE_ARCH}
TARGET_TRIPLE?= ${TARGET_ARCH:C/amd64/x86_64/}-unknown-freebsd10.0
BUILD_TRIPLE?= ${BUILD_ARCH:C/amd64/x86_64/}-unknown-freebsd10.0
CFLAGS+= -DLLVM_DEFAULT_TARGET_TRIPLE=\"${TARGET_TRIPLE}\" \
- -DLLVM_HOSTTRIPLE=\"${BUILD_TRIPLE}\"
-
-.ifndef LLVM_REQUIRES_EH
-CXXFLAGS+= -fno-exceptions
-.else
-# If the library or program requires EH, it also requires RTTI.
-LLVM_REQUIRES_RTTI=
-.endif
-
-.ifndef LLVM_REQUIRES_RTTI
-CXXFLAGS+= -fno-rtti
-.endif
-
-CFLAGS+= -DDEFAULT_SYSROOT=\"${TOOLS_PREFIX}\"
+ -DLLVM_HOSTTRIPLE=\"${BUILD_TRIPLE}\" \
+ -DDEFAULT_SYSROOT=\"${TOOLS_PREFIX}\"
+CXXFLAGS+= -fno-exceptions -fno-rtti
.PATH: ${LLVM_SRCS}/${SRCDIR}
@@ -114,6 +103,18 @@ AttrTemplateInstantiate.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
-gen-clang-attr-template-instantiate -o ${.TARGET} \
-I ${CLANG_SRCS}/include ${.ALLSRC}
+CommentCommandInfo.inc.h: ${CLANG_SRCS}/include/clang/AST/CommentCommands.td
+ ${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/AST ${TBLINC} \
+ -gen-clang-comment-command-info -o ${.TARGET} ${.ALLSRC}
+
+CommentHTMLTags.inc.h: ${CLANG_SRCS}/include/clang/AST/CommentHTMLTags.td
+ ${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/AST ${TBLINC} \
+ -gen-clang-comment-html-tags -o ${.TARGET} ${.ALLSRC}
+
+CommentHTMLTagsProperties.inc.h: ${CLANG_SRCS}/include/clang/AST/CommentHTMLTags.td
+ ${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/AST ${TBLINC} \
+ -gen-clang-comment-html-tags-properties -o ${.TARGET} ${.ALLSRC}
+
CommentNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/CommentNodes.td
${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/AST ${TBLINC} \
-gen-clang-comment-nodes -o ${.TARGET} ${.ALLSRC}
diff --git a/lib/clang/include/Makefile b/lib/clang/include/Makefile
index 93d91c7..918c6df 100644
--- a/lib/clang/include/Makefile
+++ b/lib/clang/include/Makefile
@@ -4,7 +4,9 @@
INCSDIR=${INCLUDEDIR}/clang/3.2
-INCS= altivec.h \
+INCS= __wmmintrin_aes.h \
+ __wmmintrin_pclmul.h \
+ altivec.h \
ammintrin.h \
avx2intrin.h \
avxintrin.h \
@@ -12,6 +14,7 @@ INCS= altivec.h \
bmiintrin.h \
cpuid.h \
emmintrin.h \
+ f16cintrin.h \
fma4intrin.h \
fmaintrin.h \
immintrin.h \
@@ -23,6 +26,7 @@ INCS= altivec.h \
nmmintrin.h \
pmmintrin.h \
popcntintrin.h \
+ rtmintrin.h \
smmintrin.h \
tmmintrin.h \
wmmintrin.h \
diff --git a/lib/clang/include/MipsGenAsmMatcher.inc b/lib/clang/include/MipsGenAsmMatcher.inc
new file mode 100644
index 0000000..b9e2fd1
--- /dev/null
+++ b/lib/clang/include/MipsGenAsmMatcher.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "MipsGenAsmMatcher.inc.h"
diff --git a/lib/clang/include/MipsGenMCPseudoLowering.inc b/lib/clang/include/MipsGenMCPseudoLowering.inc
new file mode 100644
index 0000000..4e4cbbe
--- /dev/null
+++ b/lib/clang/include/MipsGenMCPseudoLowering.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "MipsGenMCPseudoLowering.inc.h"
diff --git a/lib/clang/include/clang/AST/CommentCommandInfo.inc b/lib/clang/include/clang/AST/CommentCommandInfo.inc
new file mode 100644
index 0000000..cd17190
--- /dev/null
+++ b/lib/clang/include/clang/AST/CommentCommandInfo.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "CommentCommandInfo.inc.h"
diff --git a/lib/clang/include/clang/AST/CommentHTMLTags.inc b/lib/clang/include/clang/AST/CommentHTMLTags.inc
new file mode 100644
index 0000000..0932f32
--- /dev/null
+++ b/lib/clang/include/clang/AST/CommentHTMLTags.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "CommentHTMLTags.inc.h"
diff --git a/lib/clang/include/clang/AST/CommentHTMLTagsProperties.inc b/lib/clang/include/clang/AST/CommentHTMLTagsProperties.inc
new file mode 100644
index 0000000..77af956
--- /dev/null
+++ b/lib/clang/include/clang/AST/CommentHTMLTagsProperties.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "CommentHTMLTagsProperties.inc.h"
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
index d04836e..d912900 100644
--- a/lib/clang/include/clang/Basic/Version.inc
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -5,6 +5,6 @@
#define CLANG_VERSION_MINOR 2
#define CLANG_VENDOR "FreeBSD "
-#define CLANG_VENDOR_SUFFIX " 20120817"
+#define CLANG_VENDOR_SUFFIX " 20121130"
-#define SVN_REVISION "162107"
+#define SVN_REVISION "168974"
diff --git a/lib/clang/include/llvm/Config/config.h b/lib/clang/include/llvm/Config/config.h
index 15d7e2d..1894727 100644
--- a/lib/clang/include/llvm/Config/config.h
+++ b/lib/clang/include/llvm/Config/config.h
@@ -8,6 +8,9 @@
/* Bug report URL. */
#define BUG_REPORT_URL "http://llvm.org/bugs/"
+/* Define if we have libxml2 */
+/* #undef CLANG_HAVE_LIBXML */
+
/* Relative directory for resource files */
#define CLANG_RESOURCE_DIR ""
@@ -17,6 +20,9 @@
/* Default <path> to all compiler invocations for --sysroot=<path>. */
/* #undef DEFAULT_SYSROOT */
+/* Define if you want backtraces on crash */
+#define ENABLE_BACKTRACES 1
+
/* Define if position independent code is enabled */
#define ENABLE_PIC 0
diff --git a/lib/clang/libclanganalysis/Makefile b/lib/clang/libclanganalysis/Makefile
index d96f5dc..61c45be 100644
--- a/lib/clang/libclanganalysis/Makefile
+++ b/lib/clang/libclanganalysis/Makefile
@@ -4,14 +4,16 @@ LIB= clanganalysis
SRCDIR= tools/clang/lib/Analysis
SRCS= AnalysisDeclContext.cpp \
- CallGraph.cpp \
+ BodyFarm.cpp \
CFG.cpp \
CFGReachabilityAnalysis.cpp \
CFGStmtMap.cpp \
+ CallGraph.cpp \
CocoaConventions.cpp \
Dominators.cpp \
FormatString.cpp \
LiveVariables.cpp \
+ ObjCNoReturn.cpp \
PostOrderCFGView.cpp \
PrintfFormatString.cpp \
ProgramPoint.cpp \
diff --git a/lib/clang/libclangast/Makefile b/lib/clang/libclangast/Makefile
index a80bc7e..0b851e6 100644
--- a/lib/clang/libclangast/Makefile
+++ b/lib/clang/libclangast/Makefile
@@ -63,6 +63,9 @@ SRCS= APValue.cpp \
TGHDRS= AttrImpl \
AttrList \
Attrs \
+ CommentCommandInfo \
+ CommentHTMLTags \
+ CommentHTMLTagsProperties \
CommentNodes \
DeclNodes \
DiagnosticASTKinds \
diff --git a/lib/clang/libclangrewritecore/Makefile b/lib/clang/libclangrewritecore/Makefile
new file mode 100644
index 0000000..3a628d0
--- /dev/null
+++ b/lib/clang/libclangrewritecore/Makefile
@@ -0,0 +1,18 @@
+# $FreeBSD$
+
+LIB= clangrewritecore
+
+SRCDIR= tools/clang/lib/Rewrite/Core
+SRCS= DeltaTree.cpp \
+ HTMLRewrite.cpp \
+ RewriteRope.cpp \
+ Rewriter.cpp \
+ TokenRewriter.cpp
+
+TGHDRS= AttrList \
+ Attrs \
+ DeclNodes \
+ DiagnosticCommonKinds \
+ StmtNodes
+
+.include "../clang.lib.mk"
diff --git a/lib/clang/libclangrewrite/Makefile b/lib/clang/libclangrewritefrontend/Makefile
index e165b0b..35de008 100644
--- a/lib/clang/libclangrewrite/Makefile
+++ b/lib/clang/libclangrewritefrontend/Makefile
@@ -1,26 +1,20 @@
# $FreeBSD$
-LIB= clangrewrite
+LIB= clangrewritefrontend
-SRCDIR= tools/clang/lib/Rewrite
-SRCS= DeltaTree.cpp \
- FixItRewriter.cpp \
+SRCDIR= tools/clang/lib/Rewrite/Frontend
+SRCS= FixItRewriter.cpp \
FrontendActions.cpp \
HTMLPrint.cpp \
- HTMLRewrite.cpp \
InclusionRewriter.cpp \
RewriteMacros.cpp \
RewriteModernObjC.cpp \
RewriteObjC.cpp \
- RewriteRope.cpp \
- RewriteTest.cpp \
- Rewriter.cpp \
- TokenRewriter.cpp
+ RewriteTest.cpp
TGHDRS= AttrList \
AttrParsedAttrList \
Attrs \
- CommentNodes \
DeclNodes \
DiagnosticCommonKinds \
DiagnosticFrontendKinds \
diff --git a/lib/clang/libclangsema/Makefile b/lib/clang/libclangsema/Makefile
index 83a1c56..235fa13 100644
--- a/lib/clang/libclangsema/Makefile
+++ b/lib/clang/libclangsema/Makefile
@@ -10,7 +10,9 @@ SRCS= AnalysisBasedWarnings.cpp \
DelayedDiagnostic.cpp \
IdentifierResolver.cpp \
JumpDiagnostics.cpp \
+ MultiplexExternalSemaSource.cpp \
Scope.cpp \
+ ScopeInfo.cpp \
Sema.cpp \
SemaAccess.cpp \
SemaAttr.cpp \
@@ -36,6 +38,7 @@ SRCS= AnalysisBasedWarnings.cpp \
SemaOverload.cpp \
SemaPseudoObject.cpp \
SemaStmt.cpp \
+ SemaStmtAsm.cpp \
SemaStmtAttr.cpp \
SemaTemplate.cpp \
SemaTemplateDeduction.cpp \
diff --git a/lib/clang/libclangstaticanalyzercheckers/Makefile b/lib/clang/libclangstaticanalyzercheckers/Makefile
index 1ad97b2..96274d9 100644
--- a/lib/clang/libclangstaticanalyzercheckers/Makefile
+++ b/lib/clang/libclangstaticanalyzercheckers/Makefile
@@ -3,8 +3,7 @@
LIB= clangstaticanalyzercheckers
SRCDIR= tools/clang/lib/StaticAnalyzer/Checkers
-SRCS= AdjustedReturnValueChecker.cpp \
- AnalyzerStatsChecker.cpp \
+SRCS= AnalyzerStatsChecker.cpp \
ArrayBoundChecker.cpp \
ArrayBoundCheckerV2.cpp \
AttrNonNullChecker.cpp \
@@ -27,12 +26,14 @@ SRCS= AdjustedReturnValueChecker.cpp \
DeadStoresChecker.cpp \
DebugCheckers.cpp \
DereferenceChecker.cpp \
+ DirectIvarAssignment.cpp \
DivZeroChecker.cpp \
DynamicTypePropagation.cpp \
ExprInspectionChecker.cpp \
FixedAddressChecker.cpp \
GenericTaintChecker.cpp \
IdempotentOperationChecker.cpp \
+ IvarInvalidationChecker.cpp \
LLVMConventionsChecker.cpp \
MacOSKeychainAPIChecker.cpp \
MacOSXAPIChecker.cpp \
@@ -42,10 +43,10 @@ SRCS= AdjustedReturnValueChecker.cpp \
NSAutoreleasePoolChecker.cpp \
NSErrorChecker.cpp \
NoReturnFunctionChecker.cpp \
- OSAtomicChecker.cpp \
ObjCAtSyncChecker.cpp \
ObjCContainersASTChecker.cpp \
ObjCContainersChecker.cpp \
+ ObjCMissingSuperCallChecker.cpp \
ObjCSelfInitChecker.cpp \
ObjCUnusedIVarsChecker.cpp \
PointerArithChecker.cpp \
@@ -54,6 +55,7 @@ SRCS= AdjustedReturnValueChecker.cpp \
RetainCountChecker.cpp \
ReturnPointerRangeChecker.cpp \
ReturnUndefChecker.cpp \
+ SimpleStreamChecker.cpp \
StackAddrEscapeChecker.cpp \
StreamChecker.cpp \
TaintTesterChecker.cpp \
diff --git a/lib/clang/libclangstaticanalyzercore/Makefile b/lib/clang/libclangstaticanalyzercore/Makefile
index 75b3d66..82ff5a3 100644
--- a/lib/clang/libclangstaticanalyzercore/Makefile
+++ b/lib/clang/libclangstaticanalyzercore/Makefile
@@ -3,9 +3,9 @@
LIB= clangstaticanalyzercore
SRCDIR= tools/clang/lib/StaticAnalyzer/Core
-SRCS= AnalysisManager.cpp \
- APSIntType.cpp \
- BasicConstraintManager.cpp \
+SRCS= APSIntType.cpp \
+ AnalysisManager.cpp \
+ AnalyzerOptions.cpp \
BasicValueFactory.cpp \
BlockCounter.cpp \
BugReporter.cpp \
@@ -16,6 +16,7 @@ SRCS= AnalysisManager.cpp \
CheckerHelpers.cpp \
CheckerManager.cpp \
CheckerRegistry.cpp \
+ ConstraintManager.cpp \
CoreEngine.cpp \
Environment.cpp \
ExplodedGraph.cpp \
diff --git a/lib/clang/libllvmanalysis/Makefile b/lib/clang/libllvmanalysis/Makefile
index cb1e345..3c01352 100644
--- a/lib/clang/libllvmanalysis/Makefile
+++ b/lib/clang/libllvmanalysis/Makefile
@@ -18,7 +18,9 @@ SRCS= AliasAnalysis.cpp \
CaptureTracking.cpp \
CodeMetrics.cpp \
ConstantFolding.cpp \
+ CostModel.cpp \
DbgInfoPrinter.cpp \
+ DependenceAnalysis.cpp \
DomPrinter.cpp \
DominanceFrontier.cpp \
IVUsers.cpp \
@@ -32,7 +34,6 @@ SRCS= AliasAnalysis.cpp \
LibCallSemantics.cpp \
Lint.cpp \
Loads.cpp \
- LoopDependenceAnalysis.cpp \
LoopInfo.cpp \
LoopPass.cpp \
MemDepPrinter.cpp \
@@ -50,6 +51,8 @@ SRCS= AliasAnalysis.cpp \
ProfileInfoLoader.cpp \
ProfileInfoLoaderPass.cpp \
ProfileVerifierPass.cpp \
+ ProfileDataLoader.cpp \
+ ProfileDataLoaderPass.cpp \
RegionInfo.cpp \
RegionPass.cpp \
RegionPrinter.cpp \
diff --git a/lib/clang/libllvmarmcodegen/Makefile b/lib/clang/libllvmarmcodegen/Makefile
index b18f12c..6ae9251 100644
--- a/lib/clang/libllvmarmcodegen/Makefile
+++ b/lib/clang/libllvmarmcodegen/Makefile
@@ -9,7 +9,6 @@ SRCS= ARMAsmPrinter.cpp \
ARMCodeEmitter.cpp \
ARMConstantIslandPass.cpp \
ARMConstantPoolValue.cpp \
- ARMELFWriterInfo.cpp \
ARMExpandPseudoInsts.cpp \
ARMFastISel.cpp \
ARMFrameLowering.cpp \
diff --git a/lib/clang/libllvmcodegen/Makefile b/lib/clang/libllvmcodegen/Makefile
index a0db98f..8beb583 100644
--- a/lib/clang/libllvmcodegen/Makefile
+++ b/lib/clang/libllvmcodegen/Makefile
@@ -12,8 +12,8 @@ SRCS= AggressiveAntiDepBreaker.cpp \
CodeGen.cpp \
CodePlacementOpt.cpp \
CriticalAntiDepBreaker.cpp \
- DeadMachineInstructionElim.cpp \
DFAPacketizer.cpp \
+ DeadMachineInstructionElim.cpp \
DwarfEHPrepare.cpp \
EarlyIfConversion.cpp \
EdgeBundles.cpp \
@@ -35,19 +35,19 @@ SRCS= AggressiveAntiDepBreaker.cpp \
LiveInterval.cpp \
LiveIntervalAnalysis.cpp \
LiveIntervalUnion.cpp \
+ LiveRangeCalc.cpp \
+ LiveRangeEdit.cpp \
LiveRegMatrix.cpp \
LiveStackAnalysis.cpp \
LiveVariables.cpp \
- LiveRangeCalc.cpp \
- LiveRangeEdit.cpp \
LocalStackSlotAllocation.cpp \
MachineBasicBlock.cpp \
MachineBlockFrequencyInfo.cpp \
MachineBlockPlacement.cpp \
MachineBranchProbabilityInfo.cpp \
+ MachineCSE.cpp \
MachineCodeEmitter.cpp \
MachineCopyPropagation.cpp \
- MachineCSE.cpp \
MachineDominators.cpp \
MachineFunction.cpp \
MachineFunctionAnalysis.cpp \
@@ -61,6 +61,7 @@ SRCS= AggressiveAntiDepBreaker.cpp \
MachineModuleInfo.cpp \
MachineModuleInfoImpls.cpp \
MachinePassRegistry.cpp \
+ MachinePostDominators.cpp \
MachineRegisterInfo.cpp \
MachineSSAUpdater.cpp \
MachineScheduler.cpp \
@@ -94,9 +95,10 @@ SRCS= AggressiveAntiDepBreaker.cpp \
ShrinkWrapping.cpp \
SjLjEHPrepare.cpp \
SlotIndexes.cpp \
- Spiller.cpp \
SpillPlacement.cpp \
+ Spiller.cpp \
SplitKit.cpp \
+ StackColoring.cpp \
StackProtector.cpp \
StackSlotColoring.cpp \
StrongPHIElimination.cpp \
@@ -105,6 +107,7 @@ SRCS= AggressiveAntiDepBreaker.cpp \
TargetInstrInfoImpl.cpp \
TargetLoweringObjectFileImpl.cpp \
TargetOptionsImpl.cpp \
+ TargetSchedule.cpp \
TwoAddressInstructionPass.cpp \
UnreachableBlockElim.cpp \
VirtRegMap.cpp
diff --git a/lib/clang/libllvmcore/Makefile b/lib/clang/libllvmcore/Makefile
index 8de6731..5b597fe 100644
--- a/lib/clang/libllvmcore/Makefile
+++ b/lib/clang/libllvmcore/Makefile
@@ -11,10 +11,12 @@ SRCS= AsmWriter.cpp \
Constants.cpp \
Core.cpp \
DIBuilder.cpp \
+ DataLayout.cpp \
DebugInfo.cpp \
DebugLoc.cpp \
Dominators.cpp \
Function.cpp \
+ GCOV.cpp \
GVMaterializer.cpp \
Globals.cpp \
IRBuilder.cpp \
@@ -33,6 +35,7 @@ SRCS= AsmWriter.cpp \
PrintModulePass.cpp \
Type.cpp \
TypeFinder.cpp \
+ TargetTransformInfo.cpp \
Use.cpp \
User.cpp \
Value.cpp \
diff --git a/lib/clang/libllvmdebuginfo/Makefile b/lib/clang/libllvmdebuginfo/Makefile
index b724157..e12289b 100644
--- a/lib/clang/libllvmdebuginfo/Makefile
+++ b/lib/clang/libllvmdebuginfo/Makefile
@@ -12,6 +12,7 @@ SRCS= DIContext.cpp \
DWARFDebugAranges.cpp \
DWARFDebugInfoEntry.cpp \
DWARFDebugLine.cpp \
+ DWARFDebugRangeList.cpp \
DWARFFormValue.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvminstrumentation/Makefile b/lib/clang/libllvminstrumentation/Makefile
index 8e7e7a2..ef09370 100644
--- a/lib/clang/libllvminstrumentation/Makefile
+++ b/lib/clang/libllvminstrumentation/Makefile
@@ -4,9 +4,9 @@ LIB= llvminstrumentation
SRCDIR= lib/Transforms/Instrumentation
SRCS= AddressSanitizer.cpp \
+ BlackList.cpp \
BoundsChecking.cpp \
EdgeProfiling.cpp \
- FunctionBlackList.cpp \
GCOVProfiling.cpp \
Instrumentation.cpp \
OptimalEdgeProfiling.cpp \
diff --git a/lib/clang/libllvmipo/Makefile b/lib/clang/libllvmipo/Makefile
index a493532..b1e9016 100644
--- a/lib/clang/libllvmipo/Makefile
+++ b/lib/clang/libllvmipo/Makefile
@@ -6,6 +6,7 @@ LIB= llvmipo
SRCDIR= lib/Transforms/IPO
SRCS= ArgumentPromotion.cpp \
+ BarrierNoopPass.cpp \
ConstantMerge.cpp \
DeadArgumentElimination.cpp \
ExtractGV.cpp \
diff --git a/lib/clang/libllvmmcjit/Makefile b/lib/clang/libllvmmcjit/Makefile
index 553186f..207fd81 100644
--- a/lib/clang/libllvmmcjit/Makefile
+++ b/lib/clang/libllvmmcjit/Makefile
@@ -7,8 +7,4 @@ LIB= llvmmcjit
SRCDIR= lib/ExecutionEngine/MCJIT
SRCS= MCJIT.cpp
-.if ${MK_CLANG_EXTRAS} != "no"
-SRCS+= MCJITMemoryManager.cpp
-.endif
-
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmmipsasmparser/Makefile b/lib/clang/libllvmmipsasmparser/Makefile
index dd95394..f5fb3c3 100644
--- a/lib/clang/libllvmmipsasmparser/Makefile
+++ b/lib/clang/libllvmmipsasmparser/Makefile
@@ -6,7 +6,8 @@ SRCDIR= lib/Target/Mips/AsmParser
INCDIR= lib/Target/Mips
SRCS= MipsAsmParser.cpp
-TGHDRS= MipsGenInstrInfo \
+TGHDRS= MipsGenAsmMatcher \
+ MipsGenInstrInfo \
MipsGenRegisterInfo \
MipsGenSubtargetInfo
diff --git a/lib/clang/libllvmmipscodegen/Makefile b/lib/clang/libllvmmipscodegen/Makefile
index ae7d2f6..c33d954 100644
--- a/lib/clang/libllvmmipscodegen/Makefile
+++ b/lib/clang/libllvmmipscodegen/Makefile
@@ -33,6 +33,7 @@ TGHDRS= Intrinsics \
MipsGenCodeEmitter \
MipsGenDAGISel \
MipsGenInstrInfo \
+ MipsGenMCPseudoLowering \
MipsGenRegisterInfo \
MipsGenSubtargetInfo
diff --git a/lib/clang/libllvmmipsdesc/Makefile b/lib/clang/libllvmmipsdesc/Makefile
index 5c17588..120b866 100644
--- a/lib/clang/libllvmmipsdesc/Makefile
+++ b/lib/clang/libllvmmipsdesc/Makefile
@@ -4,6 +4,7 @@ LIB= llvmmipsdesc
SRCDIR= lib/Target/Mips/MCTargetDesc
SRCS= MipsAsmBackend.cpp \
+ MipsDirectObjLower.cpp \
MipsELFObjectWriter.cpp \
MipsMCAsmInfo.cpp \
MipsMCCodeEmitter.cpp \
diff --git a/lib/clang/libllvmscalaropts/Makefile b/lib/clang/libllvmscalaropts/Makefile
index f89b0a2..87eaf23 100644
--- a/lib/clang/libllvmscalaropts/Makefile
+++ b/lib/clang/libllvmscalaropts/Makefile
@@ -20,6 +20,7 @@ SRCS= ADCE.cpp \
LICM.cpp \
LoopDeletion.cpp \
LoopIdiomRecognize.cpp \
+ LoopInstSimplify.cpp \
LoopRotation.cpp \
LoopStrengthReduce.cpp \
LoopUnrollPass.cpp \
@@ -30,6 +31,8 @@ SRCS= ADCE.cpp \
Reassociate.cpp \
Reg2Mem.cpp \
SCCP.cpp \
+ SROA.cpp \
+ Scalar.cpp \
ScalarReplAggregates.cpp \
SimplifyCFGPass.cpp \
SimplifyLibCalls.cpp \
diff --git a/lib/clang/libllvmtablegen/Makefile b/lib/clang/libllvmtablegen/Makefile
index 5ac6954..8dffad0 100644
--- a/lib/clang/libllvmtablegen/Makefile
+++ b/lib/clang/libllvmtablegen/Makefile
@@ -7,7 +7,6 @@ SRCS= Error.cpp \
Main.cpp \
Record.cpp \
StringMatcher.cpp \
- TableGenAction.cpp \
TableGenBackend.cpp \
TGLexer.cpp \
TGParser.cpp
diff --git a/lib/clang/libllvmtarget/Makefile b/lib/clang/libllvmtarget/Makefile
index eaad17e..b82377e 100644
--- a/lib/clang/libllvmtarget/Makefile
+++ b/lib/clang/libllvmtarget/Makefile
@@ -5,15 +5,15 @@ LIB= llvmtarget
SRCDIR= lib/Target
SRCS= Mangler.cpp \
Target.cpp \
- TargetData.cpp \
- TargetELFWriterInfo.cpp \
TargetInstrInfo.cpp \
TargetIntrinsicInfo.cpp \
TargetJITInfo.cpp \
TargetLibraryInfo.cpp \
TargetLoweringObjectFile.cpp \
TargetMachine.cpp \
+ TargetMachineC.cpp \
TargetRegisterInfo.cpp \
- TargetSubtargetInfo.cpp
+ TargetSubtargetInfo.cpp \
+ TargetTransformImpl.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmtransformutils/Makefile b/lib/clang/libllvmtransformutils/Makefile
index f8afaec..0ed2624 100644
--- a/lib/clang/libllvmtransformutils/Makefile
+++ b/lib/clang/libllvmtransformutils/Makefile
@@ -9,6 +9,7 @@ SRCS= AddrModeMatcher.cpp \
BasicBlockUtils.cpp \
BreakCriticalEdges.cpp \
BuildLibCalls.cpp \
+ BypassSlowDivision.cpp \
CloneFunction.cpp \
CloneModule.cpp \
CmpInstAnalysis.cpp \
@@ -16,6 +17,7 @@ SRCS= AddrModeMatcher.cpp \
DemoteRegToStack.cpp \
InlineFunction.cpp \
InstructionNamer.cpp \
+ IntegerDivision.cpp \
LCSSA.cpp \
Local.cpp \
LoopSimplify.cpp \
@@ -25,18 +27,19 @@ SRCS= AddrModeMatcher.cpp \
LowerInvoke.cpp \
LowerSwitch.cpp \
Mem2Reg.cpp \
+ MetaRenamer.cpp \
ModuleUtils.cpp \
PromoteMemoryToRegister.cpp \
SSAUpdater.cpp \
SimplifyCFG.cpp \
SimplifyIndVar.cpp \
SimplifyInstructions.cpp \
+ SimplifyLibCalls.cpp \
UnifyFunctionExitNodes.cpp \
ValueMapper.cpp
.if ${MK_CLANG_EXTRAS} != "no"
-SRCS+= SimplifyInstructions.cpp \
- Utils.cpp
+SRCS+= Utils.cpp
.endif
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmvectorize/Makefile b/lib/clang/libllvmvectorize/Makefile
index 61a37b5..444d672 100644
--- a/lib/clang/libllvmvectorize/Makefile
+++ b/lib/clang/libllvmvectorize/Makefile
@@ -4,6 +4,7 @@ LIB= llvmvectorize
SRCDIR= lib/Transforms/Vectorize
SRCS= BBVectorize.cpp \
+ LoopVectorize.cpp \
Vectorize.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmx86codegen/Makefile b/lib/clang/libllvmx86codegen/Makefile
index a61cad9..bcc7aa0 100644
--- a/lib/clang/libllvmx86codegen/Makefile
+++ b/lib/clang/libllvmx86codegen/Makefile
@@ -6,7 +6,6 @@ SRCDIR= lib/Target/X86
SRCS= X86AsmPrinter.cpp \
X86COFFMachineModuleInfo.cpp \
X86CodeEmitter.cpp \
- X86ELFWriterInfo.cpp \
X86FastISel.cpp \
X86FloatingPoint.cpp \
X86FrameLowering.cpp \
diff --git a/tools/build/mk/OptionalObsoleteFiles.inc b/tools/build/mk/OptionalObsoleteFiles.inc
index 018596c..311d917 100644
--- a/tools/build/mk/OptionalObsoleteFiles.inc
+++ b/tools/build/mk/OptionalObsoleteFiles.inc
@@ -755,6 +755,8 @@ OLD_FILES+=usr/include/clang/3.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.1/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.1
+OLD_FILES+=usr/include/clang/3.2/__wmmintrin_aes.h
+OLD_FILES+=usr/include/clang/3.2/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.2/altivec.h
OLD_FILES+=usr/include/clang/3.2/ammintrin.h
OLD_FILES+=usr/include/clang/3.2/avx2intrin.h
@@ -763,6 +765,7 @@ OLD_FILES+=usr/include/clang/3.2/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.2/bmiintrin.h
OLD_FILES+=usr/include/clang/3.2/cpuid.h
OLD_FILES+=usr/include/clang/3.2/emmintrin.h
+OLD_FILES+=usr/include/clang/3.2/f16cintrin.h
OLD_FILES+=usr/include/clang/3.2/fma4intrin.h
OLD_FILES+=usr/include/clang/3.2/fmaintrin.h
OLD_FILES+=usr/include/clang/3.2/immintrin.h
@@ -774,6 +777,7 @@ OLD_FILES+=usr/include/clang/3.2/module.map
OLD_FILES+=usr/include/clang/3.2/nmmintrin.h
OLD_FILES+=usr/include/clang/3.2/pmmintrin.h
OLD_FILES+=usr/include/clang/3.2/popcntintrin.h
+OLD_FILES+=usr/include/clang/3.2/rtmintrin.h
OLD_FILES+=usr/include/clang/3.2/smmintrin.h
OLD_FILES+=usr/include/clang/3.2/tmmintrin.h
OLD_FILES+=usr/include/clang/3.2/unwind.h
diff --git a/usr.bin/clang/clang-tblgen/Makefile b/usr.bin/clang/clang-tblgen/Makefile
index bc949ec..9e8af45 100644
--- a/usr.bin/clang/clang-tblgen/Makefile
+++ b/usr.bin/clang/clang-tblgen/Makefile
@@ -6,6 +6,8 @@ NO_MAN=
SRCDIR= tools/clang/utils/TableGen
SRCS= ClangASTNodesEmitter.cpp \
ClangAttrEmitter.cpp \
+ ClangCommentCommandInfoEmitter.cpp \
+ ClangCommentHTMLTagsEmitter.cpp \
ClangDiagnosticsEmitter.cpp \
ClangSACheckersEmitter.cpp \
NeonEmitter.cpp \
diff --git a/usr.bin/clang/clang/Makefile b/usr.bin/clang/clang/Makefile
index 0c3a02a..6fa9b80 100644
--- a/usr.bin/clang/clang/Makefile
+++ b/usr.bin/clang/clang/Makefile
@@ -47,7 +47,8 @@ LIBDEPS=clangfrontendtool \
clangstaticanalyzercore \
clanganalysis \
clangarcmigrate \
- clangrewrite \
+ clangrewritefrontend \
+ clangrewritecore \
clangedit \
clangast \
clanglex \
@@ -66,12 +67,12 @@ LIBDEPS=clangfrontendtool \
llvmarmdesc \
llvmarminfo \
llvmarminstprinter \
- llvmmipscodegen \
- llvmmipsdisassembler \
llvmmipsasmparser \
+ llvmmipscodegen \
llvmmipsdesc \
- llvmmipsinfo \
llvmmipsinstprinter \
+ llvmmipsdisassembler \
+ llvmmipsinfo \
llvmpowerpccodegen \
llvmpowerpcdesc \
llvmpowerpcinfo \
diff --git a/usr.bin/clang/llc/Makefile b/usr.bin/clang/llc/Makefile
index b99f876..3606fef 100644
--- a/usr.bin/clang/llc/Makefile
+++ b/usr.bin/clang/llc/Makefile
@@ -13,12 +13,12 @@ LIBDEPS=llvmasmparser \
llvmarmdesc \
llvmarminfo \
llvmarminstprinter \
- llvmmipscodegen \
- llvmmipsdisassembler \
llvmmipsasmparser \
+ llvmmipscodegen \
llvmmipsdesc \
- llvmmipsinfo \
llvmmipsinstprinter \
+ llvmmipsdisassembler \
+ llvmmipsinfo \
llvmpowerpccodegen \
llvmpowerpcdesc \
llvmpowerpcinfo \
diff --git a/usr.bin/clang/lli/Makefile b/usr.bin/clang/lli/Makefile
index 6ce563b..d6ecab5 100644
--- a/usr.bin/clang/lli/Makefile
+++ b/usr.bin/clang/lli/Makefile
@@ -3,9 +3,13 @@
PROG_CXX=lli
SRCDIR= tools/lli
-SRCS= lli.cpp
+SRCS= lli.cpp \
+ RecordingMemoryManager.cpp \
+ RemoteTarget.cpp
-LIBDEPS=llvmasmparser \
+LIBDEPS=llvmx86asmparser \
+ llvmx86disassembler \
+ llvmasmparser \
llvmbitreader \
llvmx86codegen \
llvmx86desc \
diff --git a/usr.bin/clang/llvm-mc/Makefile b/usr.bin/clang/llvm-mc/Makefile
index 1c08894..c2106e1 100644
--- a/usr.bin/clang/llvm-mc/Makefile
+++ b/usr.bin/clang/llvm-mc/Makefile
@@ -14,12 +14,12 @@ LIBDEPS=llvmmcdisassembler \
llvmarmdesc \
llvmarminfo \
llvmarminstprinter \
- llvmmipscodegen \
- llvmmipsdisassembler \
llvmmipsasmparser \
+ llvmmipscodegen \
llvmmipsdesc \
- llvmmipsinfo \
llvmmipsinstprinter \
+ llvmmipsdisassembler \
+ llvmmipsinfo \
llvmpowerpccodegen \
llvmpowerpcdesc \
llvmpowerpcinfo \
diff --git a/usr.bin/clang/llvm-objdump/Makefile b/usr.bin/clang/llvm-objdump/Makefile
index 351c06c..38d23ae 100644
--- a/usr.bin/clang/llvm-objdump/Makefile
+++ b/usr.bin/clang/llvm-objdump/Makefile
@@ -16,12 +16,12 @@ LIBDEPS=llvmmcdisassembler \
llvmarmdesc \
llvmarminfo \
llvmarminstprinter \
- llvmmipscodegen \
- llvmmipsdisassembler \
llvmmipsasmparser \
+ llvmmipscodegen \
llvmmipsdesc \
- llvmmipsinfo \
llvmmipsinstprinter \
+ llvmmipsdisassembler \
+ llvmmipsinfo \
llvmpowerpccodegen \
llvmpowerpcdesc \
llvmpowerpcinfo \
diff --git a/usr.bin/clang/llvm-rtdyld/Makefile b/usr.bin/clang/llvm-rtdyld/Makefile
index e3b5d87..e899d70 100644
--- a/usr.bin/clang/llvm-rtdyld/Makefile
+++ b/usr.bin/clang/llvm-rtdyld/Makefile
@@ -15,12 +15,12 @@ LIBDEPS=llvmjit \
llvmarmdesc \
llvmarminfo \
llvmarminstprinter \
- llvmmipscodegen \
- llvmmipsdisassembler \
llvmmipsasmparser \
+ llvmmipscodegen \
llvmmipsdesc \
- llvmmipsinfo \
llvmmipsinstprinter \
+ llvmmipsdisassembler \
+ llvmmipsinfo \
llvmpowerpccodegen \
llvmpowerpcdesc \
llvmpowerpcinfo \
diff --git a/usr.bin/clang/opt/Makefile b/usr.bin/clang/opt/Makefile
index 14403ce1..5afd58d 100644
--- a/usr.bin/clang/opt/Makefile
+++ b/usr.bin/clang/opt/Makefile
@@ -9,7 +9,34 @@ SRCS= AnalysisWrappers.cpp \
opt.cpp
TGHDRS= Intrinsics
-LIBDEPS=llvmipo \
+LIBDEPS=llvmarmdisassembler \
+ llvmarmasmparser \
+ llvmarmcodegen \
+ llvmarmdesc \
+ llvmarminfo \
+ llvmarminstprinter \
+ llvmmipsasmparser \
+ llvmmipscodegen \
+ llvmmipsdesc \
+ llvmmipsinstprinter \
+ llvmmipsdisassembler \
+ llvmmipsinfo \
+ llvmpowerpccodegen \
+ llvmpowerpcdesc \
+ llvmpowerpcinfo \
+ llvmpowerpcinstprinter \
+ llvmx86asmparser \
+ llvmx86codegen \
+ llvmselectiondag \
+ llvmasmprinter \
+ llvmmcparser \
+ llvmcodegen \
+ llvmx86disassembler \
+ llvmx86desc \
+ llvmx86info \
+ llvmx86instprinter \
+ llvmx86utils \
+ llvmipo \
llvmvectorize \
llvmscalaropts \
llvminstcombine \
diff --git a/usr.bin/clang/tblgen/Makefile b/usr.bin/clang/tblgen/Makefile
index c74ad83..90a5789 100644
--- a/usr.bin/clang/tblgen/Makefile
+++ b/usr.bin/clang/tblgen/Makefile
@@ -10,6 +10,7 @@ SRCS= AsmMatcherEmitter.cpp \
CodeEmitterGen.cpp \
CodeGenDAGPatterns.cpp \
CodeGenInstruction.cpp \
+ CodeGenMapTable.cpp \
CodeGenRegisters.cpp \
CodeGenSchedule.cpp \
CodeGenTarget.cpp \